diff --git "a/036.jsonl" "b/036.jsonl" new file mode 100644--- /dev/null +++ "b/036.jsonl" @@ -0,0 +1,680 @@ +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_656","text":"#!\/usr\/bin\/env python\n\n##########################################################################################################\n# Modulo Bioestadistica - 2015 de la Universidad del Comahue. Centro Regional Bariloche\n#http:\/\/crubweb.uncoma.edu.ar\/\n# Dr. \n# email: \n# licence: MIT. http:\/\/opensource.org\/licenses\/MIT \n\n#Ejemplo del libro Sokal, Introduccion a la bioestadistica, Reverte, 2002. version en ingles p.79\n#Distribucion normal o gaussiana\n#Ejecutar:\n#python 09_introNormal.py\n##########################################################################################################\n\n\n# Importo librerias\nimport numpy as np\nimport pylab as P\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\nimport math\nimport scipy.stats as stats\n\n#Generador de coef binomial\ndef normpdf(x, mean, sd):\n var = float(sd)**2\n pi = 3.1415926\n denom = (2*pi*var)**.5\n num = math.exp(-(float(x)-float(mean))**2\/(2*var))\n return num\/denom\n\n\n#Generador de coef binomial\ndef binCoefGen(k):\n f = math.factorial\n coef = []\n print k\n for i in range(k+1):\n #print i\n a = f(k)\/f(i)\/f(k-i)\n print a\n coef.append(a)\n return coef\n \n#Defino el k \nprint \"Efecto del k -> inf. Probar k = 5, 10, 50, 150. criterio pqk > 3\"\nk = 150\n\n#calculo los bin\nbins = np.array([x for x in range(0, k+1)])\n\n#Defino la probabilidad de ocurrencia \np = 0.5\n\n\ncolumna2p = np.array([p**(k-x) for x in range(0, k+1)])\nprint columna2p\n\n#calculo la probabilidad de no ocurrencia\nq = 1-p\ncolumna3q = np.array([q**x for x in range(0, k+1)])\nprint columna3q\nprint \"approx: \", p*q*k\nbincoef = np.array(binCoefGen(k))\n\ncolumna4relfrec = np.array([columna2p[x]*columna3q[x]*bincoef[x] for x in range(0, k+1)])\nprint columna4relfrec\n\ntotalMuestras = 1\nfrecAbsTeor = np.array([columna4relfrec[x]*totalMuestras for x in range(0, k+1)])\nprint frecAbsTeor\nmean= k*p\nsigma = math.sqrt(k)*p*q\nprint \"mean teorico:\", mean\nprint \"sigma teorico:\", sigma\n#Defino titulos\nplt.ylabel(\"Funcion densidad de probabilidad Normal\")\nplt.xlabel(\"Y\")\nplt.title(\"Distribuciones normales\")\n\n#Defino ancho de barra\nwidth = 1.0\n\n\n#Defino 2 graficas de barras\nnormalWLib1stats = stats.norm.pdf(bins,mean,2*sigma) \nnormalWLib2mlab = mlab.normpdf(bins,mean,2*sigma)\n\n#Check with other library\n\nmeans = [4,8,8]\nstds = [2,2,1]\n\nnormVals = []\nnormVals.append([])\nnormVals.append([])\nnormVals.append([])\n\nfor k in range(len(means)):\n\tfor i in range(len(bins)):\n\t\tnormVals[k].append(normpdf(bins[i],means[k],stds[k]))\n\ncolors = [\"red\",\"blue\",\"green\"]\nlabels = [\"Propongan 1\",\"Propongan 2\",\"Propongan 3\"]\n\n#labels = [\"Norm mu=%d sigma=%d\"%(means[0],stds[0]),\"Norm mu=%d sigma=%d\"%(means[1],stds[1]),\"Norm mu=%d sigma=%d\"%(means[2],stds[2])]\n\nfor k in range(len(means)):\n\tplt.plot(bins,normVals[k],lw = 2, color = colors[k], label=labels[k])\t\nplt.xlim(0,15)\n\n#Ubico leyenda\nplt.legend( loc='upper left', numpoints = 1 )\n\n#Dibujo\nplt.show()\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_657","text":"import unittest\nfrom unittest import TestCase\n\nfrom escnn.group import CyclicGroup\nfrom escnn.group import DihedralGroup\n\nimport numpy as np\nimport scipy.sparse as sparse\n\n\nclass 
TestComputeRegularRepresentations(TestCase):\n \n def test_dihedral_rr_odd(self):\n dg = DihedralGroup(9)\n self.dihedral_rr_eval(dg, dg.representations['regular'])\n \n def test_dihedral_rr_even(self):\n dg = DihedralGroup(10)\n self.dihedral_rr_eval(dg, dg.representations['regular'])\n \n def test_dihedral_rr_large(self):\n dg = DihedralGroup(16)\n self.dihedral_rr_eval(dg, dg.representations['regular'])\n\n def test_dihedral_rr_small(self):\n dg = DihedralGroup(2)\n self.dihedral_rr_eval(dg, dg.representations['regular'])\n\n def test_cyclic_rr_odd(self):\n cg = CyclicGroup(11)\n self.cyclic_rr_eval(cg, cg.representations['regular'])\n\n def test_cyclic_rr_even(self):\n cg = CyclicGroup(10)\n self.cyclic_rr_eval(cg, cg.representations['regular'])\n\n def test_cyclic_rr_large(self):\n cg = CyclicGroup(20)\n self.cyclic_rr_eval(cg, cg.representations['regular'])\n\n def test_cyclic_rr_small(self):\n cg = CyclicGroup(2)\n self.cyclic_rr_eval(cg, cg.representations['regular'])\n\n def cyclic_rr_eval(self, cg, rr):\n # rr = cg.representations['regular']\n\n # np.set_printoptions(precision=4, suppress=True)\n # print('Change of Basis')\n # print(rr.change_of_basis)\n # print('Change of Basis Inv')\n # print(rr.change_of_basis_inv)\n # print('RR')\n # n = cg.order\n # for i in range(n):\n # print(rr(i * 2 * np.pi \/ n))\n \n D = rr.change_of_basis\n D_inv = rr.change_of_basis_inv\n for i, element in enumerate(cg.elements):\n \n rho_i = np.zeros([cg.order(), cg.order()])\n \n for k in range(cg.order()):\n rho_i[(i + k) % cg.order(), k] = 1.0\n \n # Build the direct sum of the irreps for this element\n blocks = []\n for irrep in rr.irreps:\n repr = cg.irrep(*irrep)(element)\n blocks.append(repr)\n \n P = sparse.block_diag(blocks, format='csc')\n R = D @ P @ D_inv\n self.assertTrue(np.allclose(R, rho_i), f\"{element}:\\n{R}\\n!=\\n{rho_i}\\n\")\n self.assertTrue(np.allclose(rr(element), rho_i), f\"{element}:\\n{rr(element)}\\n!=\\n{rho_i}\\n\")\n\n def dihedral_rr_eval(self, dg, rr):\n \n # rr = dg.representations['regular']\n \n # np.set_printoptions(precision=2, suppress=True)\n # print('Change of Basis')\n # print(rr.change_of_basis)\n # print('Change of Basis Inv')\n # print(rr.change_of_basis_inv)\n # print('RR')\n # n = dg.rotation_order\n # for i in range(n):\n # print(rr((0, i * 2 * np.pi \/ n)))\n # for i in range(n):\n # print(rr((1, i * 2 * np.pi \/ n)))\n \n D = rr.change_of_basis\n D_inv = rr.change_of_basis_inv\n\n # np.set_printoptions(precision=3, threshold=10*rr.size**2, suppress=True, linewidth=25*rr.size + 5)\n \n for i, element in enumerate(dg.elements):\n \n rho_i = np.zeros([dg.order(), dg.order()])\n \n f = -1 if element.to('int')[0] else 1\n # r = int(np.round(element[1] * dg.rotation_order \/ (2 * np.pi)))\n r = element.to('int')[1]\n \n for k in range(dg.rotation_order):\n rho_i[dg.rotation_order * element.to('int')[0] + ((r + k * f) % dg.rotation_order), k] = 1.0\n for k in range(dg.rotation_order):\n rho_i[dg.rotation_order * (1 - element.to('int')[0]) + ((r + k * f) % dg.rotation_order), dg.rotation_order + k] = 1.0\n \n # Build the direct sum of the irreps for this element\n blocks = []\n for irrep in rr.irreps:\n repr = dg.irrep(*irrep)(element)\n blocks.append(repr)\n \n P = sparse.block_diag(blocks, format='csc')\n R = D @ P @ D_inv\n self.assertTrue(np.allclose(R, rho_i), f\"{element}:\\n{R}\\n!=\\n{rho_i}\\n\")\n self.assertTrue(np.allclose(rr(element), rho_i), f\"{element}:\\n{rr(element)}\\n!=\\n{rho_i}\\n\")\n\n\nif __name__ == '__main__':\n 
unittest.main()\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_658","text":"###__________________________ SymPy - Mechanics ____________________________###\n\n##_________ Reference System _________##\n# El objeto primordial que vamos a manejar van a ser los sistemas de referencia. \n# Podremos definir relaciones geométricas entre ellos y de esta forma las transformaciones \n# de vectores entre un sistema y otro serán triviales.\n\n# La manera usual de empezar a trabajar con SymPy es importar la función init_session:\nfrom sympy import init_session, Symbol, symbols, pi, I, E, cos, sin, exp, tan, simplify, expand, factor, collect, apart, cancel, expand_trig, diff, Derivative, Function, integrate, limit, series, Eq, solve, dsolve, Matrix, N\n# from sympy import init_session\ninit_session(use_latex=True)\n\n# Todo lo que necesitamos está en sympy.physics.mechanics, incluyendo la clase ReferenceFrame. \n# Nada más crear un sistema de referencia podemos acceder a sus versores unitarios: x, y y z.\n\nfrom sympy.physics.mechanics import ReferenceFrame \n\n# A = ReferenceFrame(\"A\")\n# print(A.x)\n\n# Y para definir vectores solo tenemos que multiplicar cada componente por su versor:\n# print(2 * A.x - 1 * A.y)\n\n# De ahora en adelante, para trabajar como si nos enfrentáramos a un problema de \n# la escuela, vamos a hacer dos cosas:\n # [-] Definir un sistema inercial 1 del que partir, para así poder referir todos \n # los demás sistemas a él.\n # [-] Que los versores de ese sistema sean 𝑖,𝑗,𝑘\n \n# A = ReferenceFrame(\"1\", latexs=['\\mathbf{i}', '\\mathbf{j}', '\\mathbf{k}'])\n# print(A.x + A.y + A.z)\n\n# Y para no tener que hacerlo siempre, un pequeño truco de magia:\n# Definimos nuestra propia clase para que los versores sean IJK\n#------------------------------------------------------------------------------\nclass IJKReferenceFrame(ReferenceFrame):\n def __init__(self, name):\n super().__init__(name, latexs=['\\mathbf{%s}_{%s}' % (idx, name) for idx in (\"i\", \"j\", \"k\")])\n self.i = self.x\n self.j = self.y\n self.k = self.z\n#------------------------------------------------------------------------------\nA = IJKReferenceFrame(\"1\")\nprint(A.x + A.y + A.z)\n\n##_________ Vectorial Algebra _________##\n# Nuestros vectores funcionan también con símbolos, y podemos realizar las operaciones \n# de producto escalar y producto vectorial con ellos.\nR, V = symbols('R, V', positive=True)\nr1 = R * (A.x + A.y + A.z)\nv1 = V * (A.x - 2 * A.z)\n\nfrom sympy.physics.mechanics import dot, cross\n\nprint(r1.dot(v1))\nprint(dot(r1, v1))\nprint(r1 & v1)\n\nprint(r1.cross(v1))\nprint(cross(r1, v1))\nprint(r1 ^ v1)\n\n# Podemos hallar también la norma de los vectores con su método magnitude e incluso \n# normalizarlos con normalize:\nprint((r1 ^ v1).magnitude())\nprint((r1 ^ v1).normalize())\n\n##_________ Exercise _________##\n# Usando directamente la fórmula para la derivada en ejes móviles:\n # $$\\left(\\frac{\\operatorname{d}\\!\\mathbf{a}}{\\operatorname{d}\\!t}\\right)_1 \n # = \\left(\\frac{\\operatorname{d}\\!\\mathbf{a}}{\\operatorname{d}\\!t}\\right)_0 \n # + \\mathbf{\\omega}_{01}\\! \\times \\mathbf{a}$$\n# Calcula la derivada del vector de posición $R \\mathbf{i}_0$, siendo $A_0$ un \n# sistema de referencia que gira respecto al inercial con velocidad angular \n# $\\mathbf{\\omega}_{01}=\\Omega \\mathbf{k}_0$. 
**¿Cuál es el módulo de la derivada?**\nR, Omega = symbols('R, Omega', positive=True)\nA0 = IJKReferenceFrame('0')\na = R * A0.i\nomega01 = Omega * A0.k\nda = omega01 ^ a # cross producut\nprint(da.magnitude())\n# Si no especificaste `positive=True` vas a ver algo como sqrt(omega^2*R^2). \n# Debería haber una forma de simplificar esta expresión _a posteriori_, pero de\n# momento no funciona del todo bien. \n\n##_________ Relative Movement _________##\n# ¿A quién no le gusta multiplicar matrices de rotación? Para esa minoría que lo \n# detesta, existe SymPy. Para ello debemos especificar la orientación de nuestros \n# sistemas de referencia usando el *método orient*, y recuperaremos la matriz de \n# cosenos directores usando el *método dcm*.\nA1 = IJKReferenceFrame(\"1\")\nA0 = IJKReferenceFrame(\"0\")\nphi = symbols('phi')\nA0.orient(A1, 'Axis', [phi, A1.z]) # Rotación phi alrededor del eje A1.z\nprint(A0.dcm(A1)) # \"Direct Cosine Matrix\"\n# Usando el argumento `Axis` hemos especificado que rotamos el sistema un ángulo \n# especificado alrededor de un eje. Otros métodos son:\n # * `Body`: se especifican los tres ángulos de Euler.\n # * `Space`: igual que `Body`, pero las rotaciones se aplican en orden inverso.\n # * `Quaternion`: utilizando cuaternios, rotación alrededor de un vector unitario\n # $\\lambda$ una cantidad $\\theta$.\n\n##_________ Different Reference System _________##\n# Para expresar un vector en otro sistema de referencia, no hay más que usar los \n# métodos express o to_matrix:\nprint(A0.x.express(A1))\nprint(A0.x.to_matrix(A1))\n\n##_________ Dynamic Symbols (time dependent) _________##\n# Si queremos especificar que un símbolo puede variar con el tiempo, hay que usar \n# la función dynamicsymbols:\nfrom sympy.physics.mechanics import dynamicsymbols\n\nalpha = dynamicsymbols('alpha')\n# Y pedir su derivada con el método diff:\nprint(alpha.diff())\n\n##_________ Exercise1 _________##\nprint('_______________Exercise1___________________')\n# from notebook completos\/041-SymPy\n## Obtener la matriz de rotación de la pala B respecto a los ejes A1. 
##\nprint('______Rot Matrix_________')\nA = IJKReferenceFrame(\"A\")\nA1 = IJKReferenceFrame(\"A1\")\npsi = dynamicsymbols('psi')\nA1.orient(A, 'Axis', [psi, A.z])\nprint('A1 DCM A --> =', A1.dcm(A)) # T_{A1A}\n\nA2 = IJKReferenceFrame(\"A2\")\nbeta = dynamicsymbols('beta')\nA2.orient(A1, 'Axis', [beta, -A1.y])\nprint('A2 DCM A1 --> =', A2.dcm(A1)) # T_{A2A1}\n\nA3 = IJKReferenceFrame(\"A3\")\nzeta = dynamicsymbols('zeta')\nA3.orient(A2, 'Axis', [zeta, A2.z])\nprint('A3 DCM A1 --> =',A3.dcm(A1)) # T_{A3A1}\n\nB = IJKReferenceFrame(\"B\")\ntheta = dynamicsymbols('theta')\nB.orient(A3, 'Axis', [theta, A3.x])\nprint('B DCM A3 --> =', B.dcm(A3)) # T_{BA3}\n\nprint('B DCM A2 --> =',B.dcm(A2))\n\nprint('B DCM A1 --> =',B.dcm(A1))\n\n## Angular Velocity ##\nprint('______Ang Vel______')\n# También podemos hallar la velocidad angular de un sistema respecto a otro \n# usando el método ang_vel_in:\nprint(B.ang_vel_in(A2))\nprint(B.ang_vel_in(A))\nprint(B.ang_vel_in(A).express(A))\n\n## Derivative in moving axis ##\nprint('______Derivative in moving axis______')\n# Hacer una derivada con la fórmula lo hace cualquiera, pero SymPy puede \n# encargarse automáticamente.\nv1 = A1.x\ndv1 = v1.diff(symbols('t'), A)\nprint(dv1.to_matrix(A1))\nprint((dv1 & A1.j).simplify())\nprint('_______________End Exercise1___________________')\n\n##_________ Puntos, velocidades y la rueda que no desliza _________##\n# El último paso que nos queda para completar la cinemática es la posibilidad de \n# definir puntos en sólidos y aplicar su campo de velocidades. SymPy también \n# permite esto, y para ello no tenemos más que importar la clase Point.\nfrom sympy.physics.mechanics import Point\nO = Point(\"O\")\n# Para trabajar como lo haríamos en la escuela, vamos a especificar que O es el\n# origen de A, y para eso vamos a imponer que su velocidad es cero con el método set_vel:\nO.set_vel(A, 0)\n# Para definir nuevos puntos, podemos utilizar el método locate_new:\ne_b = symbols('e_b')\nE_b = O.locatenew('E_b', e_b * A1.x)\n# Y para obtener vectores de un punto a otro, el método pos_from:\nprint(E_b.pos_from(O))\n# La notación de este paquete está influenciada por el libro\n# . & . 
\"Dynamics, Theory and Applications\"\n\n# Por último, el **campo de velocidades de un sólido rígido** se formula usando \n# el método `v2pt_theory`.\n # $$v^P_A = v^O_A + \\omega_{A_1 A} \\times \\mathbf{OP}$$\n# Este método pertenece *al punto del cual queremos conocer la velocidad* y \n# recibe tres parámetros:\n # * `O`, punto de velocidad conocida respecto a A\n # * `A`, sistema de referencia donde queremos calcular la velocidad\n # * `A1`, sistema de referencia donde están fijos ambos puntos (_sistema de arrastre_)\nprint(E_b.v2pt_theory(O, A, A1))\n\n##_________ Exercise2 _________##\nprint('_______________Exercise2___________________')\n# from notebook completos\/041-SymPy\n# ¡Halla la velocidad y la aceleración de P!\n\n# Creamos nuestros sistemas de referencia\nA1 = IJKReferenceFrame('1')\nA0 = IJKReferenceFrame('0')\nA2 = IJKReferenceFrame('2')\n\n# Creamos los símbolos dinámicos necesarios\nxi, theta = dynamicsymbols('xi, theta')\n\n# Orientamos los sistemas de referencia\nA0.orient(A1, 'Axis', [0, A1.k]) # A0 no gira respecto a A1\nA2.orient(A0, 'Axis', [theta, A0.k])\nprint('A2 DCM A1 --> =', A2.dcm(A1))\n\n# Creamos el punto C, centro del disco, y especificamos su velocidad\n# respecto a A1\nC = Point('C')\nC.set_vel(A1, xi.diff() * A1.x)\n\n# Localizamos el punto P, punto fijo del disco, respecto a C, en\n# el sistema A2 (que gira solidariamente con el disco)\nR = symbols('R')\nP = C.locatenew('P', -R * A2.j)\nprint(P.pos_from(C))\n\n# Hallamos la velocidad de P en A1, expresada en A0\n# ¡Con esta llamada ya estamos diciendo que C y P son fijos en A2!\nprint(P.v2pt_theory(C, A1, A2).express(A0))\n\n#______________________________________________________________________________\n#______________________________________________________________________________\n# \n# Estabilidad y control dinámicos longitudinales en cadena abierta\n# Análisis de la estabilidad longitudinal de un B747-100\n# https:\/\/nbviewer.jupyter.org\/github\/AlexS12\/Mecanica_Vuelo\/blob\/master\/MVII_MatrizSistema.ipynb"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_659","text":"'''\nCreated on Jun 7, 2012\n\n@author: vinnie\n'''\n\nimport os\nimport sys\nimport cv2 as cv\nfrom scipy.ndimage import filters\n#import scipy.signal as signal\nimport numpy as np\nimport pylab as plb\nimport matplotlib.cm as cm\nfrom itertools import product\n\n_ALPHA = 100\n_BETA = 200\n_W_LINE = 250\n_W_EDGE = 30\n_MIN_DISTANCE = 10\n_INITIAL_SMOOTH = 15\n_INITIAL_ITERATIONS = 30\n_ITERATIONS_DELTA = 5 \n_SMOOTH_FACTOR_DELTA = 4\n\n_NUM_NEIGHBORS = 9\n_MAX_SNAXELS = 10000\n_INITIAL_DISTANCE_BETWEEN_SNAXELS = 50\n\ndef _display(image, snaxels=None):\n \"\"\"\n Display a grayscale image with pylab, and draw the contour if there is any.\n \"\"\"\n plb.clf()\n if snaxels is not None:\n for s in snaxels:\n plb.plot(s[0],s[1],'g.',markersize=10.0)\n \n plb.imshow(image, cmap=cm.Greys_r)\n plb.draw()\n \n return\n\ndef _gradientImage(image):\n \"\"\"\n Obtain a gradient image (in both x and y directions)\n \"\"\"\n gradient = np.sqrt(filters.sobel(image, 0)**2 + filters.sobel(image, 1)**2)\n gradient -= gradient.min()\n\n return gradient \n \ndef _inBounds(image, point):\n \"\"\"\n Is the point within the bounds of the image?\n \"\"\"\n return np.all(point < np.shape(image)) and np.all(point > 0)\n\ndef _externalEnergy(image, smooth_image, point):\n \"\"\"\n The external energy of the point, a combination of line and edge \n \"\"\"\n pixel = 255 * image[point[1]][point[0]]\n smooth_pixel = 255 * 
smooth_image[point[1]][point[0]]\n external_energy = (_W_LINE * pixel) - (_W_EDGE * (smooth_pixel**2))\n return external_energy\n\ndef _energy(image, smooth_image, current_point, next_point, previous_point=None):\n \"\"\"\n Total energy (internal and external).\n Internal energy measures the shape of the contour\n \"\"\"\n d_squared = np.linalg.norm(next_point -current_point)**2\n \n if previous_point is None:\n e = _ALPHA * d_squared + _externalEnergy(image, smooth_image, current_point)\n return e \n else:\n deriv = np.sum((next_point - 2 * current_point + previous_point)**2)\n e = 0.5 * (_ALPHA * d_squared + _BETA * deriv + _externalEnergy(image, smooth_image, current_point))\n return e\n\ndef _iterateContour(image, smooth_image, snaxels, energy_matrix, position_matrix, neighbors):\n \"\"\"\n Compute the minimum energy locations for all the snaxels in the contour\n \"\"\"\n snaxels_added = len(snaxels)\n for curr_idx in range(snaxels_added - 1, 0, -1):\n energy_matrix[curr_idx][:][:] = float(\"inf\")\n prev_idx = (curr_idx - 1) % snaxels_added\n next_idx = (curr_idx + 1) % snaxels_added\n \n for j, next_neighbor in enumerate(neighbors):\n next_node = snaxels[next_idx] + next_neighbor\n \n if not _inBounds(image, next_node):\n continue\n \n min_energy = float(\"inf\")\n for k, curr_neighbor in enumerate(neighbors):\n curr_node = snaxels[curr_idx] + curr_neighbor\n distance = np.linalg.norm(next_node - curr_node)\n \n if not _inBounds(image, curr_node) or (distance < _MIN_DISTANCE):\n continue\n \n min_energy = float(\"inf\")\n for l, prev_neighbor in enumerate(neighbors):\n prev_node = snaxels[prev_idx] + prev_neighbor\n \n if not _inBounds(image, prev_node):\n continue\n \n energy = energy_matrix[prev_idx][k][l] + _energy(image, smooth_image, curr_node, next_node, prev_node)\n \n if energy < min_energy:\n min_energy = energy\n min_position_k = k\n min_position_l = l\n \n energy_matrix[curr_idx][j][k] = min_energy\n position_matrix[curr_idx][j][k][0] = min_position_k\n position_matrix[curr_idx][j][k][1] = min_position_l\n \n min_final_energy = float(\"inf\")\n min_final_position_j = 0\n min_final_position_k = 0\n\n for j in range(_NUM_NEIGHBORS):\n for k in range(_NUM_NEIGHBORS):\n if energy_matrix[snaxels_added - 2][j][k] < min_final_energy:\n min_final_energy = energy_matrix[snaxels_added - 2][j][k]\n min_final_position_j = j\n min_final_position_k = k\n\n pos_j = min_final_position_j\n pos_k = min_final_position_k\n \n for i in range(snaxels_added - 1, -1, -1):\n snaxels[i] = snaxels[i] + neighbors[pos_j]\n if i > 0:\n pos_j = position_matrix[i - 1][pos_j][pos_k][0]\n pos_k = position_matrix[i - 1][pos_j][pos_k][1]\n \n return min_final_energy\n\ndef activeContour(image, snaxels):\n \"\"\"\n Iterate the contour until the energy reaches an equilibrium\n \"\"\"\n energy_matrix = np.zeros( (_MAX_SNAXELS - 1, _NUM_NEIGHBORS, _NUM_NEIGHBORS), dtype=np.float32)\n position_matrix = np.zeros( (_MAX_SNAXELS - 1, _NUM_NEIGHBORS, _NUM_NEIGHBORS, 2), dtype=np.int32 )\n neighbors = np.array([[i, j] for i in range(-1, 2) for j in range(-1, 2)])\n min_final_energy_prev = float(\"inf\")\n \n counter = 0\n smooth_factor = _INITIAL_SMOOTH \n iterations = _INITIAL_ITERATIONS\n gradient_image = _gradientImage(image)\n smooth_image = cv.blur(gradient_image, (smooth_factor, smooth_factor))\n \n while True:\n counter += 1\n if not (counter % iterations):\n iterations += _ITERATIONS_DELTA\n if smooth_factor > _SMOOTH_FACTOR_DELTA:\n smooth_factor -= _SMOOTH_FACTOR_DELTA \n smooth_image = 
cv.blur(gradient_image, (smooth_factor, smooth_factor))\n print \"Deblur step, smooth factor now: \", smooth_factor\n \n _display(smooth_image, snaxels)\n min_final_energy = _iterateContour(image, smooth_image, snaxels, energy_matrix, position_matrix, neighbors)\n \n if (min_final_energy == min_final_energy_prev) or smooth_factor < _SMOOTH_FACTOR_DELTA:\n print \"Min energy reached at \", min_final_energy\n print \"Final smooth factor \", smooth_factor\n break\n else:\n min_final_energy_prev = min_final_energy\n \n\ndef _pointsOnCircle(center, radius, num_points=12):\n points = np.zeros((num_points, 2), dtype=np.int32)\n for i in range(num_points):\n theta = float(i)\/num_points * (2 * np.pi)\n x = center[0] + radius * np.cos(theta)\n y = center[1] + radius * np.sin(theta)\n p = [x, y]\n points[i] = p\n \n return points\n\ndef activeContourFromCircle(image_file, center, radius):\n image = plb.imread(image_file)\n if image.ndim > 2:\n image = np.mean(image, axis=2)\n print \"Image size: \", image.shape\n \n plb.ion()\n plb.figure(figsize=np.array(np.shape(image))\/50.)\n \n _display(image)\n# num_points = int((2 * np.pi * radius)\/_INITIAL_DISTANCE_BETWEEN_SNAXELS)\n snaxels = _pointsOnCircle(center, radius, 30)\n _display(image, snaxels)\n activeContour(image, snaxels)\n \n # show and save the result\n plb.ioff()\n _display(image, snaxels)\n plb.savefig(os.path.splitext(image_file)[0] + \"-contour-result.png\")\n plb.show()\n return\n\ndef _test():\n \"\"\"\n Run the active contour on an image file\n \"\"\"\n activeContourFromCircle(\"mri.png\", (290, 440), 125)\n \n return\n\nif __name__ == '__main__':\n _test()"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_660","text":"from __future__ import print_function\nimport cv2\nimport PIL.Image\nimport numpy as np\nimport scipy.stats\nimport sys\nimport itertools\nfrom line_intersection import *\nnp.set_printoptions(suppress=True, precision=2)\n\ndef scaleImageIfNeeded(img, max_width=1024, max_height=1024):\n \"\"\"Scale image down to max_width \/ max_height keeping aspect ratio if needed. 
Do nothing otherwise.\"\"\"\n # Input and Output is a numpy array\n img = PIL.Image.fromarray(img)\n img_width, img_height = img.size\n # print(\"Image size %dx%d\" % (img_width, img_height))\n aspect_ratio = min(float(max_width)\/img_width, float(max_height)\/img_height)\n if aspect_ratio < 1.0:\n new_width, new_height = ((np.array(img.size) * aspect_ratio)).astype(int)\n # print(\" Resizing to %dx%d\" % (new_width, new_height))\n return np.array(img.resize((new_width,new_height)))\n return np.array(img)\n\ndef getAngle(a,b,c):\n # Get angle given 3 side lengths, in degrees\n k = (a*a+b*b-c*c) \/ (2*a*b)\n # Handle floating point errors\n if (k < -1):\n k=-1\n elif k > 1:\n k=1\n return np.arccos(k) * 180.0 \/ np.pi\n\ndef angleCloseDeg(a, b, angle_threshold=10):\n d = np.abs(a - b)\n # Handle angles that are ~180 degrees apart\n return d <= angle_threshold or np.abs(d-180) <= angle_threshold\n\ndef getSegmentThetaRho(line):\n x1,y1,x2,y2 = line\n theta = np.math.atan2(y2-y1, x2-x1)\n m = np.tan(theta)\n # rho = np.abs(y1 + m*x1) \/ np.sqrt(m*m+1)\n rho = x1*np.cos(theta) + y1*np.sin(theta)\n return theta, rho\n\ndef getTwoLineSegmentIntersection(p,pr,q,qs):\n # Uses http:\/\/stackoverflow.com\/a\/565282\/2574639\n # Given two line segments defined by sets of points\n # (p -> pr) and (q -> qs).\n # Return the intersection point between them\n # *assumes it always exists for our particular use-case*\n \n # Convert to floats\n p = p.astype(np.float32)\n pr = pr.astype(np.float32)\n q = q.astype(np.float32)\n qs = qs.astype(np.float32)\n r = pr-p\n s = qs-q\n # print(p, pr, r)\n # print(q, qs, s)\n rxs = np.cross(r, s)\n if rxs == 0:\n return [] # parallel\n t = np.cross((q - p), s) \/ rxs\n return p + t*r # intersect\n\ndef chooseRandomGoodQuad(lines_a, lines_b, median_contour):\n # Get random set of points\n # Redo until min side distance of random corners greater than a multiple \n # of the median tile found from initial estimator.\n sides_tile = getSquareSides(median_contour)\n for i in range(50):\n corners = chooseRandomQuad(lines_a, lines_b)\n sides_quad = getSquareSides(corners)\n if (i < 5):\n tile_size_mult = 5\n elif (i < 10):\n tile_size_mult = 4\n elif (i < 20):\n tile_size_mult = 3\n elif (i < 30):\n tile_size_mult = 2\n else:\n tile_size_mult = 1\n \n if min(sides_quad) > min(sides_tile*tile_size_mult):\n return corners\n \n print('chooseRandomGoodQuad hit max iter: %d' % i)\n return corners\n\ndef chooseRandomQuad(lines_a, lines_b):\n # Return 1 random quad (4 points) by choosing\n # 2 lines from lines_a and 2 lines from lines_b \n # and returning their intersections\n a = np.random.choice(range(len(lines_a)),2, replace=False)\n b = np.random.choice(range(len(lines_b)),2, replace=False)\n\n pairs = np.array([\n [a[0], b[0]],\n [a[0], b[1]],\n [a[1], b[1]],\n [a[1], b[0]],\n ])\n\n corners = np.zeros([4,2])\n for i in range(4):\n k1 = lines_a[pairs[i,0]]\n k2 = lines_b[pairs[i,1]]\n corners[i,:] = getTwoLineSegmentIntersection(k1[:2], k1[2:], k2[:2], k2[2:])\n return corners\n\n\ndef getSegmentTheta(line):\n x1,y1,x2,y2 = line\n theta = np.math.atan2(y2-y1, x2-x1)\n return theta\n\ndef is_square(cnt, eps=3.0, xratio_thresh = 0.5):\n # 4x2 array, rows are each point, columns are x and y\n center = cnt.sum(axis=0)\/4\n\n # Side lengths of rectangular contour\n dd0 = np.sqrt(((cnt[0,:] - cnt[1,:])**2).sum())\n dd1 = np.sqrt(((cnt[1,:] - cnt[2,:])**2).sum())\n dd2 = np.sqrt(((cnt[2,:] - cnt[3,:])**2).sum())\n dd3 = np.sqrt(((cnt[3,:] - cnt[0,:])**2).sum())\n\n # diagonal ratio\n xa 
= np.sqrt(((cnt[0,:] - cnt[2,:])**2).sum())\n xb = np.sqrt(((cnt[1,:] - cnt[3,:])**2).sum())\n xratio = xa\/xb if xa < xb else xb\/xa\n\n # Check whether all points part of convex hull\n # ie. not this http:\/\/i.stack.imgur.com\/I6yJY.png\n # all corner angles, angles are less than 180 deg, so not necessarily internal angles\n ta = getAngle(dd3, dd0, xb) \n tb = getAngle(dd0, dd1, xa)\n tc = getAngle(dd1, dd2, xb)\n td = getAngle(dd2, dd3, xa)\n angle_sum = np.round(ta+tb+tc+td)\n\n is_convex = angle_sum == 360\n\n angles = np.array([ta,tb,tc,td])\n good_angles = np.all((angles > 40) & (angles < 140))\n\n\n # side ratios\n dda = dd0 \/ dd1\n if dda < 1:\n dda = 1. \/ dda\n ddb = dd1 \/ dd2\n if ddb < 1:\n ddb = 1. \/ ddb\n ddc = dd2 \/ dd3\n if ddc < 1:\n ddc = 1. \/ ddc\n ddd = dd3 \/ dd0\n if ddd < 1:\n ddd = 1. \/ ddd\n side_ratios = np.array([dda,ddb,ddc,ddd])\n good_side_ratios = np.all(side_ratios < eps)\n\n # Return whether side ratios within certain ratio < epsilon\n return (\n # abs(1.0 - dda) < eps and \n # abs(1.0 - ddb) < eps and\n # xratio > xratio_thresh and \n # good_side_ratios and\n # is_convex and\n good_angles)\n\ndef minimum_distance2(v, w, p):\n # Return squared min distance between point p and line segment vw\n # Via http:\/\/stackoverflow.com\/a\/1501725\n # Return minimum distance between line segment vw and point p\n l2 = np.sum((v - w)**2) # i.e. |w-v|^2 - avoid a sqrt\n if (l2 == 0.0):\n return np.sum((p - v)**2) # v == w case\n # Consider the line extending the segment, parameterized as v + t (w - v).\n # We find projection of point p onto the line. \n # It falls where t = [(p-v) . (w-v)] \/ |w-v|^2\n # We clamp t from [0,1] to handle points outside the segment vw.\n t = max(0, min(1, np.dot(p - v, w - v) \/ l2))\n projection = v + t * (w - v) # Projection falls on the segment\n return np.sum((p - projection)**2)\n\ndef getMinLineAngleDistance(a0, a1):\n # Compare line angles (which can be 180 off from one another, or +- 180)\n v0 = abs(a1-a0)\n v1 = abs((a1+np.pi) - a0)\n v2 = abs(a1 - (a0+np.pi))\n return min([v0,v1,v2])\n\ndef getBestCorners(tile_corners, hough_lines, angle_threshold = 10*np.pi\/180):\n # Given 4x2 imperfect tile corners and Nx4 line segments\n # Expects line segments and corner points to be in same cartesian space\n #\n # Find 4 best line segments that are best match to the tile corners\n # and return the corners based off of those line segments, and those line segments\n best_lines = np.zeros([4,4])\n for i in range(4):\n corner_theta = getSegmentTheta(tile_corners[[i,i,((i+1)%4),((i+1)%4)], [0,1,0,1]])\n corner_ctr_pt = (tile_corners[i,:] + tile_corners[((i+1)%4),:]) \/ 2\n\n best_d = 1e6\n for line in hough_lines:\n theta = getSegmentTheta(line)\n # If angle within 10 degrees\n # if abs(corner_theta - theta) < angle_threshold:\n if getMinLineAngleDistance(corner_theta, theta) < angle_threshold:\n d = minimum_distance2(line[:2], line[2:], corner_ctr_pt)\n if d < best_d:\n best_d = d\n best_lines[i,:] = line\n \n new_corners = np.zeros([4,2], dtype=np.float32)\n for i in range(4):\n x = getTwoLineSegmentIntersection(\n best_lines[i,:2], best_lines[i,2:],\n best_lines[(i+1)%4,:2], best_lines[(i+1)%4,2:])\n # print(best_lines, x)\n # print(best_lines[i,:2], best_lines[i,2:], best_lines[(i+1)%4,:2], best_lines[(i+1)%4,2:])\n new_corners[i,:] = x\n\n return new_corners, best_lines\n\ndef simplifyContours(contours):\n for i in range(len(contours)):\n # Approximate contour and update in place\n contours[i] = 
cv2.approxPolyDP(contours[i],0.04*cv2.arcLength(contours[i],True),True)\n\ndef pruneContours(contours):\n new_contours = []\n for i in range(len(contours)):\n cnt = contours[i] \n # Only contours that fill an area of at least 8x8 pixels\n if cv2.contourArea(cnt) < 8*8:\n continue\n # Only rectangular contours allowed\n if len(cnt) != 4:\n continue\n\n if not is_square(cnt):\n continue\n\n new_contours.append(cnt)\n new_contours = np.array(new_contours)\n if len(new_contours) == 0:\n return new_contours, None\n \n norm_contours = new_contours[:,:,0,:] - new_contours[:,[0],0,:]\n median_contour = np.median(norm_contours, axis=0).astype(int)\n diff = np.sqrt(np.sum((norm_contours - median_contour)**2,axis=2))\n\n new_contours = new_contours[np.all(diff < 60, axis=1)]\n\n return np.array(new_contours), median_contour\n\ndef getSquareSides(cnt):\n # 4x2 array, rows are each point, columns are x and y\n center = cnt.sum(axis=0)\/4\n\n # Side lengths of rectangular contour\n dd0 = np.sqrt(((cnt[0,:] - cnt[1,:])**2).sum())\n dd1 = np.sqrt(((cnt[1,:] - cnt[2,:])**2).sum())\n dd2 = np.sqrt(((cnt[2,:] - cnt[3,:])**2).sum())\n dd3 = np.sqrt(((cnt[3,:] - cnt[0,:])**2).sum())\n return np.array([dd0, dd1, dd2, dd3])\n\n\nfrom time import time\ndef calculateMask(mask_shape, contours, iters=10):\n a = time()\n sum_mask = np.zeros(mask_shape, dtype=int)\n tmp_mask = np.zeros(mask_shape, dtype=int)\n for i, cnt in enumerate(contours):\n for i in np.linspace(5,23,iters):\n # for i in [3,5,7,9,11,13,15]:\n # Calculate oversized tile mask and add to sum\n # big_cnt = (cnt.mean(axis=0) + (cnt-cnt.mean(axis=0))*i).astype(int)\n cnt_center = cnt.mean(axis=0)\n big_cnt = (cnt*i + cnt_center*(1-i)).astype(int)\n tmp_mask[:] = 0 # Reset\n cv2.drawContours(tmp_mask,[big_cnt],0,1,-1) # Fill mask with 1's inside contour\n sum_mask += tmp_mask\n\n # Normalize mask to 0-1 range\n sum_mask = sum_mask.astype(float) \/ sum_mask.max()\n # print(\"Mask calc took %.4f seconds.\" % (time() - a))\n return sum_mask\n\ndef getContourThetas(contours):\n thetas = []\n for cnt in contours:\n cnt = cnt[:,0,:]\n if cnt[0,0] < cnt[1,0]:\n side0 = np.hstack([cnt[1,:],cnt[0,:]])\n else:\n side0 = np.hstack([cnt[0,:],cnt[1,:]])\n if cnt[1,0] < cnt[2,0]:\n side1 = np.hstack([cnt[2,:],cnt[1,:]])\n else:\n side1 = np.hstack([cnt[1,:],cnt[2,:]])\n if cnt[2,0] < cnt[3,0]:\n side2 = np.hstack([cnt[3,:],cnt[2,:]])\n else:\n side2 = np.hstack([cnt[2,:],cnt[3,:]])\n if cnt[3,0] < cnt[0,0]:\n side3 = np.hstack([cnt[0,:],cnt[3,:]])\n else:\n side3 = np.hstack([cnt[3,:],cnt[0,:]])\n theta0 = getSegmentTheta(side0)\n theta1 = getSegmentTheta(side1)\n theta2 = getSegmentTheta(side2)\n theta3 = getSegmentTheta(side3)\n thetas.extend([theta0,theta1,theta2,theta3])\n return np.array(thetas)\n\ndef getEstimatedChessboardMask(img, edges, iters=10):\n # Morphological Gradient to get internal squares of canny edges. 
\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))\n edges_gradient = cv2.morphologyEx(edges, cv2.MORPH_GRADIENT, kernel)\n\n _, contours, hierarchy = cv2.findContours(edges_gradient, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\n # Approximate polygons of contours\n simplifyContours(contours)\n\n if len(contours) == 0:\n return np.ones(img.shape[:2], dtype=float), None, None, None\n\n # Prune contours to rectangular ones\n contours, median_contour = pruneContours(contours)\n\n if len(contours) == 0 or median_contour is None:\n return np.ones(img.shape[:2], dtype=float), None, None, None\n\n thetas = getContourThetas(contours)\n \n top_two_angles = calculateKDE(thetas)\n\n mask = calculateMask(edges_gradient.shape, contours, iters)\n\n min_area_rect = getMinAreaRect(mask)\n\n return mask, top_two_angles, min_area_rect, median_contour\n\n\ndef calculateKDE(thetas):\n thetas *= 180\/np.pi\n thetas[thetas<0] += 180\n \n kde_func = scipy.stats.gaussian_kde(thetas)\n positions = np.linspace(-40,180+40,360)\n kde_res = kde_func(positions)\n\n left_half = np.diff(kde_res)\n right_half = np.diff(kde_res[::-1])\n \n f = kde_res.copy()\n f[1:][left_half<0] = 0\n f[:-1][right_half[::-1]<0] = 0\n\n peak_indices = np.argwhere(f).flatten()\n peak_angles = positions[peak_indices]\n\n order = np.argsort(kde_res[peak_indices])[::-1] # strongest to weakest peaks\n\n return peak_angles[order[:2]] # top two strongest angles in degrees\n\n# def plotKDE(thetas):\n# thetas *= 180\/np.pi\n# thetas[thetas<0] += 180\n \n# ax1 = plt.subplot(211)\n# plt.plot(thetas,np.zeros(thetas.shape),'.')\n# plt.hist(thetas,20)\n\n# plt.subplot(212, sharex=ax1)\n# kde_func = scipy.stats.gaussian_kde(thetas)\n# positions = np.linspace(-40,180+40,360)\n# kde_res = kde_func(positions)\n# plt.plot(positions, kde_res)\n\n# c = kde_res.copy()\n# left_half = np.diff(kde_res)\n# right_half = np.diff(kde_res[::-1])\n \n# f = c.copy()\n# f[1:][left_half<0] = 0\n# f[:-1][right_half[::-1]<0] = 0\n# peak_indices = np.argwhere(f).flatten()\n# print(peak_indices, positions[peak_indices])\n# peak_angles = positions[peak_indices]\n\n# plt.plot(peak_angles, kde_res[peak_indices],'go')\n# order = np.argsort(kde_res[peak_indices][::-1]) # strongest to weakest peaks\n# return peak_angles[order[:2]], peak_angles, kde_res[peak_indices] # two angles in degrees\n\ndef getHoughLines(edges, min_line_size=100):\n # Expects chessboard to take up over 50% of edge map\n # min_line_size = int(min(edges.shape)\/8)\n lines = cv2.HoughLinesP(edges,1,np.pi\/360.0, int(min_line_size),\n minLineLength = min_line_size, maxLineGap = min_line_size\/2)\n\n if lines is None:\n return np.array([])\n\n return lines[:,0,:]\n\ndef getSegmentTheta(line):\n x1,y1,x2,y2 = line\n theta = np.math.atan2(y2-y1, x2-x1)\n return theta\n\ndef parseHoughLines(lines, top_two_angles, angle_threshold_deg=20):\n is_good = np.zeros(len(lines)) # 0 = bad, 1 = close to 1st angle, 2 = close to 2nd angle\n for i, line in enumerate(lines):\n theta = getSegmentTheta(line) * 180\/np.pi # to degrees\n d1 = getMinLineAngleDistanceDeg(theta, top_two_angles[0])\n d2 = getMinLineAngleDistanceDeg(theta, top_two_angles[1])\n if (d1 < angle_threshold_deg):\n is_good[i] = 1\n elif (d2 < angle_threshold_deg):\n is_good[i] = 2\n lines_a = lines[is_good==1]\n lines_b = lines[is_good==2]\n return lines_a, lines_b\n\n\ndef getMinLineAngleDistance(a0, a1):\n # In radians\n # Compare line angles (which can be 180 off from one another, or +- 180)\n v0 = abs(a1-a0)\n v1 = abs((a1+np.pi) - a0)\n v2 = abs(a1 - 
(a0+np.pi))\n return min([v0,v1,v2])\n\ndef getMinLineAngleDistanceDeg(a0, a1):\n # In degrees\n # Compare line angles (which can be 180 off from one another, or +- 180)\n v0 = abs(a1-a0)\n v1 = abs((a1+180) - a0)\n v2 = abs(a1 - (a0+180))\n return min([v0,v1,v2])\n\n\ndef plotHoughLines(img, lines, color=(255,255,255), line_thickness=2):\n # colors = np.random.random([lines.shape[0],3])*255\n # colors = np.array([\n # [20,20,20],\n # [255,0,0],\n # [0,255,0],\n # [255,255,0],\n # [0,0,255],\n # [255,0,255],\n # [0,255,255],\n # [200,200,200],\n # ], dtype=np.uint8)\n # Plot lines\n for i, line in enumerate(lines):\n # color = list(map(int,colors[i%len(colors)]))\n cv2.line(img,\n tuple(line[:2].astype(np.int)),\n tuple(line[2:].astype(np.int)), color, thickness=line_thickness)\n\n\n\ndef getMinAreaRect(mask):\n a = np.argwhere(mask.T>0.5)\n # rect = cv2.boundingRect(a)\n rect = cv2.minAreaRect(a)\n return rect\n\ndef drawMinAreaRect(img, rect, color=(0,255,255)):\n ctr = tuple(map(int,rect[0]))\n \n box = cv2.boxPoints(rect)\n box = np.int0(box)\n\n cv2.drawContours(img,[box],0,color,2)\n cv2.circle(img, ctr, 3, (255,0,0),-1)\n\n\ndef skeletonize_1d(data):\n c = data.copy()\n left_half = np.diff(data)\n right_half = np.diff(data[::-1])\n\n f = data.copy()\n f[1:][left_half<0] = 0\n f[:-1][right_half[::-1]<0] = 0\n return f\n\ndef getWarpCheckerLines(img):\n \"\"\"Given a warped axis-aligned image of a chessboard, return internal line crossings\"\"\"\n # TODO: Fix awkward conversion\n # Convert RGB numpy array to image, then to grayscale image, then back to numpy array\n img_gray = np.array(PIL.Image.fromarray(img).convert('L'))\n img_gray = cv2.bilateralFilter(img_gray,15,75,75)\n\n # Find gradients\n sobelx = cv2.Sobel(img_gray,cv2.CV_64F,1,0,ksize=5)\n sobely = cv2.Sobel(img_gray,cv2.CV_64F,0,1,ksize=5)\n\n sobelx_pos = sobelx.copy()\n sobelx_pos[sobelx <= 0] = 0\n sobelx_neg = sobelx.copy()\n sobelx_neg[sobelx > 0] = 0\n\n sobely_pos = sobely.copy()\n sobely_pos[sobely <= 0] = 0\n sobely_neg = sobely.copy()\n sobely_neg[sobely > 0] = 0\n\n kernel = np.array([ 0. , 0. , 0.04, 0.32, 0.88, 0.88, 0.32, 0.04, 0. , 0. 
])\n\n checker_x = np.sum(sobelx_pos, axis=0) * np.sum(-sobelx_neg, axis=0)\n checker_x = np.convolve(checker_x, kernel, 'same')\n checker_x = checker_x \/ checker_x.max()\n checker_x[checker_x<0.1] = 0\n checker_x = skeletonize_1d(checker_x)\n\n checker_y = np.sum(sobely_pos, axis=1) * np.sum(-sobely_neg, axis=1)\n checker_y = np.convolve(checker_y, kernel, 'same')\n checker_y = checker_y \/ checker_y.max()\n checker_y[checker_y<0.1] = 0\n checker_y = skeletonize_1d(checker_y)\n\n x_lines = np.argwhere(checker_x).flatten()\n y_lines = np.argwhere(checker_y).flatten()\n\n\n\n #######\n ## Semi-brute force approach, merge all combinations of 3 points \n # with equal spacing under one standard deviation\n x_lines = getBestEqualSpacing(x_lines)\n y_lines = getBestEqualSpacing(y_lines)\n\n ###########\n\n if len(x_lines) < 7 or len(y_lines) < 7:\n return [], [], [], []\n\n # Select set of 7 adjacent lines with max sum score\n x_scores = np.zeros(x_lines.shape[0]-7+1)\n for i in range(0,x_lines.shape[0]-7+1):\n x_scores[i] = np.sum(checker_x[x_lines[i:i+7]])\n x_start = np.argmax(x_scores)\n strongest_x_lines = range(x_start,x_start+7)\n\n y_scores = np.zeros(y_lines.shape[0]-7+1)\n for i in range(0,y_lines.shape[0]-7+1):\n y_scores[i] = np.sum(checker_y[y_lines[i:i+7]])\n y_start = np.argmax(y_scores)\n strongest_y_lines = range(y_start,y_start+7)\n\n # TODO: Sanity check areas between lines for consistent color when choosing?\n\n # Choose best internal 7 chessboard lines\n lines_x = x_lines[strongest_x_lines]\n lines_y = y_lines[strongest_y_lines]\n\n # Add outer chessboard edges assuming consistent step size\n step_x = np.median(np.diff(lines_x))\n step_y = np.median(np.diff(lines_y))\n\n lines_x = np.hstack([lines_x[0]-step_x, lines_x, lines_x[-1]+step_x])\n lines_y = np.hstack([lines_y[0]-step_y, lines_y, lines_y[-1]+step_y])\n\n return lines_x, lines_y, step_x, step_y\n\n # x_lines = np.argwhere(checker_x).flatten()\n # y_lines = np.argwhere(checker_y).flatten()\n\n # x_diff = np.diff(x_lines)\n # y_diff = np.diff(y_lines)\n\n # step_x_pred = np.median(x_diff)\n # step_y_pred = np.median(y_diff)\n\ndef pruneGradLines(a, b, eta=10):\n # Remove values from vector 'a' that aren't close to values in vector b\n is_good = np.zeros(len(a),dtype=bool)\n for i,v in enumerate(a):\n if min(b-v) < eta:\n is_good[i] = True\n return a[is_good]\n \n\n\n\n\n\n\n\n\n\n\n\n\ndef main(filenames):\n for filename in filenames:\n img = cv2.imread(filename)\n img = scaleImageIfNeeded(img, 600, 480)\n\n # Edges\n edges = cv2.Canny(img, 100, 550)\n mask, _, _, _ = getEstimatedChessboardMask(img, edges, iters=10)\n\n img_masked_full = cv2.bitwise_and(img,img,mask = (mask > 0.5).astype(np.uint8))\n img_masked = cv2.addWeighted(img,0.2,img_masked_full,0.8,0)\n edges_masked = cv2.bitwise_and(edges,edges,mask = (mask > 0.5).astype(np.uint8))\n\n cv2.imshow('img %s' % filename,img_masked)\n cv2.imshow('edges %s' % filename, edges_masked)\n cv2.imshow('mask %s' % filename, mask)\n\n\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n filenames = sys.argv[1:]\n else:\n # filenames = ['input\/1.jpg']\n filenames = ['input2\/18.jpg']\n print(\"Loading\", filenames)\n main(filenames)"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_661","text":"bartolsthoorn\/PH_XXZ\nimport h5py\nimport numpy as np\nfrom ripser import ripser\nimport gudhi as gd\nfrom persim import plot_diagrams\nfrom scipy.spatial.distance import pdist\nfrom 
scipy.spatial.distance import squareform\nfrom tqdm import tqdm\nfrom multiprocessing import Pool\nimport gudhi as gd\nimport pickle\nimport persim\n\nfilename = 'snapshots_4_100000_32.hdf5'\n#filename = 'snapshots_6_100000_32.hdf5'\nf = h5py.File(filename, 'r')\nL = 4\nN = L*L*L*16\n\nspins_type = 'global'\n\ncoordinates = f['coordinates'][:].transpose()\ncoordinates = coordinates.reshape(N,3)\ncoordinates_D = squareform(pdist(coordinates, metric='euclidean'))\nprint(np.min(coordinates_D[coordinates_D > 0]))\nspins = f['spins_' + spins_type][:].transpose()\nprint(spins.shape)\n\nJ_grid = f['J'][:]\nT_grid = f['T'][:]\nprint(J_grid)\n\nN_T = len(T_grid)\nN_J = len(J_grid)\n\nf.close()\n\nparams = []\nfor i, J in enumerate(J_grid):\n for j, T in enumerate(T_grid):\n params.append((J,T))\n\nn_samples = 32\ndef calculate_barcodes(samples):\n points_list = [coordinates + (spins * 0.3535533905932738 * 0.25) for spins in samples]\n points_list = points_list[0:n_samples]\n dgms = [[] for i in range(3)]\n for points in points_list:\n #rips = ripser(points, thresh=max_death, maxdim=maxdim)\n alpha_complex = gd.AlphaComplex(points=points)\n st_alpha = alpha_complex.create_simplex_tree()\n barcodes = st_alpha.persistence()\n #for i, d in enumerate(rips['dgms']):\n # dgms[i] += list(d)\n for d in np.arange(3):\n dgms[d] += [bar for dim, bar in barcodes if dim == d]\n return dgms\n\nbarcodes = []\nfor i, J in tqdm(enumerate(J_grid), total=len(J_grid)):\n for j, T in tqdm(enumerate(T_grid), total=len(T_grid)):\n barcodes.append(calculate_barcodes(spins[i, j, :]))\n\npickle.dump(barcodes, open(filename.split('.')[0] + '_' + spins_type + ('_barcodes_%d.p' % n_samples), 'wb' ))\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_662","text":"HuguesMoreau\/TMD_fusion_benchmarkpreprocess\/transforms.py\n#!\/usr\/bin\/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis file contains diverse preprocessing functions (mostly norms ans spectrograms),\nand basic tests and visualizations.\nIf you are to work with any IPython console (ex: with Jupyter or spyder), is is advised\nto launch a '%matplotlib qt' ,to get clean widow\n\"\"\"\n\n\nif __name__ == '__main__': # this is used to launch the file from anywhere\n import sys\n sys.path.append(\"..\")\n\nimport numpy as np\nimport torch\nimport scipy.signal, scipy.interpolate, scipy.ndimage\n\n\nfrom param import classes_names, fs, duration_window, duration_overlap, duration_segment, spectro_batch_size\nfrom preprocess import Datasets\n\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n n_classes = len(classes_names)\n # We will need this for the tests\n DS = Datasets.SignalsDataSet(mode='train', split='balanced', comp_preprocess_first=False)\n\n\n#%% transform functions\n\n\"\"\"In all following functions, the input parameter (data) is, by default,\n a dict of numpy arrays, containing signal names (eg. 
\"Gyr_z\") as keys, and 1-dimensional\n arrays as values\n\nMost of this part contains basic visualizations to make sure the preprocessing is correct\"\"\"\n\n\n\n\nclass TemporalTransform():\n \"\"\" create the base transform to use to each element of the data\n Also generates data for thr 'Random' and 'Zero' cases\n\n Parameters\n ----------\n signal_name_list: a list of string signals (ex: 'Gyr_y', 'Ori_x')\n If a string ends by \"_norm\" (ex: \"Mag_norm\"), the output will\n be the norm of the three (or four) axis of the signal.\n The signals can also be 'Zero', in which case the segments are only\n zeros, or 'Random', in which case each data oint is sampled with normal\n distribution (zero mean, unit variance)\n\n Returns\n -------\n a function with input: a dict of (_, 6000) arrays (key example: 'Gyr_y')\n and output: a dictionnary of arrays.\n \"\"\"\n def __init__(self, signal_name_list):\n super(TemporalTransform, self).__init__()\n self.signal_name_list = signal_name_list\n\n\n def __call__(self, data):\n \"\"\"\n Parameters\n ----------\n a dict of (B, 6000) arrays (key example: 'Gyr_y')\n\n Returns\n -------\n a dictionnary of arrays. This time, the keys are from signal_name_list,\n and the values are either raw signals (if the key ends with '_x',\n '_y', '_z', or '_w'); a norm of several signals (if the key ends\n with '_norm'); or a specific signal (if the key is 'Random' or\n 'Zero'). The shape of each array is (B, 6000), where B (batch size)\n depends on the input shape.\n \"\"\"\n\n outputs = {}\n for signal_name in self.signal_name_list:\n\n if signal_name[-2:] in ['_x', '_y', '_z', '_w'] or signal_name == \"Pressure\":\n processed_signal = data[signal_name]\n\n elif signal_name == 'Random':\n data_shape = data[\"Acc_x\"].shape\n processed_signal = np.random.randn(data_shape[0], data_shape[1]).astype(np.float32)\n\n elif signal_name == 'Zero':\n data_shape = data[\"Acc_x\"].shape\n processed_signal = np.zeros(data_shape).astype(np.float32)\n\n\n elif signal_name[-5:] == '_norm':\n suffix_location = signal_name.index(\"_\") # 4 if signal_name == \"LAcc\", 3 otherwise\n sensor = signal_name[:suffix_location] # ex: 'Acc', 'LAcc'\n\n if sensor == \"Ori\":\n # in that case, data[sensor+\"_x\"]**2 + data[sensor+\"_y\"]**2 + data[sensor+\"_z\"]**2 should be 1.0\n processed_signal = np.sqrt(data[sensor+\"_x\"]**2 + data[sensor+\"_y\"]**2 + data[sensor+\"_z\"]**2 \\\n + data[sensor+\"_w\"]**2)\n else :\n processed_signal = np.sqrt(data[sensor+\"_x\"]**2 + data[sensor+\"_y\"]**2 + data[sensor+\"_z\"]**2)\n else :\n raise ValueError(\"unknown signal name: '{}'. 
Signal names should end with either '_x', '_y', '_z', '_w', or '_norm'\".format(signal_name))\n\n outputs[signal_name] = processed_signal\n\n return outputs\n\n\n\n def __str__(self):\n \"\"\"purely for visual purposes, so that we can print() the function\"\"\"\n str_to_return = \"Temporal_transform\"\n str_to_return += \"\\n\\t Signals: {}\".format(self.signal_name_list)\n return str_to_return\n\n\n\n\n\n\nif __name__ == \"__main__\":\n\n # plot one figure per sensor\n # on each figure, one subplot per class,\n # to find one instance per each class, we start looking at index = index0\n index0 = 0\n\n for tested_signal_name in [\"Acc_norm\", \"Ori_norm\", \"Mag_norm\", \"LAcc_x\"]:\n # plot 1 segment from each class.\n plt.figure()\n\n if tested_signal_name != 'Pressure':\n suffix_location = tested_signal_name.index(\"_\")\n tested_sensor = tested_signal_name[:suffix_location] # ex: 'Acc', 'LAcc'\n else:\n tested_sensor = 'Pressure'\n\n sensor_axis = [tested_sensor + axis for axis in [\"_x\", \"_y\", \"_z\"]] if tested_sensor != 'Pressure' else ['Pressure']\n if tested_sensor == \"Ori\" : sensor_axis.append(tested_sensor+\"_w\")\n\n temporal_transform = TemporalTransform([tested_signal_name])\n\n remaining_classes = classes_names.copy()\n index = index0\n\n while len(remaining_classes)>0:\n data_tensor, class_tensor = DS[index] # data is a dict of 2D tensors (1,nb)\n data_cpu = {signal:data_tensor[signal].to(torch.device('cpu')).detach().numpy() for signal in data_tensor.keys()}\n class_index = int(class_tensor)\n class_name = classes_names[class_index-1]\n\n if class_name in remaining_classes:\n\n remaining_classes.remove(class_name)\n plt.subplot(2, 4, n_classes - len(remaining_classes))\n\n\n for k,signal in enumerate(sensor_axis):\n\n if k==0: # compute the temporal axis once\n nb = data_cpu[signal].shape[1]\n x_t = np.linspace(0, nb\/fs, nb)\n\n plt.plot(x_t, data_cpu[signal][0,:])\n\n selected_signal = temporal_transform(data_cpu)[tested_signal_name]\n\n error_message_dtype = \"One of the signals does not have the correct type: {}, {} \\n dtype should be float32, is actually {}\".format(tested_signal_name, str(temporal_transform), selected_signal.dtype)\n assert (selected_signal.dtype == 'float32'), error_message_dtype\n\n plt.plot(x_t, selected_signal[0,:], '--')\n plt.xlabel(\"t (s)\")\n legend = sensor_axis + [tested_signal_name+' (selected)']\n plt.legend(legend)\n plt.title(\"{} ({}, index={})\".format(tested_sensor, classes_names[class_index-1], index))\n\n\n index +=1\n\n plt.show()\n\n\n\n\n#%% FFT\n\nclass FFTTransform():\n \"\"\" create a transform to use to return the power of the spectrum\n (computed through a Fourier transform) of each element of the data\n\n Parameters\n ----------\n signal_name_list: a list of string signals (ex: 'Gyr_y', 'Ori_x')\n If a string ends by \"_norm\" (ex: \"Mag_norm\"), the output will\n be the norm of the three (or four) axis of the signal.\n\n Returns\n -------\n a function with input: a dict of (B, 6000) arrays (key example: 'Gyr_y')\n and output: a dictionnary of (B, 6000) arrays.\n \"\"\"\n def __init__(self, signal_name_list):\n super(FFTTransform, self).__init__()\n self.signal_name_list = signal_name_list\n self.temporal_transform = TemporalTransform(signal_name_list)\n\n\n def __call__(self, data):\n \"\"\"\n Parameters\n ----------\n a dict of (B, 6000) arrays (key example: 'Mag_x')\n\n Returns\n -------\n a dictionnary of arrays. The keys are from signal_name_list, and the\n values are the power spectra of each signal. 
The shape of each array is\n (B, 6000), where B (batch size) depends on the input shape\n \"\"\"\n temporal_signals = self.temporal_transform(data)\n del data # free some memory\n\n outputs = {}\n for signal_name in self.signal_name_list:\n complex_fft = np.fft.fft(temporal_signals[signal_name], axis=1)\n power_fft = np.abs(complex_fft)\n power_fft[:,0] = 0. # remove the DC component (to avoid this component\n # outscales te others)\n\n centered_power_fft = np.fft.fftshift(power_fft, axes=1) # so 0 Hz is in the middle\n\n outputs[signal_name] = centered_power_fft.astype('float32')\n del temporal_signals[signal_name] # release the memory\n\n # a faire, calculer les f et les sauver\n # self.f=f\n\n return outputs\n\n\n\n def __str__(self):\n \"\"\"purely for visual purposes, so that we can print() the function\"\"\"\n str_to_return = \"FFT_transform\"\n str_to_return += \"\\n\\t Signals: {}\".format(self.signal_name_list)\n return str_to_return\n\n\n\n\n#%%\n\nif __name__ == \"__main__\":\n\n # classes to plot,\n sel_classes = [\"Still\",\"Walk\",\"Run\",\"Train\"]\n nsel = len(sel_classes)\n\n for tested_signal_name in [\"Acc_norm\", \"Gyr_y\", \"Mag_norm\", \"Pressure\"]:\n # plot 1 segment from each class.\n plt.figure()\n tested_sensor = tested_signal_name[:3]\n if \"_\" in tested_sensor:\n sensor_axis = [tested_sensor + axis for axis in [\"_x\", \"_y\", \"_z\"]]\n else: # Pressure\n sensor_axis = [tested_sensor]\n if tested_sensor == \"Ori\" : sensor_axis.append(tested_sensor+\"_w\")\n\n fft_transform = FFTTransform([tested_signal_name])\n temporal_transform = TemporalTransform([tested_signal_name])\n\n remaining_classes = sel_classes.copy()\n\n index = 0\n isub = 1\n\n while len(remaining_classes)>0:\n data_tensor, class_tensor = DS[index]\n data_cpu = {signal:data_tensor[signal].to(torch.device('cpu')).detach().numpy() for signal in data_tensor.keys()}\n class_index = int(class_tensor)\n\n class_name = classes_names[class_index-1]\n\n if class_name in remaining_classes:\n remaining_classes.remove(class_name)\n\n plt.subplot(2, nsel, isub)\n selected_signal = temporal_transform(data_cpu)[tested_signal_name]\n\n # plot the temporal signal\n nb = selected_signal.shape[1]\n x_t = np.linspace(0, nb\/fs, nb)\n\n x_f = np.linspace(-fs\/2,fs\/2, nb)\n\n plt.plot(x_t, selected_signal[0,:])\n plt.xlabel(\"t (s)\")\n plt.title(\"{} ({}, index={})\".format(tested_signal_name, classes_names[class_index-1], index))\n\n # plot the fft\n plt.subplot(2, nsel, isub + 4)\n isub += 1\n selected_power = fft_transform(data_cpu)[tested_signal_name]\n\n error_message_dtype = \"One of the signals does not have the correct type: {}, {} \\n dtype should be float32, is actually {}\".format(tested_signal_name, str(fft_transform), selected_power.dtype)\n assert (selected_power.dtype == 'float32'), error_message_dtype\n\n plt.plot(x_f, selected_power[0,:])\n plt.xlabel(\"f (Hz)\")\n plt.title(\"FFT of {} ({}, index={})\".format(tested_signal_name, classes_names[class_index-1], index))\n\n index +=1\n plt.show()\n\n\n\n\n\n\n\n\n#%%\n\n# ---------------- Spectral transforms ---------------------\n\n\n# Interpolation functions\ndef interpol_log(f, t, spectrogram, out_size):\n \"\"\"interpolates the spectrogram in input using a linear axis for the timestamps and a LOG axis for the frequencies\n\n Parameters\n ----------\n f : numpy array, shape: (F_in,), frequencies of the spectrogram\n t : numpy array, shape: (T_in,), timestamps of the spectrogram\n spectrogram : (B, F_in, T_in), B is batch size; 3D numpy array\n\n 
out_size : couple of ints (F_out, T_out)\n\n Returns\n -------\n f_interpolated : numpy array, shape: (F_out,), frequencies of the spectrogram AFTER interpolation\n t_interpolated : numpy array, shape: (T_out,), timestamps of the spectrogram AFTER interpolation\n a spectrogram, where the f axis (second dimension) has been re-interpolated\n using a log axis\n\n \"\"\"\n B = spectrogram.shape[0]\n out_f, out_t = out_size\n\n log_f = np.log(f+f[1]) # log between 0.2 Hz and 50.2 Hz\n\n log_f_normalized = (log_f-log_f[0])\/(log_f[-1]-log_f[0]) # between 0.0 and 1.0\n t_normalized = (t-t[0])\/(t[-1]-t[0])\n\n rescaled_f = out_f*log_f_normalized # 0 and 48\n # rescaled_f = (out_f-1)*log_f_normalized ??\n rescaled_t = out_t*t_normalized\n\n spectrogram_interpolated = np.zeros( (B, out_f, out_t), dtype='float32')\n index_f, index_t = np.arange(out_f), np.arange(out_t) # between 0 and 47\n\n for i in range(B):\n spectrogram_fn = scipy.interpolate.interp2d(rescaled_t, rescaled_f, spectrogram[i,:,:], copy=False)\n # interp2d returns a 2D function\n spectrogram_interpolated[i,:,:] = spectrogram_fn(index_t, index_f) # care to the order\n\n f_fn = scipy.interpolate.interp1d(rescaled_f, f, copy=False)\n f_interpolated = f_fn(index_f)\n\n t_fn = scipy.interpolate.interp1d(rescaled_t, t, copy=False)\n t_interpolated = t_fn(index_t)\n\n\n return f_interpolated, t_interpolated, spectrogram_interpolated\n\n\n\ndef interpol_lin(f, t, spectrogram, out_size):\n \"\"\"interpolates the spectrogram in input using a linear axis for the timestamps AND the frequencies\n\n Parameters\n ----------\n f : numpy array, shape: (F_in,), frequencies of the spectrogram\n t : numpy array, shape: (T_in,), timestamps of the spectrogram\n spectrogram : (B, F_in, T_in) numpy array\n out_size : couple of ints (F_out, T_out)\n (does not need f or t)\n\n\n Returns\n -------\n f_interpolated : numpy array, shape: (F_out,), frequencies of the spectrogram AFTER interpolation\n t_interpolated : numpy array, shape: (T_out,), timestamps of the spectrogram AFTER interpolation\n a spectrogram: 3D numpy array, where the f axis (second dimension) has been re-interpolated\n using a linear axis\n \"\"\"\n B, F_in, T_in = spectrogram.shape\n out_f, out_t = out_size\n output_shape = (B, out_f, out_t ) # result is (B, out_f, out_t )\n\n rescale_factor_d = 1. 
# for depth\n rescale_factor_f = F_in\/out_f # typically 550\/48\n rescale_factor_t = T_in\/out_t\n\n matrix_transform = np.diag( np.array([rescale_factor_d, rescale_factor_f, rescale_factor_t]) ) # (3,3) matrix\n\n # spectrogram = matrix_transform * spectrogram_interpolated\n spectrogram_interpolated = scipy.ndimage.affine_transform(spectrogram, matrix_transform, offset=0, order=1, output_shape=output_shape)\n # we only use linear interpolation because we almost always downsample, and because 2nd order methods and above\n # have a nasty tendency to create small negative local minima between two strictly positive values\n # we do not want this when we apply a log to the values of the spectrogram\n\n f_interpolated = scipy.ndimage.affine_transform(f, np.array( [rescale_factor_f] ) , offset=0, order=1, output_shape = (out_f,) )\n t_interpolated = scipy.ndimage.affine_transform(t, np.array( [rescale_factor_t] ) , offset=0, order=1, output_shape = (out_t,) )\n\n\n return f_interpolated, t_interpolated, spectrogram_interpolated\n\n\n\ndef no_interpolation(f, t, spectrogram, out_size):\n \"\"\" This function is just a placeholder that mimics the arguments\n of the two previous interpolation functions \"\"\"\n return f, t, spectrogram\n\n\n\n\n\n\n\n\n\n\n#%%\n# ---------------- The spectrogram class --------------\nclass SpectrogramTransform():\n \"\"\" create the transform to work with spectrograms. This class behaves\n essentially the same as TemporalTransform, except the created transform\n returns a dict of 3d arrays instead of 2d\n\n\n Parameters\n ----------\n signal_name_list: a list of string signals (ex: 'Gyr_y', 'Ori_x')\n If a string ends with \"_norm\" (ex: \"Mag_norm\"), the output will\n be the norm of the three (or four) axes of the signal.\n fs: sampling frequency\n duration_window, duration_overlap: duration in sec of spectrogram window and overlap\n spectro_batch_size:\n turning 13,000 temporal signals into (550, 500) array\n spectrograms at once is too much: a single (13000, 550, 500) array,\n with single precision, requires 7.15 GB!\n This is why we work with batches of 1000 instead. For each batch,\n we compute the complete spectrogram (1000 x 550 x 500), then\n interpolate it to smaller sizes, before working with the following batch.\n\n interpolation : string (\"log\", \"linear\", \"none\")\n log_power : bool. If True, the values of the power spectrum are replaced\n by their log\n out_size : tuple of integers (nb_interp_f, nb_interp_t) = size of the spectrogram AFTER interpolation. Ignored if no interpolation occurs. 
Default: None\n the spectrogram is computed for 2 1D-arrays: f and t\n\n flag_debug: flag to print debugging info\n\n\n\n Returns\n -------\n a function with input: data : a dict of (_, 6000) arrays (key example: 'Gyr_y')\n and output: a dictionary of 2d arrays.\n\n \"\"\"\n def __init__(self, signal_name_list, fs, duration_window, duration_overlap, spectro_batch_size, interpolation,\n log_power, out_size=None, flag_debug=False):\n super(SpectrogramTransform, self).__init__()\n\n self.temporal_transform = TemporalTransform(signal_name_list)\n self.fs = fs\n self.duration_window = duration_window\n self.duration_overlap = duration_overlap\n self.spectro_batch_size = spectro_batch_size\n\n self.signal_name_list = signal_name_list\n self.log_power = log_power\n self.interpolation_name = interpolation\n\n if interpolation == \"linear\":\n self.interpolation_fn = interpol_lin\n self.out_size = out_size\n\n elif interpolation == \"log\":\n self.interpolation_fn = interpol_log\n self.out_size = out_size\n\n elif interpolation == \"none\":\n self.interpolation_fn = no_interpolation\n self.out_size = None\n\n else :\n raise ValueError(\"Unknown interpolation: '{}'. Use one of 'log', 'linear', 'none'\".format(interpolation))\n\n # if interpolation == \"none\" and out_size != None :\n # warnings.warn(\"No interpolation is to take place, but a target output size was provided. The out_size argument will be ignored\", Warning)\n\n self.flag_debug = flag_debug\n\n def __call__(self, data):\n \"\"\"\n Parameters\n ----------\n data : a dict of (B, 6000) arrays (key example: 'Gyr_y')\n\n Returns\n -------\n a dictionary of 2d arrays. The keys are from signal_name_list,\n and the values are either spectrograms of raw signals (if the key\n ends with '_x', '_y', '_z', or '_w'); or a spectrogram of a norm of\n signals (if the key ends with '_norm'). The shape of the spectrogram\n is (B, F, T), where B (batch size) depends on the input shape, and\n F and T are given by self.out_size\n \"\"\"\n\n\n temporal_signals = self.temporal_transform(data)\n del data # free some memory\n\n\n fs = self.fs\n\n nperseg = int(self.duration_window * fs)\n noverlap = int(self.duration_overlap * fs)\n\n spectro_batch_size = self.spectro_batch_size\n # turning 13,000 temporal signals into (550, 500) array\n # spectrograms at once is too much: a single (13000, 550, 500) array,\n # with single precision, requires 7.15 GB!\n # This is why we work with batches of 1000 instead. 
For each batch,\n # we compute the complete sectrogram (1000 x 550 x 500), then\n # interpolate it to smaller sizes, before working wit the following batch.\n\n out_size = self.out_size\n\n flag_debug = self.flag_debug\n\n outputs = {}\n\n for signal_name in self.signal_name_list:\n current_spectro_batch_size = temporal_signals[signal_name].shape[0]\n\n if current_spectro_batch_size < spectro_batch_size :\n f, t, spectrogram = scipy.signal.spectrogram(temporal_signals[signal_name], fs=fs, nperseg=nperseg, noverlap=noverlap)\n\n f_interpolated, t_interpolated, interpolated_spectrogram = self.interpolation_fn(f, t, spectrogram, out_size)\n # f, t, and possibly out_size will be ignored when the function does not need them\n\n else :\n n_batches = (current_spectro_batch_size-1)\/\/spectro_batch_size +1\n\n\n\n if out_size is not None: # we actually compute the interpolation\n nb_interp_f, nb_interp_t = out_size\n\n else: # we only recompute the shapes of the raw spectrogram\n nb_interp_f = int(duration_window*fs\/2) +1\n nb_interp_t = int((duration_segment-duration_window)\/(duration_window-duration_overlap)) +1\n\n interpolated_spectrogram = np.zeros((current_spectro_batch_size, nb_interp_f, nb_interp_t), dtype='float32')\n for i in range(n_batches):\n i_min = i * spectro_batch_size\n i_max = (i+1) * spectro_batch_size # does not matter if it goes beyond current_spectro_batch_size\n this_temporal_signal = temporal_signals[signal_name][i_min:i_max,:]\n\n f, t, spectrogram = scipy.signal.spectrogram(this_temporal_signal, fs=fs, nperseg=nperseg, noverlap=noverlap)\n\n if out_size is not None:\n f_interpolated, t_interpolated, interpolated_spectrogram[i_min:i_max,:,:] = self.interpolation_fn(f, t, spectrogram, out_size) # erase the spectrogram by its interpolation\n else:\n f_interpolated, t_interpolated, interpolated_spectrogram[i_min:i_max,:,:] = f, t, spectrogram\n\n\n if flag_debug:\n print('f_interpolated:')\n print(f_interpolated[:5])\n print(f_interpolated[-5:])\n\n\n del temporal_signals[signal_name]\n\n if self.log_power :\n np.log(interpolated_spectrogram + 1e-10, dtype='float32', out=interpolated_spectrogram)\n\n outputs[signal_name] = interpolated_spectrogram\n\n self.f_interpolated = f_interpolated\n self.t_interpolated = t_interpolated\n\n # for future debug\n # self.f = f\n # self.t = t\n # self.spectrogram = spectrogram\n\n\n\n\n return outputs\n\n\n\n def __str__(self):\n \"\"\"purely for visual purposes, so that we can print() the function\"\"\"\n str_to_return = \"Spectrogram transform\"\n str_to_return += \"\\n\\t Signals: {}\".format(self.signal_name_list)\n if self.interpolation_fn != no_interpolation:\n if self.out_size != None :\n str_to_return += \"\\n\\t Output size: {}\".format(self.out_size)\n else :\n str_to_return += \"\\n\\t Output size: unchanged\"\n\n str_to_return += \"\\n\\t Interpolation: {}\".format(self.interpolation_name)\n str_to_return += \"\\n\\t Log power: {}\".format(self.log_power)\n\n return str_to_return\n\n# end of class SpectrogramTransform():\n\n\n\n#%%\nif __name__ == \"__main__\":\n\n flag_debug = 0\n flag_index = False # if true, plot the spectrogram wrt to index; if False, wrt to Timestamps and Frequencies\n fontdict = {'fontsize':10}\n vmin, vmax = 0, 60\n vmin, vmax = None, None\n n_ticks = 10\n\n # we plot the raw spectrogram and two interpolated spectrograms for the following classes\n sel_classes = [\"Run\"]\n nsel = len(sel_classes)\n functions = {\"raw spectrogram\": \"none\",\n \"linear interpolation\": \"linear\",\n \"logarithmic 
interpolation\": \"log\"}\n\n remaining_classes = sel_classes.copy()\n index = 3204 # where to tart the search\n\n signal_name = \"Acc_norm\"\n temporal_transform = TemporalTransform([signal_name]) # we will plot the result\n\n while len(remaining_classes)>0:\n\n flag_debug = len(remaining_classes) == (nsel-1)\n\n data_tensor, class_tensor = DS[index]\n data_cpu = {signal:data_tensor[signal].to(torch.device('cpu')).detach().numpy() for signal in data_tensor.keys()}\n class_index = int(class_tensor)\n\n class_name = classes_names[class_index-1]\n\n prefix_title = '%s (index=%d)'% (class_name,index)\n\n\n\n if class_name in remaining_classes:\n remaining_classes.remove(class_name)\n\n\n temporal_signal = temporal_transform(data_cpu)[signal_name]\n nb = temporal_signal.shape[1]\n x_t = np.linspace(0, nb\/fs, nb)\n\n\n plt.figure(figsize=(30,10))\n\n plt.subplot(2,4,1)\n\n\n plt.plot(x_t, temporal_signal[0,:])\n plt.title(prefix_title + \"\\nraw signal : {}\".format(signal_name), fontdict)\n plt.xlabel(\"t (sec)\")\n plt.ylabel(\"Acc (m\/s²)\")\n\n index_figure = 2\n\n\n # for log_power in [False]:\n for log_power in [False, True]:\n\n if flag_debug:\n print('\\n log_power = %s' % log_power)\n\n for f_name in functions :\n\n if flag_debug:\n print('\\n f_name = %s' % f_name)\n\n function_interpol = functions[f_name]\n\n data_tensor, _ = DS[index] # we need to recreate data because the variable is deleted\n data_cpu = {signal:data_tensor[signal].to(torch.device('cpu')).detach().numpy() for signal in data_tensor.keys()}\n\n spectrogram_transform = SpectrogramTransform([signal_name], fs, duration_window, duration_overlap, spectro_batch_size,\n function_interpol, log_power, out_size=(48, 48), flag_debug = flag_debug)\n\n spectrogram_interpolated = spectrogram_transform(data_cpu)[signal_name]\n\n f_interpolated = spectrogram_transform.f_interpolated\n t_interpolated = spectrogram_transform.t_interpolated\n\n\n error_message_dtype = \"One of the spectrograms does not have the correct type: {}, log_power={}, {}. 
\\n dtype should be float32, is actually {}\".format(signal_name, str(log_power), f_name, spectrogram_interpolated.dtype)\n assert (spectrogram_interpolated.dtype == 'float32'), error_message_dtype\n\n plt.subplot(2,4,index_figure)\n\n if flag_index:\n ylabel = \"f (index)\"\n xlabel = \"t (index)\"\n\n plt.imshow(spectrogram_interpolated[0,:,:])\n\n else:\n ylabel = \"f (Hz) \"\n xlabel = \"t (s)\"\n\n t_interpolated = spectrogram_transform.t_interpolated\n f_interpolated = spectrogram_transform.f_interpolated\n matrix_shape = spectrogram_interpolated.shape\n time_list = [f'{t_interpolated[i]:.0f}' for i in np.round(np.linspace(0, matrix_shape[2]-1,n_ticks)).astype(int)]\n freq_list = [f'{f_interpolated[i]:.1f}' for i in np.round(np.linspace(0, matrix_shape[1]-1,n_ticks)).astype(int)]\n\n plt.xticks(np.linspace(0, matrix_shape[2]-1, n_ticks), time_list)\n plt.yticks(np.linspace(0, matrix_shape[1]-1, n_ticks), freq_list)\n\n plt.imshow(spectrogram_interpolated[0,:,:])\n\n plt.ylabel(ylabel)\n plt.xlabel(xlabel)\n plt.colorbar()\n\n index_figure += 1\n\n log_power_text = 'log power' if log_power==True else 'power'\n\n plt.title(\"{} of {}\".format( log_power_text, f_name), fontdict = {'fontsize':10})\n\n index_figure += 1 # for the vertical alignment\n index +=1\n plt.show()\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_663","text":"import numpy as np\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport scipy.stats as sts\nfrom finn.util.gdrive import *\nimport sklearn\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.svm import SVR\nfrom sklearn import model_selection\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import cross_val_score\nimport os, csv\n\n#define the worksheet name from finn-resource-dashboard\nworksheet_name = \"FCLayer_resources\"\n#define the directory name where to save the graphs\ndirectory_name = \"FCLayer\"\n\n##create the directory\nnew_dir_path = \"..\/graphs\/%s\" % directory_name\ntry:\n os.mkdir(new_dir_path)\nexcept OSError:\n print (\"Creation of the directory %s failed\" % new_dir_path)\nelse:\n print (\"Successfully created the directory %s \" % new_dir_path)\n\nfilename = \"db_mem_const.csv\"\n\n#get all records from the selected worksheet\nlist_of_dicts = get_records_from_resource_dashboard(worksheet_name)\n\n# convert list of dicts to dataframe\ndf = pd.DataFrame(list_of_dicts)\nprint(df)\n\nfpga = df['FPGA'].iloc[0]\n\n#get synth data\ndf = df[df.apply(lambda r: r.str.contains('synthesis', case=False).any(), axis=1)]\n\n#get records where act=None\ndf = df[df['act'].astype(str) == 'None']\n\n#get records where mem_mode=external\ndf_external = df[df['mem_mode'].astype(str) == 'external']\n\n#get records where mem_mode=const\ndf_const = df[df['mem_mode'].astype(str) == 'const']\n\ndef models(res_class):\n\n #encode wdt, idt\n labelencoder = LabelEncoder()\n df_training['wdt_encoded'] = labelencoder.fit_transform(df_training['wdt'])\n df_training['idt_encoded'] = labelencoder.fit_transform(df_training['idt'])\n\n features = ['mh', 'mw', 'pe', 'simd', 'wdt_encoded', 'idt_encoded']\n #features = ['mh', 'mw', 'pe', 'simd']\n #extract features\n X = df_training.loc[:, features].values\n #extract target\n Y = df_training.loc[:, [res_class]].values\n\n #split the data into train\/test data sets 30% testing, 70% training\n X_train, X_test, Y_train, Y_test = model_selection.train_test_split(X, Y, 
test_size = 0.3, random_state=0)\n \n #linear regression\n linear_reg_model = LinearRegression()\n linear_reg_model = linear_reg_model.fit(X_train, Y_train)\n Y_predict_linear = linear_reg_model.predict(X_test)\n score_linear = linear_reg_model.score(X_test, Y_test)\n\n #search for the best SVR hyperparameters\n gscv = GridSearchCV(\n estimator=SVR(kernel='poly', max_iter=20000000),\n param_grid={\n #'C': [0.1, 1, 10, 100, 1000],\n #'epsilon': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10],\n #'gamma': [0.0001, 0.001, 0.005, 0.1, 1, 3, 5]\n 'C': [1],\n 'epsilon': [0.1],\n 'gamma': [0.001]\n },\n cv=5, n_jobs = -1, verbose = 2)\n\n grid_result = gscv.fit(X_train, Y_train.ravel())\n\n print(\"Best parameters set found on development set:\")\n print()\n print(grid_result.best_params_)\n\n #get best hyperparameters and define the model\n best_params = grid_result.best_params_\n best_svr = SVR(kernel='poly', C=best_params[\"C\"], epsilon=best_params[\"epsilon\"], gamma=best_params[\"gamma\"])\n \n #train the SVR model\n best_svr = best_svr.fit(X_train, Y_train.ravel())\n\n Y_predict_svr = best_svr.predict(X_test)\n\n print(X_test)\n\n print(best_svr.score(X_test, Y_test))\n\n #cross-validation\n scores = cross_val_score(best_svr, X, Y.ravel(), cv=5)\n print(scores)\n\n return X_test, Y_test, Y_predict_svr, best_svr.score(X_test, Y_test), Y_predict_linear, score_linear\n\ndef generate_graph_test_set(parameter, res_class):\n\n X_test, Y_test, Y_predict_svr, score_svr, Y_predict_linear, score_linear = models(res_class)\n\n fig = plt.figure(figsize=(20, 11))\n ax = fig.gca()\n\n X_test = [col[2] for col in X_test]\n\n ax.scatter(Y_predict_linear, X_test, marker=\"o\", s=200, facecolors='none', edgecolors='g', label='predicted_linear')\n ax.scatter(Y_predict_svr, X_test, marker=\"^\", s=500, facecolors='none', edgecolors='m', label='predicted_svr')\n ax.scatter(Y_test, X_test, marker=\"x\", s=50, color='r', label='synth')\n\n ax.set_xlabel(\"%s\" % 'LUT')\n ax.set_ylabel(\"%s\" % 'pe')\n\n if \"FCLayer\" in worksheet_name:\n ax.set_title(\"%s vs %s (SVR_score = %s, linear_score = %s)\" % ('pe', 'LUT', score_svr, score_linear))\n else:\n ax.set_title(\"%s vs %s (SVR_score = %s, linear_score = %s)\" % ('pe', 'LUT', score_svr, score_linear))\n \n leg = ax.legend()\n \n fig.savefig('..\/graphs\/%s\/plot_%s_vs_%s_weight_mem_const_test.png' % (directory_name, 'pe', 'LUT'), bbox_inches='tight')\n\n\n#get dataframe headers\nheaders = list(df)\n\n#get all parameters and resource classes from the csv file\nparameters = []\nres_classes = []\nseparator = 0\nfor s in headers:\n if s == 'Resources from:':\n separator = 1\n elif separator:\n res_classes.append(s)\n else:\n parameters.append(s)\n\n#remove tools details: FPGA, finn_commit, vivado_version, vivado_build_no\n#remove timing \n#remove act, mem_mode, Res from\ncolumns_to_remove = ['act', 'mem_mode', 'Resources from:', 'FPGA', 'finn_commit', 'vivado_version', 'vivado_build_no', 'TargetClockPeriod', 'EstimatedClockPeriod', 'Delay', 'TargetClockFrequency [MHz]', 'EstimatedClockFrequency [MHz]']\nres_classes = [element for element in res_classes if element not in columns_to_remove]\nparameters = [element for element in parameters if element not in columns_to_remove]\n\ndf_const = df_const.drop(columns_to_remove, axis=1)\ndf_external = df_external.drop(columns_to_remove, axis=1)\n\nprint(parameters)\nprint(res_classes)\nprint(len(df_external))\nprint(len(df_const))\nprint(df_external)\nprint(df_const)\n\npd.set_option('display.max_columns', 
500)\n\n#isolate contribution of weights to overall resource utilization by subtracting resources of equivalent (mem_mode = external) configuration \ndf_training = pd.DataFrame(columns=list(df_const))\n\n\nwith open(filename, 'w') as csvfile:\n csvwriter = csv.writer(csvfile)\n headers = list(df_training)\n #headers.insert(0, 'set')\n csvwriter.writerow(headers)\n\nfound_row = False\nfor index, row1 in df_external.iterrows():\n for index, row2 in df_const.iterrows():\n for s in parameters:\n if row1[s] == row2[s]:\n found_row = True\n else:\n found_row = False\n break\n if found_row == True:\n print(row1)\n print(row2)\n \"\"\"\n with open(filename, 'a+') as csvfile:\n csvwriter = csv.writer(csvfile)\n row = pd.concat([pd.Series(['external']), row1])\n csvwriter.writerow(row)\n row = pd.concat([pd.Series(['const']), row2])\n csvwriter.writerow(row)\n \"\"\"\n for res in res_classes:\n row2[res] = row2[res] - row1[res]\n df_training = df_training.append(row2, ignore_index=True)\n \n with open(filename, 'a+') as csvfile:\n csvwriter = csv.writer(csvfile)\n #row2 = pd.concat([pd.Series(['result']), row2])\n csvwriter.writerow(row2)\n\nprint(df_training)\n\n\n#for testing\nparameters = ['pe']\nres_classes = ['LUT']\n\nfor parameter in parameters:\n for res_class in res_classes:\n generate_graph_test_set(parameter, res_class)\n \n\"\"\"\n####test####\nparameters = ['mh', 'mw', 'pe','simd']\nres_classes = ['LUT', 'BRAM']\n\nlist_columns = ['mh', 'mw', 'pe','simd', 'LUT', 'BRAM']\nlist_data = [ [16, 16, 8, 8, 345, 34],[16, 16, 2, 2, 456, 45], [32, 32, 16, 16, 786, 77]]\ndf_ext= pd.DataFrame(columns=list_columns, data=list_data)\n\nlist_columns = ['mh', 'mw', 'pe','simd', 'LUT', 'BRAM']\nlist_data = [ [16, 16, 8, 8, 3345, 234],[32, 32, 16, 16, 5786, 477], [15, 15, 2, 2, 2456, 145]]\ndf_ct= pd.DataFrame(columns=list_columns, data=list_data)\n\nprint(df_ext)\nprint(df_ct)\ndf_training = pd.DataFrame(columns=list(df_ct))\nfound_row = False\nfor index, row1 in df_ext.iterrows():\n for index, row2 in df_ct.iterrows():\n for s in parameters:\n if row1[s] == row2[s]:\n found_row = True\n else:\n found_row = False\n break\n if found_row == True:\n for res in res_classes:\n row2[res] = row2[res] - row1[res]\n df_training = df_training.append(row2, ignore_index=True)\nprint(df_training)\n\"\"\""} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_664","text":"extract.py\nimport argparse\nimport os\nimport numpy as np\nimport scipy.io as sio\nimport torch\n\nfrom configs.default import dataset_cfg\nfrom data import get_test_loader\nfrom models.model import Model\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"gpu\", type=int)\n parser.add_argument(\"model_path\", type=str) # TODO compatible for different models\n parser.add_argument(\"--img-h\", type=int, default=256)\n parser.add_argument(\"--dataset\", type=str, default=None)\n\n args = parser.parse_args()\n model_path = args.model_path\n fname = model_path.split(\"\/\")[-1]\n\n if args.dataset is not None:\n dataset = args.dataset\n else:\n dataset = model_path.split(\"\/\")[1]\n\n prefix = os.path.splitext(fname)[0]\n\n dataset_config = dataset_cfg.get(dataset)\n image_size = (args.img_h, 128)\n\n torch.backends.cudnn.benchmark = True\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(args.gpu)\n\n model = Model(eval=True, drop_last_stride=True)\n\n state_dict = torch.load(model_path)\n\n model.load_state_dict(state_dict, strict=False)\n model.float()\n model.eval()\n model.cuda()\n\n # 
extract query feature\n query = get_test_loader(root=os.path.join(dataset_config.root, dataset_config.query),\n batch_size=512,\n image_size=image_size,\n num_workers=16)\n\n query_feat = []\n query_label = []\n query_cam_id = []\n query_img_path = []\n for data, label, cam_id, img_path, _ in query:\n with torch.autograd.no_grad():\n feat = model(data.cuda(non_blocking=True))\n\n query_feat.append(feat.data.cpu().numpy())\n query_label.append(label.data.cpu().numpy())\n query_cam_id.append(cam_id.data.cpu().numpy())\n query_img_path.extend(img_path)\n\n query_feat = np.concatenate(query_feat, axis=0)\n query_label = np.concatenate(query_label, axis=0)\n query_cam_id = np.concatenate(query_cam_id, axis=0)\n print(query_feat.shape)\n\n dir_name = \"features\/{}\".format(dataset, prefix)\n if not os.path.isdir(dir_name):\n os.makedirs(dir_name)\n\n save_name = \"{}\/query-{}.mat\".format(dir_name, prefix)\n sio.savemat(save_name,\n {\"feat\": query_feat,\n \"ids\": query_label,\n \"cam_ids\": query_cam_id,\n \"img_path\": query_img_path})\n\n # extract gallery feature\n gallery = get_test_loader(root=os.path.join(dataset_config.root, dataset_config.gallery),\n batch_size=512,\n image_size=image_size,\n num_workers=16)\n\n gallery_feat = []\n gallery_label = []\n gallery_cam_id = []\n gallery_img_path = []\n for data, label, cam_id, img_path, _ in gallery:\n with torch.autograd.no_grad():\n feat = model(data.cuda(non_blocking=True))\n\n gallery_feat.append(feat.data.cpu().numpy())\n gallery_label.append(label)\n gallery_cam_id.append(cam_id)\n gallery_img_path.extend(img_path)\n\n gallery_feat = np.concatenate(gallery_feat, axis=0)\n gallery_label = np.concatenate(gallery_label, axis=0)\n gallery_cam_id = np.concatenate(gallery_cam_id, axis=0)\n print(gallery_feat.shape)\n\n save_name = \"{}\/gallery-{}.mat\".format(dir_name, prefix)\n sio.savemat(save_name,\n {\"feat\": gallery_feat,\n \"ids\": gallery_label,\n \"cam_ids\": gallery_cam_id,\n \"img_path\": gallery_img_path})\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_665","text":"jupito\/dwilib\n#!\/usr\/bin\/python3\n\n\"\"\"Get grid-wise features.\"\"\"\n\n# TODO Also scale lesiontype.\n\nimport argparse\nfrom itertools import product\nimport logging\nimport os.path\n\nimport numpy as np\nfrom scipy import ndimage\n\nimport dwi.files\nimport dwi.texture\nimport dwi.util\n\nlog = logging.getLogger('grid')\n\n\ndef parse_args():\n \"\"\"Parse command-line arguments.\"\"\"\n p = argparse.ArgumentParser(description=__doc__)\n p.add_argument('--verbose', '-v', action='count',\n help='increase verbosity')\n p.add_argument('--image', required=True,\n help='input image or pmap')\n p.add_argument('--param', type=int,\n help='parameter index')\n p.add_argument('--prostate', metavar='MASKFILE', required=True,\n help='prostate mask')\n p.add_argument('--lesions', metavar='MASKFILE', nargs='+', required=True,\n help='lesion masks')\n p.add_argument('--mbb', type=float,\n help='minimum bounding box padding in millimeters (try 15)')\n p.add_argument('--voxelsize', type=float,\n help='rescaled voxel size in millimeters (try 0.25)')\n p.add_argument('--winsize', type=float, default=5,\n help='window (cube) size in millimeters (default 5)')\n p.add_argument('--voxelspacing', type=float, nargs=3,\n help='force voxel spacing (leave out to read from image)')\n p.add_argument('--use_centroid', action='store_true',\n help='align by prostate centroid instead of image corner')\n p.add_argument('--nanbg', action='store_true',\n 
help='set non-prostate background to nan')\n p.add_argument('--lesiontypes', metavar='TYPE', nargs='+',\n help='lesion types in mask order (CZ or PZ)')\n p.add_argument('--output', metavar='FILENAME', required=True,\n help='output pmap file')\n return p.parse_args()\n\n\ndef get_lesiontype_array(lesiontypes, lesions):\n \"\"\"Create lesiontype array. It contains -1 or 1 depending on lesion type,\n or zero where no lesion.\n \"\"\"\n lesiontype = np.zeros_like(lesions[0], dtype=np.int8)\n if lesiontypes is not None:\n for lt, l in zip(lesiontypes, lesions):\n if lt.lower() == 'cz':\n lesiontype[l] = -1\n elif lt.lower() == 'pz':\n lesiontype[l] = 1\n else:\n raise ValueError('Invalid lesiontype: {}'.format(lt))\n log.info('Lesion types: %s, +1: %i, -1: %i', lesiontypes,\n np.count_nonzero(lesiontype == 1),\n np.count_nonzero(lesiontype == -1))\n return lesiontype\n\n\ndef get_mbb(mask, spacing, pad):\n \"\"\"Get mask minimum bounding box as slices, with minimum padding in mm.\"\"\"\n padding = [int(np.ceil(pad \/ x)) for x in spacing]\n physical_padding = [x * y for x, y in zip(padding, spacing)]\n mbb = dwi.util.bounding_box(mask, padding)\n slices = tuple(slice(*x) for x in mbb)\n log.info('Cropping minimum bounding box, padding: %s', pad)\n log.debug('\\tVoxel padding: %s', padding)\n log.debug('\\tPhysical padding: %s', physical_padding)\n log.debug('\\tMinimum bounding box: %s', mbb)\n return slices\n\n\ndef rescale(img, src_spacing, dst_spacing):\n \"\"\"Rescale image according to voxel spacing sequences (mm per voxel).\"\"\"\n factor = [s\/d for s, d in zip(src_spacing, dst_spacing)]\n log.info('Scaling by factor: %s', factor)\n output = ndimage.interpolation.zoom(img, factor, order=0)\n return output\n\n\ndef generate_windows(imageshape, winshape, center):\n \"\"\"Generate slice objects for a grid of windows around given center.\n\n Float center will be rounded. Yield a tuple with coordinate slices of each\n window, and window position relative to the center.\n \"\"\"\n center = [int(round(x)) for x in center]\n starts = [i % w for i, w in zip(center, winshape)]\n stops = [i-w+1 for i, w in zip(imageshape, winshape)]\n its = (range(*x) for x in zip(starts, stops, winshape))\n for coords in product(*its):\n slices = tuple(slice(i, i+w) for i, w in zip(coords, winshape))\n relative = tuple(int((i-c)\/w) for i, c, w in zip(coords, center,\n winshape))\n yield slices, relative\n\n\ndef get_datapoint(image, prostate, lesion, lesiontype, stat):\n \"\"\"Extract output datapoint for a cube.\n\n If stat is None, median is used. 
Otherwise, see dwi.texture.stats().\n \"\"\"\n assert image.shape == prostate.shape == lesion.shape == lesiontype.shape\n if np.isnan(image).all():\n value = np.nan\n else:\n image = image[np.isfinite(image)] # Remove nan values.\n if stat is None:\n value = np.median(image)\n else:\n value = dwi.texture.stats(image)[stat]\n nneg = np.count_nonzero(lesiontype < 0)\n npos = np.count_nonzero(lesiontype > 0)\n # Label as lesiontype -1 or 1 based on majority, or 0 if no lesion.\n lt = 0\n if nneg > 0:\n lt = -1\n if npos > nneg:\n lt = 1\n return (\n np.count_nonzero(prostate) \/ prostate.size,\n np.count_nonzero(lesion) \/ prostate.size,\n lt,\n value,\n )\n\n\ndef create_grid_centroid(metric_winshape, metric_gridshape=(100, 150, 150)):\n \"\"\"Create and fill grid array based on prostate centroid.\"\"\"\n gridshape = [int(g\/\/w) for g, w in zip(metric_gridshape, metric_winshape)]\n gridshape = [x + x % 2 for x in gridshape] # Make any odds even.\n grid = np.full(gridshape + [4], np.nan, dtype=np.float32)\n return grid\n\n\ndef create_grid_corner(image, winshape):\n \"\"\"Create and fill grid array based on corner.\"\"\"\n gridshape = [i\/\/w for i, w in zip(image.shape, winshape)]\n grid = np.full(gridshape + [4], np.nan, dtype=np.float32)\n return grid\n\n\ndef process(image, spacing, prostate, lesion, lesiontype, metric_winshape,\n stat, voxelsize=None, use_centroid=False):\n \"\"\"Process one parameter.\"\"\"\n # TODO: Should do them all at the same time.\n # Rescale image and masks.\n if voxelsize is not None:\n src_spacing = spacing\n spacing = [voxelsize] * 3\n image = rescale(image, src_spacing, spacing)\n prostate = prostate.astype(np.float_)\n prostate = rescale(prostate, src_spacing, spacing)\n prostate = dwi.util.asbool(prostate)\n lesion = lesion.astype(np.float_)\n lesion = rescale(lesion, src_spacing, spacing)\n lesion = dwi.util.asbool(lesion)\n assert image.shape == prostate.shape == lesion.shape\n # TODO Also scale lesiontype.\n\n phys_size = [x*y for x, y in zip(image.shape, spacing)]\n log.info('Transformed image: %s %s', image.shape, image.dtype)\n log.info('Voxel spacing: %s, physical size: %s', spacing, phys_size)\n\n # Extract grid datapoints. 
Grid placing is based either on prostate\n # centroid, or image corner.\n voxel_winshape = [int(round(x\/y)) for x, y in zip(metric_winshape,\n spacing)]\n log.debug('Window metric: %s, voxel: %s', metric_winshape, voxel_winshape)\n\n centroid = [round(x, 2) for x in dwi.util.centroid(prostate)]\n if use_centroid:\n base = centroid\n grid = create_grid_centroid(metric_winshape)\n grid_base = [s\/\/2 for s in grid.shape]\n else:\n base = [0] * 3\n grid = create_grid_corner(image, voxel_winshape)\n grid_base = [0] * 3\n log.debug('Prostate centroid: %s, base: %s', centroid, base)\n\n windows = list(generate_windows(image.shape, voxel_winshape, base))\n for slices, relative in windows:\n indices = tuple(c+r for c, r in zip(grid_base, relative))\n values = get_datapoint(image[slices], prostate[slices], lesion[slices],\n lesiontype[slices], stat)\n grid[indices] = values\n return grid\n\n\ndef average_image(image):\n \"\"\"Do average filtering for image.\"\"\"\n d = dict(size=(3, 3), mode='nearest')\n for p in range(image.shape[-1]):\n for i in range(image.shape[0]):\n ix = (i, slice(None), slice(None), p)\n image[ix] = ndimage.filters.median_filter(image[ix], **d)\n\n\ndef indexed_path(path, i):\n \"\"\"Add an index to path before possible extension.\"\"\"\n root, ext = os.path.splitext(path)\n return '{r}-{i}{e}'.format(r=root, i=i, e=ext)\n\n\ndef set_loggin(verbosity=0):\n \"\"\"Set up logging.\"\"\"\n import sys\n loglevel = logging.INFO if verbosity else logging.WARNING\n logging.basicConfig(level=loglevel, stream=sys.stdout)\n\n\ndef main():\n \"\"\"Main.\"\"\"\n args = parse_args()\n set_loggin(verbosity=args.verbose)\n\n image, attrs = dwi.files.read_pmap(args.image, ondisk=True)\n if args.param is not None:\n image = image[..., args.param]\n image.shape += (1,)\n attrs['parameters'] = [attrs['parameters'][args.param]]\n spacing = attrs['voxel_spacing']\n\n # Read masks.\n prostate = dwi.files.read_mask(args.prostate,\n expected_voxel_spacing=spacing)\n lesions = [dwi.files.read_mask(x, expected_voxel_spacing=spacing,\n container=prostate) for x in args.lesions]\n lesion = dwi.util.unify_masks(lesions)\n\n assert image.shape[:3] == prostate.shape == lesion.shape\n if args.voxelspacing is not None:\n spacing = args.voxelspacing\n\n phys_size = [x*y for x, y in zip(image.shape[:3], spacing)]\n log.info('Image: %s %s', image.shape, image.dtype)\n log.debug('Voxel spacing: %s, physical size: %s', spacing, phys_size)\n log.debug('Lesions: %i', len(args.lesions))\n\n lesiontype = get_lesiontype_array(args.lesiontypes, lesions)\n\n # Crop MBB. 
The remaining image is stored in memory.\n if args.mbb is None:\n slices = tuple(slice(0, x) for x in image.shape[:3])\n else:\n slices = get_mbb(prostate, spacing, args.mbb)\n image = image[slices]\n prostate = prostate[slices]\n lesion = lesion[slices]\n lesiontype = lesiontype[slices]\n assert (image.shape[:3] == prostate.shape == lesion.shape ==\n lesiontype.shape)\n\n # average_image(image)\n\n assert image.ndim == 4, image.ndim\n image = image.astype(np.float32)\n if args.nanbg:\n image[-prostate] = np.nan # Set background to nan.\n\n basic = ['prostate', 'lesion', 'lesiontype']\n metric_winshape = [args.winsize] * 3\n if args.param is None:\n params = attrs['parameters'] # Use average of each parameter.\n else:\n params = list(dwi.texture.stats([0]).keys()) # Use statistical feats.\n d = dict(voxelsize=args.voxelsize, use_centroid=args.use_centroid)\n grid = None\n for i, param in enumerate(params):\n if args.param is None:\n img = image[..., i]\n stat = None\n else:\n img = image[..., 0]\n stat = param\n a = process(img, spacing, prostate, lesion, lesiontype,\n metric_winshape, stat, **d)\n if grid is None:\n shape = a.shape[0:-1] + (len(basic) + len(params),)\n grid = np.empty(shape, dtype=a.dtype)\n log.info('Grid shape: %s', grid.shape)\n grid[..., 0:len(basic)] = a[..., 0:-1] # Init with basic.\n grid[..., len(basic)+i] = a[..., -1] # Add each feature.\n outfile = args.output\n attrs = dict(n_lesions=len(args.lesions), spacing=metric_winshape)\n attrs['parameters'] = basic + params\n log.info('Writing %s to %s', grid.shape, outfile)\n dwi.files.write_pmap(outfile, grid, attrs)\n\n\nif __name__ == '__main__':\n main()\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_666","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jan 23 00:27:00 2022\r\n\r\n@author: marco\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\nimport os\r\nfrom scipy.linalg import pinv as inv\r\nimport matplotlib.pyplot as plt\r\nos.chdir('C:\/\/Users\/\/marco\/\/Desktop\/\/Projects\/\/Bayesian_OLS')\r\ncwd = os.getcwd()\r\nprint(\"Current working directory: {0}\".format(cwd))\r\nimport warnings # `do not disturbe` mode\r\nwarnings.filterwarnings('ignore')\r\n\r\n#y = Puller.Banxico(serie=\"SR16734\", name=\"IGAE\", plot=False)\r\n#p = Puller.Banxico(serie=\"SP1\", name=\"Inflation\", plot=False)\r\n#r = Puller.Banxico(serie=\"SF3270\", name=\"Interest_rate\", plot=False)\r\n#m = Puller.Banxico(serie=\"SF1\", name=\"Money\", plot=False)\r\n#df = pd.concat([y, p, r, m], axis=1).dropna()\r\ndf = pd.read_excel('inflation.xls', index_col=0)\r\n\r\nY=df.iloc[:,0:1].to_numpy()\r\nX=df.iloc[:,1:].to_numpy()\r\n\r\ndef plot_dist(df, column, title):\r\n mu = df[column].mean()\r\n sigma = df[column].std()\r\n n, bins, patches = plt.hist(x=df[column], bins='auto', \r\n color='#0504aa',\r\n alpha=0.7, rwidth=0.85)\r\n plt.grid(axis='y', alpha=0.75)\r\n plt.ylabel(None)\r\n plt.title(title)\r\n maxfreq = n.max()\r\n plt.ylim(ymax=np.ceil(maxfreq \/ 10) * 10 if (maxfreq % 10 > 0) else maxfreq + 10)\r\n plt.show() #plot\r\n \r\ndef ar_companion_matrix(beta):\r\n # dont include constant\r\n k = beta.shape[0]-1\r\n FF = np.zeros((k, k))\r\n #insert identity matrix\r\n FF[1:k, 0:(k-1)] = np.eye(N=k-1, M=k-1)\r\n temp = (beta[1:k+1, :]).T\r\n #state space companion form\r\n #Insert coeffcients along top row\r\n FF[0:1,0:k+1] = temp\r\n return(FF)\r\n\r\ndef gibbs(X,Y,reps,burn,t0,d0,plot):\r\n reps = reps # number of Gibbs iterations\r\n burn = burn # percent of burn-in 
iterations\r\n out = np.zeros((reps, X.shape[1]+1))\r\n t1 = Y.shape[0] #number of observations\r\n b0 = np.zeros((X.shape[1],1))#Priors\r\n sigma0 = np.eye((X.shape[1])) # variance matrix\r\n # priors for sigma2\r\n t0 = t0\r\n d0 = d0\r\n # Starting values\r\n B = b0\r\n sigma2 = 1\r\n for i in range(0,reps):\r\n M = inv(inv(sigma0) + (1\/sigma2) * X.T @ X) @ (inv(sigma0) @ b0 + (1\/sigma2)* X.T @ Y)\r\n V = inv(inv(sigma0) + (1\/sigma2) * X.T @ X)\r\n chck = -1\r\n while (chck < 1):\r\n B = M + (np.random.normal(0,1,X.shape[1]) @ np.linalg.cholesky(V)).T.reshape(-1,1)\r\n b = ar_companion_matrix(B)\r\n ee = np.max(np.abs(np.linalg.eig(b)[1]))\r\n if (ee <= 1):\r\n chck = 1\r\n # compute residuals\r\n resids = Y - X @ B\r\n T2 = t0 + t1\r\n D1 = d0 + resids.T @ resids\r\n # keep samples after burn period\r\n out[i,] = np.append(B.T,sigma2)\r\n #draw from Inverse Gamma\r\n z0 = np.random.normal(1,1,t1)\r\n z0z0 = z0.T @ z0\r\n sigma2 = D1\/z0z0\r\n \r\n out = pd.DataFrame(out[burn:reps,:])\r\n \r\n if plot==True:\r\n for i in range(0,out.shape[1]):\r\n if i != out.shape[1]-1:\r\n plot_dist(df=out, column=[i], title='Estimator distribution of beta ' + str(i))\r\n else:\r\n plot_dist(df=out, column=[i], title='Estimator distribution of the variance')\r\n \r\n return(out)\r\n\r\n\r\ngibbs(X=X,Y=Y,reps=5000,burn=4000,t0 = 1,d0 = 0.1, plot=True)\r\n\r\n\r\n\r\n'''\r\ngibbs_sampler <- function(X,Y,B0,sigma0,sigma2,theta0,D0,reps,out,out1){\r\n for (i in 1:reps){\r\n \r\n M = solve(solve(sigma0)\r\n +as.numeric(1\/sigma2)*t(X)%*%X)%*%(solve(sigma0)%*%b0\r\n +as.numeric(1\/sigma2)*t(X)%*%Y)\r\n V = solve(solve(sigma0)+as.numeric(1\/sigma2)*t(X)%*%X)\r\n chck=-1\r\n while (chck < 1){ #check for stability\r\n \r\n B= M+t(rnorm(ncol(X))%*%chol(V))\r\n b = ar_companion_matrix(B)\r\n ee <- max(sapply(eigen(b)$values,abs))\r\n if(ee<=1){\r\n }\r\n }\r\n # compute residuals\r\n resids <- Y- X%*%B\r\n T2 = t0 + t1\r\n D1 = d0 + t(resids) %*% resids\r\n \r\n # keeps samples after burn period\r\n out[i,] <- t(matrix(c(t(B),sigma2)))\r\n \r\n #draw from Inverse Gamma\r\n z0 = rnorm(t1,1)\r\n z0z0 = t(z0) %*% z0\r\n sigma2 = D1\/z0z0\r\n \r\n # compute 2 year forecasts\r\n yhat = rep(0,h)\r\n end = as.numeric(length(Y))\r\n #yhat[1:2] = Y[(end-1):end,]\r\n cfactor = sqrt(sigma2)\r\n X_mat = c(1,rep(0,ncol(X)-1))\r\n \r\n \r\n for(m in ncol(X):h){\r\n for (lag in 1:(ncol(X)-1)){\r\n #create X matrix with p lags\r\n X_mat[(lag+1)] = yhat[m-lag]\r\n }\r\n # Use X matrix to forecast yhat\r\n yhat[m] = X_mat %*% B + rnorm(1) * cfactor\r\n }\r\n \r\n out1[i,] <- yhat\r\n }\r\n return = list(out,out1)\r\n}\r\n \r\n \r\n# Set the parameters\r\nreps = 5000 # number of Gibbs iterations\r\nburn = 4000 # percent of burn-in iterations\r\n\r\n# Forecast horizon\r\nh = 24\r\n\r\n# Matrix to store posterior coefficients and forecasts\r\nout = matrix(0, nrow = reps, ncol = ncol(X_BO) + 1)\r\nout1 = matrix(0, nrow = reps, ncol = h)\r\n\r\nt1 <- nrow(Y_CA) #number of observations\r\nb0 = matrix(0,ncol(X_CA),1) #Priors\r\nsigma0 <- diag(ncol(X_CA)) # variance matrix\r\n# priors for sigma2\r\nt0= 1\r\nd0=0.1\r\n\r\n# Starting values\r\nB = b0\r\nsigma2 = 1\r\ndf1 = gibbs_sampler(X_CA,Y_CA, B,sigma0,sigma2,t0,d0,reps,out,out1)\r\n\r\ncoef <- results[[1]][(burn+1):reps,]\r\nforecasts <- results[[2]][(burn+1):reps,]\r\n\r\n\r\nconst <- mean(coef[,1])\r\nbeta1 <- mean(coef[,2])\r\nbeta2 <- mean(coef[,3])\r\nbeta3 <- mean(coef[,4])\r\nsigma <- mean(coef[,5])"} 
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_667","text":"1-10\nimport numpy as np\r\nimport infomap\r\nimport pandas as pd\r\nimport scipy\r\nfrom tqdm import tqdm\r\nimport networkx as nx\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.colors as colors\r\nfrom scipy.signal import lfilter\r\nimport warnings\r\n\r\nfrom tqdm import tqdm_notebook\r\nfrom functools import reduce\r\nwarnings.filterwarnings('ignore')\r\n\r\ndef cluster_counts(labels,features):\r\n cluster_indexes=[]\r\n for c in np.unique(labels):\r\n cluster_indexes.append(np.where(np.array(labels)==c))\r\n clusters_features=[]\r\n for cluster in cluster_indexes:\r\n # print(cluster)\r\n cluster_features=[features[j] for j in cluster[0] ]\r\n clusters_features.append(cluster_features)\r\n return cluster_indexes,clusters_features\r\n\r\ndef distances_calculations(features_vectors):\r\n distances=[]\r\n n=15\r\n b = [1.0 \/ n] * n\r\n a = 1\r\n for vector1 in tqdm(features_vectors):\r\n distance=[]\r\n for vector2 in features_vectors:\r\n distance.append(scipy.linalg.norm(vector1-vector2,2))\r\n distances.append(lfilter(b,a,distance))\r\n return distances\r\ndef weights_init(distances_total):\r\n weights=[]\r\n for dist in tqdm(distances_total):\r\n w=[]\r\n for d in (dist):\r\n w.append(1\/(0.01+d))\r\n weights.append(w)\r\n return weights\r\n\r\nimport pickle\r\ndef save_pkl(variable, name):\r\n name = name + '.pkl'\r\n output = open(name, 'wb')\r\n pickle.dump(variable, output)\r\n output.close()\r\ndef findCommunities(G):\r\n \"\"\"\r\n Partition network with the Infomap algorithm.\r\n Annotates nodes with 'community' id.\r\n \"\"\"\r\n\r\n im = infomap.Infomap(\"--two-level\")\r\n\r\n print(\"Building Infomap network from a NetworkX graph...\")\r\n for source, target in G.edges:\r\n im.add_link(source, target)\r\n\r\n print(\"Find communities with Infomap...\")\r\n im.run()\r\n\r\n print(f\"Found {im.num_top_modules} modules with codelength: {im.codelength}\")\r\n\r\n communities = im.get_modules()\r\n nx.set_node_attributes(G, communities, 'community')\r\n\r\n\r\n#create function for clearence inside the cluster, by euclidian distance\r\ndef new_weights(weights):\r\n weights_new=[]\r\n for weight in weights:\r\n w=weight.copy()\r\n w=np.array(w)\r\n w[w<(np.median(w)+np.std(w))]=0\r\n weights_new.append(w)\r\n return weights_new\r\n\r\ndef cluster_clearence(cluster_feature_vector:list):\r\n print(len(cluster_feature_vector))\r\n distance=[]\r\n for vector_1 in tqdm_notebook(cluster_feature_vector):\r\n dist=[]\r\n for vector_2 in cluster_feature_vector:\r\n dist.append(np.linalg.norm(vector_1-vector_2,2))\r\n dist=np.mean(dist)\r\n distance.append(dist)\r\n return np.mean(distance)\r\n\r\ndef cluster_creation(distances:list,features_all:list,features:list,inter:int,treshold:float):\r\n final_clusters=[]\r\n\r\n weights=weights_init(distances)\r\n weights_new=new_weights(weights)\r\n\r\n weights_matrix=np.matrix(weights_new)\r\n Graph=nx.DiGraph(weights_matrix)\r\n findCommunities(Graph)\r\n communities = [v for k,v in nx.get_node_attributes(Graph, 'community').items()]\r\n # print('number of clusters:', np.unique(communities))\r\n clusters,cluster_features=cluster_counts(communities,features_all)\r\n\r\n # clusters=[cluster[0] for cluster in clusters ]\r\n # vectors=[distances for i in range(len(clusters))]\r\n distance=list(map(cluster_clearence,cluster_features))\r\n if np.isnan(np.array(distance)).any() == True:\r\n print('DETECTED NAN')\r\n 
a=np.argwhere(np.isnan(distance))\r\n # print(a)\r\n distance.pop(a[0][0])\r\n cluster_features.pop(a[0][0])\r\n\r\n print('clearence of clusters',distance)\r\n \r\n bad_clusters=[]\r\n for i in range(len(distance)):\r\n \r\n if distance[i]<=treshold:\r\n final_clusters.append(cluster_features[i])\r\n else:\r\n bad_clusters.append(cluster_features[i])\r\n cluster_length=len(bad_clusters)\r\n\r\n i=0\r\n # for i in range(len(bad_clusters)):\r\n final_bad_clusters=[]\r\n while itreshold:\r\n bad_clusters.append(cluster_features[m])\r\n\r\n elif i>inter and distance[m]>treshold:\r\n final_bad_clusters.append(cluster_features[m])\r\n \r\n cluster_length=len(bad_clusters)\r\n print(cluster_length)\r\n i=i+1\r\n \r\n bad_clusters_vectors=np.concatenate(final_bad_clusters)\r\n bad_vectors=distances_calculations(bad_clusters_vectors)\r\n\r\n weights=weights_init(bad_vectors)\r\n weights_new=new_weights(weights)\r\n\r\n weights_matrix=np.matrix(weights_new)\r\n Graph=nx.DiGraph(weights_matrix)\r\n findCommunities(Graph)\r\n communities = [v for k,v in nx.get_node_attributes(Graph, 'community').items()]\r\n\r\n # print('number of mini clusters in cluster '+str(i), np.unique(communities))\r\n\r\n clusters,cluster_features=cluster_counts(communities,bad_clusters_vectors)\r\n\r\n \r\n bad_clusters=[]\r\n for i in range(len(distance)):\r\n \r\n if distance[i]<=treshold:\r\n final_clusters.append(cluster_features[i])\r\n else:\r\n bad_clusters.append(cluster_features[i])\r\n cluster_length=len(bad_clusters)\r\n i=0\r\n # print('CLUSTERS from bad')\r\n final_bad_clusters=[]\r\n while itreshold:\r\n bad_clusters.append(cluster_features[m])\r\n\r\n elif i>inter and distance[m]>treshold:\r\n final_bad_clusters.append(cluster_features[m])\r\n \r\n cluster_length=len(bad_clusters)\r\n # print(cluster_length)\r\n i=i+1\r\n \r\n return final_clusters,final_bad_clusters\r\n\r\ndef final_cluster(features,cluster_feature):\r\n final_clusters=[]\r\n for feature_vector in cluster_feature:\r\n cluster=[]\r\n for vector in feature_vector:\r\n bc=np.bincount(np.where(features==vector)[0])\r\n cluster.append(bc.argmax())\r\n final_clusters.append(cluster)\r\n return final_clusters"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_668","text":"\"\"\"\nFilename: oligopoly.py\nAuthors: , , \nThis is an example for the lecture dyn_stack.rst from the QuantEcon\nseries of lectures by and .\nWe deal with a large monopolistic firm who faces costs:\nC_t = e Q_t + .5 g Q_t^2 + .5 c (Q_{t+1} - Q_t)^2\nwhere the fringe firms face:\nsigma_t = d q_t + .5 h q_t^2 + .5 c (q_{t+1} - q_t)^2\nAdditionally, there is a linear inverse demand curve of the form:\np_t = A_0 - A_1 (Q_t + \\bar{q_t}) + \\eta_t,\nwhere:\n.. 
math\n \\eta_{t+1} = \\rho \\eta_t + C_{\\varepsilon} \\varepsilon_{t+1};\n \\varepsilon_{t+1} \\sim N(0, 1)\nFor more details, see the lecture.\n\"\"\"\nimport numpy as np\nimport scipy.linalg as la\nfrom quantecon import LQ\nfrom quantecon.matrix_eqn import solve_discrete_lyapunov\nfrom scipy.optimize import root\n\n\ndef setup_matrices(params):\n \"\"\"\n This function sets up the A, B, R, Q for the oligopoly problem\n described in the lecture.\n Parameters\n ----------\n params : Array(Float, ndim=1)\n Contains the parameters that describe the problem in the order\n [a0, a1, rho, c_eps, c, d, e, g, h, beta]\n Returns\n -------\n (A, B, Q, R) : Array(Float, ndim=2)\n These matrices describe the oligopoly problem.\n \"\"\"\n\n # Left hand side of (37)\n Alhs = np.eye(5)\n Alhs[4, :] = np.array([a0-d, 1., -a1, -a1-h, c])\n Alhsinv = la.inv(Alhs)\n\n # Right hand side of (37)\n Brhs = np.array([[0., 0., 1., 0., 0.]]).T\n Arhs = np.eye(5)\n Arhs[1, 1] = rho\n Arhs[3, 4] = 1.\n Arhs[4, 4] = c \/ beta\n\n # R from equation (40)\n R = np.array([[0., 0., (a0-e)\/2., 0., 0.],\n [0., 0., 1.\/2., 0., 0.],\n [(a0-e)\/2., 1.\/2, -a1 - .5*g, -a1\/2, 0.],\n [0., 0., -a1\/2, 0., 0.],\n [0., 0., 0., 0., 0.]])\n\n Rf = np.array([[0., 0., 0., 0., 0., (a0-d)\/2.],\n [0., 0., 0., 0., 0., 1.\/2.],\n [0., 0., 0., 0., 0., -a1\/2.],\n [0., 0., 0., 0., 0., -a1\/2.],\n [0., 0., 0., 0., 0., 0.],\n [(a0-d)\/2., 1.\/2., -a1\/2., -a1\/2., 0., -h\/2.]])\n\n Q = np.array([[c\/2]])\n\n A = Alhsinv.dot(Arhs)\n B = Alhsinv.dot(Brhs)\n\n return A, B, Q, R, Rf\n\n\ndef find_PFd(A, B, Q, R, Rf, beta=.95):\n \"\"\"\n Taking the parameters A, B, Q, R as found in the `setup_matrices`,\n we find the value function of the optimal linear regulator problem.\n This is steps 2 and 3 in the lecture notes.\n Parameters\n ----------\n (A, B, Q, R) : Array(Float, ndim=2)\n The matrices that describe the oligopoly problem\n Returns\n -------\n (P, F, d) : Array(Float, ndim=2)\n The matrix that describes the value function of the optimal\n linear regulator problem.\n \"\"\"\n\n lq = LQ(Q, -R, A, B, beta=beta)\n P, F, d = lq.stationary_values()\n\n Af = np.vstack((np.hstack([A-np.dot(B,F), np.array([[0., 0., 0., 0., 0.]]).T]),np.array([[0., 0., 0., 0., 0., 1.]])))\n Bf = np.array([[0., 0., 0., 0., 0., 1.]]).T\n\n lqf = LQ(Q, -Rf, Af, Bf, beta=beta)\n Pf, Ff, df = lqf.stationary_values()\n\n return P, F, d, Pf, Ff, df\n\n\ndef solve_for_opt_policy(params, eta0=0., Q0=0., q0=0.):\n \"\"\"\n Taking the parameters as given, solve for the optimal decision rules\n for the firm.\n Parameters\n ----------\n params : Array(Float, ndim=1)\n This holds all of the model parameters in an array\n Returns\n -------\n out :\n \"\"\"\n # Step 1\/2: Formulate\/Solve the optimal linear regulator\n (A, B, Q, R, Rf) = setup_matrices(params)\n (P, F, d, Pf, Ff, df) = find_PFd(A, B, Q, R, Rf, beta=beta)\n\n # Step 3: Convert implementation into state variables (Find coeffs)\n P22 = P[-1, -1]\n P21 = P[-1, :-1]\n P22inv = P22**(-1)\n\n # Step 4: Find optimal x_0 and \\mu_{x, 0}\n z0 = np.array([1., eta0, Q0, q0])\n x0 = -P22inv*np.dot(P21, z0)\n D0 = -np.dot(P22inv, P21)\n\n # Return -F and -Ff because we use u_t = -F y_t\n return P, -F, D0, Pf, -Ff\n\n\n# Parameter values\na0 = 100.\na1 = 1.\nrho = .8\nc_eps = .2\nc = 1.\nd = 20.\ne = 20.\ng = .2\nh = .2\nbeta = .95\nparams = np.array([a0, a1, rho, c_eps, c, d, e, g, h, beta])\n\n\nP, F, D0, Pf,Ff = solve_for_opt_policy(params)\n\n\n# Checking time-inconsistency:\nA, B, Q, R, Rf = setup_matrices(params)\n# 
arbitrary initial z_0\ny0 = np.array([[1, 1, 1, 1]]).T\n# optimal x_0 = i_0\ni0 = np.dot(D0,y0)\n# iterate one period using the closed-loop system\ny1 = np.dot( A + np.dot(B,F) , np.vstack([y0, i0]) )\n# the last element of y_1 is x_1 = i_1\ni1_0 = y1[-1,0]\n\n# compare this to the case when the leader solves a Stackelberg problem\n# in period 1. if in period 1 the leader could choose i1 given\n# (1, v_1, Q_1, \\bar{q}_1)\ni1_1 = np.dot(D0, y1[0:-1,0])\n\n\nprint(\"P = {}\".format(P))\nprint(\"-F = {}\".format(F))\nprint(\"D0 = {}\".format(D0))\nprint(\"Pf = {}\".format(Pf))\nprint(\"-Ff = {}\".format(Ff))\nprint(\"i1_0 = {}\".format(i1_0))\nprint(\"i1_1 = {}\".format(i1_1))\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_669","text":"from scipy import optimize\nimport scipy\nimport numpy\nimport matplotlib.pyplot as plt\n\ndef func(x, a, b):\n return a + b * numpy.log2(x)\n\ndef create_graph():\n with open('.\/results_n.txt') as f:\n y_pts_n = f.read().splitlines()\n with open('.\/results_n_2.txt') as f:\n y_pts_n_2 = f.read().splitlines()\n with open('.\/results_log_p.txt') as f:\n y_pts_log_p = f.read().splitlines()\n\n line_n, _ = scipy.optimize.curve_fit(lambda t,a,b: a+b*numpy.log2(t), range(2, 25, 2), y_pts_n)\n line_n_2, _ = scipy.optimize.curve_fit(lambda t,a,b: a+b*numpy.log2(t), range(2, 49, 2), y_pts_n_2)\n line_log_p, _ = scipy.optimize.curve_fit(lambda t,a,b: a+b*numpy.log2(t), range(2, 111, 2), y_pts_log_p)\n\n fig, ax = plt.subplots()\n # ax.plot(range(2, 25, 2), y_pts_n, linestyle='None', marker='o', color='b', label=r\"$p(n) = n$\")\n ax.plot(range(2, 111, 2), func(range(2, 111, 2), *line_n),\n linewidth=2.0, linestyle='-', color='b', label=r\"$Fitted Curve: n$\"\n )\n # ax.plot(range(2, 49, 2), y_pts_n_2,\n # linestyle='None', marker='o', color='r', label=r\"$p(n) = \\frac{n}{2}$\"\n # )\n ax.plot(range(2, 111, 2), func(range(2, 111, 2), *line_n_2),\n linewidth=2.0, linestyle='-', color='r', label=r\"$Fitted Curve: \\frac{n}{2}$\"\n )\n # ax.plot(range(2, 111, 2), y_pts_log_p,\n # linestyle='None', marker='o', color='g', label=r\"$p(n) = \\frac{n}{p} \\geq \\log \\, p$\"\n # )\n ax.plot(range(2, 111, 2), func(range(2, 111, 2), *line_log_p),\n linewidth=2.0, linestyle='-', color='g', label=r\"$Fitted Curve: \\frac{n}{p} \\geq \\log \\, p$\"\n )\n ax.set(xlabel='n - points count', ylabel=r'time $(\\mu s)$',\n title='Line-of-Sight')\n ax.grid()\n ax.set_ylim(ymin=0)\n ax.legend(loc=\"upper left\")\n fig.savefig(\"common_graph.pdf\", format=\"pdf\")\n plt.show()\n\nif __name__ == '__main__':\n create_graph()"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_670","text":"import sympy\nimport argparse\nimport numpy as np\n\nimport equations\nimport data\n# from dsr_utils import run_dsr\nfrom gp_utils import run_gp\nfrom interpolate import num_diff, num_diff_gp\nimport pickle\nimport os\nimport time\n\n# # set up ODE config\n# ode_param = None\n# x_id = 0\n#\n# # data generation config\n# freq = 10\n# n_sample = 100\n# noise_sigma = 0.0\n#\n# # set up algorithm config\n# alg = 'gp'\n\ndef run(ode_name, ode_param, x_id, freq, n_sample, noise_ratio, alg, seed, n_seed):\n np.random.seed(999)\n print(freq)\n\n ode = equations.get_ode(ode_name, ode_param)\n T = ode.T\n init_low = ode.init_low\n init_high = ode.init_high\n has_coef = ode.has_coef\n\n noise_sigma = ode.std_base * noise_ratio\n\n dg = data.DataGenerator(ode, T, freq, n_sample, noise_sigma, init_low, init_high)\n yt = dg.generate_data()\n\n if noise_sigma 
== 0:\n dxdt_hat = (yt[1:, :, :] - yt[:-1, :, :]) \/ (dg.solver.t[1:] - dg.solver.t[:-1])[:, None, None]\n elif alg != 'gp':\n dxdt_hat = num_diff(yt, dg, alg)\n else:\n dxdt_hat, xt_hat = num_diff_gp(yt, dg, ode)\n\n print('Numerical differentiation: Done.')\n\n # if alg != 'gp':\n X_train = yt[:-1, :, :]\n # else:\n # X_train = xt_hat[:-1, :, :]\n X_train = X_train.reshape(X_train.shape[0] * X_train.shape[1], X_train.shape[2])\n\n y_train = dxdt_hat[:, :, x_id].flatten()\n assert X_train.shape[0] == y_train.shape[0]\n\n if alg == 'tv':\n path_base = 'results\/{}\/noise-{}\/sample-{}\/freq-{}\/'.format(ode_name, noise_ratio, n_sample, freq)\n elif alg == 'gp':\n path_base = 'results_gp\/{}\/noise-{}\/sample-{}\/freq-{}\/'.format(ode_name, noise_ratio, n_sample, freq)\n else:\n path_base = 'results_spline\/{}\/noise-{}\/sample-{}\/freq-{}\/'.format(ode_name, noise_ratio, n_sample, freq)\n\n if not os.path.isdir(path_base):\n os.makedirs(path_base)\n\n for s in range(seed, seed+n_seed):\n print(' ')\n print('Running with seed {}'.format(s))\n if x_id == 0:\n path = path_base + 'grad_seed_{}.pkl'.format(s)\n else:\n path = path_base + 'grad_x_{}_seed_{}.pkl'.format(x_id, s)\n\n if os.path.isfile(path):\n print('Skipping seed {}'.format(s))\n continue\n start = time.time()\n f_hat, est_gp = run_gp(X_train, y_train, ode, x_id, s)\n print(f_hat)\n f_true = ode.get_expression()[x_id]\n if not isinstance(f_true, tuple):\n correct = sympy.simplify(f_hat - f_true) == 0\n else:\n correct_list = [sympy.simplify(f_hat - f) == 0 for f in f_true]\n correct = max(correct_list) == 1\n\n end = time.time()\n\n with open(path, 'wb') as f:\n pickle.dump({\n 'model': est_gp._program,\n 'X_train': X_train,\n 'y_train': y_train,\n 'seed': s,\n 'correct': correct,\n 'f_hat': f_hat,\n 'ode': ode,\n 'noise_ratio': noise_ratio,\n 'noise_sigma': noise_sigma,\n 'dg': dg,\n 'time': end-start,\n }, f)\n\n print(f_hat)\n print(correct)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--ode_name\", help=\"name of the ode\", type=str)\n parser.add_argument(\"--ode_param\", help=\"parameters of the ode (default: None)\", type=str, default=None)\n parser.add_argument(\"--x_id\", help=\"ID of the equation to be learned\", type=int, default=0)\n parser.add_argument(\"--freq\", help=\"sampling frequency\", type=float, default=10)\n parser.add_argument(\"--n_sample\", help=\"number of trajectories\", type=int, default=100)\n parser.add_argument(\"--noise_sigma\", help=\"noise level (default 0)\", type=float, default=0.)\n parser.add_argument(\"--alg\", help=\"name of the benchmark\", type=str, default='tv', choices=['tv', 'spline', 'gp'])\n parser.add_argument(\"--seed\", help=\"random seed\", type=int, default=0)\n parser.add_argument(\"--n_seed\", help=\"random seed\", type=int, default=10)\n\n args = parser.parse_args()\n print('Running with: ', args)\n\n if args.ode_param is not None:\n param = [float(x) for x in args.ode_param.split(',')]\n else:\n param = None\n if args.freq >= 1:\n freq = int(args.freq)\n else:\n freq = args.freq\n run(args.ode_name, param, args.x_id, freq, args.n_sample, args.noise_sigma, args.alg, seed=args.seed, n_seed=args.n_seed)\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_671","text":"bigpeng2012\/CarND-Vehicle-DetectionVehicle_Detection_Tracking.py\n#!\/usr\/bin\/env python\n# coding: utf-8\n\n# # P5 Vehicle Detection\n\n# ## The goals \/ steps of this project are the following:\n# \n# 1.Perform a Histogram of 
Oriented Gradients (HOG) feature extraction on a labeled training set of images and train a classifier Linear SVM classifier\n# \n# 2.Optionally, you can also apply a color transform and append binned color features, as well as histograms of color, to your HOG feature vector.\n# \n# 3.Note: for those first two steps don't forget to normalize your features and randomize a selection for training and testing.\n# \n# 4.Implement a sliding-window technique and use your trained classifier to search for vehicles in images.\n# \n# 5.Run your pipeline on a video stream (start with the test_video.mp4 and later implement on full project_video.mp4) and create a heat map of recurring detections frame by frame to reject outliers and follow detected vehicles.\n# \n# 6.Estimate a bounding box for vehicles detected.\n\n# In[1]:\n\n\nimport cv2\nimport glob\nimport numpy as np\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport time\n\nfrom random import shuffle\n\nfrom skimage.feature import hog\nfrom sklearn.svm import LinearSVC\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom scipy.ndimage.measurements import label\n\nfrom sklearn.preprocessing import StandardScaler\nfrom tqdm import tqdm\n\nfrom moviepy.editor import VideoFileClip\nfrom IPython.display import HTML\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# ### Load Data\n\n# In[2]:\n\n\ncar_images = glob.glob('vehicles\/**\/*.png')\nnoncar_images = glob.glob('non-vehicles\/**\/*.png')\nprint(len(car_images), len(noncar_images))\n\n\n# ### Visualize Data Example\n\n# In[3]:\n\n\nfig, axs = plt.subplots(2,8, figsize=(8, 3))\n#fig.subplots_adjust(hspace = .2, wspace=.001)\naxs = axs.ravel()\n\n\nfor i in np.arange(8):\n img = cv2.imread(car_images[np.random.randint(0,len(car_images))])\n img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n axs[i].axis('off')\n axs[i].set_title('car', fontsize=15)\n axs[i].imshow(img)\nfor i in np.arange(8,16):\n img = cv2.imread(noncar_images[np.random.randint(0,len(noncar_images))])\n img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n axs[i].axis('off')\n axs[i].set_title('noncar', fontsize=15)\n axs[i].imshow(img)\n\n\n# ## Feature Extraction\n\n# ### Histogram of Oriented Gradient (HOG) Features\n\n# In[4]:\n\n\ndef get_hog_features(img, orient, pix_per_cell, cell_per_block, \n vis=False, feature_vec=True):\n # Call with two outputs if vis==True\n if vis == True:\n features, hog_image = hog(img, orientations=orient, \n pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block), \n transform_sqrt=False, \n visualise=True, feature_vector=False)\n return features, hog_image\n # Otherwise call with one output\n else: \n features = hog(img, orientations=orient, \n pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block), \n transform_sqrt=False, \n visualise=False, feature_vector=feature_vec)\n return features\n\n\n# ### Example of HOG Features\n\n# In[5]:\n\n\n# Generate a random index to look at a car image\nind = np.random.randint(0, len(car_images))\n# Read in the image\nimage = mpimg.imread(car_images[ind])\ngray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n# Define HOG parameters\norient = 10\npix_per_cell = 8\ncell_per_block = 2\n# Call our function with vis=True to see an image output\nfeatures, hog_image = get_hog_features(gray, orient, \n pix_per_cell, cell_per_block, \n vis=True, feature_vec=False)\n\n# Plot the examples\nfig = 
plt.figure(figsize = (15,20))\nplt.subplot(121)\nplt.imshow(image, cmap='gray')\nplt.title('Example Car Image')\nplt.subplot(122)\nplt.imshow(hog_image, cmap='gray')\nplt.title('HOG Visualization')\nplt.savefig('output_images\/hog_visualization.png', \n bbox_inches=\"tight\")\n\n\n# ### Histogram of Color \n\n# In[6]:\n\n\ndef color_hist(img, nbins=32, bins_range=(0, 256), vis=False):\n \n bins_range=(0, 256)\n # Compute the histogram of the color channels separately\n channel1_hist = np.histogram(img[:,:,0], bins=nbins, range=bins_range)\n channel2_hist = np.histogram(img[:,:,1], bins=nbins, range=bins_range)\n channel3_hist = np.histogram(img[:,:,2], bins=nbins, range=bins_range)\n \n # Generating bin centers\n bin_edges = channel1_hist[1] #all three bins are the same size\n bin_centers = (bin_edges[1:] + bin_edges[0:len(bin_edges) - 1])\/2\n \n hist_features = np.concatenate((channel1_hist[0],\n channel2_hist[0],\n channel3_hist[0]))\n if vis == True:\n return channel1_hist, channel2_hist, channel3_hist, bin_centers\n else:\n return hist_features\n\n\n# ### Spatial Binning of Color\n\n# In[7]:\n\n\ndef bin_spatial(img, color_space='RGB', size=(32, 32)):\n # Convert image to new color space (if specified)\n if color_space != 'RGB':\n if color_space == 'HSV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n elif color_space == 'LUV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)\n elif color_space == 'HLS':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n elif color_space == 'YUV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)\n elif color_space == 'YCrCb':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n else: feature_image = np.copy(img) \n # Use cv2.resize().ravel() to create the feature vector\n features = cv2.resize(feature_image, size).ravel() \n # Return the feature vector\n return features\n\n\n# ### Extract Features from a list of Imgaes\n\n# In[8]:\n\n\n# Define a function to extract features from a list of images\n# Have this function call bin_spatial() and color_hist()\ndef extract_features(images, color_space='RGB', spatial_size=(32, 32), \n hist_bins=32, orient=9, pix_per_cell=8,\n cell_per_block=2, hog_channel=0,\n spatial_feat=True, hist_feat=True, hog_feat=True):\n # Create a list to append feature vectors to\n features = []\n # Iterate through the list of images\n for file in tqdm(images):\n file_features = []\n img = mpimg.imread(file) # idea for the progress came from \"nhiddink\"\n # apply color conversion if other than 'RGB'\n if color_space != 'RGB':\n if color_space == 'HSV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n elif color_space == 'LUV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)\n elif color_space == 'HLS':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n elif color_space == 'YUV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)\n elif color_space == 'YCrCb':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n else: feature_image = np.copy(img)\n \n if spatial_feat == True:\n # Apply bin_spatial() to get spatial color features\n spatial_features = bin_spatial(feature_image, size=spatial_size)\n file_features.append(spatial_features)\n if hist_feat == True:\n # Apply color_hist() also with a color space option now\n hist_features = color_hist(feature_image, nbins=hist_bins)\n file_features.append(hist_features)\n if hog_feat == True:\n # Call get_hog_features() with vis=False, feature_vec=True\n if hog_channel == 'ALL':\n hog_features = []\n for channel in 
range(feature_image.shape[2]):\n hog_features.append(get_hog_features(feature_image[:,:,channel], \n orient, pix_per_cell, cell_per_block, \n vis=False, feature_vec=True))\n hog_features = np.ravel(hog_features)\n else:\n hog_features = get_hog_features(feature_image[:,:,hog_channel], orient, \n pix_per_cell, cell_per_block, vis=False,\n feature_vec=True)\n file_features.append(hog_features)\n # Append the new feature vector to the features list\n features.append(np.concatenate(file_features))\n # Return list of feature vectors\n return features\n\n\n# ### Slide Window Search\n\n# In[9]:\n\n\ndef slide_window(img, x_start_stop=[None, None], y_start_stop=[None, None], \n xy_window=(128, 128), #(64, 64), (96, 96)\n xy_overlap=(0.5, 0.5)):\n # If x and\/or y start\/stop positions not defined, set to image size\n if x_start_stop[0] == None:\n x_start_stop[0] = 0\n if x_start_stop[1] == None:\n x_start_stop[1] = img.shape[1]\n if y_start_stop[0] == None:\n y_start_stop[0] = 0\n if y_start_stop[1] == None:\n y_start_stop[1] = img.shape[0]\n # Compute the span of the region to be searched \n xspan = x_start_stop[1] - x_start_stop[0]\n yspan = y_start_stop[1] - y_start_stop[0]\n # Compute the number of pixels per step in x\/y\n nx_pix_per_step = np.int(xy_window[0]*(1 - xy_overlap[0]))\n ny_pix_per_step = np.int(xy_window[1]*(1 - xy_overlap[1]))\n # Compute the number of windows in x\/y\n nx_buffer = np.int(xy_window[0]*(xy_overlap[0]))\n ny_buffer = np.int(xy_window[1]*(xy_overlap[1]))\n nx_windows = np.int((xspan-nx_buffer)\/nx_pix_per_step) \n ny_windows = np.int((yspan-ny_buffer)\/ny_pix_per_step) \n # Initialize a list to append window positions to\n window_list = []\n\n for ys in range(ny_windows):\n for xs in range(nx_windows):\n # Calculate window position\n startx = xs*nx_pix_per_step + x_start_stop[0]\n endx = startx + xy_window[0]\n starty = ys*ny_pix_per_step + y_start_stop[0]\n endy = starty + xy_window[1]\n # Append window position to list\n window_list.append(((startx, starty), (endx, endy)))\n # Return the list of windows\n return window_list\n\n# Define a function to draw bounding boxes\n\n\n# In[10]:\n\n\n# Define a function to draw bounding boxes\ndef draw_boxes(img, bboxes, color=(255, 0, 0), thick=6):\n # Make a copy of the image\n imcopy = np.copy(img)\n # Iterate through the bounding boxes\n for bbox in bboxes:\n # Draw a rectangle given bbox coordinates\n cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)\n # Return the image copy with boxes drawn\n return imcopy\n\n\n# ### Sliding Window Example\n\n# In[11]:\n\n\ntest_img = mpimg.imread('test_images\/test4.jpg')\n\nwindows = slide_window(test_img,\n x_start_stop=[None, None],\n y_start_stop=[400, 656], #tune the parameters\n xy_window=(64, 64),\n xy_overlap=(0.5, 0.5))\n\nwindow_img = draw_boxes(test_img, windows, color=(255,0,0),thick = 6)\nplt.imshow(window_img);\nmatplotlib.rc('xtick', labelsize=15) \nmatplotlib.rc('ytick', labelsize=15)\nplt.title('Sliding Windows Technique:', fontsize=15);\n#plt.savefig('output_images\/sliding_windows.png', bbox_inches=\"tight\")\n\n\n# ### Extract Features from a Single Image\n\n# In[12]:\n\n\n# This function is very similar to extract_features()\n# just for a single image rather than list of images\ndef single_img_features(image, color_space='RGB', spatial_size=(32, 32),\n hist_bins=32, orient=9, \n pix_per_cell=8, cell_per_block=2, hog_channel=0,\n spatial_feat=True, hist_feat=True, hog_feat=True): \n #1) Define an empty list to receive features\n img_features = []\n #2) Apply 
color conversion if other than 'RGB'\n if color_space != 'RGB':\n if color_space == 'HSV':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n elif color_space == 'LUV':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)\n elif color_space == 'HLS':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)\n elif color_space == 'YUV':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)\n elif color_space == 'YCrCb':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)\n else: feature_image = np.copy(image)\n #3) Compute spatial features if flag is set\n if spatial_feat == True:\n # Apply bin_spatial() to get spatial color features\n spatial_features = bin_spatial(feature_image, size=spatial_size)\n #4) Append features to list\n img_features.append(spatial_features)\n #5) Compute histogram features if flag is set\n if hist_feat == True:\n # Apply color_hist() also with a color space option now\n hist_features = color_hist(feature_image, nbins=hist_bins)\n #6) Append features to list\n img_features.append(hist_features)\n #7) Compute HOG features if flag is set\n if hog_feat == True:\n if hog_channel == 'ALL':\n hog_features = []\n for channel in range(feature_image.shape[2]):\n hog_features.append(get_hog_features(feature_image[:,:,channel], \n orient, pix_per_cell, cell_per_block, \n vis=False, feature_vec=True))\n hog_features = np.ravel(hog_features)\n else:\n hog_features = get_hog_features(feature_image[:,:,hog_channel], orient, \n pix_per_cell, cell_per_block,\n vis=False, feature_vec=True)\n #8) Append features to list\n img_features.append(hog_features)\n #9) Return concatenated array of features\n return np.concatenate(img_features)\n\n\n# In[13]:\n\n\n# Define a function you will pass an image \n# and the list of windows to be searched (output of slide_windows())\ndef search_windows(img, windows, clf, scaler, color_space='RGB', \n spatial_size=(32, 32), hist_bins=32, orient=9, \n pix_per_cell=8, cell_per_block=2, hog_channel=0,\n spatial_feat=True, hist_feat=True, hog_feat=True):\n\n #1) Create an empty list to receive positive detection windows\n on_windows = []\n #2) Iterate over all windows in the list\n for window in windows:\n #3) Extract the test window from original image\n test_img = cv2.resize(img[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64)) \n #4) Extract features for that window using single_img_features()\n features = single_img_features(test_img, color_space=color_space, \n spatial_size=spatial_size, hist_bins=hist_bins, \n orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, \n hog_channel=hog_channel, spatial_feat=spatial_feat, \n hist_feat=hist_feat, hog_feat=hog_feat)\n #5) Scale extracted features to be fed to classifier\n test_features = scaler.transform(np.array(features).reshape(1, -1))\n #6) Predict using your classifier\n prediction = clf.predict(test_features)\n #7) If positive (prediction == 1) then save the window\n if prediction == 1:\n on_windows.append(window)\n #8) Return windows for positive detections\n return on_windows\n\n\n# ## Train and Test the Classifier\n\n# In[14]:\n\n\n### TODO: Tweak these parameters and see how the results change.\ncolor_space = 'YCrCb' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb\n#color_space = 'RGB'\norient = 10 # HOG orientations #9\npix_per_cell = 8 # HOG pixels per cell\ncell_per_block = 2 # HOG cells per block\nhog_channel = 'ALL' # Can be 0, 1, 2, or \"ALL\"\nspatial_size = (32, 32) # Spatial binning dimensions\nhist_bins = 64 # Number of histogram 
bins\nspatial_feat = True # Spatial features on or off\nhist_feat = True # Histogram features on or off\nhog_feat = True # HOG features on or off\n\ncar_features = extract_features(car_images, color_space=color_space, \n spatial_size=spatial_size, hist_bins=hist_bins, \n orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, \n hog_channel=hog_channel,\n spatial_feat=spatial_feat, hist_feat=hist_feat,\n hog_feat=hog_feat)\nnotcar_features = extract_features(noncar_images, color_space=color_space, \n spatial_size=spatial_size, hist_bins=hist_bins, \n orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, \n hog_channel=hog_channel,\n spatial_feat=spatial_feat, hist_feat=hist_feat,\n hog_feat=hog_feat)\n\n\n# Create an array stack of feature vectors\nX = np.vstack((car_features, notcar_features)).astype(np.float64)\n# Fit a per-column scaler\nX_scaler = StandardScaler().fit(X)\n# Apply the scaler to X\nscaled_X = X_scaler.transform(X)\n\n# Define the labels vector\ny = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))\n\n# Split up data into randomized training and test sets\nrand_state = np.random.randint(0, 100)\nX_train, X_test, y_train, y_test = train_test_split(scaled_X, y,\n test_size=0.2,\n random_state=rand_state)\n# Use a linear SVC\nsvc = LinearSVC()\n# Check the training time for the SVC\nt=time.time()\nsvc.fit(X_train, y_train)\nt2 = time.time()\nprint(round(t2-t, 2), 'seconds to train SVC.')\nprint('Using:', orient, 'orientations', pix_per_cell,\n 'pixels per cell and', cell_per_block, 'cells per block')\nprint('Feature vector length:', len(X_train[0]))\n\n\nprint('Test Accuracy of SVC: {0:.2f}%'.format(round(svc.score(X_test, y_test)*100, 4)))\nprint()\nt = time.time()\nprint(' Predictions:', svc.predict(X_test[0:10]))\nprint(' Labels:', y_test[0:10])\nt2 = time.time()\nprint()\nprint(round(t2-t, 5), 'seconds to predict 10 labels with SVC.')\n\n\n# ### Test on test_images\n\n# In[15]:\n\n\nfor i in range(1,7):\n \n fname = 'test_images\/test{}.jpg'.format(i)\n image = mpimg.imread(fname)\n draw_image = np.copy(image)\n\n image = image.astype(np.float32)\/255\n\n windows = slide_window(test_img,\n x_start_stop=[600, None],\n y_start_stop=[400, 656], #tune the parameters\n xy_window=(128,128),\n xy_overlap=(.7,.7))\n\n hot_windows = search_windows(image, windows, svc, X_scaler, color_space=color_space, \n spatial_size=spatial_size, hist_bins=hist_bins, \n orient=orient, pix_per_cell=pix_per_cell, \n cell_per_block=cell_per_block, \n hog_channel=hog_channel, spatial_feat=spatial_feat, \n hist_feat=hist_feat, hog_feat=hog_feat) \n\n \n window_img = draw_boxes(draw_image, hot_windows)\n \n f, (ax1, ax2) = plt.subplots(1, 2, figsize=(18,9))\n plt.tight_layout()\n ax1.imshow(draw_image)\n ax1.set_title('Original Image', fontsize=30)\n ax2.imshow(window_img)\n ax2.set_title('Detect Vehicles', fontsize=30)\n #plt.savefig('output_images\/windows.png', bbox_inches=\"tight\")\n\n\n# ### Build Heat Maps to Fix Multiple Detections & False Positives\n\n# In[16]:\n\n\ndef add_heat(heatmap, bbox_list):\n # Iterate through list of bboxes\n for box in bbox_list:\n # Add += 1 for all pixels inside each bbox\n # Assuming each \"box\" takes the form ((x1, y1), (x2, y2))\n heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1\n\n # Return updated heatmap\n return heatmap# Iterate through list of bboxes\n \ndef apply_threshold(heatmap, threshold):\n # Zero out pixels below the threshold\n heatmap[heatmap <= threshold] = 0\n # Return 
thresholded map\n return heatmap\n\ndef draw_labeled_bboxes(img, labels):\n # Iterate through all detected cars\n for car_number in range(1, labels[1]+1):\n # Find pixels with each car_number label value\n nonzero = (labels[0] == car_number).nonzero()\n # Identify x and y values of those pixels\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n # Define a bounding box based on min\/max x and y\n bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))\n # Draw the box on the image\n cv2.rectangle(img, bbox[0], bbox[1], (255,0,0), 8)\n # Return the image\n return img\n\ndef convert_color(img, conv='RGB2YCrCb'):\n if conv == 'RGB2YCrCb':\n return cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n if conv == 'BGR2YCrCb':\n return cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)\n if conv == 'RGB2LUV':\n return cv2.cvtColor(img, cv2.COLOR_RGB2LUV)\n\n\n# In[17]:\n\n\ndef find_cars(img, ystart, ystop, scale, svc, X_scaler,\n orient, pix_per_cell, cell_per_block,\n spatial_size, hist_bins):\n \n draw_img = np.copy(img)\n img = img.astype(np.float32)\/255\n \n img_tosearch = img[ystart:ystop,:,:]\n ctrans_tosearch = convert_color(img_tosearch, conv='RGB2YCrCb')\n if scale != 1:\n imshape = ctrans_tosearch.shape\n ctrans_tosearch = cv2.resize(ctrans_tosearch,\n (np.int(imshape[1]\/scale),\n np.int(imshape[0]\/scale)))\n \n ch1 = ctrans_tosearch[:,:,0]\n ch2 = ctrans_tosearch[:,:,1]\n ch3 = ctrans_tosearch[:,:,2]\n\n # Define blocks and steps as above\n nxblocks = (ch1.shape[1] \/\/ pix_per_cell) - cell_per_block + 1\n nyblocks = (ch1.shape[0] \/\/ pix_per_cell) - cell_per_block + 1 \n nfeat_per_block = orient*cell_per_block**2\n \n # 64 was the orginal sampling rate, with 8 cells and 8 pix per cell\n window = 64\n nblocks_per_window = (window \/\/ pix_per_cell) - cell_per_block + 1\n cells_per_step = 2 # Instead of overlap, define how many cells to step\n nxsteps = (nxblocks - nblocks_per_window) \/\/ cells_per_step\n nysteps = (nyblocks - nblocks_per_window) \/\/ cells_per_step\n \n # Compute individual channel HOG features for the entire image\n hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)\n hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)\n \n bbox_list=[] \n for xb in range(nxsteps):\n for yb in range(nysteps):\n ypos = yb*cells_per_step\n xpos = xb*cells_per_step\n # Extract HOG for this patch\n hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel() \n hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel() \n hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel() \n hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))\n\n xleft = xpos*pix_per_cell\n ytop = ypos*pix_per_cell\n\n # Extract the image patch\n subimg = cv2.resize(ctrans_tosearch[ytop:ytop+window, xleft:xleft+window], (64,64))\n\n # Get color features\n spatial_features = bin_spatial(subimg, size=spatial_size)\n hist_features = color_hist(subimg, nbins=hist_bins)\n\n # Scale features and make a prediction\n test_features = X_scaler.transform(np.hstack((spatial_features,\n hist_features,\n hog_features)).reshape(1, -1)) \n \n # Scale features and make a prediction \n test_prediction = svc.predict(test_features)\n \n if test_prediction == 1:\n xbox_left = np.int(xleft*scale)\n ytop_draw = np.int(ytop*scale)\n win_draw = np.int(window*scale)\n 
cv2.rectangle(draw_img,(xbox_left, ytop_draw+ystart),\n (xbox_left+win_draw,ytop_draw+win_draw+ystart),\n (255,0,0),8)\n bbox_list.append(((xbox_left, ytop_draw+ystart), \n (xbox_left+win_draw,ytop_draw+win_draw+ystart)))\n \n \n return bbox_list\n\n\n# ### Test on test_images\n\n# In[18]:\n\n\nfor i in range(1,7):\n \n fname = 'test_images\/test{}.jpg'.format(i)\n img = mpimg.imread(fname)\n \n\n orient=10\n pix_per_cell=8\n cell_per_block=2\n spatial_size=(32, 32)\n hist_bins=64\n \n bbox_list = []\n \n \n ystart = 380\n ystop = 550\n scale = 1.0\n bbox_list.append(find_cars(img, ystart, ystop, scale, svc, X_scaler,\n orient, pix_per_cell, cell_per_block,\n spatial_size, hist_bins))\n \n ystart = 400\n ystop = 600\n scale = 1.5\n bbox_list.append(find_cars(img, ystart, ystop, scale, svc, X_scaler,\n orient, pix_per_cell, cell_per_block,\n spatial_size, hist_bins))\n \n ystart = 400\n ystop = 656\n scale = 2.0\n bbox_list.append(find_cars(img, ystart, ystop, scale, svc, X_scaler,\n orient, pix_per_cell, cell_per_block,\n spatial_size, hist_bins))\n \n \n bbox_list = [item for sublist in bbox_list for item in sublist] \n \n #out_img = draw_boxes(img, bbox_list, random_color=True)\n out_img = draw_boxes(img, bbox_list)\n \n heat = np.zeros_like(img[:,:,0]).astype(np.float)\n heat = add_heat(heat, bbox_list)\n heat = apply_threshold(heat, 2) \n\n # Find final boxes from heatmap using label function\n labels = label(heat)\n new_img = draw_labeled_bboxes(np.copy(img), labels)\n\n f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(18,9))\n plt.tight_layout()\n ax1.imshow(out_img)\n ax1.set_title('Search Boxes', fontsize=30)\n ax2.imshow(heat, cmap='hot')\n ax2.set_title('Heat Map', fontsize=30)\n ax3.imshow(new_img)\n ax3.set_title('Bounding Boxes', fontsize=30)\n #plt.savefig('output_images\/heat_map1.png', bbox_inches=\"tight\")\n\n\n# ## Final Pipeline\n\n# In[19]:\n\n\ndef detect_vehicle(img):\n\n orient=10\n pix_per_cell=8\n cell_per_block=2\n spatial_size=(32, 32)\n hist_bins=64\n \n bbox_list = []\n \n ystart = 380\n ystop = 550\n scale = 1.0\n bbox_list.append(find_cars(img, ystart, ystop, scale, svc, X_scaler,\n orient, pix_per_cell, cell_per_block,\n spatial_size, hist_bins))\n \n ystart = 400\n ystop = 600\n scale = 1.5\n bbox_list.append(find_cars(img, ystart, ystop, scale, svc, X_scaler,\n orient, pix_per_cell, cell_per_block,\n spatial_size, hist_bins))\n \n ystart = 400\n ystop = 656\n scale = 2.0\n bbox_list.append(find_cars(img, ystart, ystop, scale, svc, X_scaler,\n orient, pix_per_cell, cell_per_block,\n spatial_size, hist_bins))\n bbox_list = [item for sublist in bbox_list for item in sublist] \n \n heat = np.zeros_like(img[:,:,0]).astype(np.float)\n heat = add_heat(heat, bbox_list)\n heat = apply_threshold(heat, 2) \n\n # Find final boxes from heatmap using label function\n labels = label(heat)\n new_img = draw_labeled_bboxes(np.copy(img), labels)\n \n return new_img\n\n\n# ### Test on Video Stream\n\n# In[20]:\n\n\nclip1 = VideoFileClip(\"test_video.mp4\")\ntest_output = \"test_output.mp4\"\ntest_clip = clip1.fl_image(detect_vehicle)\nget_ipython().run_line_magic('time', 'test_clip.write_videofile(test_output, audio=False)')\n\n\n# In[21]:\n\n\nclip2 = VideoFileClip(\"project_video.mp4\")\noutput = \"project_output.mp4\"\nclip = clip2.fl_image(detect_vehicle)\nget_ipython().run_line_magic('time', 'clip.write_videofile(output, audio=False)')\n\n\n# In[ ]:\n\n\n\n\n"} 
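The vehicle-detection notebook above merges overlapping sliding-window hits by accumulating them into a heat map, zeroing pixels below a vote threshold, and letting a connected-component labeling pass produce one bounding box per vehicle. The snippet below is a stand-alone sketch of that merge step with a hand-made list of boxes; the image size, threshold, and box coordinates are illustrative assumptions, and it uses the modern `scipy.ndimage.label` import path rather than the deprecated `scipy.ndimage.measurements` module used in the record.

```python
import numpy as np
from scipy.ndimage import label  # connected-component labeling, as in the pipeline above

def add_heat(heatmap, bbox_list):
    # Each box ((x1, y1), (x2, y2)) votes +1 for every pixel it covers.
    for ((x1, y1), (x2, y2)) in bbox_list:
        heatmap[y1:y2, x1:x2] += 1
    return heatmap

def heat_to_bboxes(img_shape, bbox_list, threshold=1):
    heat = np.zeros(img_shape[:2], dtype=float)
    heat = add_heat(heat, bbox_list)
    heat[heat <= threshold] = 0        # reject pixels with too few overlapping detections
    labels, n_cars = label(heat)       # group surviving pixels into one region per car
    bboxes = []
    for car in range(1, n_cars + 1):
        ys, xs = np.nonzero(labels == car)
        bboxes.append(((xs.min(), ys.min()), (xs.max(), ys.max())))
    return bboxes

# Three overlapping hypothetical detections plus one isolated false positive.
hits = [((100, 400), (164, 464)), ((110, 410), (174, 474)),
        ((120, 400), (184, 464)), ((600, 420), (664, 484))]
# The lone hit is filtered out by the threshold; the overlapping hits merge into one box.
print(heat_to_bboxes((720, 1280, 3), hits, threshold=1))
```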
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_304","text":"tests\/test_series.py\nfrom sympy import (\n symbols, cos, sin, log, sqrt,\n Tuple, pi, Plane, S, I, im,\n Circle, Point,\n Piecewise, And, Eq, Interval, Abs, lambdify\n)\nfrom spb.series import (\n LineOver1DRangeSeries, Parametric2DLineSeries, Parametric3DLineSeries,\n SurfaceOver2DRangeSeries, ContourSeries, ParametricSurfaceSeries,\n InteractiveSeries,\n ImplicitSeries,\n Vector2DSeries, Vector3DSeries, SliceVector3DSeries,\n ComplexSeries, ComplexInteractiveSeries, ComplexPointSeries,\n ComplexPointInteractiveSeries,\n GeometrySeries, GeometryInteractiveSeries,\n PlaneSeries, PlaneInteractiveSeries,\n List2DSeries\n)\nfrom spb.functions import _process_piecewise\nimport numpy as np\nfrom pytest import warns, raises\n\ndef test_lin_log_scale():\n # Verify that data series create the correct spacing in the data.\n x, y, z = symbols(\"x, y, z\")\n\n s = LineOver1DRangeSeries(x, (x, 1, 10), adaptive=False, n=50, xscale=\"linear\")\n xx, _ = s.get_data()\n assert np.isclose(xx[1] - xx[0], xx[-1] - xx[-2])\n\n s = LineOver1DRangeSeries(x, (x, 1, 10), adaptive=False, n=50, xscale=\"log\")\n xx, _ = s.get_data()\n assert not np.isclose(xx[1] - xx[0], xx[-1] - xx[-2])\n\n s = Parametric2DLineSeries(\n cos(x), sin(x), (x, pi \/ 2, 1.5 * pi), adaptive=False, n=50, xscale=\"linear\"\n )\n _, _, param = s.get_data()\n assert np.isclose(param[1] - param[0], param[-1] - param[-2])\n\n s = Parametric2DLineSeries(\n cos(x), sin(x), (x, pi \/ 2, 1.5 * pi), adaptive=False, n=50, xscale=\"log\"\n )\n _, _, param = s.get_data()\n assert not np.isclose(param[1] - param[0], param[-1] - param[-2])\n\n s = Parametric3DLineSeries(\n cos(x), sin(x), x, (x, pi \/ 2, 1.5 * pi), adaptive=False, n=50, xscale=\"linear\"\n )\n _, _, _, param = s.get_data()\n assert np.isclose(param[1] - param[0], param[-1] - param[-2])\n\n s = Parametric3DLineSeries(\n cos(x), sin(x), x, (x, pi \/ 2, 1.5 * pi), adaptive=False, n=50, xscale=\"log\"\n )\n _, _, _, param = s.get_data()\n assert not np.isclose(param[1] - param[0], param[-1] - param[-2])\n\n s = SurfaceOver2DRangeSeries(\n cos(x ** 2 + y ** 2),\n (x, 1, 5),\n (y, 1, 5),\n n=10,\n xscale=\"linear\",\n yscale=\"linear\",\n )\n xx, yy, _ = s.get_data()\n assert np.isclose(xx[0, 1] - xx[0, 0], xx[0, -1] - xx[0, -2])\n assert np.isclose(yy[1, 0] - yy[0, 0], yy[-1, 0] - yy[-2, 0])\n\n s = SurfaceOver2DRangeSeries(\n cos(x ** 2 + y ** 2), (x, 1, 5), (y, 1, 5), n=10, xscale=\"log\", yscale=\"log\"\n )\n xx, yy, _ = s.get_data()\n assert not np.isclose(xx[0, 1] - xx[0, 0], xx[0, -1] - xx[0, -2])\n assert not np.isclose(yy[1, 0] - yy[0, 0], yy[-1, 0] - yy[-2, 0])\n\n s = ImplicitSeries(\n cos(x ** 2 + y ** 2) > 0,\n (x, 1, 5),\n (y, 1, 5),\n n=10,\n xscale=\"linear\",\n yscale=\"linear\",\n adaptive=False,\n )\n xx, yy, _, _, _ = s.get_data()\n assert np.isclose(xx[1] - xx[0], xx[-1] - xx[-2])\n assert np.isclose(yy[1] - yy[0], yy[-1] - yy[-2])\n\n s = ImplicitSeries(\n cos(x ** 2 + y ** 2) > 0,\n (x, 1, 5),\n (y, 1, 5),\n n=10,\n xscale=\"log\",\n yscale=\"log\",\n adaptive=False,\n )\n xx, yy, _, _, _ = s.get_data()\n assert not np.isclose(xx[1] - xx[0], xx[-1] - xx[-2])\n assert not np.isclose(yy[1] - yy[0], yy[-1] - yy[-2])\n\n s = InteractiveSeries([log(x)], [(x, 1e-05, 1e05)], n=10, xscale=\"linear\")\n xx, yy = s.get_data()\n assert np.isclose(xx[1] - xx[0], xx[-1] - xx[-2])\n\n s = InteractiveSeries([log(x)], [(x, 1e-05, 1e05)], n=10, xscale=\"log\")\n xx, yy = s.get_data()\n assert 
not np.isclose(xx[1] - xx[0], xx[-1] - xx[-2])\n\n s = ComplexSeries(\n cos(x),\n (x, 1e-05, 1e05),\n n=10,\n xscale=\"linear\",\n adaptive=False,\n )\n xx, yy, _ = s.get_data()\n assert np.isclose(xx[0, 1] - xx[0, 0], xx[0, -1] - xx[0, -2])\n\n s = ComplexSeries(\n cos(x),\n (x, 1e-05, 1e05),\n n=10,\n xscale=\"log\",\n adaptive=False,\n )\n xx, yy, _ = s.get_data()\n assert not np.isclose(xx[0, 1] - xx[0, 0], xx[0, -1] - xx[0, -2])\n\n s = Vector3DSeries(\n x,\n y,\n z,\n (x, 1, 1e05),\n (y, 1, 1e05),\n (z, 1, 1e05),\n xscale=\"linear\",\n yscale=\"linear\",\n zscale=\"linear\",\n )\n xx, yy, zz, _, _, _ = s.get_data()\n assert np.isclose(\n xx[0, :, 0][1] - xx[0, :, 0][0], xx[0, :, 0][-1] - xx[0, :, 0][-2]\n )\n assert np.isclose(\n yy[:, 0, 0][1] - yy[:, 0, 0][0], yy[:, 0, 0][-1] - yy[:, 0, 0][-2]\n )\n assert np.isclose(\n zz[0, 0, :][1] - zz[0, 0, :][0], zz[0, 0, :][-1] - zz[0, 0, :][-2]\n )\n\n s = Vector3DSeries(\n x,\n y,\n z,\n (x, 1, 1e05),\n (y, 1, 1e05),\n (z, 1, 1e05),\n xscale=\"log\",\n yscale=\"log\",\n zscale=\"log\",\n )\n xx, yy, zz, _, _, _ = s.get_data()\n assert not np.isclose(\n xx[0, :, 0][1] - xx[0, :, 0][0], xx[0, :, 0][-1] - xx[0, :, 0][-2]\n )\n assert not np.isclose(\n yy[:, 0, 0][1] - yy[:, 0, 0][0], yy[:, 0, 0][-1] - yy[:, 0, 0][-2]\n )\n assert not np.isclose(\n zz[0, 0, :][1] - zz[0, 0, :][0], zz[0, 0, :][-1] - zz[0, 0, :][-2]\n )\n\n\ndef test_data_shape():\n # Verify that the series produces the correct data shape when the input\n # expression is a number.\n u, x, y, z = symbols(\"u, x:z\")\n\n # scalar expression: it should return a numpy ones array\n s = LineOver1DRangeSeries(1, (x, -5, 5))\n xx, yy = s.get_data()\n assert len(xx) == len(yy)\n assert np.all(yy == 1)\n\n s = LineOver1DRangeSeries(1, (x, -5, 5), adaptive=False)\n xx, yy = s.get_data()\n assert len(xx) == len(yy)\n assert np.all(yy == 1)\n\n s = Parametric2DLineSeries(sin(x), 1, (x, 0, pi))\n xx, yy, param = s.get_data()\n assert (len(xx) == len(yy)) and (len(xx) == len(param))\n assert np.all(yy == 1)\n\n s = Parametric2DLineSeries(1, sin(x), (x, 0, pi))\n xx, yy, param = s.get_data()\n assert (len(xx) == len(yy)) and (len(xx) == len(param))\n assert np.all(xx == 1)\n\n s = Parametric2DLineSeries(sin(x), 1, (x, 0, pi), adaptive=False)\n xx, yy, param = s.get_data()\n assert (len(xx) == len(yy)) and (len(xx) == len(param))\n assert np.all(yy == 1)\n\n s = Parametric2DLineSeries(1, sin(x), (x, 0, pi), adaptive=False)\n xx, yy, param = s.get_data()\n assert (len(xx) == len(yy)) and (len(xx) == len(param))\n assert np.all(xx == 1)\n\n s = Parametric3DLineSeries(cos(x), sin(x), 1, (x, 0, 2 * pi))\n xx, yy, zz, param = s.get_data()\n assert (len(xx) == len(yy)) and (len(xx) == len(zz)) and (len(xx) == len(param))\n assert np.all(zz == 1)\n\n s = Parametric3DLineSeries(cos(x), 1, x, (x, 0, 2 * pi))\n xx, yy, zz, param = s.get_data()\n assert (len(xx) == len(yy)) and (len(xx) == len(zz)) and (len(xx) == len(param))\n assert np.all(yy == 1)\n\n s = Parametric3DLineSeries(1, sin(x), x, (x, 0, 2 * pi))\n xx, yy, zz, param = s.get_data()\n assert (len(xx) == len(yy)) and (len(xx) == len(zz)) and (len(xx) == len(param))\n assert np.all(xx == 1)\n\n s = SurfaceOver2DRangeSeries(1, (x, -2, 2), (y, -3, 3))\n xx, yy, zz = s.get_data()\n assert (xx.shape == yy.shape) and (xx.shape == zz.shape)\n assert np.all(zz == 1)\n\n s = ParametricSurfaceSeries(1, x, y, (x, 0, 1), (y, 0, 1))\n xx, yy, zz = s.get_data()\n assert (xx.shape == yy.shape) and (xx.shape == zz.shape)\n assert np.all(xx == 1)\n\n s = 
ParametricSurfaceSeries(1, 1, y, (x, 0, 1), (y, 0, 1))\n xx, yy, zz = s.get_data()\n assert (xx.shape == yy.shape) and (xx.shape == zz.shape)\n assert np.all(yy == 1)\n\n s = ParametricSurfaceSeries(x, 1, 1, (x, 0, 1), (y, 0, 1))\n xx, yy, zz = s.get_data()\n assert (xx.shape == yy.shape) and (xx.shape == zz.shape)\n assert np.all(zz == 1)\n\n s = ComplexSeries(1, (x, -5, 5), modules=None)\n xx, yy, zz = s.get_data()\n assert (xx.shape == yy.shape) and (xx.shape == zz.shape)\n assert np.all(zz == 1)\n\n s = ComplexSeries(1, (x, -5, 5), modules=\"mpmath\")\n xx, yy, zz = s.get_data()\n assert (xx.shape == yy.shape) and (xx.shape == zz.shape)\n assert np.all(zz == 1)\n\n s = ComplexSeries(1, (x, -5 - 2 * I, 5 + 2 * I), domain_coloring=True,\n modules=None)\n rr, ii, mag, arg, colors, _ = s.get_data()\n assert (rr.shape == ii.shape) and (rr.shape[:2] == colors.shape[:2])\n assert (rr.shape == mag.shape) and (rr.shape == arg.shape)\n\n s = ComplexSeries(1, (x, -5 - 2 * I, 5 + 2 * I), domain_coloring=True,\n modules=\"mpmath\")\n rr, ii, mag, arg, colors, _ = s.get_data()\n assert (rr.shape == ii.shape) and (rr.shape[:2] == colors.shape[:2])\n assert (rr.shape == mag.shape) and (rr.shape == arg.shape)\n\n # Corresponds to LineOver1DRangeSeries\n s = InteractiveSeries([S.One], [Tuple(x, -5, 5)])\n s.update_data(dict())\n xx, yy = s.get_data()\n assert len(xx) == len(yy)\n assert np.all(yy == 1)\n\n # Corresponds to Parametric2DLineSeries\n s = InteractiveSeries([S.One, sin(x)], [Tuple(x, 0, pi)])\n s.update_data(dict())\n xx, yy, param = s.get_data()\n assert (len(xx) == len(yy)) and (len(xx) == len(param))\n assert np.all(xx == 1)\n\n s = InteractiveSeries([sin(x), S.One], [Tuple(x, 0, pi)])\n s.update_data(dict())\n xx, yy, param = s.get_data()\n assert (len(xx) == len(yy)) and (len(xx) == len(param))\n assert np.all(yy == 1)\n\n # Corresponds to Parametric3DLineSeries\n s = InteractiveSeries([cos(x), sin(x), S.One], [(x, 0, 2 * pi)])\n s.update_data(dict())\n xx, yy, zz, param = s.get_data()\n assert (len(xx) == len(yy)) and (len(xx) == len(param)) and (len(xx) == len(zz))\n assert np.all(zz == 1)\n\n s = InteractiveSeries([S.One, sin(x), x], [(x, 0, 2 * pi)])\n s.update_data(dict())\n xx, yy, zz, param = s.get_data()\n assert (len(xx) == len(yy)) and (len(xx) == len(param)) and (len(xx) == len(zz))\n assert np.all(xx == 1)\n\n s = InteractiveSeries([cos(x), S.One, x], [(x, 0, 2 * pi)])\n s.update_data(dict())\n xx, yy, zz, param = s.get_data()\n assert (len(xx) == len(yy)) and (len(xx) == len(param)) and (len(xx) == len(zz))\n assert np.all(yy == 1)\n\n # Corresponds to SurfaceOver2DRangeSeries\n s = InteractiveSeries([S.One], [(x, -2, 2), (y, -3, 3)])\n s.update_data(dict())\n xx, yy, zz = s.get_data()\n assert (xx.shape == yy.shape) and (xx.shape == zz.shape)\n assert np.all(zz == 1)\n\n # Corresponds to ParametricSurfaceSeries\n s = InteractiveSeries([S.One, x, y], [(x, 0, 1), (y, 0, 1)])\n s.update_data(dict())\n xx, yy, zz = s.get_data()\n assert (xx.shape == yy.shape) and (xx.shape == zz.shape)\n assert np.all(xx == 1)\n\n s = InteractiveSeries([x, S.One, y], [(x, 0, 1), (y, 0, 1)])\n s.update_data(dict())\n xx, yy, zz = s.get_data()\n assert (xx.shape == yy.shape) and (xx.shape == zz.shape)\n assert np.all(yy == 1)\n\n s = InteractiveSeries([x, y, S.One], [(x, 0, 1), (y, 0, 1)])\n s.update_data(dict())\n xx, yy, zz = s.get_data()\n assert (xx.shape == yy.shape) and (xx.shape == zz.shape)\n assert np.all(zz == 1)\n\n s = ComplexInteractiveSeries(S.One, (x, -5, 5), real=True, 
imag=False,\n modules=None)\n s.update_data(dict())\n xx, yy, zz = s.get_data()\n assert (xx.shape == yy.shape) and (xx.shape == zz.shape)\n\n\ndef test_interactive():\n u, x, y, z = symbols(\"u, x:z\")\n\n # verify that InteractiveSeries produces the same numerical data as their\n # corresponding non-interactive series.\n def do_test(data1, data2):\n assert len(data1) == len(data2)\n for d1, d2 in zip(data1, data2):\n assert np.allclose(d1, d2)\n\n s1 = InteractiveSeries([u * cos(x)], [(x, -5, 5)], \"\", params={u: 1}, n1=50)\n s2 = LineOver1DRangeSeries(cos(x), (x, -5, 5), \"\", adaptive=False, n=50)\n do_test(s1.get_data(), s2.get_data())\n\n s1 = InteractiveSeries(\n [u * cos(x), u * sin(x)], [(x, -5, 5)], \"\", params={u: 1}, n1=50\n )\n s2 = Parametric2DLineSeries(cos(x), sin(x), (x, -5, 5), \"\", adaptive=False, n=50)\n do_test(s1.get_data(), s2.get_data())\n\n s1 = InteractiveSeries(\n [u * cos(x), u * sin(x), u * x], [(x, -5, 5)], \"\", params={u: 1}, n1=50\n )\n s2 = Parametric3DLineSeries(cos(x), sin(x), x, (x, -5, 5), \"\", adaptive=False, n=50)\n do_test(s1.get_data(), s2.get_data())\n\n s1 = InteractiveSeries(\n [cos(x ** 2 + y ** 2)],\n [(x, -3, 3), (y, -3, 3)],\n \"\",\n params={u: 1},\n n1=50,\n n2=50,\n )\n s2 = SurfaceOver2DRangeSeries(\n cos(x ** 2 + y ** 2), (x, -3, 3), (y, -3, 3), \"\", adaptive=False, n1=50, n2=50\n )\n do_test(s1.get_data(), s2.get_data())\n\n s1 = InteractiveSeries(\n [cos(x + y), sin(x + y), x - y],\n [(x, -3, 3), (y, -3, 3)],\n \"\",\n params={u: 1},\n n1=50,\n n2=50,\n )\n s2 = ParametricSurfaceSeries(\n cos(x + y),\n sin(x + y),\n x - y,\n (x, -3, 3),\n (y, -3, 3),\n \"\",\n adaptive=False,\n n1=50,\n n2=50,\n )\n do_test(s1.get_data(), s2.get_data())\n\n s1 = InteractiveSeries(\n [-u * y, u * x], [(x, -3, 3), (y, -2, 2)], \"\", params={u: 1}, n1=15, n2=15\n )\n s2 = Vector2DSeries(-y, x, (x, -3, 3), (y, -2, 2), \"\", n1=15, n2=15)\n do_test(s1.get_data(), s2.get_data())\n\n s1 = InteractiveSeries(\n [u * z, -u * y, u * x],\n [(x, -3, 3), (y, -2, 2), (z, -1, 1)],\n \"\",\n params={u: 1},\n n1=15,\n n2=15,\n n3=15,\n )\n s2 = Vector3DSeries(\n z, -y, x, (x, -3, 3), (y, -2, 2), (z, -1, 1), \"\", n1=15, n2=15, n3=15\n )\n do_test(s1.get_data(), s2.get_data())\n\n s1 = InteractiveSeries(\n [u * z, -u * y, u * x],\n [(x, -3, 3), (y, -2, 2), (z, -1, 1)],\n \"\",\n params={u: 1},\n slice=Plane((-1, 0, 0), (1, 0, 0)),\n n1=15,\n n2=15,\n n3=15,\n )\n s2 = SliceVector3DSeries(\n Plane((-1, 0, 0), (1, 0, 0)),\n z,\n -y,\n x,\n (x, -3, 3),\n (y, -2, 2),\n (z, -1, 1),\n \"\",\n n1=15,\n n2=15,\n n3=15,\n )\n do_test(s1.get_data(), s2.get_data())\n\n ### Test InteractiveSeries and ComplexInteractiveSeries with complex\n ### functions\n\n # complex function evaluated over a real line with numpy\n s1 = InteractiveSeries(\n [(z ** 2 + 1) \/ (z ** 2 - 1)], [(z, -3, 3)], \"\", n1=50,\n is_complex=True, modules=None)\n s2 = LineOver1DRangeSeries(\n (z ** 2 + 1) \/ (z ** 2 - 1), (z, -3, 3), \"\", adaptive=False,\n n=50, is_complex=True, modules=None)\n do_test(s1.get_data(), s2.get_data())\n\n # complex function evaluated over a real line with mpmath\n s1 = InteractiveSeries(\n [(z ** 2 + 1) \/ (z ** 2 - 1)], [(z, -3, 3)], \"\",\n n1=11, is_complex=True, modules=\"mpmath\")\n s2 = LineOver1DRangeSeries(\n (z ** 2 + 1) \/ (z ** 2 - 1), (z, -3, 3), \"\", adaptive=False,\n n=11, is_complex=True, modules=\"mpmath\")\n do_test(s1.get_data(), s2.get_data())\n\n # abs\/arg values of complex function evaluated over a real line wit numpy\n expr = (z ** 2 + 1) \/ (z ** 2 - 
1)\n s1 = InteractiveSeries(\n [expr], [(z, -3, 3)], \"\",\n n1=50, is_complex=True, absarg=expr, modules=None)\n s2 = LineOver1DRangeSeries(\n expr, (z, -3, 3), \"\", adaptive=False,\n n=50, is_complex=True, absarg=expr, modules=None)\n do_test(s1.get_data(), s2.get_data())\n\n # abs\/arg values of complex function evaluated over a real line wit mpmath\n expr = (z ** 2 + 1) \/ (z ** 2 - 1)\n s1 = InteractiveSeries(\n [expr], [(z, -3, 3)], \"\",\n n1=50, is_complex=True, absarg=expr, modules=\"mpmath\")\n s2 = LineOver1DRangeSeries(\n expr, (z, -3, 3), \"\", adaptive=False,\n n=50, is_complex=True, absarg=expr, modules=\"mpmath\")\n do_test(s1.get_data(), s2.get_data())\n\n # domain coloring or 3D\n s1 = ComplexInteractiveSeries(\n u * (z ** 2 + 1) \/ (z ** 2 - 1), (z, -3 - 4 * I, 3 + 4 * I), \"\",\n n1=20, n2=20, domain_coloring=True, params = {u: 1}, modules=None\n )\n s2 = ComplexSeries(\n (z ** 2 + 1) \/ (z ** 2 - 1), (z, -3 - 4 * I, 3 + 4 * I), \"\",\n n1=20, n2=20, domain_coloring=True, modules=None\n )\n do_test(s1.get_data(), s2.get_data())\n\ndef test_complex_discretization():\n x, y, z = symbols(\"x:z\")\n\n # test complex discretization for LineOver1DRangeSeries and\n # SurfaceOver2DRangeSeries and InteractiveSeries\n\n # is_complex=True should produce (intermediate) complex results, which are\n # later converted to floats. is_complex=False should produce (intermediate)\n # float results.\n s1 = LineOver1DRangeSeries(sqrt(x), (x, -10, 10), \"s1\",\n adaptive=False, is_complex=False, modules=None, n=10)\n s2 = LineOver1DRangeSeries(sqrt(x), (x, -10, 10), \"s1\",\n adaptive=False, is_complex=True, modules=None, n=10)\n d1 = s1._uniform_sampling(lambdify([s1.var], s1.expr, s1.modules))\n d2 = s2._uniform_sampling(lambdify([s2.var], s2.expr, s2.modules))\n assert all(isinstance(t, float) for t in d1[0])\n assert all(isinstance(t, float) for t in d1[1])\n assert all(isinstance(t, complex) for t in d2[0])\n assert all(isinstance(t, complex) for t in d2[1])\n d3 = s1.get_data()\n with warns(np.ComplexWarning):\n d4 = s2.get_data()\n\n # Mpmath and Numpy produces different results\n s1 = LineOver1DRangeSeries(im(sqrt(-x)), (x, -10, 10), \"s1\",\n adaptive=False, is_complex=True, modules=None, n=10)\n s2 = LineOver1DRangeSeries(im(sqrt(-x)), (x, -10, 10), \"s1\",\n adaptive=False, is_complex=True, modules=\"mpmath\", n=10)\n d1, d2 = s1.get_data(), s2.get_data()\n assert (d1[-1][-1] < 0) and (d2[-1][-1] > 0)\n assert np.array_equal(d1[-1], -d2[-1])\n\n def do_test(data1, data2, compare=True):\n assert len(data1) == len(data2)\n for d1, d2 in zip(data1, data2):\n assert (d1.dtype == np.float64) and (d2.dtype == np.float64)\n if compare:\n assert np.array_equal(d1, d2)\n\n # using Numpy and a real discretization will produce NaN value when x<0.\n with warns(RuntimeWarning, match=\"invalid value encountered in sqrt\"):\n s1 = LineOver1DRangeSeries(sqrt(x), (x, -10, 10), \"s1\",\n adaptive=False, is_complex=False, modules=None, n=20)\n s1.get_data()\n\n # using Numpy or Mpmath with complex discretization won't raise warnings.\n # Results between Numpy as Mpmath shoudl be really close\n s2 = LineOver1DRangeSeries(sqrt(x), (x, -10, 10), \"s2\",\n adaptive=False, is_complex=True, modules=None, n=20)\n s3 = LineOver1DRangeSeries(sqrt(x), (x, -10, 10), \"s3\",\n adaptive=False, is_complex=True, modules=\"mpmath\", n=20)\n do_test(s2.get_data(), s3.get_data())\n\n\n # using Numpy and a real discretization will produce NaN value when x<0.\n with warns(RuntimeWarning, match=\"invalid value 
encountered in sqrt\"):\n s4 = LineOver1DRangeSeries(sqrt(x), (x, -10, 10), \"s4\",\n adaptive=True, is_complex=False, modules=None)\n s4.get_data()\n\n # using Numpy or Mpmath with complex discretization won't raise warnings.\n # Results between Numpy as Mpmath shoudl be really close.\n # NOTE: changed the function because the adaptive algorithm works by\n # checking the collinearity between three points (the x, y coordinates must\n # be real). Instead, with \"mpmath\" the y coordinate is a complex number.\n s5 = LineOver1DRangeSeries(im(sqrt(x)), (x, -10, 10), \"s5\",\n adaptive=True, is_complex=True, modules=None)\n s6 = LineOver1DRangeSeries(im(sqrt(x)), (x, -10, 10), \"s6\",\n adaptive=True, is_complex=True, modules=\"mpmath\")\n # can't directly compare the results because of the adaptive sampling\n do_test(s5.get_data(), s6.get_data(), False)\n\n\n # Mpmath and Numpy produces different results\n s1 = SurfaceOver2DRangeSeries(im(sqrt(-x)), (x, -5, 5), (y, -5, 5),\n is_complex=False, modules=None)\n s2 = SurfaceOver2DRangeSeries(im(sqrt(-x)), (x, -5, 5), (y, -5, 5),\n is_complex=True, modules=\"mpmath\")\n d1, d2 = s1.get_data(), s2.get_data()\n assert (d1[-1][-1, -1] < 0) and (d2[-1][-1, -1] > 0)\n assert np.all(np.abs(d1[-1]) - np.abs(d2[-1])) < 1e-08\n\n # Interactive series produces the same numerical data as LineOver1DRangeSeries.\n # NOTE: InteractiveSeries doesn't support adaptive algorithm!\n s1 = LineOver1DRangeSeries(im(sqrt(-x)), (x, -10, 10), \"s1\",\n adaptive=False, is_complex=True, modules=None, n=10)\n s2 = InteractiveSeries([im(sqrt(-x))], [(x, -10, 10)], \"s2\",\n is_complex=True, modules=None, n1=10)\n s3 = InteractiveSeries([im(sqrt(-x))], [(x, -10, 10)], \"s3\",\n is_complex=True, modules=\"mpmath\", n1=10)\n d1, d2, d3 = s1.get_data(), s2.get_data(), s3.get_data()\n do_test(d1, d2)\n assert np.all(np.abs(d1[-1]) - np.abs(d3[-1])) < 1e-08\n\n expr = sqrt(-x)\n s1 = LineOver1DRangeSeries(expr, (x, -10, 10), \"s1\",\n adaptive=False, is_complex=True, modules=None, n=10, absarg=expr)\n s2 = InteractiveSeries([expr], [(x, -10, 10)], \"s2\",\n is_complex=True, modules=None, n1=10, absarg=expr)\n s3 = InteractiveSeries([expr], [(x, -10, 10)], \"s3\",\n is_complex=True, modules=\"mpmath\", n1=10, absarg=expr)\n d1, d2, d3 = s1.get_data(), s2.get_data(), s3.get_data()\n do_test(d1, d2)\n assert np.all(np.abs(d1[-1]) - np.abs(d3[-1])) < 1e-08\n\n # Interactive series produces the same numerical data as SurfaceOver2DRangeSeries\n s1 = SurfaceOver2DRangeSeries(im(sqrt(-x)), (x, -3, 3), (y, -3, 3),\n is_complex=True, modules=\"mpmath\", n1=20, n2=20)\n s2 = InteractiveSeries([im(sqrt(-x))], [(x, -3, 3), (y, -3, 3)], \"s2\",\n is_complex=True, modules=None, n1=20, n2=20)\n s3 = InteractiveSeries([im(sqrt(-x))], [(x, -3, 3), (y, -3, 3)], \"s3\",\n is_complex=True, modules=\"mpmath\", n1=20, n2=20)\n do_test(d1, d2)\n assert np.all(np.abs(d1[-1]) - np.abs(d3[-1])) < 1e-08\n\ndef test_str():\n x, y, z = symbols(\"x:z\")\n\n s = LineOver1DRangeSeries(cos(x), (x, -4, 3), \"test\")\n assert str(s) == \"cartesian line: cos(x) for x over (-4.0, 3.0)\"\n s = Parametric2DLineSeries(cos(x), sin(x), (x, -4, 3), \"test\")\n assert str(s) == \"parametric cartesian line: (cos(x), sin(x)) for x over (-4.0, 3.0)\"\n s = Parametric3DLineSeries(cos(x), sin(x), x, (x, -4, 3), \"test\")\n assert str(s) == \"3D parametric cartesian line: (cos(x), sin(x), x) for x over (-4.0, 3.0)\"\n s = SurfaceOver2DRangeSeries(cos(x * y), (x, -4, 3), (y, -2, 5), \"test\")\n assert str(s) == \"cartesian surface: 
cos(x*y) for x over (-4.0, 3.0) and y over (-2.0, 5.0)\"\n s = ContourSeries(cos(x * y), (x, -4, 3), (y, -2, 5), \"test\")\n assert str(s) == \"contour: cos(x*y) for x over (-4.0, 3.0) and y over (-2.0, 5.0)\"\n s = ParametricSurfaceSeries(cos(x * y), sin(x * y), x * y,\n (x, -4, 3), (y, -2, 5), \"test\")\n assert str(s) == \"parametric cartesian surface: (cos(x*y), sin(x*y), x*y) for x over (-4.0, 3.0) and y over (-2.0, 5.0)\"\n s = ImplicitSeries(x < y, (x, -5, 4), (y, -3, 2), \"test\")\n assert str(s) == \"Implicit expression: x < y for x over (-5.0, 4.0) and y over (-3.0, 2.0)\"\n s = ComplexPointSeries(2 + 3 * I, \"test\")\n assert str(s) == \"complex point 2 + 3*I\"\n s = ComplexPointSeries([2 + 3 * I, 4 * I], \"test\")\n assert str(s) == \"complex points (2 + 3*I, 4*I)\"\n s = ComplexPointInteractiveSeries([2 + 3 * I], \"test\")\n assert str(s) == \"complex interactive points: (2 + 3*I,)\"\n s = ComplexPointInteractiveSeries([2 + 3 * I, 4 * I], \"test\")\n assert str(s) == \"complex interactive points: (2 + 3*I, 4*I)\"\n s = ComplexSeries(sqrt(z), (z, -2-3j, 4+5j), \"test\", threed=True)\n assert str(s) == \"cartesian surface: sqrt(z) for re(z) over (-2.0, 4.0) and im(z) over (-3.0, 5.0)\"\n s = ComplexSeries(sqrt(z), (z, -2-3j, 4+5j), \"test\", domain_coloring=True)\n assert str(s) == \"domain coloring: sqrt(z) for re(z) over (-2.0, 4.0) and im(z) over (-3.0, 5.0)\"\n s = ComplexInteractiveSeries(x * sqrt(z), (z, -2-3j, 4+5j), \"test\",\n threed=True, params={x: 1})\n assert str(s) == \"interactive cartesian surface for expression: x*sqrt(z) over (z, (-2-3j), (4+5j)) and parameters [x, z]\"\n s = ComplexInteractiveSeries(x * sqrt(z), (z, -2-3j, 4+5j), \"test\",\n domain_coloring=True, params={x: 1})\n assert str(s) == \"interactive domain coloring for expression: x*sqrt(z) over (z, (-2-3j), (4+5j)) and parameters [x, z]\"\n s = Vector2DSeries(-y, x, (x, -5, 4), (y, -3, 2), \"test\")\n assert str(s) == \"2D vector series: [-y, x] over (x, -5.0, 4.0), (y, -3.0, 2.0)\"\n s = Vector3DSeries(z, y, x, (x, -5, 4), (y, -3, 2), (z, -6, 7), \"test\")\n assert str(s) == \"3D vector series: [z, y, x] over (x, -5.0, 4.0), (y, -3.0, 2.0), (z, -6.0, 7.0)\"\n s = SliceVector3DSeries(Plane((0, 0, 0), (1, 0, 0)), z, y, x,\n (x, -5, 4), (y, -3, 2), (z, -6, 7), \"test\")\n assert str(s) == \"sliced 3D vector series: [z, y, x] over (x, -5.0, 4.0), (y, -3.0, 2.0), (z, -6.0, 7.0) at Plane(Point3D(0, 0, 0), (1, 0, 0))\"\n s = PlaneSeries(Plane((0, 0, 0), (1, 1, 1)),\n (x, -5, 4), (y, -3, 2), (z, -6, 7), \"test\")\n assert str(s) == \"plane series of Plane(Point3D(0, 0, 0), (1, 1, 1)) over (x, -5, 4), (y, -3, 2), (z, -6, 7)\"\n s = PlaneInteractiveSeries([Plane((z, 0, 0), (1, 1, 1))],\n [(x, -5, 4), (y, -3, 2), (z, -6, 7)], \"test\", params={z: 1})\n assert str(s) == \"interactive plane series of Plane(Point3D(z, 0, 0), (1, 1, 1)) over (x, -5, 4), (y, -3, 2), (z, -6, 7) with parameters [z]\"\n s = GeometrySeries(Circle(Point(0, 0), 5))\n assert str(s) == \"geometry entity: Circle(Point2D(0, 0), 5)\"\n s = GeometryInteractiveSeries([Circle(Point(x, 0), 5)], [], params={x: 1})\n assert str(s) == \"interactive geometry entity: Circle(Point2D(x, 0), 5) with parameters [x]\"\n\n # interactive series\n s = InteractiveSeries([z * cos(x)], [(x, -4, 3)], \"test\", params={z: 1})\n assert str(s) == \"interactive expression: z*cos(x) with ranges (x, -4.0, 3.0) and parameters [x, z]\"\n s = InteractiveSeries([z * cos(x * y)], [(x, -4, 3), (y, -2, 1)], \"test\",\n params={z: 1})\n assert str(s) == \"interactive 
expression: z*cos(x*y) with ranges (x, -4.0, 3.0), (y, -2.0, 1.0) and parameters [x, y, z]\"\n s = InteractiveSeries([z * cos(x * y), sin(x * y), x*y],\n [(x, -4, 3), (y, -2, 1)], \"test\", params={z: 1})\n assert str(s) == \"interactive expression: (z*cos(x*y), sin(x*y), x*y) with ranges (x, -4.0, 3.0), (y, -2.0, 1.0) and parameters [x, y, z]\"\n\ndef test_piecewise():\n x = symbols(\"x\")\n\n # Test that univariate Piecewise objects are processed in such a way to\n # create multiple series, each one with the correct range\n\n f = Piecewise(\n (-1, x < -1),\n (x, And(-1 <= x, x < 0)),\n (x**2, And(0 <= x, x < 1)),\n (x**3, x >= 1)\n )\n s = _process_piecewise(f, (x, -5, 5), \"A\")\n assert len(s) == 4\n assert all(isinstance(t, LineOver1DRangeSeries) for t in s)\n assert (s[0].expr == -1) and (s[0].start == -5) and (s[0].end == -1)\n assert (s[1].expr == x) and (s[1].start == -1) and (s[1].end == 0)\n assert (s[2].expr == x**2) and (s[2].start == 0) and (s[2].end == 1)\n assert (s[3].expr == x**3) and (s[3].start == 1) and (s[3].end == 5)\n labels = [\"A\" + str(i + 1) for i in range(5)]\n assert all(t.label == l for t, l in zip(s, labels))\n\n f = Piecewise(\n (1, x < -5),\n (x, Eq(x, 0)),\n (x**2, Eq(x, 2)),\n (x**3, (x > 0) & (x < 2)),\n (x**4, True)\n )\n s = _process_piecewise(f, (x, -10, 10), \"B\")\n assert len(s) == 6\n assert all(isinstance(t, LineOver1DRangeSeries) for t in [s[0], s[3], s[4], s[5]])\n assert all(isinstance(t, List2DSeries) for t in [s[1], s[2]])\n assert (s[0].expr == 1) and (s[0].start == -10) and (s[0].end == -5)\n assert (np.allclose(s[1].list_x, np.array([0.])) and\n np.allclose(s[1].list_y, np.array([0.])))\n assert (np.allclose(s[2].list_x, np.array([2.])) and\n np.allclose(s[2].list_y, np.array([4.])))\n assert (s[3].expr == x**3) and (s[3].start == 0) and (s[3].end == 2)\n assert (s[4].expr == x**4) and (s[4].start == -5) and (s[4].end == 0)\n assert (s[5].expr == x**4) and (s[5].start == 2) and (s[5].end == 10)\n labels = [\"B\" + str(i + 1) for i in range(5)] + [\"B5\"]\n assert all(t.label == l for t, l in zip(s, labels))\n\n f = Piecewise((x, Interval(0, 1).contains(x)), (0, True))\n s = _process_piecewise(f, (x, -10, 10), \"C\")\n assert len(s) == 3\n assert all(isinstance(t, LineOver1DRangeSeries) for t in s)\n assert (s[0].expr == x) and (s[0].start == 0) and (s[0].end == 1)\n assert (s[1].expr == 0) and (s[1].start == -10) and (s[1].end == 0)\n assert (s[2].expr == 0) and (s[2].start == 1) and (s[2].end == 10)\n labels = [\"C1\", \"C2\", \"C2\"]\n assert all(t.label == l for t, l in zip(s, labels))\n\n f = Piecewise((x, Interval(0, 1, False, True).contains(x)), (0, True))\n s = _process_piecewise(f, (x, -10, 10), \"D\")\n assert len(s) == 3\n assert all(isinstance(t, LineOver1DRangeSeries) for t in s)\n assert (s[0].expr == x) and (s[0].start == 0) and (s[0].end == 1)\n assert (s[1].expr == 0) and (s[1].start == -10) and (s[1].end == 0)\n assert (s[2].expr == 0) and (s[2].start == 1) and (s[2].end == 10)\n labels = [\"D1\", \"D2\", \"D2\"]\n assert all(t.label == l for t, l in zip(s, labels))\n\n f = Piecewise((x, x < 1), (x**2, -1 <= x), (x, 3 < x))\n s = _process_piecewise(f, (x, -10, 10), \"E\")\n assert len(s) == 2\n assert all(isinstance(t, LineOver1DRangeSeries) for t in s)\n assert (s[0].expr == x) and (s[0].start == -10) and (s[0].end == 1)\n assert (s[1].expr == x**2) and (s[1].start == 1) and (s[1].end == 10)\n\n # NotImplementedError: as_set is not implemented for relationals with\n # periodic solutions\n p1 = Piecewise((cos(x), x < 
0), (0, True))\n f = Piecewise((0, Eq(p1, 0)), (p1 \/ Abs(p1), True))\n raises(NotImplementedError, lambda: _process_piecewise(f, (x, -10, 10), \"F\"))\n\n f = Piecewise((1 - x, (x >= 0) & (x < 1)), (0, True))\n s = _process_piecewise(f, (x, -10, 10), \"test\")\n assert len(s) == 3\n assert all(isinstance(t, LineOver1DRangeSeries) for t in s)\n assert (s[0].expr == 1 - x) and (s[0].start == 0) and (s[0].end == 1)\n assert (s[1].expr == 0) and (s[1].start == -10) and (s[1].end == 0)\n assert (s[2].expr == 0) and (s[2].start == 1) and (s[2].end == 10)\n\n # The range is smaller than the function \"domain\"\n f = Piecewise(\n (1, x < -5),\n (x, Eq(x, 0)),\n (x**2, Eq(x, 2)),\n (x**3, (x > 0) & (x < 2)),\n (x**4, True)\n )\n s = _process_piecewise(f, (x, -3, 3), \"A\")\n labels = [\"A2\", \"A3\", \"A4\", \"A5\", \"A5\"]\n assert all(t.label == l for t, l in zip(s, labels))\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_305","text":"import cPickle\nimport numpy as np\nfrom Bio.SVDSuperimposer import SVDSuperimposer\nfrom scipy.linalg import sqrtm, inv\n\n\ndef sym(w):\n return w.dot(inv(sqrtm(w.T.dot(w))))\n\ndef params2cc(parameters, return_steps=False):\n steps = []\n\n h2_tran = parameters[-3:]\n\n h2_rot = np.reshape(parameters[-12:-3], (3,3))\n helical_params = np.reshape(parameters[:-12], (2,3))\n\n h2_rot = sym(h2_rot)\n\n h1_ref = du_mean_helix\n if return_steps:\n step_coords = np.append(h1_ref,h1_ref,axis=0)\n steps.append(step_coords)\n\n h1_dev, h2_dev = du_pca_helix.inverse_transform(helical_params)\n h1_dev = np.reshape(h1_dev, (h1_ref.shape[0],h1_ref.shape[1]))\n h2_dev = np.reshape(h2_dev, (h1_ref.shape[0],h1_ref.shape[1]))\n\n h1 = h1_ref+h1_dev\n h2 = h1_ref+h2_dev\n\n if return_steps:\n step_coords = np.append(h1,h2,axis=0)\n steps.append(step_coords)\n\n h2_new = np.dot(h2, h2_rot)\n if return_steps:\n step_coords = np.append(h1,h2_new,axis=0)\n steps.append(step_coords)\n\n h2_new = h2_new + h2_tran\n if return_steps:\n step_coords = np.append(h1,h2_new,axis=0)\n steps.append(step_coords)\n\n orig_coords = np.append(h1,h2_new,axis=0)\n\n if return_steps:\n return steps\n\n return orig_coords\n\n\ndef cc2params(coords):\n\n sup=SVDSuperimposer()\n\n n_atoms_mono = int(coords.shape[0]\/2)\n\n h1 = coords[:n_atoms_mono]\n h2 = coords[n_atoms_mono:]\n h1_ref = du_mean_helix\n\n # align h1 and h2 with mean angles to the ref helix\n\n sup.set(h1_ref, h1)\n sup.run()\n h1_aligned_ref = sup.get_transformed()\n\n sup.set(h1_ref, h2)\n sup.run()\n h2_aligned_ref = sup.get_transformed()\n\n # estimate parameters from pca\n # center to h1_ref\n h1_aligned_ref = h1_aligned_ref - h1_ref\n h2_aligned_ref = h2_aligned_ref - h1_ref\n # unwrap\n h1_aligned_ref = np.reshape(h1_aligned_ref, (h1_aligned_ref.shape[0]*h1_aligned_ref.shape[1]))\n h2_aligned_ref = np.reshape(h2_aligned_ref, (h2_aligned_ref.shape[0]*h2_aligned_ref.shape[1]))\n # get params\n helical_params =du_pca_helix.transform([h1_aligned_ref, h2_aligned_ref])\n h1_aligned_ref, h2_aligned_ref = du_pca_helix.inverse_transform(helical_params)\n h1_aligned_ref = np.reshape(h1_aligned_ref, (h1_ref.shape[0],h1_ref.shape[1]))\n h2_aligned_ref = np.reshape(h2_aligned_ref, (h1_ref.shape[0],h1_ref.shape[1]))\n\n # construct ideal helices\n\n h1_transformed = h1_ref+h1_aligned_ref\n h2_transformed = h1_ref+h2_aligned_ref\n\n # adjust hi_helix1 and hi_helix2 by the parameters\n\n # align h1 to h1 ideal and transform all coords\n\n sup.set(h1, h1_transformed)\n sup.run()\n h1_ideal = 
sup.get_transformed()\n\n # align h2_ideal to h2\n\n sup.set(h2, h2_transformed)\n sup.run()\n h2_ideal = sup.get_transformed()\n\n coords_ideal = np.append(h1_ideal,h2_ideal,axis=0)\n\n # align ideal coords to the ref helix\n sup.set(h1_transformed,h1_ideal)\n sup.run()\n (rot_ref, tran_ref) = sup.get_rotran()\n\n coords_ideal = np.dot(coords_ideal,rot_ref) + tran_ref\n\n h1_new = coords_ideal[:n_atoms_mono]\n h2_new = coords_ideal[n_atoms_mono:]\n\n sup.set(h2_new, h2_transformed)\n sup.run()\n (rot2, tran2) = sup.get_rotran()\n h2_rot = rot2.flatten()\n helical_params = helical_params.flatten()\n transform_params = np.append(helical_params,np.append(h2_rot, tran2))\n\n return transform_params\n\n\ndu_pca_helix, du_mean_helix = cPickle.load(open('helix_template.pkl', \"rb\"))\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_306","text":"import sympy\nfrom qibo import gates, K\nfrom qibo.config import raise_error\n\n\nclass HamiltonianTerm:\n \"\"\"Term of a :class:`qibo.core.hamiltonians.SymbolicHamiltonian`.\n\n Symbolic Hamiltonians are represented by a list of\n :class:`qibo.core.terms.HamiltonianTerm` objects storred in the\n ``SymbolicHamiltonian.terms`` attribute. The mathematical expression of\n the Hamiltonian is the sum of these terms.\n\n Args:\n matrix (np.ndarray): Full matrix corresponding to the term representation\n in the computational basis. Has size (2^n, 2^n) where n is the\n number of target qubits of this term.\n q (list): List of target qubit ids.\n \"\"\"\n\n def __init__(self, matrix, *q):\n for qi in q:\n if qi < 0:\n raise_error(ValueError, \"Invalid qubit id {} < 0 was given \"\n \"in Hamiltonian term\".format(qi))\n if not (matrix is None or isinstance(matrix, K.qnp.numeric_types) or\n isinstance(matrix, K.qnp.tensor_types)):\n raise_error(TypeError, \"Invalid type {} of symbol matrix.\"\n \"\".format(type(matrix)))\n dim = int(matrix.shape[0])\n if 2 ** len(q) != dim:\n raise_error(ValueError, \"Matrix dimension {} given in Hamiltonian \"\n \"term is not compatible with the number \"\n \"of target qubits {}.\"\n \"\".format(dim, len(q)))\n self.target_qubits = tuple(q)\n self._gate = None\n self.hamiltonian = None\n self._matrix = matrix\n\n @property\n def matrix(self):\n \"\"\"Matrix representation of the term.\"\"\"\n return self._matrix\n\n @property\n def gate(self):\n \"\"\":class:`qibo.abstractions.gates.Unitary` gate that implements the action of the term on states.\"\"\"\n if self._gate is None:\n self._gate = gates.Unitary(self.matrix, *self.target_qubits)\n return self._gate\n\n def exp(self, x):\n \"\"\"Matrix exponentiation of the term.\"\"\"\n return K.qnp.expm(-1j * x * self.matrix)\n\n def expgate(self, x):\n \"\"\":class:`qibo.abstractions.gates.Unitary` gate implementing the action of exp(term) on states.\"\"\"\n return gates.Unitary(self.exp(x), *self.target_qubits)\n\n def merge(self, term):\n \"\"\"Creates a new term by merging the given term to the current one.\n\n The resulting term corresponds to the sum of the two original terms.\n The target qubits of the given term should be a subset of the target\n qubits of the current term.\n \"\"\"\n if not set(term.target_qubits).issubset(set(self.target_qubits)):\n raise_error(ValueError, \"Cannot merge HamiltonianTerm acting on \"\n \"qubits {} to term on qubits {}.\"\n \"\".format(term.target_qubits, self.target_qubits))\n matrix = K.np.kron(term.matrix, K.qnp.eye(2 ** (len(self) - len(term))))\n matrix = K.np.reshape(matrix, 2 * len(self) * (2,))\n order 
= []\n i = len(term)\n for qubit in self.target_qubits:\n if qubit in term.target_qubits:\n order.append(term.target_qubits.index(qubit))\n else:\n order.append(i)\n i += 1\n order.extend([x + len(order) for x in order])\n matrix = K.np.transpose(matrix, order)\n matrix = K.np.reshape(matrix, 2 * (2 ** len(self),))\n return HamiltonianTerm(self.matrix + matrix, *self.target_qubits)\n\n def __len__(self):\n return len(self.target_qubits)\n\n def __mul__(self, x):\n return HamiltonianTerm(x * self.matrix, *self.target_qubits)\n\n def __rmul__(self, x):\n return self.__mul__(x)\n\n def __call__(self, state, density_matrix=False):\n \"\"\"Applies the term on a given state vector or density matrix.\"\"\"\n if density_matrix:\n self.gate.density_matrix = True\n return self.gate._density_matrix_half_call(state)\n return self.gate(state) # pylint: disable=E1102\n\n\nclass SymbolicTerm(HamiltonianTerm):\n \"\"\":class:`qibo.core.terms.HamiltonianTerm` constructed using ``sympy`` expression.\n\n Example:\n ::\n\n from qibo.symbols import X, Y\n from qibo.core.terms import SymbolicTerm\n sham = X(0) * X(1) + 2 * Y(0) * Y(1)\n termsdict = sham.as_coefficients_dict()\n sterms = [SymbolicTerm(c, f) for f, c in termsdict.items()]\n\n Args:\n coefficient (complex): Complex number coefficient of the underlying\n term in the Hamiltonian.\n factors (sympy.Expr): Sympy expression for the underlying term.\n symbol_map (dict): Dictionary that maps symbols in the given ``factors``\n expression to tuples of (target qubit id, matrix).\n This is required only if the expression is not created using Qibo\n symbols and to keep compatibility with older versions where Qibo\n symbols were not available.\n \"\"\"\n\n def __init__(self, coefficient, factors=1, symbol_map={}):\n self.coefficient = complex(coefficient)\n self._matrix = None\n self._gate = None\n self.hamiltonian = None\n\n # List of :class:`qibo.symbols.Symbol` that represent the term factors\n self.factors = []\n # Dictionary that maps target qubit ids to a list of matrices that act on each qubit\n self.matrix_map = {}\n if factors != 1:\n for factor in factors.as_ordered_factors():\n # check if factor has some power ``power`` so that the corresponding\n # matrix is multiplied ``pow`` times\n if isinstance(factor, sympy.Pow):\n factor, pow = factor.args\n assert isinstance(pow, sympy.Integer)\n assert isinstance(factor, sympy.Symbol)\n pow = int(pow)\n else:\n pow = 1\n\n # if the user is using ``symbol_map`` instead of qibo symbols,\n # create the corresponding symbols\n if factor in symbol_map:\n from qibo.symbols import Symbol\n q, matrix = symbol_map.get(factor)\n factor = Symbol(q, matrix, name=factor.name)\n\n if isinstance(factor, sympy.Symbol):\n if isinstance(factor.matrix, K.qnp.tensor_types):\n self.factors.extend(pow * [factor])\n q = factor.target_qubit\n # if pow > 1 the matrix should be multiplied multiple\n # when calculating the term's total matrix so we\n # repeat it in the corresponding list that will\n # be used during this calculation\n # see the ``SymbolicTerm.matrix`` property for the\n # full matrix calculation\n if q in self.matrix_map:\n self.matrix_map[q].extend(pow * [factor.matrix])\n else:\n self.matrix_map[q] = pow * [factor.matrix]\n else:\n self.coefficient *= factor.matrix\n elif factor == sympy.I:\n self.coefficient *= 1j\n elif factor.is_number:\n self.coefficient *= complex(factor)\n else: # pragma: no cover\n raise_error(TypeError, \"Cannot parse factor {}.\".format(factor))\n\n self.target_qubits = 
tuple(sorted(self.matrix_map.keys()))\n\n @property\n def matrix(self):\n \"\"\"Calculates the full matrix corresponding to this term.\n\n Returns:\n Matrix as a ``np.ndarray`` of shape ``(2 ** ntargets, 2 ** ntargets)``\n where ``ntargets`` is the number of qubits included in the factors\n of this term.\n \"\"\"\n if self._matrix is None:\n def matrices_product(matrices):\n \"\"\"Product of matrices that act on the same tuple of qubits.\n\n Args:\n matrices (list): List of matrices to multiply, as exists in\n the values of ``SymbolicTerm.matrix_map``.\n \"\"\"\n if len(matrices) == 1:\n return matrices[0]\n matrix = K.np.copy(matrices[0])\n for m in matrices[1:]:\n matrix = matrix @ m\n return matrix\n\n self._matrix = self.coefficient\n for q in self.target_qubits:\n matrix = matrices_product(self.matrix_map.get(q))\n self._matrix = K.np.kron(self._matrix, matrix)\n return self._matrix\n\n def copy(self):\n \"\"\"Creates a shallow copy of the term with the same attributes.\"\"\"\n new = self.__class__(self.coefficient)\n new.factors = self.factors\n new.matrix_map = self.matrix_map\n new.target_qubits = self.target_qubits\n return new\n\n def __mul__(self, x):\n \"\"\"Multiplication of scalar to the Hamiltonian term.\"\"\"\n new = self.copy()\n new.coefficient *= x\n if self._matrix is not None:\n new._matrix = x * self._matrix\n return new\n\n def __call__(self, state, density_matrix=False):\n for factor in self.factors:\n if density_matrix:\n factor.gate.density_matrix = True\n state = factor.gate._density_matrix_half_call(state)\n else:\n state = factor.gate(state)\n return self.coefficient * state\n\n\nclass TermGroup(list):\n \"\"\"Collection of multiple :class:`qibo.core.terms.HamiltonianTerm` objects.\n\n Allows merging multiple terms to a single one for faster exponentiation\n during Trotterized evolution.\n\n Args:\n term (:class:`qibo.core.terms.HamiltonianTerm`): Parent term of the group.\n All terms appended later should target a subset of the parents'\n target qubits.\n \"\"\"\n\n def __init__(self, term):\n super().__init__([term])\n self.target_qubits = set(term.target_qubits)\n self._term = None\n\n def append(self, term):\n \"\"\"Appends a new :class:`qibo.core.terms.HamiltonianTerm` to the collection.\"\"\"\n super().append(term)\n self.target_qubits |= set(term.target_qubits)\n self._term = None\n\n def can_append(self, term):\n \"\"\"Checks if a term can be appended to the group based on its target qubits.\"\"\"\n return set(term.target_qubits).issubset(self.target_qubits)\n\n @classmethod\n def from_terms(cls, terms):\n \"\"\"Divides a list of terms to multiple :class:`qibo.core.terms.TermGroup`s.\n\n Terms that target the same qubits are grouped to the same group.\n\n Args:\n terms (list): List of :class:`qibo.core.terms.HamiltonianTerm` objects.\n\n Returns:\n List of :class:`qibo.core.terms.TermGroup` objects that contain\n all the given terms.\n \"\"\"\n # split given terms according to their order\n # order = len(term.target_qubits)\n orders = {}\n for term in terms:\n if len(term) in orders:\n orders[len(term)].append(term)\n else:\n orders[len(term)] = [term]\n\n groups = []\n # start creating groups with the higher order terms as parents and then\n # append each term of lower order to the first compatible group\n for order in sorted(orders.keys())[::-1]:\n for child in orders[order]:\n flag = True\n for i, group in enumerate(groups):\n if group.can_append(child):\n group.append(child)\n flag = False\n break\n if flag:\n groups.append(cls(child))\n return 
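# --- Illustrative sketch (plain tuples instead of HamiltonianTerm objects): the
# --- greedy grouping performed by ``TermGroup.from_terms``.  Higher-order terms
# --- become group parents; lower-order terms join the first group whose qubit
# --- set already covers theirs, otherwise they start a new group.
def group_by_targets(term_targets):
    """term_targets: list of tuples of qubit ids, one tuple per term."""
    by_order = {}
    for targets in term_targets:
        by_order.setdefault(len(targets), []).append(targets)

    groups = []  # each group tracked as a set of covered qubits
    for order in sorted(by_order, reverse=True):
        for targets in by_order[order]:
            for group in groups:
                if set(targets).issubset(group):
                    group |= set(targets)
                    break
            else:
                groups.append(set(targets))
    return groups

# the two-qubit terms (0, 1) and (2, 3) absorb the one-qubit terms on 0 and 3
print(group_by_targets([(0,), (3,), (0, 1), (2, 3)]))   # -> [{0, 1}, {2, 3}]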
groups\n\n @property\n def term(self):\n \"\"\"Returns a single :class:`qibo.core.terms.HamiltonianTerm`. after merging all terms in the group.\"\"\"\n if self._term is None:\n self._term = self.to_term()\n return self._term\n\n def to_term(self, coefficients={}):\n \"\"\"Calculates a single :class:`qibo.core.terms.HamiltonianTerm` by merging all terms in the group.\n\n Args:\n coefficients (dict): Optional dictionary that allows passing a different\n coefficient to each term according to its parent Hamiltonian.\n Useful for :class:`qibo.core.adiabatic.AdiabaticHamiltonian` calculations.\n \"\"\"\n c = coefficients.get(self[0].hamiltonian)\n merged = self[0] * c if c is not None else self[0]\n for term in self[1:]:\n c = coefficients.get(term.hamiltonian)\n merged = merged.merge(term * c if c is not None else term)\n return merged\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_307","text":"import os\nimport sys\n\nsys.path.append(os.path.abspath(os.path.join(\".\/libs\/graph_embeddings\")))\nimport graph_embeddings\nimport numpy as np\nimport pandas as pd\nfrom scipy import sparse\n\nsys.path.append(os.path.abspath(os.path.join(\".\/libs\/residual2vec\")))\nimport residual2vec\n\n#\n# Input\n#\nnetfile = snakemake.input[\"netfile\"]\nnodefile = snakemake.input[\"nodefile\"] if \"nodefile\" in snakemake.input.keys() else None\ndim = int(snakemake.params[\"dim\"])\nwindow_length = int(snakemake.params[\"window_length\"])\nmodel_name = snakemake.params[\"model_name\"]\ndirected = snakemake.params[\"directed\"] == \"directed\"\nnoselfloop = (\n snakemake.params[\"noselfloop\"] == \"True\"\n if \"noselfloop\" in snakemake.params.keys()\n else False\n)\ncontroll_for = (\n snakemake.params[\"controlfor\"]\n if \"controlfor\" in snakemake.params.keys()\n else \"None\"\n)\nbackward_prob = (\n float(snakemake.params[\"backward_prob\"])\n if \"backward_prob\" in snakemake.params.keys()\n else 0\n)\nembfile = snakemake.output[\"embfile\"]\n\n#\n# Load\n#\nnet = sparse.load_npz(netfile)\n\nif nodefile is not None:\n node_table = pd.read_csv(nodefile)\n\n#\n# Preprocess\n#\nif directed is False:\n net = net + net.T\n\nif noselfloop:\n net.setdiag(0)\n\nif directed:\n eta = backward_prob \/ (1 - backward_prob)\n outdeg = np.array(net.sum(axis=1)).reshape(-1)\n indeg = np.array(net.sum(axis=0)).reshape(-1)\n eta_nodes = (\n outdeg * backward_prob \/ (indeg * (1 - backward_prob) + outdeg * backward_prob)\n )\n eta_nodes[outdeg == 0] = 1\n eta_nodes[indeg == 0] = 0\n net = sparse.diags(1 - eta_nodes) * net + sparse.diags(eta_nodes) @ net.T\n\n#\n# Load the emebdding models\n#\nmembership = np.zeros(net.shape[0])\noffset = np.zeros(net.shape[0])\nif model_name == \"node2vec\":\n model = graph_embeddings.Node2Vec(window_length=window_length, restart_prob=0)\nelif model_name == \"node2vec-qhalf\":\n model = graph_embeddings.Node2Vec(\n window_length=window_length, restart_prob=0, q=0.5\n )\nelif model_name == \"node2vec-qdouble\":\n model = graph_embeddings.Node2Vec(window_length=window_length, restart_prob=0, q=2)\nelif model_name == \"deepwalk\":\n model = graph_embeddings.DeepWalk(window_length=window_length, restart_prob=0,)\nelif model_name == \"glove\":\n model = graph_embeddings.Glove(window_length=window_length, restart_prob=0,)\nelif model_name == \"fairwalk\":\n if (controll_for == \"None\") or (node_table is None):\n model = graph_embeddings.Fairwalk(window_length=window_length)\n else:\n membership = node_table[controll_for].values\n model = 
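# --- Illustrative sketch (hypothetical toy adjacency matrix, not the snakemake
# --- inputs above): the per-node forward/backward mixing used for directed
# --- networks.  With backward probability p_b, node i keeps a fraction
# --- (1 - eta_i) of its out-edges and receives a fraction eta_i of its in-edges.
import numpy as np
from scipy import sparse

A = sparse.csr_matrix(np.array([[0, 1, 0],
                                [0, 0, 1],
                                [0, 0, 0]], dtype=float))
p_b = 0.2
outdeg = np.asarray(A.sum(axis=1)).reshape(-1)
indeg = np.asarray(A.sum(axis=0)).reshape(-1)
eta = outdeg * p_b / (indeg * (1 - p_b) + outdeg * p_b)
eta[outdeg == 0] = 1.0   # sinks: only backward edges remain
eta[indeg == 0] = 0.0    # sources: only forward edges remain
A_mixed = sparse.diags(1 - eta) @ A + sparse.diags(eta) @ A.T
print(A_mixed.toarray())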
graph_embeddings.Fairwalk(\n group_membership=membership, window_length=window_length,\n )\nelif model_name == \"residual2vec\":\n if (controll_for == \"None\") or (node_table is None):\n model = residual2vec.residual2vec_matrix_factorization(window_length=window_length,)\n else:\n membership = node_table[controll_for].values\n model = residual2vec.residual2vec_matrix_factorization(\n group_membership=membership, window_length=window_length,\n )\nelif model_name == \"leigenmap\":\n model = graph_embeddings.LaplacianEigenMap()\nelif model_name == \"netmf\":\n model = graph_embeddings.NetMF(window_length=window_length)\nelif model_name == \"graphsage\":\n model = graph_embeddings.GraphSage()\nelif model_name == \"gcn\":\n model = graph_embeddings.GCN()\nelif model_name == \"graphsage-doubleK\":\n model = graph_embeddings.GraphSage(num_default_features=dim * 2)\nelif model_name == \"gcn-doubleK\":\n model = graph_embeddings.GCN(num_default_features=dim * 2)\nelif model_name == \"gat\":\n model = graph_embeddings.GAT(layer_sizes=[64, 256])\nelif model_name == \"gat-doubleK\":\n model = graph_embeddings.GCN(num_default_features=dim * 2)\nelif model_name == \"lndeg\": # fake embedding. Just to save offset\n A = sparse.csr_matrix(net)\n deg = np.array(A.sum(axis=1)).reshape(-1)\n emb = np.zeros((len(deg), dim))\n np.savez(\n embfile,\n emb=emb,\n out_emb=emb,\n membership=np.zeros_like(deg),\n offset=np.log(np.maximum(deg, 1)),\n window_length=window_length,\n dim=dim,\n directed=directed,\n model_name=model_name,\n )\n sys.exit()\n\n\n#\n# Embedding\n#\nmodel.fit(sparse.csr_matrix(net))\nemb = model.transform(dim=dim)\n\ntry:\n offset = model.node_offset\nexcept AttributeError:\n pass\n\n#\n# Save\n#\nnp.savez(\n embfile,\n emb=emb,\n membership=membership,\n offset=offset,\n window_length=window_length,\n dim=dim,\n directed=directed,\n model_name=model_name,\n)\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_308","text":"evidence_inference\/models\/model_0.py\nfrom os.path import join, dirname, abspath\nimport sys\n\n# this monstrosity produces the module directory in an environment where this is unpacked\nsys.path.insert(0, abspath(join(dirname(abspath(__file__)), '..', '..')))\nimport copy\nimport random\n\nimport numpy as np\nfrom scipy import stats\n\nfrom sklearn.metrics import accuracy_score, precision_recall_fscore_support, classification_report\n\nfrom gensim.models import KeyedVectors\n\nimport torch\nfrom torch import optim\nimport torch.nn as nn\nfrom torch.nn import functional as F\nfrom torch.autograd import Variable\n\nUSE_CUDA = True\n\nfrom evidence_inference.preprocess.preprocessor import SimpleInferenceVectorizer as SimpleInferenceVectorizer\nfrom evidence_inference.models.utils import PaddedSequence\nfrom evidence_inference.models.attention_distributions import TokenAttention, evaluate_model_attention_distribution\n\n\nclass CBoWEncoder(nn.Module):\n \"\"\"Bag of words encoder for Intervention (also Comparator, Outcome) token sequences.\n\n Note that ordering information is discarded here, and our words are represented by continuous vectors.\n \"\"\"\n\n def __init__(self, vocab_size, embeddings: nn.Embedding=None, embedding_dim=200, use_attention=False, condition_attention=False, tokenwise_attention=False, query_dims=None):\n super(CBoWEncoder, self).__init__()\n\n self.vocab_size = vocab_size\n\n if embeddings is None:\n self.embedding = nn.Embedding(vocab_size, embedding_dim)\n else:\n self.embedding = embeddings\n self.embedding_dim = 
embeddings.embedding_dim\n\n self.use_attention = use_attention\n if self.use_attention:\n self.attention_mechanism = TokenAttention(self.embedding_dim, self.query_dims, condition_attention, tokenwise_attention)\n\n def forward(self, word_inputs: PaddedSequence, query_v_for_attention: torch.Tensor=None, normalize_attention_distribution=True):\n if isinstance(word_inputs, PaddedSequence):\n embedded = self.embedding(word_inputs.data)\n else:\n raise ValueError(\"Got an unexpected type {} for word_inputs {}\".format(type(word_inputs), word_inputs))\n if self.use_attention:\n a = self.attention_mechanism(word_inputs, embedded, query_v_for_attention, normalize=normalize_attention_distribution)\n output = torch.sum(a * embedded, dim=1)\n return None, output, a\n else:\n output = torch.sum(embedded, dim=1) \/ word_inputs.batch_sizes.unsqueeze(-1).to(torch.float)\n return output\n\n\nclass GRUEncoder(nn.Module):\n \"\"\" GRU encoder for Intervention (also Comparator, Outcome) token sequences.\n\n Also contains attention mechanisms for use with this particular encoder\n \"\"\"\n\n def __init__(self, vocab_size, n_layers=1, hidden_size=32, embeddings: nn.Embedding=None,\n use_attention=False, condition_attention=False, tokenwise_attention=False, query_dims=None, bidirectional=False):\n \"\"\" Prepares a GRU encoder for the Intervention, Comparator, or outcome token sequences.\n\n Either initializes embedding layer from existing embeddings or creates a random one of size vocab X hidden_size.\n\n When using attention we either:\n * condition on a hidden unit from the encoder and some query vector of size query_dims, which passes a linear\n combination of the two through a non-linearity (Tanh) and then compresses this to a final number\n * or we use a linear function from the output of the encoder.\n\n In both cases, we use a softmax over the possible outputs to impose a final attention distribution.\n \"\"\"\n super(GRUEncoder, self).__init__()\n if condition_attention and not use_attention:\n raise ValueError(\"Cannot condition attention when there is no attention mechanism! Try setting \"\n \"use_attention to true or condition_attention to false, \")\n if tokenwise_attention and not use_attention:\n raise ValueError(\"Cannot have element-wise attention when there is no attention mechanism! 
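# --- Illustrative sketch (hypothetical vocabulary and shapes, not the model
# --- code above): the bag-of-words encoding is a length-normalised sum of token
# --- embeddings, i.e. a masked mean pool when padding embeds to zeros.
import torch
import torch.nn as nn

emb = nn.Embedding(100, 8, padding_idx=0)            # index 0 = PAD, embeds to zeros
tokens = torch.tensor([[5, 7, 2, 0], [3, 0, 0, 0]])  # batch of 2, padded to length 4
lengths = torch.tensor([3.0, 1.0])

summed = emb(tokens).sum(dim=1)                      # PAD rows contribute zeros
pooled = summed / lengths.unsqueeze(-1)              # (batch, embedding_dim)
print(pooled.shape)                                  # torch.Size([2, 8])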
Try setting \"\n \"use_attention to true or condition_attention to false, \")\n\n self.vocab_size = vocab_size\n self.n_layers = n_layers\n self.use_attention = use_attention\n self.condition_attention = condition_attention\n self.tokenwise_attention = tokenwise_attention\n self.query_dims = query_dims\n self.bidirectional = bidirectional\n if self.bidirectional:\n self.hidden_size = hidden_size \/\/ 2\n else:\n self.hidden_size = hidden_size\n\n if embeddings is None:\n self.embedding = nn.Embedding(self.vocab_size, self.hidden_size)\n self.gru = nn.GRU(input_size=self.hidden_size, hidden_size=self.hidden_size, num_layers=self.n_layers, batch_first=True, bidirectional=self.bidirectional)\n else:\n self.embedding = embeddings\n self.gru = nn.GRU(input_size=embeddings.embedding_dim, hidden_size=self.hidden_size, num_layers=self.n_layers, batch_first=True, bidirectional=self.bidirectional)\n\n if self.use_attention:\n encoding_size = self.hidden_size + int(self.bidirectional) * self.hidden_size\n self.attention_mechanism = TokenAttention(encoding_size, self.query_dims, condition_attention, tokenwise_attention)\n\n def forward(self, word_inputs: PaddedSequence, init_hidden: torch.Tensor=None, query_v_for_attention: torch.Tensor=None, normalize_attention_distribution=True) -> (torch.Tensor, torch.Tensor):\n if isinstance(word_inputs, PaddedSequence):\n embedded = self.embedding(word_inputs.data)\n as_padded = word_inputs.pack_other(embedded)\n output, hidden = self.gru(as_padded, init_hidden)\n output = PaddedSequence.from_packed_sequence(output, batch_first=True)\n else:\n raise ValueError(\"Unknown input type {} for word_inputs: {}, try a PaddedSequence or a Tensor\".format(type(word_inputs), word_inputs))\n\n # concatenate the hidden representations\n if self.bidirectional:\n if self.n_layers > 1:\n raise ValueError(\"Implement me!\")\n hidden = torch.cat([hidden[0], hidden[1]], dim=1)\n\n if self.use_attention:\n # note that these hidden_input_states are masked to zeros (when appropriate) already when this is called.\n hidden_input_states = output\n a = self.attention_mechanism(hidden_input_states, query_v_for_attention, normalize=normalize_attention_distribution)\n\n # note this is an element-wise multiplication, so each of the hidden states is weighted by the attention vector\n weighted_hidden = torch.sum(a * output.data, dim=1)\n return output, weighted_hidden, a\n\n return output, hidden\n\n\nclass InferenceNet(nn.Module):\n \"\"\" Predicts the relative (statistical) benefits of a pair of medical interventions with respect to an outcome.\n\n The input to the model is:\n * an array of article tokens\n * an array of medical intervention tokens\n * an array of \"comparator\" tokens (i.e. 
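# --- Illustrative sketch (hypothetical module, not the TokenAttention class used
# --- above): conditioned additive attention as described in the docstring --
# --- concatenate each hidden state with the query, pass through Linear + Tanh,
# --- compress to one score per token, softmax, then weight-and-sum the hiddens.
import torch
import torch.nn as nn
import torch.nn.functional as F

class AdditiveAttention(nn.Module):
    def __init__(self, hidden_size, query_dims):
        super().__init__()
        self.score = nn.Sequential(
            nn.Linear(hidden_size + query_dims, hidden_size),
            nn.Tanh(),
            nn.Linear(hidden_size, 1),
        )

    def forward(self, hiddens, query):
        # hiddens: (batch, seq_len, hidden_size); query: (batch, query_dims)
        query = query.unsqueeze(1).expand(-1, hiddens.size(1), -1)
        scores = self.score(torch.cat([hiddens, query], dim=-1)).squeeze(-1)
        weights = F.softmax(scores, dim=1)                  # (batch, seq_len)
        return (weights.unsqueeze(-1) * hiddens).sum(dim=1), weights

attn = AdditiveAttention(hidden_size=32, query_dims=96)
context, w = attn(torch.randn(2, 10, 32), torch.randn(2, 96))
print(context.shape, w.shape)   # torch.Size([2, 32]) torch.Size([2, 10])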
an alternate intervention)\n * an array of outcome tokens\n\n The output is a distribution over whether or not the text of the particular article supports the intervention being\n statistically better (p=0.05), neutral, or worse than the comparator for the outcome.\n\n This model works via:\n * encoding the article via a gated recurrent unit\n * encoding the intervention, comparator, and outcome via either a gated recurrent unit or a continuous bag of words encoder\n * optionally allowing a separate attention mechanism within each of these units to either:\n * learn a distribution over article tokens\n * learn a distribution over article tokens conditioned on the intervention, comparator, and outcome encodings\n * passing the encoded result through a linear layer and then a softmax\n \"\"\"\n\n def __init__(self, vectorizer, h_size=32,\n init_embeddings=None,\n init_wvs_path=\"embeddings\/PubMed-w2v.bin\",\n weight_tying=False,\n ICO_encoder=\"CBoW\",\n article_encoder=\"GRU\",\n attention_over_article_tokens=True,\n condition_attention=True,\n tokenwise_attention=False,\n tune_embeddings=False,\n h_dropout_rate=0.2):\n super(InferenceNet, self).__init__()\n if condition_attention and not attention_over_article_tokens:\n raise ValueError(\"Must have attention in order to have conditional attention!\")\n\n self.vectorizer = vectorizer\n vocab_size = len(self.vectorizer.idx_to_str)\n \n if init_embeddings is None:\n print(\"loading pre-trained embeddings...\")\n init_embedding_weights = InferenceNet.init_word_vectors(init_wvs_path, vectorizer)\n print(\"done.\")\n else:\n print(\"Using provided embeddings\")\n init_embedding_weights = init_embeddings\n\n self.ICO_encoder = ICO_encoder\n\n # this is the size of the concatenated representations,\n # which will depend on the encoder variant being used.\n self.ICO_dims = None\n\n if ICO_encoder == \"CBoW\":\n self.intervention_encoder = CBoWEncoder(vocab_size=vocab_size, embeddings=init_embedding_weights)\n self.comparator_encoder = CBoWEncoder(vocab_size=vocab_size, embeddings=init_embedding_weights)\n self.outcome_encoder = CBoWEncoder(vocab_size=vocab_size, embeddings=init_embedding_weights)\n if article_encoder == 'CBoW':\n self.ICO_dims = init_embedding_weights.embedding_dim * 3\n MLP_input_size = self.ICO_dims + init_embedding_weights.embedding_dim\n if h_size:\n print(\"Warning: ignoring the hidden size as the article encoder is CBoW and emits a fixed output\")\n elif article_encoder == 'GRU' or article_encoder == 'biGRU':\n self.ICO_dims = init_embedding_weights.embedding_dim * 3\n MLP_input_size = self.ICO_dims + h_size\n else:\n raise ValueError(\"Unknown article_encoder type {}\".format(article_encoder))\n elif ICO_encoder == \"GRU\" or ICO_encoder == 'biGRU':\n bidirectional = ICO_encoder == 'biGRU'\n # then use an RNN encoder for I, C, O elements.\n self.intervention_encoder = GRUEncoder(vocab_size=vocab_size, hidden_size=h_size,\n embeddings=init_embedding_weights, bidirectional=bidirectional)\n self.comparator_encoder = GRUEncoder(vocab_size=vocab_size, hidden_size=h_size,\n embeddings=init_embedding_weights, bidirectional=bidirectional)\n self.outcome_encoder = GRUEncoder(vocab_size=vocab_size, hidden_size=h_size,\n embeddings=init_embedding_weights, bidirectional=bidirectional)\n self.ICO_dims = h_size * 3 \n if article_encoder == 'CBoW':\n # note that the CBoW encoder ignores the h_size here\n MLP_input_size = self.ICO_dims + init_embedding_weights.embedding_dim\n elif article_encoder == 'GRU' or article_encoder == 'biGRU':\n 
MLP_input_size = self.ICO_dims + h_size # the input to the MLP is the concatentation of the ICO hidden states and the article hidden states.\n else:\n raise ValueError(\"Unknown article_encoder type {}\".format(article_encoder))\n else:\n raise ValueError(\"No such encoder: {}\".format(ICO_encoder))\n\n self.article_encoder_type = article_encoder\n if article_encoder == 'GRU' or article_encoder == 'biGRU':\n bidirectional = article_encoder == 'biGRU'\n self.article_encoder = GRUEncoder(vocab_size=vocab_size, hidden_size=h_size,\n embeddings=init_embedding_weights,\n use_attention=attention_over_article_tokens,\n condition_attention=condition_attention,\n tokenwise_attention=tokenwise_attention,\n query_dims=self.ICO_dims,\n bidirectional=bidirectional)\n elif article_encoder == 'CBoW':\n self.article_encoder = CBoWEncoder(vocab_size=vocab_size,\n embeddings=init_embedding_weights,\n use_attention=attention_over_article_tokens,\n condition_attention=condition_attention,\n tokenwise_attention=tokenwise_attention,\n query_dims=self.ICO_dims)\n else:\n raise ValueError(\"Unknown article encoder type: {}\".format(article_encoder))\n\n if not tune_embeddings:\n print(\"freezing word embedding layer!\")\n for layer in (\n self.article_encoder, self.intervention_encoder, self.comparator_encoder, self.outcome_encoder):\n # note: we are relying on the fact that all encoders will have a\n # \"embedding\" layer (nn.Embedding). \n layer.embedding.requires_grad = False\n layer.embedding.weight.requires_grad = False\n\n # weight tying (optional)\n # note that this is not meaningful (or, rather, does nothing) when embeddings are\n # frozen.\n # TODO note that weights are currently tied because all the ICOEncoders use the same underlying objects.\n if weight_tying:\n print(\"tying word embedding layers\")\n self.intervention_encoder.embedding.weight = self.comparator_encoder.embedding.weight = \\\n self.outcome_encoder.embedding.weight = self.article_encoder.embedding.weight\n self.batch_first = True\n\n self.MLP_hidden = nn.Linear(MLP_input_size, 16)\n self.out = nn.Linear(16, 3)\n self.dropout = nn.Dropout(p=h_dropout_rate)\n\n def _encode(self, I_tokens, C_tokens, O_tokens):\n if self.ICO_encoder == \"CBoW\":\n # simpler case of a CBoW encoder.\n I_v = self.intervention_encoder(I_tokens)\n C_v = self.comparator_encoder(C_tokens)\n O_v = self.outcome_encoder(O_tokens)\n elif self.ICO_encoder == 'GRU' or self.ICO_encoder == 'biGRU':\n # then we have an RNN encoder. 
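# --- Illustrative sketch (hypothetical sizes, not the InferenceNet definition
# --- above): the prediction head concatenates the article encoding with the
# --- intervention / comparator / outcome encodings and maps the result through
# --- a small MLP to a distribution over the three outcome classes.
import torch
import torch.nn as nn
import torch.nn.functional as F

h_size, ico_dim, batch = 32, 200, 4
head = nn.Sequential(nn.Dropout(0.2),
                     nn.Linear(3 * ico_dim + h_size, 16),
                     nn.Linear(16, 3))

a_v = torch.randn(batch, h_size)                          # article encoding
I_v, C_v, O_v = (torch.randn(batch, ico_dim) for _ in range(3))
logits = head(torch.cat([a_v, I_v, C_v, O_v], dim=1))
probs = F.softmax(logits, dim=1)
print(probs.shape, probs.sum(dim=1))                      # (4, 3), rows sum to 1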
Hidden layers are automatically initialized\n _, I_v = self.intervention_encoder(I_tokens)\n _, C_v = self.comparator_encoder(C_tokens)\n _, O_v = self.outcome_encoder(O_tokens)\n else:\n raise ValueError(\"No such encoder: {}\".format(self.ICO_encoder))\n return I_v, C_v, O_v\n\n def forward(self, article_tokens: PaddedSequence, I_tokens: PaddedSequence, C_tokens: PaddedSequence, O_tokens: PaddedSequence,\n batch_size, debug_attn=False, verbose_attn=False):\n if isinstance(article_tokens, PaddedSequence):\n assert all([isinstance(x, PaddedSequence) for x in [I_tokens, C_tokens, O_tokens]])\n elif isinstance(article_tokens, torch.Tensor):\n # TODO test this codepath\n assert all([isinstance(x, torch.Tensor) for x in [I_tokens, C_tokens, O_tokens]]) and all([x.shape[0] == 1 for x in [article_tokens, I_tokens, C_tokens, O_tokens]])\n else:\n raise ValueError(\"Got an unexpected type for our input tensor: {}\".format(type(article_tokens)))\n\n ##################################################\n # First encode the I, C, O frame (the query) #\n ##################################################\n # the output of each of these should be of shape (batch x word_embedding_size)\n I_v, C_v, O_v = self._encode(I_tokens, C_tokens, O_tokens)\n\n if self.article_encoder.use_attention:\n\n query_v = None\n if self.article_encoder.condition_attention:\n query_v = torch.cat([I_v, C_v, O_v], dim=1)\n\n _, a_v, attn_weights = self.article_encoder(article_tokens, query_v_for_attention=query_v)\n\n # @TODO return to debugging\/inspecting attention\n if verbose_attn:\n attn_weights = attn_weights.data.cpu().numpy()\n for i in range(batch_size):\n attn_weights_slice = attn_weights[i][:article_tokens.batch_sizes[i].item()].squeeze()\n sorted_idx = np.argsort(attn_weights_slice)\n # hack\n if sorted_idx.size == 1:\n continue\n length = len(attn_weights_slice)\n top_words = [self.vectorizer.idx_to_str[article_tokens.data[i][idx]] for idx in sorted_idx[max(-20, -1 * length):]]\n top_words.reverse()\n top_words_weights = [attn_weights_slice[idx] for idx in sorted_idx[max(-20, -1 * length):]]\n top_words_weights.reverse()\n bottom_words = [self.vectorizer.idx_to_str[article_tokens.data[i][idx]] for idx in sorted_idx[:min(20, length)]]\n bottom_words.reverse()\n bottom_words_weights = [attn_weights_slice[idx] for idx in sorted_idx[:min(20, length)]]\n bottom_words_weights.reverse()\n\n def tokens_to_str(tokens):\n return \", \".join([self.vectorizer.idx_to_str[x.item()] for x in tokens])\n print(\"I, C, O frame:\",\n tokens_to_str(I_tokens.data[i][:I_tokens.batch_sizes[i]]), \";\",\n tokens_to_str(C_tokens.data[i][:C_tokens.batch_sizes[i]]), \":\",\n tokens_to_str(O_tokens.data[i][:O_tokens.batch_sizes[i]]))\n print(\"top words:\", \", \".join(top_words))\n print(\"weights:\", \", \".join(str(x) for x in top_words_weights))\n print(\"bottom words:\", \", \".join(bottom_words))\n print(\"weights:\", \", \".join(str(x) for x in bottom_words_weights))\n\n else:\n if self.article_encoder_type == 'CBoW':\n # TODO implement attention for the CBoW model\n a_v = self.article_encoder(article_tokens)\n elif self.article_encoder_type == 'GRU' or self.article_encoder_type == 'biGRU':\n _, a_v = self.article_encoder(article_tokens)\n else:\n raise ValueError(\"Unknown article encoder type {}\".format(self.article_encoder_type))\n\n # TODO document this\n if len(a_v.size()) == 3:\n a_v = a_v.squeeze(0)\n h = torch.cat([a_v, I_v, C_v, O_v], dim=1)\n h = self.dropout(h)\n raw_out = self.out(self.MLP_hidden(h))\n\n return 
F.softmax(raw_out, dim=1)\n\n @classmethod\n def init_word_vectors(cls, path_to_wvs, vectorizer, use_cuda=USE_CUDA) -> nn.Embedding:\n WVs = KeyedVectors.load_word2vec_format(path_to_wvs, binary=True)\n\n E = np.zeros((len(vectorizer.str_to_idx), WVs.vector_size))\n WV_matrix = np.matrix([WVs[v] for v in WVs.vocab.keys()])\n mean_vector = np.mean(WV_matrix, axis=0)\n\n for idx, token in enumerate(vectorizer.idx_to_str):\n if token in WVs:\n E[idx] = WVs[token]\n else:\n E[idx] = mean_vector\n # TODO make this cleaner\n padding_idx = int(vectorizer.str_to_idx[SimpleInferenceVectorizer.PAD])\n E[padding_idx] = torch.zeros(E.shape[1])\n embedding = nn.Embedding(E.shape[0], E.shape[1], padding_idx=padding_idx)\n embedding.weight.data.copy_(torch.from_numpy(E))\n embedding.weight.requires_grad = False\n if use_cuda:\n embedding = embedding.cuda()\n return embedding\n\n\ndef _get_y_vec(y_dict, as_vec=True, majority_lbl=True) -> torch.LongTensor:\n # +1 because raw labels are -1, 0, 1 -> 0, 1, 2\n # for indexing reasons that appear in the loss function\n # (cross-entropy loss wants the index of the highest value, and we index at 0)\n all_labels = [y_j[0] + 1 for y_j in y_dict]\n if majority_lbl:\n y_collapsed = int(stats.mode(all_labels)[0][0])\n else:\n y_collapsed = random.choice(all_labels)\n\n if as_vec:\n y_vec = np.zeros(3)\n y_vec[y_collapsed] = 1.0\n ret = torch.LongTensor(y_vec)\n else:\n ret = torch.LongTensor([y_collapsed])\n if USE_CUDA:\n ret = ret.cuda()\n return ret\n\n\ndef _to_torch_var(x):\n var_x = Variable(torch.LongTensor(x))\n if USE_CUDA:\n var_x = var_x.cuda()\n return var_x\n\n\ndef predict_for_inst(nnet, inst, verbose_attn=False):\n abstract = _to_torch_var(inst[\"article\"]).unsqueeze(0)\n I, C, O = _to_torch_var(inst[\"I\"]).unsqueeze(0), _to_torch_var(inst[\"C\"]).unsqueeze(0), _to_torch_var(inst[\"O\"]).unsqueeze(0)\n print(\"sizes:\", abstract.size(), I.size(), C.size(), O.size())\n y_hat = nnet(abstract, I, C, O, batch_size=1, verbose_attn=verbose_attn)\n return y_hat\n\n\n'''\ndef conf_matrix(nnet, instances):\n M = np.zeros((3,3))\n for inst in instances:\n y = _get_y_vec(inst['y'], as_vec=False)\n y_hat = np.argmax(predict_for_inst(nnet, inst))\n M[y, y_hat] += 1.0\n return M\n'''\n\n\ndef make_preds(nnet, instances, batch_size, inference_vectorizer, verbose_attn_to_batches=False, cuda=USE_CUDA):\n # TODO consider removing the inference_vectorizer since all we need is an unk_idx from it\n y_vec = torch.cat([_get_y_vec(inst['y'], as_vec=False) for inst in instances]).squeeze()\n unk_idx = int(inference_vectorizer.str_to_idx[SimpleInferenceVectorizer.PAD])\n y_hat_vec = []\n # we batch this so the GPU doesn't run out of memory\n nnet.eval()\n for i in range(0, len(instances), batch_size):\n batch_instances = instances[i:i+batch_size]\n articles, Is, Cs, Os = [PaddedSequence.autopad([torch.LongTensor(inst[x]) for inst in batch_instances], batch_first=True, padding_value=unk_idx) for x in ['article', 'I', 'C', 'O']]\n if cuda:\n articles, Is, Cs, Os = articles.cuda(), Is.cuda(), Cs.cuda(), Os.cuda()\n verbose_attn = verbose_attn_to_batches and i in verbose_attn_to_batches\n y_hat_batch = nnet(articles, Is, Cs, Os, batch_size=len(batch_instances), verbose_attn=verbose_attn)\n y_hat_vec.append(y_hat_batch)\n nnet.train()\n return y_vec, torch.cat(y_hat_vec, dim=0)\n\n\ndef to_int_preds(y):\n # the cast to int is necessary as this gets passed to sklearn packages that don't understand numpy.int64, which is the default return type here.\n return [int(np.argmax(y_i)) for 
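# --- Illustrative sketch (toy vocabulary in place of the PubMed word2vec file):
# --- out-of-vocabulary tokens get the mean pretrained vector and the PAD row is
# --- forced to zeros before the matrix is copied into an nn.Embedding, mirroring
# --- the approach of ``init_word_vectors``.
import numpy as np
import torch
import torch.nn as nn

pretrained = {"aspirin": np.array([1.0, 0.0]), "placebo": np.array([0.0, 1.0])}
idx_to_str = ["<PAD>", "aspirin", "placebo", "ibuprofen"]     # "ibuprofen" is OOV
mean_vec = np.mean(np.stack(list(pretrained.values())), axis=0)

E = np.stack([pretrained.get(tok, mean_vec) for tok in idx_to_str])
E[0] = 0.0                                                    # PAD row
embedding = nn.Embedding(len(idx_to_str), E.shape[1], padding_idx=0)
embedding.weight.data.copy_(torch.from_numpy(E))
embedding.weight.requires_grad = False
print(embedding(torch.tensor([0, 3])))            # zeros, then the mean vector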
y_i in y.cpu()]\n\n\ndef _loss_for_inst(inst, nnet, criterion):\n y = _get_y_vec(inst['y'], as_vec=False).squeeze()\n y_hat = predict_for_inst(nnet, inst)\n ####\n # as per https:\/\/github.com\/pytorch\/pytorch\/issues\/5554, \n # output needs to have dims (N, C), so we add an extra\n # dim for N here (just 1).\n y_hat = torch.unsqueeze(y_hat, dim=0)\n if USE_CUDA:\n y_hat = y_hat.cuda()\n y = y.cuda()\n\n return criterion(y_hat, y)\n\n\ndef _get_majority_label(inst):\n all_lbls = [y[0] + 1 for y in inst['y']]\n return stats.mode(all_lbls)[0][0]\n\n\ndef train(ev_inf: InferenceNet, train_Xy, val_Xy, test_Xy, inference_vectorizer, epochs=10, batch_size=16, shuffle=True):\n # we sort these so batches all have approximately the same length (ish), which decreases the \n # average amount of padding needed, and thus total number of steps in training.\n if not shuffle:\n train_Xy.sort(key=lambda x: len(x['article']))\n val_Xy.sort(key=lambda x: len(x['article']))\n test_Xy.sort(key=lambda x: len(x['article']))\n print(\"Using {} training examples, {} validation examples, {} testing examples\".format(len(train_Xy), len(val_Xy), len(test_Xy)))\n most_common = stats.mode([_get_majority_label(inst) for inst in train_Xy])[0][0]\n\n best_val_model = None\n best_val_f1 = float('-inf')\n if USE_CUDA:\n ev_inf = ev_inf.cuda()\n\n optimizer = optim.Adam(ev_inf.parameters())\n criterion = nn.CrossEntropyLoss(reduction='sum') # sum (not average) of the batch losses.\n\n # TODO add epoch timing information here\n epochs_since_improvement = 0\n val_metrics = {\n \"val_acc\": [],\n \"val_p\": [],\n \"val_r\": [],\n \"val_f1\": [],\n \"val_loss\": [],\n 'train_loss': [],\n 'val_aucs': [],\n 'train_aucs': [],\n 'val_entropies': [],\n 'val_evidence_token_mass': [],\n 'val_evidence_token_err': [],\n 'train_entropies': [],\n 'train_evidence_token_mass': [],\n 'train_evidence_token_err': []\n }\n for epoch in range(epochs):\n if epochs_since_improvement > 10:\n print(\"Exiting early due to no improvement on validation after 10 epochs.\")\n break\n if shuffle:\n random.shuffle(train_Xy)\n\n epoch_loss = 0\n for i in range(0, len(train_Xy), batch_size):\n instances = train_Xy[i:i+batch_size]\n ys = torch.cat([_get_y_vec(inst['y'], as_vec=False) for inst in instances], dim=0)\n # TODO explain the use of padding here\n unk_idx = int(inference_vectorizer.str_to_idx[SimpleInferenceVectorizer.PAD])\n articles, Is, Cs, Os = [PaddedSequence.autopad([torch.LongTensor(inst[x]) for inst in instances], batch_first=True, padding_value=unk_idx) for x in ['article', 'I', 'C', 'O']]\n optimizer.zero_grad()\n if USE_CUDA:\n articles, Is, Cs, Os = articles.cuda(), Is.cuda(), Cs.cuda(), Os.cuda()\n ys = ys.cuda()\n verbose_attn = (epoch == epochs - 1 and i == 0) or (epoch == 0 and i == 0)\n if verbose_attn:\n print(\"Training attentions:\")\n tags = ev_inf(articles, Is, Cs, Os, batch_size=len(instances), verbose_attn=verbose_attn)\n loss = criterion(tags, ys)\n #if loss.item() != loss.item():\n # import pdb; pdb.set_trace()\n epoch_loss += loss.item()\n loss.backward()\n optimizer.step()\n val_metrics['train_loss'].append(epoch_loss)\n\n with torch.no_grad():\n verbose_attn_to_batches = set([0,1,2,3,4]) if epoch == epochs - 1 or epoch == 0 else False\n if verbose_attn_to_batches:\n print(\"Validation attention:\")\n # make_preds runs in eval mode\n val_y, val_y_hat = make_preds(ev_inf, val_Xy, batch_size, inference_vectorizer, verbose_attn_to_batches=verbose_attn_to_batches)\n val_loss = criterion(val_y_hat, val_y.squeeze())\n y_hat = 
to_int_preds(val_y_hat)\n\n if epoch == 0:\n dummy_preds = [most_common] * len(val_y)\n dummy_acc = accuracy_score(val_y.cpu(), dummy_preds)\n val_metrics[\"baseline_val_acc\"] = dummy_acc\n p, r, f1, _ = precision_recall_fscore_support(val_y.cpu(), dummy_preds, labels=None, beta=1, average='macro', pos_label=1, warn_for=('f-score',), sample_weight=None)\n val_metrics['p_dummy'] = p\n val_metrics['r_dummy'] = r\n val_metrics['f_dummy'] = f1\n\n print(\"val dummy accuracy: {:.3f}\".format(dummy_acc))\n print(\"classification report for dummy on val: \")\n print(classification_report(val_y.cpu(), dummy_preds))\n print(\"\\n\\n\")\n\n acc = accuracy_score(val_y.cpu(), y_hat)\n val_metrics[\"val_acc\"].append(acc)\n val_loss = val_loss.cpu().item()\n val_metrics[\"val_loss\"].append(val_loss)\n \n # f1 = f1_score(val_y, y_hat, average=\"macro\")\n p, r, f1, _ = precision_recall_fscore_support(val_y.cpu(), y_hat, labels=None, beta=1, average='macro', pos_label=1, warn_for=('f-score',), sample_weight=None)\n val_metrics[\"val_f1\"].append(f1)\n val_metrics[\"val_p\"].append(p)\n val_metrics[\"val_r\"].append(r)\n\n if ev_inf.article_encoder.use_attention:\n train_auc, train_entropies, train_evidence_token_masses, train_evidence_token_err = evaluate_model_attention_distribution(ev_inf, train_Xy, cuda=USE_CUDA, compute_attention_diagnostics=True)\n val_auc, val_entropies, val_evidence_token_masses, val_evidence_token_err = evaluate_model_attention_distribution(ev_inf, val_Xy, cuda=USE_CUDA, compute_attention_diagnostics=True)\n print(\"train auc: {:.3f}, entropy: {:.3f}, evidence mass: {:.3f}, err: {:.3f}\".format(train_auc, train_entropies, train_evidence_token_masses, train_evidence_token_err))\n print(\"val auc: {:.3f}, entropy: {:.3f}, evidence mass: {:.3f}, err: {:.3f}\".format(val_auc, val_entropies, val_evidence_token_masses, val_evidence_token_err))\n else:\n train_auc, train_entropies, train_evidence_token_masses, train_evidence_token_err = \"\", \"\", \"\", \"\"\n val_auc, val_entropies, val_evidence_token_masses, val_evidence_token_err = \"\", \"\", \"\", \"\"\n val_metrics['train_aucs'].append(train_auc)\n val_metrics['train_entropies'].append(train_entropies)\n val_metrics['train_evidence_token_mass'].append(train_evidence_token_masses)\n val_metrics['train_evidence_token_err'].append(train_evidence_token_err)\n val_metrics['val_aucs'].append(val_auc)\n val_metrics['val_entropies'].append(val_entropies)\n val_metrics['val_evidence_token_mass'].append(val_evidence_token_masses)\n val_metrics['val_evidence_token_err'].append(val_evidence_token_err)\n if f1 > best_val_f1:\n print(\"New best model at {} with val f1 {:.3f}\".format(epoch, f1))\n best_val_f1 = f1\n best_val_model = copy.deepcopy(ev_inf)\n epochs_since_improvement = 0\n else:\n epochs_since_improvement += 1\n\n #if val_loss != val_loss or epoch_loss != epoch_loss:\n # import pdb; pdb.set_trace()\n\n print(\"epoch {}. 
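# --- Illustrative sketch (made-up labels, not the evidence-inference data): the
# --- "dummy" baseline logged on the first validation pass predicts the most
# --- common training label for every instance and is scored with the same
# --- sklearn metrics as the real model.
from collections import Counter
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

train_labels = [2, 2, 1, 2, 0, 2]
val_labels = [2, 0, 2, 1]
most_common = Counter(train_labels).most_common(1)[0][0]
dummy_preds = [most_common] * len(val_labels)

acc = accuracy_score(val_labels, dummy_preds)
p, r, f1, _ = precision_recall_fscore_support(val_labels, dummy_preds,
                                              average="macro", zero_division=0)
print("dummy acc {:.3f}  macro-F1 {:.3f}".format(acc, f1))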
train loss: {}; val loss: {}; val acc: {:.3f}\".format(\n epoch, epoch_loss, val_loss, acc))\n \n print(classification_report(val_y.cpu(), y_hat))\n print(\"val macro f1: {0:.3f}\".format(f1))\n print(\"\\n\\n\")\n\n val_metrics['best_val_f1'] = best_val_f1\n with torch.no_grad():\n print(\"Test attentions:\")\n verbose_attn_to_batches = set([0,1,2,3,4])\n # make_preds runs in eval mode\n test_y, test_y_hat = make_preds(best_val_model, test_Xy, batch_size, inference_vectorizer, verbose_attn_to_batches=verbose_attn_to_batches)\n test_loss = criterion(test_y_hat, test_y.squeeze())\n y_hat = to_int_preds(test_y_hat)\n final_test_preds = zip([t['a_id'] for t in test_Xy], [t['p_id'] for t in test_Xy], y_hat)\n\n acc = accuracy_score(test_y.cpu(), y_hat)\n val_metrics[\"test_acc\"] = acc\n test_loss = test_loss.cpu().item()\n val_metrics[\"test_loss\"] = test_loss\n\n # f1 = f1_score(test_y, y_hat, average=\"macro\")\n p, r, f1, _ = precision_recall_fscore_support(test_y.cpu(), y_hat, labels=None, beta=1, average='macro', pos_label=1, warn_for=('f-score',), sample_weight=None)\n val_metrics[\"test_f1\"] = f1\n val_metrics[\"test_p\"] = p\n val_metrics[\"test_r\"] = r\n if ev_inf.article_encoder.use_attention:\n test_auc, test_entropies, test_evidence_token_masses, test_evidence_token_err = evaluate_model_attention_distribution(best_val_model, test_Xy, cuda=USE_CUDA, compute_attention_diagnostics=True)\n print(\"test auc: {:.3f}, , entropy: {:.3f}, kl_to_uniform {:.3f}\".format(test_auc, test_entropies, test_evidence_token_masses))\n else:\n test_auc, test_entropies, test_evidence_token_masses, test_evidence_token_err = \"\", \"\", \"\", \"\"\n val_metrics['test_auc'] = test_auc\n val_metrics['test_entropy'] = test_entropies\n val_metrics['test_evidence_token_mass'] = test_evidence_token_masses\n val_metrics['test_evidence_token_err'] = test_evidence_token_err\n\n print(\"test loss: {}; test acc: {:.3f}\".format(test_loss, acc))\n\n print(classification_report(test_y.cpu(), y_hat))\n print(\"test macro f1: {}\".format(f1))\n print(\"\\n\\n\")\n\n return best_val_model, inference_vectorizer, train_Xy, val_Xy, val_metrics, final_test_preds\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_309","text":"LCAV\/lippmann-photography\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 21 16:27:58 2017\n\n@author: gbaechle\n\"\"\"\n\nfrom scipy import misc, io\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom skimage.color import rgb2xyz, xyz2rgb\nfrom lippmann import *\nimport imageio\n\nimport sys\nsys.path.append(\"..\/\")\nimport color_tools as ct\n\nplt.close('all')\n\n\n\ndef read_image(path):\n\n return imageio.imread(path).astype(float)\/255.\n \n \ndef compute_spectrum_slice(sliced, lambdas):\n \n #comppute the spectrum\n im_xyz = xyz2rgb(sliced.reshape((1,-1,3))).reshape(-1, 3)\n spectrum = ct.from_xyz_to_spectrum(im_xyz, lambdas)\n \n return spectrum\n \n\ndef compute_lippmann_slice(spectrums, lambdas, depths):\n \n lippmann = np.zeros((len(spectrums), len(depths)))\n \n for i, s in enumerate(spectrums):\n print(i)\n lip, _ = lippmann_transform(lambdas, s, depths) \n lippmann[i, :] = lip\n \n return lippmann\n \n \ndef compute_end_plate(im, lambdas, vmax):\n \n two_k = 4 * np.pi \/ lambdas\n \n im_xyz = xyz2rgb(im)\n spectrums = ct.from_xyz_to_spectrum(im_xyz, lambdas)\n \n intensity = -np.trapz(spectrums, two_k*c\/2, axis=2)\n mpl.image.imsave('Figures\/baseline.png', intensity, vmax=vmax, vmin=0)\n \n return intensity\n\n 
\ndef generate_slices(im, N=500):\n \n lambdas, _ = generate_wavelengths(N)\n depths = generate_depths(delta_z=2.5E-9, max_depth=2.5E-6)\n \n H = 883-1\n L = 883-1\n slice1 = compute_spectrum_slice(im[:H, L, :3], lambdas)\n slice2 = compute_spectrum_slice(im[H, :L, :3], lambdas)\n slice3 = compute_spectrum_slice(im[:H, 0, :3], lambdas)\n slice4 = compute_spectrum_slice(im[0, :L, :3], lambdas)\n \n lip1 = compute_lippmann_slice(slice1, lambdas, depths)\n lip2 = compute_lippmann_slice(slice2, lambdas, depths)\n lip3 = compute_lippmann_slice(slice3, lambdas, depths)\n lip4 = compute_lippmann_slice(slice4, lambdas, depths)\n \n print(np.max(lip1), np.max(lip2), np.max(lip3), np.max(lip4))\n vmax = max(np.max(lip1), np.max(lip2), np.max(lip3), np.max(lip4))\n \n for i in range(1,5): \n \n i_str = str(i)\n mpl.image.imsave('Figures\/slice' + i_str + '.png', eval('lip' + i_str), vmax=vmax)\n \n return lambdas, vmax\n \n \n \nif __name__ == '__main__':\n \n# path = '..\/images\/original.png'\n path = '..\/images\/lippmann_image.jpg'\n im = read_image(path) \n \n lambdas, vmax = generate_slices(im, N=500)\n \n# spectrum = compute_end_plate(im[:800, :750, :3], lambdas, vmax) \n spectrum = compute_end_plate(im[:, :, :3], lambdas, vmax) \n \n# misc.imsave('Figures\/front.png', im[:800, :750])\n misc.imsave('Figures\/front.png', im)\n \n plt.figure()\n plt.imshow(im)\n plt.figure()\n# plt.imshow(im[:800, :750, :3])\n plt.imshow(im[:, :, :3])\n \n "} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_310","text":"testing\/gen_feature_space_samples.py\nimport numpy as np\nimport pandas as pd\nimport graphviz\nimport graphviz.backend\nfrom numpy.distutils.system_info import f2py_info\nfrom sklearn import tree\nfrom sklearn.datasets import load_boston, load_iris, load_wine, load_digits, load_breast_cancer, load_diabetes, fetch_mldata\nfrom matplotlib.figure import figaspect\nimport string\nimport re\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom dtreeviz.shadow import *\nfrom numbers import Number\nimport matplotlib.patches as patches\nfrom scipy import stats\nfrom sklearn.neighbors import KernelDensity\nimport inspect, sys, tempfile\n\nfrom dtreeviz.trees import *\n\ndef viz_digits(features, feature_names, max_depth):\n digits = load_digits()\n\n # \"8x8 image of integer pixels in the range 0..16.\"\n columns = [f'pixel[{i},{j}]' for i in range(8) for j in range(8)]\n\n fig, ax = plt.subplots(1, 1)\n X_train = digits.data[:,features]\n y_train = digits.target\n if len(features)==1:\n x_train = digits.data[:, features[0]]\n\n ctreeviz_univar(ax, x_train, y_train, max_depth=max_depth, feature_name=feature_names[0],\n class_names=[str(i) for i in range(10)], gtype='strip', target_name='digit')\n filename = f\"\/tmp\/digits-{feature_names[0]}-featspace-depth-{max_depth}.svg\"\n else:\n ctreeviz_bivar(ax, X_train, y_train, max_depth=max_depth,\n feature_names=feature_names, class_names=[str(i) for i in range(10)], target_name='digit')\n filename = f\"\/tmp\/digits-{','.join(feature_names)}-featspace-depth-{max_depth}.svg\"\n\n print(f\"Create {filename}\")\n plt.tight_layout()\n plt.savefig(filename, bbox_inches=0, pad_inches=0)\n plt.show()\n # plt.close()\n\ndef viz_wine(features, feature_names, max_depth):\n wine = load_wine()\n\n X_train = wine.data[:,features]\n y_train = wine.target\n if len(features)==1:\n figsize = (6, 2)\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n x_train = wine.data[:, features[0]]\n\n ctreeviz_univar(ax, x_train, y_train, 
max_depth=max_depth, feature_name=feature_names[0],\n class_names=list(wine.target_names), gtype='strip', target_name='wine')\n filename = f\"\/tmp\/wine-{feature_names[0]}-featspace-depth-{max_depth}.svg\"\n else:\n figsize = (6, 5)\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n ctreeviz_bivar(ax, X_train, y_train, max_depth=max_depth,\n feature_names=feature_names, class_names=list(wine.target_names), target_name='wine',show={'splits'})\n filename = f\"\/tmp\/wine-{','.join(feature_names)}-featspace-depth-{max_depth}.svg\"\n\n print(f\"Create {filename}\")\n plt.tight_layout()\n plt.savefig(filename, bbox_inches=0, pad_inches=0)\n plt.show()\n\n\ndef viz_knowledge(features, feature_names, max_depth):\n know = pd.read_csv(\"data\/knowledge.csv\")\n class_names = ['very_low', 'Low', 'Middle', 'High']\n know['UNS'] = know['UNS'].map({n: i for i, n in enumerate(class_names)})\n\n X_train = know.drop('UNS', axis=1)\n X_train = X_train.values[:,features]\n y_train = know['UNS']\n if len(features)==1:\n figsize = (6, 2)\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n x_train = know.PEG\n\n ctreeviz_univar(ax, x_train, y_train, max_depth=max_depth, feature_name=feature_names[0],\n class_names=class_names, gtype='strip', target_name='knowledge')\n filename = f\"\/tmp\/knowledge-{feature_names[0]}-featspace-depth-{max_depth}.svg\"\n else:\n figsize = (6, 5)\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n ctreeviz_bivar(ax, X_train, y_train, max_depth=max_depth,\n feature_names=feature_names, class_names=class_names, target_name='knowledge')\n filename = f\"\/tmp\/knowledge-{','.join(feature_names)}-featspace-depth-{max_depth}.svg\"\n\n print(f\"Create {filename}\")\n plt.tight_layout()\n plt.savefig(filename, bbox_inches=0, pad_inches=0)\n plt.show()\n\n\ndef viz_diabetes(features, feature_names, max_depth):\n diabetes = load_diabetes()\n\n X_train = diabetes.data\n X_train = X_train[:,features]\n y_train = diabetes.target\n if len(features)==1:\n figsize = (6, 2)\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n x_train = diabetes.data[:, features[0]]\n\n rtreeviz_univar(ax, x_train, y_train, max_depth=max_depth, feature_name=feature_names[0], target_name='diabetes')\n filename = f\"\/tmp\/diabetes-{feature_names[0]}-featspace-depth-{max_depth}.svg\"\n else:\n figsize = (6, 5)\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n rtreeviz_bivar_heatmap(ax, X_train, y_train, max_depth=max_depth,\n feature_names=feature_names)\n filename = f\"\/tmp\/diabetes-{','.join(feature_names)}-featspace-depth-{max_depth}.svg\"\n\n print(f\"Create {filename}\")\n plt.tight_layout()\n plt.savefig(filename, bbox_inches=0, pad_inches=0)\n plt.show()\n\n\ndef viz_boston(features, feature_names, max_depth):\n boston = load_boston()\n\n X_train = boston.data\n X_train = X_train[:,features]\n y_train = boston.target\n if len(features)==1:\n figsize = (6, 2)\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n x_train = boston.data[:, features[0]]\n\n rtreeviz_univar(ax, x_train, y_train, max_depth=max_depth, feature_name=feature_names[0], target_name='price')\n filename = f\"\/tmp\/boston-{feature_names[0]}-featspace-depth-{max_depth}.svg\"\n else:\n figsize = (6, 5)\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n rtreeviz_bivar_heatmap(ax, X_train, y_train, max_depth=max_depth,\n feature_names=feature_names)\n filename = f\"\/tmp\/boston-{','.join(feature_names)}-featspace-depth-{max_depth}.svg\"\n\n print(f\"Create {filename}\")\n plt.tight_layout()\n plt.savefig(filename, bbox_inches=0, pad_inches=0)\n 
plt.show()\n\n\nviz_boston(features=[5],feature_names=['RM'], max_depth=2)\nviz_boston(features=[5],feature_names=['RM'], max_depth=4)\nviz_boston(features=[5,12],feature_names=['RM','LSTAT'], max_depth=2)\nviz_boston(features=[5,12],feature_names=['RM','LSTAT'], max_depth=4)\n\nviz_diabetes(features=[2],feature_names=['bmi'], max_depth=2)\nviz_diabetes(features=[2],feature_names=['bmi'], max_depth=5)\nviz_diabetes(features=[2,0],feature_names=['bmi','age'], max_depth=2)\nviz_diabetes(features=[2,0],feature_names=['bmi','age'], max_depth=5)\n\nviz_knowledge(features=[4],feature_names=['PEG'], max_depth=2)\nviz_knowledge(features=[4],feature_names=['PEG'], max_depth=3)\nviz_knowledge(features=[4,3],feature_names=['PEG','LPR'], max_depth=2)\nviz_knowledge(features=[4,3],feature_names=['PEG','LPR'], max_depth=3)\n\nviz_wine(features=[12],feature_names=['proline'], max_depth=2)\nviz_wine(features=[12],feature_names=['proline'], max_depth=3)\nviz_wine(features=[12,6],feature_names=['proline','flavanoids'], max_depth=1)\nviz_wine(features=[12,6],feature_names=['proline','flavanoids'], max_depth=2)\nviz_wine(features=[12,6],feature_names=['proline','flavanoids'], max_depth=3)\nviz_digits(features=[2*8+5], feature_names=['pixel[2,5]'], max_depth=20)\nviz_digits(features=[4*8+4,2*8+5], feature_names=['pixel[4,4]','pixel[2,5]'], max_depth=5)"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_311","text":"appleface2050\/Coursera-ML\n# coding:utf-8\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn import svm\nimport seaborn as sns\nimport scipy.io as sio\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\n\nif __name__ == '__main__':\n mat = sio.loadmat('data\/ex6data3.mat')\n print(mat.keys())\n training = pd.DataFrame(mat.get('X'), columns=['X1', 'X2'])\n training['y'] = mat.get('y')\n\n cv = pd.DataFrame(mat.get('Xval'), columns=['X1', 'X2'])\n cv['y'] = mat.get('yval')\n print(training.shape)\n print(training.head())\n\n print(cv.shape)\n print(cv.head())\n\n candidate = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30, 100]\n # gamma to comply with sklearn parameter name\n combination = [(C, gamma) for C in candidate for gamma in candidate]\n print(len(combination))\n\n search = []\n\n for C, gamma in combination:\n svc = svm.SVC(C=C, gamma=gamma)\n svc.fit(training[['X1', 'X2']], training['y'])\n search.append(svc.score(cv[['X1', 'X2']], cv['y']))\n print(search)\n best_score = search[np.argmax(search)]\n best_param = combination[np.argmax(search)]\n print(best_score, best_param)\n\n best_svc = svm.SVC(C=best_param[1], gamma=best_param[0])\n best_svc.fit(training[['X1', 'X2']], training['y'])\n ypred = best_svc.predict(cv[['X1', 'X2']])\n\n print(metrics.classification_report(cv['y'], ypred))"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_312","text":"1-10\n\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom matplotlib.dates import MonthLocator\n# import matplotlib.ticker\nimport numpy as np\nfrom sense.canopy import OneLayer\nfrom sense.soil import Soil\nfrom sense import model\nimport scipy.stats\nfrom scipy.optimize import minimize\nimport pdb\n\n\n# Helper functions for statistical parameters\n#--------------------------------------------\ndef rmse_prediction(predictions, targets):\n \"\"\" calculation of RMSE \"\"\"\n return np.sqrt(np.nanmean((predictions - targets) ** 2))\n\ndef linregress(predictions, targets):\n \"\"\" Calculate a linear least-squares regression 
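# --- Illustrative sketch (synthetic data, not the ex6data3 experiment): the same
# --- (C, gamma) grid-search pattern scored on a held-out split and refit with
# --- the best pair, keeping the tuple order identical in search and refit.
import numpy as np
from sklearn import svm
from sklearn.model_selection import train_test_split

rng = np.random.RandomState(0)
X = rng.randn(200, 2)
y = (X[:, 0] * X[:, 1] > 0).astype(int)
X_tr, X_cv, y_tr, y_cv = train_test_split(X, y, test_size=0.3, random_state=0)

candidate = [0.01, 0.1, 1, 10, 100]
combination = [(C, gamma) for C in candidate for gamma in candidate]
scores = [svm.SVC(C=C, gamma=gamma).fit(X_tr, y_tr).score(X_cv, y_cv)
          for C, gamma in combination]

best_C, best_gamma = combination[int(np.argmax(scores))]
best_svc = svm.SVC(C=best_C, gamma=best_gamma).fit(X_tr, y_tr)
print(best_C, best_gamma, best_svc.score(X_cv, y_cv))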
for two sets of measurements \"\"\"\n slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(predictions, targets)\n return slope, intercept, r_value, p_value, std_err\n\ndef read_mni_data(path, file_name, extention, field, sep=';'):\n \"\"\" read MNI campaign data \"\"\"\n df = pd.io.parsers.read_csv(os.path.join(path, file_name + extension), header=[0, 1], sep=sep)\n df = df.set_index(pd.to_datetime(df[field]['date']))\n df = df.drop(df.filter(like='date'), axis=1)\n return df\n\ndef read_agrometeo(path, file_name, extentio, sep=';', decimal=','):\n \"\"\" read agro-meteorological station (hourly data) \"\"\"\n df = pd.read_csv(os.path.join(path, file_name + extension), sep=sep, decimal=decimal)\n df['SUM_NN050'] = df['SUM_NN050'].str.replace(',','.')\n df['SUM_NN050'] = df['SUM_NN050'].str.replace('-','0').astype(float)\n\n df['date'] = df['Tag'] + ' ' + df['Stunde']\n\n df = df.set_index(pd.to_datetime(df['date'], format='%d.%m.%Y %H:%S'))\n return df\n\ndef filter_relativorbit(data, field, orbit1, orbit2=None, orbit3=None, orbit4=None):\n \"\"\" data filter for relativ orbits \"\"\"\n output = data[[(check == orbit1 or check == orbit2 or check == orbit3 or check == orbit4) for check in data[(field,'relativeorbit')]]]\n return output\n\ndef smooth(x,window_len=11,window='hanning'):\n if x.ndim != 1:\n raise ValueError #, \"smooth only accepts 1 dimension arrays.\"\n if x.size < window_len:\n raise ValueError #, \"Input vector needs to be bigger than window size.\"\n if window_len<3:\n return x\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError #, \"Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\"\n s=np.r_[2*x[0]-x[window_len-1::-1],x,2*x[-1]-x[-1:-window_len:-1]]\n if window == 'flat': #moving average\n w=np.ones(window_len,'d')\n else:\n w=eval('np.'+window+'(window_len)')\n y=np.convolve(w\/w.sum(),s,mode='same')\n return y[window_len:-window_len+1]\n\ndef read_data(path, file_name, extension, field, path_agro, file_name_agro, extension_agro):\n # Read MNI data\n df = read_mni_data(path, file_name, extension, field)\n\n # Read agro-meteorological station\n df_agro = read_agrometeo(path_agro, file_name_agro, extension_agro)\n\n # filter for field\n field_data = df.filter(like=field)\n\n # filter for relativorbit\n field_data_orbit = filter_relativorbit(field_data, field, 95, 168)\n # field_data = field_data_orbit\n\n # get rid of NaN values\n parameter_nan = 'LAI'\n field_data = field_data[~np.isnan(field_data.filter(like=parameter_nan).values)]\n\n # available auxiliary data\n theta_field = np.deg2rad(field_data.filter(like='theta'))\n # theta_field[:] = 45\n sm_field = field_data.filter(like='SM')\n height_field = field_data.filter(like='Height')\/100\n lai_field = field_data.filter(like='LAI')\n vwc_field = field_data.filter(like='VWC')\n pol_field = field_data.filter(like='sigma_sentinel_'+pol)\n return df, df_agro, field_data, field_data_orbit, theta_field, sm_field, height_field, lai_field, vwc_field, pol_field\n\n### Optimization ###\n#-----------------------------------------------------------------\ndef solve_fun(VALS):\n\n for i in range(len(var_opt)):\n dic[var_opt[i]] = VALS[i]\n\n ke = dic['coef'] * np.sqrt(dic['lai'])\n # ke = dic['coef'] * np.sqrt(dic['vwc'])\n # ke=1\n dic['ke'] = ke\n\n # surface\n soil = Soil(mv=dic['mv'], C_hh=dic['C_hh'], C_vv=dic['C_vv'], D_hh=dic['D_hh'], D_vv=dic['D_vv'], C_hv=dic['C_hv'], D_hv=dic['D_hv'], V2=dic['V2'], s=dic['s'], clay=dic['clay'], 
sand=dic['sand'], f=dic['f'], bulk=dic['bulk'], l=dic['l'])\n\n # canopy\n can = OneLayer(canopy=dic['canopy'], ke_h=dic['ke'], ke_v=dic['ke'], d=dic['d'], ks_h = dic['omega']*dic['ke'], ks_v = dic['omega']*dic['ke'], V1=dic['V1'], V2=dic['V2'], A_hh=dic['A_hh'], B_hh=dic['B_hh'], A_vv=dic['A_vv'], B_vv=dic['B_vv'], A_hv=dic['A_hv'], B_hv=dic['B_hv'])\n\n S = model.RTModel(surface=soil, canopy=can, models=models, theta=dic['theta'], freq=dic['f'])\n S.sigma0()\n\n return S.__dict__['stot'][pol[::-1]]\n\ndef fun_opt(VALS):\n\n\n # return(10.*np.log10(np.nansum(np.square(solve_fun(VALS)-dic['pol_value']))))\n return(np.nansum(np.square(solve_fun(VALS)-dic['pol_value'])))\n\ndef data_optimized_run(n, field_data, theta_field, sm_field, height_field, lai_field, vwc_field, pol):\n n = np.int(np.floor(n\/2))\n\n if n > 0:\n field_data = field_data.drop(field_data.index[-n:])\n field_data = field_data.drop(field_data.index[0:n])\n theta_field = theta_field.drop(theta_field.index[-n:])\n theta_field = theta_field.drop(theta_field.index[0:n])\n\n sm_field = field_data.filter(like='SM')\n height_field = field_data.filter(like='Height')\/100\n lai_field = field_data.filter(like='LAI')\n vwc_field = field_data.filter(like='VWC')\n\n vv_field = field_data.filter(like='sigma_sentinel_vv')\n vh_field = field_data.filter(like='sigma_sentinel_vh')\n\n pol_field = field_data.filter(like='sigma_sentinel_'+pol)\n return field_data, theta_field, sm_field, height_field, lai_field, vwc_field, vv_field, vh_field, pol_field\n#-----------------------------------------------------------------\n\n### Data preparation ###\n#-----------------------------------------------------------------\n# storage information\npath = '\/media\/tweiss\/Daten\/new_data'\nfile_name = 'multi10' # theta needs to be changed to for norm multi\nextension = '.csv'\n\npath_agro = '\/media\/nas_data\/2017_MNI_campaign\/field_data\/meteodata\/agrarmeteorological_station'\nfile_name_agro = 'Eichenried_01012017_31122017_hourly'\nextension_agro = '.csv'\n\nfield = '508_high'\nfield_plot = ['508_high', '508_low', '508_med']\npol = 'vv'\n# pol = 'vh'\n\n# output path\nplot_output_path = '\/media\/tweiss\/Daten\/plots\/paper\/'\n\ndf, df_agro, field_data, field_data_orbit, theta_field, sm_field, height_field, lai_field, vwc_field, pol_field = read_data(path, file_name, extension, field, path_agro, file_name_agro, extension_agro)\n\n#-----------------------------------------------------------------\n\n### Run SenSe module\n#-----------------------------------------------------------------\n#### Choose models\n#-----------------\n\nsurface_list = ['Oh92', 'Oh04', 'Dubois95', 'WaterCloud', 'I2EM']\n# surface_list = ['Oh92', 'Oh04', 'WaterCloud']\n# surface_list = ['WaterCloud']\ncanopy_list = ['turbid_isotropic', 'water_cloud']\n# canopy_list = ['water_cloud']\n\n# surface_list = ['Oh92']\n# surface_list = ['Oh04']\n# surface_list = ['Dubois95']\n# surface_list = ['WaterCloud']\n# surface_list = ['I2EM']\n# canopy_list = ['turbid_isotropic']\n# canopy_list = ['water_cloud']\n\n### option for time invariant or variant calibration of parameter\n#-------------------------------\nopt_mod = 'time invariant'\n# opt_mod = 'time variant'\n#---------------------------\n\n### plot option: \"single\" or \"all\" modelcombination\n#------------------------------\n# plot = 'single'\nplot = 'all'\n#------------------------------\n\n### plot option scatterplot or not\n#-------------------------------\n# style = 'scatterplot'\nstyle = ''\n\n### plot option for 
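# --- Illustrative sketch (toy forward model, not the SenSe RT model above): the
# --- calibration minimises a sum of squared residuals between modelled and
# --- observed backscatter using L-BFGS-B with box bounds on the free parameter,
# --- just as ``fun_opt`` is minimised below.
import numpy as np
from scipy.optimize import minimize

lai = np.linspace(0.5, 4.0, 20)
observed = 1.3 * np.sqrt(lai) + 0.05 * np.random.RandomState(1).randn(lai.size)

def forward(coef):
    return coef * np.sqrt(lai)            # stand-in for S.sigma0() / stot

def objective(vals):
    return np.nansum(np.square(forward(vals[0]) - observed))

res = minimize(objective, x0=[2.0], bounds=[(0.001, 5.5)], method="L-BFGS-B")
print(res.x)                              # recovers a coefficient near 1.3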
scatterplot single ESU\n#------------------------------------\n# style_2 = 'scatterplot_single_ESU'\nstyle_2 = ''\n#-----------------------------------\n\n# Initialize plot settings\n#---------------------------\nif style == 'scatterplot':\n fig, ax = plt.subplots(figsize=(10, 10))\nelse:\n fig, ax = plt.subplots(figsize=(17, 10))\n# plt.title('Winter Wheat')\nplt.ylabel('Backscatter [dB]', fontsize=15)\nplt.xlabel('Date', fontsize=15)\nplt.tick_params(labelsize=12)\n\n\nif pol == 'vv':\n ax.set_ylim([-25,-7.5])\nelif pol == 'vh':\n ax.set_ylim([-30,-15])\n\ncolormaps = ['Greens', 'Purples', 'Blues', 'Oranges', 'Reds', 'Greys', 'pink', 'bone', 'Blues', 'Blues', 'Blues']\nj = 0\n\ncolormap = plt.get_cmap(colormaps[j])\ncolors = [colormap(jj) for jj in np.linspace(0.35, 1., 3)]\n\nfor k in surface_list:\n\n for kk in canopy_list:\n df, df_agro, field_data, field_data_orbit, theta_field, sm_field, height_field, lai_field, vwc_field, pol_field = read_data(path, file_name, extension, field, path_agro, file_name_agro, extension_agro)\n freq = 5.405\n clay = 0.08\n sand = 0.12\n bulk = 1.5\n s = 0.0105 # vv\n s = 0.0115\n # s = 0.009 # vh ?????\n\n C_hh = 0\n D_hh = 0\n C_hv = -22.5\n D_hv = 3.2\n C_vv = -14.609339\n D_vv = 12.884086\n\n ### Canopy\n # Water Cloud (A, B, V1, V2, theta)\n # SSRT (coef, omega, theta)\n #-----------------------------------\n A_hh = 0\n B_hh = 0\n A_hv = 0.029\n B_hv = 0.0013\n A_vv = 0.0029\n B_vv = 0.13\n V1 = lai_field.values.flatten()\n V2 = V1 # initialize in surface model\n coef = 1.\n omega = 0.027 # vv\n omega = 0.015 # vh\n # IEM\n l = 0.01\n\n\n surface = k\n canopy = kk\n models = {'surface': surface, 'canopy': canopy}\n\n #### Optimization\n #-----------------\n\n if opt_mod == 'time invariant':\n\n dic = {\"mv\":sm_field.values.flatten(), \"C_hh\":C_hh, \"C_vv\":C_vv, \"D_hh\":D_hh, \"D_vv\":D_vv, \"C_hv\":C_hv, \"D_hv\":D_hv, \"s\":s, \"clay\":clay, \"sand\":sand, \"f\":freq, \"bulk\":bulk, \"l\":l, \"canopy\":canopy, \"d\":height_field.values.flatten(), \"V1\":V1, \"V2\":V2, \"A_hh\":A_hh, \"B_hh\":B_hh, \"A_vv\":A_vv, \"B_vv\":B_vv, \"A_hv\":A_hv, \"B_hv\":B_hv, \"lai\":lai_field.values.flatten(), \"vwc\":vwc_field.values.flatten(), \"pol_value\":pol_field.values.flatten(), \"theta\":theta_field.values.flatten(), \"omega\": omega, \"coef\": coef}\n\n if canopy == 'turbid_isotropic':\n var_opt = ['coef']\n guess = [2.]\n bounds = [(0.001,5.5)]\n elif surface == 'WaterCloud' and canopy == 'water_cloud':\n var_opt = ['A_vv', 'B_vv', 'A_hv', 'B_hv', 'C_vv', 'D_vv', 'C_hv', 'D_hv']\n guess = [A_vv, B_vv, A_hv, B_hv, C_vv, D_vv, C_hv, D_hv]\n bounds = [(0.,1), (0.,1), (0.,1), (0.,1), (-20.,-1.), (1.,20.), (-20.,-1.), (1.,20.)]\n elif canopy == 'water_cloud':\n var_opt = ['A_vv', 'B_vv', 'A_hv', 'B_hv']\n guess = [A_vv, B_vv, A_hv, B_hv]\n bounds = [(0.,1), (0.,1), (0.,1), (0.,1)]\n\n method = 'L-BFGS-B'\n\n res = minimize(fun_opt,guess,bounds=bounds, method=method)\n\n fun_opt(res.x)\n aaa = res.x\n\n if opt_mod == 'time variant':\n aaa = [[],[],[],[],[],[],[],[],[],[],[],[]]\n n=7\n\n for i in range(len(pol_field.values.flatten())-n+1):\n\n if type(coef) == float:\n dic = {\"mv\":sm_field.values.flatten()[i:i+n], \"C_hh\":C_hh, \"C_vv\":C_vv, \"D_hh\":D_hh, \"D_vv\":D_vv, \"C_hv\":C_hv, \"D_hv\":D_hv, \"V2\":V2[i:i+n], \"s\":s, \"clay\":clay, \"sand\":sand, \"f\":freq, \"bulk\":bulk, \"l\":l, \"canopy\":canopy, \"d\":height_field.values.flatten()[i:i+n], \"V1\":V1[i:i+n], \"A_hh\":A_hh, \"B_hh\":B_hh, \"A_vv\":A_vv, \"B_vv\":B_vv, \"A_hv\":A_hv, 
\"B_hv\":B_hv, \"lai\":lai_field.values.flatten()[i:i+n], \"vwc\":vwc_field.values.flatten()[i:i+n], \"pol_value\":pol_field.values.flatten()[i:i+n], \"theta\":theta_field.values.flatten()[i:i+n], \"omega\": omega, \"coef\": coef}\n else:\n dic = {\"mv\":sm_field.values.flatten()[i:i+n], \"C_hh\":C_hh, \"C_vv\":C_vv, \"D_hh\":D_hh, \"D_vv\":D_vv, \"C_hv\":C_hv, \"D_hv\":D_hv, \"V2\":V2[i:i+n], \"s\":s, \"clay\":clay, \"sand\":sand, \"f\":freq, \"bulk\":bulk, \"l\":l, \"canopy\":canopy, \"d\":height_field.values.flatten()[i:i+n], \"V1\":V1[i:i+n], \"A_hh\":A_hh, \"B_hh\":B_hh, \"A_vv\":A_vv, \"B_vv\":B_vv, \"A_hv\":A_hv, \"B_hv\":B_hv, \"lai\":lai_field.values.flatten()[i:i+n], \"vwc\":vwc_field.values.flatten()[i:i+n], \"pol_value\":pol_field.values.flatten()[i:i+n], \"theta\":theta_field.values.flatten()[i:i+n], \"omega\": omega, \"coef\": coef[i:i+n]}\n\n if canopy == 'turbid_isotropic' and surface == 'WaterCloud':\n var_opt = ['coef', 'C_vv', 'D_vv', 'C_hv', 'D_hv']\n guess = [0.01, C_vv, D_vv, C_hv, D_hv]\n bounds = [(0.1,5.5), (-20.,-1.), (1.,20.), (-20.,-1.), (1.,20.)]\n elif canopy == 'turbid_isotropic':\n var_opt = ['coef']\n guess = [0.1]\n bounds = [(0.,2)]\n elif surface == 'WaterCloud' and canopy == 'water_cloud':\n # var_opt = ['A_vv', 'B_vv', 'A_hv', 'B_hv', 'C_vv', 'D_vv', 'C_hv', 'D_hv']\n # guess = [A_vv, B_vv, A_hv, B_hv, C_vv, D_vv, C_hv, D_hv]\n # bounds = [(0.,1), (guess[1]*0.55, guess[1]*1.55), (0.,1), (guess[3]*0.75, guess[3]*1.25), (-20.,-1.), (1.,20.), (-20.,-1.), (1.,20.)]\n var_opt = ['C_vv', 'D_vv', 'C_hv', 'D_hv']\n guess = [C_vv, D_vv, C_hv, D_hv]\n bounds = [(-20.,-1.), (1.,20.), (-20.,-1.), (1.,20.)]\n elif canopy == 'water_cloud':\n var_opt = ['A_vv', 'B_vv', 'A_hv', 'B_hv']\n guess = [A_vv, B_vv, A_hv, B_hv]\n bounds = [(0.,1), (0.,1), (0.00001,1), (0.00001,1)]\n\n # var_opt = ['omega']\n # guess = [0.1]\n # bounds = [(0.,5.5)]\n\n # var_opt = ['s', 'coef', 'omega']\n # guess = [0.01, 0.1, 0.01]\n # bounds = [(0.001,0.03),(0.,2.5),(0.001,0.1)]\n\n # var_opt = ['C_hv', 'D_hv']\n # guess = [-13, 14]\n # bounds = [(-200.,100.),(-200.,400.)]\n\n # var_opt = ['A_vv', 'B_vv']\n\n # try:\n # guess = [res.x[0], res.x[1]]\n # except:\n # guess = [0.005, 0.09]\n # # bounds = [(0.000,5.),(0.001,5.)]\n # bounds = [(guess[0]*0.75, guess[0]*1.25), (guess[1]*0.75, guess[1]*1.25)]\n # bounds = [(guess[0]*0.9, guess[0]*1.1), (guess[1]*0.75, guess[1]*1.25)]\n # var_opt = ['coef', 'omega']\n # guess = [0.1, 0.22]\n # bounds = [(0.,5.5),(0.00001,0.2)]\n method = 'L-BFGS-B'\n # method = 'trust-exact'\n\n res = minimize(fun_opt,guess,bounds=bounds, method=method)\n\n fun_opt(res.x)\n\n for j in range(len(res.x)):\n aaa[j].append(res.x[j])\n\n field_data, theta_field, sm_field, height_field, lai_field, vwc_field, vv_field, vh_field, pol_field = data_optimized_run(n, field_data, theta_field, sm_field, height_field, lai_field, vwc_field, pol)\n V1 = lai_field.values.flatten()\n V2 = V1 # initialize in surface model\n\n #-----------------------------------------------------------------\n\n for i in range(len(res.x)):\n exec('%s = %s' % (var_opt[i],aaa[i]))\n\n ke = coef * np.sqrt(lai_field.values.flatten())\n # ke = smooth(ke, 11)\n\n soil = Soil(mv=sm_field.values.flatten(), C_hh=np.array(C_hh), C_vv=np.array(C_vv), D_hh=np.array(D_hh), D_vv=np.array(D_vv), C_hv=np.array(C_hv), D_hv=np.array(D_hv), s=s, clay=clay, sand=sand, f=freq, bulk=bulk, l=l)\n\n can = OneLayer(canopy=canopy, ke_h=ke, ke_v=ke, d=height_field.values.flatten(), ks_h = omega*ke, ks_v = omega*ke, 
V1=np.array(V1), V2=np.array(V2), A_hh=np.array(A_hh), B_hh=np.array(B_hh), A_vv=np.array(A_vv), B_vv=np.array(B_vv), A_hv=np.array(A_hv), B_hv=np.array(B_hv))\n\n S = model.RTModel(surface=soil, canopy=can, models=models, theta=theta_field.values.flatten(), freq=freq)\n S.sigma0()\n#-----------------------------------------------------------------\n date = field_data.index\n\n colormap = plt.get_cmap(colormaps[j])\n colors = [colormap(jj) for jj in np.linspace(0.35, 1., 4)]\n\n # ax.plot(10*np.log10(pol_field), 'ks-', label='Sentinel-1 Pol: ' + pol, linewidth=3)\n # ax.plot(date, 10*np.log10(S.__dict__['s0g'][pol[::-1]]), color=colors[0], marker='s', linestyle='--', label=pol+' s0g')\n # ax.plot(date, 10*np.log10(S.__dict__['s0c'][pol[::-1]]), color=colors[1], marker='s', linestyle='--', label=pol+' s0c')\n # ax.plot(date, 10*np.log10(S.__dict__['s0cgt'][pol[::-1]]), 'ms-', label=pol+' s0cgt')\n # ax.plot(date, 10*np.log10(S.__dict__['s0gcg'][pol[::-1]]), 'ys-', label=pol+' s0gcg')\n\n mask = ~np.isnan(pol_field.values.flatten()) & ~np.isnan(S.__dict__['stot'][pol[::-1]])\n slope, intercept, r_value, p_value, std_err = scipy.stats.linregress((pol_field.values.flatten()[mask]), (S.__dict__['stot'][pol[::-1]][mask]))\n slope1, intercept1, r_value1, p_value1, std_err1 = scipy.stats.linregress(10*np.log10(pol_field.values.flatten())[mask], 10*np.log10(S.__dict__['stot'][pol[::-1]])[mask])\n rmse = rmse_prediction(10*np.log10(pol_field.values.flatten()), 10*np.log10(S.__dict__['stot'][pol[::-1]]))\n\n if k == 'Oh92':\n hm = 'Oh92'\n colors = 'blue'\n elif k == 'Oh04':\n hm = 'Oh04'\n colors = 'red'\n elif k == 'Dubois95':\n hm='Dubois95'\n colors = 'orange'\n elif k == 'WaterCloud':\n hm = 'Water Cloud'\n colors = 'purple'\n elif k == 'I2EM':\n hm = 'IEM'\n colors = 'green'\n\n if plot == 'all':\n if kk == 'turbid_isotropic':\n\n ax.plot(date, 10*np.log10(S.__dict__['stot'][pol[::-1]]), color=colors, marker='s', linestyle='dashed', label = hm+ ' + ' + 'SSRT' + '; Pol: ' + pol + '; RMSE: ' + str(rmse)[0:4] + '; $R^2$: ' + str(r_value)[0:4])\n else:\n ax.plot(date, 10*np.log10(S.__dict__['stot'][pol[::-1]]), color=colors, marker='s', label = hm+ ' + ' + 'Water Cloud' + '; Pol: ' + pol + '; RMSE: ' + str(rmse)[0:4] + '; $R^2$: ' + str(r_value)[0:4])\n\n if plot == 'single':\n if style == 'scatterplot':\n if pol == 'vv':\n ax.set_xlim([-22.5,-7.5])\n elif pol == 'vh':\n ax.set_xlim([-30,-15])\n\n if style_2 == 'scatterplot_single_ESU':\n ax.plot(10*np.log10(pol_field.values.flatten()),10*np.log10(S.__dict__['stot'][pol[::-1]]), 'rs', label=field)\n\n x = 10*np.log10(pol_field.values.flatten())\n y = 10*np.log10(S.__dict__['stot'][pol[::-1]])\n\n lower_position = np.nanargmin(x)\n upper_position = np.nanargmax(x)\n\n ax.plot(np.array((x[lower_position],x[upper_position])),np.array((y[lower_position],y[upper_position])), '--r')\n\n\n else:\n aa = []\n bb = []\n # cc = []\n\n # field_plot = ['508_high', '508_low', '508_med']\n jj = 0\n colors = ['ks', 'ys', 'ms', 'rs']\n\n for field in field_plot:\n df, df_agro, field_data, field_data_orbit, theta_field, sm_field, height_field, lai_field, vwc_field, pol_field = read_data(path, file_name, extension, field, path_agro, file_name_agro, extension_agro)\n field_data, theta_field, sm_field, height_field, lai_field, vwc_field, vv_field, vh_field, pol_field = data_optimized_run(n, field_data, theta_field, sm_field, height_field, lai_field, vwc_field, pol)\n\n soil = Soil(mv=sm_field.values.flatten(), C_hh=np.array(C_hh), C_vv=np.array(C_vv), 
D_hh=np.array(D_hh), D_vv=np.array(D_vv), C_hv=np.array(C_hv), D_hv=np.array(D_hv), s=s, clay=clay, sand=sand, f=freq, bulk=bulk, l=l)\n\n can = OneLayer(canopy=canopy, ke_h=ke, ke_v=ke, d=height_field.values.flatten(), ks_h = omega*ke, ks_v = omega*ke, V1=np.array(V1), V2=np.array(V2), A_hh=np.array(A_hh), B_hh=np.array(B_hh), A_vv=np.array(A_vv), B_vv=np.array(B_vv), A_hv=np.array(A_hv), B_hv=np.array(B_hv))\n\n S = model.RTModel(surface=soil, canopy=can, models=models, theta=theta_field.values.flatten(), freq=freq)\n S.sigma0()\n\n ax.plot(10*np.log10(pol_field.values.flatten()),10*np.log10(S.__dict__['stot'][pol[::-1]]), colors[jj], label=field)\n\n slope, intercept, r_value, p_value, std_err = linregress(10*np.log10(pol_field.values.flatten())[~np.isnan(10*np.log10(S.__dict__['stot'][pol[::-1]]))], 10*np.log10(S.__dict__['stot'][pol[::-1]])[~np.isnan(10*np.log10(S.__dict__['stot'][pol[::-1]]))])\n line = slope * 10*np.log10(S.__dict__['stot'][pol[::-1]]) + intercept\n\n # ax.plot(10*np.log10(S.__dict__['stot'][pol[::-1]]), line)\n\n lower_position = np.nanargmin(line)\n upper_position = np.nanargmax(line)\n\n ax.plot(np.array((10*np.log10(S.__dict__['stot'][pol[::-1]])[lower_position],10*np.log10(S.__dict__['stot'][pol[::-1]])[upper_position])),np.array((line[lower_position],line[upper_position])), '--'+colors[jj][0])\n\n aa = np.append(aa, 10*np.log10(pol_field.values.flatten()))\n bb = np.append(bb, 10*np.log10(S.__dict__['stot'][pol[::-1]]))\n jj = jj+1\n else:\n ax.plot(date, 10*np.log10(S.__dict__['stot'][pol[::-1]]), color='orange', marker='s', label=S.models['surface']+ ' + ' + S.models['canopy'] + ' Pol: ' + pol + '; RMSE: ' + str(rmse)[0:4] + '; $R^2$: ' + str(r_value)[0:4])\n ax.plot(date, 10*np.log10(S.__dict__['s0g'][pol[::-1]]), color='red', marker='s', label='Ground contribution')\n ax.plot(date, 10*np.log10(S.__dict__['s0c'][pol[::-1]]), color='green', marker='s', label='Canopy contribution')\n\n j = j+1\n\n\nif style == 'scatterplot':\n pass\nelse:\n ax.plot(10*np.log10(pol_field), 'ks-', label='Sentinel-1 Pol: ' + pol, linewidth=3)\n plt.legend()\n plt.title(field)\n\nif plot == 'all':\n # plt.show()\n plt.savefig(plot_output_path+pol+'_all_'+opt_mod)\n\nif plot == 'single':\n if style == 'scatterplot':\n plt.ylabel(surface + ' ' + canopy + ' [dB]')\n plt.xlabel('Sentinel-1 [dB]')\n plt.legend()\n x = np.linspace(np.min(10*np.log10(pol_field.values.flatten()))-2, np.max(10*np.log10(pol_field.values.flatten()))+2, 16)\n ax.plot(x,x)\n if style_2 == 'scatterplot_single_ESU':\n www = rmse_prediction(10*np.log10(pol_field).values.flatten(), 10*np.log10(S.__dict__['stot'][pol[::-1]]))\n plt.title(pol+' ' + field + ' ' + surface + ' ' + canopy + '$R^2$='+str(r_value)+' RMSE='+str(www))\n plt.savefig(plot_output_path+'scatterplot_fertig_single_'+field+'_'+pol+'_'+file_name+'_'+S.models['surface']+'_'+S.models['canopy'])\n else:\n www = rmse_prediction(aa, bb)\n # slope, intercept, r_value, p_value, std_err = linregress(aaa[~np.isnan(bbb)], bbb[~np.isnan(bbb)])\n plt.title(pol+' ' + field + ' ' + surface + ' ' + canopy + '$R^2$='+str(r_value)+' RMSE='+str(www))\n plt.savefig(plot_output_path+'scatterplot_fertig_'+field+'_'+pol+'_'+file_name+'_'+S.models['surface']+'_'+S.models['canopy'])\n else:\n plt.savefig(plot_output_path+pol+'_single_'+opt_mod+'_'+S.models['surface']+'_'+S.models['canopy'])\n\n\npdb.set_trace()\n\n\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_313","text":"Pandas-analyzing-dataset\/code.py\n# --------------\n# 
Importing header files\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom scipy.stats import mode \r\n \r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n\r\n#Reading file\r\nbank = pd.read_csv(path)\r\n#Code starts here\r\ncategorical_var=bank.select_dtypes(include = 'object')\r\n#print(categorical_var)\r\nnumerical_var=bank.select_dtypes(include = 'number')\r\n#print(numerical_var)\r\nprint(categorical_var.shape)\r\nprint(numerical_var.shape)\r\nbank.drop('Loan_ID',inplace=True,axis=1)\r\nbanks = pd.DataFrame(bank)\r\nprint(banks.isnull().sum())\r\nprint(banks.shape)\r\nbank_mode = banks.mode().iloc[0]\r\nbanks.fillna(bank_mode,inplace=True)\r\nprint(banks.isnull().sum().values.sum())\r\navg_loan_amount = pd.pivot_table(banks,index=('Gender','Married','Self_Employed'),values='LoanAmount').agg(np.mean)\r\nprint(avg_loan_amount)\r\nloan_approved_se = banks[(banks['Self_Employed'] == 'Yes') & (banks['Loan_Status'] == 'Y')].count() \r\nloan_approved_nse = banks[(banks['Self_Employed'] == 'No') & (banks['Loan_Status'] == 'Y')].count()\r\nLoan_Status = 614\r\npercentage_se = round((loan_approved_se\/Loan_Status)*100,2)\r\npercentage_nse = round((loan_approved_nse\/Loan_Status)*100,2)\r\nprint(percentage_se)\r\nprint(percentage_nse)\r\nloan_term = banks['Loan_Amount_Term'].apply(lambda x : int(x) \/ 12)\r\nbig_loan_term = len(loan_term[loan_term>=25])\r\nprint(big_loan_term)\r\ncolumns_to_show = ['ApplicantIncome', 'Credit_History']\r\nloan_groupby=banks.groupby(['Loan_Status'])\r\nloan_groupby=loan_groupby[columns_to_show]\r\nmean_values = loan_groupby.agg(np.mean)\r\nprint(mean_values)\r\n\r\n\r\n\r\n\r\n\r\n\n\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_314","text":"ArgonneCPAC\/skysim\n\"\"\"\n\"\"\"\nimport numpy as np\nfrom scipy.stats import johnsonsb\nfrom astropy.utils.misc import NumpyRNGContext\nfrom halotools.empirical_models import conditional_abunmatch\n\n\ndef calculate_johnsonsb_params_disk(\n magr, johnsonsb_disk_table_abscissa=[-19, -21],\n johnsonsb_disk_table=[0.15, -0.15], **kwargs):\n return np.interp(magr, johnsonsb_disk_table_abscissa, johnsonsb_disk_table)\n\n\ndef calculate_johnsonsb_params_bulge(\n magr, johnsonsb_bulge_table_abscissa=[-19, -21],\n johnsonsb_bulge_table=[1.5, 0.6], **kwargs):\n return np.interp(magr, johnsonsb_bulge_table_abscissa, johnsonsb_bulge_table)\n\n\ndef monte_carlo_ellipticity_disk(magr, inclination = None, seed=None, **kwargs):\n \"\"\"\n Parameters\n ----------\n magr : ndarray\n Numpy array of shape (ngals, )\n\n inclination : ndarray\n Numpy array of shape (ngals, )\n\n Returns\n -------\n ellipticity_realization : ndarray\n \"\"\"\n\n magr = np.atleast_1d(magr)\n inclination = np.atleast_1d(inclination)\n\n a = calculate_johnsonsb_params_disk(magr, **kwargs)\n b = np.ones_like(a)\n\n with NumpyRNGContext(seed):\n ellipticity_realization = johnsonsb.rvs(a, b)\n\n nwin = 101\n if inclination is None:\n inclination_correlated_ellipticity = conditional_abunmatch(\n magr, inclination, magr, ellipticity_realization, nwin)\n return inclination_correlated_ellipticity\n else:\n return ellipticity_realization\n\n\ndef monte_carlo_ellipticity_bulge(magr, seed=None, **kwargs):\n \"\"\"\n Parameters\n ----------\n magr : ndarray\n Numpy array of shape (ngals, )\n\n Returns\n -------\n ellipticity_realization : ndarray\n \"\"\"\n magr = np.atleast_1d(magr)\n\n a = calculate_johnsonsb_params_bulge(magr, **kwargs)\n b = np.ones_like(a)\n\n with NumpyRNGContext(seed):\n ellipticity_realization = 
johnsonsb.rvs(a, b)\n return ellipticity_realization\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_315","text":"100-1000\n#!\/usr\/bin\/env python\n# -*- coding: utf-8 -*-\nfrom . import _unittest as unittest\nimport cmath\nimport decimal\nimport math\nimport re\n\ntry:\n import numpy\nexcept ImportError:\n numpy = False\n\nfrom datatest._vendor.predicate import (\n _check_type,\n _check_callable,\n _check_wildcard,\n _check_truthy,\n _check_falsy,\n _check_nan,\n _check_regex,\n _check_set,\n _get_matcher_parts,\n get_matcher,\n MatcherBase,\n MatcherObject,\n MatcherTuple,\n Predicate,\n PredicateIntersectionType,\n PredicateUnionType,\n)\n\n\nclass TestCheckType(unittest.TestCase):\n def test_isinstance(self):\n function = lambda x: _check_type(int, x)\n self.assertTrue(function(0))\n self.assertTrue(function(1))\n self.assertFalse(function(0.0))\n self.assertFalse(function(1.0))\n\n def test_is_type(self):\n self.assertTrue(_check_type(int, int))\n\n\nclass TestCheckCallable(unittest.TestCase):\n def test_function(self):\n def divisible3or5(x): # <- Helper function.\n return (x % 3 == 0) or (x % 5 == 0)\n\n function = lambda x: _check_callable(divisible3or5, x)\n self.assertFalse(function(1))\n self.assertFalse(function(2))\n self.assertTrue(function(3))\n self.assertFalse(function(4))\n self.assertTrue(function(5))\n self.assertTrue(function(6))\n\n def test_error(self):\n def fails_internally(x): # <- Helper function.\n raise TypeError('raising an error')\n\n function = lambda x: _check_callable(fails_internally, x)\n with self.assertRaises(TypeError):\n self.assertFalse(function('abc'))\n\n def test_identity(self):\n def always_false(x): # <- Helper function.\n return False\n\n function = lambda x: _check_callable(always_false, x)\n self.assertTrue(function(always_false))\n\n def test_identity_with_error(self):\n def fails_internally(x): # <- Helper function.\n raise TypeError('raising an error')\n\n function = lambda x: _check_callable(fails_internally, x)\n self.assertTrue(function(fails_internally))\n\n\nclass TestCheckWildcard(unittest.TestCase):\n def test_always_true(self):\n self.assertTrue(_check_wildcard(1))\n self.assertTrue(_check_wildcard(object()))\n self.assertTrue(_check_wildcard(None))\n\n\nclass TestCheckTruthy(unittest.TestCase):\n def test_matches(self):\n self.assertTrue(_check_truthy('x'))\n self.assertTrue(_check_truthy(1.0))\n self.assertTrue(_check_truthy([1]))\n self.assertTrue(_check_truthy(range(1)))\n\n def test_nonmatches(self):\n self.assertFalse(_check_truthy(''))\n self.assertFalse(_check_truthy(0.0))\n self.assertFalse(_check_truthy([]))\n self.assertFalse(_check_truthy(range(0)))\n\n\nclass TestCheckFalsy(unittest.TestCase):\n def test_matches(self):\n self.assertTrue(_check_falsy(''))\n self.assertTrue(_check_falsy(0.0))\n self.assertTrue(_check_falsy([]))\n self.assertTrue(_check_falsy(range(0)))\n\n def test_nonmatches(self):\n self.assertFalse(_check_falsy('x'))\n self.assertFalse(_check_falsy(1.0))\n self.assertFalse(_check_falsy([1]))\n self.assertFalse(_check_falsy(range(1)))\n\n\nclass TestCheckNaN(unittest.TestCase):\n def test_matches(self):\n self.assertTrue(_check_nan(float('NaN')))\n self.assertTrue(_check_nan(complex(float('NaN'))))\n self.assertTrue(_check_nan(decimal.Decimal('NaN')))\n if hasattr(math, 'nan'): # New in version 3.5\n self.assertTrue(_check_nan(math.nan))\n if hasattr(cmath, 'nan'): # New in version 3.6\n self.assertTrue(_check_nan(cmath.nan))\n\n def test_nonmatches(self):\n 
self.assertFalse(_check_nan('x'))\n self.assertFalse(_check_nan(1))\n self.assertFalse(_check_nan(1.0))\n self.assertFalse(_check_nan(complex(1)))\n self.assertFalse(_check_nan(decimal.Decimal('1.123')))\n\n @unittest.skipUnless(numpy, 'requires numpy')\n def test_numpy_cases(self):\n self.assertTrue(_check_nan(numpy.nan))\n self.assertFalse(_check_nan(numpy.int64(123)))\n\n\nclass TestCheckRegex(unittest.TestCase):\n def test_function(self):\n regex = re.compile('(Ch|H)ann?ukk?ah?')\n function = lambda x: _check_regex(regex, x)\n\n self.assertTrue(function('Happy Hanukkah'))\n self.assertTrue(function('Happy Chanukah'))\n self.assertFalse(function('Merry Christmas'))\n\n def test_incompatible_types(self):\n regex = re.compile('abc')\n self.assertFalse(_check_regex(regex, 123))\n self.assertFalse(_check_regex(regex, ('a', 'b')))\n\n def test_identity(self):\n regex = re.compile('abc')\n self.assertTrue(_check_regex(regex, regex))\n\n\nclass TestCheckSet(unittest.TestCase):\n def test_function(self):\n function = lambda x: _check_set(set(['abc', 'def']), x)\n self.assertTrue(function('abc'))\n self.assertFalse(function('xyz'))\n\n def test_whole_set_equality(self):\n function = lambda x: _check_set(set(['abc', 'def']), x)\n self.assertTrue(function(set(['abc', 'def'])))\n\n def test_unhashable_check(self):\n function = lambda x: _check_set(set(['abc', 'def']), x)\n self.assertFalse(function(['abc']))\n self.assertFalse(function((1, ['xyz'])))\n\n\nclass TestGetMatcherParts(unittest.TestCase):\n def test_type(self):\n pred_handler, repr_string = _get_matcher_parts(int)\n self.assertTrue(pred_handler(1))\n self.assertFalse(pred_handler(1.0))\n self.assertEqual(repr_string, 'int')\n\n def test_callable(self):\n def userfunc(x):\n return x == 1\n pred_handler, repr_string = _get_matcher_parts(userfunc)\n self.assertTrue(pred_handler(1))\n self.assertFalse(pred_handler(2))\n self.assertEqual(repr_string, 'userfunc')\n\n userlambda = lambda x: x == 1\n pred_handler, repr_string = _get_matcher_parts(userlambda)\n self.assertTrue(pred_handler(1))\n self.assertFalse(pred_handler(2))\n self.assertEqual(repr_string, '')\n\n def test_ellipsis_wildcard(self):\n pred_handler, repr_string = _get_matcher_parts(Ellipsis)\n self.assertIs(pred_handler, _check_wildcard)\n self.assertEqual(repr_string, '...')\n\n def test_truthy(self):\n pred_handler, repr_string = _get_matcher_parts(True)\n self.assertIs(pred_handler, _check_truthy)\n self.assertEqual(repr_string, 'True')\n\n def test_falsy(self):\n pred_handler, repr_string = _get_matcher_parts(False)\n self.assertIs(pred_handler, _check_falsy)\n self.assertEqual(repr_string, 'False')\n\n def test_nan(self):\n pred_handler, repr_string = _get_matcher_parts(float('nan'))\n self.assertIs(pred_handler, _check_nan)\n self.assertEqual(repr_string, 'NaN')\n\n def test_regex(self):\n regex = re.compile('ab[cd]')\n\n pred_handler, repr_string = _get_matcher_parts(regex)\n self.assertTrue(pred_handler('abc'))\n self.assertFalse(pred_handler('abe'))\n self.assertEqual(repr_string, \"re.compile('ab[cd]')\")\n\n def test_set(self):\n myset = set(['a'])\n pred_handler, repr_string = _get_matcher_parts(myset)\n self.assertTrue(pred_handler('a'))\n self.assertFalse(pred_handler('b'))\n self.assertEqual(repr_string, repr(myset))\n\n def test_no_special_handling(self):\n self.assertIsNone(_get_matcher_parts(1))\n self.assertIsNone(_get_matcher_parts(0))\n\n\nclass TestMatcherInheritance(unittest.TestCase):\n def test_inheritance(self):\n 
self.assertTrue(issubclass(MatcherTuple, MatcherBase))\n self.assertTrue(issubclass(MatcherObject, MatcherBase))\n\n\nclass TestGetMatcher(unittest.TestCase):\n def assertIsInstance(self, obj, cls, msg=None): # New in Python 3.2.\n if not isinstance(obj, cls):\n standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)\n self.fail(self._formatMessage(msg, standardMsg))\n\n def test_single_value(self):\n # Check for MatcherObject wrapping.\n def isodd(x): # <- Helper function.\n return x % 2 == 1\n matcher = get_matcher(isodd)\n self.assertIsInstance(matcher, MatcherObject)\n\n # When original is adequate, it should be returned unchanged.\n original = object()\n matcher = get_matcher(original)\n self.assertIs(matcher, original)\n\n def test_tuple_of_values(self):\n # Check for MatcherTuple wrapping.\n def isodd(x): # <- Helper function.\n return x % 2 == 1\n matcher = get_matcher((1, isodd))\n self.assertIsInstance(matcher, MatcherTuple)\n\n # When tuple contains no MatcherObject objects,\n # the original should be returned unchanged.\n original = ('abc', 123)\n matcher = get_matcher(original)\n self.assertIs(matcher, original)\n\n def test_get_matcher_from_matcher(self):\n original = get_matcher((1, 'abc'))\n matcher = get_matcher(original)\n self.assertIs(matcher, original)\n\n def test_get_matcher_from_predicate(self):\n predicate = Predicate('abc')\n matcher = get_matcher(predicate)\n self.assertIs(matcher, predicate.matcher)\n\n def test_integration(self):\n \"\"\"A small integration test that checks a tuple containing all\n of the different special handling cases.\n \"\"\"\n def mycallable(x): # <- Helper function.\n return x == '_'\n\n myregex = re.compile('_')\n\n myset = set(['_'])\n\n matcher = get_matcher(\n (mycallable, myregex, myset, '_', Ellipsis)\n )\n\n self.assertTrue(matcher == ('_', '_', '_', '_', '_')) # <- Passes all conditions.\n self.assertFalse(matcher == ('X', '_', '_', '_', '_')) # <- Callable returns False.\n self.assertFalse(matcher == ('_', 'X', '_', '_', '_')) # <- Regex has no match.\n self.assertFalse(matcher == ('_', '_', 'X', '_', '_')) # <- Not in set.\n self.assertFalse(matcher == ('_', '_', '_', 'X', '_')) # <- Does not equal string.\n self.assertTrue(matcher == ('_', '_', '_', '_', 'X')) # <- Passes all conditions (wildcard).\n\n expected = \"(mycallable, re.compile('_'), {0!r}, '_', ...)\".format(myset)\n self.assertEqual(repr(matcher), expected)\n\n\nclass TestPredicate(unittest.TestCase):\n def test_predicate_function(self):\n pred = Predicate('abc')\n self.assertTrue(pred('abc'))\n self.assertFalse(pred('def'))\n self.assertFalse(pred(123))\n\n pred = Predicate(re.compile('^abc$'))\n self.assertTrue(pred('abc'))\n self.assertFalse(pred('def'))\n self.assertFalse(pred(123))\n\n pred = Predicate(1)\n self.assertTrue(pred(1))\n self.assertFalse(pred(2))\n self.assertFalse(pred('abc'))\n\n pred = Predicate(('abc', int))\n self.assertTrue(pred(('abc', 1)))\n self.assertFalse(pred(('abc', 1.0)))\n\n pred = Predicate((str, float('nan')))\n self.assertTrue(pred(('abc', float('nan'))))\n self.assertFalse(pred(('abc', 1.0)))\n self.assertFalse(pred(('abc', 'xyz')))\n\n @unittest.skipUnless(numpy, 'requires numpy')\n def test_numpy_types(self):\n \"\"\"Check that built-in types can match numpy types.\"\"\"\n # Match numpy.character sub-types.\n pred = Predicate(str)\n self.assertTrue(pred(numpy.str_('abc')))\n self.assertTrue(pred(numpy.unicode_('def')))\n\n # Match numpy.integer sub-types.\n pred = Predicate(int)\n 
self.assertTrue(pred(numpy.int8(123)))\n self.assertTrue(pred(numpy.uint64(456)))\n\n # Match numpy.floating sub-types.\n pred = Predicate(float)\n self.assertTrue(pred(numpy.float32(1.0)))\n self.assertTrue(pred(numpy.float64(2.0)))\n\n # Match numpy.complexfloating sub-types.\n pred = Predicate(complex)\n self.assertTrue(pred(numpy.complex64(1.0)))\n self.assertTrue(pred(numpy.complex128(2.0)))\n\n @unittest.skipUnless(numpy, 'requires numpy')\n def test_numpy_equality_error(self):\n \"\"\"Doing `numpy.dtype(float) == 1` raises a TypeError.\n Comparisons that error-out should be caught and considered\n non-matches\/False.\n \"\"\"\n pred = Predicate(numpy.dtype(float))\n self.assertFalse(pred(1))\n\n pred = Predicate(1)\n self.assertFalse(pred(numpy.dtype(float)))\n\n def test_inverted_logic(self):\n pred = ~Predicate('abc')\n self.assertFalse(pred('abc'))\n self.assertTrue(pred('def'))\n\n def test_repr(self):\n pred = Predicate('abc')\n self.assertEqual(repr(pred), \"Predicate('abc')\")\n\n pred = ~Predicate('abc')\n self.assertEqual(repr(pred), \"~Predicate('abc')\")\n\n pred = Predicate('abc', name='custom_name')\n self.assertEqual(repr(pred), \"Predicate('abc', name='custom_name')\")\n\n def test_optional_name(self):\n pred1 = Predicate('abc') # <- No name arg provided.\n self.assertFalse(hasattr(pred1, '__name__'))\n\n pred2 = Predicate(pred1) # <- No name arg inherited from pred1.\n self.assertFalse(hasattr(pred1, '__name__'))\n\n pred3 = Predicate('abc', name='pred3_name') # <- Provides name.\n self.assertEqual(pred3.__name__, 'pred3_name')\n\n pred4 = Predicate(pred3) # <- Inherits name from pred3.\n self.assertEqual(pred4.__name__, 'pred3_name')\n\n pred5 = Predicate(pred3, name='pred5_name') # <- Overrides pred3 name.\n self.assertEqual(pred5.__name__, 'pred5_name')\n\n # Test bad name values.\n with self.assertRaises(ValueError):\n Predicate('abc', name='1foo')\n\n with self.assertRaises(ValueError):\n Predicate('abc', name='foo!')\n\n with self.assertRaises(ValueError):\n Predicate('abc', name='foo bar')\n\n with self.assertRaises(ValueError):\n Predicate('abc', name='foo()')\n\n with self.assertRaises(ValueError):\n Predicate('abc', name='foo ')\n\n with self.assertRaises(ValueError):\n Predicate('abc', name='')\n\n def test_str(self):\n pred = Predicate('abc')\n self.assertEqual(str(pred), \"'abc'\")\n\n inverted = ~Predicate('abc')\n self.assertEqual(str(inverted), \"not 'abc'\")\n\n def test_predicate_from_predicate(self):\n pred1 = Predicate('abc')\n pred2 = Predicate(pred1)\n self.assertIsNot(pred1, pred2, msg='should be different object')\n self.assertIs(pred1.obj, pred2.obj, msg='should keep original reference')\n self.assertEqual(pred1.matcher, pred2.matcher)\n self.assertEqual(pred1._inverted, pred2._inverted)\n\n from_inverted = Predicate(~Predicate('abc'))\n self.assertTrue(from_inverted._inverted)\n\n @unittest.skipUnless(numpy, 'requires numpy')\n def test_predicate_from_predicate_numpy(self):\n pred1 = Predicate(numpy.int64)\n pred2 = Predicate(pred1)\n self.assertIsNot(pred1, pred2, msg='should be different object')\n self.assertIs(pred1.obj, pred2.obj, msg='should keep original reference')\n self.assertIs(pred1.matcher, pred2.matcher)\n self.assertEqual(pred1._inverted, pred2._inverted)\n\n def test_passthrough(self):\n \"\"\"Callable predicates should return the values provided by\n the given function as-is--the values should not be converted\n to True or False.\n \"\"\"\n TOKEN = object()\n\n def divisible_or_token(x): # <- Helper function.\n if x % 3 == 
0:\n return True\n if x % 5 == 0:\n return TOKEN\n return False\n\n predicate = Predicate(divisible_or_token)\n self.assertEqual(predicate(1), False)\n self.assertEqual(predicate(3), True)\n self.assertIs(predicate(5), TOKEN, msg='TOKEN should be returned, not True.')\n\n def test_equality_inconsistency(self):\n \"\"\"Badly behaved objects could have inconsistent EQ and NE\n behavior. To make sure that Predicate objects are consistent\n they should only use`==` internally, not `!=`.\n \"\"\"\n class Inconsistent(object):\n def __init__(self_, value):\n self_.value = value\n\n def __eq__(self_, other):\n return self_.value == other\n\n def __ne__(self_, other):\n \"\"\"Badly behaved not-equals method.\"\"\"\n return self_.__eq__(other) # <- DECEPTIVE RESULT!\n\n # Test `Inconsistent` class, itself.\n obj = Inconsistent(1)\n self.assertTrue(obj == 1, msg='expected behavior')\n self.assertTrue(obj != 1, msg='badly behaved comparison result')\n self.assertFalse(obj == 2, msg='expected behavior')\n self.assertFalse(obj != 2, msg='badly behaved comparison result')\n\n # Test predicate matching.\n pred = Predicate(Inconsistent(1))\n self.assertTrue(pred(1))\n self.assertFalse(pred(2))\n\n # Test inverted predicate matching.\n pred = ~Predicate(Inconsistent(1))\n self.assertFalse(pred(1))\n self.assertTrue(pred(2))\n\n def test_equality_failure(self):\n class BadObj(object):\n def __eq__(self, other):\n if isinstance(other, BadObj):\n return True\n raise TypeError('Sudden but inevitable betrayal!')\n\n pred = Predicate(BadObj())\n self.assertFalse(pred(1))\n\n pred = Predicate(1)\n self.assertFalse(pred(BadObj()))\n\n pred = ~Predicate(BadObj()) # Check inverted case.\n self.assertTrue(pred(1))\n\n pred = ~Predicate(1) # Check inverted case.\n self.assertTrue(pred(BadObj()))\n\n\nclass TestPredicateIntersectionType(unittest.TestCase):\n def setUp(self):\n \"\"\"Define simple predicates to use for testing.\"\"\"\n self.pred_gt3 = Predicate(lambda x: x > 3) # Greater-than three.\n self.pred_even = Predicate(lambda x: x % 2 == 0) # Is even.\n\n def test_basics(self):\n pred = PredicateIntersectionType(self.pred_gt3, self.pred_even)\n self.assertFalse(pred(1))\n self.assertFalse(pred(2))\n self.assertFalse(pred(3))\n self.assertTrue(pred(4))\n self.assertFalse(pred(5))\n self.assertTrue(pred(6))\n self.assertFalse(pred(7))\n\n def test_inverted(self):\n # Using the inversion operator (~).\n inv_pred = ~PredicateIntersectionType(self.pred_gt3, self.pred_even)\n self.assertTrue(inv_pred(1))\n self.assertTrue(inv_pred(2))\n self.assertTrue(inv_pred(3))\n self.assertFalse(inv_pred(4))\n self.assertTrue(inv_pred(5))\n self.assertFalse(inv_pred(6))\n self.assertTrue(inv_pred(7))\n\n def test_repr(self):\n pred = PredicateIntersectionType(self.pred_gt3, self.pred_even)\n self.assertEqual(repr(pred), 'Predicate() & Predicate()')\n\n inv_pred = ~pred\n self.assertEqual(repr(inv_pred), '~(Predicate() & Predicate())')\n\n def test_bad_type(self):\n with self.assertRaises(TypeError):\n PredicateIntersectionType(self.pred_gt3, 'foobarbaz')\n\n with self.assertRaises(TypeError):\n PredicateIntersectionType('foobarbaz', self.pred_gt3)\n\n def test_bitwise_operator(self):\n pred = self.pred_gt3 & self.pred_even # <- Bitwise operator.\n self.assertIsInstance(pred, PredicateIntersectionType)\n\n def test_bitwise_operator_bad_type(self):\n with self.assertRaises(TypeError):\n self.pred_gt3 & 'foobarbaz'\n\n with self.assertRaises(TypeError):\n 'foobarbaz' & self.pred_gt3\n\n def test_intersection_method(self):\n pred = 
self.pred_gt3.intersection(self.pred_even) # <- Intersection method.\n self.assertIsInstance(pred, PredicateIntersectionType)\n\n with self.assertRaises(TypeError):\n self.pred_gt3.intersection('foobarbaz')\n\n\nclass TestPredicateUnionType(unittest.TestCase):\n def setUp(self):\n \"\"\"Define simple predicates to use for testing.\"\"\"\n self.pred_foo = Predicate('foo') # Is \"foo\".\n self.pred_bar = Predicate('bar') # Is \"bar\".\n\n def test_basics(self):\n pred = PredicateUnionType(self.pred_foo, self.pred_bar)\n self.assertTrue(pred('foo'))\n self.assertTrue(pred('bar'))\n self.assertFalse(pred('baz'))\n\n def test_inverted(self):\n # Using the inversion operator (~).\n inv_pred = ~PredicateUnionType(self.pred_foo, self.pred_bar)\n self.assertFalse(inv_pred('foo'))\n self.assertFalse(inv_pred('bar'))\n self.assertTrue(inv_pred('baz'))\n\n def test_repr(self):\n pred = PredicateUnionType(self.pred_foo, self.pred_bar)\n self.assertEqual(repr(pred), \"Predicate('foo') | Predicate('bar')\")\n\n inv_pred = ~pred\n self.assertEqual(repr(inv_pred), \"~(Predicate('foo') | Predicate('bar'))\")\n\n def test_bad_type(self):\n with self.assertRaises(TypeError):\n PredicateIntersectionType(self.pred_foo, 'foobarbaz')\n\n with self.assertRaises(TypeError):\n PredicateIntersectionType('foobarbaz', self.pred_foo)\n\n def test_bitwise_operator(self):\n pred = self.pred_foo | self.pred_bar\n self.assertIsInstance(pred, PredicateUnionType)\n\n def test_bitwise_operator_bad_type(self):\n with self.assertRaises(TypeError):\n self.pred_foo | 'foobarbaz'\n\n with self.assertRaises(TypeError):\n 'foobarbaz' | self.pred_foo\n\n def test_union_method(self):\n pred = self.pred_foo.union(self.pred_bar) # <- Union method.\n self.assertIsInstance(pred, PredicateUnionType)\n\n with self.assertRaises(TypeError):\n self.pred_foo.union('foobarbaz')\n\n\nclass TesCombinedTypeNesting(unittest.TestCase):\n def setUp(self):\n \"\"\"Define simple predicates to use for testing.\"\"\"\n self.pred_foo = Predicate('foo') # Is \"foo\".\n self.pred_bar = Predicate('bar') # Is \"bar\".\n self.pred_baz = Predicate('baz') # Is \"baz\".\n self.pred_gt3 = Predicate(lambda x: x > 3) # Greater-than three.\n self.pred_even = Predicate(lambda x: x % 2 == 0) # Is even.\n\n def test_behavior(self):\n pred = self.pred_foo | self.pred_bar | self.pred_baz | (self.pred_gt3 & self.pred_even)\n self.assertFalse(pred(1))\n self.assertFalse(pred(2))\n self.assertFalse(pred(3))\n self.assertTrue(pred(4))\n self.assertFalse(pred(5))\n self.assertTrue(pred(6))\n self.assertFalse(pred(7))\n self.assertTrue(pred('foo'))\n self.assertTrue(pred('bar'))\n self.assertTrue(pred('baz'))\n self.assertFalse(pred('qux'))\n\n def test_repr(self):\n \"\"\"Several of the following predicates are logically nonsensical\n but since we're only testing the repr behavior, this is OK.\n \"\"\"\n pred = self.pred_foo | self.pred_bar | self.pred_baz\n expected = \"Predicate('foo') | Predicate('bar') | Predicate('baz')\"\n self.assertEqual(repr(pred), expected)\n\n pred = self.pred_foo & self.pred_bar & self.pred_baz\n expected = \"Predicate('foo') & Predicate('bar') & Predicate('baz')\"\n self.assertEqual(repr(pred), expected)\n\n pred = self.pred_foo | self.pred_bar & self.pred_baz # \"&\" takes precedence\n expected = \"Predicate('foo') | (Predicate('bar') & Predicate('baz'))\"\n self.assertEqual(repr(pred), expected)\n\n pred = (self.pred_foo | self.pred_bar) & self.pred_baz # Change order\n expected = \"(Predicate('foo') | Predicate('bar')) & Predicate('baz')\"\n 
self.assertEqual(repr(pred), expected)\n\n pred = self.pred_foo & self.pred_bar | self.pred_baz # \"&\" takes precedence\n expected = \"(Predicate('foo') & Predicate('bar')) | Predicate('baz')\"\n self.assertEqual(repr(pred), expected)\n\n pred = self.pred_foo & (self.pred_bar | self.pred_baz) # Change order\n expected = \"Predicate('foo') & (Predicate('bar') | Predicate('baz'))\"\n self.assertEqual(repr(pred), expected)\n\n pred = self.pred_foo | self.pred_bar | self.pred_gt3 & self.pred_even | self.pred_baz\n expected = \"Predicate('foo') | Predicate('bar') | (Predicate() & Predicate()) | Predicate('baz')\"\n self.assertEqual(repr(pred), expected)\n\n pred = self.pred_foo | (self.pred_bar | self.pred_gt3) & self.pred_even | self.pred_baz\n expected = \"Predicate('foo') | ((Predicate('bar') | Predicate()) & Predicate()) | Predicate('baz')\"\n self.assertEqual(repr(pred), expected)\n\n\nif __name__ == '__main__':\n unittest.main()\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_316","text":"\"\"\"\nperformance test\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport miepy\nfrom tqdm import tqdm\nfrom functools import partial\nfrom scipy.sparse.linalg import bicg, bicgstab\nfrom miepy.interactions import solve_linear_system\nfrom topics.photonic_clusters.create_lattice import hexagonal_lattice_particles\nfrom timer import time_function\n\nnm = 1e-9\n\nAg = miepy.materials.Ag()\nradius = 75*nm\nsource = miepy.sources.plane_wave.from_string(polarization='rhc')\nsource = miepy.sources.gaussian_beam(2500*nm, [1, 1j], power=.002)\n\nx = np.linspace(-4000*nm, 4000*nm, 100)\ny = np.linspace(-4000*nm, 4000*nm, 100)\nsource = miepy.sources.grid_interpolate_source(source, [x,y,0])\nseparation = 600*nm\n\nx = np.arange(-1500*nm, 1500*nm, 50*nm)\ny = np.arange(-1500*nm, 1500*nm, 50*nm)\n# source = miepy.sources.grid_interp_source(source, grid=(x,y,0))\n\n\ndef tests(Nmax, step=1):\n Nparticles = np.arange(1, Nmax+1, step)\n t_force, t_flux, t_build, t_solve, t_source = [np.zeros_like(Nparticles, dtype=float) for i in range(5)]\n for i,N in enumerate(Nparticles):\n print(N, Nmax)\n # positions = [[n*separation, 0, 0] for n in range(N)]\n positions = hexagonal_lattice_particles(N)*separation\n mie = miepy.sphere_cluster(position=positions,\n radius=radius,\n material=Ag,\n source=source,\n wavelength=600*nm,\n lmax=2)\n \n t_force[i] = time_function(mie.force)\n t_flux[i] = time_function(mie.cross_sections)\n t_build[i] = time_function(partial(miepy.interactions.sphere_aggregate_tmatrix, \n mie.position, mie.mie_scat, mie.material_data.k_b))\n\n A = miepy.interactions.sphere_aggregate_tmatrix(mie.position, mie.mie_scat, k=mie.material_data.k_b)\n t_solve[i] = time_function(partial(solve_linear_system, A, mie.p_src, method=miepy.solver.bicgstab))\n \n x = np.linspace(0, N*separation, 1)\n y = 2*radius*np.ones_like(x)\n z = np.zeros_like(x)\n\n t_source[i] = time_function(mie._solve_source_decomposition)\n\n fig, ax = plt.subplots()\n\n ax.plot(Nparticles, t_force*1e3, '-o', label='force')\n ax.plot(Nparticles, t_flux*1e3,'-o', label='flux')\n ax.plot(Nparticles, t_build*1e3, '-o', label='build')\n ax.plot(Nparticles, t_solve*1e3, '-o', label='solve')\n ax.plot(Nparticles, t_source*1e3, '-o', label='source')\n\n ax.legend()\n ax.set(xlabel='number of particles', ylabel='runtime (ms)')\n\n plt.show()\n\ntests(10, step=1)\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_317","text":"from lightweaver.fal import 
Falc82\nfrom lightweaver.rh_atoms import H_6_atom, H_6_CRD_atom, H_3_atom, C_atom, O_atom, OI_ord_atom, Si_atom, Al_atom, CaII_atom, Fe_atom, FeI_atom, He_9_atom, He_atom, He_large_atom, MgII_atom, N_atom, Na_atom, S_atom\nimport lightweaver as lw\nimport numpy as np\nimport scipy.interpolate as interp\nfrom astropy.convolution import Box1DKernel\nfrom astropy.convolution import convolve\nfrom astropy.io import fits\nfrom enum import IntEnum\nfrom mpi4py import MPI\nfrom tqdm import tqdm\nimport pickle\nimport argparse\n\nclass tags(IntEnum):\n READY = 0\n DONE = 1\n EXIT = 2\n START = 3\n\ndef smooth(sig, width):\n return convolve(sig, Box1DKernel(width))\n\ndef iterate_ctx_crd(ctx, Nscatter=10, NmaxIter=500):\n for i in range(NmaxIter):\n dJ = ctx.formal_sol_gamma_matrices(verbose=False)\n if i < Nscatter:\n continue\n delta = ctx.stat_equil(printUpdate=False)\n\n if dJ < 3e-3 and delta < 1e-3:\n # print(i, flush=True)\n # print('----------')\n return\n\ndef synth_spectrum(atmos, depthData=False, Nthreads=1, conserveCharge=False):\n atmos.quadrature(5)\n aSet = lw.RadiativeSet([H_6_atom(),\n C_atom(),\n OI_ord_atom(), Si_atom(), Al_atom(),\n CaII_atom(),\n Fe_atom(),\n He_9_atom(),\n MgII_atom(), N_atom(), Na_atom(), S_atom()\n ])\n # aSet.set_active('H', 'Ca')\n aSet.set_active('Ca')\n spect = aSet.compute_wavelength_grid()\n\n eqPops = aSet.compute_eq_pops(atmos)\n\n ctx = lw.Context(atmos, spect, eqPops, Nthreads=Nthreads, conserveCharge=conserveCharge)\n if depthData:\n ctx.depthData.fill = True\n \n iterate_ctx_crd(ctx)\n eqPops.update_lte_atoms_Hmin_pops(atmos, quiet=True)\n ctx.formal_sol_gamma_matrices(verbose=False)\n return ctx\n \ndef master_work(filename, write_frequency=1): \n task_index = 0\n num_workers = size - 1\n closed_workers = 0\n\n fmodel = fits.open('\/net\/drogon\/scratch1\/aasensio\/3dcubes\/Enhanced_network_385_tau_from_RH_01_tau8.fits') \n bifrost = fmodel[0].data[:].astype('{source}')\n \n except:\n comm.send(None, dest=source, tag=tags.EXIT)\n\n elif tag == tags.DONE:\n index = dataReceived['index'] \n success = dataReceived['success']\n \n if (not success):\n tasks[index] = -1\n else:\n log_departure_list[index] = dataReceived['log_departure']\n T_list[index] = dataReceived['T']\n tau_list[index] = dataReceived['tau']\n vturb_list[index] = dataReceived['vturb']\n cmass = dataReceived['cmass']\n \n pbar.update(1) \n \n elif tag == tags.EXIT:\n print(\" * MASTER : worker {0} exited.\".format(source))\n closed_workers += 1\n\n if (pbar.n \/ write_frequency == pbar.n \/\/ write_frequency):\n\n with open(f'{filename}_logdeparture.pk', 'wb') as filehandle:\n pickle.dump(log_departure_list[0:task_index], filehandle)\n\n with open(f'{filename}_T.pk', 'wb') as filehandle:\n pickle.dump(T_list[0:task_index], filehandle)\n \n with open(f'{filename}_vturb.pk', 'wb') as filehandle:\n pickle.dump(vturb_list[0:task_index], filehandle)\n\n with open(f'{filename}_tau.pk', 'wb') as filehandle:\n pickle.dump(tau_list[0:task_index], filehandle)\n\n with open(f'{filename}_cmass.pk', 'wb') as filehandle:\n pickle.dump(cmass, filehandle)\n\n print(\"Master finishing\")\n\n with open(f'{filename}_cmass.pk', 'wb') as filehandle:\n pickle.dump(cmass, filehandle)\n\n with open(f'{filename}_logdeparture.pk', 'wb') as filehandle:\n pickle.dump(log_departure_list, filehandle)\n\n with open(f'{filename}_T.pk', 'wb') as filehandle:\n pickle.dump(T_list, filehandle)\n\n with open(f'{filename}_vturb.pk', 'wb') as filehandle:\n pickle.dump(vturb_list, filehandle)\n\n with 
open(f'{filename}_tau.pk', 'wb') as filehandle:\n pickle.dump(tau_list, filehandle)\n \n\ndef slave_work(rank):\n \n while True:\n comm.send(None, dest=0, tag=tags.READY)\n dataReceived = comm.recv(source=0, tag=MPI.ANY_TAG, status=status) \n\n tag = status.Get_tag()\n \n if tag == tags.START: \n # Do the work here\n task_index = dataReceived['index']\n tau500 = dataReceived['tau500']\n T = dataReceived['T']\n vlos = dataReceived['vlos']\n vturb = dataReceived['vturb']\n \n success = 1\n\n try:\n atmos = lw.Atmosphere.make_1d(scale=lw.ScaleType.Tau500, depthScale=tau500, temperature=T, vlos=vlos, vturb=vturb, verbose=False)\n ctx = synth_spectrum(atmos, depthData=True, conserveCharge=False)\n tau = atmos.tauRef\n cmass = atmos.cmass\n temperature = atmos.temperature\n vturb = atmos.vturb\n log_departure = np.log10(ctx.activeAtoms[0].n \/ ctx.activeAtoms[0].nStar)\n except:\n success = 0 \n \n dataToSend = {'index': task_index, 'T': temperature, 'log_departure': log_departure, 'tau': tau, 'cmass': cmass, 'vturb': vturb, 'success': success}\n\n comm.send(dataToSend, dest=0, tag=tags.DONE)\n\n elif tag == tags.EXIT:\n break\n\n comm.send(None, dest=0, tag=tags.EXIT)\n \n\nif (__name__ == '__main__'):\n\n # Initializations and preliminaries\n comm = MPI.COMM_WORLD # get MPI communicator object\n size = comm.size # total number of processes\n rank = comm.rank # rank of this process\n status = MPI.Status() # get MPI status object\n\n print(f\"Node {rank}\/{size} active\", flush=True)\n\n \n if rank == 0: \n parser = argparse.ArgumentParser(description='Generate synthetic models and solve NLTE problem') \n parser.add_argument('--f', '--freq', default=1, type=int, metavar='FREQ', help='Frequency of model write') \n\n parsed = vars(parser.parse_args())\n \n master_work('bifrost', write_frequency=parsed['f'])\n else:\n slave_work(rank)\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_318","text":"jeti182\/tvatoj-power1-10\n# MIT License\n\n# Copyright (c) 2020 \n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom numpy import random, exp, array, zeros, log, clip\nfrom theano.tensor import mean, cast\nfrom theano.tensor import exp as ttexp\nfrom math import floor\nfrom numpy.random import random as runif\nfrom tqdm import tqdm\nimport pymc3\nimport pandas as pd\nfrom scipy.optimize import fmin\nfrom scipy.stats import *\nfrom tqdm import tqdm\nfrom scipy.stats import beta\nimport sys, logging\nimport warnings\n\nlogger = logging.getLogger(__name__)\ntry: \n import coloredlogs\n coloredlogs.install(level='DEBUG')\nexcept ImportError:\n logging.info('If you like the terminal output colored.' + \n 'Install colored coloredlogs (e.g., pip install coloredlogs)')\n\n# The TVAOJ psychometric function, see Tünnermann, Petersen, & Scharlau (2015):\ndef tvatoj_psychometric_function(SOA, C, wp, vp=None, vr=None):\n \"\"\" Takes SOAs in ms and either C in 1\/ms and w or vp and vr in 1\/ms \"\"\"\n if vp is None or vr is None:\n vp = C * wp\n vr = C * (1 - wp)\n SOA = array(SOA)\n left = (1-exp(-vp*abs(SOA))) + exp(-vp*abs(SOA)) * vp\/(vp+vr)\n right = exp(-vr*abs(SOA))*vp\/(vp+vr)\n return ((SOA<=0)*left + (SOA>0)*right)\n\n# A generative simulation of the process\ndef simulate_subject_toj(SOA, reps, C, wp):\n v1 = C * wp # attentional weights and overall rate C ...\n v2 = C * (1 -wp) # ... determine the individual rates\n probe_first_count = 0 # Our counter each SOA starts with zero\n for i in range(0, reps): # For every repetition\n tS = -log(1 - runif(1)) \/ v2 # let stimulus 2 race and record its VSTM arrival\n tC = SOA - log(1 - runif(1)) \/ v1 # sane for stimulus 1, offset by the SOA\n if tC < tS: # Did 1 arrive before 2?\n probe_first_count += 1 # Count as a \"probe first judment\"\n return probe_first_count # Return the result across all SOAs\n\n\n# Simulate TOJs for a group of participants, by drawing \n# their individual parameters from distributions\ndef simulate_tojs(simulation_setup): \n\n s = simulation_setup # For convenient access ...\n single_wp=False\n\n # Get the paras per individual\n if 'C_sd_within' in s: # within subject design\n logging.info('[SIM] Simulating two different (but correlated) C parameters.')\n C_sub_mu = clip(random.normal(s['C_mu'], s['C_sd_between'], size=s['num_participants']), 0, None)\n C_a_sub = clip(random.normal(C_sub_mu, s['C_sd_within'], size=s['num_participants']), 0, None)\n C_n_sub = clip(random.normal(C_sub_mu, s['C_sd_within'], size=s['num_participants']), 0,None)\n wp_a_sub = clip(random.normal(s['wp_a_mu'], s['wp_a_sd_between'], size=s['num_participants']), 0, None)\n wp_n_sub = clip(random.normal(s['wp_n_mu'], s['wp_n_sd_between'], size=s['num_participants']), 0, None)\n elif 'C_a_mu' in s: # between design\n logging.info('[SIM] Simulating two independent C parameters.')\n C_a_sub = clip(random.normal(s['C_a_mu'], s['C_a_sd_between'], size=s['num_participants']), 0, None)\n C_n_sub = clip(random.normal(s['C_n_mu'], s['C_n_sd_between'], size=s['num_participants']), 0,None)\n wp_a_sub = clip(random.normal(s['wp_a_mu'], s['wp_a_sd_between'], size=s['num_participants']), 0, None)\n wp_n_sub = clip(random.normal(s['wp_n_mu'], s['wp_n_sd_between'], size=s['num_participants']), 0, None)\n elif 'C_single_mu' in s:\n logging.info('[SIM] Simulating a single C parameter for both conditions.')\n 
C_a_sub = clip(random.normal(s['C_single_mu'], s['C_single_sd_between'], size=s['num_participants']), 0, None)\n C_n_sub = C_a_sub\n wp_a_sub = clip(random.normal(s['wp_a_mu'], s['wp_a_sd_between'], size=s['num_participants']), 0, None)\n wp_n_sub = clip(random.normal(s['wp_n_mu'], s['wp_n_sd_between'], size=s['num_participants']), 0, None)\n elif 'wp_mu' in s: # A single wp ==> Single condition experient\n logging.info('[SIM] Simulating a single condition.')\n C_sub = clip(random.normal(s['C_mu'], s['C_sd_between'], size=s['num_participants']), 0, None)\n wp_sub = clip(random.normal(s['wp_mu'], s['wp_sd_between'], size=s['num_participants']), 0, None)\n single_wp=True\n else:\n logger.error('Could not infer the design from the simulation parameters provided. Please refer to the exmaples')\n sys.exit('Aborting')\n\n\n # Get the TOJs\n participant_id = []\n condition_id = []\n probe_first_count = []\n repetitions = []\n SOA = []\n\n if single_wp:\n condition_nums = [0]\n else:\n condition_nums = [0, 1]\n\n for p in range(0, s['num_participants']):\n for i,soa in enumerate(s['SOAs']):\n for c in condition_nums:\n participant_id.append(p)\n condition_id.append(c)\n SOA.append(soa)\n repetitions.append(s['repetitions'][i])\n if c == 0 and not single_wp: # simulate a neutral condition TOJ\n probe_first_count.append(simulate_subject_toj(soa, s['repetitions'][i], C_n_sub[p], wp_n_sub[p]))\n if c == 1 and not single_wp: # simulate an attention condition TOJ\n probe_first_count.append(simulate_subject_toj(soa, s['repetitions'][i], C_a_sub[p], wp_a_sub[p]))\n if c == 0 and single_wp:\n probe_first_count.append(simulate_subject_toj(soa, s['repetitions'][i], C_sub[p], wp_sub[p]))\n\n df = pd.DataFrame()\n df['participant_id'] = participant_id\n df['condition_id'] = condition_id\n df['SOA'] = SOA\n df['repetitions'] = repetitions\n df['probe_first_count'] = probe_first_count\n \n \n return df\n\n\n\n# Using the non-centered reparamtrization to reduce divergenses\n# See here for the rationale: https:\/\/twiecki.io\/blog\/2017\/02\/08\/bayesian-hierchical-non-centered\/\ndef hierarchical_model_noncentered(data, single_C=False, single_wp=False):\n '''Sets up a pymc3 model based on TVATOJ.\n\n :param data: A TOJ dataframe as return by the simulations\n :param single_C: Whether to use single C (for both conditions)\n :param single_wp: Whether to use a single wp (implies single C and produces a model for a single condition only)\n\n :returns: Model\n :rtype: pymc3.Model\n '''\n \n model = pymc3.Model()\n with model: \n\n p_id = data['participant_id']\n c_id = data['condition_id']\n\n if single_wp: \n wp_c_id = len(data['condition_id']) * [0]\n single_C = True\n else:\n wp_c_id = c_id\n\n if single_C: \n C_c_id = len(data['condition_id']) * [0]\n else:\n C_c_id = c_id\n\n \n pfc = pymc3.Data('probe_first_count',data['probe_first_count'])\n\n C_mu = pymc3.Normal('C_mu', 0.080, 0.050, shape=len(set(C_c_id)))\n C_sd = pymc3.HalfCauchy('C_sd', 0.1, shape=len(set(C_c_id)))\n \n wp_mu = pymc3.Normal('wp_mu', 0.5,0.2, shape=len(set(wp_c_id)))\n wp_sd = pymc3.HalfCauchy('wp_sd', 0.2, shape=len(set(wp_c_id)))\n\n wp_e = pymc3.Normal('wp_e', 0,1, shape=(len(set(p_id)), len(set(wp_c_id))))\n C_e = pymc3.Normal('C_e', 0,1, shape=(len(set(p_id)), len(set(C_c_id))))\n\n C = pymc3.Deterministic('C', (C_mu + C_e * C_sd).clip(0.0001, 0.9999))\n wp = pymc3.Deterministic('wp', (wp_mu + wp_e * wp_sd).clip(0.0001, 0.9999))\n \n theta = pymc3.Deterministic('theta', tvatoj_psychometric_function(\n data['SOA'], C[(p_id, C_c_id)], 
wp[(p_id, wp_c_id)]))\n\n y = pymc3.Binomial('y', n=cast(data['repetitions'], 'int64'),\n p=theta, observed=pfc,\n dtype='int64') \n\n # The deterministic transformation could probably be externalized\n # However, here the calculation is most safe to produce prober within subject estimates\n vp = pymc3.Deterministic('vp', wp * C)\n vr = pymc3.Deterministic('vr', (1 - wp) * C)\n \n \n vp_mean = pymc3.Deterministic('vp_mean', mean(vp, axis=0)) \n vr_mean = pymc3.Deterministic('vr_mean', mean(vr, axis=0)) \n if not single_wp:\n va_diff_mean = pymc3.Deterministic('va_diff_mean', mean(vp[:,1] - vr[:,1])) # Diff of probe and ref rate in the attention cond\n vp_diff_mean = pymc3.Deterministic('vp_diff_mean', mean(vp[:,1] - vp[:,0])) # Diff of attention and neutral condition probe rates\n vr_diff_mean = pymc3.Deterministic('vr_diff_mean', mean(vr[:,1] - vr[:,0])) # Diff of attention and neutral condition probe rates\n wpa_mean = pymc3.Deterministic('wpa_mean', mean(wp[:,1])) \n wp_diff_mean = pymc3.Deterministic('wp_diff_mean', mean(wp[:,1] - wp[:,0])) \n else:\n wp_vs_point5_mean = pymc3.Deterministic('wp_mean', mean(wp)) \n return(model)\n\n \n# This function is borrowed from @aloctavodia, who ported it from 's scripts\n# https:\/\/github.com\/aloctavodia\/Doing_bayesian_data_analysis\/blob\/master\/HDIofICDF.py\ndef HDIofICDF(dist_name, credMass=0.95, **args):\n # freeze distribution with given arguments\n distri = dist_name(**args)\n # initial guess for HDIlowTailPr\n incredMass = 1.0 - credMass\n\n def intervalWidth(lowTailPr):\n return distri.ppf(credMass + lowTailPr) - distri.ppf(lowTailPr)\n\n # find lowTailPr that minimizes intervalWidth\n HDIlowTailPr = fmin(intervalWidth, incredMass, ftol=1e-8, disp=False)[0]\n # return interval as array([low, high])\n return distri.ppf([HDIlowTailPr, credMass + HDIlowTailPr])\n\ndef sim_and_fit(setup, model_func, iterations, condition_func, \n goal_var_names=None, log_var_names=['C_mu', 'wp_mu'],\n single_C=False, single_wp=False, outfile='out.csv',\n turn_off_warnings=True,\n tune=1000,\n target_accept=0.85,\n init='adapt_diag'):\n\n if (turn_off_warnings):\n warnings.filterwarnings(\"ignore\")\n logging.warning('Attention: Warnings turned off. ') # There is so much from pymc3 and theano ..\n\n if log_var_names==None or len(log_var_names) < 1:\n sys.exit('log_var_names should not be empty or None! 
Log at least one variable!')\n num_success=0\n model = None\n for i in tqdm(range(iterations), desc='Overall progress'):\n data = simulate_tojs(setup)\n if model is None:\n model = model_func(data, single_C=single_C, single_wp=single_wp)\n with model:\n pymc3.set_data({'probe_first_count': data['probe_first_count']})\n trace = pymc3.sample(2000, tune=tune, cores=4, init=init, target_accept=target_accept)\n summary_stats = pymc3.summary(trace, var_names=goal_var_names, hdi_prob=0.95)\n print(summary_stats)\n success = condition_func(summary_stats) * 1 # Either 0 or 1, depending on reaching our goals.\n num_success += success\n attempts = (i+1)\n success_rate = num_success \/ attempts\n hdi = HDIofICDF(beta,a=1+num_success, b=1+(attempts-num_success))\n logging.info(('[ESTIMATE] Success rate: %.2f' % success_rate +\n ' [95 %% HDI: %.2f to %.2f]' % (hdi[0],hdi[1]) + \n '\\n' + '-'* 20))\n\n out_df = pymc3.summary(trace, var_names=log_var_names, hdi_prob=0.95)\n out_df.insert(0, 'iteration', attempts)\n out_df.insert(1, 'success', success)\n out_df.insert(2, 'power_est', success_rate)\n out_df.insert(3, 'power_hdi_2.5%', hdi[0])\n out_df.insert(4, 'power_hdi_97.5%', hdi[1])\n if attempts == 1:\n out_df.to_csv(outfile)\n else:\n out_df.to_csv(outfile, mode='a', header=False)\n\n'''\nConvenience function to fit with logging.\n''' \ndef fit(model, outfile='fit.csv'):\n with model:\n trace = pymc3.sample(2000, tune=1000, cores=4, init='adapt_diag') #, target_accept=.85)\n summary_stats = pymc3.summary(trace, hdi_prob=0.95)\n summary_stats.to_csv(outfile)\n logger.info('The model was fitted and a summary was written to: ' + outfile)\n logger.info('You can analyze the returned trace with help of the Arviz library (https:\/\/arviz-devs.github.io\/arviz\/)')\n logger.info('For instance, plot parameter posteriors with arviz.plot_posterior(trace, var_names=[\"C\", \"wp\"])')\n return trace\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_319","text":"# Hysteresis model\n# http:\/\/eprints.lancs.ac.uk\/1375\/1\/MFI_10c.pdf\n# Identification of Hysteresis Functions Using a Multiple Model Approach\n# Mihaylova, Lampaert et al\n\nimport numpy as npy\nfrom scipy.optimize import root\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport copy\n\n#%%\n\nplt.close('all')\n\nclass HysteresisModel:\n \"\"\"\n Hysteresis model comprising a number of elementary Maxwell-slip models\n refer http:\/\/eprints.lancs.ac.uk\/1375\/1\/MFI_10c.pdf\n \"\"\"\n \n def __init__(self,N,K,W=None,delta=None):\n \n self.N = N\n \"\"\"\n Integer number of elementary models\n \"\"\"\n \n self.K = npy.ravel(npy.abs(K))\n \"\"\"\n Array of stiffness values for each elementary model\n \"\"\"\n \n if delta is None :\n # K and W specified\n \n if W is None:\n raise ValueError(\"Error: either delta or W arguments \"+\n \"must be provided!\")\n else:\n W = npy.ravel(npy.abs(W)) # limiting friction values\n \n else:\n # K and delta specified\n # W to be inferred, given this input\n delta = npy.abs(npy.ravel(delta))\n W = self.K * delta\n \n self.W = W\n \"\"\"\n Array of limiting friction values for each elementary model\n \"\"\"\n \n # Initialise matrices F and C, which do not vary with input\n self.F = npy.asmatrix(npy.identity(self.N))\n self.C = npy.asmatrix(npy.diag(-self.K))\n \n # Initialise matrices G and D, as empty\n self.G = npy.asmatrix(npy.empty((self.N,1)))\n self.D = npy.asmatrix(npy.empty((self.N,1)))\n \n # Initialise array to contain case indexs\n self.case = 
npy.zeros((self.N,),dtype=int)\n \n \n \n @property\n def x0(self):\n return self._x0 \n\n @x0.setter\n def x0(self,x0):\n \"\"\"\n Set initial states\n \"\"\"\n self._x0 = npy.asmatrix(npy.ravel(x0)).T\n self.x =self.x0\n \n if self.x.shape[0] != self.N:\n raise ValueError(\"Error: x0 wrong shape!\")\n \n @property\n def x(self):\n return self._x\n \n @x.setter\n def x(self,val):\n #print(\"States updated\")\n self._x = npy.asmatrix(val)\n \n \n def update(self,u,save_states=True):\n \"\"\"\n Function to advance state-space description of model dynamics \n by a single time step, returning next state and output vectors\n \"\"\"\n \n x = copy.deepcopy(self.x)\n \n # Define G and D matrix entries\n for i in range(self.N): # loop over all elementary models\n \n Wi = self.W[i]\n Ki = self.K[i]\n \n # Evaluate switching parameter\n fi = Ki * (u - x[i])\n \n if fi > Wi:\n # Case 2\n self.case[i] = 2\n self.G[i] = 1\n self.D[i] = 0\n x[i] = -Wi\/Ki\n \n \n elif fi < -Wi:\n # Case 3\n self.case[i] = 3\n self.G[i] = 1\n self.D[i] = 0 \n x[i] = +Wi\/Ki\n \n else:\n # Case 1\n self.case[i] = 1\n self.G[i] = 0\n self.D[i] = Ki\n \n # Compute next states and output\n # using eqns (10) and (11) in Mihaylova's paper\n x_next = self.F * x + self.G * u\n y_k = self.C * x + self.D * u\n \n Fh_k = y_k.sum() # total hysteresis force\n \n # Update states\n if save_states:\n self.x = x_next\n\n return x_next, y_k, Fh_k\n \n \n def run(self,x0,uVals):\n \"\"\"\n Run simulation from initial conditions, given inputs u\n x0 : column vector [Nx1]\n u : list or vector of length (nSteps,1)\n \"\"\"\n \n # Convert and check shape of u\n uVals = npy.ravel(uVals)\n nSteps = uVals.shape[0] \n \n # Initialise state space eqns\n self.x0 = x0\n \n # Step through state space eqns\n xVals = npy.zeros((nSteps,self.N))\n yVals = npy.zeros((nSteps,self.N))\n Fh_vals = npy.zeros((nSteps,))\n \n for k, u_k in enumerate(uVals):\n \n # Get next states and output\n x_k, y_k, Fh_k = self.update(u_k)\n \n # Store\n xVals[k,:] = npy.ravel(x_k)\n yVals[k,:] = y_k.T\n Fh_vals[k] = Fh_k\n \n # Store results\n self.uVals = uVals\n self.xVals = xVals\n self.yVals = yVals\n self.FhVals = Fh_vals\n \n # Return states and output for each step\n return xVals, yVals, Fh_vals\n \n \n def write_results(self,\n fname='results.csv',\n delimiter=','):\n \n arr = npy.asmatrix(self.uVals).T\n titles = [\"u\"]\n N = self.N\n \n arr = npy.hstack((arr,self.xVals))\n titles += [\"x%d\" % (i+1) for i in range(N)]\n \n arr = npy.hstack((arr,self.yVals))\n titles += [\"y%d\" % (i+1) for i in range(N)]\n \n arr = npy.hstack((arr,npy.asmatrix(self.FhVals).T))\n titles += [\"Fh\"]\n \n npy.savetxt(fname=fname,\n X=arr,\n delimiter=delimiter,\n header=delimiter.join(str(x) for x in titles))\n \n \n def PlotResults_timeSeries(self,tVals):\n \"\"\"\n Plot results as time series\n [t,u], [t,x], [t,y], [t,Fh]\n \"\"\"\n \n fig, axarr = plt.subplots(4,sharex=True)\n fig.set_size_inches(16,9,forward=True)\n \n ax1 = axarr[0]\n ax1.plot(tVals,self.uVals)\n ax1.xaxis.set_visible(False)\n ax1.set_ylabel(\"u\")\n ax1.set_xlabel(\"Input displacement, u(t)\")\n \n ax2 = axarr[1]\n ax2.plot(tVals,self.xVals)\n ax2.xaxis.set_visible(False)\n ax2.set_ylabel(\"x\")\n ax2.set_title(\"States of\\nelementary models, x(t)\")\n \n ax3 = axarr[2]\n ax3.plot(tVals,self.yVals)\n ax3.xaxis.set_visible(False)\n ax3.set_ylabel(\"y\")\n ax3.set_title(\"Outputs from\\nelementary models, y(t)\")\n \n ax4 = axarr[3]\n ax4.plot(tVals,self.FhVals)\n ax4.set_xlabel(\"Time (seconds)\")\n 
ax4.set_ylabel(\"F$_h$\")\n ax4.set_title(\"Net output F$_h$\")\n \n \n def PlotResults(self):\n \"\"\"\n Plot results as [u,x], [u,y], [u,Fh] plots\n \"\"\"\n \n fig, axarr = plt.subplots(1,3,sharex=True)\n fig.set_size_inches(16,9,forward=True)\n \n ax1 = axarr[0]\n ax1.plot(self.uVals,self.xVals)\n ax1.set_xlabel(\"Input u\")\n ax1.set_title(\"States of\\nelementary models, x\")\n \n ax2 = axarr[1]\n \n ax2.plot(self.uVals,self.yVals)\n ax2.set_xlabel(\"Slip (u-x)\")\n ax2.set_title(\"Outputs from\\nelementary models, y\")\n \n ax3 = axarr[2]\n ax3.plot(self.uVals,self.FhVals)\n ax3.set_xlabel(\"Input u\")\n ax3.set_title(\"Net output F$_h$\")\n \n\nclass static_response():\n \"\"\"\n Class used to compute response to forcing input\n \"\"\"\n\n def __init__(self,hys_obj,K1, K2):\n self.hys_obj = hys_obj\n self.K1 = K1\n self.K2 = K2\n \n def net_force(self,d,F_ext,verbose=False):\n \"\"\"\n Function which defines net force \n given position 'u' and external force 'F_ext'\n \"\"\"\n \n u = d[0] - d[1] # relative displacement at friction interface\n F_hys = self.hys_obj.update(u=u,save_states=False)[2]\n\n F_net_1 = self.K1 * d[0] + F_hys - F_ext\n F_net_2 = self.K2 * d[1] - F_hys\n F_net = npy.array([F_net_1,F_net_2])\n \n if verbose:\n print(\"u = %.3e\" % u)\n print(\"x = {0}\".format(self.hys_obj.x))\n print(\"F_hys = {0}\".format(F_hys))\n print(\"F_net = {0}\".format(F_net))\n \n return F_net\n \n def run(self,F_vals,x0=None,d0=None):\n \n # Define function to solve for next u\n def solve(d_last,F_k,hys_obj):\n \n # Determine next u to satify equilibrium - i.e. zero net force\n sol = root(fun=self.net_force,x0=d_last,args=(F_k,))\n d_k = sol.x\n u_k = d_k[0]-d_k[1]\n \n F_net = self.net_force(d_k,F_k)\n \n if not sol.success:\n pass#print(sol.message)\n \n x_k, y_k, F_hys_k = hys_obj.update(u=u_k,save_states=True)\n \n return F_hys_k, d_k, u_k, x_k, y_k, F_net\n \n # Set initial conditions\n if x0 is None:\n x0 = npy.zeros((self.hys_obj.N,))\n self.hys_obj.x0 = x0\n \n if d0 is None:\n d0 = npy.array([0.0,0.0])\n d_j = d0 # initial guess\n \n # Run step by step \n F_hys_vals = []\n x_vals = []\n u_vals = []\n y_vals = []\n F_net_vals = []\n \n for j, F_j in enumerate(F_vals):\n \n #print(\"--- Step #%d ---\" % j)\n F_hys_j, d_j, u_j, x_j, y_j, F_net = solve(d_j,F_j,self.hys_obj)\n \n F_hys_vals.append(F_hys_j)\n x_vals.append(npy.ravel(x_j))\n y_vals.append(npy.ravel(y_j))\n u_vals.append(u_j)\n F_net_vals.append(F_net)\n \n self.x_vals = x_vals\n self.y_vals = y_vals\n self.u_vals = u_vals\n self.F_hys_vals = F_hys_vals\n self.F_vals = F_vals\n self.F_net_vals = F_net_vals\n \n \n def plot(self):\n \n fig, axarr = plt.subplots(3,2,sharex='col')\n fig.set_size_inches(14,8)\n \n ax = axarr[0,0]\n ax.plot(self.F_vals,label='$F_{external}$')\n ax.plot(self.F_hys_vals,label='$F_{hysteresis}$')\n ax.legend()\n ax.set_ylabel(\"Forces\")\n \n ax = axarr[1,0]\n ax.plot(self.u_vals)\n ax.set_ylabel(\"Displacement, u\")\n \n ax = axarr[2,0]\n ax.plot(self.x_vals)\n ax.set_xlabel(\"Step index\")\n ax.set_ylabel(\"States, x\")\n \n ax = axarr[0,1]\n ax.plot(self.u_vals,self.y_vals)\n ax.set_ylabel(\"Outputs, y\")\n \n ax = axarr[1,1]\n ax.plot(self.u_vals,self.F_hys_vals)\n ax.set_ylabel(\"$F_{hysteresis}$\")\n \n ax = axarr[2,1]\n ax.plot(self.u_vals,self.F_vals)\n ax.set_xlabel(\"Displacement, u\")\n ax.set_ylabel(\"$F_{external}$\")\n \n return fig\n \n \n# -------- TEST ROUTINE ----------\n\nif __name__ == \"__main__\":\n \n test_routine = 1\n \n if test_routine == 0:\n \n # Define 
hysteresis model\n K = [1000,2000,3000] \n delta = [1,2,3]\n Ne = len(K)\n hys = HysteresisModel(Ne,K,delta=delta)\n \n # Define displacement inputs\n dt = 0.02\n tmax = 10\n u0 = 10\n \n import random\n\n def randomWalk(N,normalise=True):\n \n x= [0]\n \n for j in range(N-1):\n step_x = random.randint(0,1)\n if step_x == 1:\n x.append(x[j] + 1 + 0.05*npy.random.normal())\n else:\n x.append(x[j] - 1 + 0.05*npy.random.normal())\n \n x = npy.asarray(x)\n \n if normalise:\n absmaxVal = npy.max([npy.max(x),-npy.min(x)])\n x = x \/ absmaxVal\n \n return x\n \n tVals = npy.arange(0,tmax,dt)\n uVals = u0*randomWalk(tVals.shape[0])\n \n #uVals = 4.5*npy.sin(2*npy.pi*0.5*tVals)\n \n # Obtain states and outputs by state space stepping\n hys.run(npy.zeros((Ne,)),uVals)\n \n # Plot results\n hys.PlotResults()\n hys.PlotResults_timeSeries(tVals)\n \n #hys.write_results()\n \n elif test_routine==1:\n \n # Define hysteresis model\n K = [1000,2000,3000] \n W = [1000,1000,1000]\n Ne = len(K)\n hys = HysteresisModel(Ne,K,W=W)\n \n # Define force function\n # Define displacement inputs\n dt = 0.02\n tmax = 10\n u0 = 10\n F0 = 3000\n \n t_vals = npy.arange(0,tmax,dt)\n F_vals = F0 * (npy.sin(2*npy.pi*t_vals) + npy.sin(2*npy.pi*3.2*t_vals))\n \n # Define spring\n K_spring = 1500\n \n # Define and run analysis\n analysis = static_response(hys_obj=hys,K_spring=K_spring)\n analysis.run(F_vals=F_vals)\n analysis.plot() \n \n else:\n raise ValueError(\"No test selected!\")\n \n#%%\n\n#\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_352","text":"#!\/usr\/bin\/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 11 09:37:22 2021\n\n@author: \n\"\"\"\n'''\nComplexity = Hard\n'''\n'''\nGiven two sorted arrays nums1 and nums2 of size m and n respectively, return the median of the two sorted arrays.\n\nThe overall run time complexity should be O(log (m+n)).\n\n \n\nExample 1:\n\nInput: nums1 = [1,3], nums2 = [2]\nOutput: 2.00000\nExplanation: merged array = [1,2,3] and median is 2.\nExample 2:\n\nInput: nums1 = [1,2], nums2 = [3,4]\nOutput: 2.50000\nExplanation: merged array = [1,2,3,4] and median is (2 + 3) \/ 2 = 2.5.\nExample 3:\n\nInput: nums1 = [0,0], nums2 = [0,0]\nOutput: 0.00000\nExample 4:\n\nInput: nums1 = [], nums2 = [1]\nOutput: 1.00000\nExample 5:\n\nInput: nums1 = [2], nums2 = []\nOutput: 2.00000\n \n\nConstraints:\n\nnums1.length == m\nnums2.length == n\n0 <= m <= 1000\n0 <= n <= 1000\n1 <= m + n <= 2000\n-106 <= nums1[i], nums2[i] <= 106\nAccepted\n937,027\nSubmissions\n2,961,335\n'''\n\n'''\nResults of below solution:\n \nRuntime: 92 ms, faster than 64.31% of Python3 online submissions for Median of Two Sorted Arrays.\nMemory Usage: 14.4 MB, less than 78.54% of Python3 online submissions for Median of Two Sorted Arrays.\n'''\nclass Solution:\n def findMedianSortedArrays(self, nums1, nums2) -> float:\n array=sorted(nums1+nums2)\n import statistics\n return statistics.median(array)\n \n \n \ny=Solution()\nnums1 = [1,3] \nnums2 = [2]\nprint(y.findMedianSortedArrays(nums1, nums2))\nnums1 = [1,2]\nnums2 = [3,4]\nprint(y.findMedianSortedArrays(nums1, nums2))\nnums1 = [0,0]\nnums2 = [0,0]\nprint(y.findMedianSortedArrays(nums1, nums2))\nnums1 = []\nnums2 = [1]\nprint(y.findMedianSortedArrays(nums1, nums2))\nnums1 = [2]\nnums2 = []\nprint(y.findMedianSortedArrays(nums1, nums2)) \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_353","text":"# -*- coding:utf-8 -*-\n__author__ = 
'yangjian'\n\"\"\"\n\n\"\"\"\n\nfrom collections import defaultdict\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.cluster import hierarchy\nfrom scipy.stats import spearmanr\nfrom sklearn.impute import SimpleImputer\n\nfrom hypernets.core import randint\nfrom hypernets.tabular import sklearn_ex as skex, dask_ex as dex, column_selector as cs\nfrom hypernets.tabular.cfg import TabularCfg as cfg\nfrom hypernets.utils import logging\n\nlogger = logging.get_logger(__name__)\n\n\ndef select_by_multicollinearity(X, method=None):\n \"\"\"\n Adapted from https:\/\/scikit-learn.org\/stable\/auto_examples\/inspection\/plot_permutation_importance_multicollinear.html\n handling multicollinearity is by performing hierarchical clustering on the features’ Spearman\n rank-order correlations, picking a threshold, and keeping a single feature from each cluster.\n \"\"\"\n X_shape = X.shape\n if dex.is_dask_dataframe(X):\n X_shape = dex.compute(X_shape)[0]\n sample_limit = cfg.multi_collinearity_sample_limit\n if X_shape[0] > sample_limit:\n logger.info(f'{X_shape[0]} rows data found, sample to {sample_limit}')\n frac = sample_limit \/ X_shape[0]\n X, _, = dex.train_test_split(X, train_size=frac, random_state=randint())\n\n n_values = [X[c].value_counts() for c in X.columns]\n if dex.is_dask_dataframe(X):\n n_values = dex.compute(*n_values)\n one_values = [n.name for n in n_values if len(n) <= 1]\n if len(one_values) > 0:\n X = X[[c for c in X.columns if c not in one_values]]\n\n logger.info('computing correlation')\n if (method is None or method == 'spearman') and isinstance(X, pd.DataFrame):\n Xt = SimpleImputer(missing_values=np.nan, strategy='most_frequent').fit_transform(X)\n corr = spearmanr(Xt).correlation\n elif isinstance(X, pd.DataFrame):\n Xt = X.copy()\n cols = cs.column_number_exclude_timedelta(X)\n if cols:\n Xt[cols] = SimpleImputer(missing_values=np.nan, strategy='most_frequent').fit_transform(Xt[cols])\n Xt = skex.SafeOrdinalEncoder().fit_transform(Xt)\n corr = Xt.corr(method=method).values\n else: # dask\n Xt = dex.SafeOrdinalEncoder().fit_transform(X)\n corr = Xt.corr(method='pearson' if method is None else method).compute().values\n\n logger.info('computing cluster')\n corr_linkage = hierarchy.ward(corr)\n cluster_ids = hierarchy.fcluster(corr_linkage, 1, criterion='distance')\n cluster_id_to_feature_ids = defaultdict(list)\n for idx, cluster_id in enumerate(cluster_ids):\n cluster_id_to_feature_ids[cluster_id].append(idx)\n selected = [X.columns[v[0]] for v in cluster_id_to_feature_ids.values()]\n unselected = list(set(X.columns.to_list()) - set(selected)) + one_values\n feature_clusters = [[X.columns[i] for i in v] for v in cluster_id_to_feature_ids.values()]\n return feature_clusters, selected, unselected\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_354","text":"DeepK\/hoDMD-experiments\nfrom scipy.fftpack import dct\nfrom traintestutil import train_test\nimport numpy\nfrom functools import partial\nimport argparse\n\n\ndef dct_keep(to_keep, vectors):\n return numpy.hstack(dct(vectors)[:to_keep, :])\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='Evaluate DCT based sentence embedding')\n parser.add_argument(\"pickled_training_data_path\", help=\"pickled train path\")\n parser.add_argument(\"pickled_test_data_path\", help=\"pickled test path\")\n parser.add_argument(\"DCT_components_to_keep\", help=\"DCT components to keep\", type = int)\n\n args = parser.parse_args()\n\n pickled_training_data_path = 
args.pickled_training_data_path\n pickled_test_data_path = args.pickled_test_data_path\n DCT_components_to_keep = args.DCT_components_to_keep\n\n print (\"DCT components to keep -> %s\"%DCT_components_to_keep)\n\n func_to_apply = partial(dct_keep, DCT_components_to_keep)\n\n filter_by_len = DCT_components_to_keep\n results = train_test(pickled_training_data_path, pickled_test_data_path, func_to_apply, filter_by_len)\n results = results.split(\"\\n\")[-2]\n print (results)\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_355","text":"'''\nCopyright(C) 2016 Engineering Department, University of Cambridge, UK.\n\nLicense\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\nAuthor\n <>\n'''\n\nimport os\nimport subprocess\nimport numpy as np\nimport scipy.interpolate\nfrom scipy import signal as sig\n\nfrom lib import sigproc as sp\nfrom . import fileio\n\ndef resample(wav, fs, trgfs, method=2, deterministic=True):\n '''\n deterministic [True] : Try to make it deterministic.\n (e.g. sox (mehtod=2) is not deterministic by default)\n ATTENTION This option has been tested only for method==2\n '''\n if method==1:\n # sndfile-resample (libresample)\n # 'c' argument\n #0 : Best Sinc Interpolator\n #1 : Medium Sinc Interpolator (default)\n #2 : Fastest Sinc Interpolator TO AVOID\n #3 : ZOH Interpolator TO AVOID\n #4 : Linear Interpolator TO AVOID\n # sndfile-resample _seems_ to be always deterministic\n\n tmpinfname = sp.gentmpfile('sndfile-resample-in.wav')\n tmpoutfname = sp.gentmpfile('sndfile-resample-out.wav')\n\n try:\n wavwrite(tmpinfname, wav, fs)\n\n cmd = 'sndfile-resample -c 0 -to '+str(trgfs)+' '+tmpinfname+' '+tmpoutfname\n out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)\n #print(out)\n\n syn, synfs, synenc = wavread(tmpoutfname)\n except:\n if os.path.exists(tmpinfname): os.remove(tmpinfname)\n if os.path.exists(tmpoutfname): os.remove(tmpoutfname)\n raise\n\n if os.path.exists(tmpinfname): os.remove(tmpinfname)\n if os.path.exists(tmpoutfname): os.remove(tmpoutfname)\n\n elif method==2:\n # SOX\n # VHQ: -v -s: The fastest with the results among the bests\n # ATTENTION:If deterministic=False, sox is NOT deterministic!\n # I.e. 
it does NOT produce the same samples for each run!\n\n tmpinfname = sp.gentmpfile('sox-resample-in.wav')\n tmpoutfname = sp.gentmpfile('sox-resample-out.wav')\n\n try:\n fileio.wavwrite(tmpinfname, wav, fs)\n\n cmd = 'sox '\n if deterministic: cmd += '--no-dither '\n cmd += tmpinfname+' '+tmpoutfname+' rate -v -s '+str(trgfs)\n out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)\n #print(out)\n\n syn, synfs, synenc = fileio.wavread(tmpoutfname)\n except:\n if os.path.exists(tmpinfname): os.remove(tmpinfname)\n if os.path.exists(tmpoutfname): os.remove(tmpoutfname)\n raise\n\n if os.path.exists(tmpinfname): os.remove(tmpinfname)\n if os.path.exists(tmpoutfname): os.remove(tmpoutfname)\n\n elif method==3:\n '''\n Resample using FFT and power of 2\n Create sometimes a significant peak at Nyquist\n '''\n syn = wav.copy()\n wavlen = syn.shape[0]\n wavlenpow2 = int(np.power(2, np.floor(np.log2(wavlen))+1))\n syn = np.pad(syn, (0, wavlenpow2-wavlen), constant_values=(0,0), mode='constant')\n syn = scipy.signal.resample(syn, np.round(len(syn)*float(trgfs)\/fs))\n syn = syn[:np.round(wavlen*float(trgfs)\/fs)]\n\n if 0:\n import matplotlib.pyplot as plt\n plt.ion()\n plt.plot(np.arange(len(wav))\/float(fs), wav, 'k')\n plt.plot(np.arange(len(syn))\/float(trgfs), syn, 'b')\n from IPython.core.debugger import Pdb; Pdb().set_trace()\n\n return syn\n\n# Resample feature using the nearest\ndef featureresample(ts, X, nts):\n if len(X.shape)>1:\n Y = np.zeros((len(nts), X.shape[1]))\n else:\n Y = np.zeros(len(nts))\n for n, t in enumerate(nts):\n idx = np.where(ts>=t)[0]\n if len(idx)==0:\n idx = X.shape[0]-1\n else:\n idx = np.min(idx) # Nearest\n idx = np.clip(idx, 0, X.shape[0]-1)\n if len(X.shape)>1:\n Y[n,:] = X[idx,:]\n else:\n Y[n] = X[idx]\n return Y\n\ndef f0s_resample_pitchsync(f0s, nbperperiod, f0min=20.0, f0max=5000.0):\n f0s = f0s.copy()\n\n # Interpolate where there is zero values\n f0s[:,1] = np.interp(f0s[:,0], f0s[f0s[:,1]>0,0], f0s[f0s[:,1]>0,1])\n\n f0s[:,1] = np.clip(f0s[:,1], f0min, f0max)\n\n ts = [0.0]\n while ts[-1]0,1] = 1\n\n nts = np.arange(f0s[0,0], f0s[-1,0], timeshift)\n\n # The voicing resampling has to be done using nearest ...\n vcsfn = scipy.interpolate.interp1d(vcs[:,0], vcs[:,1], kind='nearest', bounds_error=False, fill_value=0)\n\n # ... whereas the frequency resampling need linear interpolation, while ignoring the voicing\n f0s = np.interp(nts, f0s[f0s[:,1]>0,0], f0s[f0s[:,1]>0,1])\n\n # Put back the voicing\n f0s[vcsfn(nts)==0] = 0.0\n\n f0s = np.vstack((nts, f0s)).T\n\n if 0:\n plt.plot(f0s[:,0], f0s[:,1])\n\n return f0s\n\ndef f0s_rmsteps(f0s):\n '''\n Description\n Removes steps in the F0 curve.\n\n Steps can come from some F0 estimator (e.g. 
those based on GCI\n detection are likely to exhibits these).\n\n For pulse synthesis, it avoids some glitches around the main lobes\n\n It might be bad for creaky voice (oversmoothing the f0 curve),\n though F0 estimate in creaky voice is quite likely to be wrong anyway.\n '''\n f0sori = f0s.copy()\n f0s = f0s.copy()\n voicedi = np.where(f0s[:,1]>0)[0]\n shift = np.mean(np.diff(f0s[:,0]))\n fc = (1.0\/shift)\/4.0 # The cut-off frequency\n hshift = (1.0\/fc)\/8.0 # The high sampling rate for resampling the original curve\n data = np.interp(np.arange(0.0, f0s[-1,0], hshift), f0s[voicedi,0], f0s[voicedi,1])\n b, a = sig.butter(8, fc\/(0.5\/hshift), btype='low')\n f0ss = sig.filtfilt(b, a, data)\n f0s[voicedi,1] = np.interp(f0s[voicedi,0], np.arange(0.0, f0s[-1,0], hshift), f0ss)\n\n if 0:\n plt.plot(f0sori[:,0], f0sori[:,1], 'k')\n plt.plot(f0s[:,0], f0s[:,1], 'b')\n from IPython.core.debugger import Pdb; Pdb().set_trace()\n\n return f0s\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_356","text":"from pathlib import Path\nimport os\nimport matplotlib as mpl\nif os.environ.get('DISPLAY') is None: # NOQA\n mpl.use('Agg') # NOQA\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport matplotlib\nimport itertools\nimport numpy as np\n\n# from pylab import rcParams\n# rcParams['figure.figsize'] = (12, 12)\nfontsize = 12\nparams = {\n 'figure.figsize': (12, 12),\n 'axes.labelsize': fontsize,\n 'axes.titlesize': fontsize,\n 'legend.fontsize': fontsize,\n 'xtick.labelsize': fontsize - 1,\n 'ytick.labelsize': fontsize - 1,\n}\nmpl.rcParams.update(params)\n\n\ndef histogram(\n values,\n *,\n name,\n filetype=\"png\",\n plt_kwargs=None,\n hist_kwargs=None):\n n, bins, patches = mpl_plot(\n plt_fn=lambda: plt.hist(values, linewidth=10, **(hist_kwargs or {})),\n name=name,\n filetype=filetype,\n **(plt_kwargs or {}))\n return n, bins\n\n\ndef scatter(\n x,\n y,\n *,\n name,\n filetype=\"png\",\n plt_kwargs=None,\n scatter_kwargs=None,\n fit_y=None,\n fit_plot_kwargs=None,\n legend=False):\n def plot_fit():\n if fit_y is not None:\n plt.plot(x, fit_y, **(fit_plot_kwargs or {}))\n\n def legend_fn():\n if legend:\n if isinstance(legend, dict):\n plt.legend(**legend)\n else:\n plt.legend()\n\n if isinstance(y, dict):\n def plt_fn():\n for label, _y in y.items():\n plt.scatter(x, _y, label=label, **(scatter_kwargs or {}))\n plot_fit()\n legend_fn()\n else:\n def plt_fn():\n plt.scatter(x, y, **(scatter_kwargs or {}))\n plot_fit()\n legend_fn()\n mpl_plot(\n plt_fn=plt_fn,\n name=name,\n filetype=filetype,\n **plt_kwargs)\n\n\ndef mpl_plot(\n plt_fn,\n *,\n name,\n filetype=\"png\",\n **plt_kwargs):\n Figure.file_types = [filetype]\n Figure.set_defaults(**plt_kwargs)\n with Figure(name):\n return plt_fn()\n\n\n# http:\/\/pytorch.org\/tutorials\/intermediate\/seq2seq_translation_tutorial.html#the-seq2seq-model # NOQA\ndef plot_attention(\n input_labels, output_labels, attentions,\n out_colors=None, filepath=None):\n # Set up figure with colorbar\n fig = plt.figure()\n ax = fig.add_subplot(111)\n cax = ax.matshow(attentions, cmap='bone')\n fig.colorbar(cax)\n\n # Set up axes\n ax.tick_params(axis='both', which='major', labelsize=10)\n ax.tick_params(axis='both', which='minor', labelsize=8)\n ax.set_xticklabels([''] + output_labels, rotation=90)\n ax.set_yticklabels([''] + input_labels)\n\n # Show label at every tick\n ax.xaxis.set_major_locator(ticker.MultipleLocator(1))\n ax.yaxis.set_major_locator(ticker.MultipleLocator(1))\n\n if out_colors:\n 
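# prepend a color for the blank leading tick label so the colors stay aligned with output_labels\n        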
out_colors = [\"k\"] + out_colors\n assert len(out_colors) == 1 + len(output_labels), \\\n f\"got {len(out_colors)} colors for {len(output_labels)} labels\"\n for xtick, color in zip(ax.get_xticklabels(), out_colors):\n xtick.set_color(color)\n\n if filepath:\n plt.savefig(filepath)\n else:\n plt.show()\n plt.close()\n\n\n# http:\/\/scikit-learn.org\/stable\/auto_examples\/model_selection\/plot_confusion_matrix.html # NOQA\ndef plot_confusion_matrix(\n cm, classes,\n normalize=False, title='Confusion matrix', cmap=plt.cm.Blues,\n filepath=None):\n \"\"\"This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') \/ cm.sum(axis=1)[:, np.newaxis]\n plt.imshow(\n cm, interpolation='nearest', cmap=cmap, vmin=0, vmax=1)\n else:\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=90)\n plt.yticks(tick_marks, classes)\n\n thresh = cm.max() \/ 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(\n j, i, f\"{cm[i, j]:.2f}\",\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n if filepath:\n plt.savefig(filepath)\n else:\n plt.show()\n plt.close()\n\n\n# https:\/\/stackoverflow.com\/questions\/18195758\/set-matplotlib-colorbar-size-to-match-graph # NOQA\ndef add_colorbar(im, aspect=20, pad_fraction=0.5, **kwargs):\n \"\"\"Add a vertical color bar to an image plot.\"\"\"\n from mpl_toolkits import axes_grid1\n divider = axes_grid1.make_axes_locatable(im.axes)\n width = axes_grid1.axes_size.AxesY(im.axes, aspect=1.\/aspect)\n pad = axes_grid1.axes_size.Fraction(pad_fraction, width)\n current_ax = plt.gca()\n cax = divider.append_axes(\"right\", size=width, pad=pad)\n plt.sca(current_ax)\n return im.axes.figure.colorbar(im, cax=cax, **kwargs)\n\n\ndef simple_imshow(\n matrix,\n cmap=\"viridis\", figsize=(10, 10), aspect_equal=True, outfile=None,\n title=None, xlabel=None, ylabel=None,\n xticks=True,\n yticks=True,\n xtick_labels=None,\n ytick_labels=None,\n xtick_locs_labels=None,\n ytick_locs_labels=None,\n xtick_label_rotation='vertical',\n xgrid=None,\n ygrid=None,\n colorbar=True, scale=\"lin\", cbar_title=None,\n bad_color=None,\n origin='upper'):\n if aspect_equal and figsize[1] is None:\n matrix_aspect = matrix.shape[0] \/ matrix.shape[1]\n width = figsize[0]\n height = max(3, width * matrix_aspect)\n figsize = (width, height)\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(1, 1, 1)\n if aspect_equal:\n ax.set_aspect('equal')\n if title:\n plt.title(title)\n if xlabel:\n ax.set_xlabel(xlabel)\n if ylabel:\n ax.set_ylabel(ylabel)\n norm = matplotlib.colors.SymLogNorm(1) if scale == \"log\" else None\n cmap = mpl.cm.get_cmap(cmap)\n if bad_color is not None:\n cmap.set_bad(bad_color)\n im = plt.imshow(\n matrix, interpolation='nearest', cmap=cmap, norm=norm, origin=origin)\n if xtick_labels is not None:\n assert xtick_locs_labels is None\n locs = np.arange(0, len(xtick_labels))\n xtick_locs_labels = locs, xtick_labels\n if ytick_labels is not None:\n assert ytick_locs_labels is None\n locs = np.arange(0, len(ytick_labels))\n ytick_locs_labels = locs, ytick_labels\n if xtick_locs_labels is not None:\n plt.xticks(*xtick_locs_labels, rotation=xtick_label_rotation)\n if ytick_locs_labels is not None:\n 
plt.yticks(*ytick_locs_labels)\n if xgrid is not None or ygrid is not None:\n if xgrid is not None:\n ax.set_xticks(xgrid, minor=True)\n if ygrid is not None:\n ax.set_yticks(ygrid, minor=True)\n ax.grid(which=\"minor\")\n if xticks is not True:\n plt.tick_params(\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n top=False,) # ticks along the top edge are off\n if yticks is not True:\n plt.tick_params(\n axis='y', # changes apply to the y-axis\n which='both', # both major and minor ticks are affected\n left=False, # ticks along the bottom edge are off\n right=False,) # ticks along the top edge are off\n if colorbar:\n cbar = add_colorbar(im)\n if cbar_title:\n cbar.ax.set_ylabel(cbar_title, rotation=270)\n plt.tight_layout()\n if outfile:\n plt.savefig(outfile)\n else:\n plt.show()\n plt.clf()\n\n\ndef embed_2d(\n emb, emb_method=\"UMAP\", umap_n_neighbors=15, umap_min_dist=0.1,\n return_proj=False):\n if hasattr(emb_method, 'fit_transform'):\n proj = emb_method\n elif emb_method.lower() == \"umap\":\n try:\n from umap import UMAP\n except ImportError:\n print(\"Please install umap to use emb_method='UMAP'\")\n print(\"pip install umap-learn (NOT pip install umap)\")\n print(\"https:\/\/github.com\/lmcinnes\/umap\")\n raise\n proj = UMAP(\n init=\"random\",\n n_neighbors=umap_n_neighbors,\n min_dist=umap_min_dist)\n else:\n import sklearn.manifold\n proj = getattr(sklearn.manifold, emb_method)()\n emb_2d = proj.fit_transform(emb)\n if return_proj:\n return emb_2d, proj\n return emb_2d\n\n\ndef plot_embeddings(\n emb, emb_method=None,\n labels=None, color=None, classes=None, class2color=None, title=None,\n outfile=None, cmap=\"viridis\", max_labels=100,\n colorbar_ticks=None, reverse_colorbar=False, colorbar_label=None,\n label_fontpath=None,\n **scatter_kwargs):\n \"\"\"\n Plot a scatterplot of the embeddings contained in emb.\n\n emb: an array with dim (n_embeddings x 2) or (n_embeddings x emb_dim).\n In the latter case an embedding method emb_method should be supplied\n to project from emb_dim to dim=2.\n\n emb_method: \"UMAP\", \"TSNE\", or any other algorithm in sklearn.manifold\n labels: Optional text labels for each embedding\n color: Optional color for each embedding, according to which it will be\n colored in the plot.\n classes: Optional class for each embedding, according to which it will\n be colored in the plot.\n class2color: A map which determines the color assigned to each class\n outfile: If provided, save plot to this file instead of showing it\n cmap: colormap\n max_labels: maximum number of labels to be displayed\n \"\"\"\n from matplotlib.ticker import NullFormatter\n if emb_method:\n x, y = embed_2d(emb, emb_method).T\n else:\n x, y = emb.T\n figsize = (14, 12) if color is not None else (12, 12)\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111)\n ax.set_aspect('equal')\n ax.xaxis.set_major_formatter(NullFormatter())\n ax.yaxis.set_major_formatter(NullFormatter())\n if not scatter_kwargs:\n scatter_kwargs = dict(marker=\"o\", s=1, alpha=1)\n if classes is not None:\n for cls in set(classes):\n i = (classes == cls).nonzero()\n ax.scatter(x[i], y[i], label=cls, **scatter_kwargs)\n elif color is not None:\n sc = ax.scatter(x, y, c=color, cmap=cmap, **scatter_kwargs)\n cb = fig.colorbar(sc, ticks=colorbar_ticks)\n if reverse_colorbar:\n cb.ax.invert_yaxis()\n if colorbar_label:\n cb.set_label(colorbar_label)\n else:\n ax.scatter(x, y, **scatter_kwargs)\n\n 
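# annotate points with their text labels; when there are more than max_labels points,\n    # only every (n_labels \/\/ max_labels)-th point gets a label to avoid clutter\n    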
if labels is not None:\n if label_fontpath:\n import matplotlib.font_manager as fm\n fontproperties = fm.FontProperties(fname=label_fontpath)\n else:\n fontproperties = None\n n_labels = len(labels)\n for i in range(len(emb)):\n if (\n max_labels < 0 or\n n_labels <= max_labels or\n not i % (n_labels \/\/ max_labels)):\n ax.annotate(\n labels[i], (x[i], y[i]), alpha=0.76, size=10,\n fontproperties=fontproperties)\n if title:\n plt.title(title)\n plt.axis('tight')\n if classes is not None:\n plt.legend(loc='best', scatterpoints=1, markerscale=5, fontsize=10)\n plt.tight_layout()\n if outfile:\n plt.savefig(str(outfile))\n else:\n plt.show()\n\n\ndef plot_dendrogram(\n dist,\n labels,\n outfile=None,\n method=\"centroid\",\n figsize=(50, 45),\n font_size=10,\n cmap='magma_r',\n ):\n from scipy.cluster import hierarchy\n fig = plt.figure(figsize=figsize)\n # dendrogram\n axdendro = fig.add_axes([0.09, 0.1, 0.2, 0.8])\n axdendro.set_xticks([])\n axdendro.set_yticks([])\n Y = hierarchy.linkage(dist, method=method)\n Z = hierarchy.dendrogram(\n Y, orientation='right', labels=labels, leaf_font_size=font_size)\n # distance matrix\n index = Z['leaves']\n D = dist[index, :]\n D = D[:, index]\n axmatrix = fig.add_axes([0.3, 0.1, 0.6, 0.8])\n im = axmatrix.matshow(D, aspect='auto', origin='lower', cmap=cmap)\n axmatrix.set_xticks([])\n axmatrix.set_yticks([])\n # colorbar\n axcolor = fig.add_axes([0.91, 0.1, 0.02, 0.8])\n plt.colorbar(im, cax=axcolor)\n\n if outfile:\n fig.savefig(str(outfile))\n else:\n fig.show()\n plt.close(fig)\n\n\ndef get_palette(categories, cmap=None):\n from bokeh.palettes import (\n Category20,\n Category20b,\n Category20c,\n viridis,\n )\n n_cat = len(set(categories))\n if cmap is not None:\n return cmap[n_cat]\n if n_cat <= 20:\n if n_cat <= 2:\n palette = Category20[3]\n return [palette[0], palette[-1]]\n else:\n return Category20[n_cat]\n if n_cat <= 40:\n return Category20[20] + Category20b[20]\n if n_cat <= 60:\n return Category20[20] + Category20b[20] + Category20c[20]\n return viridis(n_cat)\n\n\ndef plot_embeddings_bokeh(\n emb,\n emb_method='UMAP',\n classes=None,\n class_category=None,\n labels=None,\n color=None,\n raw_colors=None,\n color_category=None,\n color_categorical=False,\n cmap=None,\n cmap_reverse=False,\n colorbar=False,\n colorbar_ticks=None,\n outfile=None,\n title=None,\n scatter_labels=False,\n tooltip_fields=None,\n figure_kwargs=None,\n write_png=False,\n return_plot=False,\n plot_width=None,\n plot_height=None,\n reuse_figure=None,\n **circle_kwargs,\n ):\n \"\"\"\n Creates an interactive scatterplot of the embeddings contained in emb,\n using the bokeh library.\n\n emb: an array with dim (n_embeddings x 2) or (n_embeddings x emb_dim).\n In the latter case an embedding method emb_method should be supplied\n to project from emb_dim to dim=2.\n\n emb_method: \"UMAP\", \"TSNE\", or any other algorithm in sklearn.manifold\n labels: Optional text labels for each embedding\n color: Optional color for each embedding, according to which it will be\n colored in the plot.\n classes: Optional class for each embedding, according to which it will\n be colored in the plot.\n outfile: If provided, save plot to this file instead of showing it\n cmap: colormap\n title: optional title of the plot\n \"\"\"\n from bokeh.plotting import figure, output_file, show, save\n from bokeh.models import (\n ColumnDataSource, CategoricalColorMapper, LinearColorMapper,\n ColorBar, FixedTicker, Text)\n from bokeh.palettes import Viridis256\n\n if emb_method:\n emb = 
embed_2d(emb, emb_method)\n else:\n assert emb.shape[1] == 2\n\n if outfile:\n output_file(outfile)\n\n if cmap is not None:\n if isinstance(cmap, str):\n import bokeh.palettes\n # matplotib suffix for reverse color maps\n if cmap.endswith(\"_r\"):\n cmap_reverse = True\n cmap = cmap[:-2]\n cmap = getattr(bokeh.palettes, cmap)\n elif isinstance(cmap, dict):\n cmap = cmap[max(cmap.keys())]\n if cmap_reverse:\n if isinstance(cmap, dict):\n new_cmap = {}\n for k, v in cmap.items():\n v = list(v)\n v.reverse()\n new_cmap[k] = v\n cmap = new_cmap\n else:\n cmap = list(cmap)\n cmap.reverse()\n\n source_dict = dict(x=emb[:, 0], y=emb[:, 1])\n if labels is not None:\n source_dict[\"label\"] = labels\n\n if raw_colors is not None:\n assert color is None\n if any(isinstance(c, str) for c in raw_colors):\n assert all(isinstance(c, str) for c in raw_colors)\n else:\n assert all(len(c) == 3 for c in raw_colors)\n assert cmap is None\n from bokeh.colors import RGB\n raw_colors = [RGB(*c) for c in raw_colors]\n source_dict[\"color\"] = raw_colors\n color_conf = {\"field\": \"color\"}\n elif color is not None:\n if any(isinstance(c, str) for c in color):\n assert all(isinstance(c, str) for c in color)\n palette = get_palette(color, cmap=cmap)\n color_mapper = CategoricalColorMapper(\n factors=sorted(set(color)),\n palette=palette)\n else:\n if cmap is None:\n cmap = Viridis256\n elif isinstance(cmap, dict):\n cmap = cmap[max(cmap.keys())]\n color_mapper = LinearColorMapper(cmap)\n color_conf = {\n \"field\": \"color\",\n \"transform\": color_mapper}\n source_dict[\"color\"] = color\n else:\n color_conf = \"red\"\n\n if classes is not None:\n source_dict[\"class\"] = classes\n if tooltip_fields:\n for k, v in tooltip_fields.items():\n source_dict[k] = v\n source = ColumnDataSource(source_dict)\n\n tools = \"crosshair,pan,wheel_zoom,box_zoom,reset,hover\"\n figure_kwargs = figure_kwargs or {}\n if plot_width is not None:\n figure_kwargs['plot_width'] = plot_width\n if plot_height is not None:\n figure_kwargs['plot_height'] = plot_height\n if reuse_figure is None:\n p = figure(tools=tools, sizing_mode='stretch_both', **figure_kwargs)\n else:\n p = reuse_figure\n if title:\n p.title.text = title\n\n if colorbar:\n if colorbar_ticks:\n ticker = FixedTicker(ticks=colorbar_ticks)\n else:\n ticker = None\n colorbar = ColorBar(\n color_mapper=color_mapper, ticker=ticker)\n\n if labels is not None and scatter_labels:\n glyph = Text(\n x=\"x\", y=\"y\", text=\"label\", angle=0.0,\n text_color=color_conf, text_alpha=0.95, text_font_size=\"8pt\",\n **circle_kwargs)\n p.add_glyph(source, glyph)\n else:\n plot_kwargs = dict(\n x='x', y='y',\n source=source,\n color=color_conf,\n **circle_kwargs\n )\n if classes is not None:\n legend_field = 'class'\n elif color is not None and not raw_colors:\n legend_field = 'color'\n else:\n legend_field = None\n if legend_field:\n plot_kwargs['legend_field'] = legend_field\n # sort by color field to order the legend entries nicely\n sorted_source = source.to_df().sort_values(legend_field)\n plot_kwargs['source'] = source.from_df(sorted_source)\n\n p.circle(**plot_kwargs)\n\n if labels is not None:\n from bokeh.models import HoverTool\n from collections import OrderedDict\n hover = p.select(dict(type=HoverTool))\n hover_entries = [\n (\"label\", \"@label{safe}\"),\n (\"(x, y)\", \"(@x, @y)\"),\n ]\n if color is not None and color_category:\n hover_entries.append((color_category, \"@color\"))\n if classes is not None and class_category:\n hover_entries.append((class_category, 
\"@class\"))\n if tooltip_fields:\n for field in tooltip_fields:\n hover_entries.append((field, \"@\" + field))\n hover.tooltips = OrderedDict(hover_entries)\n if colorbar:\n assert color is not None\n p.add_layout(colorbar, 'right')\n if return_plot:\n return p\n if outfile:\n save(p)\n if write_png:\n from bokeh.io import export_png\n png_file = outfile.with_suffix('.png')\n export_png(p, filename=png_file)\n else:\n show(p)\n\n\nclass Figure():\n \"\"\"Provides a context manager that automatically saves and closes\n a matplotlib plot.\n\n >>> with Figure(\"figure_name\"):\n >>> plt.plot(x, y)\n >>> # saves plot to {Figure.fig_dir}\/{figure_name}.{Figure.file_type}\n\n When creating many figures with the same settings, e.g. plt.xlim(0, 100)\n and plt.ylim(0, 1.0), defaults can be set with:\n\n >>> Figure.set_defaults(xlim=(0, 100), ylim=(0, 1.0))\n >>> # context manager will call plt.xlim(0, 100) and plt.ylim(0, 1.0)\n \"\"\"\n fig_dir = Path(\"out\/fig\")\n file_types = [\"png\", \"pdf\"]\n default_plt_calls = {}\n late_calls = [\"xscale\", \"xlim\", \"yscale\", \"ylim\"] # order is important\n\n def __init__(\n self, name,\n figwidth=6, figheight=None, fontsize=12,\n invert_xaxis=False, invert_yaxis=False,\n **kwargs):\n self.fig = plt.figure()\n self.fig.set_figwidth(figwidth)\n phi = 1.6180\n self.fig.set_figheight(figheight or figwidth \/ phi)\n # params = {\n # 'figure.figsize': (figwidth, figheight or figwidth \/ phi),\n # 'axes.labelsize': fontsize,\n # 'axes.titlesize': fontsize,\n # 'legend.fontsize': fontsize,\n # 'xtick.labelsize': fontsize - 1,\n # 'ytick.labelsize': fontsize - 1,\n # }\n # mpl.rcParams.update(params)\n self.name = name\n self.plt_calls = {**kwargs}\n self.invert_xaxis = invert_xaxis\n self.invert_yaxis = invert_yaxis\n for attr, val in self.default_plt_calls.items():\n if attr not in self.plt_calls:\n self.plt_calls[attr] = val\n\n def __enter__(self):\n for attr, val in self.plt_calls.items():\n # if attr in self.late_calls:\n # continue\n try:\n getattr(plt, attr)(val)\n except:\n getattr(plt, attr)(*val)\n\n return self.fig\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n # for attr in self.late_calls:\n # if attr in self.plt_calls:\n # print(attr, self.plt_calls[attr])\n # getattr(plt, attr)(self.plt_calls[attr])\n if self.invert_xaxis:\n plt.gca().invert_xaxis()\n if self.invert_yaxis:\n plt.gca().invert_yaxis()\n plt.tight_layout()\n for file_type in self.file_types:\n outfile = self.fig_dir \/ f\"{self.name}.{file_type}\"\n plt.savefig(outfile)\n plt.clf()\n\n @classmethod\n def set_defaults(cls, **kwargs):\n cls.default_plt_calls = kwargs\n for attr, val in kwargs.items():\n setattr(cls, attr, val)\n\n @classmethod\n def reset_defaults(cls):\n cls.default_plt_calls = {}\n\n\nlinestyles = [\n \"-\", \"--\", \"-.\", \":\",\n \"-\", \"--\", \"-.\", \":\",\n \"-\", \"--\", \"-.\", \":\",\n \"-\", \"--\", \"-.\", \":\",\n \"-\", \"--\", \"-.\", \":\",\n \"-\", \"--\", \"-.\", \":\"]\n\ntry:\n from bokeh.palettes import Category20\n colors = Category20[20]\nexcept ImportError:\n try:\n import seaborn as sns\n colors = sns.color_palette(\"muted\")\n except ImportError:\n # https:\/\/gist.github.com\/huyng\/816622\n colors = [\n \"348ABD\", \"7A68A6\", \"A60628\",\n \"467821\", \"CF4457\", \"188487\", \"E24A33\",\n \"348ABD\", \"7A68A6\", \"A60628\",\n \"467821\", \"CF4457\", \"188487\", \"E24A33\",\n \"348ABD\", \"7A68A6\", \"A60628\",\n \"467821\", \"CF4457\", \"188487\", \"E24A33\",\n ]\n\n# https:\/\/matplotlib.org\/api\/markers_api.html\nmarkers = 
[\n \".\", # point\n \",\", # pixel\n \"o\", # circle\n \"v\", # triangle_down\n \"^\", # triangle_up\n \"<\", # triangle_left\n \">\", # triangle_right\n \"1\", # tri_down\n \"2\", # tri_up\n \"3\", # tri_left\n \"4\", # tri_right\n \"8\", # octagon\n \"s\", # square\n \"p\", # pentagon\n \"P\", # plus (filled)\n \"*\", # star\n \"h\", # hexagon1\n \"H\", # hexagon2\n \"+\", # plus\n \"x\", # x\n \"X\", # x (filled)\n \"D\", # diamond\n \"d\", # thin_diamond\n \"|\", # vline\n \"_\", # hline\n ]\n\n\nif __name__ == \"__main__\":\n plot_attention(\n \"1 2 3 4\".split(),\n \"a b c d\".split(),\n np.random.rand(4, 4),\n out_colors=\"r g b r\".split())\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_357","text":"# encoding: utf-8\n\n\n__author__ = ' <>'\n__date__ = '06\/2012'\n\n\n\"\"\"Module to convert frame representation as output by kinect recording\nto angle and\/or angle velocity histogram representations.\n\"\"\"\n\n\nimport numpy as np\nfrom scipy.cluster.vq import kmeans, whiten\n\nfrom ..lib.transformations import (quaternion_multiply, quaternion_inverse,\n euler_from_quaternion)\nfrom ..lib.utils import delayed_velocities, meta_map\nfrom ..lib.vector_quantization import get_histos\nfrom ..lib.kde2d import gaussian_kde_2d\n\n# Note: frame names from ros kinect seems to denote left \/ right from\n# the observer point of view.\n\n\nANGLES = [\n ('left_shoulder', 'left_elbow'),\n ('left_elbow', 'left_hand'),\n ('torso', 'left_hip'),\n ('left_hip', 'left_knee'),\n ('left_knee', 'left_foot'),\n ('right_shoulder', 'right_elbow'),\n ('right_elbow', 'right_hand'),\n ('torso', 'right_hip'),\n ('right_hip', 'right_knee'),\n ('right_knee', 'right_foot'),\n ]\n\n\ndef angles_indices(marker_names):\n return [(marker_names.index(source), marker_names.index(dest))\n for source, dest in ANGLES]\n\n\ndef get_angles(sample, source_frame, dest_frame):\n \"\"\"Compute rotation along three basis axis between two frames\n in the given sample.\n\n :param sample: array of translations and rotations (shape: (nb_frames, 7)\n :param source_frame, dest_frame: indices of source and dest frames\n \"\"\"\n # All transformations are from the base frame, to get transformation from\n # one frame to the other, the first one needs to be inversed.\n # q = q1^{-1} * q2\n q = quaternion_multiply(quaternion_inverse(sample[source_frame, 3:]),\n sample[dest_frame, 3:])\n return euler_from_quaternion(q)\n\n\ndef get_angle_array(sample, angles_idx):\n angles = [get_angles(sample, s, d) for s, d in angles_idx]\n return np.hstack(angles)\n\n\ndef record_to_angle_array(record, angles_idx):\n return np.vstack([get_angle_array(sample, angles_idx)\n for sample in record])\n\n\ndef db_to_list_of_angle_arrays(db):\n angle_idx = angles_indices(db.marker_names)\n return [record_to_angle_array(r[0], angle_idx) for r in db.records]\n\n\ndef db_to_angles_and_vels(db, vel_delay=1, vel_padding='zeros'):\n angles = db_to_list_of_angle_arrays(db)\n vels = [delayed_velocities(vel_delay, angle, padding=vel_padding)\n for angle in angles]\n return angles, vels\n\n\ndef get_bounds(vels):\n min_vel = np.min(np.vstack(vels))\n max_vel = np.max(np.vstack(vels))\n return min_vel, max_vel\n\n\ndef filter_values(data, bounds):\n \"\"\"Filter big values in data, according to given bounds.\n :param data: numpy array\n :param bounds: (min, max)\n \"\"\"\n cut = lambda x: np.maximum(np.minimum(x, bounds[1]), bounds[0])\n return map(cut, data)\n\n\ndef db_to_binned_hist_matrix(db, vel_delay=1, vel_padding='zeros',\n 
nb_bins=16, bounds=None, vel_bounds=None, rel_h=.3, fft=True):\n \"\"\"Compute the histogram matrix from the database, using binned histograms\n smoothed by a Gaussian kernel.\n\n :param db:\n the Database\n\n :param vel_delay, vel_padding:\n delayed velocity parameters\n\n :param nb_bins: int,\n number of bins (output dimension of histograms for a joint)\n\n :param bounds, vel_bounds: (min, max), couples of floats\n bounds on angle and velocities, if given, data\n is cut to fit in bounds, else they are computed from data.\n\n :param rel_h: float,\n relative width of the Gaussian smoother\n\n :param fft: bool,\n whether to use fft convolution (default)\n\n :return: (nb ex, nb features) matrix\n \"\"\"\n angles, vels = db_to_angles_and_vels(db, vel_delay=1, vel_padding='zeros')\n # Angle bounds\n if bounds is None:\n bounds = get_bounds(angles)\n else:\n angles = filter_values(angles, bounds)\n # Velocity bounds\n if vel_bounds is None:\n vel_bounds = get_bounds(vels)\n else:\n vels = filter_values(vels, vel_bounds)\n # Histogram are specific to each angle and corresponding velocity\n # Compute Gaussian width from relative width for angles\n h = rel_h * (bounds[1] - bounds[0])\n # Compute gaussian width for velocities\n h_vel = rel_h * (vel_bounds[1] - vel_bounds[0])\n # For fair comparison with 1D hist and VQ\n nb_bins_sqrt = int(np.sqrt(nb_bins))\n to_gaussKDEs2 = lambda x: [ # x = (angles, vels)\n gaussian_kde_2d(\n np.hstack([x[0][:, dim][:, np.newaxis],\n x[1][:, dim][:, np.newaxis]]),\n h, h_vel, nb_bins=nb_bins_sqrt,\n bounds=(np.array([bounds[0], vel_bounds[0]]),\n np.array([bounds[1], vel_bounds[1]])),\n fft=fft)\n for dim in range(x[0].shape[1])]\n kdes = map(to_gaussKDEs2, zip(angles, vels))\n # Each kde is a triplet (x_grid, y_grid, bins)\n # Get and flatten histograms (second element of the couple)\n hist = meta_map(2, lambda x: x[2].flatten())(kdes)\n data_matrix = np.vstack(map(np.hstack, hist))\n return data_matrix\n\n\ndef compact_examples(x):\n \"\"\"Vertically stack list of array and returns stacked\n array and indices to un_compact it.\n \"\"\"\n idx = [y.shape[0] for y in x]\n return np.vstack(x), list(np.cumsum(idx))\n\n\ndef un_compact_examples(v, idx):\n return [v[i:j, :]\n for i, j in zip([0] + idx[:-1], idx)]\n\n\ndef db_to_VQ_hist_matrix(db, vel_delay=1, vel_padding='zeros',\n nb_bins=16, bounds=None, vel_bounds=None, soft_vq=None):\n \"\"\"Compute the histogram matrix from the database, using binned histograms\n smoothed by a Gaussian kernel.\n\n :param db:\n the Database\n\n :param vel_delay, vel_padding:\n delayed velocity parameters\n\n :param nb_bins: int,\n number of bins (output dimension of histograms for a joint)\n\n :param bounds, vel_bounds: (min, max), couples of floats\n bounds on angle and velocities, if given, data\n is cut to fit in bounds\n\n :param soft_vq:\n if not None (default) soft vector quantization parameter.\n \"\"\"\n angles, vels = db_to_angles_and_vels(db, vel_delay=1, vel_padding='zeros')\n # Angle bounds\n if bounds is not None:\n angles = filter_values(angles, bounds)\n # Velocity bounds\n if vel_bounds is not None:\n vels = filter_values(vels, vel_bounds)\n # For each DOF and each example compute 2D angle-vel vects\n # angles \/ vels => [(time, dof) for each example]\n nb_dofs = angles[0].shape[1]\n nb_ex = len(angles)\n data = [[np.hstack([a[:, dof][:, np.newaxis],\n v[:, dof][:, np.newaxis]])\n for a, v in zip(angles, vels)]\n for dof in range(nb_dofs)]\n compacted = map(compact_examples, data)\n # Whiten data for each dof\n 
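# whiten rescales each column to unit variance so that k-means weights the\n    # angle and velocity dimensions comparably\n    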
all_data = [whiten(d) for d, _ in compacted]\n\n # Compute centroids for each DOF\n centro = [kmeans(d, nb_bins, iter=20)[0] for d in all_data]\n # Compute hitograms for each sample\n histos = [get_histos(d, c, soft=soft_vq)\n for d, c in zip(all_data, centro)]\n # Group and sum by example\n histos_by_ex = [un_compact_examples(h, c[1])\n for h, c in zip(histos, compacted)]\n ex_histos = np.array([[h.sum(axis=0) for h in hs] for hs in histos_by_ex])\n # ex_histo is now (nb_dofs, nb_ex, nb_bins)\n Xdata = np.swapaxes(ex_histos, 0, 1).reshape((nb_ex, nb_bins * nb_dofs))\n Xdata \/= Xdata.sum(axis=1)[:, np.newaxis]\n return Xdata\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_358","text":"import os\nimport argparse\nimport numpy as np\nimport paddle.fluid as fluid\nfrom scipy import sparse\nimport pdb\n\nfrom utils import *\nfrom layer import vmat, qe\n\n\nparser = argparse.ArgumentParser(description='GNN_Reranking')\nparser.add_argument('--data_path', \n type=str, \n default='..\/features\/market_88_test.pkl',\n help='path to dataset')\nparser.add_argument('--k1', \n type=int, \n default=26, # Market-1501\n # default=60, # Veri-776\n help='parameter k1')\nparser.add_argument('--k2', \n type=int, \n default=7, # Market-1501\n # default=10, # Veri-776\n help='parameter k2')\n\nargs = parser.parse_args()\n\ndef main(): \n data = load_pickle(args.data_path)\n k1 = args.k1\n k2 = args.k2\n \n query_cam = data['query_cam']\n query_label = data['query_label']\n gallery_cam = data['gallery_cam']\n gallery_label = data['gallery_label']\n \n gallery_feature = data['gallery_f']\n query_feature = data['query_f']\n total_features = np.concatenate((query_feature,gallery_feature),axis=0)\n query_num = query_feature.shape[0]\n\n X_u = fluid.layers.data(name=\"all_fea\",shape=[total_features.shape[0], total_features.shape[1]],dtype='float32')\n original_score = fluid.layers.matmul(X_u, X_u, transpose_x=False, transpose_y=True)\n\n _, initial_rank_k1 = fluid.layers.topk(original_score, k=k1)\n S, initial_rank_k2 = fluid.layers.topk(original_score, k=k2)\n\n initial_rank_k1_fp32 = fluid.layers.cast(initial_rank_k1, dtype='float32')\n initial_rank_k2_fp32 = fluid.layers.cast(initial_rank_k2, dtype='float32')\n\n # stage 1\n A = vmat(initial_rank_k1_fp32)\n S = S * S\n\n # stage 2\n if k2 != 1: \n for i in range(2):\n AT = fluid.layers.transpose(A, perm=[1,0])\n A = A + AT\n A = qe(A, initial_rank_k2_fp32, S)\n A_norm = fluid.layers.sqrt(fluid.layers.reduce_sum(fluid.layers.square(A), dim=1))\n A = fluid.layers.elementwise_div(A, A_norm, axis=0)\n\n score = fluid.layers.matmul(A, A, transpose_x=False, transpose_y=True)\n\n use_cuda = True\n place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() \n exe = fluid.Executor(place) \n\n exe.run(fluid.default_startup_program()) \n\n outs = exe.run(\n feed={'all_fea':total_features},\n fetch_list=[score])\n\n cosine_similarity = np.array(outs[0])\n indices = np.argsort(-cosine_similarity[:query_num, query_num:], axis=1)\n indices = indices.reshape(query_feature.shape[0], gallery_feature.shape[0])\n evaluate_ranking_list(indices, query_label, query_cam, gallery_label, gallery_cam)\n\nif __name__ == '__main__':\n main()\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_359","text":"lanl\/pyDRESCALk\n#@author: ,\nfrom scipy.stats import wilcoxon\nfrom . 
import config\nfrom .dist_clustering import *\nfrom .pyDRESCAL import *\nfrom .plot_results import *\n\nclass sample():\n \"\"\"\n Generates perturbed version of data based on sampling distribution.\n\n Args:\n data (ndarray, sparse matrix): Array of which to find a perturbation.\n noise_var (float): The perturbation amount.\n method (str) : Method for sampling (uniform\/poisson)\n seed (float),optional : Set seed for random data generation\n \"\"\"\n\n\n @comm_timing()\n def __init__(self, data, noise_var, method, params,seed=None):\n self.np = params.np\n self.X = data\n self.noise_var = noise_var\n self.seed = seed\n if self.seed != None:\n self.np.random.seed(self.seed)\n self.method = method\n self.X_per = 0\n\n @comm_timing()\n def randM(self):\n \"\"\"\n Multiplies each element of X by a uniform random number in (1-epsilon, 1+epsilon).\n \"\"\"\n\n M = 2 * self.noise_var * self.np.random.random_sample(self.X.shape).astype(self.X.dtype) + self.noise_var\n M = M + 1\n self.X_per = self.np.multiply(self.X, M)\n\n @comm_timing()\n def poisson(self):\n \"\"\"Resamples each element of a matrix from a Poisson distribution with the mean set by that element. Y_{i,j} = Poisson(X_{i,j}\"\"\"\n\n self.X_per = self.np.random.poisson(self.X).astype(self.X.dtype)\n\n @comm_timing()\n def fit(self):\n r\"\"\"\n Calls the sub routines to perform resampling on data\n\n Returns\n -------\n X_per : ndarry\n Perturbed version of data\n \"\"\"\n\n if self.method == 'uniform':\n self.randM()\n elif self.method == 'poisson':\n self.poisson()\n return self.X_per\n\n\nclass pyDRESCALk():\n r\"\"\"\n Performs the distributed RESCAL decomposition with custom clustering for estimating hidden factors k\n\n Parameters:\n A_ij (ndarray) : Distributed Data\n factors (tuple), optional : Distributed factors W and H\n params (class): Class which comprises following attributes\n params.init (str) : RESCAL initialization(rand\/nnsvd)\n params.comm1 (object): Global Communicator\n params.comm (object): Modified communicator object\n params.k (int) : Rank for decomposition\n params.m (int) : Global dimensions m\n params.n (int) : Global dimensions n\n params.p_r (int): Cartesian grid row count\n params.p_c (int): Cartesian grid column count\n params.row_comm (object) : Sub communicator along row\n params.col_comm (object) : Sub communicator along columns\n params.A_update (bool) : flag to set W update True\/False\n params.norm (str): RESCAL norm to be minimized\n params.method(str): RESCAL optimization method\n params.eps (float) : Epsilon value\n params.verbose (bool) : Flag to enable\/disable display results\n params.save_factors (bool) : Flag to enable\/disable saving computed factors\n params.perturbations (int) : Number of Perturbations for clustering\n params.noise_var (float) : Set noise variance for perturbing the data\n params.sill_thr (float) : Set the sillhouette threshold for estimating K with p-test\n params.start_k (int) : Starting range for Feature search K\n params.end_k (int) : Ending range for Feature search K\"\"\"\n\n @comm_timing()\n def __init__(self, X_ijk, factors=None, params=None):\n self.X_ijk = X_ijk\n self.local_m, self.local_n, self.local_n = len(self.X_ijk),self.X_ijk[0].shape[0],self.X_ijk[0].shape[1]\n self.params = params\n self.np = self.params.np\n self.comm1 = self.params.comm1\n self.rank = self.comm1.rank\n self.p_r, self.p_c = self.params.p_r, self.params.p_c\n self.fpath = self.params.fpath\n self.fname = self.params.fname\n #self.fname = \"Testrescalk\"\n self.p = self.p_r * 
self.p_c\n if self.p_r != 1 and self.p_c != 1:\n self.topo = '2d'\n else:\n self.topo = '1d'\n self.sampling = var_init(self.params,'sampling',default='uniform')\n self.perturbations = var_init(self.params,'perturbations',default=10)\n self.noise_var = var_init(self.params,'noise_var',default=.03)\n self.Rall = 0\n self.Aall = 0\n self.recon_err = 0\n self.AvgR = 0\n self.AvgG = 0\n self.col_err = 0\n self.clusterSilhouetteCoefficients, self.avgSilhouetteCoefficients = 0, 0\n self.L_errDist = 0\n self.avgErr = 0\n self.start_k = self.params.start_k # ['start_k']\n self.end_k = self.params.end_k # ['end_k']\n self.step_k = var_init(self.params,'step_k',default=1)\n self.verbose = var_init(params,'verbose',default=True)\n\n\n @comm_timing()\n def fit(self):\n r\"\"\"\n Calls the sub routines to perform distributed RESCAL decomposition and then custom clustering to estimate k\n\n Returns\n -------\n nopt : int\n Estimated value of latent features\n \"\"\"\n SILL_MIN = []\n SILL_AVG = []\n errRegres = []\n errRegresTol = []\n RECON = []\n RECON1 = []\n self.params.results_paths = self.params.results_path +self.params.fname + '\/'\n if self.rank == 0:\n try: os.makedirs(self.params.results_paths)\n except: pass\n for self.k in range(self.start_k, self.end_k + 1,self.step_k):\n self.params.k = self.k\n self.pyrescalk_per_k()\n SILL_MIN.append(self.np.around(self.np.min(self.clusterSilhouetteCoefficients), 2))\n SILL_AVG.append(self.np.around(self.np.mean(self.clusterSilhouetteCoefficients), 2))\n errRegres.append([self.col_err])\n errRegresTol.append([self.recon_err])\n RECON.append(self.L_errDist)\n RECON1.append(self.avgErr)\n if self.rank==0:\n plot_results_paper(self.start_k, self.end_k,self.step_k, RECON, SILL_AVG, SILL_MIN, self.params.results_path, self.fname)\n\n\n @comm_timing()\n def pyrescalk_per_k(self):\n \"\"\"Performs RESCAL decomposition and clustering for each k to estimate silhouette statistics\"\"\"\n self.params.results_paths = self.params.results_path+ str(self.k) + '\/'\n if self.rank == 0:\n try: os.makedirs(self.params.results_paths)\n except: pass\n results = []\n if self.rank == 0: print('*************Computing for k=', self.k, '************')\n for i in range(self.perturbations):\n if self.rank == 0: print('Current perturbation =', i)\n self.params.perturbation = i\n data = sample(data=self.X_ijk, noise_var=self.noise_var, method=self.sampling,params=self.params, seed=self.rank*1000+i*100).fit()\n self.params.A_update = True\n results.append(pyDRESCAL(data, factors=None, params=self.params).fit())\n self.Aall = self.np.stack([results[i][0] for i in range(self.perturbations)],axis=-1)\n #self.Aall = self.Aall.reshape(self.Aall.shape[0], self.k, self.perturbations, order='F') #n x k x perturbations\n self.Rall = self.np.stack([results[i][2] for i in range(self.perturbations)],axis=-1)\n #self.Rall = self.Rall.reshape(results[0][2].shape[0], self.k, self.Rall.shape[1], self.perturbations) #m x k x k x perturbations\n self.recon_err = [results[i][3] for i in range(self.perturbations)]\n [processAvg, processSTD, self.Rall, self.clusterSilhouetteCoefficients, self.avgSilhouetteCoefficients,\n idx] = custom_clustering(self.Aall, self.Rall, self.params).fit()\n self.AvgR = self.np.median(self.Rall, axis=-1)\n self.AvgA = processAvg\n self.params.A_update = False\n regressH = pyDRESCAL(self.X_ijk, factors=[self.AvgA, self.AvgR], params=self.params)\n self.AvgA, self.AvgA_j, self.AvgR, self.L_errDist = regressH.fit()\n self.avgErr = np.mean(self.recon_err)\n cluster_stats = 
{'clusterSilhouetteCoefficients': self.clusterSilhouetteCoefficients,\n 'avgSilhouetteCoefficients': self.avgSilhouetteCoefficients, \\\n 'avgErr': self.avgErr, 'recon_err': self.recon_err,'L_errDist':self.L_errDist}\n data_writer = data_write(self.params)\n data_writer.save_factors([self.AvgA, self.AvgR], reg=True)\n data_writer.save_cluster_results(cluster_stats)\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_360","text":"bsuite\/bsuite\/models\/agent_bootdqn.py1-10\nimport dm_env\nimport numpy as np\nimport torch\nimport torch.nn.functional as functional\nimport torch.optim as optim\nimport typing\nimport wandb\n\nfrom utils.memory import Experience, ReplayMemory, PrioritizedReplayMemory\nfrom models.qnet_MCdrop import Dqn, DuelDQN, TwoHeadDqn\n\nfrom scipy.optimize import minimize\nfrom collections import namedtuple, deque, Counter\n\n# from qnet import Dqn, DuelDQN\n\ndef get_iv_weights(variances):\n '''\n Returns Inverse Variance weights\n Params\n ======\n variances (numpy array): variance of the targets\n '''\n weights = 1\/variances\n (weights)\n weights = weights\/np.sum(weights)\n (weights)\n return weights\n\ndef compute_eff_bs(weights):\n # Compute original effective mini-batch size\n eff_bs = 1\/np.sum(np.square(weights))\n #print(eff_bs)\n return eff_bs\n\ndef get_optimal_xi(variances, minimal_size, epsilon_start):\n minimal_size = min(variances.shape[0] - 1, minimal_size)\n if compute_eff_bs(get_iv_weights(variances)) >= minimal_size:\n return 0 \n fn = lambda x: np.abs(compute_eff_bs(get_iv_weights(variances+np.abs(x))) - minimal_size)\n epsilon = minimize(fn, 0, method='Nelder-Mead', options={'fatol': 1.0, 'maxiter':100})\n xi = np.abs(epsilon.x[0])\n xi = 0 if xi is None else xi\n return xi\n\n\nclass BootstrapDQN:\n def __init__(self,\n opt,\n action_spec: dm_env.specs.DiscreteArray,\n observation_spec: dm_env.specs.Array,\n num_ensemble: int,\n net_seed: int,\n device: torch.device,\n settings: dict) -> None:\n \"\"\"\n Initializes the agent, constructs the qnet and the q_target, initializes the optimizer and ReplayMemory.\n Args:\n action_spec(dm_env.specs.DiscreteArray): description of the action space of the environment\n observation_spec(dm_env.specs.Array): description of observations form the environment\n device(str): \"gpu\" or \"cpu\"\n settings(dict): dictionary with settings\n \"\"\"\n self.device = device\n self.opt = opt\n self.num_ensemble = num_ensemble\n action_size = action_spec.num_values\n state_size = np.prod(observation_spec.shape)\n self.action_size = action_size\n self.state_size = state_size\n self.batch_size = settings['batch_size']\n self.noisy_nets = settings['qnet_settings']['noisy_nets']\n\n self.qnets, self.tnets, self.optims = [], [], []\n for i in range(num_ensemble):\n if settings[\"duelling_dqn\"]:\n qnet = DuelDQN(state_size, action_size, settings['qnet_settings']).to(device)\n q_target = DuelDQN(state_size, action_size, settings['qnet_settings']).to(device)\n else:\n qnet = Dqn(state_size, action_size, settings['qnet_settings'], seed=opt.net_seed+i).to(device)\n q_target = Dqn(state_size, action_size, settings['qnet_settings'], seed=opt.net_seed+i).to(device)\n self.drop_porb = 0\n\n self.qnets.append(qnet)\n q_target.load_state_dict(qnet.state_dict())\n self.tnets.append(q_target)\n self.optims.append(optim.Adam(qnet.parameters(), lr=settings['lr']))\n\n self.epsilon = settings[\"epsilon_start\"]\n self.decay = settings[\"epsilon_decay\"]\n self.epsilon_min = settings[\"epsilon_min\"]\n self.gamma 
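# --- Illustrative sketch (added; not part of the original file) ---------------
# Quick numeric check of the helpers defined above (get_iv_weights,
# compute_eff_bs, get_optimal_xi): inverse-variance weights
# w_i = (1/var_i) / sum_j (1/var_j) and effective batch size
# N_eff = 1 / sum_i w_i^2.  Uniform variances give N_eff equal to the batch
# size, a single low-variance sample concentrates the weight and shrinks N_eff,
# and adding a positive offset xi to every variance (what get_optimal_xi
# searches for with Nelder-Mead) flattens the weights again.
import numpy as np

def iv_weights(var):
    w = 1.0 / var
    return w / w.sum()

def eff_bs(w):
    return 1.0 / np.sum(w ** 2)

var = np.array([1.0, 1.0, 1.0, 0.01])
print(eff_bs(iv_weights(np.ones(4))))   # 4.0   (uniform -> full batch)
print(eff_bs(iv_weights(var)))          # ~1.06 (one sample dominates)
print(eff_bs(iv_weights(var + 10.0)))   # ~3.99 (large xi -> nearly uniform)
# ------------------------------------------------------------------------------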
= settings['gamma']\n\n self.start_optimization = settings[\"start_optimization\"]\n self.update_qnet_every = settings[\"update_qnet_every\"]\n self.update_target_every = settings[\"update_target_every\"]\n self.number_steps = 0\n self.ddqn = settings[\"ddqn\"]\n\n self.xi = settings[\"xi\"]\n self.dynamic_xi = settings[\"dynamic_xi\"]\n self.minimal_eff_bs_ratio = settings[\"minimal_eff_bs_ratio\"]\n self.minimal_eff_bs = int(self.batch_size * self.minimal_eff_bs_ratio)\n self.mask_prob = settings[\"mask_prob\"]\n\n self._rng = np.random.RandomState(net_seed)\n self._active_head = self._rng.randint(self.num_ensemble)\n # Initialize replay memory\n self.prioritized_replay = settings[\"prioritized_buffer\"]\n if self.prioritized_replay:\n self.memory = PrioritizedReplayMemory(device, settings[\"buffer_size\"], self.gamma, settings[\"n_steps\"],\n settings[\"alpha\"], settings[\"beta0\"], settings[\"beta_increment\"])\n else:\n self.memory = ReplayMemory(device, settings[\"buffer_size\"], self.gamma, settings[\"n_steps\"])\n return\n\n def select_action(self, timestep: dm_env.TimeStep) -> int:\n \"\"\"\n Returns an action following an epsilon-greedy policy.\n Args:\n timestep(dm_env.TimeStep): An observation from the environment\n\n Returns:\n int: The chosen action.\n \"\"\"\n observation = np.array(timestep.observation).flatten()\n observation = torch.from_numpy(observation).float().to(self.device)\n self.number_steps += 1\n\n if not self.noisy_nets:\n self.update_epsilon()\n\n if np.random.rand() < self.epsilon:\n return np.random.choice(self.action_size)\n else:\n return int(self.qnets[self._active_head].get_max_action(observation))\n\n\n def greedy(self, Q_ensemble):\n mean_Q = np.mean(Q_ensemble, 0)\n # ------------------- action selection ------------------- #\n # if self.opt.select_action == \"vote\":\n actions = [np.argmax(Q) for Q in Q_ensemble]\n data = Counter(actions)\n action = data.most_common(1)[0][0]\n # elif self.opt.select_action == \"mean\":\n # action = np.argmax(mean_Q)\n\n return action\n\n def select_action_test(self, timestep: dm_env.TimeStep) -> int:\n \"\"\"\n Returns an action following an epsilon-greedy policy.\n Args:\n timestep(dm_env.TimeStep): An observation from the environment\n\n Returns:\n int: The chosen action.\n \"\"\"\n observation = np.array(timestep.observation).flatten()\n observation = torch.from_numpy(observation).float().to(self.device)\n # self.number_steps += 1\n\n with torch.no_grad():\n Q_ensemble = np.array([qnet(observation).cpu().data.numpy()\n for qnet in self.qnets])\n\n return int(self.greedy(Q_ensemble))\n\n def update_epsilon(self) -> None:\n \"\"\"\n Decays epsilon until self.epsilon_min\n Returns:\n None\n \"\"\"\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.decay\n\n @staticmethod\n def calc_loss(q_observed: torch.Tensor,\n q_target: torch.Tensor,\n weights: torch.Tensor) -> typing.Tuple[torch.Tensor, np.float64]:\n \"\"\"\n Returns the mean weighted MSE loss and the loss for each sample\n Args:\n q_observed(torch.Tensor): calculated q_value\n q_target(torch.Tensor): target q-value\n weights: weights of the batch samples\n\n Returns:\n tuple(torch.Tensor, np.float64): mean squared error loss, loss for each indivdual sample\n \"\"\"\n losses = functional.mse_loss(q_observed, q_target, reduction='none')\n loss = (weights * losses).sum()\n return loss, losses.cpu().detach().numpy() + 1e-8\n\n def update(self,\n step: dm_env.TimeStep,\n action: int,\n next_step: dm_env.TimeStep) -> None:\n \"\"\"\n Adds experience to 
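# --- Illustrative sketch (added; not part of the original file) ---------------
# The two ensemble action-selection rules that appear in greedy() above
# (majority vote over per-head argmax actions vs. argmax of the mean Q-values),
# shown on a toy 3-head, 2-action ensemble.  They can disagree when one head
# is very confident about a minority action.
import numpy as np
from collections import Counter

Q_ensemble = np.array([[1.0, 0.0],    # head 0 prefers action 0
                       [0.9, 0.8],    # head 1 prefers action 0
                       [0.0, 5.0]])   # head 2 strongly prefers action 1

vote_action = Counter(int(np.argmax(Q)) for Q in Q_ensemble).most_common(1)[0][0]
mean_action = int(np.argmax(Q_ensemble.mean(axis=0)))
print(vote_action, mean_action)   # 0 1  -> vote picks 0, mean-Q picks 1
# ------------------------------------------------------------------------------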
the replay memory, performs an optimization_step and updates the q_target neural network.\n Args:\n step(dm_env.TimeStep): Current observation from the environment\n action(int): The action that was performed by the agent.\n next_step(dm_env.TimeStep): Next observation from the environment\n Returns:\n None\n \"\"\"\n\n logs = []\n observation = np.array(step.observation).flatten()\n next_observation = np.array(next_step.observation).flatten()\n done = next_step.last()\n\n if next_step.last():\n self._active_head = self._rng.randint(self.num_ensemble)\n\n exp = Experience(observation,\n action,\n next_step.reward,\n next_step.discount,\n next_observation,\n 0,\n done,\n self._rng.binomial(1, self.mask_prob, self.num_ensemble).astype(np.float32)\n )\n self.memory.add(exp)\n\n if self.memory.number_samples() < self.start_optimization:\n return logs\n\n if self.number_steps % self.update_qnet_every == 0:\n s0, a0, n_step_reward, discount, s1, _, dones, indices, weights, masks = self.memory.sample_batch(self.batch_size)\n logs = self.optimization_step(s0, a0, n_step_reward, discount, s1, indices, weights, masks)\n\n if self.number_steps % self.update_target_every == 0:\n for i in range(self.num_ensemble):\n self.tnets[i].load_state_dict(self.qnets[i].state_dict())\n return logs\n\n def optimization_step(self,\n s0: torch.Tensor,\n a0: torch.Tensor,\n n_step_reward: torch.Tensor,\n discount: torch.Tensor,\n s1: torch.Tensor,\n indices: typing.Optional[torch.Tensor],\n weights: typing.Optional[torch.Tensor],\n masks: torch.Tensor) -> None:\n \"\"\"\n Calculates the Bellmann update and updates the qnet.\n Args:\n s0(torch.Tensor): current state\n a0(torch.Tensor): current action\n n_step_reward(torch.Tensor): n-step reward\n discount(torch.Tensor): discount factor\n s1(torch.Tensor): next state\n indices(torch.Tensor): batch indices, needed for prioritized replay. 
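# --- Clarifying note (added; not part of the original file) -------------------
# The last field packed into Experience above is a per-head bootstrap mask,
# drawn once per transition as Binomial(1, mask_prob) for each of the
# num_ensemble heads; the optimization step below restricts head i to the
# transitions selected by masks[:, i, 0], which is what keeps the bootstrapped
# heads trained on different subsets of the replay data.  A minimal sketch:
import numpy as np

num_ensemble, mask_prob = 5, 0.8
mask = np.random.binomial(1, mask_prob, num_ensemble).astype(np.float32)
print(mask)   # e.g. [1. 0. 1. 1. 1.]  (one inclusion flag per head; varies per draw)
# ------------------------------------------------------------------------------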
Not used yet.\n weights(torch.Tensor): weights needed for prioritized replay\n\n Returns:\n None\n \"\"\"\n\n with torch.no_grad():\n if self.noisy_nets:\n self.q_target.reset_noise()\n self.qnet.reset_noise()\n\n # Calculating the target values\n next_q_vals = torch.stack([self.tnets[i](s1) for i in range(self.num_ensemble)])\n next_actions = torch.stack([next_q_vals[i].max(1)[1] for i in range(self.num_ensemble)])\n # if self.ddqn:\n # a1 = torch.argmax(self.qnet(s1), dim=1).unsqueeze(-1)\n # next_q_val = next_q_vals.gather(1, a1).squeeze()\n # else:\n # next_q_val = torch.max(next_q_vals, dim=2).values\n q_targets = torch.stack([n_step_reward.squeeze() + self.gamma * discount.squeeze() * torch.max(next_q_vals[i], dim=1).values\\\n for i in range(self.num_ensemble)])\n # print(discount.size(), next_q_vals.size(), next_actions.size())\n q_target_var_all = (self.gamma**2) * (discount.repeat(1, self.action_size)**2) * next_q_vals.var(0)\n\n eff_batch_size_list, xi_list, loss_list = [], [], [] \n for i in range(self.num_ensemble):\n # print(next_actions[i].size(), masks.size(), q_targets.size())\n q_target_var = q_target_var_all.gather(1, next_actions[i].unsqueeze(-1).long())[masks[:, i, 0]]\n # print(q_target_var.size())\n self.xi = get_optimal_xi(q_target_var.detach().cpu().numpy(\n ), self.minimal_eff_bs, self.xi) if self.dynamic_xi else self.xi\n weights = self.get_mse_weights(q_target_var)\n q_observed = self.qnets[i](s0).gather(1, a0.long()).squeeze()[masks[:, i, 0]]\n critic_loss, batch_loss = self.calc_loss(q_observed, q_targets[i][masks[:, i, 0]], weights.to(self.device))\n\n # Backpropagation of the gradients\n self.optims[i].zero_grad()\n critic_loss.backward()\n torch.nn.utils.clip_grad_norm_(self.qnets[i].parameters(), 5)\n self.optims[i].step()\n\n eff_batch_size_list.append(\n compute_eff_bs(weights.detach().cpu().numpy()))\n xi_list.append(self.xi)\n # loss_list.append(loss.item())\n\n # Update replay memory\n self.memory.update_priorities(indices, batch_loss)\n return q_target_var.detach().cpu().numpy(), weights.squeeze().detach().cpu().numpy(), np.mean(eff_batch_size_list), np.mean(xi_list)\n\n def train_log(self, var, weights, eff_batch_size, eps_list):\n wandb.log({\"IV Weights(VAR)\": np.var(weights), \"IV Weights(Mean)\": np.mean(weights),\n \"IV Weights(Min)\": np.min(weights), \"IV Weights(Max)\": np.max(weights), \"IV Weights(Median)\": np.median(weights)}, commit=False)\n wandb.log({\"Variance(Q) (VAR)\": np.var(var), \"Variance(Q) (Mean)\": np.mean(var),\n \"Variance(Q) (Min)\": np.min(var), \"Variance(Q) (Max)\": np.max(var), \"Variance(Q) (Median)\": np.median(var)}, commit=False)\n wandb.log(\n {\"Avg Effective Batch Size \/ Episode\": np.mean(eff_batch_size), \"Avg Epsilon \/ Episode\": np.mean(eps_list),\n \"Max Epsilon \/ Episode\": np.max(eps_list), \"Median Epsilon \/ Episode\": np.median(eps_list), \n \"Min Epsilon \/ Episode\": np.min(eps_list)}, commit=False)\n\n def get_mse_weights(self, variance):\n return torch.ones(variance.size()) \/ variance.size()[0]\n\n\nclass EnsembleDQN(BootstrapDQN):\n def __init__(self,\n opt,\n action_spec: dm_env.specs.DiscreteArray,\n observation_spec: dm_env.specs.Array,\n num_ensemble: int,\n net_seed: int,\n device: torch.device,\n settings: dict) -> None:\n\n super().__init__(opt, action_spec, observation_spec, num_ensemble, net_seed, device, settings)\n\n def greedy(self, Q_ensemble):\n mean_Q = np.mean(Q_ensemble, 0)\n # ------------------- action selection ------------------- #\n # if self.opt.select_action == 
\"vote\":\n # actions = [np.argmax(Q) for Q in Q_ensemble]\n # data = Counter(actions)\n # action = data.most_common(1)[0][0]\n # elif self.opt.select_action == \"mean\":\n action = np.argmax(mean_Q)\n\n return action\n\n def select_action(self, timestep: dm_env.TimeStep) -> int:\n \"\"\"\n Returns an action following an epsilon-greedy policy.\n Args:\n timestep(dm_env.TimeStep): An observation from the environment\n\n Returns:\n int: The chosen action.\n \"\"\"\n observation = np.array(timestep.observation).flatten()\n observation = torch.from_numpy(observation).float().to(self.device)\n self.number_steps += 1\n\n for qnet in self.qnets:\n qnet.eval()\n\n with torch.no_grad():\n Q_ensemble = np.array([qnet(observation).cpu().data.numpy()\n for qnet in self.qnets])\n\n if not self.noisy_nets:\n self.update_epsilon()\n\n if np.random.rand() < self.epsilon:\n return np.random.choice(self.action_size)\n else:\n return int(self.greedy(Q_ensemble))\n\n def select_action_test(self, timestep: dm_env.TimeStep) -> int:\n \"\"\"\n Returns an action following an epsilon-greedy policy.\n Args:\n timestep(dm_env.TimeStep): An observation from the environment\n\n Returns:\n int: The chosen action.\n \"\"\"\n observation = np.array(timestep.observation).flatten()\n observation = torch.from_numpy(observation).float().to(self.device)\n self.number_steps += 1\n\n for qnet in self.qnets:\n qnet.eval()\n\n with torch.no_grad():\n Q_ensemble = np.array([qnet(observation).cpu().data.numpy()\n for qnet in self.qnets])\n\n return int(self.greedy(Q_ensemble))\n\n\n\nclass LakshmiBootDQN(BootstrapDQN):\n def __init__(self,\n opt,\n action_spec: dm_env.specs.DiscreteArray,\n observation_spec: dm_env.specs.Array,\n num_ensemble: int,\n net_seed: int,\n device: torch.device,\n settings: dict) -> None:\n\n super().__init__(opt, action_spec, observation_spec, num_ensemble, net_seed, device, settings)\n\n\n self.qnets, self.tnets, self.optims = [], [], []\n for i in range(num_ensemble):\n if settings[\"duelling_dqn\"]:\n qnet = DuelDQN(self.state_size, self.action_size, settings['qnet_settings']).to(device)\n q_target = DuelDQN(self.state_size, self.action_size, settings['qnet_settings']).to(device)\n else:\n qnet = TwoHeadDqn(self.state_size, self.action_size, settings['qnet_settings'], seed=opt.net_seed+i).to(device)\n q_target = TwoHeadDqn(self.state_size, self.action_size, settings['qnet_settings'], seed=opt.net_seed+i).to(device)\n self.drop_porb = 0\n\n self.qnets.append(qnet)\n q_target.load_state_dict(qnet.state_dict())\n self.tnets.append(q_target)\n self.optims.append(optim.Adam(qnet.parameters(), lr=settings['lr']))\n\n def optimization_step(self,\n s0: torch.Tensor,\n a0: torch.Tensor,\n n_step_reward: torch.Tensor,\n discount: torch.Tensor,\n s1: torch.Tensor,\n indices: typing.Optional[torch.Tensor],\n weights: typing.Optional[torch.Tensor],\n masks: torch.Tensor) -> None:\n \"\"\"\n Calculates the Bellmann update and updates the qnet.\n Args:\n s0(torch.Tensor): current state\n a0(torch.Tensor): current action\n n_step_reward(torch.Tensor): n-step reward\n discount(torch.Tensor): discount factor\n s1(torch.Tensor): next state\n indices(torch.Tensor): batch indices, needed for prioritized replay. 
Not used yet.\n weights(torch.Tensor): weights needed for prioritized replay\n\n Returns:\n None\n \"\"\"\n\n with torch.no_grad():\n if self.noisy_nets:\n self.q_target.reset_noise()\n self.qnet.reset_noise()\n\n # Calculating the target values\n next_q_vals_all = torch.stack([torch.stack(self.tnets[i](s1, is_training=True))\n for i in range(self.num_ensemble)])\n next_q_vals, next_q_vals_std = next_q_vals_all[:,0], next_q_vals_all[:,1]\n next_actions = torch.stack([next_q_vals[i].max(1)[1] for i in range(self.num_ensemble)])\n # q_targets_all = torch.stack([n_step_reward.squeeze() + self.gamma * discount.squeeze() * next_q_vals[i]\\\n # for i in range(self.num_ensemble)])\n # print(discount.size(), n_step_reward.size(), next_q_vals.size())\n q_targets_all = torch.stack([n_step_reward.repeat(1, self.action_size) + self.gamma * discount.repeat(1, self.action_size) * next_q_vals[i]\\\n for i in range(self.num_ensemble)])\n q_targets = torch.stack([n_step_reward.squeeze() + self.gamma * discount.squeeze() * torch.max(next_q_vals[i], dim=1).values\\\n for i in range(self.num_ensemble)])\n # print(discount.size(), next_q_vals.size(), next_actions.size())\n # q_target_var_all = (self.gamma**2) * (discount.repeat(1, self.action_size)**2) * next_q_vals.var(0)\n next_q_vals_std = (self.gamma**2) * torch.stack([next_q_vals_std[i].gather(1, next_actions[i].unsqueeze(-1).long()) for i in range(self.num_ensemble)])\n # print(next_q_vals.size(), next_q_vals_std.size(), q_targets.size())\n # print((next_q_vals_std**2 + q_targets**2 - q_targets.mean(0).unsqueeze(-1).repeat(self.num_ensemble,1,1)**2).mean(0).size())\n q_var_mixture = (discount.repeat(1, self.action_size)**2) * (next_q_vals_std**2 + q_targets_all**2 - q_targets_all.mean(0).unsqueeze(0).repeat(self.num_ensemble,1,1)**2).mean(0)\n\n eff_batch_size_list, xi_list, loss_list = [], [], [] \n for i in range(self.num_ensemble):\n # print(next_actions[i].size(), masks.size(), q_targets.size())\n q_target_var = q_var_mixture.gather(1, next_actions[i].unsqueeze(-1).long())[masks[:, i, 0]]\n # print(q_target_var.size())\n self.xi = get_optimal_xi(q_target_var.detach().cpu().numpy(\n ), self.minimal_eff_bs, self.xi) if self.dynamic_xi else self.xi\n weights = self.get_mse_weights(q_target_var)\n q_observed, q_observed_std = self.qnets[i](s0, is_training=True)\n q_observed = q_observed.gather(1, a0.long()).squeeze()#[masks[:, i, 0]]\n q_observed_std = q_observed_std.gather(1, a0.long()).squeeze()#[masks[:,i,0]]\n\n y, mu, std = q_targets, q_observed, q_observed_std\n lossatt = torch.mean((y - mu)**2 \/ (2 * (std**2)) + (1\/2) * torch.log((std**2)))\n\n critic_loss, batch_loss = self.calc_loss(q_observed[masks[:, i, 0]], q_targets[i][masks[:, i, 0]], weights.to(self.device))\n\n # Backpropagation of the gradients\n self.optims[i].zero_grad()\n critic_loss += self.opt.lossatt_weight * lossatt\n critic_loss.backward()\n torch.nn.utils.clip_grad_norm_(self.qnets[i].parameters(), 5)\n self.optims[i].step()\n\n eff_batch_size_list.append(\n compute_eff_bs(weights.detach().cpu().numpy()))\n xi_list.append(self.xi)\n # loss_list.append(loss.item())\n\n # Update replay memory\n self.memory.update_priorities(indices, batch_loss)\n return q_target_var.detach().cpu().numpy(), weights.squeeze().detach().cpu().numpy(), np.mean(eff_batch_size_list), np.mean(xi_list)\n\n\nclass IV_BootstrapDQN(BootstrapDQN):\n def __init__(self,\n opt,\n action_spec: dm_env.specs.DiscreteArray,\n observation_spec: dm_env.specs.Array,\n num_ensemble: int,\n net_seed: int,\n device: 
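# --- Clarifying note with a small check (added; not part of the original file)
# Two ingredients of LakshmiBootDQN.optimization_step above:
#  * q_var_mixture computes (up to the discount factors applied around it) the
#    variance of an equally weighted mixture of the per-head Gaussians
#    N(mu_i, std_i^2):  Var = mean_i(std_i^2) + mean_i(mu_i^2) - (mean_i(mu_i))^2
#  * lossatt is the heteroscedastic Gaussian negative log-likelihood
#    (y - mu)^2 / (2*std^2) + 0.5*log(std^2), up to an additive constant.
# Numeric check of the mixture-variance identity against direct sampling:
import numpy as np

rng = np.random.default_rng(0)
mu = np.array([1.0, 2.0, 4.0])
std = np.array([0.5, 1.0, 0.2])
samples = np.concatenate([rng.normal(m, s, 200000) for m, s in zip(mu, std)])
closed_form = np.mean(std**2) + np.mean(mu**2) - np.mean(mu)**2
print(closed_form, samples.var())   # both ~1.99 (closed form 1.9856; sampling agrees up to noise)
# ------------------------------------------------------------------------------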
torch.device,\n settings: dict) -> None:\n\n super().__init__(opt, action_spec, observation_spec, num_ensemble, net_seed, device, settings)\n\n def iv_weights(self, variance):\n weights = (1. \/ (variance+self.xi))\n weights \/= weights.sum(0)\n return weights\n\n def get_mse_weights(self, variance):\n return self.iv_weights(variance)\n\n\nclass IV_DQN(EnsembleDQN):\n def __init__(self,\n opt,\n action_spec: dm_env.specs.DiscreteArray,\n observation_spec: dm_env.specs.Array,\n num_ensemble: int,\n net_seed: int,\n device: torch.device,\n settings: dict) -> None:\n\n super().__init__(opt, action_spec, observation_spec, num_ensemble, net_seed, device, settings)\n\n def iv_weights(self, variance):\n weights = (1. \/ (variance+self.xi))\n weights \/= weights.sum(0)\n return weights\n\n def get_mse_weights(self, variance):\n return self.iv_weights(variance)\n\nclass IV_LakshmiBootDQN(LakshmiBootDQN):\n def __init__(self,\n opt,\n action_spec: dm_env.specs.DiscreteArray,\n observation_spec: dm_env.specs.Array,\n num_ensemble: int,\n net_seed: int,\n device: torch.device,\n settings: dict) -> None:\n\n super().__init__(opt, action_spec, observation_spec, num_ensemble, net_seed, device, settings)\n\n def iv_weights(self, variance):\n weights = (1. \/ (variance+self.xi))\n weights \/= weights.sum(0)\n return weights\n\n def get_mse_weights(self, variance):\n return self.iv_weights(variance)\n\n\nclass SunriseDQN(BootstrapDQN):\n def __init__(self,\n opt,\n action_spec: dm_env.specs.DiscreteArray,\n observation_spec: dm_env.specs.Array,\n num_ensemble: int,\n net_seed: int,\n device: torch.device,\n settings: dict) -> None:\n\n super().__init__(opt, action_spec, observation_spec, num_ensemble, net_seed, device, settings)\n self.opt = opt\n\n def sunrise_weights(self, variance):\n temp = self.opt.sunrise_temp\n weights = torch.sigmoid(-torch.sqrt(variance)*temp) + 0.5\n return weights\n\n def get_mse_weights(self, variance):\n \treturn self.sunrise_weights(variance)\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_361","text":"mengdong\/mapr-streams-mxnet-face\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom symbol.resnet import *\nfrom symbol.config import config\nfrom symbol.processing import bbox_pred, clip_boxes, nms\nimport face_embedding\nfrom mapr_streams_python import Consumer, KafkaError, Producer\nimport numpy as np\nimport cv2, os, json, time, sys, pickle\nimport mxnet as mx\nimport argparse, random, sklearn\nimport tensorflow as tf\nfrom scipy import misc\nfrom sklearn.decomposition import PCA\nfrom time import sleep\nfrom easydict import EasyDict as edict\nfrom mtcnn_detector import MtcnnDetector\nimport face_image, face_preprocess\nfrom flask import Flask, Response\n\napp = Flask(__name__)\n\n@app.route('\/')\ndef index():\n return Response(kafkastream(),\n mimetype='multipart\/x-mixed-replace; boundary=frame')\n\ndef ch_dev(arg_params, aux_params, ctx):\n new_args = dict()\n new_auxs = dict()\n for k, v in arg_params.items():\n new_args[k] = v.as_in_context(ctx)\n for k, v in aux_params.items():\n new_auxs[k] = v.as_in_context(ctx)\n return new_args, new_auxs\n\ndef resize(im, target_size, max_size):\n \"\"\"\n only resize input image to target size and return scale\n :param im: BGR image input by opencv\n :param target_size: one dimensional size (the short side)\n :param max_size: one dimensional max size (the long side)\n :return:\n \"\"\"\n im_shape = im.shape\n im_size_min = 
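# --- Illustrative sketch (added; not part of the original files) --------------
# Side-by-side look at the per-sample weightings defined by the classes at the
# end of agent_bootdqn.py above: uniform (BootstrapDQN.get_mse_weights),
# inverse-variance with offset xi (IV_* classes: w = 1/(var+xi), normalised),
# and the SUNRISE-style weights sigmoid(-sqrt(var)*T) + 0.5 (not normalised).
# Torch is used to mirror the original code; `temp` stands in for opt.sunrise_temp.
import torch

var = torch.tensor([0.1, 1.0, 10.0])
xi, temp = 0.0, 1.0

uniform = torch.ones_like(var) / var.numel()
iv = 1.0 / (var + xi)
iv = iv / iv.sum(0)
sunrise = torch.sigmoid(-torch.sqrt(var) * temp) + 0.5

print(uniform)   # tensor([0.3333, 0.3333, 0.3333])
print(iv)        # tensor([0.9009, 0.0901, 0.0090])
print(sunrise)   # tensor([0.9216, 0.7689, 0.5406])  high variance -> weight near 0.5
# ------------------------------------------------------------------------------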
np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n im_scale = float(target_size) \/ float(im_size_min)\n if np.round(im_scale * im_size_max) > max_size:\n im_scale = float(max_size) \/ float(im_size_max)\n im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)\n return im, im_scale\n\ndef get_face_embedding(filename, arg_params, aux_params, sym, model, ctx):\n img_orig = cv2.imread(filename)\n img_orig = cv2.cvtColor(img_orig, cv2.COLOR_BGR2RGB)\n img, scale = resize(img_orig.copy(), 600, 1000)\n im_info = np.array([[img.shape[0], img.shape[1], scale]], dtype=np.float32) # (h, w, scale)\n img = np.swapaxes(img, 0, 2)\n img = np.swapaxes(img, 1, 2) # change to (c, h, w) order\n img = img[np.newaxis, :] # extend to (n, c, h, w)\n arg_params[\"data\"] = mx.nd.array(img, ctx)\n arg_params[\"im_info\"] = mx.nd.array(im_info, ctx)\n exe = sym.bind(ctx, arg_params, args_grad=None, grad_req=\"null\", aux_states=aux_params)\n\n exe.forward(is_train=False)\n output_dict = {name: nd for name, nd in zip(sym.list_outputs(), exe.outputs)}\n rois = output_dict['rpn_rois_output'].asnumpy()[:, 1:] # first column is index\n scores = output_dict['cls_prob_reshape_output'].asnumpy()[0]\n bbox_deltas = output_dict['bbox_pred_reshape_output'].asnumpy()[0]\n pred_boxes = bbox_pred(rois, bbox_deltas)\n pred_boxes = clip_boxes(pred_boxes, (im_info[0][0], im_info[0][1]))\n cls_boxes = pred_boxes[:, 4:8]\n cls_scores = scores[:, 1]\n keep = np.where(cls_scores >0.6)[0]\n cls_boxes = cls_boxes[keep, :]\n cls_scores = cls_scores[keep]\n dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets.astype(np.float32), 0.3)\n dets = dets[keep, :]\n bbox = dets[0, :4]\n roundfunc = lambda t: int(round(t\/scale))\n vfunc = np.vectorize(roundfunc)\n bbox = vfunc(bbox)\n f_vector, jpeg = model.get_feature(img_orig, bbox, None)\n fT = f_vector.T\n return fT\n\ndef kafkastream():\n if args.gpuid >= 0:\n ctx = mx.gpu(args.gpuid)\n else:\n ctx = mx.cpu()\n _, arg_params, aux_params = mx.model.load_checkpoint('mxnet-face-fr50', 0)\n arg_params, aux_params = ch_dev(arg_params, aux_params, ctx)\n sym = resnet_50(num_class=2)\n model = face_embedding.FaceModel(args.gpuid)\n\n f1T = get_face_embedding(args.filename, arg_params, aux_params, sym, model, ctx)\n\n c = Consumer({'group.id': args.groupid,\n 'default.topic.config': {'auto.offset.reset': 'earliest', 'enable.auto.commit': 'false'}})\n c.subscribe([args.readstream+':'+args.readtopic])\n running = True\n p = Producer({'streams.producer.default.stream': args.writestream})\n\n while running:\n msg = c.poll(timeout=0)\n if msg is None: continue\n if not msg.error():\n pickle_vector = pickle.loads(msg.value())\n nparr = np.fromstring(pickle_vector[0], np.uint8)\n img_orig = cv2.imdecode(nparr, 1)\n\n bbox_vector = pickle_vector[1]\n print(len(bbox_vector))\n embedding_vector = pickle_vector[2]\n if len(embedding_vector) > 0:\n sim_vector = [np.dot(f, f1T) for f in embedding_vector]\n idx = sim_vector.index(max(sim_vector))\n bbox = bbox_vector[idx]\n sim = sim_vector[idx]\n if sim > args.threshold:\n img = cv2.cvtColor(img_orig, cv2.COLOR_RGB2BGR)\n cv2.rectangle(img, (int(round(bbox[0])), int(round(bbox[1]))),\n (int(round(bbox[2])), int(round(bbox[3]))), (0, 255, 0), 2)\n ret, jpeg = cv2.imencode('.png', img)\n bytecode = jpeg.tobytes()\n time.sleep(args.timeout)\n yield (b'--frame\\r\\n'\n b'Content-Type: image\/png\\r\\n\\r\\n' + bytecode + b'\\r\\n\\r\\n')\n if args.writetostream:\n 
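# --- Illustrative sketch (added; not part of the original script) -------------
# The scale factor chosen by the resize() helper above, in isolation: the short
# side is scaled to `target_size` unless that would push the long side past
# `max_size`, in which case the long side is pinned to `max_size` instead.
import numpy as np

def scale_for(shape, target_size=600, max_size=1000):
    smin, smax = float(np.min(shape)), float(np.max(shape))
    s = target_size / smin
    if np.round(s * smax) > max_size:
        s = max_size / smax
    return s

print(scale_for((480, 640)))    # 1.25    (480 -> 600, long side 800 <= 1000)
print(scale_for((480, 1920)))   # ~0.5208 (long side capped at 1000)
# ------------------------------------------------------------------------------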
p.produce(args.writetopic, jpeg.tostring())\n print(args.writetopic)\n elif msg.error().code() != KafkaError._PARTITION_EOF:\n print(msg.error())\n running = False\n\n c.close()\n p.flush()\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='mapr consumer settings')\n parser.add_argument('--groupid', default='dong001', help='mapr consumer to read from')\n parser.add_argument('--gpuid', default='-1', type=int, help='')\n parser.add_argument('--port', default='5013', type=int, help='')\n parser.add_argument('--threshold', default='0.3', type=float, help='')\n parser.add_argument('--readstream', default='\/tmp\/processedvideostream', help='')\n parser.add_argument('--writestream', default='\/tmp\/identifiedstream', help='')\n parser.add_argument('--timeout', default='0.3', type=float, help='')\n parser.add_argument('--writetostream', default='0', type=int, help='')\n parser.add_argument('--writetopic', default='sam', help='topic to write to')\n parser.add_argument('--readtopic', default='topic1', help='topic to write to')\n parser.add_argument('--filename', default='sam_.jpg', help='')\n args = parser.parse_args()\n app.run(host='0.0.0.0', port=args.port, debug=True)\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_362","text":"# cifar10_svm.py\n\n# Support Vector Machine (SVM)\n\nimport time\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom sklearn import model_selection\nfrom scipy.io import loadmat\n\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.metrics import hinge_loss\nfrom sklearn.metrics import classification_report, f1_score, accuracy_score, confusion_matrix\n\ndef run_svc(svc, title):\n # Fit model\n start = time.time()\n svc.fit(x_train, y_train)\n end = time.time()\n print(\"\\nModel took %0.2f seconds to train\"%(end - start))\n\n # Calculate predictions\n start = time.time()\n predicted = svc.predict(x_test)\n end = time.time()\n print(\"Model took %0.2f seconds to calculate predictions\"%(end - start))\n\n # Output results\n print('\\naccuracy', accuracy_score(y_test, predicted))\n print('\\nSVM Results for ' + title)\n print('\\nConfusion Matrix:')\n print(confusion_matrix(y_test, predicted))\n\n print('\\nClassification Report:', classification_report(y_test, predicted))\n #print(\"Hinge loss\", hinge_loss(y_test, predicted))\n\n\n# Load datasets from file\nnpzfile = np.load('cifar10.npz')\nprint(npzfile.files)\n\nx_train = npzfile['x_train']\nx_test = npzfile['x_test']\ny_train = npzfile['y_train']\ny_test = npzfile['y_test']\n\n\n# Standardize the columns\nx_train = x_train \/ 255\nx_test = x_test \/ 255\n\n# The model cannot deal with 2D array so we have to convert to 1D array.\nx_train_flat = np.empty(shape=[x_train.shape[0]] + [3072], dtype='float32')\n\nfor i in range(x_train.shape[0]):\n x_train_flat[i,:] = x_train[i,:,:].flatten()\n\n# Flatten x_test array\nx_test_flat = np.empty(shape=[x_test.shape[0]] + [3072], dtype='float32')\nfor i in range(x_test.shape[0]):\n x_test_flat[i,:] = x_test[i,:,:].flatten()\n\nx_train = x_train_flat\nx_test = x_test_flat\ny_train = y_train.ravel()\ny_test = y_test.ravel()\n\nprint('\\n', type(x_train))\nprint('x_train shape:', x_train.shape)\nprint('x_test shape:', x_test.shape)\nprint('y_train shape:', y_train.shape)\nprint('y_test shape:', y_test.shape)\n\n\n# Linear\nsvc = SVC(kernel='linear', C=1)\nrun_svc(svc, 'Linear')\n\n# Radial Basis Function (RBF)\nsvc = SVC(kernel='rbf', gamma=1, C=1)\nrun_svc(svc, 'Radial Basis Function 
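# --- Illustrative sketch (added; not part of cifar10_svm.py) ------------------
# The per-image flattening loop above (filling a preallocated (N, 3072) array
# with x[i].flatten()) is equivalent to a single C-ordered reshape, which is
# shorter and avoids the Python-level loop.
import numpy as np

x = np.arange(2 * 32 * 32 * 3, dtype='float32').reshape(2, 32, 32, 3)

flat_loop = np.empty((x.shape[0], 3072), dtype='float32')
for i in range(x.shape[0]):
    flat_loop[i, :] = x[i].flatten()

flat_vec = x.reshape(x.shape[0], -1)          # same values, no explicit loop
print(np.array_equal(flat_loop, flat_vec))    # True
# ------------------------------------------------------------------------------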
(RBF)')\n\n# Polynomial\nsvc = SVC(kernel='poly', degree=5, C=1)\nrun_svc(svc, 'Polynomial)')\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_363","text":"twod_phase.py\n\"\"\"\n2D neural field phase model\n\nall evaluations on NxN matrix.\n\nconvolutions performed on 0,2pi x 0,2pi domain. plotted on -pi,pi x -pi,pi domain.\n\nnotes:\n-\n\ntodo: \n-include methods to get and view slices, steady-state bumps\n\"\"\"\n\n\n\nimport numpy as np\nnp.random.seed(0)\n\nimport matplotlib\n#matplotlib.use(\"Agg\")\n#matplotlib.use(\"GTKAgg\")\n\n# for dynamic print updating\nfrom sys import stdout\nimport sys\nimport getopt\n\n#import twod_phase\nimport collections\nimport matplotlib.pylab as mp\nimport os\n#import scipy as sp\nimport scipy as sp\nfrom scipy.integrate import odeint,dblquad\nfrom scipy.interpolate import interp1d\nfrom scipy.optimize import fsolve\nimport scipy.spatial as spatial\nimport scipy.spatial.distance as dist\nimport scipy.cluster.hierarchy as hier\nimport time\n#from colorsys import hsv_to_rgb\nfrom mpl_toolkits.mplot3d import Axes3D\nimport copy\nimport math\n\n\n\nfrom matplotlib import rc\nrc('text', usetex=True)\nrc('font', family='serif', serif=['Computer Modern Roman'])\n\n# anim\nimport matplotlib.pyplot as plt\n\n\nimport fourier_2d as f2d\nfrom euler import ESolve\nfrom twod_full import SimDat as sd\nfrom twod_full import f,plot_s\nfrom lib import *\n\n#sd = sd(display_params=False)\n\n\nsin = np.sin\ncos = np.cos\npi = np.pi\nsqrt = np.sqrt\nexp = np.exp\n\nperiodization_lower = -5\nperiodization_upper = 5\n\n\ndef usage():\n print \"-l, --use-last\\t\\t: use last data from last sim\"\n print \"-v, --save-last\\t\\t: save last data of current sim\"\n print \"-s, --use-ss\\t\\t: use last saved steady-state data\"\n print \"-e, --save-ss\\t\\t: save solution as steady-state data\"\n print \"-r, --use-random\\t: use random inits\"\n print \"-h, --help\\t\\t: help function\"\n print \"-p, --run-phase\\t\\t: run phase\"\n print \"-f, --run-full\\t\\t: run full\"\n\n\ndef shift(Z,x,y):\n \"\"\"\n shift surface Z by coordinates x,y\n \"\"\"\n N,N = Z.shape\n Nx = int(N*x\/(2*pi))\n Ny = int(N*y\/(2*pi))\n return np.roll(np.roll(Z,Nx,axis=1),Ny,axis=0)\n\n\nclass Phase(sd):\n \"\"\"\n simulate phase equation\n \"\"\"\n def __init__(self,\n check_h=False,\n check_j=False,\n recompute_h=False,\n recompute_j=False,\n recompute_fq=True,\n recompute_phase_lc=False,\n compute_h_error=False,\n new_phase_rhs=False,\n low_memory=False,\n use_last=False,\n save_last=False,\n pertx=False,\n perty=False,\n init_mode='polar',\n dde_T=100,\n dde_dt=.1,\n dde_delay_t=20,\n g=0.,q=0.,\n x0=0,y0=0,\n x1=0,y1=0,\n dde_periodization_lower=-2,\n dde_periodization_upper=2,\n phase_option='full'):\n\n\n \"\"\"\n compute_h_error: True or False. Compute the error between lookup table H_1 and Fourier approximation of H_1\n low_memory: if false, excludes all simulations that are memory-intensive. 
Some plots may not be available.\n \n \"\"\"\n\n \"\"\"\n Sim.__init__(self)\n Kernel.__init__(self)\n \n \"\"\"\n sd.__init__(self,display_params=False)\n \n #SteadyState.__init__(self)\n\n self.init_mode = init_mode\n\n self.x0 = x0 # initial x-coordinate (1st pair)\n self.y0 = y0 # initial y-coordinate (1st pair)\n\n self.x1 = x1 # initial x-coordinate (2nd pair)\n self.y1 = y1 # initial y-coordinate (2nd pair)\n\n self.phase_option = phase_option\n self.new_phase_rhs=new_phase_rhs\n\n self.dde_periodization_lower = dde_periodization_lower\n self.dde_periodization_upper = dde_periodization_upper\n\n self.g = g\n self.q = q\n\n self.dde_T = dde_T\n self.dde_dt = dde_dt\n self.dde_delay_t = dde_delay_t\n self.dde_TN = int(self.dde_T\/self.dde_dt)\n self.dde_t = np.linspace(0,self.dde_T+self.dde_dt,self.dde_TN)\n self.dde_delay_N = int(self.dde_delay_t\/self.dde_dt)\n\n\n self.recompute_h = recompute_h\n self.recompute_j = recompute_j\n self.recompute_phase_lc = recompute_phase_lc\n self.recompute_fq = recompute_fq\n\n self.use_last = use_last\n self.save_last = save_last\n\n self.check_h = check_h\n self.check_j = check_j\n\n self.pertx = pertx\n self.perty = perty\n\n self.dde_dir = 'opt='+str(phase_option)+\\\n '_delayN='+str(self.dde_delay_N)+\\\n '_dt='+str(self.dde_dt)\n\n if (not os.path.exists(self.savedir+'\/'+self.dde_dir)):\n os.makedirs(self.savedir+'\/'+self.dde_dir)\n\n\n self.filename_th1 = self.savedir+'\/'+self.dde_dir+'\/th1_last.dat'\n self.filename_th2 = self.savedir+'\/'+self.dde_dir+'\/th2_last.dat'\n self.filename_thi_t = self.savedir+'\/'+self.dde_dir+'\/thi_t_last.dat'\n\n self.H1,self.H2 = self.H_i()\n self.J1,self.J2 = self.J_i()\n\n print '* Running phase_dde()...'\n self.th1_ph,self.th2_ph = self.phase_dde()\n\n print ' ... done.'\n \n if compute_h_error:\n err_h1,err_j1 = self.HJ_i_error()\n print 'H1_lookup vs H1_fourier error =',err_h1\n print 'J1_lookup vs J1_fourier error =',err_j1\n\n\n def h1_approx(self,x,y,sig=5.,a=.1):\n #return x*exp(-(x**2+y**2)**2)\n # based on numerics h1 seems to be y*exp\n if self.phase_option == 'approx2':\n return x*exp(-(x**2+y**2)**2\/sig**2) - a*sin(x)\n else:\n return x*exp(-(x**2+y**2)**2\/sig**2)\n\n def h2_approx(self,x,y):\n return self.h1_approx(y,x)\n\n def h1_approx_p(self,x,y):\n \"\"\"\n periodized kernel using difference of gaussians\n \"\"\"\n tot = 0\n for n in np.arange(self.dde_periodization_lower,self.dde_periodization_upper+1,1):\n for m in np.arange(self.dde_periodization_lower,self.dde_periodization_upper+1,1):\n tot = tot + self.h1_approx(x+n*2*pi,y+m*2*pi)\n return tot\n\n\n def h2_approx_p(self,x,y):\n \"\"\"\n periodized kernel using difference of gaussians\n \"\"\"\n tot = 0\n for n in np.arange(self.dde_periodization_lower,self.dde_periodization_upper+1,1):\n for m in np.arange(self.dde_periodization_lower,self.dde_periodization_upper+1,1):\n tot = tot + self.h2_approx(x+n*2*pi,y+m*2*pi)\n return tot\n\n\n def phase_dde(self):\n \"\"\"\n the full integro-delay-differential equation using Euler's method\n x: x[:N], x[N:]. x history and y history, respectively. up to N time steps. 
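# --- Illustrative sketch (added; not part of the original class) --------------
# What the *_p "periodized" helpers above do: the aperiodic kernel
# h(x, y) = x * exp(-(x^2 + y^2)^2 / sig^2) is made (approximately)
# 2*pi-periodic by summing lattice translates h(x + 2*pi*n, y + 2*pi*m) over a
# small window of integers n, m (dde_periodization_lower/upper in the class).
# With this fast decay a window of +/-2 already makes the periodicity error tiny.
import numpy as np

def h1(x, y, sig=5.0):
    return x * np.exp(-(x**2 + y**2)**2 / sig**2)

def h1_periodized(x, y, sig=5.0, window=2):
    total = 0.0
    for n in range(-window, window + 1):
        for m in range(-window, window + 1):
            total += h1(x + 2*np.pi*n, y + 2*np.pi*m, sig)
    return total

x, y = 0.7, -1.3
print(abs(h1_periodized(x, y) - h1_periodized(x + 2*np.pi, y)))  # ~0 (periodic)
print(abs(h1(x, y) - h1(x + 2*np.pi, y)))                        # clearly nonzero
# ------------------------------------------------------------------------------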
\n\n todo:\n -start with bard's approximations\n -once the bard method works, move on to Fourier stuff.\n\n I will use this guy's code if I have to:\n https:\/\/zulko.wordpress.com\/2013\/03\/01\/delay-differential-equations-easy-with-python\/\n \"\"\"\n\n file_not_found = False\n while True:\n\n if self.use_last and not(file_not_found):\n if os.path.isfile(self.filename_th1) and\\\n os.path.isfile(self.filename_th2):\n print 'using last'\n th1_0 = np.loadtxt(self.filename_th1)\n th2_0 = np.loadtxt(self.filename_th2)\n break\n else:\n print 'init file not found'\n file_not_found = True\n else:\n #np.random.seed(0)\n if self.init_mode == 'polar':\n print 'using polar init'\n r0 = self.x0#.36219\n nu0 = self.y0#1.2458\n th0 = np.linspace(0,-self.dde_delay_t,self.dde_delay_N)*nu0\n th1_0 = r0*cos(th0)\n th2_0 = r0*sin(th0)\n \n elif self.init_mode == 'cartesian':\n print 'using cartesian init'\n init_angle = np.arctan2(self.y1-self.y0,self.x0-self.x1)\n if init_angle < 0:\n init_angle += 2*pi\n\n print 'initial angle',init_angle\n x_line = np.linspace(self.x0,self.x1,self.dde_delay_N)\n y_line = np.linspace(self.y0,self.y1,self.dde_delay_N)\n th1_0 = x_line\n th2_0 = y_line\n \n if self.pertx:\n print 'Reminder: added small perturbation to x init'\n N = 20\n th1_0[-N:]+=.01*np.exp(-np.linspace(0,N*self.dde_dt,N))\n #th2_0[-150:-145]+=.01\n\n if self.perty:\n print 'Reminder: added small perturbation to y init'\n th1_0[-150:-145]+=.01\n #th2_0[-150:-145]+=.01\n\n\n \n else:\n raise ValueError('no initial choice'+str(self.init_mode))\n\n break\n \n th1 = np.zeros(self.dde_TN)\n th2 = np.zeros(self.dde_TN)\n\n th1[:self.dde_delay_N] = th1_0\n th2[:self.dde_delay_N] = th2_0\n\n # approximate the H function as a negative gaussian derivative: x*exp(-(x^2+y^2))\n\n # solve dde\n # for reference: H_1(x,y) = x*exp(-(x^2+y^2))\n # so H_1(th1(tau-s)-th1(tau),th2(tau-s)-th2(tau))\n\n n = np.arange(0,self.dde_delay_N,1)\n for i in range(self.dde_delay_N-1,self.dde_TN):\n if self.phase_option == 'approx' or self.phase_option == 'approx2':\n\n h1_val = self.h1_approx_p(th1[i-1-n]-th1[i-1],th2[i-1-n]-th2[i-1])\n h2_val = self.h2_approx_p(th1[i-1-n]-th1[i-1],th2[i-1-n]-th2[i-1])\n\n j1 = -self.h1_approx_p(th1[i-1],th2[i-1])\n j2 = -self.h2_approx_p(th1[i-1],th2[i-1])\n\n elif self.phase_option == 'full':\n h1_val = f2d.H1_fourier(th1[i-1-n]-th1[i-1],th2[i-1-n]-th2[i-1])\n h2_val = f2d.H2_fourier(th1[i-1-n]-th1[i-1],th2[i-1-n]-th2[i-1])\n \n j1 = -f2d.H1_fourier(th1[i-1],th2[i-1])\n j2 = -f2d.H2_fourier(th1[i-1],th2[i-1])\n\n elif self.phase_option == 'trunc':\n h1_val = self.h1(th1[i-1-n]-th1[i-1],th2[i-1-n]-th2[i-1],0.8)\n h2_val = self.h2(th1[i-1-n]-th1[i-1],th2[i-1-n]-th2[i-1],0.8)\n \n j1 = -self.h1(th1[i-1],th2[i-1],0.8)\n j2 = -self.h2(th1[i-1],th2[i-1],0.8)\n\n \n th1[i] = th1[i-1] + self.dde_dt*( -(1.*self.g)*np.sum(np.exp(-n*self.dde_dt)*h1_val)*self.dde_dt + self.q*j1 )\n th2[i] = th2[i-1] + self.dde_dt*( -(1.*self.g)*np.sum(np.exp(-n*self.dde_dt)*h2_val)*self.dde_dt + self.q*j2 )\n\n\n #if self.phase_option == 'approx':\n th1 = np.mod(th1+pi,2*pi)-pi\n th2 = np.mod(th2+pi,2*pi)-pi\n #elif self.phase_option == 'full':\n if self.q == 0:\n xv = th1[-1]-th1[-2]#np.mean(np.gradient(th1[-10:],self.dde_dt))\n yv = th2[-1]-th2[-2]#np.mean(np.gradient(th2[-10:],self.dde_dt))\n print 'velocity components'+' (xv,yv)='+str(xv)+','+str(yv)+')'\n print 'velocity =',np.sqrt(xv**2 + yv**2)\n final_angle = np.arctan2(yv,xv)\n if final_angle < 0:\n final_angle += 2*pi\n print 'velocity angle',final_angle\n\n\n if 
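# --- Clarifying note (added; not part of the original file) -------------------
# The Euler loop above discretises the integro-delay equation
#     dtheta_1/dtau = -g \int_0^\infty e^{-s} H_1(theta_1(tau-s)-theta_1(tau),
#                                                  theta_2(tau-s)-theta_2(tau)) ds
#                     + q * j_1(theta_1, theta_2),   with  j_1 = -H_1(theta_1, theta_2)
# (and the analogous equation for theta_2).  The memory integral is truncated
# to the stored history of length dde_delay_t and approximated by the Riemann
# sum  sum_n e^{-n*dt} H_1(...) * dt  over n = 0 .. dde_delay_N-1, which is the
# np.sum(np.exp(-n*self.dde_dt)*h1_val)*self.dde_dt factor in the update; the
# whole right-hand side is then advanced by one explicit Euler step of size dde_dt.
# ------------------------------------------------------------------------------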
False:\n mp.figure()\n mp.plot(th1[-self.dde_delay_N:],th2[-self.dde_delay_N:])\n mp.show()\n\n if self.save_last:\n np.savetxt(self.filename_th1,th1[-self.dde_delay_N:])\n np.savetxt(self.filename_th2,th2[-self.dde_delay_N:])\n np.savetxt(self.filename_thi_t,self.dde_t[-self.dde_delay_N:])\n\n return th1,th2\n\n\n\n def phase_dde_v2(self,dde_TN,x0,y0,phase_option='full'):\n \"\"\"\n v2 is the same as above, but with manual input params and improved control over initial conditions\n\n x0,y0: initial arrays up to self.dde_delay_N, self.dde_delay_t\n\n the full integro-delay-differential equation using Euler's method\n x: x[:N], x[N:]. x history and y history, respectively. up to N time steps. \n\n \"\"\"\n \n th1 = np.zeros(dde_TN)\n th2 = np.zeros(dde_TN)\n\n th1[:self.dde_delay_N] = x0\n th2[:self.dde_delay_N] = y0\n\n # approximate the H function as a negative gaussian derivative: x*exp(-(x^2+y^2))\n\n # solve dde\n # for reference: H_1(x,y) = x*exp(-(x^2+y^2))\n # so H_1(th1(tau-s)-th1(tau),th2(tau-s)-th2(tau))\n\n n = np.arange(0,self.dde_delay_N,1)\n for i in range(self.dde_delay_N-1,dde_TN):\n if phase_option == 'approx' or phase_option == 'approx2':\n\n h1_val = self.h1_approx_p(th1[i-1-n]-th1[i-1],th2[i-1-n]-th2[i-1])\n h2_val = self.h2_approx_p(th1[i-1-n]-th1[i-1],th2[i-1-n]-th2[i-1])\n\n j1 = -self.h1_approx_p(th1[i-1],th2[i-1])\n j2 = -self.h2_approx_p(th1[i-1],th2[i-1])\n\n elif phase_option == 'full':\n h1_val = f2d.H1_fourier(th1[i-1-n]-th1[i-1],th2[i-1-n]-th2[i-1])\n h2_val = f2d.H2_fourier(th1[i-1-n]-th1[i-1],th2[i-1-n]-th2[i-1])\n \n j1 = -f2d.H1_fourier(th1[i-1],th2[i-1])\n j2 = -f2d.H2_fourier(th1[i-1],th2[i-1])\n\n\n elif self.phase_option == 'trunc':\n h1_val = self.h1(th1[i-1-n]-th1[i-1],th2[i-1-n]-th2[i-1],0.8)\n h2_val = self.h2(th1[i-1-n]-th1[i-1],th2[i-1-n]-th2[i-1],0.8)\n \n j1 = -self.h1(th1[i-1],th2[i-1],0.8)\n j2 = -self.h2(th1[i-1],th2[i-1],0.8)\n\n \n th1[i] = th1[i-1] + self.dde_dt*( -(1.*self.g)*np.sum(np.exp(-n*self.dde_dt)*h1_val)*self.dde_dt + self.q*j1 )\n th2[i] = th2[i-1] + self.dde_dt*( -(1.*self.g)*np.sum(np.exp(-n*self.dde_dt)*h2_val)*self.dde_dt + self.q*j2 )\n\n th1 = np.mod(th1+pi,2*pi)-pi\n th2 = np.mod(th2+pi,2*pi)-pi\n\n return th1,th2\n\n\n def load_phase_lc(self):\n \"\"\"\n if lc data exists, load. 
if DNE or recompute required, compute here.\n \"\"\"\n file_not_found = False\n\n self.filename_lc_phase = self.lcdir+'\/'+'lc_phase.dat'\n\n while True:\n if self.recompute_phase_lc or file_not_found:\n \"\"\"\n force recomputation of LC\n \"\"\"\n self.compute_phase_lc() # contains self.lc_phase_data\n np.savetxt(self.filename_lc_phase,self.lc_phase_data)\n break\n\n else:\n if os.path.isfile(self.filename_lc_phase):\n\n lc_phase_data = np.loadtxt(self.filename_lc_phase)\n\n self.lc_t_phase = lc_phase_data[:,0]\n self.lc_th1_phase = lc_phase_data[:,1]\n self.lc_th2_phase = lc_phase_data[:,2]\n \n self.lc_per = self.lc_t_phase[-1]\n print 'limit cycle period', self.lc_per\n # check to see if file contains lc or not.\n # non-lc parameter files have [-1,-1] as the data.\n\n if (lc_phase_data[0,0] == -1) and\\\n (lc_phase_data[0,1] == -1) and\\\n (lc_phase_data[0,2] == -1):\n self.limit_cycle_exists = False\n else:\n self.limit_cycle_exists = True\n \n self.lc_th1_phase_fn = interp1d(self.lc_t_phase,self.lc_th1_phase)\n self.lc_th2_phase_fn = interp1d(self.lc_t_phase,self.lc_th2_phase)\n\n break\n else:\n file_not_found = True\n\n # make lookup tables for easier access and implementation\n\n def phase_lc(self,t,choice):\n if choice == 1:\n return self.lc_th1_phase_fn(np.mod(t,self.lc_per))\n if choice == 2:\n return self.lc_th2_phase_fn(np.mod(t,self.lc_per))\n\n\n def compute_phase_lc(self):\n \"\"\"\n if lc not found, or if recomputation requested, compute LC.\n\n algorithm:\n 1. use existing data. if there are enough crossings, skip to 2. if there are not enough crossings detected, re-run with more time (print time). if there are enough crossings, skip to 2. else, quit.\n 2. given that there are enough crossings, check periodicity by using the last period estimate and check if the solution comes back to the start (up to some tolerance, print this). if the tolerance check fails, quit. else go to 3.\n 3. if a limit cycle exists, save the limit cycle solution data with a filename containing all parameter info in the format array=[time|theta1|theta2] (i.e. 
to plot theta1 over time i would use plot([array[:,0],array[:,1])).\n \n \"\"\"\n \n tol = .01\n\n # first try finding crossings with current solution data.\n\n temp_th1 = copy.deepcopy(self.th1_ph)\n temp_th2 = copy.deepcopy(self.th2_ph)\n\n find_crossings_iter = 0 # count # of times attempted to find enough LC crossings\n max_find_crossings_iter = 1\n crossings_exist = True # assume true to start\n\n temp_TN = self.dde_TN\n temp_t = self.dde_t\n\n\n\n # step 1 use existing data.\n while True:\n # find ccw crossings on right\n crossing_idx_ccw = (temp_th1[1:]>0)*(temp_th2[1:]>0)*(temp_th2[:-1]<=0)\n crossing_idx_cw = (temp_th1[1:]>0)*(temp_th2[1:]<=0)*(temp_th2[:-1]>0)\n\n cross_fail = 0\n\n # check number of crossings in each direction\n if np.sum(crossing_idx_ccw) <= 5:\n print 'not enough crossings in ccw direction ('+str(np.sum(crossing_idx_ccw))+')'\n cross_fail += 1\n else:\n print 'enough candidate crossings found in ccw direction ('+str(np.sum(crossing_idx_ccw))+')'\n crossing_idx = crossing_idx_ccw\n break # break to leave loop and go to step 2\n\n if np.sum(crossing_idx_cw) <= 5:\n print 'not enough crossings in cw direction ('+str(np.sum(crossing_idx_cw))+')'\n cross_fail += 1\n else:\n print 'enough candidate crossings found in ccw direction ('+str(np.sum(crossing_idx_cw))+')'\n crossing_idx = crossing_idx_cw\n break # break to leave loop and go to step 2\n\n if find_crossings_iter >= max_find_crossings_iter:\n # if there was a limit cycle, it would have been detected in the 2nd pass above.\n # give up if limit cycle not found in 2nd pass.\n crossings_exist = False # gloabl var\n self.limit_cycle_exists = False # global var\n print 'no limit cycle found at step 1.', find_crossings_iter\n\n # save dummy file.\n break\n\n if cross_fail == 2 and (find_crossings_iter < max_find_crossings_iter):\n # if both crossing checks fail in step 1, run sim for longer\n # this should not run in the second pass (when find_crossings_iter >= 1)\n temp_T = 100\n print 'not enough crossings. 
Re-initializing with additional time T='+str(temp_T)\n\n dde_TN = int(temp_T\/self.dde_dt)\n temp_temp_th1 = np.zeros(dde_TN+temp_TN)\n temp_temp_th2 = np.zeros(dde_TN+temp_TN)\n \n x0 = temp_th1[-self.dde_delay_N:]\n y0 = temp_th2[-self.dde_delay_N:]\n\n temp_temp_th1[:self.dde_TN] = temp_th1\n temp_temp_th2[:self.dde_TN] = temp_th2\n\n temp_temp_th1[self.dde_TN:],temp_temp_th2[self.dde_TN:] = self.phase_dde_v2(dde_TN,x0,y0)\n\n temp_th1 = temp_temp_th1\n temp_th2 = temp_temp_th2\n\n find_crossings_iter += 1 # add 1 to number of longer sims run\n\n # step 2 check periodicity.\n if crossings_exist:\n print 'checking periodicity...'\n # get last idx #\n # http:\/\/stackoverflow.com\/questions\/34667282\/numpy-where-detailed-step-by-step-explanation-examples\n final_idx = np.where(crossing_idx==1)[0][-1]\n\n # get approx period\n crossing_t = temp_t[1:][crossing_idx]\n period = crossing_t[-1]-crossing_t[-4]\n \n temp_TN = int(period\/self.dde_dt)\n\n # get approx init\n temp_th1_2 = np.zeros(temp_TN)\n temp_th2_2 = np.zeros(temp_TN)\n\n x0 = temp_th1[(final_idx-self.dde_delay_N):final_idx]\n y0 = temp_th2[(final_idx-self.dde_delay_N):final_idx]\n\n print np.shape(x0)\n print len(temp_th1_2)\n print crossing_t\n\n\n #temp_th1_2[:self.dde_delay_N] = x0\n #temp_th2_2[:self.dde_delay_N] = y0\n\n\n # integrate for 1 period\n temp_th1_2,temp_th2_2 = self.phase_dde_v2(temp_TN,x0,y0)\n\n temp_th1 = temp_th1_2\n temp_th2 = temp_th2_2\n \n if False:\n # just test plotting\n mp.figure()\n mp.plot(temp_th1,temp_th2)\n\n mp.figure()\n mp.plot(temp_th1)\n mp.plot(temp_th2)\n mp.show()\n\n # check tolerance\n err = (np.abs(temp_th1[-1]-temp_th1[0])+np.abs(temp_th2[-1]-temp_th2[0]))\n if errzero_vel_tol) and\\\n (np.abs(v2_data[min_idx])>zero_vel_tol) and\\\n (np.abs(v2_data[min_idx] - v1_data[min_idx])>zero_vel_tol):\n\n tol = smallest_diff\n v1 = v1_data[min_idx]\n v2 = v2_data[min_idx]\n\n return v1,v2\n\n def parameteric_intersection(self):\n \"\"\"\n get the first intersection between two parametric curves\n \"\"\"\n\n def twod_velocity_v2(self,g,b,mode='trunc',\n tol=5e-2,\n diag_tol=5e-2,\n zero_vel_tol=1e-5,\n M_nu1 = 100,\n M_nu2 = 100,\n N = 200\n ):\n \"\"\"\n return nu1,nu2 given g and b.\n does not depend on any bifurcation data.\n g: bifurcation parameter. adaptation strength\n b: Fourier coefficient\n mode: 'trunc' or 'full'. uses full H function or truncated h function\n\n zero_vel_tol: if velocity in axial direction, ignore. If one of the velocities is below this small number, it means the movement is axial. 
in this case, ignore.\n\n solve:\n (1) 0 = -\\nu_1 + g \\int_0^\\infty e^{-s} H_1(\\nu_1 s,\\nu_2 s) ds \n (2) 0 = -\\nu_2 + g \\int_0^\\infty e^{-s} H_2(\\nu_1 s,\\nu_2 s) ds\n\n \"\"\"\n\n nu1 = np.linspace(0,3,M_nu1)\n nu2 = np.linspace(0,3,M_nu2)\n sint = np.linspace(0,N\/10,N)\n \n nu1,nu2,sint = np.meshgrid(nu1,nu2,sint)\n\n N = np.shape(sint)[-1] # get size of integration variable array\n sint_pos = len(np.shape(sint))-1# get position of integration var\n \n # get limits of integration\n int_lo = sint[0,0,0]\n int_hi = sint[0,0,-1]\n dx = (int_hi-1.*int_lo)\/N\n \n if mode == 'trunc':\n integrand1 = exp(-sint)*self.h1(nu1*sint,nu2*sint,b)\n integrand2 = exp(-sint)*self.h2(nu1*sint,nu2*sint,b)\n\n elif mode == 'full':\n integrand1 = exp(-sint)*f2d.H1_fourier(nu1*sint,nu2*sint)\n integrand2 = exp(-sint)*f2d.H2_fourier(nu1*sint,nu2*sint)\n else:\n raise ValueError('Invalid choice='+mode)\n\n eq1 = -nu1[:,:,0] + g*integrand1.sum(sint_pos)*dx\n eq2 = -nu2[:,:,0] + g*integrand2.sum(sint_pos)*dx\n \n # get contours\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n cs1 = ax.contour(nu1[:,:,0],nu2[:,:,0],eq1,levels=[0.])\n cs2 = ax.contour(nu1[:,:,0],nu2[:,:,0],eq2,levels=[0.])\n\n p1_all = cs1.collections[0].get_paths()\n p2_all = cs2.collections[0].get_paths()\n\n p1x_dict = {}\n p1y_dict = {}\n\n p2x_dict = {}\n p2y_dict = {}\n\n # this block of code will separate all branches into dictionaries.\n # redundant since we have two nontrivial curves.\n\n # gather nontrival zero contour from first equation\n for i in range(len(p1_all)):\n v = p1_all[i].vertices\n x = v[:,0]\n y = v[:,1]\n \n if (np.sum(np.abs(x)) <= zero_vel_tol) or (np.sum(np.abs(y)) <= zero_vel_tol):\n pass\n else:\n p1x_dict[str(i)] = x\n p1y_dict[str(i)] = y\n\n # gather nontrival zero contour from second equation\n for i in range(len(p2_all)):\n v = p2_all[i].vertices\n x = v[:,0]\n y = v[:,1]\n if (np.sum(np.abs(x)) <= zero_vel_tol) or (np.sum(np.abs(y)) <= zero_vel_tol):\n pass\n else:\n p2x_dict[str(i)] = x\n p2y_dict[str(i)] = y\n\n # warn user if there are more than 2 unique contours\n if (len(p1x_dict) > 1) or\\\n (len(p1y_dict) > 1) or\\\n (len(p2x_dict) > 1) or\\\n (len(p2y_dict) > 1):\n raise ValueError('Warning: multiple zero contours detected. use the plot function in twod_velocity_v2')\n print 'there should be 1 zero contour for each existence equation'\n\n\n if (len(p1x_dict) < 1) or\\\n (len(p1y_dict) < 1) or\\\n (len(p2x_dict) < 1) or\\\n (len(p2y_dict) < 1):\n raise RuntimeError('Warning: no contours detected. use the plot function in twod_velocity_v2')\n print 'there should be 1 zero contour for each existence equation'\n \n \n if False:\n mp.figure(5)\n for key in p1x_dict.keys():\n mp.plot(p1x_dict[key],p1y_dict[key])\n for key in p2x_dict.keys():\n mp.plot(p2x_dict[key],p2y_dict[key])\n\n \n mp.show()\n\n # find contour intersection. we only need the first.\n for key in p1x_dict.keys():\n x1 = p1x_dict[key]\n y1 = p1y_dict[key]\n \n for key in p2x_dict.keys():\n x2 = p2x_dict[key]\n y2 = p2y_dict[key]\n\n # create the interpolated functions\n t = np.linspace(0,1,len(x1))\n z = np.zeros((2,len(x1)))\n z[0,:] = x1\n z[1,:] = y1\n c1 = interp1d(t,z)\n\n t = np.linspace(0,1,len(x2))\n z = np.zeros((2,len(x2)))\n z[0,:] = x2\n z[1,:] = y2\n c2 = interp1d(t,z)\n\n def err(tt):\n t1 = tt[0]\n t2 = tt[1]\n return c1(t1)-c2(t2)\n\n \n\n try:\n t1,t2 = fsolve(err,x0=[.65,.75],factor=.01)\n except ValueError:\n print 'if you get the error, ValueError: A value in x_new is above the interpolation range. 
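# --- Illustrative sketch (added; not part of the original class) --------------
# Sanity check of the quadrature used above for the existence equations
#   0 = -nu_i + g \int_0^\infty e^{-s} H_i(nu_1 s, nu_2 s) ds :
# the integral is truncated at s = N/10 and replaced by a Riemann sum with
# weight dx = (s_max - s_min)/N.  For the simplest ingredient of the truncated
# kernel, H(s) = sin(nu*s), the exact value is nu/(1 + nu^2), so the error of
# the same truncation and weights can be checked directly.
import numpy as np

N = 200
s = np.linspace(0, N / 10, N)
dx = (s[-1] - s[0]) / N

nu = 1.3
approx = np.sum(np.exp(-s) * np.sin(nu * s)) * dx
exact = nu / (1.0 + nu**2)
print(approx, exact)   # both ~0.48 (the small gap is truncation/Riemann-sum error)
# ------------------------------------------------------------------------------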
then modify starting times in def twod_velocity_v2 in twod_phase.py'\n\n v1,v2 = c1(t1)\n\n\n if False:\n mp.figure()\n z1 = c1(np.linspace(0,.6,10))\n x1 = z1[0,:]\n y1 = z1[1,:]\n mp.plot(x1,y1)\n\n z2 = c2(np.linspace(0,.9,10))\n x2 = z2[0,:]\n y2 = z2[1,:]\n mp.plot(x2,y2)\n\n\n mp.show()\n \n plt.clf()\n return v1,v2\n\n \n def h1(self,x,y,b,d=False):\n if d:\n return cos(x)*(1+b*cos(y)),-b*sin(x)*sin(y)\n else:\n return sin(x)*(1+b*cos(y))\n\n def h2(self,x,y,b,d=False):\n if d:\n return -b*sin(x)*sin(y),cos(y)*(1+b*cos(x))\n else:\n return sin(y)*(1+b*cos(x))\n\n def evans(self,lam,sint,g=2.):\n \"\"\"\n evans function\n all meshgrids size\/shape of (M,M,N)\n lam: complex number, or meshgrid on complex domain (M values)\n sint: integration variable. meshgrid on real domain (N values)\n nu1,nu2: velocity values\n \"\"\"\n\n\n\n # get nu1,nu2 given g\n print 'reminder: implement g to nu1,nu2 conversion'\n\n # g=3\n #nu1=1.21;nu2=2.09\n\n # g=4\n #nu1=1.45;nu2=2.54\n\n b=.8\n\n def h1(x,y,d=False):\n if d:\n return cos(x)*(1+b*cos(y)),-b*sin(x)*sin(y)\n else:\n return sin(x)*(1+b*cos(y))\n\n def h2(x,y,d=False):\n if d:\n # sin(y)*(1+b*cos(x))\n return -b*sin(x)*sin(y),cos(y)*(1+b*cos(x))\n else:\n return sin(y)*(1+b*cos(x))\n \n Q1,Q2 = f2d.H1_fourier(-nu1*sint,-nu2*sint,d=True)\n Q3,Q4 = f2d.H2_fourier(-nu1*sint,-nu2*sint,d=True)\n \n #Q1,Q2 = h1(-nu1*sint,-nu2*sint,d=True)\n #Q3,Q4 = h2(-nu1*sint,-nu2*sint,d=True)\n \n # Q3,Q4 should be same as Q4,Q3=H1_fourier(-nu2*sint,-nu2*sint,d=True)\n \n sam = ( exp(-lam*sint)-1 ) \/ lam\n \n N = np.shape(sint)[-1] # get size of integration variable array\n sint_pos = len(np.shape(sint))-1# get position of integration var\n\n Qhat1 = (np.exp(-sint)*Q1*sam).sum(sint_pos)\/N\n Qhat2 = (np.exp(-sint)*Q2*sam).sum(sint_pos)\/N\n Qhat3 = (np.exp(-sint)*Q3*sam).sum(sint_pos)\/N\n Qhat4 = (np.exp(-sint)*Q4*sam).sum(sint_pos)\/N\n\n # return the complex valued functions\n\n return (1.\/g + Qhat1)*(1.\/g + Qhat4) - Qhat3*Qhat2\n\n\n def evans_v2(self,al,be,sint,g=2.5,b=0.8,return_intermediates=False,mode='trunc'):\n \"\"\"\n evans function\n all meshgrids size\/shape of (M,M,N)\n al,be: real and imaginary parts of some eigenvalue\n sint: integration variable. 
meshgrid on real domain (N values)\n \"\"\"\n\n\n # get nu1,nu2 given g\n\n nu1,nu2=self.twod_velocity_v2(g,b,mode=mode)\n print 'velocity',nu1,nu2, \"g=\"+str(g)+\", b=\"+str(b)\n\n # g=4\n #nu1=1.45;nu2=2.54\n\n # g=3\n #nu1=1.21;nu2=2.09\n\n # g=2.5\n #nu1=1.0712;nu2=1.8395\n\n # g=2\n #nu1=.91067;nu2=1.5529\n\n # g=1.5\n #nu1=.70711;nu2=1.2247\n\n if mode == 'full':\n Q1,Q2 = f2d.H1_fourier(-nu1*sint,-nu2*sint,d=True)\n Q3,Q4 = f2d.H2_fourier(-nu1*sint,-nu2*sint,d=True)\n \n elif mode == 'trunc':\n Q1,Q2 = self.h1(-nu1*sint,-nu2*sint,b,d=True)\n Q3,Q4 = self.h2(-nu1*sint,-nu2*sint,b,d=True)\n #Q3,Q4 = h1(-nu2*sint,-nu1*sint,d=True)\n\n \n # Q3,Q4 should be same as Q4,Q3=H1_fourier(-nu2*sint,-nu2*sint,d=True)\n \n samp = exp(-al*sint)*cos(-be*sint) - 1\n samq = exp(-al*sint)*sin(-be*sint) \n \n N = np.shape(sint)[-1] # get size of integration variable array\n sint_pos = len(np.shape(sint))-1# get position of integration var\n\n # get limits of integration\n int_lo = sint[0,0,0]\n int_hi = sint[0,0,-1]\n dx = (int_hi-1.*int_lo)\/N\n\n\n ph1 = (np.exp(-sint)*Q1*samp).sum(sint_pos)*dx\n qh1 = (np.exp(-sint)*Q1*samq).sum(sint_pos)*dx\n\n ph2 = (np.exp(-sint)*Q2*samp).sum(sint_pos)*dx\n qh2 = (np.exp(-sint)*Q2*samq).sum(sint_pos)*dx\n\n ph3 = ph2\n qh3 = qh2\n\n ph4 = (np.exp(-sint)*Q4*samp).sum(sint_pos)*dx\n qh4 = (np.exp(-sint)*Q4*samq).sum(sint_pos)*dx\n\n\n # return the complex valued functions\n alf = al[:,:,0]\n bef = be[:,:,0]\n\n e_re = (g**2.)*(ph1*ph4 - qh1*qh4 - ph2*ph3 + qh2*qh3) + g*(alf*(ph1+ph4) - bef*(qh1+qh4)) + alf**2.-bef**2.\n e_im = (g**2.)*(ph1*qh4 + qh1*ph4 - qh2*ph3 - ph2*qh3) + g*(alf*(qh1+qh4) + bef*(ph1+ph4)) + 2.*alf*bef\n \n if return_intermediates:\n return e_re,e_im,ph1,qh1,ph2,qh2,ph3,qh3,ph4,qh4\n return e_re,e_im\n\n\n def evans_zero_alpha(self,g,b,al,be,sint,real=True,tol=1e-2):\n \"\"\"\n return the real part of the input that yields a zero in the evans function.\n \"\"\"\n\n\n\n e_re,e_im = self.evans_v2(al,be,sint,\n return_intermediates=False,g=g,b=b)\n \n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n \n cs1 = ax.contour(al[:,:,0],be[:,:,0],e_re,levels=[0.])\n cs2 = ax.contour(al[:,:,0],be[:,:,0],e_im,levels=[0.])\n\n if False:\n intersection_example,contour_pts1,contour_pts2 = findIntersection(cs1,cs2,return_intermediates=True)\n \n\n plt.plot(contour_pts1[:,0]+.0001,contour_pts1[:,1]+.0001)\n plt.plot(contour_pts2[:,0],contour_pts2[:,1])\n plt.show()\n\n\n p1_all = cs1.collections[0].get_paths()\n p2_all = cs2.collections[0].get_paths()\n\n p1x_dict_raw = {}\n p1y_dict_raw = {}\n\n p2x_dict_raw = {}\n p2y_dict_raw = {}\n\n # this block of code will separate all branches into dictionaries.\n\n # gather nontrival zero contour from real part\n for i in range(len(p1_all)):\n v = p1_all[i].vertices\n x = v[:,0]\n y = v[:,1]\n\n p1x_dict_raw[str(i)] = x\n p1y_dict_raw[str(i)] = y\n\n # gather nontrival zero contour from imaginary part\n for i in range(len(p2_all)):\n v = p2_all[i].vertices\n x = v[:,0]\n y = v[:,1]\n\n p2x_dict_raw[str(i)] = x\n p2y_dict_raw[str(i)] = y\n\n\n if False:\n\n mp.figure(5)\n mp.title('original branches')\n for key in p1x_dict_raw.keys():\n mp.plot(p1x_dict_raw[key],p1y_dict_raw[key])\n for key in p2x_dict_raw.keys():\n mp.plot(p2x_dict_raw[key],p2y_dict_raw[key])\n\n \n #mp.show()\n \n\n\n\n # remove branches that cross the origin\n p1x_dict = {}\n p1y_dict = {}\n\n for key in p1x_dict_raw.keys():\n skipflag = False\n for i in range(len(p1x_dict_raw[key])):\n if np.abs(p1x_dict_raw[key][i]-p1y_dict_raw[key][i])<.01:\n 
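# --- Illustrative sketch (added; not part of the original class) --------------
# The real/imaginary split used in evans_v2 above: with lam = al + i*be,
#   samp = exp(-al*s)*cos(-be*s) - 1   and   samq = exp(-al*s)*sin(-be*s)
# are exactly the real and imaginary parts of exp(-lam*s) - 1, which is why the
# Evans function can be assembled from purely real integrals (ph*, qh*).
import numpy as np

al, be, s = 0.3, 1.7, np.linspace(0.0, 5.0, 7)
samp = np.exp(-al * s) * np.cos(-be * s) - 1
samq = np.exp(-al * s) * np.sin(-be * s)
lam = al + 1j * be
print(np.allclose(samp + 1j * samq, np.exp(-lam * s) - 1))   # True
# ------------------------------------------------------------------------------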
skipflag = True\n\n if not(skipflag):\n p1x_dict[key] = p1x_dict_raw[key]\n p1y_dict[key] = p1y_dict_raw[key]\n\n p2x_dict = {}\n p2y_dict = {}\n\n for key in p2x_dict_raw.keys():\n skipflag = False\n for i in range(len(p2x_dict_raw[key])):\n if np.abs(p2x_dict_raw[key][i]-p2y_dict_raw[key][i])<.01:\n skipflag = True\n\n if not(skipflag):\n p2x_dict[key] = p2x_dict_raw[key]\n p2y_dict[key] = p2y_dict_raw[key]\n\n\n if False:\n mp.figure(6)\n mp.title('remaining branches')\n for key in p1x_dict.keys():\n mp.plot(p1x_dict[key],p1y_dict[key])\n for key in p2x_dict.keys():\n mp.plot(p2x_dict[key],p2y_dict[key])\n\n \n mp.show()\n\n\n # find contour intersection. if multiple mins found, take one with greater magnitude in complex plane\n # find minimia by taking differences\n \n min_xs = []\n min_ys = []\n\n for key1 in p1x_dict.keys():\n for key2 in p2x_dict.keys():\n rex = p1x_dict[key1]\n rey = p1y_dict[key1]\n \n imx = p2x_dict[key2]\n imy = p2y_dict[key2]\n\n if False:\n mp.figure()\n mp.plot(rex,rey,color='black',lw=3)\n mp.plot(imx,imy,color='gray',lw=3)\n \n mp.show()\n\n\n newtol = tol\n for i in range(len(rex)):\n diff_arr = (rex[i]-imx)**2. + (rey[i]-imy)**2.\n minval = np.amin(diff_arr)\n\n if minval < newtol:\n newtol = minval\n minx = rex[i]\n miny = rey[i]\n #print minx,miny\n min_xs.append(minx)\n min_ys.append(miny)\n\n \"\"\"\n smallest_diff = np.amin(np.abs(g-g_data))\n min_idx = np.argmin(np.abs(g-g_data))\n \n \n if (smallest_diff < tol) and\\\n (np.abs(v1_data[min_idx])>zero_vel_tol) and\\\n (np.abs(v2_data[min_idx])>zero_vel_tol) and\\\n (np.abs(v2_data[min_idx] - v1_data[min_idx])>zero_vel_tol):\n \n tol = smallest_diff\n v1 = v1_data[min_idx]\n v2 = v2_data[min_idx]\n \"\"\"\n\n print 'minx,miny',min_xs,min_ys, 'for g,b=',g,b\n\n\n \"\"\"\n for key in p1x_dict.keys():\n x1 = p1x_dict[key]\n y1 = p1y_dict[key]\n \n for key in p2x_dict.keys():\n x2 = p2x_dict[key]\n y2 = p2y_dict[key]\n\n # create the interpolated functions\n t = np.linspace(0,1,len(x1))\n z = np.zeros((2,len(x1)))\n z[0,:] = x1\n z[1,:] = y1\n c1 = interp1d(t,z)\n\n t = np.linspace(0,1,len(x2))\n z = np.zeros((2,len(x2)))\n z[0,:] = x2\n z[1,:] = y2\n c2 = interp1d(t,z)\n\n def err(tt):\n t1 = tt[0]\n t2 = tt[1]\n return c1(t1)-c2(t2)\n\n t1,t2 = fsolve(err,x0=[.6,.8],factor=.01)\n\n v1,v2 = c1(t1)\n\n\n if False:\n mp.figure()\n z1 = c1(np.linspace(0,.6,10))\n x1 = z1[0,:]\n y1 = z1[1,:]\n mp.plot(x1,y1)\n\n z2 = c2(np.linspace(0,.9,10))\n x2 = z2[0,:]\n y2 = z2[1,:]\n mp.plot(x2,y2)\n\n\n mp.show()\n \n \"\"\"\n\n # if no intersections, return nan\n if (min_xs == []) or (min_ys == []):\n return np.nan\n\n # if two or more intersections, use the one with greatest magnitude in complex plane\n min_xs = np.array(min_xs)\n min_ys = np.array(min_ys)\n\n if (len(min_xs) >= 2):\n max_idx = np.argmax(min_xs**2. 
+ min_yx**2.)\n min_xs = [min_xs[max_idx]]\n min_ys = [min_ys[max_idx]]\n \n\n if real:\n return min_xs[0]\n return min_xs[0],min_ys[0]\n\n\n\n def plot(self,option=\"h1\"):\n\n fig = plt.figure()\n \n\n if option == 'h1':\n ax = fig.gca(projection='3d')\n ax.set_title(\"H1 (numerical)\")\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n\n ax = plot_s(ax,self.H1)\n\n\n elif option == 'h2':\n ax = fig.gca(projection='3d')\n ax.set_title(\"H2 (numerical)\")\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n #ax = plot_s(ax,self.XX,self.YY,self.H2)\n ax = plot_s(ax,self.H2)\n\n elif option == 'j1':\n ax = fig.gca(projection='3d')\n ax.set_title(\"J1 (numerical)\")\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n #ax = plot_s(ax,self.XX,self.YY,self.J1)\n ax = plot_s(ax,self.J1)\n\n elif option == 'j2':\n ax = fig.gca(projection='3d')\n ax.set_title(\"J2 (numerical)\")\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n ax = plot_s(ax,self.J2)\n\n elif option == 'h1_approx2':\n ax = fig.gca(projection='3d')\n ax.set_title(\"H1 (approx v2)\")\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n\n z = np.sin(self.XX)*.2+self.h1_approx(self.XX,self.YY,sig=4.)\n ax = plot_s(ax,z\/2.)\n\n elif option == 'evans':\n M_re = 300\n M_im = 300\n N = 200\n\n lam_re = np.linspace(-.25,1.,M_re)\n lam_im = np.linspace(-.01,2,M_im)\n sint = np.linspace(0,N\/10,N)\n\n #LAM_re, LAM_im, SINT = np.meshgrid(lam_re,lam_im,sint,dtype=np.complex)\n LAM_re, LAM_im, SINT = np.meshgrid(lam_re,lam_im,sint)\n\n LAM_re_contour, LAM_im_contour = np.meshgrid(lam_re,lam_im)\n \n e_re,e_im = self.evans_v2(LAM_re,LAM_im,SINT,\n return_intermediates=False,g=4.,b=.4)\n\n\n ax = fig.add_subplot(111)\n\n #e_re = np.cos(2*LAM_re_contour*pi)*np.sin(LAM_im_contour*pi)\n #e_im = np.sin(2*LAM_re_contour*pi)*np.cos(LAM_re_contour*pi)\n\n cs_re = ax.contour(LAM_re_contour,LAM_im_contour,e_re,levels=[0.])\n cs_im = ax.contour(LAM_re_contour,LAM_im_contour,e_im,levels=[0.])\n\n p = cs_re.collections[0].get_paths()[0]\n v = p.vertices\n x = v[:,0]\n y = v[:,1]\n\n cs_re.collections[0].set_color('black')\n cs_re.collections[0].set_label('re')\n cs_re.collections[0].set_linewidths(2)\n\n cs_im.collections[0].set_color('gray')\n cs_im.collections[0].set_label('im')\n cs_im.collections[0].set_linewidths(2)\n\n ax.legend()\n\n\n\n # plot real and imag parts\n \n elif option == 'phase_time':\n ax = fig.add_subplot(111)\n ax.set_title(\"phase over time\")\n ax.set_xlabel('t')\n ax.set_ylabel(r\"$\\theta$\")\n ax.plot(self.dde_t,np.mod(self.th1_ph+pi,2*pi)-pi)\n ax.plot(self.dde_t,np.mod(self.th2_ph+pi,2*pi)-pi)\n\n elif option == 'phase_space':\n ax = fig.add_subplot(111)\n ax.set_title(\"phase in space\")\n ax.set_xlabel(r\"$x$\")\n ax.set_ylabel(r\"$y$\")\n #ax.set_xlim(-pi,pi)\n #ax.set_ylim(-pi,pi)\n #ax.plot(np.mod(self.th1_ph+pi,2*pi)-pi,np.mod(self.th2_ph+pi,2*pi)-pi)\n \n #ax.set_xlim(-pi,pi)\n #ax.set_ylim(-pi,pi)\n ax.plot(np.mod(self.th1_ph+pi,2*pi)-pi,np.mod(self.th2_ph+pi,2*pi)-pi)\n\n elif option == 'h1_fourier':\n ax = fig.gca(projection='3d')\n ax.set_title(\"H1 (Fourier)\")\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n #ax = plot_s(ax,self.XX,self.YY,self.J1)\n ax = plot_s(ax,f2d.H1_fourier(self.XX,self.YY))\n\n elif option == 'h2_fourier':\n ax = fig.gca(projection='3d')\n ax.set_title(\"H2 (Fourier)\")\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n #ax = plot_s(ax,self.XX,self.YY,self.J1)\n ax = plot_s(ax,f2d.H2_fourier(self.XX,self.YY))\n\n\n elif option == 'h1_fourier_dx':\n ax = fig.gca(projection='3d')\n ax.set_title(\"dH1dx 
(Fourier)\")\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n #ax = plot_s(ax,self.XX,self.YY,self.J1)\n dh1x,hd1y = f2d.H1_fourier(self.XX,self.YY,d=True)\n ax = plot_s(ax,dh1x)\n\n elif option == 'h1_fourier_dy':\n ax = fig.gca(projection='3d')\n ax.set_title(\"dH1dy (Fourier)\")\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n #ax = plot_s(ax,self.XX,self.YY,self.J1)\n dh1x,hd1y = f2d.H1_fourier(self.XX,self.YY,d=True)\n ax = plot_s(ax,dh1y)\n\n \n elif option == 'h1_1d':\n ax = fig.add_subplot(111)\n ax.plot([-pi,pi],[0,0],color='black')\n\n ax.plot(self.X,-f2d.H1_fourier(self.X,self.X),label='-h1(x,x)',lw=3)\n ax.plot(self.X,-f2d.H1_fourier(-self.X,-self.X),label='-h1(-x,-x)',lw=3)\n ax.plot(self.X,-f2d.H1_fourier(self.X,-self.X),label='-h1(x,-x)',ls='--',lw=2)\n ax.plot(self.X,-f2d.H1_fourier(-self.X,self.X),label='-h1(-x,x)',ls='--',lw=2)\n\n ax.set_xlim(-pi,pi)\n \n handles, labels = ax.get_legend_handles_labels()\n ax.legend(handles,labels)\n #ax.plot(self.X,f2d.H1_fourier(self.X,0))\n\n elif option == 'nullclines':\n \n # get contours\n # http:\/\/stackoverflow.com\/questions\/18304722\/python-find-contour-lines-from-matplotlib-pyplot-contour\n \n ax = fig.add_subplot(111)\n \n h1_x0_idx_vals = np.where(np.diff(np.sign(-f2d.H1_fourier(self.X,self.X))))[0]\n h1_x0_vals = self.X[h1_x0_idx_vals]\n \n t = np.linspace(0,100,1000)\n \n print h1_x0_vals\n \n for x0 in h1_x0_vals:\n # run sim, get solution\n sol = odeint(H_i_contour,[x0,x0],t)\n ax.plot(sol[:,0],sol[:,1])\n ax.plot(self.X,-f2d.H1_fourier(self.X,self.X))\n ax.set_xlim(-pi,pi)\n ax.set_ylim(-pi,pi)\n\n \n\n\n elif option == 'h1_centered_d':\n h1_dx,h1_dy = f2d.H1_fourier_centered(self.XX,self.YY,d=True)\n\n ax1 = fig.add_subplot(121,projection='3d')\n ax1.set_title(\"H1 dx (Fourier, centered)\")\n ax1.set_xlabel(\"x\")\n ax1.set_ylabel(\"y\")\n ax1 = plot_s(ax1,h1_dx)\n\n ax2 = fig.add_subplot(122,projection='3d')\n ax2.set_title(\"H1 dy (Fourier, centered)\")\n ax2.set_xlabel(\"x\")\n ax2.set_ylabel(\"y\")\n ax2 = plot_s(ax2,h1_dy)\n\n dx_val,dy_val = f2d.H1_fourier(0,0,d=True)\n \n print 'h1_fourier dx value at (0,0) =',dx_val\n print 'h1_fourier dy value at (0,0) =',dy_val\n\n dx_val,dy_val = f2d.H1_fourier(pi,pi,d=True)\n\n print 'h1_fourier dx value at (pi,pi) =',dx_val\n print 'h1_fourier dy value at (pi,pi) =',dy_val\n \n\n elif option == 'h1_fourier_d':\n h1_dx,h1_dy = f2d.H1_fourier(self.XX,self.YY,d=True)\n\n ax1 = fig.add_subplot(121,projection='3d')\n ax1.set_title(\"H1 dx (Fourier)\")\n ax1.set_xlabel(\"x\")\n ax1.set_ylabel(\"y\")\n ax1 = plot_s(ax1,h1_dx)\n\n ax2 = fig.add_subplot(122,projection='3d')\n ax2.set_title(\"H1 dy (Fourier)\")\n ax2.set_xlabel(\"x\")\n ax2.set_ylabel(\"y\")\n ax2 = plot_s(ax2,h1_dy)\n\n dx_val,dy_val = f2d.H1_fourier(0,0,d=True)\n \n print 'h1_fourier dx value at (0,0) =',dx_val\n print 'h1_fourier dy value at (0,0) =',dy_val\n\n dx_val,dy_val = f2d.H1_fourier(pi,pi,d=True)\n\n print 'h1_fourier dx value at (pi,pi) =',dx_val\n print 'h1_fourier dy value at (pi,pi) =',dy_val\n\n\n\n\n elif option == 'h2_fourier_d':\n h2_dx,h2_dy = f2d.H2_fourier(self.XX,self.YY,d=True)\n\n ax1 = fig.add_subplot(121,projection='3d')\n ax1.set_title(\"H2 dx (Fourier)\")\n ax1.set_xlabel(\"x\")\n ax1.set_ylabel(\"y\")\n ax1 = plot_s(ax1,h2_dx)\n\n ax2 = fig.add_subplot(122,projection='3d')\n ax2.set_title(\"H2 dy (Fourier)\")\n ax2.set_xlabel(\"x\")\n ax2.set_ylabel(\"y\")\n ax2 = plot_s(ax2,h2_dy)\n\n dx_val,dy_val = f2d.H2_fourier(0,0,d=True)\n \n print 'h2_fourier dx value at (0,0) =',dx_val\n print 
'h2_fourier dy value at (0,0) =',dy_val\n\n dx_val,dy_val = f2d.H1_fourier(pi,pi,d=True)\n\n print 'h2_fourier dx value at (pi,pi) =',dx_val\n print 'h2_fourier dy value at (pi,pi) =',dy_val\n\n\n\n elif option == 'h2_fourier_dy':\n ax = fig.gca(projection='3d')\n ax.set_title(\"H1 dx (Fourier)\")\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n #ax = plot_s(ax,self.XX,self.YY,self.J1)\n ax = plot_s(ax,f2d.H2_fourier(self.XX,self.YY,d=True))\n print 'h1_fourier dy value at (0,0) =',f2d.H2_fourier(0,0,d=True)\n print 'h1_fourier dy value at (pi,pi) =',f2d.H2_fourier(pi,pi,d=True)\n\n elif option == 'h1_approx':\n ax = fig.gca(projection='3d')\n ax.set_title(\"H1 approx_p. phase_option=\"+str(self.phase_option))\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n #ax = plot_s(ax,self.XX,self.YY,self.J1)\n ax = plot_s(ax,self.h1_approx(self.XX,self.YY))\n\n elif option == 'h2_approx':\n ax = fig.gca(projection='3d')\n ax.set_title(\"H2 approx_p. phase_option=\"+str(self.phase_option))\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n #ax = plot_s(ax,self.XX,self.YY,self.J1)\n ax = plot_s(ax,self.h2_approx_p(self.XX,self.YY))\n\n \n elif option == 'test':\n ax = fig.gca(projection='3d')\n ax.set_title(\"abs(x)\")\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n ax = plot_s(ax,np.abs(self.XX))\n else:\n print 'option, ', option, ' not found.'\n\n return fig\n\ndef H_i_contour(z,t,i=1):\n x = z[0]\n y = z[1]\n \n # get derivatives of fourier\n if i == 1:\n h1x,h1y = f2d.H1_fourier(x,y,d=True)\n return -h1y,h1x\n elif i == 2:\n h2x,h2y = f2d.H2_fourier(x,y,d=True)\n return -h2y,h2x\n\n\n\n#def main(screen):\ndef main(argv):\n\n # process terminal flags\n try:\n opts, args = getopt.getopt(argv, \"lvserhpf\", [\"use-last\",\"save-last\",\"use-ss\",\"save-ss\",\"use-random\",\"help\",\"run-phase\",\"run-full\"])\n\n except getopt.GetoptError:\n usage()\n sys.exit(2)\n\n use_last=False;save_last=False;use_ss=False;save_ss=False;use_random=False\n run_full=False;run_phase=False\n\n if opts == []:\n print \"Please run using flags -p (phase model) and\/or -f (full model)\"\n usage()\n sys.exit(2)\n\n for opt, arg in opts:\n if opt in (\"-h\", \"--help\"):\n\n usage()\n sys.exit()\n else:\n if opt in (\"-l\",\"--use-last\"):\n use_last = True\n print \"use_last=True\"\n elif opt in ('-v','--save-last'):\n save_last = True\n print \"save_last=True\"\n elif opt in ('-s','--use-ss'):\n use_ss = True\n print \"use_ss=True\"\n elif opt in ('-e','save-ss'):\n save_ss = True\n print \"save_ss=True\"\n elif opt in ('-r','use-random'):\n use_random = True\n print \"use_random=True\"\n elif opt in ('-p','run-phase'):\n run_phase = True\n print \"run class phase=True\"\n elif opt in ('-f','run-full'):\n run_full = True\n print \"run class theta (full sim)=True\"\n\n \"\"\"\n ktest = Kernel(recompute_kernel=False,kernel_type='diff_gauss')\n ktest.plot()\n plt.show()\n\n u0b_test = SteadyState(recompute_ss=False)\n u0b_test.plot(\"u0b\")\n u0b_test.plot(\"u0b_grad_x\")\n u0b_test.plot(\"u0b_grad_y\")\n\n plt.show()\n #phase = Phase(recompute_h=False,phase_option='approx2')\n \"\"\"\n\n if run_phase:\n phase = Phase(x0=-2,x1=2.,y0=0,y1=0.0,\n init_mode='cartesian',\n q=0.,g=.989006,\n dde_dt=.05,\n dde_T=500,\n phase_option='full',\n recompute_h=False,recompute_j=False,\n recompute_fq=False,recompute_phase_lc=False,\n compute_h_error=False,\n save_last=save_last,\n use_last=use_last,\n )\n \n #phase.plot(\"h1_fourier\")\n #phase.plot(\"h2_fourier\")\n\n #phase.plot('h1')\n #phase.plot('h2')\n\n #phase.plot('h1_approx2')\n \n 
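        # Added aside, not part of the original script: a minimal self-check of the
        # Riemann-sum quadrature that evans_v2 applies to integrals of the form
        #   int_0^inf exp(-s) * Q(s) * (exp(-lam*s) - 1) ds.
        # The kernel Q(s) = exp(-s) and the names lam, s_grid, ds, q_kernel are
        # illustrative assumptions only and do not appear elsewhere in this file.
        if False:
            lam = 0.7 + 0.3j                       # sample eigenvalue candidate
            s_grid = np.linspace(0.0, 40.0, 4000)  # plays the role of sint
            ds = s_grid[1] - s_grid[0]
            q_kernel = np.exp(-s_grid)             # stand-in for Q1..Q4
            sam = np.exp(-lam*s_grid) - 1.0        # same factor used in evans_v2
            numeric = (np.exp(-s_grid)*q_kernel*sam).sum()*ds
            exact = 1.0/(2.0 + lam) - 0.5          # closed form of the integral
            print 'quadrature check (should be True):', abs(numeric - exact) < 1e-3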
#phase.plot(\"h1_fourier_d\")\n #phase.plot(\"h1_fourier_centered_d\")\n #phase.plot(\"h2_fourier_d\")\n \n #phase.plot(\"nullclines\")\n \n #phase.plot(\"h1_approx\")\n #phase.plot(\"h2_approx\")\n\n phase.plot(\"phase_time\")\n phase.plot(\"phase_space\")\n\n #phase.plot('evans')\n\n #phase.plot(\"h1_1d\")\n\n #phase.plot('h1_fourier_dx')\n #phase.plot('h2_fourier_dx')\n \n #phase.plot(\"j1\")\n #phase.plot(\"j2\")\n \n #hettest = Heterogeneity()\n #hettest.plot()\n\n\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_364","text":"# cmd> snakeviz hi2lo.profile\n\nimport numpy as np\nimport scipy\nfrom imcut import pycut\nimport cProfile\n\n# import io3d\n\n\ndef make_data(sz=32, offset=0, sigma=80):\n seeds = np.zeros([sz, sz, sz], dtype=np.int8)\n seeds[offset + 12, offset + 9 : offset + 14, offset + 10] = 1\n seeds[offset + 20, offset + 18 : offset + 21, offset + 12] = 1\n img = np.ones([sz, sz, sz])\n img = img - seeds\n\n seeds[\n offset + 3 : offset + 15, offset + 2 : offset + 6, offset + 27 : offset + 29\n ] = 2\n img = scipy.ndimage.morphology.distance_transform_edt(img)\n segm = img < 7\n img = (100 * segm + sigma * np.random.random(img.shape)).astype(np.uint8)\n return img, segm, seeds\n\n\nimg, seg, seeds = make_data(64, 20)\nsegparams = {\n # 'method':'graphcut',\n # \"method\": \"multiscale_graphcut\",\n # \"method\": \"hi2lo\",\n \"method\": \"lo2hi\",\n \"use_boundary_penalties\": False,\n \"boundary_dilatation_distance\": 2,\n \"boundary_penalties_weight\": 1,\n \"block_size\": 8,\n \"tile_zoom_constant\": 1,\n}\ngc = pycut.ImageGraphCut(img, segparams=segparams)\ngc.set_seeds(seeds)\ngc.run()\n# cProfile.run(\"gc.run()\")\n# import sed3\n# ed = sed3.sed3(gc.segmentation==0, contour=seg)\n# ed.show()\n\n# self.assertLess(\n# np.sum(\n# np.abs((gc.segmentation == 0).astype(np.int8) - seg.astype(np.int8))\n# ),\n# 600,\n# )\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_365","text":"scdsr\/NCTU-CS-assignmentsDIP_2019_Spring\/Hw3\/test.py1-10\nfrom __future__ import division\n\n### git@yash0307 ###\nimport tensorflow as tf\nfrom tensorflow import keras\n\nfrom skimage.segmentation import slic\nfrom skimage.segmentation import mark_boundaries\nfrom skimage.util import img_as_float\nfrom skimage import io, color\n\nfrom keras import applications\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras import optimizers\nfrom keras.models import Sequential, Model, load_model\nfrom keras.layers import Dropout, Flatten, Dense, merge, Activation, Conv1D, Input, MaxPooling1D, Convolution1D\nfrom keras.layers.pooling import GlobalAveragePooling1D\nfrom keras.callbacks import LearningRateScheduler\nfrom keras.callbacks import ModelCheckpoint\nimport keras.backend as K\n'''\nfrom tensorflow.python.keras.layers import Dropout, Flatten, Dense, Activation, Conv1D, Input, MaxPooling1D, Convolution1D\nfrom tensorflow.python.keras import activations\nfrom tensorflow.python.keras import backend as K\n'''\nimport numpy as np\nimport math\nimport json\nimport sys\nimport random\nimport scipy.io as sio\nimport os\nfrom PIL import Image\nimport glob\n\n# print('tensorflow Ver: ' + tf.VERSION)\n# print('Keras Ver: ' +tf.keras.__version__)\n\ndef initialize_net(train_params):\n '''\n #tensorflow keras modification\n model = tf.keras.Sequential()\n \n model.add(Conv1D(nb_filter=5, \n filter_length=10, \n init='glorot_uniform', \n border_mode='same', 
\n input_shape=(train_params['max_size'], 3), \n bias=True))\n \n model.add(tf.keras.layers.Activation('tanh'))\n \n model.add(MaxPooling1D(pool_size=2))\n \n model.add(Conv1D(nb_filter=10,\n filter_length=20, \n init='glorot_uniform', \n border_mode='same', \n bias=True))\n \n model.add(tf.keras.layers.Activation('tanh'))\n \n model.add(MaxPooling1D(pool_size=2))\n \n model.add(Conv1D(nb_filter=20,\n filter_length=20,\n init='glorot_uniform',\n border_mode='same',\n bias=True))\n model.add(tf.keras.layers.Activation('tanh'))\n \n model.add(MaxPooling1D(pool_size=2))\n \n model.add(GlobalAveragePooling1D(input_shape=model.output_shape[1:]))\n \n model.add(Dense(input_dim=20, \n output_dim=2,\n init='glorot_uniform'))\n \n model.add(tf.keras.layers.Activation('tanh'))\n \n model.add(Dropout(0.3))\n \n model.add(Dense(input_dim=2, \n output_dim=2, \n init='glorot_uniform'))\n \n model.add(tf.keras.layers.Activation('softmax'))\n ''' \n model = Sequential() \n model.add(Conv1D(nb_filter=5, \n filter_length=10, \n init='glorot_uniform', \n border_mode='same', \n input_shape=(train_params['max_size'], 3), \n bias=True))\n \n model.add(Activation('tanh'))\n \n model.add(MaxPooling1D(pool_size=2))\n model.add(Conv1D(nb_filter=10,\n filter_length=20, \n init='glorot_uniform', \n border_mode='same', \n bias=True)) \n model.add(Activation('tanh'))\n model.add(MaxPooling1D(pool_size=2))\n \n model.add(Conv1D(nb_filter=20,\n filter_length=20,\n init='glorot_uniform',\n border_mode='same',\n bias=True))\n model.add(Activation('tanh'))\n model.add(MaxPooling1D(pool_size=2))\n model.add(GlobalAveragePooling1D(input_shape=model.output_shape[1:]))\n \n model.add(Dense(input_dim=20, \n output_dim=2,\n init='glorot_uniform'))\n model.add(Activation('tanh'))\n model.add(Dropout(0.3))\n \n model.add(Dense(input_dim=2, \n output_dim=2, \n init='glorot_uniform'))\n model.add(Activation('softmax'))\n\n return model\n\ndef initialize_params(train_data, data):\n print('neg_samples', len(data[0]))\n print('pos_samples', len(data[1])) \n train_params = {'batch_size':256, \n 'max_size':256, \n 'base_lr':0.001, \n 'decay_steps':5,\n 'decay_factor':0.5, \n 'num_epochs':15, \n 'neg_samples':len(data[0]), \n 'pos_samples':len(data[1]), \n 'total_samples':len(data[0])+len(data[1]), \n 'checkpoint':1}\n\n return train_params\n\ndef get_train_data(train_data, train_labels):\n data = {1:[], 0:[]}\n num_images = train_data.shape[1]\n for i in range(0, num_images):\n given_image_sp = train_data[0][i]\n # print('given_image_sp',len(given_image_sp))\n given_image_lb = train_labels[i][0]\n # print('given_image_lb',len(given_image_lb))\n num_sp = given_image_lb.shape[1]\n for j in range(0, num_sp):\n given_label = given_image_lb[0][j]\n given_rep = np.asarray(given_image_sp[j][:], dtype='float')\n #print('given_rep: ',given_rep)\n if given_label == 0:\n #given_rep = np.asarray(given_image_sp[j][:], dtype='float')\n #print('given_label == 0')\n data[0].append(given_rep)\n elif given_label == 1:\n #given_rep = np.asarray(given_image_sp[j][:], dtype='float')\n #print('given_label == 1')\n data[1].append(given_rep)\n else:\n print('SOMETHING IS WRONG !')\n return data\n\ndef load_data(data, train_params):\n data_frac = 0.5\n X_temp = np.zeros((train_params['batch_size'], train_params['max_size'], 3))\n Y_temp = np.zeros((train_params['batch_size'], 2))\n #print('population: ', range(0,train_params['pos_samples']))\n #print('sample: ',int(train_params['batch_size']*data_frac+2))\n idx = 
random.sample(range(0,train_params['pos_samples']), int(train_params['batch_size']*data_frac+2))\n for i in range(0, int(train_params['batch_size']*data_frac)):\n Y_temp[i][1] = float(1)\n sam = data[1][idx[i]]\n sam_len = sam.shape[0]\n X_temp[i, :sam_len, :] = np.true_divide(sam, sam.max())\n idx = random.sample(range(0, train_params['neg_samples']), int(train_params['batch_size']-(train_params['batch_size']*data_frac)+2))\n for i in range(int(train_params['batch_size']*data_frac), train_params['batch_size']):\n Y_temp[i][0] = float(1)\n sam = data[0][idx[i-int(train_params['batch_size']*data_frac)]]\n sam_len = sam.shape[0]\n X_temp[i, :sam_len, :] = np.true_divide(sam, sam.max())\n X = np.zeros((train_params['batch_size'], train_params['max_size'], 3))\n Y = np.zeros((train_params['batch_size'], 2))\n perm_idx = np.random.permutation(train_params['batch_size'])\n for i in range(0, train_params['batch_size']):\n X[i,:,:] = X_temp[perm_idx[i],:,:]\n Y[i,:] = Y_temp[perm_idx[i],:]\n return (X,Y)\n print('----write result, read mats')\n f_out = open(resultFile,'w')\n train_data = sio.loadmat(all_Q_mat)['all_Q']\n train_labels = sio.loadmat(superpixel_label_mat)['all_superpixel_labels']\n print('----get_train_data') \n data = get_train_data(train_data, train_labels)\n print(len(data))\n print('----initialize_params')\n train_params = initialize_params(train_data, data)\n print('----initialize_net')\n model = initialize_net(train_params)\n model.summary()\n print('----model compile')\n model.compile(loss='categorical_crossentropy',\n optimizer=optimizers.Adam(lr=train_params['base_lr']),\n metrics=['accuracy'])\n print('----ImageDataGenerator')\n train_datagen = ImageDataGenerator(\n featurewise_center=True,\n featurewise_std_normalization=True)\n\n for epoch in range(0, train_params['num_epochs']):\n num_iterations = int(train_params['total_samples']\/train_params['batch_size']) + 1\n for iteration in range(0, num_iterations):\n print ('Epoch : ' + str(epoch) + ' | Iteration : ' + str(iteration))\n given_data = load_data(data, train_params)\n X = given_data[0]\n Y = given_data[1]\n model.fit(X,Y,\n epochs=1,\n verbose=1)\n if epoch%train_params['decay_steps'] == 0 and epoch != 0:\n print (' Changing learning rate ... ')\n lr = K.get_value(model.optimizer.lr)\n K.set_value(model.optimizer.lr, lr*train_params['decay_factor'])\n print(\"lr changed to {}\".format(lr*train_params['decay_factor']))\n if epoch%train_params['checkpoint'] == 0 and epoch != 0:\n print (' Saving model ... 
')\n model_name = 'model_' + str(epoch) + '.h5'\n model.save(model_name)\n if epoch%1 == 0:\n acu_pos = 0\n acu_neg = 0\n acu = 0\n for i in range(0, int(train_params['pos_samples']\/train_params['batch_size'])):\n X = np.zeros((train_params['batch_size'], train_params['max_size'], 3))\n Y = np.zeros((train_params['batch_size'], 2))\n for j in range(0, train_params['batch_size']):\n sam = data[1][i*train_params['batch_size'] + j]\n sam_len = sam.shape[0]\n X[j, :sam_len, :] = np.true_divide(sam, sam.max())\n Y[j][1] = float(1)\n pred = model.evaluate(X,Y, \n batch_size=train_params['batch_size'])\n print(pred)\n acu_pos = acu_pos + pred[1]\n acu = acu + pred[1]\n for i in range(0, int(train_params['neg_samples']\/train_params['batch_size'])):\n X = np.zeros((train_params['batch_size'], train_params['max_size'], 3))\n Y = np.zeros((train_params['batch_size'], 2))\n for j in range(0, train_params['batch_size']):\n sam = data[0][i*train_params['batch_size'] + j]\n sam_len = sam.shape[0]\n X[j, :sam_len, :] = np.true_divide(sam, sam.max())\n Y[j][0] = float(1)\n pred = model.evaluate(X,Y, \n batch_size=train_params['batch_size'])\n print(pred)\n acu_neg = acu_neg + pred[1]\n acu = acu + pred[1]\n acu_pos = float(acu_pos)\/float(int(train_params['pos_samples']\/train_params['batch_size'])) \n acu_neg = float(acu_neg)\/float(int(train_params['neg_samples']\/train_params['batch_size']))\n acu = float(acu)\/float(int(train_params['pos_samples']\/train_params['batch_size']) + int(train_params['neg_samples']\/train_params['batch_size']))\n f_out.write('acu_pos: ' + str(acu_pos)+', acu_neg: '+str(acu_neg)+', acu:'+str(acu)+'\\n')\n\n'''\ndef label2idx(img_mat,N):\n print('------label 2 index processing------') \n #labelset\n idx_1d= np.arange(1,img_mat.size+1,1)\n #ind_eleMap=np.reshape(idx_1d, (img_mat.shape))\n #print(idx_1d)\n #ind_eleMap=np.zeros_like(img_mat)\n ind_eleMap=np.reshape(idx_1d,(img_mat.shape))\n #print(ind_eleMap)\n for pix in range(1,N):\n #np.where(a < 5, a, 10*a)\n bin_elementMap=np.where(img_mat==pix,1,0)\n remain_ele=np.trim_zeros(ind_eleMap*bin_elementMap)\n np.concatenate(labelset, remain_ele,axis=0)\n #print(vect)\n #print(bin_elementMap)\n'''\ndef gaussian_weight(X,mu,sigma):\n return math.exp((-1\/(2*sigma*sigma))*((X-mu)*(X-mu)));\n\ndef slicSeg(srcFiles_img,srcFiles_labels,img_mat,lbl_mat):\n #suerpixel extraction\n # Define hyperparameters\n dist_sigma = 10; #sigma for gaussian distance weight in Part 1\n gauss_weight = 1;\n numSegments=200\n \n # srcFiles_img = dir('G:\\SPFExtarct\\MSRA10K_Imgs_GT\\MSRA10K_Imgs_GT\\Imgs\\*.jpg');\n # srcFiles_labels = dir('G:\\SPFExtarct\\MSRA10K_Imgs_GT\\MSRA10K_Imgs_GT\\Imgs\\*.png');\n # may add some normalization to distance weight\n #path, dirs, files = next(os.walk(srcFiles_img+'\/*.jpg'))\n #file_count = len(files)\n dir_img=glob.glob(srcFiles_img)\n dir_lbl=glob.glob(srcFiles_labels)\n filenum=3\n # filenum=len(dir_img)\n all_Q={}\n all_superpixel_labels={}\n #for img in glob.glob(srcFiles_img):\n for a in range(0,filenum):\n sp_tx = '--superpixel segmentation for image: %s--' % (a+1)\n print(sp_tx)\n # read image\n #print(dir_img[i])\n #print(dir_lbl[i])\n print(a)\n path_img=dir_img[a]\n path_lbl=dir_lbl[a]\n im_image = io.imread(path_img)\n #print(im_image)\n im_label = io.imread(path_lbl)\n #print(im_label)\n #[L,N] = superpixels(im_lab,200,'IsInputLab',1);\n # L= label numbers, N = superpixel numbers \n # im_lab = rgb2lab(im);\n L = slic(img_as_float(im_image), n_segments = numSegments) #include the lab convert\n L=L+1 # 
start from 1\n N=np.amax(L)\n print('superpixel segment number: ', N)\n\n # Vectorize superpixels in R and make mean color vector for each r\n print('----mean color calculation----')\n #im_size = io.imread(path_img).size;\n C = np.zeros((N,3));\n #r_val=im_image[:,:,0]\n #g_val=im_image[:,:,1]\n #b_val=im_image[:,:,2]\n for i in range(1,N):\n #print(np.where(L==i,1,0))\n #r_val=im_image(:,:,0)\n #g_val=im_image(:,:,1)\n #b_valim_image(:,:,2)\n red_spi_value=np.mean(im_image[(np.where(L==i,1,0)==1),0])\n green_spi_value=np.mean(im_image[(np.where(L==i,1,0)==1),1])\n blue_spi_value=np.mean(im_image[(np.where(L==i,1,0)==1),2])\n #r_val_txt= 'sp:%s, r_val: %s'% (i,red_spi_value)\n #print(r_val_txt)\n \n C[i,:]=[red_spi_value, green_spi_value, blue_spi_value];\n #np.append(C,[red_spi_value, green_spi_value, blue_spi_value], axis=0)\n #print(C)\n print('----mean color calculation: done!----')\n \n # Find the superpixel center for each region r\n print('----center position calculation----')\n #P = np.zeros((N,1));\n segments_ids = np.unique(L)\n #print('sp segments id: ', segments_ids)\n # centers\n #label_idx = label2idx(L,N);\n for i in range(1,N):\n centers = np.round(np.array([np.mean(np.nonzero(L==i),axis=1) for i in segments_ids]))\n #P(i,1) = round(mean(label_idx{i}));\n #print(centers)\n print('----center position calculation: done!----')\n \n # Make contrast separation vector Q by comparing each superpixel\n print('----mat obtaining----')\n Q_color = np.zeros((N,N,3))\n Q = np.zeros((N,N,3))\n dist = np.zeros((N,N))\n \n for i in range(1,N):\n for j in range(1,N):\n p_i=centers[i]\n p_j=centers[j]\n #dist(i,j) = norm(p_i - p_j);\n dist[i,j] = np.linalg.norm(p_i - p_j);\n #dist_txt='i: %s, j: %s, Euc distance: %s'% (p_i,p_j,dist[i,j])\n #print(dist_txt)\n #print('----distance of inter-superpixel: finished----')\n #count of unit number in each superpixel\n t_j = np.sum((L==j).astype(int)) #np.sum([np.nonzero(L==j)]) #numel(label_idx{j});\n dist_weight = gaussian_weight(dist[i,j],0,dist_sigma);\n #print(t_j)\n \n Q[i,j,0] = t_j*abs(C[i,0]-C[j,0])*gauss_weight*dist_weight;\n Q[i,j,1] = t_j*abs(C[i,1]-C[j,1])*gauss_weight*dist_weight;\n Q[i,j,2] = t_j*abs(C[i,2]-C[j,2])*gauss_weight*dist_weight;\n #print('----Q weighted by distance: finished----')\n \n #print(dist[i,:])\n #print(np.argsort(dist[i,:],axis=0))\n #[~,I] = sort(dist(i,:)];\n I=np.argsort(dist[i,:],axis=0)\n Q_color[i,:,:] = Q[i,I,:]\n #print('----Q_color weighted by distance: finished----') \n #all_Q(1,a) = {Q_color};\n #print(Q_color)\n all_Q = dict(zip([1,a], Q_color)) #{Q_color}\n print('------all_Q obtaining: done!------')\n \n \n #label\n superpixel_label = np.zeros((1,N))\n im_bw=im_label #binary\n for j in range(1,N): #1:size(label_idx,1)\n #label_idx_j = label_idx{j};\n label_region = L==j\n if ( np.count_nonzero(label_region)>np.count_nonzero(~label_region) ):\n superpixel_label[1,j]= 1;\n \n all_superpixel_labels = dict(zip([a,1], superpixel_label)) #transpose\n print('------all_superpixel_labels obtaining: done!------')\n \n #save imagelists and segmentation labels \n #save('all_Q.mat','all_Q');\n \n sio.savemat(img_mat,{'all_Q':all_Q});\n print('--save mat: All_Q.mat done!--')\n \n sio.savemat(lbl_mat,{'all_superpixel_labels':all_superpixel_labels});\n print('--save mat: all_superpixel_labels.mat done!--')\n \ndef testing_eval(testingResult,testingQmat,testingLabelmat):\n f_out = open(testingResult,'w')\n train_data = sio.loadmat(testingQmat)['all_Q']\n train_labels = 
sio.loadmat(testingLabelmat)['all_superpixel_labels']\n data = get_train_data(train_data, train_labels)\n train_params = initialize_params(train_data, data)\n\n model = load_model('model_4.h5')\n num_images = train_data.shape[1]\n actual_images=0;\n avg_acu = 0\n out_mat = np.zeros((num_images, train_params['max_size']))\n try:\n for i in range(0, num_images):\n given_image_sp = train_data[0][i]\n given_image_lb = train_labels[i][0]\n num_sp = given_image_lb.shape[1]\n acu = 0\n for j in range(0, num_sp):\n given_label = given_image_lb[0][j]\n X = np.zeros((1,train_params['max_size'], 3))\n if given_label == 0:\n given_rep = np.asarray(given_image_sp[j][:], dtype='float')\n sam_len = given_rep.shape[0]\n #X[0,:sam_len, :] = np.true_divide(given_rep, given_rep.max())\n if (given_rep.max()==0):\n X[0,:sam_len, :] = given_rep\n else:\n X[0,:sam_len, :] = np.true_divide(given_rep, given_rep.max()) \n pred = model.predict(X)\n pred_idx = np.where(pred == pred.max())[1][0]\n #if (pred.max() < 0.60) and (pred_idx == 1): # constraint\n # pred_idx = 0\n out_mat[i][j] = pred_idx\n if pred_idx == given_label:\n acu += 1\n else:\n pass\n elif given_label == 1:\n given_rep = np.asarray(given_image_sp[j][:], dtype='float')\n sam_len = given_rep.shape[0]\n #X[0,:sam_len, :] = np.true_divide(given_rep, given_rep.max())\n if (given_rep.max()==0):\n X[0,:sam_len, :] = given_rep\n else:\n X[0,:sam_len, :] = np.true_divide(given_rep, given_rep.max())\n pred = model.predict(X)\n pred_idx = np.where(pred == pred.max())[1][0]\n out_mat[i][j] = pred_idx\n if pred_idx == given_label:\n acu += 1\n else:\n pass\n else:\n print('SOMETHING IS WRONG !')\n sys.exit(1)\n acu = float(acu)\/float(num_sp)\n print('Given Image Acu: ' + str(acu))\n avg_acu = avg_acu + acu\n actual_images +=1\n f_out.write('acu:'+str(acu)+'\\n')\n except IndexError:\n avg_acu = float(avg_acu)\/float(actual_images)\n print('(Index Except) Over Acu: ' + str(avg_acu))\n #sio.savemat('.\/Test_predict.mat', mdict={'predict_mat':pred}) \n sio.savemat('.\/Test_out.mat', mdict={'out_mat':out_mat})\n f_out.write('(Index Except) Over Acu:'+str(avg_acu)+'\\n')\n else: \n avg_acu = float(avg_acu)\/float(num_images)\n print('Over Acu: ' + str(avg_acu))\n #sio.savemat('.\/Test_predict.mat', mdict={'predict_mat':pred}) \n sio.savemat('.\/Test_out.mat', mdict={'out_mat':out_mat})\n f_out.write('Over Acu:'+str(avg_acu)+'\\n')\n\nif __name__ == '__main__':\n \n #SLIC mat information\n '''\n train_image='..\/MSRA10K_Imgs_GT\/Imgs\/*.jpg'\n train_mask='..\/MSRA10K_Imgs_GT\/Imgs\/*.png'\n test_image='..\/testing\/*.jpg'\n test_mask='..\/testing\/*.png'\n slicSeg(train_image,train_mask,train_image_mat,train_mask_mat)\n slicSeg(test_image,test_mask,test_image_mat,test_mask_mat)\n '''\n # make_superpixel_labels()\n #input data directory path training\n train_image_mat='..\/train\/all_Q.mat'\n train_mask_mat='..\/train\/all_superpixel_labels.mat'\n #testing\n test_image_mat='..\/test\/all_Q.mat'\n test_mask_mat='..\/test\/all_superpixel_labels.mat'\n\n # training\n #readSLICandMDLInit('train_result.txt',train_image_mat,train_mask_mat)\n #testing\n testing_eval('test_results.txt',test_image_mat,test_mask_mat)\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_366","text":"src\/metrics.py\nimport numpy as np\nimport pandas as pd\n\ndef annualizedRet(r, num_periods):\n '''\n\n @param r: series, returns\n @param num_periods: scalar, number of periods in return series\n @return: scalar, annualized return\n '''\n\n comp = (1 + r).prod()\n 
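    # Note (added comment): this is geometric compounding. Growing one unit through
    # every period gives comp = prod(1 + r); rescaling that growth to a one-year
    # horizon (num_periods observations per year, nPer observations in the sample)
    # gives comp ** (num_periods / nPer) - 1. For example, 252 daily returns of
    # 0.1% compound to about 1.286, i.e. roughly a 28.6% annualized return.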
nPer = r.shape[0]\n return comp ** (num_periods \/ nPer) - 1\n\ndef annualizedVol(r, num_periods, downside = False):\n '''\n\n @param r: series, returns\n @param num_periods: scalar, number of periods in return series\n @param downside: bool, for downside std\n @return: scalar, annualized volatility\n '''\n\n if downside:\n semistd = r[r < 0].std()\n return semistd * (num_periods ** 0.5)\n else:\n return r.std() * (num_periods ** 0.5)\n\n\ndef drawdown(r: pd.Series):\n '''\n\n @param r: series, returns\n @return: dictionary: 'hwm':high watermark, 'drawdowns': drawdown periods\n '''\n\n index = 1000 * (1 + r).cumprod()\n highwatermark = index.cummax()\n drawdowns = (index - highwatermark) \/ highwatermark\n return pd.DataFrame(dict(hwm = highwatermark,\n drawdowns=drawdowns))\n\ndef skewness(r):\n '''\n\n @param r: series, returns\n @return: scalar, third moment\n '''\n centerMoment = r - r.mean()\n sigR = r.std(ddof=0)\n exp = (centerMoment ** 3).mean()\n return exp \/ sigR ** 3\n\ndef kurtosis(r):\n '''\n\n @param r: series, returns\n @return: scalar, fourth moment\n '''\n\n centerMoment = r - r.mean()\n sigR = r.std(ddof=0)\n exp = (centerMoment ** 4).mean()\n return exp \/ sigR ** 4\n\ndef varGaussian(r, level=5, modified=False):\n '''\n\n @param r: series, returns\n @param level: scalar, significance level\n @param modified: bool, taylor expansion and approximation of the VAR\n @return: scalar, percentage of portfolio Value at Risk\n '''\n from scipy.stats import norm\n z = norm.ppf(level \/ 100)\n\n if modified is True:\n s = skewness(r)\n k = kurtosis(r)\n z = (z +\n (z ** 2 - 1) * s \/ 6 +\n (z ** 3 - 3 * z) * (k - 3) \/ 24 -\n (2 * z ** 3 - 5 * z) * (s ** 2) \/ 36\n )\n return - (r.mean() + z * r.std(ddof=0))\n\ndef sharpeRatio(r, rf, num_periods):\n '''\n\n @param r: series, returns\n @param rf: scalar or series, of risk-free rate proxy\n @param num_periods: scalar, number of periods\n @return: scalar, risk-adjusted return\n '''\n # convert the annual riskfree to per period\n rf = (1 + rf) ** (1 \/ num_periods) - 1\n excessRets = r - rf\n annExcessRets = annualizedRet(excessRets, num_periods)\n annVol = annualizedVol(r, num_periods)\n return annExcessRets \/ annVol\n\ndef sortinoRatio(r,rf, num_periods):\n '''\n\n @param r: series, returns\n @param rf: scalar or series, of risk-free rate proxy\n @param num_periods: scalar, number of periods\n @return: scalar, risk-adjusted return\n '''\n\n rf = (1 + rf) ** (1 \/ num_periods) - 1\n excessRets = r - rf\n annExcessRets = annualizedRet(excessRets, num_periods)\n anndownsideVol = annualizedVol(r, num_periods, downside=True)\n return annExcessRets \/ anndownsideVol\n\ndef summary_stats(r, riskFree=0, periodsInYear=252):\n '''\n\n @param r: series, return\n @param riskFree: scalar or series, of risk-free rate proxy\n @param num_periods: scalar, number of periods\n @param title: string, title of the returned df\n @return: DataFrame of summary statistics\n '''\n\n if not isinstance(r,pd.DataFrame):\n r = pd.DataFrame(r)\n\n annR = r.aggregate(annualizedRet, num_periods= periodsInYear)\n annVol = r.aggregate(annualizedVol, num_periods= periodsInYear)\n dd = r.aggregate(lambda r: drawdown(r).drawdowns.min())\n skew = r.aggregate(skewness)\n kurt = r.aggregate(kurtosis)\n modVar = r.aggregate(varGaussian, level=5, modified=True)\n sharpe = r.aggregate(sharpeRatio, rf=riskFree, num_periods = periodsInYear)\n sortino = r.aggregate(sortinoRatio, rf = riskFree, num_periods = periodsInYear)\n\n stats = pd.DataFrame({\n 'Annualized Returns': 
annR*100,\n 'Annualized Volatility': annVol*100,\n 'Sharpe Ratio': sharpe,\n 'Sortino Ratio': sortino,\n 'Max Drawdown': dd*100,\n 'Skewness': skew,\n 'Kurtosis': kurt,\n 'Cornish Fisher adj. VAR 5%': modVar*100,\n })\n\n #formatting\n stats['Annualized Returns'] = stats['Annualized Returns'].map('{:,.2f}%'.format)\n stats['Annualized Volatility'] = stats['Annualized Volatility'].map('{:,.2f}%'.format)\n stats['Sharpe Ratio'] = stats['Sharpe Ratio'].map('{:,.2f}'.format)\n stats['Sortino Ratio'] = stats['Sortino Ratio'].map('{:,.2f}'.format)\n stats['Max Drawdown'] = stats['Max Drawdown'].map('{:,.2f}%'.format)\n stats['Skewness'] = stats['Skewness'].map('{:,.2f}'.format)\n stats['Kurtosis'] = stats['Kurtosis'].map('{:,.2f}'.format)\n stats['Cornish Fisher adj. VAR 5%'] = stats['Cornish Fisher adj. VAR 5%'].map('{:,.2f}%'.format)\n\n return stats.T"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_367","text":"y-kunii\/face_classificationsrc\/video_emotion_color_demo_stereo.py\nfrom statistics import mode\n\nimport cv2\nfrom keras.models import load_model\nimport numpy as np\n\nfrom utils.datasets import get_labels\nfrom utils.inference import detect_faces\nfrom utils.inference import draw_text\nfrom utils.inference import draw_bounding_box\nfrom utils.inference import apply_offsets\nfrom utils.inference import load_detection_model\nfrom utils.preprocessor import preprocess_input\n\ndef draw_image(gray_image,rgb_image, faces):\n for face_coordinates in faces:\n \n x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)\n gray_face = gray_image[y1:y2, x1:x2]\n try:\n gray_face = cv2.resize(gray_face, (emotion_target_size))\n except:\n continue\n\n gray_face = preprocess_input(gray_face, True)\n gray_face = np.expand_dims(gray_face, 0)\n gray_face = np.expand_dims(gray_face, -1)\n emotion_prediction = emotion_classifier.predict(gray_face)\n emotion_probability = np.max(emotion_prediction)\n emotion_label_arg = np.argmax(emotion_prediction)\n emotion_text = emotion_labels[emotion_label_arg]\n emotion_window.append(emotion_text)\n\n if len(emotion_window) > frame_window:\n emotion_window.pop(0)\n try:\n emotion_mode = mode(emotion_window)\n except:\n continue\n\n if emotion_text == 'angry':\n color = emotion_probability * np.asarray((255, 0, 0))\n elif emotion_text == 'sad':\n color = emotion_probability * np.asarray((0, 0, 255))\n elif emotion_text == 'happy':\n color = emotion_probability * np.asarray((255, 255, 0))\n elif emotion_text == 'surprise':\n color = emotion_probability * np.asarray((0, 255, 255))\n else:\n color = emotion_probability * np.asarray((0, 255, 0))\n\n color = color.astype(int)\n color = color.tolist()\n\n draw_bounding_box(face_coordinates, rgb_image, color)\n draw_text(face_coordinates, rgb_image, emotion_mode,\n color, 0, -45, 1, 1)\n\n\n# parameters for loading data and images\ndetection_model_path = '..\/trained_models\/detection_models\/haarcascade_frontalface_default.xml'\nemotion_model_path = '..\/trained_models\/emotion_models\/fer2013_mini_XCEPTION.102-0.66.hdf5'\nemotion_labels = get_labels('fer2013')\n\n# hyper-parameters for bounding boxes shape\nframe_window = 10\nemotion_offsets = (20, 40)\n\n# loading models\nface_detection = load_detection_model(detection_model_path)\nemotion_classifier = load_model(emotion_model_path, compile=False)\n\n# getting input model shapes for inference\nemotion_target_size = emotion_classifier.input_shape[1:3]\n\n# starting lists for calculating modes\nemotion_window = []\n\n# starting 
video streaming\ncv2.namedWindow('window_frame')\nvideo_captureL = cv2.VideoCapture(0)\nvideo_captureR = cv2.VideoCapture(2)\nbgr_imageL = np.zeros((480,640,3), np.uint8)\nbgr_imageR = np.zeros((480,640,3), np.uint8)\nwhile True:\n bgr_imageL = video_captureL.read()[1]\n bgr_imageR = video_captureR.read()[1]\n gray_imageL = cv2.cvtColor(bgr_imageL, cv2.COLOR_BGR2GRAY)\n gray_imageR = cv2.cvtColor(bgr_imageR, cv2.COLOR_BGR2GRAY)\n rgb_imageL = cv2.cvtColor(bgr_imageL, cv2.COLOR_BGR2RGB)\n rgb_imageR = cv2.cvtColor(bgr_imageR, cv2.COLOR_BGR2RGB)\n facesL = detect_faces(face_detection, gray_imageL)\n facesR = detect_faces(face_detection, gray_imageR)\n\n draw_image(gray_imageL,rgb_imageL, facesL)\n draw_image(gray_imageR,rgb_imageR, facesR)\n\n bgr_imageL = cv2.cvtColor(rgb_imageL, cv2.COLOR_RGB2BGR)\n bgr_imageR = cv2.cvtColor(rgb_imageR, cv2.COLOR_RGB2BGR)\n stereoImg = cv2.hconcat([bgr_imageL, bgr_imageR])\n cv2.imshow('window_frame', stereoImg)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\nvideo_captureL.release()\nvideo_captureR.release()\ncv2.destroyAllWindows()\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_592","text":"import hashlib\nimport os\nimport pickle\nimport time\nimport csv\nimport gzip\nimport numpy\nimport pandas\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport scipy\nimport pprint\nimport sklearn.cluster\nimport mpl_toolkits.mplot3d\nimport matplotlib.patches\n\n\ndef get_matrix(filename):\n return pandas.DataFrame(scipy.io.mmread(filename).toarray())\n\n\ndef get_feature_ids(filename):\n return [row[0] for row in csv.reader(gzip.open(filename, mode=\"rt\"), delimiter=\"\\t\")]\n\n\ndef get_gene_name(filename):\n return [row[1] for row in csv.reader(gzip.open(filename, mode=\"rt\"), delimiter=\"\\t\")]\n\n\ndef get_feature_type(filename):\n return [row[2] for row in csv.reader(gzip.open(filename, mode=\"rt\"), delimiter=\"\\t\")]\n\n\ndef get_barcodes(filename):\n return [row[0] for row in csv.reader(gzip.open(filename, mode=\"rt\"), delimiter=\"\\t\")]\n\n\ndata = dict()\n\n\ndef get_all(ID):\n if ID in data:\n return data[ID]\n dirname = \"\/home\/jwlee\/Spermatogenesis\/result\/\" + ID + \"\/outs\/filtered_feature_bc_matrix\"\n matrix_dir = os.path.join(dirname, \"matrix.mtx.gz\")\n features_path = os.path.join(dirname, \"features.tsv.gz\")\n barcodes_path = os.path.join(dirname, \"barcodes.tsv.gz\")\n\n data[ID] = {\"matrix\": get_matrix(matrix_dir), \"feature_ids\": get_feature_ids(features_path), \"gene_name\": get_gene_name(features_path), \"feature_type\": get_feature_type(features_path), \"barcodes\": get_barcodes(barcodes_path)}\n\n return data[ID]\n\n\nnow = time.strftime(\"%m%d%H%M%S\")\nfigure_directory = \"\/home\/jwlee\/Spermatogenesis\/figures\/\"\nIDs = [\"NS_SW1\", \"NS_SW2\", \"NS_SW3\", \"NS_SW4\"]\n\n\ndef select_highly_variable_genes(raw_data, show=True, datum_point=95):\n a = raw_data.mean(axis=1).to_numpy()\n b = raw_data.var(axis=1).to_numpy()\n data = pandas.DataFrame.from_dict({\"means\": a, \"cvs\": numpy.divide(b, a)})\n\n data = data.loc[(data[\"cvs\"] > 0) & (data[\"means\"] > 0)]\n\n selected = data.loc[(data[\"cvs\"] >= numpy.percentile(data[\"cvs\"], datum_point)) & (data[\"means\"] >= numpy.percentile(data[\"means\"], datum_point))]\n unselected = data.loc[(data[\"cvs\"] < numpy.percentile(data[\"cvs\"], datum_point)) | (data[\"means\"] < numpy.percentile(data[\"means\"], datum_point))]\n\n raw_data = raw_data.iloc[selected.index]\n print(\"Gene & Cell:\", raw_data.shape)\n\n if 
show:\n mpl.use(\"Agg\")\n mpl.rcParams.update({\"font.size\": 30})\n\n plt.figure()\n plt.scatter(numpy.log(selected[\"means\"]), numpy.log(selected[\"cvs\"]), c=\"blue\", alpha=0.6, label=\"Selected\")\n plt.scatter(numpy.log(unselected[\"means\"]), numpy.log(unselected[\"cvs\"]), c=\"red\", alpha=0.6, label=\"Unselected\")\n\n plt.grid(True)\n plt.title(str(selected.shape[0]) + \" Genes: \" + str(100 - datum_point) + \"%\")\n plt.xlabel(\"log(means)\")\n plt.ylabel(\"log(CV)\")\n plt.legend()\n\n fig = plt.gcf()\n fig.set_size_inches(24, 18)\n fig.savefig(figure_directory + \"HighlyVariableGene_\" + now + \".png\")\n plt.close()\n\n return raw_data\n\n\ndef get_whole_data(genes=None):\n def make_md5(data):\n if data is None:\n return hashlib.md5(\"\".encode(\"utf-8\")).hexdigest()\n else:\n return hashlib.md5(str(sorted(genes)).encode(\"utf-8\")).hexdigest()\n\n if os.path.exists(make_md5(genes) + \".data\"):\n with open(make_md5(genes) + \".data\", \"rb\") as f:\n return pickle.load(f)\n\n if genes is not None and \"ref\" in genes:\n data = get_matrix(\"\/home\/jwlee\/Spermatogenesis\/result\/ref\/outs\/filtered_feature_bc_matrix\/matrix.mtx.gz\")\n print(data)\n\n data = sklearn.decomposition.PCA(random_state=0, n_components=data.shape[1]).fit_transform(numpy.swapaxes(data.values, 0, 1))\n print(\"PCA data:\", data)\n print(\"Cell & Gene-like:\", len(data), len(data[0]))\n\n data = numpy.swapaxes(sklearn.manifold.TSNE(n_components=2, random_state=0).fit_transform(data), 0, 1)\n\n projection = dict()\n projection[\"Barcode\"] = get_barcodes(\"\/home\/jwlee\/Spermatogenesis\/result\/ref\/outs\/filtered_feature_bc_matrix\/barcodes.tsv.gz\")\n projection[\"std_TSNE-1\"] = scipy.stats.zscore(data[0])\n projection[\"std_TSNE-2\"] = scipy.stats.zscore(data[1])\n\n with open(make_md5(genes) + \".data\", \"wb\") as f:\n pickle.dump(projection, f)\n\n return projection\n\n data = get_matrix(\"\/home\/jwlee\/Spermatogenesis\/result\/aggr\/outs\/filtered_feature_bc_matrix\/matrix.mtx.gz\")\n\n if genes is None:\n data = select_highly_variable_genes(data)\n else:\n data[\"gene\"] = get_gene_name(\"\/home\/jwlee\/Spermatogenesis\/result\/aggr\/outs\/filtered_feature_bc_matrix\/features.tsv.gz\")\n data = data[data[\"gene\"].isin(genes)]\n del data[\"gene\"]\n\n data = sklearn.decomposition.PCA(random_state=0, n_components=\"mle\").fit_transform(numpy.swapaxes(data.values, 0, 1))\n print(\"PCA data: \", data)\n print(\"Cell & Gene-like:\", len(data), len(data[0]))\n\n data = numpy.swapaxes(sklearn.manifold.TSNE(n_components=2, random_state=0).fit_transform(data), 1, 0)\n\n projection = dict()\n projection[\"Barcode\"] = numpy.array(get_barcodes(\"\/home\/jwlee\/Spermatogenesis\/result\/aggr\/outs\/filtered_feature_bc_matrix\/barcodes.tsv.gz\"))\n projection[\"std_TSNE-1\"] = scipy.stats.zscore(data[0])\n projection[\"std_TSNE-2\"] = scipy.stats.zscore(data[1])\n\n projection = pandas.DataFrame.from_dict(projection)\n\n with open(make_md5(genes) + \".data\", \"wb\") as f:\n pickle.dump(projection, f)\n\n return projection\n\n\ndef draw_all():\n mpl.use(\"Agg\")\n mpl.rcParams.update({\"font.size\": 30})\n\n projection = get_whole_data()\n\n plt.figure()\n plt.scatter(projection[\"std_TSNE-1\"], projection[\"std_TSNE-2\"], alpha=0.6)\n\n plt.grid(True)\n plt.title(\"Total\")\n plt.xlabel(\"Standardized TSNE-1\")\n plt.ylabel(\"Standardized TSNE-2\")\n\n fig = plt.gcf()\n fig.set_size_inches(24, 18)\n fig.savefig(figure_directory + \"total\" + \"_\" + now + \".png\")\n plt.close()\n\n\ndef 
get_real_barcodes(ID):\n projection = pandas.read_csv(\"\/home\/jwlee\/Spermatogenesis\/result\/\" + ID + \"\/outs\/analysis\/tsne\/2_components\/projection.csv\", header=0)\n\n return [barcode[:-1] + ID[-1] for barcode in projection[\"Barcode\"]]\n\n\ndef get_data_from_id(ID, genes=None):\n if ID == \"ref\":\n return get_whole_data(genes=[\"ref\"])\n\n projection = get_whole_data(genes)\n return projection[numpy.isin(projection[\"Barcode\"], get_real_barcodes(ID))]\n\n\ndef get_data_from_id_3d(ID, genes=None):\n projection = get_whole_data_3d(genes)\n return projection[numpy.isin(projection[\"Barcode\"], get_real_barcodes(ID))]\n\n\ndef draw_all_with_color():\n mpl.use(\"Agg\")\n mpl.rcParams.update({\"font.size\": 30})\n\n plt.figure()\n for ID in IDs:\n projection = get_data_from_id(ID)\n plt.scatter(projection[\"std_TSNE-1\"], projection[\"std_TSNE-2\"], alpha=0.6, label=ID)\n\n plt.grid(True)\n plt.title(\"Total\")\n plt.xlabel(\"Standardized TSNE-1\")\n plt.ylabel(\"Standardized TSNE-2\")\n plt.legend()\n\n fig = plt.gcf()\n fig.set_size_inches(24, 18)\n fig.savefig(figure_directory + \"total_\" + now + \".png\")\n plt.close()\n\n\ndef draw_tSNE(ID, genes=None):\n mpl.use(\"Agg\")\n mpl.rcParams.update({\"font.size\": 30})\n\n whole_projection = get_whole_data(genes)\n\n wanted = whole_projection[numpy.isin(whole_projection[\"Barcode\"], get_real_barcodes(ID))]\n unwanted = whole_projection[numpy.invert(numpy.isin(whole_projection[\"Barcode\"], get_real_barcodes(ID)))]\n\n plt.figure()\n plt.scatter(unwanted[\"std_TSNE-1\"], unwanted[\"std_TSNE-2\"], c=\"tab:gray\", alpha=0.6)\n plt.scatter(wanted[\"std_TSNE-1\"], wanted[\"std_TSNE-2\"], c=\"tab:blue\", alpha=1)\n\n plt.grid(True)\n plt.title(ID)\n plt.xlabel(\"Standardized TSNE-1\")\n plt.ylabel(\"Standardized TSNE-2\")\n\n fig = plt.gcf()\n fig.set_size_inches(24, 18)\n fig.savefig(figure_directory + ID + \"_\" + now + \".png\")\n plt.close()\n\n\ndef make_cluster_dict(cells):\n cells = cells.tolist()\n given = dict()\n for i in range(max(cells) + 1):\n given[i] = list(filter(lambda x: cells[x] == i, list(range(len(cells)))))\n return given\n\n\ndef clustering_Spectral_with_num(ID, num_groups):\n mpl.use(\"Agg\")\n mpl.rcParams.update({\"font.size\": 30})\n\n projection = get_data_from_id(ID)\n\n projection[\"group\"] = sklearn.cluster.SpectralClustering(n_clusters=num_groups, random_state=0, n_jobs=-1).fit_predict(projection[[\"std_TSNE-1\", \"std_TSNE-2\"]].values)\n\n group = make_cluster_dict(projection[\"group\"])\n data = [group[i] for i in group]\n cluster_centers = [numpy.mean([projection.loc[d, \"std_TSNE-1\"], projection.loc[d, \"std_TSNE-2\"]], axis=1) for d in data]\n\n plt.figure()\n plt.scatter(projection[\"std_TSNE-1\"], projection[\"std_TSNE-2\"], c=projection[\"group\"])\n plt.scatter([elem[0] for elem in cluster_centers], [elem[1] for elem in cluster_centers], c=\"k\", marker=\"X\")\n for i, loc in enumerate(cluster_centers):\n plt.text(loc[0] + 0.05, loc[1], str(i), fontsize=30, bbox=dict(color=\"white\", alpha=0.8))\n\n plt.grid(True)\n plt.title(\"Spectral: \" + str(num_groups))\n plt.xlabel(\"Standardized TSNE-1\")\n plt.ylabel(\"Standardized TSNE-2\")\n\n fig = plt.gcf()\n fig.set_size_inches(24, 18)\n fig.savefig(figure_directory + \"Spectral_\" + ID + \"_\" + str(num_groups) + \"_\" + now + \".png\")\n plt.close()\n\n return (group, cluster_centers)\n\n\ndef clustering_Kmeans_with_num(ID, num_groups):\n mpl.use(\"Agg\")\n mpl.rcParams.update({\"font.size\": 30})\n\n projection = get_data_from_id(ID)\n\n 
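    # Note (added comment): the clustering below runs on the two standardized t-SNE
    # coordinates only; n_clusters is the requested number of groups and
    # random_state=0 pins the centroid initialization so runs are reproducible.
    # The .fit() call followed by .fit_predict() on the same data fits the model
    # twice; with the fixed random_state the labels agree, and kmeans.labels_
    # would give the same result without the second fit.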
kmeans = sklearn.cluster.KMeans(n_clusters=num_groups, random_state=0, n_jobs=-1).fit(projection[[\"std_TSNE-1\", \"std_TSNE-2\"]].values)\n\n projection[\"group\"] = kmeans.fit_predict(projection[[\"std_TSNE-1\", \"std_TSNE-2\"]].values)\n\n plt.figure()\n plt.scatter(projection[\"std_TSNE-1\"], projection[\"std_TSNE-2\"], c=projection[\"group\"])\n plt.scatter([elem[0] for elem in kmeans.cluster_centers_], [elem[1] for elem in kmeans.cluster_centers_], c=\"k\", marker=\"X\", s=500)\n for i, loc in enumerate(kmeans.cluster_centers_):\n plt.text(loc[0] + 0.05, loc[1], str(i), fontsize=30, bbox=dict(color=\"white\", alpha=0.8))\n\n plt.grid(True)\n plt.title(\"KMeans: \" + str(num_groups))\n plt.xlabel(\"Standardized TSNE-1\")\n plt.ylabel(\"Standardized TSNE-2\")\n\n fig = plt.gcf()\n fig.set_size_inches(24, 18)\n fig.savefig(figure_directory + \"KMeans_\" + ID + \"_\" + str(num_groups) + \"_\" + now + \".png\")\n plt.close()\n\n return (make_cluster_dict(projection[\"group\"]), kmeans.cluster_centers_)\n\n\ndef clustering_Kmeans(ID, num=10):\n return [clustering_Kmeans_with_num(ID, i) for i in range(2, num + 1)]\n\n\ndef clustering_Kmeans_with_num_3d(ID, num_groups):\n def change_str(num):\n color = [\"#7e1e9c\", \"#15b01a\", \"#0343df\", \"#ff81c0\", \"#653700\", \"#e50000\", \"#95d0fc\", \"#029386\", \"#f97306\", \"#96f97b\", \"c20078\", \"#ffff14\", \"#75bbfd\", \"#929591\", \"#89fe05\", \"#bf77f6\", \"#9a0eea\", \"#033500\"]\n return color[num % len(color)]\n\n mpl.use(\"Agg\")\n mpl.rcParams.update({\"font.size\": 30})\n\n projection = get_data_from_id_3d(ID)\n\n kmeans = sklearn.cluster.KMeans(n_clusters=num_groups, random_state=0, n_jobs=-1).fit(projection[[\"std_TSNE-1\", \"std_TSNE-2\", \"std_TSNE-3\"]].values)\n\n projection[\"group\"] = kmeans.fit_predict(projection[[\"std_TSNE-1\", \"std_TSNE-2\", \"std_TSNE-3\"]].values)\n\n fig = plt.figure()\n ax = mpl_toolkits.mplot3d.Axes3D(fig, elev=45, azim=135)\n ax.scatter(projection[\"std_TSNE-1\"], projection[\"std_TSNE-2\"], projection[\"std_TSNE-3\"], c=projection[\"group\"])\n ax.scatter([elem[0] for elem in kmeans.cluster_centers_], [elem[1] for elem in kmeans.cluster_centers_], [elem[2] for elem in kmeans.cluster_centers_], c=\"k\", marker=\"X\", s=500)\n\n ax.set_title(\"KMeans: \" + str(num_groups))\n ax.set_xlabel(\"Standardized TSNE-1\")\n ax.set_ylabel(\"Standardized TSNE-2\")\n ax.set_zlabel(\"Standardized TSNE-3\")\n\n fig = plt.gcf()\n fig.set_size_inches(24, 18)\n fig.savefig(figure_directory + \"KMeans3D_\" + ID + \"_\" + str(num_groups) + \"_\" + now + \".png\")\n plt.close()\n\n if not os.path.exists(\"Kmeans_\" + ID + \"_\" + str(num_groups) + \".data\"):\n with open(\"KMeans_\" + ID + \"_\" + str(num_groups) + \".data\", \"w\") as f:\n f.write(\"x,y,z,c\\n\")\n for x, y, z, c in zip(projection[\"std_TSNE-1\"], projection[\"std_TSNE-2\"], projection[\"std_TSNE-3\"], list(map(lambda x: change_str(x), projection[\"group\"]))):\n f.write(str(x) + \",\" + str(y) + \",\" + str(z) + \",\" + c + \"\\n\")\n\n return (make_cluster_dict(projection[\"group\"]), kmeans.cluster_centers_)\n\n\ndef gene_in_cells(ID, cell_numbers=None):\n all_data = get_all(ID)\n all_data[\"matrix\"].index = all_data[\"gene_name\"]\n\n if cell_numbers is None:\n return all_data[\"matrix\"]\n\n data = all_data[\"matrix\"].copy()\n\n data.drop(all_data[\"matrix\"].columns[list(filter(lambda x: x not in cell_numbers, list(range(all_data[\"matrix\"].shape[1]))))], axis=1, inplace=True)\n\n return data\n\n\ndef gene_sum_in_cells(ID, cell_numbers=None, 
num_gene=None):\n data = gene_in_cells(ID, cell_numbers).sum(axis=1).sort_values(ascending=False)\n data = data[data > 0]\n\n return data if num_gene is None else data[:num_gene]\n\n\ndef gene_mean_in_cells(ID, cell_numbers=None, num_gene=100, text=True):\n data = gene_in_cells(ID, cell_numbers).mean(axis=1).sort_values(ascending=False)\n data = data[data > 0]\n\n return data if num_gene is None else data[:num_gene]\n\n\ndef check_valid_function(cluster_function):\n allowed_functions = [clustering_Kmeans_with_num, clustering_Spectral_with_num]\n if cluster_function not in allowed_functions:\n print(\"cluster_function must be in\", allowed_functions)\n return False\n else:\n return True\n\n\ndef check_valid_function_3d(cluster_function):\n allowed_functions = [clustering_Kmeans_with_num_3d]\n if cluster_function not in allowed_functions:\n print(\"cluster_function must be in\", allowed_functions)\n return False\n else:\n return True\n\n\ndef stacked_bar_gene_sum(ID, cluster_function, num_groups=10, num_gene=5):\n if not check_valid_function(cluster_function):\n return\n cluster_group, cluster_centers = cluster_function(ID, num_groups)\n\n gene_list = numpy.swapaxes([list(gene_sum_in_cells(ID, cluster_group[i], num_gene)) for i in cluster_group], 0, 1)\n gene_name = numpy.swapaxes([list(gene_sum_in_cells(ID, cluster_group[i], num_gene).index) for i in cluster_group], 0, 1)\n\n pprint.pprint(gene_list)\n pprint.pprint(gene_name)\n\n mpl.use(\"Agg\")\n mpl.rcParams.update({\"font.size\": 30})\n\n plt.figure()\n plt.bar(numpy.arange(num_groups), gene_list[0], 0.35)\n for i in range(1, num_gene):\n plt.bar(numpy.arange(num_groups), gene_list[i], 0.35, bottom=numpy.sum(numpy.array([gene_list[j] for j in range(i)]), axis=0))\n\n gene_tick = numpy.amax(numpy.sum(gene_list, axis=0)) \/ 5 \/ num_gene\n for i in range(num_groups):\n for j in range(num_gene):\n plt.text(i + 0.05, (j + 1) * gene_tick, gene_name[j][i], fontsize=10, bbox=dict(color=\"white\", alpha=0.3))\n\n plt.grid(True)\n plt.title(\"Stacked Bar \" + ID + \" with \" + str(num_gene) + \" Gene\")\n plt.xlabel(\"Group\")\n plt.ylabel(\"# of Genes\")\n plt.xticks(numpy.arange(num_groups), list(range(num_groups)))\n\n fig = plt.gcf()\n fig.set_size_inches(24, 18)\n fig.savefig(figure_directory + \"StackedBar_\" + ID + \"_\" + str(num_groups) + \"_\" + str(num_gene) + \"_\" + now + \".png\")\n plt.close()\n\n\ndef stacked_bar_gene_mean(ID, cluster_function, num_groups=10, num_gene=5):\n if not check_valid_function(cluster_function):\n return\n cluster_group, cluster_centers = cluster_function(ID, num_groups)\n\n gene_list = numpy.swapaxes([list(gene_mean_in_cells(ID, cluster_group[i], num_gene)) for i in cluster_group], 0, 1)\n gene_name = numpy.swapaxes([list(gene_mean_in_cells(ID, cluster_group[i], num_gene).index) for i in cluster_group], 0, 1)\n\n pprint.pprint(gene_list)\n pprint.pprint(gene_name)\n\n mpl.use(\"Agg\")\n mpl.rcParams.update({\"font.size\": 30})\n\n plt.figure()\n plt.bar(numpy.arange(num_groups), gene_list[0], 0.35)\n for i in range(1, num_gene):\n plt.bar(numpy.arange(num_groups), gene_list[i], 0.35, bottom=numpy.sum(numpy.array([gene_list[j] for j in range(i)]), axis=0))\n\n gene_tick = numpy.amax(numpy.sum(gene_list, axis=0)) \/ 5 \/ num_gene\n for i in range(num_groups):\n for j in range(num_gene):\n plt.text(i + 0.05, (j + 1) * gene_tick, gene_name[j][i], fontsize=10, bbox=dict(color=\"white\", alpha=0.3))\n\n plt.grid(True)\n plt.title(\"Stacked Bar \" + ID + \" with \" + str(num_gene) + \" Genes\")\n 
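    # Note (added comment): each x position below is one cluster. The bars were
    # stacked by passing the running column-wise sum of the previously drawn rows
    # as bottom=, so each group's column shows the per-gene mean counts of its top
    # num_gene genes on top of one another, and gene_tick spaces the gene-name
    # labels at fixed fractions of the tallest stack.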
plt.xlabel(\"Group\")\n plt.ylabel(\"# of Gene\")\n plt.xticks(numpy.arange(num_groups), list(range(num_groups)))\n\n fig = plt.gcf()\n fig.set_size_inches(24, 18)\n fig.savefig(figure_directory + \"StackedBar_\" + ID + \"_\" + str(num_groups) + \"_\" + str(num_gene) + \"_\" + now + \".png\")\n plt.close()\n\n\ndef sort_index(gene_list):\n group_order = [tuple(list(scipy.stats.rankdata(data)) + [i]) for i, data in enumerate(gene_list)]\n\n group_order.sort()\n\n group_order = [list(elem)[-1] for elem in group_order]\n answer = [[i for i in gene_list[j]] for j in group_order]\n\n return (group_order, answer)\n\n\ndef heatmap_sum_top(ID, cluster_function, num_groups=10, num_gene=None, show_text=True):\n if not check_valid_function(cluster_function) and not check_valid_function_3d(cluster_function):\n return\n\n cluster_group, cluster_centers = cluster_function(ID, num_groups)\n\n gene_name = list(gene_sum_in_cells(ID).index)\n if num_gene is not None:\n gene_name = gene_name[:num_gene]\n gene_name = sorted(gene_name)\n\n group_order, gene_list = sort_index([gene_sum_in_cells(ID, cluster_group[i]) for i in cluster_group])\n for i, data in enumerate(gene_list):\n data.drop(labels=list(filter(lambda x: x not in gene_name, list(data.index))), inplace=True)\n data.sort_index(inplace=True)\n gene_list[i] = scipy.stats.zscore(data.tolist())\n\n pprint.pprint(gene_name)\n pprint.pprint(gene_list)\n\n mpl.use(\"Agg\")\n mpl.rcParams.update({\"font.size\": 30})\n\n plt.figure()\n plt.imshow(gene_list)\n\n plt.title(\"HeatMap _ \" + ID + \"_\" + str(num_gene) + \" Genes\")\n plt.xlabel(\"Genes\")\n plt.ylabel(\"Groups\")\n plt.xticks(numpy.arange(len(gene_name)), gene_name, fontsize=10, rotation=90)\n plt.yticks(numpy.arange(len(group_order)), group_order, fontsize=10)\n\n threshold = numpy.amax([numpy.amax(i) for i in gene_list]) \/ 2\n for i in range(len(gene_name)):\n for j in range(num_groups):\n if show_text:\n plt.text(j, i, str(gene_list[i][j]), color=\"white\" if gene_list[i][j] < threshold else 'black', fontsize=10)\n\n fig = plt.gcf()\n fig.set_size_inches(max(24, len(gene_name) * 0.5), 18)\n fig.savefig(figure_directory + \"HeatMap_\" + ID + \"_\" + str(num_groups) + \"_\" + str(len(gene_name)) + \"_\" + now + \".png\")\n plt.close()\n\n return (cluster_group, group_order, cluster_centers)\n\n\ndef heatmap_mean_top(ID, cluster_function, num_groups=10, num_gene=10, show_text=False):\n if not check_valid_function(cluster_function) and not check_valid_function_3d(cluster_function):\n return\n\n cluster_group, cluster_centers = cluster_function(ID, num_groups)\n\n gene_name = list(gene_mean_in_cells(ID).index)\n if num_gene is not None:\n gene_name = gene_name[:num_gene]\n gene_name = sorted(gene_name)\n\n gene_list = [gene_mean_in_cells(ID, cluster_group[i]).sort_index() for i in cluster_group]\n for i, data in enumerate(gene_list):\n data = data.add(pandas.Series(0, index=gene_name), fill_value=0)\n data.drop(labels=list(filter(lambda x: x not in gene_name, list(data.index))), inplace=True)\n data.sort_index(inplace=True)\n gene_list[i] = scipy.stats.zscore(data.tolist())\n\n mpl.use(\"Agg\")\n mpl.rcParams.update({\"font.size\": 30})\n\n plt.figure()\n plt.imshow(gene_list)\n\n plt.title(\"HeatMap_\" + ID + \"_\" + str(num_gene) + \" Genes\")\n plt.xlabel(\"Genes\")\n plt.ylabel(\"Groups\")\n plt.xticks(numpy.arange(len(gene_name)), gene_name, fontsize=10, rotation=90)\n plt.yticks(numpy.arange(num_groups), list(range(num_groups)), fontsize=10)\n\n threshold = numpy.amax([numpy.amax(i) for i 
in gene_list]) \/ 2\n for i in range(len(gene_name)):\n for j in range(num_groups):\n if show_text:\n plt.text(j, i, str(gene_list[i][j]), color=\"white\" if gene_list[i][j] < threshold else 'black', fontsize=10)\n\n fig = plt.gcf()\n fig.set_size_inches(max(24, len(gene_name) * 0.5), 18)\n fig.savefig(figure_directory + \"HeatMap_\" + ID + \"_\" + str(num_groups) + \"_\" + str(len(gene_name)) + \"_\" + now + \".png\")\n plt.close()\n\n return (cluster_group, list(range(num_groups)), cluster_centers)\n\n\ndef find_marker_gene(ID, cluster_function, num_groups=10):\n if not check_valid_function(cluster_function):\n return\n\n cluster_group, cluster_centers = cluster_function(ID, num_groups)\n\n whole_cells = gene_in_cells(ID)\n marker_gene = list()\n for i in cluster_group:\n selected_gene = list()\n data = gene_in_cells(ID, cell_numbers=cluster_group[i])\n for row in list(data.index):\n value = scipy.stats.ttest_ind(list(data.loc[row]), list(whole_cells.loc[row]))\n if value[0] > 0 and value[1] < 0.05:\n selected_gene.append((value[1], row))\n selected_gene.sort()\n print(selected_gene[:10])\n marker_gene.append(tuple(selected_gene[i][1] for i in range(3)))\n\n return marker_gene\n\n\ngene_1 = [\"Grfa1\", \"Zbtb16\", \"Nanos3\", \"Nanos2\", \",Sohlh1\", \"Neurog3\", \"Piwil4\", \"Lin28a\", \"Utf1\", \"Kit\", \"Uchl1\", \"Dmrt1\", \"Sohlh2\", \"Dazl\", \"Stra8\", \"Scml2\", \"Rpa2\", \"Rad51\", \"Rhox13\", \"Dmc1\", \"Melob\", \"Sycp1\", \"Sycp3\", \"Ccnb1ip1\", \"Hormad1\", \"Piwil2\", \"Piwil1\", \"Atr\", \"Mybl1\", \"Dyx1c1\", \"Msh3\", \"Ccnb1\", \"Spo11\", \"Ldha\", \"Ldhc\", \"Cetn4\", \"Tekt1\", \"Acr\", \"Ssxb1\", \"Ssxb2\", \"Acrv1\", \"Catsper3\", \"Catsper1\", \"Saxo1\", \"Hsfy2\", \"Txndc8\", \"Tnp1\", \"Tnp2\", \"Tmod4\", \"Gapdhs\", \"Car2\", \"Prm2\", \"Prm1\", \"Prm3\", \"Pgk2\", \"Wt1\", \"Sox9\", \"Cyp11a1\", \"Nr5a1\", \"Star\", \"Hsd3b1\", \"Clu\", \"Cyp17a1\", \"Gata4\", \"Acta2\"]\ngene_2 = [\"Id4\", \"Gfra1\", \"Zbtb16\", \"Stra8\", \"Rhox13\", \"Sycp3\", \"Dmc1\", \"Piwil1\", \"Pgk2\", \"Acr\", \"Gapdhs\", \"Prm1\"]\n\n\ndef heatmap_given_genes(ID, cluster_function, gene_name=gene_1, num_groups=10):\n if not check_valid_function(cluster_function) and not check_valid_function_3d(cluster_function):\n return\n\n cluster_group, cluster_centers = cluster_function(ID, num_groups)\n\n raw_gene_list = [gene_mean_in_cells(ID, cluster_group[i]) for i in cluster_group]\n gene_list = [[None for i in gene_name] for j in raw_gene_list]\n for i, data in enumerate(raw_gene_list):\n for j, gene in enumerate(gene_name):\n gene_list[i][j] = float(data.loc[gene]) if (gene in list(data.index)) else 0.0\n\n for i, data in enumerate(gene_list):\n if numpy.unique(data).size > 1:\n gene_list[i] = scipy.stats.zscore(data)\n else:\n gene_list[i] = [0 for gene in data]\n\n group_order, gene_list = sort_index(gene_list)\n pprint.pprint(gene_list)\n\n mpl.use(\"Agg\")\n mpl.rcParams.update({\"font.size\": 30})\n\n plt.figure()\n plt.imshow(gene_list)\n\n plt.title(\"Heatmap_\" + ID + \"_\" + str(len(gene_name)) + \" Genes\")\n plt.xlabel(\"Genes\")\n plt.ylabel(\"Groups\")\n plt.xticks(numpy.arange(len(gene_name)), gene_name, fontsize=10, rotation=90)\n plt.yticks(numpy.arange(num_groups), group_order, fontsize=10)\n\n fig = plt.gcf()\n fig.set_size_inches(max(24, 0.5 * len(gene_name)), max(18, 0.2 * num_groups))\n fig.savefig(figure_directory + \"HeatMap_\" + ID + \"_\" + str(num_groups) + \"_\" + str(len(gene_name)) + \"_\" + now + \".png\")\n plt.close()\n\n return (cluster_group, group_order, 
cluster_centers)\n\n\ndef pseudotime(ID, cluster_function, num_groups=10, select_gene=True):\n if not check_valid_function(cluster_function):\n return\n\n if select_gene:\n cluster_group, group_order, cluster_centers = heatmap_given_genes(ID, cluster_function, num_groups=num_groups)\n else:\n cluster_group, group_order, cluster_centers = heatmap_mean_top(ID, cluster_function, show_text=False)\n projection = get_data_from_id(ID)\n\n mpl.use(\"Agg\")\n mpl.rcParams.update({\"font.size\": 30})\n\n plt.figure()\n for i in cluster_group:\n plt.scatter(projection[\"std_TSNE-1\"].iloc[cluster_group[i]], projection[\"std_TSNE-2\"].iloc[cluster_group[i]], c=[\"C\" + str(i % 10) for _ in range(projection[\"std_TSNE-1\"].iloc[cluster_group[i]].size)])\n for i in range(1, len(cluster_centers)):\n plt.arrow(cluster_centers[group_order[i - 1]][0], cluster_centers[group_order[i - 1]][1], 0.8 * (cluster_centers[group_order[i]][0] - cluster_centers[group_order[i - 1]][0]), 0.8 * (cluster_centers[group_order[i]][1] - cluster_centers[group_order[i - 1]][1]), width=0.05, edgecolor=None, linestyle=\":\")\n\n plt.grid(True)\n plt.title(\"Ordering Groups\")\n plt.xlabel(\"Standardized TSNE-1\")\n plt.ylabel(\"Standardized TSNE-2\")\n\n fig = plt.gcf()\n fig.set_size_inches(24, 18)\n fig.savefig(figure_directory + \"Arrow_\" + ID + \"_\" + str(num_groups) + \"_\" + now + \".png\")\n plt.close()\n\n\ndef pseudotime_3d(ID, cluster_function, num_groups=10, select_gene=True):\n class Arrow3D(matplotlib.patches.FancyArrowPatch):\n def __init__(self, xs, ys, zs, *args, **kwargs):\n matplotlib.patches.FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)\n self._verts3d = xs, ys, zs\n\n def draw(self, renderer):\n xs3d, ys3d, zs3d = self._verts3d\n xs, ys, zs = mpl_toolkits.mplot3d.proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)\n self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))\n matplotlib.patches.FancyArrowPatch.draw(self, renderer)\n\n if not check_valid_function_3d(cluster_function):\n return\n\n if select_gene:\n cluster_group, group_order, cluster_centers = heatmap_given_genes(ID, cluster_function, num_groups=num_groups)\n else:\n cluster_group, group_order, cluster_centers = heatmap_mean_top(ID, cluster_function, num_groups=num_groups)\n projection = get_data_from_id_3d(ID)\n\n mpl.use(\"Agg\")\n mpl.rcParams.update({\"font.size\": 30})\n\n fig = plt.figure()\n ax = mpl_toolkits.mplot3d.Axes3D(fig, elev=45, azim=135)\n\n for i in cluster_group:\n ax.scatter(projection[\"std_TSNE-1\"].iloc[cluster_group[i]], projection[\"std_TSNE-2\"].iloc[cluster_group[i]], projection[\"std_TSNE-3\"].iloc[cluster_group[i]], c=[\"C\" + str(i % 10) for _ in range(projection[\"std_TSNE-1\"].iloc[cluster_group[i]].size)])\n for i in range(1, len(cluster_centers)):\n ax.add_artist(Arrow3D([cluster_centers[group_order[i - 1]][0], cluster_centers[group_order[i]][0]], [cluster_centers[group_order[i - 1]][1], cluster_centers[group_order[i]][1]], [cluster_centers[group_order[i - 1]][2], cluster_centers[group_order[i]][2]], mutation_scale=20, lw=3, arrowstyle=\"-|>\", color=\"k\"))\n\n ax.set_title(\"Ordering Groups in 3D\")\n ax.set_xlabel(\"Standardized TSNE-1\")\n ax.set_ylabel(\"Standardized TSNE-2\")\n ax.set_zlabel(\"Standardized TSNE-3\")\n\n fig = plt.gcf()\n fig.set_size_inches(24, 18)\n fig.savefig(figure_directory + \"Arraw3D_\" + ID + \"_\" + str(num_groups) + \"_\" + now + \".png\")\n plt.close()\n\n\ndef bar_given_genes(ID, cluster_function, gene_name=gene_1, num_groups=10):\n if not 
check_valid_function(cluster_function):\n return\n\n cluster_group, cluster_centers = cluster_function(ID, num_groups)\n\n raw_gene_list = [gene_mean_in_cells(ID, cluster_group[i]) for i in cluster_group]\n gene_list = [[None for i in gene_name] for j in cluster_group]\n for i, data in enumerate(raw_gene_list):\n for j, gene in enumerate(gene_name):\n gene_list[i][j] = float(data.loc[gene]) if (gene in list(data.index)) else 0.0\n\n for i, data in enumerate(gene_list):\n if numpy.unique(data).size > 1:\n gene_list[i] = scipy.stats.zscore(data)\n else:\n gene_list[i] = [0 for _ in data]\n\n mpl.use(\"Agg\")\n mpl.rcParams.update({\"font.size\": 30})\n\n plt.figure()\n fig, ax = plt.subplots(num_groups)\n\n for i, data in enumerate(gene_list):\n for j, high in enumerate(data):\n ax[i].bar(j, high, color=\"C\" + str(j % 10), edgecolor=\"k\", label=gene_name[j])\n\n plt.setp(ax, xticks=list(range(len(gene_name))), xticklabels=gene_name)\n\n fig = plt.gcf()\n fig.set_size_inches(max(24, 2.5 * len(gene_name)), max(18, 4 * num_groups))\n fig.savefig(figure_directory + \"Bar_graph_\" + ID + \"_\" + str(num_groups) + \"_\" + str(len(gene_name)) + \"_\" + now + \".png\")\n plt.close()\n\n\ndef get_common_genes(ID, cluster_function, num_groups=10):\n if not check_valid_function:\n return\n\n cluster_group, cluster_centers = cluster_function(ID, num_groups)\n\n gene_list = [list(gene_mean_in_cells(ID, cluster_group[i]).index) for i in cluster_group]\n\n common_gene = set(gene_list[0])\n for gene in gene_list[1:]:\n if not common_gene:\n return common_gene\n common_gene = common_gene & set(gene)\n\n pprint.pprint(common_gene)\n print(len(common_gene))\n\n return common_gene\n\n\ndef scatter_given_genes(ID, genes=gene_1):\n def change_scale(gene_expression):\n minimum, maximum = min(gene_expression), max(gene_expression)\n\n return list(map(lambda x: x if x > 0.1 else 0.1, list(map(lambda x: (x - minimum) \/ (maximum - minimum), gene_expression))))\n\n data_1 = get_data_from_id(ID, genes)\n data_2 = get_all(ID)\n\n for gene in genes:\n try:\n number_gene = data_2[\"gene_name\"].index(gene)\n except ValueError:\n print(gene, \"is not here\")\n continue\n\n gene_expression = change_scale(data_2[\"matrix\"].iloc[number_gene].values)\n\n mpl.use(\"Agg\")\n mpl.rcParams.update({\"font.size\": 30})\n\n plt.figure()\n for x, y, alpha in zip(data_1[\"std_TSNE-1\"], data_1[\"std_TSNE-2\"], gene_expression):\n plt.scatter(x, y, c='b', alpha=alpha)\n\n plt.grid(True)\n plt.title(ID + \"_\" + gene)\n plt.xlabel(\"Standardized TSNE-1\")\n plt.ylabel(\"Standardized TSNE-2\")\n\n fig = plt.gcf()\n fig.set_size_inches(24, 18)\n fig.savefig(figure_directory + \"Scatter_\" + ID + \"_\" + gene + \"_\" + now + \".png\")\n plt.close()\n\n print(gene, \"Done!!\")\n\n\ndef get_whole_data_3d(genes=None):\n def make_md5(data):\n if data is None:\n return hashlib.md5(\"3d\".encode(\"utf-8\")).hexdigest()\n else:\n return hashlib.md5((\"3d\" + str(sorted(data))).encode(\"utf-8\")).hexdigest()\n\n if os.path.exists(make_md5(genes) + \".data\"):\n with open(make_md5(genes) + \".data\", \"rb\") as f:\n return pickle.load(f)\n\n data = get_matrix(\"\/home\/jwlee\/Spermatogenesis\/result\/aggr\/outs\/filtered_feature_bc_matrix\/matrix.mtx.gz\")\n\n if genes is None:\n data = select_highly_variable_genes(data)\n else:\n data[\"gene\"] = get_gene_name(\"\/home\/jwlee\/Spermatogenesis\/result\/aggr\/outs\/filtered_feature_bc_matrix\/features.tsv.gz\")\n data = data[data[\"gene\"].isin(genes)]\n del data[\"gene\"]\n\n data = 
sklearn.decomposition.PCA(random_state=0, n_components=\"mle\").fit_transform(numpy.swapaxes(data.values, 0, 1))\n print(\"PCA data: \", data)\n print(\"Cell & Gene-like:\", len(data), len(data[0]))\n\n data = numpy.swapaxes(sklearn.manifold.TSNE(n_components=3, random_state=0).fit_transform(data), 0, 1)\n\n projection = dict()\n projection[\"Barcode\"] = numpy.array(get_barcodes(\"\/home\/jwlee\/Spermatogenesis\/result\/aggr\/outs\/filtered_feature_bc_matrix\/barcodes.tsv.gz\"))\n projection[\"std_TSNE-1\"] = scipy.stats.zscore(data[0])\n projection[\"std_TSNE-2\"] = scipy.stats.zscore(data[1])\n projection[\"std_TSNE-3\"] = scipy.stats.zscore(data[2])\n\n projection = pandas.DataFrame.from_dict(projection)\n\n with open(make_md5(genes) + \".data\", \"wb\") as f:\n pickle.dump(projection, f)\n\n return projection\n\n\ndef draw_tSNE_3d(ID, genes=None):\n mpl.use(\"Agg\")\n mpl.rcParams.update({\"font.size\": 30})\n\n whole_projection = get_whole_data_3d(genes)\n\n if not os.path.exists(\"whole.data\"):\n with open(\"whole.data\", \"w\") as f:\n f.write(\"x,y,z\\n\")\n for x, y, z in zip(whole_projection[\"std_TSNE-1\"], whole_projection[\"std_TSNE-2\"], whole_projection[\"std_TSNE-3\"]):\n f.write(str(x) + \",\" + str(y) + \",\" + str(z) + \"\\n\")\n\n wanted = whole_projection[numpy.isin(whole_projection[\"Barcode\"], get_real_barcodes(ID))]\n unwanted = whole_projection[numpy.invert(numpy.isin(whole_projection[\"Barcode\"], get_real_barcodes(ID)))]\n\n if not os.path.exists(ID + \".data\"):\n with open(ID + \".data\", \"w\") as f:\n f.write(\"x,y,z\\n\")\n for x, y, z in zip(wanted[\"std_TSNE-1\"], wanted[\"std_TSNE-2\"], wanted[\"std_TSNE-3\"]):\n f.write(str(x) + \",\" + str(y) + \",\" + str(z) + \"\\n\")\n\n fig = plt.figure()\n ax = mpl_toolkits.mplot3d.Axes3D(fig, elev=45, azim=135)\n\n ax.scatter(unwanted[\"std_TSNE-1\"], unwanted[\"std_TSNE-2\"], unwanted[\"std_TSNE-3\"], c=\"tab:gray\", alpha=0.6)\n ax.scatter(wanted[\"std_TSNE-1\"], wanted[\"std_TSNE-2\"], wanted[\"std_TSNE-3\"], c=\"tab:blue\", alpha=1)\n\n ax.set_xlabel(\"Standardized TSNE-1\")\n ax.set_ylabel(\"Standardized TSNE-2\")\n ax.set_zlabel(\"Standardized TSNE-3\")\n ax.set_title(ID)\n\n fig = plt.gcf()\n fig.set_size_inches(24, 18)\n fig.savefig(figure_directory + ID + \"_3D_\" + now + \".png\")\n plt.close()\n\n\nif __name__ == \"__main__\":\n for ID in IDs:\n pseudotime_3d(ID, clustering_Kmeans_with_num_3d, select_gene=False)\n for _ in range(5):\n print(\"\\a\")\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_594","text":"from numpy.core.numeric import array_equal\nfrom numpy.lib import math\nimport scipy.ndimage\nimport scipy.stats\nfrom scipy import signal\nimport matplotlib.pyplot as plt\nimport numpy as np\nnp.set_printoptions(suppress=True)\n\n\ndef gauss1d_a(pointCount: int, std: float):\n return \n\n\ndef gauss1d_b(pointCount: int, std: float):\n xs = range(pointCount)\n xs = [x - (pointCount - 1)\/2 for x in xs]\n ys = np.exp(-np.square(xs)\/(2 * std * std))\n return ys\n\n\ndef show1d():\n a = gauss1d_a(25, 3)\n b = gauss1d_b(25, 3)\n np.testing.assert_array_almost_equal(a, b)\n print(\", \".join([str(x) for x in a]))\n plt.plot(a)\n plt.plot(b)\n plt.show()\n\ndef show2d():\n gkern1d = gauss1d_b(5, 2)\n gkern2d = np.outer(gkern1d, gkern1d)\n #plt.imshow(gkern2d, interpolation='none', cmap='Greys_r')\n #plt.show()\n print(gkern2d)\n\n\nif __name__ == \"__main__\":\n show2d()"} 
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_595","text":"0\nimport numpy as np\nimport scipy.stats as ss\nimport tensorflow as tf\nfrom tensorflow.keras import backend as K\n\nrating_weights = K.expand_dims(tf.constant(np.arange(1, 11), dtype='float32'), -1)\n\n\ndef earth_movers_distance(y_true, y_pred):\n cdf_true = K.cumsum(y_true, axis=-1)\n cdf_pred = K.cumsum(y_pred, axis=-1)\n emd = K.sqrt(K.mean(K.square(cdf_true - cdf_pred), axis=-1))\n return K.mean(emd)\n\n\ndef pearson_correlation(y_true, y_pred):\n # y_true = y_true * rating_weights\n # y_pred = y_pred * rating_weights\n #\n # xm = y_true - K.mean(y_true)\n # ym = y_pred - K.mean(y_pred)\n # print(xm, ym)\n # pearson_correlation = K.sum(xm * ym) \/ K.sqrt(K.sum(K.square(xm) * K.square(ym)))\n # print(pearson_correlation)\n # return K.square(pearson_correlation) # is is actually R-squared from regression\n means_true = y_true - K.mean(y_true)\n means_pred = y_pred - K.mean(y_pred)\n\n # normalizing stage - setting a 1 variance\n means_true = K.l2_normalize(means_true, axis=0)\n means_pred = K.l2_normalize(means_pred, axis=0)\n\n # final result\n pearson_correlation = K.sum(means_true * means_pred)\n\n return pearson_correlation\n\n\ndef pearson_correlation_ava(y_true, y_pred):\n x = K.cumsum(K.dot(y_true, rating_weights))\n y = K.cumsum(K.dot(y_pred, rating_weights))\n return pearson_correlation(x, y)\n\n\ndef spearman_correlation(y_true, y_pred):\n return ss.spearmanr(y_true.numpy(), y_pred.numpy())[0]\n\n\ndef two_class_quality(y_true, y_pred):\n x = K.dot(y_true, rating_weights)\n y = K.dot(y_pred, rating_weights)\n score = K.equal(tf.floor(x \/ 5), tf.floor(y \/ 5))\n return K.mean(score)\n\n\ndef two_class_quality_acc(y_true, y_pred):\n x = np.array(y_true)\n y = np.array(y_pred)\n score = np.equal(np.floor(x \/ 5), np.floor(y \/ 5))\n return np.mean(score)\n\n\ndef mean_abs_percentage(y_true, y_pred):\n abs_diff = K.abs(y_pred - y_true) \/ y_true\n return K.mean(1 - abs_diff)\n\n\ndef mean_abs_percentage_acc(y_true, y_pred):\n x, y = np.array(y_true), np.array(y_pred)\n abs_diff = np.abs(x - y) \/ x\n return np.mean(1 - abs_diff)\n\n\ndef mean_abs_percentage_ava(y_true, y_pred):\n x = K.dot(y_true, rating_weights)\n y = K.dot(y_pred, rating_weights)\n return mean_abs_percentage(x, y)\n\n\nif __name__ == '__main__':\n a = np.array([1.62, 1.83, 1.89, 1.55, 1.74, 1.6, 1.6, 1.72, 1.54, 1.82])\n b = np.array([57.15, 91.69, 95.27, 56.16, 78.52, 66.09, 63.71, 79.58, 50.22, 93.39])\n\n print(a)\n print(b)\n print(pearson_correlation(a, b))\n print(ss.pearsonr(a, b))\n print(spearman_correlation(a, b))\n print(ss.spearmanr(a, b))\n\n a = np.array([1.62, 4.83, 5.89, 8.55, 8.74, 6.6, ])\n b = np.array([2.62, 3.83, 1.89, 6.55, 5.74, 4.6, ])\n print(two_class_quality(a, b))\n print(mean_abs_percentage(a, b))\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_596","text":"# import pyedflib\nimport numpy as np\nfrom scipy import signal as sg\nimport argparse\nimport sys\nimport json\n# import matplotlib.pyplotmatplot as plt\nfrom pprint import pprint\n\nimport pandas as pd\n\nclass Notch():\n Q = 0 \n f0 = 0 \n def __init__(self,f0=60,Q=50):\n self.f0=f0\n self.Q=Q\n\n def argparse(self):\n parser = argparse.ArgumentParser()\n parser.add_argument('-i','--archivo',help='Ingrese el nombre del archivo .edf a utilizar',type = str)\n parser.add_argument('-fo','--fo',help='Frecuencia que se desea filtrar. 
Por defecto fo = 60',type = float)\n parser.add_argument('-Q','--Q',help='Factor de calidad del filtro. Por defecto Q = 50',type = int)\n parser.add_argument('-e','--edf',help='Nombre y dirección del archivo .edf de salida',type = str)\n parsedargs = parser.parse_args()\n arc = parsedargs.archivo\n output = parsedargs.edf\n if (parsedargs.fo != None):\n if (parsedargs.fo> 0):\n self.f0 = parsedargs.fo\n if (parsedargs.Q != None):\n if (parsedargs.Q>0):\n self.Q = parsedargs.Q\n return arc,output\n\n # def read_edf(self,nameEdf):\n # '''\n # Descripción: Se encarga de leer el archivo .edf\n # Entradas: - nameEdf: nombre del archivo .edf\n # Salidas: - in_signal: Matriz de Canales X Tiempo\n # - fs: Frecuencia de muestro\n # - headers: Etiquetas del archivo .edf \n # ''' \n # edf = pyedflib.EdfReader(nameEdf) \n # headers = edf.getSignalHeaders() \n # nch = edf.signals_in_file\n # nsig = edf.getNSamples()[0]\n # fs = edf.getSampleFrequency(0)\n # in_signal = np.zeros((nch,nsig))\n # for x in range(nch):\n # in_signal[x,:] = edf.readSignal(x)\n # edf._close()\n # del edf\n # return in_signal,fs,headers\n\n def filt(self,in_signal,fs):\n '''\n Descripción: Se encarga de filtrar los datos del EEG\n Entradas: - in_signal: Matriz de Canales X Tiempo\n - fs: Frecuencia de muestro\n Salidas: - out_signal: EEG filtrado (Matriz de CanalesXTiempo)\n ''' \n w0 = self.f0\/(fs\/2) \n num,den = sg.iirnotch(w0,self.Q)\n out_signal = np.zeros((len(in_signal),len(in_signal[0])))\n for i in range(0,len(in_signal)):\n out_signal[i]=sg.filtfilt(num,den,in_signal[i])\n return out_signal,num,den\n\n # def write_edf(self,in_signal,headers,nameEdf):\n # '''\n # Descripción: Se encarga de escribir los datos del nuevo EEG\n # Entradas: - headers: etiquetas del .edf \n # - in_signal: Matriz de Canales X Tiempo\n # - nameEdf : Nombre con el que se desea guardar el nuevo .edf\n # ''' \n # edf = pyedflib.EdfWriter(nameEdf,len(in_signal),file_type=pyedflib.FILETYPE_EDFPLUS)\n # edf_info = []\n # edf_signal = []\n # for i in range (len(in_signal)):\n # channel_info={'label':headers[i]['label'],'dimension':headers[i]['dimension'],'sample_rate':headers[i]['sample_rate'],'physical_max':headers[i]['physical_max'] , 'physical_min': headers[i]['physical_min'], 'digital_max': headers[i]['digital_max'], 'digital_min': headers[i]['digital_min'], 'transducer':headers[i]['transducer'] , 'prefilter':headers[i]['prefilter']+',notch '+str(self.f0)+'Hz'}\n # edf_info.append(channel_info)\n # edf_signal.append(in_signal[i])\n # edf.setSignalHeaders(edf_info)\n # edf.writeSamples(edf_signal)\n # edf.close()\n # del edf\n\n#Read data from stdin\ndef read_in():\n lines = sys.stdin.readlines()\n #Since our input would only be having one line, parse our JSON data from that\n return json.loads(lines[0])\n\nif __name__ == '__main__':\n \n notch1 = Notch()\n # argparse input mode\n # print (\"start of notch\")\n # arc,output = notch1.argparse()\n # signal , fs ,headers= notch1.read_edf(arc)\n # filtered_signal,num,den = notch1.filt(signal[:,232250:234750],fs)\n # print(\"size of output\",filtered_signal.shape)\n # print(vals)\n # print(\"size of input\",in_signal.shape)\n # fig,subplt=plt.subplots(3,1,figsize=(8,5))\n # subplt[0].plot(t,inp[9][ni:nf])\n # subplt[0].title.set_text('Señal original')\n # subplt[0].grid()\n #notch1.write_edf(filtered_signal,headers,output)\n \n # python-shell input mode\n inSignals=read_in()\n nch=len(inSignals)\n nSamples = len(inSignals[0]['data'])\n fs=inSignals[0]['samplefrequency']\n # print(nch,nSamples)\n 
in_signal = np.zeros((nch,nSamples))\n\n # print(len(inSignals))\n # print(len(inSignals[0]['data']))\n currentCh=0\n for item in inSignals:\n for subitem in item['data']:\n subitem.pop('time', None)\n df = pd.DataFrame(item['data'])\n in_signal[currentCh,:]=np.array(df.values).transpose()\n # print (in_signal[currentCh,:],currentCh)\n currentCh = currentCh +1\n\n\n \n\n # python-shell execute mode\n filtered_signal,num,den = notch1.filt(in_signal,fs)\n\n # python-shell output mode\n response={}\n response['channels']=[]\n currentCh=0\n for channel in inSignals:\n \n channelObj={}\n channelObj['id']=channel['id']\n channelObj['label']=channel['label']\n channelObj['samples']=channel['samples']\n channelObj['physicalMaximum']=channel['physicalMaximum']\n channelObj['physicalMinimum']=channel['physicalMinimum']\n channelObj['digitalMaximum']=channel['digitalMaximum']\n channelObj['digitalMinimum']=channel['digitalMinimum']\n channelObj['samplefrequency']= channel['samplefrequency']\n channelObj['data']=[]\n currentD=0\n for subitem in channel['data']:\n d={}\n # d['value']=float(subitem['value'])\n d['value']=float(filtered_signal[currentCh,currentD])\n channelObj['data'].append(d)\n currentD=currentD+1\n response['channels'].append(channelObj)\n currentCh=currentCh+1\n # print(channelObj['id'])\n print (json.dumps(response))\n\n "} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_597","text":"import numpy as np\nfrom scipy import sparse\nimport scipy.sparse.linalg as la\nimport time\nfrom random import randint\nfrom operators import *\nfrom spectrum_analysis import *\nfrom myio import *\nfrom hamiltonians import *\nfrom ipr import *\nimport sys\n\n\ndef simulation(dim_loc, L, n_dis, data, Hfunc, Kfunc, Zfunc=None, time_set=None):\n dim=dim_loc**L\n\n # Setting cycle\n if time_set is not None:\n steps=len(time_set)\n Z=np.zeros((n_dis, L, steps), dtype=complex)\n Znew=np.zeros((n_dis, L, steps), dtype=complex)\n spectral_matrix= np.zeros((n_dis, 7))\n\n # Disorder cycle\n for counter in range(n_dis):\n start = time.time()\n\n H = Hfunc(**data)\n kick = Kfunc(**data)\n U_F = np.dot(la.expm(-1j*H),kick)\n eigval, eigvec = np.linalg.eig(U_F)\n\n # Spectral properties\n spectrum = np.angle(eigval)\n gaps = gap(spectrum)\n shifted_gaps = shifted_gap(spectrum, dim_loc)\n shifted_gaps_2 = shifted_gap(spectrum, 2)\n log10_gaps = np.log10(gap(spectrum))\n log10_shifted_gaps = np.log10(shifted_gap(spectrum, dim_loc))\n log10_shifted_gaps_2 = np.log10(shifted_gap(spectrum, 2))\n r = ratio(spectrum)\n spectral_matrix[counter]=np.array([np.mean(gaps), np.mean(shifted_gaps),\\\n np.mean(shifted_gaps_2), np.mean(log10_gaps), np.mean(log10_shifted_gaps),\\\n np.mean(log10_shifted_gaps_2), r ])\n\n if time_set is not None:\n #Initial state\n initial_state = np.zeros(dim)\n initial_state[randint(0, dim-1)] = 1\n\n final_state = evolve(time_set, initial_state, eigvec, eigval)\n\n for i in range(L):\n Z[counter, i], Znew[counter, i] = Zfunc(initial_state, final_state, i, time_set, **data)\n\n elapsed = time.time()-start\n print('size', L, '\\tdisorder realization', counter,'\\ttime elapsed', elapsed)\n\n if time_set is not None:\n Z_mean=np.mean(Z, axis=(0,1))\n Z_var=np.var(Z, axis=(0,1))\n Znew_mean=np.mean(Znew, axis=(0,1))\n Znew_var=np.var(Znew, axis=(0,1))\n else:\n Z_mean=0\n Z_var=0\n Znew_mean=0\n Znew_var=0\n spectral_data=np.mean(spectral_matrix, axis=0)\n spectral_data_var=np.var(spectral_matrix, axis=0) #not really the variance!!!!\n return Z_mean, Z_var, Znew_mean, 
Znew_var, spectral_data, spectral_data_var\n\ndef IPR(dim_loc, L, n_dis, datavec, Hfunc, Kfunc):\n dim=dim_loc**L\n IPR_step_vec=np.zeros(len(datavec))\n IPR_tot_vec=np.zeros(len(datavec))\n IPR_sum_step_vec=np.zeros(len(datavec))\n IPR_sum_tot_vec=np.zeros(len(datavec))\n\n\n # Disorder cycle\n for counter in range(n_dis):\n start = time.time()\n JZZ_array = datavec[0]['JZZ']*(1\/2+np.random.rand(L))\n hZ_array = datavec[0]['hZ']*(np.random.rand(L))\n hX_array = datavec[0]['hX']*(np.random.rand(L))\n kick = Kfunc(**datavec[0])\n for dcount, data in enumerate(datavec):\n H = Hfunc(**data, JZZ_array=JZZ_array, hZ_array=hZ_array, hX_array=hX_array)\n U_F = np.dot(la.expm(-1j*H),kick)\n eigval, eigvec = np.linalg.eig(U_F)\n if dcount==0:\n eigvec0=eigvec\n eigvec_old=eigvec\n else:\n eigvec, IPR_step =rearrange(eigvec,eigvec_old)\n IPR_step_vec[dcount]+=IPR_step\n IPR_tot_vec[dcount]+=IPR_func(eigvec, eigvec0)\n IPR_sum_step_vec[dcount]+=sum_IPR(eigvec, eigvec_old)\n IPR_sum_tot_vec[dcount]+=sum_IPR(eigvec, eigvec0)\n eigvec_old=eigvec\n\n\n elapsed = time.time()-start\n print('size', L, '\\tdisorder realization', counter,'\\ttime elapsed', elapsed)\n\n return IPR_step_vec\/n_dis, IPR_tot_vec\/n_dis, IPR_sum_step_vec\/n_dis, IPR_sum_tot_vec\/n_dis\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_598","text":"0\n\"\"\"\nauthor: @nimrobotics\ndescription: calculates the functional connectivity between regions and plots them\n\"\"\"\n\nfrom multiprocessing import Condition\nimport numpy as np\nimport scipy.io\nimport glob\nimport os\nimport pandas as pd\n# from fnirslib.activation import getPeakActivation\n# from fnirslib.plots import plotData\nimport sys\nsys.path.append('..\/fnirslib')\nfrom plots import plotData\nfrom activation import getPeakActivation, getMeanActivation\n\nif __name__ == '__main__':\n in_dir = \".\/procDataAct\/\" #directory of the data\n out_dir = '.\/actData\/' #directory to save the plots\n subdirs = [x[0] for x in os.walk(in_dir)][1:] #get all the subdirectories\n threshold=0.4\n\n females = ['SAI01', 'SAI02', 'SAI04', 'SAI09', 'SAI10', 'SAI14', 'SAI19', 'SAI22', 'SAI25', 'SAI26', 'SAI27', \n 'SAI28', 'SAI29', 'SAI30', 'SAI31', 'SAI32', 'SAI33', 'SAI35', 'SAI36', 'SAI38', 'SAI39', 'SAI40']\n males = ['SAI03', 'SAI05', 'SAI06', 'SAI07', 'SAI08', 'SAI11', 'SAI12', 'SAI13', 'SAI15', 'SAI16', 'SAI17', \n 'SAI18', 'SAI20', 'SAI21', 'SAI23', 'SAI24', 'SAI34', 'SAI37']\n\n # initialize a pd df\n actDF = pd.DataFrame(columns=['ID', 'sex', 'condition', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', 'C23', 'C24', 'C25', 'C26', 'C27', 'C28', 'C29', 'C30', 'C31', 'C32', 'C33', 'C34', 'C35', 'C36', 'C37', 'C38', 'C39', 'C40', 'C41', 'C42', 'C43', 'C44', 'C45', 'C46'])\n\n for dir in subdirs:\n condition = dir.split('\/')[-1]\n files = glob.glob(dir+'\/*.mat') # get all the files in the directory\n # avgActivation = np.zeros(np.\n for file in files:\n print('Processing file: ', file)\n ID = file.split('\/')[-1].split('.')[0][1:]\n if 'SAI'+ID in females:\n sex = 'F'\n if 'SAI'+ID in males:\n sex = 'M'\n\n print('\\nProcessing file: {}, condition: {}'.format(ID, condition))\n data = scipy.io.loadmat(file) #load data from the directory\n data = data['pdata'] #get the data from the dictionary\n data = getPeakActivation(data, interval=8)\n # data = getMeanActivation(data)\n print('data shape: ', data.shape)\n print('data: ', data[0])\n actDF.loc[len(actDF)] 
= [ID, sex, condition] + list(data)\n\n # save df to csv\n actDF.to_csv(out_dir+'actDF.csv', index=False)"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_599","text":"vislab\/datasets\/imagenet.py\n\"\"\"\nImageNet classification and detection challenges.\n\nEverything loaded from files, and images distributed with dataset.\n\"\"\"\nimport os\nimport pandas as pd\nimport glob\nimport scipy.io\nimport networkx as nx\nimport numpy as np\nimport multiprocessing\nimport vislab\nfrom vislab.datasets.pascal import load_annotation_files\n\n\nclass ImagenetGraph(object):\n \"\"\"\n Represents the ImageNet structure, loaded from .mat files provided\n in the ILSVRC2013_devkit.\n\n Download devkit from [1] and untar into devkit_dirname.\n Then download meta_10k from [2] and place into devkit_dirname\/data.\n\n [1]: http:\/\/imagenet.stanford.edu\/image\/ilsvrc2013\/ILSVRC2013_devkit.tgz\n [2]: https:\/\/dl.dropboxusercontent.com\/u\/44891\/research\/meta_10k.mat\n \"\"\"\n def __init__(self, metafile, type='1k'):\n \"\"\"\n Parameters\n ----------\n type: string\n In ['1k', '10k', 'det'].\n \"\"\"\n data = scipy.io.loadmat(metafile)['synsets']\n if not type == '10k':\n data = data[0]\n\n g = nx.DiGraph()\n\n # First pass: add nodes.\n wnids = []\n for node in data:\n if type == '10k':\n node = node[0]\n\n wnid = str(node[1][0])\n wnids.append(wnid)\n g.add_node(wnid, {'words': node[2][0]})\n\n # Second pass: add edges.\n for i, node in enumerate(data):\n if type == '10k':\n node = node[0]\n\n if type == 'det':\n children = node[4].flatten()\n else:\n children = node[5][0]\n\n # Children are IDs from the original metafile, which is 1-indexed.\n for child in children:\n g.add_edge(wnids[i], wnids[child - 1])\n\n self.g = g\n\n def node_name(self, wnid):\n word = self.g.node[wnid]['words'].split(',')[0]\n return '{} ({})'.format(word, wnid)\n\n def get_all_successors(self, wnid):\n children = self.g.successors(wnid)\n all_children = list(children)\n for child in children:\n all_children += self.get_all_successors(child)\n return all_children\n\n def get_leaf_nodes(self, wnids):\n return [\n wnid for wnid in wnids\n if not self.g.successors(wnid)\n ]\n\n\ndef load_imagenet_detection(year='2013', force=False, args=None):\n \"\"\"\n TODO: currently only loads val split.\n TODO: current hard-coded to be 2013 split.\n\n Load all the annotations, including object bounding boxes.\n Loads XML data in args['num_workers'] threads using joblib.Parallel.\n\n Warning: this takes a few minutes to load from scratch!\n \"\"\"\n if args is None:\n args = {'num_workers': multiprocessing.cpu_count()}\n\n cache_filename = \\\n vislab.config['paths']['shared_data'] + \\\n '\/ilsvrc{}_dfs.h5'.format(year)\n if not force and os.path.exists(cache_filename):\n images_df = pd.read_hdf(cache_filename, 'images_df')\n objects_df = pd.read_hdf(cache_filename, 'objects_df')\n return images_df, objects_df\n\n # Load all annotation file data (should take < 30 s).\n # TODO: concat the dataframes here\n splits = ['val']\n for split in splits:\n annotation_filenames = glob.glob('{}\/DET_bbox_{}\/*.xml'.format(\n vislab.config['paths']['ILSVRC{}'.format(year)], split))\n images_df, objects_df = load_annotation_files(\n annotation_filenames, args['num_workers'])\n images_df['_split'] = split\n\n # Make sure that all labels are either True or False.\n images_df = images_df.fillna(False)\n\n # Propagate split info to objects_df\n objects_df['split'] = np.repeat(\n images_df['_split'].values,\n 
images_df['_num_objects'].values.astype(int)\n )\n\n images_df.to_hdf(cache_filename, 'images_df', mode='w')\n objects_df.to_hdf(cache_filename, 'objects_df', mode='a')\n return images_df, objects_df\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_600","text":"import base64\nfrom io import BytesIO\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport re\nfrom numpy import mean, std\nimport random\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.base import clone\nimport scipy.stats as stats\nfrom .models import *\nfrom .forms import *\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import pearsonr\n\n\n# function to get the Image and plot\n\ndef get_image() -> object:\n # create a byte buffer for the image to save\n buffer = BytesIO()\n # create a plot with the use of bytesio object\n plt.savefig(buffer, format='png')\n # set the cursor to the beginning of the screen\n buffer.seek(0)\n # retrieve the entire content of the file\n image_png = buffer.getvalue()\n # encoding and decoding\n graph = base64.b64encode(image_png)\n graph = graph.decode('utf-8')\n # free the memory of the buffer\n buffer.close()\n return graph\n\n\n# enrollment capacity by school trend\ndef get_plot(chart_type, **kwargs):\n plt.switch_backend('AGG')\n # academic year\n x = kwargs.get('x')\n # y = total enrollment\n y = kwargs.get('y')\n # z = capacity of school\n z = kwargs.get('z')\n school_name = kwargs.get('name_of_school')\n\n if chart_type == 'barplot':\n fig, ax = plt.subplots(figsize=(10, 8))\n ax.bar(x, z, width=0.50, color='gray', label='capacity of school')\n ax.bar(x, y, width=0.25, color='g', label='total enrollment')\n ax.set_ylabel(\"Total Enrollment \/ Capacity\")\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(10)\n # specify integer or one of preset strings, e.g.\n # tick.label.set_fontsize('x-small')\n tick.label.set_rotation('60')\n for bar in ax.patches:\n # Using Matplotlib annotate function and\n # passing the coordinates where the annotation shall be done\n # x-coordinate: bar.get_x() + bar.get_width() \/ 2\n # y-coordinate: bar.get_height()\n # free space to be left to make graph pleasing: (0, 8)\n # ha and va stand for the horizontal and vertical alignment\n plt.annotate(format(bar.get_height(), '.2f'),\n (bar.get_x() + bar.get_width() \/ 2,\n bar.get_height()), ha='center', va='center',\n size=8, xytext=(0, 3),\n textcoords='offset points')\n plt.title(school_name)\n ax.set_ylim([0, max(z) + 300])\n ax.legend()\n\n else:\n title = school_name\n plt.figure(figsize=(10, 8))\n plt.title(title)\n plt.plot(x, y, 'b-', label='enrollments')\n plt.plot(x, z, 'g-', label='capacity')\n # plt.ylim(0, max(y) + 100)\n plt.ylabel(\"Total Enrollment \/ Capacity\")\n plt.xlabel(\"Academic Year\")\n plt.legend()\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\n# This graph plots trends of enrollment versus school capacity for schools in a district for a select academic year\ndef get_pairs(**kwargs):\n plt.switch_backend('AGG')\n # total enrollment\n x = kwargs.get('x')\n # y = capacity of school\n y = kwargs.get('y')\n # selected year\n year = kwargs.get('academic_year')\n school = kwargs.get('name_of_school')\n district = kwargs.get('district_name')\n fig, ax = plt.subplots(figsize=(10, 8))\n ax.bar(school, y, width=0.50, color='gray', label='capacity of 
school')\n ax.bar(school, x, width=0.25, color='g', label='total enrollment')\n ax.set_ylabel(\"Total Enrollment \/ Capacity\")\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(10)\n # specify integer or one of preset strings, e.g.\n # tick.label.set_fontsize('x-small')\n tick.label.set_rotation(45)\n for bar in ax.patches:\n # Using Matplotlib annotate function and\n # passing the coordinates where the annotation shall be done\n # x-coordinate: bar.get_x() + bar.get_width() \/ 2\n # y-coordinate: bar.get_height()\n # free space to be left to make graph pleasing: (0, 8)\n # ha and va stand for the horizontal and vertical alignment\n plt.annotate(format(bar.get_height(), '.2f'),\n (bar.get_x() + bar.get_width() \/ 2,\n bar.get_height()), ha='center', va='center',\n size=8, xytext=(0, 3),\n textcoords='offset points')\n plt.title(\"Enrollment\/capacity for the academic year \" + year + \" of district \" + district)\n\n plt.legend()\n\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\ndef get_grade_plot(**kwargs):\n plt.switch_backend('AGG')\n # the selected academic year\n year = kwargs.get('academic_year')\n\n # the selected school\n school_name = kwargs.get('name_of_school')\n data_boys = kwargs.get('data_boys')\n data_girls = kwargs.get('data_girls')\n data_none = kwargs.get('data')\n\n # get subplots for boys, girls and single gender\n\n if len(data_none) > 0:\n fig, ax1 = plt.subplots(1, figsize=(10, 8))\n ax1.plot(data_none['grade'], data_none['enrollment'], 'ro')\n ax1.set_title('Enrollment')\n school_title = School.objects.filter(id=school_name).distinct().values_list('school_name', flat=True)\n school_name_title = school_title[0]\n fig.suptitle(\"Enrollment for \" + school_name_title + \" - Academic Year \" + year)\n plt.xlabel('Grades')\n plt.tight_layout()\n ax1.grid()\n graph = get_image()\n return graph\n\n elif len(data_girls) > 0 and len(data_boys) > 0:\n fig, (ax1, ax2) = plt.subplots(2, figsize=(10, 8))\n ax1.plot(data_girls['grade'], data_girls['enrollment'], 'go')\n ax1.set_title('Girls Enrollment')\n ax2.plot(data_boys['grade'], data_boys['enrollment'], 'bo')\n ax2.set_title('Boys Enrollment')\n\n school_title = School.objects.filter(id=school_name).distinct().values_list('school_name', flat=True)\n school_name_title = school_title[0]\n fig.suptitle(\"Enrollment for \" + school_name_title + \" - Academic Year \" + year)\n plt.xlabel('Grades')\n plt.tight_layout()\n ax1.grid()\n ax2.grid()\n graph = get_image()\n return graph\n else:\n graph = \"No data recorded for the selected year\"\n return graph\n\n\ndef get_district_grade_plot_girls(**kwargs):\n plt.switch_backend('AGG')\n # the selected academic year\n year = kwargs.get('academic_year')\n grade = kwargs.get('grade')\n # the selected school\n district_name = kwargs.get('district')\n data_girls = kwargs.get('data_girls')\n school_girls = kwargs.get('school_girls')\n\n # get subplots for boys, girls and single gender\n fig, ax1 = plt.subplots(1, figsize=(10, 8))\n ax1.plot(data_girls['enrollment'], school_girls, 'go')\n ax1.set_title('Enrollment')\n plt.xticks(np.arange(0, max(data_girls['enrollment']) + 5, 5.0))\n for tick in ax1.xaxis.get_major_ticks():\n tick.label.set_fontsize(10)\n # specify integer or one of preset strings, e.g.\n # tick.label.set_fontsize('x-small')\n tick.label.set_rotation(60)\n district_title = School.objects.filter(id=district_name).distinct().values_list('district_name', flat=True)\n district_name_title = district_title[0]\n fig.suptitle(\"Grade - \" + 
str(grade) + \" Girls Enrollment for Academic Year \" + year + \" for district\" + str(\n district_name_title))\n plt.xlabel('Enrollment')\n plt.ylabel('School')\n plt.tight_layout()\n ax1.grid()\n graph = get_image()\n return graph\n\n\ndef get_district_grade_plot_boys(**kwargs):\n plt.switch_backend('AGG')\n # the selected academic year\n year = kwargs.get('academic_year')\n grade = kwargs.get('grade')\n # the selected school\n district_name = kwargs.get('district')\n data_boys = kwargs.get('data_boys')\n school_boys = kwargs.get('school_boys')\n\n # get subplots for boys, girls and single gender\n fig, ax1 = plt.subplots(1, figsize=(10, 8))\n ax1.plot(data_boys['enrollment'], school_boys, 'bo')\n ax1.set_title('Enrollment')\n for tick in ax1.xaxis.get_major_ticks():\n tick.label.set_fontsize(10)\n # specify integer or one of preset strings, e.g.\n # tick.label.set_fontsize('x-small')\n tick.label.set_rotation(60)\n plt.xticks(np.arange(0, max(data_boys['enrollment']) + 5, 5.0))\n district_title = School.objects.filter(id=district_name).distinct().values_list('district_name', flat=True)\n district_name_title = district_title[0]\n fig.suptitle(\"Grade - \" + str(grade) + \" Boys Enrollment for Academic Year \" + year + \" for district\" + str(\n district_name_title))\n plt.xlabel('Enrollment')\n plt.ylabel('School')\n plt.tight_layout()\n ax1.grid()\n graph = get_image()\n return graph\n\n\ndef get_district_grade_plot_none(**kwargs):\n plt.switch_backend('AGG')\n # the selected academic year\n year = kwargs.get('academic_year')\n grade = kwargs.get('grade')\n # the selected school\n district_name = kwargs.get('district')\n data_none = kwargs.get('data_none')\n school_none = kwargs.get('school_none')\n\n # get subplots for boys, girls and single gender\n fig, ax1 = plt.subplots(1, figsize=(10, 8))\n ax1.plot(data_none['enrollment'], school_none, 'ro')\n ax1.set_title('Enrollment')\n plt.xticks(np.arange(0, max(data_none['enrollment']) + 5, 5.0))\n for tick in ax1.xaxis.get_major_ticks():\n tick.label.set_fontsize(10)\n # specify integer or one of preset strings, e.g.\n # tick.label.set_fontsize('x-small')\n tick.label.set_rotation(60)\n district_title = School.objects.filter(id=district_name).distinct().values_list('district_name', flat=True)\n district_name_title = district_title[0]\n fig.suptitle(\"Grade - \" + str(grade) + \" Total Enrollment for Academic Year \" + year + \" for district\" + str(\n district_name_title))\n plt.xlabel('Enrollment')\n plt.ylabel('School')\n plt.tight_layout()\n ax1.grid()\n graph = get_image()\n return graph\n\n\ndef get_plot_boys_primary(**kwargs):\n plt.switch_backend('AGG')\n data = kwargs.get('data')\n for _ in data.shape:\n ger = (data['enrollment'] \/ data['age_5_to_11_years']) * 100\n academic_year = data.academic_year\n sns.set(font_scale=1)\n sns.set_style(\"white\")\n ax = ger.plot.bar(figsize=(15, 6))\n sns.despine(left=True, bottom=True)\n # label and title\n ax.set_xticklabels(np.arange(len(academic_year)))\n ax.set_title('Gross Enrollment Ratio (%) for Boys in Primary School In St. 
Lucia', size=18)\n ax.set_xticklabels(academic_year)\n for tick in ax.get_xticklabels():\n tick.set_rotation(-30)\n ax.set(xlabel='Academic Year', ylabel='Gross enrollment rate (%)')\n\n # annotations\n for p in ax.patches:\n ax.annotate(format(p.get_height(), '.2f'),\n (p.get_x() + p.get_width() \/ 2., p.get_height()),\n ha='center', va='center',\n xytext=(0, 9),\n textcoords='offset points')\n # adjust legend\n\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\ndef get_plot_girls_primary(**kwargs):\n plt.switch_backend('AGG')\n data = kwargs.get('data')\n for _ in data.shape:\n ger = (data['enrollment'] \/ data['age_5_to_11_years']) * 100\n academic_year = data.academic_year\n sns.set(font_scale=1)\n sns.set_style(\"white\")\n ax = ger.plot.bar(figsize=(15, 6), color='green')\n sns.despine(left=True, bottom=True)\n # label and title\n ax.set_xticklabels(np.arange(len(academic_year)))\n ax.set_title('Gross Enrollment Ratio (%) for Girls in Primary School In St. Lucia', size=18)\n ax.set_xticklabels(academic_year)\n for tick in ax.get_xticklabels():\n tick.set_rotation(-30)\n ax.set(xlabel='Academic Year', ylabel='Gross enrollment rate (%)')\n\n # annotations\n for p in ax.patches:\n ax.annotate(format(p.get_height(), '.2f'),\n (p.get_x() + p.get_width() \/ 2., p.get_height()),\n ha='center', va='center',\n xytext=(0, 9),\n textcoords='offset points')\n # adjust legend\n\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\ndef get_plot_primary(**kwargs):\n plt.switch_backend('AGG')\n data = kwargs.get('data')\n data_boys = kwargs.get('data_boys')\n data_girls = kwargs.get('data_girls')\n\n for _ in data.shape:\n ger_boys = (data_boys['enrollment'] \/ data_boys['age_5_to_11_years']) * 100\n ger_girls = (data_girls['enrollment'] \/ data_girls['age_5_to_11_years']) * 100\n academic_year = data_girls.academic_year\n title = 'Trend of GER for Primary Schools in St. Lucia'\n plt.figure(figsize=(10, 8))\n plt.title(title)\n plt.plot(academic_year, ger_boys, 'b-', label='boys')\n plt.plot(academic_year, ger_girls, 'g-', label='girls')\n plt.xticks(rotation=60)\n # plt.ylim(0, max(y) + 100)\n plt.ylabel(\"Gross Enrollment Ratio for boys and girls in St. Lucia\")\n plt.xlabel(\"Academic Year\")\n plt.legend()\n plt.grid()\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\ndef get_plot_boys_secondary(**kwargs):\n plt.switch_backend('AGG')\n data = kwargs.get('data')\n for _ in data.shape:\n ger = (data['enrollment'] \/ data['age_12_to_16_years']) * 100\n academic_year = data.academic_year\n sns.set(font_scale=1)\n sns.set_style(\"white\")\n ax = ger.plot.bar(figsize=(15, 6))\n sns.despine(left=True, bottom=True)\n # label and title\n ax.set_xticklabels(np.arange(len(academic_year)))\n ax.set_title('Gross Enrollment Ratio (%) for Boys in Secondary School In St. 
Lucia', size=18)\n ax.set_xticklabels(academic_year)\n for tick in ax.get_xticklabels():\n tick.set_rotation(-30)\n ax.set(xlabel='Academic Year', ylabel='Gross enrollment rate (%)')\n\n # annotations\n for p in ax.patches:\n ax.annotate(format(p.get_height(), '.2f'),\n (p.get_x() + p.get_width() \/ 2., p.get_height()),\n ha='center', va='center',\n xytext=(0, 9),\n textcoords='offset points')\n # adjust legend\n\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\ndef get_plot_girls_secondary(**kwargs):\n plt.switch_backend('AGG')\n data = kwargs.get('data')\n for _ in data.shape:\n ger = (data['enrollment'] \/ data['age_12_to_16_years']) * 100\n academic_year = data.academic_year\n sns.set(font_scale=1)\n sns.set_style(\"white\")\n ax = ger.plot.bar(figsize=(15, 6), color='green')\n sns.despine(left=True, bottom=True)\n # label and title\n ax.set_xticklabels(np.arange(len(academic_year)))\n ax.set_title('Gross Enrollment Ratio (%) for Girls in secondary School In St. Lucia', size=18)\n ax.set_xticklabels(academic_year)\n for tick in ax.get_xticklabels():\n tick.set_rotation(-30)\n ax.set(xlabel='Academic Year', ylabel='Gross enrollment rate (%)')\n\n # annotations\n for p in ax.patches:\n ax.annotate(format(p.get_height(), '.2f'),\n (p.get_x() + p.get_width() \/ 2., p.get_height()),\n ha='center', va='center',\n xytext=(0, 9),\n textcoords='offset points')\n # adjust legend\n\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\ndef get_plot_secondary(**kwargs):\n plt.switch_backend('AGG')\n data = kwargs.get('data')\n data_boys = kwargs.get('data_boys')\n data_girls = kwargs.get('data_girls')\n\n for _ in data.shape:\n ger_boys = (data_boys['enrollment'] \/ data_boys['age_12_to_16_years']) * 100\n ger_girls = (data_girls['enrollment'] \/ data_girls['age_12_to_16_years']) * 100\n academic_year = data_girls.academic_year\n title = 'Trend of GER for Primary Schools in St. 
Lucia'\n plt.figure(figsize=(10, 8))\n plt.title(title)\n plt.plot(academic_year, ger_boys, 'b-', label='boys')\n plt.plot(academic_year, ger_girls, 'g-', label='girls')\n plt.xticks(rotation=60)\n # plt.ylim(0, max(y) + 100)\n plt.ylabel(\"Gross Enrollment Ratio for boys and girls in Secondary School\")\n plt.xlabel(\"Academic Year\")\n plt.legend()\n plt.grid()\n\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\ndef clean_secondary_name(name):\n name = re.sub(\"[^a-zA-Z]+\", \"\", name)\n name = name.lower().replace('secondary', \"\")\n name = name.replace('school', \"\")\n return ' '.join(name.split())\n\n\ndef match_name(name, schools, district_dict):\n for school in schools:\n if clean_secondary_name(name) == clean_secondary_name(getattr(school, 'school_name')):\n district_code = getattr(school, 'district_name_id')\n district_dict[name] = district_code\n return district_code\n return None\n\n\ndef get_district(school_code, schools, district_dict):\n for school in schools:\n if int(getattr(school, 'school_code')) == school_code:\n district = getattr(school, 'district_name_id')\n district_dict[school_code] = district\n return district\n return None\n\n\ndef csec_performance_plot(data, district_1, district_2):\n left_out = set()\n\n df = pd.DataFrame(data.values())\n plt.switch_backend('AGG')\n years = [int(y) for y in df['year'].drop_duplicates()]\n years.sort()\n min_year = min(years)\n\n schools = School.objects.all()\n # schools = School.objects.filter(category_of_school='public secondary')\n\n N_DISTRICTS = District.objects.count()\n scores = np.zeros((len(years), N_DISTRICTS))\n n_tests = np.zeros((len(years), N_DISTRICTS))\n passing_scores = np.zeros((len(years), N_DISTRICTS))\n\n # cache school to district matches\n district_dict = {}\n\n for index, row in df.iterrows():\n school_code = int(row['school_id'])\n if school_code in district_dict:\n district = district_dict[school_code]\n else:\n district = get_district(school_code, schools, district_dict)\n if not district:\n left_out.add(row['school_id'])\n continue\n year = int(row['year']) - min_year\n n_tests[year][district - 1] += 1\n score = row['overall_grade']\n if score == 'I' or score == 'II' or score == 'III':\n scores[year][district - 1] += 1\n\n passing_scores = 100 * scores \/ n_tests\n passing_scores = pd.DataFrame(passing_scores)\n\n labels = ['District ' + str(d + 1) for d in range(N_DISTRICTS)]\n if not (district_1 and district_2):\n for d in range(N_DISTRICTS):\n plt.plot(years, passing_scores[d])\n else:\n plt.plot(years, passing_scores[district_1 - 1])\n plt.plot(years, passing_scores[district_2 - 1])\n labels = ['District ' + str(district_1), 'District ' + str(district_2)]\n plt.xticks([min(years), max(years)])\n plt.legend(labels, loc='upper left', bbox_to_anchor=(1, 1.05))\n plt.title(\"Percentage of Passing Scores (CSEC)\")\n plt.tight_layout()\n graph = get_image()\n\n plt.clf()\n passing_scores = passing_scores.T\n passing_scores.columns = years\n passing_scores.index = ['District ' + str(d + 1) for d in range(N_DISTRICTS)]\n ax = sns.heatmap(passing_scores, annot=True)\n plt.tight_layout()\n heatmap = get_image()\n return [graph, heatmap, left_out]\n\n\n# ===================================================================\n# Outlier detection at district level\n# ===================================================================\ndef get_outlier_district_plot(**kwargs):\n plt.switch_backend('AGG')\n\n school_enrollment = kwargs.get('x')\n school_name = kwargs.get('y')\n\n datamean = 
kwargs.get('data_mean')\n input_school_type = kwargs.get('input_school_type')\n academic_year = kwargs.get('academic_year')\n district_input = kwargs.get('input_district')\n\n fig, ax1 = plt.subplots(figsize=(10, 8))\n\n ax1.set_title('Enrollment for District')\n ax1.set_xlabel('School_Name')\n ax1.set_ylabel('School_Scores')\n\n ax1.bar(school_name, school_enrollment, color='b')\n\n for tick in ax1.xaxis.get_major_ticks():\n tick.label.set_fontsize(8)\n tick.label.set_rotation('15')\n plt.plot(school_name, datamean, linewidth=5, ls='solid', color='r')\n\n plt.xlabel(\"School Name\")\n plt.ylabel(\"Enrollment\")\n\n plt.title(\n \"Enrollment for \" + input_school_type + \" schools for district \" + district_input + \" and \" + academic_year + \" academic year \")\n\n plt.tight_layout()\n graph = get_image()\n return graph\n\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\n# ==========================================================================\n# Outlier detection at national level\n# ==========================================================================\n\ndef get_outlier_national_plot(**kwargs):\n plt.switch_backend('AGG')\n\n school_enrollment = kwargs.get('x')\n school_name = kwargs.get('y')\n\n datamean = kwargs.get('data_mean')\n input_school_type = kwargs.get('input_school_type')\n academic_year = kwargs.get('academic_year')\n\n fig, ax1 = plt.subplots(figsize=(12, 10))\n\n ax1.set_title('Enrollment for Selected Year')\n ax1.set_xlabel('School_Name')\n\n ax1.bar(school_name, school_enrollment, width=0.1, color='b')\n for tick in ax1.xaxis.get_major_ticks():\n tick.label.set_fontsize(10)\n tick.label.set_rotation('vertical')\n plt.plot(school_name, datamean, linewidth=3, ls='solid', color='r')\n\n plt.xlabel(\"School Name\")\n plt.ylabel(\"Enrollment\")\n plt.title(\"Enrollment for \" + input_school_type + \" schools for year \" + academic_year)\n\n plt.tight_layout()\n graph = get_image()\n return graph\n\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\ndef get_plot_regression(**kwargs):\n plt.switch_backend('AGG')\n plt.figure(figsize=(10, 8))\n\n data = kwargs.get('data')\n sns.set_theme(color_codes=True)\n sns.regplot(x=data.enrollment, y=data.gdp_millions, data=data, x_estimator=np.mean, label='GDP');\n sns.regplot(x=data.enrollment, y=data.educational_expenditure, data=data, x_estimator=np.mean,\n label='Educational Expenditure');\n sns.regplot(x=data.enrollment, y=data.government_expenditure, data=data, x_estimator=np.mean,\n label='Government Expenditure');\n plt.xlabel(\"Enrollment\")\n plt.ylabel(\"Expenditure\")\n plt.title(\"Linear Regression - Enrollment \/ GDP \/ Education \/ government expenditure\")\n plt.legend()\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\ndef get_plot_gdp_regress(**kwargs):\n plt.switch_backend('AGG')\n plt.figure(figsize=(10, 8))\n data = kwargs.get('data')\n sns.set_theme(color_codes=True)\n sns.jointplot(x=data.enrollment, y=data.gdp_millions, data=data, x_estimator=np.mean,\n label='GDP', kind=\"reg\");\n plt.xlabel(\"Enrollment\")\n plt.ylabel(\"Expenditure\")\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\ndef get_enrollment_joint_pearsons(**kwargs):\n plt.switch_backend('AGG')\n plt.figure(figsize=(10, 8))\n data = kwargs.get('data')\n sns.set_theme(color_codes=True)\n import scipy.stats as stats\n graph = sns.jointplot(data=data, x=data.enrollment, y=data.gdp_millions)\n r, p = stats.pearsonr(x=data.enrollment, y=data.gdp_millions)\n phantom, = graph.ax_joint.plot([], [], 
linestyle=\"\", alpha=0)\n # here graph is not a ax but a joint grid, so we access the axis through ax_joint method\n graph.ax_joint.legend([phantom], ['r={:f}, p={:f}'.format(r, p)])\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\ndef get_enrollment_joint_spearman(**kwargs):\n plt.switch_backend('AGG')\n plt.figure(figsize=(10, 8))\n data = kwargs.get('data')\n sns.set_theme(color_codes=True)\n graph = sns.jointplot(data=data, x=data.enrollment, y=data.gdp_millions)\n r, p = stats.spearmanr(data.enrollment, data.gdp_millions)\n phantom, = graph.ax_joint.plot([], [], linestyle=\"\", alpha=0)\n # here graph is not a ax but a joint grid, so we access the axis through ax_joint method\n\n graph.ax_joint.legend([phantom], ['r={:f}, p={:f}'.format(r, p)])\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\ndef get_enrollment_multicollinearity(**kwargs):\n plt.figure(figsize=(10, 8))\n plt.switch_backend('AGG')\n data = kwargs.get('data')\n data = data[[\"educational_expenditure\", \"gdp_millions\", \"government_expenditure\", \"primary_school_expenditure\",\n \"secondary_school_expenditure\", \"enrollment\", \"age_5_to_11_years\", \"age_12_to_16_years\"]]\n sns.set(style='white')\n corr = data.corr()\n mask = np.zeros_like(corr, dtype=np.bool)\n mask[np.triu_indices_from(mask)] = True\n f, ax = plt.subplots(figsize=(12, 10))\n cmap = sns.diverging_palette(220, 10, as_cmap=True)\n sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.9, center=0, square=True, linewidths=.5, annot=True,\n cbar_kws={'shrink': .5});\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\ndef get_kernel_density(**kwargs):\n plt.figure(figsize=(10, 8))\n plt.switch_backend('AGG')\n data = kwargs.get('data')\n sns.kdeplot(data=data.enrollment)\n sns.despine()\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\ndef plot_national_gender_enrollment(**kwargs):\n plt.switch_backend('AGG')\n data_boys_primary = kwargs.get('data_boys_primary')\n data_boys_secondary = kwargs.get('data_boys_secondary')\n data_girls_primary = kwargs.get('data_girls_primary')\n data_girls_secondary = kwargs.get('data_girls_secondary')\n title = 'Trend in enrollments over time'\n plt.figure(figsize=(10, 8))\n plt.title(title)\n plt.plot(data_boys_primary['academic_year'], data_boys_primary['enrollment'], 'b-',\n label='Boys enrolled in Primary School')\n plt.plot(data_boys_secondary['academic_year'], data_boys_secondary['enrollment'], 'bo',\n label='Boys enrolled in Secondary School')\n plt.plot(data_girls_primary['academic_year'], data_girls_primary['enrollment'], 'r-',\n label='Girls enrolled in Primary School')\n plt.plot(data_girls_secondary['academic_year'], data_girls_secondary['enrollment'], 'ro',\n label='Girls enrolled in secondary School')\n\n plt.xticks(rotation=60)\n # plt.ylim(0, max(y) + 100)\n plt.ylabel(\"National Enrollment Trends\")\n plt.xlabel(\"Academic Year\")\n plt.legend()\n plt.grid()\n\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\ndef national_gender_enrollment_hist(**kwargs):\n data_boys_primary = kwargs.get('data_boys_primary')\n data_boys_secondary = kwargs.get('data_boys_secondary')\n data_girls_primary = kwargs.get('data_girls_primary')\n data_girls_secondary = kwargs.get('data_girls_secondary')\n\n # boys primary mean of distribution\n mu_boys_primary = mean(data_boys_primary.enrollment)\n mu_girls_primary = mean(data_girls_primary.enrollment)\n mu_boys_secondary = mean(data_boys_secondary.enrollment)\n mu_girls_secondary = 
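# --- Illustrative sketch (synthetic data; not part of this module). It mirrors
# --- the phantom-legend trick used by the two jointplot helpers above: a
# --- seaborn jointplot returns a JointGrid rather than an Axes, so the r/p
# --- annotation has to be attached to `ax_joint`.
import numpy as np
import seaborn as sns
import scipy.stats as stats

rng = np.random.default_rng(1)
x = rng.normal(size=200)
y = 2 * x + rng.normal(size=200)

grid = sns.jointplot(x=x, y=y)
r, p = stats.pearsonr(x, y)
phantom, = grid.ax_joint.plot([], [], linestyle="", alpha=0)  # invisible legend handle
grid.ax_joint.legend([phantom], ['r={:.2f}, p={:.2g}'.format(r, p)])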
mean(data_girls_secondary.enrollment)\n\n sigma_boys_primary = std(data_boys_primary.enrollment)\n sigma_girls_primary = std(data_girls_primary.enrollment)\n sigma_boys_secondary = std(data_boys_secondary.enrollment)\n sigma_girls_secondary = std(data_girls_secondary.enrollment)\n\n x_mu_boys_primary = mu_boys_primary + sigma_boys_primary * np.random.randn(437)\n x_mu_girls_primary = mu_girls_primary + sigma_girls_primary * np.random.randn(437)\n x_mu_boys_secondary = mu_boys_secondary + sigma_boys_secondary * np.random.randn(437)\n x_mu_girls_secondary = mu_girls_secondary + sigma_girls_secondary * np.random.randn(437)\n num_bins = 50\n\n # fig, ax = plt.subplots()\n\n fig, axs = plt.subplots(2, 2, figsize=(15, 15))\n\n # the histogram of the data\n n_boys_primary, bins_boys_primary, patches_boys_primary = axs[0, 0].hist(x_mu_boys_primary, num_bins, density=True)\n n_boys_secondary, bins_boys_secondary, patches_boys_secondary = axs[0, 1].hist(x_mu_boys_secondary, num_bins,\n density=True)\n n_girls_primary, bins_girls_primary, patches_girls_primary = axs[1, 0].hist(x_mu_girls_primary, num_bins,\n density=True)\n n_girls_secondary, bins_girls_secondary, patches_girls_secondary = axs[1, 1].hist(x_mu_girls_secondary, num_bins,\n density=True)\n # add a 'best fit' line\n y_boys_primary = ((1 \/ (np.sqrt(2 * np.pi) * sigma_boys_primary)) *\n np.exp(-0.5 * (1 \/ sigma_boys_primary * (bins_boys_primary - mu_boys_primary)) ** 2))\n y_boys_secondary = ((1 \/ (np.sqrt(2 * np.pi) * sigma_boys_secondary)) *\n np.exp(-0.5 * (1 \/ sigma_boys_secondary * (bins_boys_secondary - mu_boys_secondary)) ** 2))\n y_girls_primary = ((1 \/ (np.sqrt(2 * np.pi) * sigma_girls_primary)) *\n np.exp(-0.5 * (1 \/ sigma_girls_primary * (bins_girls_primary - mu_girls_primary)) ** 2))\n\n y_girls_secondary = ((1 \/ (np.sqrt(2 * np.pi) * sigma_girls_secondary)) *\n np.exp(-0.5 * (1 \/ sigma_girls_secondary * (bins_girls_secondary - mu_girls_secondary)) ** 2))\n\n for ax in axs.flat:\n ax.set(xlabel='Enrollment', ylabel='Probability Density')\n\n # Hide x labels and tick labels for top plots and y ticks for right plots.\n for ax in axs.flat:\n ax.label_outer()\n\n axs[0, 0].plot(bins_boys_primary, y_boys_primary, '--')\n axs[0, 0].set_title('Primary-Boys')\n\n axs[0, 1].plot(bins_boys_secondary, y_boys_secondary, '--')\n axs[0, 1].set_title('Secondary-Boys')\n\n axs[1, 0].plot(bins_girls_primary, y_girls_primary, '--')\n axs[1, 0].set_title('Primary-Girls')\n\n axs[1, 1].plot(bins_girls_secondary, y_girls_secondary, '--')\n axs[1, 1].set_title('Secondary-Girls')\n\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\ndef plot_national_education_census(**kwargs):\n plt.switch_backend('AGG')\n data = kwargs.get('data')\n\n title = 'Education Census Over time'\n plt.figure(figsize=(10, 8))\n plt.title(title)\n plt.plot(data['academic_year'], data['age_3_to_4_years'], 'b-',\n label='Population of Age Group 3-4')\n plt.plot(data['academic_year'], data['age_5_to_11_years'], 'y-',\n label='Population of Age Group > 5 and less than 12')\n plt.plot(data['academic_year'], data['age_12_to_16_years'], 'r-',\n label='population of children above 12 years old')\n\n plt.xticks(rotation=60)\n # plt.ylim(0, max(y) + 100)\n plt.ylabel(\"Education Census Trends\")\n plt.xlabel(\"Academic Year\")\n plt.legend()\n plt.grid()\n\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\ndef national_education_census_hist(**kwargs):\n data = kwargs.get('data')\n # boys primary mean of distribution\n mu_data_3_4 = 
mean(data.age_3_to_4_years)\n mu_data_5_11 = mean(data.age_5_to_11_years)\n mu_data_12_16 = mean(data.age_12_to_16_years)\n\n sigma_data_3_4 = std(data.age_3_to_4_years)\n sigma_data_5_11 = std(data.age_5_to_11_years)\n sigma_data_12_16 = std(data.age_12_to_16_years)\n\n x_mu_data_3_4 = mu_data_3_4 + sigma_data_3_4 * np.random.randn(437)\n x_mu_data_5_11 = mu_data_5_11 + sigma_data_5_11 * np.random.randn(437)\n x_mu_data_12_16 = mu_data_12_16 + sigma_data_12_16 * np.random.randn(437)\n num_bins = 50\n\n # fig, ax = plt.subplots()\n\n fig, (ax1, ax2, ax3) = plt.subplots(3, figsize=(15, 15))\n\n # the histogram of the data\n n_3_4, bins_3_4, patches_3_4 = ax1.hist(x_mu_data_3_4, num_bins, density=True)\n n_5_11, bins_5_11, patches_5_11 = ax2.hist(x_mu_data_5_11, num_bins, density=True)\n n_12_16, bins_12_16, patches_12_16 = ax3.hist(x_mu_data_12_16, num_bins, density=True)\n\n # add a 'best fit' line\n y_3_4 = ((1 \/ (np.sqrt(2 * np.pi) * sigma_data_3_4)) *\n np.exp(-0.5 * (1 \/ sigma_data_3_4 * (bins_3_4 - mu_data_3_4)) ** 2))\n y_5_11 = ((1 \/ (np.sqrt(2 * np.pi) * sigma_data_5_11)) *\n np.exp(-0.5 * (1 \/ sigma_data_5_11 * (bins_5_11 - mu_data_5_11)) ** 2))\n y_12_16 = ((1 \/ (np.sqrt(2 * np.pi) * sigma_data_12_16)) *\n np.exp(-0.5 * (1 \/ sigma_data_12_16 * (bins_12_16 - mu_data_12_16)) ** 2))\n\n ax1.plot(bins_3_4, y_3_4, '--')\n ax1.set_title('3 - 4 years')\n\n ax2.plot(bins_5_11, y_5_11, '--')\n ax2.set_title('Greater \/ equal 5, Less than 12')\n\n ax3.plot(bins_12_16, y_12_16, '--')\n ax3.set_title('>=12 years')\n\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\ndef plot_national_expenditure(**kwargs):\n plt.switch_backend('AGG')\n data = kwargs.get('data')\n title = 'Education Expenditure'\n plt.figure(figsize=(10, 8))\n plt.title(title)\n plt.plot(data['academic_year'], data.educational_expenditure, 'b-',\n label='Educational Expenditure')\n plt.plot(data['academic_year'], data['gdp_millions'], 'y-',\n label='GDP (Million XCD)')\n plt.plot(data['academic_year'], data['government_expenditure'], 'r-',\n label='Government Expenditure')\n plt.plot(data['academic_year'], data['primary_school_expenditure'], 'g-',\n label='Primary School Expenditure')\n plt.plot(data['academic_year'], data['secondary_school_expenditure'], 'k-',\n label='Secondary School Expenditure')\n\n plt.xticks(rotation=60)\n # plt.ylim(0, max(y) + 100)\n plt.ylabel(\"Expenditure Trends\")\n plt.xlabel(\"Academic Year\")\n plt.legend()\n plt.grid()\n\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\ndef national_expenditure_hist(**kwargs):\n data = kwargs.get('data')\n mu_educational_expenditure = mean(data['educational_expenditure'])\n mu_gdp_millions = mean(data['gdp_millions'])\n mu_government_expenditure = mean(data['government_expenditure'])\n mu_primary_school_expenditure = mean(data['primary_school_expenditure'])\n mu_secondary_school_expenditure = mean(data['secondary_school_expenditure'])\n\n sigma_educational_expenditure = std(data.educational_expenditure)\n sigma_gdp_millions = std(data.gdp_millions)\n sigma_government_expenditure = std(data.government_expenditure)\n sigma_primary_school_expenditure = std(data.primary_school_expenditure)\n sigma_secondary_school_expenditure = std(data.secondary_school_expenditure)\n\n x_mu_educational_expenditure = mu_educational_expenditure + sigma_educational_expenditure * np.random.randn(437)\n x_mu_gdp_millions = mu_gdp_millions + sigma_gdp_millions * np.random.randn(437)\n x_mu_government_expenditure = mu_government_expenditure + 
sigma_government_expenditure * np.random.randn(437)\n x_mu_primary_school_expenditure = mu_primary_school_expenditure + sigma_primary_school_expenditure * np.random.randn(\n 437)\n x_mu_secondary_school_expenditure = mu_secondary_school_expenditure + sigma_secondary_school_expenditure * np.random.randn(\n 437)\n num_bins = 50\n\n # fig, ax = plt.subplots()\n\n fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, figsize=(15, 15))\n\n # the histogram of the data\n n_educational_expenditure, bins_educational_expenditure, patches_educational_expenditure = \\\n ax1.hist(x_mu_educational_expenditure, num_bins, density=True)\n n_gdp_millions, bins_gdp_millions, patches_gdp_millions = \\\n ax2.hist(x_mu_gdp_millions, num_bins, density=True)\n n_government_expenditure, bins_government_expenditure, patches_government_expenditure = \\\n ax3.hist(x_mu_government_expenditure, num_bins, density=True)\n n_primary_school_expenditure, bins_primary_school_expenditure, patches_primary_school_expenditure = \\\n ax4.hist(x_mu_primary_school_expenditure, num_bins, density=True)\n n_secondary_school_expenditure, bins_secondary_school_expenditure, patches_secondary_school_expenditure = \\\n ax5.hist(x_mu_secondary_school_expenditure, num_bins, density=True)\n # add a 'best fit' line\n y_educational_expenditure = ((1 \/ (np.sqrt(2 * np.pi) * sigma_educational_expenditure)) *\n np.exp(-0.5 * (1 \/ sigma_educational_expenditure * (\n bins_educational_expenditure - mu_educational_expenditure)) ** 2))\n y_gdp_millions = ((1 \/ (np.sqrt(2 * np.pi) * sigma_gdp_millions)) *\n np.exp(-0.5 * (1 \/ sigma_gdp_millions * (bins_gdp_millions - mu_gdp_millions)) ** 2))\n y_government_expenditure = ((1 \/ (np.sqrt(2 * np.pi) * sigma_government_expenditure)) *\n np.exp(-0.5 * (1 \/ sigma_government_expenditure * (\n bins_government_expenditure - mu_government_expenditure)) ** 2))\n y_primary_school_expenditure = ((1 \/ (np.sqrt(2 * np.pi) * sigma_primary_school_expenditure)) *\n np.exp(-0.5 * (1 \/ sigma_primary_school_expenditure * (\n bins_primary_school_expenditure - mu_primary_school_expenditure)) ** 2))\n y_secondary_school_expenditure = ((1 \/ (np.sqrt(2 * np.pi) * sigma_secondary_school_expenditure)) *\n np.exp(-0.5 * (1 \/ sigma_secondary_school_expenditure * (\n bins_secondary_school_expenditure - mu_secondary_school_expenditure)) ** 2))\n\n ax1.plot(bins_educational_expenditure, y_educational_expenditure, '--')\n ax1.set_title('Educational Expenditure')\n\n ax2.plot(bins_gdp_millions, y_gdp_millions, '--')\n ax2.set_title('gdp(Million XCD)')\n\n ax3.plot(bins_government_expenditure, y_government_expenditure, '--')\n ax3.set_title('Government Expenditure')\n\n ax4.plot(bins_primary_school_expenditure, y_primary_school_expenditure, '--')\n ax4.set_title('Primary School Expenditure')\n\n ax5.plot(bins_secondary_school_expenditure, y_secondary_school_expenditure, '--')\n ax5.set_title('Secondary School Expenditure')\n\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\ndef primary_performance_plot(data, district_1, district_2):\n df = pd.DataFrame(data.values())\n plt.switch_backend('AGG')\n years = df['academic_year'].drop_duplicates().str.split(\"\/\")\n years = [int(y[1]) for y in years]\n min_year = min(years)\n\n N_DISTRICTS = District.objects.count()\n\n tests = np.zeros((len(years), N_DISTRICTS), dtype=int)\n above_avg = np.zeros((len(years), N_DISTRICTS), dtype=int)\n performance = np.zeros((len(years), N_DISTRICTS), dtype=float)\n\n for index, row in df.iterrows():\n year = 
int(row['academic_year'].split('\/')[1])\n school_code = row['school_id']\n school = School.objects.get(school_code=school_code)\n district = getattr(school, 'district_name_id')\n n_tests = int(row['tests_sat'])\n n_above_avg = int(row['above_average_scores'])\n if np.isnan(n_tests) or np.isnan(n_above_avg):\n continue\n tests[year - min_year][district - 1] += n_tests\n above_avg[year - min_year][district - 1] += n_above_avg\n\n for y in range(len(years)):\n performance[y] = 100 * above_avg[y] \/ tests[y]\n performance = pd.DataFrame(performance)\n labels = ['District ' + str(d + 1) for d in range(N_DISTRICTS)]\n if not (district_1 and district_2):\n for d in range(N_DISTRICTS):\n plt.plot(years, performance[d])\n else:\n plt.plot(years, performance[district_1 - 1])\n plt.plot(years, performance[district_2 - 1])\n labels = ['District ' + str(district_1), 'District' + str(district_2)]\n plt.xticks([min(years), max(years)])\n plt.legend(labels, loc='upper left', bbox_to_anchor=(1, 1.05))\n plt.title(\"Percentage of Students Scoring Above Mean (CEE)\")\n plt.tight_layout()\n graph = get_image()\n\n plt.clf()\n performance = performance.T\n performance.columns = np.arange(1999, 2018, step=1)\n performance.index = ['District ' + str(d + 1) for d in range(N_DISTRICTS)]\n ax = sns.heatmap(performance, annot=True)\n ax.set_title(\"Percentage of Students Scoring above Mean (CEE)\")\n plt.tight_layout()\n heatmap = get_image()\n return [graph, heatmap]\n\n\ndef get_sex(character):\n if character == \"F\":\n return \"female\"\n else:\n return \"male\"\n\n\ndef store_scores(data, required_fields, user_data, type):\n result = {}\n lines = data.replace(\"\\r\", \"\").split(\"\\n\")\n field_names = lines[0].split(\",\")\n if not set(required_fields).issubset(set(field_names)):\n diff = set(required_fields) - set(field_names)\n missing_fields = []\n for d in diff:\n missing_fields.append(d)\n result['missing_fields'] = missing_fields\n result['error_message'] = 'The following fields are missing:\\n'\n result['n_scores'] = 0\n else:\n succeeded = 0\n failed = 0\n for line in lines[1:]:\n if line:\n fields = line.split(\",\")\n data = {}\n for required_field in required_fields:\n if required_field == \"school_id\":\n data[\"school\"] = fields[field_names.index(\"school_id\")]\n elif required_field == \"primsch_id\":\n data[\"primsch\"] = fields[field_names.index(\"primsch_id\")]\n elif required_field == \"secsch_id\":\n data[\"secsch\"] = fields[field_names.index(\"secsch_id\")]\n elif required_field == \"district_id\":\n data[\"district\"] = fields[field_names.index(\"district_id\")]\n elif required_field == \"sex\":\n data[\"sex\"] = get_sex(fields[field_names.index(required_field)])\n else:\n data[required_field] = fields[field_names.index(required_field)]\n data = {**data, **user_data}\n if type == \"CEE\":\n form = CEEForm(data)\n if type == \"CSEC\":\n form = CSECForm(data)\n if form.is_valid():\n form.save()\n succeeded += 1\n else:\n failed += 1\n result['n_scores'] = succeeded\n result['failed'] = failed\n return result\n\n\n# =======================================================================================\n# Box plots at district level\n# =======================================================================================\n\ndef get_boxplot_district_plot(**kwargs):\n plt.switch_backend('AGG')\n\n school_enrollment = kwargs.get('x')\n school_name = kwargs.get('y')\n\n input_school_type = kwargs.get('input_school_type')\n academic_year = kwargs.get('academic_year')\n district_input = 
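# --- Illustrative sketch (toy values, not project data) of the header check
# --- performed by `store_scores` above: compare the required field names
# --- against the CSV header row and report anything missing.
required_fields = ["school_id", "sex", "tests_sat", "above_average_scores"]
header_line = "school_id,sex,above_average_scores"
field_names = header_line.split(",")
missing_fields = sorted(set(required_fields) - set(field_names))
if missing_fields:
    print("The following fields are missing:", missing_fields)  # ['tests_sat']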
kwargs.get('input_district')\n\n fig, ax1 = plt.subplots(figsize=(11, 6))\n\n plt.boxplot(school_enrollment, patch_artist=True,\n boxprops=dict(facecolor='purple'),\n meanline=True, showmeans=True)\n\n plt.xticks([1], [input_school_type])\n plt.ylabel('Enrollment')\n plt.title(\n \"Box Plot for Enrollment in \" + input_school_type + \" schools in \" + \" District\" + district_input + \" and academic year \" + academic_year)\n\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\n# =========================================================================================\n# Box plots at national level\n# =========================================================================================\n\ndef get_boxplot_national_plot(**kwargs):\n plt.switch_backend('AGG')\n\n school_enrollment = kwargs.get('x')\n school_name = kwargs.get('y')\n\n input_school_type = kwargs.get('input_school_type')\n academic_year = kwargs.get('academic_year')\n\n fig, ax1 = plt.subplots(figsize=(11, 6))\n\n plt.boxplot(school_enrollment, patch_artist=True,\n boxprops=dict(facecolor='purple'),\n meanline=True, showmeans=True)\n\n plt.xticks([1], [input_school_type])\n plt.ylabel('Enrollment')\n plt.title(\"Box Plot for Enrollment in \" + input_school_type + \" schools \" + \" for academic year \" + academic_year)\n\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\ndef plot_national_ratio_trend(**kwargs):\n plt.switch_backend('AGG')\n data_primary = kwargs.get('data_primary')\n data_secondary = kwargs.get('data_secondary')\n for _ in data_secondary.shape:\n school_enrollment = data_secondary.total_enrollment\n total_number_of_teachers_secondary = data_secondary.total_number_of_teachers\n academic_year = data_secondary.academic_year\n student_teacher_ratio_secondary = (\n (school_enrollment \/ total_number_of_teachers_secondary).replace(np.inf, 0)).astype(float)\n fig, ax = plt.subplots(figsize=(12, 10))\n ax.bar(academic_year, student_teacher_ratio_secondary, width=0.8, color='b',\n label='Student to teacher ratio in Secondary Schools'\n , alpha=0.5)\n ax.set_ylabel(\"Student - Teacher Ratio (7-11)\")\n\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(10)\n tick.label.set_rotation(45)\n for bar in ax.patches:\n plt.annotate(format(bar.get_height(), '.2f'),\n (bar.get_x() + bar.get_width() \/ 2,\n bar.get_height()), ha='center', va='center',\n size=7, xytext=(0, 5),\n textcoords='offset points')\n plt.title(\"Student to Teacher Ratio Trends in Secondary Schools\")\n plt.xlabel('Academic Year')\n ax.set_ylim([0, max(student_teacher_ratio_secondary) + 100])\n\n ax.legend()\n\n plt.tight_layout()\n # plt.grid()\n graph = get_image()\n return graph\n\n\ndef plot_national_ratio_trend_primary(**kwargs):\n plt.switch_backend('AGG')\n data_primary = kwargs.get('data_primary')\n for _ in data_primary.shape:\n school_enrollment = data_primary.total_enrollment\n total_number_of_teachers_primary = data_primary.total_number_of_teachers\n academic_year = data_primary.academic_year\n student_teacher_ratio_primary = (\n (school_enrollment \/ total_number_of_teachers_primary).replace(np.inf, 0)).astype(float)\n fig, ax = plt.subplots(figsize=(12, 10))\n ax.bar(academic_year, student_teacher_ratio_primary, width=0.8, color='g',\n label='Student to teacher ratio in Primary Schools'\n , alpha=0.5)\n ax.set_ylabel(\"Student - Teacher Ratio (k-6)\")\n\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(10)\n tick.label.set_rotation(45)\n for bar in ax.patches:\n 
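# --- Illustrative sketch (hypothetical helper; not part of this module) of the
# --- division guard used by the ratio-trend plots above and below: a pandas
# --- division by zero yields inf, which is mapped back to 0 before plotting.
import numpy as np
import pandas as pd

def safe_ratio(numerator: pd.Series, denominator: pd.Series) -> pd.Series:
    """Element-wise ratio with inf (from zero denominators) replaced by 0."""
    return (numerator / denominator).replace([np.inf, -np.inf], 0).astype(float)

# df = pd.DataFrame({"total_enrollment": [300, 120, 50],
#                    "total_number_of_teachers": [20, 0, 5]})
# safe_ratio(df.total_enrollment, df.total_number_of_teachers)  # 15.0, 0.0, 10.0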
plt.annotate(format(bar.get_height(), '.2f'),\n (bar.get_x() + bar.get_width() \/ 2,\n bar.get_height()), ha='center', va='center',\n size=7, xytext=(0, 5),\n textcoords='offset points')\n plt.title(\"Student to Teacher Ratio Trends in Primary Schools\")\n plt.xlabel('Academic Year')\n ax.set_ylim([0, max(student_teacher_ratio_primary) + 100])\n\n ax.legend()\n plt.tight_layout()\n # plt.grid()\n graph = get_image()\n return graph\n\n\ndef national_ratio_hist(**kwargs):\n plt.switch_backend('AGG')\n data = kwargs.get('data')\n mu_total_enrollment = mean(data['total_enrollment'])\n mu_number_of_trained_male_teachers = mean(data['number_of_trained_male_teachers'])\n mu_number_of_trained_female_teachers = mean(data['number_of_trained_female_teachers'])\n mu_number_of_untrained_male_teachers = mean(data['number_of_untrained_male_teachers'])\n mu_number_of_untrained_female_teachers = mean(data['number_of_untrained_female_teachers'])\n mu_total_no_of_teachers = mean(data['total_number_of_teachers'])\n\n sigma_total_enrollment = std(data['total_enrollment'])\n sigma_number_of_trained_male_teachers = std(data['number_of_trained_male_teachers'])\n sigma_number_of_trained_female_teachers = std(data['number_of_trained_female_teachers'])\n sigma_number_of_untrained_male_teachers = std(data['number_of_untrained_male_teachers'])\n sigma_number_of_untrained_female_teachers = std(data['number_of_untrained_female_teachers'])\n sigma_total_no_of_teachers = std(data['total_number_of_teachers'])\n\n x_mu_total_enrollment = mu_total_enrollment + sigma_total_enrollment * np.random.randn(437)\n x_mu_number_of_trained_male_teachers = mu_number_of_trained_male_teachers + sigma_number_of_trained_male_teachers * np.random.randn(\n 437)\n x_mu_number_of_trained_female_teachers = mu_number_of_trained_female_teachers + sigma_number_of_trained_female_teachers * np.random.randn(\n 437)\n x_mu_number_of_untrained_male_teachers = mu_number_of_untrained_male_teachers + sigma_number_of_untrained_male_teachers * np.random.randn(\n 437)\n x_mu_number_of_untrained_female_teachers = mu_number_of_untrained_female_teachers + sigma_number_of_untrained_female_teachers * np.random.randn(\n 437)\n x_mu_total_no_of_teachers = mu_total_no_of_teachers + sigma_total_no_of_teachers * np.random.randn(\n 437)\n num_bins = 50\n\n # fig, ax = plt.subplots()\n\n fig, (ax1, ax2, ax3, ax4, ax5, ax6) = plt.subplots(6, figsize=(15, 15))\n\n # the histogram of the data\n n_total_enrollment, bins_total_enrollment, patches_total_enrollment = \\\n ax1.hist(x_mu_total_enrollment, num_bins, density=True)\n n_number_of_trained_male_teachers, bins_number_of_trained_male_teachers, patches_number_of_trained_male_teachers = \\\n ax2.hist(x_mu_number_of_trained_male_teachers, num_bins, density=True)\n n_number_of_trained_female_teachers, bins_number_of_trained_female_teachers, patches_number_of_trained_female_teachers = \\\n ax3.hist(x_mu_number_of_trained_female_teachers, num_bins, density=True)\n n_number_of_untrained_male_teachers, bins_number_of_untrained_male_teachers, patches_number_of_untrained_male_teachers = \\\n ax4.hist(x_mu_number_of_untrained_male_teachers, num_bins, density=True)\n n_number_of_untrained_female_teachers, bins_number_of_untrained_female_teachers, patches_number_of_untrained_female_teachers = \\\n ax5.hist(x_mu_number_of_untrained_female_teachers, num_bins, density=True)\n n_total_number_of_teachers, bins_total_number_of_teachers, patches_total_number_of_teachers = \\\n ax6.hist(x_mu_total_no_of_teachers, num_bins, density=True)\n # 
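# --- Illustrative sketch (hypothetical helper; the name is not part of this
# --- module). Each histogram block in this file repeats the same pattern:
# --- simulate mu + sigma * randn(437), draw a density histogram, then overlay
# --- the normal PDF evaluated at the bin edges. Factored out, it looks like:
import numpy as np
import matplotlib.pyplot as plt

def hist_with_normal_overlay(ax, mu, sigma, n_samples=437, num_bins=50, title=""):
    samples = mu + sigma * np.random.randn(n_samples)
    _, bins, _ = ax.hist(samples, num_bins, density=True)
    pdf = (1 / (np.sqrt(2 * np.pi) * sigma)) * np.exp(-0.5 * ((bins - mu) / sigma) ** 2)
    ax.plot(bins, pdf, '--')
    ax.set_title(title)

# fig, ax = plt.subplots()
# hist_with_normal_overlay(ax, mu=500.0, sigma=40.0, title="Total Enrollment")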
add a 'best fit' line\n y_total_enrollment = ((1 \/ (np.sqrt(2 * np.pi) * sigma_total_enrollment)) *\n np.exp(-0.5 * (1 \/ sigma_total_enrollment * (\n bins_total_enrollment - mu_total_enrollment)) ** 2))\n y_number_of_trained_male_teachers = ((1 \/ (np.sqrt(2 * np.pi) * sigma_number_of_trained_male_teachers)) *\n np.exp(-0.5 * (1 \/ sigma_number_of_trained_male_teachers * (\n bins_number_of_trained_male_teachers - mu_number_of_trained_male_teachers)) ** 2))\n y_number_of_trained_female_teachers = ((1 \/ (np.sqrt(2 * np.pi) * sigma_number_of_trained_female_teachers)) *\n np.exp(-0.5 * (1 \/ sigma_number_of_trained_female_teachers * (\n bins_number_of_trained_female_teachers - mu_number_of_trained_female_teachers)) ** 2))\n y_number_of_untrained_male_teachers = ((1 \/ (np.sqrt(2 * np.pi) * sigma_number_of_untrained_male_teachers)) *\n np.exp(-0.5 * (1 \/ sigma_number_of_untrained_male_teachers * (\n bins_number_of_untrained_male_teachers - mu_number_of_untrained_male_teachers)) ** 2))\n y_number_of_untrained_female_teachers = ((1 \/ (np.sqrt(2 * np.pi) * sigma_number_of_untrained_female_teachers)) *\n np.exp(-0.5 * (1 \/ sigma_number_of_untrained_female_teachers * (\n bins_number_of_untrained_female_teachers - mu_number_of_untrained_female_teachers)) ** 2))\n y_total_no_of_teachers = ((1 \/ (np.sqrt(2 * np.pi) * sigma_total_no_of_teachers)) *\n np.exp(-0.5 * (1 \/ sigma_total_no_of_teachers * (\n bins_total_number_of_teachers - mu_total_no_of_teachers)) ** 2))\n\n ax1.plot(bins_total_enrollment, y_total_enrollment, '--')\n ax1.set_title('Total Enrollment')\n\n ax2.plot(bins_number_of_trained_male_teachers, y_number_of_trained_male_teachers, '--')\n ax2.set_title('Number of Trained Male Teachers')\n\n ax3.plot(bins_number_of_trained_female_teachers, y_number_of_trained_female_teachers, '--')\n ax3.set_title('Number of Trained Female Teachers')\n\n ax4.plot(bins_number_of_untrained_male_teachers, y_number_of_untrained_male_teachers, '--')\n ax4.set_title('Number of Untrained Male Teachers')\n\n ax5.plot(bins_number_of_untrained_female_teachers, y_number_of_untrained_female_teachers, '--')\n ax5.set_title('Untrained Female Teachers')\n\n ax6.plot(bins_total_number_of_teachers, y_total_no_of_teachers, '--')\n ax6.set_title('Total Number of Teachers')\n\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\ndef get_prev_yr(year_string):\n year = int(year_string.split(\"\/\")[0])\n prev_year = year - 1\n return str(prev_year) + \"\/\" + str(year)\n\n\ndef get_previous_performance(data):\n previous = []\n for index, row in data.iterrows():\n previous_year = data.loc[(data['school_id'] == row['school_id']) &\n (data['academic_year'] == get_prev_yr(row['academic_year']))]\n if not previous_year.empty:\n previous.append(previous_year['performance'].values[0])\n else:\n previous.append(np.nan)\n\n previous = pd.Series(previous)\n data['previous'] = previous\n return data\n\n\ndef get_enrollment(data):\n enrollment = []\n capacity = []\n found = 0\n not_found = 0\n for index, row in data.iterrows():\n school = AggregateEnrollment.objects.filter(name_of_school_id=row['school_id'],\n academic_year=row['academic_year'])\n if school:\n enr = getattr(school[0], 'total_enrollment')\n enrollment.append(enr)\n capacity.append(enr \/ getattr(school[0], 'capacity_of_school'))\n else:\n enrollment.append(np.nan)\n capacity.append(np.nan)\n\n enrollment = pd.Series(enrollment)\n data['enrollment'] = enrollment\n capacity = pd.Series(capacity)\n data['capacity'] = capacity\n return data\n\n\ndef 
divide_by_enrollment(data):\n data['teachers'] = data['enrollment'] \/ data['teachers']\n data['girls'] = data['girls'] \/ data['enrollment']\n data['total_bursaries'] = data['total_bursaries'] \/ data['enrollment']\n data['feeding_program'] = data['feeding_program'] \/ data['enrollment']\n data['repeaters'] = data['repeaters'] \/ data['enrollment']\n data['trained_teachers'] = data['trained_teachers'] \/ data['teachers']\n data['primary_enrollment'] = data['primary_enrollment'] \/ data['enrollment']\n return data\n\n\ndef correlations(data, excluded_fields):\n plt.switch_backend('AGG')\n data = pd.DataFrame(data.values())\n data['performance'] = data['above_average_scores'] \/ data['tests_sat']\n data = data.drop(columns=excluded_fields)\n\n data = get_previous_performance(data)\n data = get_enrollment(data)\n\n data = data.drop(columns=['academic_year', 'school_id', 'above_average_scores', 'tests_sat'])\n data = data.apply(pd.to_numeric)\n data = divide_by_enrollment(data)\n\n correlation = []\n spearman = []\n for f in data.columns:\n if f == 'performance':\n continue\n df = data[data[f].notna()]\n x = df[f]\n y = df['performance']\n correlation.append(stats.pearsonr(x, y))\n spearman.append(stats.spearmanr(x, y))\n correlation = pd.DataFrame(correlation)\n spearman = pd.DataFrame(spearman)\n correlation.columns = ['r (Pe)', 'p (Pe)']\n correlation.index = data.columns.drop('performance')\n spearman.index = data.columns.drop('performance')\n correlation['r (Sp)'] = spearman['correlation']\n correlation['p (Sp)'] = spearman['pvalue']\n\n mask = np.zeros((len(correlation), 4))\n mask[:, 3] = True\n mask[:, 1] = True\n ax = sns.heatmap(correlation, annot=True, mask=mask)\n ax.set_title(\"Correlations between School Factors and Exam Performance\")\n\n for (j, i), label in np.ndenumerate(correlation.values):\n label = \"{:.2e}\".format(label)\n if i == 1 or i == 3:\n ax.text(i + 0.5, j + 0.5, label,\n fontdict=dict(ha='center', va='center', color='black'))\n plt.tight_layout()\n graph = get_image()\n return graph\n\n\ndef dropcol_importances(rf, X_train, y_train):\n r = random.randint(1, 999)\n rf_ = clone(rf)\n rf_.random_state = r\n rf_.fit(X_train, y_train)\n baseline = rf_.oob_score_\n imp = []\n for col in X_train.columns:\n X = X_train.drop(col, axis=1)\n rf_ = clone(rf)\n rf_.random_state = r\n rf_.fit(X, y_train)\n o = rf_.oob_score_\n imp.append(baseline - o)\n imp = np.array(imp)\n I = pd.DataFrame(\n data={'Feature': X_train.columns,\n 'Importance': imp})\n I = I.set_index('Feature')\n I = I.sort_values('Importance', ascending=True)\n return I\n\n\ndef rf_model(data, excluded_fields, random):\n plt.switch_backend('AGG')\n data = pd.DataFrame(data.values())\n data['performance'] = data['above_average_scores'] \/ data['tests_sat']\n data = data.drop(columns=excluded_fields)\n\n data = get_previous_performance(data)\n data = get_enrollment(data)\n data = data.drop(columns=['academic_year', 'school_id', 'above_average_scores', 'tests_sat'])\n\n data = data.apply(pd.to_numeric)\n data = divide_by_enrollment(data)\n\n imp_mean = SimpleImputer(missing_values=np.nan, strategy='median')\n imp_mean.fit(data)\n SimpleImputer()\n imputed_data = pd.DataFrame(imp_mean.transform(data))\n imputed_data.columns = data.columns\n imputed_data.index = data.index\n\n data = imputed_data\n\n performance = data['performance']\n data = data.drop(columns=['performance'])\n\n X_train, X_test, y_train, y_test = train_test_split(data, performance, train_size=0.8)\n\n clf = 
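# --- Illustrative, self-contained demonstration (synthetic data; not part of
# --- this module) of the drop-column importance idea implemented in
# --- `dropcol_importances` above: importance = baseline OOB score minus the
# --- OOB score of a forest refit without that column.
import numpy as np
import pandas as pd
from sklearn.base import clone
from sklearn.ensemble import RandomForestRegressor

rng = np.random.default_rng(0)
X_demo = pd.DataFrame({"signal": rng.normal(size=300), "noise": rng.normal(size=300)})
y_demo = 3.0 * X_demo["signal"] + rng.normal(scale=0.1, size=300)

rf = RandomForestRegressor(n_estimators=200, oob_score=True, random_state=0).fit(X_demo, y_demo)
baseline = rf.oob_score_
for col in X_demo.columns:
    dropped = clone(rf).fit(X_demo.drop(columns=[col]), y_demo)
    print(col, round(baseline - dropped.oob_score_, 3))  # "signal" should dominate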
RandomForestRegressor(max_features=None, oob_score=True)\n clf.fit(X_train, y_train)\n\n accuracy = round(clf.score(X_test, y_test), 3)\n\n data = data.drop(columns=['previous'])\n if random:\n data['random'] = pd.Series([random.random() for x in range(len(data))])\n importance = dropcol_importances(clf, data, performance)\n\n # plot feature importance\n ax = pd.Series(importance['Importance']).plot(kind='barh')\n y_labels = importance.index\n ax.set_yticklabels(y_labels)\n plt.xlabel(\"Change in OOB Error\")\n plt.title(\"Random Forest Regression Feature Importance\")\n\n plt.tight_layout()\n graph = get_image()\n return [graph, accuracy]\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_601","text":"0\n\"\"\"Models for estimating the aerodynamics of a 3D foil from its sections.\"\"\"\n\nfrom __future__ import annotations\n\nimport abc\nfrom typing import TYPE_CHECKING, Protocol, runtime_checkable\n\nimport numpy as np\nimport scipy.optimize\n\nfrom pfh.glidersim.util import cross3\n\n\nif TYPE_CHECKING:\n from pfh.glidersim.foil import SimpleFoil\n\n\n__all__ = [\n \"FoilAerodynamics\",\n \"ConvergenceError\",\n \"Phillips\",\n]\n\n\ndef __dir__():\n return __all__\n\n\n@runtime_checkable\nclass FoilAerodynamics(Protocol):\n \"\"\"Interface for classes that implement a FoilAerodynamics model.\"\"\"\n\n @abc.abstractmethod\n def __call__(self, ai, v_W2f, rho_air, **kwargs):\n \"\"\"\n Estimate the forces and moments on a foil.\n\n Parameters\n ----------\n ai : array_like of float\n Airfoil indices. The shape must be able to broadcast to (K,), where\n `K` is the number of control points being used by the estimator.\n v_W2f : array_like of float [m\/s]\n The velocity of the wind relative to the control points in foil frd\n coordinates. The shape must be able to broadcast to (K, 3), where\n `K` is the number of control points being used by the estimator.\n rho_air : array_like of float [kg\/m^3]\n Air density\n \"\"\"\n\n @abc.abstractmethod\n def r_CP2LE(self):\n \"\"\"\n Compute the control points for the section aerodynamics.\n\n Returns\n -------\n ndarray of float, shape (K,3) [m]\n Control points relative to the central leading edge `LE`.\n Coordinates are in canopy frd, and `K` is the number of points\n being used by the estimation method.\n \"\"\"\n\n\nclass ConvergenceError(RuntimeError):\n \"\"\"The estimator failed to converge on a solution.\"\"\"\n\n\nclass Phillips(FoilAerodynamics):\n \"\"\"\n A non-linear numerical lifting-line method.\n\n Uses a set of spanwise bound vortices instead of a single, uniform lifting\n line. Unlike the Prandtl's classic lifting-line theory, this method allows\n for wing sweep and dihedral.\n\n Parameters\n ----------\n foil : SimpleFoil\n Defines the lifting-line and section coefficients.\n v_ref_mag : float [m\/s]\n The reference solution airspeed\n alpha_ref : float [degrees]\n The reference solution angle of attack\n s_nodes : array_like of floats, shape (K+1,)\n Section indices of the `K + 1` section nodes (wing segment endpoints).\n The `K >= 1` aerodynamic control points are centered between the nodes.\n Two common point distributions are:\n\n * Linear: ``np.linspace(-1, 1, K + 1)``\n * Cosine: ``np.cos(np.linspace(np.pi, 0, K + 1))``\n\n s_clamp : float, optional\n Section index to enable clamped output of the aerodynamic coefficients\n for section indices `abs(s) >= s_clamp`. 
Instead of returning `nan`,\n clamping uses the value of the largest `alpha` that produces a\n non-`nan` coefficient for the given (ai, Re) pair.\n\n This option is experimental and should be used with caution. Its\n purpose is to mitigate the fictitious, large angles of attack induced\n at the wing tips due to the control points being placed on the lifting\n line. The theory is that if the induced velocity is indeed fictious,\n then the true angle of attack is likely much closer to the standard\n range. By limiting clamping to just the outer `s > s_clamp`, if the\n wing is experiencing a genuinely large angle of attack, then the other\n non-clamped sections will still fail, thus signalling stall conditions.\n If the segments are small the error introduced should be negligible.\n\n References\n ----------\n .. [1] , \"Modern Adaptation of Prandtl’s Classic\n Lifting-Line Theory\", Journal of Aircraft, 2000\n\n .. [2] Snyder, \"A lifting-line approach to estimating\n propeller\/wing interactions\", 2006\n\n .. [3] McLeanauth, \"Understanding Aerodynamics - Arguing from the Real\n Physics\", 2013, p382\n\n Notes\n -----\n This implementation uses a single distribution for the entire span, which\n is suitable for parafoils, which is a continuous lifting surface, but for\n wings with left and right segments separated by some discontinuity at the\n root you should distribute the points across each semispan independently.\n See [1]_ for a related discussion.\n\n This method does suffer an issue where induced velocity goes to infinity as\n the segment lengths tend toward zero (as the number of segments increases,\n or for a poorly chosen point distribution). See [2]_, section 8.2.3.\n \"\"\"\n\n def __init__(\n self,\n foil: SimpleFoil,\n v_ref_mag,\n alpha_ref: float,\n s_nodes,\n s_clamp: float | None = None,\n ) -> None:\n self.foil = foil\n self.K = len(s_nodes) - 1 # Number of control points\n self.s_nodes = np.asarray(s_nodes)\n self.nodes = self.foil.surface_xyz(self.s_nodes, 0, 0.25, surface=\"chord\")\n self.s_cps = (self.s_nodes[1:] + self.s_nodes[:-1]) \/ 2\n self.cps = self.foil.surface_xyz(self.s_cps, 0, 0.25, surface=\"chord\")\n\n # Enable clamped coefficients at some control points\n if s_clamp is not None:\n self.clamped = np.abs(self.s_cps) >= s_clamp\n else:\n self.clamped = np.full(self.K, False)\n\n # axis0 are nodes, axis1 are control points, axis2 are vectors or norms\n self.R1 = self.cps - self.nodes[:-1, None]\n self.R2 = self.cps - self.nodes[1:, None]\n self.r1 = np.linalg.norm(self.R1, axis=2) # Magnitudes of R_{i1,j}\n self.r2 = np.linalg.norm(self.R2, axis=2) # Magnitudes of R_{i2,j}\n\n # Wing section orientation unit vectors at each control point\n # Note: Phillip's derivation uses back-left-up coordinates (not `frd`)\n u = -self.foil.section_orientation(self.s_cps).T\n self.u_a, self.u_s, self.u_n = u[0].T, u[1].T, u[2].T\n\n # Define the differential areas as parallelograms by assuming a linear\n # chord variation between nodes.\n self.dl = self.nodes[1:] - self.nodes[:-1]\n node_chords = self.foil.chord_length(self.s_nodes)\n self.c_avg = (node_chords[1:] + node_chords[:-1]) \/ 2\n self.dA = self.c_avg * np.linalg.norm(cross3(self.u_a, self.dl), axis=1)\n\n # Precompute the `v` terms that do not depend on `u_inf`, which are the\n # first bracketed term in Hunsaker Eq:6.\n R1, R2, r1, r2 = self.R1, self.R2, self.r1, self.r2 # Shorthand\n self.v_ij = np.zeros((self.K, self.K, 3)) # Extra terms when `i != j`\n for ij in [(i, j) for i in range(self.K) for j in 
range(self.K)]:\n if ij[0] == ij[1]: # Skip singularities when `i == j`\n continue\n self.v_ij[ij] = (\n ((r1[ij] + r2[ij]) * cross3(R1[ij], R2[ij])) # fmt: skip\n \/ (r1[ij] * r2[ij] * (r1[ij] * r2[ij] + np.dot(R1[ij], R2[ij])))\n )\n\n # Precompute a reference solution from a (hopefully easy) base case.\n # Sets an initial \"solution\" (which isn't actually a solution) just to\n # bootstrap the `__call__` method with an initial `Gamma` value.\n alpha_ref = np.deg2rad(alpha_ref)\n v_mag = np.broadcast_to(v_ref_mag, (self.K, 3))\n v_W2f_ref = -v_mag * np.array([np.cos(alpha_ref), 0, np.sin(alpha_ref)])\n self._reference_solution = {\n \"ai\": 0,\n \"v_W2f\": v_W2f_ref,\n \"Gamma\": np.sqrt(1 - self.s_cps**2), # Naive ellipse\n }\n try:\n _, _, self._reference_solution = self.__call__(0, v_W2f_ref, 1.2)\n except ConvergenceError as e:\n raise RuntimeError(\"Phillips: failed to initialize base case\")\n\n def _compute_Reynolds(self, v_W2f, rho_air):\n \"\"\"Compute the Reynolds number at each control point.\"\"\"\n # FIXME: verify that using the total airspeed (including spanwise flow)\n # is okay. A few tests show minimal differences, so for now I'm\n # not wasting time computing the normal and chordwise flows.\n u = np.linalg.norm(v_W2f, axis=-1) # airspeed [m\/s]\n mu = 1.81e-5 # Standard dynamic viscosity of air\n Re = rho_air * u * self.c_avg \/ mu\n return Re\n\n def r_CP2LE(self):\n cps = self.cps.view()\n cps.flags.writeable = False\n return cps\n\n def _induced_velocities(self, u_inf):\n # 2. Compute the \"induced velocity\" unit vectors\n # * ref: Phillips, Eq:6\n R1, R2, r1, r2 = self.R1, self.R2, self.r1, self.r2 # Shorthand\n v = self.v_ij.copy()\n v += (\n cross3(u_inf, R2)\n \/ (r2 * (r2 - np.einsum(\"k,ijk->ij\", u_inf, R2)))[..., None]\n )\n v -= (\n cross3(u_inf, R1)\n \/ (r1 * (r1 - np.einsum(\"k,ijk->ij\", u_inf, R1)))[..., None]\n )\n\n return v \/ (4 * np.pi) # axes: (inducer, inducee, 3-vector)\n\n def _local_velocities(self, v_W2f, Gamma, v):\n # Compute the local fluid velocities\n # * ref: Hunsaker Eq:5\n # * ref: Phillips Eq:5 (nondimensional version)\n V = v_W2f + np.einsum(\"j,jik->ik\", Gamma, v)\n\n # Compute the local angle of attack for each section\n # * ref: Phillips Eq:9 (dimensional) or Eq:12 (dimensionless)\n V_n = np.einsum(\"ik,ik->i\", V, self.u_n) # Normal-wise\n V_a = np.einsum(\"ik,ik->i\", V, self.u_a) # Chordwise\n alpha = np.arctan2(V_n, V_a)\n\n return V, V_n, V_a, alpha\n\n def _f(self, Gamma, ai, v_W2f, v, Re):\n # Compute the residual error vector\n # * ref: Hunsaker Eq:8\n # * ref: Phillips Eq:14\n V, V_n, V_a, alpha = self._local_velocities(v_W2f, Gamma, v)\n W = cross3(V, self.dl)\n W_norm = np.sqrt(np.einsum(\"ik,ik->i\", W, W))\n Cl = self.foil.sections.Cl(\n self.s_cps,\n ai,\n alpha,\n Re,\n clamp=self.clamped,\n )\n # return 2 * Gamma * W_norm - np.einsum(\"ik,ik,i,i->i\", V, V, self.dA, Cl)\n return 2 * Gamma * W_norm - (V_n**2 + V_a**2) * self.dA * Cl\n\n def _J(self, Gamma, ai, v_W2f, v, Re, verify_J=False):\n # 7. 
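# --- Illustrative aside (assumed numbers, not taken from any foil): the
# --- Reynolds number computed by `_compute_Reynolds` above is
# --- Re = rho_air * |v_W2f| * c_avg / mu, with mu = 1.81e-5.
rho_air = 1.2     # kg/m^3
airspeed = 10.0   # m/s
chord = 2.5       # m, assumed mean chord
mu = 1.81e-5      # dynamic viscosity of air, as in `_compute_Reynolds`
print(rho_air * airspeed * chord / mu)  # ~1.66e6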
Compute the Jacobian matrix, `J[ij] = d(f_i)\/d(Gamma_j)`\n # * ref: Hunsaker Eq:11\n V, V_n, V_a, alpha = self._local_velocities(v_W2f, Gamma, v)\n W = cross3(V, self.dl)\n W_norm = np.sqrt(np.einsum(\"ik,ik->i\", W, W))\n Cl = self.foil.sections.Cl(\n self.s_cps,\n ai,\n alpha,\n Re,\n clamp=self.clamped,\n )\n Cl_alpha = self.foil.sections.Cl_alpha(\n self.s_cps,\n ai,\n alpha,\n Re,\n clamp=self.clamped,\n )\n\n J = 2 * np.diag(W_norm) # Additional terms for i==j\n J2 = 2 * np.einsum(\"i,ik,i,jik->ij\", Gamma, W, 1 \/ W_norm, cross3(v, self.dl))\n J3 = (\n np.einsum(\"i,jik,ik->ij\", V_a, v, self.u_n)\n - np.einsum(\"i,jik,ik->ij\", V_n, v, self.u_a) # fmt: skip\n )\n J3 *= (\n (self.dA * Cl_alpha)[:, None]\n * np.einsum(\"ik,ik->i\", V, V)\n \/ (V_n**2 + V_a**2)\n )\n J4 = 2 * np.einsum(\"i,i,ik,jik->ij\", self.dA, Cl, V, v)\n J += J2 - J3 - J4\n\n # Compare the analytical gradient to the finite-difference version\n if verify_J:\n J_true = self._J_finite(Gamma, ai, v_W2f, v, Re)\n if not np.allclose(J, J_true):\n print(\"\\n !!! The analytical Jacobian disagrees. Halting. !!!\")\n breakpoint()\n\n return J\n\n def _J_finite(self, Gamma, ai, v_W2f, v, Re):\n \"\"\"Compute the Jacobian using a centered finite distance.\n\n Useful for checking the analytical gradient.\n\n Examples\n --------\n >>> J1 = self._J(Gamma, v_W2f, v, ai)\n >>> J2 = self._J_finite(Gamma, v_W2f, v, ai)\n >>> np.allclose(J1, J2)\n True\n \"\"\"\n # This uses the same method as `scipy.optimize.approx_fprime`, but that\n # function only works for scalar-valued functions.\n JT = np.empty((self.K, self.K)) # Jacobian transpose (J_ji)\n eps = np.sqrt(np.finfo(float).eps)\n\n # Build the Jacobian column-wise (row-wise of the tranpose)\n Gp, Gm = Gamma.copy(), Gamma.copy()\n for k in range(self.K):\n Gp[k], Gm[k] = Gamma[k] + eps, Gamma[k] - eps\n fp = self._f(Gp, ai, v_W2f, v, Re)\n fm = self._f(Gm, ai, v_W2f, v, Re)\n JT[k] = (fp - fm) \/ (2 * eps)\n Gp[k], Gm[k] = Gamma[k], Gamma[k]\n\n return JT.T\n\n def _solve_circulation(self, ai, v_W2f, Re, Gamma0):\n \"\"\"\n Solve for the spanwise circulation distribution.\n\n Parameters\n ----------\n ai : array of float, shape (K,) [radians]\n Airfoil indices.\n v_W2f : array of float, shape (K,) [m\/s]\n Relative wind velocity at each control point.\n Re : array of float, shape (K,)\n Reynolds number at each segment\n Gamma0 : array of float, shape (K,)\n The initial proposal\n\n Returns\n -------\n Gamma : array of float, shape (K,)\n Circulation strengths of each segment.\n v : array, shape (K,K,3) [m\/s]\n Induced velocities between each segment, indexed as (inducer,\n inducee).\n \"\"\"\n v_mid = v_W2f[self.K \/\/ 2]\n u_inf = v_mid \/ np.linalg.norm(v_mid) # FIXME: what if PQR != 0?\n v = self._induced_velocities(u_inf)\n args = (ai, v_W2f, v, Re)\n res = scipy.optimize.root(self._f, Gamma0, args, jac=self._J, tol=1e-4)\n\n if not res[\"success\"]:\n raise ConvergenceError\n\n return res[\"x\"], v\n\n def __call__(\n self,\n ai,\n v_W2f,\n rho_air,\n *,\n reference_solution: dict | None = None,\n max_splits: int = 10,\n ):\n v_W2f = np.broadcast_to(v_W2f, (self.K, 3))\n Re = self._compute_Reynolds(v_W2f, rho_air)\n\n if reference_solution is None:\n reference_solution = self._reference_solution\n\n ai_ref = reference_solution[\"ai\"]\n v_W2f_ref = reference_solution[\"v_W2f\"]\n Gamma_ref = reference_solution[\"Gamma\"]\n\n # Try to solve for the target (`Gamma` as a function of `v_W2f` and\n # `ai`) directly using the `reference_solution`. 
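# --- Illustrative, standalone sketch (toy problem; names are assumptions) of
# --- the splitting strategy this comment describes: if the solve for the
# --- target fails, solve an easier case halfway between the last good input
# --- and the target, then work back through the backlog of pending targets.
import numpy as np
import scipy.optimize

def solve_with_continuation(f, x0, p_target, p_ref=0.0, max_splits=10):
    x_ref, backlog, splits, p = x0, [], 0, p_target
    while True:
        res = scipy.optimize.root(f, x_ref, args=(p,))
        if not res.success:
            if splits == max_splits:
                raise RuntimeError("max splits reached")
            splits += 1
            backlog.append(p)
            p = 0.5 * (p_ref + p)   # retry an easier intermediate target
            continue
        p_ref, x_ref = p, res.x     # accept; use as the next reference
        if backlog:
            p = backlog.pop()
        else:
            return x_ref

# solve_with_continuation(lambda x, p: x**3 + x - p, np.array([0.0]), 5.0)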
If that fails, pick a\n # point between the target and the reference, solve for that easier\n # case, then use its solution as the new starting point for the next\n # target. Repeat for intermediate targets until either solving for the\n # original target, or exceeding `max_splits`.\n target_backlog = [] # Stack of pending targets\n num_splits = 0\n while True:\n try:\n Gamma, v = self._solve_circulation(ai, v_W2f, Re, Gamma_ref)\n except ConvergenceError:\n if num_splits == max_splits:\n raise ConvergenceError(\"max splits reached\")\n num_splits += 1\n target_backlog.append((ai, v_W2f))\n P = 0.5 # Ratio, a point between the reference and the target\n ai = (1 - P) * ai_ref + P * ai\n v_W2f = (1 - P) * v_W2f_ref + P * v_W2f\n continue\n\n ai_ref = ai\n v_W2f_ref = v_W2f\n Gamma_ref = Gamma\n\n if target_backlog:\n ai, v_W2f = target_backlog.pop()\n else:\n break\n\n V, V_n, V_a, alpha = self._local_velocities(v_W2f, Gamma, v)\n\n # Compute the inviscid forces using the 3D vortex lifting law\n # * ref: Hunsaker Eq:1\n # * ref: Phillips Eq:4\n dF_inviscid = Gamma * cross3(V, self.dl).T\n\n # Compute the viscous forces.\n # * ref: Hunsaker Eq:17\n #\n # The equation in the paper uses the \"characteristic chord\", but I\n # believe that is a mistake; it produces *massive* drag. Here I use the\n # section area like they do in \"MachUp_Py\" (see where they compute\n # `f_parasite_mag` in `llmodel.py:LLModel:_compute_forces`).\n Cd = self.foil.sections.Cd(\n self.s_cps,\n ai,\n alpha,\n Re,\n clamp=self.clamped,\n )\n V2 = np.einsum(\"ik,ik->i\", V, V)\n u_drag = V.T \/ np.sqrt(V2)\n dF_viscous = 0.5 * V2 * self.dA * Cd * u_drag\n\n # The total forces applied at each control point\n dF = dF_inviscid + dF_viscous\n\n # Compute the section moments.\n # * ref: Hunsaker Eq:19\n # * ref: Phillips Eq:28\n #\n # These are strictly the section moments caused by airflow around the\n # section. 
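# --- Illustrative aside (assumed numbers) for the 3D vortex lifting law used
# --- just above: the inviscid force on a segment is dF = rho * Gamma * (V x dl).
import numpy as np
rho, Gamma = 1.2, 2.0                    # kg/m^3 and m^2/s (assumed values)
V = np.array([10.0, 0.0, 0.0])           # local flow velocity, m/s
dl = np.array([0.0, 0.5, 0.0])           # bound-vortex segment vector, m
print(rho * Gamma * np.cross(V, dl))     # [ 0.  0. 12.]  (newtons)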
It does not include moments about the aircraft reference\n # point (commonly the center of gravity); those extra moments must be\n # calculated by the wing.\n # * ref: Hunsaker Eq:19\n # * ref: Phillips Eq:28\n Cm = self.foil.sections.Cm(\n self.s_cps,\n ai,\n alpha,\n Re,\n clamp=self.clamped,\n )\n dM = -0.5 * V2 * self.dA * self.c_avg * Cm * self.u_s.T\n\n solution = {\n \"ai\": ai,\n \"v_W2f\": v_W2f_ref,\n \"Gamma\": Gamma_ref,\n }\n\n # print(\"\\nFinished `Phillips.__call__`\")\n # breakpoint()\n\n dF *= rho_air\n dM *= rho_air\n\n return dF.T, dM.T, solution\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_602","text":"smujiang\/WSIPenMarkingRemoval1-10\n#!\/usr\/bin\/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCompare two or more images using MSE, PSNR, SNR, SSIM, UQI, PBVIF, MSSIM,\nNQM and WSNR metrics.\n\nFor usage and a list of options, try this:\n$ .\/pymetrikz -h\n\nThis program and its regression test suite live here:\nhttp:\/\/www.sawp.com.br\/projects\/pymetrikz\"\"\"\n\nimport numpy as __n\nfrom scipy.ndimage.filters import gaussian_filter as __gaussian_filter\nfrom scipy.ndimage.filters import convolve as __convolve\nfrom scipy.ndimage.filters import correlate as __correlate\nfrom scipy.fftpack import fftshift as __fftshift\n\n\n__author__ = \" <>\"\n__copyright__ = \"Copyright (c) 2011-2014 \"\n__license__ = \"GPLv2\"\n\n\ndef mse(reference, query):\n \"\"\"Computes the Mean Square Error (MSE) of two images.\n\n value = mse(reference, query)\n\n Parameters\n ----------\n reference: original image data.\n query : modified image data to be compared.\n\n Return\n ----------\n value : MSE value\n \"\"\"\n (ref, que) = (reference.astype('double'), query.astype('double'))\n diff = ref - que\n square = (diff ** 2)\n mean = square.mean()\n return mean\n\n\ndef rmse(reference, query):\n msev = mse(reference, query)\n return __n.sqrt(msev)\n\n\ndef psnr(reference, query, normal=255):\n \"\"\"Computes the Peak Signal-to-Noise-Ratio (PSNR).\n\n value = psnr(reference, query, normalization=255)\n\n Parameters\n ----------\n reference: original image data.\n query : modified image data to be compared.\n normal : normalization value (255 for 8-bit image\n\n Return\n ----------\n value : PSNR value\n \"\"\"\n normalization = float(normal)\n msev = mse(reference, query)\n if msev != 0:\n value = 10.0 * __n.log10(normalization * normalization \/ msev)\n else:\n value = float(\"inf\")\n return value\n\n\ndef snr(reference, query):\n \"\"\"Computes the Signal-to-Noise-Ratio (SNR).\n\n value = snr(reference, query)\n\n Parameters\n ----------\n reference: original image data.\n query : modified image data to be compared.\n\n Return\n ----------\n value : SNR value\n \"\"\"\n signal_value = (reference.astype('double') ** 2).mean()\n msev = mse(reference, query)\n if msev != 0:\n value = 10.0 * __n.log10(signal_value \/ msev)\n else:\n value = float(\"inf\")\n return value\n\n\ndef ssim(reference, query):\n \"\"\"Computes the Structural SIMilarity Index (SSIM).\n\n value = ssim(reference, query)\n\n Parameters\n ----------\n reference: original image data.\n query : modified image data to be compared.\n\n Return\n ----------\n value : SSIM value\n \"\"\"\n def __get_kernels():\n k1, k2, l = (0.01, 0.03, 255.0)\n kern1, kern2 = map(lambda x: (x * l) ** 2, (k1, k2))\n return kern1, kern2\n\n def __get_mus(i1, i2):\n mu1, mu2 = map(lambda x: __gaussian_filter(x, 1.5), (i1, i2))\n m1m1, m2m2, m1m2 = (mu1 * mu1, mu2 * mu2, mu1 * mu2)\n return m1m1, m2m2, 
m1m2\n\n def __get_sigmas(i1, i2, delta1, delta2, delta12):\n f1 = __gaussian_filter(i1 * i1, 1.5) - delta1\n f2 = __gaussian_filter(i2 * i2, 1.5) - delta2\n f12 = __gaussian_filter(i1 * i2, 1.5) - delta12\n return f1, f2, f12\n\n def __get_positive_ssimap(C1, C2, m1m2, mu11, mu22, s12, s1s1, s2s2):\n num = (2 * m1m2 + C1) * (2 * s12 + C2)\n den = (mu11 + mu22 + C1) * (s1s1 + s2s2 + C2)\n return num \/ den\n\n def __get_negative_ssimap(C1, C2, m1m2, m11, m22, s12, s1s1, s2s2):\n (num1, num2) = (2.0 * m1m2 + C1, 2.0 * s12 + C2)\n (den1, den2) = (m11 + m22 + C1, s1s1 + s2s2 + C2)\n ssim_map = __n.ones(img1.shape)\n indx = (den1 * den2 > 0)\n ssim_map[indx] = (num1[indx] * num2[indx]) \/ (den1[indx] * den2[indx])\n indx = __n.bitwise_and(den1 != 0, den2 == 0)\n ssim_map[indx] = num1[indx] \/ den1[indx]\n return ssim_map\n\n (img1, img2) = (reference.astype('double'), query.astype('double'))\n (m1m1, m2m2, m1m2) = __get_mus(img1, img2)\n (s1, s2, s12) = __get_sigmas(img1, img2, m1m1, m2m2, m1m2)\n (C1, C2) = __get_kernels()\n if C1 > 0 and C2 > 0:\n ssim_map = __get_positive_ssimap(C1, C2, m1m2, m1m1, m2m2, s12, s1, s2)\n else:\n ssim_map = __get_negative_ssimap(C1, C2, m1m2, m1m1, m2m2, s12, s1, s2)\n ssim_value = ssim_map.mean()\n return ssim_value\n\n\ndef uqi(reference, query):\n \"\"\"Computes the Universal Quality Index (UQI).\n\n value = uqi(reference, query\n\n Parameters\n ----------\n reference: original image data.\n query : modified image data to be compared.\n\n Return\n ----------\n value : UQI value\n \"\"\"\n def __conv(x):\n window = __n.ones((BLOCK_SIZE, BLOCK_SIZE))\n if len(x.shape) < 3:\n return __convolve(x, window)\n else:\n channels = x.shape[2]\n f = [__convolve(x[:, :, c], window) for c in range(channels)]\n return __n.array(f)\n\n def __get_filtered(im1, im2, BLOCK_SIZE):\n (im1im1, im2im2, im1im2) = (im1 * im1, im2 * im2, im1 * im2)\n (b1, b2, b3, b4, b5) = map(__conv, (im1, im2, im1im1, im2im2, im1im2))\n (b6, b7) = (b1 * b2, b1 * b1 + b2 * b2)\n return (b1, b2, b3, b4, b5, b6, b7)\n\n def __get_quality_map(b1, b2, b3, b4, b5, b6, b7, BLOCK_SIZE):\n N = BLOCK_SIZE * BLOCK_SIZE\n numerator = 4.0 * (N * b5 - b6) * b6\n denominator1 = N * (b3 + b4) - b7\n denominator = denominator1 * b7\n index = __n.bitwise_and(denominator1 == 0, b7 != 0)\n quality_map = __n.ones(denominator.shape)\n quality_map[index] = 2.0 * b6[index] \/ b7[index]\n index = (denominator != 0)\n quality_map[index] = numerator[index] \/ denominator[index]\n return quality_map[index]\n\n BLOCK_SIZE = 8\n (img1, img2) = (reference.astype('double'), query.astype('double'))\n (b1, b2, b3, b4, b5, b6, b7) = __get_filtered(img1, img2, BLOCK_SIZE)\n quality_map = __get_quality_map(b1, b2, b3, b4, b5, b6, b7, BLOCK_SIZE)\n value = quality_map.mean()\n return value\n\n\ndef pbvif(reference, query):\n \"\"\"Computes the Pixel-Based Visual Information Fidelity (PB-VIF).\n\n value = pbvif(reference, query)\n\n Parameters\n ----------\n reference: original image data.\n query : modified image data to be compared.\n\n Return\n ----------\n value : PB-VIF value\n \"\"\"\n def __get_sigma(win, ref, dist, mu1_sq, mu2_sq, mu1_mu2):\n sigma1_sq = __filter2(win, ref * ref) - mu1_sq\n sigma2_sq = __filter2(win, dist * dist) - mu2_sq\n sigma12 = __filter2(win, ref * dist) - mu1_mu2\n (sigma1_sq[sigma1_sq < 0], sigma2_sq[sigma2_sq < 0]) = (0.0, 0.0)\n return (sigma2_sq, sigma12, sigma1_sq)\n\n def __get_normalized(s1s1, s2s2, s1s2):\n g = s1s2 \/ (s1s1 + 1e-10)\n sv_sq = s2s2 - g * s1s2\n g[s1s1 < 1e-10] = 0\n sv_sq[s1s1 < 
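# --- Illustrative sanity check (synthetic arrays, not part of pymetrikz) for
# --- the definitions above: a uniform error of 5 grey levels gives
# --- MSE = 25 and PSNR = 10 * log10(255**2 / 25) ~= 34.15 dB.
import numpy as np
reference = np.full((64, 64), 100, dtype=np.uint8)
query = reference + 5
# mse(reference, query)   -> 25.0
# psnr(reference, query)  -> ~34.15
print(10.0 * np.log10(255.0 ** 2 / 25.0))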
1e-10] = s2s2[s1s1 < 1e-10]\n s1s1[s1s1 < 1e-10] = 0\n g[s2s2 < 1e-10] = 0\n sv_sq[s2s2 < 1e-10] = 0\n sv_sq[g < 0] = s2s2[g < 0]\n g[g < 0] = 0\n sv_sq[sv_sq <= 1e-10] = 1e-10\n return (g, sv_sq)\n\n def __get_num(s1s1, sv_sq, sigma_nsq, g):\n normg = (g ** 2) * s1s1 \/ (sv_sq + sigma_nsq)\n snr = __n.log10(1.0 + normg).sum()\n return snr\n\n def __get_den(s1s1, sigma_nsq):\n snr = __n.log10(1.0 + s1s1 \/ sigma_nsq)\n return snr.sum()\n\n def __get_num_den_level(ref, dist, scale):\n sig = 2.0\n N = (2.0 ** (4 - scale + 1.0)) + 1.0\n win = __get_gaussian_kernel(N, N \/ 5.0)\n if scale > 1:\n ref = __filter2(win, ref)\n dist = __filter2(win, dist)\n ref = ref[::2, ::2]\n dist = dist[::2, ::2]\n (mu1, mu2) = (__filter2(win, ref), __filter2(win, dist))\n (m1m1, m2m2, m1m2) = (mu1 * mu1, mu2 * mu2, mu1 * mu2)\n (s2s2, s1s2, s1s1) = __get_sigma(win, ref, dist, m1m1, m2m2, m1m2)\n (g, svsv) = __get_normalized(s1s1, s2s2, s1s2)\n (num, den) = (__get_num(s1s1, svsv, sig, g), __get_den(s1s1, sig))\n return (num, den)\n\n (ref, dist) = (reference.astype('double'), query.astype('double'))\n zipped = map(lambda x: __get_num_den_level(ref, dist, x), range(1, 5))\n (nums, dens) = zip(*zipped)\n value = sum(nums) \/ sum(dens)\n return value\n\n\ndef mssim(reference, query):\n \"\"\"Computes the Multi-Scale SSIM Index (MSSIM).\n\n value = mssim(reference, query)\n\n Parameters\n ----------\n reference: original image data.\n query : modified image data to be compared.\n\n Return\n ----------\n value : MSSIM value\n \"\"\"\n def __get_filt_kern():\n n = [131, -199, -101, 962, 932, 962, -101, -199, 131]\n d = [3463, 8344, 913, 2549, 1093, 2549, 913, 8344, 3463]\n num = __n.matrix(n).T\n den = __n.matrix(d).T\n lod = num.astype('double') \/ den.astype('double')\n lpf = __n.dot(lod, lod.T)\n return lpf \/ lpf.sum()\n\n def __get_ssim(img1, img2, K):\n comp_ssim = __ssim_modified(img1, img2, K)[1]\n return (comp_ssim[1], comp_ssim[2])\n\n def __get_MVR(img1, img2, K, nlevs):\n (ssim_v, ssim_r) = (__n.zeros((nlevs, 1)), __n.zeros((nlevs, 1)))\n (ssim_v[0], ssim_r[0]) = __get_ssim(img1, img2, K)\n filt_kern = __get_filt_kern()\n for s in range(nlevs - 1):\n (img1, img2) = map(lambda x: __filter2(filt_kern, x), (img1, img2))\n (img1, img2) = (img1[::2, ::2], img2[::2, ::2])\n comp_ssim = __ssim_modified(img1, img2, K)[1]\n ssim_m = comp_ssim[0]\n ssim_v[s + 1] = comp_ssim[1]\n ssim_r[s + 1] = comp_ssim[2]\n return (ssim_m, ssim_v, ssim_r)\n\n def __calc_mssim_mvr(img1, img2):\n (K, weights) = ((0.01, 0.03), (0.0448, 0.2856, 0.3001, 0.2363, 0.1333))\n (alpha, beta, lvl) = (0.1333, __n.matrix(weights).T, len(weights))\n (ssim_m, ssim_v, ssim_r) = __get_MVR(img1, img2, K, lvl)\n m = ssim_m ** alpha\n v = (ssim_v ** beta).prod()\n r = (ssim_r ** beta).prod()\n return (m, v, r)\n\n (ref, quer) = (reference.astype('double'), query.astype('double'))\n ssim_mvr = __n.matrix(__calc_mssim_mvr(ref, quer))\n value = ssim_mvr.prod()\n return value\n\n\ndef __filter2(B, X, shape='nearest'):\n B2 = __n.rot90(__n.rot90(B))\n if len(X.shape) < 3:\n return __correlate(X, B2, mode=shape)\n else:\n channels = X.shape[2]\n f = [__correlate(X[:, :, c], B2, mode=shape) for c in range(channels)]\n return __n.array(f)\n\n\ndef __get_gaussian_kernel(N=15, sigma=1.5):\n (H, W) = ((N - 1) \/ 2, (N - 1) \/ 2)\n std = sigma\n (y, x) = __n.mgrid[-H:H + 1, -W:W + 1]\n arg = -(x * x + y * y) \/ (2.0 * std * std)\n h = __n.exp(arg)\n index = h < __n.finfo(float).eps * h.max(0)\n h[index] = 0\n sumh = h.sum()\n if sumh != 0:\n h = h \/ sumh\n 
return h\n\n\ndef __ssim_modified(reference, query, K):\n def __get_kern(K):\n L = 255\n kern = map(lambda x: (x * L) ** 2, K)\n return (kern[0], kern[1])\n\n def __get_filtering_window():\n window = __get_gaussian_kernel(11, 1.5)\n return window \/ window.sum()\n\n def __get_mus(img1, img2, window):\n (mu1, mu2) = map(lambda x: __filter2(window, x), (img1, img2))\n (m1m1, m2m2, m1m2) = (mu1 * mu1, mu2 * mu2, mu1 * mu2)\n return (mu1, mu2, m1m1, m2m2, m1m2)\n\n def __get_sigmas(img1, img2, window, m1m1, m2m2, m1m2):\n s1s1 = __filter2(window, img1 * img1) - m1m1\n s2s2 = __filter2(window, img2 * img2) - m2m2\n s12 = __filter2(window, img1 * img2) - m1m2\n (s1, s2) = map(__n.sqrt, (__n.abs(s1s1), __n.abs(s2s2)))\n return (s1s1, s2s2, s1, s2, s12)\n\n def __MVR_pos_kern(m, kern, s, s_square):\n (m11, m22, m12) = m\n (k1, k2) = kern\n (s1, s2) = s\n (s1s1, s2s2, s12) = s_square\n M = (2.0 * m12 + k1) \/ (m11 + m22 + k1)\n V = (2.0 * s1 * s2 + k2) \/ (s1s1 + s2s2 + k2)\n R = (s12 + k2 \/ 2.0) \/ (s1 * s2 + k2 \/ 2.0)\n return (M, V, R)\n\n def __MVR_neg_kern(m, s, s_square):\n def __calcM(mu1, m11, m22, m12):\n ssim_ln = 2.0 * m12\n ssim_ld = m11 + m22\n index_l = ssim_ld > 0\n M = __n.ones(mu1.shape)\n M[index_l] = ssim_ln[index_l] \/ ssim_ld[index_l]\n return M\n\n def __calcV(mu1, s1, s2, s11, s22):\n ssim_cn = 2.0 * s1 * s2\n ssim_cd = s11 + s22\n V = __n.ones(mu1.shape)\n index_c = ssim_cd > 0\n V[index_c] = ssim_cn[index_c] \/ ssim_cd[index_c]\n return V\n\n def __calcR(mu1, s1, s2, s12):\n (ssim_sn, ssim_sd) = (s12, s1 * s2)\n R = __n.ones(mu1.shape)\n (index1, index2) = (s1 > 0, s2 > 0)\n index_s1 = index1 * index2 > 0\n R[index_s1] = ssim_sn[index_s1] \/ ssim_sd[index_s1]\n index_s2 = index1 * __n.logical_not(index2) > 0\n R[index_s2] = 0.0\n return R\n\n (mu1, mu2, m11, m22, m12) = m\n (s1, s2) = s\n (s11, s22, s12) = s_square\n M = __calcM(mu1, m11, m22, m12)\n V = __calcV(mu1, s1, s2, s11, s22)\n R = __calcR(mu1, s1, s2, s12)\n return (M, V, R)\n\n def __get_composition_vector(img1, img2):\n filt = __get_filtering_window()\n (mu1, mu2, m11, m22, m12) = __get_mus(img1, img2, filt)\n (s11, s22, s1, s2, s12) = __get_sigmas(img1, img2, filt, m11, m22, m12)\n (kern1, kern2) = __get_kern(K)\n if kern1 > 0 and kern2 > 0:\n (m, kern, s) = ((m11, m22, m12), (kern1, kern2), (s1, s2))\n s_square = (s11, s22, s12)\n (M, V, R) = __MVR_pos_kern(m, kern, s, s_square)\n else:\n (m, s) = ((mu1, mu2, m11, m22, m12), (s1, s2))\n s_square = (s11, s22, s12)\n (M, V, R) = __MVR_neg_kern(m, s, s_square)\n return (M, V, R)\n\n def __get_ssim_map(M, V, R):\n ssim_map = M * V * R\n return ssim_map\n\n def __get_ssim_from_composition_vector(M, V, R):\n ssim_map = __get_ssim_map(M, V, R)\n ssim = ssim_map.mean()\n return ssim\n\n (img1, img2) = reference.astype('double'), query.astype('double')\n (M, V, R) = __get_composition_vector(img1, img2)\n composite_mean_vector = (M.mean(), V.mean(), R.mean())\n ssim = __get_ssim_from_composition_vector(M, V, R)\n return (ssim, composite_mean_vector)\n\n\ndef __convert_to_luminance(x):\n return __n.dot(x[..., :3], [0.299, 0.587, 0.144]).astype('double')\n\n\ndef nqm(reference, query):\n \"\"\"Computes the NQM metric.\n\n value = nqm(reference, query)\n\n Parameters\n ----------\n reference: original image data.\n query : modified image data to be compared.\n\n Return\n ----------\n value : NQM value\n \"\"\"\n def __ctf(f_r):\n \"\"\" Bandpass Contrast Threshold Function for RGB\"\"\"\n (gamma, alpha) = (0.0192 + 0.114 * f_r, (0.114 * f_r) ** 1.1)\n beta = 
__n.exp(-alpha)\n num = 520.0 * gamma * beta\n return 1.0 \/ num\n\n def _get_masked(c, ci, a, ai, i):\n (H, W) = c.shape\n (c, ci, ct) = (c.flatten(1), ci.flatten(1), __ctf(i))\n ci[abs(ci) > 1.0] = 1.0\n T = ct * (0.86 * ((c \/ ct) - 1.0) + 0.3)\n (ai, a, a1) = (ai.flatten(1), a.flatten(1), (abs(ci - c) - T) < 0.0)\n ai[a1] = a[a1]\n return ai.reshape(H, W)\n\n def __get_thresh(x, T, z, trans=True):\n (H, W) = x.shape\n if trans:\n (x, z) = (x.flatten(1).T, z.flatten())\n else:\n (x, z) = (x.flatten(1), z.flatten(1))\n z[abs(x) < T] = 0.0\n return z.reshape(H, W)\n\n def __decompose_cos_log_filter(w1, w2, phase=__n.pi):\n return 0.5 * (1 + __n.cos(__n.pi * __n.log2(w1 + w2) - phase))\n\n def __get_w(r):\n w = [(r + 2) * ((r + 2 <= 4) * (r + 2 >= 1))]\n w += [r * ((r <= 4) * (r >= 1))]\n w += [r * ((r >= 2) * (r <= 8))]\n w += [r * ((r >= 4) * (r <= 16))]\n w += [r * ((r >= 8) * (r <= 32))]\n w += [r * ((r >= 16) * (r <= 64))]\n return w\n\n def __get_u(r):\n u = [4 * (__n.logical_not((r + 2 <= 4) * (r + 2 >= 1)))]\n u += [4 * (__n.logical_not((r <= 4) * (r >= 1)))]\n u += [0.5 * (__n.logical_not((r >= 2) * (r <= 8)))]\n u += [4 * (__n.logical_not((r >= 4) * (r <= 16)))]\n u += [0.5 * (__n.logical_not((r >= 8) * (r <= 32)))]\n u += [4 * (__n.logical_not((r >= 16) * (r <= 64)))]\n return u\n\n def __get_G(r):\n (w, u) = (__get_w(r), __get_u(r))\n phase = [__n.pi, __n.pi, 0.0, __n.pi, 0.0, __n.pi]\n dclf = __decompose_cos_log_filter\n return [dclf(w[i], u[i], phase[i]) for i in range(len(phase))]\n\n def __compute_fft_plane_shifted(ref, query):\n (x, y) = ref.shape\n (xplane, yplane) = __n.mgrid[-y \/ 2:y \/ 2, -x \/ 2:x \/ 2]\n plane = (xplane + 1.0j * yplane)\n r = abs(plane)\n G = __get_G(r)\n Gshifted = map(__fftshift, G)\n return [Gs.T for Gs in Gshifted]\n\n def __get_c(a, l_0):\n c = [a[0] \/ l_0]\n c += [a[1] \/ (l_0 + a[0])]\n c += [a[2] \/ (l_0 + a[0] + a[1])]\n c += [a[3] \/ (l_0 + a[0] + a[1] + a[2])]\n c += [a[4] \/ (l_0 + a[0] + a[1] + a[2] + a[3])]\n return c\n\n def __get_ci(ai, li_0):\n ci = [ai[0] \/ (li_0)]\n ci += [ai[1] \/ (li_0 + ai[0])]\n ci += [ai[2] \/ (li_0 + ai[0] + ai[1])]\n ci += [ai[3] \/ (li_0 + ai[0] + ai[1] + ai[2])]\n ci += [ai[4] \/ (li_0 + ai[0] + ai[1] + ai[2] + ai[3])]\n return ci\n\n def __compute_contrast_images(a, ai, l, li):\n ci = __get_ci(ai, li)\n c = __get_c(a, l)\n return (c, ci)\n\n def __get_detection_thresholds():\n viewing_angle = (1.0 \/ 3.5) * (180.0 \/ __n.pi)\n rotations = [2.0, 4.0, 8.0, 16.0, 32.0]\n return map(lambda x: __ctf(x \/ viewing_angle), rotations)\n\n def __get_account_for_supra_threshold_effects(c, ci, a, ai):\n r = range(len(a))\n return [_get_masked(c[i], ci[i], a[i], ai[i], i + 1) for i in r]\n\n def __apply_detection_thresholds(c, ci, d, a, ai):\n A = [__get_thresh(c[i], d[i], a[i], False) for i in range(len(a))]\n AI = [__get_thresh(ci[i], d[i], ai[i], True) for i in range(len(a))]\n return (A, AI)\n\n def __reconstruct_images(A, AI):\n return map(lambda x: __n.add.reduce(x), (A, AI))\n\n def __compute_quality(imref, imquery):\n return snr(imref, imquery)\n\n def __get_ref_basis(ref_fft, query_fft, GS):\n (L_0, LI_0) = map(lambda x: GS[0] * x, (ref_fft, query_fft))\n (l_0, li_0) = map(lambda x: __n.real(__n.fft.ifft2(x)), (L_0, LI_0))\n return (l_0, li_0)\n\n def __compute_inverse_convolution(convolved_fft, GS):\n convolved = [GS[i] * convolved_fft for i in range(1, len(GS))]\n return map(lambda x: __n.real(__n.fft.ifft2(x)), convolved)\n\n def __correlate_in_fourier_domain(ref, query):\n (ref_fft, query_fft) = 
map(lambda x: __n.fft.fft2(x), (ref, query))\n GS = __compute_fft_plane_shifted(ref, query)\n (l_0, li_0) = __get_ref_basis(ref_fft, query_fft, GS)\n a = __compute_inverse_convolution(ref_fft, GS)\n ai = __compute_inverse_convolution(query_fft, GS)\n return (a, ai, l_0, li_0)\n\n def __get_correlated_images(ref, query):\n (a, ai, l_0, li_0) = __correlate_in_fourier_domain(ref, query)\n (c, ci) = __compute_contrast_images(a, ai, l_0, li_0)\n d = __get_detection_thresholds()\n ai = __get_account_for_supra_threshold_effects(c, ci, a, ai)\n return __apply_detection_thresholds(c, ci, d, a, ai)\n\n if not len(reference.shape) < 3:\n reference = __convert_to_luminance(reference)\n query = __convert_to_luminance(query)\n (A, AI) = __get_correlated_images(reference, query)\n (y1, y2) = __reconstruct_images(A, AI)\n y = __compute_quality(y1, y2)\n return y\n\n\ndef wsnr(reference, query):\n \"\"\"Computes the Weighted Signal to Noise Ratio (WSNR) metric.\n\n value = wsnr(reference, query)\n\n Parameters\n ----------\n reference: original image data.\n query : modified image data to be compared.\n\n Return\n ----------\n value : wsnr value\n \"\"\"\n def __genetate_meshgrid(x, y):\n f = lambda u: u \/ 2 + 0.5 - 1\n (H, W) = map(f, (x, y))\n return (H, W)\n\n def __create_complex_planes(x, y):\n (H, W) = __genetate_meshgrid(x, y)\n (xplane, yplane) = __n.mgrid[-H:H + 1, -W:W + 1]\n return (xplane, yplane)\n\n def __get_evaluated_contrast_sensivity(plane):\n w = 0.7\n angle = __n.angle(plane)\n return ((1.0 - w) \/ 2.0) * __n.cos(4.0 * angle) + (1.0 + w) \/ 2.0\n\n def __get_radial_frequency(x, y):\n (xplane, yplane) = __create_complex_planes(x, y)\n nfreq = 60\n plane = (xplane + 1.0j * yplane) \/ x * 2.0 * nfreq\n s = __get_evaluated_contrast_sensivity(plane)\n radfreq = abs(plane) \/ s\n return radfreq\n\n def __generate_CSF(radfreq):\n a = -((0.114 * radfreq) ** 1.1)\n csf = 2.6 * (0.0192 + 0.114 * radfreq) * __n.exp(a)\n f = radfreq < 7.8909\n csf[f] = 0.9809\n return csf\n\n def __weighted_fft_domain(ref, quer, csf):\n err = ref.astype('double') - quer.astype('double')\n err_wt = __fftshift(__n.fft.fft2(err)) * csf\n im = __n.fft.fft2(ref)\n return (err, err_wt, im)\n\n def __get_weighted_error_power(err_wt):\n return (err_wt * __n.conj(err_wt)).sum()\n\n def __get_signal_power(im):\n return (im * __n.conj(im)).sum()\n\n def __get_ratio(mss, mse):\n if mse != 0:\n ratio = 10.0 * __n.log10(mss \/ mse)\n else:\n ratio = float(\"inf\")\n return __n.real(ratio)\n\n if not len(reference.shape) < 3:\n reference = __convert_to_luminance(reference)\n query = __convert_to_luminance(query)\n size = reference.shape\n (x, y) = (size[0], size[1])\n radfreq = __get_radial_frequency(x, y)\n csf = __generate_CSF(radfreq)\n (err, err_wt, im) = __weighted_fft_domain(reference, query, csf)\n mse = __get_weighted_error_power(err_wt)\n mss = __get_signal_power(im)\n ratio = __get_ratio(mss, mse)\n return ratio\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_603","text":"naotohori\/cafysis\n#!\/usr\/bin\/env python\n# coding: utf-8\n\n# This program was originally coded by as simp.py\n\nimport scipy\nimport scipy.linalg\nimport pylab\nimport sys\n\nclass nya(object):\n def __init__(self, coords, pdb_Bfactors):\n self.coords = coords\n self.pdb_Bfactors = pdb_Bfactors\n self.num_atoms = len(self.coords)\n \n def cmpt_graph_mats(self, cutoff=10.0):\n dist = scipy.linalg.norm\n self.adj_mat = scipy.zeros((self.num_atoms, self.num_atoms))\n self.deg_mat = [0] * self.num_atoms\n for i in 
range(self.num_atoms - 1):\n for j in range(i + 1, self.num_atoms):\n if dist(self.coords[i, :] - self.coords[j, :]) <= cutoff:\n self.deg_mat[i] += 1.0\n self.deg_mat[j] += 1.0\n self.adj_mat[i, j] = 1.0\n self.adj_mat[j, i] = 1.0\n self.deg_mat = scipy.diag(self.deg_mat)\n self.lap_mat = self.deg_mat - self.adj_mat\n \n def cmpt_graph_eig(self):\n self.graph_eigval, self.graph_eigvec = scipy.linalg.eigh(self.lap_mat, self.deg_mat)\n\n def cmpt_hessian(self):\n self.hessian = scipy.zeros((3*self.num_atoms, 3*self.num_atoms))\n for i in range(self.num_atoms - 1):\n for j in range(i + 1, self.num_atoms):\n v_ij = self.coords[j, :] - self.coords[i, :]\n d2 = sum(v_ij * v_ij)\n for a in range(3):\n for b in range(3):\n self.hessian[3*i + a, 3*j + b] = -v_ij[a] * v_ij[b] \/ d2 * self.adj_mat[i, j]\n self.hessian[3*j + b, 3*i + a] = self.hessian[3*i + a, 3*j + b]\n for i in range(self.num_atoms):\n for a in range(3):\n for b in range(a, 3):\n for j in range(self.num_atoms):\n if j != i: \n self.hessian[3*i + a, 3*i + b] += -self.hessian[3*i + a, 3*j + b]\n self.hessian[3*i + b, 3*i + a] = self.hessian[3*i + a, 3*i + b]\n\n def cmpt_en_eig(self):\n self.en_eigval, self.en_eigvec = scipy.linalg.eigh(self.hessian)\n\n def cmpt_inverse_hessian(self):\n self.inverse_hessian = scipy.linalg.pinv(self.hessian)\n\n def cmpt_Bfactors(self):\n Bfactors = [self.inverse_hessian[3*i,3*i] +\n self.inverse_hessian[3*i+1, 3*i+1] +\n self.inverse_hessian[3*i+2, 3*i+2] \n for i in range(self.num_atoms)]\n k = sum(self.pdb_Bfactors) \/ sum(Bfactors)\n self.Bfactors = [Bfactors[i] * k for i in range(self.num_atoms)]\n\n def cmpt_cross_correlation(self):\n self.cross_correlation = scipy.zeros((self.num_atoms, self.num_atoms))\n self.norm_cross_correlation = scipy.zeros((self.num_atoms, self.num_atoms))\n for i in range(self.num_atoms):\n for j in range(i, self.num_atoms):\n self.cross_correlation[i, j] = (self.inverse_hessian[3*i, 3*j] + \n self.inverse_hessian[3*i+1, 3*j+1] +\n self.inverse_hessian[3*i+2, 3*j+2])\n self.cross_correlation[j, i] = self.cross_correlation[i, j]\n for i in range(self.num_atoms):\n for j in range(i, self.num_atoms):\n if i == j:\n self.norm_cross_correlation[i, i] = 1.0\n else:\n self.norm_cross_correlation[i, j] = (\n self.cross_correlation[i, j] \/\n scipy.sqrt(self.cross_correlation[i, i] * \n self.cross_correlation[j, j]))\n self.norm_cross_correlation[j, i] = self.norm_cross_correlation[i, j]\n \n \ndef get_lines(filename):\n lines = []\n for line in open(filename):\n if (line[0:6] == \"ATOM \" and\n line[12:16] == \" CA \" and\n (line[16:17] == \" \" or line[16:17] == \"A\") and\n line[21:22] == \"A\"):\n lines.append(line)\n return lines\n\ndef get_coords(lines):\n def ext_coords(line):\n try:\n x = float(line[30:38])\n y = float(line[38:46])\n z = float(line[46:54])\n except ValueError:\n print(\"Invalid format(coords)\")\n print(line)\n quit()\n return (x, y, z)\n return [ext_coords(line) for line in lines]\n\ndef get_Bfactors(lines):\n def ext_Bfactors(line):\n try:\n b = float(line[60:66])\n except ValueError:\n print(\"Invalid format(B-factors)\")\n print(line)\n quit()\n return b\n return [ext_Bfactors(line) for line in lines]\n\ndef plot_figs():\n pylab.subplot(221, aspect=\"equal\")\n X, Y = pylab.meshgrid(list(range(ins.num_atoms)), list(range(ins.num_atoms)))\n pylab.pcolor(X, Y, ins.norm_cross_correlation)\n pylab.colorbar()\n pylab.clim(-0.15, 0.15)\n pylab.title(\"Cross Correlations\")\n\n pylab.subplot(222)\n pylab.plot(pdb_Bfactors, \"bo-\", label=\"ex.\")\n 
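# ins.Bfactors were rescaled in cmpt_Bfactors so that sum(calc.) matches sum(exp.); only the relative profiles are comparable\n 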
pylab.plot(ins.Bfactors, \"ro-\", label=\"calc.\")\n pylab.legend()\n pylab.xlabel(\"Residue\")\n# pylab.ylabel(\"a.u.\")\n pylab.title(\"B factors\")\n pylab.grid()\n\n pylab.subplot(223, aspect=\"equal\")\n X, Y = pylab.meshgrid(list(range(ins.num_atoms)), list(range(ins.num_atoms)))\n pylab.pcolor(X, Y, ins.adj_mat)\n pylab.colorbar()\n pylab.title(\"Adjacency Mat.\")\n\n pylab.subplot(224)\n pylab.plot(ins.graph_eigvec[:, 1], \"go-\")\n pylab.xlabel(\"Residue\")\n pylab.grid()\n\n pylab.show()\n\n\nif __name__ == \"__main__\":\n\n filename = sys.argv[1]\n\n lines = get_lines(filename)\n coords = scipy.array(get_coords(lines))\n pdb_Bfactors = get_Bfactors(lines)\n\n ins = nya(coords, pdb_Bfactors)\n ins.cmpt_graph_mats()\n ins.cmpt_graph_eig()\n ins.cmpt_hessian()\n ins.cmpt_en_eig()\n ins.cmpt_inverse_hessian()\n ins.cmpt_Bfactors()\n ins.cmpt_cross_correlation()\n\n plot_figs()\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_604","text":"0\nimport numpy as np\nimport pandas as pd\nfrom scipy.spatial.distance import cdist, pdist, squareform\nimport random\nfrom geopy import distance\nimport geoplotlib\nfrom geoplotlib.utils import BoundingBox\nfrom geoplotlib.layers import BaseLayer\nfrom geoplotlib.core import BatchPainter\n\n\ndef geodesic_dist(p1, p2):\n \"\"\"\n Compute geodesic distance between points\n p1 and p2 described by latitude and longitude.\n\n Args:\n p1 (numpy.ndarray): The first point\n p2 (numpy.ndarray): The second point\n \"\"\"\n return distance.distance(p1, p2).m\n\n\ndef group_distance(g1, g2, method='average-linkage'):\n \"\"\"\n Compute distance between the specified groups using specified method and\n metric.\n\n Args:\n g1 (list): The first group\n g2 (list): The second group\n method (str): The method to use (single-linkage, complete-linkage or average-linkage)\n\n Returns:\n (float): The evaluated distance between the groups.\n \"\"\"\n \n # Compute distance between groups using specified metric and method.\n return np.mean(np.ravel(cdist(np.vstack(g1), np.vstack(g2), metric='cityblock')))\n\n\ndef get_groups(data, delta_condition):\n \"\"\"\n Merge close nodes into clusters using agglomerative procedure.\n\n Args:\n data (np.ndarray): Data points represented as a numpy array\n delta_condition (float): Distance limit for considering nodes to\n be part of same cluster.\n\n Returns:\n (list): List of clusters represented as numpy arrays.\n \"\"\"\n \n # Initialize list for storing the groups.\n groups = []\n \n # Go over all data-points.\n for data_idx in range(data.shape[0]):\n\n # Initialize list for storing indices of merged groups.\n to_remove = []\n\n # Consider the next point as a single group.\n group_nxt = [data[data_idx, :]]\n\n # Go over all existing groups.\n for idx, group in enumerate(groups):\n \n # Compute distance to next group.\n dist = group_distance(group_nxt, group)\n\n # If distance below set threshold, merge groups.\n if dist < delta_condition:\n group_nxt = group + group_nxt\n\n # Add index of merged group to be removed later.\n to_remove.append(idx)\n \n # Remove groups that were merged.\n for rem_idx in sorted(to_remove, reverse=True):\n del groups[rem_idx] \n to_remove = []\n\n # Append next found group to list of groups.\n groups.append(group_nxt)\n\n # Stack data points in groups into numpy arrays.\n return list(map(np.vstack, groups))\n\n\ndef get_medoids(groups):\n \"\"\"\n Get medoids of found groups and stack them\n into a numpy array.\n\n Args:\n groups (list): List of groups\n\n 
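Note: the medoid of each group is the member with the smallest summed cityblock distance to the other members.\n\n 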
Returns:\n (numpy.ndarray): Array of found medoids.\n \"\"\"\n\n # Initialize list for found medoids.\n medoids = []\n\n # Go over groups and compute medoids.\n for group in groups:\n idx_min = np.argmin(np.sum(squareform(pdist(group, metric='cityblock')), axis=0))\n medoids.append(group[idx_min, :])\n \n # Stack medoids into numpy array.\n return np.vstack(medoids)\n\n\ndef get_grid(n_samples=10000, min_dist=10, return_sample=False):\n \"\"\"\n Get grid of points using the sample and cluster process.\n\n Args:\n n_samples (int): Number of samples to use in the process\n min_dist (float): Distance limit for considering nodes to\n be part of same cluster.\n return_sample (bool): If true, return all the sampled nodes\n along the filtered ones as a second return value.\n\n Returns:\n (numpy.ndarray): Spatial points forming the grid as well as the corresponding\n sample indices.\n \"\"\"\n\n # Parse list of latitude and longitude values and join.\n df = pd.read_csv('.\/data\/trip_data\/sampled.csv')\n lat_1 = df['Pickup_latitude'].to_numpy()\n lon_1 = df['Pickup_longitude'].to_numpy()\n lat_2 = df['Dropoff_latitude'].to_numpy()\n lon_2 = df['Dropoff_longitude'].to_numpy()\n lat_all = np.hstack((lat_1, lat_2))\n lon_all = np.hstack((lon_1, lon_2))\n data = np.vstack((lat_all, lon_all)).T\n\n # Sample spatial points for grid generation using specified sample size.\n sample_indices = random.sample(range(data.shape[0]), n_samples)\n node_sample = data[sample_indices, :]\n\n # Join nodes in clusters and find medoids.\n clusters = get_groups(node_sample, min_dist)\n nodes_filtered = get_medoids(clusters)\n return nodes_filtered if not return_sample else (nodes_filtered, node_sample)\n\n\ndef draw_grid(nodes, unfiltered=None):\n \"\"\"\n Draw grid using computed nodes.\n\n Args:\n nodes (numpy.ndarray): Data points to plot\n unfiltered (numpy.ndarray): Unfiltered data points. 
If not None,\n plot using different color.\n \"\"\"\n \n # Layer for plotting the nodes\n class PointsLayer(BaseLayer):\n\n def __init__(self, data, color, point_size):\n self.data = data\n self.color = color\n self.point_size = point_size\n\n def invalidate(self, proj):\n x, y = proj.lonlat_to_screen(self.data['lon'], self.data['lat'])\n self.painter = BatchPainter()\n self.painter.set_color(self.color)\n self.painter.points(x, y, point_size=self.point_size, rounded=True)\n\n def draw(self, proj, mouse_x, mouse_y, ui_manager):\n self.painter.batch_draw()\n\n # Get grid node data into dict format.\n data_grid = {\n 'lat' : nodes[:, 0],\n 'lon' : nodes[:, 1]\n }\n \n # If unfiltered nodes specified, get data into dict format.\n if unfiltered is not None:\n data_unfiltered = {\n 'lat' : unfiltered[:, 0],\n 'lon' : unfiltered[:, 1]\n }\n \n # If unfiltered nodes specified, plot on layer.\n if unfiltered is not None:\n geoplotlib.add_layer(PointsLayer(data_unfiltered, color=[255, 0, 0], point_size=4))\n\n # Plot grid nodes.\n geoplotlib.add_layer(PointsLayer(data_grid, color=[0, 0, 255], point_size = 7))\n \n # Set bounding box and show.\n geoplotlib.set_bbox(BoundingBox(north=40.897994, west=-73.199040, south=40.595581, east=-74.55040))\n geoplotlib.show()\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_605","text":"ClandininLab\/pasna-acr-currbio2021\nimport itertools\nimport numpy as np\nimport scipy as sp\nimport seaborn as sns\n\ndef nansem(a, axis=0, ddof=1, nan_policy='omit'):\n '''\n Returns standard error of the mean, while omitting nan values.\n '''\n return sp.stats.sem(a, axis, ddof, nan_policy)\n\ndef mean_ci(data, ci=95, axis=0, bootstrap=True, n_boot=10000):\n '''\n Returns mean and 95% confidence intervals, computed by bootstrapping\n '''\n a = 1.0 * np.array(data)\n m = np.nanmean(a, axis=axis)\n if bootstrap:\n boots = sns.algorithms.bootstrap(a, n_boot=1000, func=np.nanmean, axis=axis)\n ci_lo, ci_hi = sns.utils.ci(boots, ci, axis=axis)\n else:\n se = nansem(a, axis=axis)\n h = se * sp.stats.t.ppf((1 + ci\/100) \/ 2., len(a)-1)\n ci_lo, ci_hi = m-h, m+h\n return m, ci_lo, ci_hi\n\ndef flatten_nested_list(list_of_lists):\n '''\n Flattens a list of lists to a list\n '''\n return list(itertools.chain(*list_of_lists))\n\ndef uneven_list2d_to_np(v, fillval=np.nan):\n '''\n Given a list of uneven lists, returns a 2-dimensional numpy array in which all lists are padded with fillval\n to the length of the longest list.\n '''\n lens = np.array([len(item) for item in v])\n if len(np.unique(lens)) == 1:\n return np.asarray(v)\n mask = lens[:,None] > np.arange(lens.max())\n out = np.full(mask.shape,fillval)\n out[mask] = np.concatenate(v)\n return out\n\n\ndef generate_standard_timestamp(timestamps, trim=False, min_time=None, max_time=None):\n '''\n Finds mean framerate and generates a single timestamp series starting from 0 evenly spaced to the max timestamp.\n\n timestamps: 2d numpy array with nan padding for uneven timestamp lengths\n \n If trim=True, finds the largest of the leftmost timestamps and the smallest of the rightmost timestamps.\n If min_time or max_time is defined, that value is used regardless of trim.\n '''\n if not isinstance(timestamps, np.ndarray):\n timestamps = uneven_list2d_to_np(timestamps)\n mean_diff = np.nanmean(np.diff(timestamps))\n if trim:\n min_time = np.nanmax(np.nanmin(timestamps,axis=1)) if min_time is None else min_time\n max_time = np.nanmin(np.nanmax(timestamps,axis=1)) if max_time is None else max_time\n 
else:\n min_time = np.nanmin(timestamps) if min_time is None else min_time\n max_time = np.nanmax(timestamps) if max_time is None else max_time\n\n return np.arange(min_time, max_time, mean_diff)\n\ndef interpolate_to_new_timestamp(y, t, nt):\n '''\n y: 1d data, length same as t\n t: original timestamp\n nt: new timestamp to interpolate to\n Returns ny, linearly interpolated data at nt\n '''\n not_nan = ~np.isnan(y)\n return np.interp(nt, t[not_nan], y[not_nan], left=np.nan, right=np.nan)\n\n\ndef align_traces_to_standardized_timestamp(ts, xs, ts_standard=None, trim=False, min_time=None, max_time=None):\n '''\n Given ts and xs, 2d numpy arrays representing timestamps and corresponding values, \n returns xs_standardized, xs values interpolated to a standardized timestamp, ts_standard.\n If ts_standard is not provided, it is computed.\n '''\n if ts_standard is None:\n ts_standard = generate_standard_timestamp(ts, trim=trim, min_time=min_time, max_time=max_time)\n xs_standardized = np.array([interpolate_to_new_timestamp(xs[i], ts[i], ts_standard) for i in range(len(xs))])\n\n return ts_standard, xs_standardized"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_606","text":"barry\/postprocessing\/bao_extractor.py\nimport numpy as np\nfrom barry.postprocessing.postprocessor import PkPostProcess\n\n\nclass PureBAOExtractor(PkPostProcess):\n \"\"\" The pure BAO extractor detailed in Noda 2017 (1705.01475), Nishimishi 2018 (1708.00375), Noda 2019 (1901.06854)\n\n See https:\/\/ui.adsabs.harvard.edu\/abs\/2017JCAP...08..007N\n See https:\/\/ui.adsabs.harvard.edu\/abs\/2018JCAP...01..035N\n See https:\/\/ui.adsabs.harvard.edu\/abs\/2019arXiv190106854N\n\n Parameters\n ----------\n r_s : float\n The sound horizon distance. In units of Mpc\/h\n plot : bool, optional\n Whether to output debugging plots\n delta : float, optional\n The window (in units of `r_s` to smooth)\n \"\"\"\n\n def __init__(self, r_s, plot=False, delta=0.6):\n super().__init__()\n self.r_s = r_s\n self.plot = plot\n self.delta = delta\n\n def get_krange(self):\n r\"\"\" Returns $k_s \\Delta$ as defined in Eq 6 of Nishimishi 2018\"\"\"\n k_s = 2 * np.pi \/ self.r_s # BAO Wavenumber\n k_range = self.delta * k_s # Range of k to sum over\n return k_range\n\n def postprocess(self, ks, pk, mask, return_denominator=False, plot=False):\n \"\"\" Runs the BAO Extractor method and returns the extracted BAO signal.\n\n Warning that this is the estimator given in Eq5 Nishimichi 2018\n\n As such, make sure your k values are finely sampled and linearly spaced. 
Alas for\n our data, this isn't always possible to do because the window function wrecks us.\n\n Parameters\n ----------\n ks : np.array\n The k values for the BAO power spectrum\n pk : np.array\n The power spectrum at `ks`\n\n Returns\n -------\n\n \"\"\"\n k_range = self.get_krange()\n\n result = []\n denoms = []\n for k, p in zip(ks, pk):\n k_diff = np.abs(ks - k)\n m = k_diff < k_range\n numerator = (1 - (pk[m] \/ p)).sum()\n denominator = (1 - np.cos(self.r_s * (ks[m] - k))).sum()\n res = numerator \/ denominator\n denoms.append(denominator)\n result.append(res)\n result = np.array(result)\n\n # Plots for debugging purposes to make sure everything looks good\n if self.plot:\n import matplotlib.pyplot as plt\n\n fig, axes = plt.subplots(nrows=2, figsize=(5, 7))\n axes[0].plot(ks, pk, label=\"Input\")\n axes[1].plot(ks, result, label=\"Output\")\n plt.show()\n\n if mask is None:\n mask = np.ones(result.shape).astype(np.bool)\n\n # Optionally return the denominator instead\n # Used for manually verifying the correctness of the covariance\n # described in Eq7 (and Noda2019 eq 21,22,23)\n if return_denominator:\n return np.array(denoms)[mask]\n return result[mask]\n\n\nclass BAOExtractor(PureBAOExtractor):\n \"\"\" Implements the mix of BAO extractor and power spectrum as defined in Noda 2019, with\n index mixing taken from page 9, paragraph 1 and confirmed via private communication:\n\n pi_i = {1, 2, 3, 7, 15}\n rho_i = {4,5,6,8,9,10,11,12,13,14,16,17,18,19,20,21,22,23,24,25}\n\n \"\"\"\n\n def __init__(self, r_s, plot=False, delta=0.6, mink=0.06, extra_ks=(0.0925, 0.1775), reorder=True, invert=False):\n super().__init__(r_s, plot=plot, delta=delta)\n self.mink = mink\n self.extra_ks = extra_ks\n self.reorder = reorder\n self.invert = invert\n\n def get_is_extracted(self, ks):\n # Use indexes to blend the two together\n indices = np.array(list(range(ks.size)))\n extra = None\n for e in self.extra_ks:\n ind = np.argmin(np.abs(ks - e))\n if extra is None:\n extra = indices == ind\n else:\n extra |= indices == ind\n mask_power = (ks < self.mink) | extra\n if self.invert:\n return mask_power\n else:\n return ~mask_power\n\n def postprocess(self, ks, pk, mask):\n \"\"\" Process the power spectrum to get a mix of extracted BAO and P(k)\n\n Parameters\n ----------\n ks : np.ndarray\n Wavenumbers\n pk : np.ndarray\n Power at wavenumber\n mask : np.ndarray (bool mask), optional\n Which k values to return at the end. 
Used to remove k values below \/ above certain values.\n I pass them in here because if we reorder the k values the masking cannot be done outside this function.\n \"\"\"\n if mask is None:\n mask = np.ones(pk.shape).astype(np.bool)\n extracted_pk = super().postprocess(ks, pk, None)\n mask_bao = self.get_is_extracted(ks)\n if self.reorder:\n result = np.concatenate((pk[mask & ~mask_bao], extracted_pk[mask & mask_bao]))\n else:\n mask_int = mask_bao.astype(np.int)\n result = (extracted_pk * (mask_int) + pk * (1 - mask_int))[mask]\n return result\n\n\nif __name__ == \"__main__\":\n from barry.cosmology import CambGenerator\n\n camb = CambGenerator(om_resolution=10, h0_resolution=1)\n ks = camb.ks\n print(ks.shape)\n r_s, pk_lin, _ = camb.get_data(0.3, 0.70)\n\n from scipy.interpolate import splev, splrep\n\n rep = splrep(ks, pk_lin)\n # ks2 = np.linspace(ks.min(), 1, 1000)\n ks2 = np.linspace(0, 0.398, 100) # Matching the winfit_2 data binning\n pk_lin2 = splev(ks2, rep)\n\n print(\"Got pklin\")\n b = BAOExtractor(r_s)\n pk_extract = b.postprocess(ks2, pk_lin2)\n print(\"Got pk_extract\")\n\n import matplotlib.pyplot as plt\n\n fig, axes = plt.subplots(nrows=2, figsize=(5, 9), sharex=True)\n axes[0].plot(ks2, pk_lin2)\n axes[0].set_title(\"pk_lin\")\n axes[1].plot(ks2, pk_extract)\n axes[1].set_title(\"Extracted BAO, using winfit_2 bins (0, 0.398, 100)\")\n plt.show()\n\n from barry.datasets.mock_power import MockPowerSpectrum\n\n dataset = MockPowerSpectrum(name=\"Recon mean\", recon=True, min_k=0.02, step_size=2, postprocess=b)\n data = dataset.get_data()\n import seaborn as sb\n\n sb.heatmap(data[\"corr\"])\n plt.show()\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_607","text":"hw4\/c\/spectral_clustering.py\n#spectral_clustering:\n#1)consruct a matrix representation of the graph;\n#2)matrix decomposition:compute eigenvalue and eigenvectors of the matrix,map each point to a lower dimension\n#3)clustering\nimport time,operator\nimport networkx as nx\nimport numpy as np\nimport scipy,time\nfrom scipy.sparse.linalg import eigs\nfrom sklearn.cluster import KMeans\n\n#construct a grapgh of data\ndef parse(filename):\n G = nx.Graph()\n data = open(filename)\n n1 = []\n n2 = []\n edges=[]\n for i, rows in enumerate(data):\n if '#' in rows:\n continue\n rows = rows.strip().split('\\t')\n node_a = int(rows[0])\n node_b = int(rows[1])\n G.add_edge(node_a, node_b)\n return G\n\n#consruct a matrix representation of the graph\ndef getLaplacian(m):\n print time.ctime()\n d=[row.sum() for row in m] \n print time.ctime()\n D=scipy.sparse.diags(d,0)\n print D\n print time.ctime()\n L=D-m\n print time.ctime()\n return L\n\n#matrix decomposition \ndef getLowerDimension(w,k):\n print w.shape\n eigValue,eigVec = eigs(w,k=100,which='LR')\n import cPickle\n print 'finished'\n print time.ctime()\n dim = len(eigValue)\n print dim\n print eigValue\n return eigVec\n\nif __name__ == '__main__':\n print time.ctime()\n graph = parse('com-youtube.ungraph.txt')\n laplacianMat = nx.laplacian_matrix(graph)\n print time.ctime()\n print time.ctime()\n fout = open('spectral_clustering_result.txt','w')\n np_matrix = nx.to_scipy_sparse_matrix(graph,dtype=np.float32)\n print 'begin'\n lapW = getLaplacian(np_matrix)\n print 'end'\n reduced_matrix = getLowerDimension(lapW,100)\n print reduced_matrix.shape\n print time.ctime()\n #use Kmean to clustering\n kmeans_model = KMeans(n_clusters=100,init='k-means++',n_init=10).fit(reduced_matrix)\n print time.ctime()\n count=0\n for i in 
kmeans_model.labels_:\n count=count+1\n fout.write(str(count)+'\t'+str(i)+'\\n')\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_593","text":"__author__ = 'dvgodoy'\n\nfrom neurons import Input, Hidden, Output\nfrom losses import LogLoss\nfrom scipy.io import loadmat\nimport os\n\nfrom activations import SigmoidActivation, ReluActivation, TanhActivation\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\nclass Layer(object):\n classes = ['Input', 'Hidden', 'Output']\n\n def __init__(self, n_units, layer_type, **kwargs):\n assert layer_type in self.classes\n self._charged = False\n self.n_units = n_units\n self.layer_type = layer_type\n neuron = globals()[layer_type]\n self.units = [neuron(**kwargs) for _ in range(n_units)]\n\n def connect(self, layer):\n assert isinstance(layer, Layer)\n for source in self.units:\n for destination in layer.units:\n source.connect(destination)\n\n def fire(self):\n mu, var = self.batch_norm()\n for unit in self.units:\n unit.fire(mu, var)\n\n def feedback(self):\n for unit in self.units:\n unit.feedback()\n\n def examples(self, X):\n assert X.shape[0] == self.n_units\n for i, unit in enumerate(self.units):\n unit.examples(X[i, np.newaxis, :])\n\n def responses(self, Y):\n assert Y.shape[0] == self.n_units\n for i, unit in enumerate(self.units):\n unit.responses(Y[i, np.newaxis, :])\n\n def batch_norm(self):\n mu, var = 0.0, 1.0\n if np.all([unit.charged for unit in self.units]):\n z = np.array([unit._z for unit in self.units])\n mu = z.mean(axis = 0)\n var = z.var(axis = 0)\n return mu, var\n\n @property\n def activations(self):\n activations = np.array([unit.activations for unit in self.units])\n return activations.reshape(activations.shape[0], -1)\n\n @property\n def weights(self):\n weights = np.array([unit.weights for unit in self.units])\n return weights.reshape(weights.shape[0], -1)\n\n @property\n def biases(self):\n biases = np.array([unit.biases for unit in self.units])\n return biases.reshape(biases.shape[0], -1)\n\n @property\n def cost(self):\n return np.array([unit.cost for unit in self.units])\n\n\nif __name__ == '__main__':\n np.random.seed(13)\n\n name = 'var_u'\n data = loadmat(os.path.join('..\/data', name + '.mat'))\n\n sigmoid = SigmoidActivation()\n relu = ReluActivation()\n tanh = TanhActivation()\n\n l_i = Layer(12, 'Input')\n l_h1 = Layer(10, 'Hidden', activation_function=relu)\n l_h2 = Layer(7, 'Hidden', activation_function=relu)\n l_h3 = Layer(5, 'Hidden', activation_function=relu)\n l_h4 = Layer(4, 'Hidden', activation_function=relu)\n l_h5 = Layer(3, 'Hidden', activation_function=relu)\n l_o = Layer(1, 'Output', activation_function=sigmoid, loss_function=LogLoss(), learning_rate=0.0004)\n layers = [l_i, l_h1, l_h2, l_h3, l_h4, l_h5, l_o]\n\n l_i.connect(l_h1)\n l_h1.connect(l_h2)\n l_h2.connect(l_h3)\n l_h3.connect(l_h4)\n l_h4.connect(l_h5)\n l_h5.connect(l_o)\n\n epochs = 10000\n h1_weight_history = []\n #h2_weight_history = []\n #h3_weight_history = []\n cost_history = []\n examples = data['F'].reshape(12, -1)\n responses = data['y']\n batch_size = 4096\n for epoch in range(epochs):\n if not (epoch % 100):\n print(epoch)\n for n_batch in range(1):\n examples = data['F'].reshape(12, -1)[:, (n_batch * batch_size):(n_batch * batch_size + batch_size)]\n responses = data['y'][:, (n_batch * batch_size):(n_batch * batch_size + batch_size)]\n l_i.examples(examples)\n l_o.responses(responses)\n for layer in layers:\n layer.fire()\n cost_history.append(l_o.cost)\n 
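# keep this epoch's first-hidden-layer weights for the (currently commented-out) mean\/std diagnostics below\n 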
h1_weight_history.append(l_h1.weights)\n #h2_weight_history.append(l_h2.weights)\n #h3_weight_history.append(l_h3.weights)\n for layer in layers[::-1]:\n layer.feedback()\n #print(l_o.activations)\n #print(l_o.cost)\n print(l_o.activations)\n predictions = l_o.activations > 0.5\n print((predictions == responses).mean())\n #h1_mean = [w.mean() for w in h1_weight_history]\n #h2_mean = [w.mean() for w in h2_weight_history]\n #h3_mean = [w.mean() for w in h3_weight_history]\n #h1_std = [w.std() for w in h1_weight_history]\n #h2_std = [w.std() for w in h2_weight_history]\n #h3_std = [w.std() for w in h3_weight_history]\n #plt.plot(h1_mean, 'k')\n #plt.plot(h2_mean, 'r')\n #plt.plot(h3_mean, 'g')\n #plt.plot(h1_std, 'k--')\n #plt.plot(h2_std, 'r--')\n #plt.plot(h3_std, 'g--')\n plt.plot(cost_history)\n plt.show()"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_224","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.optimize\n\ndef funzione (t, A, tau, w, phi, Vbias) :\n return A * np.exp(-t\/tau) * np.cos(w*t+phi) + Vbias\n\nt, ddp = np.genfromtxt(\"long3.txt\", unpack=True)\n\nx = np.linspace(0, 100000, 4000)\nplt.figure()\nplt.plot(t, ddp, 'o')\nplt.show()"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_226","text":"RoundTrQBlackScholes.py\n\"\"\"\nAuthor : \nDate : 2019-08-04\nDesc : Valuing Option Price under Transaction Cost. Re-hedging with round trip method.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom numpy.random import standard_normal, seed\nfrom scipy.stats import norm\n\nimport sys\n\nimport datetime\nimport time\nimport bspline\nimport bspline.splinelab as splinelab\n\nREG_PARAM = 1e-3\n\n\nclass RoundTrQBlackScholes:\n def __init__(self,\n mu: float,\n vol: float,\n s0: float,\n T: int,\n K : float,\n r: float,\n num_steps: int,\n num_paths: int,\n risk_lambda: float,\n tr_alpha: float):\n self.mu = mu\n self.vol = vol\n self.s0 = s0\n self.T = T\n self.K = K\n self.r = r\n self.num_steps = num_steps\n self.num_paths = num_paths\n self.risk_lambda = risk_lambda\n\n self.dt = T \/ num_steps\n self.gamma = np.exp(-r * self.dt)\n self.tr_alpha = tr_alpha\n\n self.s_values = np.zeros((self.num_paths, self.num_steps + 1), 'float')\n self.delta_S = None\n self.s_values[:, 0] = s0 * np.ones(self.num_paths, 'float')\n\n self.opt_hedge = np.zeros((self.num_paths, self.num_steps + 1), 'float')\n\n self.X = None\n self.data = None\n self.delta_S_hat = None\n coef = 1.0 \/ (2 * self.gamma * self.risk_lambda)\n self.coef = coef\n\n self.pi = np.zeros((self.num_paths, self.num_steps + 1), 'float')\n self.pi_hat = np.zeros((self.num_paths, self.num_steps + 1), 'float')\n\n self.q = np.zeros((self.num_paths, self.num_steps + 1), 'float')\n self.r = np.zeros((self.num_paths, self.num_steps + 1), 'float')\n\n def gen_path(self):\n # Path Generator (Black Scholes )\n seed(42)\n\n for i in range(1, self.num_steps + 1):\n std_norm = standard_normal(self.num_paths)\n exp_pow = (self.mu - self.vol ** 2 \/ 2) * self.dt \\\n + self.vol * np.sqrt(self.dt) * std_norm\n self.s_values[:, i] = self.s_values[:, i - 1] * np.exp(exp_pow)\n\n delta_S = (1 - self.tr_alpha) * self.s_values[:, 1:] - 1 \/ self.gamma * self.s_values[:, :self.num_steps]\n self.delta_S = delta_S\n self.delta_S_hat = np.apply_along_axis(lambda x: x - np.mean(x), axis=0, arr=delta_S)\n self.X = - (self.mu - 0.5 * self.vol ** 2) * np.arange(self.num_steps + 1) * self.dt + np.log(self.s_values)\n\n X_min = 
np.min(np.min(self.X))\n X_max = np.max(np.max(self.X))\n\n print(\"Shape of X : {} \\n Max : {} \\n Min : {}\".format(self.X.shape, X_max, X_min))\n\n self.pi[:, -1] = np.maximum(self.s_values[:, -1] - self.K, 0)\n self.pi_hat[:, -1] = self.pi[:, -1] - np.mean(self.pi[:, -1])\n\n self.q[:, -1] = -self.pi[:, -1] - self.risk_lambda * np.var(self.pi[:, -1])\n self.r[:, -1] = -self.risk_lambda * np.var(self.pi[:, -1])\n\n p = 4\n ncolloc = 12\n tau = np.linspace(X_min, X_max, ncolloc)\n\n k = splinelab.aptknt(tau, p)\n basis = bspline.Bspline(k, p)\n\n num_basis = ncolloc\n self.data = np.zeros((self.num_steps + 1, self.num_paths, num_basis))\n\n t0 = time.time()\n for ix in np.arange(self.num_steps + 1):\n x = self.X[:, ix]\n self.data[ix, :, :] = np.array([basis(el) for el in x])\n t1 = time.time()\n print(\"\\nTime for basis expansion {}\".format(t1 - t0))\n\n def function_A_vec(self, t, reg_param=1e-3):\n \"\"\" Equation for matrix A\n self.data : T x N_MC x num_basis\n self.delta_S_hat : N_MC x T\n :param t:\n :param reg_param:\n :return:\n \"\"\"\n x_data = self.data[t, :, :]\n num_basis_funcs = x_data.shape[1]\n self_dS = self.delta_S_hat[:, t]\n # hat_dS2 = (self_dS ** 2).reshape(-1, 1)\n # mat_A = np.dot(x_data.T, x_data * hat_dS2)\n x_data = x_data.T * self_dS\n mat_A = x_data @ x_data.T # Wrong line, its result is scalar ???\n\n\n\n return mat_A + reg_param * np.eye(num_basis_funcs)\n\n def function_B_vec(self, t, pi_hat):\n x_data = self.data[t, :, :]\n this_dS = self.delta_S_hat[:, t]\n # coef = 1 \/ (2 * self.gamma * self.risk_lambda)\n coef = 0\n mat_B = x_data.T @ (pi_hat * this_dS + coef * self.delta_S[:, t])\n\n return mat_B\n\n def roll_backward_hedge(self):\n \"\"\"\n Roll backward and get the price and optimal hedge vals\n :return:\n \"\"\"\n for t in range(self.num_steps - 1, -1, -1):\n pi_next = self.pi[:, t + 1]\n pi_next_prime = pi_next + self.tr_alpha * self.opt_hedge[:, t + 1] * self.s_values[:, t + 1]\n pi_prime_hat = pi_next_prime - np.mean(pi_next_prime)\n\n mat_A = self.function_A_vec(t, REG_PARAM)\n vec_B = self.function_B_vec(t, pi_prime_hat)\n\n phi = np.linalg.inv(mat_A) @ vec_B\n self.opt_hedge[:, t] = np.dot(self.data[t, :, :], phi)\n self.pi[:, t] = self.gamma * (pi_next_prime - self.opt_hedge[:, t] * self.delta_S[:, t])\n self.r[:, t] = self.gamma * self.opt_hedge[:, t] * self.delta_S[:, t] \\\n - self.risk_lambda * np.var(self.pi[:, t])\n\n\n def function_C_vec(self, t, reg_param):\n this_data = self.data[t, :, :]\n mat_C = this_data.T @ this_data\n return mat_C + reg_param * np.eye(this_data.shape[1])\n\n\n def function_D_vec(self, t):\n this_data = self.data[t, :, :]\n this_q = self.q[:, t + 1]\n this_r = self.r[:, t]\n vec_D = this_data.T @ (this_r + self.gamma * this_q\n - self.tr_alpha * self.opt_hedge[:, t + 1] * self.s_values[:, t + 1])\n return vec_D\n\n def roll_backward_q(self):\n \"\"\"\n Roll backward to get q values\n :return:\n \"\"\"\n start_time = time.time()\n for t in range(self.num_steps - 1, -1, -1):\n c_mat = self.function_C_vec(t, REG_PARAM)\n d_vec = self.function_D_vec(t)\n omega = np.linalg.inv(c_mat) @ d_vec\n\n self.q[:, t] = self.data[t, :, :] @ omega\n print(\"\\n Time : \", time.time() - start_time)\n\n\nif __name__ == \"__main__\":\n trMC = RoundTrQBlackScholes(0.02, 0.2, 100, 1, 0.04, 25200, 1000, 0.001, 0.001)\n trMC.gen_path()\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_227","text":"from basic import *\nfrom all_genes import all_genes\nfrom scipy.cluster import hierarchy\nfrom 
scipy.spatial import distance\nimport os\nimport html_colors\nimport parse_tsv\n\nverbose = __name__ == '__main__'\n\n# these helper functions used to do more work, now it's a little silly...\n\ndef get_rep( gene, organism ):\n assert gene.startswith('TR')\n return all_genes[organism][gene].rep\n\ndef get_mm1_rep( gene, organism ):\n assert gene.startswith('TR')\n return all_genes[organism][gene].mm1_rep\n\ndef get_rep_ignoring_allele( gene, organism ):\n rep = get_rep( gene, organism )\n rep = rep[:rep.index('*')]\n return rep\n\ndef get_mm1_rep_gene_for_counting( allele, organism ):\n return all_genes[organism][allele].count_rep\n\ndef countreps_from_genes( genes, organism ):\n return set( ( all_genes[organism][x].count_rep for x in genes ) )\n\n\ndef tree_sort( old_l, distances, return_leaves=True ): ## average linkage\n assert len(distances) == len(old_l)\n\n if len(old_l)==1:\n leaves = [0]\n else:\n y = distance.squareform( distances, checks=True )\n Z = hierarchy.average( y )\n #c,coph_dists = hierarchy.cophenet(Z,y)\n leaves = hierarchy.leaves_list( Z )\n\n new_l = [ old_l[x] for x in leaves ]\n\n if not return_leaves:\n return new_l\n else:\n return new_l, leaves\n\ndef get_top_genes( blast_hits_string ):\n hits = dict( [ ( x.split(':')[0], int( x.split(':')[1] ) ) for x in blast_hits_string.split(';') ] )\n top_score = max( hits.values() )\n return set( [ x for x,y in hits.iteritems() if y >= top_score ] )\n\ndef get_top_reps( blast_hits_string, organism ):\n hits = dict( [ ( x.split(':')[0], int( x.split(':')[1] ) ) for x in blast_hits_string.split(';') ] )\n top_score = max( hits.values() )\n return set( [ all_genes[organism][x].rep for x,y in hits.iteritems() if y >= top_score ] )\n\n\ndef reps_from_genes( genes, organism, mm1=False, trim_allele=False ):\n reps = set( ( all_genes[organism][x].mm1_rep for x in genes ) ) if mm1 else \\\n set( ( all_genes[organism][x].rep for x in genes ) )\n if trim_allele:\n reps = set( ( x[:x.index('*')] for x in reps ) )\n return reps\n\ndef readme( pngfile, text ):\n \"\"\"Generate some readme text associated to an image file, that will be incorporated into the\n big html results file by run_basic_analysis.py\"\"\"\n\n out = open(pngfile+'.readme','w')\n cmd = ' '.join(argv)\n out.write(\"\"\"\nCommand<\/u>:\n{}\n
\nFilename<\/u>:\n{}\n\nReadme<\/u>:\n{}\n
\n\"\"\".format(cmd, pngfile, text))\n out.close()\n\n\n## setup a mapping that we can use for counting when allowing mm1s and also ignoring alleles\n\n# allele2mm1_rep_gene_for_counting = {}\n# def get_mm1_rep_ignoring_allele( gene, organism ): # helper fxn\n# rep = get_mm1_rep( gene, organism )\n# rep = rep[:rep.index('*')]\n# return rep\n\n# for organism in ['human','mouse']:\n# allele2mm1_rep_gene_for_counting[ organism ] = {}\n\n# for chain in 'AB':\n\n# ## look at gene\/allele maps\n# vj_alleles = { 'V': [ id for (id,g) in all_genes[organism].iteritems() if g.chain==chain and g.region=='V'],\n# 'J': [ id for (id,g) in all_genes[organism].iteritems() if g.chain==chain and g.region=='J'] }\n\n# for vj, alleles in vj_alleles.iteritems():\n# gene2rep = {}\n# gene2alleles = {}\n# rep_gene2alleles = {}\n\n# for allele in alleles:\n# #assert allele[2] == chain\n# gene = allele[:allele.index('*')]\n# rep_gene = get_mm1_rep_ignoring_allele( allele, organism )\n# if rep_gene not in rep_gene2alleles:\n# rep_gene2alleles[ rep_gene ] = []\n# rep_gene2alleles[ rep_gene ].append( allele )\n\n# if gene not in gene2rep:\n# gene2rep[gene] = set()\n# gene2alleles[gene] = []\n# gene2rep[ gene ].add( rep_gene )\n# gene2alleles[gene].append( allele )\n\n# merge_rep_genes = {}\n# for gene,reps in gene2rep.iteritems():\n# if len(reps)>1:\n# assert vj=='V'\n# if verbose:\n# print 'multireps:',organism, gene, reps\n# for allele in gene2alleles[gene]:\n# print ' '.join(all_genes[organism][allele].cdrs), allele, \\\n# get_rep(allele,organism), get_mm1_rep(allele,organism)\n\n# ## we are going to merge these reps\n# ## which one should we choose?\n# l = [ (len(rep_gene2alleles[rep]), rep ) for rep in reps ]\n# l.sort()\n# l.reverse()\n# assert l[0][0] > l[1][0]\n# toprep = l[0][1]\n# for (count,rep) in l:\n# if rep in merge_rep_genes:\n# assert rep == toprep and merge_rep_genes[rep] == rep\n# merge_rep_genes[ rep ] = toprep\n\n\n# for allele in alleles:\n# count_rep = get_mm1_rep_ignoring_allele( allele, organism )\n# if count_rep in merge_rep_genes:\n# count_rep = merge_rep_genes[ count_rep ]\n# allele2mm1_rep_gene_for_counting[ organism ][ allele] = count_rep\n# if verbose:\n# print 'allele2mm1_rep_gene_for_counting:',organism, allele, count_rep\n\n\n\ndef assign_label_reps_and_colors_based_on_most_common_genes_in_repertoire( tcr_infos, organism ):\n ## assumes that each element of tcr_infos is a dictionary with fields that would have come from parse_tsv_line\n ## uses the *_countreps info that was filled in by read_pair_seqs.py\n ## the _label_rep* fields get over-written if they were present\n for segtype in segtypes_lowercase:\n countreps_tag = segtype+'_countreps'\n rep_tag = segtype+'_label_rep'\n color_tag = segtype+'_label_rep_color' ## where we will store the rep info\n\n counts = {}\n for tcr_info in tcr_infos:\n reps = tcr_info[countreps_tag].split(';')\n for rep in reps:\n counts[rep] = counts.get(rep,0)+1\n\n newcounts = {}\n for tcr_info in tcr_infos:\n reps = tcr_info[countreps_tag].split(';')\n toprep = max( [ ( counts[x],x) for x in reps ] )[1]\n tcr_info[rep_tag] = toprep ## doesnt have allele info anymore\n newcounts[toprep] = newcounts.get(toprep,0)+1\n\n l = [(y,x) for x,y in newcounts.iteritems()]\n l.sort()\n l.reverse()\n rep_colors = dict( zip( [x[1] for x in l], html_colors.get_rank_colors_no_lights(len(l)) ) )\n for tcr_info in tcr_infos:\n tcr_info[ color_tag ] = rep_colors[ tcr_info[ rep_tag ] ]\n\n return ## we modified the elements of the tcr_infos list in place\n\n\n## this is 
not exactly perfect, but probably OK to start with...\n##\ndef detect_fake_chains( clones_file, Achain='A', Bchain='B' ):\n tcrs = parse_tsv.parse_tsv_file( clones_file, key_fields = [], store_fields = ['va_gene','cdr3a','vb_gene','cdr3b'] )\n fake_chains = []\n if len( set( [ (x[0],x[1]) for x in tcrs ] ) )==1:\n fake_chains.append( Achain )\n if len( set( [ (x[2],x[3]) for x in tcrs ] ) )==1:\n fake_chains.append( Bchain )\n if fake_chains:\n print 'Fake sequence data detected for chains: {}'.format( ' '.join( fake_chains ) )\n return fake_chains\n\n\n\n\n\n# if __name__ == '__main__':\n# for organism in allele2mm1_rep_gene_for_counting:\n# for allele in allele2mm1_rep_gene_for_counting[ organism ]:\n# print 'get_mm1_rep_gene_for_counting\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_228","text":"zenmood\/IndoorFarmWizvenv\/lib\/python3.7\/site-packages\/pba\/dists.py10-100\nif __name__ is not None and \".\" in __name__:\n from .interval import Interval\nelse:\n from interval import Interval\n\nif __name__ is not None and \".\" in __name__:\n from .pbox import Pbox\nelse:\n from pbox import Pbox\n\nimport scipy.stats as sps\nimport numpy as np\nimport itertools\n\ndists = {\n 'alpha' : sps.alpha,\n 'anglit' : sps.anglit,\n 'arcsine' : sps.arcsine,\n 'argus' : sps.argus,\n 'beta' : sps.beta,\n 'betaprime' : sps.betaprime,\n 'bradford' : sps.bradford,\n 'burr' : sps.burr,\n 'burr12' : sps.burr12,\n 'cauchy' : sps.cauchy,\n 'chi' : sps.chi,\n 'chi2' : sps.chi2,\n 'cosine' : sps.cosine,\n 'crystalball' : sps.crystalball,\n 'dgamma' : sps.dgamma,\n 'dweibull' : sps.dweibull,\n 'erlang' : sps.erlang,\n 'expon' : sps.expon,\n 'exponnorm' : sps.exponnorm,\n 'exponweib' : sps.exponweib,\n 'exponpow' : sps.exponpow,\n 'f' : sps.f,\n 'fatiguelife' : sps.fatiguelife,\n 'fisk' : sps.fisk,\n 'foldcauchy' : sps.foldcauchy,\n 'foldnorm' : sps.foldnorm,\n # 'frechet_r' : sps.frechet_r,\n # 'frechet_l' : sps.frechet_l,\n 'genlogistic' : sps.genlogistic,\n 'gennorm' : sps.gennorm,\n 'genpareto' : sps.genpareto,\n 'genexpon' : sps.genexpon,\n 'genextreme' : sps.genextreme,\n 'gausshyper' : sps.gausshyper,\n 'gamma' : sps.gamma,\n 'gengamma' : sps.gengamma,\n 'genhalflogistic' : sps.genhalflogistic,\n 'geninvgauss' : sps.geninvgauss,\n 'gilbrat' : sps.gilbrat,\n 'gompertz' : sps.gompertz,\n 'gumbel_r' : sps.gumbel_r,\n 'gumbel_l' : sps.gumbel_l,\n 'halfcauchy' : sps.halfcauchy,\n 'halflogistic' : sps.halflogistic,\n 'halfnorm' : sps.halfnorm,\n 'halfgennorm' : sps.halfgennorm,\n 'hypsecant' : sps.hypsecant,\n 'invgamma' : sps.invgamma,\n 'invgauss' : sps.invgauss,\n 'invweibull' : sps.invweibull,\n 'johnsonsb' : sps.johnsonsb,\n 'johnsonsu' : sps.johnsonsu,\n 'kappa4' : sps.kappa4,\n 'kappa3' : sps.kappa3,\n 'ksone' : sps.ksone,\n 'kstwobign' : sps.kstwobign,\n 'laplace' : sps.laplace,\n 'levy' : sps.levy,\n 'levy_l' : sps.levy_l,\n 'levy_stable' : sps.levy_stable,\n 'logistic' : sps.logistic,\n 'loggamma' : sps.loggamma,\n 'loglaplace' : sps.loglaplace,\n 'lognorm' : sps.lognorm,\n 'loguniform' : sps.loguniform,\n 'lomax' : sps.lomax,\n 'maxwell' : sps.maxwell,\n 'mielke' : sps.mielke,\n 'moyal' : sps.moyal,\n 'nakagami' : sps.nakagami,\n 'ncx2' : sps.ncx2,\n 'ncf' : sps.ncf,\n 'nct' : sps.nct,\n 'norm' : sps.norm,\n 'norminvgauss' : sps.norminvgauss,\n 'pareto' : sps.pareto,\n 'pearson3' : sps.pearson3,\n 'powerlaw' : sps.powerlaw,\n 'powerlognorm' : sps.powerlognorm,\n 'powernorm' : sps.powernorm,\n 'rdist' : sps.rdist,\n 'rayleigh' : sps.rayleigh,\n 'rice' : 
sps.rice,\n 'recipinvgauss' : sps.recipinvgauss,\n 'semicircular' : sps.semicircular,\n 'skewnorm' : sps.skewnorm,\n 't' : sps.t,\n 'trapz' : sps.trapz,\n 'triang' : sps.triang,\n 'truncexpon' : sps.truncexpon,\n 'truncnorm' : sps.truncnorm,\n 'tukeylambda' : sps.tukeylambda,\n 'uniform' : sps.uniform,\n 'vonmises' : sps.vonmises,\n 'vonmises_line' : sps.vonmises_line,\n 'wald' : sps.wald,\n 'weibull_min' : sps.weibull_min,\n 'weibull_max' : sps.weibull_max,\n 'wrapcauchy' : sps.wrapcauchy,\n 'bernoulli' : sps.bernoulli,\n 'betabinom' : sps.betabinom,\n 'binom' : sps.binom,\n 'boltzmann' : sps.boltzmann,\n 'dlaplace' : sps.dlaplace,\n 'geom' : sps.geom,\n 'hypergeom' : sps.hypergeom,\n 'logser' : sps.logser,\n 'nbinom' : sps.nbinom,\n 'planck' : sps.planck,\n 'poisson' : sps.poisson,\n 'randint' : sps.randint,\n 'skellam' : sps.skellam,\n 'zipf' : sps.zipf,\n 'yulesimon' : sps.yulesimon\n}\n\ndef __get_bounds(function_name = None,steps = 200,*args):\n\n # define support\n x = np.linspace(0.0001,0.9999,steps)\n\n #get bound arguments\n new_args = itertools.product(*args)\n\n bounds = []\n\n mean_hi = -np.inf\n mean_lo = np.inf\n var_lo = np.inf\n var_hi = 0\n\n for a in new_args:\n\n bounds.append(dists[function_name].ppf(x,*a))\n bmean, bvar = dists[function_name].stats(*a, moments = 'mv')\n\n if bmean < mean_lo:\n mean_lo = bmean\n if bmean > mean_hi:\n mean_hi = bmean\n if bvar > var_hi:\n var_hi = bvar\n if bvar < var_lo:\n var_lo = bvar\n\n\n Left = [min([b[i] for b in bounds]) for i in range(steps)]\n Right = [max([b[i] for b in bounds]) for i in range(steps)]\n\n var = Interval(np.float64(var_lo),np.float64(var_hi))\n mean = Interval(np.float64(mean_lo),np.float64(mean_hi))\n\n Left = np.array(Left)\n Right = np.array(Right)\n\n return Left, Right, mean, var\n\n\ndef lognormal(mean, var, steps = 200):\n\n x = np.linspace(0,1,steps)\n\n if mean.__class__.__name__ != 'Interval':\n mean = Interval(mean,mean)\n if var.__class__.__name__ != 'Interval':\n var = Interval(var,var)\n\n bound0 = sps.lognorm.ppf(x, mean.left, var.left)\n bound1 = sps.lognorm.ppf(x, mean.right, var.left)\n bound2 = sps.lognorm.ppf(x, mean.left, var.right)\n bound3 = sps.lognorm.ppf(x, mean.right, var.right)\n\n Left = [min(bound0[i],bound1[i],bound2[i],bound3[i]) for i in range(steps)]\n Right = [max(bound0[i],bound1[i],bound2[i],bound3[i]) for i in range(steps)]\n\n Left = np.array(Left)\n Right = np.array(Right)\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape='lognormal',\n mean_left=mean.left,\n mean_right=mean.right,\n var_left=var.left,\n var_right=var.right)\nlognorm = lognormal\n\ndef alpha(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('alpha',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'alpha',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef anglit(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('anglit',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'anglit',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef arcsine(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n 
args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('arcsine',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'arcsine',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef argus(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('argus',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'argus',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef beta(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('beta',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'beta',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef betaprime(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('betaprime',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'betaprime',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef bradford(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('bradford',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'bradford',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef burr(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('burr',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'burr',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef burr12(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('burr12',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'burr12',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef cauchy(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('cauchy',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'cauchy',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef chi(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('chi',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'chi',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef chi2(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n 
if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('chi2',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'chi2',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef cosine(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('cosine',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'cosine',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef crystalball(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('crystalball',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'crystalball',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef dgamma(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('dgamma',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'dgamma',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef dweibull(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('dweibull',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'dweibull',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef erlang(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('erlang',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'erlang',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef expon(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('expon',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'expon',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef exponnorm(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('exponnorm',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'exponnorm',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef exponweib(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('exponweib',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'exponweib',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n 
)\n\ndef exponpow(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('exponpow',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'exponpow',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef f(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('f',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'f',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef fatiguelife(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('fatiguelife',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'fatiguelife',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef fisk(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('fisk',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'fisk',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef foldcauchy(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('foldcauchy',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'foldcauchy',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef foldnorm(mu,s, steps = 200):\n\n x = np.linspace(0.0001,0.9999,steps)\n if mu.__class__.__name__ != 'Interval':\n mu = Interval(mu)\n if s.__class__.__name__ != 'Interval':\n s = Interval(s)\n\n new_args = [\n [mu.lo()\/s.lo(),0,s.lo()],\n [mu.hi()\/s.lo(),0,s.lo()],\n [mu.lo()\/s.hi(),0,s.hi()],\n [mu.hi()\/s.hi(),0,s.hi()]\n ]\n\n\n bounds = []\n\n mean_hi = -np.inf\n mean_lo = np.inf\n var_lo = np.inf\n var_hi = 0\n\n for a in new_args:\n\n bounds.append(sps.foldnorm.ppf(x,*a))\n bmean, bvar = sps.foldnorm.stats(*a, moments = 'mv')\n\n if bmean < mean_lo:\n mean_lo = bmean\n if bmean > mean_hi:\n mean_hi = bmean\n if bvar > var_hi:\n var_hi = bvar\n if bvar < var_lo:\n var_lo = bvar\n\n\n Left = [min([b[i] for b in bounds]) for i in range(steps)]\n Right = [max([b[i] for b in bounds]) for i in range(steps)]\n\n var = Interval(np.float64(var_lo),np.float64(var_hi))\n mean = Interval(np.float64(mean_lo),np.float64(mean_hi))\n\n Left = np.array(Left)\n Right = np.array(Right)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'foldnorm',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\n# def frechet_r(*args, steps = 200):\n# args = list(args)\n# for i in range(0,len(args)):\n# if args[i].__class__.__name__ != 'Interval':\n# args[i] = Interval(args[i])\n\n# Left, Right, mean, var = __get_bounds('frechet_r',steps,*args)\n\n# return Pbox(\n# Left,\n# Right,\n# steps = steps,\n# shape = 'frechet_r',\n# mean_left = mean.left,\n# mean_right = 
mean.right,\n# var_left = var.left,\n# var_right = var.right\n# )\n\n# def frechet_l(*args, steps = 200):\n# args = list(args)\n# for i in range(0,len(args)):\n# if args[i].__class__.__name__ != 'Interval':\n# args[i] = Interval(args[i])\n\n# Left, Right, mean, var = __get_bounds('frechet_l',steps,*args)\n\n# return Pbox(\n# Left,\n# Right,\n# steps = steps,\n# shape = 'frechet_l',\n# mean_left = mean.left,\n# mean_right = mean.right,\n# var_left = var.left,\n# var_right = var.right\n# )\n\ndef genlogistic(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('genlogistic',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'genlogistic',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef gennorm(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('gennorm',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'gennorm',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef genpareto(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('genpareto',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'genpareto',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef genexpon(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('genexpon',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'genexpon',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef genextreme(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('genextreme',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'genextreme',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef gausshyper(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('gausshyper',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'gausshyper',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef gamma(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('gamma',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'gamma',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef gengamma(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = 
__get_bounds('gengamma',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'gengamma',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef genhalflogistic(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('genhalflogistic',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'genhalflogistic',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef geninvgauss(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('geninvgauss',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'geninvgauss',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef gilbrat(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('gilbrat',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'gilbrat',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef gompertz(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('gompertz',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'gompertz',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef gumbel_r(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('gumbel_r',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'gumbel_r',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef gumbel_l(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('gumbel_l',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'gumbel_l',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef halfcauchy(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('halfcauchy',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'halfcauchy',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef halflogistic(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('halflogistic',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'halflogistic',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef halfnorm(*args, steps = 200):\n 
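# Usage sketch (hypothetical values): scipy.stats.halfnorm has no shape parameter, only loc and scale,\n # so a call such as halfnorm(0, Interval(1, 2)) bounds the scale while holding loc fixed.\n 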
args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('halfnorm',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'halfnorm',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef halfgennorm(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('halfgennorm',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'halfgennorm',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef hypsecant(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('hypsecant',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'hypsecant',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef invgamma(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('invgamma',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'invgamma',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef invgauss(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('invgauss',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'invgauss',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef invweibull(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('invweibull',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'invweibull',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef johnsonsb(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('johnsonsb',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'johnsonsb',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef johnsonsu(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('johnsonsu',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'johnsonsu',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef kappa4(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('kappa4',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'kappa4',\n mean_left = 
mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef kappa3(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('kappa3',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'kappa3',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef ksone(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('ksone',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'ksone',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef kstwobign(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('kstwobign',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'kstwobign',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef laplace(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('laplace',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'laplace',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef levy(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('levy',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'levy',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef levy_l(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('levy_l',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'levy_l',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef levy_stable(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('levy_stable',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'levy_stable',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef logistic(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('logistic',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'logistic',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef loggamma(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = 
__get_bounds('loggamma',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'loggamma',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef loglaplace(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('loglaplace',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'loglaplace',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef lognorm(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('lognorm',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'lognorm',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef loguniform(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('loguniform',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'loguniform',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef lomax(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('lomax',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'lomax',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef maxwell(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('maxwell',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'maxwell',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef mielke(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('mielke',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'mielke',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef moyal(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('moyal',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'moyal',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef nakagami(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('nakagami',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'nakagami',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef ncx2(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if 
args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('ncx2',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'ncx2',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef ncf(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('ncf',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'ncf',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef nct(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('nct',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'nct',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef norm(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('norm',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'norm',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef norminvgauss(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('norminvgauss',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'norminvgauss',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef pareto(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('pareto',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'pareto',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef pearson3(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('pearson3',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'pearson3',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef powerlaw(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('powerlaw',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'powerlaw',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef powerlognorm(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('powerlognorm',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'powerlognorm',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef 
powernorm(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('powernorm',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'powernorm',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef rdist(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('rdist',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'rdist',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef rayleigh(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('rayleigh',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'rayleigh',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef rice(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('rice',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'rice',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef recipinvgauss(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('recipinvgauss',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'recipinvgauss',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef semicircular(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('semicircular',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'semicircular',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef skewnorm(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('skewnorm',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'skewnorm',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef t(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('t',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 't',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef trapz(a,b,c,d , steps = 200):\n if a.__class__.__name__ != 'Interval':\n a = Interval(a)\n if b.__class__.__name__ != 'Interval':\n b = Interval(b)\n if c.__class__.__name__ != 'Interval':\n c = Interval(c)\n if d.__class__.__name__ != 'Interval':\n d = Interval(d)\n\n x = np.linspace(0.0001,0.9999,steps)\n 
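# scipy.stats.trapz is parameterised by the plateau fractions (shapes c and d) of the unit-scaled support\n # plus loc and scale, so a trapezoid with corners a <= b <= c <= d maps to shapes (b-a)\/(d-a) and\n # (c-a)\/(d-a) with loc = a and scale = d - a, which the two ppf calls below use.\n 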
left = sps.trapz.ppf(x,(b.lo()-a.lo())\/(d.lo()-a.lo()),(c.lo()-a.lo())\/(d.lo()-a.lo()),a.lo(),d.lo()-a.lo())\n right = sps.trapz.ppf(x,(b.hi()-a.hi())\/(d.hi()-a.hi()),(c.hi()-a.hi())\/(d.hi()-a.hi()),a.hi(),d.hi()-a.hi())\n\n return Pbox(\n left,\n right,\n steps = steps,\n shape = 'trapz'\n )\n\ndef triang(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('triang',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'triang',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef truncexpon(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('truncexpon',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'truncexpon',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef truncnorm(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('truncnorm',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'truncnorm',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef tukeylambda(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('tukeylambda',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'tukeylambda',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\n\ndef uniform(a, b, steps = 200):\n\n x = np.linspace(0,1,steps)\n\n if a.__class__.__name__ != 'Interval':\n a = Interval(a,a)\n if b.__class__.__name__ != 'Interval':\n b = Interval(b,b)\n\n Left = np.linspace(a.left,b.left,steps)\n Right = np.linspace(a.right,b.right,steps)\n\n mean = 0.5 * (a+b)\n var = ((b-a)**2 )\/12\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'uniform',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef vonmises(*args, steps = Pbox.STEPS):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('vonmises',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'vonmises',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef vonmises_line(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('vonmises_line',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'vonmises_line',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef wald(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('wald',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'wald',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = 
var.left,\n var_right = var.right\n )\n\ndef weibull_min(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('weibull_min',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'weibull_min',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef weibull_max(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('weibull_max',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'weibull_max',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef wrapcauchy(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('wrapcauchy',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'wrapcauchy',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef bernoulli(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('bernoulli',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'bernoulli',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef betabinom(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('betabinom',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'betabinom',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef binom(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('binom',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'binom',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef boltzmann(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('boltzmann',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'boltzmann',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef dlaplace(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('dlaplace',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'dlaplace',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef geom(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('geom',steps,*args)\n\n return 
Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'geom',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef hypergeom(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('hypergeom',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'hypergeom',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef logser(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('logser',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'logser',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef nbinom(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('nbinom',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'nbinom',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef planck(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('planck',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'planck',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef poisson(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('poisson',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'poisson',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef randint(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('randint',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'randint',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef skellam(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('skellam',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'skellam',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef zipf(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n Left, Right, mean, var = __get_bounds('zipf',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'zipf',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\ndef yulesimon(*args, steps = 200):\n args = list(args)\n for i in range(0,len(args)):\n if args[i].__class__.__name__ != 'Interval':\n args[i] = Interval(args[i])\n\n 
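# Usage sketch (hypothetical values): the wrappers above accept plain numbers or Interval parameters,\n # e.g. yulesimon(Interval(2, 4)) or norm(Interval(0, 1), 1); __get_bounds then envelopes the ppf over\n # all combinations of interval endpoints (assuming Interval iterates over its endpoints) to build the bounds.\n 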
Left, Right, mean, var = __get_bounds('yulesimon',steps,*args)\n\n return Pbox(\n Left,\n Right,\n steps = steps,\n shape = 'yulesimon',\n mean_left = mean.left,\n mean_right = mean.right,\n var_left = var.left,\n var_right = var.right\n )\n\n\n### Other distributions\ndef KM(k,m,steps = 200):\n return beta(Interval(k,k+1),Interval(m,m+1),steps = steps)\n\ndef KN(k,n,steps = 200):\n return KM(k,n-k,steps=steps)\n\n\n### Alternate names\nnormal = norm\nN = normal\nunif = uniform\nU = uniform\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_229","text":"import numpy as np\nimport itertools\nimport raam\nimport pickle\nfrom raam import features\nfrom scipy import cluster\nfrom warnings import warn\n\ndef degradation(fun,charge,discharge):\n \"\"\"\n Constructs a capacity degradation function from given parameters.\n \n The functions are D- and D+: the anti-derivatives. See the paper for more details\n \n Parameters\n ----------\n fun : {\"polynomial\"}\n Type of the function\n charge : object\n List of parameters for the function. \n polynomial: (charge,discharge)\n parameters are the coefficients with low to high order \n discharge : object\n List of parameters for the function. \n polynomial: (charge,discharge)\n parameters are the coefficients with low to high order \n\n Returns\n -------\n out : function (x1,x2) \n x1: initial level\n x2: new level\n \"\"\"\n if fun == 'polynomial':\n charge_a = np.array(charge)\n discharge_a = np.array(discharge)\n def polynomial(x1,x2):\n if x1 <= x2: # charge\n # intentional x2 - x1\n return np.polynomial.polynomial.polyval(x2,charge_a) - np.polynomial.polynomial.polyval(x1,charge_a) \n else: # discharge\n # intentional: x1 - x2\n return np.polynomial.polynomial.polyval(x1,discharge_a) - np.polynomial.polynomial.polyval(x2,discharge_a) \n return polynomial\n else:\n raise ValueError('Incorrect function type: \"%s\"' % fun)\n\nDefaultConfiguration = {\n \"price_buy\" : [1.2,2.1,3.3],\n \"price_sell\" : [1,2,3],\n \"price_probabilities\" : np.array([[0.8, 0.1, 0.1],[0.1, 0.8, 0.1],[0.1, 0.1, 0.8]]),\n \"initial_capacity\" : 1,\n \"initial_inventory\" : 0.5,\n \"degradation\" : {\"fun\":\"polynomial\",\"charge\":[0,0,0.01],\"discharge\":[0.01,0.02,0.01]},\n \"capacity_cost\" : 1,\n \"change_capacity\" : True,\n \"charge_limit\" : 1, # limit on charge in a single step\n \"discharge_limit\" : 1 # limit on discharge in a single step, absolute value\n }\n\n\nclass Simulator(raam.Simulator):\n \"\"\"\n Simulates the evolution of the inventory, the total capacity, and the price \n levels. The prices come from a Markov chain process.\n \n The initial state is generated from an expectation state \n \n State (tuple): \n - inventory\n - capacity\n - priceindex \n \n Action: charge change\n This is not an index but an absolute value of the change in charge\n\n Parameters\n ----------\n config : dict\n Configuration. See DefaultConfiguration for an example\n discount : float, optional\n Discount factor\n action_cnt : int, optional\n Number of ticks for discretizing actions.\n inventory_cnt : int, optional\n Number of ticks for discretizing inventory states. \n capacity_cnt : int, optional\n Discretization set for storage capacity states. 
\n This step must be fine enough to capture the small\n change in capacities\n \"\"\"\n\n def __init__(self,config,discount=0.9999,action_cnt=20,inventory_cnt=100,\\\n capacity_cnt=100):\n self._discount = discount\n self._action_cnt = action_cnt\n self._inventory_cnt = inventory_cnt\n self._capacity_cnt = capacity_cnt\n \n self.degradation = degradation(**config['degradation'])\n self.initial_capacity = config['initial_capacity']\n self.price_buy = config['price_buy']\n self.price_sell = config['price_sell']\n self.price_probabilities = config['price_probabilities']\n self.capacity_cost = config['capacity_cost']\n self.change_capacity = config['change_capacity']\n self.initial_inventory = config['initial_inventory']\n\n if 'charge_limit' not in config:\n warn('No charge_limit in config, using 1')\n if 'discharge_limit' not in config:\n warn('No discharge_limit in config, using 1')\n self.charge_limit = config['charge_limit'] if 'charge_limit' in config else 1\n self.discharge_limit = config['discharge_limit'] if 'discharge_limit' in config else 1\n\n # state and the distributions\n self._all_states = None\n self._initial_distribution = None\n\n assert np.all(np.array(self.price_buy) >= 0)\n assert np.all(np.array(self.price_sell) >= 0)\n assert len(self.price_buy) == len(self.price_sell)\n assert self.price_probabilities.shape[0] == self.price_probabilities.shape[1] == len(self.price_sell)\n assert np.max(np.abs(np.sum(self.price_probabilities,1) - 1)) < 0.01\n assert np.all(np.array(self.price_probabilities) >= 0)\n assert self.capacity_cost >= 0\n assert type(self.change_capacity) is bool\n assert self.initial_inventory <= self.initial_capacity and self.initial_inventory >= 0 \n\n @property\n def discount(self):\n return self._discount\n\n def get_stateindex(self, decstate):\n \"\"\"\n Finds the index of the state in the list returned by all_states\n \"\"\"\n # lazy initialization\n if self._all_states is None:\n self.all_states()\n\n return self._aggindex2stateindex[self._state_aggregation.classify(decstate)]\n\n\n def all_states(self):\n \"\"\"\n Returns all states (quantized according to the parameters provided in the constructor)\n\n There is no iteration over capacities if self.change_capacity = False and it is \n fixed to be 0.\n\n Important: Use self.get_stateindex() to get index of a state instead of searching this list.\n It is much more efficient.\n\n Returns\n -------\n out : np.ndarray\n List of all states\n \"\"\"\n \n # lazy initialization\n if self._all_states is None:\n\n pricecount = len(self.price_buy)\n\n # if the capacity does not change, then aggregate the capacity dimension to only one value\n if self.change_capacity:\n # make sure that the centers of price clusters are integer numbers\n self._state_aggregation = raam.features.GridAggregation(\\\n ((0,self.initial_capacity), (0,self.initial_capacity), (-0.5,pricecount-0.5)), \\\n (self._inventory_cnt, self._capacity_cnt, pricecount) )\n else:\n self._state_aggregation = raam.features.GridAggregation(\\\n ((0,self.initial_capacity), (self.initial_capacity-0.1,self.initial_capacity+0.1), (-0.5,pricecount-0.5)), \\\n (self._inventory_cnt, 1, pricecount) )\n\n \n \n centers = list(self._state_aggregation.centers())\n\n # select only states that have smaller inventory than capacity,\n # and construct the mapping from aggregation index to the index in all_states\n self._all_states, stateindex2aggindex = \\\n zip(*( (s,i) for i,s in enumerate(centers) \\\n if s[0] <= s[1]))\n\n # invert the aggregation index\n 
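# Illustrative example (hypothetical sizes): with four aggregation cells of which only cells 0, 2 and 3\n # satisfy inventory <= capacity, stateindex2aggindex == (0, 2, 3) and the loop below yields\n # _aggindex2stateindex == [0, None, 1, 2].\n 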
self._aggindex2stateindex = [None] * len(centers)\n for si,ai in enumerate(stateindex2aggindex):\n self._aggindex2stateindex[ai] = si\n\n return self._all_states\n\n def initial_distribution(self):\n \"\"\"\n Returns initial distributions over states returned be all_states\n \n Returns\n -------\n out : np.ndarray\n Initial distribution\n \"\"\"\n\n # lazy initialization\n if self._initial_distribution is None:\n from scipy import cluster\n \n allstates = self.all_states()\n initialstate = self.initstates().__next__()\n\n init_index = self.get_stateindex(initialstate)\n\n distribution = np.zeros(len(allstates))\n distribution[init_index] = 1.0\n\n self._initial_distribution = distribution\n\n return self._initial_distribution\n \n\n def all_transitions_continuous(self, decstate, action):\n \"\"\"\n Returns all transitions and probabilities for the given state and action.\n\n The returned states are continuous and are not quantized according to \n self._all_states()\n\n Returns\n -------\n out : list\n Sequence of tuples: (nextstate, probability, reward)\n \"\"\"\n\n inventory, capacity, priceindex = decstate\n priceindex = int(priceindex)\n\n assert(inventory >= 0 and inventory <= capacity)\n \n # determine buy and sell prices\n pricesell = self.price_sell[priceindex]\n pricebuy = self.price_buy[priceindex]\n \n # trim action based on the current inventory\n action = max(action, - inventory)\n action = min(action, capacity - inventory)\n \n # update the next inventory based on the action\n ninventory = inventory + action\n \n # compute capacity loss\n capacity_loss = self.degradation(inventory \/ capacity, ninventory \/ capacity) * capacity\n assert capacity_loss >= -1e-10, 'Cannot have negative capacity loss' \n\n if self.change_capacity:\n ncapacity = max(0,capacity - capacity_loss)\n ninventory = min(ninventory, ncapacity)\n else:\n ncapacity = capacity\n\n # compute the reward for the transition\n reward = - (pricebuy if action >= 0 else pricesell) * action\n reward -= capacity_loss * self.capacity_cost\n\n # sample the next price index\n return (((ninventory,ncapacity,npriceindex),probability,reward) \\\n for npriceindex, probability in \\\n enumerate(self.price_probabilities[priceindex,:]) if probability > 0)\n\n def all_transitions(self, stateindex, actionindex):\n \"\"\"\n Returns all transitions and probabilities for the given state and action.\n\n The returned states are continuous and are not quantized according to \n self._all_states()\n\n Parameters\n ----------\n stateindex : int\n Index of the state in the list returned by all_states\n actionindex : int\n Index of the action in the list returned by actions\n\n Returns\n -------\n out : sequence\n Sequence of tuples: (nextstate, probability, reward)\n \"\"\"\n allstates = self.all_states()\n decstate = allstates[stateindex]\n\n allactions = self.actions(decstate)\n action = allactions[actionindex]\n\n # map transitions to the state indexes\n return [(self.get_stateindex(s),p,r) \n for s,p,r in \n self.all_transitions_continuous(decstate, action)]\n\n def transition(self,decstate,action):\n \"\"\" \n Represents a transition from a state.\n\n Charging over the available capacity, or discharging below empty is not possible.\n Any action that attempts to do that is automatically limited to the capacity.\n\n Parameters\n ----------\n decstate : state\n inventory,capacity,priceindex\n action : float\n change in charge (this is a float value, not the index)\n\n Returns\n -------\n out : expectation state\n 
inventory,capacity,reward\n \"\"\"\n #TODO: replace by a call to all_transitions\n inventory, capacity, priceindex = decstate\n assert inventory >= 0 and inventory <= capacity\n \n # determine buy and sell prices\n pricesell = self.price_sell[priceindex]\n pricebuy = self.price_buy[priceindex]\n \n # trim action based on the current inventory\n action = max(action, - inventory)\n action = min(action, capacity - inventory)\n \n # update the next inventory based on the action\n ninventory = inventory + action\n \n # compute capacity loss\n capacity_loss = self.degradation(inventory \/ capacity, ninventory \/ capacity) * capacity\n assert capacity_loss >= -1e-10, 'Cannot have negative capacity loss' \n\n if self.change_capacity:\n ncapacity = max(0,capacity - capacity_loss)\n ninventory = min(ninventory, ncapacity)\n else:\n ncapacity = capacity\n\n # compute the reward for the transition\n reward = - (pricebuy if action >= 0 else pricesell) * action\n reward -= capacity_loss * self.capacity_cost\n\n # sample the next price index\n pricecount = self.price_probabilities.shape[1]\n npriceindex = np.random.choice(\\\n np.arange(pricecount,dtype=int), \\\n p=self.price_probabilities[priceindex,:])\n\n return (reward,(ninventory,ncapacity,npriceindex))\n\n def actions(self, state):\n \"\"\"\n List of applicable actions in the state. Relative change\n in capacity\n \"\"\"\n inventory, capacity, _ = state\n\n discharge_floor = max(-inventory,-self.discharge_limit)\n charge_ceil = min(capacity - inventory,self.charge_limit)\n\n return np.linspace(discharge_floor, charge_ceil, self._action_cnt)\n\n def initstates(self):\n \"\"\" The initial state is given by the configuration and the 1st state of the \n price process. \"\"\"\n return itertools.repeat( (self.initial_inventory,self.initial_capacity,0) )\n\n def price_levels(self):\n \"\"\" Returns the number of price states in the Markov model \"\"\"\n return self.price_probabilities.shape[0]\n\nclass Features:\n \"\"\" \n Suitable features for inventory management \n \"\"\"\n linear = (features.piecewise_linear(None), features.piecewise_linear(None))\n\n## Threshold policy definitions\ndef threshold_policy(lowers, uppers, simulator):\n \"\"\"\n Construct a threshold policy with different thresholds for different price \n indexes.\n \n Assumes that the capacity of the battery does not change.\n \n Lower is the lower inventory target, and upper is the upper inventory target\n \n Parameters\n ----------\n lowers : list\n List of lower thresholds\n uppers : list\n List of upper thresholds\n simulator : inventory.Simulator\n Simulator of the inventory problem (used to determine available actions)\n \"\"\" \n assert len(lowers) == len(uppers)\n assert np.min(uppers - lowers) >= -1e-4\n \n def policy(state):\n inventory,capacity,priceindex = state\n \n # compute the target charge change\n if inventory < lowers[priceindex]:\n target = lowers[priceindex] - inventory # the target charge change\n elif inventory > uppers[priceindex]:\n target = uppers[priceindex] - inventory # the target charge change\n else:\n # if it is between the thresholds, then there is no change\n target = 0\n \n # find the closest (discretized) action\n actions = simulator.actions(state)\n actionindex = np.argmin(np.abs(actions - target))\n return actions[actionindex]\n \n return policy\n\n## Threshold Optimization Functions\n\nimport math\nimport random\n \ndef _eval_dimchange(sim,lowers,uppers,dim,l,u,horizon,runs):\n \"\"\" Evaluates the dimension change impact \"\"\"\n dim_lowers = 
lowers.copy()\n dim_uppers = uppers.copy()\n \n dim_lowers[dim] = l\n dim_uppers[dim] = u\n \n policy = raam.examples.inventory.threshold_policy(dim_lowers, dim_uppers, sim)\n \n # Common random numbers for the evaluation!\n np.random.seed(0)\n random.seed(0)\n \n samples = sim.simulate(horizon,policy,runs)\n \n print('.', end='')\n return samples.statistics(sim.discount)['mean_return']\n\n\ndef optimize_jointly(sim,step=0.1,horizon=600,runs=5):\n \"\"\"\n Jointly optimizes upper and lower thresholds for charging and discharging for \n each dimension.\n \n It can be shown (a publication pending) that this method will compute\n the optimal solution when there is no degradation in the battery.\n \"\"\"\n \n values = [(l,u) for l in np.arange(0,1+step\/2,step) for u in np.arange(l,1+step\/2,step) ]\n \n # copy the lower and upper bounds\n lowers = np.zeros(len(sim.price_buy)) # lower thresholds\n uppers = np.ones(len(sim.price_buy)) # upper thresholds\n \n for iteration in range(10):\n print('Lowers', lowers)\n print('Uppers', uppers)\n \n for dimension in range(len(sim.price_sell)):\n print('Dimension', dimension)\n returns = [_eval_dimchange(sim,lowers,uppers,dimension,l,u,horizon,runs) for (l,u) in values]\n \n maxindex = np.argmax(returns)\n \n print('\\n', returns[maxindex])\n l,u = values[maxindex]\n lowers[dimension] = l\n uppers[dimension] = u\n \n print('Lowers', lowers)\n print('Uppers', uppers)\n\n\ndef optimize_independently(sim,step=0.1,horizon=600,runs=5):\n \"\"\"\n Optimizes the upper and lower thresholds independently. It is not clear \n that this method actually computes the optimal policy.\n \"\"\"\n \n epsilon = 1e-6 # small value to deal with numerical issues\n\n # copy the lower and upper bounds\n lowers = 0.5*np.ones(len(sim.price_buy)) # lower thresholds\n uppers = 0.5*np.ones(len(sim.price_buy)) # upper thresholds\n \n \n for iteration in range(10):\n print('Lowers', lowers)\n print('Uppers', uppers)\n \n weight = 1.0 \/ math.sqrt(iteration + 1)\n \n for dimension in range(len(sim.price_sell)):\n print('Dimension', dimension)\n \n print(' lowers')\n values = np.arange(0,1+epsilon,step)\n if len(values) > 0:\n returns = [_eval_dimchange(sim,lowers,uppers,dimension,\\\n l,max(l,uppers[dimension]),horizon,runs)\\\n for l in values]\n maxindex = np.argmax(returns)\n l = values[maxindex]\n lowers[dimension] = weight * l + (1-weight)*lowers[dimension]\n uppers[dimension] = max(uppers[dimension],lowers[dimension])\n assert lowers[dimension] <= uppers[dimension]\n \n print('\\n',returns[maxindex])\n \n print('\\n uppers')\n values = np.arange(0,1+epsilon,step)\n if len(values) > 0:\n returns = [_eval_dimchange(sim,lowers,uppers,dimension,\\\n min(lowers[dimension],u),u,horizon,runs) \\\n for u in values]\n maxindex = np.argmax(returns)\n u = values[maxindex]\n uppers[dimension] = weight*u + (1-weight)*uppers[dimension]\n lowers[dimension] = min(lowers[dimension],uppers[dimension])\n assert lowers[dimension] <= uppers[dimension]\n \n print('\\n',returns[maxindex])\n\n print('Lowers', lowers)\n print('Uppers', uppers)\n\n\n## Plotting functions\n\ndef plot_degradation(degrad, ex_inventories = [0.1,0.5,0.9],delta=None):\n \"\"\"\n Plots the degradation function for examples of the current inventory\n \n Parameters\n ----------\n degrad : fun\n Degradation function, the output of :fun:`degradation`\n ex_inventories : list, optional\n List of example inventories to use for plotting\n delta : dict\n Two delta functions (the derivative of the degradation)\n \"\"\"\n \n import 
matplotlib.pyplot as pp\n \n x = np.linspace(0,1,100)\n \n #ax1 = pp.subplot()\n \n for ei in ex_inventories:\n y = np.array([degrad(ei, ix) for ix in x])\n pp.plot(100*x,100*y,label=\"$d(x,y-x)$,$x=%2.0f\\\\%%$\" % (100*ei))\n \n #ax2 = ax1.twinx()\n \n if delta is not None:\n pp.plot(100*x, 100*delta['charge'](x), '.', label='$\\\\delta_+$')\n pp.plot(100*x, 100*delta['discharge'](x), '.', label='$\\\\delta_-$')\n \n pp.xlabel('New State of Charge (%): $y$')\n pp.ylabel('Capacity Loss (%): $d(x,y-x)$')\n \n pp.legend(loc=9)\n pp.grid()\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_230","text":"100-1000\n# Copyright (c) 2020 Graphcore Ltd. All rights reserved.\nimport numpy as np\nimport scipy as sp\nfrom scipy import sparse\nimport os\nimport ctypes\nimport popart\n\nso_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n \"..\/..\/custom_ops.so\")\nctypes.cdll.LoadLibrary(so_path)\n\n\"\"\"\nThis example presents the extension of the sparse_softmax_subblock_demo.py example to multiple inputs\nIn NLP the attention matrix will have dims [batch_size, heads, n_sequence, n_sequence]\nWhere each batch item and head may have a different sparsity pattern.\n\nIn this example there are two attention heads each with a different mask (and number of active blocks)\n\"\"\"\n\n# INPUT DATA\nn_windows = 2\nn_sequence = 256\nwindow_size = n_sequence\/\/n_windows\nblocksize = [16, 16]\n\n\ndef type1_mask(window_size, n_windows, blocksize_x):\n # Add the sparsity for the first attention head (autoregressive windows)\n auto_mask = sp.sparse.tril(np.ones([window_size, window_size]), k = 0)\n summary_mask = sp.sparse.lil_matrix((window_size, window_size))\n summary_mask[:, window_size-blocksize_x:] = 1\n global_mask = sp.sparse.kron(sp.sparse.tril(np.ones([n_windows, n_windows]), k = -1), summary_mask)\n global_mask = (global_mask + sp.sparse.kron(sp.sparse.eye(n_windows), auto_mask)).sign()\n return global_mask\n\n\ndef type2_mask(n_sequence, blocksize_x):\n # Local mask attends to local block plus one backward (a bit like reformer)\n # Autoregressive block on diagonal\n A = np.expand_dims(sp.sparse.tril(np.ones((blocksize_x, blocksize_x)), 0).toarray(), 0)\n A = A.repeat(n_sequence\/\/blocksize_x, axis = 0)\n mask = sp.sparse.block_diag(A)\n\n # Add full blocks on the -1 diagonal\n C = sp.sparse.dia_matrix((np.ones((1, n_sequence)), [-1]), shape=[n_sequence\/\/blocksize_x]*2)\n mask += sp.sparse.kron(C, np.ones((blocksize_x, blocksize_x)))\n return mask\n\n\ndef mask_to_blocks(global_mask, blocksize):\n # Get the block sparse format\n bsr = sp.sparse.bsr_matrix(global_mask, blocksize = blocksize)\n bsr.eliminate_zeros() # need to call this to eliminate blocks of all zeros\n\n # The dense blocks\n blocks = np.reshape(bsr.data, [bsr.data.shape[0], -1])\n blocks = sp.float32(list(blocks))\n\n # Dense mask for each active block\n mask_data = np.array([[[1]]]*len(bsr.indices))\n active_mask = sp.sparse.bsr_matrix((mask_data, bsr.indices, bsr.indptr)).toarray()\n active_mask = list(active_mask.flatten())\n return blocks, active_mask\n\n# Get the two attention patterns\nhead1_blocks, head1_sparsity = mask_to_blocks(type1_mask(window_size, n_windows, blocksize[0]), blocksize)\nhead2_blocks, head2_sparsity = mask_to_blocks(type2_mask(n_sequence, blocksize[0]), blocksize)\n\n\ndef concat(h1, h2):\n out = np.concatenate((h1, h2), 0)\n out = np.tile(out, [2, 1])\n return out\n\n# Build a matrix which is [2, 2, 256, 256] (B, H, S, S)\nmatrix_dims = [2, 2, n_sequence, 
n_sequence]\ninput_blocks = concat(head1_blocks, head2_blocks)\nsparsity = np.tile([*head1_sparsity, *head2_sparsity], 2)\n# There are 4 groups in total (B*H)\ngroup_sizes = np.tile([len(head1_blocks), len(head2_blocks)], 2)\n# note that group_sizes are equal to [80, 31, 80, 31]\n\n# #### MODEL CREATION ####\nbuilder = popart.Builder()\nlogits = np.array(list(input_blocks), dtype = sp.float32)\nlogits = builder.addInitializedInputTensor(logits, \"logits\")\n\nprobs = builder.customOp(opName = \"BsSoftmax\",\n opVersion = 1,\n domain = \"ai.graphcore\",\n inputs = [logits],\n attributes = {\n \"matrixDims\": matrix_dims,\n \"blockSize\": blocksize,\n \"sparsity\": sparsity.tolist(),\n \"groupSizes\": group_sizes.tolist(),\n \"subBlockMaskPerGroup\": \"[ZeroUpperTriangle, ZeroUpperTriangle, ZeroUpperTriangle, ZeroUpperTriangle]\"\n })[0]\ndlogits = popart.reservedGradientPrefix() + logits # the gradient tensor's name\nupstream_grad = popart.reservedGradientPrefix() + probs # the gradient tensor's name\n\n# Make some blocks to regress agains just so there are gradients\nexpected_tokens = np.zeros_like(input_blocks) + np.eye(16).flatten()\nexpected_tokens = -sp.float32(np.array(list(expected_tokens))) # negative sign for negative logprob\nexpected_tokens = builder.aiOnnx.constant(expected_tokens, 'expected_tokens')\n\npbias = builder.aiOnnx.constant(np.zeros([1, input_blocks.shape[-1]], dtype=np.float32)+1e-6, 'pbias')\nbiased_probs = builder.aiOnnx.add([probs, pbias])\nlogprobs = builder.aiOnnx.log([biased_probs])\n\nout = builder.aiOnnx.mul([logprobs, expected_tokens])\nloss = builder.aiGraphcore.l1loss([out], 1.0)\n\n# Describe how to run the model\nanchor_desc = {probs: popart.AnchorReturnType(\"ALL\"), dlogits: popart.AnchorReturnType(\"ALL\"), upstream_grad: popart.AnchorReturnType(\"ALL\")}\ndataFlow = popart.DataFlow(1, anchor_desc)\n\nsession = popart.TrainingSession(fnModel = builder.getModelProto(),\n loss = loss,\n deviceInfo = popart.DeviceManager().acquireAvailableDevice(1),\n optimizer = popart.ConstSGD(0.01),\n dataFlow = dataFlow)\n\n# Compile graph\nsession.prepareDevice()\n\n# Create buffers to receive results from the execution\nanchors = session.initAnchorArrays()\n\n# TRAINING\nsession.weightsFromHost()\nstepio = popart.PyStepIO({}, anchors)\nsession.run(stepio)\nprint(\"Mean max grad of each row: \", np.mean(np.max(anchors[dlogits].reshape([-1, *blocksize]), axis = -1)))\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_231","text":"\"\"\"\nUtility functions needed to compute the HP4-EDA descriptors\n\n@author: ()\n\"\"\"\n# ------------------------------------------------------------------------------------------------- #\n#Import the needed library.\nimport numpy as np\nimport glob, re\nfrom scipy.spatial.distance import pdist, squareform \n\n# ------------------------------------------------------------------------------------------------- #\n#Generic Function that returns a SORTED list of ALL filenames in a given directory(path), in ascending order.\ndef sort_directoryfiles(path, ext):\n '''\n INPUTS:\n\t\t(i) path: (Path to the directory containing files. Example: 'c:\/ekpo\/dataset\/')\n\t\t(ii) ext: (Extension of the files we intend to sort. Example: '.txt')\n OUTPUTS:\n\t\t(i) sortedf: (List of ONLY filenames in 'path'. 
Example: '1.txt, 2.txt, ..., 9.txt, 10.txt, 11.txt ..., 99.txt, 100.txt,')\n \n AUTHOR: () \n '''\n filepaths = glob.glob1(path,'*'+ ext)\n sortedf = sorted(filepaths, key=lambda x:float(re.findall(\"(\\d+)\",x)[0]))\n sortedpath = []\n for i in np.arange(0, len(sortedf)):\n fullpath = path + sortedf[i]\n sortedpath.append(fullpath)\n return sortedpath\n\n\n# ------------------------------------------------------------------------------------------------- #\ndef compare_allDescriptors_EMD(allDescriptors, outputdir, save_matrix_as = True):\n\t'''\n\tPURPOSE:\n\tTo compute the EMD (Wasserstein Distance) between a set of shape-descriptors.\n\n\tINPUTS:\n\t\t(i) allDescriptors: An [M x K] matrix of all descriptors, where M is the total number of models\/objects in the database, and K is the length of each descriptor.\n\t\t(ii) outputdir: Location or Directory, where the output to this function would be saved in. E.g: outputdir = \"c:\/myPyGraphics\/2018_Research_Implementations\/5_May_2018\/ekpoMayImplementations\/\"\n\t\t(iii) save_matrix_as: Default(None): If 'save_matrix_as' is given, This MUST be 'STRING' input, and this function adds the '.matrix' extension. E.g: \"jaccardDistanceMatrix_spbLSD.txt\"\n\tOUTPUT: \n\t\t(i) Dist_matrix: An N x N matrix, where the ij entry is the wasserstein_distance between the shape-descriptor for point cloud i and point cloud j.\n\tAUTHOR: () \n\t'''\n\tpwdist = pdist(allDescriptors, wasserstein_distance) \n\tDist_matrix = squareform(pwdist)\n\tif save_matrix_as:\n\t\tdescrpath = outputdir + save_matrix_as + \".matrix\"\n\t\tnp.savetxt(descrpath, Dist_matrix, fmt='%f')\n\t\t\n\treturn Dist_matrix\n# ------------------------------------------------------------------------------------------------- #\n\n\ndef compare_allDescriptors_kld(allDescriptors, outputdir, save_matrix_as = True):\n\t'''\n\tPURPOSE:\n\tTo compute the Kullback Leibner Divergence Similarity\/Distance between a set of shape-descriptors.\n\n\tINPUTS:\n\tallDescriptors: An MxK matrix of all descriptors, where M is the total number of models(3D meshes) in the database, and K is the length of each descriptor.\n\toutputdir: Location or Directory, where the output to this function would be saved in. E.g: outputdir = \"c:\/myPyGraphics\/2018_Research_Implementations\/5_May_2018\/ekpoMayImplementations\/\"\n\tsave_matrix_as: Default(None): If 'save_matrix_as' is given, This MUST be 'STRING' input, and this function adds '.txt' extension. 
E.g: \"kldDistanceMatrix_spbLSD.txt\"\n\n\tOUTPUT: \n\tDist_matrix: An N x N matrix, where the ij entry is the KLD distance between the shape-descriptor for point cloud i and point cloud j.\n\t'''\n\tpw_dist = pdist(allDescriptors, kullback_divergence)\n\tDist_matrix = squareform(pw_dist)\n\tif save_matrix_as:\n\t\tdescrpath = outputdir + save_matrix_as + \".matrix\"\n\t\tnp.savetxt(descrpath, Dist_matrix, fmt='%f')\n\t\t\n\treturn Dist_matrix"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_232","text":"#IMPORTING LIBRARIES\nimport numpy as np \nimport pandas as pd\nimport os\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom statistics import mean\nfrom sklearn.metrics import accuracy_score\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy.stats import norm\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.preprocessing import LabelEncoder\nfrom scipy import stats\nimport random\nfrom matplotlib import rcParams\nimport re\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport tensorflow as tf\nimport tensorflowjs as tfjs\nfrom tensorflow.keras import models, regularizers\nimport cv2\nimport tensorflow as tf\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.layers import Conv2D,MaxPool2D,Dropout,Flatten,Dense,BatchNormalization\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom main_module import get_acc,model,split #IMPORTING FROM main_module.py\nimport warnings\n\n#######################CONFIG_ONLY########################################\n\n#SETTING UP SOME CONFIG\nwarnings.filterwarnings(\"ignore\")\npd.pandas.set_option('display.max_columns',None)\npd.pandas.set_option('display.max_rows',None)\n\n#CHECKING TF VERSIONS\nprint(\"tf version : {}\".format(tf.__version__)) #IN MY CASE ITS 2.3+\nprint(\"tfjs version : {}\".format(tfjs.__version__)) #IN MY CASE ITS 2.7.0\n\n#SEEDING EVERYTHING\ndef seed_everything(seed):\n np.random.seed(seed)\n tf.random.set_seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n os.environ['TF_DETERMINISTIC_OPS'] = '1'\n os.environ['TF_KERAS'] = '1'\n\nSEED = 42\nseed_everything(SEED)\n\nwidth = 128\nheight = 128\n\n#######################CONFIG_ONLY########################################\n\n\n#FOR GENERATING DATA ON THE FLY\ndatagen = ImageDataGenerator(rescale=1\/255.0, validation_split=0.2)\n\n\n\ntrainDatagen = datagen.flow_from_directory(directory='..\/input\/cell-images-for-detecting-malaria\/cell_images\/cell_images\/',\n target_size=(width,height),\n class_mode = 'binary',\n batch_size = 16,\n subset='training')\n\n\n\nvalDatagen = datagen.flow_from_directory(directory='..\/input\/cell-images-for-detecting-malaria\/cell_images\/cell_images\/',\n target_size=(width,height),\n class_mode = 'binary',\n batch_size = 16,\n subset='validation')\n\n\n\n\n#MODEL ARCHITECTURE\nmodel = 
Sequential()\nmodel.add(Conv2D(16,(3,3),activation='relu',input_shape=(128,128,3)))\nmodel.add(MaxPool2D(2,2))\nmodel.add(Dropout(0.2))\n\nmodel.add(Conv2D(32,(3,3),activation='relu'))\nmodel.add(MaxPool2D(2,2))\nmodel.add(Dropout(0.3))\n\nmodel.add(Conv2D(64,(3,3),activation='relu'))\nmodel.add(MaxPool2D(2,2))\nmodel.add(Dropout(0.3))\n\nmodel.add(Flatten())\nmodel.add(Dense(64,activation='relu'))\nmodel.add(Dropout(0.5))\n\nmodel.add(Dense(1,activation='sigmoid'))\nmodel.summary()\n\nmodel.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])\n\n\n#ADDING EARLYSTOP\nearly_stop = EarlyStopping(monitor='val_loss',patience=2)\n\n# FITTING DATA TO THE MODEL\nhistory = model.fit_generator(generator = trainDatagen,\n steps_per_epoch = len(trainDatagen),\n epochs =20,\n validation_data = valDatagen,\n validation_steps=len(valDatagen),\n callbacks=[early_stop])\n\n\ntfjs.converters.save_keras_model(model, 'malaria_model')\n\n\n\n\n\n\n\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_233","text":"tests\/opt_matrix_test.py\n\"\"\"\nTest of the basic optimization functionality by basic matrix problems.\n\nMatrices are the simplest tensors. Here we have simple matrix examples that are\nvery easy to think about. All the core optimization strategies should first be\ntested here.\n\n\"\"\"\n\nimport pytest\nfrom drudge import Range, Drudge\nfrom sympy import symbols, IndexedBase\n\nfrom gristmill import optimize, verify_eval_seq, get_flop_cost\n\n\n@pytest.fixture\ndef three_ranges(spark_ctx):\n \"\"\"Fixture with three ranges.\n\n This drudge has three ranges, named M, N, L with sizes m, n, and l,\n respectively. It also has a substitution dictionary setting n = 2m and l\n = 3m.\n\n \"\"\"\n\n dr = Drudge(spark_ctx)\n\n # The sizes.\n m, n, l = symbols('m n l')\n\n # The ranges.\n m_range = Range('M', 0, m)\n n_range = Range('N', 0, n)\n l_range = Range('L', 0, l)\n\n dr.set_dumms(m_range, symbols('a b c d e f g'))\n dr.set_dumms(n_range, symbols('i j k l m n'))\n dr.set_dumms(l_range, symbols('p q r'))\n dr.add_resolver_for_dumms()\n dr.set_name(m, n, l)\n\n dr.substs = {\n n: m * 2,\n l: m * 3\n }\n\n return dr\n\n\n#\n# Test of core functionality\n# --------------------------\n#\n\n\ndef test_matrix_chain(three_ranges):\n \"\"\"Test a basic matrix chain multiplication problem.\n\n Here a very simple matrix chain multiplication problem with three\n matrices are used to test the factorization facilities. In this simple\n test, we will have three matrices :math:`x`, :math:`y`, and :math:`z`,\n which are of shapes :math:`m\\\\times n`, :math:`n \\\\times l`, and :math:`l\n \\\\times m` respectively. 
In the factorization, we are going to set\n :math:`n = 2 m` and :math:`l = 3 m`.\n\n \"\"\"\n\n dr = three_ranges\n p = dr.names\n m, n, l = p.m, p.n, p.l\n\n # The indexed bases.\n x = IndexedBase('x', shape=(m, n))\n y = IndexedBase('y', shape=(n, l))\n z = IndexedBase('z', shape=(l, m))\n\n target_base = IndexedBase('t')\n target = dr.define_einst(\n target_base[p.a, p.b],\n x[p.a, p.i] * y[p.i, p.p] * z[p.p, p.b]\n )\n\n # Perform the factorization.\n targets = [target]\n stats = {}\n eval_seq = optimize(targets, substs=dr.substs, stats=stats)\n assert stats['Number of nodes'] < 2 ** 3\n assert len(eval_seq) == 2\n\n # Check the correctness.\n assert verify_eval_seq(eval_seq, targets)\n\n # Check the cost.\n cost = get_flop_cost(eval_seq)\n leading_cost = get_flop_cost(eval_seq, leading=True)\n expected_cost = 2 * l * m * n + 2 * m ** 2 * n\n assert cost == expected_cost\n assert leading_cost == expected_cost\n\n\n@pytest.mark.parametrize('rand_constr', [True, False])\ndef test_shallow_matrix_factorization(three_ranges, rand_constr):\n \"\"\"Test a shallow matrix multiplication factorization problem.\n\n In this test, there are four matrices involved, X, Y, U, and V. The final\n expression to optimize is mathematically\n\n .. math::\n\n (2 X - Y) * (2 U + V)\n\n Here, the expression is to be given in its expanded form originally, and\n we test if it can be factorized into something similar to what we have\n above. Here we have the signs and coefficients to have better code\n coverage for these cases. This test case more concentrates on the\n horizontal complexity in the input.\n\n \"\"\"\n\n #\n # Basic context setting-up.\n #\n\n dr = three_ranges\n p = dr.names\n\n m = p.m\n a, b, c, d = p.a, p.b, p.c, p.d\n\n # The indexed bases.\n x = IndexedBase('X')\n y = IndexedBase('Y')\n u = IndexedBase('U')\n v = IndexedBase('V')\n t = IndexedBase('T')\n\n # The target.\n target = dr.define_einst(\n t[a, b],\n 4 * x[a, c] * u[c, b] + 2 * x[a, c] * v[c, b]\n - 2 * y[a, c] * u[c, b] - y[a, c] * v[c, b]\n )\n targets = [target]\n\n # The actual optimization.\n res = optimize(targets, rand_constr=rand_constr)\n assert len(res) == 3\n\n # Test the correctness.\n assert verify_eval_seq(res, targets, simplify=False)\n\n # Test the cost.\n cost = get_flop_cost(res)\n leading_cost = get_flop_cost(res, leading=True)\n assert cost == 2 * m ** 3 + 2 * m ** 2\n assert leading_cost == 2 * m ** 3\n cost = get_flop_cost(res, ignore_consts=False)\n assert cost == 2 * m ** 3 + 4 * m ** 2\n\n\ndef test_deep_matrix_factorization(three_ranges):\n \"\"\"Test a basic matrix multiplication factorization problem.\n\n Similar to the shallow factorization test, the final expression to optimize\n is mathematically\n\n .. math::\n\n (X - 2 Y) * U * V\n\n Different from the shallow test case, here we concentrate more on the\n treatment of depth complexity in the input. 
The sum intermediate needs to\n be factored again.\n\n \"\"\"\n\n #\n # Basic context setting-up.\n #\n\n dr = three_ranges\n p = dr.names\n\n m = p.m\n a, b, c, d = p.a, p.b, p.c, p.d\n\n # The indexed bases.\n x = IndexedBase('X')\n y = IndexedBase('Y')\n u = IndexedBase('U')\n v = IndexedBase('V')\n t = IndexedBase('T')\n\n # The target.\n target = dr.define_einst(\n t[a, b], x[a, c] * u[c, d] * v[d, b] - 2 * y[a, c] * u[c, d] * v[d, b]\n )\n targets = [target]\n\n # The actual optimization.\n res = optimize(targets)\n assert len(res) == 3\n\n # Test the correctness.\n assert verify_eval_seq(res, targets, simplify=True)\n\n # Test the cost.\n cost = get_flop_cost(res)\n leading_cost = get_flop_cost(res, leading=True)\n assert cost == 4 * m ** 3 + m ** 2\n assert leading_cost == 4 * m ** 3\n cost = get_flop_cost(res, ignore_consts=False)\n assert cost == 4 * m ** 3 + 2 * m ** 2\n\n # Test disabling summation optimization.\n res = optimize(targets, opt_sum=False)\n assert verify_eval_seq(res, targets, simplify=True)\n new_cost = get_flop_cost(res, ignore_consts=False)\n assert new_cost - cost != 0\n\n\ndef test_factorization_of_two_products(three_ranges):\n \"\"\"Test a sum where we have two disjoint products.\n\n The final expression to optimize is\n\n .. math::\n\n 2 X (3 U + 5 V) - 7 Y (11 U + 13 V) + 17 T\n\n In this test case, we concentrate on the handling of multiple disjoint\n possible factorization inside a single sum.\n\n \"\"\"\n\n #\n # Basic context setting-up.\n #\n\n dr = three_ranges\n p = dr.names\n\n m = p.m\n a, b, c = p.a, p.b, p.c\n\n # The indexed bases.\n x = IndexedBase('X')\n y = IndexedBase('Y')\n u = IndexedBase('U')\n v = IndexedBase('V')\n t = IndexedBase('T')\n\n # The target.\n target = dr.define_einst(\n IndexedBase('r')[a, b],\n 6 * x[a, c] * u[c, b] + 10 * x[a, c] * v[c, b]\n - 77 * y[a, c] * u[c, b] - 91 * y[a, c] * v[c, b]\n + 17 * t[a, b]\n )\n targets = [target]\n\n # The actual optimization.\n res = optimize(targets)\n assert len(res) == 3\n assert res[-1].n_terms == 3\n\n # Test the correctness.\n assert verify_eval_seq(res, targets, simplify=True)\n\n # Test the cost.\n cost = get_flop_cost(res)\n assert cost == 4 * m ** 3 + 4 * m ** 2\n\n\ndef test_general_matrix_problem(three_ranges):\n \"\"\"Test optimization of a very general matrix computation.\n\n This is a very general problem trying to test and illustrate many different\n aspects of the optimization, parenthesization, recursion to newly-formed\n factors, and sum of disjoint factorizations. The target to evaluate reads\n\n .. 
math::\n\n (A + 2B) (3C + 5D) (7E + 13F) + (17P + 19Q) (23X + 29Y)\n\n where\n\n - A, B, P, Q is over ranges M, L\n - C, D is over M, N\n - E, F is over N, L\n - X, Y is over L, N\n\n \"\"\"\n\n dr = three_ranges\n p = dr.names\n\n m, n, l = p.m, p.n, p.l\n a, b = p.a, p.b\n i = p.i\n p = p.p\n\n f1 = IndexedBase('A')[a, i] + 2 * IndexedBase('B')[a, i]\n f2 = 3 * IndexedBase('C')[i, p] + 5 * IndexedBase('D')[i, p]\n f3 = 7 * IndexedBase('E')[p, b] + 13 * IndexedBase('F')[p, b]\n f4 = 17 * IndexedBase('P')[a, i] + 19 * IndexedBase('Q')[a, i]\n f5 = 23 * IndexedBase('X')[i, b] + 29 * IndexedBase('Y')[i, b]\n\n target = dr.define_einst(\n IndexedBase('R')[a, b],\n (f1 * f2 * f3 + f4 * f5).expand()\n )\n targets = [target]\n assert target.n_terms == 12\n assert get_flop_cost(targets).subs(dr.substs) == (\n 144 * m ** 4 + 16 * m ** 3 + 11 * m ** 2\n )\n\n eval_seq = optimize(targets, substs=dr.substs)\n\n # Check the correctness.\n assert verify_eval_seq(eval_seq, targets)\n assert len(eval_seq) == 7\n cost = get_flop_cost(eval_seq)\n assert cost.subs(dr.substs) == 20 * m ** 3 + 16 * m ** 2\n\n\n#\n# Test of special cases\n# ---------------------\n#\n\n\n@pytest.mark.xfail(reason='Flaky until the following test is fixed')\ndef test_disconnected_outer_product_factorization(three_ranges):\n \"\"\"Test optimization of expressions with disconnected outer products.\n \"\"\"\n\n dr = three_ranges\n p = dr.names\n\n m = p.m\n a, b, c, d, e = p.a, p.b, p.c, p.d, p.e\n\n # The indexed bases.\n u = IndexedBase('U')\n x = IndexedBase('X')\n y = IndexedBase('Y')\n z = IndexedBase('Z')\n t = IndexedBase('T')\n\n # The target.\n target = dr.define_einst(\n t[a, b],\n u[a, b] * z[c, e] * x[e, c] + u[a, b] * z[c, e] * y[e, c]\n )\n targets = [target]\n\n # The actual optimization.\n res = optimize(targets)\n assert len(res) == 3\n\n # Test the correctness.\n assert verify_eval_seq(res, targets, simplify=False)\n\n # Test the cost.\n cost = get_flop_cost(res)\n leading_cost = get_flop_cost(res, leading=True)\n assert cost == 4 * m ** 2\n assert leading_cost == 4 * m ** 2\n\n\n@pytest.mark.xfail(reason='TODO: Needs investigation')\ndef test_factorization_needing_canonicalization(three_ranges):\n \"\"\"Test a simple factorization needing canonicalization.\n\n The inability of gristmill to fully optimize this test is the ultimate\n reason why the above test is flaky.\n \"\"\"\n\n dr = three_ranges\n p = dr.names\n\n m = p.m\n a, b = p.a, p.b\n\n x = IndexedBase('X')\n y = IndexedBase('Y')\n z = IndexedBase('Z')\n t = Symbol('T')\n\n # The target.\n target = dr.define_einst(\n t, x[b, a] * z[a, b] + y[a, b] * z[b, a]\n )\n targets = [target]\n\n # The actual optimization.\n res = optimize(targets)\n assert len(res) == 2\n\n # Test the correctness.\n assert verify_eval_seq(res, targets, simplify=False)\n\n\ndef test_optimization_of_common_terms(three_ranges):\n \"\"\"Test optimization of common terms in summations.\n\n In this test, there are just two matrices involved, X, Y. The target reads\n\n .. math::\n\n T[a, b] = X[a, b] - X[b, a] + 2 Y[a, b] - 2 Y[b, a]\n\n Ideally, it should be evaluated as,\n\n .. math::\n\n I[a, b] = X[a, b] + 2 Y[a, b]\n T[a, b] = I[a, b] - I[b, a]\n\n or,\n\n .. 
math::\n\n I[a, b] = X[a, b] - 2 Y[b, a]\n T[a, b] = I[a, b] - I[b, a]\n\n \"\"\"\n\n #\n # Basic context setting-up.\n #\n dr = three_ranges\n p = dr.names\n\n a, b, c, d = p.a, p.b, p.c, p.d\n\n # The indexed bases.\n x = IndexedBase('x')\n y = IndexedBase('y')\n t = dr.define_einst(\n IndexedBase('t')[a, b],\n x[a, b] - x[b, a] + 2 * y[a, b] - 2 * y[b, a]\n )\n\n targets = [t]\n eval_seq = optimize(targets)\n assert len(eval_seq) == 2\n verify_eval_seq(eval_seq, targets)\n\n # Check the result when the common symmetrization optimization is disabled.\n eval_seq = optimize(targets, opt_symm=False)\n assert len(eval_seq) == 1\n verify_eval_seq(eval_seq, targets)\n\n\ndef test_eval_compression(three_ranges):\n \"\"\"Test compression of optimized evaluations.\n\n Here we have two targets,\n\n .. math::\n\n U X V + U Y V\n\n and\n\n .. math::\n\n U X W + U Y W\n\n and it has been deliberately made such that the multiplication with U\n should be carried out first. Then after the factorization of U, we have\n an intermediate U (X + Y), which is a sum of a single product\n intermediate. This test succeeds when we have two intermediates only,\n without the unnecessary addition of a single product.\n\n \"\"\"\n\n # Basic context setting-up.\n dr = three_ranges\n p = dr.names\n\n a = p.a # Small range\n i, j, k = p.i, p.j, p.k # Big range\n\n # The indexed bases.\n u = IndexedBase('U')\n v = IndexedBase('V')\n w = IndexedBase('W')\n x = IndexedBase('X')\n y = IndexedBase('Y')\n\n s = IndexedBase('S')\n t1 = IndexedBase('T1')\n t2 = IndexedBase('T2')\n\n # The target.\n s_def = dr.define_einst(\n s[i, j],\n u[i, k] * x[k, j] + u[i, k] * y[k, j]\n )\n targets = [dr.define_einst(\n t1[i, j],\n s_def[i, a] * v[a, j]\n ), dr.define_einst(\n t2[i, j],\n s_def[i, a] * w[a, j]\n )]\n\n # The actual optimization.\n res = optimize(targets, substs=dr.substs)\n assert len(res) == 4\n\n # Test the correctness.\n assert verify_eval_seq(res, targets, simplify=False)\n\n\n@pytest.mark.parametrize('res_at_end', [True, False])\ndef test_interleaving_res_interm(three_ranges, res_at_end):\n r\"\"\"Test the interleaving of results and intermediates.\n\n Here we have intermediate,\n\n .. math::\n\n I = X Y\n\n and result\n\n .. math::\n R1 = I * 2\n\n and result\n\n .. 
math::\n\n R2 = I * tr(R1)\n\n \"\"\"\n\n dr = three_ranges\n p = dr.names\n a, b, c, d, e = p.a, p.b, p.c, p.d, p.e\n\n x = IndexedBase('X')\n y = IndexedBase('Y')\n r1 = IndexedBase('R1')\n r2 = IndexedBase('R2')\n\n r1_def = dr.define_einst(r1[a, b], x[a, c] * y[c, b] * 2)\n r2_def = dr.define_einst(r2[a, b], x[a, c] * y[c, b] * x[d, e] * y[e, d])\n\n origs = [r1_def, r2_def]\n eval_seq = optimize(origs, res_at_end=res_at_end)\n\n assert verify_eval_seq(eval_seq, origs)\n\n assert len(eval_seq) == 4\n if res_at_end:\n assert eval_seq[2].base == r1\n else:\n assert eval_seq[1].base == r1\n assert eval_seq[3].base == r2\n\n for i in eval_seq:\n assert i.if_interm == (not (str(i.base)[0] == 'R'))\n continue\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_234","text":"import math\nfrom fractions import Fraction\nfrom django.contrib import admin\nfrom products.models import Product, Image\n\n\nclass ProductLabelInlineAdmin(admin.TabularInline):\n \"\"\"Selection to select labels for a product.\"\"\"\n model = Product.labels.through\n\nclass ImageInlineAdmin(admin.TabularInline):\n \"\"\"Selection to select images for a product.\"\"\"\n model = Image\n\n\nclass ProductAdmin(admin.ModelAdmin):\n list_display = ('id','name','brand','active','stock')\n fields = ('id','name','review_calculated','stock','price','active', 'category','brand','description','info','weight','contents', 'label_info')\n readonly_fields = ('id','info','review_calculated','label_info')\n list_filter = ('brand',)\n list_editable = ('active','stock')\n search_fields = ('name','id','brand__name')\n inlines = (ProductLabelInlineAdmin, ImageInlineAdmin)\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n def info(self, obj):\n return \"If the product is not cereal please leave weight & contents fields empty.\"\n\n def label_info(self, obj):\n return \"Each label can only be selected once.\"\n\n\nclass ProductDiscountAdmin(admin.ModelAdmin):\n list_display = ('id', 'name', 'percentage_off', 'price', 'new_price')\n list_display_links = None\n fields = ['name', 'brand', 'description', 'contents', 'weight', 'price', 'category', 'active', 'stock',\n 'percentage_off', 'discounted_price']\n readonly_fields = ('id',)\n list_filter = ('brand',)\n list_editable = ('percentage_off',)\n search_fields = ('name', 'id', 'brand__name')\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n def new_price(self, obj):\n \"\"\"Returns and sets the discounted price for a given product object.\"\"\"\n\n # If percentage_off exceeds 100, reset the price\n if obj.percentage_off > 100 or obj.percentage_off <= 0:\n obj.percentage_off = 0\n obj.discounted_price = None\n obj.save()\n\n return obj.price\n\n # Calculate the discounted price\n price = Fraction(obj.price)\n percentage_off = Fraction(obj.percentage_off)\n discount = (percentage_off \/ 100) * price\n final_price = Fraction(price - discount)\n new = float(Fraction(math.floor(final_price*100), 100))\n\n # Save and set the discounted price\n obj.discounted_price = new\n obj.save()\n\n return new\n\n def has_add_permission(self, request):\n return False\n\n\nclass ProductDiscount(Product):\n \"\"\"Proxy model so we can use the Product model\n on two different admin pages.\"\"\"\n class Meta:\n proxy = True\n\n\n# admin.site.register(Label, LabelAdmin)\nadmin.site.register(Product, ProductAdmin)\nadmin.site.register(ProductDiscount, ProductDiscountAdmin)\n\n"} 
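The record above ends with test_matrix_chain expecting, under the substitutions n = 2m and l = 3m, the parenthesization x*(y*z) with cost 2*l*m*n + 2*m**2*n. The following minimal, standalone sympy sketch (an editorial illustration using only the cost model stated in that test's docstring, not gristmill itself) reproduces the comparison between the two possible orderings:

# Hedged sketch: compares the two matrix-chain parenthesizations from
# test_matrix_chain under the substitutions n = 2m, l = 3m.
from sympy import symbols

m, n, l = symbols('m n l', positive=True)
substs = {n: 2 * m, l: 3 * m}

# One multiply and one add per inner-product term, as in get_flop_cost.
cost_xy_first = 2 * m * n * l + 2 * l * m ** 2   # (x*y) computed first, then *z
cost_yz_first = 2 * n * l * m + 2 * n * m ** 2   # (y*z) computed first, then x*

print(cost_xy_first.subs(substs))   # 18*m**3
print(cost_yz_first.subs(substs))   # 16*m**3, i.e. 2*l*m*n + 2*m**2*n

Both orderings share the 2*m*n*l term for the first product, so the substitutions make the second product the deciding factor: 2*n*m**2 (n = 2m) beats 2*l*m**2 (l = 3m), which is why the test accepts 2*l*m*n + 2*m**2*n as both the total and the leading cost.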
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_235","text":"#! usr\/bin\/env python\nimport numpy as np\nimport seaborn as sns\nimport scipy as sp\nimport functools\nimport numpy as np\nfrom scipy.stats import multivariate_normal\nimport scipy.stats as stats\nimport time\nimport scipy as scipy\nimport sys\nimport pandas as pd\nfrom scipy.stats import norm\nfrom numpy import linalg as la\nimport pandas as pd\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.model_selection import train_test_split\nimport itertools\n__author__ = ''\n\n\nclass IBO(object):\n \"\"\"\n IBO: Intelligent Bayesian OPtimization\n A class to perform Bayesian Optimization on a 1D or 2D domain.\n Can either have an objective function to maximize or a true function\n to maximize\"\"\"\n\n def __init__(self, kernel = 'squared_kernel'):\n \"\"\"Define the parameters for the bayesian optimization.\n\n The train points should be x,y coordinate that you already know about your\n function\"\"\"\n if kernel == 'squared_kernel':\n self.kernel = self.__squared_kernel__\n elif kernel == 'matern':\n self.kernel = self.__matern_kernel__\n\n def fit(self, train_points_x, train_points_y,\n test_domain, train_y_func, y_func_type = 'real',\n samples = 10 , test_points_x = None, test_points_y = None,\n model_train_points_x = None, model_train_points_y = None,\n covariance_noise = 5e-5, n_posteriors = 30, kernel_params = None,\n model_obj = GradientBoostingRegressor,\n verbose = True):\n \"\"\"Define the parameters for the GP.\n PARAMS:\n train_points_x, - x coordinates to train on\n train_points_y, - resulting output from the function, either objective or\n true function\n test_domain - the domain to test\n test_points_y - If using ab objective function, this is from the\n train test split data\n test_points_x = if using an objective function, this is from the\n train test split\n model - the model to fit for use with the objective function. 
Currently\n works with Gradient Boosting\n y_func_type - either the real function or the objective function.\n The objective function implemented in negative MSE (since BO is\n a maximization procedure)\n verbose = Whether to print out the points Bayesian OPtimization is\n picking\n train_y_func - This can either be an objective function or a true function\n kernel_params: dictionary of {'length':value} for squaredkernel\n model_train_points: the training points for the objective function\n \"\"\"\n\n try:\n type(train_points_x).__module__ == np.__name__\n type(train_points_y).__module__ == np.__name__\n except Exception as e:\n print(e)\n return ' You need to input numpy types'\n # Store the training points\n self.train_points_x = train_points_x\n self.train_points_y = train_points_y\n self.test_domain = test_domain\n\n # setup the kernel parameters\n if kernel_params != None:\n self.squared_length = kernel_params['rbf_length']\n else:\n self.squared_length = None\n\n\n # Y func can either be an objective function, or the true underlying func.\n if y_func_type == 'real':\n self.train_y_func = train_y_func\n elif y_func_type == 'objective':\n if model_obj == None:\n return ' you need to pass in a model (GradientBoostingRegressor)'\n\n # Only if using an objective function, from the 'test' split\n self.test_points_x = test_points_x\n self.test_points_y = test_points_y\n self.model_train_points_x = model_train_points_x\n self.model_train_points_y = model_train_points_y\n # model to train and fit\n self.model = model_obj\n self.train_y_func = self.hyperparam_choice_function\n\n\n\n # store the testing parameters\n self.covariance_noise = covariance_noise\n self.n_posteriors = n_posteriors\n self.samples = samples\n self.verbose = verbose\n\n\n if self.train_points_x.shape[1] ==1: # one dimension\n self.dimensions ='one'\n elif self.train_points_x.shape[1] ==2:\n self.dimensions = 'two'\n else:\n print('Either you entered more than two dimensions, \\\n or not a numpy array.')\n print(type(self.train_points_x))\n # create the generator\n self.bo_gen = self.__sample_from_function__(verbose=self.verbose)\n\n\n\n\n def predict(self):\n \"\"\"returns x_sampled_points, y_sampled_points, best_x, best_y\"\"\"\n\n x_sampled_points, y_sampled_points, sampled_var, \\\n best_x, best_y, improvements, domain, mus = next(self.bo_gen)\n\n return x_sampled_points, y_sampled_points, best_x, best_y\n\n def maximize(self, n_steps=10, verbose = None):\n \"\"\"For the n_steps defined, find the best x and y coordinate\n and return them.\n Verbose controls whether to print out the points being sampled\"\"\"\n verbose_ = self.verbose\n self.samples = n_steps\n bo_gen = self.__sample_from_function__(verbose = verbose_)\n for _ in range(self.samples):\n x_sampled_points, y_sampled_points, sampled_var, \\\n best_x, best_y, improvements, domain, mus = next(self.bo_gen)\n\n self.best_x = best_x\n self.best_y = best_y\n # return the best PARAMS\n return best_x, best_y\n\n\n\n def __test_gaussian_process__(self, return_cov = False,\n return_sample = False):\n \"\"\"Test one new point in the Gaussian process or an array of points\n Returns the mu, variance, as well as the posterior vector.\n Improvements is the expected improvement for each potential test point.\n Domain, is the domain over which you are searching.\n\n Return cov = True will return the full covariance matrix.\n\n If return_sample= True\n returns samples ( a vector) from the\n informed posterior and the uninformed prior distribution\n\n Covariance diagonal 
noise is used to help enforce positive definite matrices\n\n \"\"\"\n\n # Update the covaraince matrices\n self.covariance_train_train = self.kernel(self.train_points_x,\n self.train_points_x, train=True)\n self.covariance_test_train = self.kernel(self.test_domain,\n self.train_points_x)\n self.covariance_test_test = self.kernel(self.test_domain,\n self.test_domain)\n\n\n # Use cholskey decomposition to increase speed for calculating mean\n try :# First try,\n L_test_test = np.linalg.cholesky(self.covariance_test_test + \\\n self.covariance_noise * np.eye(len(self.covariance_test_test)))\n L_train_train = np.linalg.cholesky(self.covariance_train_train + \\\n self.covariance_noise * np.eye(len(self.covariance_train_train)))\n Lk = np.linalg.solve(L_train_train, self.covariance_test_train.T)\n mus = np.dot(Lk.T, np.linalg.solve(L_train_train,\n self.train_points_y)).reshape(\n (len(self.test_domain),))\n # Compute the standard deviation so we can plot it\n s2 = np.diag(self.covariance_test_test) - np.sum(Lk**2, axis=0)\n stdv = np.sqrt(abs(s2))\n\n except Exception as e:\n print(e)#LinAlgError: # In case the covariance matrix is not positive definite\n # Find the near positive definite matrix to decompose\n decompose_train_train = self.nearestPD(\n self.covariance_train_train + self.covariance_noise * np.eye(\n len(self.train_points_x)))\n decompose_test_test = self.nearestPD(\n self.covariance_test_test + self.covariance_noise * np.eye(\n len(self.test_domain)))\n\n # cholskey decomposition on the nearest PD matrix\n L_train_train = np.linalg.cholesky(decompose_train_train)\n L_test_test = np.linalg.cholesky(decompose_test_test)\n Lk = np.linalg.solve(L_train_train, self.covariance_test_train.T)\n mus = np.dot(Lk.T, np.linalg.solve(L_train_train,\n self.train_points_y)).reshape((len(self.test_domain)),)\n # Compute the standard deviation so we can plot it\n s2 = np.diag(self.covariance_test_test) - np.sum(Lk**2, axis=0)\n stdv = np.sqrt(abs(s2))\n\n # ##### FULL INVERSION ####\n # mus = covariance_test_train @ np.linalg.pinv(covariance_train_train) @ train_y_numbers\n # s2 = covariance_test_test - covariance_test_train @ np.linalg.pinv(covariance_train_train ) \\\n # @ covariance_test_train.T\n\n def sample_from_posterior(n_priors=3):\n \"\"\"Draw samples from the prior distribution of the GP.\n len(test_x) is the number of samplese to draw.\n Resource: http:\/\/katbailey.github.io\/post\/gaussian-processes-for-dummies\/.\n\n N-Posteriors \/ N-Priors tells the number of functions to samples from the dsitribution\"\"\"\n\n\n try: # try inside sample from posterior function\n L = np.linalg.cholesky(self.covariance_test_test +\n self.covariance_noise * np.eye(\n len(self.test_domain))- np.dot(Lk.T, Lk))\n except Exception as e:\n print(e)\n # Find the neareset Positive Definite Matrix\n near_decompose = self.nearestPD(self.covariance_test_test +\n self.covariance_noise * np.eye(\n len(self.test_domain)) - np.dot(Lk.T, Lk))\n L = np.linalg.cholesky(near_decompose.astype(float) )\n # within posterior\n # sample from the posterior\n f_post = mus.reshape(-1,1) + np.dot(L, np.random.normal(\n size=(len(self.test_domain), self.n_posteriors)))\n\n # Sample X sets of standard normals for our test points,\n # multiply them by the square root of the covariance matrix\n f_prior_uninformed = np.dot(L_test_test,\n np.random.normal(size=(len(self.test_domain), n_priors)))\n # For the posterior, the columns are the vector for that function\n return (f_prior_uninformed, f_post)\n\n if return_cov == True:\n 
return y_pred_mean.ravel(), var_y_pred_diag.ravel(), var_y_pred\n\n if return_sample == True:\n f_prior, f_post = sample_from_posterior()\n return mus.ravel(), s2.ravel(), f_prior, f_post\n else:\n return mus.ravel(), s2.ravel()\n\n\n def __sample_from_function__(self, verbose=None):\n \"\"\"Sample N times from the unknown function and for each time find the\n point that will have the highest expected improvement (find the maxima of the function).\n Verbose signifies if the function should print out the points where it is sampling\n\n Returns a generator of x_sampled_points, y_sampled_points, vars_, best_x, best_y, \\\n list_of_expected_improvements, testing_domain, mus\n for improvements. Mus and Vars are the mean and var for each sampled point\n in the gaussian process.\n\n Starts off the search for expected improvement with a coarse search and then hones in on\n the domain the the highest expected improvement.\n\n Note - the y-function can EITHER by the actual y-function (for evaluation\n purposes, or an objective function\n (i.e. - RMSE))\"\"\"\n verbose = self.verbose\n\n\n # for plotting the points sampled\n x_sampled_points = []\n y_sampled_points = []\n best_x = self.train_points_x[np.argmax(self.train_points_y ),:]\n best_y =self.train_points_y [np.argmax(self.train_points_y ),:]\n\n\n\n for i in range(self.samples):\n if i == 0:\n if self.train_points_x .shape[1]==1: ## one dimensional case\n testing_domain = np.array([self.test_domain]).reshape(-1,1)\n else:\n testing_domain = self.test_domain\n\n # find the next x-point to sample\n mus, vars_, prior, post = self.__test_gaussian_process__(\n return_sample = True)\n\n\n sigmas_post = np.var(post,axis=1)\n mus_post = np.mean(post,axis=1)\n # get the expected values from the posterior distribution\n list_of_expected_improvements = self.expected_improvement(\n mus_post, sigmas_post ,best_y)\n\n max_improv_x_idx = np.argmax(np.array(\n list_of_expected_improvements))\n #print(max_improv_x_idx,'max_improv_x_idx')\n max_improv_x = testing_domain[max_improv_x_idx]\n # don't resample the same point\n c = 1\n while max_improv_x in x_sampled_points:\n if c == 1:\n if self.train_points_x .shape[1]==1:\n sorted_points_idx = np.argsort(list(np.array(\n list_of_expected_improvements)))\n else:\n sorted_points_idx = np.argsort(list(np.array(\n list_of_expected_improvements)),axis=0)\n c+=1\n max_improv_x_idx = int(sorted_points_idx[c])\n max_improv_x = testing_domain[max_improv_x_idx]\n # only wait until we've gon through half of the list\n if c > round(len(list_of_expected_improvements)\/2):\n max_improv_x_idx = int(\n np.argmax(list_of_expected_improvements))\n max_improv_x = testing_domain[max_improv_x_idx]\n break\n if self.train_points_x.shape[1]==1:\n max_improv_y = self.train_y_func(max_improv_x)\n else: # Two D\n try: # see if we are passing in the actual function\n max_improv_y = self.train_y_func(\n max_improv_x[0], max_improv_x[1])\n except: # we are passing the objective function in\n max_improv_y = self.train_y_func(\n max_improv_x[0], dimensions = 'two',\n hyperparameter_value_two = max_improv_x[1])\n if max_improv_y > best_y: ## use to find out where to search next\n best_y = max_improv_y\n best_x = max_improv_x\n if verbose:\n print(f\"Bayesian Optimization just sampled point = {best_x}\")\n print(f\"Best x (Bayesian Optimization) = {best_x},\\\n Best y = {best_y}\")\n # append the point to sample\n x_sampled_points.append(max_improv_x)\n y_sampled_points.append(max_improv_y)\n # append our new the newly sampled point to the 
training data\n self.train_points_x = np.vstack((self.train_points_x,\n max_improv_x))\n self.train_points_y = np.vstack((self.train_points_y,\n max_improv_y))\n\n yield x_sampled_points, y_sampled_points, vars_, best_x, best_y, \\\n list_of_expected_improvements, testing_domain, mus\n\n else:\n # append the point to sample\n x_sampled_points.append(max_improv_x)\n y_sampled_points.append(max_improv_y)\n\n # append our new the newly sampled point to the training data\n self.train_points_x = np.vstack((self.train_points_x, max_improv_x))\n self.train_points_y = np.vstack((self.train_points_y, max_improv_y))\n\n yield x_sampled_points, y_sampled_points, vars_, best_x, best_y, \\\n list_of_expected_improvements, testing_domain, mus\n\n\n else:\n\n if self.train_points_x.shape[1]==1:\n testing_domain = np.array([testing_domain]).reshape(-1,1)\n else:\n testing_domain = self.test_domain\n\n mus, vars_, prior, post = self.__test_gaussian_process__(\n return_sample = True)\n\n igmas_post = np.var(post,axis=1)\n mus_post = np.mean(post,axis=1)\n # get the expected values from the posterior distribution\n list_of_expected_improvements = self.expected_improvement(\n mus_post, sigmas_post ,best_y)\n max_improv_x_idx = np.argmax(list_of_expected_improvements)\n max_improv_x = testing_domain[max_improv_x_idx]\n # don't resample the same point\n c = 1\n while max_improv_x in x_sampled_points:\n if c == 1:\n if self.train_points_x .shape[1]==1:\n sorted_points_idx = np.argsort(list(np.array(\n list_of_expected_improvements)))\n else:\n sorted_points_idx = np.argsort(list(np.array(\n list_of_expected_improvements)),axis=0)\n c+=1\n max_improv_x_idx = int(sorted_points_idx[c])\n max_improv_x = testing_domain[max_improv_x_idx]\n # only wait until we've gon through half of the list\n if c > round(len(list_of_expected_improvements)\/2):\n max_improv_x_idx = int(\n np.argmax(list_of_expected_improvements))\n max_improv_x = testing_domain[max_improv_x_idx]\n break\n if self.train_points_x .shape[1]==1:\n max_improv_y = self.train_y_func(max_improv_x)\n else: # Two D\n try: # see if we are passing in the actual function\n max_improv_y = self.train_y_func(\n max_improv_x[0], max_improv_x[1])\n\n except: # we are passing the objective function in\n max_improv_y = self.train_y_func(\n max_improv_x[0], dimensions = 'two',\n hyperparameter_value_two = max_improv_x[1])\n\n if max_improv_y > best_y: ## use to find out where to search next\n best_y = max_improv_y\n best_x = max_improv_x\n if verbose:\n print(f\"Bayesian Optimization just sampled point = {max_improv_x}\")\n print(f\"Best x (Bayesian Optimization) = {best_x}, Best y = {best_y}\")\n # append the point to sample\n x_sampled_points.append(max_improv_x)\n y_sampled_points.append(max_improv_y)\n\n\n # append our new the newly sampled point to the training data\n self.train_points_x = np.vstack((self.train_points_x, max_improv_x))\n self.train_points_y = np.vstack((self.train_points_y, max_improv_y))\n\n yield x_sampled_points, y_sampled_points, vars_, best_x, best_y, \\\n list_of_expected_improvements, testing_domain, mus\n\n else:\n # append the point to sample\n x_sampled_points.append(max_improv_x)\n y_sampled_points.append(max_improv_y)\n\n # append our new the newly sampled point to the training data\n self.train_points_x = np.vstack((self.train_points_x, max_improv_x))\n self.train_points_y = np.vstack((self.train_points_y, max_improv_y))\n\n yield x_sampled_points, y_sampled_points, vars_, best_x, best_y, \\\n list_of_expected_improvements, 
testing_domain, mus\n\n\n\n\n def hyperparam_choice_function(self, hyperparameter_value,\n dimensions = 'one', hyperparameter_value_two = None):\n \"\"\"Returns the negative MSE of the input hyperparameter for the given\n hyperparameter.\n Used with GradientBoostingRegressor estimator currently\n If dimensions = one, then search n_estimators. if dimension equal\n two then search over n_estimators and max_depth\"\"\"\n #definethe model\n model = self.model\n # define the training points\n train_points_x = self.model_train_points_x\n train_points_y = self.model_train_points_y\n\n if self.dimensions == 'one':\n try:\n m = model(n_estimators= int(hyperparameter_value))\n except:\n m = model(n_estimators= hyperparameter_value)\n m.fit(train_points_x, train_points_y)\n pred = m.predict(self.test_points_x )\n n_mse = self.root_mean_squared_error(self.test_points_y , pred)\n return n_mse\n elif self.dimensions =='two':\n try:\n m = model(n_estimators = int(hyperparameter_value),\n max_depth = int(hyperparameter_value_two))\n except:\n m = model(n_estimators = hyperparameter_value,\n max_depth = hyperparameter_value_two)\n m.fit(train_points_x, train_points_y)\n pred = m.predict(self.test_points_x)\n n_mse = self.root_mean_squared_error(self.test_points_y , pred)\n return n_mse\n else:\n return ' We do not support this number of dimensions yet'\n\n\n\n def root_mean_squared_error(self, actual, predicted, negative = True):\n \"\"\"MSE of actual and predicted value.\n Negative turn the MSE negative to allow for\n maximization instead of minimization\"\"\"\n if negative == True:\n return - np.sqrt(sum((actual.reshape(-1,1) - predicted.reshape(-1,1)**2))\n \/len(actual))\n else:\n return np.sqrt(sum((actual.reshape(-1,1) - predicted.reshape(-1,1)**2))\n \/len(actual))\n\n def expected_improvement(self, mean_x, sigma_squared_x,\n y_val_for_best_hyperparameters, normal_dist=None,\n point_est = False):\n \"\"\"Finds the expected improvement of a point give the current best point.\n If point_est = False, then computes the expected value on a vector\n from the posterior distribution.\n \"\"\"\n\n with np.errstate(divide='ignore'): # in case sigma equals zero\n # Expected val for one point\n if point_est ==True:\n sigma_x = np.sqrt(sigma_squared_x) # get the standard deviation from the variance\n\n Z = (mean_x - y_val_for_best_hyperparameters) \/ sigma_x\n\n if round(sigma_x,8) == 0:\n return 0\n else:\n return (mean_x -\n y_val_for_best_hyperparameters)*normal_dist.cdf(Z)+\\\n sigma_x*normal_dist.pdf(Z)\n\n else:\n # Sample from the posterior functions\n for _ in range(len(mean_x)):\n list_of_improvements = []\n m_s = []\n for m, z, s in zip(mean_x, ((mean_x -y_val_for_best_hyperparameters)\\\n \/ np.std(sigma_squared_x)),np.sqrt(sigma_squared_x) ):\n\n list_of_improvements.append(((m-y_val_for_best_hyperparameters)*\\\n norm().cdf(z)\\\n +s * norm().pdf(z)))\n m_s.append(m)\n\n return list_of_improvements\n\n\n\n\n\n def nearestPD(self, A):\n \"\"\"\n #https:\/\/stackoverflow.com\/questions\/43238173\/python-convert-matrix-to-positive-semi-definite\/43244194#43244194\n\n Find the nearest positive-definite matrix to input\n\n A Python\/Numpy port of 's `nearestSPD` MATLAB code [1], which\n credits [2].\n\n [1] https:\/\/www.mathworks.com\/matlabcentral\/fileexchange\/42885-nearestspd\n\n [2] , \"Computing a nearest symmetric positive semidefinite\n matrix\" (1988): https:\/\/doi.org\/10.1016\/0024-3795(88)90223-6\n \"\"\"\n def isPD(B):\n \"\"\"Returns true when input is positive-definite, via 
Cholesky\"\"\"\n try:\n _ = la.cholesky(B)\n return True\n except la.LinAlgError:\n return False\n\n\n\n B = (A + A.T) \/ 2\n _, s, V = la.svd(B)\n\n H = np.dot(V.T, np.dot(np.diag(s), V))\n\n A2 = (B + H) \/ 2\n\n A3 = (A2 + A2.T) \/ 2\n\n if isPD(A3):\n return A3\n\n spacing = np.spacing(la.norm(A))\n # The above is different from [1]. It appears that MATLAB's `chol` Cholesky\n # decomposition will accept matrixes with exactly 0-eigenvalue, whereas\n # Numpy's will not. So where [1] uses `eps(mineig)` (where `eps` is Matlab\n # for `np.spacing`), we use the above definition. CAVEAT: our `spacing`\n # will be much larger than [1]'s `eps(mineig)`, since `mineig` is usually on\n # the order of 1e-16, and `eps(1e-16)` is on the order of 1e-34, whereas\n # `spacing` will, for Gaussian random matrixes of small dimension, be on\n # othe order of 1e-16. In practice, both ways converge, as the unit test\n # below suggests.\n I = np.eye(A.shape[0])\n k = 1\n while not self.isPD(A3):\n mineig = np.min(np.real(la.eigvals(A3)))\n A3 += I * (-mineig * k**2 + spacing)\n k += 1\n\n return A3\n\n\n\n def __squared_kernel__(self, a, b, param=2.0, train=False,\n train_noise = 5e-3, vertical_scale=1.5):\n \"\"\"Calculated the squared exponential kernel.\n Adds a noise term for the covariance of the training data\n Adjusting the param changes the difference where points will have a positive covariance\n Returns a covaraince Matrix.\n Vertical scale controls the vertical scale of the function\"\"\"\n if self.squared_length != None:\n vertical_scale = self.squared_length\n\n if train == False:\n # ensure a and b are numpy arrays\n a = np.array(a)\n b = np.array(b)\n sqdist = np.sum(a**2,1).reshape(-1,1) + np.sum(b**2,1) - 2*np.dot(a, b.T)\n return vertical_scale*np.exp(-.5 * (1\/param) * sqdist)\n\n else:\n # ensure a and b are numpy arrays\n a = np.array(a)\n b = np.array(b)\n noisy_observations = train_noise*np.eye(len(a))\n sqdist = np.sum(a**2,1).reshape(-1,1) + np.sum(b**2,1) - 2*np.dot(a, b.T)\n return vertical_scale*np.exp(-.5 * (1\/param) * sqdist) + noisy_observations\n\n def __matern_kernel__(self, a,b,C_smoothness=3\/2,train=False, train_noise = 5e-2):\n \"\"\"The class of Matern kernels is a generalization of the RBF and the\n absolute exponential kernel parameterized by an additional parameter\n nu. The smaller nu, the less smooth the approximated function is.\n For nu=inf, the kernel becomes equivalent to the RBF kernel and for nu=0.5\n to the absolute exponential kernel. 
Important intermediate values are\n nu=1.5 (once differentiable functions) and nu=2.5 (twice differentiable\n functions).\n\n c_smoother = inf = RBF\n\n The train keyword is used to add noisy observations to the matrix\"\"\"\n if C_smoothness not in [1\/2,3\/2]:\n return \"You choose an incorrect hyparameter, please choose either 1\/2 or 3\/2\"\n matrix_norm = np.array([np.linalg.norm(a[i] - b,axis=(1)) for i in range(len(a))])\n if C_smoothness == 1\/2:\n if train == True:\n max(np.var(a),np.var(b)) * np.exp(-matrix_norm) + np.eye(len(matrix_norm))*train_noise\n else:\n return max(np.var(a),np.var(b)) * np.exp(-matrix_norm)\n elif C_smoothness == 3\/2:\n if train == True:\n return max(np.var(a),np.var(b))* (1\n + np.sqrt(3)*matrix_norm)*np.exp(-np.sqrt(3)*matrix_norm) \\\n + np.eye(len(matrix_norm))*train_noise\n else:\n return max(np.var(a),np.var(b))* (1 +np.sqrt(3) *\n matrix_norm) * np.exp(-np.sqrt(3)*matrix_norm)\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_236","text":"1-10\n\"\"\"Copyright (c) 2021 \n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\"\"\"\n\n\nfrom scipy import integrate\nimport numpy as np\n\n\nfrom percolate.framework import Port\nfrom percolate.framework import InPort\nfrom percolate.framework import OutPort\nfrom percolate.framework import StreamOutput\nfrom percolate.framework import TextOutput\nfrom percolate.framework import StreamInput\nfrom percolate.framework import ArrayOutput\nfrom percolate.framework import FilePathInput\nfrom percolate.framework import DirPathInput\nfrom percolate.framework import MuxInput\nfrom percolate.framework import MuxOutput\nfrom percolate.framework import Param_input\nfrom percolate.framework import func_Output\nfrom percolate.framework import int_input\nfrom percolate.framework import bool_input\nfrom percolate.framework import choice_input\nfrom percolate.framework import Function\n\n\n# toolKit\nfrom percolate.toolkit.step2 import step2\nfrom percolate.toolkit.single_step import single_step\nfrom percolate.toolkit.make_zero_array import make_zero_array\nfrom percolate.toolkit.single_step_xanes import single_step_xanes\nfrom percolate.toolkit.single_step_xanes import pre_edge_fit\n\n\nclass args_step:\n def __init__(self, parent):\n\n self.apply_step = parent.apply_step.default\n self.fit_function = parent.fit_function.default\n\n self.pre_feature_min = parent.pre_feature_min.default\n self.pre_feature_max = parent.pre_feature_max.default\n self.post_feature_min = parent.post_feature_min.default\n self.post_feature_max = parent.post_feature_max.default\n\n\nclass single_step_subtraction_xanes(Function):\n \"\"\"TODO: Centre the step function on the peaks energy!\"\"\"\n\n def __init__(self):\n\n super().__init__(\"step_subtraction\")\n\n # Input Ports\n self.input_array = StreamInput(self, \"input_array\")\n\n self.apply_step = choice_input(self, \"Apply\", \"off\", [\"off\", \"on\"])\n self.fit_function = choice_input(\n self, \"fit_function\", \"Voight\", [\"Voight\", \"Arctan\"]\n )\n self.pre_feature_min = int_input(\n self, \"pre_feature_min\", self.input_array, None\n )\n self.pre_feature_max = int_input(\n self, \"pre_feature_max\", self.input_array, None\n )\n self.post_feature_min = int_input(\n self, \"post_feature_min\", self.input_array, None\n )\n self.post_feature_max = int_input(\n self, \"post_feature_max\", self.input_array, None\n )\n\n # output ports\n self.stepfunction = ArrayOutput(self, \"stepfunction\", self.read_stepfunction)\n self.subtracted_step = ArrayOutput(\n self, \"subtracted_step\", self.read_subtracted_step\n )\n\n\n\n\n # evaluate method\n def evaluate(self):\n\n local_arguments = args_step(self)\n\n x = self.input_array.read()[\"data\"][0]\n y = self.input_array.read()[\"data\"][1]\n pre_feature_min = local_arguments.pre_feature_min\n pre_feature_max = local_arguments.pre_feature_max\n post_feature_min = local_arguments.post_feature_min\n post_feature_max = local_arguments.post_feature_max\n\n if local_arguments.apply_step == \"off\":\n\n x = x\n background = make_zero_array(x)\n y = y - background\n\n else:\n x, y, background = pre_edge_fit(\n x,\n y,\n pre_feature_min,\n pre_feature_max,\n post_feature_min,\n post_feature_max,\n )\n\n self.x = x\n self.y = y\n self.background = background\n\n self.lines = [\n pre_feature_min,\n pre_feature_max,\n post_feature_min,\n 
post_feature_max,\n ]\n\n def read_stepfunction(self):\n return {\n \"data\": [self.x, self.background, self.lines],\n \"label\": self.input_array.read()[\"label\"],\n }\n # return self.stepfunction_a\n\n def read_subtracted_step(self):\n return {\n \"data\": [self.x, self.y, self.lines],\n \"label\": self.input_array.read()[\"label\"],\n }\n # return self.post_step_p\n\n def calculate_fit(self, x, y, argument):\n pass\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_237","text":"Aaron5210\/yolo_seq_nmsyolo_seqnms.py\n# -*- coding: utf-8 -*-\nimport numpy as np\nimport cv2\nimport time\nimport copy\n#import cPickle as pickle\nimport os, sys\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\nimport scipy.misc\nimport yolo_detection\nimport visualization_utils as vis_util\nimport label_map_util\nfrom seq_nms import *\n\ndef load_image_into_numpy_array(image):\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n\ndef get_labeled_image(image_path, path_to_labels, num_classes, boxes, classes, scores):\n label_map = label_map_util.load_labelmap(path_to_labels)\n categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=num_classes,\n use_display_name=True)\n category_index = label_map_util.create_category_index(categories)\n image = Image.open(image_path)\n image_np = load_image_into_numpy_array(image)\n image_process = vis_util.visualize_boxes_and_labels_on_image_array(\n image_np,\n boxes,\n classes,\n scores,\n category_index)\n return image_process\n\nif __name__ == \"__main__\":\n # load image\n load_begin=time.time()\n pkllistfile=open(os.path.join('video', 'pkllist.txt'))\n pkllist=pkllistfile.readlines()\n pkllistfile.close()\n pkllist=[pkl.strip() for pkl in pkllist]\n load_end=time.time()\n print('load: {:.4f}s'.format(load_end - load_begin))\n\n # detection\n detect_begin=time.time()\n if len(sys.argv) > 1 and sys.argv[1]=='tiny':\n res = yolo_detection.detect_imgs(pkllist, cfg=\"cfg\/tiny-yolo.cfg\", weights=\"tiny-yolo.weights\", nms=0, thresh=0.25)\n else:\n res = yolo_detection.detect_imgs(pkllist, nms=0, thresh=0.25)\n detect_end=time.time()\n print('total detect: {:.4f}s'.format(detect_end - detect_begin))\n print('average detect: {:.4f}s'.format((detect_end - detect_begin)\/len(pkllist)))\n\n # nms\n nms_begin=time.time()\n if len(sys.argv) > 1 and sys.argv[1]=='only_person':\n boxes, classes, scores = dsnms(res, only_person=True)\n else:\n boxes, classes, scores = dsnms(res)\n nms_end=time.time()\n print('total nms: {:.4f}s'.format(nms_end - nms_begin))\n\n # save&visualization\n save_begin=time.time()\n PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')\n NUM_CLASSES = 80\n if not os.path.exists('video\/output'):\n os.makedirs('video\/output')\n for i, image_path in enumerate(pkllist):\n image_process = get_labeled_image(image_path, PATH_TO_LABELS, NUM_CLASSES, np.array(boxes[i]), np.array(classes[i]), np.array(scores[i]))\n #plt.imshow(image_process)\n #plt.show()\n scipy.misc.imsave('video\/output\/frame{}.jpg'.format(i), image_process)\n if i%100==0:\n print('finish writing image{}'.format(i))\n save_end=time.time()\n print('total writing images: {:.4f}s'.format(save_end - save_begin))\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_238","text":"cnc\/motor_hub\/motor_hub\/set_screw_drill\/params.py\nimport scipy\n\n# Basic layout parameters\nparams = {}\nparams['numParts'] = 
5\nparams['partSpacing'] = 2.0 \nlayoutLen = (params['numParts']-1)*params['partSpacing']\nxPosArray = scipy.linspace(-0.5*layoutLen, 0.5*layoutLen,params['numParts'])\nyPosArray = scipy.zeros(xPosArray.size)\nparams['xPosList'] = list(xPosArray)\nparams['yPosList'] = list(yPosArray)\n\nparams['xPosRelDrill'] = 0.0\nparams['yPosRelDrill'] = -0.1\n\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_239","text":"# Created by \n\nfrom scipy.optimize import fmin_cobyla as max\n\ndef farm_optimize(acres, hours, p1, p2, h1, h2): \n #objective\n def objective(X, p1, p2):\n x,y = X\n return -(p1*x + p2*y)\n #constraint 1\n def c1(X, acres, hours, h1, h2):\n x,y = X\n return hours - h1*x - h2*y\n #constraint 2\n def c2(X, acres, hours, h1, h2):\n x,y = X\n return acres - x - y\n #constraint 3\n def c3(X, acres, hours, h1, h2):\n return X[0]\n #constraint 4\n def c4(X, acres, hours, h1, h2):\n return X[1]\n \n X = max(objective, x0=[acres, 0], cons=[c1,c2,c3,c4],\n args=(p1,p2), consargs=(acres, hours, h1, h2))\n \n print ('With input:')\n print (acres,'total acres',hours,'total labor-hours')\n print ('${}'.format(p1),'corn profit-per-acre',\n '${}'.format(p2),'oat profit-per-acre')\n print (h1,'corn labor-hours',h2,'oat labor-hours')\n print (' We should plant {0:.1f} acres of corn.'.format(X[0]))\n print (' We should plant {0:.1f} acres of oats.'.format(X[1]))\n print (' The maximum profit we can earn is ${0:.2f}.'\n .format(-objective(X, p1, p2)))\n print ()\n\nfarm_optimize(240, 320, 40, 30, 2, 1)\nfarm_optimize(300, 380, 70, 45, 3, 1)\nfarm_optimize(180, 420, 65, 55, 3, 1)\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_225","text":"import itertools\nimport logging\nimport multiprocessing\nimport os\nimport typing\n\nimport numpy as np\nimport pandas as pd\nimport scipy.stats\n\nfrom pyextremes.models.model_base import AbstractModelBaseClass\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_fit_parameters(params) -> typing.List[tuple]:\n n, fit_function, extremes, fixed_parameters, seed = params\n size = len(extremes)\n rng_generator = np.random.default_rng(seed=seed)\n sampler = rng_generator.choice\n return [\n fit_function(\n data=sampler(a=extremes, size=size, replace=True),\n **fixed_parameters,\n )\n for _ in range(n)\n ]\n\n\nclass MLE(AbstractModelBaseClass):\n def __init__(\n self,\n extremes: pd.Series,\n distribution: typing.Union[str, scipy.stats.rv_continuous],\n distribution_kwargs: typing.Optional[dict] = None,\n ) -> None:\n \"\"\"\n Maximum Likelihood Estimate (MLE) model.\n\n Built around the scipy.stats.rv_continuous.fit method.\n\n \"\"\"\n super().__init__(\n extremes=extremes,\n distribution=distribution,\n distribution_kwargs=distribution_kwargs,\n )\n\n # Initialize 'fit_parameter_cache' and 'seed_cache'\n self.fit_parameter_cache: typing.List[tuple] = []\n self.seed_cache: typing.Set[int] = set()\n\n @property\n def name(self) -> str:\n return \"MLE\"\n\n def fit(self, **kwargs) -> None:\n if len(kwargs) != 0:\n raise TypeError(\n f\"unrecognized arguments passed in: {', '.join(kwargs.keys())}\"\n )\n self._fit_parameters = self.distribution.mle_parameters\n logger.debug(\n f\"fit {self.distribution.name} distribution \"\n f\"with parameters {self.distribution.mle_parameters}\"\n )\n\n def get_return_value(\n self, exceedance_probability, alpha: typing.Optional[float] = None, **kwargs\n ) -> tuple:\n \"\"\"\n Calculate return value and confidence interval bounds.\n\n Parameters\n ----------\n 
exceedance_probability : array-like\n Exceedance probability or 1D array of exceedance probabilities.\n Each exceedance probability must be in the [0, 1) range.\n alpha : float, optional\n Width of confidence interval (0, 1).\n If None (default), return None\n for upper and lower confidence interval bounds.\n kwargs\n n_samples : int, optional\n Number of bootstrap samples used to estimate\n confidence interval bounds (default=100).\n\n Returns\n -------\n return_value : array-like\n Return values.\n ci_lower : array-like\n Lower confidence interval bounds.\n ci_upper : array-like\n Upper confidence interval bounds.\n\n \"\"\"\n # Parse 'kwargs'\n n_samples = kwargs.pop(\"n_samples\", 100)\n if not n_samples > 0:\n raise ValueError(\n f\"invalid value in {n_samples} for the 'n_samples' \"\n f\"argument, must be positive number\"\n )\n if len(kwargs) != 0:\n raise TypeError(\n f\"unrecognized arguments passed in: {', '.join(kwargs.keys())}\"\n )\n\n # Convert 'exceedance_probability' to ndarray\n exceedance_probability = np.asarray(\n a=exceedance_probability, dtype=np.float64\n ).copy()\n if exceedance_probability.ndim == 0:\n exceedance_probability = exceedance_probability[np.newaxis]\n if exceedance_probability.ndim != 1:\n raise ValueError(\n f\"invalid shape in {exceedance_probability.shape} \"\n f\"for the 'exceedance_probability' argument, must be 1D array\"\n )\n\n # If cache doesn't have enough values, calculate new fit parameters\n if alpha is not None:\n n_extra_fit_parameters = n_samples - len(self.fit_parameter_cache)\n if n_extra_fit_parameters > 0:\n self._extend_fit_parameter_cache(n=n_extra_fit_parameters)\n\n # Calculate return values\n return_value = np.full(\n shape=exceedance_probability.shape, fill_value=np.nan, dtype=np.float64\n )\n ci_lower = return_value.copy()\n ci_upper = return_value.copy()\n for i, ep in enumerate(exceedance_probability):\n key: typing.Tuple[float, typing.Optional[float], int] = (\n ep,\n alpha,\n n_samples,\n )\n try:\n # Try to fetch pre-calculated values from cache\n rv, cil, ciu = self.return_value_cache[key]\n logger.debug(\n f\"fetched return value for {key} from cache as {(rv, cil, ciu)}\"\n )\n except KeyError:\n # Value not in cache - calculate new return value\n rv = self.distribution.distribution.isf(\n q=ep,\n **self.fit_parameters,\n **self.distribution._fixed_parameters,\n )\n\n # Calculate confidence intervals\n if alpha is None:\n cil = None\n ciu = None\n else:\n # Calculate confidence intervals\n rv_sample = self.distribution.distribution.isf(\n ep, *np.transpose(self.fit_parameter_cache[:n_samples])\n )\n cil, ciu = np.quantile(\n a=rv_sample, q=[(1 - alpha) \/ 2, (1 + alpha) \/ 2]\n )\n\n # Add calculated return value and intervals to cache\n self.return_value_cache[key] = (rv, cil, ciu)\n logger.debug(f\"calculated return value for {key} as {(rv, cil, ciu)}\")\n\n return_value[i] = rv\n ci_lower[i] = cil\n ci_upper[i] = ciu\n\n # Return results\n if len(return_value) == 1:\n return return_value[0], ci_lower[0], ci_upper[0]\n else:\n return return_value, ci_lower, ci_upper\n\n def _extend_fit_parameter_cache(self, n: int) -> None:\n # Prepare local variables used by fit parameter calculator\n extremes = self.extremes.values\n fit_function = self.distribution.distribution.fit\n fixed_parameters = self.distribution.fixed_parameters\n\n min_samples_per_core = 50\n if n <= min_samples_per_core:\n # Calculate without multiprocessing\n logger.debug(\"getting random seed value for fit parameter sampler\")\n seed = None\n while seed 
is None:\n _seed = np.random.randint(low=0, high=1e6, size=None)\n if _seed not in self.seed_cache:\n seed = _seed\n self.seed_cache.add(_seed)\n\n logger.debug(f\"calculating {n} additional fit parameters using single core\")\n new_fit_parameters = get_fit_parameters(\n params=(\n n,\n fit_function,\n extremes,\n fixed_parameters,\n seed,\n )\n )\n else:\n # Find number of cores\n n_cores = min(\n os.cpu_count() or 2,\n int(np.ceil(n \/ min_samples_per_core)),\n )\n\n # Calculate number of samples per core\n min_samples_per_core = int(n \/ n_cores)\n core_samples = [min_samples_per_core for _ in range(n_cores)]\n\n # Distribute remaining samples evenly across cores\n for i in range(n - sum(core_samples)):\n core_samples[i] += 1\n\n # Get unique random seed for each core and add it to `self.seed_cache`\n logger.debug(\"getting random seed values for each core\")\n seeds: typing.List[int] = []\n while len(seeds) < n_cores:\n seed = np.random.randint(low=0, high=1e6, size=None)\n if seed not in self.seed_cache:\n seeds.append(seed)\n self.seed_cache.add(seed)\n\n # Calculate new fit parameters using processor pool\n logger.debug(\n f\"calculating {n} additional fit parameters using {n_cores} cores \"\n f\"having {core_samples} samples accordingly\"\n )\n with multiprocessing.Pool(processes=n_cores) as pool:\n new_fit_parameters = list(\n itertools.chain(\n *pool.map(\n get_fit_parameters,\n zip(\n core_samples,\n [fit_function for _ in range(n_cores)],\n [extremes for _ in range(n_cores)],\n [fixed_parameters for _ in range(n_cores)],\n seeds,\n ),\n )\n )\n )\n\n # Extend fit parameter cache\n logger.debug(f\"extending fit parameter cache with {n} new entries\")\n self.fit_parameter_cache.extend(new_fit_parameters)\n return None\n\n def __repr__(self) -> str:\n free_parameters = \", \".join(\n [\n f\"{parameter}={self.fit_parameters[parameter]:.3f}\"\n for parameter in self.distribution.free_parameters\n ]\n )\n\n fixed_parameters = \", \".join(\n [\n f\"{key}={value:.3f}\"\n for key, value in self.distribution.fixed_parameters.items()\n ]\n )\n if fixed_parameters == \"\":\n fixed_parameters = \"all parameters are free\"\n\n summary = [\n \"MLE model\",\n \"\",\n f\"free parameters: {free_parameters}\",\n f\"fixed parameters: {fixed_parameters}\",\n f\"AIC: {self.AIC:.3f}\",\n f\"loglikelihood: {self.loglikelihood:.3f}\",\n f\"return value cache size: {len(self.return_value_cache):,d}\",\n f\"fit parameter cache size: {len(self.fit_parameter_cache):,d}\",\n ]\n\n longest_row = max(map(len, summary))\n summary[1] = \"-\" * longest_row\n summary.append(summary[1])\n summary[0] = \" \" * ((longest_row - len(summary[0])) \/\/ 2) + summary[0]\n for i, row in enumerate(summary):\n summary[i] += \" \" * (longest_row - len(row))\n\n return \"\\n\".join(summary)\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_112","text":"#!c:\\users\\hooma\\documents\\github\\spinesegmentation\\segmentation_test\\scripts\\python.exe\n\n\"\"\"\nCreates the superimposition image of two label images.\n\nCopyright (C) 2013 \n\nThis program is free software: you can redistribute it and\/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see .\n\"\"\"\n\n# build-in modules\nfrom argparse import ArgumentError\nimport argparse\nimport logging\nimport os\n\n# third-party modules\nimport scipy\n\n# path changes\n\n# own modules\nfrom medpy.io import load, save\nfrom medpy.core import Logger\n\n# information\n__author__ = \"\"\n__version__ = \"r0.2.1, 2011-01-04\"\n__email__ = \"\"\n__status__ = \"Release\"\n__description__ = \"\"\"\n Takes two label images as input and creates their superimposition i.e.\n all the regions borders are preserved and the resulting image contains\n more or the same number of regions as the respective input images.\n \n The resulting image has the same name as the first input image, just\n with a '_superimp' suffix.\n \n Copyright (C) 2013 \n This program comes with ABSOLUTELY NO WARRANTY; This is free software,\n and you are welcome to redistribute it under certain conditions; see\n the LICENSE file or for details. \n \"\"\"\n\n# code\ndef main():\n # parse cmd arguments\n parser = getParser()\n parser.parse_args()\n args = getArguments(parser)\n \n # prepare logger\n logger = Logger.getInstance()\n if args.debug: logger.setLevel(logging.DEBUG)\n elif args.verbose: logger.setLevel(logging.INFO)\n\n # build output image name\n image_superimposition_name = args.folder + '\/' + args.image1.split('\/')[-1][:-4] + '_superimp'\n image_superimposition_name += args.image1.split('\/')[-1][-4:]\n \n # check if output image exists\n if not args.force:\n if os.path.exists(image_superimposition_name):\n raise ArgumentError('The output image {} already exists. Please provide the -f\/force flag, if you wish to override it.'.format(image_superimposition_name))\n \n # load image1 using\n logger.info('Loading image {}...'.format(args.image1))\n image1_data, image1_header = load(args.image1)\n \n # load image2 using\n logger.info('Loading image {}...'.format(args.image2))\n image2_data, _ = load(args.image2)\n \n # check input images to be valid\n logger.info('Checking input images for correctness...')\n if image1_data.shape != image2_data.shape:\n raise ArgumentError('The two input images shape do not match with 1:{} and 2:{}'.format(image1_data.shape, image2_data.shape))\n int_types = (scipy.uint, scipy.uint0, scipy.uint8, scipy.uint16, scipy.uint32, scipy.uint64, scipy.uintc, scipy.uintp,\n scipy.int_, scipy.int0, scipy.int8, scipy.int16, scipy.int32, scipy.int64, scipy.intc, scipy.intp)\n if image1_data.dtype not in int_types:\n raise ArgumentError('Input image 1 is of type {}, an int type is required.'.format(image1_data.dtype))\n if image2_data.dtype not in int_types:\n raise ArgumentError('Input image 2 is of type {}, an int type is required.'.format(image2_data.dtype))\n if 4294967295 < abs(image1_data.min()) + image1_data.max() + abs(image2_data.min()) + image2_data.max():\n raise ArgumentError('The input images contain so many (or not consecutive) labels, that they will not fit in a uint32 range.')\n \n # create superimposition of the two label images\n logger.info('Creating superimposition image...')\n image_superimposition_data = scipy.zeros(image1_data.shape, dtype=scipy.uint32)\n translation = {}\n label_id_counter = 0\n for x in range(image1_data.shape[0]):\n for y in range(image1_data.shape[1]):\n for z in range(image1_data.shape[2]):\n label1 = image1_data[x,y,z]\n label2 = image2_data[x,y,z]\n if not (label1, label2) in translation:\n 
translation[(label1, label2)] = label_id_counter\n label_id_counter += 1\n image_superimposition_data[x,y,z] = translation[(label1, label2)]\n \n # save resulting superimposition image\n logger.info('Saving superimposition image as {} in the same format as input image...'.format(image_superimposition_name))\n save(image_superimposition_data, args.output, image1_header, args.force)\n \n logger.info('Successfully terminated.')\n \ndef getArguments(parser):\n \"Provides additional validation of the arguments collected by argparse.\"\n return parser.parse_args()\n\ndef getParser():\n \"Creates and returns the argparse parser object.\"\n parser = argparse.ArgumentParser(description=__description__)\n parser.add_argument('image1', help='The first input label image.')\n parser.add_argument('image2', help='The second input label image.')\n parser.add_argument('output', help='The output image.')\n parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.')\n parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.')\n parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.')\n \n return parser \n \nif __name__ == \"__main__\":\n main() \n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_113","text":"zig1000\/spacechem-level-generator\n#!\/usr\/bin\/env python\n# -*- coding: utf-8 -*-\n\nimport base64\nimport collections\nimport copy\nimport fractions\nimport gzip\nimport io\nimport json\n\nimport elements_data\n\n# Element class is defined in elements_data.py to avoid circular dependencies\n\n'''Classes For more apropros errors'''\nclass FormulaValidityError(ValueError):\n pass\nclass MoleculeValidityError(ValueError):\n pass\n\nclass Formula(collections.Counter):\n '''Represent a chemical formula, as a Counter of elements.'''\n # Redefine Counter's built-in elements() method to return the list of unique ELements in the\n # formula, and move its original functionality to 'atoms()'.\n def elements(self):\n '''Return a list of unique elements in this formula.'''\n # Make sure not to include any 0-counts that Counter leaves hanging around\n return [e for e in self.keys() if self[e] != 0]\n\n def elements_collection(self):\n '''Return a list containing each element as many times as its count.'''\n return list(super().elements())\n\n # Have to override Counter's add method or else adding two Formulas will make a Counter\n def __add__(self, other):\n result = Formula()\n for k in self.keys():\n result[k] += self[k]\n for k in other.keys():\n result[k] += other[k]\n return result\n\n def __mul__(self, other):\n return Formula({i: other * self[i] for i in self.keys()})\n __rmul__ = __mul__\n\n def num_atoms(self):\n return sum(self.values())\n\n def least_common_formula(self):\n '''Return a new formula which is this formula divided by the GCD of its element counts'''\n gcd = 0\n for v in self.values():\n gcd = fractions.gcd(v, gcd)\n\n new_formula = copy.copy(self)\n if gcd > 1: # Make sure we don't divide by 0 in empty case\n for e in new_formula.elements():\n new_formula[e] = new_formula[e] \/\/ gcd\n return new_formula\n\n def get_json_str(self):\n '''Return a string representing this formula using the Hill System (C, then H, then\n alphabetized), in the game's accepted format. E.g. 
Glycine would be 'C~02H~05NO~02'.\n '''\n result = ''\n # Sort Carbon and Hydrogen to the front and alphabetize the rest\n elements = sorted(self.elements(),\n key=lambda e: '0' if e.symbol == 'C'\n else '1' if e.symbol == 'H'\n else e.symbol)\n for e in elements:\n result += e.symbol\n if self[e] != 1:\n result += '~' + str(self[e]).rjust(2, '0')\n return result\n __str__ = get_json_str # For debugging convenience\n\n def is_valid(self, large_output=False):\n '''Check if it's possible to form a molecule with this formula within an input\/output zone.\n Empty formulas are considered invalid. Default 4x4 zone, optionally large (8x4) output zone.\n '''\n # Verify size constraints\n if not 1 <= self.num_atoms() <= 16 + 16*large_output:\n return False\n\n # We'll calculate validity based on whether there are enough bonds in the atom list to form\n # a minimally-connected molecule. To check this, consider a simple linearly connected\n # molecule with 2 endpoints. An \"endpoint\" element has max bonds 1, while elements with 3 or\n # 4+ max bonds each allow for 1 or 2 additional endpoints in the molecule, respectively.\n # Elements of max bonds 2 do not affect the count and thus do not affect the validity of\n # the formula, apart from the 16 or 32-atom limit of the zone.\n #\n # Though not formally proven, it appears that the maximum number of endpoints is in each\n # case a constant equal to half the number of cells in the zone. By trial and error with the\n # 4x4 zone it appears that this maximum can be reached with any composition of 3 vs 4 bond\n # atoms, and a simplifying assumption is made that this holds for the 8x4 case.\n # The cases for which an incorrect return value by this method could cause an exception\n # would in any case be prohibitively rare and of little concern, and imo worth the tradeoff\n # for O(k) runtime (where k is the # of unique elements in the formula).\n\n # Due to the limited sizes of the zones, each max bound count element can only contribute\n # extra connections so many times before it reaches its limit. For example, in a 4x4 zone,\n # only two atoms with max bond count 4 will provide two extra endpoints each. Subsequent\n # max bond count 4 atoms will only allow for 1 extra endpoint each due to space\n # constraints. E.g. 
C3H8 is impossible to construct in a 4x4 zone.\n #\n if large_output:\n extra_endpoints_dict = {3:14, 4:6}\n else:\n extra_endpoints_dict = {3:6, 4:2}\n\n allowed_endpoint_count = 2 # H-H base case\n for element in self.elements():\n if element.max_bonds == 0:\n # A noble gas is only valid as the only atom in a molecule\n return self.num_atoms() == 1\n elif element.max_bonds == 1:\n allowed_endpoint_count -= self[element]\n # Count one extra allowed endpoint per atom with bond count 3 or more (up to limit)\n elif element.max_bonds >= 3:\n # As long as our formula has no negatives this should be fine\n extra_endpoints = min(self[element], extra_endpoints_dict[3])\n allowed_endpoint_count += extra_endpoints\n extra_endpoints_dict[3] -= extra_endpoints\n\n # Count an additional extra endpoint per atom with bond count 4 or more (up to limit)\n if element.max_bonds >= 4:\n extra_endpoints = min(self[element],\n extra_endpoints_dict[3],\n extra_endpoints_dict[4])\n allowed_endpoint_count += extra_endpoints\n extra_endpoints_dict[3] -= extra_endpoints\n extra_endpoints_dict[4] -= extra_endpoints\n\n return allowed_endpoint_count >= 0\n\n def fission_sources(self):\n '''Return a dict of atomic masses and their counts for all elements that could have fission\n performed on them to obtain part of this formula.\n The heavy lifting of this method is tucked away at the bottom of this file in\n splittable_sources since it's a monster of a function.\n '''\n output_masses = collections.Counter({e.atomic_num: count for e, count in self.items()\n if count > 0})\n return splittable_sources(output_masses)\n\n def remove_fissile_element(self, element, count):\n '''Used while converting an output formula to an input formula via inverse fission.\n Given a fissile element and count, remove the target count of the element from\n this formula, drilling down into its 'fission tree' as needed.\n Raise FormulaValidityError if the element \/ its fission tree doesn't add up to the count.\n '''\n # Remove as much as we can of this element without fission\n direct_removals = min(count, self[element])\n count -= direct_removals\n self[element] -= direct_removals\n # Keep this object clean\n if self[element] == 0:\n del self[element]\n\n if count != 0:\n # If we hit the bottom of the fission tree and aren't done, raise an exception\n if element.atomic_num == 1:\n raise FormulaValidityError(f\"Couldn't remove {count} of {element} from formula\")\n\n try:\n if element.atomic_num % 2 == 0:\n child = elements_data.elements_dict[element.atomic_num \/\/ 2]\n self.remove_fissile_element(child, 2*count)\n else:\n child_A, child_B = (elements_data.elements_dict[element.atomic_num \/\/ 2 + 1],\n elements_data.elements_dict[element.atomic_num \/\/ 2])\n self.remove_fissile_element(child_A, count)\n self.remove_fissile_element(child_B, count)\n except FormulaValidityError:\n raise FormulaValidityError(f\"Couldn't remove {count} of {element} from formula\")\n\n# Enum-esque directional vars for convenience\nDIRECTIONS = UP, RIGHT, DOWN, LEFT = (0, 1, 2, 3) # Python, baby\n\ndef opposite_dir(dir):\n '''Given an Int representing a direction return its opposite direction.'''\n return (dir + 2) % 4\n\nclass GridPos:\n '''Represent a 0-indexed (row, col) position within an input\/output zone.\n Indices increase from left to right and top to bottom.\n '''\n num_cols = 4\n\n def __init__(self, row, col, large_output=False):\n self.row = row\n self.col = col\n self.large_output = large_output\n self.num_rows = 4 + 4*large_output\n\n def 
__str__(self):\n return f'({self.row}, {self.col})'\n __repr__ = __str__\n\n # __eq__ and __hash__ so we can use GridPos as dictionary keys\n def __eq__(self, other):\n return type(self) == type(other) and (self.row, self.col) == (other.row, other.col)\n\n def __hash__(self):\n return hash((self.row, self.col))\n\n def is_valid(self):\n '''Check that this position consists of integer positions within the zone's grid.'''\n return isinstance(self.row, int) and isinstance(self.col, int) \\\n and (0 <= self.row < self.num_rows) and (0 <= self.col < self.num_cols)\n\n def neighbor(self, dir):\n '''Return the neighbor GridPos in the indicated direction, or None if out-of-bounds.'''\n if dir == UP:\n r, c = self.row - 1, self.col\n elif dir == RIGHT:\n r, c = self.row, self.col + 1\n elif dir == DOWN:\n r, c = self.row + 1, self.col\n elif dir == LEFT:\n r, c = self.row, self.col - 1\n else:\n raise ValueError(f\"Invalid direction: {dir}\")\n\n if 0 <= r < self.num_rows and 0 <= c < self.num_cols:\n return GridPos(r, c, self.large_output)\n return None\n\n def neighbors(self):\n '''Return all orthogonally adjacent positions within the zone's grid.'''\n return [p for p in (self.neighbor(dir) for dir in DIRECTIONS) if p is not None]\n\n def dirs_and_neighbors(self):\n '''Return a list of (dir, pos) pairs for each neighboring position within the grid.'''\n return [(d, p) for d, p in ((_d, self.neighbor(_d)) for _d in DIRECTIONS) if p is not None]\n\n\nclass Atom:\n '''Represent an Atom, including its element, grid position, and attached bonds.\n '''\n def __init__(self, element, pos):\n self.bonds = [0, 0, 0, 0] # up, right, down, left\n self.set_element(element)\n self.set_pos(pos)\n\n def __str__(self):\n return self.symbol.rjust(2) # Pad element symbol to two chars\n\n def __repr__(self):\n return f'Atom({self.symbol}, {self.pos}, {self.bonds})'\n\n def __eq__(self, other):\n return (type(self) == type(other)\n and self.element == other.element\n and self.pos == other.pos\n and self.bonds == other.bonds)\n\n def get_json_str(self):\n '''Return a string representing this atom in the level json's format.'''\n return f'{self.col}{self.row}{self.atomic_num}{self.bonds[RIGHT]}{self.bonds[DOWN]}'\n\n def remaining_bonds(self):\n '''Return the # of remaining bonds this atom is allowed.'''\n return self.max_bonds - sum(self.bonds)\n\n def set_pos(self, pos):\n '''Change this atom's position in the grid.'''\n self.pos = pos\n self.row = self.pos.row\n self.col = self.pos.col\n\n def set_element(self, element):\n if sum(self.bonds) > element.max_bonds:\n raise ValueError(f\"Too many bonds to change atom {self} to element {element}\")\n\n self.element = element\n\n # Exposing some sub-attributes for convenience\n self.atomic_num = element.atomic_num\n self.symbol = element.symbol\n self.max_bonds = element.max_bonds\n\nclass Molecule:\n '''Represents an input\/output zone and the molecule constructed therein.\n '''\n def __init__(self, large_output=False):\n self.name = 'Randite'\n self.large_output = large_output\n self.num_rows = 4 + 4*large_output\n self.num_cols = 4\n self.formula = Formula()\n # TODO: Could potentially merge grid and used_posns into a single GridPos:Atom dict.\n # The main convenience of the grid at this point is that it'll automatically yell at us\n # if we start asking for atoms from a GridPos that's out-of-bounds.\n self.grid = [[None, None, None, None] for _ in range(self.num_rows)]\n self.used_posns = set() # Tracked so that we can easily iterate over the atoms in the molecule\n\n 
# To optimize the performance of available_positions(), we'll roughly track the # of open\n # bonds available on this molecule.\n # An atom with no open adjacencies in the grid contributes 0 to this count.\n self.open_bonds = 0\n\n def __getitem__(self, pos):\n '''Return the atom at the specified grid position or None.'''\n return self.grid[pos.row][pos.col]\n\n def __setitem__(self, pos, item):\n '''Set the specified grid position (item should be None or an Atom).'''\n self.grid[pos.row][pos.col] = item\n if item is None:\n self.used_posns.remove(pos)\n else:\n self.used_posns.add(pos)\n\n def __iter__(self):\n '''Iterate over each atom in this molecule. Order of iteration is not defined.'''\n return (self[p] for p in self.used_posns)\n\n def __len__(self):\n '''Return the # of atoms in this molecule.'''\n return len(self.used_posns)\n\n def __str__(self):\n '''Pretty-print this molecule.'''\n result = ' _________________ \\n' # Border of the input\/output zone\n for r in range(self.num_rows):\n result += '|'\n for c in range(self.num_cols):\n atom = self.grid[r][c]\n # Represent any atoms here\n if atom is None:\n result += 2*' '\n else:\n result += str(atom).rjust(2)\n # Represent any bonds to the right of the atom\n left_atom = atom\n right_atom = self.grid[r][c + 1] if c + 1 < self.num_cols else None\n\n bond_str = ' '\n if left_atom is not None and right_atom is not None \\\n and left_atom.bonds[RIGHT] != right_atom.bonds[LEFT]:\n bond_str = '?'\n elif left_atom is not None and left_atom.bonds[RIGHT] != 0:\n bond_str = str(left_atom.bonds[RIGHT])\n elif right_atom is not None and right_atom.bonds[LEFT] != 0:\n bond_str = str(right_atom.bonds[LEFT])\n if c < self.num_cols - 1:\n result += ' ' + bond_str + ' '\n result += '|\\n'\n # Add a row of vertical bonds\n if r < self.num_rows - 1:\n result += '|'\n for c in range(self.num_cols):\n top_atom = self.grid[r][c]\n if r + 1 < self.num_rows:\n bottom_atom = self.grid[r + 1][c]\n else:\n bottom_atom = None\n bond_str = ' '\n if top_atom is not None and bottom_atom is not None \\\n and top_atom.bonds[DOWN] != bottom_atom.bonds[UP]:\n bond_str = '??'\n elif top_atom is not None and top_atom.bonds[DOWN] != 0:\n bond_str = ' ' + str(top_atom.bonds[DOWN])\n elif bottom_atom is not None and bottom_atom.bonds[UP] != 0:\n bond_str = ' ' + str(bottom_atom.bonds[UP])\n result += bond_str\n if c < self.num_cols - 1:\n result += 3*' '\n result += '|\\n'\n result += '|_________________|\\n'\n return result\n\n __repr__ = __str__\n\n def get_json_str(self):\n '''Return a string representing this molecule in the level json's format.'''\n result = f'{self.name};{self.formula.get_json_str()}'\n for atom in self:\n result += ';' + atom.get_json_str()\n return result\n\n def update_formula(self):\n '''To be called after mutating any atom elements. Update the formula of this molecule.'''\n self.formula = Formula()\n for atom in self:\n self.formula[atom.element] += 1\n\n def update_open_bonds(self):\n '''Update the count of open bonds. 
Since we only care about updating it well\n enough to know when it's 0, we'll ignore the triple bond limit, and count any open side of\n an atom as adding the remainder of its max bond count to the open bonds.\n '''\n self.open_bonds = 0\n for atom in self:\n if any(self[pos] is None for pos in atom.pos.neighbors()):\n self.open_bonds += atom.remaining_bonds() # Not exact but we don't need it to be\n\n def open_positions(self):\n '''Return a list of valid grid positions where an atom could be added to this molecule.'''\n # For an empty molecule, all positions are open\n if len(self) == 0:\n return [GridPos(r, c, large_output=self.large_output)\n for r in range(self.num_rows) for c in range(self.num_cols)]\n # If there are no remaining bonds, we can skip the overhead of walking through the atoms\n elif self.open_bonds == 0:\n return []\n\n checked_posns = set() # For O(1) checks on whether a position has already been added\n for atom in self:\n if atom.remaining_bonds() > 0:\n for pos in atom.pos.neighbors():\n if self[pos] is None and pos not in checked_posns:\n checked_posns.add(pos)\n return list(checked_posns)\n\n def add_atom(self, new_atom):\n '''Adds the given Atom to this molecule. The Atom's position must be open in this molecule.\n Also adds any bonds specified by the incoming atom to its neighboring atoms.\n For convenience of more complex operations, it is allowable to add an atom with unfulfilled\n bonds or which is not connected to the rest of the molecule.\n '''\n if self[new_atom.pos] is not None:\n raise Exception(f\"Conflict with existing atom; cannot add {repr(new_atom)} to \\n{self}\")\n\n # Add the atom into our grid \/ formula. Then add its bonds while re-calculating self.open_bonds\n self[new_atom.pos] = new_atom\n self.used_posns.add(new_atom.pos)\n self.formula[new_atom.element] += 1\n\n # Quick helper to check if an atom within this molecule's grid has at least 1 open side\n def has_open_side(atom):\n return any(self[pos] is None for pos in atom.pos.neighbors())\n\n # Partial update of the number of open bonds this molecule has\n if has_open_side(new_atom):\n self.open_bonds += new_atom.remaining_bonds()\n\n # Add bonds to neighbours matching the bonds indicated on this atom\n for dir, pos in new_atom.pos.dirs_and_neighbors():\n adj_atom = self[pos]\n if adj_atom is not None:\n adj_atom.bonds[opposite_dir(dir)] = new_atom.bonds[dir]\n # Subtract the bond we just added from the molecule's 'open bonds'\n self.open_bonds -= new_atom.bonds[dir]\n\n # If we closed off the neighbor's last open face, we've additionally removed\n # however many bonds it now has left from the molecule's 'open' bonds\n if not has_open_side(adj_atom):\n self.open_bonds -= adj_atom.remaining_bonds()\n\n def remove_atom(self, atom):\n '''Remove the specified atom from this molecule. Must exactly match.'''\n if self[atom.pos] != atom:\n raise ValueError(f\"Specified atom {repr(new_atom)} does not match an atom in:\\n{self}\"\n + \"\\nCannot be removed.\")\n\n self[atom.pos] = None\n self.formula[atom.element] -= 1\n\n # Remove any now-trailing bonds on neighbors\n for dir, pos in atom.pos.dirs_and_neighbors():\n adj_atom = self[pos]\n if adj_atom is not None:\n adj_atom.bonds[opposite_dir(dir)] = 0\n self.update_open_bonds()\n\n def is_connected(self):\n '''For the purposes of more advanced construction algorithms we allow adding atoms in\n unconnected cells. 
This checks if the molecule is currently 'connected' and thus valid.\n We'll count empty molecules as unconnected.\n '''\n if len(self) == 0:\n return False\n\n # Do a DFS starting from one atom and following the bonds of the molecule. If we don't\n # find every atom, it's not connected\n sample_pos = next(iter(self.used_posns))\n stack = [self[sample_pos]]\n # We don't have to actually 'visit' every atom, seeing them as neighbors is sufficient\n seen = {sample_pos} # Track the grid positions of seen connected atoms\n while stack:\n if len(seen) == len(self):\n return True\n\n atom = stack.pop()\n # Check for connected neighbors. When we see an unseen connected atom, add it to the\n # stack\n for dir, adj_pos in atom.pos.dirs_and_neighbors():\n if atom.bonds[dir] != 0 and adj_pos not in seen:\n seen.add(adj_pos)\n adj_atom = self[adj_pos]\n stack.append(adj_atom)\n return False\n\n def shift(self, rows=0, cols=0):\n '''Shift the current contents of this molecule downward\/rightward by the specified number\n of rows\/columns. Negative numbers shift upward\/leftward.\n\n Raise an exception if this would place atoms out-of-bounds.\n '''\n # Make sure this is a legal shift\n for atom in self:\n if (atom.row + rows < 0 or atom.row + rows > self.num_rows) \\\n or (atom.col + cols < 0 or atom.col + cols > self.num_cols):\n raise Exception(f'Cannot shift molecule\\n{self}\\nby {rows} rows and {cols} cols')\n\n # Wipe the grid clean and re-add the atoms in their new positions\n atoms = list(self)\n self.grid = [[None, None, None, None] for r in range(self.num_rows)]\n self.used_posns = set()\n for atom in atoms:\n atom.set_pos(GridPos(atom.row + rows, atom.col + cols, large_output=self.large_output))\n self[atom.pos] = atom\n\n # Recount open bonds once we're done since some atoms may no longer have open sides\n self.update_open_bonds()\n\n def add_molecule(self, other):\n '''Add the specified molecule to this molecule. 
Must not have any atoms in conflicting\n positions.\n '''\n # Check for conflicts\n if any(self[atom.pos] is not None for atom in other):\n raise Exception(f'Cannot add molecule\\n{other} to molecule\\n{self}; conflicting atoms')\n\n # Add the new atoms\n for atom in other:\n self.add_atom(atom)\n\nclass Level:\n '''Parent class for Research and Production levels.'''\n def __init__(self):\n self.dict = {}\n\n def __getitem__(self, item):\n return self.dict[item]\n\n def __setitem__(self, item, val):\n self.dict[item] = val\n\n def __str__(self):\n return json.dumps(self.dict)\n\n def get_code(self):\n '''Get the mission code - gzip then b64 the level json.'''\n out = io.BytesIO()\n with gzip.GzipFile(fileobj=out, mode=\"w\") as f:\n f.write(json.dumps(self.dict).encode('utf-8'))\n return base64.b64encode(out.getvalue()).decode()\n\nclass ResearchLevel(Level):\n def __init__(self):\n Level.__init__(self)\n self['input-zones'] = {}\n self['output-zones'] = {}\n\n self['has-large-output'] = False\n\n # Features of the level\n self['bonder-count'] = 0\n self['has-sensor'] = False\n self['has-fuser'] = False\n self['has-splitter'] = False\n self['has-teleporter'] = False\n\n self['type'] = 'research'\n self['name'] = 'RandomlyGenerated'\n self['author'] = \"\"\n self['difficulty'] = 0\n\nclass ProductionLevel(Level):\n def __init__(self):\n Level.__init__(self)\n self['terrain'] = 0\n self['random-input-zones'] = {} # Max 1 random\n self['fixed-input-zones'] = {} # Max 2 fixed\n self['output-zones'] = {} # Max 3 outputs\n\n self['max-reactors'] = 6 # Default maximum allowed\n\n self['has-starter'] = False\n self['has-assembly'] = False\n self['has-disassembly'] = False\n self['has-advanced'] = False # Sensor reactor\n self['has-nuclear'] = False\n self['has-superbonder'] = False\n self['has-recycler'] = False\n\n self['type'] = 'production'\n self['name'] = 'RandomlyGenerated'\n self['author'] = \"\"\n self['difficulty'] = 0\n\n\ndef splittable_sources(given):\n '''Given a Counter of ints, return a dict of ints and their total counts that can be\n non-trivially constructed from the given integers, using only series' of addition of integers\n within 1 of each other, and where no given int is used more times than its count in the Counter.\n In other words, we're using the inverse fission operation to calculate viable input elements\n that could have been split any (non-0) # of times to create part of the given output.\n '''\n # NOTE: We ask for a Counter as input because unlike dicts, they implicitly return 0 if\n # asked for a key they don't contain, which simplifies our code\n\n # Tally tracking ints we were given\/discover, the max of each we can create at once, and\n # some additional helper values - as needed we'll create dicts that track how many N-1 ints\n # we can create at the same time as any possible count of N's. 
We'll also track the\n # 'most balanced' such allocation of N vs N-1, which will allow us to properly get counts for\n # odd ints, as well as assisting in the creation of higher dicts.\n tally = {}\n # Dict of ints that were constructable (not just given), and their max counts\n constructed = {}\n\n # Keep a running sum of what we were given so we don't waste time on clearly impossible sums\n givens_sum = 0\n\n # To avoid the overhead of a priority queue, use one queue for the given ints,\n # and one queue for ints we obtained by addition (we'll add them in numerical order).\n # Loop on whichever's next value is lower\n given_queue = collections.deque(sorted(given.keys()))\n added_queue = collections.deque()\n while given_queue or added_queue:\n # Pop the element we're iterating on - pop from both queues at once if they match\n if (not added_queue\n or (given_queue and given_queue[0] < added_queue[0])):\n n = given_queue.popleft()\n else:\n if given_queue and given_queue[0] == added_queue[0]:\n given_queue.popleft()\n n = added_queue.popleft()\n\n # Calculate how many copies of n we can obtain at once\n if n % 2 == 0:\n # If n is even, we only need to update its count, based on\n # the count of n \/ 2 and however much of n we were given to start\n component_count = tally[n \/\/ 2]['count'] if n \/\/ 2 in tally else 0\n this_count = component_count \/\/ 2 + given[n]\n else:\n # To count odd n, we must first make a dict that pairs the max # of n \/\/ 2 that can\n # be created for any given count of n \/\/ 2 + 1. We can do this recursively off\n # a previous such dict. When creating this dict we will also store the count\n # of n \/\/ 2 + 1 for which there can simultaneously be created a most closely balanced\n # count of n \/\/ 2. This can be used directly to count n and also to speed up\n # the recursive creation of dicts.\n # However we can skip this if either of n \/\/ 2 or n \/\/ 2 + 1 are unavailable.\n # Note that even if both are available they may not be addable so our count could\n # still come out to 0\n upper_child, lower_child = n \/\/ 2 + 1, n \/\/ 2\n if upper_child in tally and lower_child in tally:\n # In this case, calculate and store upper_child's neighbour dict\n tally[upper_child]['neighbour_counts'] = {}\n if upper_child == 2: # Calc 2->1 dict\n balanced_upper_count, min_count_in_best_balance = -1, -1\n for upper_count in range(1, tally[upper_child]['count'] + 1):\n lower_count = (tally[lower_child]['count']\n - 2*max(upper_count - given[upper_child], 0))\n tally[upper_child]['neighbour_counts'][upper_count] = lower_count\n\n # Check how balanced this allocation is\n worst_count = min(upper_count, lower_count)\n if worst_count >= min_count_in_best_balance:\n balanced_upper_count = upper_count\n min_count_in_best_balance = worst_count\n tally[upper_child]['balanced_count'] = balanced_upper_count\n elif upper_child == 3: # Calc 3->2 dict\n balanced_upper_count, min_count_in_best_balance = -1, -1\n for upper_count in range(1, tally[upper_child]['count'] + 1):\n nongiven_upper_count = max(upper_count - given[upper_child], 0)\n # 2s count = (2s constructable given 1s used in 3s) - (2s used in 3s)\n lower_count = (given[2] + (given[1] - nongiven_upper_count) \/\/ 2\n - nongiven_upper_count)\n tally[upper_child]['neighbour_counts'][upper_count] = lower_count\n\n # Check how balanced this allocation is\n worst_count = min(upper_count, lower_count)\n if worst_count >= min_count_in_best_balance:\n balanced_upper_count = upper_count\n min_count_in_best_balance = worst_count\n # 
Store the most balanced count for upper_child\n tally[upper_child]['balanced_count'] = balanced_upper_count\n # If either the upper child or the lower child had no compound components,\n # the upper_child's neighbour_counts dict is just the max count of lower_child,\n # regardless of the count of upper_child\n elif (tally[upper_child]['count'] == given[upper_child]\n or tally[lower_child]['count'] == given[lower_child]):\n tally[upper_child]['neighbour_counts'] = {\n i: tally[lower_child]['count']\n for i in range(1, tally[upper_child]['count'] + 1) }\n # Since the lower_child gets the same count no matter what, just maximize\n # upper_child's count for the 'balanced' allocation\n tally[upper_child]['balanced_count'] = tally[upper_child]['count']\n # Otherwise, based on our recursion principle, the upper child's upper\n # child must already have its neighbour_counts dict set. Use that to calculate\n # the upper child's neighbour_counts. The algorithm for this depends on which of\n # upper_child\/lower_child is even.\n # We also have a couple of base cases to handle when building the neighbour dict\n # dict for 3->2 and 2-> 1, since in those cases lower_child is also a component\n # of upper_child\n elif upper_child % 2 == 0:\n # If the upper child is even, calculate how much of lower_child's components\n # are used up by any valid count of upper_child, and thus the max\n # lower_child count for that count of upper_child.\n # Call A upper_child \/\/ 2 and B the other (lower) component of lower_child\n A = upper_child \/\/ 2\n balanced_upper_count, min_count_in_best_balance = -1, -1\n for upper_count in range(1, tally[upper_child]['count'] + 1):\n if upper_count <= given[upper_child]:\n lower_count = tally[lower_child]['count']\n else:\n A_used_in_upper = 2*(upper_count - given[upper_child])\n if A_used_in_upper == tally[A]['count']:\n lower_count = given[lower_child]\n else:\n # Search to the right of the original balance point and\/or our\n # new A limit, to find a balance given the unusable As:\n start_idx = max(tally[A]['balanced_count'], A_used_in_upper + 1)\n built_lower_count = 0\n for used_A in range(start_idx, tally[A]['count'] + 1):\n worst_count = min(used_A - A_used_in_upper,\n tally[A]['neighbour_counts'][used_A])\n built_lower_count = max(built_lower_count, worst_count)\n lower_count = built_lower_count + given[lower_child]\n tally[upper_child]['neighbour_counts'][upper_count] = lower_count\n\n # Check how balanced this allocation is\n worst_count = min(upper_count, lower_count)\n if worst_count >= min_count_in_best_balance:\n balanced_upper_count = upper_count\n min_count_in_best_balance = worst_count\n # Store the most balanced count for upper child\n tally[upper_child]['balanced_count'] = balanced_upper_count\n else:\n # If the upper child is odd, call its subchildren A and B, where B =\n # lower_child \/\/ 2. 
Using A's neighbour_counts dict, calculate how much B and\n # from that how much lower_child we can make for any valid count of\n # upper_child\n A = upper_child \/\/ 2 + 1\n # For each possible count of upper_child, calculate how many copies of\n # lower_child can be simultaneously constructed from the leftovers\n # Also track the 'most balanced' count we can assign to upper_child\n balanced_upper_count, min_count_in_best_balance = -1, -1\n for upper_count in range(1, tally[upper_child]['count'] + 1):\n if upper_count <= given[upper_child]:\n lower_count = tally[lower_child]['count']\n else:\n used_A = used_B = upper_count - given[upper_child]\n available_B = (tally[A]['neighbour_counts'][used_A] - used_A)\n lower_count = available_B \/\/ 2 + given[lower_child]\n tally[upper_child]['neighbour_counts'][upper_count] = lower_count\n\n # Check how balanced this allocation is\n worst_count = min(upper_count, lower_count)\n if worst_count >= min_count_in_best_balance:\n balanced_upper_count = upper_count\n min_count_in_best_balance = worst_count\n # Store the most balanced count for upper child\n tally[upper_child]['balanced_count'] = balanced_upper_count\n\n # Calculate the count of n based on upper_child's most balanced count\n balanced_upper_count = tally[upper_child]['balanced_count']\n this_count = (min(balanced_upper_count,\n tally[upper_child]['neighbour_counts'][balanced_upper_count])\n + given[n])\n else:\n # If n only occurred as an input and not a compound, set it to the given count\n # The n = 1 case is handled here since 1 can never be compound\n # We don't need to calculate its neighbour dict in this case.\n this_count = given[n]\n\n # If the count came out to 0, ignore this int\n if this_count == 0:\n continue\n # Update the tally with the discovered count\n tally[n] = {'count': this_count}\n # Add this int to the output dict if it was possible to construct\n # (not just obtained from the givens)\n if this_count != given[n]:\n constructed[n] = this_count\n\n # Add any viable sums (restricted to valid atomic masses) obtained from n to the queue\n # As a mini-optimization, we won't add odd numbers to the queue that exceed the sum of\n # the givens up to n\n givens_sum += n*given[n]\n # If n - 1 is in the tally, add 2n - 1 to the queue\n if (n - 1 in tally\n and (2*n - 1 <= 109 or 2*n - 1 in (201, 203))\n and 2*n - 1 <= givens_sum):\n added_queue.append(2*n - 1)\n # If the count for n was at least 2, add 2n to the queue\n if tally[n]['count'] >= 2 and (2*n <= 109 or 2*n in (200, 202)):\n added_queue.append(2*n)\n\n # Once we've looped over all possible sums, return a dict of the relevant ints and their counts\n return constructed\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_114","text":"src\/hcb\/tools\/analysis\/probability_util.py\nimport math\nfrom typing import Union, Callable, Sequence, Tuple\n\nimport numpy as np\nfrom scipy.stats import linregress\nfrom scipy.optimize import leastsq\nfrom scipy.stats._stats_mstats_common import LinregressResult\n\n\ndef log_binomial(*, p: Union[float, np.ndarray], n: int, hits: int) -> Union[float, np.ndarray]:\n r\"\"\"Approximates $\\ln(P(hits = B(n, p)))$; the natural logarithm of a binomial distribution.\n\n All computations are done in log space to ensure intermediate values can be represented as\n floating point numbers without underflowing to 0 or overflowing to infinity. This is necessary\n when computing likelihoods over many samples. 
For example, if 80% of a million samples are hits,\n the maximum likelihood estimate is p=0.8. But even this optimal estimate assigns a prior\n probability of roughly 10^-217322 for seeing *exactly* 80% hits out of a million (whereas the\n smallest representable double is roughly 10^-324).\n\n This method can be broadcast over multiple hypothesis probabilities.\n\n Args:\n p: The independent probability of a hit occurring for each sample. This can also be an array\n of probabilities, in which case the function is broadcast over the array.\n n: The number of samples that were taken.\n hits: The number of hits that were observed amongst the samples that were taken.\n\n Returns:\n $\\ln(P(hits = B(n, p)))$\n \"\"\"\n # Clamp probabilities into the valid [0, 1] range (in case float error put them outside it).\n p_clipped = np.clip(p, 0, 1)\n\n result = np.zeros(shape=p_clipped.shape, dtype=np.float32)\n misses = n - hits\n\n # Handle p=0 and p=1 cases separately, to avoid arithmetic warnings.\n if hits:\n result[p_clipped == 0] = -np.inf\n if misses:\n result[p_clipped == 1] = -np.inf\n\n # Multiply p**hits and (1-p)**misses onto the total, in log space.\n result[p_clipped != 0] += np.log(p_clipped[p_clipped != 0]) * hits\n result[p_clipped != 1] += np.log1p(-p_clipped[p_clipped != 1]) * misses\n\n # Multiply (n choose hits) onto the total, in log space.\n log_n_choose_hits = log_factorial(n) - log_factorial(misses) - log_factorial(hits)\n result += log_n_choose_hits\n\n return result\n\n\ndef log_factorial(n: int) -> float:\n r\"\"\"Approximates $\\ln(n!)$; the natural logarithm of a factorial.\n\n Uses Stirling's approximation for large n.\n \"\"\"\n if n < 20:\n return sum(math.log(k) for k in range(1, n + 1))\n return (n + 0.5) * math.log(n) - n + math.log(2 * np.pi) \/ 2\n\n\ndef binary_search(*, func: Callable[[int], float], min_x: int, max_x: int, target: float) -> int:\n \"\"\"Performs an approximate granular binary search over a monotonically ascending function.\"\"\"\n while max_x > min_x + 1:\n med_x = (min_x + max_x) \/\/ 2\n out = func(med_x)\n if out < target:\n min_x = med_x\n elif out > target:\n max_x = med_x\n else:\n return med_x\n fmax = func(max_x)\n fmin = func(min_x)\n dmax = 0 if fmax == target else fmax - target\n dmin = 0 if fmin == target else fmin - target\n return max_x if abs(dmax) < abs(dmin) else min_x\n\n\ndef binary_intercept(*, func: Callable[[float], float], start_x: float, step: float, target_y: float, atol: float) -> float:\n \"\"\"Performs an approximate granular binary search over a monotonically ascending function.\"\"\"\n start_y = func(start_x)\n if abs(start_y - target_y) <= atol:\n return start_x\n while (func(start_x + step) >= target_y) == (start_y >= target_y):\n step *= 2\n if np.isinf(step) or step == 0:\n raise ValueError(\"Failed.\")\n xs = [start_x, start_x + step]\n min_x = min(xs)\n max_x = max(xs)\n increasing = func(min_x) < func(max_x)\n\n while True:\n med_x = (min_x + max_x) \/ 2\n med_y = func(med_x)\n if abs(med_y - target_y) <= atol:\n return med_x\n assert med_x not in [min_x, max_x]\n if (med_y < target_y) == increasing:\n min_x = med_x\n else:\n max_x = med_x\n\n\ndef least_squares_cost(*, xs: np.ndarray, ys: np.ndarray, intercept: float, slope: float) -> float:\n assert len(xs.shape) == 1\n assert xs.shape == ys.shape\n return np.sum((intercept + slope*xs - ys)**2)\n\n\ndef least_squares_through_point(*, xs: np.ndarray, ys: np.ndarray, required_x: float, required_y: float) -> LinregressResult:\n xs2 = xs - required_x\n ys2 = ys 
- required_y\n\n def err(slope: float) -> float:\n return least_squares_cost(xs=xs2, ys=ys2, intercept=0, slope=slope)\n\n (best_slope,), _ = leastsq(func=err, x0=0.0)\n intercept = required_y - required_x * best_slope\n return LinregressResult(best_slope, intercept, None, None, None, intercept_stderr=False)\n\n\ndef least_squares_with_slope(*, xs: np.ndarray, ys: np.ndarray, required_slope: float) -> LinregressResult:\n def err(intercept: float) -> float:\n return least_squares_cost(xs=xs, ys=ys, intercept=intercept, slope=required_slope)\n\n (best_intercept,), _ = leastsq(func=err, x0=0.0)\n return LinregressResult(required_slope, best_intercept, None, None, None, intercept_stderr=False)\n\n\ndef least_squares_output_range(*,\n xs: Sequence[float],\n ys: Sequence[float],\n target_x: float,\n cost_increase: float) -> Tuple[float, float, float]:\n xs = np.array(xs, dtype=np.float64)\n ys = np.array(ys, dtype=np.float64)\n fit = linregress(xs, ys)\n base_cost = least_squares_cost(xs=xs, ys=ys, intercept=fit.intercept, slope=fit.slope)\n base_y = float(fit.intercept + target_x * fit.slope)\n\n def cost_for_y(y2: float) -> float:\n fit2 = least_squares_through_point(xs=xs, ys=ys, required_x=target_x, required_y=y2)\n return least_squares_cost(xs=xs, ys=ys, intercept=fit2.intercept, slope=fit2.slope)\n\n low_y = binary_intercept(start_x=base_y, step=-1, target_y=base_cost + cost_increase, func=cost_for_y, atol=1e-5)\n high_y = binary_intercept(start_x=base_y, step=1, target_y=base_cost + cost_increase, func=cost_for_y, atol=1e-5)\n return low_y, base_y, high_y\n\n\ndef least_squares_slope_range(*,\n xs: Sequence[float],\n ys: Sequence[float],\n cost_increase: float) -> Tuple[float, float, float]:\n xs = np.array(xs, dtype=np.float64)\n ys = np.array(ys, dtype=np.float64)\n fit = linregress(xs, ys)\n base_cost = least_squares_cost(xs=xs, ys=ys, intercept=fit.intercept, slope=fit.slope)\n\n def cost_for_slope(slope: float) -> float:\n fit2 = least_squares_with_slope(xs=xs, ys=ys, required_slope=slope)\n return least_squares_cost(xs=xs, ys=ys, intercept=fit2.intercept, slope=fit2.slope)\n\n low_slope = binary_intercept(start_x=fit.slope, step=-1, target_y=base_cost + cost_increase, func=cost_for_slope, atol=1e-5)\n high_slope = binary_intercept(start_x=fit.slope, step=1, target_y=base_cost + cost_increase, func=cost_for_slope, atol=1e-5)\n return low_slope, fit.slope, high_slope\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_115","text":"from sklearn.ensemble import GradientBoostingClassifier\nfrom commons import variables\nfrom commons import tools\nfrom scipy.stats import mode\n\n\ndef learn(x, y, test_x):\n # set sample weight\n\n\n weight_list = []\n for j in range(len(y)):\n if y[j] == \"0\":\n weight_list.append(variables.weight_0_gdbt_b)\n if y[j] == \"1000\":\n weight_list.append(variables.weight_1000_gdbt_b)\n if y[j] == \"1500\":\n weight_list.append(variables.weight_1500_gdbt_b)\n if y[j] == \"2000\":\n weight_list.append(variables.weight_2000_gdbt_b)\n\n clf = GradientBoostingClassifier(loss='deviance', n_estimators=variables.n_estimators_gdbt_b,\n learning_rate=variables.learning_rate_gdbt_b,\n max_depth=variables.max_depth_gdbt_b, random_state=0,\n min_samples_split=variables.min_samples_split_gdbt_b,\n min_samples_leaf=variables.min_samples_leaf_gdbt_b,\n subsample=variables.subsample_gdbt_b,\n ).fit(x, y, weight_list)\n prediction_list = clf.predict(test_x)\n\n return prediction_list\n"} 
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_117","text":"'''original example for checking how far GAM works\n\nNote: uncomment plt.show() to display graphs\n'''\n\nexample = 2 # 1,2 or 3\n\nimport numpy as np\nimport numpy.random as R\nimport matplotlib.pyplot as plt\n\nfrom statsmodels.sandbox.gam import AdditiveModel\nfrom statsmodels.sandbox.gam import Model as GAM #?\nfrom statsmodels.genmod.families import family\nfrom statsmodels.genmod.generalized_linear_model import GLM\n\nstandardize = lambda x: (x - x.mean()) \/ x.std()\ndemean = lambda x: (x - x.mean())\nnobs = 150\nx1 = R.standard_normal(nobs)\nx1.sort()\nx2 = R.standard_normal(nobs)\nx2.sort()\ny = R.standard_normal((nobs,))\n\nf1 = lambda x1: (x1 + x1**2 - 3 - 1 * x1**3 + 0.1 * np.exp(-x1\/4.))\nf2 = lambda x2: (x2 + x2**2 - 0.1 * np.exp(x2\/4.))\nz = standardize(f1(x1)) + standardize(f2(x2))\nz = standardize(z) * 2 # 0.1\n\ny += z\nd = np.array([x1,x2]).T\n\n\nif example == 1:\n print \"normal\"\n m = AdditiveModel(d)\n m.fit(y)\n x = np.linspace(-2,2,50)\n\n print m\n\n y_pred = m.results.predict(d)\n plt.figure()\n plt.plot(y, '.')\n plt.plot(z, 'b-', label='true')\n plt.plot(y_pred, 'r-', label='AdditiveModel')\n plt.legend()\n plt.title('gam.AdditiveModel')\n\nimport scipy.stats, time\n\nif example == 2:\n print \"binomial\"\n f = family.Binomial()\n b = np.asarray([scipy.stats.bernoulli.rvs(p) for p in f.link.inverse(y)])\n b.shape = y.shape\n m = GAM(b, d, family=f)\n toc = time.time()\n m.fit(b)\n tic = time.time()\n print tic-toc\n\n\nif example == 3:\n print \"Poisson\"\n f = family.Poisson()\n y = y\/y.max() * 3\n yp = f.link.inverse(y)\n p = np.asarray([scipy.stats.poisson.rvs(p) for p in f.link.inverse(y)], float)\n p.shape = y.shape\n m = GAM(p, d, family=f)\n toc = time.time()\n m.fit(p)\n tic = time.time()\n print tic-toc\n\n\nplt.figure()\nplt.plot(x1, standardize(m.smoothers[0](x1)), 'r')\nplt.plot(x1, standardize(f1(x1)), linewidth=2)\nplt.figure()\nplt.plot(x2, standardize(m.smoothers[1](x2)), 'r')\nplt.plot(x2, standardize(f2(x2)), linewidth=2)\n\n\n\n\nplt.show()\n\n\n\n## pylab.figure(num=1)\n## pylab.plot(x1, standardize(m.smoothers[0](x1)), 'b')\n## pylab.plot(x1, standardize(f1(x1)), linewidth=2)\n## pylab.figure(num=2)\n## pylab.plot(x2, standardize(m.smoothers[1](x2)), 'b')\n## pylab.plot(x2, standardize(f2(x2)), linewidth=2)\n## pylab.show()\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_118","text":"0\nimport math\r\nimport os\r\nimport random\r\n\r\nimport bpy\r\nimport scipy.misc\r\n\r\n\r\nIMAGE_SIZE = 64\r\nDISTANCE = 2.732\r\n\r\n\r\ndef set_camera_location(elevation, azimuth, distance):\r\n # set location\r\n x = 1 * math.cos(math.radians(-azimuth)) * math.cos(math.radians(elevation)) * distance\r\n y = 1 * math.sin(math.radians(-azimuth)) * math.cos(math.radians(elevation)) * distance\r\n z = 1 * math.sin(math.radians(elevation)) * distance\r\n camera = bpy.data.objects[\"Camera\"]\r\n camera.location = x, y, z\r\n\r\n # look at center\r\n direction = - camera.location\r\n rot_quat = direction.to_track_quat('-Z', 'Y')\r\n camera.rotation_euler = rot_quat.to_euler()\r\n\r\n\r\ndef render(directory, elevation=30, distance=DISTANCE):\r\n for azimuth in range(0, 360, 15):\r\n filename = os.path.join(directory, 'e%03d_a%03d.png' % (elevation, azimuth))\r\n set_camera_location(elevation, azimuth, distance) \r\n bpy.context.scene.render.filepath = filename\r\n bpy.ops.render.render(write_still=True)\r\n\r\n if False:\r\n img 
= scipy.misc.imread(filename)[:, :, :].astype('float32') \/ 255.\r\n if False:\r\n img = (img[::2, ::2] + img[1::2, ::2] + img[::2, 1::2] + img[1::2, 1::2]) \/ 4.\r\n else:\r\n import chainer.functions as cf\r\n img = img.transpose((2, 0, 1))[None, :, :, :]\r\n img = cf.resize_images(img, (64, 64))\r\n img = img[0].data.transpose((1, 2, 0))\r\n\r\n img = (img * 255).clip(0., 255.).astype('uint8')\r\n scipy.misc.imsave(filename, img)\r\n\r\n\r\ndef setup():\r\n context = bpy.context\r\n if False:\r\n context.scene.render.resolution_x = IMAGE_SIZE * 2\r\n context.scene.render.resolution_y = IMAGE_SIZE * 2\r\n context.scene.render.resolution_percentage = 100\r\n context.scene.render.use_antialiasing = False\r\n else:\r\n context.scene.render.resolution_x = IMAGE_SIZE\r\n context.scene.render.resolution_y = IMAGE_SIZE\r\n context.scene.render.resolution_percentage = 100\r\n context.scene.render.use_antialiasing = True\r\n context.scene.render.use_free_unused_nodes = True\r\n context.scene.render.use_free_image_textures = True\r\n context.scene.render.alpha_mode = 'TRANSPARENT'\r\n bpy.context.scene.render.image_settings.color_mode = 'RGBA'\r\n\r\n # camera\r\n camera = bpy.data.cameras.values()[0]\r\n camera.sensor_width = 1\r\n camera.sensor_height = 1\r\n camera.lens = 1.8660254037844388\r\n\r\n # lighting\r\n light = bpy.data.objects['Lamp']\r\n light.data.energy = 1\r\n context.scene.world.light_settings.use_environment_light = True\r\n context.scene.world.light_settings.environment_energy = 0.5\r\n context.scene.world.light_settings.environment_color = 'PLAIN'\r\n\r\n\r\ndef load_obj(filename):\r\n # filename = '\/home\/hkato\/temp\/obj\/model.obj'\r\n # filename = '\/media\/disk2\/lab\/large_data\/ShapeNetCore.v1\/03001627\/1bcec47c5dc259ea95ca4adb70946a21\/model.obj'\r\n bpy.ops.import_scene.obj(filepath=filename, use_smooth_groups=False, use_split_objects=False,\r\n use_split_groups=False)\r\n object_id = len(bpy.data.objects) - 1\r\n obj = bpy.data.objects[object_id]\r\n bpy.context.scene.objects.active = obj\r\n\r\n # get max & min of vertices\r\n inf = 10000\r\n vertex_max = [-inf, -inf, -inf]\r\n vertex_min = [inf, inf, inf]\r\n for j in range(8):\r\n for i in range(3):\r\n vertex_max[i] = max(vertex_max[i], obj.bound_box[j][i])\r\n vertex_min[i] = min(vertex_min[i], obj.bound_box[j][i])\r\n dimensions = obj.dimensions # = max - min\r\n\r\n # centering\r\n for i in range(3):\r\n obj.location[i] += (vertex_max[i] + vertex_min[i]) \/ 2\r\n\r\n # scaling\r\n scale = max(dimensions)\r\n for i in range(3):\r\n obj.scale[i] = obj.scale[i] \/ scale\r\n\r\n # materials\r\n for m in bpy.data.materials:\r\n m.ambient = 0.5\r\n m.use_shadeless = False\r\n m.use_transparency = False\r\n m.use_raytrace = False\r\n\r\n\r\ndef clear():\r\n bpy.ops.wm.open_mainfile(filepath='\/home\/hkato\/temp\/untitled.blend')\r\n\r\n\r\ndef run():\r\n \r\n #se crea una lista de id, dentro del dataset para entrenar el modelo con esos objetos\r\n \"\"\"class_ids = [\r\n '02691156', '02828884', '02933112', '02958343', '03001627', '03211117', '03636649', '03691459', '04090263',\r\n '04256520', '04379243', '04401088', '04530566']\"\"\"\r\n class_ids = ['02942699']\r\n\r\n \"\"\"\r\n directory_shapenet_id = '..\/..\/resource\/shapenetcore_ids'\r\n directory_rendering = '\/media\/disk2\/lab\/projection\/reconstruction\/shapenet_images_%d_%.1f\/%s\/%s'\r\n filename_shapenet_obj = '\/media\/disk2\/lab\/large_data\/ShapeNetCore.v1\/%s\/%s\/model.obj'\r\n \"\"\"\r\n\r\n \"\"\"\r\n directory_shapenet_id = 
'\/media\/gustavo\/gusgus\/TROOPS\/shapenetcore_ids'\r\n directory_rendering = '\/media\/gustavo\/gusgus\/TROOPS\/mesh_reconstruction\/shapenet_images_%d_%.1f\/%s\/%s'\r\n filename_shapenet_obj = '\/media\/gustavo\/gusgus\/TROOPS\/ShapeNetCore.v2\/%s\/%s\/model.obj'\r\n \"\"\"\r\n\r\n directory_shapenet_id = '\/content\/3D-Sneakers\/shapenetcore_ids'\r\n directory_rendering = '\/content\/3D-Sneakers\/mesh_reconstruction\/shapenet_images_%d_%.1f\/%s\/%s'\r\n filename_shapenet_obj = '\/content\/3D-Sneakers\/ShapeNetCore.v2\/%s\/%s\/model.obj'\r\n\r\n # ce33bf3ec6438e5bef662d1962a11f02\r\n for class_id in class_ids:\r\n\r\n ids = open(os.path.join(directory_shapenet_id, '%s_trainids.txt' % class_id), 'w+').readlines()\r\n ids += open(os.path.join(directory_shapenet_id, '%s_valids.txt' % class_id), 'w+').readlines()\r\n ids += open(os.path.join(directory_shapenet_id, '%s_testids.txt' % class_id), 'w+').readlines()\r\n obj_ids = [i.strip().split('\/')[-1] for i in ids if len(i.strip()) != 0]\r\n\r\n for i, obj_id in enumerate(obj_ids):\r\n print('rendering: %s %d \/ %d' % (class_id, i, len(obj_ids)))\r\n\r\n directory = directory_rendering % (IMAGE_SIZE, DISTANCE, class_id, obj_id)\r\n directory_tmp = directory + '_'\r\n if os.path.exists(directory):\r\n continue\r\n if os.path.exists(directory_tmp):\r\n continue\r\n try:\r\n os.makedirs(directory_tmp)\r\n except:\r\n continue\r\n\r\n clear()\r\n setup()\r\n load_obj(filename_shapenet_obj % (class_id, obj_id))\r\n render(directory_tmp)\r\n try:\r\n os.rename(directory_tmp, directory)\r\n except:\r\n continue\r\n\r\n\r\nrun()\r\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_119","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Main module.\"\"\"\nimport numpy as np\nimport scipy.stats as ss\nimport matplotlib.pyplot as plt\nimport argparse\n\n\ndef getinput():\n input = argparse.ArgumentParser()\n input.add_argument('--initial_position', type = float, default = 0, help = 'Initial position of the particle, default = 0' )\n input.add_argument('--initial_velocity', type = float, default = 0, help = 'Initial velocity of the particle, default = 0' )\n input.add_argument('--temperature', type = float, default = 300, help = 'Temperature of the molecule, default = 300' )\n input.add_argument('--damping_coefficient', type = float, default = 0.1, help = 'Damping Coefficient of the molecule, default = 0.1' )\n input.add_argument('--time_step', type = float, default = 0.2, help = 'Time interval of the simulation, default = 0.01' )\n input.add_argument('--wall_size', type = float, default = 5, help = 'Wall size of the simulation, default = 5' )\n input.add_argument('--total_time', type = float, default = 1000, help = 'Total time of the simulation, default = 1000' )\n inp = input.parse_args()\n return inp\n\ndef acceleration(gamma=0.1,velocity=0,temperature=300,timestep=0.1,mass=1):\n sigma=np.sqrt(2*temperature*gamma*1*timestep)\n return (-gamma*velocity\/mass + np.random.normal(0,sigma))*timestep\n\ndef checkwall(position, wallsize):\n if position >= wallsize or position<=0:\n return True\n else:\n return False\n \n\ndef lgmotion(velocity,timestep):\n return velocity*timestep\n\ndef integrate(position=0,velocity=0,temperature=300,gamma=0.1,timestep=0.1,wallsize=5,totaltime=1000,mass=1):\n \n timepass=0\n indexnum=0\n index=[]\n \n while timepass < totaltime :\n indexnum +=1\n index.append([indexnum,timepass,position,velocity])\n timepass+=timestep\n velocity += acceleration(gamma, velocity, temperature, timestep)\n position += 
lgmotion(velocity, timestep)\n if checkwall(position,wallsize):\n if position >= wallsize:\n position = wallsize\n index.append([indexnum+1,timepass,position,velocity])\n else:\n position= 0\n index.append([indexnum+1,timepass,position,velocity])\n break\n \n return timepass,index\n\n\ndef filecreation(index):\n indexf=np.array(index)\n timef=indexf[:,1]\n positionf=indexf[:,2]\n velocityf=indexf[:,3]\n with open('Langevin_Motion.txt','w+') as file:\n file.write('Index Time Position Velocity \\n')\n for i in range(len(timef)):\n file.write('{} {:.3f} {:.5f} {:.5f} \\n'.format(i,timef[i],positionf[i],velocityf[i]))\n\ndef histogram(arr):\n plt.figure(0)\n plt.hist(arr,bins=20)\n plt.title('100 runs of Langevin Motion')\n plt.xlabel('Time passed')\n plt.ylabel('Number of runs')\n plt.savefig('histogram.png')\n\ndef trajectory(x,y):\n plt.figure(1)\n plt.plot(x,y)\n plt.title('Position vs Time')\n plt.xlabel('Time passed')\n plt.ylabel('Position')\n plt.savefig('trajectory.png')\n\n\ndef main():\n #get input for simulation\n inp=getinput()\n \n #run for 100 times, collecting all the relavant data\n\n t_arr=[] #time\n for i in range(100):\n t,idx=integrate(position=inp.initial_position,velocity=inp.initial_velocity,temperature=inp.temperature,gamma=inp.damping_coefficient,timestep=inp.time_step,wallsize=inp.wall_size,totaltime=inp.total_time,mass=1)\n t_arr.append(t)\n \n \n #plot the histogram of 100 runs\n histogram(t_arr)\n\n #plot the position vs time plot of the last run\n trjdata=np.array(idx)\n xdata=trjdata[:,1]\n ydata=trjdata[:,2]\n trajectory(xdata,ydata)\n\n #write the index in to a txt file of the first run\n filecreation(idx)\n\nif __name__ == '__main__':\n main()\n\n \n\n\n \n \n\n\n\n\n\n\n\n \n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_120","text":"0\nimport numpy as np\nfrom scipy import interpolate\nimport os\nimport sys\nimport subprocess\nimport abc\nfrom six import with_metaclass\n\ntry:\n devnull = open(os.devnull, 'w')\n subprocess.call(['gnuplot', '--version'], stdout=devnull, stderr=devnull)\n import gnuplotpy as gp\n MPL = False\nexcept:\n import matplotlib.pylab as plt\n MPL = True\n\ndef use_gnuplot():\n \"\"\"\n Use gnuplot as the plotting tool for any structure related outputs.\n \"\"\"\n global gp\n import gnuplotpy as gp\n global MPL\n MPL = False\n\ndef use_matplotlib():\n \"\"\"\n Use matplotlib as the plotting tool for any structure related outputs.\n \"\"\"\n global plt\n import matplotlib.pylab as plt\n global MPL\n MPL = True\n\nclass _AbstractStructure(with_metaclass(abc.ABCMeta)):\n @abc.abstractproperty\n def n(self):\n '''\n np.array: A grid of refractive indices representing\n the refractive index profile of the structure.\n '''\n pass\n\n @property\n def x_pts(self):\n '''\n int: The number of grid points in x.\n '''\n return int((self.x_max - self.x_min) \/ self.x_step + 1)\n\n @property\n def y_pts(self):\n '''\n int: The number of grid points in y.\n '''\n return int((self.y_max - self.y_min) \/ self.y_step)\n\n @property\n def x_ctr(self):\n '''\n float: The centre distance in x.\n '''\n return 0.5*(self.x_max + self.x_min)\n\n @property\n def y_ctr(self):\n '''\n float: The centre distance in y\n '''\n return 0.5*(self.y_max + self.y_min)\n\n @property\n def xc(self):\n '''\n np.array: The centre points of the x points.\n '''\n return 0.5*(self.x[1:] + self.x[:-1])\n\n @property\n def yc(self):\n '''\n np.array: The centre points of the y points.\n '''\n return 0.5*(self.y[1:] + self.y[:-1])\n\n 
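    # Note on the xc/yc properties above: they return cell-centre coordinates,
    # i.e. midpoints between consecutive grid points, so a grid with x_pts points
    # in x yields x_pts - 1 centre points (reported by xc_pts/yc_pts below).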
@property\n def xc_pts(self):\n '''\n int: The number of points in `xc`.\n '''\n return self.x_pts - 1\n\n @property\n def yc_pts(self):\n '''\n int: The number of points in `yc`.\n '''\n return self.y_pts - 1\n\n @property\n def xc_min(self):\n '''\n float: The minimum value of `xc`.\n '''\n return self.xc[0]\n\n @property\n def xc_max(self):\n '''\n float: The maximum value of `xc`.\n '''\n return self.xc[-1]\n\n @property\n def yc_min(self):\n '''\n float: The minimum value of `yc`.\n '''\n return self.yc[0]\n\n @property\n def yc_max(self):\n '''\n float: The maximum value of `yc`.\n '''\n return self.yc[-1]\n\n @property\n def x(self):\n '''\n np.array: The grid points in x.\n '''\n if None not in (self.x_min, self.x_max, self.x_step) and \\\n self.x_min != self.x_max:\n x = np.arange(self.x_min, self.x_max+self.x_step-self.y_step*0.1, self.x_step)\n else:\n x = np.array([])\n return x\n\n @property\n def y(self):\n '''\n np.array: The grid points in y.\n '''\n if None not in (self.y_min, self.y_max, self.y_step) and \\\n self.y_min != self.y_max:\n y = np.arange(self.y_min, self.y_max-self.y_step*0.1, self.y_step)\n else:\n y = np.array([])\n return y\n\n @property\n def eps(self):\n '''\n np.array: A grid of permittivies representing\n the permittivity profile of the structure.\n '''\n return self.n**2\n\n @property\n def eps_func(self):\n '''\n function: a function that when passed a `x` and `y` values,\n returns the permittivity profile of the structure,\n interpolating if necessary.\n '''\n interp_real = interpolate.interp2d(self.x, self.y, self.eps.real)\n interp_imag = interpolate.interp2d(self.x, self.y, self.eps.imag)\n interp = lambda x, y: interp_real(x, y) + 1.j*interp_imag(x, y)\n return interp\n\n @property\n def n_func(self):\n '''\n function: a function that when passed a `x` and `y` values,\n returns the refractive index profile of the structure,\n interpolating if necessary.\n '''\n return interpolate.interp2d(self.x, self.y, self.n)\n\n def _add_triangular_sides(self, xy_mask, angle, y_top_right, y_bot_left,\n x_top_right, x_bot_left, n_material):\n angle = np.radians(angle)\n trap_len = (y_top_right - y_bot_left) \/ np.tan(angle)\n num_x_iterations = trap_len \/ self.x_step\n y_per_iteration = num_x_iterations \/ self.y_pts\n\n lhs_x_start_index = int(x_bot_left\/ self.x_step + 0.5)\n rhs_x_stop_index = int(x_top_right\/ self.x_step + 1 + 0.5)\n\n running_removal_float = y_per_iteration\n for i, _ in enumerate(xy_mask):\n if running_removal_float >= 1:\n removal_int = int(round(running_removal_float))\n lhs_x_start_index -= removal_int\n rhs_x_stop_index += removal_int\n running_removal_float -= removal_int\n running_removal_float += y_per_iteration\n\n xy_mask[i][:lhs_x_start_index] = False\n xy_mask[i][lhs_x_start_index:rhs_x_stop_index] = True\n\n self.n[xy_mask] = n_material\n return self.n\n\n def _add_material(self, x_bot_left, y_bot_left, x_top_right, y_top_right,\n n_material, angle=0):\n '''\n A low-level function that allows writing a rectangle refractive\n index profile to a `Structure`.\n\n Args:\n x_bot_left (float): The bottom-left x-coordinate of the\n rectangle.\n y_bot_left (float): The bottom-left y-coordinate of the\n rectangle.\n x_top_right (float): The top-right x-coordinate of the\n rectangle.\n y_top_right (float): The top-right y-coordinate of the\n rectangle.\n n_material (float): The refractive index of the points\n encompassed by the defined rectangle.\n angle (float): The angle in degrees of the sidewalls\n of the defined rectangle. 
Default is 0. This\n is useful for creating a ridge with angled\n sidewalls.\n '''\n x_mask = np.logical_and(x_bot_left<=self.x, self.x<=x_top_right)\n y_mask = np.logical_and(y_bot_left<=self.y, self.y<=y_top_right)\n\n xy_mask = np.kron(y_mask, x_mask).reshape((y_mask.size, x_mask.size))\n self.n[xy_mask] = n_material\n\n if angle:\n self._add_triangular_sides(xy_mask, angle, y_top_right, y_bot_left,\n x_top_right, x_bot_left, n_material)\n\n return self.n\n\n def write_to_file(self, filename='material_index.dat', plot=True):\n '''\n Write the refractive index profile to file.\n\n Args:\n filename (str): The nominal filename the refractive\n index data should be saved to.\n plot (bool): `True` if plots should be generates,\n otherwise `False`. Default is `True`.\n '''\n path = os.path.dirname(sys.modules[__name__].__file__) + '\/'\n\n with open(filename, 'w') as fs:\n for n_row in np.abs(self.n[::-1]):\n n_str = ','.join([str(v) for v in n_row])\n fs.write(n_str+'\\n')\n\n if plot:\n filename_image_prefix, _ = os.path.splitext(filename)\n filename_image = filename_image_prefix + '.png'\n args = {\n 'title': 'Refractive Index Profile',\n 'x_pts': self.x_pts,\n 'y_pts': self.y_pts,\n 'x_min': self.x_min,\n 'x_max': self.x_max,\n 'y_min': self.y_min,\n 'y_max': self.y_max,\n 'filename_data': filename,\n 'filename_image': filename_image\n }\n\n if MPL:\n heatmap = np.loadtxt(args['filename_data'], delimiter=',')\n structure_plot = plt.figure()\n plt.clf()\n plt.title(args['title'])\n plt.xlabel('$x$')\n plt.ylabel('$y$')\n plt.imshow(np.flipud(heatmap),\n extent=(args['x_min'], args['x_max'], args['y_min'], args['y_max']),\n aspect=\"auto\")\n plt.colorbar()\n plt.savefig(filename_image)\n #plt.show()\n print(\"Returning structure plot\")\n return structure_plot\n else:\n print(\"using gnu plot\")\n gp.gnuplot(path+'structure.gpi', args)\n\n def __str__(self):\n return self.n.__str__()\n\nclass Structure(_AbstractStructure):\n def __init__(self, x_step, y_step, x_max, y_max, x_min=0., y_min=0.,\n n_background=1.):\n self.x_min = x_min\n self.x_max = x_max\n self.y_min = y_min\n self.y_max = y_max\n self.x_step = x_step\n self.y_step = y_step\n self.n_background = n_background\n self._n = np.ones((self.y.size,self.x.size), 'complex_') * n_background\n\n @property\n def n(self):\n return self._n\n\nclass Slabs(_AbstractStructure):\n '''\n Class to implement device refractive index\n profile cross-section designs.\n\n :class:`Slabs` is a collection of :class:`Slab` objects. Each\n slab has a fixed height (usually less than the\n maximum height of the desired simulation window),\n and is as wide as the simulation window.\n\n :class:`Slabs` objects can be index using `[name]` to return\n the various :class:`Slab` objects. The bottom slab is\n returned first and so on up to the top slab.\n\n .. image:: ..\/images\/slabs.svg\n :width: 200%\n\n Args:\n wavelength (float): The wavelength the structure\n operates at.\n y_step (float): The step in y.\n x_step (float): The step in x.\n x_max (float): The maximum x-value.\n x_min (float): The minimum x-value. 
Default is 0.\n\n Attributes:\n slabs (dict): The key is the name of the slab,\n and the value is the :class:`Slab` object.\n slab_count (int): The number of :class:`Slab` objects\n added so far.\n '''\n def __init__(self, wavelength, y_step, x_step, x_max, x_min=0.):\n _AbstractStructure.__init__(self)\n\n self._wl = wavelength\n self.x_min = x_min\n self.x_max = x_max\n self.x_step = x_step\n self.y_step = y_step\n self.y_min = 0\n\n self.slabs = {}\n self.slab_count = 0\n self._next_start = 0.\n\n def add_slab(self, height, n_background=1., position='top'):\n '''\n Creates and adds a :class:`Slab` object.\n\n Args:\n height (float): Height of the slab.\n n_background (float): The nominal refractive\n index of the slab. Default is 1 (air).\n\n Returns:\n str: The name of the slab.\n '''\n assert position in ('top', 'bottom')\n\n name = str(self.slab_count)\n\n if not callable(n_background):\n n_back = lambda wl: n_background\n else:\n n_back = n_background\n\n height_discretised = self.y_step*((height \/\/ self.y_step) + 1)\n\n y_min = self._next_start\n y_max = y_min + height_discretised\n self.slabs[name] = Slab(name, self.x_step, self.y_step, self.x_max,\n y_max, self.x_min, y_min, n_back, self._wl)\n\n self.y_max = y_max\n self._next_start = y_min + height_discretised\n self.slab_count += 1\n\n if position == 'bottom':\n slabs = {}\n for k in self.slabs.keys():\n slabs[str(int(k)+1)] = self.slabs[k]\n slabs['0'] = slabs.pop(str(self.slab_count))\n self.slabs = slabs\n\n return name\n\n def change_wavelength(self, wavelength):\n '''\n Changes the wavelength of the structure.\n\n This will affect the mode solver and potentially\n the refractive indices used (provided functions\n were provided as refractive indices).\n\n Args:\n wavelength (float): The new wavelength.\n '''\n for name, slab in self.slabs.items():\n const_args = slab._const_args\n mat_args = slab._mat_params\n\n const_args[8] = wavelength\n\n s = Slab(*const_args)\n for mat_arg in mat_args:\n s.add_material(*mat_arg)\n\n self.slabs[name] = s\n\n self._wl = wavelength\n\n @property\n def n(self):\n '''\n np.array: The refractive index profile matrix\n of the current slab.\n '''\n try:\n n_mat = self.slabs['0'].n\n for s in range(1, self.slab_count):\n n_mat = np.vstack((self.slabs[str(s)].n, n_mat))\n except KeyError:\n n_mat = None\n return n_mat\n\n def __getitem__(self, slab_name):\n return self.slabs[str(slab_name)]\n\nclass Slab(Structure):\n '''\n A :class:`Slab` represents a horizontal slice of\n the refractive index profile.\n\n A :class:`Slabs` object composes many :class:`Slab` objects.\n The more :class:`Slab` are added, the more horizontal\n slices are added. A :class:`Slab` has a chosen fixed\n height, and a background (nominal) refractive\n index. 
A slab can then be customised to include\n a desired design.\n\n Args:\n name (str): The name of the slab.\n x_step (float): The step in x.\n y_step (float): The step in y.\n x_max (float): The maximum x-value.\n y_max (float): The maximum y-value.\n x_min (float): The minimum x-value.\n y_min (float): The minimum x-value.\n n_background (float): The nominal refractive\n index.\n wavelength (float): The wavelength the structure\n operates at.\n\n Attributes:\n name (str): The name of the :class:`Slab` object.\n position (int): A unique identifier for the\n :class:`Slab` object.\n '''\n position = 0\n\n def __init__(self, name, x_step, y_step, x_max, y_max, x_min, y_min,\n n_background, wavelength):\n self._wl = wavelength\n self.name = name\n self.position = Slab.position\n Slab.position += 1\n\n Structure.__init__(self, x_step, y_step, x_max, y_max, x_min, y_min,\n n_background(self._wl))\n\n self._const_args = [name, x_step, y_step, x_max, y_max, x_min, y_min, n_background, wavelength]\n self._mat_params = []\n\n def add_material(self, x_min, x_max, n, angle=0):\n '''\n Add a refractive index between two x-points.\n\n Args:\n x_min (float): The start x-point.\n x_max (float): The stop x-point.\n n (float, function): Refractive index between\n `x_min` and `x_max`. Either a constant (`float`), or\n a function that accepts one parameters, the\n wavelength, and returns a float of the refractive\n index. This is useful when doing wavelength\n sweeps and solving for the group velocity. The\n function provided could be a Sellmeier equation.\n angle (float): Angle in degrees of the slope of the\n sidewalls at `x_min` and `x_max`. This is useful\n for defining a ridge with angled sidewalls.\n '''\n self._mat_params.append([x_min, x_max, n, angle])\n\n if not callable(n):\n n_mat = lambda wl: n\n else:\n n_mat = n\n\n Structure._add_material(self, x_min, self.y_min, x_max, self.y_max, n_mat(self._wl), angle)\n return self.n\n\nclass StructureAni():\n r\"\"\"\n Anisottropic structure object.\n\n This is used with the fully-vectorial simulation when\n an anisotropic material is being used.\n\n The form of the refractive index is\n\n .. math::\n\n n = \\begin{bmatrix}\n n_{xx} & n_{xy} & 0 \\\\\n n_{yx} & n_{yy} & 0 \\\\\n 0 & 0 & n_{zz}\n \\end{bmatrix}.\n\n Args:\n structure_xx (Structure): The structure with refractive\n index, :math:`n_{xx}`.\n structure_yy (Structure): The structure with refractive\n index, :math:`n_{yy}`. Presumably the same structure\n as `structure_xx`, but with different refractive index\n parameters.\n structure_zz (Structure): The structure with refractive\n index, :math:`n_{zz}`. Presumably the same structure\n as `structure_xx`, but with different refractive index\n parameters.\n structure_xy (None, Structure): The structure with refractive\n index, :math:`n_{yx}`. Presumably the same structure\n as `structure_xx`, but with different refractive index\n parameters. Default is `None`.\n structure_yx (None, Structure): The structure with refractive\n index, :math:`n_{yx}`. Presumably the same structure\n as `structure_xx`, but with different refractive index\n parameters. 
Default is `None`.\n \"\"\"\n def __init__(self, structure_xx, structure_yy, structure_zz,\n structure_xy=None, structure_yx=None):\n self.xx = structure_xx\n self.yy = structure_yy\n self.zz = structure_zz\n\n if not structure_xy or not structure_yx:\n struct_dummy = Structure(self.xx.x_step, self.xx.y_step,\n self.xx.x_max, self.xx.y_max,\n self.xx.x_min, self.xx.y_min,\n n_background=0.)\n struct_dummy._wl = self.xx._wl\n\n if structure_xy:\n self.xy = structure_xy\n else:\n self.xy = struct_dummy\n\n if structure_yx:\n self.yx = structure_yx\n else:\n self.yx = struct_dummy\n\n assert self.xx._wl == self.xy._wl == self.yx._wl == \\\n self.yy._wl == self.zz._wl\n\n self._wl = structure_xx._wl\n\n self.axes = (self.xx, self.xy, self.yx, self.yy, self.zz)\n self.axes_str = ('xx', 'xy', 'yx', 'yy', 'zz')\n\n @property\n def n(self):\n return [a.n for a in self.axes]\n\n @property\n def x_step(self):\n return self.xx.x_step\n\n @property\n def y_step(self):\n return self.xx.y_step\n\n @property\n def x_pts(self):\n return int((self.xx.x_max - self.xx.x_min) \/ self.xx.x_step + 1)\n\n @property\n def y_pts(self):\n return int((self.xx.y_max - self.xx.y_min) \/ self.xx.y_step)\n\n @property\n def x_ctr(self):\n return 0.5*(self.xx.x_max + self.xx.x_min)\n\n @property\n def y_ctr(self):\n return 0.5*(self.xx.y_max + self.xx.y_min)\n\n @property\n def xc(self):\n return 0.5*(self.xx.x[1:] + self.xx.x[:-1])\n\n @property\n def yc(self):\n return 0.5*(self.xx.y[1:] + self.xx.y[:-1])\n\n @property\n def xc_pts(self):\n return self.xx.x_pts - 1\n\n @property\n def yc_pts(self):\n return self.xx.y_pts - 1\n\n @property\n def xc_min(self):\n return self.xx.xc[0]\n\n @property\n def xc_max(self):\n return self.xx.xc[-1]\n\n @property\n def yc_min(self):\n return self.xx.yc[0]\n\n @property\n def yc_max(self):\n return self.xx.yc[-1]\n\n @property\n def x(self):\n if None not in (self.xx.x_min, self.xx.x_max, self.xx.x_step) and \\\n self.xx.x_min != self.xx.x_max:\n x = np.arange(self.xx.x_min, self.xx.x_max+self.xx.x_step-self.xx.y_step*0.1, self.xx.x_step)\n else:\n x = np.array([])\n return x\n\n @property\n def y(self):\n if None not in (self.xx.y_min, self.xx.y_max, self.xx.y_step) and \\\n self.xx.y_min != self.xx.y_max:\n y = np.arange(self.xx.y_min, self.xx.y_max-self.xx.y_step*0.1, self.xx.y_step)\n else:\n y = np.array([])\n return y\n\n @property\n def eps(self):\n eps_ani = [a.n**2 for a in self.axes]\n return eps_ani\n\n @property\n def eps_func(self):\n return lambda x,y: tuple(axis.eps_func(x,y) for axis in self.axes)\n\n @property\n def n_func(self):\n return lambda x,y: tuple(axis.n_func(x,y) for axis in self.axes)\n\n def write_to_file(self, filename='material_index.dat', plot=True):\n '''\n Write the refractive index profile to file.\n\n Args:\n filename (str): The nominal filename the refractive\n index data should be saved to.\n plot (bool): `True` if plots should be generates,\n otherwise `False`. 
Default is `True`.\n '''\n path = os.path.dirname(sys.modules[__name__].__file__) + '\/'\n\n dir_plot = 'material_index\/'\n if not os.path.exists(dir_plot):\n os.makedirs(dir_plot)\n\n for axis, name in zip(self.axes, self.axes_str):\n root, ext = os.path.splitext(filename)\n fn = dir_plot + root + '_'+ name + ext\n with open(fn, 'w') as fs:\n for n_row in np.abs(axis.n[::-1]):\n n_str = ','.join([str(v) for v in n_row])\n fs.write(n_str+'\\n')\n\n if plot:\n filename_image_prefix, _ = os.path.splitext(fn)\n filename_image = filename_image_prefix + '.png'\n args = {\n 'title': 'Refractive Index Profile: %s' % name,\n 'x_pts': self.xx.x_pts,\n 'y_pts': self.xx.y_pts,\n 'x_min': self.xx.x_min,\n 'x_max': self.xx.x_max,\n 'y_min': self.xx.y_min,\n 'y_max': self.xx.y_max,\n 'filename_data': fn,\n 'filename_image': filename_image\n }\n if MPL:\n heatmap = np.loadtxt(args['filename_data'], delimiter=',')\n plt.clf()\n plt.title(args['title'])\n plt.xlabel('$x$')\n plt.ylabel('$y$')\n plt.imshow(np.flipud(heatmap),\n extent=(args['x_min'], args['x_max'], args['y_min'], args['y_max']),\n aspect=\"auto\")\n plt.colorbar()\n plt.savefig(filename_image)\n else:\n gp.gnuplot(path+'structure.gpi', args, silent=False)\n\n def change_wavelength(self, wavelength):\n '''\n Changes the wavelength of the structure.\n\n This will affect the mode solver and potentially\n the refractive indices used (provided functions\n were provided as refractive indices).\n\n Args:\n wavelength (float): The new wavelength.\n '''\n for axis in self.axes:\n if issubclass(type(axis), Slabs):\n axis.change_wavelength(wavelength)\n self.xx, self.xy, self.yx, self.yy, self.zz = self.axes\n self._wl = wavelength\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_121","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport cvxpy as cp\nimport random\nimport time\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KernelDensity\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.model_selection import GridSearchCV, KFold\nfrom sklearn.decomposition import PCA\nfrom sklearn.datasets import load_iris, load_digits, load_breast_cancer, load_boston, load_wine\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.utils import shuffle\n\nfrom plausible_counterfactuals import HighDensityEllipsoids, PlausibleCounterfactualOfHyperplaneClassifier, PlausibleCounterfactualOfDecisionTree\n\n\ndef load_house_prices(file_path=\"housepricesdataset.npz\"):\n X, y = load_boston(return_X_y=True)\n y = y >= 20\n y = y.astype(np.int).flatten()\n\n return X, y\n\n\nif __name__ == \"__main__\":\n use_decision_tree = False # If False, softmax regression is used!\n\n # Load data set\n X, y = load_iris(return_X_y=True);pca_dim=None\n #X, y = load_breast_cancer(return_X_y=True);pca_dim=5\n #X, y = load_house_prices();pca_dim=10\n #X, y = load_wine(return_X_y=True);pca_dim=8\n X, y = load_digits(return_X_y=True);pca_dim=40\n\n X, y = shuffle(X, y, random_state=42)\n\n # k-fold cross validation\n scores_with_density_constraint = []\n scores_without_density_constraint = []\n\n original_data = []\n original_data_labels = []\n cfs_with_density_constraint = []\n cfs_without_density_constraint = []\n cfs_target_label = []\n computation_time_without_density_constraint = []\n computation_time_with_density_constraint = []\n distances_with_density_constraint = []\n distances_without_density_constraint = []\n\n kf = KFold(n_splits=5, 
random_state=42)\n for train_index, test_index in kf.split(X):\n # Split data into training and test set\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n\n # Choose target labels\n y_test_target = []\n labels = np.unique(y)\n for i in range(X_test.shape[0]):\n y_test_target.append(random.choice(list(filter(lambda l: l != y_test[i], labels))))\n y_test_target = np.array(y_test_target)\n\n # If requested: Reduce dimensionality\n X_train_orig = np.copy(X_train)\n X_test_orig = np.copy(X_test)\n projection_matrix = None\n projection_mean_sub = None\n pca = None\n if pca_dim is not None:\n pca = PCA(n_components=pca_dim)\n pca.fit(X_train)\n\n projection_matrix = pca.components_ # Projection matrix\n projection_mean_sub = pca.mean_\n\n X_train = np.dot(X_train - projection_mean_sub, projection_matrix.T)\n X_test = np.dot(X_test - projection_mean_sub, projection_matrix.T)\n\n # Fit classifier\n model = LogisticRegression(multi_class=\"multinomial\", solver=\"lbfgs\", random_state=42)\n if use_decision_tree is True:\n model = DecisionTreeClassifier(max_depth=7, random_state=42)\n model.fit(X_train, y_train)\n\n # Compute accuracy on test set\n print(\"Accuracy: {0}\".format(accuracy_score(y_test, model.predict(X_test))))\n\n # For each class, fit density estimators\n density_estimators = {}\n kernel_density_estimators = {}\n labels = np.unique(y)\n for label in labels:\n # Get all samples with the 'correct' label\n idx = y_train == label\n X_ = X_train[idx, :]\n\n # Optimize hyperparameters\n cv = GridSearchCV(estimator=KernelDensity(), iid=False, param_grid={'bandwidth': np.arange(0.1, 10.0, 0.05)}, n_jobs=-1, cv=5)\n cv.fit(X_)\n bandwidth = cv.best_params_[\"bandwidth\"]\n print(\"bandwidth: {0}\".format(bandwidth))\n\n cv = GridSearchCV(estimator=GaussianMixture(covariance_type='full'), iid=False, param_grid={'n_components': range(2, 10)}, n_jobs=-1, cv=5)\n cv.fit(X_)\n n_components = cv.best_params_[\"n_components\"]\n print(\"n_components: {0}\".format(n_components))\n\n # Build density estimators\n kde = KernelDensity(bandwidth=bandwidth)\n kde.fit(X_)\n\n de = GaussianMixture(n_components=n_components, covariance_type='full', random_state=42)\n de.fit(X_)\n\n density_estimators[label] = de\n kernel_density_estimators[label] = kde\n\n # For each point in the test set\n # Compute and plot counterfactual without density constraints\n print(\"n_test_samples: {0}\".format(X_test.shape[0]))\n for i in range(X_test.shape[0]):\n x_orig = X_test[i,:]\n x_orig_orig = X_test_orig[i,:]\n y_orig = y_test[i]\n y_target = y_test_target[i]\n\n if(model.predict([x_orig]) == y_target): # Model already predicts target label!\n print(\"Requested prediction already satisfied\")\n continue\n\n # Compute and plot counterfactual WITH kernel density constraints\n idx = y_train == y_target\n X_ = X_train[idx, :]\n\n # Build density estimator\n de = density_estimators[y_target]\n kde = kernel_density_estimators[y_target]\n\n # Compute media NLL of training samples\n # TODO: Move this to the outer loop\n from scipy.stats import multivariate_normal\n densities_training_samples = []\n densities_training_samples_ex = []\n for j in range(X_.shape[0]):\n x = X_[j,:]\n z = []\n dim = x.shape[0]\n for i in range(de.weights_.shape[0]):\n x_i = de.means_[i]\n w_i = de.weights_[i]\n cov = de.covariances_[i]\n cov = np.linalg.inv(cov)\n\n b = -2.*np.log(w_i) + dim*np.log(2.*np.pi) - np.log(np.linalg.det(cov))\n z.append(np.dot(x - x_i, np.dot(cov, x - x_i)) + b) # NLL\n\n 
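                # Each entry of z is (up to a factor of 2) the negative log-likelihood of
                # this training sample under one GMM component, including its mixture
                # weight; the minimum over components is kept as the sample's density
                # score, and the median of these scores later becomes density_threshold.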
densities_training_samples.append(np.min(z))\n densities_training_samples_ex.append(z)\n\n densities_training_samples = np.array(densities_training_samples)\n densities_training_samples_ex = np.array(densities_training_samples_ex)\n\n # Compute soft cluster assignments\n cluster_prob_ = de.predict_proba(X_)\n density_threshold = np.median(densities_training_samples)\n # Compute high density ellipsoids - constraint: test if sample is included in ellipsoid -> this is the same as the proposed constraint but nummerically much more stable, in particular when we add a dimensionality reduction from a high dimensional space to a low dimensional space\n r = HighDensityEllipsoids(X_, densities_training_samples_ex, cluster_prob_, de.means_, de.covariances_, density_threshold).compute_ellipsoids()\n \n # Compute counterfactual without any density\/plausibility\/feasibility constraints\n xcf_t1 = time.time()\n cf = None\n if use_decision_tree is False:\n cf = PlausibleCounterfactualOfHyperplaneClassifier(model.coef_, model.intercept_, n_dims=X_train.shape[1], density_constraint=False, ellipsoids_r=r, gmm_weights=de.weights_, gmm_means=de.means_, gmm_covariances=de.covariances_, projection_matrix=projection_matrix, projection_mean_sub=projection_mean_sub)\n else:\n cf = PlausibleCounterfactualOfDecisionTree(model, n_dims=X_train.shape[1], density_constraint=False, ellipsoids_r=r, gmm_weights=de.weights_, gmm_means=de.means_, gmm_covariances=de.covariances_, projection_matrix=projection_matrix, projection_mean_sub=projection_mean_sub)\n xcf = cf.compute_counterfactual(x_orig_orig, y=y_target)\n xcf_t1 = time.time() - xcf_t1\n if xcf is None:\n print(\"No counterfactual found!\")\n continue\n\n # Compute counterfactul with proposed density constraint\n xcf_t2 = time.time()\n cf2 = None\n if use_decision_tree is False:\n cf2 = PlausibleCounterfactualOfHyperplaneClassifier(model.coef_, model.intercept_, n_dims=X_train.shape[1], ellipsoids_r=r, gmm_weights=de.weights_, gmm_means=de.means_, gmm_covariances=de.covariances_, projection_matrix=projection_matrix, projection_mean_sub=projection_mean_sub, density_threshold=density_threshold)\n else:\n cf2 = PlausibleCounterfactualOfDecisionTree(model, n_dims=X_train.shape[1], ellipsoids_r=r, gmm_weights=de.weights_, gmm_means=de.means_, gmm_covariances=de.covariances_, projection_matrix=projection_matrix, projection_mean_sub=projection_mean_sub, density_threshold=density_threshold)\n xcf2 = cf2.compute_counterfactual(x_orig_orig, y=y_target)\n xcf_t2 = time.time() - xcf_t2\n if xcf2 is None:\n print(\"No counterfactual found!\")\n continue\n\n original_data.append(x_orig_orig)\n original_data_labels.append(y_orig)\n cfs_with_density_constraint.append(xcf2)\n cfs_without_density_constraint.append(xcf)\n cfs_target_label.append(y_target)\n computation_time_without_density_constraint.append(xcf_t1)\n computation_time_with_density_constraint.append(xcf_t2)\n distances_with_density_constraint.append(np.sum(np.abs(x_orig_orig - xcf2)))\n distances_without_density_constraint.append(np.sum(np.abs(x_orig_orig - xcf)))\n\n if pca is not None: # If necessary: Project the counterfactuals to the lower dimensional space where we did the density estimation\n xcf = pca.transform([xcf])\n xcf2 = pca.transform([xcf2])\n\n # Evaluate\n scores_without_density_constraint.append(kde.score_samples(xcf.reshape(1, -1)))\n scores_with_density_constraint.append(kde.score_samples(xcf2.reshape(1, -1)))\n\n # Final evaluation\n print(\"Without density constrain: Median: {0} Mean: {1} Var: 
{2}\".format(np.median(scores_without_density_constraint), np.mean(scores_without_density_constraint), np.var(scores_without_density_constraint)))\n print(\"With density constrain: Median: {0} Mean: {1} Var: {2}\".format(np.median(scores_with_density_constraint), np.mean(scores_with_density_constraint), np.var(scores_with_density_constraint)))\n \n print(\"Computation time: With density constraint: {0} Without density constraint: {1}\".format(np.median(computation_time_with_density_constraint), np.median(computation_time_without_density_constraint)))\n print(\"Distances: With density constraint: {0} {1} Without density constraint: {2} {3}\".format(np.median(distances_with_density_constraint), np.mean(distances_with_density_constraint), np.median(distances_without_density_constraint), np.mean(distances_without_density_constraint)))\n\n #\"\"\"\n # Plot some samples: Counterfactual generated with vs. without density constraint\n original_data = np.array(original_data)\n original_data_labels = np.array(original_data_labels)\n cfs_with_density_constraint = np.array(cfs_with_density_constraint)\n cfs_without_density_constraint = np.array(cfs_without_density_constraint)\n cfs_target_label = np.array(cfs_target_label)\n np.savez(\"cfs_comparision_data_softmax_regression\", X_original=original_data, y_original=original_data_labels, y_target=cfs_target_label, X_with_density_constraint=cfs_with_density_constraint, X_without_density_constraint=cfs_without_density_constraint)\n #\"\"\"\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_122","text":"1-10\nimport os\nimport scipy.interpolate as spi\n\ndataLabel = [\"one\", \"two\", \"three\", \"four\"]\n\ndataRoot = \"..\/toneclassifier\/train\"\n\nnormalLen = 200\n\nfor label in dataLabel:\n subsetPath = dataRoot + \"\/\" + label\n dataset = set()\n for filename in os.listdir(subsetPath):\n if filename[0] == \".\":\n continue\n if \".engy\" in filename:\n dataset.add(filename[0:-5])\n elif \".f0\" in filename:\n dataset.add(filename[0:-3])\n try:\n os.makedirs(\"..\/data-process-output\/trim-interpolation\/train\/\" + label)\n except OSError as err:\n pass\n\n for dataname in dataset:\n engyfile = open(subsetPath + \"\/\" + dataname + \".engy\", \"r\")\n f0file = open(subsetPath + \"\/\" + dataname + \".f0\", \"r\")\n engy = map(float, engyfile.readlines())\n f0 = map(float, f0file.readlines())\n engyfile.close()\n f0file.close()\n\n start = None\n end = None\n\n for i in xrange(len(f0)):\n if (f0[i] > 1e-5):\n start = i\n break\n for i in xrange(len(f0) - 1, -1, -1):\n if (f0[i] > 1e-5):\n end = i + 1\n break\n engy = engy[start:end]\n f0 = f0[start:end]\n\n dataLen = len(engy)\n k = float(normalLen - 1) \/ float(dataLen - 1)\n\n x = [i * k for i in xrange(dataLen)]\n newX = [i * 1.0 for i in xrange(normalLen)]\n newX[-1] = x[-1]\n # tck = spi.splrep(x, engy)\n # newEngy = spi.splev(newX, tck)\n # tck = spi.splrep(x, f0)\n # newF0 = spi.splev(newX, tck)\n func = spi.interp1d(x, engy, kind='cubic')\n newEngy = func(newX)\n func = spi.interp1d(x, f0, kind='cubic')\n newF0 = func(newX)\n\n engyfile = open(\"..\/data-process-output\/trim-interpolation\/train\/\" + label + \"\/\" + dataname + \".engy\", \"w\")\n f0file = open(\"..\/data-process-output\/trim-interpolation\/train\/\" + label + \"\/\" + dataname + \".f0\", \"w\")\n for i in xrange(normalLen):\n engyfile.write(\"%.5f\\n\" % newEngy[i])\n f0file.write(\"%.5f\\n\" % newF0[i])\n engyfile.close()\n f0file.close()"} 
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_123","text":"import matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport glob\nimport numpy as np\nfrom scipy.optimize import curve_fit\nimport traceback\nfrom lmfit import minimize, Parameters\nfrom uncertainties import ufloat\nimport pandas as pd\nimport Settings\nimport sys\n\n# todo: check program with several different settings\n# todo: solve the problems with manual fitting\n\n# todo: adjust the plotting function to write the parameters better in the tight_layout\n# todo: implement R2' calculations, sorting, and adding them to the plots\n# R'2 = 1 - MQres\/MQTcorr; MQTcor = SQTcor \/ I-1; MQres = SQres \/ I-p\n# I: degrees of freedom. p: number of parameters in the model\n# SQTcor = sum(yi - mean(y)) ^ 2\n# SQres = chisqr\n# todo: check why the R2 on the automatic linear fit is buggy\n\n# todo: Check the results file, if the correct parameters are being recorded\n# todo: check if converting GP and Eta to ndarrays at the beginning breaks anything.\n\n# todo: Remove the prev_extracted setting and try to guess this parameter\n\n# todo: remove the debugging setting. Just use the debugging tools.\n\nclass Fitter:\n def __init__(self, filename, settings, do_fit=True):\n self.VISC_LIMIT = 10000000\n self.l_first_point = 0\n self.l_last_point = -1\n self.nl_first_point = 0\n self.nl_last_point = -1\n self.manip = FileManip()\n self.filename = filename\n self.settings = settings\n self.model = self.settings.NL_FITTING_METHOD\n self.l_R2 = 0\n self.nl_R2 = 0\n self.wait = float(settings.WAIT)\n self.fixed_fp = settings.FIXED_FP_NL\n if not self.fixed_fp:\n self.max_fp = int(settings.MAX_FP_NL)\n else:\n self.max_fp = 0\n\n self.lin_done = False\n self.nl_done = False\n\n if self.model == 'Cross':\n #self.param_names = ['eta_0', 'eta_inf', 'GP_b', 'n']\n self.param_names = 'eta_0 eta_inf GP_b n'\n elif self.model == 'Carreau':\n #self.param_names = ['eta_0', 'eta_inf', 'GP_b', 'n']\n self.param_names = 'eta_0 eta_inf GP_b n'\n elif self.model == 'Carreau-Yasuda':\n #self.param_names = ['eta_0', 'eta_inf', 'lambda', 'a', 'n']\n self.param_names = 'eta_0 eta_inf lambda a n'\n else:\n raise NameError(f'Did not understand model {self.model}')\n\n self.param_names_lin = ['Int', 'Slp'] # todo: check if this is the correct order.\n\n if self.settings.DO_LIN:\n self.int = 50\n self.int_err = 0.1\n self.slp = 0\n self.slp_err = 0\n elif self.settings.DO_NL:\n if self.model == 'Carreau' or self.model == 'Cross' or self.model == 'Carreau-Yasuda':\n self.params = [0, 0, 0, 0]\n self.param_errs = [0, 0, 0, 0]\n else:\n raise ValueError(f'Unknown model: {model}')\n\n try:\n if self.settings.PREV_EXTRACTED:\n self.GP, self.Eta = self.manip.ExtractData_pd(filename)\n self.GP = np.array(self.GP)\n self.Eta = np.array(self.Eta)\n else:\n self.GP, self.Eta = self.manip.ExtractData(filename)\n self.GP = np.array(self.GP)\n self.Eta = np.array(self.Eta)\n except ValueError:\n self.manip.logger(filename, 'Failed to open')\n raise ValueError(f'!!!! No Flow Curve data was found! Re-export the data on file{filename}.')\n except KeyError:\n self.manip.logger(filename, 'Failed to open')\n raise ValueError(f'!!!! No Flow Curve data was found! Re-export the data on file{filename}.')\n\n if len(self.GP) != len(self.Eta):\n self.manip.logger(self.filename, 'Failed to open')\n raise ValueError(f'!!!! GP and Eta have different lengths. 
'\n f'Re-export {filename} or fix the problem manually.')\n\n if do_fit:\n self._fit()\n\n def _fit(self): # Uses fit_curve. Does not provide an R2 value.\n if self.settings.DO_LIN:\n if self.settings.AUTO_LIN:\n self.automatic_lin_fitting(True)\n else: # todo: plot, save and ask for the required points\n self.manual_fit(0, -1, 'Linear')\n if self.settings.DO_NL:\n if self.settings.AUTO_NL:\n self.automatic_nl_fitting(True)\n else:\n self.manual_fit(0, -1, self.settings.NL_FITTING_METHOD, True)\n\n def fit(self):\n if self.settings.DO_LIN:\n if self.settings.AUTO_LIN:\n self.automatic_lin_fitting_lm(True)\n else: # todo: plot, save and ask for the required points\n self.manual_fit(0, -1, 'Linear')\n if self.settings.DO_NL:\n if self.settings.AUTO_NL:\n self.automatic_nl_fitting_lm(True)\n else:\n self.manual_fit(0, -1, self.settings.NL_FITTING_METHOD, True)\n\n @staticmethod\n def fit_Carreau(GP, eta_0, eta_inf, GP_b, n):\n \"\"\"Eta = eta_inf + (eta_0 - eta_inf) \/ (1+(GP\/GP_b)**2)**(n\/2)\n GP_b is a constant with the dimension of time and n is a dimensionless constant\"\"\"\n return eta_inf + (eta_0 - eta_inf) \/ (1 + (GP \/ GP_b) ** 2) ** (n \/ 2)\n\n @staticmethod\n def fit_Cross(GP, eta_0, eta_inf, GP_b, n):\n return eta_inf + (eta_0 - eta_inf) \/ (1 + (GP \/ GP_b) ** n)\n\n @staticmethod\n def fit_PowerLaw(GP, k, n):\n \"\"\"Power Law: eta = k * GP ** (n-1)\"\"\"\n return k * GP ** (n - 1)\n\n @staticmethod\n def fit_CarreauYasuda(GP, eta_0, eta_inf, lbda, a, n):\n \"\"\"Carreau-Yasuda: eta(GP) = eta_inf + (eta_0 - eta_inf)(1+(lambda * GP)**a)**((n-1)\/a)\"\"\"\n return eta_inf + (eta_0 - eta_inf) \/ (1 + (lbda * GP) ** a) ** ((n - 1) \/ a)\n\n @staticmethod\n def fit_lin(x, a, b):\n \"\"\"Simple function for a linear fit, with a as the linear coefficient and b the angular coefficient.\"\"\"\n return a + b * x\n\n @staticmethod\n def carr_uncertainty(GP, eta0, etainf, GPb, n, eta0_err, etainf_err, GPb_err, n_err):\n \"\"\"Uses the uncertainty package to calculate the Carreau model values. 
GP\n can be a numpy array, which returns two lists of values and errors, a float64,\n float or int and returns a tuple (val, err)\"\"\"\n f_eta0 = ufloat(eta0, eta0_err)\n f_etainf = ufloat(etainf, etainf_err)\n f_GPb = ufloat(GPb, GPb_err)\n f_n = ufloat(n, n_err)\n Carr = f_etainf + (f_eta0 - f_etainf) \/ (1 + (GP \/ f_GPb) ** 2) ** (f_n \/ 2)\n\n # Extracts all val +- err pairs if GP is an ndarray\n if type(GP) is np.ndarray:\n Carr_val = [a.nominal_value for a in Carr]\n Carr_err = [a.std_dev for a in Carr]\n\n # If GP is numeric, separates the two values.\n if (type(GP) is np.float64) or (type(GP) is float) or (type(GP) is int):\n Carr_val = Carr.nominal_value\n Carr_err = Carr.std_dev\n\n return Carr_val, Carr_err\n\n @staticmethod\n def cross_uncertainty(GP, eta0, etainf, GPb, n, eta0_err, etainf_err, GPb_err, n_err):\n f_eta0 = ufloat(eta0, eta0_err)\n f_etainf = ufloat(etainf, etainf_err)\n f_GPb = ufloat(GPb, GPb_err)\n f_n = ufloat(n, n_err)\n Cross = f_etainf + (f_eta0 - f_etainf) \/ (1 + (GP \/ f_GPb) ** f_n)\n\n # Extracts all val +- err pairs if GP is an ndarray\n if type(GP) is np.ndarray:\n Cross_val = [a.nominal_value for a in Cross]\n Cross_err = [a.std_dev for a in Cross]\n\n # If GP is numeric, separates the two values.\n if (type(GP) is np.float64) or (type(GP) is float) or (type(GP) is int):\n Cross_val = Cross.nominal_value\n Cross_err = Cross.std_dev\n\n return Cross_val, Cross_err\n\n @staticmethod\n def carryas_uncertainty(GP, eta0, etainf, lbda, a, n, eta0_err, etainf_err, lbda_err, a_err, n_err):\n f_eta0 = ufloat(eta0, eta0_err)\n f_etainf = ufloat(etainf, etainf_err)\n f_n = ufloat(n, n_err)\n f_lbda = ufloat(lbda, lbda_err)\n f_a = ufloat(a, a_err)\n CY = f_etainf + (f_eta0 - f_etainf) * (1 + (f_lbda * GP) ** f_a) ** ((f_n - 1) \/ f_a)\n\n # Extracts all val +- err pairs if GP is an ndarray\n if type(GP) is np.ndarray:\n CY_val = [a.nominal_value for a in CY]\n CY_err = [a.std_dev for a in CY]\n\n # If GP is numeric, separates the two values.\n if (type(GP) is np.float64) or (type(GP) is float) or (type(GP) is int):\n CY_val = CY.nominal_value\n CY_err = CY.std_dev\n\n return CY_val, CY_err\n\n # todo: alterar o termo int para outro valor para impedir que haja um clash.\n def lm_curvefit(self, GP, Eta, do_lin=False):\n params = Parameters()\n SStot = sum((Eta - np.mean(Eta)) ** 2)\n if do_lin: # todo: Check why R2 is very weird here.\n params.add('Int', 50, vary=True, min=0)\n params.add('Slp', 0, vary=False)\n fit = minimize(self.residual_lin, params, args=(GP, Eta))\n slp = fit.params['Slp'].value\n int = fit.params['Int'].value\n slp_err = fit.params['Slp'].stderr\n int_err = fit.params['Int'].stderr\n chisqr = fit.chisqr\n R2 = 1 - fit.chisqr \/ SStot\n return [slp, int], [slp_err, int_err], R2\n elif self.model == 'Carreau':\n params.add('eta_0', 100, vary=True, min=0)\n params.add('eta_inf', 1, vary=True, min=0)\n params.add('GP_b', 5, vary=True, min=0)\n params.add('n', 1, vary=True, min=0)\n fit = minimize(self.residual, params, args=(GP, Eta))\n params = [fit.params[par].value for par in fit.params]\n param_errs = [fit.params[par].stderr for par in fit.params]\n R2 = 1 - fit.chisqr \/ SStot\n return params, param_errs, R2\n elif self.model == 'Cross':\n params.add('eta_0', 100, vary=True, min=0)\n params.add('eta_inf', 1, vary=True, min=0)\n params.add('GP_b', 5, vary=True, min=0)\n params.add('n', 1, vary=True, min=0)\n fit = minimize(self.residual, params, args=(GP, Eta))\n params = [fit.params[par].value for par in fit.params]\n param_errs = 
[fit.params[par].stderr for par in fit.params]\n R2 = 1 - fit.chisqr \/ SStot\n return params, param_errs, R2\n elif self.model == 'Carreau-Yasuda':\n params.add('eta_0', 100, vary=True, min=0)\n params.add('eta_inf', 1, vary=True, min=0)\n params.add('lbda', 5, vary=True, min=0)\n params.add('a', 1, vary=True, min=0)\n params.add('n', 1, vary=True, min=0)\n fit = minimize(self.residual, params, args=(GP, Eta))\n params = [fit.params[par].value for par in fit.params]\n param_errs = [fit.params[par].stderr for par in fit.params]\n SSres = fit.chisqr\n R2 = 1 - SSres \/ SStot\n return params, param_errs, R2\n\n def residual(self, params, x, dataset):\n if self.model == 'Carreau':\n mod = self.fit_Carreau(x, params['eta_0'], params['eta_inf'], params['GP_b'], params['n'])\n elif self.model == 'Cross':\n mod = self.fit_Cross(x, params['eta_0'], params['eta_inf'], params['GP_b'], params['n'])\n elif self.model == 'Carreau-Yasuda':\n mod = self.fit_CarreauYasuda(x, params['eta_0'], params['eta_inf'], params['lbda'], params['a'],\n params['n'])\n resid = dataset - mod\n return resid\n\n def residual_lin(self, params, x, dataset):\n if type(x) == list:\n x = np.array(x)\n mod = params['Int'] + params['Slp'] * x\n resid = dataset - mod\n return resid\n\n def automatic_lin_fitting_lm(self, save=True):\n length = len(self.GP)\n fittings = []\n\n # Go through several possible ranges to fit, and fit them, then get the best fit\n for first_point in range(0, length\/\/3, 1):\n for last_point in range(first_point + 3, length \/\/ 2, 1):\n GP_arr = np.array(self.GP[first_point:last_point + 1]) # todo: check if this conversion is necessary\n Eta_arr = np.array(self.Eta[first_point:last_point + 1])\n try:\n #popt, pcov = curve_fit(self.fit_lin, GP_arr, Eta_arr, p0=(30, 0),\n # bounds=(0, [self.VISC_LIMIT, 0.0001]))\n params, param_errs, R2 = self.lm_curvefit(GP_arr, Eta_arr, do_lin=True)\n except: # todo: test here and find what types of errors can occur\n print(f'Error while using linear fit for file {self.filename}')\n print(traceback.format_exc())\n self.manip.logger(self.filename, 'Generic')\n\n #perr = np.sqrt(np.diag(pcov))\n fittings.append((first_point, last_point, params, param_errs, R2))\n\n if self.settings.LIN_SORTING_METHOD == 'by_error':\n fittings.sort(key=lambda x: np.log(x[3][0]))\n elif self.settings.LIN_SORTING_METHOD == 'by_error_length':\n fittings.sort(key=lambda x: np.log(x[2][1]) \/ (x[1] - x[0]))\n elif self.settings.LIN_SORTING_METHOD == 'by_R2':\n fittings.sort(key=lambda x: x[4])\n\n self.l_first_point = fittings[0][0]\n # todo: add variable names to first and last points of linear and nl\n self.l_last_point = fittings[0][1]\n self.int = fittings[0][2][1]\n self.int_err = fittings[0][3][1]\n self.l_R2 = fittings[0][4]\n self.lin_done = True\n\n if self.settings.DEBUG:\n print('Debug: fittings_sorted: ', fittings)\n print('Debug: a: ', self.int)\n print('Debug: aerr: ', self.int_err)\n\n if save:\n self.manip.record_fit('linear', self.int, self.int_err, silent=False,\n extra=f\"{fittings[0][0]};{fittings[0][1]};\")\n # todo: check how this was done before, for consistency\n\n return self.int, self.int_err, self.l_R2\n\n def automatic_lin_fitting(self, save=True):\n \"\"\"Goes through all the files, fits them and selects the best fit according to two algorithms.\n First, it selects two points, a beginning and an end point, the first starting at point 0\n and going to a third of the curve. 
The second, starting at points to the right,\n going until the middle of the curve.\n Then, it fits the data by fixing the slope at 0 and goes through every possible combination\n of the first and second points.\n It selects the data based on two criteria:\n 1. sorting = 'by_error': finds the minimal error. Tends to select less points overall and\n gives a fitting with a less than ideal representation overall.\n 2. sorting = 'by_error_length': divides the error by how many points were used in the fit.\n May result in a higher overall error, but gives a better representation of the curve.\n \"\"\"\n\n length = len(self.GP)\n fittings = []\n\n # Go through several possible ranges to fit, and fit them, then get the best fit\n for first_point in range(0, length\/\/3, 1):\n for last_point in range(first_point + 3, length \/\/ 2, 1):\n GP_arr = np.array(self.GP[first_point:last_point + 1])\n Eta_arr = np.array(self.Eta[first_point:last_point + 1])\n try:\n popt, pcov = curve_fit(self.fit_lin, GP_arr, Eta_arr, p0=(30, 0),\n bounds=(0, [self.VISC_LIMIT, 0.0001]))\n except: # todo: test here and find what types of errors can occur\n print(f'Error while using linear fit for file {self.filename}')\n print(traceback.format_exc())\n self.manip.logger(self.filename, 'Generic')\n\n perr = np.sqrt(np.diag(pcov))\n fittings.append((first_point, last_point, popt, perr))\n\n if self.settings.LIN_SORTING_METHOD == 'by_error':\n fittings.sort(key=lambda x: np.log(x[3][0])) # gets perr of eta_0\n elif self.settings.LIN_SORTING_METHOD == 'by_error_length':\n fittings.sort( key=lambda x: np.log(x[3][0]) \/ (x[1] - x[0]) ) # divides perr by last-first\n\n self.int = fittings[0][2][0]\n self.int_err = fittings[0][3][0]\n self.l_first_point = fittings[0][0] # todo: add variable names to first and last points of linear and nl\n self.l_last_point = fittings[0][1]\n self.lin_done = True\n\n if self.settings.DEBUG:\n print('Debug: fittings_sorted: ', fittings)\n print('Debug: a: ', self.int)\n print('Debug: aerr: ', self.int_err)\n\n if save:\n self.manip.record_fit(self.filename, self.int, self.int_err, silent=False,\n extra=f\"{fittings[0][0]};{fittings[0][1]};\")\n\n return self.int, self.int_err\n\n # todo: change from curve_fit to lm_fit.\n # todo: calculate R2 for all fittings and add it in the end to the class\n # todo: add options to sort by R2.\n\n def automatic_nl_fitting_lm(self, save=True):\n fittings = []\n try:\n max_range = len(self.GP) \/\/ self.max_fp\n except ZeroDivisionError:\n max_range = 1\n\n for first_point in range(0, max_range, 1):\n GP_arr = np.array(self.GP[first_point:])\n Eta_arr = np.array(self.Eta[first_point:])\n nonlinear_has_error = ''\n try:\n params, param_errs, R2 = self.lm_curvefit(GP_arr, Eta_arr, do_lin=False)\n except FloatingPointError: # todo: check if these exceptions work\n print('!!!! Overflow detected on one of the parameters. Could not determine all parameters')\n nonlinear_has_error = ';param_overflow_during_fitting'\n self.manip.logger(self.filename, 'Overflow')\n except RuntimeError:\n print('!!!! Overflow detected on one of the parameters. Could not determine all parameters')\n nonlinear_has_error = ';param_overflow_during_fitting'\n self.manip.logger(self.filename, 'Overflow')\n except OverflowError:\n print('!!!! 
Overflow detected on one of the parameters.')\n self.manip.logger(self.filename, 'Overflow')\n\n fittings.append((first_point, params, param_errs, R2))\n\n if self.settings.NL_SORTING_METHOD == 'eta_0':\n fittings.sort(key=lambda x: x[2][0])\n elif self.settings.NL_SORTING_METHOD == 'overall':\n # fittings.sort(key=lambda x: x[2][0] + x[2][1] + x[2][2] + x[2][3])\n fittings.sort(key=lambda x: sum(x[2])) # sums the errors\n elif self.settings.NL_SORTING_METHOD == 'R2':\n fittings.sort(key=lambda x: x[3])\n else:\n raise ValueError(f'Could not understand the sorting method {self.settings.NL_SORTING_METHOD}')\n\n self.nl_first_point = fittings[0][0]\n self.params = fittings[0][1]\n self.param_errs = fittings[0][2]\n self.nl_R2 = fittings[0][3]\n\n if save: # todo: check here to return a good destination file\n try:\n self.manip.record_fit(\n self.filename, self.params[0],\n self.param_errs[0], silent=False,\n extra=f\"{fittings[0][0]};{fittings[0][1]};nonlinear_auto_{self.settings.NL_FITTING_METHOD};\"\n f\"{nonlinear_has_error}\", fdest_name=self.settings.NL_FITTING_METHOD + '.csv'\n )\n except UnboundLocalError:\n print('Unable to write to file because the subroutine did not return the fitting parameters')\n print(traceback.format_exc())\n self.manip.record_fit(self.filename, 0, 0, extra=f'nonlinear_auto_{self.settings.NL_FITTING_METHOD};'\n f'unable_to_find_viscosity',\n fdest_name=self.settings.NL_FITTING_METHOD + '.csv')\n self.manip.logger(self.filename, 'No Viscosity')\n\n self.nl_done = True\n return self.nl_first_point, self.params, self.param_errs, self.nl_R2\n\n def automatic_nl_fitting(self, save=True):\n fittings = []\n try:\n max_range = len(self.GP) \/\/ self.max_fp\n except ZeroDivisionError:\n max_range = len(self.GP)\n\n for first_point in range(0, max_range, 1):\n GP_arr = np.array(self.GP[first_point:])\n Eta_arr = np.array(self.Eta[first_point:])\n nonlinear_has_error = ''\n try:\n if self.settings.NL_FITTING_METHOD == 'Carreau':\n popt, pcov = curve_fit(self.fit_Carreau, GP_arr, Eta_arr, bounds=(0, np.inf))\n elif self.settings.NL_FITTING_METHOD == 'Cross':\n popt, pcov = curve_fit(self.fit_Cross, GP_arr, Eta_arr, bounds=(0, np.inf))\n elif self.settings.NL_FITTING_METHOD == 'Carreau-Yasuda':\n popt, pcov = curve_fit(self.fit_CarreauYasuda, GP_arr, Eta_arr)\n else:\n raise ValueError(f'Model not present: {self.settings.NL_FITTING_METHOD}')\n\n except FloatingPointError:\n print('!!!! Overflow detected on one of the parameters. Could not determine all parameters')\n nonlinear_has_error = ';param_overflow_during_fitting'\n self.manip.logger(self.filename, 'Overflow')\n continue\n except RuntimeError:\n print('!!!! Overflow detected on one of the parameters. Could not determine all parameters')\n nonlinear_has_error = ';param_overflow_during_fitting'\n self.manip.logger(self.filename, 'Overflow')\n continue\n except OverflowError:\n print('!!!! 
Overflow detected on one of the parameters.')\n                self.manip.logger(self.filename, 'Overflow')\n                continue\n\n            perr = np.sqrt(np.diag(pcov))\n            fittings.append((first_point, popt, perr))\n\n            if self.settings.DEBUG:\n                fitting_params_str = ' '.join([str(round(i, 2)) + '+\/-' +\n                                               str(round(j,2)) for i, j in zip(popt, perr) ])\n                # 'a+\/-aerr b+\/-berr ...'\n                print(f\"{self.settings.NL_FITTING_METHOD} fitting: {fitting_params_str}\")\n\n        if self.settings.NL_SORTING_METHOD == 'eta_0':\n            fittings.sort(key=lambda x: x[2][0])\n        elif self.settings.NL_SORTING_METHOD == 'overall':\n            #fittings.sort(key=lambda x: x[2][0] + x[2][1] + x[2][2] + x[2][3])\n            fittings.sort(key=lambda x: sum(x[2]))  # sums the errors\n        else:\n            raise ValueError(f'Could not understand the sorting method {self.settings.NL_SORTING_METHOD}')\n\n        self.nl_first_point = fittings[0][0]\n        self.params = fittings[0][1]\n        self.param_errs = fittings[0][2]\n\n        if save:  # todo: check here to return a good destination file\n            try:\n                self.manip.record_fit(\n                    self.filename, self.params[0],\n                    self.param_errs[0], silent=False,\n                    extra=f\"{fittings[0][0]};{fittings[0][1]};nonlinear_auto_{self.settings.NL_FITTING_METHOD};\"\n                          f\"{nonlinear_has_error}\", fdest_name=self.settings.NL_FITTING_METHOD + '.csv'\n                )\n            except UnboundLocalError:\n                print('Unable to write to file because the subroutine did not return the fitting parameters')\n                print(traceback.format_exc())\n                self.manip.record_fit(self.filename, 0, 0, extra=f'nonlinear_auto_{self.settings.NL_FITTING_METHOD};'\n                                                                 f'unable_to_find_viscosity',\n                                      fdest_name=self.settings.NL_FITTING_METHOD+'.csv')\n                self.manip.logger(self.filename, 'No Viscosity')\n\n        self.nl_done = True\n        return self.nl_first_point, self.params, self.param_errs\n\n    # TODO: check if the bounds are correct\n    # TODO: extend this function to be able to accept multiple fittings\n    def manual_fit(self, first, last, fit_types, save=True):\n        GP_arr = np.array(self.GP[first:last + 1])\n        Eta_arr = np.array(self.Eta[first:last + 1])\n        fittings = []\n\n        # 'Carreau-Yasuda' is checked before 'Carreau' so the substring match on 'Carreau'\n        # does not shadow it; the nonlinear models are fitted without p0 (they take more\n        # than two parameters), as in the automatic fitting routines.\n        for type in fit_types:\n            if 'Linear' in type:\n                popt, pcov = curve_fit(self.fit_lin, GP_arr, Eta_arr, p0=(30, 0),\n                                       bounds=(0, [self.VISC_LIMIT, 0.0001]))\n            elif 'Carreau-Yasuda' in type:\n                popt, pcov = curve_fit(self.fit_CarreauYasuda, GP_arr, Eta_arr,\n                                       bounds=(0, np.inf))\n            elif 'Carreau' in type:\n                popt, pcov = curve_fit(self.fit_Carreau, GP_arr, Eta_arr,\n                                       bounds=(0, np.inf))\n            elif 'Cross' in type:\n                popt, pcov = curve_fit(self.fit_Cross, GP_arr, Eta_arr,\n                                       bounds=(0, np.inf))\n            else:\n                raise NameError(f'Could not understand the list fit_types {fit_types}')\n\n            perr = np.sqrt(np.diag(pcov))\n\n            self.params = popt  # Will be continuously overwritten. 
todo: will this be a problem?\n self.param_errs = perr\n\n if self.settings.DEBUG:\n # 'a+\/-aerr b+\/-berr ...'\n fitting_params_str = ' '.join([str(round(i, 2)) + '+\/-' +\n str(round(j, 2)) for i, j in zip(popt, perr)])\n print(f\"{fit_types} fitting: {fitting_params_str}\")\n\n if save: # todo: check here to return a good destination file\n self.manip.record_fit(self.settings.NL_FITTING_METHOD, self.params,\n self.param_errs, silent=False)\n\n fittings.append((type, popt, perr))\n\n return fittings\n\n # todo: use AnchoredText instead of text to write the fitting results;\n # from mpl_toolkits.axes_grid.anchored_artists import AnchoredText\n # at = AnchoredText(\"Figure 1a\",\n # prop=dict(size=8), frameon=True,\n # loc=2,\n # )\n # at.patch.set_boxstyle(\"round,pad=0.,rounding_size=0.2\")\n # ax.add_artist(at)\n def plot_error_graphs(self): # todo: If it has both plots, make them side by side\n from mpl_toolkits.axes_grid.anchored_artists import AnchoredText\n TEXT_FILENAME_X = 0.1\n TEXT_PARAMS_X = 0.3\n TEXT_Y = 0.98\n x = np.logspace(np.log10(self.GP[0]), np.log10(self.GP[-1]))\n\n if self.settings.DEBUG:\n print('Debug: x', x)\n print('Debug: params', self.params)\n print('Debug: GP', self.GP, 'Eta', self.Eta)\n\n if self.nl_done:\n if self.model == 'Carreau':\n y, yerr = self.carr_uncertainty(x, *self.params, *self.param_errs)\n elif self.model == 'Cross':\n y, yerr = self.cross_uncertainty(x, *self.params, *self.param_errs)\n elif self.model == 'Carreau-Yasuda':\n y, yerr = self.carryas_uncertainty(x, *self.params, *self.param_errs)\n if self.lin_done:\n y_l, yerr_l = np.ones(len(x)) * self.int, np.ones(len(x)) * self.int_err\n # Creates a horizontal line with n points\n\n if self.nl_done and self.lin_done:\n fig, [axn, axl] = plt.subplots(ncols=2, nrows=1, figsize=(12, 4))\n elif self.nl_done and not self.lin_done:\n fig, axn = plt.subplots(ncols=1, nrows=1, figsize=(6, 4))\n elif not self.nl_done and not self.lin_done:\n fig, axl = plt.subplot(ncols=1, nrows=1, figsize=(6, 4))\n\n if self.nl_done:\n axn.set_xscale('log')\n axn.set_yscale('log')\n axn.plot(self.GP, self.Eta, linewidth=0, marker='o', markersize=5)\n axn.errorbar(x, y, yerr=yerr)\n axn.annotate(str(self.nl_first_point + 1), (self.GP[self.nl_first_point], self.Eta[self.nl_first_point]), color='red')\n if self.nl_last_point == -1:\n axn.annotate(str(len(self.GP)), (self.GP[self.nl_last_point], self.Eta[self.nl_last_point]),\n color='red') # todo: check this function\n else:\n axn.annotate(str(self.nl_last_point), (self.GP[self.nl_last_point], self.Eta[self.nl_last_point]), color='red')\n model_param_names = 'Model: ' + self.model + ' Params: ' + self.param_names\n param_text = \" \".join([str(round(par, 2)) + '+\/-' + str(round(err, 2))\n for par, err in zip(self.params, self.param_errs)])\n\n total_text = f'{self.filename}\\n{model_param_names}\\n{param_text}\\n$R^2$={round(self.nl_R2, 2)}'\n anchored_text = AnchoredText(total_text, loc=3, frameon=True, prop={'fontsize':'small'})\n axn.add_artist(anchored_text)\n\n if self.lin_done:\n axl.set_xscale('log')\n axl.set_yscale('log')\n axl.plot(self.GP, self.Eta, linewidth=0, marker='o', markersize=5)\n axl.errorbar(x, y_l, yerr=yerr_l)\n axl.annotate(str(self.l_first_point + 1), (self.GP[self.l_first_point], self.Eta[self.l_first_point]), color='red')\n if self.l_last_point == -1:\n axl.annotate(str(len(self.GP)), (self.GP[self.l_last_point], self.Eta[self.l_last_point]),\n color='red') # todo: check this function\n else:\n axl.annotate(str(self.l_last_point), 
(self.GP[self.l_last_point], self.Eta[self.l_last_point]), color='red')\n \n model_param_names = 'Model: Linear. Params: Intercept'\n param_text = f\"int = {self.int}+\/-{self.int_err}\"\n total_text = f'{self.filename}\\n{model_param_names}\\n{param_text}\\n$R^2$={round(self.l_R2, 2)}'\n anchored_text = AnchoredText(total_text, loc=3, frameon=True, prop={'fontsize':'small'})\n axl.add_artist(anchored_text)\n\n plt.tight_layout()\n\n if self.settings.SAVE_GRAPHS:\n fig.savefig(self.filename[:-4] + '.png')\n print('Figure saved.')\n if not self.settings.INLINE_GRAPHS and self.settings.PLOT_GRAPHS:\n plt.draw()\n plt.pause(self.wait)\n #plt.clf()\n plt.close(fig)\n elif self.settings.PLOT_GRAPHS:\n plt.show()\n return\n\n\nclass FileManip:\n #def __init__(self, sett):\n # self.settings = sett\n\n @staticmethod\n def ExtractData(fname, FC_segment=0):\n \"\"\"Opens the file fname and extracts the data based on where it finds the word 'Eta' and 'GP', these being\n the Viscosity and the Shear Rate (gamma point). If the file has multiple segments, for example, when multiple\n experiments were done in succession, FC_segment indicates which of those experiments was a Flow Curve.\"\"\"\n fhand = open(fname, 'r')\n GP = []\n Eta = []\n column_eta = 0\n column_gp = 0\n # FC_segment = '3'\n\n # while FC_segment == 0:\n # FC_segment = input(\"What is the segment that has the flow curves? (eg. [1], 2, 3) If you do not know, don't write anything. \")\n # if FC_segment == '':\n # print(fhand.read())\n # elif FC_segment.isnumeric():\n # break\n # else:\n # print('Not a valid number')\n\n for line in fhand:\n if line.startswith(';'):\n column_names = line.rstrip().split(';')\n # if settings['DEBUG']:\n # print('Debug: column names', column_names)\n for i, column in enumerate(column_names):\n if 'Eta' in column and 'Eta*' not in column:\n column_eta = i\n #if settings['DEBUG']:\n # print('Debug: Found Eta at', column_eta)\n if 'GP' in column:\n column_gp = i\n #if settings['DEBUG']:\n # print('Debug: Found GP at', column_gp)\n try:\n GP.append(float(line.replace(',', '.').split(';')[column_gp]))\n Eta.append(float(line.replace(',', '.').split(';')[column_eta]))\n except:\n pass\n\n # if line.startswith(FC_segment + '|'):\n # line = line.rstrip()\n # num, gp, tau, eta, *rest = line.replace(',','.').split(';')\n # GP.append(float(gp))\n # Eta.append(float(eta))\n # #print(line)\n\n fhand.close()\n if len(GP) == 0:\n # print('!!!!No Flow Curve data was found! Re-export the data on file', fname)\n raise ValueError\n # return pd.Series(GP), pd.Series(Eta)\n # if settings['DEBUG']:\n # print('Debug: Extracted Data: GP:', GP, 'Eta:', Eta)\n return GP, Eta\n\n @staticmethod\n def ExtractData_pd(fname):\n \"\"\"Uses pandas do extract the data if it was exported using the data extraction tool\"\"\"\n pd_temp = pd.read_csv(fname, delimiter=';', encoding='latin1', decimal=',')\n pd_temp = pd_temp[pd_temp > 0].dropna()\n\n col_GP = ''\n col_Eta = ''\n\n for col in pd_temp.columns:\n if 'GP' in col:\n col_GP = col\n # print('achou GP em', col)\n if 'Eta' in col:\n col_Eta = col\n # print('achou Eta em', col)\n\n GP = pd_temp[col_GP].tolist()\n Eta = pd_temp[col_Eta].tolist()\n return GP, Eta\n\n @staticmethod\n def record_fit(name, eta0, eta0_err, silent=False, extra='', fdest_name='results.csv'):\n if not silent:\n print(f\"{name}: Intercept={eta0} +- {eta0_err}. 
Extra={extra}\")\n #print(name + ':', 'Intercept', eta0, '+-', eta0_err, extra)\n\n with open(fdest_name, 'a', encoding='utf-8') as fdest:\n #fdest.write(name + ';' + str(eta0) + ';' + str(eta0_err) + ';' + extra + '\\n')\n fdest.write(f\"{name};{eta0};{eta0_err};{extra}\\n\")\n\n @staticmethod\n def select_files():\n files = []\n extension = input('What is the file extension? txt, dat, etc:\\n')\n allfiles = glob.glob('*.' + extension)\n print(*[str(num) + ')' + file + '\\n' for num, file in enumerate(allfiles)], sep='')\n while True:\n file_to_add = input('Which file to add? Number, nothing to continue or \"quit\" to exit: ')\n if file_to_add == 'quit':\n return []\n elif file_to_add == '':\n break\n else:\n try:\n files.append(allfiles[int(file_to_add)])\n except IndexError:\n print('Invalid value')\n except ValueError:\n print('Enter a number, not text')\n if len(files) == 0:\n print('No file was selected! The program will now quit.')\n return files\n print('====Selected files:====')\n print(*[file + '\\n' for file in files], sep='', end='')\n print('=======================')\n return files\n\n @staticmethod\n def logger(file, type, extra=''):\n with open('log', 'a') as log:\n if type == 'Overflow':\n log.write(f'Parameter overflow while trying to fit file {file}: {extra}\\n')\n if type == 'No Viscosity':\n log.write(f'Unable to find viscosity for file {file}\\n')\n if type == 'Failed to open':\n log.write(f'Failed to open file {file}. Re-export the data.')\n else: # type == 'Generic'\n log.write(f'Error while processing {file}: {extra}\\n')\n\n\ndef test():\n settings = Settings.Settings()\n settings.NL_FITTING_METHOD = 'Carreau-Yasuda'\n filename = 'CF_Sac50-3--0.csv'\n fit = Fitter(filename, settings, do_fit=False)\n fit.automatic_nl_fitting_lm(save=True)\n print(fit.model, fit.nl_R2, *fit.params)\n\n return fit\n\n\ndef main():\n settings = Settings.Settings()\n manip = FileManip()\n settings.print_settings()\n do_change = input('Do you want to change the settings? y\/[n]')\n if do_change == 'y':\n settings.edit_settings()\n\n if settings.TREAT_ALL:\n files = glob.glob(f'*.{settings.EXT}')\n if len(files) == 0:\n print(f'No files with the extension {settings.EXT} found.'\n f' Please select them manually or change EXT accordingly.')\n files = manip.select_files()\n\n else:\n files = manip.select_files()\n\n if len(files) == 0:\n print('No files selected. Quitting.')\n sys.exit()\n\n for file in files:\n try:\n fit = Fitter(file, settings, do_fit=True)\n except ValueError: # todo: debug and check what would be needed here.\n print(f'Skipping {file}: Value Error')\n continue\n except KeyError:\n print(f'Skipping {file} Key Error')\n continue\n #print(traceback.format_exc())\n\n if settings.PLOT_GRAPHS or settings.SAVE_GRAPHS:\n try:\n fit.plot_error_graphs()\n except OverflowError: # todo: write which parameter has overflown\n print('!!!! Overflow detected on one of the parameters. 
Could not plot the data')\n nonlinear_has_error = ';param_overflow_during_fitting'\n # todo: log this\n except UnboundLocalError:\n print('Not able to write to file because the subroutine did not return the fitting parameters')\n # todo: log this\n\n\n #fit.plot_error_graphs(file[:-4] + '_lin_' + file[-4:], fit.params, fit.first_point, fit.last_point,\n # model=fit.settings.NL_FITTING_METHOD, param_names=[''])\n\n # # except:\n # # print('Error found while plotting the linear fit')\n # # print(traceback.format_exc())\n # # lin_has_error = 'error_during_fitting'\n # record(file, a, aerr,\n # extra='linear automatic;FP=' + str(lin_points[0]) + 'LP=' + str(lin_points[1]) +\n # lin_has_error, fdest_name='linear.csv')\n\n\n # if settings['PLOT_GRAPHS']:\n # plot_error_graphs(file[:-4] + '_carr_' + file[-4:], GP, Eta,\n # params=np.concatenate((popt, perr)),\n # first_point=nl_first, model=settings['NL_FITTING_METHOD'],\n # param_names=Param_names_errs[settings['NL_FITTING_METHOD']])\n # except OverflowError: # todo: write which parameter has overflown\n # print('!!!! Overflow detected on one of the parameters. Could not plot the data')\n # nonlinear_has_error = ';param_overflow_during_fitting'\n # try:\n # record(file, popt[0], perr[0], extra='nonlinear_auto_' + settings['NL_FITTING_METHOD'] +\n # nonlinear_has_error,\n # fdest_name=settings['NL_FITTING_METHOD'] + '.csv')\n # except UnboundLocalError:\n # print('Not able to write to file because the subroutine did not return the fitting parameters')\n # record(file, 0, 0, extra='nonlinear_auto_' + settings['NL_FITTING_METHOD'] + ';' +\n # 'unable_to_find_viscosity',\n # fdest_name=settings['NL_FITTING_METHOD'] + '.csv')\n # with open('log', 'a') as log:\n # # log.write('Unable to find viscosity for file ' + file + '\\n')\n # except:\n # print('!!!!We have encountered an error while processing file', file)\n # print(traceback.format_exc())\n # with open('log', 'a') as log:\n # log.write('Error while processing ' + file + '\\n')\n # log.write(traceback.format_exc())\n\nif __name__ == '__main__':\n fit = main()\n #main()\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_124","text":"def EDMFanalysis(numberofsensor,numberofmodelinstance,meanuncertaintydata,stduncertaintydata,ID_to_be_excluded,finalpathprediction,finalpathmeas,sidakvalue,uncmultiplier,indextext):\n \"\"\"\n Perform EDMF analysis with all uploaded files and intermediate results\n\n Developed by : (ETH Singapore)\n Contact : \n Date: August 03, 2021\n\n INPUTS:\n numberofsensor : number of sensors involved in the studied problem.\n numberofmodelinstance : number of initial model instances involved in the studied problem.\n ID_to_be_excluded: index of measurements to be excluded for EDMF analysis.\n finalpathprediction : directory of the Prediction(Geo).xlsx.\n finalpathmeas: directory of the Measurements(Geo).xlsx.\n sidakvalue: target reliability of identification\n uncmultiplier : multiplier for uncertainty to assess sensitivity of results to uncertainty definitions\n indextext: index of measurements to be excluded for EDMF analysis in the original format.\n OUTPUTS:\n prediction : predictions made with candidate models.\n measurement : measurement data.\n finalresults: logic results of initial model instances: 0 represents falsified model; 1 represents candidate model\n IMS: initial model instances\n CMS: candidate models\n CMSID: location reference of candidate models\n NOTE:\n These inputs will be automatically read from other functions. 
No manual actions are needed.\n    \"\"\"\n\n    import numpy\n    import os\n    import xlrd\n    from openpyxl import load_workbook\n    import pandas\n    from scipy.stats import norm\n\n\n    #########for validation\n    fullset = numpy.arange(numberofsensor)+1\n    ID_to_be_included_ = set(ID_to_be_excluded) ^ set(fullset)\n    ID_to_be_included = numpy.zeros((len(ID_to_be_included_), 1))\n    for i in range(0, len(ID_to_be_included_)):\n        ID_to_be_included[i][0] = (list(ID_to_be_included_)[i])\n    ID_to_be_included = ID_to_be_included.astype(numpy.int64)-1\n\n    #####load uncertainty excel\n    excelfileuncertainty1_ = meanuncertaintydata\n    excelfileuncertainty2_ = stduncertaintydata\n\n    excelfileprediction = pandas.read_excel(finalpathprediction,sheet_name = 'Prediction', engine = 'openpyxl')\n\n    excelfilemeas = pandas.read_excel(finalpathmeas,header = None,sheet_name = 'Measurement', engine = 'openpyxl')\n\n    ##########read uncertainty\n    excelsheetuncertainty1 = excelfileuncertainty1_[0:numberofmodelinstance, 0:numberofsensor]\n    ucombinemean = excelsheetuncertainty1\n\n    excelsheetuncertainty2 = excelfileuncertainty2_[0:numberofmodelinstance, 0:numberofsensor]\n    ucombinesd = excelsheetuncertainty2\n\n\n    ###########calculate sidak\n    lowerbound = numpy.zeros((numberofmodelinstance, numberofsensor))\n    upperbound = numpy.zeros((numberofmodelinstance, numberofsensor))\n\n    sidak = sidakvalue ** (1 \/ len(ID_to_be_included))\n\n    for i in range(0,numberofmodelinstance):\n        for s in range(0,numberofsensor):\n            bound1_ = norm.ppf((1-sidak)\/2,loc = ucombinemean[i][s]*uncmultiplier,scale = ucombinesd[i][s])\n            bound2_ = norm.ppf(1-(1-sidak)\/2,loc = ucombinemean[i][s]*uncmultiplier,scale = ucombinesd[i][s])\n            lowerbound[i][s] = bound1_\n            upperbound[i][s] = bound2_\n\n\n    #######falsification\n    #######load predictions and measurements from other functions or excel\n    excelsheetprediction = excelfileprediction.to_numpy()\n    prediction = numpy.zeros((numberofmodelinstance, numberofsensor))\n    for i in range(0,numberofmodelinstance):\n        for s in range(0,numberofsensor):\n            excelsheetpredictionvalue_= excelsheetprediction[i,s+2]\n            prediction[i][s] = excelsheetpredictionvalue_\n\n    excelsheetmeas = excelfilemeas.to_numpy()\n    measurement = numpy.zeros((numberofsensor,1))\n    for s in range(0,numberofsensor):\n        excelsheetmeasvalue_= excelsheetmeas[0,s]\n        measurement[s][0] = excelsheetmeasvalue_\n\n    falsification = numpy.zeros((numberofmodelinstance, numberofsensor))\n\n    # a model instance passes a measurement when its residual lies inside the Sidak-corrected bounds\n    for i in range(0,numberofmodelinstance):\n        for s in range(0,numberofsensor):\n            residual = prediction[i][s] - measurement[s][0]\n            falsification_ = (residual < upperbound[i][s]) and (residual > lowerbound[i][s])\n            if falsification_==True:\n                falsification[i][s] = 1\n            else:\n                falsification[i][s] = 0\n\n    ##########for validation\n    falsificationfinal = numpy.zeros((numberofmodelinstance,len(ID_to_be_included)))\n    for i in range(0,len(ID_to_be_included)):\n        for s in range(0,numberofmodelinstance):\n            falsificationfinal[s][i] = falsification[s,int(ID_to_be_included[i][0])]\n\n    final = sum(numpy.transpose(falsificationfinal))\n    is_candidate = numpy.zeros((numberofmodelinstance,1))\n\n    for i in range(0,numberofmodelinstance):\n        if final[i] == float(len(ID_to_be_included)):\n            is_candidate[i][0] = 1\n        else:\n            is_candidate[i][0] = 0\n\n    ###write to excel\n    finalpathCMS = finalpathprediction\n    savefile = load_workbook(finalpathCMS)\n    savesheets = savefile.sheetnames\n    sheetCMS = savefile[savesheets[1]]\n\n    for s in range(0,numberofmodelinstance):\n        sheetCMS.cell(row = s+3, column = 5).value = is_candidate[s][0]\n\n    
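# Illustrative sketch of the Sidak-corrected falsification test used above (not part of the\n    # original routine; the numbers and variable names below are assumed for illustration).\n    # The target reliability is spread over the included measurements, so each per-measurement\n    # probability becomes sidakvalue ** (1 \/ n), and the per-sensor bounds are the two-sided\n    # quantiles of the combined-uncertainty Gaussian:\n    #     from scipy.stats import norm\n    #     target, n_meas = 0.95, 4\n    #     sidak_level = target ** (1 \/ n_meas)          # ~0.9873\n    #     lo = norm.ppf((1 - sidak_level) \/ 2, loc=0.0, scale=1.5)\n    #     hi = norm.ppf(1 - (1 - sidak_level) \/ 2, loc=0.0, scale=1.5)\n    #     residual = 2.0                                # assumed prediction - measurement\n    #     passes_this_sensor = lo < residual < hi       # True -> not falsified for this sensor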
savefile.save(finalpathCMS)\n\n ###########load IMS\n excelsheetIMS = excelfileprediction.to_numpy()[0:numberofmodelinstance, 0:2]\n ims = numpy.zeros((numberofmodelinstance, 2))\n for i in range(0,numberofmodelinstance):\n for s in range(0,2):\n excelsheetIMSvalue_= excelsheetIMS[i,s]\n ims[i][s] = excelsheetIMSvalue_\n\n ########CMS\n cmsID = numpy.where(is_candidate == 1)\n cms = ims[cmsID[0], :]\n\n ########write to excel\n from f_resultsexcel_Geo import f_resultsexcel_Geo\n f_resultsexcel_Geo(ims, prediction, is_candidate,uncmultiplier,sidakvalue, indextext)\n\n return prediction,measurement,is_candidate,ims,cms,cmsID,sidakvalue,indextext,uncmultiplier"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_125","text":"import scipy.io\nimport scipy.misc\nfrom glob import glob\nimport os\nimport numpy as np\nfrom ops import *\nimport tensorflow as tf\nfrom tensorflow import contrib\nfrom menpo_functions import *\nfrom logging_functions import *\nfrom data_loading_functions import *\n\n\nclass DeepHeatmapsModel(object):\n\n \"\"\"facial landmark localization Network\"\"\"\n\n def __init__(self, mode='TRAIN', train_iter=100000, batch_size=10, learning_rate=1e-3, adam_optimizer=True,\n momentum=0.95, step=100000, gamma=0.1, reg=0, weight_initializer='xavier', weight_initializer_std=0.01,\n bias_initializer=0.0, image_size=256, c_dim=3, num_landmarks=68, sigma=1.5, scale=1, margin=0.25,\n bb_type='gt', approx_maps=True, win_mult=3.33335, augment_basic=True, basic_start=0,\n augment_texture=False, p_texture=0., augment_geom=False, p_geom=0., artistic_step=-1, artistic_start=0,\n output_dir='output', save_model_path='model', save_sample_path='sample', save_log_path='logs',\n test_model_path='model\/deep_heatmaps-50000', pre_train_path='model\/deep_heatmaps-50000',load_pretrain=False,\n img_path='data', test_data='full', valid_data='full', valid_size=0, log_valid_every=5,\n train_crop_dir='crop_gt_margin_0.25', img_dir_ns='crop_gt_margin_0.25_ns',\n print_every=100, save_every=5000, sample_every=5000, sample_grid=9, sample_to_log=True,\n debug_data_size=20, debug=False, epoch_data_dir='epoch_data', use_epoch_data=False, menpo_verbose=True):\n\n # define some extra parameters\n\n self.log_histograms = False # save weight + gradient histogram to log\n self.save_valid_images = True # sample heat maps of validation images\n self.log_artistic_augmentation_probs = False # save p_texture & p_geom to log\n self.sample_per_channel = False # sample heatmaps separately for each landmark\n self.approx_maps_gpu = False # create heat-maps on gpu. NOT RECOMMENDED. TODO: REMOVE\n\n # for fine-tuning, choose reset_training_op==True. 
when resuming training, reset_training_op==False\n self.reset_training_op = False\n\n self.allocate_once = True # create batch images\/landmarks\/maps zero arrays only once\n\n self.fast_img_gen = True\n\n self.compute_nme = True # compute normalized mean error\n\n self.config = tf.ConfigProto()\n self.config.gpu_options.allow_growth = True\n\n # sampling and logging parameters\n self.print_every = print_every # print losses to screen + log\n self.save_every = save_every # save model\n self.sample_every = sample_every # save images of gen heat maps compared to GT\n self.sample_grid = sample_grid # number of training images in sample\n self.sample_to_log = sample_to_log # sample images to log instead of disk\n self.log_valid_every = log_valid_every # log validation loss (in epochs)\n\n self.debug = debug\n self.debug_data_size = debug_data_size\n self.use_epoch_data = use_epoch_data\n self.epoch_data_dir = epoch_data_dir\n\n self.load_pretrain = load_pretrain\n self.pre_train_path = pre_train_path\n\n self.mode = mode\n self.train_iter = train_iter\n self.learning_rate = learning_rate\n\n self.image_size = image_size\n self.c_dim = c_dim\n self.batch_size = batch_size\n\n self.num_landmarks = num_landmarks\n\n self.save_log_path = save_log_path\n self.save_sample_path = save_sample_path\n self.save_model_path = save_model_path\n self.test_model_path = test_model_path\n self.img_path=img_path\n\n self.momentum = momentum\n self.step = step # for lr decay\n self.gamma = gamma # for lr decay\n self.reg = reg # weight decay scale\n\n self.weight_initializer = weight_initializer # random_normal or xavier\n self.weight_initializer_std = weight_initializer_std\n self.bias_initializer = bias_initializer\n self.adam_optimizer = adam_optimizer\n\n self.sigma = sigma # sigma for heatmap generation\n self.scale = scale # scale for image normalization 255 \/ 1 \/ 0\n self.win_mult = win_mult # gaussian filter size for cpu\/gpu approximation: 2 * sigma * win_mult + 1\n self.approx_maps_cpu = approx_maps # create heat-maps by inserting gaussian filter around landmark locations\n\n self.test_data = test_data # if mode is TEST, this choose the set to use full\/common\/challenging\/test\/art\n self.train_crop_dir = train_crop_dir\n self.img_dir_ns = os.path.join(img_path, img_dir_ns)\n self.augment_basic = augment_basic # perform basic augmentation (rotation,flip,crop)\n self.augment_texture = augment_texture # perform artistic texture augmentation (NS)\n self.p_texture = p_texture # initial probability of artistic texture augmentation\n self.augment_geom = augment_geom # perform artistic geometric augmentation\n self.p_geom = p_geom # initial probability of artistic geometric augmentation\n self.artistic_step = artistic_step # increase probability of artistic augmentation every X epochs\n self.artistic_start = artistic_start # min epoch to start artistic augmentation\n self.basic_start = basic_start # min epoch to start basic augmentation\n\n self.valid_size = valid_size\n self.valid_data = valid_data\n\n # load image, bb and landmark data using menpo\n self.bb_dir = os.path.join(img_path, 'Bounding_Boxes')\n self.bb_dictionary = load_bb_dictionary(self.bb_dir, mode, test_data=self.test_data)\n\n if self.use_epoch_data:\n epoch_0 = os.path.join(self.epoch_data_dir, '0')\n self.img_menpo_list = load_menpo_image_list(\n img_path, train_crop_dir=epoch_0, img_dir_ns=None, mode=mode, bb_dictionary=self.bb_dictionary,\n image_size=self.image_size,test_data=self.test_data, augment_basic=False, 
augment_texture=False,\n augment_geom=False, verbose=menpo_verbose)\n else:\n self.img_menpo_list = load_menpo_image_list(\n img_path, train_crop_dir, self.img_dir_ns, mode, bb_dictionary=self.bb_dictionary,\n image_size=self.image_size, margin=margin, bb_type=bb_type, test_data=self.test_data,\n augment_basic=(augment_basic and basic_start == 0),\n augment_texture=(augment_texture and artistic_start == 0 and p_texture > 0.), p_texture=p_texture,\n augment_geom=(augment_geom and artistic_start == 0 and p_geom > 0.), p_geom=p_geom,\n verbose=menpo_verbose)\n\n if mode == 'TRAIN':\n\n train_params = locals()\n print_training_params_to_file(train_params) # save init parameters\n\n self.train_inds = np.arange(len(self.img_menpo_list))\n\n if self.debug:\n self.train_inds = self.train_inds[:self.debug_data_size]\n self.img_menpo_list = self.img_menpo_list[self.train_inds]\n\n if valid_size > 0:\n\n self.valid_bb_dictionary = load_bb_dictionary(self.bb_dir, 'TEST', test_data=self.valid_data)\n self.valid_img_menpo_list = load_menpo_image_list(\n img_path, train_crop_dir, self.img_dir_ns, 'TEST', bb_dictionary=self.valid_bb_dictionary,\n image_size=self.image_size, margin=margin, bb_type=bb_type, test_data=self.valid_data,\n verbose=menpo_verbose)\n\n np.random.seed(0)\n self.val_inds = np.arange(len(self.valid_img_menpo_list))\n np.random.shuffle(self.val_inds)\n self.val_inds = self.val_inds[:self.valid_size]\n\n self.valid_img_menpo_list = self.valid_img_menpo_list[self.val_inds]\n\n if self.approx_maps_cpu:\n self.valid_images_loaded, self.valid_gt_maps_loaded, self.valid_landmarks_loaded =\\\n load_images_landmarks_approx_maps(\n self.valid_img_menpo_list, np.arange(self.valid_size), primary=True, image_size=self.image_size,\n num_landmarks=self.num_landmarks, c_dim=self.c_dim, scale=self.scale, win_mult=self.win_mult,\n sigma=self.sigma, save_landmarks=True)\n else:\n self.valid_images_loaded, self.valid_gt_maps_loaded, self.valid_landmarks_loaded =\\\n load_images_landmarks_maps(\n self.valid_img_menpo_list, np.arange(self.valid_size), primary=True, image_size=self.image_size,\n c_dim=self.c_dim, num_landmarks=self.num_landmarks, scale=self.scale, sigma=self.sigma,\n save_landmarks=True)\n\n if self.allocate_once:\n self.valid_landmarks_pred = np.zeros([self.valid_size, self.num_landmarks, 2]).astype('float32')\n\n if self.valid_size > self.sample_grid:\n self.valid_gt_maps_loaded = self.valid_gt_maps_loaded[:self.sample_grid]\n else:\n self.val_inds = None\n\n self.epoch_inds_shuffle = train_val_shuffle_inds_per_epoch(\n self.val_inds, self.train_inds, train_iter, batch_size, save_log_path)\n\n def add_placeholders(self):\n\n if self.mode == 'TEST':\n self.images = tf.placeholder(\n tf.float32, [None, self.image_size, self.image_size, self.c_dim], 'images')\n\n self.heatmaps_small = tf.placeholder(\n tf.float32, [None, int(self.image_size\/4), int(self.image_size\/4), self.num_landmarks], 'heatmaps_small')\n self.lms_small = tf.placeholder(tf.float32, [None, self.num_landmarks, 2], 'lms_small')\n self.pred_lms_small = tf.placeholder(tf.float32, [None, self.num_landmarks, 2], 'pred_lms_small')\n\n elif self.mode == 'TRAIN':\n self.images = tf.placeholder(\n tf.float32, [None, self.image_size, self.image_size, self.c_dim], 'train_images')\n\n self.heatmaps_small = tf.placeholder(\n tf.float32, [None, int(self.image_size\/4), int(self.image_size\/4), self.num_landmarks], 'train_heatmaps_small')\n\n self.train_lms_small = tf.placeholder(tf.float32, [None, self.num_landmarks, 2], 
'train_lms_small')\n self.train_pred_lms_small = tf.placeholder(tf.float32, [None, self.num_landmarks, 2], 'train_pred_lms_small')\n\n self.valid_lms_small = tf.placeholder(tf.float32, [None, self.num_landmarks, 2], 'valid_lms_small')\n self.valid_pred_lms_small = tf.placeholder(tf.float32, [None, self.num_landmarks, 2], 'valid_pred_lms_small')\n\n self.p_texture_log = tf.placeholder(tf.float32, [])\n self.p_geom_log = tf.placeholder(tf.float32, [])\n\n self.sparse_hm_small = tf.placeholder(tf.float32, [None, int(self.image_size\/4), int(self.image_size\/4), 1])\n\n if self.sample_to_log:\n row = int(np.sqrt(self.sample_grid))\n self.log_image_map = tf.placeholder(\n tf.uint8, [None,row * int(self.image_size\/4), 3 * row *int(self.image_size\/4), self.c_dim], 'sample_img_map')\n if self.sample_per_channel:\n row = np.ceil(np.sqrt(self.num_landmarks)).astype(np.int64)\n self.log_map_channels = tf.placeholder(\n tf.uint8, [None, row * int(self.image_size\/4), 2 * row * int(self.image_size\/4), self.c_dim],\n 'sample_map_channels')\n\n def heatmaps_network(self, input_images, reuse=None, name='pred_heatmaps'):\n\n with tf.name_scope(name):\n\n if self.weight_initializer == 'xavier':\n weight_initializer = contrib.layers.xavier_initializer()\n else:\n weight_initializer = tf.random_normal_initializer(stddev=self.weight_initializer_std)\n\n bias_init = tf.constant_initializer(self.bias_initializer)\n\n with tf.variable_scope('heatmaps_network'):\n with tf.name_scope('primary_net'):\n\n l1 = conv_relu_pool(input_images, 5, 128, conv_ker_init=weight_initializer, conv_bias_init=bias_init,\n reuse=reuse, var_scope='conv_1')\n l2 = conv_relu_pool(l1, 5, 128, conv_ker_init=weight_initializer, conv_bias_init=bias_init,\n reuse=reuse, var_scope='conv_2')\n l3 = conv_relu(l2, 5, 128, conv_ker_init=weight_initializer, conv_bias_init=bias_init,\n reuse=reuse, var_scope='conv_3')\n\n l4_1 = conv_relu(l3, 3, 128, conv_dilation=1, conv_ker_init=weight_initializer,\n conv_bias_init=bias_init, reuse=reuse, var_scope='conv_4_1')\n l4_2 = conv_relu(l3, 3, 128, conv_dilation=2, conv_ker_init=weight_initializer,\n conv_bias_init=bias_init, reuse=reuse, var_scope='conv_4_2')\n l4_3 = conv_relu(l3, 3, 128, conv_dilation=3, conv_ker_init=weight_initializer,\n conv_bias_init=bias_init, reuse=reuse, var_scope='conv_4_3')\n l4_4 = conv_relu(l3, 3, 128, conv_dilation=4, conv_ker_init=weight_initializer,\n conv_bias_init=bias_init, reuse=reuse, var_scope='conv_4_4')\n\n l4 = tf.concat([l4_1, l4_2, l4_3, l4_4], 3, name='conv_4')\n\n l5_1 = conv_relu(l4, 3, 256, conv_dilation=1, conv_ker_init=weight_initializer,\n conv_bias_init=bias_init, reuse=reuse, var_scope='conv_5_1')\n l5_2 = conv_relu(l4, 3, 256, conv_dilation=2, conv_ker_init=weight_initializer,\n conv_bias_init=bias_init, reuse=reuse, var_scope='conv_5_2')\n l5_3 = conv_relu(l4, 3, 256, conv_dilation=3, conv_ker_init=weight_initializer,\n conv_bias_init=bias_init, reuse=reuse, var_scope='conv_5_3')\n l5_4 = conv_relu(l4, 3, 256, conv_dilation=4, conv_ker_init=weight_initializer,\n conv_bias_init=bias_init, reuse=reuse, var_scope='conv_5_4')\n\n l5 = tf.concat([l5_1, l5_2, l5_3, l5_4], 3, name='conv_5')\n\n l6 = conv_relu(l5, 1, 512, conv_ker_init=weight_initializer,\n conv_bias_init=bias_init, reuse=reuse, var_scope='conv_6')\n l7 = conv_relu(l6, 1, 256, conv_ker_init=weight_initializer,\n conv_bias_init=bias_init, reuse=reuse, var_scope='conv_7')\n primary_out = conv(l7, 1, self.num_landmarks, conv_ker_init=weight_initializer,\n conv_bias_init=bias_init, 
reuse=reuse, var_scope='conv_8')\n\n self.all_layers = [l1, l2, l3, l4, l5, l6, l7, primary_out]\n\n return primary_out\n\n def build_model(self):\n self.pred_hm_p = self.heatmaps_network(self.images,name='heatmaps_prediction')\n\n def build_hm_generator(self): # TODO: remove\n # generate heat-maps using:\n # a sparse base (matrix of zeros with 1's in landmark locations) and convolving with a gaussian filter\n print (\"*** using convolution to create heat-maps. use this option only with GPU support ***\")\n\n # create gaussian filter\n win_small = int(self.win_mult * self.sigma)\n x_small, y_small = np.mgrid[0:2*win_small+1, 0:2*win_small+1]\n\n gauss_small = (8. \/ 3) * self.sigma * gaussian(x_small, y_small, win_small, win_small, sigma=self.sigma)\n gauss_small = tf.constant(gauss_small, tf.float32)\n gauss_small = tf.reshape(gauss_small, [2 * win_small + 1, 2 * win_small + 1, 1, 1])\n\n # convolve sparse map with gaussian\n self.filt_hm_small = tf.nn.conv2d(self.sparse_hm_small, gauss_small, strides=[1, 1, 1, 1], padding='SAME')\n self.filt_hm_small = tf.transpose(\n tf.concat(tf.split(self.filt_hm_small, self.batch_size, axis=0), 3), [3, 1, 2, 0])\n\n def create_loss_ops(self): # TODO: calculate NME on resized maps to 256\n\n def l2_loss_norm_eyes(pred_landmarks, real_landmarks, normalize=True, name='NME'):\n\n with tf.name_scope(name):\n with tf.name_scope('real_pred_landmarks_rmse'):\n landmarks_rms_err = tf.reduce_mean(\n tf.sqrt(tf.reduce_sum(tf.square(pred_landmarks - real_landmarks), axis=2)), axis=1)\n if normalize:\n with tf.name_scope('inter_pupil_dist'):\n with tf.name_scope('left_eye_center'):\n p1 = tf.reduce_mean(tf.slice(real_landmarks, [0, 42, 0], [-1, 6, 2]), axis=1)\n with tf.name_scope('right_eye_center'):\n p2 = tf.reduce_mean(tf.slice(real_landmarks, [0, 36, 0], [-1, 6, 2]), axis=1)\n\n eye_dist = tf.sqrt(tf.reduce_sum(tf.square(p1 - p2), axis=1))\n\n return landmarks_rms_err \/ eye_dist\n else:\n return landmarks_rms_err\n\n if self.mode is 'TRAIN':\n primary_maps_diff = self.pred_hm_p-self.heatmaps_small\n self.total_loss = 1000.*tf.reduce_mean(tf.square(primary_maps_diff))\n\n # add weight decay\n self.total_loss += self.reg * tf.add_n(\n [tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name])\n\n if self.compute_nme:\n self.nme_loss = tf.reduce_mean(l2_loss_norm_eyes(self.train_pred_lms_small,self.train_lms_small))\n\n if self.valid_size > 0 and self.compute_nme:\n self.valid_nme_loss = tf.reduce_mean(l2_loss_norm_eyes(self.valid_pred_lms_small,self.valid_lms_small))\n\n elif self.mode == 'TEST' and self.compute_nme:\n self.nme_per_image = l2_loss_norm_eyes(self.pred_lms_small, self.lms_small)\n self.nme_loss = tf.reduce_mean(self.nme_per_image)\n\n def predict_landmarks_in_batches(self, image_paths, session):\n\n num_batches = int(1.*len(image_paths)\/self.batch_size)\n if num_batches == 0:\n batch_size = len(image_paths)\n num_batches = 1\n else:\n batch_size = self.batch_size\n\n img_inds = np.arange(len(image_paths))\n for j in range(num_batches):\n batch_inds = img_inds[j * batch_size:(j + 1) * batch_size]\n\n batch_images, _, batch_lms_small = \\\n load_images_landmarks_maps(\n self.img_menpo_list, batch_inds, primary=True, image_size=self.image_size,\n c_dim=self.c_dim, num_landmarks=self.num_landmarks, scale=self.scale, sigma=self.sigma,\n save_landmarks=self.compute_nme)\n\n batch_maps_small_pred = session.run(self.pred_hm_p, {self.images: batch_images})\n batch_pred_landmarks = batch_heat_maps_to_landmarks(\n 
batch_maps_small_pred, batch_size=batch_size, image_size=int(self.image_size\/4),\n num_landmarks=self.num_landmarks)\n\n if j == 0:\n all_pred_landmarks = batch_pred_landmarks.copy()\n all_gt_landmarks = batch_lms_small.copy()\n else:\n all_pred_landmarks = np.concatenate((all_pred_landmarks,batch_pred_landmarks),0)\n all_gt_landmarks = np.concatenate((all_gt_landmarks, batch_lms_small), 0)\n\n reminder = len(image_paths)-num_batches*batch_size\n\n if reminder > 0:\n reminder_inds = img_inds[-reminder:]\n\n batch_images, _, batch_lms_small = \\\n load_images_landmarks_maps(\n self.img_menpo_list, reminder_inds, primary=True, image_size=self.image_size,\n c_dim=self.c_dim, num_landmarks=self.num_landmarks, scale=self.scale, sigma=self.sigma,\n save_landmarks=self.compute_nme)\n\n batch_maps_small_pred = session.run(self.pred_hm_p, {self.images: batch_images})\n batch_pred_landmarks = batch_heat_maps_to_landmarks(\n batch_maps_small_pred, batch_size=reminder, image_size=int(self.image_size\/4),\n num_landmarks=self.num_landmarks)\n\n all_pred_landmarks = np.concatenate((all_pred_landmarks, batch_pred_landmarks), 0)\n all_gt_landmarks = np.concatenate((all_gt_landmarks, batch_lms_small), 0)\n\n return all_pred_landmarks, all_gt_landmarks\n\n def predict_landmarks_in_batches_loaded(self, images, session):\n\n num_images = int(images.shape[0])\n num_batches = int(1.*num_images\/self.batch_size)\n if num_batches == 0:\n batch_size = num_images\n num_batches = 1\n else:\n batch_size = self.batch_size\n\n for j in range(num_batches):\n\n batch_images = images[j * batch_size:(j + 1) * batch_size,:,:,:]\n batch_maps_small_pred = session.run(self.pred_hm_p, {self.images: batch_images})\n if self.allocate_once:\n batch_heat_maps_to_landmarks_alloc_once(\n batch_maps=batch_maps_small_pred,\n batch_landmarks=self.valid_landmarks_pred[j * batch_size:(j + 1) * batch_size, :, :],\n batch_size=batch_size, image_size=int(self.image_size\/4), num_landmarks=self.num_landmarks)\n else:\n batch_pred_landmarks = batch_heat_maps_to_landmarks(\n batch_maps_small_pred, batch_size=batch_size, image_size=int(self.image_size\/4),\n num_landmarks=self.num_landmarks)\n\n if j == 0:\n all_pred_landmarks = batch_pred_landmarks.copy()\n else:\n all_pred_landmarks = np.concatenate((all_pred_landmarks, batch_pred_landmarks), 0)\n\n reminder = num_images-num_batches*batch_size\n if reminder > 0:\n\n batch_images = images[-reminder:, :, :, :]\n batch_maps_small_pred = session.run(self.pred_hm_p, {self.images: batch_images})\n if self.allocate_once:\n batch_heat_maps_to_landmarks_alloc_once(\n batch_maps=batch_maps_small_pred,\n batch_landmarks=self.valid_landmarks_pred[-reminder:, :, :],\n batch_size=reminder, image_size=int(self.image_size\/4), num_landmarks=self.num_landmarks)\n else:\n batch_pred_landmarks = batch_heat_maps_to_landmarks(\n batch_maps_small_pred, batch_size=reminder, image_size=int(self.image_size\/4),\n num_landmarks=self.num_landmarks)\n\n all_pred_landmarks = np.concatenate((all_pred_landmarks, batch_pred_landmarks), 0)\n\n if not self.allocate_once:\n return all_pred_landmarks\n\n def create_summary_ops(self):\n\n self.batch_summary_op = tf.summary.scalar('l_total', self.total_loss)\n\n if self.compute_nme:\n l_nme = tf.summary.scalar('l_nme', self.nme_loss)\n self.batch_summary_op = tf.summary.merge([self.batch_summary_op, l_nme])\n\n if self.log_histograms:\n var_summary = [tf.summary.histogram(var.name, var) for var in tf.trainable_variables()]\n grads = tf.gradients(self.total_loss, 
tf.trainable_variables())\n grads = list(zip(grads, tf.trainable_variables()))\n grad_summary = [tf.summary.histogram(var.name + '\/grads', grad) for grad, var in grads]\n activ_summary = [tf.summary.histogram(layer.name, layer) for layer in self.all_layers]\n self.batch_summary_op = tf.summary.merge([self.batch_summary_op, var_summary, grad_summary, activ_summary])\n\n if self.augment_texture and self.log_artistic_augmentation_probs:\n p_texture_summary = tf.summary.scalar('p_texture', self.p_texture_log)\n self.batch_summary_op = tf.summary.merge([self.batch_summary_op, p_texture_summary])\n\n if self.augment_geom and self.log_artistic_augmentation_probs:\n p_geom_summary = tf.summary.scalar('p_geom', self.p_geom_log)\n self.batch_summary_op = tf.summary.merge([self.batch_summary_op, p_geom_summary])\n\n if self.valid_size > 0 and self.compute_nme:\n self.valid_summary = tf.summary.scalar('valid_l_nme', self.valid_nme_loss)\n\n if self.sample_to_log:\n img_map_summary =tf.summary.image('compare_map_to_gt',self.log_image_map)\n if self.sample_per_channel:\n map_channels_summary = tf.summary.image('compare_map_channels_to_gt', self.log_map_channels)\n self.img_summary = tf.summary.merge([img_map_summary, map_channels_summary])\n else:\n self.img_summary = img_map_summary\n if self.valid_size >= self.sample_grid:\n img_map_summary_valid = tf.summary.image('compare_map_to_gt_valid', self.log_image_map)\n if self.sample_per_channel:\n map_channels_summary_valid = tf.summary.image('compare_map_channels_to_gt_valid', self.log_map_channels)\n self.img_summary_valid = tf.summary.merge([img_map_summary_valid, map_channels_summary_valid])\n else:\n self.img_summary_valid = img_map_summary_valid\n\n def eval(self):\n\n self.add_placeholders()\n # build model\n self.build_model()\n self.create_loss_ops()\n\n if self.debug:\n self.img_menpo_list = self.img_menpo_list[:np.min([self.debug_data_size, len(self.img_menpo_list)])]\n\n num_images = len(self.img_menpo_list)\n img_inds = np.arange(num_images)\n\n sample_iter = np.ceil(1. 
* num_images \/ self.sample_grid).astype('int')\n\n with tf.Session(config=self.config) as sess:\n\n # load trained parameters\n print ('loading test model...')\n saver = tf.train.Saver()\n saver.restore(sess, self.test_model_path)\n\n _, model_name = os.path.split(self.test_model_path)\n\n gt_provided = self.img_menpo_list[0].has_landmarks # check if GT landmarks provided\n\n for i in range(sample_iter):\n\n batch_inds = img_inds[i * self.sample_grid:(i + 1) * self.sample_grid]\n\n if not gt_provided:\n batch_images = load_images(self.img_menpo_list, batch_inds, image_size=self.image_size,\n c_dim=self.c_dim, scale=self.scale)\n\n batch_maps_small_pred = sess.run(self.pred_hm_p, {self.images: batch_images})\n\n batch_maps_gt = None\n else:\n # TODO: add option for approx maps + allocate once\n batch_images, batch_maps_gt, _ = \\\n load_images_landmarks_maps(\n self.img_menpo_list, batch_inds, primary=True, image_size=self.image_size,\n c_dim=self.c_dim, num_landmarks=self.num_landmarks, scale=self.scale, sigma=self.sigma,\n save_landmarks=False)\n\n batch_maps_small_pred = sess.run(self.pred_hm_p, {self.images: batch_images})\n\n sample_path_imgs = os.path.join(\n self.save_sample_path, model_name +'-'+ self.test_data+'-sample-%d-to-%d-1.png' % (\n i * self.sample_grid, (i + 1) * self.sample_grid))\n\n merged_img = merge_images_landmarks_maps_gt(\n batch_images.copy(), batch_maps_small_pred, batch_maps_gt, image_size=self.image_size,\n num_landmarks=self.num_landmarks, num_samples=self.sample_grid, scale=self.scale, circle_size=0,\n fast=self.fast_img_gen)\n\n scipy.misc.imsave(sample_path_imgs, merged_img)\n\n if self.sample_per_channel:\n map_per_channel = map_comapre_channels(\n batch_images.copy(), batch_maps_small_pred,batch_maps_gt, image_size=int(self.image_size\/4),\n num_landmarks=self.num_landmarks, scale=self.scale)\n\n sample_path_channels = os.path.join(\n self.save_sample_path, model_name + '-' + self.test_data + '-sample-%d-to-%d-3.png' % (\n i * self.sample_grid, (i + 1) * self.sample_grid))\n\n scipy.misc.imsave(sample_path_channels, map_per_channel)\n\n print ('saved %s' % sample_path_imgs)\n\n if self.compute_nme and self.test_data in ['full', 'challenging', 'common', 'training', 'test']:\n print ('\\n Calculating NME on: ' + self.test_data + '...')\n pred_lms, lms_gt = self.predict_landmarks_in_batches(self.img_menpo_list, sess)\n nme = sess.run(self.nme_loss, {self.pred_lms_small: pred_lms, self.lms_small: lms_gt})\n print ('NME on ' + self.test_data + ': ' + str(nme))\n\n def train(self):\n # set random seed\n tf.set_random_seed(1234)\n np.random.seed(1234)\n # build a graph\n # add placeholders\n self.add_placeholders()\n # build model\n self.build_model()\n # create loss ops\n self.create_loss_ops()\n # create summary ops\n self.create_summary_ops()\n\n # create optimizer and training op\n global_step = tf.Variable(0, trainable=False)\n lr = tf.train.exponential_decay(self.learning_rate,global_step, self.step, self.gamma, staircase=True)\n if self.adam_optimizer:\n optimizer = tf.train.AdamOptimizer(lr)\n else:\n optimizer = tf.train.MomentumOptimizer(lr, self.momentum)\n\n train_op = optimizer.minimize(self.total_loss,global_step=global_step)\n\n # TODO: remove\n if self.approx_maps_gpu: # create heat-maps using tf convolution. 
use only with GPU support!\n self.build_hm_generator()\n\n with tf.Session(config=self.config) as sess:\n\n tf.global_variables_initializer().run()\n\n # load pre trained weights if load_pretrain==True\n if self.load_pretrain:\n print\n print('*** loading pre-trained weights from: '+self.pre_train_path+' ***')\n loader = tf.train.Saver()\n loader.restore(sess, self.pre_train_path)\n print(\"*** Model restore finished, current global step: %d\" % global_step.eval())\n\n # for fine-tuning, choose reset_training_op==True. when resuming training, reset_training_op==False\n if self.reset_training_op:\n print (\"resetting optimizer and global step\")\n opt_var_list = [optimizer.get_slot(var, name) for name in optimizer.get_slot_names()\n for var in tf.global_variables() if optimizer.get_slot(var, name) is not None]\n opt_var_list_init = tf.variables_initializer(opt_var_list)\n opt_var_list_init.run()\n sess.run(global_step.initializer)\n\n # create model saver and file writer\n summary_writer = tf.summary.FileWriter(logdir=self.save_log_path, graph=tf.get_default_graph())\n saver = tf.train.Saver()\n\n print\n print('*** Start Training ***')\n\n # initialize some variables before training loop\n resume_step = global_step.eval()\n num_train_images = len(self.img_menpo_list)\n batches_in_epoch = int(float(num_train_images) \/ float(self.batch_size))\n epoch = int(resume_step \/ batches_in_epoch)\n img_inds = self.epoch_inds_shuffle[epoch, :]\n p_texture = self.p_texture\n p_geom = self.p_geom\n artistic_reload = False\n basic_reload = True\n log_valid = True\n log_valid_images = True\n\n if self.allocate_once:\n batch_images = np.zeros([self.batch_size, self.image_size, self.image_size, self.c_dim]).astype('float32')\n batch_lms_small = np.zeros([self.batch_size, self.num_landmarks, 2]).astype('float32')\n batch_lms_small_pred = np.zeros([self.batch_size, self.num_landmarks, 2]).astype('float32')\n if self.approx_maps_gpu:\n batch_hm_base_small = np.zeros((self.batch_size * self.num_landmarks,\n int(self.image_size\/4), int(self.image_size\/4), 1)).astype('float32')\n else:\n batch_maps_small = np.zeros((self.batch_size, int(self.image_size\/4),\n int(self.image_size\/4), self.num_landmarks)).astype('float32')\n\n if self.approx_maps_cpu:\n gaussian_filt = create_gaussian_filter(sigma=self.sigma, win_mult=self.win_mult)\n\n for step in range(resume_step, self.train_iter):\n\n j = step % batches_in_epoch # j==0 if we finished an epoch\n\n if step > resume_step and j == 0: # if we finished an epoch and this isn't the first step\n epoch += 1\n img_inds = self.epoch_inds_shuffle[epoch, :] # get next shuffled image inds\n artistic_reload = True\n log_valid = True\n log_valid_images = True\n if self.use_epoch_data:\n epoch_dir = os.path.join(self.epoch_data_dir, str(epoch))\n self.img_menpo_list = load_menpo_image_list(\n self.img_path, train_crop_dir=epoch_dir, img_dir_ns=None, mode=self.mode,\n bb_dictionary=self.bb_dictionary, image_size=self.image_size, test_data=self.test_data,\n augment_basic=False, augment_texture=False, augment_geom=False)\n\n # add basic augmentation (if basic_start > 0 and augment_basic is True)\n if basic_reload and (epoch >= self.basic_start) and self.basic_start > 0 and self.augment_basic:\n basic_reload = False\n self.img_menpo_list = reload_menpo_image_list(\n self.img_path, self.train_crop_dir, self.img_dir_ns, self.mode, self.train_inds,\n image_size=self.image_size, augment_basic=self.augment_basic,\n augment_texture=(self.augment_texture and epoch >= 
self.artistic_start), p_texture=p_texture,\n augment_geom=(self.augment_geom and epoch >= self.artistic_start), p_geom=p_geom)\n print (\"****** adding basic augmentation ******\")\n\n # increase artistic augmentation probability\n if ((epoch % self.artistic_step == 0 and epoch >= self.artistic_start and self.artistic_step != -1)\n or (epoch == self.artistic_start)) and (self.augment_geom or self.augment_texture)\\\n and artistic_reload:\n\n artistic_reload = False\n\n if epoch == self.artistic_start:\n print (\"****** adding artistic augmentation ******\")\n print (\"****** augment_geom: \" + str(self.augment_geom) + \", p_geom: \" + str(p_geom) + \" ******\")\n print (\"****** augment_texture: \" + str(self.augment_texture) + \", p_texture: \" +\n str(p_texture) + \" ******\")\n\n if epoch % self.artistic_step == 0 and self.artistic_step != -1:\n print (\"****** increasing artistic augmentation probability ******\")\n\n p_geom = 1.- 0.95 ** (epoch\/self.artistic_step)\n p_texture = 1. - 0.95 ** (epoch\/self.artistic_step)\n\n print (\"****** augment_geom: \" + str(self.augment_geom) + \", p_geom: \" + str(p_geom) + \" ******\")\n print (\"****** augment_texture: \" + str(self.augment_texture) + \", p_texture: \" +\n str(p_texture) + \" ******\")\n\n self.img_menpo_list = reload_menpo_image_list(\n self.img_path, self.train_crop_dir, self.img_dir_ns, self.mode, self.train_inds,\n image_size=self.image_size, augment_basic=(self.augment_basic and epoch >= self.basic_start),\n augment_texture=self.augment_texture, p_texture=p_texture,\n augment_geom=self.augment_geom, p_geom=p_geom)\n\n # get batch images\n batch_inds = img_inds[j * self.batch_size:(j + 1) * self.batch_size]\n\n if self.approx_maps_gpu: # TODO: remove\n if self.allocate_once:\n load_images_landmarks_alloc_once(\n self.img_menpo_list, batch_inds, images=batch_images, landmarks_small=batch_lms_small,\n landmarks=None, primary=True, image_size=self.image_size, scale=self.scale)\n\n create_heat_maps_base_alloc_once(\n landmarks_small=batch_lms_small.astype(int), landmarks=None,\n hm_small=batch_hm_base_small, hm_large=None, primary=True, num_images=self.batch_size,\n num_landmarks=self.num_landmarks)\n else:\n batch_images, batch_lms_small = load_images_landmarks(\n self.img_menpo_list, batch_inds, primary=True, image_size=self.image_size, c_dim=self.c_dim,\n num_landmarks=self.num_landmarks, scale=self.scale)\n\n batch_hm_base_small = create_heat_maps_base(\n landmarks_small=batch_lms_small.astype(int), landmarks=None, primary=True,\n num_images=self.batch_size, image_size=self.image_size, num_landmarks=self.num_landmarks)\n\n batch_maps_small = sess.run(self.filt_hm_small, {self.sparse_hm_small: batch_hm_base_small})\n elif self.approx_maps_cpu:\n if self.allocate_once:\n load_images_landmarks_approx_maps_alloc_once(\n self.img_menpo_list, batch_inds, images=batch_images, maps_small=batch_maps_small,\n maps=None, landmarks=batch_lms_small, primary=True, image_size=self.image_size,\n num_landmarks=self.num_landmarks, scale=self.scale, gauss_filt_small=gaussian_filt,\n win_mult=self.win_mult, sigma=self.sigma, save_landmarks=self.compute_nme)\n else:\n batch_images, batch_maps_small, batch_lms_small = load_images_landmarks_approx_maps(\n self.img_menpo_list, batch_inds, primary=True, image_size=self.image_size,\n num_landmarks=self.num_landmarks, c_dim=self.c_dim, scale=self.scale,\n gauss_filt_small=gaussian_filt, win_mult=self.win_mult, sigma=self.sigma,\n save_landmarks=self.compute_nme)\n else:\n if self.allocate_once:\n 
load_images_landmarks_maps_alloc_once(\n self.img_menpo_list, batch_inds, images=batch_images, maps_small=batch_maps_small,\n landmarks=batch_lms_small, maps=None, primary=True, image_size=self.image_size,\n num_landmarks=self.num_landmarks, scale=self.scale, sigma=self.sigma,\n save_landmarks=self.compute_nme)\n else:\n batch_images, batch_maps_small, batch_lms_small = load_images_landmarks_maps(\n self.img_menpo_list, batch_inds, primary=True, image_size=self.image_size, c_dim=self.c_dim,\n num_landmarks=self.num_landmarks, scale=self.scale, sigma=self.sigma,\n save_landmarks=self.compute_nme)\n\n feed_dict_train = {self.images: batch_images, self.heatmaps_small: batch_maps_small}\n\n sess.run(train_op, feed_dict_train)\n\n # save to log and print status\n if step == resume_step or (step + 1) % self.print_every == 0:\n\n # log probability of artistic augmentation\n if self.log_artistic_augmentation_probs and (self.augment_geom or self.augment_texture):\n if self.augment_geom and not self.augment_texture:\n art_augment_prob_dict = {self.p_geom_log: p_geom}\n elif self.augment_texture and not self.augment_geom:\n art_augment_prob_dict = {self.p_texture_log: p_texture}\n else:\n art_augment_prob_dict = {self.p_texture_log: p_texture, self.p_geom_log: p_geom}\n\n # train data log\n if self.compute_nme:\n batch_maps_small_pred = sess.run(self.pred_hm_p, {self.images: batch_images})\n if self.allocate_once:\n batch_heat_maps_to_landmarks_alloc_once(\n batch_maps=batch_maps_small_pred, batch_landmarks=batch_lms_small_pred,\n batch_size=self.batch_size, image_size=int(self.image_size\/4),\n num_landmarks=self.num_landmarks)\n else:\n batch_lms_small_pred = batch_heat_maps_to_landmarks(\n batch_maps_small_pred, self.batch_size, image_size=int(self.image_size\/4),\n num_landmarks=self.num_landmarks)\n\n train_feed_dict_log = {\n self.images: batch_images, self.heatmaps_small: batch_maps_small,\n self.train_lms_small: batch_lms_small, self.train_pred_lms_small: batch_lms_small_pred}\n if self.log_artistic_augmentation_probs and (self.augment_geom or self.augment_texture):\n train_feed_dict_log.update(art_augment_prob_dict)\n\n summary, l_t, l_nme = sess.run(\n [self.batch_summary_op, self.total_loss, self.nme_loss], train_feed_dict_log)\n\n print (\n 'epoch: [%d] step: [%d\/%d] primary loss: [%.6f] NME: [%.6f]' % (\n epoch, step + 1, self.train_iter, l_t, l_nme))\n else:\n train_feed_dict_log = {self.images: batch_images, self.heatmaps_small: batch_maps_small}\n if self.log_artistic_augmentation_probs and (self.augment_geom or self.augment_texture):\n train_feed_dict_log.update(art_augment_prob_dict)\n\n summary, l_t = sess.run(\n [self.batch_summary_op, self.total_loss], train_feed_dict_log)\n\n print (\n 'epoch: [%d] step: [%d\/%d] primary loss: [%.6f]' % (\n epoch, step + 1, self.train_iter, l_t))\n\n summary_writer.add_summary(summary, step)\n\n # valid data log\n if self.valid_size > 0 and (log_valid and epoch % self.log_valid_every == 0)\\\n and self.compute_nme:\n log_valid = False\n\n if self.allocate_once:\n self.predict_landmarks_in_batches_loaded(self.valid_images_loaded, sess)\n valid_feed_dict_log = {\n self.valid_lms_small: self.valid_landmarks_loaded,\n self.valid_pred_lms_small: self.valid_landmarks_pred}\n else:\n valid_pred_lms = self.predict_landmarks_in_batches_loaded(self.valid_images_loaded, sess)\n valid_feed_dict_log = {\n self.valid_lms_small: self.valid_landmarks_loaded,\n self.valid_pred_lms_small: valid_pred_lms}\n\n v_summary,l_v_nme = sess.run([self.valid_summary, 
self.valid_nme_loss], valid_feed_dict_log)\n summary_writer.add_summary(v_summary, step)\n\n print (\n 'epoch: [%d] step: [%d\/%d] valid NME: [%.6f]' % (\n epoch, step + 1, self.train_iter, l_v_nme))\n\n # save model\n if (step + 1) % self.save_every == 0:\n saver.save(sess, os.path.join(self.save_model_path, 'deep_heatmaps'), global_step=step + 1)\n print ('model\/deep-heatmaps-%d saved' % (step + 1))\n\n # save images. TODO: add option to allocate once\n if step == resume_step or (step + 1) % self.sample_every == 0:\n\n if not self.compute_nme:\n batch_maps_small_pred = sess.run(self.pred_hm_p, {self.images: batch_images})\n batch_lms_small_pred=None\n\n merged_img = merge_images_landmarks_maps_gt(\n batch_images.copy(), batch_maps_small_pred, batch_maps_small,\n landmarks=batch_lms_small_pred, image_size=self.image_size,\n num_landmarks=self.num_landmarks, num_samples=self.sample_grid, scale=self.scale,\n circle_size=0, fast=self.fast_img_gen)\n\n if self.sample_per_channel:\n map_per_channel = map_comapre_channels(\n batch_images.copy(), batch_maps_small_pred,batch_maps_small,\n image_size=int(self.image_size\/4), num_landmarks=self.num_landmarks, scale=self.scale)\n\n if self.sample_to_log:\n if self.sample_per_channel:\n summary_img = sess.run(\n self.img_summary, {self.log_image_map: np.expand_dims(merged_img, 0),\n self.log_map_channels: np.expand_dims(map_per_channel, 0)})\n else:\n summary_img = sess.run(\n self.img_summary, {self.log_image_map: np.expand_dims(merged_img, 0)})\n\n summary_writer.add_summary(summary_img, step)\n\n if (self.valid_size >= self.sample_grid) and self.save_valid_images and\\\n (log_valid_images and epoch % self.log_valid_every == 0):\n log_valid_images=False\n\n batch_maps_small_pred_val = sess.run(\n self.pred_hm_p, {self.images: self.valid_images_loaded[:self.sample_grid]})\n\n merged_img = merge_images_landmarks_maps_gt(\n self.valid_images_loaded[:self.sample_grid].copy(), batch_maps_small_pred_val,\n self.valid_gt_maps_loaded, image_size=self.image_size,\n num_landmarks=self.num_landmarks, num_samples=self.sample_grid,\n scale=self.scale, circle_size=0, fast=self.fast_img_gen)\n\n if self.sample_per_channel:\n map_per_channel = map_comapre_channels(\n self.valid_images_loaded[:self.sample_grid].copy(), batch_maps_small_pred_val,\n self.valid_gt_maps_loaded, image_size=int(self.image_size\/4),\n num_landmarks=self.num_landmarks, scale=self.scale)\n\n summary_img = sess.run(\n self.img_summary_valid, {self.log_image_map: np.expand_dims(merged_img, 0),\n self.log_map_channels: np.expand_dims(map_per_channel, 0)})\n else:\n summary_img = sess.run(\n self.img_summary_valid, {self.log_image_map: np.expand_dims(merged_img, 0)})\n summary_writer.add_summary(summary_img, step)\n\n else:\n sample_path_imgs = os.path.join(self.save_sample_path,'epoch-%d-train-iter-%d-1.png'\n % (epoch, step + 1))\n scipy.misc.imsave(sample_path_imgs, merged_img)\n if self.sample_per_channel:\n sample_path_ch_maps = os.path.join(self.save_sample_path, 'epoch-%d-train-iter-%d-3.png'\n % (epoch, step + 1))\n scipy.misc.imsave(sample_path_ch_maps, map_per_channel)\n\n print('*** Finished Training ***')\n\n def get_maps_image(self, test_image, reuse=None):\n self.add_placeholders()\n # build model\n pred_hm_p = self.heatmaps_network(self.images,reuse=reuse)\n\n with tf.Session(config=self.config) as sess:\n # load trained parameters\n saver = tf.train.Saver()\n saver.restore(sess, self.test_model_path)\n _, model_name = os.path.split(self.test_model_path)\n\n test_image = 
test_image.pixels_with_channels_at_back().astype('float32')\n if self.scale is '255':\n test_image *= 255\n elif self.scale is '0':\n test_image = 2 * test_image - 1\n\n test_image_map = sess.run(pred_hm_p, {self.images: np.expand_dims(test_image,0)})\n\n return test_image_map\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_126","text":"import random\nimport numpy as np\nfrom statistics import mean\n\n# Using \"random\" to create a dataset\ndef create_dataset(hm, variance, step=2, correlation=False):\n val = 1\n sy = []\n for i in range(hm):\n y = val + random.randrange(-variance, variance)\n sy.append(y)\n if correlation and correlation == 'pos':\n val += step\n elif correlation and correlation == 'neg':\n val -= step\n\n sx = [i for i in range(len(sy))]\n\n return np.array(sx, dtype=np.float64), np.array(sy, dtype=np.float64)\n\n\n# finding best fit slope and intercept of dataset\ndef best_fit_slope_and_intercept(sx, sy):\n m = (((mean(sx) * mean(sy)) - mean(sx * sy)) \/\n ((mean(sx) * mean(sx)) - mean(sx * sx)))\n\n b = mean(sy) - m * mean(sx)\n\n return m, b\n\n\n# dataset - coefficient of determination\ndef coefficient_of_determination(sy_orig, sy_line):\n y_mean_line = [mean(sy_orig) for y in sy_orig]\n\n squared_error_regr = sum((sy_line - sy_orig) * (sy_line - sy_orig))\n squared_error_y_mean = sum((y_mean_line - sy_orig) * (y_mean_line - sy_orig))\n\n print(squared_error_regr)\n print(squared_error_y_mean)\n\n r_squared = 1 - (squared_error_regr \/ squared_error_y_mean)\n\n return r_squared\n\n\ndef get_result():\n sx, sy = create_dataset(20, 10, 3)\n m, b = best_fit_slope_and_intercept(sx, sy)\n regression_line = [(m * x) + b for x in sx]\n r_squared = coefficient_of_determination(sy, regression_line)\n print('Rsquared result: ')\n return r_squared"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_127","text":"import os\nimport pandas as pd\nimport numpy as np\n\nimport tsfresh.feature_extraction.feature_calculators as fc\n\nfrom scipy.fftpack import fft\nfrom notebook.pca_reduction import PCAReduction\nfrom notebook.utils import general_normalization, universal_normalization, trim_or_pad_data,\tfeature_matrix_extractor\nfrom notebook.utils import modelAndSave\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn import svm\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import classification_report\n\n\nTRIM_DATA_SIZE_BUY = 30\nGESTURE = 'buy'\n\ndef feature_vector_buy_ind(trimmed_data, column_name, isBuy=False, test=False):\n\n r = trimmed_data[column_name]\n if column_name == 'rightWrist_x':\n normRawColumn = universal_normalization(r, trimmed_data, x_norm=True)\n else:\n normRawColumn = universal_normalization(r, trimmed_data, x_norm=False)\n normRawColumn = general_normalization(normRawColumn)\n\n diffNormRawData = np.diff(normRawColumn)\n\n zeroCrossingArray = np.array([])\n maxDiffArray = np.array([])\n\n # Fast Fourier Transform\n fftArray = np.array([])\n fftVal = []\n fft_coefficients = fft(diffNormRawData, n=6)[1:]\n fft_coefficients_real = [value.real for value in fft_coefficients]\n fftVal += fft_coefficients_real\n fftArray = np.append(fftArray, fftVal)\n\n # Windowed Mean for each second of the video\n windowedVal = np.array([])\n for i in range(0,diffNormRawData.shape[0],30):\n windowedVal = np.append(windowedVal, fc.mean(diffNormRawData[i:i+30]))\n\n # Other features\n if diffNormRawData[0] > 0:\n initSign = 1\n 
else:\n initSign = 0\n\n windowSize = 5\n\n for x in range(1, len(diffNormRawData)):\n\n if diffNormRawData[x] > 0:\n newSign = 1\n else:\n newSign = 0\n\n if initSign != newSign:\n zeroCrossingArray = np.append(zeroCrossingArray, x)\n initSign = newSign\n maxIndex = np.minimum(len(diffNormRawData), x + windowSize)\n minIndex = np.maximum(0, x - windowSize)\n\n maxVal = np.amax(diffNormRawData[minIndex:maxIndex])\n minVal = np.amin(diffNormRawData[minIndex:maxIndex])\n\n maxDiffArray = np.append(maxDiffArray, (maxVal - minVal))\n\n index = np.argsort(-maxDiffArray)\n\n featureVector = np.array([])\n featureVector = np.append(featureVector, fftArray)\n featureVector = np.append(featureVector, windowedVal)\n featureVector = np.append(featureVector, maxDiffArray[index[0:5]])\n\n if TRIM_DATA_SIZE_BUY - 1 > featureVector.shape[0]:\n featureVector = np.pad(featureVector, (0, TRIM_DATA_SIZE_BUY - featureVector.shape[0] - 1), 'constant')\n featureVector = featureVector[:TRIM_DATA_SIZE_BUY - 1]\n if not test:\n if isBuy:\n featureVector = np.append(featureVector, 1)\n else:\n featureVector = np.append(featureVector, 0)\n return featureVector\n\n\ndef feature_vector_buy(data, isBuy=False, test=False):\n trimmed_data = trim_or_pad_data(data, TRIM_DATA_SIZE_BUY)\n featureVector = feature_vector_buy_ind(trimmed_data, 'rightWrist_x', isBuy, test=True)\n featureVector = np.append(featureVector, feature_vector_buy_ind(trimmed_data, 'rightWrist_y', isBuy, test))\n\n return featureVector\n\n\ndef modeling_buy(dirPath):\n listDir = ['buy']\n featureMatrixBuy = feature_matrix_extractor(dirPath, listDir, feature_vector_buy, pos_sample=True)\n buy_df = pd.DataFrame(featureMatrixBuy)\n\n # Number of negative samples per folder needed to balance the dataset with positive and negative samples\n count_neg_samples = buy_df.shape[0] \/ 5\n listDir = ['communicate', 'really', 'hope', 'mother', 'fun']\n featureMatrixNotBuy = feature_matrix_extractor(dirPath, listDir, feature_vector_buy, pos_sample=False,\n th=count_neg_samples)\n not_buy_df = pd.DataFrame(featureMatrixNotBuy)\n\n final_df = pd.concat([buy_df, not_buy_df], ignore_index=True)\n shuffled_df = final_df.sample(frac=1, random_state=42).reset_index(drop=True)\n labelVector = shuffled_df.pop(shuffled_df.shape[1]-1)\n labelVector = labelVector.astype(int).tolist()\n\n final_df, pca, minmax = PCAReduction(shuffled_df)\n\n modelAndSave(final_df, labelVector, GESTURE, pca, minmax)\n\n # clf = svm.SVC(random_state=42, probability=True)\n # clf = svm.SVC(random_state=42)\n clf = LogisticRegression(random_state=42)\n # clf = MLPClassifier(max_iter=5000, random_state=42)\n # clf = GaussianNB()\n\n\n # 70:30 Train-Test Split\n train_size = int(final_df.shape[0] * 70 \/ 100)\n clf.fit(final_df.iloc[:train_size, :], labelVector[:train_size])\n pred_labels = clf.predict(final_df.iloc[train_size:, :])\n true_labels = labelVector[train_size:]\n print(classification_report(true_labels, pred_labels))\n\n\n# TEST Function:\n# modeling_buy(os.path.abspath('..\/JSON'))\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_116","text":"#!\/usr\/bin\/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 10 14:19:04 2020\n\n@author: corkep\n\n\"\"\"\n\n\nimport numpy as np\nimport numpy.testing as nt\nimport unittest\nfrom math import pi\nimport math\nfrom scipy.linalg import logm, expm\n\nfrom spatialmath.base.transforms3d import *\nfrom spatialmath.base.numeric import *\nfrom spatialmath.base.transformsNd import isR, t2r, r2t, 
rt2tr\n\nimport matplotlib.pyplot as plt\n\n\nclass TestVelocity(unittest.TestCase):\n def test_numjac(self):\n\n # test on algebraic example\n def f(X):\n x = X[0]\n y = X[1]\n return np.r_[x, x ** 2, x * y ** 2]\n\n nt.assert_array_almost_equal(\n numjac(f, [2, 3]),\n np.array([[1, 0], [4, 0], [9, 12]]), # x, 0 # 2x, 0 # y^2, 2xy\n )\n\n # test on rotation matrix\n nt.assert_array_almost_equal(numjac(rotx, [0], SO=3), np.array([[1, 0, 0]]).T)\n\n nt.assert_array_almost_equal(\n numjac(rotx, [pi \/ 2], SO=3), np.array([[1, 0, 0]]).T\n )\n\n nt.assert_array_almost_equal(numjac(roty, [0], SO=3), np.array([[0, 1, 0]]).T)\n\n nt.assert_array_almost_equal(numjac(rotz, [0], SO=3), np.array([[0, 0, 1]]).T)\n\n def test_rpy2jac(self):\n\n # ZYX order\n gamma = [0, 0, 0]\n nt.assert_array_almost_equal(rpy2jac(gamma), numjac(rpy2r, gamma, SO=3))\n gamma = [pi \/ 4, 0, -pi \/ 4]\n nt.assert_array_almost_equal(rpy2jac(gamma), numjac(rpy2r, gamma, SO=3))\n gamma = [-pi \/ 4, pi \/ 2, pi \/ 4]\n nt.assert_array_almost_equal(rpy2jac(gamma), numjac(rpy2r, gamma, SO=3))\n\n # XYZ order\n f = lambda gamma: rpy2r(gamma, order=\"xyz\")\n gamma = [0, 0, 0]\n nt.assert_array_almost_equal(\n rpy2jac(gamma, order=\"xyz\"), numjac(f, gamma, SO=3)\n )\n f = lambda gamma: rpy2r(gamma, order=\"xyz\")\n gamma = [pi \/ 4, 0, -pi \/ 4]\n nt.assert_array_almost_equal(\n rpy2jac(gamma, order=\"xyz\"), numjac(f, gamma, SO=3)\n )\n f = lambda gamma: rpy2r(gamma, order=\"xyz\")\n gamma = [-pi \/ 4, pi \/ 2, pi \/ 4]\n nt.assert_array_almost_equal(\n rpy2jac(gamma, order=\"xyz\"), numjac(f, gamma, SO=3)\n )\n\n def test_eul2jac(self):\n\n # ZYX order\n gamma = [0, 0, 0]\n nt.assert_array_almost_equal(eul2jac(gamma), numjac(eul2r, gamma, SO=3))\n gamma = [pi \/ 4, 0, -pi \/ 4]\n nt.assert_array_almost_equal(eul2jac(gamma), numjac(eul2r, gamma, SO=3))\n gamma = [-pi \/ 4, pi \/ 2, pi \/ 4]\n nt.assert_array_almost_equal(eul2jac(gamma), numjac(eul2r, gamma, SO=3))\n\n def test_exp2jac(self):\n\n # ZYX order\n gamma = np.r_[1, 0, 0]\n nt.assert_array_almost_equal(exp2jac(gamma), numjac(exp2r, gamma, SO=3))\n print(numjac(exp2r, gamma, SO=3))\n\n gamma = np.r_[0.2, 0.3, 0.4]\n nt.assert_array_almost_equal(exp2jac(gamma), numjac(exp2r, gamma, SO=3))\n gamma = np.r_[0, 0, 0]\n nt.assert_array_almost_equal(exp2jac(gamma), numjac(exp2r, gamma, SO=3))\n\n def test_rot2jac(self):\n\n gamma = [0.1, 0.2, 0.3]\n R = rpy2r(gamma, order=\"zyx\")\n A = rot2jac(R, representation=\"rpy\/zyx\")\n self.assertEqual(A.shape, (6, 6))\n A3 = np.linalg.inv(A[3:6, 3:6])\n nt.assert_array_almost_equal(A3, rpy2jac(gamma, order=\"zyx\"))\n\n gamma = [0.1, 0.2, 0.3]\n R = rpy2r(gamma, order=\"xyz\")\n A = rot2jac(R, representation=\"rpy\/xyz\")\n self.assertEqual(A.shape, (6, 6))\n A3 = np.linalg.inv(A[3:6, 3:6])\n nt.assert_array_almost_equal(A3, rpy2jac(gamma, order=\"xyz\"))\n\n gamma = [0.1, 0.2, 0.3]\n R = eul2r(gamma)\n A = rot2jac(R, representation=\"eul\")\n self.assertEqual(A.shape, (6, 6))\n A3 = np.linalg.inv(A[3:6, 3:6])\n nt.assert_array_almost_equal(A3, eul2jac(gamma))\n\n gamma = [0.1, 0.2, 0.3]\n R = trexp(gamma)\n A = rot2jac(R, representation=\"exp\")\n self.assertEqual(A.shape, (6, 6))\n A3 = np.linalg.inv(A[3:6, 3:6])\n nt.assert_array_almost_equal(A3, exp2jac(gamma))\n\n def test_angvelxform(self):\n\n gamma = [0.1, 0.2, 0.3]\n A = angvelxform(gamma, full=False, representation=\"rpy\/zyx\")\n Ai = angvelxform(gamma, full=False, inverse=True, representation=\"rpy\/zyx\")\n nt.assert_array_almost_equal(Ai, rpy2jac(gamma, 
order=\"zyx\"))\n nt.assert_array_almost_equal(A @ Ai, np.eye(3))\n\n gamma = [0.1, 0.2, 0.3]\n A = angvelxform(gamma, full=False, representation=\"rpy\/xyz\")\n Ai = angvelxform(gamma, full=False, inverse=True, representation=\"rpy\/xyz\")\n nt.assert_array_almost_equal(Ai, rpy2jac(gamma, order=\"xyz\"))\n nt.assert_array_almost_equal(A @ Ai, np.eye(3))\n\n gamma = [0.1, 0.2, 0.3]\n A = angvelxform(gamma, full=False, representation=\"eul\")\n Ai = angvelxform(gamma, full=False, inverse=True, representation=\"eul\")\n nt.assert_array_almost_equal(Ai, eul2jac(gamma))\n nt.assert_array_almost_equal(A @ Ai, np.eye(3))\n\n gamma = [0.1, 0.2, 0.3]\n A = angvelxform(gamma, full=False, representation=\"exp\")\n Ai = angvelxform(gamma, full=False, inverse=True, representation=\"exp\")\n nt.assert_array_almost_equal(Ai, exp2jac(gamma))\n nt.assert_array_almost_equal(A @ Ai, np.eye(3))\n\n # def test_angvelxform_dot(self):\n\n # gamma = [0.1, 0.2, 0.3]\n # options = dict(full=False, representation='rpy\/zyx')\n\n # f = lambda gamma: angvelxform(gamma, options)\n\n # nt.assert_array_almost_equal(angvelxform_dot(gamma, options), numjac(f))\n\n\n# ---------------------------------------------------------------------------------------#\nif __name__ == \"__main__\":\n\n unittest.main()\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_128","text":"#!\/usr\/bin\/env python\n\n\"\"\"\nUsage:\n>> server.py --time 60 --batch 64\n>> .\/make_gif.py transition --name transition --time 15 --batch 64\n\"\"\"\nimport os\nos.environ[\"KERAS_BACKEND\"] = \"tensorflow\"\nimport time\nimport cv2\nfrom keras import callbacks as cbks\nfrom keras import backend as K\nimport logging\nimport tensorflow as tf\nimport numpy as np\nfrom scipy.misc import imsave, imresize\nfrom tqdm import *\n\nfrom server import client_generator\nmixtures = 1\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(description='MiniBatch server')\n parser.add_argument('model', type=str, default=\"transition\", help='Model definitnion file')\n parser.add_argument('--name', type=str, default=\"transition\", help='Name of the model.')\n parser.add_argument('--host', type=str, default=\"localhost\", help='Data server ip address.')\n parser.add_argument('--port', type=int, default=5557, help='Port of server.')\n parser.add_argument('--time', type=int, default=1, help='How many temporal frames in a single input.')\n parser.add_argument('--batch', type=int, default=256, help='Batch size.')\n parser.add_argument('--epoch', type=int, default=200, help='Number of epochs.')\n parser.add_argument('--gpu', type=int, default=0, help='Which gpu to use')\n parser.add_argument('--loadweights', dest='loadweights', action='store_true', help='Start from checkpoint.')\n parser.set_defaults(skipvalidate=False)\n parser.set_defaults(loadweights=False)\n args = parser.parse_args()\n\n MODEL_NAME = args.model\n logging.info(\"Importing get_model from {}\".format(args.model))\n exec(\"from models.\"+MODEL_NAME+\" import get_model, load, save\")\n # try to import `cleanup` from model file\n try:\n exec(\"from models.\"+MODEL_NAME+\" import cleanup\")\n except:\n cleanup = old_cleanup\n\n model_code = open('models\/'+MODEL_NAME+'.py').read()\n\n with tf.Session() as sess:\n K.set_session(sess)\n g_train, d_train, sampler, saver, loader, [G, E, T] = get_model(sess=sess, name=args.name, batch_size=args.batch, gpu=args.gpu)\n\n print(\"loading weights...\")\n 
G.load_weights(\".\/results_autoencoder\/G_weights.keras\".format(args.name))\n E.load_weights(\".\/results_autoencoder\/E_weights.keras\".format(args.name))\n checkpoint_dir = '.\/results_' + args.name\n T.load_weights(checkpoint_dir+\"\/T_weights.keras\")\n\n if not os.path.exists(\".\/video_\"+args.name):\n os.makedirs(\".\/video_\"+args.name)\n\n # get data\n data = client_generator(hwm=20, host=\"localhost\", port=5557)\n X = next(data)[0] # [:, ::2]\n sh = X.shape\n X = X.reshape((-1, 3, 160, 320))\n X = np.asarray([cv2.resize(x.transpose(1, 2, 0), (160, 80)) for x in X])\n X = X\/127.5 - 1.\n x = X.reshape((sh[0], args.time, 80, 160, 3))\n\n # estimate frames\n z_dim = 512\n I = E.input\n E_out = E(I)\n O = G.input\n G_out = G(O)\n print \"Sampling...\"\n for i in tqdm(range(128)):\n x = x.reshape((-1, 80, 160, 3))\n # code = E.predict(x, batch_size=args.batch*args.time)[0]\n code = sess.run([E_out[0]], feed_dict={I: x, K.learning_phase(): 1})[0]\n code = code.reshape((args.batch, args.time, z_dim))\n inp = code[:, :5] # context is based on the first 5 frames only\n outs = T.predict(inp, batch_size=args.batch)\n imgs = sess.run([G_out], feed_dict={O: outs.reshape((-1, z_dim)), K.learning_phase(): 1})[0]\n # imgs = G.predict(outs[:, 0], batch_size=args.batch)\n x = x.reshape((args.batch, args.time, 80, 160, 3))\n x[0, :-1] = x[0, 1:]\n x[0, -1] = imgs[0]\n imsave(\"video_\"+args.name+\"\/%03d.png\" % i, imresize(imgs[0], (160, 320)))\n\n cmd = \"ffmpeg -y -i .\/video_\"+args.name+\"\/%03d.png .\/video_\"+args.name+\"\/output.gif -vf fps=1\"\n print(cmd)\n os.system(cmd)\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_129","text":"vafaei-ar\/pymce\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport numpy as np\nimport pylab as plt\nfrom pandas import DataFrame\nfrom .splitters import Splitter\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.neighbors import VALID_METRICS\nfrom sklearn.ensemble import IsolationForest\nfrom sklearn.metrics import matthews_corrcoef\nfrom sklearn.neighbors import LocalOutlierFactor\nfrom scipy.spatial.distance import braycurtis,canberra,chebyshev,cityblock\nfrom scipy.spatial.distance import correlation,minkowski,wminkowski\n\nfrom .utils import COLORS\n\nall_metrics = ['cityblock','L2','L4','braycurtis',\n 'canberra','chebyshev','correlation']\n\ndef plot_main_shapes(X,labels,cl=16):\n\n ll = int(np.sqrt(len(np.unique(labels))))\n l1 = ll+1\n l2 = len(np.unique(labels))\/\/l1+1\n fig,axs = plt.subplots(l2,l1,figsize=(4*l1,4*l2))\n\n [axi.set_xticks([]) for axi in axs.ravel()]\n [axi.set_yticks([]) for axi in axs.ravel()]\n\n clrs = 4*COLORS\n for i in np.unique(labels).astype(int):\n X0 = X[labels==i]\n try:\n ax = axs[i\/\/l1,i%l1]\n except:\n ax = axs[i\/\/l1]\n ax.set_title(X0.shape[0],y=0.9)\n ax.plot(np.percentile(X0,50,axis=0),color=clrs[i])\n ax.plot(np.mean(X0,axis=0),ls='-.',color='k')\n ax.fill_between(np.arange(X0.shape[1]),\n np.percentile(X0,cl,axis=0),\n np.percentile(X0,100-cl,axis=0),\n color=clrs[i],\n alpha=0.5)\n\n plt.subplots_adjust(wspace=0.01,hspace=0.01)\n\n\ndef get_main_shapes(X,labels,trsh=0.1):\n main_shapes = []\n for i in np.unique(labels).astype(int):\n filt = labels==i\n if np.mean(filt)auc_max:\n auc_max = auc_test\n df['method'][0] = f_name[i]\n df['MCC'][0] = MCC(T_o, outliers)\n df['AUC'][0] = auc_max\n df['RWS'][0] = rws_score(T_o, outliers)\n\n df['method'][1] = f_name[3]\n isof = f_f[3]\n 
isof.fit(X_train)\n scores_pred = isof.decision_function(X_test)\n outliers = scores_pred.max()-scores_pred\n df['MCC'][1] = MCC(T_o, outliers)\n df['AUC'][1] = roc_auc_score(T_o, outliers)\n df['RWS'][1] = rws_score(T_o, outliers)\n\n return df\n \ndef d_lof(X_seen,X_unseen=None,n_neighbors=20,algorithm='auto',metric='minkowski'):\n lof = LocalOutlierFactor(n_neighbors = n_neighbors,\n algorithm = algorithm,\n metric = metric,\n novelty=not (X_unseen is None),\n n_jobs=-1)\n lof.fit(X_seen)\n if X_unseen is None:\n return -lof.negative_outlier_factor_\n else:\n return -lof.score_samples(X_unseen)\n\ndef grid_run_lof(X_seen,y_seen=None,\n X_unseen=None,y_unseen=None,\n n_neighbors = [5,20,35],\n algorithms = ['ball_tree', 'kd_tree', 'brute'],\n metrics=None):\n ''' \n This function is able to deal with three modes:\n 1- Unsupervised outlier detection \n 2- Semi-supervised outlier detection\n 3- Novelty detection \n ''' \n \n novelty = 0 \n semisupervised = 0 \n if (np.all(y_seen==0)) | (y_seen is None):\n novelty = 1\n X_unseen_p = X_unseen\n y_seen = y_unseen\n print('Novelty detection mode.')\n conds = (X_unseen is not None and y_unseen is not None)\n assert conds,'In novelty detection you need to input the unseen data sets.'\n elif y_unseen is not None and X_unseen is not None:\n semisupervised = 1\n# print('Semi-supervised option is not available for novelty detection.')\n X_unseen_p = None\n print('Semi-supervised outlier detection mode.')\n elif X_seen is not None:\n X_unseen_p = X_unseen\n print('Unsupervised outlier detection mode.')\n else:\n assert 0, 'The configuration is not recognized!'\n \n aucs,mccs,rwss,conf = [],[],[],[]\n\n for nn in n_neighbors:\n for al in algorithms:\n if metrics is None:\n metrics = VALID_METRICS[al]\n for mt in metrics:\n try:\n outliers = d_lof(X_seen=X_seen,X_unseen=X_unseen_p,n_neighbors=nn,algorithm=al,metric=mt)\n conf.append([nn,al,mt])\n aucs.append(roc_auc_score(y_seen, outliers))\n mccs.append(MCC(y_seen, outliers))\n rwss.append(rws_score(y_seen, outliers))\n except:\n pass\n\n \n if semisupervised:\n nn,al,mt = conf[np.argmax(aucs)]\n outliers = d_lof(X_seen=X_unseen,n_neighbors=nn,algorithm=al,metric=mt)\n auc = roc_auc_score(y_unseen, outliers)\n \n nn,al,mt = conf[np.argmax(mccs)]\n outliers = d_lof(X_seen=X_unseen,n_neighbors=nn,algorithm=al,metric=mt)\n mcc = roc_auc_score(y_unseen, outliers)\n \n nn,al,mt = conf[np.argmax(rwss)]\n outliers = d_lof(X_seen=X_unseen,n_neighbors=nn,algorithm=al,metric=mt)\n rws = roc_auc_score(y_unseen, outliers)\n \n return auc,mcc,rws\n \n else:\n return np.array(aucs),np.array(mccs),np.array(rwss),np.array(conf)\n\ndef d_iforest(X_seen,X_unseen=None,n_estimators=100,max_features=1.0,bootstrap=False):\n isof = IsolationForest(n_estimators=n_estimators,\n max_features=max_features,\n bootstrap=bootstrap,\n behaviour=\"new\",\n n_jobs=-1)\n isof.fit(X_seen)\n if X_unseen is None:\n scores_pred = isof.decision_function(X_seen)\n else:\n scores_pred = isof.decision_function(X_unseen)\n return scores_pred.max()-scores_pred\n\ndef grid_run_iforest(X_seen,y_seen,\n X_unseen=None,y_unseen=None,\n n_estimators= [50,100,150],\n max_features= [0.2,0.5,1.0],\n bootstrap=[False,True]):\n \n ''' \n This function is able to deal with three modes:\n 1- Unsupervised outlier detection \n 2- Semi-supervised outlier detection\n 3- Novelty detection \n ''' \n \n novelty = 0 \n semisupervised = 0 \n if (np.all(y_seen==0)) | (y_seen is None):\n novelty = 1\n X_unseen_p = X_unseen\n y_seen = y_unseen\n print('Novelty 
detection mode.')\n conds = (X_unseen is not None and y_unseen is not None)\n assert conds,'In novelty detection you need to input the unseen data sets.'\n elif y_unseen is not None and X_unseen is not None:\n semisupervised = 1\n# print('Semi-supervised option is not available for novelty detection.')\n X_unseen_p = None\n print('Semi-supervised outlier detection mode.')\n elif X_seen is not None:\n X_unseen_p = X_unseen\n print('Unsupervised outlier detection mode.')\n else:\n assert 0, 'The configuration is not recognized!'\n\n aucs,mccs,rwss,conf = [],[],[],[]\n\n for ns in n_estimators:\n for mf in max_features:\n for bs in bootstrap:\n conf.append([ns,mf,bs])\n outliers = d_iforest(X_seen,X_unseen_p,n_estimators=ns,max_features=mf,bootstrap=bs)\n aucs.append(roc_auc_score(y_seen, outliers))\n mccs.append(MCC(y_seen, outliers))\n rwss.append(rws_score(y_seen, outliers))\n \n if semisupervised:\n ns,mf,bs = conf[np.argmax(aucs)]\n outliers = d_iforest(X_unseen,n_estimators=ns,max_features=mf,bootstrap=bs)\n auc = roc_auc_score(y_unseen, outliers)\n \n ns,mf,bs = conf[np.argmax(mccs)]\n outliers = d_iforest(X_unseen,n_estimators=ns,max_features=mf,bootstrap=bs)\n mcc = roc_auc_score(y_unseen, outliers)\n \n ns,mf,bs = conf[np.argmax(rwss)]\n outliers = d_iforest(X_unseen,n_estimators=ns,max_features=mf,bootstrap=bs)\n rws = roc_auc_score(y_unseen, outliers)\n \n return auc,mcc,rws\n \n else:\n return np.array(aucs),np.array(mccs),np.array(rwss),np.array(conf)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_130","text":"DocVaughan\/CRAWLAB-Code-Snippets\n#! \/usr\/bin\/env python \n\n##########################################################################################\n# trajectory_animation.py\n#\n# Script to a demonstrate a simple animation of a trajectory showing a vector\n# force field \n#\n# NOTE: Plotting is set up for output, not viewing on screen.\n# So, it will likely be ugly on screen. The saved PDFs should look\n# better.\n# \n# Created: 12\/6\/13 \n# - \n# - \n# - http:\/\/www.ucs.louisiana.edu\/~jev9637\n#\n# Modified:\n# * 06\/26\/20 - JEV \n# - updated for new animation API\n# - styling updated to match CRAWLAB norms\n#\n##########################################################################################\n\n\nimport numpy as np\nfrom scipy.integrate import odeint\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n\ndef eq_of_motion(states, t, p):\n \"\"\"\n Defines the differential equations for the coupled spring-mass system.\n\n Arguments:\n states : vector of the state variables:\n t : time\n p : vector of the parameters:\n \"\"\"\n x, x_dot, y, y_dot = states\n m, kp, kd, L, StartTime, Fmax, FcurAmp, FcurAngle = p\n\n # Create system diff eq\n sysODE = [x_dot,\n 1\/m * (np.dot(F(t, states, p), [1, 0]) + np.dot(Fcur(t, p), [1, 0])),\n y_dot,\n 1\/m * (np.dot(F(t, states, p), [0, 1]) + np.dot(Fcur(t, p), [0, 1]))]\n \n return sysODE\n \n \ndef F(t, states, p):\n \"\"\" \n Defines the force\/control input to the system. Is limited by Fmax\n \n Note: This is not tuned for best performance. 
It's effectively placeholder.\n \"\"\"\n x, x_dot, y, y_dot = states\n m, kp, kd, L, StartTime, Fmax, FcurAmp, FcurAngle = p\n\n Lx, Ly = L\n endpoint = des_pos(t,L,StartTime)\n xd = endpoint[0]\n yd = endpoint[1]\n \n # We're using the non-derivative kick version of the PD controller\n Fx = kp * (xd - x) + kd * (-x_dot)\n Fy = kp * (yd - y) + kd * (-y_dot)\n \n # Limit the force to within symmetric limits defined by Umax\n # There are more clever\/faster ways to do this, but this is most easiest \n # to understand.\n F_amp = np.sqrt(Fx**2 + Fy**2)\n F_ang = np.arctan2(Fy, Fx)\n \n if F_amp > Fmax:\n F_amp = Fmax\n \n Fx = F_amp * np.cos(F_ang)\n Fy = F_amp * np.sin(F_ang)\n \n F = np.array([Fx, Fy])\n \n return F\n \n\ndef Fcur(t,p):\n \"\"\"\n Defines the current disturbance input to the system\n \"\"\"\n \n # Unpack variables\n m, kp, kd, L, StartTime, Fmax, FcurAmp, FcurAngle = p\n \n Current_Amplitude = FcurAmp\n cur_angle = FcurAngle\n \n Fcur = Current_Amplitude * np.asarray([np.cos(cur_angle), np.sin(cur_angle)])\n \n return Fcur\n \n\ndef des_pos(t, L, StartTime):\n \"\"\"\n defines the desired trajectory\n \"\"\"\n \n Lx, Ly = L # unpack the two desired end coords\n \n xd = 5 * np.cos(0.1 * 2 * np.pi * t)\n yd = 5 * np.sin(0.1 * 2 * np.pi * t)\n \n des_pos = np.array([xd, yd])\n \n return des_pos\n\n\n#---- Main script -----------------------------------------------------------------------\n\n# System Parameters\nm = 1.0 # mass\nkp = 40.0\nkd = 35.0\nFmax = 100\n\n# Water current parameters\nFcurAmp = 25.0 # amplitude of the effective current force\nFcurAngle = np.deg2rad(30.0) # angle of the effective current force\n\n\n# Input Parameters\nLx = 100.0 # Desired X position\nLy = 100.0 # Desired Y position\nStartTime = 0.0 # Time of command start\n\n# ODE solver parameters\nabserr = 1.0e-8\nrelerr = 1.0e-6\nstoptime = 15.0\nnumpoints = 1501\nstepmax = 0.01\n\n# Create the time samples for the output of the ODE solver.\n# create a time array from 0..100 sampled at 0.1 second steps\ndt = 0.05\nt = np.arange(0.0, stoptime + dt, dt)\n\n\n# Pack up the parameters and initial conditions:\nL = [Lx, Ly]\np = [m, kp, kd, L, StartTime, Fmax, FcurAmp, FcurAngle]\n\n# Initial conditions\nx_init, y_init = des_pos(0,L,StartTime)\nx_dot_init = 0.0\ny_dot_init = 0.0\n# Pack them into a vector\nx0 = [x_init, x_dot_init, y_init, y_dot_init]\n\n\n# Call the ODE solver.\nresp = odeint(eq_of_motion, x0, t, args=(p,), atol=abserr, rtol=relerr)\n\n# get the x and y position responses for plotting\nx_resp = resp[:,0]\ny_resp = resp[:,2]\n\n# Save the desired trajectory for plotting too\ndesired_traj = des_pos(t, L, StartTime)\nx_desired = desired_traj[0]\ny_desired = desired_traj[1]\n\n# Get current for plotting\nFcurrent = np.zeros((len(t), 2))\n\n# Set up vector field of current\nfor ii in np.arange(len(t)):\n Fcurrent[ii,:] = Fcur(t[ii], p)\n \n# Define the range of x and y coordinates to draw the current quivers over\nxrange = np.linspace(-20, 20, 8) \nyrange = np.linspace(-10, 10, 8)\n \ncurX, curY = np.meshgrid(xrange, yrange)\n\n\n# Set the plot size - 16x9 aspect ratio is best for videos\n# We are mostly setting up the size and overall plot formatting here.\n# The data here is just a placeholder, it will be filled during the animation\nfig = plt.figure(figsize=(8, 4.5))\nax = plt.gca()\nplt.subplots_adjust(bottom=0.17, left=0.17, top=0.96, right=0.96)\n\n# Change the axis units 
font\nplt.setp(ax.get_ymajorticklabels(),fontsize=18)\nplt.setp(ax.get_xmajorticklabels(),fontsize=18)\n\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\n\nax.xaxis.set_ticks_position('bottom')\nax.yaxis.set_ticks_position('left')\n\n# Turn on the plot grid and set appropriate linestyle and color\nax.grid(True, linestyle=':', color='0.75')\nax.set_axisbelow(True)\n\n# Define the X and Y axis labels\nplt.xlabel('X Position (m)', fontsize=22, weight='bold', labelpad=5)\nplt.ylabel('Y Position (m)', fontsize=22, weight='bold', labelpad=10)\n \n \n# , '#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628'\n \n# define items to animate\ntime_template = 'Time = {:5.2f} s'\ntime_text = ax.text(0.05, 0.95, '', \n transform=ax.transAxes, \n fontsize=18, \n bbox=dict(facecolor='white', edgecolor='white', alpha=0.75))\n\nplt.plot(x_desired, y_desired, linewidth=2, color='#4daf4a', linestyle='-.', label=r'Desired')\n\n# This marker will serve as the vehicle\nmarker = ax.annotate('',xy=(x_resp[0], y_resp[0]),\n xytext=(x_resp[1], y_resp[1]),\n xycoords='data',\n arrowprops=dict(width=2, headlength=16, facecolor='#e41a1c', edgecolor='#e41a1c'),\n animated=True)\n\n# We'll trail the past behind the marker\nghost, = ax.plot([],[], '#e41a1c', linewidth=2, linestyle='-', alpha=1, label=r'Actual')\n\n# And use a quiver plot to represent the wind\/current\/etc\nQ = ax.quiver(curX, curY, \n Fcurrent[:,0], Fcurrent[:,1], \n color='#377eb8', \n edgecolors=('#377eb8'), \n alpha = 0.5,\n width=0.0025, \n animated=True)\n\n# uncomment below and set limits if needed\nplt.axis('equal')\nplt.xlim(-15, 15)\nplt.ylim(-10, 10)\n\n# Create the legend, then fix the fontsize\nleg = plt.legend(loc='upper right', ncol = 1, fancybox=True)\nltext = leg.get_texts()\nplt.setp(ltext,fontsize=18)\n\n# Adjust the page layout filling the page using the new tight_layout command\nplt.tight_layout(pad=0.5)\n\n\n\n\ndef init():\n '''\n Define the items to animate\n '''\n \n marker.xytext = ([], [])\n marker.xy = ([], [])\n \n ghost.set_data([], [])\n time_text.set_text('')\n \n ax.set_xlim(-15, 15)\n ax.set_ylim(-10, 10)\n \n return marker, ghost, Q,\n \n \ndef animate(i):\n ''' \n Do the actual animation by updating values at each time step\n '''\n ax.set_xlim(-15, 15)\n ax.set_ylim(-10, 10)\n \n Q.set_UVC(Fcurrent[i,0], Fcurrent[i,1])\n \n x = x_resp[i]\n y = y_resp[i]\n \n \n # Here, I just use the difference between the last position and the current\n # one to get the angle. 
You can also use a heading response to determine\n # this angle.\n if i == 0:\n last_x, last_y = 0, 0\n else:\n last_x, last_y = x_resp[i - 1], y_resp[i - 1]\n \n angle = np.arctan2((y - last_y), (x - last_x))\n \n x_base = x - 1.0\/2 * np.cos(angle)\n y_base = y - 1.0\/2 * np.sin(angle)\n\n marker.set_position((x_base, y_base))\n marker.xytext = (x_base, y_base)\n marker.xy = (x, y)\n \n # Leave a \"trail\" behind the boat to show the path it took\n # You can leave the full trail\n x_ghost = x_resp[:i]\n y_ghost = y_resp[:i]\n \n # Or just a portion of it.\n # Here, we have it hard coded to be a two second trail (2\/dt steps)\n # x_ghost = x_resp[np.max((0, i-int(2\/dt))):i]\n # y_ghost = y_resp[np.max((0, i-int(2\/dt))):i]\n \n ghost.set_data(x_ghost, y_ghost)\n \n time_text.set_text(time_template.format(i * dt))\n \n return marker, ghost, Q,\n \n\n# Call the matplotlib animation function\nanim = animation.FuncAnimation(fig, \n animate, \n np.arange(1, len(resp)), \n init_func=init,\n interval=20, \n blit=True)\n\n\n# save the animation as an mp4. This requires ffmpeg or mencoder to be\n# installed. The extra_args ensure that the x264 codec is used, so that\n# the video can be embedded in html5. You may need to adjust this for\n# your system: for more information, see\n# http:\/\/matplotlib.sourceforge.net\/api\/animation_api.html\nanim.save('trajectory_animation.mp4', fps=30, dpi=300, bitrate = 2500, extra_args=['-vcodec', 'libx264'])\n\n# close \"Figure\" - actually just removes from queue to show at next show() command\nplt.close(fig)"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_131","text":"#!\/usr\/bin\/env python2\nfrom __future__ import print_function\nimport roslib\nimport sys\nimport rospy\nimport numpy as np\nimport datetime\nimport time\nfrom geometry_msgs.msg import PoseArray\nfrom geometry_msgs.msg import Pose\nfrom geometry_msgs.msg import PoseWithCovariance\nfrom nav_msgs.msg import Odometry\nfrom dse_msgs.msg import PoseMarkers\nfrom std_msgs.msg import Float64MultiArray\nfrom std_msgs.msg import MultiArrayLayout\nfrom std_msgs.msg import MultiArrayDimension\nfrom dse_msgs.msg import InfFilterResults\nfrom visualization_msgs.msg import Marker\nfrom scipy.spatial.transform import Rotation as R\nfrom gazebo_msgs.msg import LinkStates\nimport tf_conversions\nimport tf2_ros\n\nimport dse_lib\nimport dse_constants\nroslib.load_manifest('dse_simulation')\n\n\nclass information_filter:\n\n # Define initial\/setup values\n def __init__(self):\n\n # Get parameters from launch file\n self.ros_prefix = rospy.get_param('~prefix')\n if len(self.ros_prefix) != 0 and self.ros_prefix[0] != '\/':\n self.ros_prefix = '\/' + self.ros_prefix\n self.tf_pretix = self.ros_prefix[1:]\n self.this_agent_id = rospy.get_param('~id')\n self.dim_state = rospy.get_param('~dim_state')\n\n # self.ros_prefix = '\/tb3_0'\n # self.tf_pretix = self.ros_prefix[1:]\n # self.this_agent_id = 5\n # self.dim_state = 6\n\n self.camera_pose_sub = rospy.Subscriber(self.ros_prefix + \"\/dse\/pose_markers\", PoseMarkers, self.measurement_callback)\n self.inf_results_sub = rospy.Subscriber(self.ros_prefix + \"\/dse\/inf\/results\", InfFilterResults, self.results_callback)\n self.meas_vis_pub = rospy.Publisher(self.ros_prefix + \"\/dse\/vis\/measurement\", PoseArray, queue_size=10)\n\n self.est_ids = []\n self.est_vis_pubs = []#rospy.Publisher(self.ros_prefix + \"\/dse\/vis\/estimates\", PoseArray, queue_size=10)\n\n if self.dim_state == 6:\n self.dim_obs = 3\n elif self.dim_state == 
12:\n self.dim_obs = 6\n else:\n rospy.signal_shutdown('invalid state dimension passed in')\n\n # Define static variables\n self.dt = 0.1\n self.t_last = rospy.get_time()\n self.gzbo_ref_obj_state = None\n self.pthn_ref_obj_state = None\n\n # Create pose_array for measurement data\n def measurement_callback(self, data):\n poses = PoseArray()\n for pose_stamped in data.pose_array:\n poses.poses += [pose_stamped.pose.pose]\n poses.header.stamp = rospy.Time.now()\n if self.ros_prefix == '':\n poses.header.frame_id = 'odom'\n else:\n poses.header.frame_id = self.tf_pretix + '\/odom'\n self.meas_vis_pub.publish(poses)\n\n # Create pose_array for the information results\n def results_callback(self, data):\n inf_id_list = np.array(data.ids)\n inf_Y = dse_lib.multi_array_2d_output(data.inf_matrix)\n inf_y = dse_lib.multi_array_2d_output(data.inf_vector)\n self.inf_x = np.linalg.inv(inf_Y).dot(inf_y)\n inf_P = np.linalg.inv(inf_Y)\n inf_P[inf_P < 0] = 0\n inf_P = np.sqrt(inf_P)\n\n odom = Odometry()\n odom.header.stamp = rospy.Time.now()\n odom.header.frame_id = self.tf_pretix + '\/odom'\n # if self.ros_prefix == '':\n # odom.header.frame_id = 'base_footprint'\n # else:\n # odom.header.frame_id = self.tf_pretix + '\/base_footprint'\n\n for id in inf_id_list:\n if id not in self.est_ids:\n self.est_ids.append(id)\n self.est_vis_pubs.append(rospy.Publisher(self.ros_prefix + \"\/dse\/vis\/estimates\/\" + str(id), Odometry, queue_size=10))\n\n for id in inf_id_list:\n i = np.where(inf_id_list == id)[0][0]\n j = self.est_ids.index(id)\n\n i_min = i * self.dim_state\n i_max = i_min + self.dim_state\n odom.pose.pose = dse_lib.pose_from_state_3D(self.inf_x[i_min:i_max])\n cov = dse_lib.sub_matrix(inf_P, inf_id_list, id, self.dim_state)\n cov = dse_lib.state_cov_to_covariance_matrix(cov)\n odom.pose.covariance = list(dse_lib.covariance_to_ros_covariance(cov))\n self.est_vis_pubs[j].publish(odom)\n\n\ndef main(args):\n\n rospy.init_node('dse_gazebo_visualization_node', anonymous=True)\n imf = information_filter()\n try:\n rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting down\")\n\n\nif __name__ == '__main__':\n main(sys.argv)\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_133","text":"#!\/usr\/bin\/env python\n\"\"\"MAGeCK test module\nCopyright (c) 2014 , , lab\nThis code is free software; you can redistribute it and\/or modify it\nunder the terms of the BSD License (see the file COPYING included with\nthe distribution).\n@status: experimental\n@version: $Revision$\n@author: \n@contact: li.david.wei AT gmail.com\n\"\"\"\n\n\nfrom __future__ import print_function\nimport sys\nimport math\nimport types\nimport logging\n\nfrom mageck.mageckCount import *\n\nfrom mageck.fileOps import *\nfrom mageck.testVisual import *\n\nfrom mageck.fdr_calculation import *\n\n\n\n\n# debug\n# try:\n# from IPython.core.debugger import Tracer\n# except:\n# pass\n\ndef mmedian(lst):\n \"\"\"\n get the median value\n \"\"\"\n sortedLst = sorted(lst)\n lstLen = len(lst)\n if lstLen==0:\n return 0.0\n index = (lstLen - 1) \/\/ 2\n\n if (lstLen % 2):\n return sortedLst[index]\n else:\n return (sortedLst[index] + sortedLst[index + 1])\/2.0\n\ndef getgeomean(v):\n meanval=sum([math.log(vx+0.1,2) for vx in v])\/float(len(v))\n return 2**meanval-0.1\n\ndef getMeans(matt):\n # arithmatic mean\n #meanvalue=[sum(v)\/float(len(v)) for v in matt]\n # geometric mean\n meanvalue=[getgeomean(v) for v in matt]\n return meanvalue\n\ndef getVars(matt):\n meanvalue=getMeans(matt)\n varvalue=[ sum([ 
(kj-meanvalue[i])*(kj-meanvalue[i]) for kj in matt[i] ] )\/(float(len(matt[i]))-1) for i in range(len(meanvalue))]\n #varvalue={k:sum([ (x-meanvalue[k])*(x-meanvalue[k]) for x in v])\/(float(len(v))-1) for (k,v) in ctable.iteritems()}\n return varvalue\n\ndef leastsquare(x,y,weight=None):\n \"\"\"\n least squares fitting\n coefficients from y= a+bx\n return (b,a)\n reference: http:\/\/mathworld.wolfram.com\/LeastSquaresFitting.html\n For weighted least square: http:\/\/goo.gl\/pGpTZ6\n \"\"\"\n n=len(x)\n if n != len(y):\n logging.error('Unequal length of vectors of x and y in least square')\n sys.exit(-1)\n if weight is None:\n sy=sum(y)\n sx=sum(x)\n sx2=sum([t*t for t in x])\n sxy=sum([x[i]*y[i] for i in range(n)])\n a=(sy*sx2-sx*sxy)\/(n*sx2-sx*sx)\n b=(n*sxy-sx*sy)\/(n*sx2-sx*sx)\n return (b,a)\n else:\n nw=sum(weight)\n sy=sum([y[i]*weight[i] for i in range(n)])\n sx=sum([x[i]*weight[i] for i in range(n)])\n sx2=sum([x[i]*x[i]*weight[i] for i in range(n)])\n sxy=sum([x[i]*y[i]*weight[i] for i in range(n)])\n a=(sy*sx2-sx*sxy)\/(nw*sx2-sx*sx)\n b=(nw*sxy-sx*sy)\/(nw*sx2-sx*sx)\n return (b,a)\n\ndef modelmeanvar(ctable,method='edger'):\n \"\"\"\n model the relation between mean and variance\n \"\"\"\n # calculate the mean and variance\n tablemat=ctable.values()\n meanvalue=getMeans(tablemat)\n varvalue=getVars(tablemat)\n # choose values with variance greater than mean\n meangood=[meanvalue[i] for i in range(len(meanvalue)) if meanvalue[i]0.01 else 0.01 )(varvalue[i]-meanvalue[i]) for i in range(len(varvalue)) ]\n # log\n meanglog=[math.log(x+1,2) for x in meangood]\n varglog=[math.log(x+1,2) for x in vargood]\n # Tracer()()\n if method=='linear':\n # least square\n (k,b)=leastsquare(meanglog,varglog,meangood)\n if k<1:\n k=1\n if b<0:\n b=0\n return (k,b)\n elif method=='edger':\n dy=varglog\n dx=[2*x for x in meanglog]\n ret=(sum(dy)-sum(dx))*1.0\/len(dx)\n return ret\n else:\n return 0\n\n\ndef getadjustvar(coef,meanval,method='mixed'):\n \"\"\"\n From the model, get the adjusted variance\n \"\"\"\n if method=='linear':\n k=coef[0];b=coef[1]\n if type(meanval) is types.FloatType:\n return (meanval**k)*(2**b)+meanval\n if type(meanval) is types.ListType:\n return [(z**k)*(2**b)+z for z in meanval]\n elif method=='edger':\n if type(meanval) is types.FloatType:\n return (meanval**2)*(2**coef)+meanval\n if type(meanval) is types.ListType:\n return [(z**2)*(2**coef)+z for z in meanval]\n elif method=='mixed':\n var1=getadjustvar(coef,meanval,method='linear')\n var2=getadjustvar(coef[2],meanval,method='edger')\n return [ (lambda x,y: x if x>y else y)(var1[i],var2[i]) for i in range(len(var1))]\n else:\n return meanval\n\ndef getnormcdf(x,lowertail=True):\n \"\"\"\n Get the normal CDF function. 
used to calculate p-value\n \"\"\"\n # ax=math.fabs(x)\n #axv=math.erfc(x\/(2**0.5))\/2; # higher tail\n if lowertail==False:\n #return axv\n return math.erfc(x\/(2**0.5))\/2\n else:\n #return 1-axv\n return math.erfc(-x\/(2**0.5))\/2\n #if (x>0 and lowertail==False) or (x<0 and lowertail==True):\n # return axv\n #else:\n # return 1-axv\n\ndef getNormalPValue(mean0,var0,mean1, lower=False):\n \"\"\"\n Use truncated normal distribution to calculate the pvalue\n \"\"\"\n # use ttmean to calculate the pvalue\n n=len(mean0)\n minmean1=min([x for x in mean1 if x>0])\n mean1_adj=[(lambda x: x if x >minmean1 else minmean1)(t) for t in mean1]\n # first, convert to standard normal distribution values\n t_theta=[(mean1_adj[i]-mean0[i])\/math.sqrt(var0[i]) for i in range(n)]\n t_theta_0=[(0.0-mean0[i])\/math.sqrt(var0[i]) for i in range(n)]\n #\n t_p=[getnormcdf(x,lowertail=lower) for x in t_theta]\n t_p_0=[getnormcdf(x,lowertail=True) for x in t_theta_0]\n if lower==True:\n return [(t_p[i]-t_p_0[i])\/(1-t_p_0[i]) for i in range(n)]\n else:\n return [t_p[i]\/(1-t_p_0[i]) for i in range(n)]\n\n\ndef getNBPValue(mean0,var0,mean1, lower=False,log=False):\n \"\"\"\n Use negative binomial to calculate p-value\n Reference:\n http:\/\/docs.scipy.org\/doc\/scipy\/reference\/generated\/scipy.stats.nbinom.html#scipy.stats.nbinom\n \"\"\"\n from scipy.stats import nbinom\n n=len(mean0)\n nb_p=[mean0[i]\/var0[i] for i in range(n)]; # consisitent with R\n nb_n0=[mean0[i]*mean0[i]\/(var0[i]-mean0[i]) for i in range(n)]\n nb_n=[ (lambda t: t if t>=1 else 1)(x) for x in nb_n0]\n #\n if lower==True:\n if log==False:\n nb_p_low=nbinom.cdf(mean1,nb_n,nb_p)\n else:\n nb_p_low=nbinom.logcdf(mean1,nb_n,nb_p)\n return list(nb_p_low)\n else:\n if log==False:\n nb_p_low=nbinom.sf(mean1,nb_n,nb_p)\n else:\n nb_p_low=nbinom.logsf(mean1,nb_n,nb_p)\n return list(nb_p_low)\n\ndef calculate_gene_lfc(args,lfcval,sort_id,n_lower,sgrna2genelist,destkeys,ispos=False):\n \"\"\"\n Calculate gene LFC using different methods\n Parameters:\n args\n Arguments\n lfcval\n sgRNA log fold change vector\n sortid\n sgRNA sort index\n n_lower\n alpha cutoff (integer)\n sgrna2genelist\n a {sgrnaid:gene} dict\n destkeys\n a [sgrnaid] vector\n ispos\n a boolean vector to indicate whether this is a positive selection\n Return value:\n genelfc\n a {geneid:lfc} dict\n \"\"\"\n genesglfc={}\n ni=0\n for i in sort_id:\n ni+=1\n targetgene=sgrna2genelist[destkeys[i]]\n if targetgene not in genesglfc:\n genesglfc[targetgene]=[]\n if args.gene_lfc_method=='alphamean' or args.gene_lfc_method=='alphamedian':\n if ni*1.0<=n_lower:\n genesglfc[targetgene]+=[lfcval[i]]\n else:\n genesglfc[targetgene]+=[lfcval[i]]\n genelfc={}\n for (gid,vl) in genesglfc.items():\n if args.gene_lfc_method=='median' or args.gene_lfc_method=='alphamedian':\n lfc=mmedian(vl)\n elif args.gene_lfc_method=='secondbest':\n if ispos:\n vll=sorted(vl,reverse=True)\n else:\n vll=sorted(vl)\n if len(vll)>1:\n lfc=vll[1]\n else:\n lfc=0.0\n elif args.gene_lfc_method=='mean' or args.gene_lfc_method=='alphamean':\n if len(vl)>0:\n lfc=sum(vl)\/len(vl)\n else:\n lfc=0.0\n else:\n lfc=0.0\n genelfc[gid]=lfc\n return genelfc\n\ndef crispr_test(tab,ctrlg,testg, destfile,sgrna2genelist,args):\n \"\"\"\n main function of crispr test\n Parameters:\n tab\n Read count table\n ctrlg\n Index for control samples\n testg\n Index for treatment samples\n destfile\n Prefix for output file (sgrna_summary.txt)\n sgrna2genelist\n {sgrna:gene} mapping\n args\n Arguments\n Return value:\n (lowp,highp,sgrnalfc)\n 
lowp\n alpha cutoff for neg. selection\n highp\n alpha cutoff for pos. selection\n lower_gene_lfc\n {gene:lfc} dict. lfc is for neg. selection\n higher_gene_lfc\n {gene:lfc} dict. lfc is for pos. selection\n \"\"\"\n n=len(tab)\n # control and test matrix\n tabctrl={k:[v[i] for i in range(len(v)) if i in ctrlg] for (k,v) in tab.iteritems()}\n tabtest={k:[v[i] for i in range(len(v)) if i in testg] for (k,v) in tab.iteritems()}\n # control matrix for mean-var estimation\n if len(ctrlg)>1 and args.variance_from_all_samples==False: # more than 1 controls\n tabctrlmod={k:[v[i] for i in range(len(v)) if i in ctrlg] for (k,v) in tab.iteritems()}\n else: # only 1 control: use all the samples for estimation\n tabctrlmod={k:[v[i] for i in range(len(v)) if i in (ctrlg+testg)] for (k,v) in tab.iteritems()}\n # training using control samples\n model1=modelmeanvar(tabctrlmod,method='linear')\n #model2=modelmeanvar(tabctrl,method='edger')\n model=[x for x in model1];#+[model2]\n if type(model) is types.ListType:\n logging.debug('Adjusted model: '+'\\t'.join([str(x) for x in model]))\n else:\n logging.debug('Adjusted model: k='+str(model))\n\n tabctrl_mat=tabctrl.values()\n tabctrlmodel_mat=tabctrlmod.values()\n tabc_mean=getMeans(tabctrl_mat)\n tabcmodel_mean=getMeans(tabctrlmodel_mat)\n #\n # setup the valid sgRNA flag\n validsgrna=[1]*n\n if hasattr(args,\"remove_zero\") and ( args.remove_zero==\"control\" or args.remove_zero==\"both\"):\n validsgrna=[ (lambda x: 1 if x>0 else 0)(t) for t in tabc_mean]\n # if mean of the control samples is 0: set it to greater than 0\n tabc_min=min([x for x in tabc_mean if x>0])\n tabc_mean=[ (lambda x: x if x>tabc_min else tabc_min)(t) for t in tabc_mean]\n tabc_var=getVars(tabctrlmodel_mat)\n tabc_adjvar=getadjustvar(model,tabc_mean,method='linear')\n\n # testing using tebtest\n nt=tabtest[tabtest.keys()[0]]\n ttmat=tabtest.values()\n ttmean=getMeans(ttmat)\n # set up the valid sgRNA flag\n if hasattr(args,\"remove_zero\") and ( args.remove_zero==\"treatment\" or args.remove_zero==\"both\"):\n validsgrna2=[ (lambda x: 1 if x>0 else 0)(t) for t in ttmean]\n validsgrna=[validsgrna[t]*validsgrna2[t] for t in range(n)]\n # use ttmean to calculate the pvalue\n # first, convert to standard normal distribution values\n tt_theta=[(ttmean[i]-tabc_mean[i])\/math.sqrt(tabc_adjvar[i]) for i in range(n)]\n tt_abstheta=[math.fabs(tt_theta[i]) for i in range(n)]\n #\n try:\n # for consistency, use normal p values\n tt_p_lower=getNormalPValue(tabc_mean,tabc_adjvar,ttmean,lower=True)\n tt_p_higher=getNormalPValue(tabc_mean,tabc_adjvar,ttmean,lower=False)\n #tt_p_lower=getNBPValue(tabc_mean,tabc_adjvar,ttmean,lower=True)\n #tt_p_higher=getNBPValue(tabc_mean,tabc_adjvar,ttmean,lower=False)\n\n # tt_p_lower_score=getNBPValue(tabc_mean,tabc_adjvar,ttmean,lower=True,log=True)\n # tt_p_higher_score=getNBPValue(tabc_mean,tabc_adjvar,ttmean,lower=False,log=True)\n #except ImportError:\n # #logging.warning('An error occurs while trying to compute p values using scipy. Will use normal model instead of Negative Binomial model, but please check with your scipy installation.')\n # #tt_p_lower=getNormalPValue(tabc_mean,tabc_adjvar,ttmean,lower=True)\n # #tt_p_higher=getNormalPValue(tabc_mean,tabc_adjvar,ttmean,lower=False)\n except:\n logging.error('An error occurs while trying to compute p values. 
Quit..')\n sys.exit(-1)\n #\n #\n tt_p_twosided=[ (lambda x,y: 2*x if xtabc_mean[i]]\n #if CNVnorm:\n # report+=[dfmt.format(norm_tt_abstheta[i])] # add CNV-adjusted sgRNA scores\n print('\\t'.join([str(x) for x in report]),file=destf)\n destf.close()\n #\n # prepare files for gene test\n if sgrna2genelist is not None:\n destfname=destfile+'.plow.txt'\n destkeys=tabctrl.keys()\n sort_id=[i[0] for i in sorted(enumerate(tt_p_lower_score), key=lambda x:x[1],reverse=False)]\n # output to file\n destf=open(destfname,'w')\n print('\\t'.join(['sgrna','symbol','pool','p.low','prob','chosen']),file=destf)\n for i in sort_id:\n report=[destkeys[i], sgrna2genelist[destkeys[i]],'list', tt_p_lower_score[i], '1', validsgrna[i]]\n print('\\t'.join([str(x) for x in report]),file=destf)\n destf.close()\n tt_p_lower_fdr=pFDR(tt_p_lower,method=args.adjust_method)\n n_lower=sum([1 for x in tt_p_lower if x <= args.gene_test_fdr_threshold])\n n_lower_p=n_lower*1.0\/len(tt_p_lower)\n logging.debug('lower test FDR cutoff: '+str(n_lower_p))\n # calculate gene lfc\n lower_gene_lfc=calculate_gene_lfc(args,sgrnalfc,sort_id,n_lower,sgrna2genelist,destkeys)\n #\n destfname=destfile+'.phigh.txt'\n destf=open(destfname,'w')\n destkeys=tabctrl.keys()\n sort_id=[i[0] for i in sorted(enumerate(tt_p_higher_score), key=lambda x:x[1],reverse=False)]\n # output to file\n print('\\t'.join(['sgrna','symbol','pool','p.high','prob','chosen']),file=destf)\n for i in sort_id:\n report=[destkeys[i], sgrna2genelist[destkeys[i]],'list', tt_p_higher_score[i], '1', validsgrna[i]]\n print('\\t'.join([str(x) for x in report]),file=destf)\n destf.close()\n tt_p_higher_fdr=pFDR(tt_p_higher,method=args.adjust_method)\n n_higher=sum([1 for x in tt_p_higher if x <= args.gene_test_fdr_threshold])\n if n_higher>0:\n n_higher_p=n_higher*1.0\/len(tt_p_higher)\n else:\n n_higher_p=0.01\n logging.debug('higher test FDR cutoff: '+str(n_higher_p))\n # calculate gene lfc\n higher_gene_lfc=calculate_gene_lfc(args,sgrnalfc,sort_id,n_higher,sgrna2genelist,destkeys,ispos=True)\n #\n return (n_lower_p,n_higher_p,lower_gene_lfc,higher_gene_lfc)\n else:\n return (None,None,None,None)\n\ndef rank_association_test(file,outfile,cutoff,args,adjustcutoff=True):\n if adjustcutoff: # adjust the alpha threshold to 0.05-0.5\n if cutoff<0.05:\n cutoff=0.05\n if cutoff>0.5:\n cutoff=0.5\n #rrapath='\/'.join(sys.argv[0].split('\/')[:-1]+[\"..\/bin\/RRA\"])\n rrapath='RRA'\n command=rrapath+\" -i \"+file+\" -o \"+outfile+\" -p \"+str(cutoff)\n if hasattr(args,'control_sgrna') and args.control_sgrna != None :\n command+=\" --control \"+args.control_sgrna\n if hasattr(args,'skip_gene'):\n if args.skip_gene != None :\n for g in args.skip_gene:\n command+=\" --skip-gene \"+g\n else:\n command+=\" --skip-gene NA --skip-gene na \"\n else:\n command+=\" --skip-gene NA \"\n # command+=\" --min-number-goodsgrna 2 \"\n if hasattr(args,\"additional_rra_parameters\") and args.additional_rra_parameters != None:\n command+=\" \"+args.additional_rra_parameters+\" \"\n systemcall(command)\n\n\ndef magecktest_removetmp(prefix):\n tmpfile=[prefix+'.plow.txt',prefix+'.phigh.txt',prefix+'.gene.low.txt',prefix+'.gene.high.txt']\n for f in tmpfile:\n systemcall('rm '+f,cmsg=False)\n\n\ndef magecktest_parsetreatmentsfromday0(args,samplelabelindex):\n \"\"\"\n Reconstruct the groups of treatment and control from --day0-label\n \"\"\"\n samples=[s for s in samplelabelindex.keys()]\n day0labelstr=args.day0_label\n args.day0_label=args.day0_label.split(',')\n for dl in args.day0_label:\n if dl not in 
samples:\n logging.error('Label '+dl+' specified in the --day0-label option does not match count table. Please double check.')\n sys.exit(-1)\n nonday0sample=[x for x in samples if x not in args.day0_label]\n if len(nonday0sample)==0:\n logging.error('At least 1 non day0-label sample should be specified.')\n sys.exit(-1)\n args.treatment_id=nonday0sample\n args.control_id=[day0labelstr]*len(nonday0sample)\n\n\ndef magecktest_main(args):\n \"\"\"\n Main entry for MAGeCK test function\n \"\"\"\n\n # stat test\n if args.subcmd == 'run' or args.subcmd == 'test':\n # read counts from file\n if args.subcmd == 'test':\n mapres=getcounttablefromfile(args.count_table)\n else:\n mapres=getcounttablefromfile(args.output_prefix+'.count.txt')\n cttab=mapres[0]\n sgrna2genelist=mapres[1]\n samplelabelindex=mapres[2]\n\n if len(cttab)==0:\n sys.exit(-1)\n nsample=len(cttab[cttab.keys()[0]])\n\n # process day0-label\n if args.day0_label != None:\n magecktest_parsetreatmentsfromday0(args,samplelabelindex)\n\n # iterate control group and treatment group\n supergroup_control=args.control_id\n supergroup_treat=args.treatment_id\n # control group and treatment group labels\n labellist_control=[]\n labellist_treat=[]\n # R visualization init\n vrv=VisualRValue()\n vrv.outprefix=args.output_prefix\n vrv.genesummaryfile=args.output_prefix+'.gene_summary.txt'\n vrv.startRTemplate()\n vrvrnwcplabel=[]; # labels to write in rnw\n\n # loop by comparisons\n for cpindex in range(len(supergroup_treat)):\n # convert the sample label to sample index\n if cpindex==0:\n cp_prefix=args.output_prefix\n else:\n cp_prefix=args.output_prefix+'.'+str(cpindex)\n # labels\n (treatgroup,treatgrouplabellist)=parse_sampleids(supergroup_treat[cpindex],samplelabelindex)\n treatgroup_label=str(supergroup_treat[cpindex])\n logging.info('Treatment samples:'+treatgroup_label)\n logging.info('Treatment sample index:'+','.join([str(x) for x in treatgroup]))\n labellist_treat+=[treatgroup_label]\n if supergroup_control != None:\n (controlgroup,controlgrouplabellist)=parse_sampleids(supergroup_control[cpindex],samplelabelindex)\n controlgroup_label=str(supergroup_control[cpindex]); # only for display\n logging.info('Control samples:'+controlgroup_label)\n else:\n #controlgroup=[x for x in range(nsample) if x not in treatgroup]\n #controlgrouplabellist=[samplelabelindex[x] for x in range(nsample) if x not in treatgroup]\n xls=[x for x in range(nsample) if x not in treatgroup]\n (controlgroup,controlgrouplabellist)=parse_sampleids(','.join([str(t) for t in xls]),samplelabelindex)\n controlgroup_label='rest'\n logging.info('Control samples: the rest of the samples')\n logging.info('Control sample index:'+','.join([str(x) for x in controlgroup]))\n labellist_control+=[controlgroup_label]\n # read the sgRNA-gene table for rank association\n # normalization\n cttab_sel={k:([v[i] for i in controlgroup + treatgroup]) for (k,v) in cttab.iteritems()}; # controlgroup do not overlap with treatgroup\n if hasattr(args,'norm_method'):\n nttab=normalizeCounts(cttab_sel,method=args.norm_method,controlsgfile=args.control_sgrna)\n else:\n nttab=normalizeCounts(cttab_sel)\n # write normalized counts to file\n if hasattr(args,'normcounts_to_file'):\n if args.normcounts_to_file:\n # counts\n mageck_printdict(nttab,args,sgrna2genelist,samplelabelindex,controlgroup+treatgroup)\n controlgroup_ids=list(range(len(controlgroup)))\n treatgroup_ids=list(range(len(controlgroup),len(controlgroup+treatgroup)))\n # perform sgRNA test, and prepare files for gene test\n 
gene_as_cutoff=crispr_test(nttab, controlgroup_ids, treatgroup_ids, cp_prefix,sgrna2genelist,args)\n #\n if gene_as_cutoff[0] is not None:\n rank_association_test(cp_prefix+'.plow.txt',cp_prefix+'.gene.low.txt',gene_as_cutoff[0],args)\n if gene_as_cutoff[1] is not None:\n rank_association_test(cp_prefix+'.phigh.txt',cp_prefix+'.gene.high.txt',gene_as_cutoff[1],args,adjustcutoff=False) # update: fpr positive selection, do not adjust alpha cutoff\n # merge different files\n merge_rank_files(cp_prefix+'.gene.low.txt',cp_prefix+'.gene.high.txt',cp_prefix+'.gene_summary.txt',args,gene_as_cutoff)\n if cpindex>0:\n if cpindex>1:\n label1=''\n else:\n if len(labellist_treat)>0:\n label1=labellist_treat[0]+'_vs_'+labellist_control[0]+'|'\n else:\n label1=''\n label2=treatgroup_label+'_vs_'+controlgroup_label+'|'\n merge_rank_summary_files(args.output_prefix+'.gene_summary.txt',cp_prefix+'.gene_summary.txt',args.output_prefix+'.gene_summary.txt',args,lowfile_prefix=label1,highfile_prefix=label2)\n # visualization: load top k genes\n # print(str(samplelabelindex))\n vrv.cplabel=treatgroup_label+'_vs_'+controlgroup_label+' neg.'\n vrvrnwcplabel+=[vrv.cplabel]\n vrv.cpindex=[2+12*cpindex+1]\n vrv.loadTopKWithExp(cp_prefix+'.gene.low.txt',nttab,sgrna2genelist,controlgrouplabellist+treatgrouplabellist)\n vrv.cplabel=treatgroup_label+'_vs_'+controlgroup_label+' pos.'\n vrvrnwcplabel+=[vrv.cplabel]\n vrv.cpindex=[2+12*cpindex+6+1]\n vrv.loadTopKWithExp(cp_prefix+'.gene.high.txt',nttab,sgrna2genelist,controlgrouplabellist+treatgrouplabellist)\n\n # clean the file\n if args.keep_tmp==False:\n magecktest_removetmp(cp_prefix)\n if cpindex>0:\n systemcall('rm '+cp_prefix+'.gene_summary.txt',cmsg=False)\n systemcall('rm '+cp_prefix+'.sgrna_summary.txt',cmsg=False)\n # end cleaning\n # end cpindex loop\n\n # generate pdf file\n # write to rnw file buffer\n vrv.genesummaryfile=args.output_prefix+'.gene_summary.txt'\n vrv.getGeneSummaryStat(args,isplot=False)\n vrv.comparisonlabel=vrvrnwcplabel; # replace the label field\n vrv.writeGeneSummaryStatToBuffer()\n # write to rnw and R file\n vrv.closeRTemplate()\n if hasattr(args, \"pdf_report\") and args.pdf_report:\n vrv.generatePDF(args.keep_tmp)\n # end if\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_134","text":"0\n#!\/usr\/bin\/python\n# -*- coding: utf-8 -*-\nfrom optparse import OptionParser\nimport os\nfrom scipy.stats import norm\nfrom collections import defaultdict\nimport bz2\nimport random\nimport math\n#from get_data_set import FILE_TYPES\n\nFILE_TYPES = ['malware', 'non_malware', 'file']\n\nALLOWED_EXTENSIONS = dict([ (ext, num) for (ext, num) in zip(FILE_TYPES, range(1,len(FILE_TYPES)+1))])\n\n# Arbitrary mapping from extensions we're interested in to numerical labels\n\n##---------------------- Feature Calculators ----------------------------- ##\n\n# All the below functions take as input a file fragment, as a raw string. 
They\n# return a list (in many cases of length one) describing some feature of the\n# file fragment.\n\ndef unigram_counts(fragment):\n counts = defaultdict(int)\n for byte in fragment:\n counts[byte] += 1\n\n c= [ counts[chr(byte)] for byte in range(255) ]\n mean, std = norm.fit(c)\n return [mean, std]\n\ndef entropy_and_bigram_counts(fragment):\n \"\"\"Package together to avoid having to calculate this a second time when\n calculating entropy.\n \"\"\"\n counts = defaultdict(int)\n for i in range(len(fragment)-1):\n counts[fragment[i]+fragment[i+1]] += 1\n\n bigram_frequencies = [counts[chr(b1)+chr(b2)] for b1 in range(255) for b2 in range(255)]\n mean, std = norm.fit(bigram_frequencies)\n entropy = 0.0\n #bigram_frequencies = bigram_counts(fragment)\n for i in range(len(bigram_frequencies)):\n if bigram_frequencies[i] > 0.0:\n entropy += bigram_frequencies[i] * math.log10(bigram_frequencies[i])\n entropy = -entropy\n\n #return [entropy]\n\n return [entropy] + [mean, std]\ndef entropy_and_trigram_counts(fragment):\n \"\"\"Package together to avoid having to calculate this a second time when\n calculating entropy.\n \"\"\"\n counts = defaultdict(int)\n for i in range(len(fragment)-1):\n counts[fragment[i]+fragment[i+1]] += 1\n\n bigram_frequencies = [counts[chr(b1)+chr(b2)+chr(b3)] for b1 in range(255) for b2 in range(255) for b3 in range(255)]\n mean, std = norm.fit(bigram_frequencies)\n entropy = 0.0\n #bigram_frequencies = bigram_counts(fragment)\n for i in range(len(bigram_frequencies)):\n if bigram_frequencies[i] > 0.0:\n entropy += bigram_frequencies[i] * math.log10(bigram_frequencies[i])\n entropy = -entropy\n\n #return [entropy]\n\n return [entropy] + [mean, std]\n\ndef contiguity(fragment):\n \"\"\" A vague measurement of the average contiguity from byte to byte.\n \"\"\"\n total_diff = 0\n total = 0\n for i in range(len(fragment)-1):\n total_diff += abs(ord(fragment[i]) - ord(fragment[i+1]))\n total += 1\n\n return [total_diff\/(total+0.0)]\n\ndef mean_byte_value(fragment):\n return [ sum([ord(char) for char in fragment]) ]\n\ndef longest_streak(fragment):\n \"\"\" The length of the longest repeating subsequence.\n \"\"\"\n longest = 0\n last = fragment[0]\n current_streak = 1\n for char in fragment[1:]:\n if char == last:\n current_streak += 1\n else:\n if current_streak > longest:\n longest = current_streak\n last = char\n current_streak = 1\n\n return [longest]\n\ndef compressed_length(fragment):\n \"\"\"Return a feature vector with the ratio of the compressed length of the\n file fragment to the actual length of the file fragment\n \"\"\"\n return [ float( len(bz2.compress(fragment)) ) \/ float(len(fragment)) ]\n\ndef entropy(fragment):\n entropy = 0.0\n bigram_frequencies = bigram_counts(fragment)\n for i in range(len(bigram_frequencies)):\n if bigram_frequencies[i] > 0.0:\n entropy += bigram_frequencies[i] * math.log10(bigram_frequencies[i])\n entropy = -entropy\n\n return [entropy]\n\ndef chi_squared(fragment):\n chi_squared = 0.0\n C2 = 0.0\n expected = 2.0 #expected frequency of a byte (fileSize\/number of possible byte values)->(512\/256)\n\n for index in range(0,256):\n observed = feature_vector_1grams[index]\n C2 += ((observed-expected)**2)\/expected\n\n chi_squared = stats.achisqprob(C2,255)\n\n return [chi_squared]\n\ndef hamming_weight(fragment):\n hamming_weight = 0.0\n for i in range(len(fragment)):\n current_byte = ord(fragment[i])\n while current_byte != 0:\n hamming_weight += float(current_byte & 1)\n current_byte = current_byte >> 1\n hamming_weight \/= 
float(8 * len(fragment))\n\n return [hamming_weight]\n\n## ----------------------------------------------------------------------- ##\n\n\ndef to_vectorfile_format(label, vector):\n \"\"\"\n Given a label (e.g. 1, 2, 3, 4) and a list representing a vector, return a\n vector string that fits the format used by libsvm and svm-light.\n \"\"\"\n vector_string = str(label)\n feat_index = 1 # Start from 1 rather than 0, oddly\n for value in vector:\n # Can save a ton of space by ignoring 0-valued features\n\n #if value != 0:\n\tvector_string += \" \" + \",\" + str(value)\n feat_index += 1\n #vector_string += ''\n\n return vector_string\n\nif __name__ == '__main__':\n parser = OptionParser()\n parser.add_option(\"-i\", \"--input-dir\", dest=\"input_dir\", default=\"fragments\",\n help=\"Directory containing the files to be processed (default .\/fragments)\")\n parser.add_option(\"-o\", \"--output-dir\", dest=\"output_dir\", default=\"\/h\/90\/oles\/csc2208h\/vectors\",\n help=\"Directory to write vector file to (default .\/vectors)\")\n parser.add_option(\"-l\", \"--label\", dest=\"label\", default=\"\",\n help=\"String to be added to the name of the output vector file\")\n parser.add_option(\"-n\", \"--limit\", dest=\"limit\", type=int, default=0,\n help=\"Limit to the number of fragments to take of each type. Default: 0=unlimited.\")\n\n parser.add_option(\"--omit\", dest=\"omit\", type=int, default=-1,\n help=\"Omit the feature with the given index (should be from 0-6). Default:-1 -> don't omit anything\")\n\n print(\"Starting\")\n (options, args) = parser.parse_args()\n\n features = [unigram_counts, entropy_and_trigram_counts, contiguity, mean_byte_value, longest_streak, compressed_length, hamming_weight, entropy_and_bigram_counts]\n if options.omit != -1:\n features = features[:features.omit] + features[features.omit+1:]\n\n output_fname = os.path.join(options.output_dir, 'vector' + options.label + '.svm')\n out = open(output_fname, 'w')\n\n fragments_seen = 0\n options1 = dict()\n\n files_parsed = 0\n\n print(\"Converting\")\n for subdir in os.listdir(options.input_dir):\n fulldir = os.path.join(options.input_dir, subdir)\n frags = os.listdir(fulldir)\n # If we're only taking a subset of the fragments (when options.limit is set), we want to make sure it's a random one\n random.shuffle(frags)\n\n for fragment_name in (frags[:options.limit] if options.limit else frags):\n fragments_seen += 1\n if (fragments_seen % 1000) == 0:\n print \"On %dth fragment\" % (fragments_seen)\n f = open(os.path.join(fulldir, fragment_name))\n fragment = f.read()\n f.close()\n\n #ext is the name of the folder fragment came directory\n if (subdir == \"malware\"):\n ext = 0\n else:\n ext = 1\n\n files_parsed = files_parsed + 1\n print ('subdir: %s' %subdir)\n print ('files_parsed: %f\\n' %files_parsed)\n\n vector = sum([feature_calc(fragment) for feature_calc in features], [])\n\n # ext = 0 is a malware fragment\n # ext = 1 is a non-malware fragment\n vector_str = to_vectorfile_format(ext, vector) + '\\n'\n\n #write features\n out.write(vector_str)\n\n out.close()\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_135","text":"thara3\/FosterCauer_CauerFoster\n# # Foster to Cauer\n# 2019\/05\/06 created by \nimport argparse\nimport sympy\nimport datetime\n\n# version of this script\nmyVersion = '0.0.01'\n\n##############################################################################\n# arg parsing\n##############################################################################\nparser 
= argparse.ArgumentParser(\n prog='Foster2Cauer.py',\n usage='Convert Foster RC network to Cauer RC network.',\n epilog='end',\n add_help=True\n )\n\nparser.add_argument('input_file', help='specify input filename',\n action='store', type=str)\nparser.add_argument('output_file', help='specify output filename',\n action='store', type=str)\n\nparser.add_argument('-r', '--rational_rth',\n help='better accuracy but computationally expensive',\n action='store_true')\nparser.add_argument('--version', action='version',\n version='%(prog)s ' + myVersion)\n\nargs = parser.parse_args()\n\n# Input file, output file, and flag(s):\ninput_file = args.input_file\noutput_file = args.output_file\nrational_rth = args.rational_rth\n\n##############################################################################\n\n\"\"\"\nhttps:\/\/stackoverflow.com\/questions\/13890935\/does-pythons-time-time-return-the-local-or-utc-timestamp\n\"\"\"\n# time stamp when the script started.\ntimestamp = str(datetime.datetime.now()).split('.')[0].replace(\":\", \"-\")\n\n\nsympy.init_printing()\n\ns = sympy.Symbol('s')\ncc1, rc1, tauc1 = sympy.symbols(r\"C_c1, R_c1, \\tau_{c1}\")\ncc2, rc2, tauc2 = sympy.symbols(r\"C_c2, R_c2, \\tau_{c2}\")\ncc3, rc3, tauc3 = sympy.symbols(r\"C_c3, R_c3, \\tau_{c3}\")\n\n# CauerMatSample3x3 used for debugging at jupyter\nCauerMatSample3x3 = sympy.Matrix([[cc1, rc1, tauc1],\n [cc2, rc2, tauc2],\n [cc3, rc3, tauc3]])\n\n\nwith open(input_file, 'r', encoding=\"utf-8\") as fileobj:\n datastr = fileobj.read() # read all data from a file\n adatastr = datastr.rstrip() # remove the last \"\\n\"\n datalist = adatastr.split(\"\\n\") # create a list (size: n row * 1 column)\n\nc_list = list() # a list for foster network Cth\nr_list = list() # a list for foster network Rth\n\nfor line in datalist: # read one row at a time\n tmplist = line.split() # split a row into a list\n # print(tmplist)\n if tmplist == []: # skip empty rows\n continue\n if tmplist[0][0] == '#': # skip comment rows\n continue\n if tmplist[0][0:6] == \"STAGES\": # number of RC stages\n stages = int(tmplist[1])\n print(\"stages = \" + str(stages))\n continue\n # start reading actual data\n # (1st column is stage number)\n c_list.append(tmplist[1]) # Cth on the 2nd column\n r_list.append(tmplist[2]) # Rth on the 3rd column\n\n\nCauerMat = sympy.zeros(stages, 3) # Final results will be stored here.\n\nFosterMat = sympy.zeros(stages, 3) # Input data will be stored here\n\nfor i in range(stages):\n FosterMat[i, 0] = sympy.Rational(c_list[i])\n\n # By default, reduced the accuracy level by not Rationalizing Rth.\n FosterMat[i, 1] = sympy.Rational(r_list[i]) if rational_rth else r_list[i]\n\n FosterMat[i, 2] = FosterMat[i, 0] * FosterMat[i, 1]\n\n\n# ### As shown in the CauerMatSample3x3, variables line up in ascending order.\n# Cc1 and Rc1 pair represents the first stage of the Cauer model.\n# They are next to Junction.\n# So as the Cf1 and Rf1 of the Foster model.\n\n\n# # FosterMatrix\n# This is a faster way to calculate the coeffcients of pf and qf,\n# in higher stages.\n\naMatFoster = sympy.zeros(stages, stages+1)\nbMatFoster = sympy.zeros(stages+1, stages+1)\n\naMatFoster[0, 1] = FosterMat[stages-1, 1]\nbMatFoster[0, 1] = 1\nbMatFoster[1, 1] = FosterMat[stages-1, 2]\n\nfor i in range(2, stages+1):\n aMatFoster[:i, i] = \\\n FosterMat[stages - i, 2] * \\\n aMatFoster[:i-1, i-1].row_insert(0, sympy.Matrix([0])) + \\\n aMatFoster[:i-1, i-1].row_insert(i-1, sympy.Matrix([0])) + \\\n FosterMat[stages - i, 1] * bMatFoster[:i, i-1]\n\n 
bMatFoster[:i+1, i] = \\\n FosterMat[stages - i, 2] * \\\n bMatFoster[:i, i-1].row_insert(0, sympy.Matrix([0])) + \\\n bMatFoster[:i, i-1].row_insert(i, sympy.Matrix([0]))\n\nsvector4Coeff_a = sympy.Matrix(stages, 1, lambda i, j: s**i)\nsvector4Coeff_b = sympy.Matrix(stages+1, 1, lambda i, j: s**i)\nsvector4Coeff_a, svector4Coeff_b, stages\n\nZfall = \\\n sympy.Poly(sympy.transpose(\n aMatFoster.col(stages)).dot(svector4Coeff_a), s) \/ \\\n sympy.Poly(sympy.transpose(\n bMatFoster.col(stages)).dot(svector4Coeff_b), s)\n\n\n# # Recursive Foster to Cauer conversion\n# For details, check\n# \"20190504_Foster2Cauer3rdOrder_MatrixCalc.ipynb\" and\n# \"20190504_Foster2Cauer3rdOrder_MatrixCalc_recursive_pre.ipynb\"\n\nfor i in range(stages):\n (pf, qf) = sympy.fraction(Zfall)\n pf = sympy.Poly(pf, s)\n qf = sympy.Poly(qf, s)\n CauerMat[i, 0] = qf.nth(stages-i)\/pf.nth(stages-1-i)\n\n Yfall = (1\/Zfall - CauerMat[i, 0]*s).cancel()\n (qf, pf) = sympy.fraction(Yfall)\n qf = sympy.Poly(qf, s)\n pf = sympy.Poly(pf, s)\n CauerMat[i, 1] = pf.nth(stages-1-i)\/qf.nth(stages-1-i)\n\n # calculate tauc\n CauerMat[i, 2] = CauerMat[i, 0] * CauerMat[i, 1]\n\n Zfall = (1\/Yfall - CauerMat[i, 1]).cancel()\n\n\n# # Final results in floating values\n\nCauerMat_float = sympy.zeros(stages, 3)\nfor i in range(stages):\n for j in range(3):\n CauerMat_float[i, j] = float(CauerMat[i, j])\n\n\n# # Resistance sum value check\nRc_all = 0\nRf_all = 0\nfor i in range(stages):\n Rc_all = Rc_all + CauerMat_float[i, 1]\n Rf_all = Rf_all + FosterMat[i, 1]\nprint(\"Rc_all = %g, Rf_all = %g\" % (Rc_all, Rf_all))\n\nepsilon = 1e-8\nres = float(abs(Rc_all - Rf_all))\nif res > epsilon:\n print(\"Rc_all and Rf_all don't match, ERROR!!!\")\n\n\n# # output results\nwith open(output_file, \"w\") as fileobj:\n tmpstring = \"\"\n # header\n tmpstring = \"## Foster2Cauer results \" + str(stages) + \"stages\\n\"\n fileobj.write(tmpstring)\n tmpstring = \"## Created: \" + timestamp + \"\\n\"\n fileobj.write(tmpstring)\n tmpstring = \"# First stage (Cc1 and Rc1) is connected to Junction.\\n\"\n fileobj.write(tmpstring)\n tmpstring = \"STAGES=\\t\" + str(stages) + \"\\n\\n\"\n fileobj.write(tmpstring)\n\n tmpstring = \"# stage\" + \"\\t\" + \"C_cauer\" + \"\\t\\t\\t\" + \\\n \"R_cauer\" + \"\\t\\t\\t\" + \"Tau_cauer\\n\"\n fileobj.write(tmpstring)\n for i in range(stages):\n tmpstring = str(i+1) + \"\\t\" + \\\n str(CauerMat_float[i, 0]) + \"\\t\" + \\\n str(CauerMat_float[i, 1]) + \"\\t\" + \\\n str(CauerMat_float[i, 2]) + \"\\n\"\n fileobj.write(tmpstring)\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_136","text":"from copy import deepcopy\nfrom IPython.display import Math\nfrom ipywidgets import *\nimport numpy as np\nimport pandas as pd\nfrom patsy import dmatrix\nimport scipy.sparse as sp\nfrom scipy.special import comb\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\nimport sys\nfrom tqdm import tnrange, tqdm_notebook\nimport warnings\n\ndef TauPath(lam_1 = None,\n lam_2 = None,\n beta = None,\n zeta = None,\n delta = None,\n alpha = None,\n P = None,\n P_interaction = None,\n taus = np.logspace(start=0, stop=-2, num=50, base=10),\n CD_J_AS = None,\n active_set = None,\n active_interaction_set = None,\n B = None,\n B_interaction = None,\n K_main = None, \n K_interaction = None, \n Xval = None,\n Xmin = None,\n Xmax = None,\n Y = None,\n Yval = None,\n y_scaler = None,\n S = None,\n S_interaction = None,\n interaction_terms = None,\n eval_criteria = None,\n path = None,\n r = None,\n 
logging = False):\n \"\"\"Hyperparameter grid search for tau penalty for nonparametric additive models with interactions under hierarchy\n \n Args:\n lam_1: smoothness penalty for b-splines, float scaler.\n lam_2: L0 penalty for b-splines, float scaler.\n beta: coefficients for main effects, list of arrays of shapes [(Ki+1, 1), ...].\n zeta: binary vector to track which main effects are in the active set, a bool array of shape (1, d)\n corresponds to z_i's in the paper.\n delta: coefficients for interaction effects, list of arrays of shapes [(Kij+1, 1), ...].\n corresponds to theta in the paper.\n alpha: binary vector to track which interactions effects are in the active interaction set, a bool array of shape (1, Imax)\n corresponds to z_ij's in the paper.\n P: B^T*B + 2*N*(lam_1*S_i + eps*I) matrices for main effects, list of sparse matrices of shapes [(Ki+1, Ki+1), ...].\n eps is a small epsilon for numerical stability.\n P_interaction: B^T*B + 2*N*(lam_1*S_ij + eps*I) matrices for interaction effects, list of sparse matrices of shapes [(Kij+1, Kij+1), ...].\n eps is a small epsilon for numerical stability.\n taus: thresholding penalty for generating feasible subsets of main\/interaction effects that maintain strong hierarchy, array of float scalers.\n CD_J_AS: function for cyclic block coordinate descent over an active set, callable.\n active_set: indices of main effects to optimize over, a numpy int array.\n active_interaction_set: indices of interaction effects to optimize over, a numpy int array.\n B: B-spline transformed sparse matrices for main effects, list of sparse matrices of shapes [(N, Ki+1), ...].\n B_interaction: B-spline transformed sparse matrices for interaction effects, list of sparse matrices of shapes [(N, Kij+1), ...].\n K_main: Number of knots used for each main effect, a list of int scalers of shape (d,) \n K_interaction: Number of knots used for each interaction effect, a list of int scalers of shape (Imax,) \n Xval: validation covariates, a float numpy array of shape (Nval, p).\n Xmin: minimum values of X for all covariates, needed for spline generation, a float numpy array of shape (1, d).\n Xmax: maximum values of X for all covariates, needed for spline generation, a float numpy array of shape (1, d).\n Y: training target responses, a float numpy array of shape (N,).\n Yval: validation target responses, a float numpy array of shape (Nval,).\n y_scaler: sklearn transformation object on responses to inverse transform the responses, see data_utils.py\n supports z-normalization\/identity.\n S: Smoothness matrices for main effects, list of sparse matrices of shapes [(Ki+1, Ki+1), ...].\n S_interaction: Smoothness matrices for interaction effects, list of sparse matrices of shapes [(Kij+1, Kij+1), ...].\n interaction_terms: list of interaction effects to consider if only a subset need to be considered, \n a 2D numpy array of of shape (Imax, 2).\n eval_criteria: evaluation metric for hyperparameter tuning,\n - 'mse', 'mae'\n path: folder path to log results to, str.\n r: relative scaling factor for L0 penalty between main and interaction effects.\n We consider r=1.0 (corresponds to alpha symbol in the paper), float scaler. 
\n logging: whether to log results to a file, bool scaler.\n \n Returns:\n optimal_solution_path: (beta_opt, delta_opt, zeta_opt, alpha_opt, tau_opt, J_opt, active_set_opt, active_interaction_set_opt, val_loss_opt).\n sparse_solution_path: (beta_sp, delta_sp, zeta_sp, alpha_sp, tau_sp, J_sp, active_set_sp, active_interaction_set_sp, val_loss_sp).\n \"\"\"\n \n d = len(B)\n N = Y.shape[0]\n val_loss_opt = np.inf\n val_loss = np.inf*np.ones((taus.shape[0],),dtype=float)\n val_std_err = np.inf*np.ones((taus.shape[0],),dtype=float)\n sparsity = (active_set.shape[0]+active_interaction_set.shape[0])*np.ones((taus.shape[0],),dtype=float)\n J = np.zeros((taus.shape[0],),dtype=float)\n eps = 1e-8\n if eval_criteria == 'mse':\n evaluate = mean_squared_error\n elif eval_criteria == 'mae':\n evaluate = mean_absolute_error\n else:\n raise ValueError(\"Evaluation criteria {} is not supported\".format(eval_criteria))\n \n \n # Generate b-splines for validation set for active set\n Bval = [None]*d\n for k in active_set:\n Bval[k] = sp.csr_matrix(dmatrix(\"bs(x, df={}, degree=3, include_intercept=False, lower_bound={}, upper_bound={})\".format(K_main[k], Xmin[k], Xmax[k]), {\"x\": Xval[:,k]}),dtype=np.float64)\n Bval_interaction = [None]*len(interaction_terms)\n for k in active_interaction_set:\n f_i, f_j = interaction_terms[k]\n Bval_interaction[k] = sp.csr_matrix(dmatrix(\"te(bs(x1, df={}, degree=3, include_intercept=False, lower_bound={}, upper_bound={}), bs(x2, df={}, degree=3, include_intercept=False, lower_bound={}, upper_bound={}))\".format(K_interaction[f_i], Xmin[f_i], Xmax[f_i], K_interaction[f_j], Xmin[f_j], Xmax[f_j]), {\"x1\": Xval[:,f_i], \"x2\": Xval[:,f_j]}),dtype=np.float64)\n\n # Tau path\n beta_HS = [deepcopy(beta)]*taus.shape[0]\n zeta_HS = [deepcopy(zeta)]*taus.shape[0]\n delta_HS = [deepcopy(delta)]*taus.shape[0]\n alpha_HS = [deepcopy(alpha)]*taus.shape[0]\n \n for i, tau in tqdm_notebook(enumerate(taus),desc='$\\\\tau$'):\n \n if i==0:\n beta_current = deepcopy(beta_HS[0])\n delta_current = deepcopy(delta_HS[0])\n else:\n beta_current = deepcopy(beta_HS[i-1])\n delta_current = deepcopy(delta_HS[i-1])\n \n if len(active_set)==0 and len(active_interaction_set)==0:\n Ypred = np.mean(Y)*np.ones(Y.shape,dtype=float)\n elif len(active_set)==0 and len(active_interaction_set)>0:\n Ypred = np.mean(Y)*np.ones(Y.shape,dtype=float)\\\n +np.array(sum([(B_interaction[k]).dot(delta_current[k]) for k in active_interaction_set])).reshape(Y.shape)\n elif len(active_set)>0 and len(active_interaction_set)==0:\n Ypred = np.mean(Y)*np.ones(Y.shape,dtype=float)\\\n +np.array(sum([(B[k]).dot(beta_current[k]) for k in active_set])).reshape(Y.shape)\n elif len(active_set)>0 and len(active_interaction_set)>0:\n Ypred = np.mean(Y)*np.ones(Y.shape,dtype=float)\\\n +np.array(sum([(B[k]).dot(beta_current[k]) for k in active_set])).reshape(Y.shape)\\\n +np.array(sum([(B_interaction[k]).dot(delta_current[k]) for k in active_interaction_set])).reshape(Y.shape) \n \n z_max = np.max([np.max(zeta_HS[i]), np.max(alpha_HS[i])])\n zeta_HS[i] = np.where((zeta_HS[i]\/z_max)>tau,\n np.ones(zeta_HS[i].shape,dtype=float),\n np.zeros(zeta_HS[i].shape,dtype=float))\n alpha_HS[i] = np.where((alpha_HS[i]\/z_max)>tau,\n np.ones(alpha_HS[i].shape,dtype=float),\n np.zeros(alpha_HS[i].shape,dtype=float))\n Ypred, beta_HS[i], zeta_HS[i], delta_HS[i], alpha_HS[i] = CD_J_AS(Ypred = Ypred,\n beta = [deepcopy(beta_current), deepcopy(delta_current)],\n zeta = [zeta_HS[i], alpha_HS[i]],\n active_set = [np.where(zeta_HS[i][0,:]>tau)[0], 
np.where(alpha_HS[i][0,:]>tau)[0]],\n lam = [lam_1, 0.0],\n P = P, \n P_interaction = P_interaction)\n train_loss = evaluate(y_scaler.inverse_transform(Y), y_scaler.inverse_transform(Ypred))\n\n\n if len(active_set)==0 and len(active_interaction_set)==0:\n Yvalpred = np.mean(Y)*np.ones(Yval.shape,dtype=float)\n elif len(active_set)==0 and len(active_interaction_set)>0:\n Yvalpred = np.mean(Y)*np.ones(Yval.shape,dtype=float)\\\n +np.array(sum([(Bval_interaction[k]).dot(delta_HS[i][k]) for k in active_interaction_set])).reshape(Yval.shape)\n elif len(active_set)>0 and len(active_interaction_set)==0:\n Yvalpred = np.mean(Y)*np.ones(Yval.shape,dtype=float)\\\n +np.array(sum([(Bval[k]).dot(beta_HS[i][k]) for k in active_set])).reshape(Yval.shape)\n elif len(active_set)>0 and len(active_interaction_set)>0:\n Yvalpred = np.mean(Y)*np.ones(Yval.shape,dtype=float)\\\n +np.array(sum([(Bval[k]).dot(beta_HS[i][k]) for k in active_set])).reshape(Yval.shape)\\\n +np.array(sum([(Bval_interaction[k]).dot(delta_HS[i][k]) for k in active_interaction_set])).reshape(Yval.shape) \n val_loss[i] = evaluate(y_scaler.inverse_transform(Yval), y_scaler.inverse_transform(Yvalpred))\n val_std_err[i] = (mean_squared_error(y_scaler.inverse_transform(Yval), y_scaler.inverse_transform(Yvalpred))**0.5)\/(Yval.shape[0]**0.5)\n sparsity[i] = np.count_nonzero(zeta_HS[i][0,:]) + np.count_nonzero(alpha_HS[i][0,:])\n J[i] = 0.5*mean_squared_error(Y, Ypred)+\\\n lam_1*sum([(np.transpose(beta_HS[i][k])).dot(S[k].dot(beta_HS[i][k]))[0,0] for k in active_set])+\\\n lam_1*sum([(np.transpose(delta_HS[i][k])).dot(S_interaction[k].dot(delta_HS[i][k]))[0,0] for k in active_interaction_set])+\\\n eps*sum([np.dot(beta_HS[i][k][:,0],beta_HS[i][k][:,0]) for k in active_set])+\\\n eps*sum([np.dot(delta_HS[i][k][:,0],delta_HS[i][k][:,0]) for k in active_interaction_set])+\\\n lam_2*(np.sum(zeta_HS[i][0,:]))+\\\n r*lam_2*(np.sum(alpha_HS[i][0,:])) \n if logging ==True:\n with open(path+'\/Training-HS.csv', \"a\") as f:\n f.write('{:.7f},{:.7f},{:.6f},{:.6f},{:.6f},{:.6f},{},{}\\n'.format(lam_1,lam_2,tau,train_loss, val_loss[i], J[i],np.count_nonzero(zeta_HS[i][0,:]),np.count_nonzero(alpha_HS[i][0,:]))) \n print('{:.7f},{:.7f},{:.6f},{:.6f},{:.6f},{:.6f},{},{}\\n'.format(lam_1,lam_2,tau,train_loss, val_loss[i], J[i],np.count_nonzero(zeta_HS[i][0,:]),np.count_nonzero(alpha_HS[i][0,:])))\n# display(Math(r'\\lambda_1: {:.6f}, \\lambda_2: {:.6f}, Train-MAE: {:.6f}, Val-MAE: {:.6f}, Obj: {:.0f},'.format(lam_1,lam_2,train_loss, val_loss, J)+'\\sum_{j \\in S^c} z_j: '+'{},'.format(np.count_nonzero(zeta[j][0,:]))+'\\sum_{ij \\in S^c} z_{ij}: '+'{}.'.format(np.count_nonzero(alpha[j][0,:]))))\n df = pd.DataFrame(columns=[lam_1, lam_2, tau, *(zeta_HS[i][0,:])])\n with open(os.path.join(path, 'main_support_regularization_path.csv'), 'a') as f:\n df.to_csv(f, header=True, index=False)\n df = pd.DataFrame(columns=[lam_1, lam_2, tau, *(alpha_HS[i][0,:])])\n with open(os.path.join(path, 'interaction_support_regularization_path.csv'), 'a') as f:\n df.to_csv(f, header=True, index=False)\n if val_loss[i] < val_loss_opt:\n val_loss_opt = deepcopy(val_loss[i])\n val_std_err_opt = deepcopy(val_std_err[i])\n beta_opt = deepcopy(beta_HS[i]) \n zeta_opt = deepcopy(zeta_HS[i]) \n delta_opt = deepcopy(delta_HS[i]) \n alpha_opt = deepcopy(alpha_HS[i])\n active_set_opt = np.where(zeta_HS[i][0,:] == 1)[0] \n active_interaction_set_opt = np.where(alpha_HS[i][0,:] == 1)[0]\n tau_opt = deepcopy(tau) \n J_opt = deepcopy(J[i])\n \n# val_loss_percent = 
((val_loss-val_loss_opt*np.ones((taus.shape[0],),dtype=float))\/(val_loss_opt*np.ones((taus.shape[0],),dtype=float)))*100\n if eval_criteria == 'mse':\n val_loss_diff = val_loss**0.5 - val_loss_opt**0.5\n elif eval_criteria == 'mae':\n val_loss_diff = val_loss - val_loss_opt\n else:\n raise ValueError(\"Evaluation criteria {} is not supported\".format(eval_criteria))\n# subset_indices = np.where(val_loss_percent<1)[0] \n subset_indices = np.where(val_loss_diff: Calculate statistics'}\n\n\ndef stats(lst):\n\tif len(lst) < 2:\n\t\thelp.helper(\"stats\")\n\t\treturn None\n\tlist = []\n\tfor x in lst[1:]:\n\t\tlist.append(int(x))\n\t#print(list)\n\tst= {}\n\ttry:\n\t\tst[\"mean\"] = mean(list)\n\t\tst[\"standard deviation\"] = stdev(list)\n\t\tst[\"median\"] = median(list)\n\t\tst[\"mode\"] = mode(list)\n\texcept:\n\t\tpass\n\tfor x,y in st.items():\n\t\tprint(\"%s = %d\" % (x,y))\n\ncoms = {'\/stats' : stats}\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_140","text":"100-1000\n\"\"\"\nScript with classes and functions for nucleosome calling.\n\n@author: , Greenleaf Lab, Stanford University\n\"\"\"\n\nimport numpy as np\nfrom scipy import optimize, signal\nfrom copy import copy\nfrom bisect import bisect_left\nimport pyximport; pyximport.install(setup_args={\"include_dirs\":np.get_include()})\nfrom nucleoatac.multinomial_cov import calculateCov\nfrom nucleoatac.Occupancy import OccupancyTrack\nfrom pyatac.tracks import Track, CoverageTrack\nfrom pyatac.chunk import Chunk\nfrom pyatac.utils import call_peaks, reduce_peaks, read_chrom_sizes_from_bam\nfrom pyatac.chunkmat2d import FragmentMat2D, BiasMat2D\nfrom pyatac.bias import InsertionBiasTrack, PWM\n\n\n#import warnings\n#warnings.filterwarnings('error')\n\n\nclass SignalTrack(Track):\n \"\"\"Class for getting V-plot signal\"\"\"\n def __init__(self, chrom, start, end):\n Track.__init__(self, chrom, start, end, \"signal\")\n def calculateSignal(self, mat, vmat):\n offset=self.start-mat.start-vmat.w\n if offset<0:\n raise Exception(\"Insufficient flanking region on \\\n mat to calculate signal\")\n self.vals = signal.correlate(mat.get(vmat.lower,vmat.upper,\n mat.start + offset, mat.end - offset),\n vmat.mat,mode = 'valid')[0]\n\nclass NormSignalTrack(Track):\n \"\"\"Class for storing normalized signal track\"\"\"\n def __init__(self, chrom, start, end):\n Track.__init__(self, chrom, start, end, \"normalized signal\")\n def calculateNormSignal(self, raw, bias):\n self.vals = raw.get(self.start, self.end) - bias.get(self.start,self.end)\n\nclass BiasTrack(Track):\n \"\"\"Class for getting Bias Signal Track-- Background model\"\"\"\n def __init__(self, chrom, start, end):\n Track.__init__(self, chrom, start, end, \"bias\")\n def calculateBackgroundSignal(self, mat, vmat, nuc_cov):\n offset=self.start-mat.start-vmat.w\n if offset<0:\n raise Exception(\"Insufficient flanking region on \\\n mat to calculate signal\")\n self.vmat = vmat\n self.bias_mat = mat\n self.cov = CoverageTrack(self.chrom, self.start, self.end)\n self.cov.calculateCoverage(self.bias_mat, vmat.lower,\n vmat.upper, vmat.w*2+1)\n self.nuc_cov = nuc_cov.vals\n self.vals = signal.correlate(self.bias_mat.get(vmat.lower,vmat.upper,\n self.bias_mat.start + offset,\n self.bias_mat.end - offset),\n vmat.mat,mode = 'valid')[0]\n self.vals = self.vals * self.nuc_cov\/ self.cov.vals\n\n\n\nclass SignalDistribution:\n \"\"\"Class for determining distribution of signal\"\"\"\n def __init__(self, position, vmat, bias_mat, reads):\n self.position = 
position\n self.reads = reads\n self.vmat = vmat\n bias_mat = bias_mat.get(vmat.lower,vmat.upper,position - vmat.w,position + vmat.w + 1)\n self.prob_mat = bias_mat \/ np.sum(bias_mat)\n self.probs = self.prob_mat.flatten()\n def simulateReads(self):\n sim_vect = np.random.multinomial(self.reads,self.probs)\n sim_mat = np.reshape(sim_vect, self.vmat.mat.shape)\n return sim_mat\n def simulateDist(self, numiters = 1000):\n self.scores = map(lambda x: np.sum(self.simulateReads() * self.vmat.mat),range(numiters))\n def analStd(self):\n flatv = np.ravel(self.vmat.mat)\n var = calculateCov(self.probs, flatv, self.reads)\n return np.sqrt(var)\n def analMean(self):\n return np.sum(self.prob_mat * self.vmat.mat * self.reads)\n\n\n\ndef norm(x, v, w, mean):\n \"\"\"compute values of normal pdf with given mean and sd at values in x\"\"\"\n norm = (1.0\/(np.sqrt(2*np.pi*v)) *\n np.exp(-(x - mean)**2\/(2*v)))\n norm = norm * (w\/max(norm))\n return norm\n\nclass Nucleosome(Chunk):\n \"\"\"Class for storing information about a single nucleosome\"\"\"\n def __init__(self, pos,nuctrack):\n self.chrom = nuctrack.chrom\n self.start = pos\n self.end = pos + 1\n self.nfr_cov = nuctrack.nfr_cov.get(pos = pos)\n self.nuc_cov = nuctrack.nuc_cov.get(pos = pos)\n self.nuc_signal = nuctrack.nuc_signal.get(pos = pos)\n self.norm_signal = nuctrack.norm_signal.get(pos = pos)\n self.smoothed = nuctrack.smoothed.get(pos= pos)\n def getLR(self,nuctrack):\n mat = nuctrack.mat.get(nuctrack.params.lower,nuctrack.params.upper,\n self.start - nuctrack.params.vmat.w, self.start + nuctrack.params.vmat.w +1)\n null_mat = nuctrack.bias_mat.get(nuctrack.params.lower,nuctrack.params.upper,\n self.start - nuctrack.params.vmat.w, self.start + nuctrack.params.vmat.w +1)\n bias_mat =nuctrack.bias_mat_prenorm.get(nuctrack.params.lower,nuctrack.params.upper,\n self.start - nuctrack.params.vmat.w, self.start + nuctrack.params.vmat.w +1)\n nuc_model = nuctrack.params.vmat.mat * bias_mat\n nuc_model = nuc_model \/ np.sum(nuc_model)\n null_model = null_mat \/ np.sum(null_mat)\n nuc_lik = np.sum(np.log(nuc_model) * mat)\n null_lik = np.sum(np.log(null_model) * mat)\n self.lr = nuc_lik - null_lik\n def getZScore(self, nuctrack):\n s = SignalDistribution(self.start, nuctrack.params.vmat, nuctrack.bias_mat,\n self.nuc_cov)\n std = s.analStd()\n self.z = self.norm_signal \/ std\n def getOcc(self, nuctrack):\n try:\n self.occ = nuctrack.occ.get(pos = self.start)\n self.occ_lower = nuctrack.occ_lower.get(pos = self.start)\n self.occ_upper = nuctrack.occ_upper.get(pos = self.start)\n except:\n self.occ = np.nan\n self.occ_lower = np.nan\n self.occ_upper = np.nan\n def getFuzz(self, nuctrack):\n def addNorms(x,params):\n \"\"\"Add several normal distributions together\"\"\"\n l = len(x)\n fit = np.zeros(l)\n i = len(params)\/3\n for j in range(i):\n fit += norm(x,params[j*3],params[3*j+1],params[3*j+2])\n return fit\n def err_func(pars,y):\n \"\"\"error function for normal fit; to be used for fitNorm\"\"\"\n x = np.linspace(0,len(y)-1,len(y))\n return sum((addNorms(x, pars) - y)**2)\n def fitNorm(guess, bound, sig):\n \"\"\"Fit a normal to the signal with lower and upperbounds to sd\"\"\"\n a = (sig,)\n res = optimize.minimize(err_func,guess,args = a, bounds=bound,method=\"L-BFGS-B\")\n return res\n index = self.start - nuctrack.start\n allnucs = nuctrack.sorted_nuc_keys\n x = bisect_left(allnucs,index)\n if x == 0:\n left = index - nuctrack.params.nonredundant_sep\/3\n means = (nuctrack.params.nonredundant_sep\/3,)\n elif index - allnucs[x-1] < 
nuctrack.params.nonredundant_sep:\n left = allnucs[x-1]\n means = (index - allnucs[x-1],0)\n else:\n left = index - nuctrack.params.nonredundant_sep\/3\n means = (nuctrack.params.nonredundant_sep\/3,)\n if x == len(allnucs)-1:\n right = index + nuctrack.params.nonredundant_sep\/3 + 1\n elif allnucs[x+1] - index < nuctrack.params.nonredundant_sep:\n right = allnucs[x+1]\n means += (allnucs[x+1] - left,)\n else:\n right = index + nuctrack.params.nonredundant_sep\/3 +1\n sig = nuctrack.smoothed.vals[left:right]\n sig[sig<0] = 0\n if len(means)==1:\n bounds = ((2**2,50**2),(0.001,max(sig)*1.1),(means[0]-10,means[0]+10))\n guesses = (nuctrack.params.smooth_sd ** 2,max(sig)*0.9,means[0])\n elif len(means)==2:\n bounds = ((2**2,50**2),(0.001,max(sig)*1.1),(means[0]-10,means[0]+10),\n (2**2,50**2),(0.001,max(sig)*1.1),(means[1]-10,means[1]+10))\n guesses = (nuctrack.params.smooth_sd ** 2,max(sig)*0.9,means[0],\n nuctrack.params.smooth_sd ** 2,max(sig)*0.9,means[1])\n elif len(means)==3:\n bounds = ((2**2,50**2),(0.001,max(sig)*1.1),(means[0]-10,means[0]+10),\n (2**2,50**2),(0.001,max(sig)*1.1),(means[1]-10,means[1]+10),\n (2**2,50**2),(0.001,max(sig)*1.1),(means[2]-10,means[2]+10))\n guesses = (nuctrack.params.smooth_sd ** 2,max(sig)*0.9,means[0],\n nuctrack.params.smooth_sd ** 2,max(sig)*0.9,means[1],\n nuctrack.params.smooth_sd ** 2,max(sig)*0.9,means[2])\n res= fitNorm(guesses, bounds, sig)\n self.fuzz= np.sqrt(res['x'][0])\n self.weight = res['x'][1]\n self.fit_pos = res['x'][2]+left\n def asBed(self):\n out = \"\\t\".join(map(str,[self.chrom, self.start, self.end, self.z, self.occ, self.occ_lower, self.occ_upper, self.lr,\n self.norm_signal, self.nuc_signal, self.nuc_cov, self.nfr_cov,\n self.fuzz]))\n return out\n def write(self, handle):\n handle.write(self.asBed() + \"\\n\")\n\n\nclass NucParameters:\n \"\"\"Class for storing parameters related to nucleosome calling\"\"\"\n def __init__(self, vmat, fragmentsizes, bam, fasta, pwm,\n occ_track = None, atac = True,\n sd = 25, nonredundant_sep = 120, redundant_sep = 25,\n min_z = 3, min_lr = 0, min_reads = 1):\n self.atac = atac\n self.vmat = vmat\n self.lower = vmat.lower\n self.upper= vmat.upper\n self.window = vmat.mat.shape[1]\n self.fragmentsizes= fragmentsizes\n self.min_reads = min_reads\n self.min_z = min_z\n self.min_lr = min_lr\n self.smooth_sd = sd\n self.redundant_sep = redundant_sep\n self.nonredundant_sep = nonredundant_sep\n self.fasta = fasta\n self.pwm = PWM.open(pwm)\n self.chrs = read_chrom_sizes_from_bam(bam)\n self.bam = bam\n self.occ_track = occ_track\n\n\n\nclass NucChunk(Chunk):\n \"\"\"Class for storing and determining collection of nucleosome positions\n \"\"\"\n def __init__(self, chunk):\n self.start = chunk.start\n self.end = chunk.end\n self.chrom = chunk.chrom\n def initialize(self, parameters):\n self.params = parameters\n def getFragmentMat(self):\n self.mat = FragmentMat2D(self.chrom, self.start - max(self.params.window,self.params.upper\/2+1),\n self.end + max(self.params.window,self.params.upper\/2+1), 0, self.params.upper, atac = self.params.atac)\n self.mat.makeFragmentMat(self.params.bam)\n def makeBiasMat(self):\n self.bias_mat = BiasMat2D(self.chrom, self.start - self.params.window,\n self.end + self.params.window, 0, self.params.upper)\n bias_track = InsertionBiasTrack(self.chrom, self.start - self.params.window - self.params.upper\/2,\n self.end + self.params.window + self.params.upper\/2 + 1, log = True)\n if self.params.fasta is not None:\n bias_track.computeBias(self.params.fasta, 
self.params.chrs, self.params.pwm)\n self.bias_mat.makeBiasMat(bias_track)\n self.bias_mat_prenorm = BiasMat2D(self.chrom, self.start - self.params.window,\n self.end + self.params.window, 0, self.params.upper)\n self.bias_mat_prenorm.mat = copy(self.bias_mat.mat)\n self.bias_mat.normByInsertDist(self.params.fragmentsizes)\n def getNucSignal(self):\n \"\"\"Gets Nucleosome Signal Track\"\"\"\n self.nuc_cov = CoverageTrack(self.chrom, self.start,\n self.end)\n self.nuc_cov.calculateCoverage(self.mat, self.params.lower, self.params.upper,\n self.params.window)\n self.bias = BiasTrack(self.chrom, self.start,\n self.end)\n self.bias.calculateBackgroundSignal(self.bias_mat, self.params.vmat, self.nuc_cov)\n self.nuc_signal = SignalTrack(self.chrom, self.start,\n self.end)\n self.nuc_signal.calculateSignal(self.mat, self.params.vmat)\n self.norm_signal = NormSignalTrack(self.chrom, self.start, self.end)\n self.norm_signal.calculateNormSignal(self.nuc_signal,self.bias)\n def getNFR(self):\n \"\"\"get number of reads of sub-nucleosomal length\"\"\"\n self.nfr_cov = CoverageTrack(self.chrom, self.start, self.end)\n self.nfr_cov.calculateCoverage(self.mat, 0, self.params.lower,\n self.params.window)\n def smoothSignal(self):\n \"\"\"Smooth thenormalized signal track\"\"\"\n window_len = 6 * self.params.smooth_sd + 1\n self.smoothed = Track(self.chrom,self.start,self.end, \"Smooth Signal\")\n tmp = copy(self.norm_signal.vals)\n self.smoothed.assign_track(tmp)\n self.smoothed.vals[ self.smoothed.vals < 0] = 0\n self.smoothed.smooth_track(window_len, window = \"gaussian\",\n sd = self.params.smooth_sd, mode = 'same',\n norm = True)\n def getOcc(self):\n \"\"\"gets occupancy track-- either reads in from bw handle given, or makes new\"\"\"\n self.occ = Track(self.chrom,self.start,self.end,\"Occupancy\")\n self.occ.read_track(self.params.occ_track)\n lower_file = self.params.occ_track[:-11] + 'lower_bound.bedgraph.gz'\n self.occ_lower = Track(self.chrom,self.start,self.end,\"Occupancy\")\n self.occ_lower.read_track(lower_file)\n upper_file = self.params.occ_track[:-11] + 'upper_bound.bedgraph.gz'\n self.occ_upper = Track(self.chrom,self.start,self.end,\"Occupancy\")\n self.occ_upper.read_track(upper_file)\n def findAllNucs(self):\n \"\"\"Find peaks in data\"\"\"\n self.nuc_collection = {}\n combined = self.norm_signal.vals + self.smoothed.vals\n #find peaks in normalized sigal\n cands1 = call_peaks(combined, min_signal = 0,\n sep = self.params.redundant_sep,\n boundary = self.params.nonredundant_sep\/2, order = self.params.redundant_sep\/2)\n for i in cands1:\n nuc = Nucleosome(i + self.start, self)\n if nuc.nuc_cov > self.params.min_reads:\n nuc.getLR(self)\n if nuc.lr > self.params.min_lr:\n nuc.getZScore(self)\n if nuc.z >= self.params.min_z:\n nuc.getOcc(self)\n self.nuc_collection[i] = nuc\n self.sorted_nuc_keys = np.array(sorted(self.nuc_collection.keys()))\n self.nonredundant = reduce_peaks( self.sorted_nuc_keys,\n map(lambda x: self.nuc_collection[x].z, self.sorted_nuc_keys),\n self.params.nonredundant_sep)\n self.redundant = np.setdiff1d(self.sorted_nuc_keys, self.nonredundant)\n def fit(self):\n x = np.linspace(0,self.length() -1, self.length())\n fit = np.zeros(self.length())\n for nuc in self.sorted_nuc_keys:\n self.nuc_collection[nuc].getFuzz(self)\n fit += norm(x,self.nuc_collection[nuc].fuzz**2, self.nuc_collection[nuc].weight, self.nuc_collection[nuc].fit_pos)\n self.fitted = Track(self.chrom, self.start, self.end,\n \"Fitted Nucleosome Signal\")\n self.fitted.assign_track(fit)\n def 
makeInsertionTrack(self):\n \"\"\"make insertion track for chunk\"\"\"\n self.ins = self.mat.getIns()\n def process(self, params):\n \"\"\"wrapper to carry out all methods needed to call nucleosomes and nfrs\"\"\"\n self.initialize(params)\n self.getFragmentMat()\n self.makeBiasMat()\n self.getNucSignal()\n self.getNFR()\n self.smoothSignal()\n if params.occ_track is not None:\n self.getOcc()\n self.findAllNucs()\n self.fit()\n self.makeInsertionTrack()\n def removeData(self):\n \"\"\"remove data from chunk-- deletes all attributes\"\"\"\n names = self.__dict__.keys()\n for name in names:\n delattr(self,name)\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_141","text":"import numpy as np\n\n# for interactive drawing\nimport tkanvas\nimport pykalman\nimport scipy.stats\n\n\ndef kf_loglik(C, mean, cov, obs):\n pred_obs_mean = np.dot(C, mean)\n pred_obs_cov = np.dot(C, np.dot(cov, C.T))\n obs_arr = np.array(obs)\n # likelihood of this sample\n return scipy.stats.multivariate_normal.logpdf(\n obs_arr, mean=pred_obs_mean, cov=pred_obs_cov\n )\n\nclass KFDisplay(object):\n def __init__(self, A, C, sigma_a, sigma_c, mu_0, sigma_0, path, frame_time=2000, reject_lik=None):\n self.track = True\n self.A = A\n self.C = C\n self.reject_lik = reject_lik\n self.sigma_c = sigma_c\n \n self.path = iter(path)\n self.kalman_filter = pykalman.KalmanFilter(\n transition_matrices=A,\n observation_matrices=C,\n transition_covariance=sigma_a,\n observation_covariance=sigma_c,\n initial_state_mean=mu_0,\n initial_state_covariance=sigma_0,\n )\n self.obs_path = []\n self.track_path = []\n self.obs = next(self.path)\n self.src = tkanvas.TKanvas(\n draw_fn=self.kalman_draw,\n frame_time=frame_time,\n w=800,\n h=800,\n bgcolor=\"black\",\n )\n self.mean, self.cov = mu_0, sigma_0\n self.new_mean, self.new_cov = self.kalman_filter.filter_update(\n self.mean, self.cov, observation=self.obs\n )\n self.lik = kf_loglik(self.C, self.new_mean, self.cov, self.obs)\n\n self.kalman_iter = self.draw_kalman_filter()\n\n # Draw each step of the Kalman filter onto a TKinter canvas\n def draw_kalman_filter(self):\n\n self.src.clear()\n font = (\"Arial\", 24)\n for p in self.obs_path:\n self.src.circle(p[0], p[1], 2, fill=\"white\")\n for p in self.track_path:\n self.src.circle(p[0], p[1], 2, fill=\"blue\")\n if self.obs is not None:\n self.obs_path.append(self.obs)\n self.track_path.append(self.new_mean[:2])\n # don't bother drawing circles when at speed\n\n # draw the prior\n self.src.normal(self.mean[:2], self.cov[:2, :2], outline=\"#0000ff\")\n loglik = self.src.text(\n 20, 40, text=\"%.0f\"%self.lik, anchor=\"w\", fill=\"gray\", font=(\"Arial\", 10)\n )\n if self.src.frame_time < 50:\n return\n text = self.src.text(\n 20, 20, text=\"Prior P(X_t)\", anchor=\"w\", fill=\"gray\", font=font\n )\n self.src.to_front(text)\n yield 0 # this is a trick to allow to \"return\" here but resume later\n ax = np.dot(self.A, self.mean)\n acov = np.dot(np.dot(self.A, self.cov), self.A.T)\n # prediction after linear dynamics\n self.src.normal(ax[:2], acov[:2, :2], outline=\"#00ff00\", dash=(2, 4))\n self.src.modify(text, text=\"Prediction f(x_(t-1)) -> x_t\")\n self.src.to_front(text)\n yield 0\n # prediction after linear dynamics\n self.src.normal(ax[:2], acov[:2, :2], outline=\"#dd00ff\", dash=(2, 2))\n self.src.modify(text, text=\"Expected observation y_t g(x_t) -> y'_t\")\n self.src.to_front(text)\n yield 0\n if self.obs is not None:\n # observation (if there is one)\n self.src.circle(self.obs[0], 
self.obs[1], 5, fill=\"#ffffff\")\n # src.modify(text, text=\"Observation y_t\")\n # uncertainty of observation\n self.src.normal(\n self.obs, self.sigma_c[:2, :2], outline=\"#6600ff\", dash=(2, 2)\n )\n self.src.modify(text, text=\"Observation w\/uncertainty\")\n self.src.to_front(text)\n yield 0\n yield 0\n # posterior\n self.src.normal(self.new_mean[:2], self.new_cov[:2, :2], outline=\"#8899ff\")\n self.src.modify(text, text=\"Posterior P(Xt|Yt)\")\n self.src.to_front(text)\n yield 0\n\n # draw the Kalman filter updates interactively\n\n def kalman_draw(self, src):\n\n if self.src.frame_time > 20:\n # slowly speed up over time\n self.src.frame_time = src.frame_time * 0.95\n try:\n next(self.kalman_iter)\n # we've drawn all the steps, so make another update\n except StopIteration:\n self.mean, self.cov = self.new_mean, self.new_cov\n try:\n self.obs = next(self.path) \n except StopIteration:\n src.quit(None)\n return\n \n self.lik = kf_loglik(self.C, self.mean, self.cov, self.obs)\n if self.reject_lik is None or self.lik>self.reject_lik: \n self.new_mean, self.new_cov = self.kalman_filter.filter_update(\n self.mean, self.cov, observation=self.obs\n )\n \n self.kalman_iter = self.draw_kalman_filter()\n return\n\n\n\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_142","text":"from IPython.display import Image\nfrom IPython.core.display import HTML \nimport numpy as np\nimport sympy as sp\nimport random as r\nimport time\nimport matplotlib.pyplot as plt\nimport ipyturtle as turtle\n\nfrom scipy.ndimage.filters import gaussian_filter1d\nfrom scipy.signal import savgol_filter"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_143","text":"src\/S_optic.py\n## Calculation of optic mode contributions to entropy (S)\r\n\r\nimport math\r\nimport numpy as np\r\nfrom scipy.integrate import quad\r\n\r\n#import params.py for all input parameters\r\nfrom params import *\r\n\r\n##########################################################################\r\n\r\nS_o = []\r\n\r\nfor i in ATEMP:\r\n x_L = wc_L*CONV\/i #define lower bound of optic box integral\r\n \r\n x_U = wc_U*CONV\/i #define upper bound of optic box integral\r\n \r\n def optic_1(x1):\r\n return x1\/(math.exp(x1)-1.)\r\n\r\n optic_quad_1, error = quad(optic_1, x_L, x_U)\r\n\r\n optic_S_1 = 3.*AVO*BOLTZ*(1.0-1.\/(Natoms*Z)-q_c)*optic_quad_1\r\n\r\n # Set up second function\r\n\r\n def optic_2(x2):\r\n return math.log(1. 
- math.exp(-x2))\r\n\r\n optic_quad_2, error = quad(optic_2, x_L, x_U)\r\n \r\n optic_S_2 = 3.*AVO*BOLTZ*(1.0-1.\/(Natoms*Z)-q_c)*optic_quad_2\r\n\r\n S_o.append(optic_S_1 - optic_S_2)\r\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_132","text":"################################################################################\n# #\n# Advection in 2D of a Passive Scalar #\n# #\n################################################################################\n\nfrom __future__ import print_function, division\nimport os\nimport sys; sys.dont_write_bytecode = True\nsys.path.insert(0, '..\/script\/')\nsys.path.insert(0, '..\/script\/analysis')\nfrom subprocess import call\nfrom shutil import copyfile\nimport glob\nimport numpy as np\nimport scipy as sp\nfrom scipy import optimize\nimport hdf5_to_dict as io\nimport util\nfrom bhlight import bcall\nimport multiprocessing\n\nTMP_DIR = 'TMP'\nTMP_BUILD = 'build_tmp.py'\nutil.safe_remove(TMP_DIR)\n\nPROBLEM = 'advection2d'\nAUTO = '-auto' in sys.argv\nMPI = '-mpi' in sys.argv\nMOVIE = '-movie' in sys.argv\nRES = [64,128,256]\nif AUTO:\n import pickle\nelse:\n import matplotlib as mpl; mpl.use('Agg')\n from matplotlib import animation, rc\n from matplotlib import pyplot as plt\n rc('font',size=18)\n\nkwave = 2*np.pi\namp = 1.0\nnspace = 2.0\nddiag = np.sqrt(nspace)\ncadv = 0.5*ddiag\ncsqr = cadv**2\ngamma = np.sqrt(1.\/(1. - csqr))\nqsqr = gamma**2 - 1.0\nu1d = np.sqrt(qsqr\/nspace)\nc1d = cadv \/ ddiag\n\ndef phi_true(t,x,y):\n phi_x = np.cos(kwave*(x - c1d*t))\n phi_y = np.cos(kwave*(y - c1d*t))\n return amp*phi_x*phi_y\n\nutil.make_dir(TMP_DIR)\nos.chdir('..\/prob\/' + PROBLEM)\ncopyfile('build.py', TMP_BUILD)\n\n# Since this test is designed to run on a single machine (no batch scripts)\n# set openmpi to only use a few threads. 
Let MPI handle the rest.\nif MPI:\n num_mpi = 4\n num_cpus = multiprocessing.cpu_count()\n os.environ['OMP_NUM_THREADS'] = str(int(np.max([2,num_cpus\/num_mpi])))\n\n# COMPILE CODE AT MULTIPLE RESOLUTIONS USING SEPARATE BUILD FILE\nfor n,res in enumerate(RES):\n for d in [1,2]:\n util.change_cparm('N{}TOT'.format(d), res, TMP_BUILD)\n if MPI:\n for d in [1,2]:\n util.change_cparm('N{}CPU'.format(d), 2, TMP_BUILD)\n call([sys.executable, TMP_BUILD, '-dir', TMP_DIR])\n parm_src = os.path.join(os.getcwd(), TMP_DIR, 'param_template.dat')\n parm_dest = '..\/..\/test\/' + TMP_DIR + '\/param.dat'\n call(['cp', os.path.join(os.getcwd(), TMP_DIR, 'bhlight'),\n '..\/..\/test\/' + TMP_DIR + '\/bhlight_' + str(res)])\n copyfile(parm_src,parm_dest)\nutil.safe_remove(TMP_BUILD)\nutil.safe_remove(TMP_DIR)\nos.chdir('..\/..\/test\/')\nos.chdir(TMP_DIR)\n\n# and convergence plot\nprint(\"Convergence test...\")\nerrs = [None for res in RES]\nfor n,res in enumerate(RES):\n print(\"Res = {}\".format(res))\n call_string = ['.\/bhlight_' + str(res), '-p', 'param.dat']\n if MPI:\n bcall(call_string,int(num_mpi))\n else:\n bcall(call_string)\n dfiles = sorted(glob.glob('dumps\/dump*.h5'))\n hdr = io.load_hdr(dfiles[-1])\n geom = io.load_geom(hdr, recalc=True)\n dump = io.load_dump(dfiles[-1],geom)\n N1,N2 = [hdr['N{}'.format(d)] for d in [1,2]]\n mshape = (N1,N2)\n t = dump['t']\n x,y = [geom[d].reshape(mshape) for d in ['x','y']]\n phi = dump['var0'].reshape(mshape)\n error = phi - phi_true(t,x,y)\n max_error = np.max(np.abs(error))\n errs[n] = max_error\n\nprint(\"Richardson extrapolating...\")\nerrf = lambda h, alpha, p: alpha*(h**p)\np0 = 2.0\nh0 = 1.0\/RES[0]\nerr0 = errs[0]\nalpha0 = err0*h0*h0\nhs = 1.0\/np.array(RES)\n(alpha,p),pcov = optimize.curve_fit(errf,hs,errs,p0=(alpha0,p0))\nprint(\"Convergence data:\\nalpha = {}, p = {}\\npcov = {}\\n\".format(\n alpha,p,pcov))\n\nif AUTO:\n os.chdir(\"..\/\")\n data = {}\n data['SOL'] = [np.array([0.0]), np.array([2.0])]\n data['CODE'] = [np.array([0.0]), np.array([p])]\n data['THRESHOLD'] = 0.03\n pickle.dump(data, open('data.p', 'wb'))\n util.safe_remove(TMP_DIR)\n sys.exit()\n\nprint(\"Plotting convergence...\")\nplt.loglog(RES,errf(hs,alpha,p),lw=2,ls='--',\n label=(r'$%.2f h^{%.2f}$' % (alpha,p)))\nplt.loglog(RES,errs,'ro',ms=12,label='measured')\nplt.xlabel('Resolution')\nplt.ylabel(r'$\\left| \\phi^h - \\phi_{true} \\right|_\\infty$')\nplt.legend()\nplt.savefig('..\/{}.png'.format(PROBLEM),bbox_inches='tight')\nplt.clf()\n\nif MOVIE:\n print(\"Making movie\")\n dfiles = sorted(glob.glob('dumps\/dump*.h5'))\n hdr = io.load_hdr(dfiles[0])\n geom = io.load_geom(hdr)\n N1,N2 = [hdr['N{}'.format(d)] for d in [1,2]]\n mshape = (N1,N2)\n x,y = [geom[d].reshape(mshape) for d in ['x','y']]\n def get_phi(i):\n dump = io.load_dump(dfiles[i],geom)\n phi = dump['var0'].reshape(mshape)\n return phi\n phi0 = get_phi(0)\n fig, ax = plt.subplots()\n ax.set_xlim((0,1))\n ax.set_ylim((0,1))\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n pc = ax.pcolormesh(x,y,phi0,\n cmap='viridis',\n shading='gouraud')\n cbar = fig.colorbar(pc)\n cbar.set_clim(-1.1,1.1)\n cbar.set_label(r'$\\phi$')\n\n def init():\n pc.set_array(phi0.ravel())\n return pc\n def animate(i):\n phi = get_phi(i)\n pc.set_array(phi.ravel())\n return pc\n\n anim = mpl.animation.FuncAnimation(fig,animate,\n init_func=init,\n frames=101,\n interval=20,blit=False)\n\n anim.save(\"..\/{}.mp4\".format(PROBLEM),\n writer='ffmpeg',\n extra_args=['-loglevel','verbose'])\n\nprint(\"Done.\")\nutil.safe_remove(TMP_DIR)\n"} 
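# --- Illustrative aside (not part of the dataset records above or below) ---
# The advection2d record that ends just above estimates a convergence order p by
# fitting err(h) ~ alpha * h**p with scipy.optimize.curve_fit. A minimal sketch of
# the same idea, using hypothetical resolutions and errors and a plain log-log
# least-squares fit instead of curve_fit, is:
import numpy as np

resolutions = np.array([64, 128, 256])        # hypothetical grid resolutions
errors = np.array([2.1e-2, 5.4e-3, 1.4e-3])   # hypothetical max-norm errors
h = 1.0 / resolutions                         # grid spacings

# log(err) = log(alpha) + p*log(h), so a degree-1 polyfit in log space gives p.
p, log_alpha = np.polyfit(np.log(h), np.log(errors), 1)
print("estimated order p = %.2f, alpha = %.3g" % (p, np.exp(log_alpha)))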
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_80","text":"asantentata\/Stats-project\n#!\/usr\/bin\/env python\n# Function which takes as inputs a hidden satate x_i and a pixel intensity y_i and then gives the \n#probability that the observed intensity of pixel i is y_i and does not consider information from\n#neigbouring particles\n\nfrom scipy import stats\nimport numpy as np\nimport matplotlib.pyplot as plt\n#import cv as cv2\nfrom PIL import Image\n\n#importing image and turning into greyscale (each pixel an array of values from 0-255)\nimport Image\nimage = Image.open(\"noisy_logo.png\")\ngray = np.asarray(image.convert('L'))\n \n\n#defining the outputs of function\ndef likelihood(y_i,x_i):\t\t\t\t\t\n\tif x_i == 1 and y_i <127:\n\t\t\tprobability = 0.15\n\tif x_i == 1 and y_i >=127:\n\t\t\tprobability = 0.85\n\tif x_i == 0 and y_i <127:\n\t\t\tprobability = 0.85\n\tif x_i == 0 and y_i >=127:\n\t\t\tprobability = 0.15\t\n\treturn probability\n\n\n#Testing function by making plot#\nx_i = 1\t\t\t#value of hidden state x_i\nprobability = []\t\t#array of probabilities\nfor y_i in range(0,255):\n\tprobability.append(likelihood(y_i,x_i))\nplt.plot(range(0,255),probability)\nplt.show()\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_81","text":"components\/outlier-detection\/mahalanobis\/CoreMahalanobis.py\nimport logging\nimport numpy as np\nfrom scipy.linalg import eigh\n\nlogger = logging.getLogger(__name__)\n\nclass CoreMahalanobis(object):\n \"\"\" Outlier detection using the Mahalanobis distance.\n \n Parameters\n ----------\n threshold (float) : Mahalanobis distance threshold used to classify outliers\n n_components (int) : number of principal components used\n n_stdev (float) : stdev used for feature-wise clipping of observations\n start_clip (int) : number of observations before clipping is applied\n max_n (int) : algorithm behaves as if it has seen at most max_n points\n \n Functions\n ----------\n predict : detect and return outliers\n transform_input : detect outliers and return input features\n send_feedback : add target labels as part of the feedback loop\n tags : add metadata for input transformer\n metrics : return custom metrics\n \"\"\"\n def __init__(self,threshold=25,n_components=3,n_stdev=3,start_clip=50,max_n=-1):\n \n logger.info(\"Initializing model\")\n self.threshold = threshold\n self.n_components = n_components\n self.max_n = max_n\n self.n_stdev = n_stdev\n self.start_clip = start_clip\n \n self.clip = None\n self.mean = 0\n self.C = 0\n self.n = 0\n self.nb_outliers = 0\n \n \n def predict(self, X, feature_names):\n \"\"\" Return outlier predictions.\n\n Parameters\n ----------\n X : array-like\n feature_names : array of feature names (optional)\n \"\"\"\n logger.info(\"Using component as a model\")\n return self._get_preds(X)\n \n \n def transform_input(self, X, feature_names):\n \"\"\" Transform the input. \n Used when the outlier detector sits on top of another model.\n\n Parameters\n ----------\n X : array-like\n feature_names : array of feature names (optional)\n \"\"\"\n logger.info(\"Using component as an outlier-detector transformer\")\n self.prediction_meta = self._get_preds(X)\n return X\n \n \n def _get_preds(self,X):\n \"\"\" Detect outliers using the Mahalanobis distance threshold. 
\n \n Parameters\n ----------\n X : array-like\n \"\"\"\n\n nb = X.shape[0] # batch size\n p = X.shape[1] # number of features\n n_components = min(self.n_components,p)\n if self.max_n>0:\n n = min(self.n,self.max_n) # n can never be above max_n\n else:\n n = self.n\n \n # Clip X\n if self.n > self.start_clip:\n Xclip = np.clip(X,self.clip[0],self.clip[1])\n else:\n Xclip = X\n \n # Tracking the mean and covariance matrix\n roll_partial_means = Xclip.cumsum(axis=0)\/(np.arange(nb)+1).reshape((nb,1))\n coefs = (np.arange(nb)+1.)\/(np.arange(nb)+n+1.)\n new_means = self.mean + coefs.reshape((nb,1))*(roll_partial_means-self.mean)\n new_means_offset = np.empty_like(new_means)\n new_means_offset[0] = self.mean\n new_means_offset[1:] = new_means[:-1]\n\n coefs = ((n+np.arange(nb))\/(n+np.arange(nb)+1.)).reshape((nb,1,1))\n B = coefs*np.matmul((Xclip - new_means_offset)[:,:,None],(Xclip - new_means_offset)[:,None,:])\n cov_batch = (n-1.)\/(n+max(1,nb-1.))*self.C + 1.\/(n+max(1,nb-1.))*B.sum(axis=0)\n\n # PCA\n eigvals, eigvects = eigh(cov_batch,eigvals=(p-n_components,p-1))\n \n # Projections\n proj_x = np.matmul(X,eigvects)\n proj_x_clip = np.matmul(Xclip,eigvects)\n proj_means = np.matmul(new_means_offset,eigvects)\n if type(self.C) == int and self.C == 0:\n proj_cov = np.diag(np.zeros(n_components))\n else:\n proj_cov = np.matmul(eigvects.transpose(),np.matmul(self.C,eigvects))\n\n # Outlier detection in the PC subspace\n coefs = (1.\/(n+np.arange(nb)+1.)).reshape((nb,1,1))\n B = coefs*np.matmul((proj_x_clip - proj_means)[:,:,None],(proj_x_clip - proj_means)[:,None,:])\n\n all_C_inv = np.zeros_like(B)\n c_inv = None\n _EPSILON = 1e-8\n\n for i, b in enumerate(B):\n if c_inv is None:\n if abs(np.linalg.det(proj_cov)) > _EPSILON:\n c_inv = np.linalg.inv(proj_cov)\n all_C_inv[i] = c_inv\n continue\n else:\n if n + i == 0:\n continue\n proj_cov = (n + i -1. )\/(n + i)*proj_cov + b\n continue\n else:\n c_inv = (n + i - 1.)\/float(n + i - 2.)*all_C_inv[i-1]\n BC1 = np.matmul(B[i-1],c_inv)\n all_C_inv[i] = c_inv - 1.\/(1.+np.trace(BC1))*np.matmul(c_inv,BC1)\n\n # Updates\n self.mean = new_means[-1]\n self.C = cov_batch\n stdev = np.sqrt(np.diag(cov_batch))\n self.n += nb\n if self.n > self.start_clip:\n self.clip = [self.mean-self.n_stdev*stdev,self.mean+self.n_stdev*stdev]\n \n # Outlier scores and predictions\n x_diff = proj_x-proj_means\n self.score = np.matmul(x_diff[:,None,:],np.matmul(all_C_inv,x_diff[:,:,None])).reshape(nb)\n self.prediction = np.array([1 if s > self.threshold else 0 for s in self.score]).astype(int)\n\n return self.prediction\n \n \n def send_feedback(self,X,feature_names,reward,truth):\n \"\"\" Return additional data as part of the feedback loop.\n \n Parameters\n ----------\n X : array of the features sent in the original predict request\n feature_names : array of feature names. May be None if not available.\n reward (float): the reward\n truth : array with correct value (optional)\n \"\"\"\n logger.info(\"Send feedback called\")\n return []\n \n \n def tags(self):\n \"\"\"\n Use predictions made within transform to add these as metadata\n to the response. 
Tags will only be collected if the component is\n used as an input-transformer.\n \"\"\"\n try:\n return {\"outlier-predictions\": self.prediction_meta.tolist()}\n except AttributeError:\n logger.info(\"No metadata about outliers\")\n \n \n def metrics(self):\n \"\"\" Return custom metrics averaged over the prediction batch.\n \"\"\"\n self.nb_outliers += np.sum(self.prediction)\n \n is_outlier = {\"type\":\"GAUGE\",\"key\":\"is_outlier\",\"value\":np.mean(self.prediction)}\n outlier_score = {\"type\":\"GAUGE\",\"key\":\"outlier_score\",\"value\":np.mean(self.score)}\n nb_outliers = {\"type\":\"GAUGE\",\"key\":\"nb_outliers\",\"value\":int(self.nb_outliers)}\n fraction_outliers = {\"type\":\"GAUGE\",\"key\":\"fraction_outliers\",\"value\":int(self.nb_outliers)\/self.n}\n obs = {\"type\":\"GAUGE\",\"key\":\"observation\",\"value\":self.n}\n threshold = {\"type\":\"GAUGE\",\"key\":\"threshold\",\"value\":self.threshold}\n\n return [is_outlier,outlier_score,nb_outliers,fraction_outliers,obs,threshold]"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_82","text":"0\n#!\/usr\/bin\/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nimport math\nfrom scipy.special import gamma\nfrom abc import ABCMeta, abstractmethod\nfrom utils.utils import *\n\nclass Distribution(metaclass=ABCMeta):\n @abstractmethod\n def __init__(self):\n pass\nclass ReparametrizedGaussian(Distribution):\n \"\"\"\n Diagonal ReparametrizedGaussian distribution with parameters mu (mean) and rho. The standard\n deviation is parametrized as sigma = log(1 + exp(rho))\n A sample from the distribution can be obtained by sampling from a unit Gaussian,\n shifting the samples by the mean and scaling by the standard deviation:\n w = mu + log(1 + exp(rho)) * epsilon\n \"\"\"\n def __init__(self, mu, rho):\n self.mean = mu\n self.rho = rho\n self.normal = torch.distributions.Normal(0, 1)\n self.point_estimate = self.mean\n\n @property\n def std_dev(self):\n return torch.log1p(torch.exp(self.rho))\n\n# def sample(self, n_samples=1):\n# epsilon = torch.distributions.Normal(0, 1).sample(sample_shape=(n_samples, *self.mean.size()))\n# return self.mean + self.std_dev * epsilon\n\n def sample(self):\n epsilon = torch.distributions.Normal(0, 1).sample(self.mean.size())\n return self.mean + self.std_dev * epsilon\n \n def logprob(self, target):\n return (-math.log(math.sqrt(2 * math.pi))\n - torch.log(self.std_dev)\n - ((target - self.mean) ** 2) \/ (2 * self.std_dev ** 2)).sum()\n\n def entropy(self):\n \"\"\"\n Computes the entropy of the Diagonal Gaussian distribution.\n Details on the computation can be found in the 'diagonal_gaussian_entropy' notes in the repo\n \"\"\"\n if self.mean.dim() > 1:\n n_inputs, n_outputs = self.mean.shape\n else:\n n_inputs = len(self.mean)\n n_outputs = 1\n\n part1 = (n_inputs * n_outputs) \/ 2 * (torch.log(torch.tensor([2 * math.pi])) + 1)\n part2 = torch.sum(torch.log(self.std_dev))\n\n return part1 + part2\n\nclass Gamma(Distribution):\n \"\"\" Gamma distribution \"\"\"\n def __init__(self, shape, rate):\n \"\"\"\n Class constructor, sets parameters\n Args:\n shape: float, shape parameter of the distribution\n rate: float, rate parameter of the distribution\n Raises:\n TypeError: if given rate or shape are not floats\n ValueError: if given rate or shape are not positive\n \"\"\"\n if not isinstance(shape, float) or not isinstance(rate, float):\n raise TypeError(\"Shape and rate should be floats!\")\n\n if shape < 0 or rate < 0:\n raise 
ValueError(\"Shape and rate must be positive!\")\n\n self.shape = shape\n self.rate = rate\n self.mean = self.shape \/ self.rate\n self.variance = self.shape \/ self.rate**2\n self.point_estimate = self.mean\n\n def update(self, shape, rate):\n \"\"\"\n Updates mean and variance automatically when a and b get updated\n Args:\n shape: float, shape parameter of the distribution\n rate: float, rate parameter of the distribution\n Raises:\n TypeError: if given rate or shape are not floats\n ValueError: if given rate or shape are not positive\n \"\"\"\n if not isinstance(shape, float) or not isinstance(rate, float):\n raise TypeError(\"Shape and rate should be floats!\")\n\n if shape < 0 or rate < 0:\n raise ValueError(\"Shape and rate must be positive!\")\n\n self.shape = shape\n self.rate = rate\n self.mean = shape \/ rate\n self.variance = shape \/ rate ** 2\n\nclass InverseGamma(Distribution):\n \"\"\" Inverse Gamma distribution \"\"\"\n def __init__(self, shape, rate):\n \"\"\"\n Class constructor, sets parameters of the distribution.\n Args:\n shape: torch tensor of floats, shape parameters of the distribution\n rate: torch tensor of floats, rate parameters of the distribution\n \"\"\"\n self.shape = shape\n self.rate = rate\n\n def exp_inverse(self):\n \"\"\"\n Calculates the expectation E[1\/x], where x follows\n the inverse gamma distribution\n \"\"\"\n return self.shape \/ self.rate\n\n def exp_log(self):\n \"\"\"\n Calculates the expectation E[log(x)], where x follows\n the inverse gamma distribution\n \"\"\"\n exp_log = torch.log(self.rate) - torch.digamma(self.shape)\n return exp_log\n\n def entropy(self):\n \"\"\"\n Calculates the entropy of the inverse gamma distribution\n \"\"\"\n entropy = self.shape + torch.log(self.rate) + torch.lgamma(self.shape) \\\n - (1 + self.shape) * torch.digamma(self.shape)\n return torch.sum(entropy)\n\n def logprob(self, target):\n \"\"\"\n Computes the value of the predictive log likelihood at the target value\n Args:\n target: Torch tensor of floats, point(s) to evaluate the logprob\n Returns:\n loglike: float, the log likelihood\n \"\"\"\n part1 = (self.rate**self.shape) \/ gamma(self.shape)\n part2 = target**(-self.shape - 1)\n part3 = torch.exp(-self.rate \/ target)\n\n return torch.log(part1 * part2 * part3)\n\n def update(self, shape, rate):\n \"\"\"\n Updates shape and rate of the distribution\n Args:\n shape: float, shape parameter of the distribution\n rate: float, rate parameter of the distribution\n \"\"\"\n self.shape = shape\n self.rate = rate\n\nclass Discrete_Flatten_Laplace(Distribution):\n \"\"\"\n Flatten Laplace Distribution with mode interval [mu_dowm, mu_up], and precision parameter tau.\n \"\"\"\n def __init__(self, mu_down, mu_up, tau, D):\n self.mu_up = torch.tensor(mu_up)\n self.mu_down = torch.tensor(mu_down)\n self.tau = tau\n self.domain = torch.tensor(np.linspace(0, D, D + 1)).float()\n\n @property\n def constant(self):\n return torch.sum(torch.exp(- self.tau * torch.sqrt(self.flatten(self.domain) ** 2)))\n\n def pmf(self):\n return torch.exp(- self.tau * torch.sqrt(self.flatten(self.domain) ** 2)) \/ self.constant\n\n def sample(self):\n return torch.multinomial(self.pmf(), 1, replacement=False)\n\n def flatten(self, x):\n return torch.max(\n torch.max(torch.max(self.mu_down - x, torch.zeros(1)), torch.max(-self.mu_up + x, torch.zeros(1))),\n torch.zeros(1))\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_83","text":"from scipy.stats._discrete_distns import zipf_gen\nimport 
requests\nimport threading\nimport json\nimport codecs\nimport time\nimport os\nimport urllib\nimport errno\nimport zipfile\nfrom wsd.database import MySQLDatabase\nfrom conf import *\n\n\n\n#MEDIAWIKI_API_ENDPOINT = 'https:\/\/en.wikipedia.org\/w\/api.php?action=parse&format=json&prop=text&oldid=' #see:\n#MEDIAWIKI_API_ENDPOINT = 'https:\/\/en.wikipedia.org\/w\/index.php?oldid='#alternative for getting the html\nMEDIAWIKI_API_ENDPOINT = 'https:\/\/en.wikipedia.org\/api\/rest_v1\/page\/html\/'# see: https:\/\/en.wikipedia.org\/api\/rest_v1\/?doc\n\n# Limit the number of threads.\npool = threading.BoundedSemaphore(20)\n\ndef worker(u, article, iteration_number):\n headers = {'user-agent': EMAIL}\n # Request passed URL.\n r = requests.get(u, headers=headers, stream=True)\n directory = STATIC_HTML_DUMP_ARTICLES_DIR+str(iteration_number)\n try:\n os.mkdir(directory)\n except OSError, e:\n if e.errno == errno.EEXIST and os.path.isdir(directory):\n # File exists, and it's a directory,\n # another process beat us to creating this dir, that's OK.\n pass\n else:\n # Our target dir exists as a file, or different error,\n # reraise the error!\n raise\n error_directory = STATIC_HTML_DUMP_ERRORS_DIR+str(iteration_number)\n try:\n os.mkdir(error_directory)\n except OSError, e:\n if e.errno == errno.EEXIST and os.path.isdir(error_directory):\n # File exists, and it's a directory,\n # another process beat us to creating this dir, that's OK.\n pass\n else:\n # Our target dir exists as a file, or different error,\n # reraise the error!\n raise\n if not os.path.exists(error_directory):\n os.makedirs(error_directory)\n if r.status_code == 200:\n html_article_filename = STATIC_HTML_DUMP_ARTICLES_DIR+str(iteration_number)+'\/article_'+str(article['id'])+'_' +\\\n str(article['rev_id'])+'.html'\n zip_article_filename = STATIC_HTML_DUMP_ARTICLES_DIR+str(iteration_number)+'\/article_'+str(article['id'])+'_' +\\\n str(article['rev_id'])+'.zip'\n handle_response(r, html_article_filename, zip_article_filename)\n else:\n html_article_filename = STATIC_HTML_DUMP_ERRORS_DIR+str(iteration_number)+'\/article_'+str(article['id'])+'_' +\\\n str(article['rev_id'])+'.html'\n zip_article_filename = STATIC_HTML_DUMP_ERRORS_DIR+str(iteration_number)+'\/article_'+str(article['id'])+'_' +\\\n str(article['rev_id'])+'.zip'\n handle_response(r, html_article_filename, zip_article_filename)\n\n # Release lock for other threads.\n pool.release()\n # Show the number of active threads.\n #print threading.active_count()\n\ndef req():\n # Get URLs from a text file, remove white space.\n db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME)\n db_worker_view = db.get_work_view()\n articles = db_worker_view.retrieve_all_articles()\n #articles = db_worker_view.retrieve_all_articles_questionmark()\n # measure time\n start = time.clock()\n start_time_iteration = start\n iteration_number = 483\n for i, article in enumerate(articles):\n # print some progress\n if i % 10000 == 0:\n #print time for the iteration\n seconds = time.clock() - start_time_iteration\n m, s = divmod(seconds, 60)\n h, m = divmod(m, 60)\n print \"Number of crawled articles: %d. Total time for last iteration of 10000 articles: %d:%02d:%02d\" % (i, h, m, s)\n start_time_iteration = time.clock()\n iteration_number += 1\n\n # Thread pool.\n # Blocks other threads (more than the set limit).\n pool.acquire(blocking=True)\n # Create a new thread.\n # Pass each URL (i.e. 
u parameter) to the worker function.\n t = threading.Thread(target=worker, args=(MEDIAWIKI_API_ENDPOINT+urllib.quote(article['title'])+'\/'+str(article['rev_id']), article, iteration_number))\n\n # Start the newly create thread.\n t.start()\n seconds = time.clock() - start\n m, s = divmod(seconds, 60)\n h, m = divmod(m, 60)\n print \"Total time: %d:%02d:%02d\" % (h, m, s)\n\n\ndef handle_response(r, html_article_filename, zip_article_filename):\n with open(html_article_filename, 'wb') as outfile:\n for chunk in r.iter_content(1024):\n outfile.write(chunk)\n outfile.flush()\n outfile.close()\n zf = zipfile.ZipFile(zip_article_filename, mode='w', compression=zipfile.ZIP_DEFLATED)\n try:\n zf.write(html_article_filename, os.path.basename(html_article_filename))\n os.remove(html_article_filename)\n finally:\n zf.close()\n\n\nreq()\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_84","text":"cliu3\/pf_geolocation\nfrom __future__ import print_function\nimport numpy as np\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\nimport scipy.io\nimport scipy.stats\nfrom astropy.time import Time\nimport pandas as pd\nfrom my_project import *\nimport os.path\n#from config import *\nimport sys\n\n\n#tagid = 7\n# try:\ntagid = int(sys.argv[1])\n#tagid = 12\n# except:\n\n\n# load tag file\npath_to_tags = '\/home\/cliu3\/pf_geolocation\/data\/tag_files'\ntag=scipy.io.loadmat(path_to_tags+'\/'+str(tagid)+'_raw.mat',squeeze_me =False,struct_as_record=True)\ntag=tag['tag'][0,0]\nrelease_lon = tag['release_lon'][0,0]\nrelease_lat = tag['release_lat'][0,0]\n[release_x, release_y] = my_project(release_lon, release_lat, 'forward')\nrecapture_lon = tag['recapture_lon'][0,0]\nrecapture_lat = tag['recapture_lat'][0,0]\n[recapture_x, recapture_y] = my_project(recapture_lon, recapture_lat, 'forward')\n\ntagname = str(tagid)+'_'+tag['tag_id'][0]\n\n# load result file\nresult = scipy.io.loadmat('result'+tagname+'.mat',squeeze_me =False,struct_as_record=True)\nparticles = result['particles']\nmpt_idx = result['mpt_idx']\n# determine most probable track\nmpt_x = particles[:,mpt_idx,0].flatten()\nmpt_y = particles[:,mpt_idx,1].flatten()\n(mpt_lon, mpt_lat) = my_project(mpt_x, mpt_y, 'reverse')\n\nday_dnum = np.array(range(int(tag['dnum'][0]), int(tag['dnum'][-1])+1))\ndate = Time(day_dnum-678942,format='mjd',scale='utc').datetime\nMPT = pd.DataFrame({'date':date, 'lon':mpt_lon, 'lat':mpt_lat, 'X':mpt_x, 'Y':mpt_y})\nMPT['date'] = pd.to_datetime(MPT['date'])\nMPT = MPT[['date', 'X', 'Y', 'lat', 'lon']]\nMPT.to_csv('mpt_'+tagname+'.csv')\n#-- calculate cumulative probability distribution\n# construct daily distrubution using kernel density estimation\nxmin = particles[:,:,0].min()\nxmax = particles[:,:,0].max()\nymin = particles[:,:,1].min()\nymax = particles[:,:,1].max()\nX, Y = np.meshgrid(np.linspace(xmin,xmax,50), np.linspace(ymin,ymax,50))\npositions = np.vstack([X.ravel(), Y.ravel()])\n\nndays = len(particles)\nudist = np.zeros_like(X)\n\n# for i in range(ndays):\n# print(\"Processing kde for Day \"+str(i+1)+\"\/\"+str(ndays)+\"...\")\n# values = particles[i].T\n# kernel = scipy.stats.gaussian_kde(values)\n# Z = np.reshape(kernel(positions).T, X.shape)\n# Z = Z\/Z.max()\n# udist += Z\nprint(\"Processing kde...\")\nvalues = np.vstack([particles[:,:,0].flatten(), particles[:,:,1].flatten()])\nkernel = scipy.stats.gaussian_kde(values)\nudist = np.reshape(kernel(positions).T, X.shape)\nudist = 
udist\/udist.max()\n\nscipy.io.savemat('UD_'+tagname+'.mat',{'X':X, 'Y':Y, 'udist':udist})\n\n# create basemap\nprint('Generating plot...')\nlatStart = 41.15\nlatEnd = 43.15\nlonStart =-71\nlonEnd =-68\n\nmap = Basemap(projection='merc', lat_0 = 42, lon_0 = -70,resolution = 'h', area_thresh = 0.1,llcrnrlon=lonStart, llcrnrlat=latStart,\n urcrnrlon=lonEnd, urcrnrlat=latEnd)\nmap.fillcontinents(color = 'green')\n\n#-- plot mpt\nmptlon, mptlat = my_project(mpt_x, mpt_y, 'inverse')\nmptx, mpty = map(mptlon, mptlat)\nmap.plot(mptx,mpty,'b-')\n#plot release and recapture location\n\nmap.plot(mptx[0],mpty[0],'kx',label=\"Release\")\nrecap_x, recap_y = map(recapture_lon, recapture_lat)\nmap.plot(recap_x, recap_y,'k^',markeredgecolor='k',label=\"Reported Recapture\")\nmap.plot(mptx[-1],mpty[-1],'bv',markeredgecolor='b',label=\"Simulated Recapture\")\n\n\n#-- plot uncertainty distribution\nlon_g, lat_g = my_project(X, Y, 'inverse')\nmap.pcolormesh(lon_g, lat_g,udist,cmap=plt.cm.cubehelix_r,latlon=True,shading='gouraud')\n\nplt.legend(numpoints=1,prop={'size':16},loc='lower right')\nplt.title(tagname+' gpu')\n\nplt.savefig('track'+tagname+'_gpu.pdf', dpi=300, bbox_inches='tight')\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_85","text":"0\nimport time\n\nfrom typing import Tuple\nimport math\n\nfrom vendor.nfb.pynfb.protocols.ssd.topomap_selector_ica import ICADialog\n\nimport numpy as np\nimport mne\nfrom numpy.linalg import svd\nfrom scipy.optimize import linprog\nfrom sklearn.preprocessing import normalize\nfrom mne.preprocessing import find_outliers\nfrom mne.minimum_norm import apply_inverse_raw # , make_inverse_operator\nfrom mne.minimum_norm import make_inverse_operator as mne_make_inverse_operator\nfrom mne.beamformer import apply_lcmv_raw\nfrom ..helpers.make_lcmv import make_lcmv\n\nfrom .node import ProcessorNode\nfrom ..helpers.matrix_functions import (make_time_dimension_second,\n put_time_dimension_back_from_second,\n last_sample)\nfrom ..helpers.inverse_model import (get_default_forward_file,\n get_clean_forward,\n make_inverse_operator,\n matrix_from_inverse_operator)\n\nfrom ..helpers.pynfb import (pynfb_ndarray_function_wrapper,\n ExponentialMatrixSmoother)\nfrom ..helpers.channels import channel_labels_saver\nfrom ..helpers.aux_tools import nostdout\nfrom .. 
import TIME_AXIS\nfrom vendor.nfb.pynfb.signal_processing import filters\n\n\nclass Preprocessing(ProcessorNode):\n CHANGES_IN_THESE_REQUIRE_RESET = ('collect_for_x_seconds', )\n UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = ('mne_info', )\n SAVERS_FOR_UPSTREAM_MUTABLE_OBJECTS = {'mne_info': channel_labels_saver}\n\n def __init__(self, collect_for_x_seconds: int=60):\n super().__init__()\n self.collect_for_x_seconds = collect_for_x_seconds # type: int\n\n self._samples_collected = None # type: int\n self._samples_to_be_collected = None # type: int\n self._enough_collected = None # type: bool\n self._means = None # type: np.ndarray\n self._mean_sums_of_squares = None # type: np.ndarray\n self._bad_channel_indices = None # type: List[int]\n self._interpolation_matrix = None # type: np.ndarray\n\n self._reset_statistics()\n\n def _initialize(self):\n self.mne_info = self.traverse_back_and_find('mne_info')\n frequency = self.mne_info['sfreq']\n self._samples_to_be_collected = int(math.ceil(\n self.collect_for_x_seconds * frequency))\n\n def _update(self):\n # Have we collected enough samples without the new input?\n enough_collected = self._samples_collected >=\\\n self._samples_to_be_collected\n if not enough_collected:\n if self.input_node.output is not None and\\\n self.input_node.output.shape[TIME_AXIS] > 0:\n self._update_statistics()\n\n elif not self._enough_collected: # We just got enough samples\n self._enough_collected = True\n standard_deviations = self._calculate_standard_deviations()\n self._bad_channel_indices = find_outliers(standard_deviations)\n if any(self._bad_channel_indices):\n # message = Message(there_has_been_a_change=True,\n # output_history_is_no_longer_valid=True)\n # self._deliver_a_message_to_receivers(message)\n # self.mne_info['bads'].append(self._bad_channel_indices)\n # self.mne_info['bads'] = self._bad_channel_indices\n\n # TODO: handle emergent bad channels on the go\n pass\n\n self.output = self.input_node.output\n\n def _reset(self) -> bool:\n self._reset_statistics()\n self._input_history_is_no_longer_valid = True\n return self._input_history_is_no_longer_valid\n\n def _reset_statistics(self):\n self._samples_collected = 0\n self._enough_collected = False\n self._means = 0\n self._mean_sums_of_squares = 0\n self._bad_channel_indices = []\n\n def _update_statistics(self):\n input_array = self.input_node.output.astype(np.dtype('float64'))\n # Using float64 is necessary because otherwise rounding error\n # in recursive formula accumulate\n n = self._samples_collected\n m = input_array.shape[TIME_AXIS] # number of new samples\n self._samples_collected += m\n\n self._means = (\n self._means * n + np.sum(input_array, axis=TIME_AXIS)) \/ (n + m)\n self._mean_sums_of_squares = (\n self._mean_sums_of_squares * n +\n np.sum(input_array ** 2, axis=TIME_AXIS)) \/ (n + m)\n\n def _calculate_standard_deviations(self):\n n = self._samples_collected\n return np.sqrt(\n n \/ (n - 1) * (self._mean_sums_of_squares - self._means ** 2))\n\n def _on_input_history_invalidation(self):\n self._reset_statistics()\n\n def _check_value(self, key, value):\n pass\n\n\nclass InverseModel(ProcessorNode):\n SUPPORTED_METHODS = ['MNE', 'dSPM', 'sLORETA']\n UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = ('mne_info', )\n CHANGES_IN_THESE_REQUIRE_RESET = ('mne_inverse_model_file_path',\n 'mne_forward_model_file_path',\n 'snr', 'method')\n SAVERS_FOR_UPSTREAM_MUTABLE_OBJECTS = {'mne_info': channel_labels_saver}\n\n def __init__(self, forward_model_path=None, snr=1.0, 
method='MNE'):\n super().__init__()\n\n self.snr = snr\n self._user_provided_forward_model_file_path = forward_model_path\n self._default_forward_model_file_path = None\n self.mne_info = None\n self.fwd = None\n\n self._inverse_model_matrix = None\n self.method = method\n\n def _initialize(self):\n mne_info = self.traverse_back_and_find('mne_info')\n self._bad_channels = mne_info['bads']\n\n if self._user_provided_forward_model_file_path is None:\n self._default_forward_model_file_path =\\\n get_default_forward_file(mne_info)\n\n self.fwd, missing_ch_names = get_clean_forward(\n self.mne_forward_model_file_path, mne_info)\n mne_info['bads'] = list(set(mne_info['bads'] + missing_ch_names))\n\n inverse_operator = make_inverse_operator(self.fwd, mne_info)\n self._inverse_model_matrix = matrix_from_inverse_operator(\n inverse_operator=inverse_operator, mne_info=mne_info,\n snr=self.snr, method=self.method)\n\n frequency = mne_info['sfreq']\n # channel_count = self._inverse_model_matrix.shape[0]\n channel_count = self.fwd['nsource']\n channel_labels = ['vertex #{}'.format(i + 1)\n for i in range(channel_count)]\n self.mne_info = mne.create_info(channel_labels, frequency)\n\n def _update(self):\n mne_info = self.traverse_back_and_find('mne_info')\n bads = mne_info['bads']\n if bads != self._bad_channels:\n inverse_operator = make_inverse_operator(self.fwd, mne_info)\n self._inverse_model_matrix = matrix_from_inverse_operator(\n inverse_operator=inverse_operator, mne_info=mne_info,\n snr=self.snr, method=self.method)\n self._bad_channels = bads\n\n input_array = self.input_node.output\n raw_array = mne.io.RawArray(input_array, mne_info, verbose='ERROR')\n raw_array.pick_types(eeg=True, meg=False, stim=False, exclude='bads')\n data = raw_array.get_data()\n self.output = self._apply_inverse_model_matrix(data)\n\n def _on_input_history_invalidation(self):\n # The methods implemented in this node do not rely on past inputs\n pass\n\n def _check_value(self, key, value):\n if key == 'method':\n if value not in self.SUPPORTED_METHODS:\n raise ValueError(\n 'Method {} is not supported.'.format(value) +\n ' Use one of: {}'.format(self.SUPPORTED_METHODS))\n\n if key == 'snr':\n if value <= 0:\n raise ValueError(\n 'snr (signal-to-noise ratio) must be a positive number.')\n\n def _reset(self):\n self._should_reinitialize = True\n self.initialize()\n output_history_is_no_longer_valid = True\n return output_history_is_no_longer_valid\n\n @property\n def mne_forward_model_file_path(self):\n return self._user_provided_forward_model_file_path or\\\n self._default_forward_model_file_path\n\n @mne_forward_model_file_path.setter\n def mne_forward_model_file_path(self, value):\n # This setter is for public use, hence the \"user_provided\"\n self._user_provided_forward_model_file_path = value\n\n def _apply_inverse_model_matrix(self, input_array: np.ndarray):\n W = self._inverse_model_matrix # VERTICES x CHANNELS\n output_array = W.dot(make_time_dimension_second(input_array))\n return put_time_dimension_back_from_second(output_array)\n\n\nclass LinearFilter(ProcessorNode):\n UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = ('mne_info', )\n CHANGES_IN_THESE_REQUIRE_RESET = ('lower_cutoff', 'upper_cutoff')\n SAVERS_FOR_UPSTREAM_MUTABLE_OBJECTS = {'mne_info':\n lambda info: (info['nchan'], )}\n\n def __init__(self, lower_cutoff, upper_cutoff):\n super().__init__()\n self.lower_cutoff = lower_cutoff\n self.upper_cutoff = upper_cutoff\n self._linear_filter = None # type: filters.ButterFilter\n\n def 
_initialize(self):\n mne_info = self.traverse_back_and_find('mne_info')\n frequency = mne_info['sfreq']\n channel_count = mne_info['nchan']\n if not (self.lower_cutoff is None and self.upper_cutoff is None):\n band = (self.lower_cutoff, self.upper_cutoff)\n\n self._linear_filter = filters.ButterFilter(\n band, fs=frequency, n_channels=channel_count)\n\n self._linear_filter.apply = pynfb_ndarray_function_wrapper(\n self._linear_filter.apply)\n else:\n self._linear_filter = None\n\n def _update(self):\n input = self.input_node.output\n if self._linear_filter is not None:\n self.output = self._linear_filter.apply(input)\n else:\n self.output = input\n\n def _check_value(self, key, value):\n if value is None:\n pass\n\n elif key == 'lower_cutoff':\n if (hasattr(self, 'upper_cutoff') and\n self.upper_cutoff is not None and\n value > self.upper_cutoff):\n raise ValueError(\n 'Lower cutoff can`t be set higher that the upper cutoff')\n if value < 0:\n raise ValueError('Lower cutoff must be a positive number')\n\n elif key == 'upper_cutoff':\n if (hasattr(self, 'upper_cutoff') and\n self.lower_cutoff is not None and\n value < self.lower_cutoff):\n raise ValueError(\n 'Upper cutoff can`t be set lower that the lower cutoff')\n if value < 0:\n raise ValueError('Upper cutoff must be a positive number')\n\n def _on_input_history_invalidation(self):\n if self._linear_filter is not None:\n self._linear_filter.reset()\n\n def _reset(self):\n self._should_reinitialize = True\n self.initialize()\n output_history_is_no_longer_valid = True\n return output_history_is_no_longer_valid\n\n\nclass EnvelopeExtractor(ProcessorNode):\n def __init__(self, factor=0.9):\n super().__init__()\n self.method = 'Exponential smoothing'\n self.factor = factor\n self._envelope_extractor = None # type: ExponentialMatrixSmoother\n\n def _initialize(self):\n channel_count = self.traverse_back_and_find('mne_info')['nchan']\n self._envelope_extractor = ExponentialMatrixSmoother(\n factor=self.factor, column_count=channel_count)\n self._envelope_extractor.apply = pynfb_ndarray_function_wrapper(\n self._envelope_extractor.apply)\n\n def _update(self):\n input = self.input_node.output\n self.output = self._envelope_extractor.apply(np.abs(input))\n\n def _check_value(self, key, value):\n if key == 'factor':\n if value <= 0 or value >= 1:\n raise ValueError('Factor must be a number between 0 and 1')\n\n if key == 'method':\n if value not in self.SUPPORTED_METHODS:\n raise ValueError(\n 'Method {} is not supported.' 
+\n ' Use one of: {}'.format(value, self.SUPPORTED_METHODS))\n\n def _reset(self):\n self._should_reinitialize = True\n self.initialize()\n output_history_is_no_longer_valid = True\n return output_history_is_no_longer_valid\n\n def _on_input_history_invalidation(self):\n self._envelope_extractor.reset()\n\n UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = ('mne_info', )\n CHANGES_IN_THESE_REQUIRE_RESET = ('method', 'factor')\n SUPPORTED_METHODS = ('Exponential smoothing', )\n SAVERS_FOR_UPSTREAM_MUTABLE_OBJECTS = {'mne_info':\n lambda info: (info['nchan'],)}\n\n\nclass Beamformer(ProcessorNode):\n\n SUPPORTED_OUTPUT_TYPES = ('power', 'activation')\n UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = ('mne_info',)\n CHANGES_IN_THESE_REQUIRE_RESET = ('snr', 'output_type', 'is_adaptive',\n 'fixed_orientation',\n 'mne_forward_model_file_path')\n\n SAVERS_FOR_UPSTREAM_MUTABLE_OBJECTS = {'mne_info': channel_labels_saver}\n\n def __init__(self, snr: float=1.0, output_type: str='power',\n is_adaptive: bool=False, fixed_orientation: bool=True,\n forward_model_path: str=None,\n forgetting_factor_per_second: float=0.99):\n super().__init__()\n\n self.snr = snr # type: float\n self._user_provided_forward_model_file_path = forward_model_path\n self._default_forward_model_file_path = None # type: str\n self.mne_info = None # type: mne.Info\n\n self.output_type = output_type # type: np.dtype\n self.is_adaptive = is_adaptive # type: bool\n self._initialized_as_adaptive = None # type: bool\n self.fixed_orientation = fixed_orientation # type: bool\n self._initialized_as_fixed = None # type: bool\n\n self._channel_indices = None # type: list\n self._gain_matrix = None # type: np.ndarray\n self._Rxx = None # type: np.ndarray\n self.forgetting_factor_per_second = forgetting_factor_per_second\n self._forgetting_factor_per_sample = None # type: float\n\n def _initialize(self):\n mne_info = self.traverse_back_and_find('mne_info')\n\n if self._user_provided_forward_model_file_path is None:\n self._default_forward_model_file_path = get_default_forward_file(\n mne_info)\n\n try:\n fwd, missing_ch_names = get_clean_forward(\n self.mne_forward_model_file_path, mne_info)\n except ValueError:\n raise Exception('BAD FORWARD + DATA COMBINATION!')\n\n mne_info['bads'] = list(set(mne_info['bads'] + missing_ch_names))\n self._gain_matrix = fwd['sol']['data']\n G = self._gain_matrix\n if self.is_adaptive is False:\n Rxx = G.dot(G.T)\n elif self.is_adaptive is True:\n Rxx = np.zeros([G.shape[0], G.shape[0]]) # G.dot(G.T)\n\n goods = mne.pick_types(mne_info, eeg=True, meg=False, exclude='bads')\n ch_names = [mne_info['ch_names'][i] for i in goods]\n\n self._Rxx = mne.Covariance(Rxx, ch_names, mne_info['bads'],\n mne_info['projs'], nfree=1)\n\n self._mne_info = mne_info\n\n frequency = mne_info['sfreq']\n self._forgetting_factor_per_sample = np.power(\n self.forgetting_factor_per_second, 1 \/ frequency)\n\n n_vert = fwd['nsource']\n channel_labels = ['vertex #{}'.format(i + 1) for i in range(n_vert)]\n self.mne_info = mne.create_info(channel_labels, frequency)\n self._initialized_as_adaptive = self.is_adaptive\n self._initialized_as_fixed = self.fixed_orientation\n\n self.fwd_surf = mne.convert_forward_solution(\n fwd, surf_ori=True, force_fixed=False)\n if not self.is_adaptive:\n self._filters = make_lcmv(\n info=self._mne_info, forward=self.fwd_surf,\n data_cov=self._Rxx, reg=0.05, pick_ori='max-power',\n weight_norm='unit-noise-gain', reduce_rank=False)\n else:\n self._filters = None\n\n def _update(self):\n t1 = 
time.time()\n input_array = self.input_node.output\n raw_array = mne.io.RawArray(\n input_array, self._mne_info, verbose='ERROR')\n\n raw_array.pick_types(eeg=True, meg=False, stim=False, exclude='bads')\n raw_array.set_eeg_reference(ref_channels='average', projection=True)\n t2 = time.time()\n self.logger.debug('Prepare arrays in {:.1f} ms'.format(\n (t2 - t1) * 1000))\n\n if self.is_adaptive:\n self._update_covariance_matrix(input_array)\n t1 = time.time()\n self._filters = make_lcmv(info=self._mne_info,\n forward=self.fwd_surf,\n data_cov=self._Rxx, reg=0.5,\n pick_ori='max-power',\n weight_norm='unit-noise-gain',\n reduce_rank=False)\n t2 = time.time()\n self.logger.debug('Assembled lcmv instance in {:.1f} ms'.format(\n (t2 - t1) * 1000))\n\n self._filters['source_nn'] = []\n t1 = time.time()\n stc = apply_lcmv_raw(raw=raw_array, filters=self._filters,\n max_ori_out='signed')\n t2 = time.time()\n self.logger.debug('Applied lcmv inverse in {:.1f} ms'.format(\n (t2 - t1) * 1000))\n\n output = stc.data\n t1 = time.time()\n if self.fixed_orientation is True:\n if self.output_type == 'power':\n output = output ** 2\n else:\n vertex_count = self.fwd_surf['nsource']\n output = np.sum(\n np.power(output, 2).reshape((vertex_count, 3, -1)), axis=1)\n if self.output_type == 'activation':\n output = np.sqrt(output)\n\n self.output = output\n t2 = time.time()\n self.logger.debug(\n 'Finalized in {:.1f} ms'.format(\n (t2 - t1) * 1000))\n\n @property\n def mne_forward_model_file_path(self):\n # TODO: fix this\n return (self._user_provided_forward_model_file_path or\n self._default_forward_model_file_path)\n\n @mne_forward_model_file_path.setter\n def mne_forward_model_file_path(self, value):\n # This setter is for public use, hence the \"user_provided\"\n self._user_provided_forward_model_file_path = value\n\n def _reset(self) -> bool:\n\n # Only change adaptiveness or fixed_orientation requires reinit\n # if (self._initialized_as_adaptive is not self.is_adaptive\n # or self._initialized_as_fixed is not self.fixed_orientation):\n self._should_reinitialize = True\n self.initialize()\n\n output_history_is_no_longer_valid = True\n return output_history_is_no_longer_valid\n\n def _on_input_history_invalidation(self):\n # Only adaptive version relies on history\n if self._initialized_as_adaptive is True:\n self._should_reinitialize = True\n self.initialize()\n\n def _check_value(self, key, value):\n if key == 'output_type':\n if value not in self.SUPPORTED_OUTPUT_TYPES:\n raise ValueError(\n 'Method {} is not supported.' 
+\n ' Use one of: {}'.format(\n value, self.SUPPORTED_OUTPUT_TYPES))\n\n if key == 'snr':\n if value <= 0:\n raise ValueError(\n 'snr (signal-to-noise ratio) must be a positive number')\n\n if key == 'is_adaptive':\n if not isinstance(value, bool):\n raise ValueError(\n 'Beamformer type (adaptive vs nonadaptive) is not set')\n\n def _update_covariance_matrix(self, input_array):\n t1 = time.time()\n alpha = self._forgetting_factor_per_sample\n sample_count = input_array.shape[TIME_AXIS]\n self.logger.debug('Number of samples: {}'.format(sample_count))\n new_Rxx_data = self._Rxx.data\n\n raw_array = mne.io.RawArray(\n input_array, self._mne_info, verbose='ERROR')\n raw_array.pick_types(eeg=True, meg=False, stim=False, exclude='bads')\n raw_array.set_eeg_reference(ref_channels='average', projection=True)\n input_array_nobads = raw_array.get_data()\n\n t2 = time.time()\n self.logger.debug(\n 'Prepared covariance update in {:.2f} ms'.format((t2 - t1) * 1000))\n samples = make_time_dimension_second(input_array_nobads).T\n new_Rxx_data = (alpha * new_Rxx_data +\n (1 - alpha) * samples.T.dot(samples))\n t3 = time.time()\n self.logger.debug(\n 'Updated matrix data in {:.2f} ms'.format((t3 - t2) * 1000))\n\n self._Rxx = mne.Covariance(new_Rxx_data, self._Rxx.ch_names,\n raw_array.info['bads'],\n raw_array.info['projs'], nfree=1)\n t4 = time.time()\n self.logger.debug('Created instance of covariance' +\n ' in {:.2f} ms'.format((t4 - t4) * 1000))\n\n\n# TODO: implement this function\ndef pynfb_filter_based_processor_class(pynfb_filter_class):\n \"\"\"\n Returns a ProcessorNode subclass with the functionality of\n pynfb_filter_class\n\n pynfb_filter_class: subclass of pynfb.signal_processing.filters.BaseFilter\n\n Sample usage 1:\n\n LinearFilter = pynfb_filter_based_processor_class(filters.ButterFilter)\n linear_filter = LinearFilter(band, fs, n_channels, order)\n\n Sample usage 2\n (this would correspond to a different implementation of this function):\n\n LinearFilter = pynfb_filter_based_processor_class(filters.ButterFilter)\n linear_filter = LinearFilter(band, order)\n\n In this case LinearFilter should provide\n fs and n_channels parameters to filters.ButterFilter automatically\n\n \"\"\"\n class PynfbFilterBasedProcessorClass(ProcessorNode):\n def _on_input_history_invalidation(self):\n pass\n\n def _check_value(self, key, value):\n pass\n\n @property\n def CHANGES_IN_THESE_REQUIRE_RESET(self) -> Tuple[str]:\n pass\n\n @property\n def UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION(self) -> Tuple[str]:\n pass\n\n def _reset(self):\n pass\n\n def __init__(self):\n pass\n\n def _initialize(self):\n pass\n\n def _update(self):\n pass\n return PynfbFilterBasedProcessorClass\n\n\nclass MCE(ProcessorNode):\n input = []\n output = []\n\n UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = ()\n CHANGES_IN_THESE_REQUIRE_RESET = ('mne_forward_model_file_path', 'snr')\n\n def __init__(self, snr=1.0, forward_model_path=None, n_comp=40):\n super().__init__()\n self.snr = snr\n self.mne_forward_model_file_path = forward_model_path\n self.n_comp = n_comp\n self.mne_info = None\n # pass\n\n def _initialize(self):\n print('INITIALIZING MCE NODE ...')\n mne_info = self.traverse_back_and_find('mne_info')\n # mne_info['custom_ref_applied'] = True\n # -------- truncated svd for fwd_opr operator -------- #\n fwd, missing_ch_names = get_clean_forward(\n self.mne_forward_model_file_path, mne_info)\n mne_info['bads'] = list(set(mne_info['bads'] + missing_ch_names))\n fwd_fix = mne.convert_forward_solution(\n fwd, 
surf_ori=True, force_fixed=False)\n\n self._gain_matrix = fwd_fix['sol']['data']\n\n print('MCE: COMPUTING SVD OF THE FORWARD OPERATOR')\n U, S, V = svd(self._gain_matrix)\n\n Sn = np.zeros([self.n_comp, V.shape[0]])\n Sn[:self.n_comp, :self.n_comp] = np.diag(S[:self.n_comp])\n\n self.Un = U[:, :self.n_comp]\n self.A_non_ori = Sn @ V\n # ---------------------------------------------------- #\n\n # -------- leadfield dims -------- #\n N_SEN = self._gain_matrix.shape[0]\n # -------------------------------- #\n\n # ------------------------ noise-covariance ------------------------ #\n cov_data = np.identity(N_SEN)\n ch_names = np.array(mne_info['ch_names'])[mne.pick_types(mne_info,\n eeg=True,\n meg=False)]\n ch_names = list(ch_names)\n noise_cov = mne.Covariance(\n cov_data, ch_names, mne_info['bads'],\n mne_info['projs'], nfree=1)\n # ------------------------------------------------------------------ #\n\n self.mne_inv = mne_make_inverse_operator(\n mne_info, fwd_fix, noise_cov, depth=0.8,\n loose=1, fixed=False, verbose='ERROR')\n self.mne_info = mne_info\n self.Sn = Sn\n self.V = V\n\n def _update(self):\n input_array = self.input_node.output\n last_slice = last_sample(input_array)\n n_src = self.mne_inv['nsource']\n n_times = input_array.shape[1]\n output_mce = np.empty([n_src, n_times])\n\n raw_slice = mne.io.RawArray(np.expand_dims(last_slice, axis=1),\n self.mne_info, verbose='ERROR')\n raw_slice.pick_types(eeg=True, meg=False, stim=False, exclude='bads')\n raw_slice.set_eeg_reference(ref_channels='average', projection=True)\n\n # ------------------- get dipole orientations --------------------- #\n stc_slice = apply_inverse_raw(raw_slice, self.mne_inv,\n pick_ori='vector',\n method='MNE', lambda2=1, verbose='ERROR')\n Q = normalize(stc_slice.data[:, :, 0]) # dipole orientations\n # ----------------------------------------------------------------- #\n\n # -------- setup linprog params -------- #\n n_sen = self.A_non_ori.shape[0]\n A_eq = np.empty([n_sen, n_src])\n for i in range(n_src):\n A_eq[:, i] = self.A_non_ori[:, i * 3: (i + 1) * 3] @ Q[i, :].T\n data_slice = raw_slice.get_data()[:, 0]\n b_eq = self.Un.T @ data_slice\n c = np.ones(A_eq.shape[1])\n # -------------------------------------- #\n\n with nostdout():\n sol = linprog(c, A_eq=A_eq, b_eq=b_eq,\n method='interior-point', bounds=(0, None),\n options={'disp': False})\n output_mce[:, :] = sol.x[:, np.newaxis]\n\n self.output = output_mce\n self.sol = sol\n return Q, A_eq, data_slice, b_eq, c\n\n def _on_input_history_invalidation(self):\n # The methods implemented in this node do not rely on past inputs\n pass\n\n def _reset(self):\n self._should_reinitialize = True\n self.initialize()\n output_history_is_no_longer_valid = True\n return output_history_is_no_longer_valid\n\n def _check_value(self, key, value):\n if key == 'snr':\n if value <= 0:\n raise ValueError(\n 'snr (signal-to-noise ratio) must be a positive number.')\n\n\nclass ICARejection(ProcessorNode):\n\n def __init__(self, collect_for_x_seconds: int=60):\n super().__init__()\n self.collect_for_x_seconds = collect_for_x_seconds # type: int\n\n self._samples_collected = None # type: int\n self._samples_to_be_collected = None # type: int\n self._enough_collected = None # type: bool\n\n self._reset_statistics()\n self._ica_rejector = None\n\n def _on_input_history_invalidation(self):\n self._reset_statistics()\n\n def _check_value(self, key, value):\n pass\n\n CHANGES_IN_THESE_REQUIRE_RESET = ('collect_for_x_seconds', )\n\n def _initialize(self):\n self._mne_info = 
self.traverse_back_and_find('mne_info')\n self._frequency = self._mne_info['sfreq']\n self._good_ch_inds = mne.pick_types(self._mne_info, eeg=True,\n meg=False, stim=False,\n exclude='bads')\n\n channels = self._mne_info['chs']\n self._ch_locs = np.array([ch['loc'] for ch in channels])\n\n n_ch = len(self._good_ch_inds)\n self._samples_to_be_collected = int(math.ceil(\n self.collect_for_x_seconds * self._frequency))\n self._collected_timeseries = np.zeros(\n [n_ch, self._samples_to_be_collected])\n self._linear_filter = filters.ButterFilter(\n [1, 200], fs=self._frequency,\n n_channels=len(self._good_ch_inds))\n self._linear_filter.apply = pynfb_ndarray_function_wrapper(\n self._linear_filter.apply)\n\n def _reset(self) -> bool:\n self._reset_statistics()\n self._input_history_is_no_longer_valid = True\n return self._input_history_is_no_longer_valid\n\n def _reset_statistics(self):\n self._samples_collected = 0\n self._enough_collected = False\n\n def _update(self):\n # Have we collected enough samples without the new input?\n self.output = self.input_node.output\n\n enough_collected = self._samples_collected >=\\\n self._samples_to_be_collected\n if not enough_collected:\n if self.input_node.output is not None and\\\n self.input_node.output.shape[TIME_AXIS] > 0:\n self._update_statistics()\n\n elif not self._enough_collected: # We just got enough samples\n self._enough_collected = True\n print('COLLECTED ENOUGH SAMPLES')\n ica = ICADialog(\n self._collected_timeseries.T,\n list(np.array(self._mne_info['ch_names'])[self._good_ch_inds]),\n self._ch_locs[self._good_ch_inds, :], self._frequency)\n\n ica.exec_()\n self._ica_rejector = ica.rejection.val.T\n else:\n self.output[self._good_ch_inds, :] = np.dot(\n self._ica_rejector,\n self.input_node.output[self._good_ch_inds, :])\n\n def _update_statistics(self):\n input_array = self.input_node.output.astype(np.dtype('float64'))\n n = self._samples_collected\n m = input_array.shape[TIME_AXIS] # number of new samples\n self._samples_collected += m\n self._collected_timeseries[:, n:n + m] = self._linear_filter.apply(\n input_array[self._good_ch_inds, :])\n # Using float64 is necessary because otherwise rounding error\n # in recursive formula accumulate\n pass\n\n UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = ('mne_info', )\n SAVERS_FOR_UPSTREAM_MUTABLE_OBJECTS = {'mne_info': channel_labels_saver}\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_86","text":"import numpy as np\nfrom scipy import optimize\nimport matplotlib.pyplot as plt\n\nclass Neural_Network(object):\n def __init__(self):\n # Define Hyperparameters\n # self.inputLayerSize = 2\n # self.outputLayerSize = 1\n # self.hiddenLayerSize = 3\n self.inputLayerSize = 784\n self.outputLayerSize = 1\n self.hiddenLayerSize = 100000\n\n # Weights (parameters)\n self.W1 = np.random.randn(self.inputLayerSize, self.hiddenLayerSize)\n self.W2 = np.random.randn(self.hiddenLayerSize, self.outputLayerSize)\n\n def forward(self, X):\n # Propogate inputs though network\n self.z2 = np.dot(X, self.W1)\n self.a2 = self.sigmoid(self.z2)\n self.z3 = np.dot(self.a2, self.W2)\n yHat = self.sigmoid(self.z3)\n return yHat\n\n def sigmoid(self, z):\n # Apply sigmoid activation function to scalar, vector, or matrix\n return 1 \/ (1 + np.exp(-z))\n\n def sigmoidPrime(self, z):\n # Gradient of sigmoid\n return np.exp(-z) \/ ((1 + np.exp(-z)) ** 2)\n\n def costFunction(self, X, y):\n # Compute cost for given X,y, use weights already stored in class.\n self.yHat = 
self.forward(X)\n J = 0.5 * sum((y - self.yHat) ** 2)\n return J\n\n def costFunctionPrime(self, X, y):\n # Compute derivative with respect to W and W2 for a given X and y:\n self.yHat = self.forward(X)\n\n delta3 = np.multiply(-(y - self.yHat), self.sigmoidPrime(self.z3))\n dJdW2 = np.dot(self.a2.T, delta3)\n\n delta2 = np.dot(delta3, self.W2.T) * self.sigmoidPrime(self.z2)\n dJdW1 = np.dot(X.T, delta2)\n\n return dJdW1, dJdW2\n\n # Helper Functions for interacting with other classes:\n def getParams(self):\n # Get W1 and W2 unrolled into vector:\n params = np.concatenate((self.W1.ravel(), self.W2.ravel()))\n return params\n\n def setParams(self, params):\n # Set W1 and W2 using single paramater vector.\n W1_start = 0\n W1_end = self.hiddenLayerSize * self.inputLayerSize\n self.W1 = np.reshape(params[W1_start:W1_end], (self.inputLayerSize, self.hiddenLayerSize))\n W2_end = W1_end + self.hiddenLayerSize * self.outputLayerSize\n self.W2 = np.reshape(params[W1_end:W2_end], (self.hiddenLayerSize, self.outputLayerSize))\n\n def computeGradients(self, X, y):\n dJdW1, dJdW2 = self.costFunctionPrime(X, y)\n return np.concatenate((dJdW1.ravel(), dJdW2.ravel()))\n\nclass trainer(object):\n def __init__(self, N):\n # Make Local reference to network:\n self.N = N\n\n def callbackF(self, params):\n self.N.setParams(params)\n self.J.append(self.N.costFunction(self.X, self.y))\n\n def costFunctionWrapper(self, params, X, y):\n self.N.setParams(params)\n cost = self.N.costFunction(X, y)\n grad = self.N.computeGradients(X, y)\n\n return cost, grad\n\n def train(self, X, y):\n # Make an internal variable for the callback function:\n self.X = X\n self.y = y\n\n # Make empty list to store costs:\n self.J = []\n\n params0 = self.N.getParams()\n\n options = {'maxiter': 200, 'disp': False}\n _res = optimize.minimize(self.costFunctionWrapper, params0, jac=True, method='L-BFGS-B', \\\n args=(X, y), options=options, callback=self.callbackF)\n\n self.N.setParams(_res.x)\n self.optimizationResults = _res\n\nimport mnist\nif __name__ == '__main__':\n x_train, t_train, x_test, t_test = mnist.load()\n print(x_train.shape)\n print(t_train.shape)\n print(x_test.shape)\n print(t_test.shape)\n # # X = (hours sleeping, hours studying), y = Score on test\n # X = np.array(([3, 5], [5, 1], [10, 2]), dtype=float)\n # y = np.array(([75], [82], [93]), dtype=float)\n # # Normalize\n # X = X \/ np.amax(X, axis=0)\n # y = y \/ 100 # Max test score is 100\n\n train_number=100\n X =x_train[0:train_number,:]\n y=t_train[0:train_number]\n y=y.reshape((len(y),1))\n\n NN = Neural_Network()\n\n print('y is ',y[0:10])\n yHat=NN.forward(X)\n print('yHat before train ',yHat[0:10])\n\n T = trainer(NN)\n T.train(X, y)\n yHat = np.round(NN.forward(X))\n print('yHat after is ',yHat[0:10])\n\n # # plt.plot(T.J)\n # # plt.grid(1)\n # # plt.xlabel('Iterations')\n # # plt.ylabel('Cost')\n # # plt.show()"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_87","text":"\"\"\"Miscellaneous stuff that doesn't really fit anywhere else.\"\"\"\n\nfrom textwrap import fill, dedent\n\n# if you use\n# filldedent('''\n# the text''')\n# a space will be put before the first line because dedent will\n# put a \\n as the first line and fill replaces \\n with spaces\n# so we strip off any leading and trailing \\n since printed wrapped\n# text should not have leading or trailing spaces.\nfilldedent = lambda s: '\\n' + fill(dedent(s).strip('\\n'))\n\ndef default_sort_key(item, order=None):\n \"\"\"\n A default sort key for lists of SymPy 
objects to pass to functions like sorted().\n\n This uses the default ordering. If you want a nonstandard ordering, you will\n have to create your own sort key using the sort_key() method of the object.\n\n Examples\n ========\n\n >>> from sympy import Basic, S, I, default_sort_key\n >>> from sympy.abc import x\n\n >>> sorted([S(1)\/2, I, -I], key=default_sort_key)\n [1\/2, -I, I]\n >>> a = [S(1)\/2, I, -I]\n >>> a.sort(key=default_sort_key)\n >>> a\n [1\/2, -I, I]\n\n >>> b = S(\"[x, 1\/x, 1\/x**2, x**2, x**(1\/2), x**(1\/4), x**(3\/2)]\")\n >>> b.sort(key=default_sort_key)\n\n The built-in functions min() and max() also take a key function (in Python\n 2.5 or higher), that this can be used for.\n \"\"\"\n\n #XXX: The following should also be in the docstring, but orders do not\n # actually work at the moment.\n\n # To use a nonstandard order, you must create your own sort key. The default\n # order is lex.\n\n # >>> from sympy import sympify\n # >>> mykey = lambda item: sympify(item).sort_key(order='rev-lex')\n # >>> sorted([x, x**2, 1], key=default_sort_key)\n # [x**2, x, 1]\n # >>> sorted([x, x**2, 1], key=mykey)\n # [1, x, x**2]\n\n from sympy.core import S, Basic\n from sympy.core.sympify import sympify, SympifyError\n from sympy.core.compatibility import iterable\n\n if isinstance(item, Basic):\n return item.sort_key(order=order)\n\n if iterable(item, exclude=basestring):\n if isinstance(item, dict):\n args = item.items()\n else:\n args = list(item)\n\n args = [default_sort_key(arg, order=order) for arg in args]\n\n if isinstance(item, dict):\n args = sorted(args)\n\n cls_index, args = 10, (len(args), tuple(args))\n else:\n if not isinstance(item, basestring):\n try:\n item = sympify(item)\n except SympifyError:\n pass\n\n if isinstance(item, Basic):\n return item.sort_key(order=order)\n\n cls_index, args = 0, (1, (str(item),))\n\n return (cls_index, 0, item.__class__.__name__), args, S.One.sort_key(), S.One\n\nimport sys\nsize = getattr(sys, \"maxint\", None)\nif size is None: #Python 3 doesn't have maxint\n size = sys.maxsize\nif size > 2**32:\n ARCH = \"64-bit\"\nelse:\n ARCH = \"32-bit\"\n\ndef debug(*args):\n \"\"\"\n Print ``*args`` if SYMPY_DEBUG is True, else do nothing.\n \"\"\"\n from sympy import SYMPY_DEBUG\n if SYMPY_DEBUG:\n for a in args:\n print a,\n print\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_88","text":"import numpy as np\nimport pylab as pl\nimport matplotlib.pyplot as plt\n\nfrom matplotlib.ticker import AutoMinorLocator\nfrom matplotlib.ticker import MultipleLocator\n\nfrom scipy import interpolate\n\nimport importlib\nimport sys\nimport os\n\nimport paths; importlib.reload(paths)\nimport pltaux; importlib.reload(pltaux)\nimport sysaux; importlib.reload(sysaux)\n\nmu = ['1.00', '0.90', '0.80', '0.70', '0.60', '0.50', '0.40', '0.30', '0.20', '0.10', '0.05']\n\nidxs_mu = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]\n\nip = np.zeros(len(mu))\n\nfor i in range(len(mu)): ip[i] = np.sqrt(1.0 - float(mu[i])**2.0)\n\nclv_n = np.loadtxt(paths.it0f + 'murmean_r_atl_abd\/CLV_RAD')\nclv_a = np.loadtxt(paths.out + 'atl_clv_86.dat')\n\nwvl_n = clv_n[:, 0] * 1e4 \/ 1e8\nwvl_a = clv_a[:, 0] * 1e4 \/ 1e7; wvl = wvl_a\n\nidx_n = np.where((wvl > min(wvl_n)) & (wvl < max(wvl_n)))\n\nwvl = wvl[idx_n]\n\n#sysaux.clean_dir(paths.figdir)\n\npltaux.figpar(fontsize = 20)\n\nfig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (12, 10))\n\nfig.tight_layout()\n\np = str(ip[0])[0 : 6]\n\nits_a = clv_a[idx_n[0], 1]\n\nits_n = interpolate.interp1d(wvl_n, 
clv_n[:, 1])(wvl)\n\nax.plot(wvl, its_n \/ its_a, color = 'k', label = '$\\mu =$ ' + mu[0] + ', $p =$ ' + p + '000')\n\nfor idx_mu in idxs_mu:\n\n if idx_mu != 1:\n\n p = str(ip[idx_mu - 1])[0 : 6]\n\n if len(p) == 3: p = p + '000'\n\n its_a = clv_a[idx_n[0], idx_mu]\n\n its_n = interpolate.interp1d(wvl_n, clv_n[:, idx_mu])(wvl)\n\n ax.plot(wvl, its_n \/ its_a, label = '$\\mu =$ ' + mu[idx_mu - 1] + ', $p =$ ' + p)\n\nax.xaxis.set_major_locator(MultipleLocator(10))\nax.yaxis.set_major_locator(MultipleLocator(0.002))\n\nax.set_xlabel('Wavelength, [$\\mu$m]')\nax.set_ylabel('NESSY \/ ATLAS')\n\nax.grid(True)\n\nax.set_xlim(7, 160)\n#ax.set_ylim(1.002, 1.022)\nax.set_ylim(0.998, 1.020)\n\nleg = ax.legend(frameon = True, framealpha = 1, loc = 2, prop={'size': 15})\n\nfor handle in leg.legendHandles: handle.set_linewidth(3.0)\n\npltaux.savepdf('nesatl_rad')\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_89","text":"ariolwork\/uni_conditional_gradient\nfrom scipy.optimize import linprog\nimport math\n# от рукинаписанные нужные части numpy\nfrom my_numpy import np, my_list\neps = 0.00001\n\n\n# class that contain function, it's derivative and spetial combination for optimization\nclass Func:\n def __init__(self, func, func_der):\n self.f = func # function J(u)\n self.fder = func_der # function derivate J'(u)\n \n def J_k(self, u_k, u):\n return sum(self.fder(u_k) * (u - u_k)) #function J_k(u)\n \n# class that contain some type frames for optimisation and minimization method for this type of frames and linear function\nclass Frames:\n #-----------------------------------------------\n #type 0: frames like a_i <= u_i <= b_i\n def __set_frames0(self, a, b):\n self.a = a\n self.b = b\n #type 1: frame for symplex method\n def __set_frames1(self, A, b):\n self.A = A\n self.b = b\n \n __setframesfuncs = {\n 0: __set_frames0,\n 1: __set_frames1\n }\n #-----------------------------------------------\n def __set_size0(self):\n if len(self.a) == len(self.b):\n return len(self.a)\n else:\n return -1\n def __set_size1(self):\n if len(self.A) == len(self.b):\n return len(self.b)\n else:\n return -1\n \n __setSize = {\n 0: __set_size0,\n 1: __set_size1\n }\n #------------------------------------------------\n def __init__(self, type_of_conditions, minimize_func):\n self.type = type_of_conditions\n self.minimize = minimize_func\n self.setframes = self.__setframesfuncs.get(type_of_conditions)\n self.size = self.__setSize.get(type_of_conditions)\n \n def get_size(self):\n return self.size(self) \n\n# class of task contains function, frames and some help functions and parameters\nclass Job:\n def __init__(self, func, frames, u_0, alpha):\n self.f = func\n self.frames = frames\n self.u_0 = u_0 # start point\n self.u_k = u_0 # the point got last time\n self.__alpha = alpha # rule(function) for alpha calculation\n self.k = 0 # step number\n \n # сheck task by\n # compare dimension of function, derivative, frames and x \n def check_errors(self):\n a = type(self.f.f(self.u_0))\n b = len(self.f.fder(self.u_0))\n c = self.frames.get_size()\n print(\"func:\",a,\"\\nframes:\",c,\"\\nder:\",b,\"\\nu_0:\", len(self.u_0), \"\\n\")\n \n # calculate new point using previus\n def get_next_u(self, u1_k):\n self.k+=1\n return self.u_k + (u1_k - self.u_k)*self.__alpha(self, u1_k)\n \n # find abutting poin\n def find_u1_k(self):\n return self.frames.minimize(self.f, self.frames, self.u_k)\n\n\n# one variable function minimisation methods\nclass One_variable_function_minimisation_methods: \n 
#---------------------------------------------------\n @staticmethod\n def golden_ratio_method(func, a, b, eps=0.000001):\n __MAGIC_CONSTANT_1 = (3 - math.sqrt(5))\/2\n __MAGIC_CONSTANT_2 = (math.sqrt(5) - 1)\/2\n while True:\n if b-a < eps:\n return (a+b)\/2\n u1 = a + __MAGIC_CONSTANT_1*(b-a)\n u2 = a + __MAGIC_CONSTANT_2*(b-a)\n if func(u1)<=func(u2):\n b = u2\n else:\n a = u1\n return -1\n #---------------------------------------------------\n @staticmethod\n def tangent_method(func, a, b, eps=0.000001):\n while True:\n if func.fder(a)>=0:\n return a\n if func.fder(b)<=0:\n return b\n if abs(a-b)0:\n ans.append(frames.a[i])\n elif der[i]<0:\n ans.append(frames.b[i])\n else:\n ans.append((frames.a[i]+frames.b[i])\/2)\n return np.array(ans)\n\n# minimisation function(symplex method) for spetioal(linear) type of frames\ndef symplex_meyhod_minimize_function(func, frames, u_k):\n return np.array(linprog(func.fder(u_k), frames.A, frames.b).x)\n \n\n# method for different stop rules\ndef calculate_m(job, eps, steps):\n def method_full(J, eps, steps):\n f_sequ = []\n u_k_sequ = []\n k = 0\n f_sequ.append(J.f.f(J.u_k))\n u_k_sequ.append(J.u_k)\n u_k = 0\n while True:\n u1_k = J.find_u1_k()\n u_k = J.u_k\n J.u_k = J.get_next_u(u1_k)\n f_sequ.append(J.f.f(J.u_k))\n u_k_sequ.append(J.u_k)\n if k>steps or np.all(J.u_k == u_k) or abs(J.f.f(J.u_k) - J.f.f(u_k)) <= eps:\n break\n k+=1\n return J.f.f(J.u_k), J.u_k, f_sequ, u_k_sequ, k, abs(J.f.f(J.u_k) - J.f.f(u_k))\n def method_eps(J, eps):\n f_sequ = []\n u_k_sequ = []\n k = 0\n f_sequ.append(J.f.f(J.u_k))\n u_k_sequ.append(J.u_k)\n u_k = 0\n while True:\n u1_k = J.find_u1_k()\n u_k = J.u_k\n J.u_k = J.get_next_u(u1_k)\n f_sequ.append(J.f.f(J.u_k))\n u_k_sequ.append(J.u_k)\n if k>100000000 or np.all(J.u_k == u_k) or abs(J.f.f(J.u_k) - J.f.f(u_k)) <= eps:\n break\n k+=1\n return J.f.f(J.u_k), J.u_k, f_sequ, u_k_sequ, k, abs(J.f.f(J.u_k) - J.f.f(u_k))\n def method_steps(J, steps):\n f_sequ = []\n u_k_sequ = []\n k = 0\n f_sequ.append(J.f.f(J.u_k))\n u_k_sequ.append(J.u_k)\n u_k = 0\n# print(\"u_k:{}, f:{}\".format(J.u_k, J.f.f(J.u_k)))\n while True:\n u1_k = J.find_u1_k()\n u_k = J.u_k\n J.u_k = J.get_next_u(u1_k)\n # print(\"u_k:{}, f:{}, u1_k:{}\".format(J.u_k, J.f.f(J.u_k), u1_k))\n f_sequ.append(J.f.f(J.u_k))\n u_k_sequ.append(J.u_k)\n if k>steps or np.all(J.u_k == u_k):\n break\n k+=1\n return J.f.f(J.u_k), J.u_k, f_sequ, u_k_sequ, k+1, abs(J.f.f(J.u_k) - J.f.f(u_k))\n\n if steps == -1:\n return method_eps(job, eps)\n elif eps == -1:\n return method_steps(job, steps)\n return method_full(job, eps, steps)\n\n#calculate_m(job1, -1, 100)"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_90","text":"dangeng\/infiniteGANorama\nimport os.path\nfrom data.base_dataset import BaseDataset, get_transform\nfrom data.image_folder import make_dataset\nimport numpy as np\nfrom PIL import Image\nfrom scipy.misc import imresize\n\n\nclass FrankensteinDataset(BaseDataset):\n @staticmethod\n def modify_commandline_options(parser, is_train):\n return parser\n\n def initialize(self, opt):\n self.opt = opt\n self.root = opt.dataroot\n self.dir_A = os.path.join(opt.dataroot)\n\n self.A_paths = make_dataset(self.dir_A)\n\n self.A_paths = sorted(self.A_paths)\n\n self.transform = get_transform(opt)\n\n def random_crop(self, im, size=500, resize=256):\n h,w,_ = im.shape\n h_start, w_start = np.random.randint(h-size), np.random.randint(w-size)\n crop = im[h_start:h_start+size, w_start:w_start+size, :].copy()\n if resize:\n return 
imresize(crop, (resize, resize))\n else:\n return crop\n\n def __getitem__(self, index):\n idx_l = np.random.randint(len(self.A_paths))\n idx_r = np.random.randint(len(self.A_paths))\n A_path_l = self.A_paths[idx_l]\n A_path_r = self.A_paths[idx_r]\n A_img_l = Image.open(A_path_l).convert('RGB')\n A_img_r = Image.open(A_path_r).convert('RGB')\n\n A_l = np.array(A_img_l)\n A_r = np.array(A_img_r)\n\n #A_l, A_r = self.random_crop(A_l), self.random_crop(A_r) # CHANGE\n\n h,w,c = A_l.shape\n\n A_img = np.zeros_like(A_l)\n A_img[:,:w\/\/2,:] = A_l[:,:w\/\/2,:]\n A_img[:,w\/\/2:,:] = A_r[:,w\/\/2:,:]\n A_img = Image.fromarray(A_img)\n\n A = self.transform(A_img)\n if self.opt.direction == 'BtoA':\n input_nc = self.opt.output_nc\n else:\n input_nc = self.opt.input_nc\n\n if input_nc == 1: # RGB to gray\n tmp = A[0, ...] * 0.299 + A[1, ...] * 0.587 + A[2, ...] * 0.114\n A = tmp.unsqueeze(0)\n\n return {'A': A, 'A_paths': '[{}]+[{}]]'.format(A_path_l, A_path_r)}\n\n def __len__(self):\n return len(self.A_paths)\n\n def name(self):\n return 'FrankensteinImageDataset'\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_91","text":"import vertica_python\nimport numpy as np\nimport scipy.stats as ss\nimport math\nfrom collections import Counter\n\n\ndef XHash(token, hash_size=128):\n number_of_ones = 5\n char = [' ', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',\n 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n segment_size_dict = {64: 1, 128: 3, 256: 6, 512: 13}\n segment_size = segment_size_dict[hash_size]\n length_bit_start = 37 * segment_size\n result = 0\n cnt_dict = Counter(token)\n selected_chars = [y[0] for y in sorted(cnt_dict.items(), key=lambda x: (x[1], x[0]), reverse=False)[:number_of_ones]]\n for c in selected_chars:\n if c not in char:\n continue\n indices = [i for i, ltr in enumerate(token) if ltr == c]\n mean_index = np.mean(indices)\n token_size = len(token)\n for i in np.arange(segment_size):\n if mean_index <= ((i + 1) * token_size \/ segment_size):\n location = char.index(c) * segment_size + i\n break\n result = result | int(math.pow(2, location))\n\n # rotation\n n = int(result)\n d = int((length_bit_start * (len(token) % (hash_size - length_bit_start))) \/ (\n hash_size - length_bit_start))\n INT_BITS = int(length_bit_start)\n x = n << d\n y = n >> (INT_BITS - d)\n r = int(math.pow(2, INT_BITS))\n result = int((x | y) % r)\n\n result = int(result) | int(math.pow(2, len(token) % (hash_size - length_bit_start)) * math.pow(2, length_bit_start))\n\n return result\n\ndef generate_index(hash_size = 128):\n conn_info = {'host': 'SERVER_IP_ADDRESS',\n 'port': 5433,\n 'user': 'USERNAME',\n 'password': 'PASSWORD',\n 'database': 'DATABASE_NAME',\n 'session_label': 'some_label',\n 'read_timeout': 6000,\n 'unicode_error': 'strict',\n }\n\n connection = vertica_python.connect(**conn_info)\n cur = connection.cursor()\n cur.execute('SELECT tableid, MAX(rowid) FROM main_tokenized GROUP BY tableid LIMIT 10;')\n\n for row in cur.fetchall():\n tableid = int(row[0])\n rowid_max = int(row[1])\n for rowid in np.arange(rowid_max):\n cur.execute('SELECT tokenized FROM main_tokenized WHERE tableid = {} AND rowid = {};'.format(tableid, rowid))\n\n row_tokens = cur.fetchall()\n row_tokens = [item for sublist in row_tokens for item in sublist]\n\n superkey = 0\n for token in row_tokens:\n superkey = superkey | XHash(str(token), hash_size)\n\n cur.execute('UPDATE main_tokenized SET superkey = 
{} WHERE tableid = {} AND rowid = {}; COMMIT;'.format(superkey, tableid, rowid))\n\n\ngenerate_index()\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_92","text":"vmf_embeddings\/evaluate.py1-10\n# coding=utf-8\n# Copyright 2021 The vMF Embeddings Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Functions for performing fixed-set and open-set validation of a model.\"\"\"\n\nimport faiss\nimport numpy as np\nfrom scipy.stats import mode\nfrom sklearn.neighbors import KNeighborsClassifier\nimport torch\nimport torch.nn.functional as F\n\nfrom vmf_embeddings import utils\nfrom vmf_embeddings.third_party.hyperbolic_image_embeddings import hypnn\nfrom vmf_embeddings.third_party.s_vae_pytorch import distributions\n\n\ndef fixed_set_val_loop(arch, ds, method, cfg):\n \"\"\"Performs fixed-set validation by computing accuracy.\n\n Args:\n arch: An archs.arch.Arch object (e.g., ResNet50 or N-digit MNIST arch).\n ds: A datasets object (e.g., MNIST, CIFAR10).\n method: A methods.methods softmax cross-entropy object (e.g., ArcFace).\n cfg: The configuration dictionary from Hydra.\n\n Returns:\n Validation accuracy.\n \"\"\"\n method_name = method.__class__.__name__\n arch.train(False)\n ds.switch_split(\"valid\")\n loader = utils.get_data_loader(\"sequential\", ds, cfg.num_workers,\n {\"batch_size\": 128})\n n_correct = 0\n with torch.no_grad():\n for batch in loader:\n examples = batch[\"examples\"]\n ids = batch[\"ids\"]\n if torch.cuda.is_available():\n examples = examples.cuda()\n ids = ids.cuda()\n # vMF has samples, so computing validation accuracy differs\n if method_name == \"VMFSoftmax\":\n preds, _ = method(\n examples,\n ids,\n get_predictions=True,\n n_samples=method.n_samples,\n )\n preds = torch.mean(F.softmax(preds, dim=2), dim=1)\n preds = torch.argmax(preds, dim=1)\n n_correct += torch.sum((preds == ids).int()).item()\n # Compute n_correct for deterministic methods\n else:\n preds, _ = method(examples, ids, get_predictions=True)\n n_correct += torch.sum((torch.argmax(preds, dim=1) == ids).int()).item()\n return n_correct \/ float(len(ds))\n\n\ndef open_set_val_loop(arch, ds, method, cfg):\n \"\"\"Performs open-set validation by computing recall@1.\n\n Args:\n arch: An archs.arch.Arch object (e.g., ResNet50 or N-digit MNIST arch).\n ds: A datasets object (e.g., MNIST, CIFAR10).\n method: A methods.methods softmax cross-entropy object (e.g., ArcFace).\n cfg: The configuration dictionary from Hydra.\n\n Returns:\n Validation recall@1.\n \"\"\"\n method_name = method.__class__.__name__\n arch.train(False)\n ds.switch_split(\"valid\")\n loader = utils.get_data_loader(\"sequential\", ds, cfg.num_workers,\n {\"batch_size\": 128})\n\n # Extract embeddings and ids\n embs = []\n ids = []\n with torch.no_grad():\n for batch in loader:\n ids.append(batch[\"ids\"].detach().cpu().numpy())\n examples = batch[\"examples\"]\n if torch.cuda.is_available():\n examples = examples.cuda()\n 
embs.append(method.get_embeddings(examples).detach().cpu().numpy())\n embs = np.concatenate(embs, axis=0)\n ids = np.concatenate(ids, axis=0)\n\n # For l2-normalized methods\n if method_name in [\"VMFSoftmax\", \"ArcFace\", \"NormalizedSoftmaxCE\"]:\n norm_method = utils.get_norm_method_by_name(\"l2\")\n embs, norms = norm_method(embs, use_torch=False, return_norms=True)\n # For hyperbolic softmax\n elif method_name == \"HyperbolicSoftmaxCE\":\n norm_method = utils.get_norm_method_by_name(\"hyperbolic\")\n embs = norm_method(embs, use_torch=False, return_norms=False, c=method.c)\n\n # For vMF, need to marginalize over samples\n if method_name == \"VMFSoftmax\":\n with torch.no_grad():\n z = torch.from_numpy(embs)\n z_norms = torch.from_numpy(norms)\n if torch.cuda.is_available():\n z = z.cuda()\n z_norms = z_norms.cuda()\n z_dist = distributions.VonMisesFisher(z, z_norms)\n z_samples = (\n z_dist.sample(torch.Size([method.n_samples\n ])).permute(1, 0, 2).detach().cpu().numpy())\n\n norms = norms.squeeze(1)\n corrects = []\n for i in range(method.n_samples):\n z = z_samples[:, i, :]\n index = faiss.IndexFlatIP(z.shape[1])\n # pylint: disable=no-value-for-parameter\n index.add(z)\n # pylint: disable=no-value-for-parameter\n _, idxs = index.search(z, 2)\n preds = ids[idxs[:, 1]]\n correct = ids == preds\n corrects.append(correct)\n corrects = np.array(corrects)\n valid_acc = np.mean(mode(corrects, axis=0)[0])\n\n # For hyperbolic, need to compute Poincare distance matrix\n elif method_name == \"HyperbolicSoftmaxCE\":\n # Since hyperbolic distance is non-trivial to compute, we use numpy instead\n # of faiss\n dist_matrix = hypnn.pairwise_dist_matrix(embs, method.c, batch_size=256)\n # NOTE: Need to use kNN with precomputed distances since we're using\n # hyperbolic distance\n knn = KNeighborsClassifier(n_neighbors=1, n_jobs=1, metric=\"precomputed\")\n knn.fit(dist_matrix, ids)\n idxs = knn.kneighbors(return_distance=False)\n preds = np.squeeze(ids[idxs], axis=1)\n correct = ids == preds\n valid_acc = np.mean(correct)\n\n # For all other methods, just compute pairwise distances\n else:\n index = faiss.IndexFlatL2(embs.shape[1])\n # pylint: disable=no-value-for-parameter\n index.add(embs)\n # pylint: disable=no-value-for-parameter\n _, idxs = index.search(embs, 2)\n preds = ids[idxs[:, 1]]\n correct = ids == preds\n valid_acc = np.mean(correct)\n\n return valid_acc\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_93","text":"gkuznetsov\/veles.znicz\n# encoding: utf-8\n\"\"\"\n.. invisible:\n _ _ _____ _ _____ _____\n | | | | ___| | | ___\/ ___|\n | | | | |__ | | | |__ \\ `--.\n | | | | __|| | | __| `--. \\\n \\ \\_\/ \/ |___| |___| |___\/\\__\/ \/\n \\___\/\\____\/\\_____|____\/\\____\/\n\nCreated on July 30, 2014\n\n███████████████████████████████████████████████████████████████████████████████\n\nLicensed to the Apache Software Foundation (ASF) under one\nor more contributor license agreements. See the NOTICE file\ndistributed with this work for additional information\nregarding copyright ownership. The ASF licenses this file\nto you under the Apache License, Version 2.0 (the\n\"License\"); you may not use this file except in compliance\nwith the License. You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing,\nsoftware distributed under the License is distributed on an\n\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\nKIND, either express or implied. 
See the License for the\nspecific language governing permissions and limitations\nunder the License.\n\n███████████████████████████████████████████████████████████████████████████████\n\"\"\"\n\ntry:\n import cv2\nexcept ImportError:\n import warnings\n warnings.warn(\"Failed to import OpenCV bindings\")\nimport numpy\nimport scipy.stats\nimport statsmodels as sm\n\n\ndef is_background(pic_path, thr=8.0):\n \"\"\"\n Reads an image in grayscale, then fits its color intensity distribution\n as normal. Then compares fitted CDF with empirical CDF. If they are\n similar, thinks, that it is background\n\n Args:\n pic_path(str): path to image\n the(float): a threshold\n Returns:\n bool\n\n \"\"\"\n pic_ravel = cv2.imread(pic_path, 0).ravel()\n mu, std = scipy.stats.norm.fit(pic_ravel)\n x_array = numpy.linspace(0, 255, num=256)\n cdf = scipy.stats.norm.cdf(x_array, mu, std)\n ecdf = sm.tools.tools.ECDF(pic_ravel)(x_array)\n\n delta = numpy.sum(numpy.abs(ecdf - cdf))\n\n return delta < thr\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_94","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: \r\n\"\"\"\r\n\r\n\r\nsource_directory = 'point to source directory here'\r\nprojects_directory = 'point to SEEG data directory here'\r\n\r\nimport numpy as np\r\nimport csv\r\nimport cPickle as pick\r\nimport sys\r\nimport copy\r\nsys.path.append(source_directory + 'Python27\\\\Utilities')\r\nimport CF_functions as cffun\r\nimport plot_functions as plots\r\nimport matplotlib.pyplot as mpl\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.colors as colors\r\nimport time\r\nimport bootstrap as bst\r\nfrom scipy import stats as stat\r\nimport statsmodels.sandbox.stats.multicomp as multicomp\r\n\r\n\r\n### CF type and PS metric\r\n\r\nCF_type = 'CFS' # 'PAC' or 'CFS'\r\nPS_metric = 'wPLI' # 'PLV' or 'wPLI'\r\n\r\nsign_z_CFC = 2.42\r\nif PS_metric == 'PLV':\r\n sign_z_PS = 2.42 \r\nelse:\r\n sign_z_PS = 2.33\r\n\r\n\r\n\r\n### subject, frequency and parcellation settings\r\nsubjects = ['S' + str(i+1).zfill(3) for i in range(59)]\r\n \r\ndirectory = projects_directory+'resting_state\\\\RS_CF_SEEG\\\\'\r\n\r\n\r\nCFM_filename = directory + '_support_files\\\\CF_matrix_SEEG.csv'\r\nfreqs_filename= directory + '_support_files\\\\all_frequencies_SEEG.csv'\r\nCFM = np.genfromtxt(CFM_filename, delimiter=';') \r\nLFs = CFM[:,0]\r\nfreqs = np.genfromtxt(freqs_filename, delimiter=';') \r\n\r\nparc = 'parc2009' \r\nN_freq = len(freqs)\r\nN_LF = len(LFs)\r\nN_subj = len(subjects)\r\nN_ratios = 6 \r\ncutoff_PS = 100 \r\ncutoff_CF = 350 \r\nxlims_PS = [1,100]\r\nxlims_CFC = [1,100] \r\n\r\nHFs = [] \r\nfor f in range(len(LFs)):\r\n x = CFM[f,1:N_ratios+1]\r\n xx = x[np.intersect1d(np.where(x0)) ] \r\n if len(xx) >0:\r\n HFs.append(xx) \r\nHFs_env = [CFM[:30,i+1] for i in range(6)] \r\n\r\nfreq_bands = [range(0,5),range(5,11),range(11,18),range(18,25),range(25,N_freq)]\r\nfreq_bands_LF = [range(1,6),range(3,10),range(9,16),range(16,22),range(22,N_LF)]\r\n \r\nratios = ['1:'+str(i+2) for i in range(N_ratios)] \r\nratios2 = ['1-'+str(i+2) for i in range(N_ratios)] \r\n\r\n\r\n### get epileptic-contact-masks and distance masks\r\n\r\ndist_thresholds = [0.02] \r\nall_dist = np.empty(0)\r\ndist = [None for i in subjects]\r\nmasks = [None for i in subjects]\r\nch_per_subject = [None for i in subjects]\r\nfor s,subject in enumerate(subjects):\r\n dist_filename = directory + '_support_files\\\\distances\\\\' + subject + '.csv'\r\n dist[s] = np.genfromtxt(dist_filename, delimiter=';') \r\n 
mask_filename = directory + '_support_files\\\\masks\\\\' + subject + '.csv'\r\n masks[s] = np.genfromtxt(mask_filename, delimiter=';') \r\n d2 = dist[s]*masks[s]\r\n all_dist = np.append(all_dist,d2.reshape(len(dist[s])**2)) \r\n ch_per_subject[s] = len(list(masks[s]))\r\nall_dist = all_dist[np.where(all_dist>0)] \r\ndist_thresholds.extend(np.percentile(all_dist,[33.3333,66.66667]))\r\ndist_max = max(all_dist)\r\ndist_thresholds.extend([dist_max])\r\ndist_strs = ['{:.1f}'.format(d*100) for d in dist_thresholds] \r\nN_dist_bins = len(dist_thresholds)-1 \r\ndistances = [dist_strs[i]+'-'+dist_strs[i+1]+'cm' for i in range(N_dist_bins)]\r\n\r\ndists_short = ['short','mid','long']\r\n\r\n\r\n\r\n\r\n### get GMPI info\r\n\r\nGMPI_vals = [[] for i in subjects]\r\nGMPI_list = [[] for i in subjects]\r\nGMPI_vals_all = []\r\n\r\nfor s,subject in enumerate(subjects):\r\n gmpi_filename = directory + '_support_files\\\\gmpi\\\\' + subject + '.csv'\r\n with open(gmpi_filename, 'rb') as csvfile:\r\n reader = csv.reader(csvfile, delimiter = ';')\r\n for row in reader:\r\n GMPI_list[s].append(row) \r\n GMPI_vals[s].append(float(row[1]))\r\n GMPI_vals_all.append(float(row[1]))\r\n \r\n \r\n### get layer interaction masks \r\n \r\nGMPI_vals_all=filter(lambda v: v==v, GMPI_vals_all) # remove nans \r\nN_layer_int = 4 \r\nN_layer = 3 \r\nlayer_int_masks = [[None for j in range(N_layer_int)] for i in subjects] # 0: deep-to-deep, 1: sup-to-sup, 2: deep-to-sup\r\nN_pairs_layer_int = [[None for j in range(N_layer_int)] for i in subjects]\r\nlayer_int = ['superf-surf','deep-deep','superf-deep','deep-superf']\r\nlayers = ['superf','interm','deep']\r\nchannel_layers = [None for s in subjects]\r\nN_ch_layer_s = np.zeros([N_subj,N_layer])\r\n\r\nfor s,subject in enumerate(subjects):\r\n channel_layers[s] = np.full(ch_per_subject[s],np.nan)\r\n for l in range(N_layer_int): \r\n layer_int_masks[s][l] = np.zeros([ch_per_subject[s],ch_per_subject[s]]) \r\n for ch1,g1 in enumerate(GMPI_vals[s]):\r\n if ( 0.5 < g1 < 1.2):\r\n channel_layers[s][ch1] = 0 # surface\r\n if ( 0 < g1 < 0.5):\r\n channel_layers[s][ch1] = 1 # intermed.\r\n if (-0.3 < g1 < 0 ): \r\n channel_layers[s][ch1] = 2 # deep\r\n for ch1,g1 in enumerate(GMPI_vals[s]): \r\n for ch2,g2 in enumerate(GMPI_vals[s]):\r\n if (0.5 < g1 < 1.2 and 0.5 < g2 < 1.2 ): # surface to surface\r\n layer_int_masks[s][0][ch1,ch2]=1\r\n if (-0.3 < g1 < 0 and -0.3 < g2 < 0 ): # deep to deep\r\n layer_int_masks[s][1][ch1,ch2]=1 \r\n if (0.5 < g1 < 1.2 and -0.3 < g2 < 0): # surf to deep\r\n layer_int_masks[s][3][ch1,ch2]=1 \r\n if (-0.3 < g1 < 0 and 0.5 < g2 < 1.2 ): # deep to surface\r\n layer_int_masks[s][2][ch1,ch2]=1\r\n \r\n for l in range(N_layer_int): \r\n mask1 = copy.copy(masks[s])\r\n layer_int_masks[s][l] = layer_int_masks[s][l]*mask1 \r\n N_pairs_layer_int[s][l] = int(np.sum(layer_int_masks[s][l])) - np.sum(np.diag(layer_int_masks[s][l])>0) \r\n for l in range(N_layer): \r\n N_ch_layer_s[s,l] = np.sum(channel_layers[s]==l)\r\n\r\nN_ch_layer = np.nansum(N_ch_layer_s,0)\r\n \r\n\r\n\r\n### colormaps for plotting\r\nmy_cmap = plots.make_cmap([(1.0, 0.0, 0.0), (0.0, 0.0, 1.0), (0.0, 1.0, 0.0), (0.8, 0.0, 1.0)])\r\nmy_cmap2 = plots.make_cmap([(0.0, 0.0, 0.0), (0.5, 0.5, 1.0), (0.6, 0.6, 1.0), (0.7, 0.7, 1.0), (0.8, 0.8, 1.0),(0.9, 0.9, 1.0), (1, 1, 1)])\r\nmy_cmap3 = plots.make_cmap([(1.0, 0.0, 0.0), (0.0, 0.6, 0.0), (1.0, 0.5, 0.0), (0.5, 0.0, 1.0), (0.6, 0.4, 0.4)]) \r\nmy_cmap4 = plots.make_cmap([(0.8, 0.6, 0.0), (1.0, 0.0, 0.0), (0.0, 0.8, 0.0), (0.1, 0.1, 0.1), (1.0, 0.4, 
0.9), (0.0, 0.0, 1.0), (0.8, 0.0, 0.9)])\r\nmy_cmap5 = plots.make_cmap([(1,0,0), (0,1,0), (0,0,1)])\r\nmy_cmap6 = plots.make_cmap([(1,0,0), (0,0.7,0), (0,0,1), (1, 0.4, 0.4), (0.4,1,0.4), (0.4,0.4,1) ])\r\nmy_cmap7 = plots.make_cmap([(0.0, 0.0, 1.0), (1.0, 0.0, 0.0), (0.0, 0.6, 0.0), (1.0, 0.5, 0.0), (0.5, 0.0, 1.0), (0.6, 0.4, 0.4)]) \r\n\r\n\r\n\r\n### set matplotlib parameters \r\nmpl.rcParams['pdf.fonttype'] = 42 # for PDF compatibility with Illustrator\r\nmpl.rcParams.update({'font.size': 8})\r\nmpl.rcParams.update({'axes.titlesize': 8})\r\nmpl.rcParams.update({'axes.labelsize': 8})\r\nmpl.rcParams.update({'legend.fontsize': 6})\r\nmpl.rcParams.update({'xtick.labelsize': 7})\r\nmpl.rcParams.update({'ytick.labelsize': 7})\r\n\r\n\r\n\r\n\r\n### initialize lists\r\nPS = [[ None for i in range(N_freq)] for j in range(N_subj)] \r\nPS_dist = [[[ None for b in range(N_dist_bins)] for i in range(N_freq)] for j in range(N_subj)] \r\nPS_layer = [[[ None for b in range(N_layer_int)] for i in range(N_freq)] for j in range(N_subj)] \r\nCFC = [[[ None for i in range(N_ratios)] for j in range(N_LF)] for k in range(N_subj)]\r\nCFC_dist = [[[[ None for b in range(N_dist_bins)] for i in range(N_ratios)] for j in range(N_LF)] for k in range(N_subj)]\r\nCFC_layer = [[[[ None for b in range(N_layer_int)] for i in range(N_ratios)] for j in range(N_LF)] for k in range(N_subj)]\r\nPS_ENV = [[[ None for i in range(N_ratios)] for j in range(N_LF)] for k in range(N_subj)]\r\nPS_ENV_dist = [[[[ None for b in range(N_dist_bins)] for i in range(N_ratios)] for j in range(N_LF)] for k in range(N_subj)]\r\nPS_ENV_layer = [[[[ None for b in range(N_layer_int)] for i in range(N_ratios)] for j in range(N_LF)] for k in range(N_subj)]\r\n\r\n\r\n \r\n \r\n\r\n#### compute PS single-subject stats \r\nfor s,subject in enumerate(subjects): \r\n for f,F in enumerate(freqs): \r\n F_str = '{:.2f}'.format(F) \r\n mask = copy.copy(masks[s])\r\n N_pot = np.nansum(mask) \r\n if PS_metric == 'wPLI': \r\n file1 = directory + '_data\\\\_PS_wPLI\\\\' + subject + ' f=' + F_str + '.csv'\r\n file2 = directory + '_data\\\\_PS_wPLI\\\\' + subject + ' f=' + F_str + '_surr.csv'\r\n else: \r\n file1 = directory + '_data\\\\_PS_PLV\\\\' + subject + ' f=' + F_str + '.csv'\r\n file2 = directory + '_data\\\\_PS_PLV\\\\' + subject + ' f=' + F_str + '_surr.csv'\r\n data = mask*np.genfromtxt(file1, delimiter=';') \r\n data_surr = mask*np.genfromtxt(file2, delimiter=';') \r\n stats = cffun.K_stats_PS_2(data,data_surr,sign_z_PS,PS_metric)\r\n\r\n PS[s][f] = stats \r\n \r\n for d in range(N_dist_bins):\r\n dist_mask = mask * ( ( (dist[s]>dist_thresholds[d]) * (dist[s]<=dist_thresholds[d+1]) )>0)\r\n N_potD = np.nansum(dist_mask) \r\n dataD = data*dist_mask \r\n if N_potD>0:\r\n stats = cffun.K_stats_PS_2(dataD,data_surr,sign_z_PS,PS_metric) \r\n else:\r\n stats = cffun.Stats_PS(np.nan) \r\n PS_dist[s][f][d] = stats\r\n\r\n for l in range(N_layer_int):\r\n layer_mask = mask * layer_int_masks[s][l]\r\n N_potL = np.nansum(layer_mask) \r\n dataL = data*layer_mask\r\n if N_potL>0:\r\n stats = cffun.K_stats_PS_2(dataL,data_surr,sign_z_PS,PS_metric) \r\n else:\r\n stats = cffun.Stats_PS(np.nan) \r\n PS_layer[s][f][l] = stats\r\n \r\n print(time.strftime(\"%Y-%m-%d %H:%M\") + ' ' + subject)\r\n \r\n\r\n#### for PAC: compute PLV single-subject stats of LF-envelope filtered HF data \r\nif CF_type == 'PAC':\r\n for s,subject in enumerate(subjects): \r\n mask=masks[s] \r\n for lf,LF in enumerate(LFs):\r\n for hf,HF in enumerate(HFs[lf]): \r\n 
np.fill_diagonal(mask,1) \r\n path = directory + '_data\\\\_ENV\\\\'\r\n LF_str = '{:.2f}'.format(LF) \r\n HF_str = '{:.2f}'.format(HF) \r\n file1 = path + subject + ' LF= ' + LF_str + ' HF= ' + HF_str + '.csv' \r\n file2 = path + subject + ' LF= ' + LF_str + ' HF= ' + HF_str + '_surr.csv'\r\n N_pot = np.nansum(mask) \r\n data = mask*np.genfromtxt(file1, delimiter=';') \r\n data_surr = mask*np.genfromtxt(file2, delimiter=';') \r\n stats = cffun.K_stats_PS_2(data,data_surr,sign_z_PS,PS_metric)\r\n PS_ENV[s][lf][hf]= stats \r\n for d in range(N_dist_bins):\r\n dist_mask = mask * ( ( (dist[s]>dist_thresholds[d]) * (dist[s]<=dist_thresholds[d+1]) )>0)\r\n N_potD = np.nansum(dist_mask) \r\n dataD = data*dist_mask \r\n if N_potD>0:\r\n stats = cffun.K_stats_PS_2(dataD,data_surr,sign_z_PS,PS_metric) \r\n else:\r\n stats = cffun.Stats_PS(np.nan) \r\n PS_ENV_dist[s][lf][hf][d] = stats\r\n \r\n for l in range(N_layer_int):\r\n layer_mask = mask * layer_int_masks[s][l]\r\n N_potL = np.nansum(layer_mask) \r\n dataL = data*layer_mask\r\n if N_potL>0:\r\n stats = cffun.K_stats_PS_2(dataL,data_surr,sign_z_PS,PS_metric) \r\n else:\r\n stats = cffun.Stats_PS(np.nan) \r\n PS_ENV_layer[s][lf][hf][l] = stats\r\n \r\n print(time.strftime(\"%Y-%m-%d %H:%M\") + ' ' + subject)\r\n \r\n\r\n############################################################################################################################\r\n\r\n\r\n\r\n############ SEEG: compute CFC or PAC stats ############ \r\nfor s,subject in enumerate(subjects): \r\n mask=copy.copy(masks[s])\r\n for lf,LF in enumerate(LFs):\r\n for hf,HF in enumerate(HFs[lf]): \r\n np.fill_diagonal(mask,1) # for local!\r\n LF_str = '{:.2f}'.format(LF) \r\n HF_str = '{:.2f}'.format(HF) \r\n LF_idx = np.where(freqs==LF)[0][0] \r\n LF_PS = PS[s][LF_idx].data_sign \r\n if CF_type == 'CFS': \r\n HF_idx = np.where(freqs==HF)[0][0] \r\n HF_PS = PS[s][HF_idx].data_sign\r\n path = directory + '_data\\\\_CFS\\\\' \r\n file0 = path + subject + ' LF=' + LF_str + ' HF=' + HF_str + '.csv' \r\n file_surr = path + subject + ' LF=' + LF_str + ' HF=' + HF_str + '_surr.csv'\r\n else: \r\n HF_PS = PS_ENV[s][lf][hf].data_sign\r\n path = directory + '_data\\\\_PAC\\\\' \r\n file0 = path + subject + ' LF= ' + LF_str + ' HF= ' + HF_str + '.csv' \r\n file_surr = path + subject + ' LF= ' + LF_str + ' HF= ' + HF_str + '_surr.csv'\r\n masked_data = np.genfromtxt(file0, delimiter=';') * mask\r\n surr_data = np.genfromtxt(file_surr, delimiter=';') * mask\r\n np.fill_diagonal(mask,0) \r\n N_CH = len(mask)\r\n N_pot = np.nansum(masked_data>0) - np.trace(masked_data>0) \r\n if N_pot > 0 and np.nansum(masked_data)-np.trace(masked_data)>0: \r\n stats = cffun.K_stats_CFC_2(masked_data,surr_data,sign_z_CFC,LF_PS,HF_PS) \r\n else:\r\n stats = cffun.Stats_CFC(np.nan)\r\n CFC[s][lf][hf] = stats \r\n \r\n for d in range(len(distances)):\r\n dist_mask = mask * ((dist[s]>dist_thresholds[d]) * (dist[s]<=dist_thresholds[d+1])) >0\r\n np.fill_diagonal(dist_mask,1) # 1 so that local CFC is preserved in data\r\n masked_dataD = masked_data * dist_mask \r\n np.fill_diagonal(dist_mask,0) # 0 so that local CFC is removed in subfunction\r\n N_potD = np.sum(masked_dataD>0)-np.trace(masked_dataD>0)\r\n if N_potD>0: \r\n upper_idx = np.triu_indices(N_CH)\r\n statsD = cffun.K_stats_CFC_2(masked_dataD,surr_data,sign_z_CFC,LF_PS,HF_PS) \r\n else:\r\n statsD = cffun.Stats_CFC(np.nan) # set K to 0 if no edges in dist mask\r\n CFC_dist[s][lf][hf][d] = statsD \r\n \r\n for l in range(N_layer_int):\r\n layer_mask = mask * 
layer_int_masks[s][l]\r\n np.fill_diagonal(layer_mask,1) # 1 so that local CFC is preserved in data\r\n masked_dataL = masked_data * layer_mask \r\n N_potL = np.sum(masked_dataL>0)-np.trace(masked_dataL>0)\r\n if N_potL>0: \r\n upper_idx = np.triu_indices(N_CH)\r\n statsL = cffun.K_stats_CFC_2(masked_dataL,surr_data,sign_z_CFC,LF_PS,HF_PS) \r\n else:\r\n statsL = cffun.Stats_CFC(np.nan) # set K to 0 if no edges in dist mask\r\n CFC_layer[s][lf][hf][l] = statsL \r\n \r\n print(time.strftime(\"%Y-%m-%d %H:%M\") + ' ' + subject)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n###############################################################################\r\n################# SAVE DATA WITH PICKLE #################\r\n\r\n\r\nsave=1\r\nif save:\r\n fileout1 = directory + '_results\\\\_pickle dump\\\\SEEG ' + PS_metric + ', z=' + '{:.2f}'.format(sign_z_PS) + ', ' + time.strftime(\"%Y-%m-%d\") + '.dat'\r\n fileout2 = directory + '_results\\\\_pickle dump\\\\SEEG ' + CF_type + ' corrected with ' + PS_metric + ', z=' + '{:.2f}'.format(sign_z_PS) + ', ' + time.strftime(\"%Y-%m-%d\") + '.dat'\r\n fileout3 = directory + '_results\\\\_pickle dump\\\\SEEG ' + CF_type + ' by layer interaction corrected with ' + PS_metric +', z=' + '{:.2f}'.format(sign_z_PS) + ', ' + time.strftime(\"%Y-%m-%d\") + '.dat'\r\n fileout4 = directory + '_results\\\\_pickle dump\\\\SEEG PLV Envelope ' + time.strftime(\"%Y-%m-%d\") + '.dat'\r\n pick.dump([PS,PS_dist],open(fileout1,'wb'))\r\n pick.dump([CFC,CFC_dist],open(fileout2,'wb'))\r\n pick.dump([PS_layer,CFC_layer],open(fileout3,'wb')) \r\n if CF_type=='PAC':\r\n pick.dump([PS_ENV,PS_ENV_dist,PS_ENV_layer],open(fileout4,'wb')) \r\n\r\n\r\n\r\n\r\n############################################################################### \r\n############## LOAD DATA #################\r\n \r\nuse_IMs = False # only need to be loaded for degree and directionality analyses\r\n \r\nif use_IMs:\r\n file_in1 = directory + '_results\\\\_pickle dump\\\\SEEG ' + PS_metric + '.dat'\r\n file_in2 = directory + '_results\\\\_pickle dump\\\\SEEG ' + CF_type + ' corrected with ' + PS_metric + '.dat' \r\n file_in3 = directory + '_results\\\\_pickle dump\\\\SEEG ' + CF_type + ' by layer interaction corrected with ' + PS_metric + '.dat' \r\nelse: \r\n file_in1 = directory + '_results\\\\_pickle dump\\\\SEEG ' + PS_metric + '_no_IMS' + '.dat'\r\n file_in2 = directory + '_results\\\\_pickle dump\\\\SEEG ' + CF_type + ' corrected with ' + PS_metric + '_no_IMS' + '.dat' \r\n file_in3 = directory + '_results\\\\_pickle dump\\\\SEEG ' + CF_type + ' by layer interaction corrected with ' + PS_metric + '_no_IMS' + '.dat'\r\n\r\n\r\nPS,PS_dist = pick.load(open(file_in1,'rb'))\r\nCFC,CFC_dist = pick.load(open(file_in2,'rb'))\r\nPS_layer,CFC_layer = pick.load(open(file_in3,'rb'))\r\n\r\nfile_in4 = directory + '_results\\\\_pickle dump\\\\SEEG PLV Envelope.dat'\r\nPS_ENV,PS_ENV_dist,PS_ENV_layer = pick.load(open(file_in4,'rb'))\r\n\r\n\r\n\r\n\r\n\r\n\r\n###############################################################################\r\n###############################################################################\r\n######### PLOT PS \r\n \r\n \r\n\r\n### get numbers of edges for PS \r\nN_edges = 0\r\nN_edges_dist = np.zeros([N_dist_bins])\r\nN_edges_layer = np.zeros([N_layer_int])\r\nN_edges_subj = np.zeros(N_subj)\r\nN_edges_dist_subj = np.zeros([N_subj,N_dist_bins])\r\nN_edges_layer_subj = np.zeros([N_subj,N_layer_int])\r\nN_CH_subj = np.zeros(N_subj)\r\nN_subj_contr = 0 # 
number of contributing subjects\r\nN_subj_contr_dist = np.zeros(N_dist_bins)\r\nN_layer_contr_dist = np.zeros(N_layer_int)\r\n\r\nfor s in range(N_subj): \r\n N_edges += np.nan_to_num(PS[s][0].N_pot) \r\n N_subj_contr += np.nan_to_num(int(PS[s][0].N_pot>0))\r\n N_edges_subj[s] = PS[s][0].N_pot\r\n\r\n for d in range(N_dist_bins):\r\n N_edges_dist[d] += np.nan_to_num(PS_dist[s][0][d].N_pot)\r\n N_edges_dist_subj[s,d] = np.nan_to_num(PS_dist[s][0][d].N_pot)\r\n N_subj_contr_dist[d] += np.nan_to_num(int(PS_dist[s][0][d].N_pot>0))\r\n \r\n for l in range(N_layer_int):\r\n N_edges_layer[l] += np.nan_to_num(PS_layer[s][0][l].N_pot)\r\n N_edges_layer_subj[s,l] = np.nan_to_num(PS_layer[s][0][l].N_pot)\r\n N_layer_contr_dist[l] += np.nan_to_num(int(PS_layer[s][0][l].N_pot>0))\r\n \r\n \r\n### set divisors for mean calculation \r\ndiv1 = N_edges\r\ndiv2 = N_edges_dist \r\ndiv3 = N_edges_subj \r\ndiv4 = N_edges_layer\r\n \r\n### init PS arrays\r\nPLV_PS_ps = np.zeros([N_subj,N_freq]) # PS = \"Phase Synch\", ps = \"per subject\"\r\nK_PS_ps = np.zeros([N_subj,N_freq])\r\nPLV_PS_dist_ps = np.zeros([N_subj,N_freq,N_dist_bins])\r\nK_PS_dist_ps = np.zeros([N_subj,N_freq,N_dist_bins])\r\nPLV_PS_layer_ps = np.zeros([N_subj,N_freq,N_layer_int])\r\nK_PS_layer_ps = np.zeros([N_subj,N_freq,N_layer_int])\r\n\r\n### get PS values\r\nfor f,F in enumerate(freqs): \r\n for s in range(N_subj): \r\n PLV_PS_ps[s,f] = PS[s][f].mean_masked * PS[s][f].N_pot \r\n K_PS_ps[s,f] = 100*PS[s][f].K * PS[s][f].N_pot \r\n for d in range(N_dist_bins): \r\n PLV_PS_dist_ps[s,f,d] = PS_dist[s][f][d].mean_masked * PS_dist[s][f][d].N_pot\r\n K_PS_dist_ps[s,f,d] = 100*PS_dist[s][f][d].K * PS_dist[s][f][d].N_pot \r\n for l in range(N_layer_int): \r\n PLV_PS_layer_ps[s,f,l] = PS_layer[s][f][l].mean_masked * PS_layer[s][f][l].N_pot\r\n K_PS_layer_ps[s,f,l] = 100*PS_layer[s][f][l].K * PS_layer[s][f][l].N_pot \r\n \r\n \r\n### get bootstrap stats for PS\r\nN_boot = 1000 \r\nK_PS_stats = [np.array(bst.CI_from_bootstrap(K_PS_ps,N_boot, 2.5,97.5,N_edges_subj))-1] \r\nPLV_PS_stats = [bst.CI_from_bootstrap(PLV_PS_ps,N_boot,2.5,97.5,N_edges_subj)] \r\nK_PS_stats_dist = [np.array(bst.CI_from_bootstrap(K_PS_dist_ps[:,:,i], N_boot,2.5,97.5, N_edges_dist_subj[:,i]))-1 for i in range(N_dist_bins)] \r\nPLV_PS_stats_dist = [bst.CI_from_bootstrap(PLV_PS_dist_ps[:,:,i], N_boot,2.5,97.5, N_edges_dist_subj[:,i]) for i in range(N_dist_bins)] \r\nK_PS_stats_layer = [np.array(bst.CI_from_bootstrap(K_PS_layer_ps[:,:,i], N_boot,2.5,97.5, N_edges_layer_subj[:,i]))-1 for i in range(N_layer_int)] \r\nPLV_PS_stats_layer = [bst.CI_from_bootstrap(PLV_PS_layer_ps[:,:,i],N_boot,2.5,97.5, N_edges_layer_subj[:,i]) for i in range(N_layer_int)] \r\n\r\n### get PS means \r\nmean_K_PS = [(np.nansum(K_PS_ps,0)\/div1)-1]\r\nmean_PLV_PS = [(np.nansum(PLV_PS_ps,0)\/div1)] \r\nmean_K_PS_dist = np.transpose(np.nansum(K_PS_dist_ps,0)\/div2)-1\r\nmean_PLV_PS_dist = np.transpose(np.nansum(PLV_PS_dist_ps,0)\/div2) \r\nmean_K_PS_layer = np.transpose(np.nansum(K_PS_layer_ps,0)\/div4)-1\r\nmean_PLV_PS_layer = np.transpose(np.nansum(PLV_PS_layer_ps,0)\/div4) \r\nK_PS_ps = (K_PS_ps\/div3[:,np.newaxis])-1\r\nPLV_PS_ps = (PLV_PS_ps\/div3[:,np.newaxis]) \r\n \r\n\r\n### PLOT PS \r\n \r\nfigsize = [6.1,2.3]\r\nrows = 2\r\ncols = 3\r\ndataL = [PLV_PS_stats,PLV_PS_stats_dist,PLV_PS_ps,K_PS_stats,K_PS_stats_dist,K_PS_ps]\r\nxlimA = [xlims_PS for i in range(6)]\r\nylimA = [[0,0.2],[0,0.2],[0,0.3],[0,100],[0,100],[0,100]]\r\ntitlesA = ['' for i in range(6)] ###['mean '+PS_metric,'mean '+PS_metric+' per 
distance','mean '+PS_metric+' per subject','mean K','mean K per distance','mean K per subject']\r\nlegendA = [None, distances, None, None, distances, None]\r\nylabA = [PS_metric,PS_metric,PS_metric,'K [%]','K [%]','K [%]']\r\ncmapA = ['brg','brg','brg','brg','brg','brg']\r\nCI = [0.2,0.2,None,0.2,0.2,None]\r\nlegend_posA = [None,'ur',None,None,None,None]\r\nplots.semi_log_plot_multi(figsize,rows,cols,dataL,freqs,xlimA,ylabA,titlesA,cmapA,legendA,None,legend_posA,ylimA,True,1,CI) \r\n\r\n## export .pdf\r\no66 = directory + '_results\\\\SEEG PS new\\\\SEEG ' + PS_metric + '.pdf'\r\nplots.semi_log_plot_multi(figsize,rows,cols,dataL,freqs,xlimA,ylabA,titlesA,cmapA,legendA,o66,legend_posA,ylimA,False,1,CI) \r\n\r\n\r\n### PLOT PS with distances and layers \r\n\r\nfigsize = [7.7,3]\r\nrows = 2\r\ncols = 3\r\ndataL = [PLV_PS_stats,PLV_PS_stats_dist,PLV_PS_stats_layer[:3],K_PS_stats,K_PS_stats_dist,K_PS_stats_layer[:3]]\r\nxlimA = [xlims_PS for i in range(6)]\r\nylimA = [[0,0.2],[0,0.2],[0,0.2],[0,100],[0,100],[0,100]]\r\ntitlesA = ['mean '+PS_metric, 'mean '+PS_metric+' per distance','mean '+PS_metric+' per layer int.',\r\n 'mean K','mean K per distance', 'mean K per layer int.']\r\nlegendA = [None, distances, layer_int[:3], None, distances, layer_int[:3]]\r\nylabA = [PS_metric,'','','K','','']\r\ncmapA = ['brg','brg',my_cmap,'brg','brg',my_cmap,]\r\nCI = [0.2 for i in range(6)]\r\nlegend_posA = [None,'ur','ur',None,'ur','ur']\r\nxlab = [0,0,0,1,1,1]\r\nplots.semi_log_plot_multi(figsize,rows,cols,dataL,freqs,xlimA,ylabA,titlesA,cmapA,legendA,None,legend_posA,ylimA,True,1,CI,xlab) \r\n\r\n## export .pdf\r\no67 = directory + '_results\\\\SEEG PS new\\\\SEEG '+PS_metric + '_with layers.pdf' \r\nplots.semi_log_plot_multi(figsize,rows,cols,dataL,freqs,xlimA,ylabA,titlesA,cmapA,legendA,o67,legend_posA,ylimA,False,1,CI,xlab) \r\n\r\n\r\n#### export plot data as csv files\r\n\r\no31 = directory + '_results\\\\_plot_data_new\\\\PS\\\\SEEG\\\\SEEG ' + PS_metric + ' K.csv'\r\no32 = directory + '_results\\\\_plot_data_new\\\\PS\\\\SEEG\\\\SEEG ' + PS_metric + ' GS.csv'\r\no33a = [directory + '_results\\\\_plot_data_new\\\\PS\\\\SEEG\\\\SEEG ' + PS_metric + ' K, ' + dists_short[i] + '.csv' for i in range(3)] \r\no34a = [directory + '_results\\\\_plot_data_new\\\\PS\\\\SEEG\\\\SEEG ' + PS_metric + ' GS, ' + dists_short[i] + '.csv' for i in range(3)] \r\n\r\nnp.savetxt(o31,K_PS_stats[0][:3],delimiter=';')\r\nnp.savetxt(o32,PLV_PS_stats[0][:3],delimiter=';')\r\nfor i in range(3):\r\n np.savetxt(o33a[i],K_PS_stats_dist[i][:3],delimiter=';')\r\n np.savetxt(o34a[i],PLV_PS_stats_dist[i][:3],delimiter=';')\r\n\r\n\r\n\r\n\r\n\r\n###############################################################################\r\n###############################################################################\r\n\r\n\r\n\r\n\r\n\r\n\r\n###############################################################################\r\n###############################################################################\r\n\r\n############## PLOT CFC \r\n\r\n\r\n### get numbers of edges for CFC\r\n\r\nN_CH_subj = np.zeros(N_subj)\r\nN_pot_subj_CF = np.zeros(N_subj)\r\nN_pot_subj_CF_mod = np.zeros([N_subj,N_LF,N_ratios])\r\nN_pot_subj_CF_excl = np.zeros([N_subj,N_LF,N_ratios])\r\nN_pot_dist_subj_CF = np.zeros([N_subj,N_dist_bins])\r\nN_pot_dist_subj_CF_mod = np.zeros([N_subj,N_dist_bins,N_LF,N_ratios])\r\nN_pot_dist_subj_CF_excl = np.zeros([N_subj,N_dist_bins,N_LF,N_ratios])\r\nN_pot_layer_subj_CF = np.zeros([N_subj,N_layer_int])\r\nN_pot_layer_subj_CF_mod = 
np.zeros([N_subj,N_layer_int,N_LF,N_ratios])\r\nN_pot_layer_subj_CF_excl = np.zeros([N_subj,N_layer_int,N_LF,N_ratios])\r\n\r\n\r\nN_subj_contr = 0 # number of contributing subjects\r\nN_subj_contr_dist = np.zeros(N_dist_bins)\r\nN_subj_contr_layer = np.zeros(N_layer_int)\r\n\r\nfor s in range(N_subj): \r\n N_pot_subj_CF[s] = CFC[s][0][0].N_pot # these are dependent on mask only\r\n N_CH_subj[s] = CFC[s][0][0].N_CH\r\n for lf in range(N_LF):\r\n for hf in range(N_ratios): \r\n try:\r\n N_pot_subj_CF_mod[s,lf,hf] = CFC[s][lf][hf].N_pot_mod\r\n N_pot_subj_CF_excl[s,lf,hf] = CFC[s][lf][hf].N_pot_excl \r\n N_subj_contr += np.int(N_pot_subj_CF[s]>0) \r\n except:\r\n pass \r\n \r\nfor s in range(N_subj): \r\n for d in range(N_dist_bins): \r\n N_pot_dist_subj_CF[s,d] = np.nan_to_num(CFC_dist[s][0][0][d].N_pot)\r\n N_subj_contr_dist[d] += np.int(N_pot_dist_subj_CF[s,d]>0)\r\n for lf in range(N_LF):\r\n for hf in range(N_ratios): \r\n try: \r\n N_pot_dist_subj_CF_mod[s,d,lf,hf] = np.nan_to_num(CFC_dist[s][lf][hf][d].N_pot_mod)\r\n N_pot_dist_subj_CF_excl[s,d,lf,hf] = np.nan_to_num(CFC_dist[s][lf][hf][d].N_pot_excl)\r\n except:\r\n pass\r\n \r\n for l in range(N_layer_int): \r\n N_pot_layer_subj_CF[s,l] = np.nan_to_num(CFC_layer[s][0][0][l].N_pot)\r\n N_subj_contr_layer[l] += np.int(N_pot_layer_subj_CF[s,l]>0)\r\n for lf in range(N_LF):\r\n for hf in range(N_ratios): \r\n try: \r\n N_pot_layer_subj_CF_mod[s,l,lf,hf] = np.nan_to_num(CFC_layer[s][lf][hf][l].N_pot_mod)\r\n N_pot_layer_subj_CF_excl[s,l,lf,hf] = np.nan_to_num(CFC_layer[s][lf][hf][l].N_pot_excl)\r\n except:\r\n pass\r\n \r\nN_pot_CF = np.nansum(N_pot_subj_CF) \r\nN_pot_CF_mod = np.nansum(N_pot_subj_CF_mod,0) \r\nN_pot_CF_excl = np.nansum(N_pot_subj_CF_excl,0) \r\n\r\nN_pot_dist_CF = np.nansum(N_pot_dist_subj_CF,0)\r\nN_pot_dist_CF_mod = np.nansum(N_pot_dist_subj_CF_mod,0)\r\nN_pot_dist_CF_excl = np.nansum(N_pot_dist_subj_CF_excl,0)\r\nN_pot_dist_CF_mod2 = np.moveaxis(N_pot_dist_CF_mod,0,-1)\r\n\r\nN_pot_layer_CF = np.nansum(N_pot_layer_subj_CF,0)\r\nN_pot_layer_CF_mod = np.nansum(N_pot_layer_subj_CF_mod,0)\r\nN_pot_layer_CF_excl = np.nansum(N_pot_layer_subj_CF_excl,0)\r\nN_pot_layer_CF_mod2 = np.moveaxis(N_pot_layer_CF_mod,0,-1)\r\n \r\n \r\n\r\n\r\n## initialize arrays \r\nPLV_CFC_ps = np.full([N_subj,N_LF,N_ratios],np.nan)\r\nPLV_CFC_local_ps = np.full([N_subj,N_LF,N_ratios],np.nan)\r\nPLV_CFC_ps_mod = np.full([N_subj,N_LF,N_ratios],np.nan)\r\nPLV_CFC_ps_excl = np.full([N_subj,N_LF,N_ratios],np.nan)\r\nPLV_CFC_dist_ps = np.full([N_subj,N_LF,N_ratios,N_dist_bins],np.nan) \r\nPLV_CFC_dist_ps_mod = np.full([N_subj,N_LF,N_ratios,N_dist_bins],np.nan)\r\nPLV_CFC_dist_ps_excl = np.full([N_subj,N_LF,N_ratios,N_dist_bins],np.nan)\r\nPLV_CFC_layer_ps = np.full([N_subj,N_LF,N_ratios,N_layer_int],np.nan) \r\nPLV_CFC_layer_ps_mod = np.full([N_subj,N_LF,N_ratios,N_layer_int],np.nan)\r\nPLV_CFC_layer_ps_excl = np.full([N_subj,N_LF,N_ratios,N_layer_int],np.nan)\r\nnPLV_CFC_dist_ps = np.full([N_subj,N_LF,N_ratios,N_dist_bins],np.nan) \r\nN_CFC_ps = np.full([N_subj,N_LF,N_ratios],np.nan)\r\nN_CFC_local_ps = np.full([N_subj,N_LF,N_ratios],np.nan)\r\nN_CFC_ps_mod = np.full([N_subj,N_LF,N_ratios],np.nan)\r\nN_CFC_ps_excl = np.full([N_subj,N_LF,N_ratios],np.nan)\r\nN_CFC_dist_ps = np.full([N_subj,N_LF,N_ratios,N_dist_bins],np.nan) \r\nN_CFC_dist_ps_mod = np.full([N_subj,N_LF,N_ratios,N_dist_bins],np.nan)\r\nN_CFC_layer_ps = np.full([N_subj,N_LF,N_ratios,N_layer_int],np.nan) \r\nN_CFC_layer_ps_mod = 
np.full([N_subj,N_LF,N_ratios,N_layer_int],np.nan)\r\nN_CFC_layer_ps_excl = np.full([N_subj,N_LF,N_ratios,N_layer_int],np.nan)\r\n\r\n\r\n \r\n# get CFC values\r\nfor lf,LF in enumerate(LFs): \r\n for hf,HF in enumerate(HFs[lf]):\r\n if HF=0) for i in range(4)] for rat in range(6)]\r\nK_CFC_stats_mod = [[K_CFC_stats_mod[rat][i]*(K_CFC_stats_mod[rat][i]>=0) for i in range(4)] for rat in range(6)]\r\nK_CFC_dist_12_stats = [[K_CFC_dist_12_stats[dist][i]*(K_CFC_dist_12_stats[dist][i]>=0) for i in range(4)] for dist in range(3)]\r\nK_CFC_dist_12_stats_mod = [[K_CFC_dist_12_stats_mod[dist][i]*(K_CFC_dist_12_stats_mod[dist][i]>=0) for i in range(4)] for dist in range(3)]\r\nK_CFC_dist_13_stats = [[K_CFC_dist_13_stats[dist][i]*(K_CFC_dist_13_stats[dist][i]>=0) for i in range(4)] for dist in range(3)]\r\nK_CFC_dist_13_stats_mod = [[K_CFC_dist_13_stats_mod[dist][i]*(K_CFC_dist_13_stats_mod[dist][i]>=0) for i in range(4)] for dist in range(3)]\r\nK_CFC_layer_12_stats = [[K_CFC_layer_12_stats[layer][i]*(K_CFC_layer_12_stats[layer][i]>=0) for i in range(4)] for layer in range(4)]\r\nK_CFC_layer_12_stats_mod = [[K_CFC_layer_12_stats_mod[layer][i]*(K_CFC_layer_12_stats_mod[layer][i]>=0) for i in range(4)] for layer in range(4)]\r\nK_CFC_layer_13_stats = [[K_CFC_layer_13_stats[layer][i]*(K_CFC_layer_13_stats[layer][i]>=0) for i in range(4)] for layer in range(4)]\r\nK_CFC_layer_13_stats_mod = [[K_CFC_layer_13_stats_mod[layer][i]*(K_CFC_layer_13_stats_mod[layer][i]>=0) for i in range(4)] for layer in range(4)]\r\n\r\nPLV_CFC_layer_13_stats = [[np.array(filter(lambda a: a != -0 , i)) for i in j] for j in PLV_CFC_layer_13_stats] \r\nPLV_CFC_local_layer_13_stats = [[np.array(filter(lambda a: a != -0 , i)) for i in j] for j in PLV_CFC_local_layer_13_stats] \r\nK_CFC_dist_13_stats = [[np.array(filter(lambda a: a != -1, i)) for i in j] for j in K_CFC_dist_13_stats]\r\nK_CFC_dist_13_stats_mod = [[np.array(filter(lambda a: a != -1, i)) for i in j] for j in K_CFC_dist_13_stats_mod]\r\nK_CFC_layer_13_stats = [[np.array(filter(lambda a: a != -1, i)) for i in j] for j in K_CFC_layer_13_stats]\r\nK_CFC_layer_13_stats_mod = [[np.array(filter(lambda a: a != -1, i)) for i in j] for j in K_CFC_layer_13_stats_mod]\r\nK_CFC_local_layer_13_stats = [[np.array(filter(lambda a: a != -1, i)) for i in j] for j in K_CFC_local_layer_13_stats]\r\n\r\nPLV_CFC_stats = [[np.array(filter(lambda a: a != np.nan, i)) for i in j] for j in PLV_CFC_stats] \r\nPLV_CFC_local_stats = [[np.array(filter(lambda a: a != np.nan, i)) for i in j] for j in PLV_CFC_local_stats]\r\nK_CFC_stats = [[np.array(filter(lambda a: a != np.nan, i)) for i in j] for j in K_CFC_stats]\r\nK_CFC_stats_mod = [[np.array(i[~np.isnan(i)]) for i in j] for j in K_CFC_stats_mod]\r\nK_CFC_local_stats = [[np.array(i[~np.isnan(i)]) for i in j] for j in K_CFC_local_stats] \r\nK_CFC_dist_13_stats = [[np.array(i[~np.isnan(i)]) for i in j] for j in K_CFC_dist_13_stats]\r\nK_CFC_dist_13_stats_mod = [[np.array(i[~np.isnan(i)]) for i in j] for j in K_CFC_dist_13_stats_mod]\r\nK_CFC_layer_13_stats = [[np.array(i[~np.isnan(i)]) for i in j] for j in K_CFC_layer_13_stats]\r\nK_CFC_layer_13_stats_mod = [[np.array(i[~np.isnan(i)]) for i in j] for j in K_CFC_layer_13_stats_mod]\r\n\r\n\r\n###############################################################################\r\n########## plot CFC \r\n\r\n\r\nfigsize = [6.3,2.3] \r\n#figsize = [12.7,3.6] \r\n \r\nrows = 2\r\ncols = 3\r\ndataL = [PLV_CFC_stats[:1],K_CFC_stats[:1],K_CFC_stats_mod[:1],\r\n 
PLV_CFC_stats[1:],K_CFC_stats[1:],K_CFC_stats_mod[1:]] \r\nxlimA = [xlims_CFC for i in range(6)]\r\ntitlesA = ['' for i in range(6)] #['mean PLV','mean K','mean K (controlled)','','','']\r\nif CF_type == 'CFS':\r\n ylimA = [[-0.005,0.049], [-1.4, 14], [-1.4,14], [-0.005,0.049], [-0.25,2.5], [-0.25,2.5]] \r\nelse:\r\n ylimA = [[-0.007,0.069], [-2.9, 29], [-2.9,29], [-0.007,0.069], [-2.9,29], [-2.9,29]] \r\nlegendA = [ratios[:1],ratios[:1],ratios[:1],\r\n ratios[1:],ratios[1:],ratios[1:],] \r\nylabA = ['GS','K [%]','K [%]', 'GS','K [%]','K [%]']\r\ncmapA = ['brg','brg','brg',my_cmap3,my_cmap3,my_cmap3]\r\nlegend_posA = ['ur',None,None,'ur',None,None]\r\nCI = [0.2 for i in range(6)]\r\nxlab = [0,0,0,1,1,1]\r\nRyt = [1,1,1,1,1,1]\r\nplots.semi_log_plot_multi(figsize,rows,cols,dataL,LFs,xlimA,ylabA,titlesA,cmapA,legendA,None,legend_posA,ylimA,True,1,CI,xlab,Ryt) \r\n\r\n## export PDF\r\no80 = directory + '_results\\\\SEEG CF new\\\\SEEG ' + CF_type + ', controlled with ' + PS_metric + '.pdf'\r\nplots.semi_log_plot_multi(figsize,rows,cols,dataL,LFs,xlimA,ylabA,titlesA,cmapA,legendA,o80,legend_posA,ylimA,False,1,CI,xlab,Ryt) \r\n\r\n\r\n\r\n#### plot as heatmap\r\n\r\ndata1 = np.transpose(mean_K_CFC)\r\ndata2 = np.transpose(mean_K_CFC_mod)\r\nfigsize_hm = [1.6,1.9]\r\n\r\nif CF_type == 'CFS':\r\n zmax1 = 12\r\n zmax2 = 4\r\n ztix1 = [0,3,6,9,12] \r\n ztix2 = [0,1,2,3,4] \r\nelse:\r\n zmax1 = 24\r\n zmax2 = 8\r\n ztix1 = [0,6,12,18,24] \r\n ztix2 = [0,2,4,6,8] \r\n \r\n\r\nLF_ics = [0,3,6,9,12,15,18,21,24,27,29] \r\nLF_map = ['1.2', '2.4', '3.7', '5.9', '8.6', '13.2', '19.5', '29.5', '47.3', '68.1', '94.5']\r\n \r\nplots.simple_CF_plot(data1,figsize_hm,'ratio','Low Frequency [Hz]',np.arange(0.5,5.6,1),LF_ics,ratios,LF_map,zmax=zmax1,ztix=ztix1,outfile=None) \r\nplots.simple_CF_plot(data2,figsize_hm,'ratio','Low Frequency [Hz]',np.arange(0.5,5.6,1),LF_ics,ratios,LF_map,zmax=zmax2,ztix=ztix2,outfile=None) \r\n \r\n# export PDFs \r\no90 = directory + '_results\\\\SEEG CF new\\\\SEEG ' + CF_type + ' heatmap.pdf'\r\no90a = directory + '_results\\\\SEEG CF new\\\\SEEG ' + CF_type + ' heatmap, controlled with ' + PS_metric + '.pdf'\r\nplots.simple_CF_plot(data1,figsize_hm,'ratio','Low Frequency [Hz]',np.arange(0.5,5.6,1),LF_ics,ratios,LF_map,zmax=zmax1,ztix=ztix1,outfile=o90) \r\nplots.simple_CF_plot(data2,figsize_hm,'ratio','Low Frequency [Hz]',np.arange(0.5,5.6,1),LF_ics,ratios,LF_map,zmax=zmax2,ztix=ztix2,outfile=o90a) \r\n \r\n\r\n\r\n\r\n\r\n###############################################################################\r\n##### GROUPS STATS AND PLOTS FOR ENVELOPE \r\n\r\n# init ENV arrays\r\nK_ENV_ps = np.full([N_subj,N_LF,N_ratios,],np.nan)\r\nPLV_ENV_ps = np.full([N_subj,N_LF,N_ratios],np.nan)\r\nK_ENV_local_ps = np.full([N_subj,N_LF,N_ratios],np.nan)\r\nPLV_ENV_local_ps = np.full([N_subj,N_LF,N_ratios],np.nan)\r\nK_ENV_dist_ps = np.full([N_subj,N_LF,N_ratios,N_dist_bins],np.nan)\r\nPLV_ENV_dist_ps = np.full([N_subj,N_LF,N_ratios,N_dist_bins],np.nan) \r\n\r\n### get ENV values\r\nfor lf,LF in enumerate(LFs): \r\n for hf,HF in enumerate(HFs[lf]):\r\n for s,ss in enumerate(subjects): \r\n K_ENV_ps [s,lf,hf] = 100*PS_ENV[s][lf][hf].K\r\n PLV_ENV_ps [s,lf,hf] = PS_ENV[s][lf][hf].mean_masked\r\n for d in range(N_dist_bins): \r\n PLV_ENV_dist_ps [s,lf,hf,d] = PS_ENV_dist[s][lf][hf][d].mean_masked\r\n K_ENV_dist_ps [s,lf,hf,d] = 100*PS_ENV_dist[s][lf][hf][d].K\r\n\r\n\r\n### get ENV means and 95% confidence intervals\r\nN_boot=1000 \r\nPLV_ENV_stats = [np.array(bst.CI_from_bootstrap(PLV_ENV_ps[:,:,i])) 
for i in range(N_ratios)] # returns [mean, mean_boot, lower, upper] x freq x ratio\r\nK_ENV_stats = [np.array(bst.CI_from_bootstrap(K_ENV_ps[:,:,i])) -1 for i in range(N_ratios)] \r\nPLV_ENV_dist_12_stats = [bst.CI_from_bootstrap(PLV_ENV_dist_ps[:,:,0,i]) for i in range(N_dist_bins)] # returns [mean, mean_boot, lower, upper] x freq x dist\r\nPLV_ENV_dist_13_stats = [bst.CI_from_bootstrap(PLV_ENV_dist_ps[:,:,1,i]) for i in range(N_dist_bins)] # returns [mean, mean_boot, lower, upper] x freq x dist \r\nK_ENV_dist_12_stats = [np.array(bst.CI_from_bootstrap(K_ENV_dist_ps[:,:,0,i]))-1 for i in range(N_dist_bins)] # returns [mean, mean_boot, lower, upper] x freq x dist\r\nK_ENV_dist_13_stats = [np.array(bst.CI_from_bootstrap(K_ENV_dist_ps[:,:,1,i]))-1 for i in range(N_dist_bins)] # returns [mean, mean_boot, lower, upper] x freq x dist\r\n\r\n\r\n###############################################################################\r\n########### plot amplitude envelope\r\n\r\nfigsize = [5.3,2.3] \r\nrows = 2\r\ncols = 2\r\ndataL = [PLV_ENV_stats,K_ENV_stats,\r\n PLV_ENV_stats,K_ENV_stats,] \r\nxlimA = [[1,330] for i in range(4)]\r\ntitlesA = ['' for i in range(4)] #['mean PLV','mean K','mean K (controlled)','','','']\r\nylimA = [[-0.007,0.07], [-2, 22],[-0.007,0.07], [-2,22], ] \r\nlegendA = [ratios,ratios,\r\n ratios,ratios,] \r\nylabA = ['GS','K [%]', 'GS','K [%]',]\r\ncmapA = [my_cmap7,my_cmap7,my_cmap7,my_cmap7]\r\nlegend_posA = [None,None,None,None,]\r\nCI = [0.2 for i in range(4)]\r\nxlab = [0,0,1,1,]\r\nRyt = [1,1,1,1,]\r\n\r\nplots.semi_log_plot_multi(figsize,rows,cols,dataL,LFs, xlimA,ylabA,titlesA,cmapA,legendA,None,legend_posA,ylimA,True,1,CI,xlab,Ryt) \r\nplots.semi_log_plot_multi(figsize,rows,cols,dataL,HFs_env,xlimA,ylabA,titlesA,cmapA,legendA,None,legend_posA,ylimA,True,1,CI,xlab,Ryt) \r\n\r\n\r\n## export PDF\r\no80e = directory + '_results\\\\SEEG CF new\\\\SEEG Envelopes LFx.pdf'\r\no80f = directory + '_results\\\\SEEG CF new\\\\SEEG Envelopes HFx.pdf'\r\nplots.semi_log_plot_multi(figsize,rows,cols,dataL,LFs, xlimA,ylabA,titlesA,cmapA,legendA,o80e,legend_posA,ylimA,False,1,CI,xlab,Ryt) \r\nplots.semi_log_plot_multi(figsize,rows,cols,dataL,HFs_env,xlimA,ylabA,titlesA,cmapA,legendA,o80f,legend_posA,ylimA,False,1,CI,xlab,Ryt) \r\n\r\n\r\n#### plot as heatmap\r\ndata1 = np.transpose(mean_K_CFC)\r\nfigsize_hm = [1.6,1.9]\r\nzmax1 = 20\r\nzmax2 = 4\r\nztix1 = [0,5,10,15,20] \r\nztix2 = [0,1,2,3,4] \r\nLF_ics = [0,3,6,9,12,15,18,21,24,27,29] \r\nLF_map = ['1.2', '2.4', '3.7', '5.9', '8.6', '13.2', '19.5', '29.5', '47.3', '68.1', '94.5'] \r\nplots.simple_CF_plot(data1,figsize_hm,'ratio','Low Frequency [Hz]',np.arange(0.5,5.6,1),LF_ics,ratios,LF_map,zmax=zmax1,ztix=ztix1,outfile=None) \r\n\r\n## export pdf\r\no99 = directory + '_results\\\\SEEG CF new\\\\SEEG Envelope heatmap.pdf'\r\nplots.simple_CF_plot(data1,figsize_hm,'ratio','Low Frequency [Hz]',np.arange(0.5,5.6,1),LF_ics,ratios,LF_map,zmax=zmax1,ztix=ztix1,outfile=o99) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n###############################################################################\r\n##### plot CFC in distance bins \r\n\r\n\r\nfigsize = [6.3,2.3] \r\nrows = 2\r\ncols = 3\r\ndataL = [PLV_CFC_dist_12_stats, K_CFC_dist_12_stats, K_CFC_dist_12_stats_mod,\r\n PLV_CFC_dist_13_stats, K_CFC_dist_13_stats, K_CFC_dist_13_stats_mod] \r\nxlimA = [xlims_CFC for i in range(6)]\r\ntitlesA = ['' for i in range(6)] \r\nif CF_type =='CFS':\r\n ylimA = [[-0.005,0.049], [-2, 19], [-2,19], [-0.005,0.049], [-0.35,3.4], [-0.35,3.4]] \r\nelse:\r\n 
ylimA = [[-0.007,0.067], [-3.8, 38], [-3.8,38], [-0.007,0.067], [-3.8, 38], [-3.8,38]] # PAC \r\nlegendA = [distances for i in range(6)]\r\nylabA = ['GS','K [%]','K [%]', 'GS','K [%]','K [%]']\r\ncmapA = ['brg','brg','brg','brg','brg','brg']\r\nlegend_posA = ['ur']+[None for i in range(5)]\r\nCI = [0.2 for i in range(6)]\r\nxlab = [0,0,0,1,1,1]\r\nRyt = [1,1,1,1,1,1]\r\nplots.semi_log_plot_multi(figsize,rows,cols,dataL,LFs,xlimA,ylabA,titlesA,cmapA,legendA,None,legend_posA,ylimA,True,1,CI,xlab,Ryt) \r\n\r\n## export PDF\r\no81 = directory + '_results\\\\SEEG CF new\\\\SEEG ' + CF_type + ' controlled with ' + PS_metric + ', distance bins.pdf'\r\nplots.semi_log_plot_multi(figsize,rows,cols,dataL,LFs,xlimA,ylabA,titlesA,cmapA,legendA,o81,legend_posA,ylimA,False,1,CI,xlab,Ryt) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n###############################################################################\r\n##### plot CF by layer combination\r\n\r\nfigsize = [6.3,2.3] \r\nrows = 2\r\ncols = 3\r\ndataL = [PLV_CFC_layer_12_stats, K_CFC_layer_12_stats, K_CFC_layer_12_stats_mod,\r\n PLV_CFC_layer_13_stats, K_CFC_layer_13_stats, K_CFC_layer_13_stats_mod] \r\nxlimA = [xlims_CFC for i in range(6)]\r\ntitlesA = ['' for i in range(6)] \r\nif CF_type =='CFS':\r\n ylimA = [[-0.005,0.049], [-2, 19], [-2,19], [-0.005,0.049], [-0.44,4.6], [-0.44,4.6]] \r\nelse:\r\n ylimA = [[-0.007,0.067], [-3.8, 38], [-3.8,38], [-0.007,0.067], [-3.8, 38], [-3.8,38]] # PAC \r\nlegendA = [layer_int for i in range(6)]\r\nylabA = ['GS','K [%]','K [%]', 'GS','K [%]','K [%]']\r\ncmapA = [my_cmap for i in range(6)]\r\nlegend_posA = [None,None,'ur',None,None,None ]\r\nCI = [0.2 for i in range(6)]\r\nxlab = [0,0,0,1,1,1]\r\nRyt = [1,0,1,1,1,1]\r\nplots.semi_log_plot_multi(figsize,rows,cols,dataL,LFs,xlimA,ylabA,titlesA,cmapA,legendA,None,legend_posA,ylimA,True,1,CI,xlab,Ryt) \r\n\r\n## export PDF\r\no82 = directory + '_results\\\\SEEG CF new\\\\SEEG ' + CF_type + ' controlled with ' + PS_metric + ', layer interation.pdf'\r\nplots.semi_log_plot_multi(figsize,rows,cols,dataL,LFs,xlimA,ylabA,titlesA,cmapA,legendA,o82,legend_posA,ylimA,False,1,CI,xlab,Ryt) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n###############################################################################\r\n##### plot local CF \r\n\r\nfigsize = [4.1,2.3] \r\nfigsize = [4.5,2.3] \r\nrows = 2\r\ncols = 2\r\ndataL = [PLV_CFC_local_stats[:1], K_CFC_local_stats[:1], \r\n PLV_CFC_local_stats[1:], K_CFC_local_stats[1:]] \r\nxlimA = [xlims_CFC for i in range(4)]\r\ntitlesA = ['', '', '', '']\r\nif CF_type == 'CFS':\r\n ylimA = [[-0.01,0.13], [-0.01,100], [-0.005,0.044], [-0.01,30]] \r\nelse:\r\n ylimA = [[-0.02,0.2], [-0.01,100], [-0.02,0.2], [-0.01,100]] \r\nlegendA = [ratios[:1],ratios[:1],\r\n ratios[1:],ratios[1:],] \r\nylabA = ['GS','K','GS','K']\r\ncmapA = ['brg','brg',my_cmap3,my_cmap3]\r\nlegend_posA = [None,None,None,None]\r\nCI = [0.2,0.2,0.2,0.2]\r\nxlab = [0,0,1,1]\r\nRyt = [0,0,0,0]\r\nplots.semi_log_plot_multi(figsize,rows,cols,dataL,LFs,xlimA,ylabA,titlesA,cmapA,legendA,None,legend_posA,ylimA,True,1,CI,xlab,Ryt) \r\n\r\n# export PDF\r\no83 = directory + '_results\\\\SEEG CF new\\\\SEEG local ' + CF_type + '.pdf'\r\nplots.semi_log_plot_multi(figsize,rows,cols,dataL,LFs,xlimA,ylabA,titlesA,cmapA,legendA,o83,legend_posA,ylimA,False,1,CI,xlab,Ryt) \r\n\r\n\r\n### plot heatmap\r\ndata = np.transpose(np.array(mean_K_CFC_local))\r\nfigsize_hm = [1.6,1.9]\r\nzmax = 80 \r\nztix = [0,20,40,60,80] \r\nLF_ics = [0,3,6,9,12,15,18,21,24,27,29] \r\nLF_map = ['1.2', '2.4', '3.7', '5.9', 
'8.6', '13.2', '19.5', '29.5', '47.3', '68.1', '94.5'] \r\nplots.simple_CF_plot(data,figsize_hm,'ratio','LF',np.arange(0.5,5.6,1),LF_ics,ratios,LF_map,zmax=zmax,ztix=ztix,outfile=None) \r\n \r\n# export PDF \r\no93 = directory + '_results\\\\SEEG CF new\\\\SEEG local ' + CF_type + ' heatmap.pdf'\r\nplots.simple_CF_plot(data,figsize_hm,'ratio','LF',np.arange(0.5,5.6,1),LF_ics,ratios,LF_map,zmax=zmax,ztix=ztix,outfile=o93) \r\n\r\n\r\n\r\n\r\n\r\n\r\n###### save plot data as csv files\r\n\r\no41a = [directory + '_results\\\\_plot_data_new\\\\' + CF_type + '\\\\SEEG\\\\SEEG ' + CF_type + ' K ' + r + '.csv' for r in ratios2 ]\r\no41b = [directory + '_results\\\\_plot_data_new\\\\' + CF_type + '\\\\SEEG\\\\SEEG ' + CF_type + ' K_mod using ' + PS_metric + ' ' + r + '.csv' for r in ratios2 ]\r\no41c = [directory + '_results\\\\_plot_data_new\\\\' + CF_type + '\\\\SEEG\\\\SEEG ' + CF_type + ' GS ' + r + '.csv' for r in ratios2 ]\r\no42a = [directory + '_results\\\\_plot_data_new\\\\' + CF_type + '_dist\\\\SEEG\\\\SEEG ' + CF_type + ' 1-2 K, ' + d + ' .csv' for d in dists_short ]\r\no42b = [directory + '_results\\\\_plot_data_new\\\\' + CF_type + '_dist\\\\SEEG\\\\SEEG ' + CF_type + ' 1-2 K_mod using ' + PS_metric + ', ' + d + '.csv' for d in dists_short ]\r\no42c = [directory + '_results\\\\_plot_data_new\\\\' + CF_type + '_dist\\\\SEEG\\\\SEEG ' + CF_type + ' 1-2 GS, ' + d + ' .csv' for d in dists_short ]\r\no43a = [directory + '_results\\\\_plot_data_new\\\\' + CF_type + '_dist\\\\SEEG\\\\SEEG ' + CF_type + ' 1-3 K, ' + d + '.csv' for d in dists_short ]\r\no43b = [directory + '_results\\\\_plot_data_new\\\\' + CF_type + '_dist\\\\SEEG\\\\SEEG ' + CF_type + ' 1-3 K_mod using ' + PS_metric + ', ' + d + '.csv' for d in dists_short ]\r\no43c = [directory + '_results\\\\_plot_data_new\\\\' + CF_type + '_dist\\\\SEEG\\\\SEEG ' + CF_type + ' 1-3 GS, ' + d + '.csv' for d in dists_short ]\r\no44a = [directory + '_results\\\\_plot_data_new\\\\' + CF_type + '_local\\\\SEEG\\\\SEEG local ' + CF_type + ' K ' + r + '.csv' for r in ratios2 ]\r\no44b = [directory + '_results\\\\_plot_data_new\\\\' + CF_type + '_local\\\\SEEG\\\\SEEG local ' + CF_type + ' GS ' + r + '.csv' for r in ratios2 ] \r\no45a = [directory + '_results\\\\_plot_data_new\\\\ENV\\\\SEEG\\\\SEEG ENV K ' + r + '.csv' for r in ratios2 ]\r\no45b = [directory + '_results\\\\_plot_data_new\\\\ENV\\\\SEEG\\\\SEEG ENV GS ' + r + '.csv' for r in ratios2 ]\r\no46a = [directory + '_results\\\\_plot_data_new\\\\' + CF_type + '_layer\\\\SEEG\\\\SEEG ' + CF_type + ' 1-2 K, ' + l + ' .csv' for l in layer_int ]\r\no46b = [directory + '_results\\\\_plot_data_new\\\\' + CF_type + '_layer\\\\SEEG\\\\SEEG ' + CF_type + ' 1-2 K_mod using ' + PS_metric + ', ' + l + '.csv' for l in layer_int ]\r\no46c = [directory + '_results\\\\_plot_data_new\\\\' + CF_type + '_layer\\\\SEEG\\\\SEEG ' + CF_type + ' 1-2 GS, ' + l + ' .csv' for l in layer_int ]\r\no47a = [directory + '_results\\\\_plot_data_new\\\\' + CF_type + '_layer\\\\SEEG\\\\SEEG ' + CF_type + ' 1-3 K, ' + l + '.csv' for l in layer_int ]\r\no47b = [directory + '_results\\\\_plot_data_new\\\\' + CF_type + '_layer\\\\SEEG\\\\SEEG ' + CF_type + ' 1-3 K_mod using ' + PS_metric + ', ' + l + '.csv' for l in layer_int ]\r\no47c = [directory + '_results\\\\_plot_data_new\\\\' + CF_type + '_layer\\\\SEEG\\\\SEEG ' + CF_type + ' 1-3 GS, ' + l + '.csv' for l in layer_int ]\r\nfor r in range(6):\r\n np.savetxt(o41a[r],K_CFC_stats [r][:3],delimiter=';')\r\n np.savetxt(o41b[r],K_CFC_stats_mod [r][:3],delimiter=';')\r\n 
np.savetxt(o41c[r],PLV_CFC_stats [r][:3],delimiter=';')\r\n for d in range(3):\r\n np.savetxt(o42a[d],K_CFC_dist_12_stats [d][:3],delimiter=';')\r\n np.savetxt(o42b[d],K_CFC_dist_12_stats_mod [d][:3],delimiter=';')\r\n np.savetxt(o42c[d],PLV_CFC_dist_12_stats [d][:3],delimiter=';')\r\n np.savetxt(o43a[d],K_CFC_dist_13_stats [d][:3],delimiter=';')\r\n np.savetxt(o43b[d],K_CFC_dist_13_stats_mod [d][:3],delimiter=';')\r\n np.savetxt(o43c[d],PLV_CFC_dist_13_stats [d][:3],delimiter=';') \r\n np.savetxt(o44a[r],K_CFC_local_stats[r][:3],delimiter=';')\r\n np.savetxt(o44b[r],PLV_CFC_local_stats[r][:3],delimiter=';')\r\n if CF_type == 'PAC':\r\n np.savetxt(o45a[r],K_ENV_stats[r][:3],delimiter=';')\r\n np.savetxt(o45b[r],PLV_ENV_stats[r][:3],delimiter=';')\r\n for l in range(4):\r\n np.savetxt(o46a[l],K_CFC_layer_12_stats [l][:3],delimiter=';')\r\n np.savetxt(o46b[l],K_CFC_layer_12_stats_mod [l][:3],delimiter=';')\r\n np.savetxt(o46c[l],PLV_CFC_layer_12_stats [l][:3],delimiter=';')\r\n np.savetxt(o47a[l],K_CFC_layer_13_stats [l][:3],delimiter=';')\r\n np.savetxt(o47b[l],K_CFC_layer_13_stats_mod [l][:3],delimiter=';')\r\n np.savetxt(o47c[l],PLV_CFC_layer_13_stats [l][:3],delimiter=';')\r\n\r\n\r\n\r\n\r\n\r\n###############################################################################\r\n####### compare long distance vs short dist. with Wilcoxon\r\n\r\nK_CFC_dist_ps = np.full([N_subj,N_LF,N_ratios,N_dist_bins],np.nan)\r\nK_CFC_dist_ps_mod = np.full([N_subj,N_LF,N_ratios,N_dist_bins],np.nan)\r\n \r\n# get CFC values\r\nfor lf,LF in enumerate(LFs): \r\n for hf,HF in enumerate(HFs[lf]):\r\n for s in range(N_subj): \r\n for d in range(N_dist_bins): \r\n PLV_CFC_dist_ps[s,lf,hf,d] = CFC_dist[s][lf][hf][d].mean_masked\r\n K_CFC_dist_ps[s,lf,hf,d] = CFC_dist[s][lf][hf][d].K\r\n K_CFC_dist_ps_mod[s,lf,hf,d] = CFC_dist[s][lf][hf][d].K_mod\r\n \r\nwilc_pm = np.zeros([N_LF,2,3])\r\nwilc_p = np.zeros([N_LF,2,3])\r\nwilc_p_mod = np.zeros([N_LF,2,3])\r\n\r\ncombo1 = [0,0,1]\r\ncombo2 = [1,2,2]\r\n\r\nfor lf,LF in enumerate(LFs): \r\n for rat in range(2):\r\n for co in range(3):\r\n c1 = combo1[co]\r\n c2 = combo2[co]\r\n aaa, wilc_pm[lf,rat,co] = stat.wilcoxon(PLV_CFC_dist_ps [:,lf,rat,c1], PLV_CFC_dist_ps [:,lf,rat,c2])\r\n aaa, wilc_p[lf,rat,co] = stat.wilcoxon(K_CFC_dist_ps [:,lf,rat,c1], K_CFC_dist_ps [:,lf,rat,c2])\r\n aaa, wilc_p_mod[lf,rat,co] = stat.wilcoxon(K_CFC_dist_ps_mod[:,lf,rat,c1], K_CFC_dist_ps_mod[:,lf,rat,c2])\r\n \r\n \r\ns_12_ps = np.reshape( (1.*multicomp.multipletests(np.reshape(wilc_pm [:,0],N_LF*3), method ='fdr_bh')[0]),[N_LF,3])\r\ns_13_ps = np.reshape( (1.*multicomp.multipletests(np.reshape(wilc_pm [:,1],N_LF*3), method ='fdr_bh')[0]),[N_LF,3])\r\ns_12 = np.reshape( (1.*multicomp.multipletests(np.reshape(wilc_p [:,0],N_LF*3), method ='fdr_bh')[0]),[N_LF,3])\r\ns_13 = np.reshape( (1.*multicomp.multipletests(np.reshape(wilc_p [:,1],N_LF*3), method ='fdr_bh')[0]),[N_LF,3])\r\ns_12_mod = np.reshape( (1.*multicomp.multipletests(np.reshape(wilc_p_mod[:,0],N_LF*3), method ='fdr_bh')[0]),[N_LF,3])\r\ns_13_mod = np.reshape( (1.*multicomp.multipletests(np.reshape(wilc_p_mod[:,1],N_LF*3), method ='fdr_bh')[0]),[N_LF,3])\r\n\r\ndists_short = ['short', 'mid', 'long']\r\n\r\n### plot long vs short bin results\r\nfor co in range(3):\r\n combo_str = dists_short[combo1[co]] + '-' + dists_short[combo2[co]] \r\n dataA = [[s_12_ps[:,co]],[s_12[:,co]],[s_12_mod[:,co]],[s_13_ps[:,co]],[s_13[:,co]],[s_13_mod[:,co]]]\r\n cmapA = ['brg','brg','brg','brg','brg','brg']\r\n xlimA = [xlims_CFC for i in 
range(6)]\r\n ylabA = ['','','','','','','']\r\n ylimA = [1,1,1,1,1,1] \r\n titlesA = ['1-2','1-2','1-2 c','1-3','1-3','1-3 c']\r\n plots.semi_log_plot_multi([7.7,3],2,3,dataA,LFs,xlimA,ylabA,titlesA,cmapA,None,None,None,None,True,1,None,None,None,'auto',8,3) \r\n \r\n # save pdf\r\n o85 = directory + '_results\\\\SEEG CF new\\\\SEEG ' + CF_type + ', controlled with ' + PS_metric + ', distance comparison ' + combo_str + '.pdf'\r\n plots.semi_log_plot_multi([7.7,3],2,3,dataA,LFs,xlimA,ylabA,titlesA,cmapA,None,o85,None,ylimA,True,1,None,None,None,'auto',8,3) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n###############################################################################\r\n######## compare superficial vs. deep layer int with Wilcoxon\r\n\r\nK_CFC_layer_ps = np.full([N_subj,N_LF,N_ratios,N_layer_int],np.nan)\r\nK_CFC_layer_ps_mod = np.full([N_subj,N_LF,N_ratios,N_layer_int],np.nan)\r\n\r\n# get CFC values\r\nfor lf,LF in enumerate(LFs): \r\n for hf,HF in enumerate(HFs[lf]):\r\n for s in range(N_subj): \r\n for d in range(N_layer_int): \r\n PLV_CFC_layer_ps[s,lf,hf,d] = CFC_layer[s][lf][hf][d].mean_masked\r\n K_CFC_layer_ps[s,lf,hf,d] = CFC_layer[s][lf][hf][d].K\r\n K_CFC_layer_ps_mod[s,lf,hf,d] = CFC_layer[s][lf][hf][d].K_mod\r\n # K_CFC_layer_ps_excl[s,lf,hf,d] = CFC_layer[s][lf][hf][d].K_excl \r\n \r\nwilc_pps = np.zeros([N_LF,2,6])\r\nwilc_p = np.zeros([N_LF,2,6])\r\nwilc_p_mod = np.zeros([N_LF,2,6])\r\n\r\ncombo1 = [0,0,0,1,1,2]\r\ncombo2 = [1,2,3,2,3,3]\r\n\r\nfor lf,LF in enumerate(LFs): \r\n for rat in range(2):\r\n for co in range(6):\r\n c1 = combo1[co]\r\n c2 = combo2[co] \r\n aaa, wilc_pps [lf,rat,co] = stat.wilcoxon(PLV_CFC_layer_ps [:,lf,rat,c1], PLV_CFC_layer_ps [:,lf,rat,c2])\r\n aaa, wilc_p [lf,rat,co] = stat.wilcoxon(K_CFC_layer_ps [:,lf,rat,c1], K_CFC_layer_ps [:,lf,rat,c2])\r\n aaa, wilc_p_mod[lf,rat,co] = stat.wilcoxon(K_CFC_layer_ps_mod[:,lf,rat,c1], K_CFC_layer_ps_mod[:,lf,rat,c2])\r\n \r\n\r\ns_12_ps = np.reshape( (1.*multicomp.multipletests(np.reshape(wilc_pps [:,0],N_LF*6), method ='fdr_bh')[0]),[N_LF,6])\r\ns_13_ps = np.reshape( (1.*multicomp.multipletests(np.reshape(wilc_pps [:,1],N_LF*6), method ='fdr_bh')[0]),[N_LF,6])\r\ns_12 = np.reshape( (1.*multicomp.multipletests(np.reshape(wilc_p [:,0],N_LF*6), method ='fdr_bh')[0]),[N_LF,6])\r\ns_13 = np.reshape( (1.*multicomp.multipletests(np.reshape(wilc_p [:,1],N_LF*6), method ='fdr_bh')[0]),[N_LF,6])\r\ns_12_mod = np.reshape( (1.*multicomp.multipletests(np.reshape(wilc_p_mod[:,0],N_LF*6), method ='fdr_bh')[0]),[N_LF,6])\r\ns_13_mod = np.reshape( (1.*multicomp.multipletests(np.reshape(wilc_p_mod[:,1],N_LF*6), method ='fdr_bh')[0]),[N_LF,6])\r\n\r\n\r\n\r\ns_12_ps[s_12_ps==0]=np.nan\r\ns_12[s_12==0]=np.nan\r\ns_12_mod[s_12_mod==0]=0.01\r\ns_13_ps[s_13_ps==0]=0.01\r\ns_13[s_13==0]=0.01\r\ns_13_mod[s_13_mod==0]=0.01\r\n\r\nlay_sh = ['SS','DD','SD','DS']\r\n\r\n\r\nfor co in range(6):\r\n dataA = [[s_12_ps[:,co]],[s_12[:,co]],[s_12_mod[:,co]],[s_13_ps[:,co]],[s_13[:,co]],[s_13_mod[:,co]]]\r\n cmapA = ['brg','brg','brg','brg','brg','brg']\r\n xlimA = [xlims_CFC for i in range(6)]\r\n ylimA = [[0,1.1] for i in range(6)]\r\n ylabA = ['','','','','','']\r\n ylabA = ['','','','','','']\r\n titlesA = ['1-2','1-2','1-2 c','1-3','1-3','1-3 c']\r\n plots.semi_log_plot_multi([7.7,3],2,3,dataA,LFs,xlimA,ylabA,titlesA,cmapA,None,None,None,ylimA,True,1,None,None,None,'auto',8,3) \r\n\r\n\r\n combo_str_l = lay_sh[combo1[co]] + '-' + lay_sh[combo2[co]]\r\n\r\n ## save pdf\r\n o86 = directory + '_results\\\\SEEG CF new\\\\SEEG ' + CF_type + 
', controlled with ' + PS_metric + ', layer comparison ' + combo_str_l + '.pdf'\r\n plots.semi_log_plot_multi([7.7,3],2,3,dataA,LFs,xlimA,ylabA,titlesA,cmapA,None,o86,None,ylimA,True,1,None,None,None,'auto',8,3) \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n###############################################################################\r\n######## initialize morphing ops and networks, get degrees and strengths \r\n\r\n\r\nfile_in1 = directory + '_results\\\\_pickle dump\\\\SEEG ' + PS_metric + '.dat'\r\nfile_in2 = directory + '_results\\\\_pickle dump\\\\SEEG ' + CF_type + ' corrected with ' + PS_metric + '.dat' \r\nfile_in3 = directory + '_results\\\\_pickle dump\\\\SEEG ' + CF_type + ' by layer interaction corrected with ' + PS_metric + '.dat'\r\nPS,PS_dist = pick.load(open(file_in1,'rb'))\r\nCFC,CFC_dist = pick.load(open(file_in2,'rb'))\r\nPS_layer,CFC_layer = pick.load(open(file_in3,'rb'))\r\n\r\n### initialize networks \r\nnetwork_names = ['C','DM','DA','Lim','VA','SM','Vis']\r\nN_network = 7\r\n\r\nfile_networks = 'M:\\\\SEEG_Morlet\\\\_RAW_line_filtered\\\\_support_files\\\\networks parc2009.csv'\r\nnetwork_indices = np.array(np.genfromtxt(file_networks, delimiter=';'),'int')\r\n\r\n\r\nnetworks = [np.where(network_indices==i)[0] for i in range(7)] \r\n\r\n\r\n\r\n### do edge counting \r\nN, edges = cffun.edge_counting(directory,subjects,ch_per_subject,freqs[:37],LFs,HFs,PS,CFC,CFC_dist,CFC_layer,'parc2009',channel_layers)\r\n\r\n### do edge counting in layers\r\n\r\nN_layer = [[] for i in range(4)]\r\nedges_layer = [[] for i in range(4)]\r\nCFC_dummy = [[[ None for i in range(N_ratios)] for j in range(N_LF)] for k in range(N_subj)]\r\n\r\n\r\n\r\nreload(cffun)\r\nfor l in range(4): \r\n for c1 in range(58):\r\n for c2 in range(30):\r\n for c3 in range(6):\r\n CFC_dummy[c1][c2][c3] = CFC_layer[c1][c2][c3][l] \r\n N_layer[l], edges_layer[l] = cffun.edge_counting(directory,subjects_red,ch_per_subject,freqs[:37],LFs,HFs,PS,CFC_dummy,'parc2009',channel_layers)\r\n print l\r\n\r\n# save layer edges to pickle dump\r\nfileout37 = directory + '_results\\\\_pickle dump\\\\Edges in layers ' + CF_type + ', ' + PS_metric + ', ' + parc + ', ' + time.strftime(\"%Y-%m-%d\") + '.dat' # save with pickle\r\npick.dump([N_layer,edges_layer],open(fileout37,'wb'))\r\n\r\n\r\nfor l in range(4):\r\n D = cffun.degree_analysis(edges_layer[l],N_layer[l],networks)\r\n cffun.write_degrees(directory,D,ratios2,'parc2009',CF_type, add_inf=layer_int) \r\n\r\n\r\n### analyze local CF and write to .csv\r\nN, edges = cffun.analyze_local(N,edges,networks,N_ch_layer)\r\ncffun.write_csv_local(directory,edges,ratios2,parc,CF_type, add_inf='')\r\n\r\n### analyze PS per parcel and network ####################\r\nN, edges = cffun.analyze_PS(N,edges,networks,N_ch_layer)\r\n\r\n### save edges to pickle dump\r\nfileout24 = directory + '_results\\\\_pickle dump\\\\Edges ' + CF_type + ', ' + PS_metric + ', ' + parc + ', ' + time.strftime(\"%Y-%m-%d\") + '.dat' # save with pickle\r\npick.dump([N,edges],open(fileout24,'wb'))\r\n\r\n\r\n### load edges\r\nif CF_type == 'CFS':\r\n filein24 = 'M:\\\\SEEG_Morlet\\\\_RAW_line_filtered\\\\_results\\\\_pickle dump\\\\Edges CFC, wPLI, parc2009.dat'\r\n\r\nelse: \r\n filein24 = 'M:\\\\SEEG_Morlet\\\\_RAW_line_filtered\\\\_results\\\\_pickle dump\\\\Edges PAC, wPLI, parc2009.dat'\r\n\r\n[N,edges] = pick.load(open(filein24,'rb'))\r\n\r\n\r\n\r\nxlimA = [xlims_CFC for i in range(6)]\r\n\r\n\r\n\r\n######## plot local CFC per network \r\n\r\ndataL = [edges.mean_PLV_local_pn[:,:,0],edges.K_local_pn 
[:,:,0],edges.mean_PLV_local_pn[:,:,1],edges.K_local_pn[:,:,1]]\r\ntitlesA = ['local str 1:2','local K [%] 1:2','local str 1:3','local K [%] 1:3' ]\r\nylabA = ['Str','K [%]','GS','K [%]']\r\ncmapA = [my_cmap2 for i in dataL]\r\nlegendA = [network_names for i in dataL]\r\nfigsize = [7,4]\r\nplots.semi_log_plot_multi(figsize,2,2,dataL,LFs,xlimA,ylabA,titlesA,cmapA,legendA,None,['ur',None,None,None],None,1,1,None,None,Nyt=[1,1,1,1],'auto',fontsize=8,markersize=0)\r\n\r\no15 = directory + '_results\\\\results by system\\\\Local ' + CF_type + ' by system.pdf'\r\nplots.semi_log_plot_multi(figsize,2,2,dataL,LFs,xlimA,ylabA,titlesA,cmapA,legendA,o15,['ur',None,None,None],None,1,1,None,None,Nyt=[1,1,1,1],'auto',fontsize=8,markersize=0)\r\n\r\n\r\n\r\n\r\n##################################################################\r\n#### get regular and relative degrees, in-degrees and out-degrees\r\n\r\nD = cffun.degree_analysis(edges,N,networks)\r\ncffun.write_degrees(directory,D,ratios2,'parc2009',CF_type, add_inf='') \r\n\r\n\r\n\r\n\r\n\r\n\r\n###############################################################################\r\n########## low-high analysis \r\n\r\n### run low-high analysis with Wilcoxon and permutation tests\r\nalpha = 0.05\r\nN_perm = 1000\r\nN_rat = 6\r\nN, lh = cffun.low_to_high_analysis(edges,N,LFs,HFs,alpha,N_perm,parc,directory,networks,N_rat=N_rat) \r\nN_min = 8\r\nlh_thr = cffun.low_to_high_threshold(lh,N,N_min,networks) # apply N_min threshold \r\nlh = lh_thr\r\n \r\n### save results with pickle\r\nfileout4 = directory + '_results\\\\_pickle dump\\\\Low-high ' + CF_type + ', ' + PS_metric + ', ' + parc + ' ' + time.strftime(\"%Y-%m-%d\") + '.dat' # save with pickle\r\npick.dump(lh,open(fileout4,'wb'))\r\n\r\n### load results\r\nfilein4 = directory + '_results\\\\_pickle dump\\\\Low-high ' + CF_type + ', ' + PS_metric + ', parc2009, N_rat=2.dat'\r\nlh = pick.load(open(filein4,'rb'))\r\n\r\n\r\n \r\n### write in&out values csv\r\ncffun.write_csv_low_to_high(directory,lh_thr,ratios2,parc, CF_type,add_inf=' corr')\r\n\r\n### plot out-in\r\nplots.semi_log_plot([10,5],lh.out_minus_in_degree_pn[:,:,0],LFs, [1,50], 'degree', network_names,None,'ur',None,True,cmap=my_cmap4,ncols=2,CI=False) \r\nplots.semi_log_plot([10,5],lh.out_minus_in_degree_pn[:,:,1],LFs, [1,50], 'degree', network_names,None,'ur',None,True,cmap=my_cmap4,ncols=2,CI=False) \r\n\r\n\r\n### plot results of difference tests: Wilc and perm.\r\ndataL = [[lh.K_LTH_wilc[:,0]],[lh.K_LTH_wilc[:24,1]],[lh.K_LH[:,0]],[lh.K_LH[:24,1]]] \r\nylimA = [[0,0.3] for i in range(4)]\r\nplots.semi_log_plot_multi([7,4],2,2,dataL,LFs,[[0,50] for i in range(4)],['K','K','K','K'],['wilc 1:2','wilc 1:3','perm 1:2','perm 1:3'],['brg','brg','brg','brg'],ylimA=ylimA,show=True,xlab=[0,0,1,1],fontsize=12)\r\n\r\n\r\n\r\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_95","text":"# importing packages\nimport math\nimport numpy as np\nimport pandas as pd\nimport scipy.stats as stats\nimport sklearn\nimport imblearn\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nplt.style.use('ggplot')\n\nfrom imblearn.over_sampling import RandomOverSampler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import LinearSVC\nimport xgboost as xgb \nfrom sklearn import metrics\nfrom imblearn.over_sampling import SMOTE\nfrom imblearn.over_sampling import ADASYN\nimport math\nfrom sklearn.model_selection 
import GridSearchCV\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.utils import shuffle\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom collections import Counter\n\nclass modelpipeline:\n def __init__(self):\n pass\n \n def run_model(self, df, varlist, response, standardize, sampletype, modelname, text, n_fold):\n # Remove any features not wanted based on varlist input and re-order based on varlist\n df = df[varlist]\n # We have to remove response from varlist - varlist_noresponse - as it is used later to subset out features\n # Refer to the for loop for the cross validation where X_train and X_test is created at the end of loop\n varlist_noresponse = []\n for col in varlist:\n if col != response:\n varlist_noresponse.append(col)\n \n \n if isinstance(n_fold, int) and n_fold > 1:\n # Initialize dictionary to store results\n self.store = {\"accuracy\": [], \"actual_accuracy\": [], \"sensitivity\": [], \"specificity\": [], \n \"precision\": [], \"f1\": [], \"auc\": [], \"pr_auc\": [], \"final\": {}}\n \n # Split dataframes into 2, one for positive response and one for negative\n df_zero = df[df[response] == 0]\n df_one = df[df[response] == 1]\n \n # Shuffle dataframe for response=0 and =1 so that train-test will not be biased in case rows that are similar are placed side by side\n # Later on, we will reset the index and select by the index number by sections\n df_zero = shuffle(df_zero, random_state=42)\n df_one = shuffle(df_one, random_state=42)\n df_zero = df_zero.reset_index(drop=True)\n df_one = df_one.reset_index(drop=True)\n \n # Get the average number of records required for negative response and positive response for test records\n # Train records will then have all the other records not in the test records\n # n_fold is the number of folds for cross validation\n start_index_one = 0\n end_index_one = math.floor(df_one.shape[0]\/n_fold)\n start_index_zero = 0\n end_index_zero = math.floor(df_zero.shape[0]\/n_fold)\n \n for i in range(1,n_fold+1):\n if i != n_fold:\n print('Getting TEST DF for response 1 from index ' + str(start_index_one) + ' to ' + str(end_index_one))\n df_one_test = df_one.iloc[start_index_one:end_index_one]\n print('Getting TRAIN DF for response 1 from index 0 to ' + str(start_index_one) + ' and from index ' + str(end_index_one) + ' to ' + str(df_one.shape[0]))\n df_one_train = pd.concat([df_one.iloc[0:start_index_one],df_one.iloc[end_index_one:]], axis=0)\n start_index_one += math.floor(df_one.shape[0]\/n_fold)\n end_index_one += math.floor(df_one.shape[0]\/n_fold)\n \n print('Getting TEST DF for response 0 from index ' + str(start_index_zero) + ' to ' + str(end_index_zero))\n df_zero_test = df_zero.iloc[start_index_zero:end_index_zero]\n print('Getting TRAIN DF for response 0 from index 0 to ' + str(start_index_zero) + ' and from index ' + str(end_index_zero) + ' to ' + str(df_zero.shape[0]))\n df_zero_train = pd.concat([df_zero.iloc[0:start_index_zero],df_zero.iloc[end_index_zero:]], axis=0)\n start_index_zero += math.floor(df_zero.shape[0]\/n_fold)\n end_index_zero += math.floor(df_zero.shape[0]\/n_fold)\n\n else:\n # Last section of split needs to reach the end of dataset\n print('Getting TEST DF for response 1 from index ' + str(start_index_one) + ' to ' + str(df_one.shape[0]))\n df_one_test = 
df_one.iloc[start_index_one:df_one.shape[0]]\n print('Getting TRAIN DF for response 1 from index 0 to ' + str(start_index_one))\n df_one_train = df_one.iloc[0:start_index_one]\n \n # Last section of split needs to reach the end of dataset\n print('Getting TEST DF for response 0 from index ' + str(start_index_zero) + ' to ' + str(df_zero.shape[0]))\n df_zero_test = df_zero.iloc[start_index_zero:df_zero.shape[0]]\n print('Getting TRAIN DF for response 0 from index 0 to ' + str(start_index_zero))\n df_zero_train = df_zero.iloc[0:start_index_zero]\n \n # Combine the subsetted sections for negatives and postives for both train and test before oversampling \n df_train = pd.concat([df_one_train, df_zero_train], axis=0)\n df_test = pd.concat([df_one_test, df_zero_test], axis=0)\n # varlist_noresponse has the feature list X without Y while response is the Y\n # print(varlist_noresponse)\n X_train = df_train[varlist_noresponse]\n # print('Check X train vars after combining pds')\n # print(X_train.columns.values)\n y_train = df_train[response]\n X_test = df_test[varlist_noresponse]\n y_test = df_test[response]\n \n if standardize == True:\n scaling = MinMaxScaler(feature_range=(-1,1)).fit(X_train)\n X_train = scaling.transform(X_train)\n X_test = scaling.transform(X_test)\n X_train = pd.DataFrame(X_train, columns=varlist_noresponse)\n X_test = pd.DataFrame(X_test, columns=varlist_noresponse)\n\n if sampletype == 'smote':\n X_train, X_test, y_train, y_test = sampling.smote_oversample(X_train, X_test, y_train, y_test, response)\n elif sampletype == 'adasyn':\n X_train, X_test, y_train, y_test = sampling.adasyn_oversample(X_train, X_test, y_train, y_test, response)\n elif sampletype == 'naive':\n X_train, X_test, y_train, y_test = sampling.naive_oversample(X_train, X_test, y_train, y_test, response)\n else:\n # Convert all DF to numpy array for model building later\n X_train = X_train.values\n y_train = y_train.values\n X_test = X_test.values\n y_test = y_test.values\n \n # Build model in current fold\/iteration and get accuracy, sensitivity, specificity, precision, f1, auc\n self.store = self.build_model(X_train, X_test, y_train, y_test, text, modelname, i, n_fold, self.store)\n \n # test model with all actual fraud results\n if standardize == True:\n df_acc = pd.concat([pd.DataFrame(scaling.transform(df[varlist_noresponse]),columns=varlist_noresponse),df[response]],axis=1)\n # print(df)\n self.store['actual_accuracy'].append(evaluate.actual_acc(df_acc, self.store['model'], response))\n else:\n self.store['actual_accuracy'].append(evaluate.actual_acc(df, self.store['model'], response))\n \n # Before results are returned, get average of all evaluation metrics and store in store['final'] section\n self.store['final']['accuracy'] = self.avg(self.store['accuracy'])\n self.store['final']['sensitivity'] = self.avg(self.store['sensitivity'])\n self.store['final']['specificity'] = self.avg(self.store['specificity'])\n self.store['final']['precision'] = self.avg(self.store['precision'])\n self.store['final']['f1'] = self.avg(self.store['f1'])\n self.store['final']['auc'] = self.avg(self.store['auc'])\n self.store['final']['pr_auc'] = self.avg(self.store['pr_auc'])\n self.store['final']['actual_accuracy'] = self.avg(self.store['actual_accuracy'])\n \n print('Final Results of ' + str(n_fold) + ' fold CV:')\n print(self.store['final'])\n return self.store\n \n else:\n print('n fold must be an integer greater than 1')\n return self.store\n \n def build_model(self, X_train, X_test, y_train, y_test, text, modelname, 
i, n_fold, store):\n if modelname == 'LogisticRegression':\n model = LogisticRegression(max_iter=300, C=0.8, solver='liblinear')\n model.fit(X_train,y_train)\n elif modelname == 'XGBoost':\n model = xgb.XGBClassifier(seed=42, nthread=1, max_depth=math.ceil(math.sqrt(X_train.shape[1])),\n n_estimators=100, random_state=42)\n model.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_test, y_test)], verbose=5)\n elif modelname == 'XGBoostminus1':\n # XGBoost with one less depth\n model = xgb.XGBClassifier(seed=42, nthread=1, max_depth=math.ceil(math.sqrt(X_train.shape[1])-1),\n n_estimators=100, random_state=42)\n model.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_test, y_test)], verbose=5)\n elif modelname == 'XGBoostplus1':\n # XGBoost with one more depth\n model = xgb.XGBClassifier(seed=42, nthread=1, max_depth=math.ceil(math.sqrt(X_train.shape[1]))+1,\n n_estimators=100, random_state=42)\n model.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_test, y_test)], verbose=5)\n elif modelname == 'XGBoostplus3':\n # XGBoost with 3 more depth\n model = xgb.XGBClassifier(seed=42, nthread=1, max_depth=math.ceil(math.sqrt(X_train.shape[1]))+3,\n n_estimators=100, random_state=42)\n model.fit(X_train, y_train, eval_set=[(X_train, y_train), (X_test, y_test)], verbose=5)\n \n # Use Linear SVC instead of sklearn svm.SVC as the former as way faster processing speed\n # However, LinearSVC does not have .predict_proba function to get probability of response 1\n # Hence, we need to use CalibratedClassifier that provides .predict_proba functionality\n # On the bad side, it has it's own CV, so we put 10 fold CV to minimize the dataset loss due to train-test fold\n # Ideally, we should use the older pipeline code that does not do customized k-fold CV (refer to SCI16. 
Jupyter Notebook)\n elif modelname == 'SVM_Linear':\n model = LinearSVC(random_state=42) # default C=1 (regularization parameter)\n model = CalibratedClassifierCV(model, cv=10)\n model.fit(X_train,y_train)\n elif modelname == 'SVM_Linear2':\n model = LinearSVC(random_state=42, C=2)\n model = CalibratedClassifierCV(model, cv=10)\n model.fit(X_train,y_train)\n elif modelname == 'SVM_Linear0.5':\n model = LinearSVC(random_state=42, C=0.5)\n model = CalibratedClassifierCV(model, cv=10)\n model.fit(X_train,y_train)\n elif modelname == 'SVM_Linear0.3':\n model = LinearSVC(random_state=42, C=0.3)\n model = CalibratedClassifierCV(model, cv=10)\n model.fit(X_train,y_train)\n elif modelname == 'RandomForest':\n treedepth = math.ceil(math.sqrt(X_train.shape[1]))\n model = RandomForestClassifier(random_state=42, max_depth=treedepth, n_estimators=100)\n model.fit(X_train,y_train)\n elif modelname == 'RandomForestminus1':\n treedepth = math.ceil(math.sqrt(X_train.shape[1]))-1\n model = RandomForestClassifier(random_state=42, max_depth=treedepth, n_estimators=100)\n model.fit(X_train,y_train)\n elif modelname == 'RandomForestminus2':\n treedepth = math.ceil(math.sqrt(X_train.shape[1]))-2\n model = RandomForestClassifier(random_state=42, max_depth=treedepth, n_estimators=100)\n model.fit(X_train,y_train)\n elif modelname == 'RandomForestplus2':\n treedepth = math.ceil(math.sqrt(X_train.shape[1]))+2\n model = RandomForestClassifier(random_state=42, max_depth=treedepth, n_estimators=100)\n model.fit(X_train,y_train)\n elif modelname == 'RandomForestplus4':\n treedepth = math.ceil(math.sqrt(X_train.shape[1]))+4\n model = RandomForestClassifier(random_state=42, max_depth=treedepth, n_estimators=100)\n model.fit(X_train,y_train)\n else:\n # Parameters based on gridsearchcv of modelname = logistic regresion\n # Leave parameter blank for modelname to run this instance of logistic regression\n model = LogisticRegression(C=0.8, max_iter=300, solver='liblinear')\n model.fit(X_train,y_train)\n \n y_predict = model.predict(X_test)\n y_predictprob = model.predict_proba(X_test)[:, 1]\n store = evaluate.model_results(y_test, y_predict, y_predictprob, text, store, i, n_fold)\n \n # Store model for usage in measuring actual accuracy of fraud cases\n store['model'] = model\n print(\"Iteration \" + str(i) + \" out of \" + str(n_fold) + \" of CV for model fitting and obtaining results is complete!\")\n print(\"\\n\")\n return store\n \n \n def avg(self, array):\n return sum(array) \/ len(array)\n\nclass sampling:\n def __init__(self):\n pass\n @staticmethod\n def naive_oversample(X_train, X_test, y_train, y_test, response):\n ros = RandomOverSampler(random_state=42)\n X_train, y_train = ros.fit_resample(X_train, y_train)\n # train test split keeps X_test and y_test as pd series, oversampler converts X_train, y_train to numpy\n # Convert all to numpy array for XGBoost to not have bugs\n X_test = X_test.values\n y_test = y_test.values\n print(\"Oversampling is complete!\")\n return X_train, X_test, y_train, y_test\n \n @staticmethod\n def smote_oversample(X_train, X_test, y_train, y_test, response):\n X_train, y_train = SMOTE().fit_resample(X_train, y_train)\n # train test split keeps X_test and y_test as pd series, oversampler converts X_train, y_train to numpy\n # Convert all to numpy array for XGBoost to not have bugs\n X_test = X_test.values\n y_test = y_test.values\n print(\"Number of Xs and Ys for SMOTE:\")\n print(sorted(Counter(y_train).items()))\n print(\"Oversampling is complete!\")\n return X_train, X_test, y_train, 
y_test\n \n @staticmethod\n def adasyn_oversample(X_train, X_test, y_train, y_test, response):\n X_train, y_train = ADASYN().fit_resample(X_train, y_train)\n # train test split keeps X_test and y_test as pd series, oversampler converts X_train, y_train to numpy\n # Convert all to numpy array for XGBoost to not have bugs\n X_test = X_test.values\n y_test = y_test.values\n print(\"Number of Xs and Ys for ADASYN:\")\n print(sorted(Counter(y_train).items()))\n print(\"Oversampling is complete!\")\n return X_train, X_test, y_train, y_test\n\n\n\nclass evaluate:\n def __init__(self):\n pass\n \n @staticmethod\n def model_results(y_test, y_predict, y_predictprob, text, store, i, n_fold):\n cm = metrics.confusion_matrix(y_test, y_predict)\n print(cm)\n RFC_CM = pd.DataFrame(cm, ['Actual 0', 'Actual 1'], ['Predict 0', 'Predict 1'])\n sns.heatmap(RFC_CM, annot=True, annot_kws={\"size\": 16}, cmap='Greens', linewidths=1, fmt='g')# font size\n sns.set(font_scale=1.4)#for label size\n plt.title(\"Confusion Matrix for \" + text)\n\n # fix for mpl bug that cuts off top\/bottom of seaborn viz\n b, t = plt.ylim() \n b += 0.5 \n t -= 0.5 \n plt.ylim(b, t) \n plt.figure(1,figsize=(4,4))\n plt.show() \n\n accuracy = metrics.accuracy_score(y_test, y_predict)\n # print('Accuracy: ' + str(accuracy))\n sensitivity = cm[1][1] \/ (cm[1][1] + cm[1][0])\n recall = sensitivity\n # print('Sensitivity: ' + str(sensitivity))\n specificity = cm[0][0] \/ (cm[0][0] + cm[0][1])\n # print('Specificity: ' + str(specificity))\n precision = cm[1][1] \/ (cm[1][1] + cm[0][1])\n # print('Precision: ' + str(precision))\n f1 = 2 * (recall * precision)\/(recall + precision)\n # print('f1 score: ' + str(f1))\n auc, pr_auc = evaluate.ROC(y_test, y_predictprob, text, i, n_fold)\n \n store['accuracy'].append(accuracy)\n store['sensitivity'].append(sensitivity)\n store['specificity'].append(specificity)\n store['precision'].append(precision)\n store['f1'].append(f1)\n store['auc'].append(auc)\n store['pr_auc'].append(pr_auc)\n\n return store\n \n# @staticmethod\n# def ROC(y_test, y_predictprob, text):\n# # IMPORTANT: first argument is true values, second argument is predicted probabilities\n# auc = metrics.roc_auc_score(y_test, y_predictprob)\n# # print(\"AUC value is: \" + str(auc))\n# fpr, tpr, thresholds = metrics.roc_curve(y_test, y_predictprob)\n# # print(\"AUC value is also: \" + str(metrics.auc(fpr, tpr)))\n# plt.plot(fpr, tpr)\n# plt.xlim([0.0, 1.0])\n# plt.ylim([0.0, 1.0])\n# plt.title('ROC curve for ' + text)\n# plt.xlabel('False Positive Rate (1 - Specificity)')\n# plt.ylabel('True Positive Rate (Sensitivity)')\n# plt.grid(True)\n# return auc\n\n @staticmethod\n def ROC(y_test, y_predictprob, text, i, n_fold):\n # IMPORTANT: first argument is true values, second argument is predicted probabilities\n auc = metrics.roc_auc_score(y_test, y_predictprob)\n # print(\"AUC value is: \" + str(auc))\n print(\"AUC value is: \" + str(auc))\n fpr, tpr, thresholds = metrics.roc_curve(y_test, y_predictprob)\n # print(\"AUC value is also: \" + str(metrics.auc(fpr, tpr)))\n # Calculate precision and recall for each threshold\n precision, recall, _ = metrics.precision_recall_curve(y_test, y_predictprob)\n pr_auc = metrics.auc(recall, precision)\n # Only show ROC-AUC graph and PR-AUC graph on last iteration as they look very similar\n # The full results can be obtained in the results section\n if n_fold == i:\n fullgraph = plt.figure(1,figsize=(10,20))\n plt.style.use('ggplot')\n ROCAUC_plot = fullgraph.add_subplot(211)\n ROCAUC_plot.plot(fpr, 
tpr, color='blue')\n ROCAUC_plot.set_title('ROC curve for ' + text)\n ROCAUC_plot.set_xlabel('False Positive Rate (1 - Specificity)')\n ROCAUC_plot.set_ylabel('True Positive Rate (Sensitivity)')\n ROCAUC_plot.set_xlim([0.0, 1.0])\n ROCAUC_plot.set_ylim([0.0, 1.0])\n ROCAUC_plot.grid(True)\n PRAUC_plot = fullgraph.add_subplot(212)\n PRAUC_plot.plot(precision, recall, color='purple')\n PRAUC_plot.set_title('Precision-Recall curve for ' + text)\n PRAUC_plot.set_xlabel('Recall')\n PRAUC_plot.set_ylabel('Precision')\n PRAUC_plot.set_xlim([0.0, 1.0])\n PRAUC_plot.set_ylim([0.0, 1.0])\n PRAUC_plot.grid(True)\n return auc, pr_auc\n\n @staticmethod\n def actual_acc(df, model, response):\n allpositive = df[df[response] == 1]\n x_positive = allpositive.drop([response], axis=1)\n y_positive = allpositive[response]\n # Convert to numpy array due to XGBoost model.predict not working well for pandas\n x_positive = x_positive.values\n y_positive = y_positive.values\n y_pospredict = model.predict(x_positive)\n accuracy_positive = metrics.accuracy_score(y_positive, y_pospredict)\n # print(\"Accuracy with all fraud results is \" + str(accuracy_positive * 100) + \"%\")\n return accuracy_positive"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_496","text":"import numpy as np\n\nimport torch\n\nfrom scipy.stats import wasserstein_distance\n\nfrom generate.metrics import ca_metrics, amino_acid_metrics, secondary_sequence_metrics\n\nclass MMD:\n \"\"\"\n Compute the Maximum Mean Discrepancy (MMD) between the predicted graph and a set of target ones.\n\n Graph statistics include amino acid sequence information and secondary structure sequence information.\n\n Source\n ------\n https:\/\/torchdrift.org\/notebooks\/note_on_mmd.html\n\n Attributes\n ----------\n pred_graph : (torch.Tensor, torch.Tensor)\n The predicted graph consisting of the predicted node features and the predicted distance matrix.\n target_graphs : list of (torch.Tensor, torch.Tensor)\n The target graphs, where each entry consists of the node features and the distance matrix.\n median_subset : int\n The size of the subset to use to compute `sigma`.\n \"\"\"\n\n def __init__(self, pred_graph, target_graphs, median_subset=100):\n \"\"\"\n Initialize the class.\n\n Parameters\n ----------\n pred_graph : (torch.Tensor, torch.Tensor)\n The predicted graph consisting of the predicted node features and the predicted distance matrix.\n target_graphs : list of (torch.Tensor, torch.Tensor)\n The target graphs, where each entry consists of the node features and the distance matrix.\n median_subset : int, optional\n The size of the subset to use to compute `sigma`. 
The default is 100.\n \"\"\"\n self.pred_graph = pred_graph\n self.target_graphs = target_graphs\n self.median_subset = median_subset\n\n def _wasserstein_kernel(self, x, y, sigma):\n x_flat, y_flat = x.detach().view(-1), y.detach().view(-1)\n return torch.exp(wasserstein_distance(x_flat, y_flat)\/2*(sigma**2))\n\n def _mmd(self, x, y):\n # Get number of samples\n n, m = x.shape[0], y.shape[0]\n # Compute sigma\n dists = torch.pdist(torch.cat([x.detach(), y.detach()], dim=0)[:,None])\n sigma = dists[:self.median_subset].median()\/2\n # Compute the mmd\n xx, yy, xy = 0, 0, 0\n for i in range(n):\n for j in range(n):\n xx += self._wasserstein_kernel(x[i], x[j], sigma)\n for i in range(m):\n for j in range(m):\n yy += self._wasserstein_kernel(y[i], y[j], sigma)\n for i in range(n):\n for j in range(m):\n xy += self._wasserstein_kernel(x[i], y[j], sigma)\n return xx + yy - 2*xy\n\n def compare_graphs(self):\n \"\"\"\n Compare the graphs.\n\n Returns\n -------\n scores : numpy.ndarray\n The comparison scores.\n \"\"\"\n scores = []\n # Compute MMD score of the predicted graph\n x_aa = amino_acid_metrics(self.pred_graph[0])\n x_ss = secondary_sequence_metrics(self.pred_graph[0])\n x = torch.cat((x_aa,x_ss))\n for target_graph in self.target_graphs:\n y_aa = amino_acid_metrics(target_graph[0])\n y_ss = secondary_sequence_metrics(target_graph[0])\n y = torch.cat((y_aa,y_ss))\n # Compare\n scores.append(self._mmd(x, y))\n scores = np.array(scores)\n return scores\n\nclass QCP:\n \"\"\"\n Compute the superimposition RMSD of the predicted atoms against validation ones.\n\n The idea of this control is that if the overall RMSD is low, then the generated protein is likely to be\n realistic.\n\n Attributes\n ----------\n pred_coords : numpy.ndarray\n The coordinates of the generated C-alpha atoms.\n target_proteins : list of Bio.PDB.Structure\n The list of target proteins.\n \"\"\"\n\n def __init__(self, pred_coords, target_proteins):\n \"\"\"\n Initialize the class.\n\n Parameters\n ----------\n pred_coords : torch.tensor\n The coordinates of the generated C-alpha atoms. 
The input should be a PyTorch tensor, which it then\n converted to a Numpy array.\n target_proteins : list of Bio.PDB.Structure\n The list of target proteins.\n \"\"\"\n self.pred_coords = pred_coords\n self.target_proteins = target_proteins\n\n def _get_target_ca_atoms_coords(self):\n target_coords_list = []\n for protein in self.target_proteins:\n coords = []\n for model in protein:\n for chain in model:\n for residue in chain:\n if 'CA' in residue:\n coords.append(residue['CA'].get_coord())\n target_coords_list.append(np.array(coords))\n return target_coords_list\n\n def _get_shifted_coords(self, coords_long, coords_short):\n shifted_coords, i = [], 0\n while i + len(coords_short) <= len(coords_long):\n shifted_coords.append(coords_long[i:i+len(coords_short)])\n i += 1\n return shifted_coords\n\n def superimpose(self):\n \"\"\"\n Compute the superimposition.\n\n Returns\n -------\n numpy.ndarray\n The array of RMSD scores (each i-th entry corresponds to the comparison of the generated atoms with the\n i-th protein)\n \"\"\"\n scores = []\n target_coords_list = self._get_target_ca_atoms_coords()\n for target_coords in target_coords_list:\n if len(target_coords) == len(self.pred_coords):\n scores.append(ca_metrics(target_coords, self.pred_coords))\n elif len(target_coords) > len(self.pred_coords):\n shifted_coords = self._get_shifted_coords(target_coords, self.pred_coords)\n rmsds = []\n for scs in shifted_coords:\n rmsds.append(ca_metrics(scs, self.pred_coords))\n scores.append(np.min(np.array(rmsds)))\n else:\n shifted_coords = self._get_shifted_coords(self.pred_coords, target_coords)\n rmsds = []\n for scs in shifted_coords:\n rmsds.append(ca_metrics(target_coords, scs))\n scores.append(np.min(np.array(rmsds)))\n scores = np.array(scores)\n return scores"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_497","text":"import itertools\nfrom functools import partial\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats\n\n# --- Preliminaries ---\n\n# - Partitioning techniques -\nclass BestSolutionHolder:\n \"\"\"\n Keeps in memory the best solution of a partitioning technique,\n is used in the function below.\n \"\"\"\n def __init__(self, crit, n_pos, n_neg, frontier, orient,\n left_n_right=False, n_pos_tot=None, n_neg_tot=None):\n self.crit = crit\n self.left_n_right = left_n_right\n if left_n_right:\n if n_pos_tot is None or n_neg_tot is None:\n raise ValueError(\"Please indicate the total number of positives or negatives.\")\n self.n_pos_tot = n_pos_tot\n self.n_neg_tot = n_neg_tot\n\n self.best_val = None\n self.best_front = None\n self.best_orient = None\n\n self.calls = 0\n\n self.save_sol(n_pos, n_neg, frontier, orient)\n\n def save_sol(self, n_pos, n_neg, frontier, orient):\n \"\"\"Saves a proposition as solution if it is better than before.\"\"\"\n self.calls += 1\n\n val = self.crit(n_pos, n_neg)\n\n if self.best_val is None or val > self.best_val:\n self.best_val = val\n self.best_front = frontier\n self.best_orient = orient\n\n if self.left_n_right:\n val = self.crit(self.n_pos_tot - n_pos, self.n_neg_tot - n_neg)\n\n if self.best_val is None or val > self.best_val:\n self.best_val = val\n self.best_front = frontier\n self.best_orient = not orient\n\ndef bipart_partition(X, Y, criterion, epsilon=0.001):\n \"\"\"\n Takes values in R associated to a class in {-1, +1}, a criterion that depends solely\n on the number of positive or negative values in a partition and finds the partition\n that maximizes the criterion.\n \"\"\"\n # 
crit = lambda np, nn : np - nn\n n = X.shape[0]\n n_pos_tot = (Y == +1).sum()\n n_neg_tot = n - n_pos_tot\n XY = np.vstack([X, Y]).transpose()\n sorted_XY = XY[np.argsort(XY[:, 0])]\n n_pos, n_neg = 0, 0\n best_value = BestSolutionHolder(criterion, n_pos, n_neg, sorted_XY[0, 0]-epsilon, False,\n left_n_right=True, n_pos_tot=n_pos_tot, n_neg_tot=n_neg_tot)\n i = 0\n for xy, xyp in zip(sorted_XY, sorted_XY[1:]):\n x, y = xy\n xp, yp = xyp\n frontier = (x + xp)\/2\n # assumed the density of X continuous hence xp > x\n n_pos += int(y == +1)\n n_neg += int(y == -1)\n best_value.save_sol(n_pos, n_neg, frontier, False)\n i += 1\n\n frontier = xp + epsilon\n n_pos += int(yp == +1)\n n_neg += int(yp == -1)\n best_value.save_sol(n_pos, n_neg, frontier, False)\n return best_value.best_front, best_value.best_val, best_value.best_orient\n\n# - Plotting utils -\n\ndef plot_eta(a_collection, param_fun, n_points=100):\n \"\"\"\n Plots a family of functions defined on [0,1] indexed by a\n for a set of possible values for a.\n \"\"\"\n plt.figure(figsize=(4, 4))\n\n x_ax = np.linspace(0., 1., n_points)\n\n for a in a_collection:\n eta_fun = partial(param_fun, a=a)\n plt.plot(x_ax, list(map(eta_fun, x_ax)),\n label=\"$a = {:0.2f}$\".format(a))\n\n plt.title(r\"The function $\\eta_a$ for different a\")\n plt.xlabel(\"x\")\n plt.ylabel(r\"$\\eta_a$(x)\")\n plt.grid()\n plt.legend()\n plt.show()\n\ndef plot_distribution(a_collection, gen_datafun):\n \"\"\"\n Plots the distributions induced by posterior probability functions defined on [0,1]\n indexed by a for a set of possible values for a.\n \"\"\"\n plt.figure(figsize=(8, 10))\n\n for i, a in enumerate(a_collection):\n plt.subplot(5, 2, i + 1)\n\n X, Y = gen_datafun(n=500, a=a)\n plt.grid()\n plt.hist(X[Y == -1], bins=30, color=\"red\", normed=True, alpha=0.5)\n plt.hist(X[Y == 1], bins=30, color=\"green\", normed=True, alpha=0.5)\n plt.title(\"Sample distribution, $a = {:2.2f}$\".format(a))\n plt.tight_layout()\n plt.show()\n\ndef GetSlope(df_int, quant_val=0.9):\n \"\"\"\n Gets the slope corresponding to the log of a quantile of\n the generalization error regressed by the logarithm of n, number of data points.\n Parameters:\n * df_int: Dataframe containing the columns \"n\" and \"gen_error\"\n with enough entries for each value of \"n\".\n * quant_val: quantile value that we choose.\n Returns:\n * constant, slope: values in R.\n \"\"\"\n groupby_quant = df_int.groupby(\"n\").quantile(quant_val)\n vals_med = groupby_quant[\"gen_error\"].values\n ns = df_int[\"n\"].unique()\n reg = scipy.stats.linregress(np.log(ns), np.log(vals_med))\n return np.exp(reg.intercept), reg.slope\n\ndef boxplot_slopes(df, quant=0.5, ylim=[10**(-4), 10**(0)]):\n \"\"\"\n Does a boxplot for each a of the results, to show the different generalization speeds.\n Parameters:\n * df: Dataframe containing the columns \"a\", \"n\" and \"gen_error\"\n with enough entries for each values of \"n\",\"a\".\n * quant_val: quantile value that we choose to regress the empirical generalization speed.\n \"\"\"\n plt.figure(figsize=(20, 12))\n\n x_n_plots = len(df[\"a\"].unique())\/\/2 + 1\n for i, a in enumerate(df[\"a\"].unique()):\n df_int = df[df[\"a\"] == a]\n ns = df_int[\"n\"].unique()\n bxplt_vals = [df_int[df_int[\"n\"] == n_val][\"gen_error\"].values\n for n_val in df_int[\"n\"].unique()]\n\n correct_width = [5] + list(map(lambda x: x \/ 4, ns[1:]))\n plt.subplot(x_n_plots, 4, i + 1)\n plt.boxplot(bxplt_vals, positions=ns,\n widths=correct_width)\n plt.xscale(\"log\")\n 
plt.yscale(\"log\")\n\n reg_med_const, reg_med_slope = GetSlope(df_int, quant_val=quant)\n\n plt.plot(ns, reg_med_const * np.power(ns, reg_med_slope), '-b')\n\n theo_slope = -1. \/ (2. - a)\n plt.plot(ns, 10**(theo_slope * np.log10(ns)), '-g')\n\n plt.title(\"Experiment $a = {:1.1f}$\\nSlope med (blue): ${:0.2f}$\\n\".format(\n a, reg_med_slope) + \"Theoretical bound slope (green) : ${:0.2f}$\".format(\n theo_slope))\n plt.ylim(ylim)\n\n plt.xlabel(\"$n$\")\n plt.ylabel(\"Regret\")\n plt.grid()\n\n plt.tight_layout()\n\n plt.show()\n\n\ndef slopes_quants(df, quant_values=np.linspace(0.7, 0.9, 5)):\n \"\"\"\n Compares the theoretical generalization slope and the empirical generalization slope.\n Parameters:\n * df: Dataframe containing the columns \"a\", \"n\" and \"gen_error\"\n with enough entries for each values of \"n\",\"a\".\n * quant_values: quantile values that we choose to regress the empirical generalization speed.\n \"\"\"\n plt.figure(figsize=(8, 15))\n a_collection = df[\"a\"].unique()\n\n theo_slopes = [-1. \/ (2. - a) for a in a_collection]\n emp_vals = []\n\n for i, quant in enumerate(quant_values):\n reg_med_slopes = list()\n for a in a_collection:\n df_int = df[df[\"a\"] == a]\n reg_med_slopes.append(GetSlope(df_int, quant_val=quant)[1])\n emp_vals.append(reg_med_slopes)\n plt.subplot(5, 2, i + 1)\n plt.plot(theo_slopes, reg_med_slopes, \"bo\")\n plt.plot([-1, -0.5], [-1, -0.5], color=\"red\")\n plt.xlabel(\"Theoretical slopes\")\n plt.ylabel(\"Experimental slopes\\n(quantile at ${:2.2f}$)\".format(quant))\n plt.grid()\n\n plt.tight_layout()\n\n plt.show()\n\ndef grid_count(grid, X):\n \"\"\"Counts the number of elements on a grid.\"\"\"\n del_elems = X < grid[0]\n remaining_X = X[~del_elems]\n res = list()\n for thre in grid[1:]:\n del_elems = remaining_X < thre\n remaining_X = X[~del_elems]\n res.append(del_elems.sum())\n return res\n\ndef plot_select_eta(a_collection, eta_fun_of_a, datagen_fun_of_a, n=1000,\n n_points=100, n_bins=20):\n \"\"\"\n Plots the empirical distribution of the data, as well as the theoretical\n distribution of it.\n \"\"\"\n x_ax = np.linspace(0., 1., n_points)\n\n grid = np.linspace(0., 1., n_bins+1)\n sizebin = grid[1]-grid[0]\n plt.figure(figsize=(12, 6))\n for i, a in enumerate(a_collection):\n ax1 = plt.subplot(1, 2, i + 1)\n\n eta_fun = lambda x, a=a: 2*eta_fun_of_a(x, a=a)\n\n X, Y = datagen_fun_of_a(n=n, a=a)\n gridXpos = grid_count(grid, X[Y == +1])\n gridXneg = grid_count(grid, X[Y == -1])\n ax2 = ax1.twinx()\n ax2.bar(grid[0:n_bins], gridXpos, width=sizebin, align=\"edge\",\n color=\"green\", alpha=0.5, label=\"class 1\")\n ax2.bar(grid[0:n_bins], gridXneg, width=sizebin, align=\"edge\",\n bottom=gridXpos, color=\"red\", alpha=0.5, label=\"class 2\")\n ax1.plot(x_ax, list(map(eta_fun, x_ax)),\n label=r\"$\\mu_1(x)$\", color=\"black\", linewidth=3)\n\n lines, labels = ax1.get_legend_handles_labels()\n lines2, labels2 = ax2.get_legend_handles_labels()\n ax2.legend(lines + lines2, labels + labels2, loc=\"lower right\") # \"upper left\") #\n if i == 0:\n ax1.set_ylabel(r\"$\\mu_1(x)$\")\n else:\n ax2.set_ylabel(r\"$P_n(X \\in $ bin $ \\; | \\; Y = i)$, $i \\in \\{1,2\\}$\",\n labelpad=20)\n plt.title(\"$a = \" + str(a)+\"$\")\n # if i == 1:\n ax1.set_xlabel(\"$x$\")\n plt.tight_layout()\n\ndef plot_possible_alpha_values(min_max_alpha, a_collection, m, alpha, sup_inc_simrank=False):\n \"\"\"\n Plot the possible alpha values.\n \"\"\"\n plt.figure(figsize=(4, 3))\n zipped_int_alpha = list(map(lambda a: min_max_alpha(a, m), a_collection))\n 
inf_alphas = [v[0] for v in zipped_int_alpha]\n sup_alphas = [v[1] for v in zipped_int_alpha]\n plt.plot(a_collection, inf_alphas, label=\"min for $\\\\alpha$ ($C>0$)\")\n plt.plot(a_collection, sup_alphas, label=\"max for $\\\\alpha$ ($C<1\/2$)\")\n plt.plot(a_collection, [alpha]*len(a_collection), label=\"chosen $\\\\alpha$ \")\n if sup_inc_simrank: # Only for the similarity ranking case.\n sup_inc = [0.5 - 0.5*np.power(np.abs(2*m-1), (1-a)\/a) for a in a_collection]\n plt.plot(a_collection, sup_inc, label=r\"$\\eta$ increasing\")\n plt.xlabel(\"$a$\")\n plt.ylabel(\"$\\\\alpha$\")\n plt.title(\"Limitations on the possible value for $\\\\alpha$\")\n plt.legend()\n plt.grid()\n plt.show()\n\ndef plot_emp_mammen(gendata_fun_of_a, eta_fun_of_a, a_collection, n_obs=1000, simrank=False):\n \"\"\"\n Tries and show visually what the Mammen-Tsybakov assumption means.\n \"\"\"\n plt.figure(figsize=(3, 3))\n\n for a in a_collection:\n X, _ = gendata_fun_of_a(n=n_obs, a=a)\n\n if simrank:\n all_pairs = itertools.combinations(X, 2)\n all_vals = list(map(lambda x, a=a: abs(eta_fun_of_a(x[0], x[1], a=a)-0.5), all_pairs))\n else:\n all_vals = [np.abs(eta_fun_of_a(x, a=a) - 0.5) for x in X]\n\n plt.hist(all_vals, bins=50, alpha=0.5, cumulative=True,\n normed=True, label=\"$a = {:0.2f}$\".format(a))\n\n plt.title(r\"The distribution of $|\\eta-1\/2|$ for different $a$\")\n plt.xlabel(r\"$|\\eta-1\/2|$\")\n plt.ylabel(r\"$P ( X \\le t)$\")\n plt.legend()\n plt.grid()\n plt.show()\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_498","text":"phone_display_demo.py\n\"\"\"\r\nCopyright (C) 2011-2012 \r\n\r\nPermission is hereby granted, free of charge, to any person obtaining a copy of\r\nthis software and associated documentation files (the \"Software\"), to deal in\r\nthe Software without restriction, including without limitation the rights to\r\nuse, copy, modify, merge, publish, distribute, sublicense, and\/or sell copies\r\nof the Software, and to permit persons to whom the Software is furnished to do\r\nso, subject to the following conditions:\r\n\r\nThe above copyright notice and this permission notice shall be included in all\r\ncopies or substantial portions of the Software.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\nSOFTWARE.\r\n\r\nDisplays either day or nighttime traffic image processing in a mock-up UI\r\nbased on the HTC Desire smartphone.\r\n\"\"\"\r\nimport numpy as np\r\nimport scipy\r\nimport scipy.ndimage as ndimg\r\nfrom collections import deque\r\nfrom copy import *\r\nimport PIL\r\nimport ImageOps\r\nimport pylab\r\nimport cv2\r\nimport os\r\nimport fnmatch\r\nimport sys\r\nimport pymorph\r\nimport night\r\nimport day\r\nimport argparse\r\n\r\nclass PhoneDemo(object):\r\n \"\"\" Object to run the phone UI demo. \"\"\"\r\n\r\n TYPE_DAY = \"DAY\"\r\n TYPE_NIGHT = \"NIGHT\"\r\n HISTORY_FRAMES = 600\r\n\r\n class DayProcessor(object):\r\n \"\"\" Object used to process day sequences. 
\"\"\"\r\n\r\n GMM_K = 3\r\n GMM_NUM_FRAMES = 25\r\n GMM_W_INIT = 0.1\r\n GMM_VAR_INIT = 20\r\n GMM_MAHA_THRESH = 3\r\n MASK_OVERLAY_ALPHA = 0.4\r\n\r\n def __init__(self, rgb):\r\n\r\n assert(rgb.dtype == 'uint8')\r\n\r\n self._gmm = day.GaussianMixtureModelUV(self.GMM_K, rgb.shape,\r\n self.GMM_NUM_FRAMES,\r\n self.GMM_W_INIT,\r\n self.GMM_VAR_INIT,\r\n self.GMM_MAHA_THRESH)\r\n self._ycbcr = np.zeros(rgb.shape, dtype='uint8')\r\n self._mask = np.zeros(rgb.shape[:2], dtype='uint8')\r\n self._red_mask = np.zeros(rgb.shape, dtype='uint8')\r\n self._rgb_red_masked = np.zeros(rgb.shape, dtype='uint8')\r\n self._process_count = 0\r\n\r\n def next(self, rgb):\r\n \"\"\" Process the next file and return the results. \"\"\"\r\n\r\n # Do GMM steps.\r\n self._gmm.rgb2ycbcr(rgb, self._ycbcr)\r\n self._gmm.segment_cl(self._mask)\r\n self._gmm.update_cl(self._ycbcr)\r\n # Save total pixels in foreground.\r\n fg_pixel_count = np.sum(self._mask)\r\n # Pull alpha and render red overlay\r\n # (channels are reversed RGB = BGR).\r\n self._red_mask[:,:,2] = self._mask * 255\r\n self._rgb_red_masked[:,:] = \\\r\n (self.MASK_OVERLAY_ALPHA * self._red_mask) + \\\r\n ((1. - self.MASK_OVERLAY_ALPHA) * rgb)\r\n\r\n # Ignore the first GMM_NUM_FRAMES \/ 2 frames.\r\n self._process_count = self._process_count + 1\r\n if self._process_count > self.GMM_NUM_FRAMES \/ 2:\r\n return fg_pixel_count, self._rgb_red_masked\r\n else:\r\n return 0, self._rgb_red_masked\r\n\r\n\r\n class NightProcessor(object):\r\n \"\"\" Object used to process day sequences. \"\"\"\r\n\r\n def __init__(self, rgb):\r\n\r\n pass\r\n\r\n def next(self, rgb):\r\n \"\"\" Process the next file and return the results. \"\"\"\r\n\r\n def blackout_date_regions(image, blackout_rects):\r\n \"\"\" Black out specified regions. \"\"\"\r\n\r\n for rect in blackout_rects:\r\n image[rect[1]:rect[3], rect[0]:rect[2]] = 0\r\n\r\n # Do bright object detection.\r\n blackout_date_regions(rgb, night.BLACKOUT_RECTS)\r\n steps = night.bright_object_detection(rgb)\r\n # Return results (channels are reversed RGB = BGR).\r\n label_img = pymorph.overlay(steps['luminance'].astype('uint8'),\r\n blue=steps['detect_dilate'])\r\n return steps['bright_blob_count'], label_img\r\n\r\n\r\n def __init__(self):\r\n\r\n # Initialize plotting parameters.\r\n self._history_raw = deque()\r\n self._history_filtered = deque()\r\n self._max_sample = 0.001\r\n self._ui = PhoneDisplay()\r\n self._filter_exp = 0.1\r\n self._sample_exp_filter = 0.\r\n\r\n def run_sequence(self, type, filepath, seq_range=None, filter_exp=None):\r\n \"\"\" Run a TYPE_DAY or TYPE_NIGHT sequence. \"\"\"\r\n\r\n QUIT_KEY_CODES = [ 27, 113, 81 ]\r\n PAUSE_KEY_CODES = [ 32, 112, 80 ]\r\n\r\n def pause():\r\n \"\"\" Poll input until the pause key is pressed. \"\"\"\r\n\r\n while True:\r\n key = cv2.waitKey(100)\r\n if PAUSE_KEY_CODES.count(key) > 0:\r\n break\r\n\r\n def bound_queue_push(val, q, maxlen=None):\r\n \"\"\" Push to bounded queue. \"\"\"\r\n\r\n q.append(val)\r\n if maxlen is not None and len(q) > maxlen:\r\n q.popleft()\r\n\r\n assert(type == self.TYPE_DAY or type == self.TYPE_NIGHT)\r\n\r\n # TODO(reissb) -- The history frames and filtering need to become\r\n # parameterized in some way. The history frames is fixed by the\r\n # camera framerate. 
The filtering is fixed by the required\r\n # detection sensitivity.\r\n if filter_exp is not None:\r\n self._filter_exp = filter_exp\r\n else:\r\n self._filter_exp = 0.1\r\n\r\n # Clear state.\r\n self._ui.clear()\r\n self._history_raw = deque()\r\n self._history_filtered = deque()\r\n self._max_sample = 0.001\r\n self._sample_exp_filter = 0.\r\n\r\n # Extract command-line parameters. This is the name of one file in the\r\n # series.\r\n path, filename = os.path.split(filepath)\r\n file_name, file_ext = os.path.splitext(os.path.basename(filename))\r\n series_name_end = file_name.rindex('_')\r\n series_name = file_name[:series_name_end]\r\n print \"Processing image series {0} in path {1}.\".format(series_name,\r\n path)\r\n files_in_path = os.listdir(path)\r\n series_pattern = series_name + '_[0-9]*' + file_ext\r\n print \"Processing files matching pattern {0}.\".format(series_pattern)\r\n series_suffixes = [int(os.path.splitext(fn)[0].split('_')[-1]) \\\r\n for fn in files_in_path \\\r\n if fnmatch.fnmatch(fn, series_pattern)]\r\n series_suffixes.sort()\r\n num_files = len(series_suffixes)\r\n print \"Found {0} files in image series {1}.\".format(num_files,\r\n series_name)\r\n # Check for limited range.\r\n if seq_range is not None:\r\n assert(seq_range[1] > seq_range[0] and seq_range[0] >= 0)\r\n print \"Filtering series range [{},{}).\".format(seq_range[0],\r\n seq_range[1])\r\n series_suffixes = np.array(series_suffixes)\r\n f = (series_suffixes >= seq_range[0]) * \\\r\n (series_suffixes < seq_range[1])\r\n series_suffixes = np.sort(series_suffixes * f)\r\n remove_count = len(series_suffixes) - np.sum(f)\r\n series_suffixes = np.delete(series_suffixes, range(remove_count))\r\n\r\n # Load first file and process.\r\n series_filename = series_name + '_' + str(series_suffixes[0]) + \\\r\n file_ext\r\n rgb = ndimg.imread(os.path.join(path, series_filename))\r\n # Initilaize the processor.\r\n type_processor = self.DayProcessor(rgb) if type is self.TYPE_DAY \\\r\n else self.NightProcessor(rgb)\r\n # Process the files.\r\n quit_flag = False\r\n process_count = 0\r\n history_n = int(self.HISTORY_FRAMES \/ \\\r\n (self._ui.history_frame_count - 1))\r\n for suffix in series_suffixes:\r\n # Process the next file.\r\n series_filename = series_name + '_' + str(suffix) + file_ext\r\n print \"Processing file {0}.\".format(series_filename)\r\n rgb = ndimg.imread(os.path.join(path, series_filename))\r\n sample_raw, display_img = type_processor.next(rgb)\r\n self._sample_exp_filter = \\\r\n ((1. 
- self._filter_exp) * self._sample_exp_filter) + \\\r\n (self._filter_exp * sample_raw)\r\n # Put new samples on queues.\r\n bound_queue_push(sample_raw,\r\n self._history_raw, self.HISTORY_FRAMES)\r\n bound_queue_push(self._sample_exp_filter,\r\n self._history_filtered, self.HISTORY_FRAMES)\r\n # Update UI.\r\n self._max_sample = max(self._max_sample,\r\n self._sample_exp_filter * 1.1)\r\n ybound = (0, self._max_sample)\r\n plot_img = self.plot_history(self._history_raw,\r\n self._history_filtered,\r\n ybound)\r\n self._ui.set_main_video_frame(display_img)\r\n self._ui.set_plot(plot_img)\r\n # Space history frames evenly over interval.\r\n if 0 == (process_count % history_n):\r\n self._ui.push_history_frame(display_img)\r\n process_count = process_count + 1\r\n # Show UI.\r\n cv2.imshow(\"Phone Display\", self._ui.ui_image)\r\n key = cv2.waitKey(1)\r\n # Catch escape or 'q' or 'Q':\r\n if QUIT_KEY_CODES.count(key) > 0:\r\n quit_flag = True\r\n break\r\n # Catch spacebar, 'p' or 'P':\r\n if PAUSE_KEY_CODES.count(key) > 0:\r\n pause()\r\n\r\n # Cleanup GUI on complete.\r\n if not quit_flag:\r\n cv2.waitKey(-1)\r\n cv2.destroyAllWindows()\r\n\r\n @staticmethod\r\n def plot_history(raw, filtered, ybound):\r\n \"\"\" Make plot of raw and history and return as image. \"\"\"\r\n\r\n p = pylab.subplot('111')\r\n p.clear()\r\n p.figure.set_size_inches(4, 3);\r\n p.plot(raw, '.r')\r\n p.plot(filtered, '-b')\r\n p.axes.set_ybound(ybound)\r\n p.figure.canvas.draw()\r\n buf = np.fromstring(p.figure.canvas.tostring_rgb(), dtype='uint8')\r\n h, w = p.figure.canvas.get_width_height()\r\n buf.shape = (w, h, 3)\r\n return buf\r\n\r\n\r\nclass PhoneDisplay(object):\r\n\r\n # Dictionary of UI resources.\r\n RESOURCES = {\r\n # Name of the phone UI image.\r\n \"UI_BASE\": \"ui_base.jpg\"\r\n }\r\n # Coordinates for the phone image display area.\r\n UI_LAYOUT = {\r\n \"MARGIN\": 15,\r\n \"DISPLAY_RECT\": { \"UpperLeft\": (255, 59), \"LowerRight\": (1106, 596) },\r\n \"MAIN_VIDEO_SIZE\": (360, 480),\r\n \"PLOT_SIZE\": (240, 320),\r\n \"HISTORY_FRAME_COUNT\": 5,\r\n \"CLEAR_COLOR\": np.array([60, 85, 45])\r\n }\r\n\r\n def __init__(self):\r\n \"\"\" Setup phone UI. 
\"\"\"\r\n\r\n # Load UI base resource and set slice of display area.\r\n self._ui_base = ndimg.imread(self.RESOURCES[\"UI_BASE\"])\r\n up_lt = self.UI_LAYOUT[\"DISPLAY_RECT\"][\"UpperLeft\"]\r\n lw_rt = self.UI_LAYOUT[\"DISPLAY_RECT\"][\"LowerRight\"]\r\n self._ui_display_area = self._ui_base[up_lt[1]:lw_rt[1] + 1,\r\n up_lt[0]:lw_rt[0] + 1]\r\n self._ui_display_area[:,:] = self.UI_LAYOUT[\"CLEAR_COLOR\"]\r\n self._ui_display_area_size = self._ui_display_area.shape[:2]\r\n self._ui_main_video_size = self.UI_LAYOUT[\"MAIN_VIDEO_SIZE\"]\r\n self._plot_size = self.UI_LAYOUT[\"PLOT_SIZE\"]\r\n\r\n margin = self.UI_LAYOUT[\"MARGIN\"]\r\n # Get main video frame area.\r\n vid_frm_x1 = self._ui_display_area_size[1] - margin\r\n vid_frm_x0 = vid_frm_x1 - self._ui_main_video_size[1]\r\n vid_frm_y0 = margin\r\n vid_frm_y1 = vid_frm_y0 + self._ui_main_video_size[0]\r\n self._ui_main_video_frame = self._ui_display_area[vid_frm_y0:\r\n vid_frm_y1,\r\n vid_frm_x0:\r\n vid_frm_x1]\r\n # Get plot area.\r\n plt_frm_x0 = margin\r\n plt_frm_x1 = plt_frm_x0 + self._plot_size[1]\r\n plt_frm_y0 = margin\r\n plt_frm_y1 = plt_frm_y0 + self._plot_size[0]\r\n self._ui_plot_frame = self._ui_display_area[plt_frm_y0: plt_frm_y1,\r\n plt_frm_x0: plt_frm_x1]\r\n # Compute history frame areas.\r\n his_frm_count = self.UI_LAYOUT[\"HISTORY_FRAME_COUNT\"]\r\n his_frm_wid = int((self._ui_display_area_size[1] -\r\n ((his_frm_count + 1) * margin)) \/ his_frm_count)\r\n his_frm_ht = int((3.\/4.) * his_frm_wid)\r\n self._ui_history_frame_size = (his_frm_ht, his_frm_wid)\r\n his_frm_y0 = (2 * margin) + self._ui_main_video_size[0]\r\n his_frm_x0_fn = lambda n: margin + ((margin + his_frm_wid) * n)\r\n his_frm_x1_fn = lambda n: (margin + his_frm_wid) * (n + 1)\r\n self._ui_history_frames = map(\r\n lambda n: self._ui_display_area[\r\n his_frm_y0: his_frm_y0 + his_frm_ht,\r\n his_frm_x0_fn(n):his_frm_x1_fn(n)],\r\n range(self.UI_LAYOUT[\"HISTORY_FRAME_COUNT\"]))\r\n\r\n def clear_display_area(self, color=np.array([0, 0, 0])):\r\n \"\"\" Clear UI base display area to given color. \"\"\"\r\n\r\n self._ui_display_area[:,:] = color\r\n\r\n def set_main_video_frame(self, frame):\r\n \"\"\" Set the main video frame in the UI layout. \"\"\"\r\n\r\n h, w = self._ui_main_video_size\r\n img = np.array(ImageOps.fit(PIL.Image.fromarray(frame), (w, h)))\r\n self._ui_main_video_frame[:,:] = img\r\n\r\n def push_history_frame(self, frame):\r\n \"\"\" Push a frame to the top of the history images. \"\"\"\r\n\r\n # Shift back.\r\n for n in range(len(self._ui_history_frames) - 1):\r\n self._ui_history_frames[n][:,:] = self._ui_history_frames[n+1][:,:]\r\n # Update.\r\n h, w = self._ui_history_frame_size\r\n img = np.array(ImageOps.fit(PIL.Image.fromarray(frame), (w, h)))\r\n self._ui_history_frames[-1][:,:] = img\r\n\r\n def set_plot(self, plot):\r\n \"\"\" Set the plot image in the UI layout. \"\"\"\r\n\r\n h, w = self._plot_size\r\n img = np.array(ImageOps.fit(PIL.Image.fromarray(plot), (w, h)))\r\n self._ui_plot_frame[:,:] = img\r\n\r\n def clear(self):\r\n \"\"\" Reset the UI. 
\"\"\"\r\n\r\n ui_base = ndimg.imread(self.RESOURCES[\"UI_BASE\"])\r\n self._ui_base[:,:,:] = ui_base\r\n self._ui_display_area[:,:] = self.UI_LAYOUT[\"CLEAR_COLOR\"]\r\n\r\n def get_ui_image(self):\r\n return self._ui_base\r\n def get_history_frame_count(self):\r\n return len(self._ui_history_frames)\r\n\r\n ui_image = property(get_ui_image, doc=\"The main display image.\")\r\n history_frame_count = property(get_history_frame_count,\r\n doc=\"Count of history frames.\")\r\n\r\n\r\ndef main():\r\n # Parse arguments for\r\n # PhoneDemo.run_sequence(type, filepath, seq_range, filter_exp):\r\n parser = argparse.ArgumentParser(description='Run a UI demo of ' +\r\n 'image-based traffic ' +\r\n 'analysis algorithms.')\r\n parser.add_argument('SEQUENCE_TYPE', type=str, choices=('D', 'N'),\r\n help='day or night image type')\r\n parser.add_argument('SEQUENCE_IMAGE_PATH', type=str,\r\n help='path to an image within sequence')\r\n parser.add_argument('-r', '--range', default=None, nargs=2, type=int,\r\n help='range of frames to process as in \\'-r 0 100\\'')\r\n parser.add_argument('-e', '--filter_exp', default=None, type=float,\r\n help='exponential filter strength')\r\n args = parser.parse_args(sys.argv[1:])\r\n demo = PhoneDemo()\r\n demo.run_sequence(PhoneDemo.TYPE_DAY if 'D' == args.SEQUENCE_TYPE\r\n else PhoneDemo.TYPE_NIGHT,\r\n args.SEQUENCE_IMAGE_PATH, args.range, args.filter_exp)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_499","text":"loicmiller\/policy-analysis0\n###############################################################################\n# Imports\n\nimport sys\nimport argparse # Argument parser\n\nfrom mgtoolkit.library import *\nimport networkx as nx\n\nfrom sympy import to_dnf\nfrom sympy.parsing.sympy_parser import parse_expr\n\nimport random # Random indices of list\n\nimport os\n\n\n###############################################################################\n# General utility\n\n# Exit the program\ndef terminate_app(code):\n print(\"Exiting program...\")\n sys.exit(code)\n\n\n###############################################################################\n# Argument parser\n\nclass Range(object):\n def __init__(self, start, end):\n self.start = start\n self.end = end\n def __eq__(self, other):\n return self.start <= other <= self.end\n\ndef get_parser():\n # Get parser for command line arguments\n parser = argparse.ArgumentParser(description=\"Workflow specification to Rego\", formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--version\", action=\"version\", version='%(prog)s 1.0')\n parser.add_argument(\"-v\", \"--verbose\", action=\"count\", default=0, help=\"increase output verbosity\")\n parser.add_argument(\"workflow\", type=str, metavar=\"FILE\", help=\"workflow to generate policy from\")\n parser.add_argument(\"-e\", \"--error-rate\", type=float, choices=[Range(0.0, 1.0)], metavar=\"ERROR_RATE\", default=0.0, help=\"rate of errors in the generated workflow\")\n\n return parser\n\n\n###############################################################################\n# Functions\n\n\n###############################################################################\n# Main\n\ndef main(verbose, workflow):\n global glob_verbose\n glob_verbose = verbose\n\n print(\"\\n\\n###############################################################################\")\n print(\"Loading workflow specification from file\")\n 
print(\"###############################################################################\")\n\n with open(workflow, 'r') as workflow_file:\n workflow_edges = workflow_file.readlines()\n workflow_edges = [(set(src.lstrip('{').rstrip('}').split(', ')), set(dst.lstrip('{').rstrip('}').split(', ')), attributes) for src, dst, attributes in (edge.rstrip().split(';') for edge in workflow_edges)]\n\n if glob_verbose >= 1:\n print(\"Edges\")\n for edge in workflow_edges:\n print(edge)\n\n\n print(\"\\n\\n###############################################################################\")\n print(\"Turning workflow graph into metagraph\")\n print(\"###############################################################################\")\n\n workflow_variables_set = set()\n workflow_propositions_set = set()\n workflow_edges_set = []\n\n # Simplify boolean expressions (Use simpy) https:\/\/stackoverflow.com\/questions\/52416781\/how-to-simplify-these-boolean-statements\n for src, dst, attributes in workflow_edges:\n if glob_verbose >= 2:\n print(\"Edge: {} {} {}\".format(src, dst, attributes))\n\n # Add src and dst to variable set if they are not present yet\n workflow_variables_set.update(src)\n workflow_variables_set.update(dst)\n\n\n # Parse policy into expression for simpy\n if attributes:\n edge_policy = parse_expr(attributes)\n if glob_verbose >= 2:\n print(\"Edge policy: {}\".format(edge_policy))\n\n # Convert policy to Disjunctive Normal Form (DNF)\n # I think we don't want to simplify the expression for the comparison\n # since it is not simplified in the metagraph generated from the policy\n # https:\/\/en.wikipedia.org\/wiki\/Disjunctive_normal_form\n # https:\/\/docs.sympy.org\/latest\/modules\/logic.html\n # https:\/\/docs.sympy.org\/latest\/modules\/parsing.html\n edge_policy_dnf = to_dnf(edge_policy, simplify=False)\n if glob_verbose >= 2:\n print(\"DNF: {}\".format(edge_policy_dnf))\n\n\n # Metagraph nodes\n # Each element in metagraph_nodes is the proposition part of a node in the metagraph\n metagraph_nodes = str(edge_policy_dnf).split(\"|\")\n if glob_verbose >= 2:\n print(\"Metagraph nodes: {}\".format(metagraph_nodes))\n\n # Policy elements in nodes\n # Each element is a part of the propositions_set\n for node_propositions in metagraph_nodes:\n policy_elements = node_propositions.split('&')\n policy_elements = [policy_element.strip().lstrip('(').rstrip(')') for policy_element in policy_elements] # Remove leading and trailing whitespaces, plus leading and trailing parentheses\n\n # Add policy_elements to propositions_set\n for index, policy_element in enumerate(policy_elements):\n # Add ')' back for equalities\n if 'Eq' in policy_element:\n policy_element = policy_element + ')'\n policy_elements[index] = policy_elements[index] + ')'\n workflow_propositions_set.add(policy_element)\n workflow_edges_set.append(Edge(src, dst, attributes=policy_elements))\n\n if glob_verbose >= 2:\n print(\"Policy elements: {}\".format(policy_elements))\n\n if glob_verbose >= 2:\n print(\"\\n\")\n else:\n workflow_edges_set.append(Edge(src, dst, attributes=\"\"))\n\n\n if glob_verbose >= 4:\n print(\"Variables set: {}\".format(workflow_variables_set))\n print(\"Propositions set: {}\\n\".format(workflow_propositions_set))\n print(\"Metagraph edges: {}\\n\".format(workflow_edges_set))\n\n # Create workflow metagraph\n print(\"Creating workflow metagraph\")\n workflow_metagraph = ConditionalMetagraph(workflow_variables_set, workflow_propositions_set)\n workflow_metagraph.add_edges_from(workflow_edges_set)\n\n if 
glob_verbose >= 4:\n print(\"Policy metagraph\\n{}\\n\".format(repr(workflow_metagraph)))\n\n if glob_verbose >= 4:\n print(\"Workflow metagraph edges\")\n print(\"{} {}\".format(\"INVERTEX\", \"OUTVERTEX\"))\n for edge in workflow_metagraph.edges:\n print(\"{} {}\".format(list(edge.invertex), list(edge.outvertex)))\n\n return workflow_metagraph\n\n\n\nif __name__ == '__main__':\n print(\"\\n\\n###############################################################################\")\n print(\"Getting arguments\")\n print(\"###############################################################################\")\n\n parser = get_parser() # Create a parser\n args = parser.parse_args() # Parse arguments\n print(args)\n\n # Call main\n main(args.verbose, args.workflow)\n\n terminate_app(0)\n\n\n###############################################################################\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_500","text":"1-10\n################################################################################\n#\n# Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors\n#\n# This file is a part of the MadGraph5_aMC@NLO project, an application which \n# automatically generates Feynman diagrams and matrix elements for arbitrary\n# high-energy processes in the Standard Model and beyond.\n#\n# It is subject to the MadGraph5_aMC@NLO license which should accompany this \n# distribution.\n#\n# For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch\n#\n################################################################################\nfrom madgraph.core import base_objects\n\"\"\"Methods and classes to import v4 format model files.\"\"\"\n\nimport fractions\nimport logging\nimport os\nimport re\n\nfrom madgraph import InvalidCmd, MG4DIR, ReadWrite\n\nimport madgraph.core.color_algebra as color\nimport madgraph.iolibs.files as files\nimport madgraph.iolibs.save_load_object as save_load_object\n\nimport madgraph.various.misc as misc\n\nfrom madgraph.core.base_objects import Particle, ParticleList\nfrom madgraph.core.base_objects import Interaction, InteractionList\n\nlogger = logging.getLogger('madgraph.import_v4')\n\n#===============================================================================\n# import_v4model\n#===============================================================================\ndef import_model(model_path, mgme_dir = MG4DIR, absolute=True):\n \"\"\"create a model from a MG4 model directory.\"\"\"\n\n # Check for a valid directory\n model_path_old = model_path\n model_path = find_model_path(model_path, mgme_dir, absolute)\n\n files_list = [os.path.join(model_path, 'particles.dat'),\\\n os.path.join(model_path, 'interactions.dat')]\n \n for filepath in files_list:\n if not os.path.isfile(filepath):\n if not absolute:\n raise InvalidCmd, \"%s directory is not a valid v4 model\" % \\\n (model_path)\n else:\n return import_model(model_path_old, mgme_dir, False)\n \n # use pickle files if defined\n if files.is_uptodate(os.path.join(model_path, 'model.pkl'), files_list):\n model = save_load_object.load_from_file( \\\n os.path.join(model_path, 'model.pkl'))\n if model.has_key('version_tag') and model.get('version_tag') == os.path.realpath(model_path) + str(misc.get_pkg_info()):\n return model, model_path\n\n model = base_objects.Model() \n model.set('particles',files.read_from_file( \\\n os.path.join(model_path, 'particles.dat'),\n read_particles_v4))\n \n model.set('interactions',files.read_from_file( \\\n 
os.path.join(model_path, 'interactions.dat'),\n read_interactions_v4,\n model['particles']))\n \n model.set('name', os.path.split(model_path)[-1]) \n\n # save in a pickle files to fasten future usage\n if ReadWrite:\n try:\n save_load_object.save_to_file(os.path.join(model_path, 'model.pkl'), model)\n except Exception:\n logger.warning(\"fail to write %s. This is perfectly fine will just prevent speed boost in future load of this model\" %\\\n os.path.join(model_path, 'model.pkl'))\n return model, model_path \n\n \ndef find_model_path(model_path, mgme_dir, absolute=True):\n \"\"\"Find the path to the model, starting with path model_path.\"\"\"\n\n # treat simple case (model_path is a valid path\/ mgme_dir doesn't exist)\n if os.path.isdir(model_path) and absolute:\n return model_path\n elif mgme_dir and os.path.isdir(os.path.join(mgme_dir, 'models',\n model_path + \"_v4\")):\n model_path = os.path.join(mgme_dir, 'models', model_path + \"_v4\")\n elif mgme_dir and os.path.isdir(os.path.join(mgme_dir, 'Models', model_path)):\n model_path = os.path.join(mgme_dir, 'Models', model_path)\n elif not mgme_dir:\n error_text = \"Path %s is not a valid pathname\\n\" % model_path\n error_text += \"and no MG_ME installation detected in order to search in Models\"\n raise InvalidCmd(error_text)\n\n # Try to build the valid path\n path_possibilities = [os.path.join(mgme_dir, 'Models', model_path),\n os.path.join(mgme_dir, 'models', model_path + \"_v4\"), \n os.path.join(mgme_dir, 'models', model_path) \n ]\n\n for path in path_possibilities:\n if os.path.exists(path) and \\\n not os.path.exists(os.path.join(path, 'particles.py')):\n return path\n \n # No valid path found\n raise InvalidCmd(\"Path %s is not a valid pathname\" % model_path)\n\n#===============================================================================\n# read_particles_v4\n#===============================================================================\ndef read_particles_v4(fsock):\n \"\"\"Read a list of particle from stream fsock, using the old v4 format\"\"\"\n\n spin_equiv = {'s': 1,\n 'f': 2,\n 'v': 3,\n 't': 5}\n\n color_equiv = {'s': 1,\n 't': 3,\n '6': 6,\n 'o': 8}\n\n line_equiv = {'d': 'dashed',\n 's': 'straight',\n 'w': 'wavy',\n 'c': 'curly'}\n\n logger.info('load particles')\n\n mypartlist = ParticleList()\n\n for line in fsock:\n mypart = Particle()\n\n if line.find(\"MULTIPARTICLES\") != -1:\n break # stop scanning if old MULTIPARTICLES tag found\n\n line = line.split(\"#\", 2)[0] # remove any comment\n line = line.strip() # makes the string clean\n\n if line != \"\":\n values = line.split()\n if len(values) != 9:\n # Not the right number tags on the line\n raise ValueError, \\\n \"Unvalid initialization string:\" + line\n else:\n try:\n mypart.set('name', values[0].lower())\n mypart.set('antiname', values[1].lower())\n\n if mypart['name'] == mypart['antiname']:\n mypart['self_antipart'] = True\n\n if values[2].lower() in spin_equiv.keys():\n mypart.set('spin',\n spin_equiv[values[2].lower()])\n else:\n raise ValueError, \"Invalid spin %s\" % \\\n values[2]\n\n if values[3].lower() in line_equiv.keys():\n mypart.set('line',\n line_equiv[values[3].lower()])\n else:\n raise ValueError, \\\n \"Invalid line type %s\" % values[3]\n\n mypart.set(\"mass\", values[4])\n mypart.set(\"width\", values[5])\n\n if values[6].lower() in color_equiv.keys():\n mypart.set('color',\n color_equiv[values[6].lower()])\n else:\n raise ValueError, \\\n \"Invalid color rep %s\" % values[6]\n\n #mypart.set(\"texname\", values[7])\n 
mypart.set(\"pdg_code\", int(values[8]))\n\n mypart.set('charge', 0.)\n #mypart.set('antitexname', mypart.get('texname'))\n\n except (Particle.PhysicsObjectError, ValueError), why:\n logger.warning(\"Warning: %s, particle ignored\" % why)\n else:\n mypartlist.append(mypart)\n\n return mypartlist\n\n\n#===============================================================================\n# read_interactions_v4\n#===============================================================================\ndef read_interactions_v4(fsock, ref_part_list):\n \"\"\"Read a list of interactions from stream fsock, using the old v4 format.\n Requires a ParticleList object as an input to recognize particle names.\"\"\"\n\n logger.info('load interactions')\n myinterlist = InteractionList()\n\n if not isinstance(ref_part_list, ParticleList):\n raise ValueError, \\\n \"Object %s is not a valid ParticleList\" % repr(ref_part_list)\n\n for line in fsock:\n myinter = Interaction()\n\n line = line.split(\"#\", 2)[0] # remove any comment\n line = line.strip() # makes the string clean\n\n if line != \"\": # skip blank\n values = line.split()\n part_list = ParticleList()\n\n try:\n for str_name in values:\n curr_part = ref_part_list.get_copy(str_name.lower())\n if isinstance(curr_part, Particle):\n # Look at the total number of strings, stop if \n # anyway not enough, required if a variable name \n # corresponds to a particle! (eg G)\n if len(values) >= 2 * len(part_list) + 1:\n part_list.append(curr_part)\n else: break\n # also stops if string does not correspond to \n # a particle name\n else: break\n\n if len(part_list) < 3:\n raise Interaction.PhysicsObjectError, \\\n \"Vertex with less than 3 known particles found.\"\n\n # Flip part\/antipart of first part for FFV, FFS, FFT vertices\n # according to v4 convention\n spin_array = [part['spin'] for part in part_list]\n if spin_array[:2] == [2, 2] and \\\n not part_list[0].get('self_antipart'):\n part_list[0]['is_part'] = not part_list[0]['is_part']\n\n myinter.set('particles', part_list)\n\n # Give color structure\n # Order particles according to color\n # Don't consider singlets\n color_parts = sorted(enumerate(part_list), lambda p1, p2:\\\n p1[1].get_color() - p2[1].get_color())\n color_ind = [(i, part.get_color()) for i, part in \\\n color_parts if part.get_color() !=1]\n colors = [c for i,c in color_ind]\n ind = [i for i,c in color_ind]\n\n # Set color empty by default\n myinter.set('color', [])\n if not colors:\n # All color singlets - set empty\n pass\n elif colors == [-3, 3]:\n # triplet-triplet-singlet coupling\n myinter.set('color', [color.ColorString(\\\n [color.T(ind[1], ind[0])])])\n elif colors == [8, 8]:\n # octet-octet-singlet coupling\n my_cs = color.ColorString(\\\n [color.Tr(ind[0], ind[1])])\n my_cs.coeff = fractions.Fraction(2)\n myinter.set('color', [my_cs])\n elif colors == [-3, 3, 8]:\n # triplet-triplet-octet coupling\n myinter.set('color', [color.ColorString(\\\n [color.T(ind[2], ind[1], ind[0])])])\n elif colors == [8, 8, 8]:\n # Triple glue coupling\n my_color_string = color.ColorString(\\\n [color.f(ind[0], ind[1], ind[2])])\n my_color_string.is_imaginary = True\n myinter.set('color', [my_color_string])\n elif colors == [-3, 3, 8, 8]:\n my_cs1 = color.ColorString(\\\n [color.T(ind[2], ind[3], ind[1], ind[0])])\n my_cs2 = color.ColorString(\\\n [color.T(ind[3], ind[2], ind[1], ind[0])])\n myinter.set('color', [my_cs1, my_cs2])\n elif colors == [8, 8, 8, 8]:\n # 4-glue coupling\n cs1 = color.ColorString([color.f(0, 1, -1),\n color.f(2, 3, -1)])\n 
#cs1.coeff = fractions.Fraction(-1)\n cs2 = color.ColorString([color.f(2, 0, -1),\n color.f(1, 3, -1)])\n #cs2.coeff = fractions.Fraction(-1)\n cs3 = color.ColorString([color.f(1, 2, -1),\n color.f(0, 3, -1)])\n #cs3.coeff = fractions.Fraction(-1)\n myinter.set('color', [cs1, cs2, cs3])\n# The following line are expected to be correct but not physical validations\n# have been performed. So we keep it commented for the moment. \n# elif colors == [3, 3, 3]:\n# my_color_string = color.ColorString(\\\n# [color.Epsilon(ind[0], ind[1], ind[2])])\n# myinter.set('color', [my_color_string]) \n# elif colors == [-3, -3, -3]:\n# my_color_string = color.ColorString(\\\n# [color.EpsilonBar(ind[0], ind[1], ind[2])])\n# myinter.set('color', [my_color_string])\n else:\n logger.warning(\\\n \"Color combination %s not yet implemented.\" % \\\n repr(colors))\n\n # Set the Lorentz structure. Default for 3-particle\n # vertices is empty string, for 4-particle pair of\n # empty strings\n myinter.set('lorentz', [''])\n\n pdg_codes = sorted([part.get_pdg_code() for part in part_list])\n\n # WWWW and WWVV\n if pdg_codes == [-24, -24, 24, 24]:\n myinter.set('lorentz', ['WWWW'])\n elif spin_array == [3, 3, 3, 3] and \\\n 24 in pdg_codes and - 24 in pdg_codes:\n myinter.set('lorentz', ['WWVV'])\n\n # gggg\n if pdg_codes == [21, 21, 21, 21]:\n myinter.set('lorentz', ['gggg1', 'gggg2', 'gggg3'])\n\n # go-go-g\n # Using the special fvigox routine provides the minus\n # sign necessary for octet Majorana-vector interactions\n if spin_array == [2, 2, 3] and colors == [8, 8, 8] and \\\n part_list[0].get('self_antipart') and \\\n part_list[1].get('self_antipart'):\n myinter.set('lorentz', ['go'])\n\n # If extra flag, add this to Lorentz \n if len(values) > 3 * len(part_list) - 4:\n myinter.get('lorentz')[0] = \\\n myinter.get('lorentz')[0]\\\n + values[3 * len(part_list) - 4].upper()\n\n # Use the other strings to fill variable names and tags\n\n # Couplings: special treatment for 4-vertices, where MG4 used\n # two couplings, while MG5 only uses one (with the exception\n # of the 4g vertex, which needs special treatment)\n # DUM0 and DUM1 are used as placeholders by FR, corresponds to 1\n if len(part_list) == 3 or \\\n values[len(part_list) + 1] in ['DUM', 'DUM0', 'DUM1']:\n # We can just use the first coupling, since the second\n # is a dummy\n myinter.set('couplings', {(0, 0):values[len(part_list)]})\n if myinter.get('lorentz')[0] == 'WWWWN':\n # Should only use one Helas amplitude for electroweak\n # 4-vector vertices with FR. 
I choose W3W3NX.\n myinter.set('lorentz', ['WWVVN'])\n elif values[len(part_list)] in ['DUM', 'DUM0', 'DUM1']:\n # We can just use the second coupling, since the first\n # is a dummy\n myinter.set('couplings', {(0, 0):values[len(part_list)+1]})\n elif pdg_codes == [21, 21, 21, 21]:\n # gggg\n myinter.set('couplings', {(0, 0):values[len(part_list)],\n (1, 1):values[len(part_list)],\n (2, 2):values[len(part_list)]})\n elif myinter.get('lorentz')[0] == 'WWWW':\n # Need special treatment of v4 SM WWWW couplings since \n # MG5 can only have one coupling per Lorentz structure\n myinter.set('couplings', {(0, 0):\\\n 'sqrt(' + \n values[len(part_list)] + \\\n '**2+' + \\\n values[len(part_list) + 1] + \\\n '**2)'})\n else: #if myinter.get('lorentz')[0] == 'WWVV':\n # Need special treatment of v4 SM WWVV couplings since \n # MG5 can only have one coupling per Lorentz structure\n myinter.set('couplings', {(0, 0):values[len(part_list)] + \\\n '*' + \\\n values[len(part_list) + 1]})\n #raise Interaction.PhysicsObjectError, \\\n # \"Only FR-style 4-vertices implemented.\"\n \n # SPECIAL TREATMENT OF COLOR\n # g g sq sq (two different color structures, same Lorentz)\n if spin_array == [3, 3, 1, 1] and colors == [-3, 3, 8, 8]:\n myinter.set('couplings', {(0, 0):values[len(part_list)],\n (1, 0):values[len(part_list)]})\n\n # Coupling orders - needs to be fixed\n order_list = values[2 * len(part_list) - 2: \\\n 3 * len(part_list) - 4]\n\n def count_duplicates_in_list(dupedlist):\n \"\"\"return a dictionary with key the element of dupeList and\n with value the number of times that they are in this list\"\"\"\n unique_set = set(item for item in dupedlist)\n ret_dict = {}\n for item in unique_set:\n ret_dict[item] = dupedlist.count(item)\n return ret_dict\n\n myinter.set('orders', count_duplicates_in_list(order_list))\n\n myinter.set('id', len(myinterlist) + 1)\n\n myinterlist.append(myinter)\n\n except Interaction.PhysicsObjectError, why:\n logger.error(\"Interaction ignored: %s\" % why)\n\n return myinterlist\n\n#===============================================================================\n# read_proc_card.dat (mg4 format)\n#===============================================================================\ndef read_proc_card_v4(fsock):\n \"\"\"A simple function reading the files in fsock and returning a \n ProcCardv4Reader object. 
This function authorize to have the same syntax as\n for the other files treatment\"\"\"\n\n reader = ProcCardv4Reader(fsock)\n return reader\n\nclass ParticleError(InvalidCmd):\n \"\"\" A class to carch the error\"\"\"\n pass\n\nclass WrongFileFormat(InvalidCmd): \n \"\"\"A specific class error for wrong V4 proc_card\"\"\"\n pass\n\nclass ProcCardv4Reader(object):\n \"\"\"read a proc_card.dat in the mg4 format and creates the equivalent routine\n for mg5\"\"\"\n \n #tag in the proc_card.dat which split the proc_card content\n \n # line pattern (remove comment at the end of the line)\n pat_line = re.compile(r\"\"\"^\\s*(?P[^\\#]*?)\\s*(\\#|$)\"\"\", re.DOTALL)\n \n def __init__(self, fsock):\n \"\"\"init the variable\"\"\"\n\n self.process = [] # List of ProcessInfo\n self.model = \"\" # name of the model\n self.multipart = [] # list of the mg4 definition of multiparticle\n self.particles_name = set() # set of authorize particle name\n self.couplings_name = set() # set of mandatory couplings\n self.process_path = os.path.realpath(os.path.join(\n os.path.dirname(fsock.name), os.pardir))\n \n # Reading the files and store the information in string format.\n self.analyze_v4_proc_card(fsock)\n\n \n def analyze_v4_proc_card(self, fsock):\n \"\"\"read the file and fullfill the variable with mg4 line\"\"\"\n \n proc_card = fsock.read()\n\n # store process information\n process_open = False\n \n process_re = re.search(\\\n r\"^# Begin\\s+PROCESS.*?^(?P.*)^# End\\s+PROCESS\",\n proc_card, re.MULTILINE|re.DOTALL)\n\n if not process_re:\n raise WrongFileFormat('No valid Begin...End PROCESS tags')\n\n model_re = re.search(\\\n r\"^# Begin\\s+MODEL.*?^(?P.+?)(\\s+|$)^# End\\s+MODEL\",\n proc_card, re.MULTILINE|re.DOTALL)\n\n if not model_re:\n raise WrongFileFormat('No valid Begin...End MODEL tags')\n\n multiparticles_re = re.search(\\\n r\"^# Begin\\s+MULTIPARTICLES.*?^(?P.*)^# End\\s+MULTIPARTICLES\",\n proc_card, re.MULTILINE|re.DOTALL)\n\n if not multiparticles_re:\n raise WrongFileFormat('No valid Begin...End MULTIPARTICLES tags')\n\n process_lines = process_re.group('process').split('\\n')\n\n for line in process_lines:\n # an 'end_coup' stop the current process, \n # 'done' finish the list of process\n analyze_line = self.pat_line.search(line)\n if analyze_line:\n data = analyze_line.group('info') #skip the comment\n if not data:\n continue\n if not process_open and 'done' not in data:\n process_open = True\n self.process.append(ProcessInfo(data))\n elif 'end_coup' in data:\n process_open = False\n elif 'done' not in data:\n self.process[-1].add_coupling(data)\n \n self.model = model_re.group('model')\n \n multiparticles_lines = multiparticles_re.group('multiparticles').split('\\n')\n\n for line in multiparticles_lines:\n analyze_line = self.pat_line.search(line)\n if analyze_line:\n line = analyze_line.group('info') #skip the comment\n if not line:\n continue\n data = line.split()\n self.particles_name.add(data[0].lower())\n self.multipart.append(line)\n \n \n def extract_command_lines(self, model):\n \"\"\"Return the MG5 command line corresponding to this proc_card \n the MG5 command import model is skipped (since the model should be \n loaded -it is one of the argument-)\"\"\"\n \n # extract useful information of the model\n self.extract_info_from_model(model)\n \n # use the model information for the splitting in particles of the mg4\n #process line.\n for process in self.process:\n process.analyze_process(self.particles_name)\n \n #Now we are in position to write the lines call\n lines = [] 
\n #first write the lines associate to the multiparticls definition\n if self.multipart:\n lines.append('# Define multiparticle labels')\n for multipart in self.multipart:\n data = self.separate_particle(multipart, self.particles_name)\n lines.append('define ' + ' '.join(data))\n \n # secondly define the lines associate with diagram\n if self.process:\n lines.append('# Specify process(es) to run')\n for i, process in enumerate(self.process):\n if i == 0:\n lines.append('generate %s' % \\\n process.mg5_process_line(self.couplings_name))\n else:\n lines.append('add process %s' % \\\n process.mg5_process_line(self.couplings_name))\n \n #finally export the madevent output\n lines.append('# Output processes to MadEvent directory')\n lines.append('output -f')\n \n return lines\n \n \n def extract_info_from_model(self, model):\n \"\"\" creates the self.particles_name (list of all valid name)\n and self.couplings_name (list of all couplings)\"\"\"\n \n # add in self.particles_name (it contains normally the mulpart name \n #already) all the valid name of particle of the model \n for particle in model['particles']:\n self.particles_name.add(particle['name'])\n self.particles_name.add(particle['antiname'])\n\n # add in self.couplings_name the couplings name of the model\n for interaction in model['interactions']:\n for coupling in interaction['orders'].keys():\n self.couplings_name.add(coupling)\n\n \n @staticmethod\n def separate_particle(line, possible_str):\n \"\"\" for a list of concatanate variable return a list of particle name\"\"\"\n\n line = line.lower() # Particle name are not case sensitive\n out = [] # list of the particles\n # The procedure to find particles is the following\n # - check if the combination of 4 string form a valid particle name\n # if it is, move of 4 characters and check for the next particles.\n # if not try with 3, 2, 1 \n # if still not -> exit.\n \n pos = 0 # current starting position \n old_pos = -1 # check that we don't have infinite loop \n line += ' ' #add 4 blank for security\n while pos < len(line) - 4:\n #Check for infinite loop\n if pos == old_pos:\n logging.error('Invalid particle name: %s' % \\\n line[pos:pos + 4].rstrip())\n raise ParticleError('Invalid particle name %s' %\n line[pos:pos + 4].rstrip())\n old_pos = pos\n # check for pointless character\n if line[pos] in [' ', '\\n', '\\t']:\n pos += 1\n continue\n \n # try to find a match at 4(then 3\/2\/1) characters\n for i in range(4, 0, -1):\n if line[pos:pos + i] in possible_str:\n out.append(line[pos:pos + i])\n pos = pos + i\n break\n \n return out\n \nclass ProcessInfo(object):\n \"\"\"This is the basic object for storing process information\"\"\"\n \n def __init__(self, line):\n \"\"\"Initialize information\"\"\"\n \n self.particles = [] # list tuple (level, particle)\n self.couplings = {} # coupling -> max_order\n self.decays = [] # ProcessInfo of the decays\n self.tag = '' # tag of the process\n self.s_forbid = [] # list of particles forbids in s channel\n self.forbid = [] # list of particles forbids\n self.line = line # initialization line\n \n self.is_mg5_valid = False\n #some shortcut\n self.separate_particle = ProcCardv4Reader.separate_particle\n \n def analyze_process(self, particles_name):\n \"\"\"Add a line information\n two format are possible (decay chains or not)\n pp>h>WWj \/a $u @3\n pp>(h>WW)j \/a $u @3\n \"\"\"\n\n line = self.line\n #extract the tag\n if '@' in line:\n split = line.split('@')\n line = split[0]\n self.tag = split[1]\n \n\n # check if we have a MG5 format\n if 
'\/mg5\/' in line:\n self.line = line.replace('\/mg5\/','')\n self.is_mg5_valid = True\n return\n if ',' in line or '=' in line:\n self.is_mg5_valid = True\n return\n\n # extract (S-)forbidden particle\n pos_forbid = line.find('\/')\n pos_sforbid = line.find('$')\n \n # Select the restrictions (pos is -1 if not defined)\n #and remove the restrictions from the line\n if pos_forbid != -1 and pos_sforbid != -1:\n if pos_forbid > pos_sforbid :\n self.forbid = self.separate_particle(line[pos_forbid + 1:], \\\n particles_name)\n self.s_forbid = self.separate_particle(\\\n line[pos_sforbid + 1:pos_forbid], particles_name)\n line = line[:min(pos_forbid, pos_sforbid)]\n else:\n self.forbid = self.separate_particle(\\\n line[pos_forbid + 1:pos_sforbid], particles_name)\n self.s_forbid = self.separate_particle(line[pos_sforbid + 1:], \\\n particles_name)\n line = line[:min(pos_forbid, pos_sforbid)]\n # Same but if they are no S-forbidden particles\n elif pos_forbid != -1:\n self.forbid = self.separate_particle(line[pos_forbid + 1:], \\\n particles_name)\n line = line[:pos_forbid]\n # Same but if they are no forbidden particles\n elif pos_sforbid != -1:\n self.s_forbid = self.separate_particle(line[pos_sforbid + 1:], \\\n particles_name)\n line = line[:pos_sforbid]\n \n # Deal with decay chains, returns lines whitout the decay (and treat \n #the different decays.\n if '(' in line:\n line = self.treat_decay_chain(line, particles_name)\n \n #define the level of each particle\n level_content = line.split('>')\n for level, data in enumerate(level_content):\n particles = self.separate_particle(data, particles_name)\n if particles:\n [self.particles.append((level, name)) for name in particles]\n \n \n def treat_decay_chain(self, line, particles_name):\n \"\"\"Split the information of the decays into a tree of ProcessInfo.\"\"\"\n \n level = 0 #depth of the decay chain\n out_line = '' # core process\n for character in line:\n if character == '(':\n level += 1\n if level == 1:\n decay_line = \"\" # initialize a new decay info\n else:\n decay_line += '('\n continue\n elif character == ')':\n level -= 1\n if level == 0: #store the information\n self.decays.append(ProcessInfo(decay_line))\n self.decays[-1].add_restrictions(self.forbid, self.s_forbid,\n None)\n self.decays[-1].analyze_process(particles_name)\n out_line += decay_line[:decay_line.find('>')]\n else:\n decay_line += ')'\n continue\n elif level:\n decay_line += character\n else:\n out_line += character\n return out_line\n \n def add_coupling(self, line):\n \"\"\"Add the coupling information to the process\"\"\"\n data = line.split('=')\n self.couplings[data[0]] = int(data[1])\n \n \n def add_restrictions(self, forbid, s_forbid, couplings):\n \"\"\"Associate some restriction to this diagram\"\"\"\n \n self.forbid = forbid\n self.s_forbid = s_forbid\n self.couplings = couplings\n\n def mg5_process_line(self, model_coupling):\n \"\"\"Return a valid mg5 format for this process \"\"\"\n \n if self.is_mg5_valid:\n return self.line\n \n text = ''\n # Write the process\n cur_level = 0\n for level, particle in self.particles:\n if level > cur_level:\n text += '> '\n cur_level += 1\n text += '%s ' % particle\n\n # Write the constraints\n if self.s_forbid:\n text += '$ ' + ' '.join(self.s_forbid) + ' '\n if self.forbid:\n text += '\/ ' + ' '.join(self.forbid) + ' '\n\n #treat decay_chains\n for decay in self.decays:\n decay_text = decay.mg5_process_line(model_coupling)\n if ',' in decay_text:\n text = text.rstrip() + ', (%s) ' % decay_text.strip()\n else:\n text 
= text.rstrip() + ', %s ' % decay_text.strip()\n \n # write the tag\n if self.tag:\n text += '@%s ' % self.tag\n\n if self.couplings:\n if not self.tag:\n text += '@0 '\n #write the rules associate to the couplings\n text += self.mg5_couplings_line(model_coupling, len(self.particles))\n \n return text.rstrip()\n \n def mg5_couplings_line(self, model_coupling, nb_part):\n \"\"\"Return the assignment of coupling for this process\"\"\"\n\n out = ''\n for coupling in model_coupling:\n if self.couplings.has_key(coupling):\n # Need coupling for all cases, since might be decay chain\n out += '%s=%s ' % (coupling, self.couplings[coupling])\n else:\n # if not define put to zero (mg4 default)\n out += '%s=0 ' % coupling\n \n return out \n \n \n \n \n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_501","text":"1-10\nimport sys, time\nimport multiprocessing as mp\nimport numpy as np\nimport scipy as sp\nimport visualizer as vz\nimport mock_net as mn\n\nclass NVM:\n def __init__(self, coding, network):\n self.coding = coding\n self.network = network\n self.visualizing = False\n # Encode layer names and constants\n for symbol in self.network.get_layer_names():\n self.coding.encode(symbol)\n def __str__(self):\n pattern_list = self.network.list_patterns()\n vmstr = ''\n for (layer_name, pattern) in pattern_list:\n if self.decode(pattern)=='': continue\n vmstr += '%s:%s;'%(layer_name, self.decode(pattern))\n return vmstr\n def encode(self,human_readable):\n return self.coding.encode(human_readable)\n def decode(self, machine_readable):\n return self.coding.decode(machine_readable)\n def tick(self):\n # network update\n self.network.tick()\n # answer any visualizer request\n if self.visualizing:\n if self.viz_pipe.poll():\n # flush request\n self.viz_pipe.recv()\n # respond with data\n self.send_viz_data()\n def send_viz_data(self, down_sample=2):\n \"\"\"\n Protocol:\n <# layers>, , , , , , , ...\n \"\"\"\n if not self.visualizing: return\n pattern_list = self.network.list_patterns()\n self.viz_pipe.send(len(pattern_list))\n for (layer_name, pattern) in pattern_list:\n self.viz_pipe.send(layer_name)\n self.viz_pipe.send(self.decode(pattern)) # value\n # down sample pattern\n pattern = np.concatenate((pattern, np.nan*np.ones(len(pattern) % down_sample)))\n pattern = pattern.reshape((len(pattern)\/down_sample, down_sample)).mean(axis=1)\n pattern = (128*(pattern + 1.0)).astype(np.uint8).tobytes()\n self.viz_pipe.send_bytes(pattern) # bytes\n def show(self):\n self.hide() # flush any windowless viz process\n self.viz_pipe, other_end = mp.Pipe()\n self.viz_process = mp.Process(target=run_viz, args=(other_end,))\n self.viz_process.start()\n self.visualizing = True\n # send initial data for window layout\n self.send_viz_data()\n def hide(self):\n if not self.visualizing: return\n self.viz_pipe.send('shutdown')\n self.viz_process.join()\n self.viz_pipe = None\n self.viz_process = None\n self.visualizing = False\n def set_standard_input(self, message, from_human_readable=True):\n if from_human_readable:\n pattern = self.encode(message)\n else:\n pattern = np.fromstring(pattern,dtype=float)\n self.network.set_pattern('STDI', pattern)\n def get_standard_output(self, to_human_readable=True):\n pattern = self.network.get_pattern('STDO')\n if to_human_readable:\n message = self.decode(pattern)\n else:\n message = pattern.tobytes()\n return message\n def set_instruction(self, opcode, *operands):\n # clear gates\n self.network.set_pattern('A',self.network.get_pattern('A')*0)\n # set 
instruction\n self.network.set_pattern('OPC',self.encode(opcode))\n for op in range(len(operands)):\n self.network.set_pattern('OP%d'%(op+1), self.encode(operands[op]))\n def train(self, pattern_hash, new_pattern_hash):\n # train module with module.train\n # self.network.get_module(module_name).train(pattern_list, next_pattern_list)\n self.network.train(pattern_hash, new_pattern_hash)\n def quit(self):\n self.hide()\n sys.exit(0)\n\ndef mock_nvm(num_registers=3, layer_size=32):\n layer_names = ['IP','OPC','OP1','OP2','OP3'] # instruction\n layer_names += ['{%d}'%r for r in range(num_registers)] # registers\n layer_names += ['C1','C2','CO','N1','N2','NO'] # compare+nand\n layer_names += ['K','V'] # memory\n layer_names += ['STDI','STDO'] # io\n layer_sizes = [layer_size]*len(layer_names)\n net = mn.MockNet(layer_names, layer_sizes)\n coding = mn.MockCoding(layer_size)\n return NVM(coding, net)\n\ndef run_viz(nvm_pipe):\n viz = vz.Visualizer(nvm_pipe)\n viz.launch()\n\ndef flash_nrom(vm):\n # train vm on instruction set\n # gate_index_map = vm.network.get_module('gating').gate_index_map\n omega = np.tanh(1)\n gate_pattern = vm.network.get_pattern('A')\n # get non-gate layer names\n layer_names = vm.network.get_layer_names(omit_gates=True)\n # layer copies\n zero_gate_pattern = gate_pattern.copy()\n zero_gate_pattern[:] = 0\n for to_layer_name in layer_names:\n for from_layer_name in layer_names:\n gate_pattern[:] = 0\n gate_pattern[vm.network.get_gate_index(to_layer_name,from_layer_name)] = omega\n vm.train(\n {from_layer_name:'pattern', 'A':gate_pattern},\n {to_layer_name:'pattern','A':zero_gate_pattern})\n # set value to_layer_name\n for to_layer_name in layer_names:\n gate_pattern[:] = 0\n gate_pattern[vm.network.get_gate_index(to_layer_name,'OP1')] = omega\n vm.train({'OPC':vm.encode('set'),'OP2':vm.encode(to_layer_name)},{'A':gate_pattern})\n vm.train({'OPC':vm.encode('set'),'OP2':vm.encode(to_layer_name),'A':gate_pattern},\n {'A':zero_gate_pattern,'OPC':vm.encode('_')})\n # ccp from_layer_name to_layer_name condition_layer_name (conditional copy)\n for to_layer_name in layer_names:\n for from_layer_name in layer_names:\n for cond_layer_name in layer_names:\n gate_pattern[:] = 0\n gate_pattern[vm.network.get_gate_index(to_layer_name,from_layer_name)] = omega\n vm.train({\n 'OPC':vm.encode('ccp'),\n 'OP1':vm.encode(from_layer_name),\n 'OP2':vm.encode(to_layer_name),\n 'OP3':vm.encode(cond_layer_name),\n cond_layer_name:vm.encode('TRUE')},\n {'A':gate_pattern})\n vm.train({\n 'OPC':vm.encode('ccp'),\n 'OP1':vm.encode(from_layer_name),\n 'OP2':vm.encode(to_layer_name),\n 'OP3':vm.encode(cond_layer_name),\n cond_layer_name:vm.encode('TRUE'),\n 'A':gate_pattern},\n {'A':zero_gate_pattern,\n 'OPC':vm.encode('_')})\n # compare circuitry\n vm.train({}, {'CO':vm.encode('FALSE')}) # default FALSE behavior\n vm.train({'C1':'pattern','C2':'pattern'}, {'CO':vm.encode('TRUE')}) # unless equal\n # nand circuitry\n vm.train({}, {'NO':vm.encode('TRUE')}) # default TRUE behavior\n vm.train({'N1':vm.encode('TRUE'),'N2':vm.encode('TRUE')}, {'NO':vm.encode('FALSE')}) # unless both\n # mwr value_layer_name pointer_layer_name (memory write)\n gate_pattern[:] = 0\n key_gate_pattern = gate_pattern.copy()\n value_gate_pattern = gate_pattern.copy()\n assoc_gate_pattern = gate_pattern.copy()\n for pointer_layer_name in layer_names:\n for value_layer_name in layer_names:\n key_gate_pattern[:] = 0\n key_gate_pattern[vm.network.get_gate_index('K',pointer_layer_name)] = omega\n vm.train({\n 'OPC':vm.encode('mwr'),\n 
'OP1':vm.encode(value_layer_name),\n 'OP2':vm.encode(pointer_layer_name)},\n {'A':key_gate_pattern})\n value_gate_pattern[:] = 0\n value_gate_pattern[vm.network.get_gate_index('V',value_layer_name)] = omega\n assoc_gate_pattern[:] = 0\n assoc_gate_pattern[vm.network.get_gate_index('V','K')] = omega\n vm.train({\n 'OPC':vm.encode('mwr'),\n 'OP1':vm.encode(value_layer_name),\n 'OP2':vm.encode(pointer_layer_name),\n 'A':key_gate_pattern},\n {'A':value_gate_pattern})\n vm.train({\n 'OPC':vm.encode('mwr'),\n 'OP1':vm.encode(value_layer_name),\n 'OP2':vm.encode(pointer_layer_name),\n 'A':value_gate_pattern},\n {'W':assoc_gate_pattern})\n vm.train({\n 'OPC':vm.encode('mwr'),\n 'OP1':vm.encode(value_layer_name),\n 'OP2':vm.encode(pointer_layer_name),\n 'W':assoc_gate_pattern},\n {'OPC':vm.encode('_'),\n 'W':zero_gate_pattern})\n # mrd value_layer_name pointer_layer_name (memory read)\n gate_pattern[:] = 0\n key_gate_pattern = gate_pattern.copy()\n value_gate_pattern = gate_pattern.copy()\n for pointer_layer_name in layer_names:\n for value_layer_name in layer_names:\n key_gate_pattern[:] = 0\n key_gate_pattern[vm.network.get_gate_index('K',pointer_layer_name)] = omega\n vm.train({\n 'OPC':vm.encode('mrd'),\n 'OP1':vm.encode(value_layer_name),\n 'OP2':vm.encode(pointer_layer_name)},\n {'A':key_gate_pattern})\n value_gate_pattern[:] = 0\n value_gate_pattern[vm.network.get_gate_index(value_layer_name,'V')] = omega\n vm.train({\n 'OPC':vm.encode('mrd'),\n 'OP1':vm.encode(value_layer_name),\n 'OP2':vm.encode(pointer_layer_name),\n 'A':key_gate_pattern},\n {'A':-key_gate_pattern}) # placeholder until hidden layer\n vm.train({\n 'OPC':vm.encode('mrd'),\n 'OP1':vm.encode(value_layer_name),\n 'OP2':vm.encode(pointer_layer_name),\n 'A':-key_gate_pattern},\n {'A':value_gate_pattern})\n vm.train({\n 'OPC':vm.encode('mrd'),\n 'OP1':vm.encode(value_layer_name),\n 'OP2':vm.encode(pointer_layer_name),\n 'A':value_gate_pattern},\n {'OPC':vm.encode('_'),\n 'A':zero_gate_pattern})\n\ndef show_tick(vm):\n period = .1\n for t in range(1):\n print('pre : %s'%vm)\n vm.tick()\n print('post: %s'%vm)\n raw_input('.')\n # time.sleep(period)\n \nif __name__ == '__main__':\n\n # Should run with -i flag to allow mvm.quit()\n\n mvm = mock_nvm()\n # mvm.set_standard_input('NIL',from_human_readable=True)\n # print(mvm.get_standard_output(to_human_readable=True))\n flash_nrom(mvm)\n # print(mvm.network.transitions['{0}'])\n mvm.show()\n show_tick(mvm)\n\n # # conditional copies\n # mvm.set_instruction('set','NIL','{0}')\n # show_tick(mvm)\n # show_tick(mvm)\n # show_tick(mvm)\n # mvm.set_instruction('set','TRUE','{1}')\n # show_tick(mvm)\n # show_tick(mvm)\n # show_tick(mvm)\n # mvm.set_instruction('set','FALSE','{2}')\n # show_tick(mvm)\n # show_tick(mvm)\n # show_tick(mvm)\n # raw_input('...')\n # mvm.set_instruction('ccp','{0}','{1}','{2}')\n # show_tick(mvm)\n # mvm.set_instruction('ccp','{0}','{2}','{1}')\n # show_tick(mvm)\n \n # # compare\/logic\n # print('set!')\n # mvm.set_instruction('set','TRUE','N1')\n # # mvm.set_instruction('set','TRUE','C1')\n # show_tick(mvm)\n # show_tick(mvm)\n # show_tick(mvm)\n # show_tick(mvm)\n # print('set!')\n # mvm.set_instruction('set','TRUE','N2')\n # # mvm.set_instruction('set','TRUE','C2')\n # show_tick(mvm)\n # show_tick(mvm)\n # show_tick(mvm)\n # show_tick(mvm)\n # print('set!')\n # mvm.set_instruction('set','NIL','N2')\n # # mvm.set_instruction('set','NIL','C2')\n # show_tick(mvm)\n # show_tick(mvm)\n # show_tick(mvm)\n\n # memory\n print('set!')\n 
mvm.set_instruction('set','TRUE','{0}')\n show_tick(mvm)\n show_tick(mvm)\n show_tick(mvm)\n show_tick(mvm)\n print('set!')\n mvm.set_instruction('set','NIL','{1}')\n show_tick(mvm)\n show_tick(mvm)\n show_tick(mvm)\n show_tick(mvm)\n print('set!')\n mvm.set_instruction('mwr','{0}','{1}')\n show_tick(mvm)\n show_tick(mvm)\n show_tick(mvm)\n show_tick(mvm)\n show_tick(mvm)\n show_tick(mvm)\n print('set!')\n mvm.set_instruction('set','_','K')\n show_tick(mvm)\n show_tick(mvm)\n show_tick(mvm)\n show_tick(mvm)\n print('set!')\n mvm.set_instruction('set','_','V')\n show_tick(mvm)\n show_tick(mvm)\n show_tick(mvm)\n show_tick(mvm)\n print('set!')\n mvm.set_instruction('mrd','{2}','{1}')\n show_tick(mvm)\n show_tick(mvm)\n show_tick(mvm)\n show_tick(mvm)\n show_tick(mvm)\n\n \n \n # mvm.hide()\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_502","text":"10-100\n\nfrom __future__ import division, print_function\nimport numpy as np\nimport pytest\nfrom scipy.interpolate import UnivariateSpline\n\nfrom pyunfold.unfold import iterative_unfold\nfrom pyunfold.callbacks import (Callback, CallbackList, Logger,\n Regularizer, SplineRegularizer,\n validate_callbacks, extract_regularizer,\n setup_callbacks_regularizer)\n\n\n@pytest.mark.parametrize('attr', ['on_unfolding_begin',\n 'on_unfolding_end',\n 'on_iteration_begin',\n 'on_iteration_end'])\ndef test_callback_attributes(attr):\n assert hasattr(Callback(), attr)\n\n\n@pytest.mark.parametrize('callbacks', [[Logger()], Logger()])\ndef test_logger(capsys, callbacks, example_dataset):\n\n # Perform iterative unfolding\n unfolded_results = iterative_unfold(data=example_dataset.data,\n data_err=example_dataset.data_err,\n response=example_dataset.response,\n response_err=example_dataset.response_err,\n efficiencies=example_dataset.efficiencies,\n efficiencies_err=example_dataset.efficiencies_err,\n return_iterations=True,\n callbacks=callbacks)\n\n # Get stdout and std err from iterative_unfold\n out, err = capsys.readouterr()\n\n # Build expected output\n expected_output = ''\n for row_index, row in unfolded_results.iterrows():\n row_output = ('Iteration {}: ts = {:0.4f}, ts_stopping ='\n ' {}\\n'.format(row_index + 1,\n row['ts_iter'],\n row['ts_stopping']))\n expected_output += row_output\n\n assert expected_output == out\n\n\ndef test_Logger_isinstance_Callback():\n\n logger = Logger()\n assert isinstance(logger, Callback)\n\n\ndef test_SplineRegularizer_isinstance_Regularizer():\n\n spline_reg = SplineRegularizer()\n assert isinstance(spline_reg, Regularizer)\n\n\ndef test_SplineRegularizer(example_dataset):\n degree = 3\n smooth = 20\n spline_reg = SplineRegularizer(degree=degree, smooth=smooth)\n\n unfolded_with_reg = iterative_unfold(data=example_dataset.data,\n data_err=example_dataset.data_err,\n response=example_dataset.response,\n response_err=example_dataset.response_err,\n efficiencies=example_dataset.efficiencies,\n efficiencies_err=example_dataset.efficiencies_err,\n return_iterations=True,\n callbacks=[spline_reg])\n\n unfolded_no_reg = iterative_unfold(data=example_dataset.data,\n data_err=example_dataset.data_err,\n response=example_dataset.response,\n response_err=example_dataset.response_err,\n efficiencies=example_dataset.efficiencies,\n efficiencies_err=example_dataset.efficiencies_err,\n return_iterations=True)\n\n no_reg = unfolded_no_reg.iloc[0]['unfolded']\n x = np.arange(len(no_reg), dtype=float)\n spline = UnivariateSpline(x, no_reg, k=degree, s=smooth)\n fitted_unfolded = spline(x)\n\n 
np.testing.assert_allclose(unfolded_with_reg.iloc[0]['unfolded'],\n fitted_unfolded)\n\n\ndef test_SplineRegularizer_groups(example_dataset):\n degree = 3\n smooth = 20\n groups = np.empty_like(example_dataset.data)\n groups[:len(groups) \/\/ 2] = 0\n groups[len(groups) \/\/ 2:] = 1\n spline_reg = SplineRegularizer(degree=degree, smooth=smooth, groups=groups)\n unfolded_with_reg = iterative_unfold(data=example_dataset.data,\n data_err=example_dataset.data_err,\n response=example_dataset.response,\n response_err=example_dataset.response_err,\n efficiencies=example_dataset.efficiencies,\n efficiencies_err=example_dataset.efficiencies_err,\n return_iterations=True,\n callbacks=[spline_reg])\n\n unfolded_no_reg = iterative_unfold(data=example_dataset.data,\n data_err=example_dataset.data_err,\n response=example_dataset.response,\n response_err=example_dataset.response_err,\n efficiencies=example_dataset.efficiencies,\n efficiencies_err=example_dataset.efficiencies_err,\n return_iterations=True)\n # Manually regularize each group independently\n y_no_reg = unfolded_no_reg.iloc[0]['unfolded']\n x = np.arange(len(y_no_reg), dtype=float)\n fitted_unfolded_no_reg = np.empty(len(y_no_reg))\n group_ids = np.unique(groups)\n for group in group_ids:\n group_mask = groups == group\n x_group = x[group_mask]\n y_group = y_no_reg[group_mask]\n spline_group = UnivariateSpline(x_group, y_group, k=degree, s=smooth)\n fitted_unfolded_group = spline_group(x_group)\n fitted_unfolded_no_reg[group_mask] = fitted_unfolded_group\n\n np.testing.assert_allclose(unfolded_with_reg.iloc[0]['unfolded'],\n fitted_unfolded_no_reg)\n\n\ndef test_SplineRegularizer_groups_raises(example_dataset):\n degree = 3\n smooth = 20\n groups = np.empty(len(example_dataset.data) - 1)\n groups[:len(groups) \/\/ 2] = 0\n groups[len(groups) \/\/ 2:] = 1\n spline_reg = SplineRegularizer(degree=degree, smooth=smooth, groups=groups)\n with pytest.raises(ValueError) as excinfo:\n iterative_unfold(data=example_dataset.data,\n data_err=example_dataset.data_err,\n response=example_dataset.response,\n response_err=example_dataset.response_err,\n efficiencies=example_dataset.efficiencies,\n efficiencies_err=example_dataset.efficiencies_err,\n return_iterations=True,\n callbacks=[spline_reg])\n\n err_msg = ('Invalid groups array. There should be an entry '\n 'for each cause bin. 
However, got len(groups)={} '\n 'while there are {} cause bins.'.format(len(groups),\n len(example_dataset.data)))\n assert err_msg == str(excinfo.value)\n\n\ndef test_validate_callbacks():\n callbacks = [Logger(), SplineRegularizer()]\n assert validate_callbacks(callbacks) == callbacks\n\n\ndef test_validate_empty_callbacks():\n assert validate_callbacks(None) == []\n\n\n@pytest.mark.parametrize('callback', [Logger(), SplineRegularizer()])\ndef test_validate_callbacks_single_callback(callback):\n validate_callbacks(callback) == [callback]\n\n\ndef test_validate_callbacks_raises():\n callbacks = [Logger(), SplineRegularizer(), 'not a callback']\n with pytest.raises(TypeError) as excinfo:\n validate_callbacks(callbacks)\n\n err_msg = 'Found non-callback object in callbacks: {}'.format(['not a callback'])\n assert err_msg == str(excinfo.value)\n\n\ndef test_extract_regularizer_mutliple_raises():\n callbacks = [SplineRegularizer(), SplineRegularizer()]\n with pytest.raises(NotImplementedError) as excinfo:\n extract_regularizer(callbacks)\n\n err_msg = 'Multiple regularizer callbacks where provided.'\n assert err_msg == str(excinfo.value)\n\n\ndef test_extract_regularizer_no_regularizer():\n callbacks = [Logger()]\n assert extract_regularizer(callbacks) is None\n\n\n@pytest.mark.parametrize('callback', [SplineRegularizer()])\ndef test_extract_regularizer(callback):\n callbacks = [Logger(), callback]\n assert extract_regularizer(callbacks) == callback\n\n\ndef test_setup_callbacks_regularizer():\n\n callbacks = [Logger(), SplineRegularizer()]\n c, r = setup_callbacks_regularizer(callbacks)\n assert isinstance(c, CallbackList)\n assert len(c) == 1\n assert c.callbacks[0] is callbacks[0]\n assert r is callbacks[1]\n\n\ndef test_callbacklist_empty():\n c = CallbackList()\n assert c.callbacks == []\n\n\ndef test_callbacklist_callbacks():\n logger = Logger()\n reg = SplineRegularizer()\n callbacks = [logger, reg]\n c = CallbackList(callbacks=callbacks)\n assert len(c) == len(callbacks)\n assert all(i is j for i, j in zip(c.callbacks, callbacks))\n\n\ndef test_callbacklist_method_calls():\n class MethodChecker(Callback):\n def __init__(self):\n super(Callback, self).__init__()\n self.called_unfolding_begin = False\n self.called_on_unfolding_end = False\n self.called_on_iteration_begin = False\n self.called_on_iteration_end = False\n\n def on_unfolding_begin(self, status=None):\n self.called_on_unfolding_begin = True\n\n def on_unfolding_end(self, status=None):\n self.called_on_unfolding_end = True\n\n def on_iteration_begin(self, iteration, status=None):\n self.called_on_iteration_begin = True\n\n def on_iteration_end(self, iteration, status=None):\n self.called_on_iteration_end = True\n\n method_checker = MethodChecker()\n c = CallbackList(method_checker)\n\n c.on_iteration_begin(1)\n assert method_checker.called_on_iteration_begin\n\n c.on_iteration_end(1)\n assert method_checker.called_on_iteration_end\n\n c.on_unfolding_begin()\n assert method_checker.called_on_unfolding_begin\n\n c.on_unfolding_end()\n assert method_checker.called_on_unfolding_end\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_503","text":"main.py\n# -*- coding: utf-8 -*-\nimport os\nimport sys\nfrom statistics.basic import Statistics\nfrom statistics.chisquare import chisquare\nfrom optparse import OptionParser, OptionValueError\n\n\nif __name__ == \"__main__\":\n usage = \"usage: %prog [options] keyword\"\n parser = OptionParser(usage)\n\n parser.add_option(\n \"-f\", \"--file\",\n 
action=\"store\",\n type=\"string\",\n dest=\"data_file\",\n help=\"data file\"\n )\n parser.add_option(\n \"-b\", \"--bins\",\n type=\"int\",\n dest=\"bins\",\n default=None,\n help=\"bins\"\n )\n parser.add_option(\n \"-w\", \"--bin_width\",\n type=\"int\",\n dest=\"bin_width\",\n default=None,\n help=\"bin width\"\n )\n parser.add_option(\n \"-s\", \"--significance_level\",\n type=\"float\",\n dest=\"significance_level\",\n default=0.05,\n help=\"significance level\"\n )\n\n options, args = parser.parse_args()\n if options.data_file:\n with open(options.data_file,\"r\") as f:\n data = map(float, f.read().split())\n else:\n data = map(int, args)\n if not data:\n raise\n s = Statistics(\n data,\n bin_width=options.bin_width,\n bins=options.bins,\n significance_level=options.significance_level\n )\n print u\"階級\\t度数\\t比率\\t\\t\\tZ-Score\\t\\t\\t累積比率\\t\\t期待比率\\t\\t期待度数\"\n for c in s.classes:\n print \"{:<5}\\t{:<3}\\t{:<18}\\t{:<18}\\t{:<18}\\t{:<18}\\t{:<18}\".format(\n c,\n s.frequencies[c],\n s.proportions[c],\n s.zscores[c],\n s.cumulative_ratios[c],\n s.class_ratios[c],\n s.expectations[c]\n )\n result = s.fit_test()\n print u\"カイ二乗値: X^2 =\", s.chisquare\n print u\"自由度: v =\", s.freedom\n print u\"有意水準: a =\", s.significance_level\n print u\"棄却域: x^2 >\", s.critical_region\n print(result)\n\n\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_505","text":"import scipy\nimport scipy.special\nimport scipy.integrate\nfrom numpy import exp,cos,sin\n\n# A basic 1D integral:\nscipy.integrate.quad(exp, 0, 1)\n# (1.7182818284590453, 1.9076760487502457e-14)\nscipy.integrate.quad(sin, -0.5, 0.5)\n# (0.0, 2.707864644566304e-15)\nscipy.integrate.quad(cos, -0.5, 0.5)\n# (0.9588510772084061, 1.0645385431034061e-14)\n\nf = lambda x : exp(-x**2)\nscipy.integrate.quad(f, 0, 1)\n# (0.7468241328124271, 8.291413475940725e-15)\n\nscipy.integrate.quad(lambda x : exp(-x**2), 0, 1)\n# (0.7468241328124271, 8.291413475940725e-15)\n\nscipy.integrate.quad(lambda x : exp(-x**2), 0, inf)\n# (0.8862269254527579, 7.101318390472462e-09)\n\nscipy.integrate.quad(lambda x : exp(-x**2), -inf, 1)\n# (1.6330510582651852, 3.669607414547701e-11)\n\nscipy.integrate.quad(lambda x: scipy.special.jn(1,x),0,5)\n# (1.177596771314338, 1.8083362065765924e-14)\n\n#### Integrating Polynomials\n\np = np.poly1d([2, 5, 1])\np(1), p(2), p(3.5)\n\nP = polyint(p)\nq=P(5)-P(1)\n\n\n#### Basic computations in linear algebra\nimport scipy.linalg\n\na = array([[-2, 3], [4, 5]])\nscipy.linalg.det(a)\n\nb = scipy.linalg.inv(a)\ndot(a,b)\n\n#### Solving systems of linear equations¶\nimport scipy.linalg\n\nA = array([[2, 4, 6], [1, -3, -9], [8, 5, -7]])\nb = array([4, -11, 2])\n\nsol1 = scipy.linalg.solve(A,b)\n\nAinv = scipy.linalg.inv(A)\nsol2 = dot(Ainv, b)\nsol1==sol2\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_506","text":"yuanyuansjtu\/chaospychaospy\/distributions\/collection\/binomial.py\n\"\"\"Binomial probability distribution.\"\"\"\nfrom functools import wraps\nimport numpy\nfrom scipy import special\n\nfrom ..baseclass import SimpleDistribution\nfrom ..operators import J\n\n\nclass binomial(SimpleDistribution):\n \"\"\"\n Binomial probability distribution.\n\n Point density:\n comb(N, x) p^x (1-p)^{N-x} x in {0, 1, ..., N}\n\n Examples:\n >>> distribution = chaospy.Binomial(3, 0.5)\n >>> distribution\n Binomial(3, 0.5)\n >>> xloc = numpy.arange(4)\n >>> distribution.pdf(xloc).round(4)\n array([0.125, 0.375, 0.375, 0.125])\n >>> distribution.cdf(xloc).round(4)\n 
array([0.125, 0.5 , 0.875, 1. ])\n >>> distribution.fwd([-0.5, -0.49, 0, 0.49, 0.5]).round(4)\n array([0. , 0.0013, 0.0625, 0.1238, 0.125 ])\n >>> uloc = numpy.linspace(0, 1, 8)\n >>> uloc.round(2)\n array([0. , 0.14, 0.29, 0.43, 0.57, 0.71, 0.86, 1. ])\n >>> distribution.inv(uloc).round(2)\n array([-0.5 , 0.55, 0.93, 1.31, 1.69, 2.07, 2.45, 3.5 ])\n >>> distribution.sample(10)\n array([2, 1, 0, 2, 2, 2, 2, 3, 3, 0])\n >>> distribution.mom([1, 2, 3]).round(4)\n array([1.5 , 3. , 6.75])\n >>> distribution.ttr([0, 1, 2, 3]).round(4)\n array([[1.5 , 1.5 , 1.5 , 1.5 ],\n [1. , 0.75, 1. , 0.75]])\n\n \"\"\"\n interpret_as_integer = True\n\n def __init__(self, size, prob):\n super(binomial, self).__init__(\n parameters=dict(size=size, prob=prob),\n repr_args=[size, prob],\n )\n\n def _cdf(self, x_data, size, prob):\n size = numpy.round(size)\n x_data = x_data-0.5\n\n floor = numpy.zeros(x_data.shape)\n indices = x_data >= 0\n floor[indices] = special.bdtr(numpy.floor(x_data[indices]), size, prob)\n\n ceil = numpy.ones(x_data.shape)\n indices = x_data <= size\n ceil[indices] = special.bdtr(numpy.ceil(x_data[indices]), size, prob)\n ceil[numpy.isnan(ceil)] = 0 # left edge case\n\n offset = x_data-numpy.floor(x_data)\n out = floor*(1-offset) + ceil*offset\n return out\n\n def _pdf(self, x_data, size, prob):\n x_data = numpy.round(x_data)\n return special.comb(size, x_data)*prob**x_data*(1-prob)**(size-x_data)\n\n def _lower(self, size, prob):\n return -0.5\n\n def _upper(self, size, prob):\n return numpy.round(size)+0.5\n\n def _mom(self, k_data, size, prob):\n x_data = numpy.arange(int(size)+1, dtype=int)\n return numpy.sum(x_data**k_data*self._pdf(\n x_data, size=numpy.floor(size), prob=prob))\n\n def _ttr(self, k_data, size, prob):\n \"\"\"Krawtchouk rule.\"\"\"\n from chaospy.quadrature import discretized_stieltjes\n abscissas = numpy.arange(0, numpy.floor(size)+1)\n weights = self._pdf(abscissas, size, prob)\n (alpha, beta), _, _ = discretized_stieltjes(k_data, [abscissas], weights)\n return alpha[0, -1], beta[0, -1]\n\n\nclass Binomial(J):\n\n def __init__(self, size, prob):\n dist = binomial(size, prob)\n super(Binomial, self).__init__(dist)\n self._repr_args = [size, prob]\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_507","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 26 11:12:32 2017\n\n@author: newton\n\"\"\"\n\nimport pandas as pd \nimport scipy as sp\nimport numpy as np\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import precision_recall_curve, roc_curve, auc\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import cross_validation\nfrom sklearn import metrics\nfrom sklearn.metrics import confusion_matrix \nimport csv \nimport matplotlib.pyplot as plt\nimport itertools\n\nlabels = [0,1,2,3,4,5,6,7,8,9]\n\n#def plot_confusion_matrix(cm, title='Confusion Matrix', cmap = plt.cm.binary): \n# plt.imshow(cm, interpolation='nearest', cmap=cmap) \n# plt.title(title) \n# plt.colorbar() \n# xlocations = np.array(range(len(labels))) \n# plt.xticks(xlocations, labels, rotation=90) \n# plt.yticks(xlocations, labels) \n# plt.ylabel('True label') \n# plt.xlabel('Predicted label') \n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n 
plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') \/ cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() \/ 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n \ntrains = pd.read_csv(\"train.csv\")\ntests = pd.read_csv(\"test.csv\")\n\nY = trains['label'] \n\ndel trains['label']\n\nX_datasarr = trains.as_matrix()\nX_norm = X_datasarr > 0\nX = X_norm.astype(int) \n\n\nX_train,X_test,y_train,y_test = cross_validation.train_test_split(X, Y, test_size=0.3, random_state=0)\n\nunique, counts = np.unique(y_test, return_counts=True)\nprint np.asarray((unique, counts)).T\n\nX_des_datasarr = tests.as_matrix()\nX_des_norm = X_des_datasarr > 0\nX_des = X_des_norm.astype(int) \n\nresults = []\n# 最小叶子结点的参数取值\nsample_leaf_options = list(range(1, 50, 3))\n# 决策树个数参数取值\nn_estimators_options = list(range(1, 10, 5))\nfor leaf_size in sample_leaf_options:\n for n_estimators_size in n_estimators_options:\n \n rfc = RandomForestClassifier(min_samples_leaf=leaf_size, n_estimators=n_estimators_size, random_state=50)\n rfc.fit(X_train,y_train)\n\n y_pred_class = rfc.predict(X_test)\n results.append((leaf_size, n_estimators_size, (y_test == y_pred_class).mean())) \n\nprint(max(results, key=lambda x: x[2]))\n\nrfc = RandomForestClassifier(min_samples_leaf=6, n_estimators=6, random_state=50)\nrfc.fit(X_train,y_train)\ncm = confusion_matrix(y_test, y_pred_class)\nplot_confusion_matrix(cm,labels, title='Normalized confusion matrix') \n\n \n#print metrics.accuracy_score(y_test, y_pred_class)\n#print rfc.score(X_test, y_test)\n#print rfc.classes_\n#print y_test[0]\n#print rfc.predict(X_test[0])\n#print rfc.predict_proba(X_test[0])\n\nY_des = rfc.predict(X_des)\n\n#Data is not binary and pos_label is not specified\n#precision, recall, pr_thresholds = precision_recall_curve(y_test, y_pred_class)\n#print precision,recall,pr_thresholds\n\nheaders = ['ImageId','Label']\n\nwith open('digit_submission.csv','w') as f:\n f_csv = csv.writer(f)\n f_csv.writerow(headers)\n rowid = 1\n for y in Y_des:\n row = [rowid,y]\n rowid += 1\n f_csv.writerow(row)\n\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_508","text":"test\/algorithms\/test_bound.py\n\n# Copyright 2021 \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom nystrompca import calc_conf_bound, calculate_bound\nfrom nystrompca.base import Kernel\n\nimport numpy as np\nfrom numpy.testing import assert_array_almost_equal\nfrom scipy.stats import norm\n\n\ndef test_calc_conf_bound1():\n\n L = np.arange(1,11)[::-1] * 10\n n = 100\n alpha = 0.5\n B = 1\n \n term1 = 
np.sqrt(2*np.log(4)) \/ np.sqrt(90)\n term2 = 1 \/ np.sqrt(10)\n D = 0.9 * (term1 + term2)\n \n expected_bounds = np.cumsum(L[:-1]\/10) * D**2 + D ** 3\n expected_bounds = np.r_[expected_bounds, np.nan]\n\n bounds = calc_conf_bound(np.diag(L), n, B, alpha)\n\n assert_array_almost_equal(bounds, expected_bounds)\n\n\ndef test_calc_conf_bound2():\n\n L = np.exp(np.linspace(1,0,10))\n n = 20\n alpha = 0.5\n B = 10\n \n term1 = 10 * np.sqrt(2*np.log(4)) \/ np.sqrt(10)\n term2 = 100 \/ np.sqrt(10)\n D = 0.5 * (term1 + term2)\n \n expected_bounds = np.cumsum(L[:-1]\/10) + D\n expected_bounds = np.r_[expected_bounds, np.nan]\n\n bounds = calc_conf_bound(np.diag(L), n, B, alpha)\n\n assert_array_almost_equal(bounds, expected_bounds)\n\n\ndef test_calc_conf_bound3():\n\n L = np.arange(1,11)[::-1] * 10\n n = 1000\n alpha = 0.9\n B = 0.5\n \n term1 = 0.5 * np.sqrt(2*np.log(20)) \/ np.sqrt(990)\n term2 = 0.25 \/ np.sqrt(10)\n D = 0.99 * (term1 + term2)\n \n expected_bounds = np.cumsum(L[:-1]\/10) * D**2 + D ** 3\n expected_bounds = np.r_[expected_bounds, np.nan]\n\n bounds = calc_conf_bound(np.diag(L), n, B, alpha)\n\n assert_array_almost_equal(bounds, expected_bounds)\n\n\ndef test_calculate_bound():\n\n X = np.random.randn(100,10)\n kernel = Kernel(kernel='cauchy', sigma=5)\n n = 2000\n alpha = 0.75\n K_mm = kernel.matrix(X, demean=False)\n bounds1 = calculate_bound(X, n, 'cauchy', alpha, sigma=5)\n bounds2 = calc_conf_bound(K_mm, n, 1, alpha)\n\n assert_array_almost_equal(bounds1, bounds2)\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_509","text":"clef20\/celebrity-profiling\/pan20_celebrity_profiling_evaluator_tests.py\nimport unittest\nfrom statistics import mean\nimport pan19_celebs_evaluator as pev\n\nclass TestRecallPrecision(unittest.TestCase):\n\n def test_pr_gender(self):\n truth = [\"male\", \"female\", \"binary\", \"male\", \"female\", \"binary\", \"male\", \"female\", \"binary\"]\n predictions = [\"male\", \"female\", \"binary\", \"female\", \"binary\", \"male\", \"binary\", \"male\", \"female\"]\n prec, rec = pev.mc_prec_rec(predictions, truth)\n self.assertEqual(mean(prec), 1\/3)\n self.assertEqual(mean(rec), 1\/3)\n\n def test_pr_age(self):\n truth = [1938, 1988, 2008, 1938, 1988, 2008, 1938, 1988, 2008]\n predictions = [1929, 1984, 2006, 2006, 1929, 1984, 1984, 2008, 1929]\n prec, rec = pev.mc_prec_rec(predictions, truth, hit_function=pev.age_window_hit)\n self.assertEqual(mean(prec), 2.5\/3)\n self.assertEqual(mean(rec), 1\/3)\n\n def test_pr_age2(self):\n truth = [2008, 2009, 2010, 2011, 2012]\n predictions = [2008, 2008, 2008, 2008, 2008]\n # after age_window_hit: predictions = [2008, 2009, 2008, 2008, 2008]\n prec, rec = pev.mc_prec_rec(predictions, truth, hit_function=pev.age_window_hit)\n self.assertEqual(mean([1\/3, 1, 1, 0, 0]), mean(prec))\n self.assertEqual(mean([1, 1, 1, 0, 0]), mean(rec))\n\n def test_pr_age3(self):\n truth = [1978, 1979, 1980, 1981, 1982, 1983, 1984]\n predictions = [1978, 1978, 1978, 1978, 1978, 1978, 1978]\n # after age_window_hit: predictions = [1978, 1979, 1980, 1981, 1982, 1983, 1978]\n prec, rec = pev.mc_prec_rec(predictions, truth, hit_function=pev.age_window_hit)\n self.assertEqual(mean([1\/2, 1, 1, 1, 1, 1, 0]), mean(prec))\n self.assertEqual(mean([1, 1, 1, 1, 1, 1, 0]), mean(rec))\n\n\nclass TestAgeWindowHit(unittest.TestCase):\n\n def test_bounds(self):\n self.assertTrue(pev.age_window_hit(2008, 2008))\n self.assertTrue(pev.age_window_hit(2006, 2008))\n self.assertTrue(pev.age_window_hit(2010, 2008))\n 
self.assertTrue(not pev.age_window_hit(2011, 2008))\n self.assertTrue(not pev.age_window_hit(2005, 2008))\n self.assertTrue(pev.age_window_hit(2008, 2010))\n\n self.assertTrue(pev.age_window_hit(1988, 1988))\n self.assertTrue(pev.age_window_hit(1984, 1988))\n self.assertTrue(pev.age_window_hit(1992, 1988))\n self.assertTrue(not pev.age_window_hit(1983, 1988))\n self.assertTrue(not pev.age_window_hit(1993, 1988))\n\n self.assertTrue(pev.age_window_hit(1929, 1938))\n self.assertTrue(pev.age_window_hit(1947, 1938))\n self.assertTrue(not pev.age_window_hit(1928, 1938))\n self.assertTrue(not pev.age_window_hit(1948, 1938))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_510","text":"from OpenPNM.Utilities import misc\nimport scipy as _sp\nimport numpy as _np\nimport os as _os\nimport pickle as _pickle\nfrom xml.etree import ElementTree as _ET\n\n\nclass VTK():\n r\"\"\"\n Class for writing a Vtp file to be read by ParaView\n\n \"\"\"\n\n _TEMPLATE = '''\n \n \n \n \n \n <\/Points>\n \n <\/Lines>\n \n <\/PointData>\n \n <\/CellData>\n <\/Piece>\n <\/PolyData>\n <\/VTKFile>\n '''.strip()\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n @staticmethod\n def save(network, filename='', phases=[]):\n r\"\"\"\n Save network and phase data to a single vtp file for visualizing in\n Paraview\n\n Parameters\n ----------\n network : OpenPNM Network Object\n The Network containing the data to be written\n\n filename : string, optional\n Filename to write data. If no name is given the file is named after\n ther network\n\n phases : list, optional\n A list contain OpenPNM Phase object(s) containing data to be written\n\n Examples\n --------\n >>> import OpenPNM\n >>> pn = OpenPNM.Network.Cubic(shape=[3,3,3])\n >>> geo = OpenPNM.Geometry.Stick_and_Ball(network=pn,\n ... pores=pn.pores(),\n ... throats=pn.throats())\n >>> air = OpenPNM.Phases.Air(network=pn)\n >>> phys = OpenPNM.Physics.Standard(network=pn, phase=air,\n ... 
pores=pn.pores(), throats=pn.throats())\n\n >>> import OpenPNM.Utilities.IO as io\n >>> io.VTK.save(pn,'test_pn.vtp',[air])\n\n >>> # Delete the new file\n >>> import os\n >>> os.remove('test_pn.vtp')\n \"\"\"\n\n if filename == '':\n filename = network.name\n filename = filename.split('.')[0] + '.vtp'\n\n root = _ET.fromstring(VTK._TEMPLATE)\n objs = []\n if type(phases) != list:\n phases = [phases]\n for phase in phases:\n objs.append(phase)\n objs.append(network)\n am = misc.amalgamate_data(objs=objs)\n key_list = list(sorted(am.keys()))\n points = network['pore.coords']\n pairs = network['throat.conns']\n\n num_points = len(points)\n num_throats = len(pairs)\n\n piece_node = root.find('PolyData').find('Piece')\n piece_node.set(\"NumberOfPoints\", str(num_points))\n piece_node.set(\"NumberOfLines\", str(num_throats))\n\n points_node = piece_node.find('Points')\n coords = VTK._array_to_element(\"coords\", points.T.ravel('F'), n=3)\n points_node.append(coords)\n\n lines_node = piece_node.find('Lines')\n connectivity = VTK._array_to_element(\"connectivity\", pairs)\n lines_node.append(connectivity)\n offsets = VTK._array_to_element(\"offsets\", 2*_np.arange(len(pairs))+2)\n lines_node.append(offsets)\n\n point_data_node = piece_node.find('PointData')\n for key in key_list:\n array = am[key]\n if array.dtype == _np.bool:\n array = array.astype(int)\n if array.size != num_points:\n continue\n element = VTK._array_to_element(key, array)\n point_data_node.append(element)\n\n cell_data_node = piece_node.find('CellData')\n for key in key_list:\n array = am[key]\n if array.dtype == _np.bool:\n array = array.astype(int)\n if array.size != num_throats:\n continue\n element = VTK._array_to_element(key, array)\n cell_data_node.append(element)\n\n tree = _ET.ElementTree(root)\n tree.write(filename)\n\n # Make pretty\n with open(filename, 'r+') as f:\n string = f.read()\n string = string.replace('<\/DataArray>', '<\/DataArray>\\n\\t\\t\\t')\n f.seek(0)\n # consider adding header: '\\n'+\n f.write(string)\n\n @staticmethod\n def load(filename):\n r\"\"\"\n Read in pore and throat data from a saved VTK file.\n\n Notes\n -----\n This will NOT reproduce original simulation, since all models and object\n relationships are lost. 
Use IO.Save and IO.Load for that.\n \"\"\"\n network = OpenPNM.Network.GenericNetwork()\n tree = _ET.parse(filename)\n piece_node = tree.find('PolyData').find('Piece')\n\n # Extract connectivity\n conn_element = piece_node.find('Lines').find('DataArray')\n array = VTK._element_to_array(conn_element, 2)\n network['throat.conns'] = array.T\n\n for element in piece_node.find('PointData').iter('DataArray'):\n key = element.get('Name')\n array = VTK._element_to_array(element)\n netname = key.split('.')[0]\n propname = key.strip(netname+'.')\n network[propname] = array\n\n return network\n\n @staticmethod\n def _array_to_element(name, array, n=1):\n dtype_map = {\n 'int8': 'Int8',\n 'int16': 'Int16',\n 'int32': 'Int32',\n 'int64': 'Int64',\n 'uint8': 'UInt8',\n 'uint16': 'UInt16',\n 'uint32': 'UInt32',\n 'uint64': 'UInt64',\n 'float32': 'Float32',\n 'float64': 'Float64',\n 'str': 'String',\n }\n element = _ET.Element('DataArray')\n element.set(\"Name\", name)\n element.set(\"NumberOfComponents\", str(n))\n element.set(\"type\", dtype_map[str(array.dtype)])\n element.text = '\\t'.join(map(str, array.ravel()))\n return element\n\n @staticmethod\n def _element_to_array(element, n=1):\n string = element.text\n dtype = element.get(\"type\")\n array = _np.fromstring(string, sep='\\t')\n array = array.astype(dtype)\n if n is not 1:\n array = array.reshape(array.size\/\/n, n)\n return array\n\n\nclass MAT():\n r\"\"\"\n Class for reading and writing OpenPNM data to a Matlab 'mat' file\n \"\"\"\n\n @staticmethod\n def save(network, filename='', phases=[]):\n r\"\"\"\n Write Network to a Mat file for exporting to Matlab. This method will be\n enhanced in a future update, and it's functionality may change!\n\n Parameters\n ----------\n\n network : OpenPNM Network Object\n\n filename : string\n Desired file name, defaults to network name if not given\n\n phases : list of phase objects ([])\n Phases that have properties we want to write to file\n\n Examples\n --------\n >>> import OpenPNM\n >>> pn = OpenPNM.Network.TestNet()\n >>> geo = OpenPNM.Geometry.TestGeometry(network=pn,\n ... pores=pn.pores(),\n ... throats=pn.throats())\n >>> air = OpenPNM.Phases.TestPhase()\n >>> import OpenPNM.Utilities.IO as io\n >>> io.MAT.save(network=pn, filename='test_pn.mat', phases=air)\n\n >>> # Remove newly created file\n >>> import os\n >>> os.remove('test_pn.mat')\n\n \"\"\"\n if filename == '':\n filename = network.name\n filename = filename.split('.')[0] + '.mat'\n\n pnMatlab = {}\n new = []\n old = []\n for keys in list(network.keys()):\n old.append(keys)\n new.append(keys.replace('.', '_'))\n\n for i in range(len(network)):\n pnMatlab[new[i]] = network[old[i]]\n\n if type(phases) != list:\n phases = [phases]\n if len(phases) != 0:\n for j in range(len(phases)):\n new = []\n old = []\n\n for keys in list(phases[j].keys()):\n old.append(keys)\n new.append(phases[j].name+'_'+keys.replace('.', '_'))\n\n for i in range(len(phases[j])):\n pnMatlab[new[i]] = phases[j][old[i]]\n\n _sp.io.savemat(file_name=filename, mdict=pnMatlab)\n\n @staticmethod\n def load():\n r\"\"\"\n This method is not implemented yet.\n \"\"\"\n raise NotImplemented()\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_511","text":"0\nfrom scipy.optimize import curve_fit\nfrom numpy import ndarray\nfrom pyequalizer.optim import *\nfrom copy import deepcopy\n\ndef find_line(front):\n \"\"\"\n find_line(front): Find a rational regression line between the stress and strain values of a set of pareto fronts. 
\n Parameters: \n front: A list of pyequalizer.optim.Individuals that define the pareto front. \n \"\"\"\n sct_x, sct_y = get_plot_pts(front)\n def ratline(x, c, e, h, eps):\n ex = deepcopy(x)\n try:\n for a in range(len(ex)):\n if ex[a] + h < 0:\n ex[a] = -h + 0.01\n except:\n if ex + h < 0:\n ex = 0.01 - h\n ret = c*(ex+h)**(-float(e))+eps\n return ret\n params, _ = curve_fit(ratline, sct_x, sct_y, [100., 0.1, -250., 20.], maxfev=100000000)\n print (params)\n return lambda x: ratline(x, *params)\n\ndef get_error(point, reg):\n \"\"\"\n Get the squared error between a specified 2-d point and\n a function, typically a regression line.\n\n Arguments: \n * point: A 1x2 array of numbers that define a point in space. \n * reg: A function that takes in a number and returns one other number. \n Typically defines a regression curve. \n \"\"\"\n return (point[1] - reg(point[0]))**2\n\ndef get_closest(inds, reg):\n \"\"\"\n Get a set of the closest individuals in a population \n to the previously generated regression line. \n\n Arguments: \n * inds: An array of Inds as defined in pyequalizer.optim. \n * reg: A regression line. Function taking on numeric argument and \n returning a numeric result. \n \"\"\"\n ind_c = deepcopy(inds)\n pts = list(zip(*get_plot_pts(inds)))\n for i in range(len(pts)): \n cost = get_error(pts[i], reg)\n ind_c[i].fitness.append(cost)\n return isolate_pareto(ind_c)\n\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_504","text":"geopi1\/Improved_USRNet\nfrom scipy.io import loadmat\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n# kernel = np.zeros((17,17))\n# kernel[6:11,6:11] = np.ones((5,5),np.float32)\/25\nx = loadmat('.\/results\/soldiers_ww2_small\/soldiers_ww2_small_kernel_x2.mat')['Kernel']\nplt.figure()\n# plt.subplot(1,2,1)\n# plt.title('original kernel')\n# plt.imshow(kernel,'gray')\n# plt.subplot(1,2,2)\nplt.title('estimated kernel')\nplt.imshow(x,'gray')\n\nplt.show()\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_560","text":"sequence-dev\/sequencesequence\/subsidence.py1-10\n#! 
\/usr\/bin\/env python\nimport numpy as np\nfrom landlab import Component\nfrom scipy import interpolate\n\n\nclass SubsidenceTimeSeries(Component):\n\n _name = \"Subsider\"\n\n _time_units = \"y\"\n\n _info = {\n \"bedrock_surface__increment_of_elevation\": {\n \"dtype\": \"float\",\n \"intent\": \"out\",\n \"optional\": False,\n \"units\": \"m\",\n \"mapping\": \"node\",\n \"doc\": \"Increment of elevation\",\n },\n \"bedrock_surface__elevation\": {\n \"dtype\": \"float\",\n \"intent\": \"inout\",\n \"optional\": False,\n \"units\": \"m\",\n \"mapping\": \"node\",\n \"doc\": \"Surface elevation\",\n },\n }\n\n def __init__(self, grid, filepath=None, kind=\"linear\"):\n \"\"\"Generate subsidence rates.\n\n Parameters\n ----------\n grid: RasterModelGrid\n A landlab grid.\n filepath: str\n Name of csv-formatted subsidence file.\n kind: str, optional\n Kind of interpolation as a string (one of 'linear',\n 'nearest', 'zero', 'slinear', 'quadratic', 'cubic').\n Default is 'linear'.\n \"\"\"\n super(SubsidenceTimeSeries, self).__init__(grid)\n\n self._filepath = filepath\n self._kind = kind\n\n data = np.loadtxt(filepath, delimiter=\",\", comments=\"#\")\n subsidence = SubsidenceTimeSeries._subsidence_interpolator(\n data, kind=self._kind\n )\n inc = self.grid.add_empty(\n \"bedrock_surface__increment_of_elevation\", at=\"node\"\n ).reshape(self.grid.shape)\n inc[:] = subsidence(self.grid.x_of_node[self.grid.nodes_at_bottom_edge])\n\n self._dz = inc.copy()\n self._time = 0.0\n\n @staticmethod\n def _subsidence_interpolator(data, kind=\"linear\"):\n return interpolate.interp1d(\n data[:, 0],\n data[:, 1],\n kind=kind,\n copy=True,\n assume_sorted=True,\n bounds_error=True,\n )\n\n @property\n def time(self):\n return self._time\n\n @property\n def filepath(self):\n return self._filepath\n\n @filepath.setter\n def filepath(self, new_path):\n self._filepath = new_path\n subsidence = SubsidenceTimeSeries._subsidence_interpolator(\n np.loadtxt(self._filepath, delimiter=\",\", comments=\"#\"), kind=self._kind\n )\n inc = self.grid.at_node[\"bedrock_surface__increment_of_elevation\"].reshape(\n self.grid.shape\n )\n inc[:] = subsidence(self.grid.x_of_node[self.grid.nodes_at_bottom_edge])\n self._dz = inc.copy()\n\n def run_one_step(self, dt):\n dz = self.grid.at_node[\"bedrock_surface__increment_of_elevation\"]\n z = self.grid.at_node[\"bedrock_surface__elevation\"]\n z_top = self.grid.at_node[\"topographic__elevation\"]\n\n dz = dz.reshape(self.grid.shape)\n z = z.reshape(self.grid.shape)\n z_top = z_top.reshape(self.grid.shape)\n\n dz[:] = self._dz * dt\n z[:] += dz\n z_top[:] += dz\n\n self._time += dt\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_561","text":"import h5py\nimport numpy as np\nfrom scipy.io import loadmat\nfrom operator import itemgetter\nimport math\nimport scipy as sp\nimport cv2\nimport matplotlib.pyplot as plt\nimport os, sys\nimport time\nimport multiprocessing\n\n\nimport random\n\n# Generate Observation Map\ndef func(theta, m, I, imax, L, w, N, anglemask):\n print('*',end='')\n rotmat = np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]])\n p = 0.5*(L[:,0]+1)*(w-1) #x 0:w-1\n q = 0.5*(L[:,1]+1)*(w-1) #y 0:w-1\n x = [p-0.5*(w-1), q-0.5*(w-1)]\n x_ = np.dot(rotmat, x)\n p = x_[0,:]+0.5*(w-1);\n q = x_[1,:]+0.5*(w-1);\n p = np.int32(p)\n q = np.int32(q)\n light_idx = q*w + p # 0:w*w-1\n x = [N[:,0], N[:,1]]\n x_ = np.dot(rotmat, x)\n pn = x_[0,:];\n qn = x_[1,:];\n normal = [np.transpose(pn), np.transpose(qn), N[:,2]]\n 
normal = np.transpose(normal)\n temp = I*anglemask\/np.transpose(imax)\n embed = np.zeros((m, w*w), np.float32)\n embed[:, light_idx] = temp\n embed = np.reshape(embed, (m, w, w))\n mask = np.zeros((m, w*w), np.bool_)\n mask[:, light_idx] = anglemask\n mask = np.reshape(mask, (m, w, w))\n return embed, mask, normal, rotmat\n\ndef wrapper(args):\n return func(*args)\n\n\n# for multi core cpu\ndef light_embedding_2d_rot_invariant_multi(I, imax, L, w, N, div, isRandomThresh):\n\n m = I.shape[0]\n rows = w\n cols = w\n embed_rot = []\n normal_rot = []\n mask_rot = []\n rot = []\n\n anglemask = np.zeros((I.shape[0],I.shape[1]),np.float32)\n for k in range(I.shape[0]): # numpixel\n angle1 = 180*np.arccos(L[:,2])\/np.pi\n if isRandomThresh == True:\n tgt = np.where(angle10))\n Iv = Iv[valid,:]\n Nv = Nv[valid,:]\n imax = imax[valid]\n if rotdiv > 1:\n embed, mask, nm, rot, rows, cols = light_embedding_2d_rot_invariant_multi(Iv, [imax], L, w, Nv, rotdiv, isRandomThresh)\n else:\n embed, mask, nm, rot, rows, cols = light_embedding_2d_rot_invariant(Iv, [imax], L, w, Nv, rotdiv, isRandomThresh)\n\n embed = np.reshape(embed, (embed.shape[0]*embed.shape[1],w,w))\n embed = np.reshape(embed, (embed.shape[0],1,w,w))\n mask = np.reshape(mask, (mask.shape[0]*mask.shape[1],w,w))\n mask = np.reshape(mask, (mask.shape[0],1,w,w))\n nm = np.reshape(nm, (nm.shape[0]*nm.shape[1],3))\n return embed, mask, nm\n\n# prepare observation map for cyclesPS dataset (for training)\ndef prep_data_2d_from_images_cycles(dirlist, dirname, scale, w, rotdiv_in, rotdiv_on):\n S = []\n M = []\n N = []\n for d in dirlist:\n dirpath = d\n\n images_dir = dirpath + '\/' + dirname\n normal_path = dirpath + '\/' + 'gt_normal.tif'\n inboundary_path = dirpath + '\/' + 'inboundary.png'\n onboundary_path = dirpath + '\/' + 'onboundary.png'\n\n # read ground truth surface normal\n nml = np.float32(cv2.imread(normal_path,-1))\/65535.0 # [-1,1]\n nml = nml[:,:,::-1]\n nml = 2*nml-1\n nml = cv2.resize(nml, None, fx = scale, fy = scale, interpolation = cv2.INTER_NEAREST)\n nShape = np.shape(nml)\n height = nShape[0]\n width = nShape[1]\n\n # read mask images_metallic\n inboundary = cv2.imread(inboundary_path,-1)\n inboundary = cv2.resize(inboundary, None, fx = scale, fy = scale, interpolation = cv2.INTER_NEAREST)\n inboundary = np.where(inboundary>0)\n inboundary_ind = inboundary[0]*height + inboundary[1]\n onboundary = cv2.imread(onboundary_path,-1)\n onboundary = cv2.resize(onboundary, None, fx = scale, fy = scale, interpolation = cv2.INTER_NEAREST)\n onboundary = np.where(onboundary>0)\n onboundary_ind = onboundary[0]*height + onboundary[1]\n\n # read light filenames\n f = open(dirpath + '\/' 'light.txt')\n data = f.read()\n f.close\n lines = data.split('\\n')\n numLight = len(lines)-1 # the last line is empty (how to fix it?)\n\n L = np.zeros((numLight,3), np.float32)\n for i,l in enumerate(lines):\n s = l.split(' ')\n if len(s) == 3:\n L[i,0] = float(s[0])\n L[i,1] = float(s[1])\n L[i,2] = float(s[2])\n\n # read images\n I = np.zeros((numLight, height, width), np.float32)\n\n for i in range(numLight):\n if i % np.floor(numLight\/10) == 0:\n print('.',end='')\n\n image_path = images_dir + '\/' + '%05d.tif' % i\n\n cv2_im = cv2.imread(image_path, -1)\/65535.0\n cv2_im = (cv2_im[:,:,0] + cv2_im[:,:,1] + cv2_im[:,:,2])\/3\n cv2_im = cv2.resize(cv2_im, (height,width), interpolation = cv2.INTER_NEAREST)\n I[i,:,:] = cv2_im\n\n\n Iv = np.reshape(I,(numLight, height*width))\n Iv = np.transpose(Iv)\n\n Nv = np.reshape(nml,(height*width,3))\n\n 
embed_in, mask_in, nm_in = light_embedding_main(Iv, Nv, L, w, rotdiv_in, inboundary_ind, True)\n embed_on, mask_on, nm_on = light_embedding_main(Iv, Nv, L, w, rotdiv_on, onboundary_ind, True)\n\n embed = []\n embed.append(embed_in.copy())\n embed.append(embed_on.copy())\n embed = np.concatenate(embed, axis=0 )\n\n mask = []\n mask.append(mask_in.copy())\n mask.append(mask_on.copy())\n mask = np.concatenate(mask, axis=0 )\n\n nm = []\n nm.append(nm_in.copy())\n nm.append(nm_on.copy())\n nm = np.concatenate(nm, axis=0 )\n\n\n S.append(embed.copy())\n M.append(mask.copy())\n N.append(nm.copy())\n print('')\n\n del embed_in, mask_in, nm_in\n del embed_on, mask_on, nm_on\n del embed, mask, nm, I, Iv, Nv\n\n S = np.concatenate(S, axis=0 )\n M = np.concatenate(M, axis=0 )\n N = np.concatenate(N, axis=0 )\n\n S = np.reshape(S, (S.shape[0], S.shape[2], S.shape[3], 1))\n M = np.reshape(M, (M.shape[0], M.shape[2], M.shape[3], 1))\n return np.array(S), np.array(M), np.array(N)\n\n# prepare observation maps for test data (i.e., DiLiGenT dataset)\ndef prep_data_2d_from_images_test(dirlist, scale, w, rotdiv, index=-1):\n\n SList = []\n NList = []\n RList = []\n IDList = []\n SizeList = []\n for d in dirlist:\n print('load' + '%s' % d)\n S = []\n N = []\n dirpath = d\n images_dir = dirpath\n normal_path = dirpath + '\/' + 'normal.txt'\n mask_path = dirpath + '\/' + 'mask.png'\n\n # get image imgSize\n image_path = images_dir + '\/' + '001.png'\n cv2_im = cv2.imread(image_path, -1)\n nShape = np.shape(cv2_im)\n height = nShape[0]\n width = nShape[1]\n\n # read ground truth surface normal\n f = open(normal_path)\n data = f.read()\n f.close\n lines = np.float32(np.array(data.split('\\n')))\n nml = np.reshape(lines, (height,width,3))\n nml = cv2.resize(nml, None, fx = scale, fy = scale, interpolation = cv2.INTER_NEAREST)\n\n # nml = np.flipud(nml) # Uncomment when test on Harvest, the surface noraml needs to be fliped upside down\n\n nShape = np.shape(nml)\n height = nShape[0]\n width = nShape[1]\n\n # uncomment if you want to see the ground truth normal map\n # plt.figure(figsize=(16,16))\n # plt.imshow(np.uint8(127*(nml+1)))\n # plt.axis('off')\n # plt.show()\n\n # read mask\n mask = cv2.imread(mask_path,-1)\n mask = cv2.resize(mask, None, fx = scale, fy = scale, interpolation = cv2.INTER_NEAREST)\n validsub = np.where(mask>0)\n validind = validsub[0]*width + validsub[1]\n\n # read light directions\n f = open(dirpath + '\/' 'light_directions.txt')\n data = f.read()\n f.close\n lines = data.split('\\n')\n numLight = len(lines)-1 # the last line is empty (how to fix it?)\n\n L = np.zeros((numLight,3), np.float32)\n for i,l in enumerate(lines):\n s = l.split(' ')\n if len(s) == 3:\n L[i,0] = float(s[0])\n L[i,1] = float(s[1])\n L[i,2] = float(s[2])\n\n # read light intensities\n f = open(dirpath + '\/' 'light_intensities.txt')\n data = f.read()\n f.close\n lines = data.split('\\n')\n\n Li = np.zeros((numLight,3), np.float32)\n for i,l in enumerate(lines):\n s = l.split(' ')\n if len(s) == 3:\n Li[i,0] = float(s[0])\n Li[i,1] = float(s[1])\n Li[i,2] = float(s[2])\n \n \n\n\n if index == -1:\n setName = os.path.basename(dirpath.rstrip('\/')) # if dirpath ends in '\/' basename returns the empty string\n if setName == 'bearPNG':\n # the first 20 images of bearPNG have errors, see paper\n index = range(20, numLight)\n else:\n index = range(0, numLight)\n\n L = L[index,:]\n Li = Li[index,:]\n numLight = len(index)\n\n # read images\n I = np.zeros((numLight, height, width), np.float32)\n\n for i, idx in 
enumerate(index):\n if i % np.floor(numLight\/10) == 0:\n print('.',end='')\n image_path = images_dir + '\/' + '%03d.png' % (idx + 1)\n cv2_im = cv2.imread(image_path, -1)\/65535.0\n cv2_im = (cv2_im[:,:,0]\/Li[i,0] + cv2_im[:,:,1]\/Li[i,1] + cv2_im[:,:,2]\/Li[i,2])\/3\n cv2_im = cv2.resize(cv2_im, None, fx = scale, fy = scale,interpolation = cv2.INTER_NEAREST)\n I[i,:,:] = cv2_im\n\n Iv = np.reshape(I,(numLight, height*width))\n Iv = np.transpose(Iv)\n Nv = np.reshape(nml,(height*width,3))\n\n imax = np.amax(Iv,axis=1) # for entire image\n valid = np.intersect1d(validind, np.where(imax>0))\n Iv = Iv[valid,:]\n Nv = Nv[valid,:]\n imax = imax[valid]\n embed_list = []\n embed, mask, nm, rot, rows, cols = light_embedding_2d_rot_invariant(Iv, [imax], L, w, Nv, rotdiv, False)\n SList.append(embed)\n RList.append(rot)\n NList.append(nm)\n IDList.append(valid)\n SizeList.append((height,width))\n\n print('')\n return np.array(SList), np.array(NList), np.array(RList), np.array(IDList), np.array(SizeList)\n\n\n# Test and evaluate network\ndef TestNetwork(model, Sv,Nv,Rv,IDv,Szv,showFig, isTensorFlow):\n numData = len(Sv)\n for i in range(numData):\n S = Sv[i]\n N = Nv[i]\n R = Rv[i]\n ID = IDv[i]\n height = Szv[i,0]\n width = Szv[i,1]\n rotdiv = S.shape[1]\n NestList = []\n for r in range(rotdiv):\n embed_div = S[:,r,:,:]\n if isTensorFlow:\n embed_div = np.reshape(embed_div, (embed_div.shape[0], embed_div.shape[1], embed_div.shape[2], 1))\n else:\n embed_div = np.reshape(embed_div, (embed_div.shape[0], 1, embed_div.shape[1], embed_div.shape[2]))\n # predict\n outputs=model.predict(embed_div)\n Nest = np.zeros((height*width,3), np.float32)\n error = 0\n Err = np.zeros((height*width,3), np.float32)\n rot = R[r,:,:]\n # N = np.zeros()\n for k in range(len(ID)):\n # n = outputs[k,:];\n n = np.zeros((2,1),np.float32)\n n[0] = outputs[k,0]\n n[1] = outputs[k,1]\n n = np.dot(np.linalg.inv(rot),n)\n n = [n[0,0],n[1,0],outputs[k,2]]\n n = n\/np.sqrt(n[0]*n[0]+n[1]*n[1]+n[2]*n[2])\n nt = N[k,0,:];\n Nest[ID[k],:] = n\n for l in range(3):\n Err[ID[k],l] = 180*math.acos(min(1,abs(n.dot(np.transpose(nt)))))\/math.pi\n error = error + 180*math.acos(min(1,abs(n.dot(np.transpose(nt)))))\/math.pi\n print('%d ' % i + '[Angle %d] Ave.Error = %.2f ' % (r,(error\/len(ID))))\n NestList.append(Nest.copy())\n\n NestMean = np.mean(NestList,axis=0)\n Nest = np.zeros((height*width,3), np.float32)\n error = 0\n Err = np.zeros((height*width,3), np.float32)\n for k in range(len(ID)):\n # n = outputs[k,:];\n n = NestMean[ID[k],:]\n n = n\/np.sqrt(n[0]*n[0]+n[1]*n[1]+n[2]*n[2])\n nt = N[k,0,:];\n Nest[ID[k],:] = n\n for l in range(3):\n Err[ID[k],l] = 180*math.acos(min(1,abs(n.dot(np.transpose(nt)))))\/math.pi\n error = error + 180*math.acos(min(1,abs(n.dot(np.transpose(nt)))))\/math.pi\n\n if rotdiv >= 2:\n print('%s ' % i + '[Mean] Ave.Error = %.2f ' % (error\/len(ID)))\n\n Err = np.reshape(Err,(height,width,3))\n Nest = np.reshape(Nest, (height,width,3))\n\n if showFig == True:\n plt.figure(figsize=(16,16))\n plt.imshow(np.concatenate((np.uint8(127*(Nest+1)),5*np.uint8(Err)), axis=1))\n plt.axis('off')\n plt.show()\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_562","text":"import sympy\nfrom sympy.assumptions.assume import AppliedPredicate, global_assumptions\nfrom typing import Dict, List, Union\n\na, b, c = sympy.symbols('a b c')\nd_a, d_b, d_c = sympy.symbols('Δa Δb Δc')\n\n\nclass Expression:\n args: List[sympy.Symbol]\n expr: sympy.Expr\n\n def __init__(self, args: List[sympy.Symbol], 
expr: sympy.Expr):\n \"\"\"Initialize an Expression instance with a sympy expression and its arguments.\n\n :param args: the variables in the expression\n :param expr: the mathematical expression\n\n >>> Expression([a, b, c], a + b + c)\n f(a, b, c) = a + b + c\n >>> Expression([a, b, c], a * b \/ c)\n f(a, b, c) = a*b\/c\n >>> Expression([a, b, c], sympy.root(a ** b, c))\n f(a, b, c) = (a**b)**(1\/c)\n \"\"\"\n self.args = args\n self.expr = expr\n\n def __repr__(self) -> str:\n \"\"\"Show this expression as a mathematical function.\n\n :rtype str\n\n >>> str(Expression([a], a * sympy.pi))\n 'f(a) = pi*a'\n >>> repr(Expression([], sympy.E))\n 'f() = E'\n \"\"\"\n if len(self.args) == 1:\n return f\"f({self.args[0]}) = {self.expr}\"\n return f\"f{tuple(self.args)} = {self.expr}\"\n\n def evaluate(self, values: Dict[Union[str, sympy.Symbol], float], precision: int =3) -> sympy.Expr:\n \"\"\"Evaluate the expression with the given values.\n\n :param values: a dictionary mapping all the sympy symbols in the args to numeric values\n :param precision: the number of digits in the results\n :return: the result of the evaluation as an sympy expression\n\n >>> Expression([a, b, c], a + b + c).evaluate({a: 1, b: 2, c: 3})\n 6.00\n >>> Expression([a, b, c], a ** b + c).evaluate({'a': c, 'b': 1})\n 2.0*c\n \"\"\"\n return self.expr.subs(values).evalf(precision)\n\n def calculate_absolute_uncertainty(self, *assumptions: List[AppliedPredicate],\n refine: bool = False,\n delta_char: str = '\\\\Delta ') -> 'Expression':\n \"\"\"Calculate the absolute uncertainty in the expression (IB way), assuming all args given are independent.\n\n :return: the absolute uncertainty of this expression\n :rtype: Expression\n\n >>> Expression([a], c * a).calculate_absolute_uncertainty(sympy.Q.positive(c), refine=True, delta_char='Δ')\n f(Δa) = c*Δa\n >>> Expression([a, b, c], a + b - c).calculate_absolute_uncertainty(refine=True, delta_char='Δ')\n f(Δa, Δb, Δc) = Δa + Δb + Δc\n \"\"\"\n uncertainty_expr = sympy.Integer(0) # just in case\n uncertainty_args = []\n global_assumptions.add(*assumptions)\n\n for var in self.args:\n d_var = sympy.Symbol(delta_char + sympy.latex(var))\n uncertainty_args.append(d_var)\n uncertainty_expr += sympy.Abs(self.expr.diff(var)) * d_var\n global_assumptions.add(sympy.Q.positive(var))\n if refine:\n uncertainty_expr = sympy.refine(uncertainty_expr)\n global_assumptions.clear()\n return Expression(uncertainty_args, uncertainty_expr)\n\n def calculate_fractional_uncertainty(self, *assumptions: List[AppliedPredicate],\n refine: bool = False,\n delta_char: str = '\\\\Delta ') -> 'Expression':\n \"\"\"Calculate the absolute uncertainty in the expression (IB way), assuming all args given are independent.\n\n :return: the fractional uncertainty of this expression\n :rtype: Expression\n\n >>> Expression([a, b, c], a * b \/ c).calculate_fractional_uncertainty(refine=True, delta_char='Δ')\n f(Δa, Δb, Δc) = Δc\/c + Δb\/b + Δa\/a\n >>> Expression([a], a ** b).calculate_fractional_uncertainty(sympy.Q.positive(b), refine=True, delta_char='Δ')\n f(Δa) = b*Δa\/a\n \"\"\"\n absolute_uncertainty = self.calculate_absolute_uncertainty(*assumptions, refine=refine, delta_char=delta_char)\n frac_uncertainty_expr = sympy.Integer(0)\n if type(absolute_uncertainty.expr) == sympy.Add:\n for addend in absolute_uncertainty.expr.args:\n frac_uncertainty_expr += addend \/ self.expr\n elif type(absolute_uncertainty.expr) == sympy.Mul or type(absolute_uncertainty) == sympy.Pow:\n frac_uncertainty_expr = 
absolute_uncertainty.expr \/ self.expr\n else:\n frac_uncertainty_expr = sympy.Mul(absolute_uncertainty.expr, sympy.Pow(self.expr, -1), evaluate=False)\n return Expression(absolute_uncertainty.args, frac_uncertainty_expr)\n\n def to_latex(self) -> str:\n r\"\"\"Get the latex form of this expression.\n\n :rtype: str\n\n >>> Expression([a, b, c], a + b + c).to_latex()\n 'a + b + c'\n >>> Expression([a, b, c], a * b \/ c).to_latex()\n '\\\\frac{a b}{c}'\n >>> Expression([a, b, c], sympy.root(a ** b, c)).to_latex()\n '\\\\left(a^{b}\\\\right)^{\\\\frac{1}{c}}'\n \"\"\"\n return sympy.latex(self.expr)\n\n @classmethod\n def from_string(cls, args_list: List[str], string: str, constants: Dict[str, float] = None) -> 'Expression':\n \"\"\"Parse a string expression.\n\n :param string: expression as a string of python expressions\n :param args_list: the list of args \/ independent variables of the expression as strings\n :param constants: a list of local variables that are considered while parsing\n :return: an expression taking in the given args\n\n >>> Expression.from_string(['x'], 'sqrt(x) ^ y')\n f(x) = (sqrt(x))**y\n >>> Expression.from_string(['m'], 'm * g', constants={'g': 9.81})\n f(m) = 9.81*m\n \"\"\"\n parsed_expr = sympy.sympify(string, evaluate=False, locals=constants) # note: uses eval\n args = [symbol for symbol in parsed_expr.atoms(sympy.Symbol) if str(symbol) in args_list]\n return cls(args, parsed_expr)\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_563","text":"import rospy\nimport actionlib\nfrom move_base_msgs.msg import MoveBaseAction, MoveBaseGoal\nfrom moveit_msgs.msg import MoveItErrorCodes\nimport math\nfrom scipy.spatial.transform import Rotation as scipyR\nmoveit_error_dict = {}\nfor name in MoveItErrorCodes.__dict__.keys():\n if not name[:1] == '_':\n code = MoveItErrorCodes.__dict__[name]\n moveit_error_dict[code] = name\n\ndef euclidean_dist(p1, p2):\n dist = math.sqrt(sum([(a - b)** 2 for a, b in zip(p1, p2)]))\n return dist\n\ndef yaw_diff(quat1, quat2):\n euler1 = scipyR.from_quat(quat1).as_euler(\"xyz\")\n euler2 = scipyR.from_quat(quat2).as_euler(\"xyz\")\n return abs(euler1[2] - euler2[2])\n\nclass WaypointApply(object):\n class Status:\n NOT_RUNNING = \"not_running\"\n RUNNING = \"running\"\n SUCCESS = \"success\"\n FAIL = \"fail\"\n def __init__(self,\n position, orientation,\n action_name=\"navigate\",\n xy_tolerance=0.1, rot_tolerance=0.3):\n # Get an action client\n self.client = actionlib.SimpleActionClient('movo_move_base', MoveBaseAction)\n rospy.loginfo(\"Waiting for movo_move_base AS...\")\n if not self.client.wait_for_server(rospy.Duration(20)):\n rospy.logerr(\"Could not connect to movo_move_base AS\")\n exit()\n rospy.loginfo(\"Connected!\")\n rospy.sleep(1.0)\n\n self.status = WaypointApply.Status.NOT_RUNNING\n self.action_name = action_name\n self._position = position\n self._orientation = orientation\n self._xy_tolerance = xy_tolerance\n self._rot_tolerance = rot_tolerance\n self._goal_reached = False\n\n # Define the goal\n rospy.loginfo(\"Waypoint (%.2f,%.2f) and (%.2f,%.2f,%.2f,%.2f) is sent.\", position[0], position[1], orientation[0], \\\n orientation[1], orientation[2], orientation[3])\n self.goal = MoveBaseGoal()\n self.goal.target_pose.header.frame_id = 'map'\n self.goal.target_pose.pose.position.x = position[0]\n self.goal.target_pose.pose.position.y = position[1]\n self.goal.target_pose.pose.position.z = 0.0\n self.goal.target_pose.pose.orientation.x = orientation[0]\n 
self.goal.target_pose.pose.orientation.y = orientation[1]\n self.goal.target_pose.pose.orientation.z = orientation[2]\n self.goal.target_pose.pose.orientation.w = orientation[3]\n self.waypoint_execute()\n\n def waypoint_execute(self):\n self.status = WaypointApply.Status.RUNNING\n self.client.send_goal(self.goal, self.done_cb, feedback_cb=self.feedback_cb)\n delay = rospy.Duration(0.1)\n while not self.client.wait_for_result(delay) and not rospy.is_shutdown():\n if self._goal_reached:\n rospy.loginfo(\"Goal has been reached by the robot actually. So cancel goal.\")\n self.status = WaypointApply.Status.SUCCESS\n self.client.cancel_goal()\n break\n if self.status == WaypointApply.Status.FAIL:\n rospy.logerr(\"Could not reach goal.\")\n self.client.cancel_goal() \n break\n\n def feedback_cb(self, feedback):\n base_position = feedback.base_position\n curx = base_position.pose.position.x\n cury = base_position.pose.position.y\n curz = base_position.pose.position.z\n curqx = base_position.pose.orientation.x\n curqy = base_position.pose.orientation.y\n curqz = base_position.pose.orientation.z\n curqw = base_position.pose.orientation.w\n # Check if already reached goal\n dist = euclidean_dist((curx, cury, curz), self._position)\n angle = yaw_diff((curqx, curqy, curqz, curqw), self._orientation)\n rospy.loginfo(\"(feedback)[dist_gap: %.5f | angle_gap: %.5f]\" % (dist, angle))\n if dist <= self._xy_tolerance\\\n and angle <= self._rot_tolerance:\n self._goal_reached = True\n rospy.loginfo(\"Goal already reached within tolerance.\")\n \n\n def done_cb(self, status, result):\n # Reference for terminal status values: http:\/\/docs.ros.org\/diamondback\/api\/actionlib_msgs\/html\/msg\/GoalStatus.html\n if status == 2:\n rospy.loginfo(\"Navigation action \"+str(self.action_name)+\" received a cancel request after it started executing, completed execution!\")\n self.status = WaypointApply.Status.FAIL \n elif status == 3:\n rospy.loginfo(\"Navigation action \"+str(self.action_name)+\" reached\")\n self.status = WaypointApply.Status.SUCCESS\n elif status == 4:\n rospy.loginfo(\"Navigation action \"+str(self.action_name)+\" was aborted by the Action Server\")\n rospy.signal_shutdown(\"Navigation action \"+str(self.action_name)+\" aborted, shutting down!\")\n self.status = WaypointApply.Status.FAIL\n elif status == 5:\n rospy.loginfo(\"Navigation action \"+str(self.action_name)+\" has been rejected by the Action Server\")\n rospy.signal_shutdown(\"Navigation action \"+str(self.action_name)+\" rejected, shutting down!\")\n self.status = WaypointApply.Status.FAIL\n elif status == 8:\n rospy.loginfo(\"Navigation action \"+str(self.action_name)+\" received a cancel request before it started executing, successfully cancelled!\")\n self.status = WaypointApply.Status.FAIL\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_564","text":"\"\"\"This is where the cool functions go that help out stuff.\n\nThey aren't directly attached to an element. 
Consequently, you need to\nuse type annotations here.\n\"\"\"\n\nimport ast\nimport textwrap\nimport types\nfrom typing import Any, List, Union\n\nimport sympy\n\nfrom vyxal import context, lexer\nfrom vyxal.LazyList import *\n\nNUMBER_TYPE = \"number\"\nSCALAR_TYPE = \"scalar\"\n\n\ndef case_of(value: str) -> int:\n \"\"\"Returns 1 for all uppercase, 0 for all lowercase, and -1 for\n mixed case.\"\"\"\n\n if all(map(lambda x: x.isupper(), value)):\n return 1\n elif all(map(lambda x: x.islower(), value)):\n return 0\n return -1\n\n\ndef deep_copy(value: Any) -> Any:\n \"\"\"Because lists and lazylists use memory references. Frick them.\"\"\"\n\n if type(value) not in (list, LazyList):\n return value # because primitives are all like \"ooh look at me\n # I don't have any fancy memory references because I'm an epic\n # chad unlike those virgin memory reference needing lists\".\n\n # Use itertools.tee for (LazyL|l)ists\n return LazyList(itertools.tee(value)[-1])\n\n\ndef get_input(ctx: context.Context) -> Any:\n \"\"\"Returns the next input depending on where ctx tells to get the\n input from.\"\"\"\n\n if ctx.use_top_input:\n if ctx.inputs[0][0]:\n ret = ctx.inputs[0][ctx.inputs[0][1] % len(ctx.inputs[0])]\n ctx.inputs[0][1] += 1\n return ret\n else:\n try:\n temp = vy_eval(input(\"> \" * ctx.repl_mode), ctx)\n return temp\n except:\n return 0\n else:\n ret = ctx.inputs[-1][ctx.inputs[-1][1] % len(ctx.inputs[-1])]\n ctx.inputs[-1][1] += 1\n return ret\n\n\n@lazylist\ndef fixed_point(function: types.FunctionType, initial: Any) -> List[Any]:\n \"\"\"Repeat function until the result is no longer unique.\n Uses initial as the initial value\"\"\"\n\n previous = None\n current = simplify(initial)\n\n while previous != current:\n yield current\n prevuous = deep_copy(current)\n current = safe_apply(function, current)\n\n\ndef from_base_alphabet(value: str, alphabet: str) -> int:\n \"\"\"Returns value in base 10 using base len(alphabet)\n [bijective base]\"\"\"\n\n ret = 0\n for digit in value:\n ret = len(alphabet) * ret + alphabet.find(digit)\n\n return ret\n\n\ndef from_base_digits(digits: List[NUMBER_TYPE], base: int) -> int:\n \"\"\"Returns digits in base 10 using arbitrary base 'base'\"\"\"\n # I may have stolen this from Jelly\n ret = 0\n for digit in digits:\n ret = base * ret + digit\n\n return ret\n\n\ndef indent_str(string: str, indent: int, end=\"\\n\") -> str:\n\n \"\"\"Indent a multiline string with 4 spaces, with a newline (or `end`) afterwards.\"\"\"\n return textwrap.indent(string, \" \" * indent) + end\n\n\ndef indent_code(*code, indent: int = 1) -> str:\n \"\"\"Indent multiple lines (`*code`) by the given amount, then join on newlines.\"\"\"\n return \"\\n\".join(indent_str(line, indent, end=\"\") for line in code) + \"\\n\"\n\n\ndef iterable(\n item: Any, number_type: Any = None, ctx: context.Context = None\n) -> Union[LazyList, Union[list, str]]:\n \"\"\"Makes sure that a value is an iterable\"\"\"\n\n if (type_of_item := type(item)) in [sympy.Rational, int]:\n if ctx.number_as_range or number_type is range:\n return LazyList(range(ctx.range_start, int(item) + ctx.range_end))\n else:\n if type_of_item is sympy.Rational:\n item = float(item)\n\n return [int(let) if let not in \"-.\" else let for let in str(item)]\n else:\n return item\n\n\ndef keep(haystack: Any, needle: Any) -> Any:\n \"\"\"Used for keeping only needle in haystack\"\"\"\n\n ret = []\n for item in haystack:\n if item in needle:\n ret.append(item)\n\n if type(haystack) is str:\n return \"\".join(ret)\n else:\n 
return ret\n\n\ndef mold(\n content: Union[list, LazyList],\n shape: Union[list, LazyList],\n) -> Union[list, LazyList]:\n \"\"\"Mold one list to the shape of the other. Uses the mold function\n that Jelly uses.\"\"\"\n # https:\/\/github.com\/DennisMitchell\/jellylanguage\/blob\/70c9fd93ab009c05dc396f8cc091f72b212fb188\/jelly\/interpreter.py#L578\n for index in range(len(shape)):\n if type(shape[index]) == list:\n mold(content, shape[index])\n else:\n item = content.pop(0)\n shape[index] = item\n content.append(item)\n return shape\n\n\ndef pop(\n iterable: Union[list, LazyList], count: int, ctx: context.Context\n) -> List[Any]:\n \"\"\"Pops (count) items from iterable. If there isn't enough items\n within iterable, input is used as filler.\"\"\"\n\n popped_items = []\n for _ in range(count):\n if iterable:\n popped_items.append(iterable.pop())\n else:\n ctx.use_top_input = True\n popped_items.append(get_input(ctx))\n ctx.use_top_input = False\n\n if ctx.retain_popped:\n for item in popped_items[::-1]:\n iterable.append(item)\n\n if ctx.reverse_flag:\n popped_items = popped_items[::-1]\n\n if count == 1:\n return popped_items[0]\n\n return popped_items\n\n\ndef primitive_type(item: type) -> Union[str, type]:\n \"\"\"Turns int\/Rational\/str into 'Scalar' and everything else\n into list\"\"\"\n\n if type(item) in [int, sympy.Rational, str]:\n return SCALAR_TYPE\n else:\n return list\n\n\ndef reverse_number(\n item: Union[int, sympy.Rational]\n) -> Union[int, sympy.Rational]:\n \"\"\"Reverses a number. Negative numbers are returned negative\"\"\"\n\n temp = \"\"\n if item < 0:\n temp = type(item)(str(eval(item))[1:][::-1])\n else:\n temp = type(item)(str(eval(item))[::-1])\n\n return sympy.Rational(item)\n\n\ndef safe_apply(function: types.FunctionType, *args, ctx) -> Any:\n \"\"\"\n Applies function to args that adapts to the input style of the passed function.\n If the function is a _lambda (it's been defined within λ...;), it passes a\n list of arguments and length of argument list.\n Otherwise, if the function is a user-defined function (starts with FN_), it\n simply passes the argument list.\n Otherwise, unpack args and call as usual\n\n *args contains ctx\n \"\"\"\n\n if function.__name__.startswith(\"_lambda\"):\n ret = function(list(args), len(args), function, ctx)\n if len(ret):\n return ret[-1]\n else:\n return []\n elif function.__name__.startswith(\"FN_\"):\n ret = function(list(args), ctx)[-1]\n if len(ret):\n return ret[-1]\n else:\n return []\n return function(*args, ctx)\n\n\ndef scalarify(value: Any) -> Union[Any, List[Any]]:\n \"\"\"Returns value[0] if value is a list of length 1, else value\"\"\"\n if type(value) in (list, LazyList):\n if len(value) == 1:\n return value[0]\n else:\n return value\n else:\n return value\n\n\ndef to_base_digits(value: int, base: int) -> List[int]:\n \"\"\"Returns value in base 'base' from base 10 as a list of digits\"\"\"\n\n ret = []\n n = value\n\n while n > base:\n n, digit = divmod(n, base)\n ret.append(digit)\n ret.append(n)\n return ret[::-1]\n\n\ndef transfer_capitalisation(source: str, target: str) -> str:\n \"\"\"Returns target with the capitalisation of source\"\"\"\n ret = \"\"\n for i in range(min(len(source), len(target))):\n if source[i].isupper():\n ret += target[i].upper()\n elif source[i].islower():\n ret += target[i].lower()\n else:\n ret += target[i]\n\n if len(target) > len(source):\n ret += target[i + 1 :]\n\n return ret\n\n\ndef uncompress(token: lexer.Token) -> Union[int, str]:\n \"\"\"Uncompress the token's value 
based on the token type.\n\n Handles the following token types: TokenType.STRING,\n TokenType.COMPRESSED_NUMBER, TokenType.COMPRESSED_STRING\n \"\"\"\n if token.name == lexer.TokenType.COMPRESSED_STRING:\n return uncompress_str(token.value)\n if token.name == lexer.TokenType.COMPRESSED_NUMBER:\n return uncompress_num(token.value)\n\n return token.value\n\n\ndef uncompress_str(string: str) -> str:\n # TODO (lyxal) Implement string (un)compression\n raise NotImplementedError()\n\n\ndef uncompress_num(num: str) -> int:\n # TODO (lyxal) Implement number (un)compression\n raise NotImplementedError()\n\n\ndef vy_eval(item: str, ctx: context.Context) -> Any:\n \"\"\"Evaluates an item. Does so safely if using the online\n interpreter\"\"\"\n\n if ctx.online:\n try:\n return ast.literal_eval(item)\n except Exception as ex:\n # TODO: eval as vyxal\n return item\n else:\n try:\n return eval(item)\n except Exception as ex:\n return item\n\n\ndef vy_str(item: Any, ctx: context.Context) -> str:\n \"\"\"Convert to string, using custom vyxal formatting\"\"\"\n if type(item) is LazyList:\n item = list(item)\n\n if type(item) is list:\n return \"⟨\" + \"|\".join([vy_str(y) for y in x]) + \"⟩\"\n\n return str(item)\n\n\ndef vy_zip(*items) -> list:\n \"\"\"Like python's zip, but fills shorter lists with 0s\"\"\"\n\n items = list(map(iter, items))\n while True:\n ret = []\n exhausted_count = 0\n for item in items:\n try:\n ret.append(next(item))\n except:\n ret.append(0)\n exhausted_count += 1\n\n if len(items) == exhausted_count:\n break\n\n yield ret\n\n\ndef wrap(vector: Union[str, list], width: int) -> List[Any]:\n \"\"\"A version of textwrap.wrap that plays nice with spaces\"\"\"\n ret = []\n temp = []\n for item in vector:\n temp.append(item)\n if len(temp) == width:\n if all([type(x) is str for x in temp]):\n ret.append(\"\".join(temp))\n else:\n ret.append(temp[::])\n temp = []\n if len(temp) < width and temp:\n if all([type(x) is str for x in temp]):\n ret.append(\"\".join(temp))\n else:\n ret.append(temp[::])\n\n return ret\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_565","text":"from __future__ import print_function\n\nimport numpy as np\nfrom scipy.io import loadmat, savemat\nfrom scipy.spatial.distance import pdist, squareform\nimport networkx as nx\n\ndef get_topk_edges(distances, nodes, k=5):\n assert k < nodes**2\n \n topk = np.argpartition(distances, k)[:k]\n topk_edges = []\n for pos in topk:\n #print(distances[pos])\n n1, n2 = pos\/\/nodes, pos%nodes\n topk_edges.append((n1,n2))\n\n print(topk_edges)\n return topk_edges\n \n \ndef read_embeddings(data_dir, csv, dim=128):\n if csv:\n return np.loadtxt(data_dir, delimiter = ',')\n\n f = open(data_dir, 'r')\n f.readline()#remove meta-data in first line\n data = f.read().split()\n data = [float(item) for item in data]\n embed = np.zeros((len(data)\/\/(dim+1), dim))\n for i in range(0, len(data), dim + 1):\n embed[int(data[i])] = data[i+1 : i+1 + dim]\n return embed\n\ndef p_at_k(topk_edges, removed_edges):\n k = len(topk_edges)\n correct_edges = len(set(topk_edges) & set(removed_edges))\n print(\"wrong: \", set(topk_edges) - set(removed_edges))\n return correct_edges\/k\n \n\n\n\"\"\" ====== IMP: Only for symmetric graphs without self loops ======\"\"\"\n# mat_dir = 'blogcatalog90.mat'\nmat_dir = 'karate.mat'\n# embd_dir = 'blogcatalog90_DW.embd'\nembd_dir = 'Karate_data.embd'\ncsv = True\nks = [1, 3 , 5, 10, 15, 20]#, 1000, 5000, 10000, 50000]\n\nembd = read_embeddings(embd_dir, csv=csv, 
dim=128)\nprint(\"Read embeddings from: \", embd_dir)\n\nmat = loadmat(mat_dir)\ngraph = nx.from_scipy_sparse_matrix(mat['network'])\nselected_edges = graph.edges(data=False)\n\nnp.random.shuffle(selected_edges)\nremoved_edges = selected_edges[:20]#mat['removed_edges'] \nselected_edges = selected_edges[20:]#mat['removed_edges']\nprint(removed_edges, '\\n', selected_edges)\n\nprint(\"Read graph from: \", mat_dir)\ndel graph, mat\n\ndistances = squareform(pdist(embd,'euclidean'))\nprint(\"Calculated pair-wise distances...\")\n\nnodes = embd.shape[0]\nprecisions = {}\n\n#set the distnces among nodes of existing edges \nfor n1, n2 in selected_edges:\n distances[n1][n2] = distances[n2][n1] = 999\n\n#Make lower triangle and diagnol -inf\nfor n1 in range(nodes):\n for n2 in range(0, n1+1):\n distances[n1][n2] = np.inf\nprint(\"Done removing existing edges and making matrix as upper triangle...\\n\", distances)\n\n#Assert n2>n1 for removed edges and Convert to set\nfor n1, n2 in removed_edges:\n assert n2>n1 , str(n2)+str(n1)\nremoved_edges = [(n1, n2) for n1, n2 in removed_edges]\n#print(removed_edges)\n\n\ndistances = np.ndarray.flatten(distances)\nfor k in ks:\n topk_edges = get_topk_edges(distances, nodes, k)\n precisions[k] = p_at_k(topk_edges, removed_edges)\n print(\"P@\", k, \": \", precisions[k])\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_567","text":"EkremBayar\/bayar\nimport pytest\nimport numpy as np\nfrom scipy.optimize import quadratic_assignment, OptimizeWarning\nfrom scipy.optimize._qap import _calc_score as _score\nfrom numpy.testing import assert_equal, assert_, assert_warns\n\n\n################\n# Common Tests #\n################\n\ndef chr12c():\n A = [\n [0, 90, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [90, 0, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0],\n [10, 0, 0, 0, 43, 0, 0, 0, 0, 0, 0, 0],\n [0, 23, 0, 0, 0, 88, 0, 0, 0, 0, 0, 0],\n [0, 0, 43, 0, 0, 0, 26, 0, 0, 0, 0, 0],\n [0, 0, 0, 88, 0, 0, 0, 16, 0, 0, 0, 0],\n [0, 0, 0, 0, 26, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 16, 0, 0, 0, 96, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 29, 0],\n [0, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 37],\n [0, 0, 0, 0, 0, 0, 0, 0, 29, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 37, 0, 0],\n ]\n B = [\n [0, 36, 54, 26, 59, 72, 9, 34, 79, 17, 46, 95],\n [36, 0, 73, 35, 90, 58, 30, 78, 35, 44, 79, 36],\n [54, 73, 0, 21, 10, 97, 58, 66, 69, 61, 54, 63],\n [26, 35, 21, 0, 93, 12, 46, 40, 37, 48, 68, 85],\n [59, 90, 10, 93, 0, 64, 5, 29, 76, 16, 5, 76],\n [72, 58, 97, 12, 64, 0, 96, 55, 38, 54, 0, 34],\n [9, 30, 58, 46, 5, 96, 0, 83, 35, 11, 56, 37],\n [34, 78, 66, 40, 29, 55, 83, 0, 44, 12, 15, 80],\n [79, 35, 69, 37, 76, 38, 35, 44, 0, 64, 39, 33],\n [17, 44, 61, 48, 16, 54, 11, 12, 64, 0, 70, 86],\n [46, 79, 54, 68, 5, 0, 56, 15, 39, 70, 0, 18],\n [95, 36, 63, 85, 76, 34, 37, 80, 33, 86, 18, 0],\n ]\n A, B = np.array(A), np.array(B)\n n = A.shape[0]\n\n opt_perm = np.array([7, 5, 1, 3, 10, 4, 8, 6, 9, 11, 2, 12]) - [1] * n\n\n return A, B, opt_perm\n\n\nclass QAPCommonTests(object):\n \"\"\"\n Base class for `quadratic_assignment` tests.\n \"\"\"\n def setup_method(self):\n np.random.seed(0)\n\n # Test global optima of problem from Umeyama IVB\n # https:\/\/pcl.sitehost.iu.edu\/rgoldsto\/papers\/weighted%20graph%20match2.pdf\n # Graph matching maximum is in the paper\n # QAP minimum determined by brute force\n def test_accuracy_1(self):\n # besides testing accuracy, check that A and B can be lists\n A = [[0, 3, 4, 2],\n [0, 0, 1, 2],\n [1, 0, 0, 1],\n [0, 0, 1, 0]]\n\n B = [[0, 4, 2, 
4],\n [0, 0, 1, 0],\n [0, 2, 0, 2],\n [0, 1, 2, 0]]\n\n res = quadratic_assignment(A, B, method=self.method,\n options={\"rng\": 0, \"maximize\": False})\n assert_equal(res.fun, 10)\n assert_equal(res.col_ind, np.array([1, 2, 3, 0]))\n\n res = quadratic_assignment(A, B, method=self.method,\n options={\"rng\": 0, \"maximize\": True})\n\n if self.method == 'faq':\n # Global optimum is 40, but FAQ gets 37\n assert_equal(res.fun, 37)\n assert_equal(res.col_ind, np.array([0, 2, 3, 1]))\n else:\n assert_equal(res.fun, 40)\n assert_equal(res.col_ind, np.array([0, 3, 1, 2]))\n\n res = quadratic_assignment(A, B, method=self.method,\n options={\"rng\": 0, \"maximize\": True})\n\n # Test global optima of problem from Umeyama IIIB\n # https:\/\/pcl.sitehost.iu.edu\/rgoldsto\/papers\/weighted%20graph%20match2.pdf\n # Graph matching maximum is in the paper\n # QAP minimum determined by brute force\n def test_accuracy_2(self):\n\n A = np.array([[0, 5, 8, 6],\n [5, 0, 5, 1],\n [8, 5, 0, 2],\n [6, 1, 2, 0]])\n\n B = np.array([[0, 1, 8, 4],\n [1, 0, 5, 2],\n [8, 5, 0, 5],\n [4, 2, 5, 0]])\n\n res = quadratic_assignment(A, B, method=self.method,\n options={\"rng\": 0, \"maximize\": False})\n if self.method == 'faq':\n # Global optimum is 176, but FAQ gets 178\n assert_equal(res.fun, 178)\n assert_equal(res.col_ind, np.array([1, 0, 3, 2]))\n else:\n assert_equal(res.fun, 176)\n assert_equal(res.col_ind, np.array([1, 2, 3, 0]))\n\n res = quadratic_assignment(A, B, method=self.method,\n options={\"rng\": 0, \"maximize\": True})\n assert_equal(res.fun, 286)\n assert_equal(res.col_ind, np.array([2, 3, 0, 1]))\n\n def test_accuracy_3(self):\n\n A, B, opt_perm = chr12c()\n\n # basic minimization\n res = quadratic_assignment(A, B, method=self.method,\n options={\"rng\": 0})\n assert_(11156 <= res.fun < 21000)\n assert_equal(res.fun, _score(A, B, res.col_ind))\n\n # basic maximization\n res = quadratic_assignment(A, B, method=self.method,\n options={\"rng\": 0, 'maximize': True})\n assert_(74000 <= res.fun < 85000)\n assert_equal(res.fun, _score(A, B, res.col_ind))\n\n # check ofv with strictly partial match\n seed_cost = np.array([4, 8, 10])\n seed = np.asarray([seed_cost, opt_perm[seed_cost]]).T\n res = quadratic_assignment(A, B, method=self.method,\n options={'partial_match': seed})\n assert_(11156 <= res.fun < 21000)\n assert_equal(res.col_ind[seed_cost], opt_perm[seed_cost])\n\n # check performance when partial match is the global optimum\n seed = np.asarray([np.arange(len(A)), opt_perm]).T\n res = quadratic_assignment(A, B, method=self.method,\n options={'partial_match': seed})\n assert_equal(res.col_ind, seed[:, 1].T)\n assert_equal(res.fun, 11156)\n assert_equal(res.nit, 0)\n\n # check performance with zero sized matrix inputs\n empty = np.empty((0, 0))\n res = quadratic_assignment(empty, empty, method=self.method,\n options={\"rng\": 0})\n assert_equal(res.nit, 0)\n assert_equal(res.fun, 0)\n\n def test_unknown_options(self):\n A, B, opt_perm = chr12c()\n\n def f():\n quadratic_assignment(A, B, method=self.method,\n options={\"ekki-ekki\": True})\n assert_warns(OptimizeWarning, f)\n\n\nclass TestFAQ(QAPCommonTests):\n method = \"faq\"\n\n def test_options(self):\n # cost and distance matrices of QAPLIB instance chr12c\n A, B, opt_perm = chr12c()\n n = len(A)\n\n # check that max_iter is obeying with low input value\n res = quadratic_assignment(A, B,\n options={'maxiter': 5})\n assert_equal(res.nit, 5)\n\n # test with shuffle\n res = quadratic_assignment(A, B,\n options={'shuffle_input': True})\n 
assert_(11156 <= res.fun < 21000)\n\n # test with randomized init\n res = quadratic_assignment(A, B,\n options={'rng': 1, 'P0': \"randomized\"})\n assert_(11156 <= res.fun < 21000)\n\n # check with specified P0\n K = np.ones((n, n)) \/ float(n)\n K = _doubly_stochastic(K)\n res = quadratic_assignment(A, B,\n options={'P0': K})\n assert_(11156 <= res.fun < 21000)\n\n def test_specific_input_validation(self):\n\n A = np.identity(2)\n B = A\n\n # method is implicitly faq\n\n # ValueError Checks: making sure single value parameters are of\n # correct value\n with pytest.raises(ValueError, match=\"Invalid 'P0' parameter\"):\n quadratic_assignment(A, B, options={'P0': \"random\"})\n with pytest.raises(\n ValueError, match=\"'maxiter' must be a positive integer\"):\n quadratic_assignment(A, B, options={'maxiter': -1})\n with pytest.raises(ValueError, match=\"'tol' must be a positive float\"):\n quadratic_assignment(A, B, options={'tol': -1})\n\n # TypeError Checks: making sure single value parameters are of\n # correct type\n with pytest.raises(TypeError):\n quadratic_assignment(A, B, options={'maxiter': 1.5})\n\n # test P0 matrix input\n with pytest.raises(\n ValueError,\n match=\"`P0` matrix must have shape m' x m', where m'=n-m\"):\n quadratic_assignment(\n np.identity(4), np.identity(4),\n options={'P0': np.ones((3, 3))}\n )\n\n K = [[0.4, 0.2, 0.3],\n [0.3, 0.6, 0.2],\n [0.2, 0.2, 0.7]]\n # matrix that isn't quite doubly stochastic\n with pytest.raises(\n ValueError, match=\"`P0` matrix must be doubly stochastic\"):\n quadratic_assignment(\n np.identity(3), np.identity(3), options={'P0': K}\n )\n\n\nclass Test2opt(QAPCommonTests):\n method = \"2opt\"\n\n def test_deterministic(self):\n # np.random.seed(0) executes before every method\n n = 20\n\n A = np.random.rand(n, n)\n B = np.random.rand(n, n)\n res1 = quadratic_assignment(A, B, method=self.method)\n\n np.random.seed(0)\n\n A = np.random.rand(n, n)\n B = np.random.rand(n, n)\n res2 = quadratic_assignment(A, B, method=self.method)\n\n assert_equal(res1.nit, res2.nit)\n\n def test_partial_guess(self):\n n = 5\n A = np.random.rand(n, n)\n B = np.random.rand(n, n)\n\n res1 = quadratic_assignment(A, B, method=self.method,\n options={'rng': 0})\n guess = np.array([np.arange(5), res1.col_ind]).T\n res2 = quadratic_assignment(A, B, method=self.method,\n options={'rng': 0, 'partial_guess': guess})\n fix = [2, 4]\n match = np.array([np.arange(5)[fix], res1.col_ind[fix]]).T\n res3 = quadratic_assignment(A, B, method=self.method,\n options={'rng': 0, 'partial_guess': guess,\n 'partial_match': match})\n assert_(res1.nit != n*(n+1)\/2)\n assert_equal(res2.nit, n*(n+1)\/2) # tests each swap exactly once\n assert_equal(res3.nit, (n-2)*(n-1)\/2) # tests free swaps exactly once\n\n def test_specific_input_validation(self):\n # can't have more seed nodes than cost\/dist nodes\n _rm = _range_matrix\n with pytest.raises(\n ValueError,\n match=\"`partial_guess` can have only as many entries as\"):\n quadratic_assignment(np.identity(3), np.identity(3),\n method=self.method,\n options={'partial_guess': _rm(5, 2)})\n # test for only two seed columns\n with pytest.raises(\n ValueError, match=\"`partial_guess` must have two columns\"):\n quadratic_assignment(\n np.identity(3), np.identity(3), method=self.method,\n options={'partial_guess': _range_matrix(2, 3)}\n )\n # test that seed has no more than two dimensions\n with pytest.raises(\n ValueError, match=\"`partial_guess` must have exactly two\"):\n quadratic_assignment(\n np.identity(3), np.identity(3), 
method=self.method,\n options={'partial_guess': np.random.rand(3, 2, 2)}\n )\n # seeds cannot be negative valued\n with pytest.raises(\n ValueError, match=\"`partial_guess` must contain only pos\"):\n quadratic_assignment(\n np.identity(3), np.identity(3), method=self.method,\n options={'partial_guess': -1 * _range_matrix(2, 2)}\n )\n # seeds can't have values greater than number of nodes\n with pytest.raises(\n ValueError,\n match=\"`partial_guess` entries must be less than number\"):\n quadratic_assignment(\n np.identity(5), np.identity(5), method=self.method,\n options={'partial_guess': 2 * _range_matrix(4, 2)}\n )\n # columns of seed matrix must be unique\n with pytest.raises(\n ValueError,\n match=\"`partial_guess` column entries must be unique\"):\n quadratic_assignment(\n np.identity(3), np.identity(3), method=self.method,\n options={'partial_guess': np.ones((2, 2))}\n )\n\n\nclass TestQAPOnce():\n def setup_method(self):\n np.random.seed(0)\n\n # these don't need to be repeated for each method\n def test_common_input_validation(self):\n # test that non square matrices return error\n with pytest.raises(ValueError, match=\"`A` must be square\"):\n quadratic_assignment(\n np.random.random((3, 4)),\n np.random.random((3, 3)),\n )\n with pytest.raises(ValueError, match=\"`B` must be square\"):\n quadratic_assignment(\n np.random.random((3, 3)),\n np.random.random((3, 4)),\n )\n # test that cost and dist matrices have no more than two dimensions\n with pytest.raises(\n ValueError, match=\"`A` and `B` must have exactly two\"):\n quadratic_assignment(\n np.random.random((3, 3, 3)),\n np.random.random((3, 3, 3)),\n )\n # test that cost and dist matrices of different sizes return error\n with pytest.raises(\n ValueError,\n match=\"`A` and `B` matrices must be of equal size\"):\n quadratic_assignment(\n np.random.random((3, 3)),\n np.random.random((4, 4)),\n )\n # can't have more seed nodes than cost\/dist nodes\n _rm = _range_matrix\n with pytest.raises(\n ValueError,\n match=\"`partial_match` can have only as many seeds as\"):\n quadratic_assignment(np.identity(3), np.identity(3),\n options={'partial_match': _rm(5, 2)})\n # test for only two seed columns\n with pytest.raises(\n ValueError, match=\"`partial_match` must have two columns\"):\n quadratic_assignment(\n np.identity(3), np.identity(3),\n options={'partial_match': _range_matrix(2, 3)}\n )\n # test that seed has no more than two dimensions\n with pytest.raises(\n ValueError, match=\"`partial_match` must have exactly two\"):\n quadratic_assignment(\n np.identity(3), np.identity(3),\n options={'partial_match': np.random.rand(3, 2, 2)}\n )\n # seeds cannot be negative valued\n with pytest.raises(\n ValueError, match=\"`partial_match` must contain only pos\"):\n quadratic_assignment(\n np.identity(3), np.identity(3),\n options={'partial_match': -1 * _range_matrix(2, 2)}\n )\n # seeds can't have values greater than number of nodes\n with pytest.raises(\n ValueError,\n match=\"`partial_match` entries must be less than number\"):\n quadratic_assignment(\n np.identity(5), np.identity(5),\n options={'partial_match': 2 * _range_matrix(4, 2)}\n )\n # columns of seed matrix must be unique\n with pytest.raises(\n ValueError,\n match=\"`partial_match` column entries must be unique\"):\n quadratic_assignment(\n np.identity(3), np.identity(3),\n options={'partial_match': np.ones((2, 2))}\n )\n\n\ndef _range_matrix(a, b):\n mat = np.zeros((a, b))\n for i in range(b):\n mat[:, i] = np.arange(a)\n return mat\n\n\ndef _doubly_stochastic(P, tol=1e-3):\n 
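# Note: this helper appears to implement a Sinkhorn-Knopp style normalization, alternately rescaling rows and columns of P until both row and column sums are within tol of 1 (or max_iter is reached)\n    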
# cleaner implementation of btaba\/sinkhorn_knopp\n\n max_iter = 1000\n c = 1 \/ P.sum(axis=0)\n r = 1 \/ (P @ c)\n P_eps = P\n\n for it in range(max_iter):\n if ((np.abs(P_eps.sum(axis=1) - 1) < tol).all() and\n (np.abs(P_eps.sum(axis=0) - 1) < tol).all()):\n # All column\/row sums ~= 1 within threshold\n break\n\n c = 1 \/ (r @ P)\n r = 1 \/ (P @ c)\n P_eps = r[:, None] * P * c\n\n return P_eps\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_568","text":"'''\n Compute average distances for each metric on covers80\n'''\n\n\ndef segment(y, s, rs_size, kmin, kmax, filter):\n \"\"\"structurally segments the selected audio\n\n ds_size: side length to which combined matrix is going to be resampled to\n [kmin, kmax]: min and maximum approximation ranks\n filtering: True or False, whether memory stacking, timelag and path enhance are going to be used\n\n returns set of low rank approximations\"\"\"\n\n #compute cqt\n C = librosa.amplitude_to_db(np.abs(librosa.cqt(y=y, sr=sr, \n hop_length=512,\n bins_per_octave=12*3,\n n_bins=7*12*3)),\n ref=np.max)\n\n #beat synch cqt\n Csync = cv2.resize(C, (int(C.shape[1]\/10), C.shape[0]))\n\n #stack memory\n if filter:\n Csync = librosa.feature.stack_memory(Csync, 4)\n\n #Affinity matrix\n R = librosa.segment.recurrence_matrix(Csync, width=3, mode='affinity', sym=True)\n\n #Filtering\n if filter: \n df = librosa.segment.timelag_filter(scipy.ndimage.median_filter)\n R = df(R, size=(1, 7))\n R = librosa.segment.path_enhance(R, 15)\n\n #mfccs\n mfcc = librosa.feature.mfcc(y=y, sr=sr)\n\n #downsample like CQT, compress time by 10\n Msync = cv2.resize(C, (int(mfcc.shape[1]\/10), mfcc.shape[0]))\n\n #weighted sequence\n path_distance = np.sum(np.diff(Msync, axis=1)**2, axis=0)\n sigma = np.median(path_distance)\n path_sim = np.exp(-path_distance \/ sigma)\n R_path = np.diag(path_sim, k=1) + np.diag(path_sim, k=-1)\n\n #weighted combination of affinity matrix and mfcc diagonal\n deg_path = np.sum(R_path, axis=1)\n deg_rec = np.sum(R, axis=1)\n\n mu = deg_path.dot(deg_path + deg_rec) \/ np.sum((deg_path + deg_rec)**2)\n\n A = mu * R + (1 - mu) * R_path\n\n #resampling\n A_d = cv2.resize(A, (rs_size, rs_size))\n\n #laplacian\n L = scipy.sparse.csgraph.laplacian(A_d, normed=True)\n\n #eigendecomposition\n evals, evecs = scipy.linalg.eigh(L)\n #eigenvector filtering\n evecs = scipy.ndimage.median_filter(evecs, size=(9, 1))\n\n #normalization\n Cnorm = np.cumsum(evecs**2, axis=1)**0.5\n\n #temporary replacement for bug\n a_min_value = 3.6934424e-08\n Cnorm[Cnorm == 0.0] = a_min_value\n if (np.isnan(np.sum(Cnorm))):\n print(\"WOOOOOAH\")\n \n # print(\"Cnorm shape:\",Cnorm.shape)\n # plt.matshow(Cnorm)\n # plt.savefig(filedir[-10:-4])\n\n #approximations\n dist_set = []\n for k in range(kmin, kmax):\n\n # #debug\n # print(np.all(Cnorm[:, k-1:k]))\n # divisor = Cnorm[:, k-1:k]\n # if not np.all(divisor):\n # print(\"0 divisor\")\n\n Xs = evecs[:, :k] \/ Cnorm[:, k-1:k]\n \n\n #debug\n if np.isnan(np.sum(Xs)):\n print('woops')\n # fig, axs = plt.subplots(1, approx[1]-approx[0], figsize=(20, 20))\n # for i in range(approx[1]-approx[0]):\n # axs[i].matshow(struct[i])\n # plt.savefig(filedir[-10:-1])\n\n distance = scipy.spatial.distance.squareform(scipy.spatial.distance.pdist(Xs, metric='euclidean'))\n dist_set.append(distance)\n dist_set = np.asarray(dist_set)\n \n \n #return\n return(dist_set)\n\n#Importing\nimport librosa\nimport numpy as np\nimport scipy\nfrom scipy.spatial.distance import pdist, squareform\nfrom scipy.interpolate 
import interp2d\nfrom scipy.sparse.csgraph import laplacian\nfrom scipy.spatial.distance import directed_hausdorff\nfrom scipy.cluster import hierarchy\nfrom scipy.linalg import eigh\nfrom scipy.ndimage import median_filter\nimport cv2\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\nimport dill\nimport sys\nimport glob\nimport os\nimport random\nimport csv\n\n#--supress warnings--#\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\n#--reading--#\n\nall_dirs = []\nall_names = []\nall_roots = []\nall_audio = []\nmax_files = 40000\nfor root, dirs, files in os.walk('\/home\/ismir\/Documents\/ISMIR\/Datasets\/covers80\/'):\n for name in files:\n if (('.wav' in name) or ('.aif' in name) or ('.mp3' in name)):\n filepath = os.path.join(root, name)\n all_dirs.append(filepath)\n all_names.append(name[:-4])\n all_roots.append(root)\n\n if len(all_dirs)>=max_files:\n break\n if len(all_dirs)>=max_files:\n break \nfile_no = len(all_dirs)\n\n#load audio\nfor f in range(file_no):\n y, sr = librosa.load(all_dirs[f], sr=16000, mono=True)\n #bug: empty mel bins\n all_audio.append((y,sr))\n\n #progress\n sys.stdout.write(\"\\rLoading %i\/%s pieces.\" % ((f+1), str(file_no)))\n sys.stdout.flush()\nprint('')\n\n\n#--cover (True) vs non-cover (False)--#\ncovers = np.zeros((file_no, file_no), dtype=np.bool_)\nfor i in range(file_no):\n for j in range(file_no):\n if (all_roots[i] == all_roots[j]):\n covers[i][j] = True\n else:\n covers[i][j] = False\n\n#--Distance dictionary--#\n\"\"\"Terminology\ndistances: L1, fro, dtw, hau, pair, sh2, sh3\nformat: rs_size-approx[0]-approx[1]-distance e.g. 128-2-8-L1\n\"\"\"\ndistances = {}\n\n#--Score dictionary--#\n\"\"\"Terminology\ndistances: L1, fro, dtw, hau, pair, sh2, sh3\nformat: (filt-)rs_size-approx[0]-approx[1]-distance e.g. 
filt-128-2-8-L1\n\"\"\"\nscores = {}\n\n\n#--traverse parameters, compute segmentations, save evaluation--#\n\n#resampling parameters\n#for rs_size in [32]:\nfor rs_size in [128]:\n #approximations\n #for approx in [[2,6]]:\n for approx in [[2,11]]:\n for filtering in [True]:\n\n #string for keys to indicate filtering\n if filtering:\n filt = 'filt-'\n else:\n filt = ''\n\n #hold all structures and their formats\n all_struct = [] #kmax-kmin sets each with a square matrix\n all_flat = [] #kmax-kmin sets each with a flattened matrix\n all_merged = [] #single concatenated vector with all flattened matrices\n all_shingled2 = [] #shingled pairs of flat approximations\n all_shingled3 = [] #shingled triples of flat approximations\n\n print(\"--------------------\")\n print(\"Resampling size:\", str(rs_size))\n print(\"Approximation range: [\" + str(approx[0]) + ',' + str(approx[1]) + ']')\n print(\"Filtering:\", str(filtering))\n\n #songs\n for f in range(file_no):\n #structure segmentation\n struct = segment(all_audio[f][0], all_audio[f][1],\n rs_size, approx[0], approx[1], filtering)\n all_struct.append(struct)\n\n # #debug\n # fig, axs = plt.subplots(1, approx[1]-approx[0], figsize=(20, 20))\n # for i in range(approx[1]-approx[0]):\n # axs[i].matshow(struct[i])\n # plt.savefig(all_names[f])\n\n #formatting\n flat_approximations = []\n merged_approximations = np.empty((0))\n for j in range(approx[1]-approx[0]):\n flat_approximations.append(struct[j].flatten())\n merged_approximations = np.concatenate((merged_approximations, flat_approximations[j]))\n all_flat.append(np.asarray(flat_approximations))\n all_merged.append(merged_approximations)\n\n #shingling per 2\n shingled = []\n for j in range(approx[1]-approx[0]-1):\n shingled.append(np.concatenate((all_flat[f][j],all_flat[f][j+1]),axis=None))\n #shingled.append(np.concatenate((struct[all_names[f]]['OG'][1][j],struct[all_names[f]]['OG'][1][j+1]),axis=None))\n all_shingled2.append(np.asarray(shingled))\n\n #shingling per 3\n shingled = []\n for j in range(approx[1]-approx[0]-2):\n shingled.append(np.concatenate((all_flat[f][j],all_flat[f][j+1],all_flat[f][j+2]),axis=None))\n #shingled.append(np.concatenate((struct[all_names[f]]['OG'][1][j],struct[all_names[f]]['OG'][1][j+1],struct[all_names[f]]['OG'][1][j+2]), axis=None))\n all_shingled3.append(np.asarray(shingled))\n \n #progress\n sys.stdout.write(\"\\rSegmented %i\/%s pieces.\" % ((f+1), str(file_no)))\n sys.stdout.flush()\n print('')\n\n # #plot approximations\n # fig, axs = plt.subplots(1, approx[1]-approx[0], figsize=(20, 20))\n # for i in range(approx[1]-approx[0]):\n # axs[i].matshow(all_struct[0][i])\n # plt.savefig('approximations'+str(rs_size))\n\n #list to numpy array\n all_struct = np.asarray(all_struct)\n all_flat = np.asarray(all_flat)\n all_merged = np.asarray(all_merged)\n\n rows = [['', 'mean', 'max']]\n\n #L1 norm\n L1_distances = np.zeros((file_no, file_no))\n for i in range(file_no):\n for j in range(file_no):\n L1_distances[i][j] = np.linalg.norm(all_merged[i]-all_merged[j], ord=1)\n\n rows.append(['L1', np.mean(L1_distances), np.amax(L1_distances)])\n print(\"Computed L1 distances.\")\n\n #Frobenius norm\n fro_distances = np.zeros((file_no, file_no))\n for i in range(file_no):\n for j in range(file_no):\n fro_distances[i][j] = np.linalg.norm(all_merged[i]-all_merged[j])\n\n rows.append(['Frobenius', np.mean(fro_distances), np.amax(fro_distances)])\n print(\"Computed Frobenius distsances.\")\n\n #Sub-sequence Dynamic Time Warping cost\n dtw_cost = np.zeros((file_no, 
file_no))\n            for i in range(file_no):\n                for j in range(file_no):\n                    costs = []\n                    for k in range(approx[1]-approx[0]): \n                        costs.append(librosa.sequence.dtw(all_struct[i][k], all_struct[j][k], subseq=False, metric='euclidean')[0][rs_size-1,rs_size-1])\n                    dtw_cost[i][j] = sum(costs)\/len(costs)\n\n            rows.append(['DTW', np.mean(dtw_cost), np.amax(dtw_cost)])\n            print(\"Computed DTW costs.\")\n            \n            #Directed Hausdorff distance\n            hausdorff_distances = np.zeros((file_no, file_no))\n            for i in range(file_no):\n                for j in range(file_no):\n                    hausdorff_distances[i][j] = (directed_hausdorff(all_flat[i], all_flat[j]))[0]\n\n            rows.append(['Hausdorff', np.mean(hausdorff_distances), np.amax(hausdorff_distances)])\n            print(\"Computed directed Hausdorff distances.\")\n\n            #Minimum distance across all pairs\n            min_distances = np.zeros((file_no, file_no))\n            for i in range(file_no):\n                for j in range(file_no):\n                    dists = []\n                    for n in range(approx[1]-approx[0]):\n                        for m in range(approx[1]-approx[0]):\n                            dists.append(np.linalg.norm(all_struct[i][n]-all_struct[j][m]))\n                    min_distances[i][j] = min(dists)\n            \n            rows.append(['Pair', np.mean(min_distances), np.amax(min_distances)])\n            print(\"Computed minimum pairwise distances.\")\n\n            #Directed Hausdorff distance of shingled pairs\n            shingled2_distances = np.zeros((file_no, file_no))\n            for i in range(file_no):\n                for j in range(file_no):\n                    shingled2_distances[i][j] = (directed_hausdorff(all_shingled2[i], all_shingled2[j]))[0]\n\n            rows.append(['Shingled 2', np.mean(shingled2_distances), np.amax(shingled2_distances)])\n            print(\"Computed directed Hausdorff distances for bi-grams.\")\n\n            #Directed Hausdorff distance of shingled triples\n            shingled3_distances = np.zeros((file_no, file_no))\n            for i in range(file_no):\n                for j in range(file_no):\n                    shingled3_distances[i][j] = (directed_hausdorff(all_shingled3[i], all_shingled3[j]))[0]\n\n            rows.append(['Shingled 3', np.mean(shingled3_distances), np.amax(shingled3_distances)])\n            print(\"Computed directed Hausdorff distances for tri-grams.\")\n\nwith open('\/home\/ismir\/Documents\/ISMIR\/figures\/deformations_run2\/mean_max.csv', mode='w') as f:\n    writer = csv.writer(f)\n    writer.writerows(rows)\nprint('Stats computed.')"}
{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_569","text":"import os\nimport glob\nimport scipy.misc as misc\nimport numpy as np\nimport imageio\n\nfrom io import BytesIO\n\n\ndef normalize_image(img):\n    \"\"\"\n    Make image zero centered and in between (0, 1)\n    \"\"\"\n    normalized = img \/ 255.\n    return normalized\n\n\ndef read_split_image(img):\n    mat = misc.imread(img).astype(np.float)\n    side = int(mat.shape[1] \/ 2)\n    assert side * 2 == mat.shape[1]\n    img_A = mat[:, :side]  # target\n    img_B = mat[:, side:]  # source\n\n    return img_A, img_B\n\n\ndef bytes_to_file(bytes_img):\n    return BytesIO(bytes_img)\n\n\ndef read_split_image(img):\n    mat = misc.imread(img).astype(np.float)\n    side = int(mat.shape[1] \/ 2)\n    assert side * 2 == mat.shape[1]\n    img_A = mat[:, :side]  # target\n    img_B = mat[:, side:]  # source\n\n    return img_A, img_B\n\n\ndef shift_and_resize_image(img, shift_x, shift_y, nw, nh):\n    w, h, _ = img.shape\n    enlarged = misc.imresize(img, [nw, nh])\n    return enlarged[shift_x:shift_x + w, shift_y:shift_y + h]\n\n\ndef scale_back(images):\n    return (images + 1.) 
\/ 2.\n\n\ndef merge(images, size):\n h, w = images.shape[1], images.shape[2]\n img = np.zeros((h * size[0], w * size[1], 3))\n for idx, image in enumerate(images):\n i = idx % size[1]\n j = idx \/\/ size[1]\n img[j * h:j * h + h, i * w:i * w + w, :] = image\n\n return img\n\n\ndef save_concat_images(imgs, img_path):\n concated = np.concatenate(imgs, axis=1)\n misc.imsave(img_path, concated)\n\n\ndef compile_frames_to_gif(frame_dir, gif_file):\n frames = sorted(glob.glob(os.path.join(frame_dir, \"*.png\")))\n print(frames)\n images = [misc.imresize(imageio.imread(f), interp='nearest', size=0.33) for f in frames]\n imageio.mimsave(gif_file, images, duration=0.1)\n return gif_file\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_570","text":"0\n'''\r\nFFT_testing.py\r\n\r\nBenchmark of the FFT method with CST files\r\n'''\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nimport sys\r\nimport os\r\nfrom scipy.constants import c, mu_0, pi \r\nfrom scipy.special import iv\r\nfrom copy import copy\r\nimport pickle as pk \r\n\r\n# UNIT=1e-3 #mm to m\r\n\r\n# # Gaussian bunch \r\n# t0=0.53e-10 #injection time [s]\r\n# WL=50*UNIT #wakelength [m]\r\n# sigmaz=1.0*UNIT # [m]\r\n# sigmat=sigmaz\/c # [s]\r\n# q=1e-9 # [C]\r\n\r\n# #--- define time\r\n# N=10000\r\n# tau=np.linspace(1e-12, WL\/c, N)\r\n# dt=tau[2]-tau[1]\r\n\r\n# #--- define s\r\n# s=np.linspace(-t0*c, WL, N)\r\n# ds=s[2]-s[1]\r\n\r\n# # bunch=np.exp(-((tau-2*t0)**2)\/(2*(sigmat**2))) #*1\/(sigmat*np.sqrt(2*pi))\r\n# bunch=q*np.exp(-((s)**2)\/(2*(sigmaz**2)))*1\/(sigmaz*np.sqrt(2*pi))\r\n\r\n# # Wakefunction\r\n\r\n# F=1.0 #form factor\r\n# L=300*UNIT #length [m]\r\n# b=1.0*UNIT #radius [m]\r\n# Zo=376.73 #vacuum impedance [Ohm]\r\n# sigma_el=1.0e6 #electric conductivity [S\/m]\r\n# #s_wf=np.linspace(1.0e-5, WL, 1000)\r\n# WF=np.zeros_like(s)\r\n\r\n# #WF=-F*L\/(4*pi*b)*np.sqrt(Zo\/(pi*c*sigma_el))*(1\/np.power(tau,3\/2))\r\n# mask = s > 0\r\n# WF[mask]=F*L*c\/(4*pi*b)*np.sqrt(Zo\/(pi*sigma_el))*(1\/np.power(abs(s[mask]),3\/2))\r\n# #WF[np.logical_not(mask)] = 0.0\r\n\r\n# # Wakepotential\r\n\r\n# #--- with convolution\r\n\r\n# '''\r\n# WFf, f = Wsol.DFT(WF, ds\/c, fmax=fmax, Nf=1000)\r\n# bunchf, f=Wsol.DFT(bunch, ds\/c, fmax=fmax, Nf=1000)\r\n\r\n# convf = WFf*bunchf\r\n# WP = np.fft.ifft(Wf)\r\n# '''\r\n\r\n# #WP_conv=(1\/(q*1e12))*np.convolve( bunch[mask] , WF[mask] ) #convolution of Wakefunction and bunch charge distribution [V\/pC]\r\n# WP_conv=(1\/(q*1e12))*np.convolve( bunch , WF )\r\n# s_conv=np.linspace(0, WL, len(WP_conv))\r\n# WP_conv = np.interp(s, s_conv, WP_conv)\r\n\r\n\r\n# #--- from theory\r\n# x=s\/(2*sigmaz)\r\n# WPth=-c*L\/(4*pi*b*np.power(sigmaz, 3\/2))*np.sqrt(Zo\/sigma_el)*np.power(abs(x), 3\/2)*np.exp(-x**2)*(iv(-3\/4, x*x)-iv(1\/4, x*x)+np.sign(s)*(iv(3\/4, x*x)-iv(-1\/4, x*x)))\r\n# WPth=WPth*1e-12 #[V\/pC]\r\n\r\n# # Impedance\r\n\r\n# fmax=1\/(3*sigmat)\r\n\r\n# #--- with FFT\r\n# bunchf, f2=Wsol.FFT(bunch\/q, ds\/c, fmax=2*fmax, flag_zeropadding=False)\r\n# WPf, f=Wsol.FFT(WPth*1e12, ds\/c, flag_zeropadding=False)\r\n\r\n# bunchf = np.interp(f,f2,bunchf)\r\n\r\n# #--- with DFT\r\n# #WPf, f=Wsol.DFT(WPth, ds\/c, fmax=fmax, Nf=1000)\r\n# #bunchf, f=Wsol.DFT(bunch\/q, ds\/c, fmax=fmax, Nf=1000)\r\n# #WPf_conv, f=Wsol.DFT(WP_conv, ds\/c, fmax=fmax, Nf=1000)\r\n\r\n# Z = - WPf \/ bunchf \r\n# Z_abs= abs(Z)\r\n# Z_re=np.real(Z)\r\n# Z_im=np.imag(Z)\r\n\r\n# #Z_conv = - WPf_conv \/ bunchf \r\n\r\n# #--- from theory\r\n# fth=np.linspace(0, fmax, 1000)\r\n# 
Zth=(1-1j*np.sign(fth))*L\/(2*pi*b)*np.sqrt(Zo*2*pi*fth\/(2*c*sigma_el))\r\n\r\n# Zth_abs=abs(Zth)\r\n# Zth_re=np.real(Zth)\r\n# Zth_im=np.imag(Zth)\r\n\r\n# '''\r\n# # Plot WP \r\n# fig = plt.figure(2, figsize=(6,4), dpi=200, tight_layout=True)\r\n# ax=fig.gca()\r\n# ax.plot(s, WP_conv, color='red', label='Wake potential from convolution [norm]')\r\n# ax.plot(s, WPth, color='red', ls='--', label='Wake potential from theory')\r\n# ax.plot(s, bunch\/max(bunch)*max(abs(WPth)), color='orange', label='lambda(s)')\r\n# ax.set(title='Resistive wall Wake potential W\/\/(s)',\r\n# xlabel='s [m]',\r\n# ylabel='WP [V\/pC]', \r\n# )\r\n# ax.legend(loc='best')\r\n# ax.grid(True, color='gray', linewidth=0.2)\r\n# plt.show()\r\n# '''\r\n\r\n# '''\r\n# # Plot WF\r\n# fig = plt.figure(2, figsize=(6,4), dpi=200, tight_layout=True)\r\n# ax=fig.gca()\r\n# ax.plot(s, WF, color='blue', ls='--', label='Wake function from theory')\r\n# ax.plot(s, bunch, color='orange', label='lambda(s)')\r\n# ax.set(title='Resistive wall Wake function',\r\n# xlabel='s [m]',\r\n# ylabel='WF [V]', \r\n# )\r\n# ax.legend(loc='best')\r\n# ax.grid(True, color='gray', linewidth=0.2)\r\n# plt.show()\r\n# '''\r\n\r\n# # Plot Z vs Zth\r\n# factor=sum(Zth_abs)\/len(Zth_abs)\/(sum(Z_abs)\/len(Z_abs))\r\n# fig = plt.figure(2, figsize=(6,4), dpi=200, tight_layout=True)\r\n# ax=fig.gca()\r\n# #ax.plot(fth, Zth_abs, color='green', ls='--', label='|Z(f)| from theory')\r\n# ax.plot(f, Z_abs, color='green', label='|Z(f)| from FFT')\r\n# #ax.plot(f, abs(Z_conv), color='m', label='|Z(f)| from convolution')\r\n# ax.plot(f, Z_re, color='red', label='Zre(f) from FFT')\r\n# ax.plot(f, Z_im, color='blue', label='Zim(f) imaginary from FFT')\r\n# ax.set(title='Resistive wall impedance Z\/\/(f)', #' \\n Zth \/ Z = ' + str(round(factor,3)),\r\n# xlabel='f [Hz]',\r\n# ylabel='Z [Ohm]', \r\n# xlim=(0, 1\/(3*sigmat)), \r\n# )\r\n# ax.legend(loc='best')\r\n# ax.grid(True, color='gray', linewidth=0.2)\r\n# plt.show()\r\n\r\n\r\n# '''\r\n# # Plot Z vs Zth Re and Im\r\n# fig = plt.figure(2, figsize=(6,4), dpi=200, tight_layout=True)\r\n# ax=fig.gca()\r\n# ax.plot(f, Z_re, color='red', label='Zre(f) from FFT')\r\n# ax.plot(f, Z_im, color='blue', label='Zim(f) imaginary from FFT')\r\n# ax.plot(f, Zth_re, color='green', ls='--', label='Zre(f) from theory')\r\n# ax.plot(f, Zth_im, color='m', ls='--', label='Zim(f) from theory')\r\n# ax.set(title='Resistive wall impedance',\r\n# xlabel='f [Hz]',\r\n# ylabel='Z [Real \/ Imag]', \r\n# )\r\n# ax.legend(loc='best')\r\n# ax.grid(True, color='gray', linewidth=0.2)\r\n# plt.show()\r\n# '''\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# #--- read the cst dictionary\r\n# with open('cst_out.txt', 'rb') as handle:\r\n# cst_data = pk.loads(handle.read())\r\n\r\n# print(cst_data.keys())\r\n# charge_dist_time=cst_data.get('charge_dist_time')\r\n# t=cst_data.get('t_charge_dist')\r\n# t0=cst_data.get('init_time')\r\n# dt=t[2]-t[1]\r\n\r\n# charge_dist=cst_data.get('charge_dist')\r\n# s_charge_dist=cst_data.get('s_charge_dist')\r\n# ds_charge_dist=s_charge_dist[2]-s_charge_dist[1]\r\n# spectrum=cst_data.get('charge_dist_spectrum')\r\n# df=0.0094650788*1e9\r\n# f_spectrum=np.arange(0, 1001*df, df)\r\n# q=1e-9 #[C]\r\n\r\n# #'''\r\n# Wake_potential=cst_data.get('WP_cst')\r\n# s_cst=cst_data.get('s_cst')\r\n# Z_cst=cst_data.get('Z_cst')\r\n# 
freq_cst=cst_data.get('freq_cst')\r\n# sigmaz=cst_data.get('sigmaz')\r\n# #'''\r\n# '''\r\n# Wake_potential_cst=cst_data.get('WPx_dipolar_cst')\r\n# s_cst=cst_data.get('s_cst_dipolar')\r\n# Z_cst=cst_data.get('Zx_dipolar_cst')\r\n# freq_cst=cst_data.get('freq_cst_dipolar')\r\n# '''\r\n# '''\r\n# Wake_potential_cst=cst_data.get('WPy_quadrupolar_cst')\r\n# s_cst=cst_data.get('s_cst_quadrupolar')\r\n# Z_cst=cst_data.get('Zy_quadrupolar_cst')\r\n# freq_cst=cst_data.get('freq_cst_quadrupolar')\r\n# '''\r\n\r\n# #--- Auxiliary variables\r\n# ds=s_cst[2]-s_cst[1]\r\n# s=np.arange(np.min(s_cst),np.max(s_cst),ds) #constant ds vector\r\n\r\n# #--- Obtain impedance Z with Fourier transform numpy.fft.fft\r\n# # MAKE A SYMMETRIC SIGNAL\r\n\r\n# # Interpolate charge distribution\r\n# # INTERPOLATE TO HAVE A CONSTANT ds. PLOT CST DS DISTRIBUTION\r\n# charge_dist_interp=np.interp(s, s_charge_dist, charge_dist)\r\n# Wake_potential_interp=np.interp(s, s_cst, Wake_potential)\r\n\r\n# #lambdaf, f=Wsol.FFT(charge_dist_interp, ds\/c, fmax=np.max(freq_cst), r=10.0)\r\n# #WPf, f=Wsol.FFT(Wake_potential_interp, ds\/c, fmax=np.max(freq_cst), r=10.0)\r\n \r\n\r\n# #lambdaf, f2=Wsol.DFT(charge_dist\/q, ds_charge_dist\/c, fmax=max(freq_cst), Nf=2001)\r\n# WPf, f=Wsol.DFT(Wake_potential*1e12, ds\/c, fmax=max(freq_cst), Nf=2001)\r\n# #WPf=WPf*sum(Wake_potential*1e12)*ds\/c\/np.sqrt(pi)\r\n\r\n# #lambdaf=np.interp(f,f2,lambdaf)\r\n# lambdaf=np.interp(f, f_spectrum, spectrum\/q)*c\r\n\r\n\r\n# # Compute the impedance\r\n# Z = abs(- WPf \/ lambdaf) # * 2\/(t_sample*ds\/np.sqrt(pi)) #normalized according to CST wakesolver manual\r\n\r\n# # Plot Impedance and maximum frequency\r\n# fig = plt.figure(1, figsize=(6,4), dpi=200, tight_layout=True)\r\n# ax=fig.gca()\r\n\r\n# # add CST fft result\r\n# ax.plot(freq_cst*1.0e-9, Z_cst, lw=1.2, color='black', label='Z\/\/ from CST')\r\n\r\n# # add numpy.fft result (normalized)\r\n# factor=np.max(Z_cst)\/np.max(Z)\r\n# ax.plot(f*1.0e-9, Z, lw=1.2, color='red', label='Z\/\/ from numpy FFT')\r\n\r\n# ax.set(title='Longitudinal impedance Z from CST magnitude',\r\n# xlabel='frequency [GHz]',\r\n# ylabel='Z\/\/(s) [Ohm]', #ylim=(-8.0e4,8.0e4)\r\n# xlim=(0.,np.max(freq_cst)*1e-9)\r\n# )\r\n# ax.legend(loc='best')\r\n# ax.grid(True, color='gray', linewidth=0.2)\r\n# plt.show()\r\n\r\n\r\n# # Plot charge dist and wake potential\r\n# '''\r\n# fig = plt.figure(2, figsize=(6,4), dpi=200, tight_layout=True)\r\n# ax=fig.gca()\r\n# ax.plot(s*1e3, charge_dist_interp, color='red', label='$\\lambda$(s)')\r\n# ax.plot(s*1e3, Wake_potential_cst, color='orange', label='W||(s)')\r\n# ax.set(title='Wake potential and charge distribution',\r\n# xlabel='s [mm]',\r\n# ylabel='W||(s) [V\/pC]', #ylim=(-8.0e4,8.0e4)\r\n# )\r\n# ax.legend(loc='best')\r\n# ax.grid(True, color='gray', linewidth=0.2)\r\n# plt.show()\r\n# '''\r\n\r\n# fig = plt.figure(1, figsize=(6,4), dpi=200, tight_layout=True)\r\n# ax=fig.gca()\r\n# ax.plot(f\/1e9, abs(lambdaf), color='red', label='$\\lambda$(w)')\r\n# ax.plot(f\/1e9, abs(WPf), color='orange', label='W||(w)')\r\n# ax.set(title='Wake potential and charge distribution',\r\n# xlabel='f [GHz]',\r\n# ylabel='W||(s) [V\/pC]', #ylim=(-8.0e4,8.0e4)\r\n# )\r\n# ax.legend(loc='best')\r\n# ax.grid(True, color='gray', linewidth=0.2)\r\n# plt.show()\r\n\r\n\r\n# #VALUE IN 0 SHOULD BE THE SAME AS THE INTEGRAL FOR THE CHARGE DIST\r\n\r\n\r\n# spectrum_fft,f=Wsol.DFT(charge_dist_time, dt, fmax=max(f_spectrum), Nf=2001)\r\n# spectrum_fft=spectrum_fft*sum(charge_dist_time)*dt\/np.sqrt(pi)\r\n\r\n# 
fig = plt.figure(1, figsize=(6,4), dpi=200, tight_layout=True)\r\n# ax=fig.gca()\r\n# ax.plot(f\/1e9, abs(spectrum_fft), color='red', label='DFT')\r\n# ax.plot(f_spectrum\/1e9, spectrum, color='blue', label='CST')\r\n# ax.set(title='Charge distribution spectrum',\r\n# xlabel='f [GHz]',\r\n# ylabel='Spectrum', #ylim=(-8.0e4,8.0e4)\r\n# )\r\n# ax.legend(loc='best')\r\n# ax.grid(True, color='gray', linewidth=0.2)\r\n# plt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#--- read the cst dictionary\r\nwith open('cst_out.txt', 'rb') as handle:\r\n cst_data = pk.loads(handle.read())\r\n\r\nbunch=cst_data.get('charge_dist')\r\nbunch_time=cst_data.get('charge_dist_time')\r\nspectrum=cst_data.get('charge_dist_spectrum')\r\ncurrent=cst_data.get('current')\r\ns_bunch=cst_data.get('s_charge_dist')\r\nds_bunch=s_bunch[2]-s_bunch[1]\r\ndf=0.0094650788*1e9\r\nf_spectrum=np.arange(0, 1001*df, df)\r\ndt=0.0013634439*1e-9\r\nt_current=np.arange(0,7.5371175*1e-9, dt)\r\nq=1e-9 #[C]\r\n\r\nWP=cst_data.get('WP_cst')\r\nZ_cst=cst_data.get('Z_cst')\r\nWP_dip=cst_data.get('WPx_dipolarX_cst')\r\nZ_dip_cst=cst_data.get('Zx_dipolarX_cst')\r\nWP_quad=cst_data.get('WPx_quadrupolarX_cst')\r\nZ_quad_cst=cst_data.get('Zx_quadrupolarX_cst')\r\n\r\nf_cst=cst_data.get('freq_cst_dipolar')\r\ns_cst=cst_data.get('s_cst')\r\nds = s_cst[2]-s_cst[1]\r\ndf = f_cst[2]-f_cst[1]\r\n\r\nbunch_i=np.interp(s_cst, s_bunch, bunch)\r\n\r\n'''\r\nlambdaf, f2=Wsol.DFT(bunch_i\/q, ds\/c, fmax=max(f_spectrum), Nf=2001)\r\nWPf, f=Wsol.DFT(WP*1e12, ds\/c, fmax=max(f_cst), Nf=2001)\r\nWPf_dip, f=Wsol.DFT(WP_dip*1e12, ds\/c, fmax=max(f_cst), Nf=2001)\r\nWPf_quad, f=Wsol.DFT(WP_quad*1e12, ds\/c, fmax=max(f_cst), Nf=2001)\r\n'''\r\nlambdafft = np.fft.fft(bunch_i\/q*c, n=200000)\r\nWPfft = np.fft.fft(WP*1e12, n=200000)\r\nWPfft_dip = np.fft.fft(WP_dip*1e12, n=200000)\r\nWPfft_quad = np.fft.fft(WP_quad*1e12, n=200000)\r\nffft=np.fft.fftfreq(len(WPfft), ds\/c)\r\n\r\nmask = np.logical_and(ffft >= 0 , ffft < 5.5*1e9)\r\nWPf = WPfft[mask]*ds\r\nWPf_dip = WPfft_dip[mask]*ds\r\nWPf_quad = WPfft_quad[mask]*ds\r\nlambdaf = lambdafft[mask]*ds\r\nf = ffft[mask] # Positive frequencies\r\n\r\n# Compute the impedance\r\nZ = abs(- WPf \/ lambdaf)\r\nZ_dip = abs(1j* WPf_dip \/ lambdaf) \r\nZ_quad = abs(1j* WPf_quad \/ lambdaf)\r\n\r\n\r\n# Plot Impedance and maximum frequency\r\nfig = plt.figure(1, figsize=(6,4), dpi=200, tight_layout=True)\r\nax=fig.gca()\r\n\r\n# add CST fft result\r\n'''\r\nax.plot(f*1.0e-9, Z, lw=1.2, color='red', label='Z\/\/ from numpy FFT')\r\nax.plot(f_cst*1.0e-9, Z_cst, lw=1.2, color='black', ls='--', label='Z\/\/ from CST')\r\n'''\r\n'''\r\n\r\nax.plot(f*1.0e-9, Z_dip, lw=1.2, color='red', label='Z dipolar from numpy FFT')\r\nax.plot(f_cst*1.0e-9, Z_dip_cst, lw=1.2, color='black', ls='--', label='Z dipolar from CST')\r\n'''\r\n\r\nax.plot(f*1.0e-9, Z_quad, lw=1.2, color='red', label='Z quadrupolar from numpy FFT')\r\nax.plot(f_cst*1.0e-9, Z_quad_cst, lw=1.2, color='black', ls='--', label='Z quadrupolar from CST')\r\n\r\n#ax.plot(f_cst*1.0e-9, Z_cst\/Z_dip_cst, lw=1.2, color='black', label='Z\/\/ from CST')\r\n#ax.plot(f*1.0e-9, Z\/Z_dip, lw=1.2, color='red', label='Z\/\/ from numpy FFT')\r\n#ax.plot(s_cst*1e3, WP_quad, lw=1.2, color='blue', label='Z\/\/ from numpy FFT')\r\n\r\n#ax.plot(f_cst*1.0e-9, abs(WPf_dip)\/Z_dip_cst, lw=1.2, color='blue', label='Z\/\/ from numpy FFT')\r\n#ax.plot(f*1.0e-9, abs(lambdaf_cst), lw=1.2, color='blue', label='Z\/\/ from numpy FFT')\r\n\r\nax.set(title='Longitudinal impedance Z from CST 
magnitude',\r\n xlabel='frequency [GHz]',\r\n ylabel='Z\/\/(s) [Ohm]', #ylim=(-8.0e4,8.0e4)\r\n #xlim=(0.,np.max(f_cst)*1e-9)\r\n )\r\nax.legend(loc='best')\r\nax.grid(True, color='gray', linewidth=0.2)\r\nplt.show()\r\n\r\n\r\n\r\n\r\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_571","text":"# -*- coding: utf-8 -*-\n\"\"\"\nMy implementation of a linear algebra library in Python. Consists of\na Vector and Matrix class that can be compatible with one another, as\nwell as useful methods for interacting with both concepts\/classes.\n\ngithub repository: https:\/\/github.com\/ulloaluis\/linear-algebra\n\"\"\"\n\nfrom math import gcd, pow, sqrt, isclose\nfrom linear_lib.linear_tests import *\nfrom fractions import Fraction\n\n__author__ = \"\"\n__license__ = \"MIT\"\n__version__ = \"0.0.2\"\n__maintainer__ = \"\"\n__email__ = \"\"\n\n\nclass Vector:\n \"\"\"\n The Vector class imitates the m x 1 vector from linear algebra and\n contains many useful functions for dealing and interacting with Vectors.\n\n Getting values directly from the vector should be done using the get(index)\n function since the comp vector location in memory may change with functions\n like mag() or zero().\n\n class Vector\n __init__(comp) - takes in a list of components or a valid mx1 Matrix\n resize(length) - while preserving current elements or filling with 0's, changes current vector length\n set(comp, index=-1) - sets entire list at once or one specific index\/value\n get(index) - returns item at specified index of vector\n zero() - turns the current vector into a zero vector and returns it\n mag() - returns the magnitude of current vector\n normalize(change=False) - returns normalized current vector, if change=True, internal vector is updated\n same_normalized(other) - returns True\/False depending on equality of the two vectors once normalized\n dot(other) - returns the dot product of th two vectors\n cross(other) - returns the cross product of u x v (u is current vector, v is other)\n perp(other) - returns True\/False if current and other are\/aren't perpendicular\n parallel(other) - returns True\/False if current and other are\/aren't parallel\n indep(other) - returns True\/False if curr vector and other vector(s) are linearly independent\n operator + - returns sum of two vectors, component wise addition\n operator - - returns difference of two vectors, component wise subtraction\n operator * - alternate for dot product, or can use for vector scaling\n operator ** - returns original vector with its components raised to power\n operator == - checks to see if lists are equal\n to string method - format: \"\"\n len() method - can use len() to get vector length\n get and set [] - user can get and set values in vector with index based operations []\n\n comp = vector composition, list of components\n length = number of components in vector\n rows = same as length, used with cols for backwards compatibility with Matrix\n cols = 1 (num of columns)\n \"\"\"\n\n def __init__(self, comp=[]):\n \"\"\"\n Initializes the vector with either a list containing its components\n or an appropriate Matrix with mx1 dimensions. 
Defaults to an empty\n vector if not specified.\n\n :param comp: a list of the elements to be included in the vector;\n the initial components, defaulted to an empty list.\n could also be an appropriately sized Matrix\n :type comp: list, Matrix\n :return: none\n :raises: ValueError when Matrix is invalid size for Vector conversion\n \"\"\"\n if isinstance(comp, Matrix):\n if comp.cols == 1:\n self.comp = [row[0] for row in comp.comp]\n else:\n raise ValueError(\"Cannot convert Matrix with greater than 1 column to Vector.\")\n else:\n self.comp = comp # user should never change comp directly; use set()\n\n self.length = len(self.comp) # user should never change length directly; use resize()\n self.rows = self.length # rows and cols included for backwards compatibility as\n self.cols = 1 # a Matrix and for use in matrix-vector product\n\n def resize(self, length):\n \"\"\"\n Re-sizes the vector to the specified length. If length is greater than\n current size, the new components are initialized with 0, otherwise if length\n is less than current size, the last few components are lost.\n\n :param length: new length of vector\n :type length: int\n :return: current vector, now resized\n :rtype: Vector\n \"\"\"\n assert(length >= 0) # no negative lengths\n dist = length - self.length\n\n if dist < 0:\n self.comp = self.comp[:dist] # notice how this fails when dist = 0, but else correctly handles it\n else:\n self.comp = self.comp + [0]*dist\n\n self.length = length\n return self\n\n def set(self, comp, index=-1):\n \"\"\"\n Set\/change the values of the current vector. Can either pass in a new\n list to replace the internal list, or can specify an index in vector\n to change just that value, in which case comp can be a single value.\n No errors are thrown if user re-sizes the list.\n\n :param comp: list to replace whole vector or value to replace single component\n :param index: optional parameter that specifies the index of the value to be replaced\n :type comp: list, int, float\n :type index: int, float (that is whole ex. 
1.0)\n :return: current vector, now updated\n :rtype: Vector\n :raises: index error if out of bounds index\n \"\"\"\n if index < 0: # default None and index=0 calls would conflict\n self.comp = comp\n self.length = self.rows = len(comp)\n else:\n if index >= self.length:\n raise IndexError(\"Index out of bounds in vector.\")\n self.comp[index] = comp\n return self\n\n def get(self, index):\n \"\"\"\n :param index: index of value\n :type index: int\n :return: element at specified index\n :rtype: int, float\n :raises: IndexError if index not in vector\n \"\"\"\n if 0 <= index < self.length:\n return self.comp[index]\n else:\n raise IndexError(\"Specified index is not in vector.\")\n\n def zero(self):\n \"\"\"\n Zeroes out the current vector by replacing each component with a 0.\n\n :return: returns current vector, which is now a zero vector\n :rtype: Vector\n \"\"\"\n self.comp = [0]*self.length\n return self\n\n def mag(self):\n \"\"\"\n Will get the magnitude of a vector.\n\n :return: the magnitude of a vector (sqrt(sum of components squared))\n :rtype: int, float\n \"\"\"\n return sqrt(sum([pow(x, 2) for x in self.comp]))\n\n def normalize(self, change=False):\n \"\"\"\n Normalizes a vector (acts on internal vector, does not take in a vector)\n\n :param change: if True, internal vector components are changed in addition\n to returning vector\n if False, vector says the same but normalized vector is returned;\n default is false\n :type change: bool\n :return: another Vector but with the normalized components (False)\n current Vector but with normalized components (True)\n :rtype: Vector\n \"\"\"\n magnitude = self.mag()\n if magnitude == 0: # already zero vector\n return self\n\n if change:\n self.comp = [elem \/ magnitude for elem in self.comp]\n return self\n else:\n return Vector([x \/ magnitude for x in self.comp])\n\n def same_normalized(self, other):\n \"\"\"\n This function states whether the current vector is the same as other\n vector when normalized.\n\n :param other: other vector to be compared\n :type other: Vector\n :return: True if they have same normalized version, False otherwise\n :rtype: bool\n \"\"\"\n return self.normalize() == other.normalize()\n\n def dot(self, other):\n \"\"\"\n This function returns a scalar (number) value representing the dot\n product of the current vector and the other vector.\n\n :param other: the b vector in a dot b\n :type other: Vector\n :return: The dot product of the current vector and other vector.\n :rtype: int, float\n :raises: ValueError when vectors are not the same length\n \"\"\"\n if len(self.comp) == len(other.comp):\n return sum([x * y for x, y in zip(self.comp, other.comp)])\n else:\n raise ValueError(\"Invalid vectors - must be of same length.\")\n\n def cross(self, other):\n \"\"\"\n For 3-dimensional vectors (3x1), this function allows you to take\n the cross product, which produces a vector i.e. 
orthogonal to both.\n\n :param other: 3D Vector (b in a X b)\n :return: Vector representing cross product of current and other\n :rtype: Vector\n :raises: Value error if vectors are not 3 dimensional\n \"\"\"\n\n # Simplified version, after determinants: u is current vector v is other\n # u x v = (u2v3 - u3v2)i - (u1v3-u3v1)j + (u1v2-u2v1)k\n if self.length == 3 and other.length == 3:\n i_hat = self.comp[1]*other.comp[2] - self.comp[2]*other.comp[1]\n j_hat = -1 * (self.comp[0]*other.comp[2] - self.comp[2]*other.comp[0])\n k_hat = self.comp[0]*other.comp[1] - self.comp[1]*other.comp[0]\n return Vector([i_hat, j_hat, k_hat])\n else:\n raise ValueError(\"Invalid vectors - Can only take the cross product of 3D vectors.\")\n\n def perp(self, other):\n \"\"\"\n Boolean function for whether two vectors are perpendicular\/orthogonal to each other.\n\n :param other: the other vector\n :type other: Vector\n :return: Will return True if current vector and other vector are perpendicular, false otherwise.\n :rtype: bool\n \"\"\"\n\n return self.dot(other) == 0\n\n def parallel(self, other):\n \"\"\"\n Boolean function for whether two vectors are parallel to each other.\n\n :param other: the other vector\n :type other: Vector\n :return: Will return True if current vector and other vector are parallel, false otherwise.\n :rtype: bool\n \"\"\"\n\n return self.cross(other).mag() == 0 # could also check dot prod = |A*B|\n\n def indep(self, other):\n \"\"\"\n Determines whether current vector and one or more vectors are linearly independent.\n\n Note: User should make sure to pass in vectors of correct dimension.\n\n :param other: list of vectors or a vector to be compared to current\n :type other: List, Vector\n :return: boolean true\/false if given vectors are linearly independent\n :rtype: bool\n :raises: ValueError if other is not a valid type\n \"\"\"\n\n if isinstance(other, Vector): # make 'other' a list if it's a vector\n other = [other]\n\n if isinstance(other, list) and len(other) > 0:\n other.append(self)\n m, n = len(other), len(other[0]) # m is num vectors, n is vector dimension\n\n if m == n: # Place list into matrix and check if determinant is 0\n return Matrix([vec.comp for vec in other]).det() != 0\n elif m < n:\n row_reduced = Matrix([vec.comp for vec in other]).row_reduce()\n return Vector(row_reduced[-1]).mag() != 0 # see if last row is all 0s\n else:\n return False # if num vectors > dimension, can't be independent\n\n else:\n raise ValueError(\"Invalid input - Must be a vector or list of vectors.\")\n\n def __add__(self, other):\n \"\"\"\n Adding two vectors returns a vector with the respective components\n added together as expected. (does not affect this or other vector's\n components)\n\n :param other: the other vector to be added to current instance vector\n :type other: Vector\n :return: a vector with the resulting added components\n :rtype: Vector\n :raises: ValueError when vectors are not the same length\n \"\"\"\n if len(self.comp) == len(other.comp):\n return Vector([x+y for x, y in zip(self.comp, other.comp)])\n else:\n raise ValueError(\"Invalid vectors - must be of same length.\")\n\n def __sub__(self, other):\n \"\"\"\n Subtracting two vectors returns a vector with the respective components\n subtracted. \"current - other\" is formatting. 
(this does not affect this\n or other vector's components)\n\n :param other: the other vector which is subtracting from the current vector\n :type other: Vector\n :return: a vector with the resulting subtracted components\n :rtype: Vector\n :raises: ValueError when vectors are not the same length\n \"\"\"\n if len(self.comp) == len(other.comp):\n return Vector([x-y for x, y in zip(self.comp, other.comp)])\n else:\n raise ValueError(\"Invalid vectors - must be of same length.\")\n\n def __mul__(self, other):\n \"\"\"\n Multiplies the two vectors together; same functionality as calling the\n dot() function for dot product of current and other. Could also scale\n each component by a number\n\n :param other: the other vector\n :type other: Vector, integer, float\n :return: number value representing dot product of both vectors\n :rtype: int, float\n :raises: ValueError when vectors are not the same length\n \"\"\"\n if isinstance(other, int) or isinstance(other, float):\n return Vector([x * other for x in self.comp])\n elif len(self.comp) == len(other.comp):\n return self.dot(other)\n else:\n raise ValueError(\"Invalid vectors - must be of same length.\")\n\n def __eq__(self, other):\n \"\"\"\n If two vectors have the same components, then they are equal. If the\n lists are not the same length, will always be False with no error thrown.\n\n :param other: other vector being tested for equality\n :type other: Vector\n :return: True or False based on equality\n :rtype: bool\n \"\"\"\n return self.comp == other.comp # compares lists\n\n def __pow__(self, power, modulo=None):\n \"\"\"\n Allows you to raise each of the components of the current vector to a power.\n\n :param power: value to raise each component to\n :param modulo: optional parameter that applies the modulus operator to each result\n :type power: int, float\n :type modulo: int, float\n :return: a vector containing the appropriately scaled components\n :rtype: Vector\n \"\"\"\n\n return Vector([pow(x, power) % modulo if modulo else pow(x, power) for x in self.comp])\n\n def __str__(self):\n \"\"\"\n Converts vector to string by placing the components a, b , c, ... into arrow\n brackets, as such . 
Empty arrays return <empty> for clarity.\n\n        :return: a string detailing contents of vector with the format <a, b, c> or <empty>\n        :rtype: str\n        \"\"\"\n        if self.length == 0:\n            return \"<empty>\"\n        vec = \"<\"\n        for elem in self.comp:\n            vec += str(elem) + \", \"\n        return vec[:-2] + \">\"  # remove additional \", \" and close\n\n    def __len__(self):\n        \"\"\"\n        :return: length of vector\n        :rtype: int\n        \"\"\"\n        return self.length\n\n    def __getitem__(self, i):\n        \"\"\"\n        Alternate for get(), allows you to reliably access components of vector.\n        v = Vector([1,2])   v[0] -> 1\n\n        :param i: index\n        :type i: int\n        :return: value at specified index in self.comp\/vector\n        :rtype: int, float\n        \"\"\"\n        return self.get(i)\n\n    def __setitem__(self, key, value):\n        \"\"\"\n        Allows user to set value using index-based accessing.\n\n        :param key:\n        :param value:\n        :return: item just inserted\n        \"\"\"\n        return self.set(value, key)\n\n\nclass Matrix:\n    \"\"\"\n    The Matrix class imitates the matrix concept from linear algebra and allows\n    for different ways of dealing and interacting with matrices and vectors.\n\n    class Matrix\n    __init__(comp) - takes in a list of components or a valid Vector\n    resize(rows, cols) - while preserving current elements or filling with 0's, changes current vector length\n    set(comp, index=None) - sets entire list at once or one specific index\/value (tuple or array as (row, col))\n    get(row=None,col=None) - can get a specific row, column, or entire matrix composition (no args for matrix)\n    zero() - replaces values in current matrix with all zeroes and returns it\n    det() - takes the determinant of the current NxN matrix\n    transpose() - transposes the current mxn matrix to an nxm matrix (1st row becomes 1st col, etc.)\n    row_echelon() - returns the current matrix in row echelon form\n    row_reduce() - returns the current matrix to reduced row echelon form\n    identity(n) - static method that returns the nxn identity matrix\n    combine(first, second) - static method that combines two matrices by concatenation\n    inverse() - returns the inverse of current nxn matrix, or None if singular\n    operator + - returns sum of two matrices, component wise addition\n    operator - - returns difference of two matrices, component wise subtraction\n    operator * - matrix multiplication, matrix-vector product, scalar multiplication\n    operator ** - returns original matrix with its components raised to power\n    operator == - checks to see if internal lists are equal\n    to string method - format: \"[row1\\n row2\\n row3\\n ...]\" and floats are shown as fractions\n    len() method - returns tuple formatted as (row, col)\n    get and set [][] - can get rows and specific values with [] or [][], and set specific values with [][]\n\n    comp = matrix composition, list of lists where each list is a row\n    rows = number of rows in matrix\n    cols = number of columns in matrix\n    \"\"\"\n    def __init__(self, comp=[]):\n        \"\"\"\n        Initializes the matrix to the specified format. 
Default is an empty 0x0 matrix\n\n Note: It is up to the user to pass well-formed matrices, that is, two different\n rows cannot be different lengths, etc.\n\n :param comp: list of lists where each individual list represents a row,\n similar to how numpy implements arrays; could also be a vector\n :type comp: list, Vector\n :return: none\n \"\"\"\n\n if isinstance(comp, Vector):\n self.comp = [[row] for row in comp.comp] # m x 1 Vector --> m rows\n else:\n self.comp = comp # list\n\n self.rows = len(self.comp) # User should never be able to change instance\n if self.rows != 0: # variables directly, use an appropriate method\n self.cols = len(self.comp[0])\n else:\n self.cols = 0 # if rows = 0, then no columns by default\n\n def resize(self, rows, cols):\n \"\"\"\n Re-sizes the current matrix to the specified dimensions, rows x cols.\n Previous elements are left in place, if size is increased then new\n locations are filled with values of 0.\n\n :param rows: new row size\n :param cols: new column size\n :type rows: int, float\n :type cols: int, float\n :return: current matrix after resizing\n :rtype: Matrix\n \"\"\"\n assert(rows >= 0 and cols >= 0) # no negative dimensions allowed\n\n dist_rows = rows - self.rows\n dist_cols = cols - self.cols\n\n if dist_rows < 0:\n self.comp = self.comp[:dist_rows]\n else:\n for i in range(dist_rows):\n self.comp.append([0]*self.cols) # update rows but don't have varying number of columns for each row\n\n if dist_cols < 0: # go through and shape columns now\n for i in range(rows):\n self.comp[i] = self.comp[i][:dist_cols]\n else:\n for i in range(rows):\n self.comp[i] += [0]*dist_cols\n\n self.rows = rows\n self.cols = cols\n return self\n\n def set(self, comp, index=None):\n \"\"\"\n Set\/change the current matrix. If index is not specified, then comp should\n be a list of lists detailing a new matrix. 
Otherwise, comp should be the\n integer value that goes in the specified index (row, column) tuple.\n\n :param comp: list of lists to replace matrix entirely, or single value\n to replace a specific location in matrix\n :param index: optional tuple\/list with (row, column) of value to be replaced\n :type comp: list of lists, int\n :type index: tuple, list\n :return: self, after edits are made\n :rtype: Matrix\n \"\"\"\n\n if not index:\n assert(isinstance(comp, list))\n self.comp = comp\n self.rows = len(comp)\n if self.rows != 0:\n self.cols = len(comp[0])\n else:\n self.cols = 0\n else:\n assert(isinstance(comp, int))\n self.comp[index[0]][index[1]] = comp\n return self\n\n def get(self, row=None, col=None):\n \"\"\"\n User can get rows, columns, the matrix comp list, or specific values\n in Matrix using this function and its optional parameters\n\n :param row: index of target row\n :param col: index of target col\n :type row: int\n :type col: int\n :return: element at specified row\/col, or a row, or a col, or entire Matrix\n :rtype: int, list (row\/col), List\n :raises: IndexError if row index or col index invalid\n \"\"\"\n if row is not None and col is not None: # value\n if 0 > row >= self.rows and 0 > col >= self.cols:\n raise IndexError(\"Row or column out of index bounds.\")\n return self.comp[row][col]\n elif col is None and row is not None: # row\n if 0 > row >= self.rows:\n raise IndexError(\"Row out of index bounds.\")\n return self.comp[row]\n elif col is not None: # just col\n if 0 > col >= self.cols:\n raise IndexError(\"Col out of index bounds.\")\n return [r[col] for r in self.comp]\n else: # entire matrix\n return self.comp\n\n def zero(self):\n \"\"\"\n Zeroes out the current matrix by replacing every element with a zero.\n\n :return: The current matrix, but updated to be the zero matrix.\n \"\"\"\n self.comp = [[0]*self.cols for _ in range(self.rows)]\n return self\n\n def det(self):\n \"\"\"\n Returns the determinant of an nxn matrix that is at least a 2x2. (recursive)\n\n :return: the determinant of the current matrix\n :rtype: int, float\n \"\"\"\n\n if self.rows != self.cols:\n raise ValueError(\"Invalid matrix - only N x N matrices supported.\")\n\n # base case -> 2 by 2\n if self.rows == 2 and self.cols == 2: # ad - bc\n return self.comp[0][0] * self.comp[1][1] - self.comp[0][1] * self.comp[1][0]\n\n # going along top, along first row (not optimized to find best path)\n top_row = self.comp[0]\n determinant = 0\n for col_i in range(len(top_row)):\n # don't include in same row or column\n new_matrix = self.comp[1:] # remove top row\n for r in range(len(new_matrix)): # remove this column from each row\n new_matrix[r] = new_matrix[r][:col_i] + new_matrix[r][col_i + 1:]\n\n constant = top_row[col_i]\n if col_i % 2 == 1:\n constant *= -1 # every other constant is negative\n\n determinant += constant * Matrix(new_matrix).det()\n return determinant\n\n def transpose(self):\n \"\"\"\n This function will return the transpose of the current matrix. 
(A -> A^T)\n \"First row becomes first column, second row becomes second column, etc.\"\n\n :return: Transposed matrix\n :rtype: Matrix\n \"\"\"\n return Matrix([[self.comp[r][c] for r in range(self.rows)] for c in range(self.cols)])\n\n @staticmethod\n def identity(n):\n \"\"\"\n Static method for creating an identity matrix of dimension nxn.\n\n :param n: dimension of identity matrix\n :type n: int\n :return: identity matrix of size nxn\n :rtype: Matrix\n \"\"\"\n return Matrix([[1 if i == j else 0 for j in range(n)] for i in range(n)])\n\n @staticmethod\n def combine(first, second):\n \"\"\"\n Static method for concatenating two matrices, side by side.\n 1 1 *combined 1 0 = 1 1 1 0\n 2 2 with* 0 1 = 2 2 0 1\n\n Warning\/Note: Matrices should have the same number of rows, otherwise\n the minimum amount of rows will be present. (If first\n has 3 rows and second has 5 rows, combined matrix has 3)\n\n :param first: first matrix\n :param second: second matrix\n :return: combined matrix, [[row1 + row2], ...]\n :rtype: Matrix\n \"\"\"\n return Matrix([one + two for one, two in zip(first.comp, second.comp)])\n\n @staticmethod\n def _clean_matrix(new_matrix):\n \"\"\"\n Not intended for client use. This method goes through matrix contents\n and reduces each row by the greatest common divisor of that row,\n multiplies row by -1 if leading pivot is negative, and turns floats\n into ints if no reduction occurs. self._clean_matrix or Matrix._clean_matrix\n\n :param new_matrix: matrix.comp, composition of matrix\n :type new_matrix: list\n :return: \"cleaned\" matrix comp\n :rtype: list\n \"\"\"\n cols = len(new_matrix[0])\n for r, row in enumerate(new_matrix):\n gcf = row[0]\n for col in row[1:]:\n gcf = gcd(gcf, col)\n\n if gcf != 0:\n new_matrix[r] = row = [elem \/\/ gcf for elem in row] # update row for next list comp\n\n c = 0\n while c < cols and row[c] == 0:\n c += 1\n if c < cols and row[c] < 0:\n new_matrix[r] = row = [-1*elem for elem in row]\n\n new_matrix[r] = [int(col) if int(col) == col else col for col in row]\n\n return new_matrix\n\n @staticmethod\n def _clear_pos(new_matrix, r, c, other_row):\n \"\"\"\n Helper method for both row echelon functions.\n\n :param new_matrix: the matrix that will be updated by algorithmically\n clearing one position in matrix\n :param r: index of row to be changed\n :param c: index of col to be changed\n :param other_row: index of other row being using in row operation\n :type r: int\n :type c: int\n :type other_row: list\n :return: matrix composition\n :rtype: list\n \"\"\"\n above = new_matrix[r][c]\n const = new_matrix[other_row][c] # row we will use\n\n # prioritize keeping numbers small \/ int division\n if const > above != 0 and const % above == 0:\n scale = const \/\/ above\n new_matrix[r] = [elem * scale for elem in new_matrix[r]]\n elif above >= const != 0 and const != 0 and above % const == 0:\n scale = above \/\/ const\n new_matrix[other_row] = [elem * scale for elem in new_matrix[other_row]]\n else: # scale both\n new_matrix[r] = [elem * const for elem in new_matrix[r]]\n new_matrix[other_row] = [elem * above for elem in new_matrix[other_row]]\n new_matrix[r] = [other - curr for curr, other in\n zip(new_matrix[r], new_matrix[other_row])]\n return new_matrix\n\n def row_echelon(self):\n \"\"\"\n This function will row reduce the current matrix until it is in row echelon form.\n That is, until there is an upper triangular matrix. 
I've made a decent amount of\n optimizations in this function, but there definitely many others that could be made.\n\n Note: This doesn't change the matrix internally, you will have to assign the\n return value to the your matrix variable if you want to change it.\n There is no guarantee that the matrix returned will contain only integers, may\n have floats.\n\n :return: row echelon form of current matrix\n :rtype: Matrix\n :return:\n \"\"\"\n\n # adjust matrix so rows are in proper descending order \/ putting any pre-made pivots in place\n new_matrix = sorted(self.comp, reverse=True)\n pivot_row = 0\n\n for c in range(self.cols):\n new_matrix = sorted(new_matrix, reverse=True) # swap any out of place rows\n if pivot_row < self.rows and new_matrix[pivot_row][c] != 0:\n for r in range(pivot_row+1, self.rows): # use pivot row to clear other rows\n while new_matrix[r][c] != 0:\n\n new_matrix = self._clear_pos(new_matrix, r, c, pivot_row)\n\n pivot_row += 1\n new_matrix = self._clean_matrix(new_matrix)\n return Matrix(sorted(new_matrix, reverse=True)) # swap any out of place rows\n\n def row_reduce(self):\n \"\"\"\n This function will row reduce the current matrix until it is in reduced row\n echelon form (RREF). The transpose of a matrix has the same RREF as original.\n\n Note: This doesn't change the matrix internally, you will have to assign the\n return value to the your matrix variable if you want to change it.\n\n :return: reduced row echelon form of current matrix\n :rtype: Matrix\n \"\"\"\n\n new_matrix = self.row_echelon().comp # get in row echelon form first\n\n pivots = {} # store pivot indexes key-value for use later\n\n # store pivots as col : row pairs\n for r, row in enumerate(new_matrix):\n # identify pivot\n i = 0\n while i < self.cols and row[i] == 0:\n i += 1\n if i < self.cols:\n pivots[i] = r\n\n # apply only 0s above pivot (bottom part is done since already in row echelon form)\n offset = 0 # how far ahead the first pivot is (ex. 
may be zero cols before first pivot)\n for c in range(self.cols):\n if c in pivots:\n pivot_row = pivots[c] # row the pivot is in\n for r in range(pivot_row): # top part, don't loop past location of pivot\n while new_matrix[r][c] != 0: # stay in same column and fix parts above pivot\n other_row = c-offset # when no offset, col c can be cleared using row c since there are c zeros\n\n new_matrix = self._clear_pos(new_matrix, r, c, other_row)\n else:\n offset += 1\n\n new_matrix = self._clean_matrix(new_matrix) # this function also changes floats to perfect ints based on gcd\n\n # now, apply \"each pivot is 1\" rule, floats inevitable, but preserve as much ints as possible\n for r, row in enumerate(new_matrix):\n # identify pivot\n i = 0\n while i < self.cols and row[i] == 0:\n i += 1\n # divide row by proper amount to get a 1 on pivot\n if i < self.cols:\n pivot = row[i]\n new_matrix[r] = [elem \/\/ pivot if elem % pivot == 0 else elem \/ pivot for elem in row]\n return Matrix(sorted(new_matrix, reverse=True)) # ensure ordering is still valid\n\n def inverse(self):\n \"\"\"\n Gets the inverse A^-1 of the current matrix A.\n\n :return: inverse matrix of current matrix, or None if not invertible (singular)\n :rtype: Matrix\n :raises: value error if current matrix is not nxn\n \"\"\"\n n = self.cols\n identity = Matrix.identity(n)\n if self.rows != n:\n raise ValueError(\"Need an nxn matrix to calculate inverse.\")\n # create combined matrix\n with_identity = Matrix.combine(self, identity).row_reduce()\n # if left side is identity, then right side is inverse\n if Matrix([row[:n] for row in with_identity.comp]) != identity:\n return None # no inverse, singular\n else:\n return Matrix([row[-n:] for row in with_identity.comp])\n\n def __add__(self, other):\n \"\"\"\n Adds two matrices and returns a matrix with the respective components\n added together as expected.\n\n :param other: the other matrix to be added to current instance matrix\n :type other: Matrix\n :return: a matrix with the resulting added components\n :rtype: Matrix\n :raises: ValueError when matrices do not have same dimensions\n \"\"\"\n new_comp = []\n if self.rows == other.rows and self.cols == other.cols:\n for x, y in zip(self.comp, other.comp):\n new_comp.append([a + b for a, b in zip(x, y)]) # adding done in list comprehension\n return Matrix(new_comp)\n else:\n raise ValueError(\"Size mismatch, both matrices must have the same number of rows and columns.\")\n\n def __sub__(self, other):\n \"\"\"\n Subtracting two matrices returns a matrix with the respective components\n subtracted. \"current - other\" is formatting.\n\n :param other: the other matrix which is subtracting from the current matrix\n :type other: Matrix\n :return: a matrix with the resulting subtracted components\n :rtype: Matrix\n :raises: ValueError when matrices do not have same dimensions\n \"\"\"\n new_comp = []\n if self.rows == other.rows and self.cols == other.cols:\n for x, y in zip(self.comp, other.comp):\n new_comp.append([a - b for a, b in zip(x, y)]) # subtracting done in list comprehension\n return Matrix(new_comp)\n else:\n raise ValueError(\"Size mismatch, both matrices must have the same number of rows and columns.\")\n\n def __mul__(self, other):\n \"\"\"\n Multiplies the two matrices together; aka Matrix Multiplication.\n Matrix-Vector product is also possible using the Vector class, though\n this method works for a mx1 matrix as well. 
Also configured to work with\n normal application of multiplying a scalar to a matrix.\n\n Notes: Approach is to take the dot product of each row of current matrix\n with each column of other matrix\/vector. Since you typically write\n \"Ax\" where A is the matrix and x is the vector, this syntax should\n be adhered to when attempting matrix multiplication with these classes.\n\n :param other: the other matrix or vector, could also be an int or float for scaling\n :type other: Matrix, int, float\n :return: the resulting matrix\n :rtype: Matrix\n :raises: ValueError when there's a matrix multiplication size mismatch ([mxn]*[nxp]=[mxp])\n \"\"\"\n new_matrix = []\n if isinstance(other, int) or isinstance(other, float):\n for row in self.comp:\n new_matrix.append([elem * other for elem in row])\n return Matrix(new_matrix)\n elif self.cols == other.rows: # [m x n] * [n x p] = [m x p] i.e. [self.rows x other.cols] matrix\n other_cols = []\n for i in range(other.cols): # extract columns from rows\n other_cols.append([row[i] if isinstance(other, Matrix) else row for row in other.comp])\n for row_me in self.comp:\n new_row = []\n for col_other in other_cols:\n new_row.append(Vector(row_me) * Vector(col_other)) # Dot product of vectors\n new_matrix.append(new_row)\n return Vector([row[0] for row in new_matrix]) if other.cols == 1 else Matrix(new_matrix)\n else:\n raise ValueError(\"Size mismatch; [m x n] * [n x p] = [m x p] matrix\")\n\n def __eq__(self, other):\n \"\"\"\n If two matrices have the same components, then they are equal. If the\n lists are not the same length, will always be False with no error thrown.\n Have to compare each component due to necessity of using math.isclose()\n on floats in order to deal with floating point errors.\n\n :param other: other matrix being tested for equality\n :type other: Matrix\n :return: True or False based on equality\n :rtype: bool\n \"\"\"\n if self.rows != other.rows or self.cols != other.cols:\n return False\n for my_row, other_row in zip(self, other):\n for my_val, other_val in zip(my_row, other_row):\n if not isclose(my_val, other_val):\n return False\n\n return self.comp == other.comp # compares lists\n\n def __pow__(self, power, modulo=None):\n \"\"\"\n Allows you to raise a matrix to a power, that is, each of the\n components of the current matrix is raised to a power. Can use\n power 0 to fill the current matrix with all 1s.\n\n :param power: value to raise each component to\n :param modulo: optional parameter that applies the modulus operator to each result\n :type power: int, float\n :type modulo: int, float\n :return: a matrix containing the appropriately scaled components\n :rtype: Matrix\n \"\"\"\n new_comp = []\n for row in self.comp:\n new_row = []\n for elem in row:\n if modulo:\n elem = elem % modulo\n new_row.append(pow(elem, power))\n new_comp.append(new_row)\n return Matrix(new_comp)\n\n def __str__(self):\n \"\"\"\n String representation of matrix is each row separated by new line\n characters. 
This is done so that when printed it resembles a normal\n matrix as closely as possible.\n\n :return: string representation of current matrix\n :rtype: str\n \"\"\"\n\n # joins each row of matrix with a new line character and a space,\n # floats are converted to visual fractions, need to get rid of quotes around them\n return \"[\" + '\\n '\\\n .join([str([str(Fraction(elem).limit_denominator()) if isinstance(elem, float) else elem for elem in row])\n .replace('\\'', '') for row in self.comp])\\\n + \"]\"\n\n def __len__(self):\n \"\"\"\n :return: returns tuple formatted as (row, col)\n :rtype: tuple\n \"\"\"\n return self.rows, self.cols\n\n def __getitem__(self, index):\n \"\"\"\n Allows user to access internal self.comp without doing\n my_matrix.comp[i][j] and instead doing my_matrix[i][j]\n\n Note: the first [] calls this function, which returns row,\n that row is a list, which supports [] in the same way\n that this function does.\n\n :param index: index of row\n :type index: int\n :return: list or value for row or row+col value\n :rtype: list, value\n \"\"\"\n return self.comp[index]\n\n def __setitem__(self, key, value):\n \"\"\"\n Allows the user to set a value using brackets.\n\n Note: behavior undefined if user attempts to set a row.\n\n :param key: index of row to be changed\n :param value: value to be set\n :type key: int\n :type value: int\n :return: no return\n \"\"\"\n\n self.set(value, key)\n\n\nif __name__ == \"__main__\":\n test()\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_572","text":"#=====================================================#\n#\n# File: zeta_zeroes_plot.py\n# Author: \n# Date: May 2021\n# Description:\n# Shows the position of the zeta zeroes along the critical strip\n#\n#=============================================================================#\n\n\nfrom time import time\nimport numpy as np\nfrom cmath import pi, sin\nfrom scipy.special import gamma\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\n\n\n# returns the value of \\frac{\\zeta(s)}{\\eta(s)}\n# = \\frac{ 2^{iy}}{2^{iy}-2^{1-x}}\ndef zeta_over_eta(x, y):\n A = -2**(1 - x)\n B = 2**(y * 1j)\n zeta_over_eta = B \/ (A + B)\n return zeta_over_eta\n\n\n# approximates the eta function where\n# r is the real part of the input (r\\equiv\\Re(s))\n# and t is the imaginary part of the input (t\\equiv\\Im(s))\n# \\eta(r+it)=\\sum_{n=1}^\\infty\\frac{(-1)^{n-1}}{n^r}[cos(y\\phi_n)-isin(y\\phi_n)]\ndef eta(x, y, limit=5*10**3):\n n = np.arange(1, limit)\n coefficient = (-1)**(n-1) \/ n**x\n phi_y = np.log(n) * y\n real_parts = coefficient * np.cos(phi_y)\n imaginary_parts = coefficient * np.sin(phi_y)\n\n real_summation = real_parts.sum()\n imaginary_summation = imaginary_parts.sum()\n eta = real_summation - imaginary_summation * 1j\n\n return eta\n\n\n# returns value of \\zeta(s)\n# where s \\equiv r + it ,r,t \\in \\mathbb{C}\n# by computing \\frac{\\zeta(s)\\eta(s)}\n# for x = 1\/2\ndef zeta(x, y=0):\n result = zeta_over_eta(x, y) * eta(x, y)\n return result\n\n\ndef is_sign_change(previous, number):\n # print(f'prev: {previous}, number {number}')\n if previous == 0:\n return False\n previous_sign = previous \/ abs(previous)\n number_sign = number \/ abs(number)\n return previous_sign != number_sign\n\n\ndef animate(i, x_vals, y_vals):\n PRECISION = 100\n previous_y_input = (i-1)\/PRECISION\n current_y_input = i\/PRECISION\n previous_zeta = zeta(1\/2, previous_y_input)\n current_zeta = zeta(1\/2, current_y_input)\n # 
print(current_zeta.real, current_zeta.imag)\n # print(y_vals)\n if is_sign_change(previous_zeta.real, current_zeta.real) and is_sign_change(previous_zeta.imag, current_zeta.imag):\n x_vals.append(1\/2)\n y_vals.append(current_y_input)\n print(current_y_input)\n\n plt.cla()\n\n plt.scatter(x_vals, y_vals)\n\n plt.xlabel('Re')\n plt.ylabel('Im')\n plt.tight_layout()\n\n\ndef plot():\n plt.style.use('dark_background')\n\n x_vals = []\n y_vals = []\n\n animation = FuncAnimation(plt.gcf(), animate, fargs=(x_vals, y_vals,), interval=0)\n plt.xlabel('Re')\n plt.ylabel('Im')\n plt.tight_layout()\n plt.show()\n\n\nif __name__=='__main__':\n start = time()\n plot()\n print(f'--- {time()-start} seconds ---')\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_573","text":"import numpy as np\nfrom scipy.ndimage import uniform_filter, gaussian_filter\nfrom numpy.lib.arraypad import _as_pairs\n\n_integer_types = (np.byte, np.ubyte, # 8 bits\n np.short, np.ushort, # 16 bits\n np.intc, np.uintc, # 16 or 32 or 64 bits\n np.int_, np.uint, # 32 or 64 bits\n np.longlong, np.ulonglong) # 64 bits\n\n_integer_ranges = {t: (np.iinfo(t).min, np.iinfo(t).max)\n for t in _integer_types}\n\ndtype_range = {np.bool_: (False, True),\n np.bool8: (False, True),\n np.float16: (-1, 1),\n np.float32: (-1, 1),\n np.float64: (-1, 1)}\n\ndtype_range.update(_integer_ranges)\n\ndef _assert_compatible(im1, im2):\n \"\"\"Raise an error if the shape and dtype do not match.\"\"\"\n if not im1.shape == im2.shape:\n raise ValueError('Input images must have the same dimensions.')\n return\n\ndef _as_floats(im1, im2):\n \"\"\"Promote im1, im2 to nearest appropriate floating point precision.\"\"\"\n float_type = np.result_type(im1.dtype, im2.dtype, np.float32)\n im1 = np.asarray(im1, dtype=float_type)\n im2 = np.asarray(im2, dtype=float_type)\n return im1, im2\n\ndef crop(ar, crop_width, copy=False, order='K'):\n \"\"\"Crop array `ar` by `crop_width` along each dimension.\n Parameters\n ----------\n ar : array-like of rank N\n Input array.\n crop_width : {sequence, int}\n Number of values to remove from the edges of each axis.\n ``((before_1, after_1),`` ... ``(before_N, after_N))`` specifies\n unique crop widths at the start and end of each axis.\n ``((before, after),)`` specifies a fixed start and end crop\n for every axis.\n ``(n,)`` or ``n`` for integer ``n`` is a shortcut for\n before = after = ``n`` for all axes.\n copy : bool, optional\n If `True`, ensure the returned array is a contiguous copy. Normally,\n a crop operation will return a discontiguous view of the underlying\n input array.\n order : {'C', 'F', 'A', 'K'}, optional\n If ``copy==True``, control the memory layout of the copy. See\n ``np.copy``.\n Returns\n -------\n cropped : array\n The cropped array. If ``copy=False`` (default), this is a sliced\n view of the input array.\n \"\"\"\n ar = np.array(ar, copy=False)\n crops = _as_pairs(crop_width, ar.ndim, as_index=True)\n slices = tuple(slice(a, ar.shape[i] - b)\n for i, (a, b) in enumerate(crops))\n if copy:\n cropped = np.array(ar[slices], order=order, copy=True)\n else:\n cropped = ar[slices]\n return cropped\n\ndef MSE(im1, im2):\n \"\"\"Compute the mean-squared error between two images.\n Parameters\n ----------\n im1, im2 : ndarray\n Image. 
Any dimensionality.\n Returns\n -------\n mse : float\n The mean-squared error (MSE) metric.\n \"\"\"\n _assert_compatible(im1, im2)\n im1, im2 = _as_floats(im1, im2)\n return np.mean(np.square(im1 - im2), dtype=np.float64)\n\n\ndef PSNR(ref_img, pred_img):\n \"\"\"\n Compute average PSNR score for a batch of pairs of images\n\n :param ref_img: ndarray of shape (batch_size, width, height, channels) with target noise-free image\n :param pred_img: ndarray of shape (batch_size, width, height, channels) with predicted denoised image\n :return: float, averaged over batch PSNR score\n \"\"\"\n\n _assert_compatible(ref_img, pred_img)\n\n dmin, dmax = dtype_range[ref_img.dtype.type]\n true_min, true_max = np.min(ref_img), np.max(ref_img)\n if true_max > dmax or true_min < dmin:\n raise ValueError(\n \"im_true has intensity values outside the range expected for \"\n \"its data type. Please manually specify the data_range\")\n if true_min >= 0:\n # most common case (255 for uint8, 1 for float)\n data_range = dmax\n else:\n data_range = dmax - dmin\n\n ref_img, pred_img = _as_floats(ref_img, pred_img)\n\n err = MSE(ref_img, pred_img)\n return 10 * np.log10((data_range ** 2) \/ err)\n\n\ndef SSIM(X, Y, win_size=None, gradient=False,\n data_range=None, multichannel=False, gaussian_weights=False,\n full=False, **kwargs):\n \"\"\"Compute the mean structural similarity index between two images.\n Parameters\n ----------\n X, Y : ndarray\n Image. Any dimensionality.\n win_size : int or None\n The side-length of the sliding window used in comparison. Must be an\n odd value. If `gaussian_weights` is True, this is ignored and the\n window size will depend on `sigma`.\n gradient : bool, optional\n If True, also return the gradient with respect to Y.\n data_range : float, optional\n The data range of the input image (distance between minimum and\n maximum possible values). By default, this is estimated from the image\n data-type.\n multichannel : bool, optional\n If True, treat the last dimension of the array as channels. Similarity\n calculations are done independently for each channel then averaged.\n gaussian_weights : bool, optional\n If True, each patch has its mean and variance spatially weighted by a\n normalized Gaussian kernel of width sigma=1.5.\n full : bool, optional\n If True, also return the full structural similarity image.\n Other Parameters\n ----------------\n use_sample_covariance : bool\n If True, normalize covariances by N-1 rather than, N where N is the\n number of pixels within the sliding window.\n K1 : float\n Algorithm parameter, K1 (small constant, see [1]_).\n K2 : float\n Algorithm parameter, K2 (small constant, see [1]_).\n sigma : float\n Standard deviation for the Gaussian when `gaussian_weights` is True.\n Returns\n -------\n mssim : float\n The mean structural similarity over the image.\n grad : ndarray\n The gradient of the structural similarity index between X and Y [2]_.\n This is only returned if `gradient` is set to True.\n S : ndarray\n The full SSIM image. This is only returned if `full` is set to True.\n Notes\n -----\n To match the implementation of Wang et. al. [1]_, set `gaussian_weights`\n to True, `sigma` to 1.5, and `use_sample_covariance` to False.\n References\n ----------\n .. [1] ., ., ., & .\n (2004). Image quality assessment: From error visibility to\n structural similarity. IEEE Transactions on Image Processing,\n 13, 600-612.\n https:\/\/ece.uwaterloo.ca\/~z70wang\/publications\/ssim.pdf,\n :DOI:`10.1109\/TIP.2003.819861`\n .. [2] . (2009). 
Exact global histogram specification\n optimized for structural similarity. Optical Review, 16, 613-621.\n :arXiv:`0901.0065`\n :DOI:`10.1007\/s10043-009-0119-z`\n \"\"\"\n if not X.shape == Y.shape:\n raise ValueError('Input images must have the same dimensions.')\n\n if multichannel:\n # loop over channels\n args = dict(win_size=win_size,\n gradient=gradient,\n data_range=data_range,\n multichannel=False,\n gaussian_weights=gaussian_weights,\n full=full)\n args.update(kwargs)\n nch = X.shape[-1]\n mssim = np.empty(nch)\n if gradient:\n G = np.empty(X.shape)\n if full:\n S = np.empty(X.shape)\n for ch in range(nch):\n ch_result = SSIM(X[..., ch], Y[..., ch], **args)\n if gradient and full:\n mssim[..., ch], G[..., ch], S[..., ch] = ch_result\n elif gradient:\n mssim[..., ch], G[..., ch] = ch_result\n elif full:\n mssim[..., ch], S[..., ch] = ch_result\n else:\n mssim[..., ch] = ch_result\n mssim = mssim.mean()\n if gradient and full:\n return mssim, G, S\n elif gradient:\n return mssim, G\n elif full:\n return mssim, S\n else:\n return mssim\n\n K1 = kwargs.pop('K1', 0.01)\n K2 = kwargs.pop('K2', 0.03)\n sigma = kwargs.pop('sigma', 1.5)\n if K1 < 0:\n raise ValueError(\"K1 must be positive\")\n if K2 < 0:\n raise ValueError(\"K2 must be positive\")\n if sigma < 0:\n raise ValueError(\"sigma must be positive\")\n use_sample_covariance = kwargs.pop('use_sample_covariance', True)\n\n if gaussian_weights:\n # Set to give an 11-tap filter with the default sigma of 1.5 to match\n # Wang et. al. 2004.\n truncate = 3.5\n\n if win_size is None:\n if gaussian_weights:\n # set win_size used by crop to match the filter size\n r = int(truncate * sigma + 0.5) # radius as in ndimage\n win_size = 2 * r + 1\n else:\n win_size = 7 # backwards compatibility\n\n if np.any((np.asarray(X.shape) - win_size) < 0):\n raise ValueError(\n \"win_size exceeds image extent. If the input is a multichannel \"\n \"(color) image, set multichannel=True.\")\n\n if not (win_size % 2 == 1):\n raise ValueError('Window size must be odd.')\n\n if data_range is None:\n assert X.dtype == Y.dtype, \\\n \"Inputs have mismatched dtype. Setting data_range based on X.dtype.\"\n dmin, dmax = dtype_range[X.dtype.type]\n data_range = dmax - dmin\n\n ndim = X.ndim\n\n if gaussian_weights:\n filter_func = gaussian_filter\n filter_args = {'sigma': sigma, 'truncate': truncate}\n else:\n filter_func = uniform_filter\n filter_args = {'size': win_size}\n\n # ndimage filters need floating point data\n X = X.astype(np.float64)\n Y = Y.astype(np.float64)\n\n NP = win_size ** ndim\n\n # filter has already normalized by NP\n if use_sample_covariance:\n cov_norm = NP \/ (NP - 1) # sample covariance\n else:\n cov_norm = 1.0 # population covariance to match Wang et. al. 
2004\n\n # compute (weighted) means\n ux = filter_func(X, **filter_args)\n uy = filter_func(Y, **filter_args)\n\n # compute (weighted) variances and covariances\n uxx = filter_func(X * X, **filter_args)\n uyy = filter_func(Y * Y, **filter_args)\n uxy = filter_func(X * Y, **filter_args)\n vx = cov_norm * (uxx - ux * ux)\n vy = cov_norm * (uyy - uy * uy)\n vxy = cov_norm * (uxy - ux * uy)\n\n R = data_range\n C1 = (K1 * R) ** 2\n C2 = (K2 * R) ** 2\n\n A1, A2, B1, B2 = ((2 * ux * uy + C1,\n 2 * vxy + C2,\n ux ** 2 + uy ** 2 + C1,\n vx + vy + C2))\n D = B1 * B2\n S = (A1 * A2) \/ D\n\n # to avoid edge effects will ignore filter radius strip around edges\n pad = (win_size - 1) \/\/ 2\n\n # compute (weighted) mean of ssim\n mssim = crop(S, pad).mean()\n\n if gradient:\n # The following is Eqs. 7-8 of Avanaki 2009.\n grad = filter_func(A1 \/ D, **filter_args) * X\n grad += filter_func(-S \/ B2, **filter_args) * Y\n grad += filter_func((ux * (A2 - A1) - uy * (B2 - B1) * S) \/ D,\n **filter_args)\n grad *= (2 \/ X.size)\n\n if full:\n return mssim, grad, S\n else:\n return mssim, grad\n else:\n if full:\n return mssim, S\n else:\n return mssim"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_574","text":"import matplotlib\nfrom hydroDL import kPath, utils\nfrom hydroDL.app import waterQuality\nfrom hydroDL.master import basins\nfrom hydroDL.data import usgs, gageII, gridMET, ntn\nfrom hydroDL.master import slurm\nfrom hydroDL.post import axplot, figplot\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport scipy\nimport os\nimport json\n\ndataName = 'rbWN5'\nwqData = waterQuality.DataModelWQ(dataName)\nsiteNoLst = wqData.siteNoLst\nlabel = 'QFP_C'\nep = 500\nreTest = False\ncodeLst = sorted(usgs.newC)\nnSite = len(siteNoLst)\n\nhsLst = [16, 32, 64, 128, 256, 512]\ncorrMat = np.full([nSite, len(codeLst), len(hsLst)], np.nan)\nrmseMat = np.full([nSite, len(codeLst), len(hsLst)], np.nan)\n\nfor k, hs in enumerate(hsLst):\n code = 'comb'\n trainSet = '{}-B10'.format('comb')\n testSet = '{}-A10'.format('comb')\n outName = '{}-{}-{}-{}-hs{}'.format(dataName, code, label, trainSet, hs)\n master = basins.loadMaster(outName)\n yP, ycP = basins.testModel(\n outName, testSet, wqData=wqData, ep=ep, reTest=reTest)\n ind = wqData.subset[testSet]\n info = wqData.info.iloc[ind].reset_index()\n siteNoTemp = info['siteNo'].unique()\n for iCode, code in enumerate(codeLst):\n ic = wqData.varC.index(code)\n if len(wqData.c.shape) == 3:\n p = yP[-1, :, master['varY'].index(code)]\n o = wqData.c[-1, ind, ic]\n elif len(wqData.c.shape) == 2:\n p = ycP[:, master['varYC'].index(code)]\n o = wqData.c[ind, ic]\n for siteNo in siteNoTemp:\n iS = siteNoLst.index(siteNo)\n indS = info[info['siteNo'] == siteNo].index.values\n rmse, corr = utils.stat.calErr(p[indS], o[indS])\n corrMat[iS, iCode, k] = corr\n rmseMat[iS, iCode, k] = rmse\n\n\n# plot box\ndirSel = os.path.join(kPath.dirData, 'USGS', 'inventory', 'siteSel')\nwith open(os.path.join(dirSel, 'dictRB_Y30N5.json')) as f:\n dictSite = json.load(f)\nlabLst1 = [usgs.codePdf.loc[code]['shortName'] +\n '\\n'+code for code in codeLst]\nlabLst2 = hsLst\ndataBox = list()\nfor k, code in enumerate(codeLst):\n siteNoCode = dictSite[code]\n indS = [siteNoLst.index(siteNo) for siteNo in siteNoCode]\n temp = list()\n for i in range(len(hsLst)):\n temp.append(corrMat[indS, k, i])\n dataBox.append(temp)\nfig = figplot.boxPlot(dataBox, label1=labLst1, widths=0.5,\n label2=labLst2, figsize=(12, 4), yRange=[0, 1])\nfig.show()\n"} 
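A minimal, illustrative sketch (not part of the dataset records above or below): assuming the MSE, PSNR and SSIM helpers defined in the image-metrics record above are importable as module-level functions, they could be exercised on a pair of small synthetic uint8 images roughly as follows; the image sizes and noise levels here are hypothetical test values, not taken from the source.

import numpy as np

# hypothetical test data: a random "clean" image and a lightly perturbed copy
ref = np.random.randint(0, 256, size=(64, 64), dtype=np.uint8)
noise = np.random.randint(-10, 11, size=ref.shape)
noisy = np.clip(ref.astype(np.int16) + noise, 0, 255).astype(np.uint8)

print("MSE :", MSE(ref, noisy))    # mean squared error between the two arrays
print("PSNR:", PSNR(ref, noisy))   # data_range inferred from the uint8 dtype (0..255)
print("SSIM:", SSIM(ref, noisy))   # mean structural similarity; win_size defaults to 7 when gaussian_weights is False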
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_575","text":"#!\/usr\/bin\/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 26 14:46:56 2017\n@author: luohao\n\"\"\"\n\n\"\"\"\nCVPR2017 paper:, , , et al. Re-ranking Person Re-identification with k-reciprocal Encoding[J]. 2017.\nurl:http:\/\/openaccess.thecvf.com\/content_cvpr_2017\/papers\/Zhong_Re-Ranking_Person_Re-Identification_CVPR_2017_paper.pdf\nMatlab version: https:\/\/github.com\/zhunzhong07\/person-re-ranking\n\"\"\"\n\n\"\"\"\nAPI\nprobFea: all feature vectors of the query set, shape = (image_size, feature_dim)\ngalFea: all feature vectors of the gallery set, shape = (image_size, feature_dim)\nk1,k2,lambda: parameters, the original paper is (k1=20,k2=6,lambda=0.3)\nMemorySave: set to 'True' when using MemorySave mode\nMinibatch: avaliable when 'MemorySave' is 'True'\n\"\"\"\n\nimport numpy as np\nfrom scipy.spatial.distance import cdist\nfrom scipy.stats import entropy # for KL divergence\nfrom math import log\n\ndef compute_metadata_distance_hard(q_metadatas, g_metadatas, metadata_prob_ranges):\n q_num = q_metadatas.shape[0]\n g_num = g_metadatas.shape[0]\n dist = np.zeros((q_num, g_num), dtype=np.float32)\n for iq in range(q_num):\n for ig in range(g_num):\n for p_begin, p_end in metadata_prob_ranges:\n cq = np.argmax(q_metadatas[iq][p_begin:p_end])\n cg = np.argmax(g_metadatas[ig][p_begin:p_end])\n if cq != cg:\n dist[iq, ig] += 1\n break\n return dist\n\ndef compute_metadata_distance_semihard(q_metadatas, g_metadatas, metadata_prob_ranges):\n q_num = q_metadatas.shape[0]\n g_num = g_metadatas.shape[0]\n dist = np.zeros((q_num, g_num), dtype=np.float32)\n for iq in range(q_num):\n for ig in range(g_num):\n for p_begin, p_end in metadata_prob_ranges:\n cq = np.argmax(q_metadatas[iq][p_begin:p_end])\n cg = np.argmax(g_metadatas[ig][p_begin:p_end])\n if cq != cg and cq != (p_end - p_begin - 1) and cg != (p_end - p_begin - 1): # the last class is \"other\"\n dist[iq, ig] += 1\n break\n return dist\n\ndef compute_metadata_distance_easy(q_metadatas, g_metadatas, metadata_prob_ranges):\n q_num = q_metadatas.shape[0]\n g_num = g_metadatas.shape[0]\n dist = np.ones((q_num, g_num), dtype=np.float32)\n for iq in range(q_num):\n for ig in range(g_num):\n for p_begin, p_end in metadata_prob_ranges:\n cq = np.argmax(q_metadatas[iq][p_begin:p_end])\n cg = np.argmax(g_metadatas[ig][p_begin:p_end])\n if cq == cg:\n dist[iq, ig] = 0\n break\n return dist\n\ndef compute_KL_divergence(q_metadatas, g_metadatas, metadata_prob_ranges = [(0,6), (6,18), (18,26)]):\n q_num = q_metadatas.shape[0]\n g_num = g_metadatas.shape[0]\n m_num = len(metadata_prob_ranges)\n KL_div = np.zeros((q_num, g_num, m_num), dtype=np.float32)\n epsilon = 1e-4\n for iq in range(q_num):\n for ig in range(g_num):\n for im, (p_begin, p_end) in enumerate(metadata_prob_ranges):\n KL_div[iq, ig, im] = entropy(q_metadatas[iq][p_begin:p_end]+epsilon, g_metadatas[ig][p_begin:p_end]+epsilon)\n return KL_div\n\ndef compute_pred(metadatas, metadata_prob_ranges):\n all_num = metadatas.shape[0]\n m_num = len(metadata_prob_ranges)\n pred = np.zeros((all_num, m_num), dtype=np.int32)\n for im, (p_begin, p_end) in enumerate(metadata_prob_ranges):\n pred[:,im] = np.argmax(metadatas[:,p_begin:p_end], axis=1)\n return pred\n\n\ndef compute_confusion_weight_old(q_pred, g_pred, confusion_mat):\n q_num = q_pred.shape[0]\n g_num = g_pred.shape[0]\n c_num = confusion_mat.shape[0]\n\n confusion_mat = confusion_mat + 1e-4*np.ones((c_num, c_num), 
dtype=np.float32)\n\n c_weight = np.transpose(confusion_mat)*np.diag(confusion_mat)\n c_weight += np.transpose(c_weight)\n c_sum = np.sum(confusion_mat, axis=0).reshape(1,-1)\n c_sum = np.matmul(np.transpose(c_sum), c_sum)\n c_weight = c_weight * np.reciprocal(c_sum)\n #c_weight[range(c_num),range(c_num)]\/=2\n np.fill_diagonal(c_weight, 1) # no penalty for the same class\n #print('c_weight = ')\n #print(c_weight)\n \n confusion_weight = np.ones((q_num, g_num), dtype=np.float32)\n for iq in range(q_num):\n for ig in range(g_num):\n confusion_weight[iq, ig] = c_weight[q_pred[iq], g_pred[ig]]\n return confusion_weight\n\n \ndef compute_confusion_weight(q_pred, g_pred, confusion_mat):\n q_num = q_pred.shape[0]\n g_num = g_pred.shape[0]\n c_num = confusion_mat.shape[0]\n\n #print('confusion_mat = ')\n #print(confusion_mat)\n confusion_mat = confusion_mat + 1e-4*np.ones((c_num, c_num), dtype=np.float32)\n c_sum = np.sum(confusion_mat, axis=0)\n confusion_mat_norm = confusion_mat * np.reciprocal(c_sum)\n #print('confusion_mat_norm = ')\n #print(confusion_mat_norm)\n c_weight = np.matmul(np.transpose(confusion_mat_norm), confusion_mat_norm)\n np.fill_diagonal(c_weight, 1) # no penalty for the same class\n #print('c_weight = ')\n #print(c_weight)\n \n confusion_weight = np.ones((q_num, g_num), dtype=np.float32)\n for iq in range(q_num):\n for ig in range(g_num):\n confusion_weight[iq, ig] = c_weight[q_pred[iq], g_pred[ig]]\n return confusion_weight\n \n\ndef cluster_gallery_soft(gf, g_metadatas, metadata_prob_ranges = [(0,6), (6,18), (18,26)], k=20, learning_rate=0.5, num_iter=20, MemorySave=False, Minibatch=2000):\n '''\n return new gallery feature gf_new\n '''\n gf = gf.copy() # make a copy since it will be updated in each iteration\n g_num = gf.shape[0]\n # meta data penalty\n '''dist_meta = np.zeros((g_num, g_num), dtype=np.float16)\n epsilon = 1e-4\n for i in range(g_num):\n metaI = g_metadatas[i]\n for j in range(g_num):\n metaJ = g_metadatas[j]\n for prob_range_begin, prob_ranges_end in metadata_prob_ranges:\n if entropy (metaI[prob_range_begin:prob_ranges_end] + epsilon, metaJ[prob_range_begin:prob_ranges_end] + epsilon) > 0.5:\n dist_meta[i][j] = 1\n break'''\n dist_meta = compute_metadata_distance_hard(g_metadatas, g_metadatas, metadata_prob_ranges)\n for iter in range(num_iter):\n #print('iter: %d' % iter)\n #print('computing original distance')\n if MemorySave:\n g_g_dist = np.zeros(shape=[g_num, g_num], dtype=np.float16)\n i = 0\n while True:\n it = i + Minibatch\n if it < np.shape(gf)[0]:\n g_g_dist[i:it, ] = np.power(cdist(gf[i:it, ], gf), 2).astype(np.float16)\n else:\n g_g_dist[i:, :] = np.power(cdist(gf[i:, ], gf), 2).astype(np.float16)\n break\n i = it\n else:\n g_g_dist = cdist(gf, gf).astype(np.float16)\n g_g_dist = np.power(g_g_dist, 2).astype(np.float16)\n dist_min = np.min(g_g_dist[np.triu_indices(g_num,1)])\n dist_max = np.max(g_g_dist[np.triu_indices(g_num,1)])\n #print('dist_min = %f, dist_max = %f' % (dist_min, dist_max))\n #g_g_dist = np.transpose(g_g_dist \/ np.max(g_g_dist, axis=0))\n # apply meta data\n g_g_dist += np.transpose(dist_meta * np.max(g_g_dist, axis=1))\n initial_rank = np.argsort(g_g_dist).astype(np.int32)\n # apply mean field\n gf_new = gf.copy()\n sigma = dist_min \/ 2 + 1\n for i in range(g_num):\n k_neigh_index = initial_rank[i, :k + 1]\n sigma = np.min(g_g_dist[i, k_neigh_index[1:]]) + 1\n weight = np.exp(-g_g_dist[i, k_neigh_index] \/ sigma)\n weight \/= np.sum(weight)\n if i % 100 == 0 and False:\n print(i)\n print(k_neigh_index)\n 
print(g_g_dist[i, k_neigh_index])\n print(weight)\n gf_new[i] = np.dot(np.transpose(gf[k_neigh_index]), weight)\n gf = gf * (1 - learning_rate) + gf_new * (learning_rate)\n return gf\n\n\ndef re_ranking_metadata_soft_v3(original_dist, metadata_dist, query_num, all_num, r_metadata, k1, k2, lambda_value):\n '''\n input:\n original_dist: pre-compute distmat\n metadata_dist: metadata distance\n r_metadata: weight for metadata distance\n return:\n final_dist\n '''\n \n # The following naming, e.g. gallery_num, is different from outer scope.\n # Don't care about it.\n gallery_num = all_num\n original_dist = np.transpose(original_dist \/ np.max(original_dist, axis=0))\n ### additional scaling\n scaling = False\n if scaling:\n tmp_rank = np.argsort(original_dist).astype(np.int32)\n min_dist = original_dist[range(all_num), tmp_rank[:,1]]\n metadata_dist = np.transpose(metadata_dist * min_dist)\n #print('min_dist = ')\n #print(min_dist)\n ###\n original_dist += r_metadata * metadata_dist\n\n \n print('starting re_ranking')\n initial_rank = np.argsort(original_dist).astype(np.int32)\n V = np.zeros_like(original_dist).astype(np.float16)\n for i in range(all_num):\n # k-reciprocal neighbors\n forward_k_neigh_index = initial_rank[i, :k1 + 1]\n backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]\n fi = np.where(backward_k_neigh_index == i)[0]\n k_reciprocal_index = forward_k_neigh_index[fi]\n k_reciprocal_expansion_index = k_reciprocal_index\n for j in range(len(k_reciprocal_index)):\n candidate = k_reciprocal_index[j]\n candidate_forward_k_neigh_index = initial_rank[candidate, :int(np.around(k1 \/ 2)) + 1]\n candidate_backward_k_neigh_index = initial_rank[candidate_forward_k_neigh_index,\n :int(np.around(k1 \/ 2)) + 1]\n fi_candidate = np.where(candidate_backward_k_neigh_index == candidate)[0]\n candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate]\n if len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > 2 \/ 3 * len(\n candidate_k_reciprocal_index):\n k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index, candidate_k_reciprocal_index)\n\n k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)\n weight = np.exp(-original_dist[i, k_reciprocal_expansion_index])\n V[i, k_reciprocal_expansion_index] = weight \/ np.sum(weight)\n original_dist = original_dist[:query_num, ]\n if k2 != 1:\n V_qe = np.zeros_like(V, dtype=np.float16)\n for i in range(all_num):\n V_qe[i, :] = np.mean(V[initial_rank[i, :k2], :], axis=0)\n V = V_qe\n del V_qe\n del initial_rank\n invIndex = []\n for i in range(gallery_num):\n invIndex.append(np.where(V[:, i] != 0)[0])\n\n jaccard_dist = np.zeros_like(original_dist, dtype=np.float16)\n\n for i in range(query_num):\n temp_min = np.zeros(shape=[1, gallery_num], dtype=np.float16)\n indNonZero = np.where(V[i, :] != 0)[0]\n indImages = []\n indImages = [invIndex[ind] for ind in indNonZero]\n for j in range(len(indNonZero)):\n temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(V[i, indNonZero[j]], V[indImages[j], indNonZero[j]])\n jaccard_dist[i] = 1 - temp_min \/ (2 - temp_min)\n\n final_dist = jaccard_dist * (1 - lambda_value) + original_dist * lambda_value\n del original_dist\n del V\n del jaccard_dist\n final_dist = final_dist[:query_num, query_num:]\n\n # np.save('final_dist.npy', final_dist)\n\n return final_dist\n\n\n\n\ndef re_ranking_metadata_soft_v2(qf, gf, q_metadatas, g_metadatas, confusion_mats, metadata_prob_ranges, k1=4, k2=4, lambda_value=0.5, MemorySave=False, 
Minibatch=2000):\n\n m_num = len(metadata_prob_ranges)\n for p_begin, p_end in metadata_prob_ranges:\n assert (p_begin, p_end) in confusion_mats\n\n query_num = qf.shape[0]\n all_num = query_num + gf.shape[0]\n feat = np.append(qf, gf, axis=0)\n all_metadatas = np.append(q_metadatas, g_metadatas, axis=0)\n ###feat = np.concatenate((feat, all_metadatas*20), axis=1)\n # feat = np.append(probFea, galFea)\n # feat = np.vstack((probFea, galFea))\n feat = feat.astype(np.float16)\n print('computing original distance')\n if MemorySave:\n original_dist = np.zeros(shape=[all_num, all_num], dtype=np.float16)\n i = 0\n while True:\n it = i + Minibatch\n if it < np.shape(feat)[0]:\n original_dist[i:it, ] = np.power(cdist(feat[i:it, ], feat), 2).astype(np.float16)\n else:\n original_dist[i:, :] = np.power(cdist(feat[i:, ], feat), 2).astype(np.float16)\n break\n i = it\n else:\n original_dist = cdist(feat, feat).astype(np.float16)\n original_dist = np.power(original_dist, 2).astype(np.float16)\n del feat\n gallery_num = original_dist.shape[0]\n original_dist = np.transpose(original_dist \/ np.max(original_dist, axis=0))\n V = np.zeros_like(original_dist).astype(np.float16)\n\n # apply meta data\n print('computing KL divergence')\n \n KL_div = compute_KL_divergence(all_metadatas, all_metadatas, metadata_prob_ranges)\n KL_div_U = compute_KL_divergence(all_metadatas, np.ones(all_metadatas.shape, dtype=np.float32), metadata_prob_ranges)\n conf_pred = np.zeros((all_num, all_num, m_num), dtype=np.float32)\n for im, (p_begin, p_end) in enumerate(metadata_prob_ranges):\n conf_pred[:,:,im] = KL_div_U[:,:,im] * np.transpose(KL_div_U[:,:,im]) \/ (np.log(p_end - p_begin)*np.log(p_end - p_begin))\n pred = compute_pred(all_metadatas, metadata_prob_ranges)\n confusion_dist = np.zeros((all_num, all_num, m_num), dtype=np.float32)\n for im, (p_begin, p_end) in enumerate(metadata_prob_ranges):\n confusion_weight = compute_confusion_weight(pred[:,im], pred[:,im], confusion_mats[(p_begin, p_end)])\n confusion_dist[:,:,im] = -np.log(confusion_weight + 1e-4) \/ np.log(p_end-p_begin)\n\n pred_weight = conf_pred * confusion_dist# * KL_div\n pred_weight = np.sum(pred_weight, axis=2)\n #print('confusion_dist = ')\n #print(confusion_dist)\n\n tmp_rank = np.argsort(original_dist).astype(np.int32)\n min_dist = original_dist[range(all_num), tmp_rank[:,1]]\n #print('min_dist = ')\n #print(min_dist)\n pred_dist = np.transpose(pred_weight * min_dist)\n #print('pred_dist = ')\n #print(pred_dist)\n\n r_KL = 10#0.5#20.0\n #print('original_dist = ')\n #print(original_dist)\n #original_dist_no_meta = original_dist.copy()\n original_dist += pred_dist*r_KL\n #original_dist = np.clip(original_dist, 0, 1) # not meaningful\n #print('original_dist = ')\n #print(original_dist)\n\n initial_rank = np.argsort(original_dist).astype(np.int32)\n #original_dist_no_query = original_dist.copy()\n #original_dist_no_query[:,:query_num] = 1000.0\n #initial_rank = np.argsort(original_dist_no_query).astype(np.int32)\n\n print('starting re_ranking')\n for i in range(all_num):\n # k-reciprocal neighbors\n forward_k_neigh_index = initial_rank[i, :k1 + 1]\n backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]\n fi = np.where(backward_k_neigh_index == i)[0]\n k_reciprocal_index = forward_k_neigh_index[fi]\n k_reciprocal_expansion_index = k_reciprocal_index\n for j in range(len(k_reciprocal_index)):\n candidate = k_reciprocal_index[j]\n candidate_forward_k_neigh_index = initial_rank[candidate, :int(np.around(k1 \/ 2)) + 1]\n 
candidate_backward_k_neigh_index = initial_rank[candidate_forward_k_neigh_index,\n :int(np.around(k1 \/ 2)) + 1]\n fi_candidate = np.where(candidate_backward_k_neigh_index == candidate)[0]\n candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate]\n if len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > 2 \/ 3 * len(\n candidate_k_reciprocal_index):\n k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index, candidate_k_reciprocal_index)\n\n k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)\n weight = np.exp(-original_dist[i, k_reciprocal_expansion_index])\n V[i, k_reciprocal_expansion_index] = weight \/ np.sum(weight)\n original_dist = original_dist[:query_num, ]\n if k2 != 1:\n V_qe = np.zeros_like(V, dtype=np.float16)\n for i in range(all_num):\n V_qe[i, :] = np.mean(V[initial_rank[i, :k2], :], axis=0)\n V = V_qe\n del V_qe\n del initial_rank\n invIndex = []\n for i in range(gallery_num):\n invIndex.append(np.where(V[:, i] != 0)[0])\n\n jaccard_dist = np.zeros_like(original_dist, dtype=np.float16)\n\n for i in range(query_num):\n temp_min = np.zeros(shape=[1, gallery_num], dtype=np.float16)\n indNonZero = np.where(V[i, :] != 0)[0]\n indImages = []\n indImages = [invIndex[ind] for ind in indNonZero]\n for j in range(len(indNonZero)):\n temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(V[i, indNonZero[j]], V[indImages[j], indNonZero[j]])\n jaccard_dist[i] = 1 - temp_min \/ (2 - temp_min)\n\n\n\n final_dist = jaccard_dist * (1 - lambda_value) + original_dist * lambda_value\n\n #original_dist_easy_meta = original_dist_no_meta + 100*compute_metadata_distance_easy(all_metadatas, all_metadatas, metadata_prob_ranges)\n #original_dist_easy_meta = original_dist_easy_meta[:query_num, ]\n #final_dist = jaccard_dist * (1 - lambda_value) + original_dist_easy_meta * lambda_value\n\n #original_dist_no_meta = original_dist_no_meta[:query_num, ]\n #final_dist = jaccard_dist * (1 - lambda_value) + original_dist_no_meta * lambda_value\n\n\n del original_dist\n del V\n del jaccard_dist\n final_dist = final_dist[:query_num, query_num:]\n\n # np.save('final_dist.npy', final_dist)\n\n return final_dist\n\n\ndef re_ranking_metadata_soft(qf, gf, q_metadatas, g_metadatas, metadata_prob_ranges, k1=4, k2=4, lambda_value=0.5, MemorySave=False, Minibatch=2000):\n query_num = qf.shape[0]\n all_num = query_num + gf.shape[0]\n feat = np.append(qf, gf, axis=0)\n #meta = np.append(q_metadatas, g_metadatas, axis=0)\n ###feat = np.concatenate((feat, meta*20), axis=1)\n # feat = np.append(probFea, galFea)\n # feat = np.vstack((probFea, galFea))\n feat = feat.astype(np.float16)\n print('computing original distance')\n if MemorySave:\n original_dist = np.zeros(shape=[all_num, all_num], dtype=np.float16)\n i = 0\n while True:\n it = i + Minibatch\n if it < np.shape(feat)[0]:\n original_dist[i:it, ] = np.power(cdist(feat[i:it, ], feat), 2).astype(np.float16)\n else:\n original_dist[i:, :] = np.power(cdist(feat[i:, ], feat), 2).astype(np.float16)\n break\n i = it\n else:\n original_dist = cdist(feat, feat).astype(np.float16)\n original_dist = np.power(original_dist, 2).astype(np.float16)\n del feat\n gallery_num = original_dist.shape[0]\n original_dist = np.transpose(original_dist \/ np.max(original_dist, axis=0))\n V = np.zeros_like(original_dist).astype(np.float16)\n # apply meta data\n print('computing KL divergence')\n KL_div = np.zeros((all_num, all_num), dtype=np.float16)\n tmp_rank = np.argsort(original_dist).astype(np.int32)\n for i 
in range(all_num):\n if i < query_num:\n metaI = q_metadatas[i]\n else:\n metaI = g_metadatas[i - query_num]\n d_min = original_dist[i][tmp_rank[i,1]]\n #print('d_min: %f' % d_min)\n for j in range(all_num):\n if j < query_num:\n metaJ = q_metadatas[j]\n else:\n metaJ = g_metadatas[j - query_num]\n for prob_range_begin, prob_range_end in metadata_prob_ranges:\n hard_threshold = True\n epsilon = 1e-4\n pk = metaI[prob_range_begin:prob_range_end] + epsilon\n qk = metaJ[prob_range_begin:prob_range_end] + epsilon\n if hard_threshold:\n if np.argmax(pk) != np.argmax(qk):\n KL_div[i][j] += 100\n break\n else:\n continue\n #s = entropy(pk, qk)*0.5 + entropy(qk, pk)*0.5\n s = min(entropy(pk, qk), entropy(qk, pk))\n #print('%d: %f' % (num_classes, s))\n #KL_div[i][j] += max(s\/log(num_classes) - 1, 0) * d_min\n KL_div[i][j] += s * d_min\n print('KL_div min: %f' % np.min(KL_div[np.triu_indices(all_num,1)]))\n print('KL_div max: %f' % np.max(KL_div[np.triu_indices(all_num,1)]))\n r_KL = 1.0\n original_dist = np.clip(original_dist+KL_div*r_KL, 0, 1)\n \n\n\n initial_rank = np.argsort(original_dist).astype(np.int32)\n #original_dist_no_query = original_dist.copy()\n #original_dist_no_query[:,:query_num] = 1000.0\n #initial_rank = np.argsort(original_dist_no_query).astype(np.int32)\n\n print('starting re_ranking')\n for i in range(all_num):\n # k-reciprocal neighbors\n forward_k_neigh_index = initial_rank[i, :k1 + 1]\n backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]\n fi = np.where(backward_k_neigh_index == i)[0]\n k_reciprocal_index = forward_k_neigh_index[fi]\n k_reciprocal_expansion_index = k_reciprocal_index\n for j in range(len(k_reciprocal_index)):\n candidate = k_reciprocal_index[j]\n candidate_forward_k_neigh_index = initial_rank[candidate, :int(np.around(k1 \/ 2)) + 1]\n candidate_backward_k_neigh_index = initial_rank[candidate_forward_k_neigh_index,\n :int(np.around(k1 \/ 2)) + 1]\n fi_candidate = np.where(candidate_backward_k_neigh_index == candidate)[0]\n candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate]\n if len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > 2 \/ 3 * len(\n candidate_k_reciprocal_index):\n k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index, candidate_k_reciprocal_index)\n\n k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)\n weight = np.exp(-original_dist[i, k_reciprocal_expansion_index])\n V[i, k_reciprocal_expansion_index] = weight \/ np.sum(weight)\n original_dist = original_dist[:query_num, ]\n if k2 != 1:\n V_qe = np.zeros_like(V, dtype=np.float16)\n for i in range(all_num):\n V_qe[i, :] = np.mean(V[initial_rank[i, :k2], :], axis=0)\n V = V_qe\n del V_qe\n del initial_rank\n invIndex = []\n for i in range(gallery_num):\n invIndex.append(np.where(V[:, i] != 0)[0])\n\n jaccard_dist = np.zeros_like(original_dist, dtype=np.float16)\n\n for i in range(query_num):\n temp_min = np.zeros(shape=[1, gallery_num], dtype=np.float16)\n indNonZero = np.where(V[i, :] != 0)[0]\n indImages = []\n indImages = [invIndex[ind] for ind in indNonZero]\n for j in range(len(indNonZero)):\n temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(V[i, indNonZero[j]], V[indImages[j], indNonZero[j]])\n jaccard_dist[i] = 1 - temp_min \/ (2 - temp_min)\n\n final_dist = jaccard_dist * (1 - lambda_value) + original_dist * lambda_value\n del original_dist\n del V\n del jaccard_dist\n final_dist = final_dist[:query_num, query_num:]\n\n # np.save('final_dist.npy', final_dist)\n\n 
return final_dist\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_566","text":"import matplotlib as mpl\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport tkinter \n\nimport datetime\nimport struct\nimport ftplib\nimport glob\nimport tempfile\nimport pymongo\nimport requests\nimport sys\n\nimport scipy.signal as signal\nimport scipy.io.wavfile\nimport pywt\n\nfrom unophysics import ladc, wavefuncs\nfrom pathlib import Path\nfrom scipy.io import savemat\nfrom sshtunnel import SSHTunnelForwarder\n\nroot = tkinter.Tk()\nroot.title('LADC-GEMM Interactive')\nroot.geometry('540x260')\nroot.grid_columnconfigure(1, minsize=135)\n\n# SET UP INFORMATION PAGE FOR EACH LOCATION\nlocalcalls = []\ndef info_location_change():\n currentlocation.set(ladc.Stuff.brydes_calls[info_location.get()]['Location'])\n locationlabel.grid(column=1, row=0, sticky=tkinter.N+tkinter.W)\n\n global localcalls\n localcalls = ladc.Stuff.brydes_calls[info_location.get()]['Call(s) recorded']\n numcallsstring = ', '.join(localcalls)\n current_numcalls.set(numcallsstring)\n numcalls_label.grid(column=1, row=0, sticky=tkinter.N+tkinter.W, pady=5)\n callmenu.delete(0, tkinter.END)\n for x in localcalls:\n callmenu.insert(tkinter.END, x)\n current_date.set(ladc.Stuff.brydes_calls[info_location.get()]['Date'])\n date_label.grid(column=1, row=0, sticky=tkinter.N+tkinter.W, pady=5)\n current_minmax.set('')\n\n# LOCATION BUTTONS\ninfobutton_box = tkinter.LabelFrame(root)\ninfo_location = tkinter.StringVar()\ninfo_location.set('ETP')\n\ncurrentlocation=tkinter.StringVar()\nlocationlabel = tkinter.Label(root, textvariable=currentlocation, font=('bold', 13))\n\netp = tkinter.Radiobutton(infobutton_box, text='Eastern Tropical Pacific', variable=info_location, value='ETP', command=info_location_change)\nscarib = tkinter.Radiobutton(infobutton_box, text='Southern Caribbean', variable=info_location, value='SCaribbean', command=info_location_change)\nnwpac = tkinter.Radiobutton(infobutton_box, text='Northwest Pacific', variable=info_location, value='NWPacific', command=info_location_change)\ngoc = tkinter.Radiobutton(infobutton_box, text='Gulf of California', variable=info_location, value='GoC', command=info_location_change)\ncabo = tkinter.Radiobutton(infobutton_box, text='Cabo Frio, Brazil', variable=info_location, value='CaboFrio', command=info_location_change)\ngom = tkinter.Radiobutton(infobutton_box, text='Gulf of Mexico', variable=info_location, value='GoM', command=info_location_change)\n\n# REGIONAL CALL INFORMATION WIDGETS\nnumcallsbox = tkinter.LabelFrame(root)\nnumcalls = tkinter.Label(numcallsbox, text='Call(s) recorded: ')\ncurrent_numcalls = tkinter.StringVar()\nnumcalls_label = tkinter.Label(numcallsbox, textvariable=current_numcalls, wraplength=200, justify='left')\n\ndatebox = tkinter.LabelFrame(root)\ndate = tkinter.Label(datebox, text='Date: ')\ncurrent_date = tkinter.StringVar()\ndate_label = tkinter.Label(datebox, textvariable=current_date, wraplength=200, justify='left')\n\n# FREQUENCY INFORMATION WIDGETS\nfreqbox = tkinter.LabelFrame(root)\nfreq = tkinter.Label(freqbox, text='Frequency information: ')\n\ndef change_facts(event):\n selecttuple = event.widget.curselection()\n selectindex = selecttuple[0]\n callname = localcalls[selectindex]\n minmaxrange = ladc.Stuff.frequency_info[info_location.get()][callname]\n minbookmark = minmaxrange[0]\n maxbookmark = minmaxrange[1]\n minmax_string = f'{minbookmark} - {maxbookmark} Hz'\n current_minmax.set(minmax_string) 
\n\ncallbox = tkinter.Frame(freqbox)\ncall = tkinter.Label(callbox, text='Call: ')\ncallmenu = tkinter.Listbox(callbox, selectmode='SINGLE', height=6) \ncallmenu.bind('<>', change_facts)\n\nminmaxbox = tkinter.Frame(freqbox)\nminmax = tkinter.Label(minmaxbox, text='Min\/max frequencies: ')\ncurrent_minmax = tkinter.StringVar()\nminmax_label = tkinter.Label(minmaxbox, textvariable=current_minmax) \n\n# MENU WIDGET CHANGES\ndef datapage():\n reset_inputs()\n\n root.grid_columnconfigure(index=1, minsize=135)\n root.grid_rowconfigure(index=0, minsize=10)\n root.grid_rowconfigure(index=2, minsize=10)\n \n widgetlist = [minmax_label, callbox, minmaxbox, freqbox, freq, call, callmenu, minmax, infobutton_box, etp, scarib, nwpac, goc, cabo, gom, datebox, date, numcallsbox, numcalls, locationlabel, numcalls_label, date_label]\n for widget in widgetlist:\n widget.grid_remove()\n\n onefilename_box.grid(column=1, row=0, sticky=tkinter.W+tkinter.E, padx=10, pady=10)\n enter_onefile.grid(column=1, row=0, pady=5, padx=3)\n\n skipval_box.grid(column=2, row=0, sticky=tkinter.W+tkinter.E, padx=10, pady=10)\n enter_skipval.grid(column=2, row=0, pady=5, padx=3)\n\n cmap_box.grid(column=1, row=1, padx=10, pady=10, sticky=tkinter.E+tkinter.W)\n colorschemes.grid(column=1, row=1, sticky=tkinter.E+tkinter.W)\n\n fileamount_box.grid(column=2, row=1, padx=10, pady=10, sticky=tkinter.E+tkinter.W)\n fileamounts.grid(column=2, row=1, sticky=tkinter.E+tkinter.W)\n\n reset.grid(column=1, row=2, sticky=tkinter.E+tkinter.W, pady=10, padx=10)\n\n show_button.grid(column=2, row=2, sticky=tkinter.E+tkinter.W, padx=10, pady=10)\n\n databutton_box.grid(column=0, row=0, rowspan=4, padx=10, pady=10, sticky=tkinter.N+tkinter.S)\n spec_plot.grid(row=0, sticky=tkinter.W, pady=15, padx=5)\n amp_plot.grid(row=1, sticky=tkinter.W, pady=15, padx=5)\n search_interesting.grid(row=2, sticky=tkinter.W, pady=15, padx=5)\n\ndef infopage():\n info_location.set(None)\n\n root.grid_columnconfigure(index=1, minsize=330)\n root.grid_rowconfigure(index=0, minsize=30)\n root.grid_rowconfigure(index=2, minsize=20)\n\n widgetlist = [onefilename_box, enter_onefile, skipval_box, enter_skipval, cmap_box, colorschemes, fileamount_box, fileamounts, reset, show_button, databutton_box, spec_plot, amp_plot, search_interesting]\n for widget in widgetlist:\n widget.grid_remove()\n\n infobutton_box.grid(column=0, row=0, padx=10, pady=10, rowspan=6, sticky=tkinter.N+tkinter.S)\n\n etp.grid(column=0, row=0, sticky=tkinter.W, padx=10, pady=5)\n scarib.grid(column=0, row=1, sticky=tkinter.W, padx=10, pady=5)\n nwpac.grid(column=0, row=2, sticky=tkinter.W, padx=10, pady=5)\n goc.grid(column=0, row=3, sticky=tkinter.W, padx=10, pady=5)\n cabo.grid(column=0, row=4, sticky=tkinter.W, padx=10, pady=5)\n gom.grid(column=0, row=5, sticky=tkinter.W, padx=10, pady=5)\n\n numcallsbox.grid(column=1, row=1, columnspan=2, sticky=tkinter.W+tkinter.E+tkinter.N+tkinter.S)\n numcalls.grid(column=0, row=0, sticky=tkinter.W+tkinter.N, padx=10, pady=5)\n datebox.grid(column=1, row=2, columnspan=2, sticky=tkinter.W+tkinter.E+tkinter.N+tkinter.S)\n date.grid(column=0, row=0, sticky=tkinter.W+tkinter.N, padx=10, pady=5)\n\n freqbox.grid(column=1, row=3, columnspan=2, sticky=tkinter.W+tkinter.E+tkinter.N+tkinter.S)\n freq.grid(column=0, row=0)\n\n callbox.grid(column=0, row=1)\n call.grid(column=0, row=0)\n callmenu.grid(column=1, row=0)\n\n minmaxbox.grid(column=1, row=1)\n minmax.grid(column=0, row=0)\n minmax_label.grid(column=1, row=0)\n callmenu.delete(0, tkinter.END)\n 
current_minmax.set('')\n\n# CREATING MENU\nmenu = tkinter.Menu(root)\nroot.config(menu=menu)\ndatamenu = tkinter.Menu(menu)\nmenu.add_cascade(label='Data', menu=datamenu)\ndatamenu.add_command(label='Data', command=datapage)\ninfomenu = tkinter.Menu(menu)\nmenu.add_cascade(label='Information', menu=infomenu)\ninfomenu.add_command(label='Information', command=infopage)\n\n# TYPE IN FILE NAME\nonefilename_box = tkinter.LabelFrame(root, text='Filename')\nonefilename_box.grid(column=1, row=0, sticky=tkinter.W+tkinter.E, padx=10, pady=10)\nonefilename = tkinter.StringVar()\nonefilename.set('')\nenter_onefile = tkinter.Entry(onefilename_box, textvariable=onefilename, width=16)\nenter_onefile.grid(column=1, row=0, pady=5, padx=3)\n\n# TYPE IN SKIP VALUE\ndef skip_error_check(number):\n valid = False\n if number.isdigit():\n if (int(number) <= 10000) and (int(number) >= 0):\n valid = True\n elif number == '':\n valid = True\n return valid\nvalidate_skip = (root.register(skip_error_check), '%P')\nskipval_box = tkinter.LabelFrame(root, text='Skip Value')\nskipval_box.grid(column=2, row=0, sticky=tkinter.W+tkinter.E, padx=10, pady=10)\nskipval = tkinter.StringVar()\nskipval.set(0)\nenter_skipval = tkinter.Spinbox(skipval_box, from_=0, to=10000, textvariable=skipval, width=16, validate='all', validatecommand=validate_skip)\nenter_skipval.grid(column=2, row=0, pady=5, padx=3)\n\n# OPTION MENUS\ndef show_options():\n global cmap_box, colorschemes, fileamount_box, fileamounts, fileamount_str, enter_onefile\n if current_page.get() == 'spec':\n enter_onefile.configure(state='normal')\n fileamounts.configure(state='disabled')\n colorschemes.configure(state='normal')\n colorscheme_str.set(cmaps[2])\n skipval.set(0)\n fileamount_str.set(number_of_files[0])\n onefilename.set('')\n else: \n colorschemes.configure(state='disabled')\n \n if current_page.get() == 'amp':\n enter_onefile.configure(state='normal')\n fileamounts.configure(state='disabled')\n colorschemes.configure(state='disabled')\n skipval.set(0)\n fileamount_str.set(number_of_files[0])\n onefilename.set('')\n \n if current_page.get() == 'interesting':\n enter_onefile.configure(state='disabled')\n fileamounts.configure(state='normal')\n colorschemes.configure(state='disabled')\n skipval.set(0)\n fileamount_str.set(number_of_files[0])\n else:\n fileamounts.configure(state='disabled')\n\n if current_page.get() != 'spec' and current_page.get() != 'amp' and current_page.get() != 'interesting':\n enter_onefile.configure(state='disabled')\n fileamounts.configure(state='disabled')\n colorschemes.configure(state='disabled')\n enter_skipval.configure(state='disabled')\n reset.configure(state='disabled')\n show_button.configure(state='disabled')\n\ncmaps = ['hsv', 'Greys', 'nipy_spectral']\ncmap_box = tkinter.LabelFrame(root, text='Color Scheme')\ncolorscheme_str = tkinter.StringVar()\ncolorscheme_str.set(cmaps[2])\ncolorschemes = tkinter.OptionMenu(cmap_box, colorscheme_str, *cmaps)\ncmap_box.grid(column=1, row=1, padx=10, pady=10, sticky=tkinter.E+tkinter.W)\ncolorschemes.grid(column=1, row=1, sticky=tkinter.E+tkinter.W)\n\nnumber_of_files = ['4','9','16']\nfileamount_box = tkinter.LabelFrame(root, text='Number of Files')\nfileamount_str = tkinter.StringVar()\nfileamount_str.set(number_of_files[0])\nfileamounts = tkinter.OptionMenu(fileamount_box, fileamount_str, *number_of_files)\nfileamount_box.grid(column=2, row=1, padx=10, pady=10, sticky=tkinter.E+tkinter.W)\nfileamounts.grid(column=2, row=1, sticky=tkinter.E+tkinter.W)\n\n# PLOT TYPE RADIOBUTTONS\n# 
RADIOBUTTON LABELFRAME\ndatabutton_box = tkinter.LabelFrame(root)\ndatabutton_box.grid(column=0, row=0, rowspan=4, padx=10, pady=10, sticky=tkinter.N+tkinter.S)\n\ncurrent_page = tkinter.StringVar()\ncurrent_page.set(None)\nspec_plot = tkinter.Radiobutton(databutton_box, text='Create Spectrogram', command=show_options, variable=current_page, value='spec')\nspec_plot.grid(row=0, sticky=tkinter.W, pady=15, padx=5)\n\namp_plot = tkinter.Radiobutton(databutton_box, text='Create Time Series', command=show_options, variable=current_page, value='amp')\namp_plot.grid(row=1, sticky=tkinter.W, pady=15, padx=5)\n\nsearch_interesting = tkinter.Radiobutton(databutton_box, text= 'Find Interesting', command=show_options, variable=current_page, value='interesting')\nsearch_interesting.grid(row=2, sticky=tkinter.W, pady=15, padx=5)\n\n# RESET BUTTON\ndef reset_inputs():\n global onefilename, skipval, current_page, cmap_box, colorschemes, fileamount_box, fileamounts, fileamount_str, colorscheme_str\n onefilename.set('')\n skipval.set(0)\n current_page.set(None)\n fileamount_str.set(number_of_files[0])\n colorscheme_str.set(cmaps[2])\n enter_onefile.configure(state='normal')\n fileamounts.configure(state='normal')\n colorschemes.configure(state='normal')\n\nreset = tkinter.Button(root, text='Reset All', command=reset_inputs)\nreset.grid(column=1, row=2, sticky=tkinter.E+tkinter.W, pady=10, padx=10)\n\n# FORMAT USER INPUT FOR _ladc FUNCTIONS\ntrueskip = 0\ntruefilename = ''\nfileamount_int = 0\n\ndef recordswindow(recordfn=None, recordskip=None, recordnumber=None):\n global trueskip, truefilename, fileamount_int\n\n recordswindow = tkinter.Toplevel(root)\n recordswindow.title('Records')\n recordswindow.geometry('630x300')\n\n truerecords = tkinter.StringVar()\n truerecords.set('')\n\n trueheader = tkinter.StringVar()\n trueheader.set('')\n\n recordlabel = tkinter.Label(recordswindow, textvariable=truerecords, wraplength=600, font=('bold', 12), justify='left')\n headerlabel = tkinter.Label(recordswindow, textvariable=trueheader, font=(6))\n \n if current_page.get() == 'spec' or current_page.get() == 'amp':\n if recordfn is None:\n if recordskip is None:\n detect = ladc.find()\n elif recordskip is not None:\n detect = ladc.find(skip=recordskip)\n elif recordfn is not None:\n detect = ladc.find(filename=recordfn)\n\n detectbookmark = str(detect)\n truerecords.set(detectbookmark)\n \n filenamebookmark = detect['filename']\n headerbookmark = (f'File {filenamebookmark} Records')\n trueheader.set(headerbookmark)\n\n if current_page.get() == 'interesting':\n if recordnumber is not None:\n truerecords.set(ladc.find_interesting(skip_start=recordskip, number_of_files=recordnumber, Type=6, Buoy='13', Disk='0'))\n trueheader.set('File Records')\n\n recordlabel.grid(column=0, row=1, sticky=tkinter.E+tkinter.W, padx=5, pady=5)\n headerlabel.grid(column=0, row=0, sticky=tkinter.N+tkinter.W, padx=5, pady=5)\n print('window')\n\n# POPUP WINDOW\ndef build_plot(): \n global trueskip, truefilename, fileamount_int\n print('built plot')\n\n truefilename = onefilename.get()\n if truefilename == '':\n truefilename = None\n \n fileamount_int = int(fileamount_str.get())\n\n trueskip = int(skipval.get())\n if trueskip == 0:\n trueskip = None\n\n if current_page.get() == 'spec' or current_page.get() == 'amp':\n if (trueskip is not None) and (truefilename is not None):\n errorwindow = tkinter.Toplevel(root)\n errorwindow.title('Error')\n errormessage = tkinter.Label(errorwindow, text='Please enter only a filename OR a skip value.')\n 
errormessage.grid(column=0, row=0, sticky=tkinter.E+tkinter.W, padx=10, pady=10)\n okbutton = tkinter.Button(errorwindow, text='Okay', command=errorwindow.destroy)\n okbutton.grid(column=0, row=1, sticky=tkinter.E+tkinter.W, padx=10, pady=10)\n else:\n try:\n recordswindow(recordfn=truefilename, recordskip=trueskip, recordnumber=None)\n if current_page.get() == 'spec': # use the output from the window building function instead of getting it from the widget\n ladc.create_spec(skip=trueskip, cmap=(colorscheme_str.get()), figsize=(6,4), save_fig=None, show_plt=True, filename=truefilename)\n if current_page.get() == 'amp':\n ladc.create_timeseries(filename=truefilename, skip=trueskip, show_plt=True)\n except FileNotFoundError:\n errorwindow = tkinter.Toplevel(root)\n errorwindow.title('Error')\n errormessage = tkinter.Label(errorwindow,text='Sorry, that file does not exist.')\n errormessage.grid(column=0, row=0, sticky=tkinter.E+tkinter.W, padx=10, pady=10)\n okbutton = tkinter.Button(errorwindow, text='Okay', command=errorwindow.destroy)\n okbutton.grid(column=0, row=1, sticky=tkinter.E+tkinter.W, padx=10, pady=10)\n\n if current_page.get() == 'interesting':\n recordswindow(recordfn=None, recordskip=trueskip, recordnumber=fileamount_int)\n ladc.find_interesting(skip_start=trueskip, number_of_files=fileamount_int, Type=6, Buoy='13', Disk='0')\n ladc.MATLAB_format(plot=True, show_plt=True, save_plt=False, clip_length=577, number_of_files=fileamount_int, directory='data', records=None, Type=6, Buoy='13', Disk='0', skip_start=trueskip)\n\nshow_button = tkinter.Button(root, text='Create Figure(s)', command=build_plot)\nshow_button.grid(column=2, row=2, sticky=tkinter.E+tkinter.W, padx=10, pady=10)\n\nroot.mainloop()"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_256","text":"syys96\/digital_rfpython\/tools\/drf_sti.py\n#!python\n# ----------------------------------------------------------------------------\n# Copyright (c) 2017 Massachusetts Institute of Technology (MIT)\n# All rights reserved.\n#\n# Distributed under the terms of the BSD 3-clause license.\n#\n# The full license is in the LICENSE file, distributed with this software.\n# ----------------------------------------------------------------------------\n\"\"\"Create a spectral time intensity summary plot for a data set.\"\"\"\n\n\nimport datetime\nimport optparse\nimport os\nimport sys\nimport time\nimport traceback\n\nimport dateutil\nimport digital_rf as drf\nimport matplotlib.gridspec\nimport matplotlib.mlab\nimport matplotlib.pyplot\nimport numpy as np\nimport pytz\nimport scipy\nimport scipy.signal\n\n\nclass DataPlotter(object):\n def __init__(self, control):\n \"\"\"Initialize a data plotter for STI plotting.\"\"\"\n self.control = control\n ch = self.control.channel.split(\":\")\n self.channel = ch[0]\n self.sub_channel = int(ch[1])\n\n # open digital RF path\n self.dio = drf.DigitalRFReader(self.control.path)\n\n if self.control.verbose:\n print(\"channel bounds:\", self.dio.get_bounds(self.channel))\n\n self.bounds = self.dio.get_bounds(self.channel)\n\n print(\"bounds \", self.bounds)\n\n # Figure setup\n\n self.f = matplotlib.pyplot.figure(\n figsize=(7, np.min([np.max([4, self.control.frames]), 7])), dpi=128\n )\n\n self.gridspec = matplotlib.gridspec.GridSpec(self.control.frames, 1)\n\n self.subplots = []\n\n \"\"\" Setup the subplots for this display \"\"\"\n for n in np.arange(self.control.frames):\n ax = self.f.add_subplot(self.gridspec[n])\n self.subplots.append(ax)\n\n def 
plot(self):\n \"\"\"Iterate over the data set and plot the STI into the subplot panels.\n\n Each panel is divided into a provided number of bins of a given\n integration length. Strides between the panels are made between\n integrations.\n\n \"\"\"\n # initialize outside the loop to avoid memory leak\n\n # initial plotting scales\n vmin = 0\n vmax = 0\n\n sr = self.dio.get_properties(self.channel)[\"samples_per_second\"]\n\n if self.control.verbose:\n print(\"sample rate: \", sr)\n\n # initial time info\n b = self.dio.get_bounds(self.channel)\n\n if self.control.verbose:\n print(\"data bounds: \", b)\n\n if self.control.start:\n dtst0 = dateutil.parser.parse(self.control.start)\n st0 = (\n dtst0 - datetime.datetime(1970, 1, 1, tzinfo=pytz.utc)\n ).total_seconds()\n st0 = int(st0 * sr)\n else:\n st0 = int(b[0])\n\n if self.control.end:\n dtst0 = dateutil.parser.parse(self.control.end)\n et0 = (\n dtst0 - datetime.datetime(1970, 1, 1, tzinfo=pytz.utc)\n ).total_seconds()\n et0 = int(et0 * sr)\n else:\n et0 = int(b[1])\n\n if self.control.verbose:\n\n print(\"start sample st0: \", st0)\n print(\"end sample et0: \", et0)\n\n blocks = self.control.bins * self.control.frames\n\n samples_per_stripe = (\n self.control.num_fft * self.control.integration * self.control.decimation\n )\n total_samples = blocks * samples_per_stripe\n\n if total_samples > (et0 - st0):\n print(\n \"Insufficient samples for %d samples per stripe and %d blocks between %ld and %ld\"\n % (samples_per_stripe, blocks, st0, et0)\n )\n return\n\n stripe_stride = (et0 - st0) \/ blocks\n\n bin_stride = stripe_stride \/ self.control.bins\n\n start_sample = st0\n\n print(\"first \", start_sample)\n\n # get metadata\n # this could be done better to ensure we catch frequency or sample rate\n # changes\n mdt = self.dio.read_metadata(st0, et0, self.channel)\n try:\n md = mdt[list(mdt.keys())[0]]\n cfreq = md[\"center_frequencies\"].ravel()[self.sub_channel]\n except (IndexError, KeyError):\n cfreq = 0.0\n\n if self.control.verbose:\n print(\n \"processing info : \",\n self.control.frames,\n self.control.bins,\n samples_per_stripe,\n bin_stride,\n )\n\n for p in np.arange(self.control.frames):\n sti_psd_data = np.zeros([self.control.num_fft, self.control.bins], np.float)\n sti_times = np.zeros([self.control.bins], np.complex128)\n\n for b in np.arange(self.control.bins, dtype=np.int_):\n\n if self.control.verbose:\n print(\n \"read vector :\", self.channel, start_sample, samples_per_stripe\n )\n\n data = self.dio.read_vector(\n start_sample, samples_per_stripe, self.channel, self.sub_channel\n )\n if self.control.decimation > 1:\n data = scipy.signal.decimate(data, self.control.decimation)\n sample_freq = sr \/ self.control.decimation\n else:\n sample_freq = sr\n\n if self.control.mean:\n detrend_fn = matplotlib.mlab.detrend_mean\n else:\n detrend_fn = matplotlib.mlab.detrend_none\n\n try:\n psd_data, freq_axis = matplotlib.mlab.psd(\n data,\n NFFT=self.control.num_fft,\n Fs=float(sample_freq),\n detrend=detrend_fn,\n scale_by_freq=False,\n )\n except:\n traceback.print_exc(file=sys.stdout)\n\n sti_psd_data[:, b] = np.real(10.0 * np.log10(np.abs(psd_data) + 1e-12))\n\n sti_times[b] = start_sample \/ sr\n\n start_sample += stripe_stride\n\n # Now Plot the Data\n ax = self.subplots[p]\n\n # determine image x-y extent\n extent = (\n 0,\n self.control.bins,\n np.min(freq_axis) \/ 1e3,\n np.max(freq_axis) \/ 1e3,\n )\n\n # determine image color extent in log scale units\n Pss = sti_psd_data\n\n if self.control.zaxis:\n vmin = 
int(self.control.zaxis.split(\":\")[0])\n vmax = int(self.control.zaxis.split(\":\")[1])\n else:\n med_Pss = np.nanmedian(Pss)\n max_Pss = np.nanmax(Pss)\n vmin = np.real(med_Pss - 6.0)\n vmax = np.real(med_Pss + (max_Pss - med_Pss) * 0.61803398875 + 50.0)\n\n im = ax.imshow(\n sti_psd_data,\n cmap=\"jet\",\n origin=\"lower\",\n extent=extent,\n interpolation=\"nearest\",\n vmin=vmin,\n vmax=vmax,\n aspect=\"auto\",\n )\n\n ax.set_ylabel(\"f (kHz)\", fontsize=8)\n\n # plot dates\n\n tick_spacing = np.arange(\n self.control.bins \/ 8,\n self.control.bins,\n self.control.bins \/ 8,\n dtype=np.int_,\n )\n ax.set_xticks(tick_spacing)\n tick_labels = []\n\n for s in tick_spacing:\n tick_time = sti_times[s]\n\n if tick_time == 0:\n tick_string = \"\"\n else:\n gm_tick_time = time.gmtime(np.real(tick_time))\n tick_string = \"%02d:%02d:%02d\" % (\n gm_tick_time[3],\n gm_tick_time[4],\n gm_tick_time[5],\n )\n tick_labels.append(tick_string)\n\n ax.set_xticklabels(tick_labels)\n\n # set the font sizes\n tl = ax.get_xticklabels()\n\n for tk in tl:\n tk.set_size(8)\n del tl\n\n tl = ax.get_yticklabels()\n\n for tk in tl:\n tk.set_size(8)\n del tl\n\n print(\"last \", start_sample)\n\n # create a time stamp\n start_time = st0 \/ sr\n srt_time = time.gmtime(start_time)\n sub_second = int(round((start_time - int(start_time)) * 100))\n\n timestamp = \"%d-%02d-%02d %02d:%02d:%02d.%02d UT\" % (\n srt_time[0],\n srt_time[1],\n srt_time[2],\n srt_time[3],\n srt_time[4],\n srt_time[5],\n sub_second,\n )\n\n self.f.suptitle(\n \"%s %s %4.2f MHz (%s)\"\n % (self.control.title, timestamp, cfreq \/ 1e6, self.control.path),\n fontsize=10,\n )\n\n # ax.legend(fontsize=8)\n ax.set_xlabel(\"time (UTC)\", fontsize=8)\n\n # fixup ticks\n\n tl = ax.get_xticklabels()\n for tk in tl:\n tk.set_size(8)\n del tl\n tl = ax.get_yticklabels()\n for tk in tl:\n tk.set_size(8)\n del tl\n\n self.gridspec.update()\n\n self.f.tight_layout()\n\n self.f.subplots_adjust(top=0.95, right=0.88)\n cax = self.f.add_axes([0.9, 0.12, 0.02, 0.80])\n self.f.colorbar(im, cax=cax)\n if self.control.outname:\n fname, ext = os.path.splitext(self.control.outname)\n if ext == \"\":\n ext = \".png\"\n print(\"Save plot as {}\".format(fname + ext))\n matplotlib.pyplot.savefig(fname + ext)\n if self.control.appear or not self.control.outname:\n print(\"Show plot\")\n matplotlib.pyplot.show()\n\n\ndef parse_command_line(str_input=None):\n # if str_input is None:\n # parser = optparse.OptionParser()\n # else:\n # parser = optparse.OptionParser(str_input)\n parser = optparse.OptionParser()\n\n parser.add_option(\n \"-t\",\n \"--title\",\n dest=\"title\",\n default=\"Digital RF Data\",\n help=\"Use title provided for the data.\",\n )\n parser.add_option(\n \"-s\",\n \"--start\",\n dest=\"start\",\n default=None,\n help=\"Use the provided start time instead of the first time in the data. format is ISO8601: 2015-11-01T15:24:00Z\",\n )\n parser.add_option(\n \"-e\",\n \"--end\",\n dest=\"end\",\n default=None,\n help=\"Use the provided end time for the plot. 
format is ISO8601: 2015-11-01T15:24:00Z\",\n )\n\n parser.add_option(\n \"-p\",\n \"--path\",\n dest=\"path\",\n help=\"Use data from the provided digital RF data .\",\n )\n parser.add_option(\n \"-c\",\n \"--channel\",\n dest=\"channel\",\n default=\"ch0:0\",\n help=\"Use data from the provided digital RF channel :.\",\n )\n parser.add_option(\n \"-l\",\n \"--length\",\n dest=\"length\",\n default=0.04,\n type=\"float\",\n help=\"The default data length in seconds for unframed data.\",\n )\n parser.add_option(\n \"-b\",\n \"--bins\",\n dest=\"bins\",\n default=128,\n type=\"int\",\n help=\"The number of time bins for the STI.\",\n )\n parser.add_option(\n \"-f\",\n \"--frames\",\n dest=\"frames\",\n default=4,\n type=\"int\",\n help=\"The number of sub-panel frames in the plot.\",\n )\n parser.add_option(\n \"-n\",\n \"--num_fft\",\n dest=\"num_fft\",\n default=128,\n type=\"int\",\n help=\"The number of FFT bints for the STI.\",\n )\n parser.add_option(\n \"-i\",\n \"--integration\",\n dest=\"integration\",\n default=1,\n type=\"int\",\n help=\"The number of rasters to integrate for each plot.\",\n )\n parser.add_option(\n \"-d\",\n \"--decimation\",\n dest=\"decimation\",\n default=1,\n type=\"int\",\n help=\"The decimation factor for the data (integer).\",\n )\n parser.add_option(\n \"-m\",\n \"--mean\",\n dest=\"mean\",\n action=\"store_true\",\n default=False,\n help=\"Remove the mean from the data at the PSD processing step.\",\n )\n parser.add_option(\n \"-z\",\n \"--zaxis\",\n dest=\"zaxis\",\n default=None,\n type=\"string\",\n help=\"zaxis colorbar setting e.g. -50:50\",\n )\n parser.add_option(\n \"-v\",\n \"--verbose\",\n action=\"store_true\",\n dest=\"verbose\",\n default=False,\n help=\"Print status messages to stdout.\",\n )\n parser.add_option(\n \"-o\",\n \"--outname\",\n dest=\"outname\",\n default=None,\n type=str,\n help=\"Name of file that figure will be saved under.\",\n )\n parser.add_option(\n \"-a\",\n \"--appear\",\n action=\"store_true\",\n dest=\"appear\",\n default=False,\n help=\"Makes the plot appear through pyplot show.\",\n )\n if str_input is None:\n (options, args) = parser.parse_args()\n else:\n (options, args) = parser.parse_args(str_input)\n\n return (options, args)\n\n\n#\n# MAIN PROGRAM\n#\n\n# Setup Defaults\nif __name__ == \"__main__\":\n \"\"\"\n Needed to add main function to use outside functions outside of module.\n \"\"\"\n\n # Parse the Command Line for configuration\n (options, args) = parse_command_line()\n\n if options.path is None:\n print(\"Please provide an input source with the -p option!\")\n sys.exit(1)\n\n # Activate the DataPlotter\n dpc = DataPlotter(options)\n\n dpc.plot()\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_257","text":"JohnStarich\/python-pool-performancepools\/pool.py\nfrom requests.adapters import HTTPAdapter\nfrom collections.abc import Mapping, Sequence\nfrom types import FunctionType\nfrom tqdm import tqdm\nimport time\nimport sys\nimport gc\n\n\nclass PoolTest(object):\n def __init__(self, worker_count: int):\n self.worker_count = worker_count\n self.pool = self.init_pool(worker_count)\n self.compute_resource = self.init_compute_resource()\n self.network_resource = self.init_network_resource()\n\n def init_pool(self, worker_count: int) -> object:\n raise NotImplementedError(\"{} does not implement init_pool\"\n .format(self.__class__.__name__))\n\n def destroy_pool(self):\n pass\n\n def map(self, work_func: FunctionType, inputs: Sequence) -> Sequence:\n raise 
NotImplementedError(\"{} does not implement map\"\n .format(self.__class__.__name__))\n\n def init_compute_resource(self) -> object:\n from cmath import sqrt\n return sqrt\n\n def init_network_resource(self) -> object:\n import requests\n return requests.Session\n\n @staticmethod\n def do_compute_work(args) -> None:\n compute_resource, num, *_ = args\n sqrt = compute_resource\n sqrt(sqrt(sqrt(num)))\n\n @staticmethod\n def do_network_work(args) -> None:\n network_resource, *_ = args\n Session = network_resource\n with Session() as s:\n adapter = HTTPAdapter(max_retries=3)\n s.mount('http:\/\/', adapter)\n s.get('http:\/\/localhost:8080\/')\n\n def run_compute_test(self, jobs: int, trials: int,\n show_progress: bool=False) -> Mapping:\n return self._run_test(self.do_compute_work, self.compute_resource,\n jobs, trials, show_progress=show_progress)\n\n def run_network_test(self, jobs: int, trials: int,\n show_progress: bool=False) -> Mapping:\n return self._run_test(self.do_network_work, self.network_resource,\n jobs, trials, show_progress=show_progress)\n\n def _run_test(self, work_func: FunctionType, work_resource: object,\n jobs: int, trials: int,\n show_progress: bool=False) -> Mapping:\n results = {\n 'jobs': jobs,\n 'trials': trials,\n 'time': [],\n 'blocks': [],\n }\n # Forcibly evaluate the inputs to prevent time\/resources taken up later\n inputs = list(zip(\n [work_resource] * jobs,\n range(jobs)\n ))\n trial_iter = range(trials)\n if show_progress is True and trials > 2:\n trial_iter = tqdm(trial_iter, desc='trials')\n gc.collect()\n for _ in trial_iter:\n # Run trial of pool map function and measure it\n gc.collect()\n blocks_start = sys.getallocatedblocks()\n time_start = time.time()\n list(self.map(work_func, inputs))\n time_end = time.time()\n results['time'].append(time_end - time_start)\n # Get allocated blocks before garbage collection to show peak usage\n blocks_end = sys.getallocatedblocks()\n results['blocks'].append(blocks_end - blocks_start)\n return results\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_258","text":"import numpy as np\nimport math\nfrom ..utils import Monitor\nfrom scipy.optimize import basinhopping, minimize, Bounds\n\nclass OverbudgetException(Exception):\n def __init__(self):\n super(Exception, self).__init__()\n\ndef get_variable_bounds(problem):\n lbs = problem.lbs()\n ubs = problem.ubs()\n\n return Bounds(lbs, ubs)\n\ndef optimize_basinhopping(problem, max_evals, T=1.0, stepsize=0.5, localmethod=\"L-BFGS-B\", log=None, verbose=True):\n vt = problem.vartype()\n lbs = problem.lbs()\n ubs = problem.ubs()\n mon = Monitor(f\"scipy.basinhopping\/{localmethod}\", problem, log=log)\n def f(x):\n # This approach does not stay within its evaluation budget (it has little to no way to enforce this!)\n # As such. 
raise an exception if we are over the limit\n if mon.num_iters > max_evals:\n raise OverbudgetException()\n # scipy.optimize\n xvec = x.copy()\n # Round non-continuous variables\n xvec[vt != 'cont'] = np.round(xvec[vt != 'cont'])\n # Clamp variable values to bounds\n np.clip(xvec, lbs, ubs, out=xvec)\n mon.commit_start_eval()\n r = problem.evaluate(xvec)\n mon.commit_end_eval(xvec, r)\n return r\n \n def budget_check_global(x, f, accept):\n # Callback used to stop basin hopping when evaluation limit is reached.\n # x -- local minimum solution\n # f -- corresponding fitness\n # accept -- whether this local optima was accepted as the new reference solution\n return mon.num_iters >= max_evals\n\n def budget_check_local(x):\n # Callback used to stop local optimization when evaluation limit is reached.\n # x -- local minimum solution\n return mon.num_iters >= max_evals\n \n minimizer_kwargs = {\n 'method': localmethod,\n 'bounds': get_variable_bounds(problem),\n 'callback': budget_check_local\n }\n\n # Generate initial point\n lb = problem.lbs()\n ub = problem.ubs()\n d = len(lb)\n x0 = np.random.rand(d)*(ub-lb) + lb\n x0[vt != 'cont'] = np.round(x0[vt != 'cont'])\n\n mon.start()\n try:\n optim_result = basinhopping(func=f, x0=x0, niter=max_evals, T=T, stepsize=stepsize, minimizer_kwargs=minimizer_kwargs, callback=budget_check_global)\n except OverbudgetException as e:\n pass\n mon.end()\n\n solX = mon.best_x #optim_result['x']\n solY = mon.best_fitness #optim_result['fun']\n\n return solX, solY, mon\n\n\ndef optimize_scipy_local(problem, max_evals, method=\"BFGS\", log=None, verbose=False):\n\n vt = problem.vartype()\n mon = Monitor(f\"scipy.{method}\", problem, log=log)\n def f(x):\n # This approach does not stay within its evaluation budget (it has little to no way to enforce this!)\n # As such. 
raise an exception if we are over the limit\n if mon.num_iters > max_evals:\n raise OverbudgetException()\n # scipy.optimize\n xvec = x.copy()\n # Round non-continuous variables\n xvec[vt != 'cont'] = np.round(xvec[vt != 'cont'])\n mon.commit_start_eval()\n r = problem.evaluate(xvec)\n mon.commit_end_eval(xvec, r)\n return r\n \n def budget_check_local(x):\n # Callback used to stop local optimization when evaluation limit is reached.\n # x -- local minimum solution\n return mon.num_iters >= max_evals\n\n # Generate initial point, round the integers.\n lb = problem.lbs()\n ub = problem.ubs()\n d = len(lb)\n x0 = np.random.rand(d)*(ub-lb) + lb\n x0[vt != 'cont'] = np.round(x0[vt != 'cont'])\n\n mon.start()\n try:\n optim_result = minimize(fun=f, x0=x0, method=method, bounds=get_variable_bounds(problem), options={'maxiter': max_evals}, callback=budget_check_local)\n except OverbudgetException as e:\n pass\n mon.end()\n\n solX = mon.best_x #optim_result['x']\n solY = mon.best_fitness #optim_result['fun']\n\n return solX, solY, mon"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_259","text":"prise-3d\/IPFML\n\"\"\"\nFunctions which can be used to extract information from image or reduce it\n\"\"\"\n\n# main imports\nimport os\nimport random\nimport numpy as np\n\n# image processing imports\nfrom numpy.linalg import svd\nfrom scipy import misc\nfrom sklearn import preprocessing\nfrom skimage import io, color\nimport cv2\nfrom PIL import Image\n\n# ipfml imports\nfrom ipfml.processing import compression\n\n\ndef get_LAB(image):\n \"\"\"Transforms RGB Image into Lab\n\n Args:\n image: image to convert\n\n Returns:\n Lab information\n\n Usage:\n\n >>> from PIL import Image\n >>> from ipfml.processing import transform\n >>> img = Image.open('.\/images\/test_img.png')\n >>> Lab = transform.get_LAB(img)\n >>> Lab.shape\n (200, 200, 3)\n \"\"\"\n\n return color.rgb2lab(image)\n\n\ndef get_LAB_L(image):\n \"\"\"Transforms RGB Image into Lab and returns L\n\n Args:\n image: image to convert\n\n Returns:\n The L chanel from Lab information\n\n >>> from PIL import Image\n >>> from ipfml.processing import transform\n >>> img = Image.open('.\/images\/test_img.png')\n >>> L = transform.get_LAB_L(img)\n >>> L.shape\n (200, 200)\n \"\"\"\n\n lab = get_LAB(image)\n return lab[:, :, 0]\n\n\ndef get_LAB_a(image):\n \"\"\"Transforms RGB Image into LAB and returns a\n\n Args:\n image: image to convert\n\n Returns:\n The a chanel from Lab information\n\n Usage:\n\n >>> from PIL import Image\n >>> from ipfml.processing import transform\n >>> img = Image.open('.\/images\/test_img.png')\n >>> a = transform.get_LAB_a(img)\n >>> a.shape\n (200, 200)\n \"\"\"\n\n lab = get_LAB(image)\n return lab[:, :, 1]\n\n\ndef get_LAB_b(image):\n \"\"\"Transforms RGB Image into LAB and returns b\n\n Args:\n image: image to convert\n\n Returns:\n The b chanel from Lab information\n\n Usage :\n\n >>> from PIL import Image\n >>> from ipfml.processing import transform\n >>> img = Image.open('.\/images\/test_img.png')\n >>> b = transform.get_LAB_b(img)\n >>> b.shape\n (200, 200)\n \"\"\"\n\n lab = get_LAB(image)\n return lab[:, :, 2]\n\n\ndef get_XYZ(image):\n \"\"\"Transforms RGB Image into XYZ\n\n Args:\n image: image to convert\n\n Returns:\n XYZ information obtained from transformation\n\n Usage:\n\n >>> from PIL import Image\n >>> from ipfml.processing import transform\n >>> img = Image.open('.\/images\/test_img.png')\n >>> transform.get_XYZ(img).shape\n (200, 200, 3)\n \"\"\"\n\n return 
color.rgb2xyz(image)\n\n\ndef get_XYZ_X(image):\n \"\"\"Transforms RGB Image into XYZ and returns X\n\n Args:\n image: image to convert\n\n Returns:\n The X chanel from XYZ information\n\n Usage:\n\n >>> from PIL import Image\n >>> from ipfml.processing import transform\n >>> img = Image.open('.\/images\/test_img.png')\n >>> x = transform.get_XYZ_X(img)\n >>> x.shape\n (200, 200)\n \"\"\"\n\n xyz = color.rgb2xyz(image)\n return xyz[:, :, 0]\n\n\ndef get_XYZ_Y(image):\n \"\"\"Transforms RGB Image into XYZ and returns Y\n\n Args:\n image: image to convert\n\n Returns:\n The Y chanel from XYZ information\n\n Usage:\n\n >>> from PIL import Image\n >>> from ipfml.processing import transform\n >>> img = Image.open('.\/images\/test_img.png')\n >>> y = transform.get_XYZ_Y(img)\n >>> y.shape\n (200, 200)\n \"\"\"\n\n xyz = color.rgb2xyz(image)\n return xyz[:, :, 1]\n\n\ndef get_XYZ_Z(image):\n \"\"\"Transforms RGB Image into XYZ and returns Z\n\n Args:\n image: image to convert\n\n Returns:\n The Z chanel from XYZ information\n\n Raises:\n ValueError: If `nb_bits` has unexpected value. `nb_bits` needs to be in interval [1, 8].\n\n Usage:\n\n >>> from PIL import Image\n >>> from ipfml.processing import transform\n >>> img = Image.open('.\/images\/test_img.png')\n >>> z = transform.get_XYZ_Z(img)\n >>> z.shape\n (200, 200)\n \"\"\"\n\n xyz = color.rgb2xyz(image)\n return xyz[:, :, 2]\n\n\ndef get_low_bits_img(image, nb_bits=4):\n \"\"\"Returns Image or Numpy array with data information reduced using only low bits\n\n Args:\n image: image to convert\n nb_bits: optional parameter which indicates the number of bits to keep\n\n Returns:\n Numpy array with reduced values\n\n Usage:\n\n >>> from PIL import Image\n >>> from ipfml.processing import transform\n >>> img = Image.open('.\/images\/test_img.png')\n >>> low_bits_img = transform.get_low_bits_img(img, 5)\n >>> low_bits_img.shape\n (200, 200, 3)\n \"\"\"\n\n if nb_bits <= 0:\n raise ValueError(\n \"unexpected value of number of bits to keep. @nb_bits needs to be positive and greater than 0.\"\n )\n\n if nb_bits > 8:\n raise ValueError(\n \"Unexpected value of number of bits to keep. @nb_bits needs to be in interval [1, 8].\"\n )\n\n img_arr = np.array(image)\n\n bits_values = sum([pow(2, i - 1) for i in range(1, nb_bits + 1)])\n\n return img_arr & bits_values\n\n\ndef get_bits_img(image, interval):\n \"\"\"Returns only bits specified into the interval\n\n Args:\n image: image to convert using this interval of bits value to keep\n interval: (begin, end) of bits values\n\n Returns:\n Numpy array with reduced values\n\n Raises:\n ValueError: If min value from interval is not >= 1.\n ValueError: If max value from interval is not <= 8.\n ValueError: If min value from interval >= max value.\n\n Usage:\n\n >>> from PIL import Image\n >>> from ipfml.processing import transform\n >>> img = Image.open('.\/images\/test_img.png')\n >>> bits_img = transform.get_bits_img(img, (2, 5))\n >>> bits_img.shape\n (200, 200, 3)\n \"\"\"\n\n img_arr = np.array(image)\n begin, end = interval\n\n if begin < 1:\n raise ValueError(\n \"Unexpected value of interval. Interval min value needs to be >= 1.\"\n )\n\n if end > 8:\n raise ValueError(\n \"Unexpected value of interval. 
Interval min value needs to be <= 8.\"\n )\n\n if begin >= end:\n raise ValueError(\"Unexpected interval values order.\")\n\n bits_values = sum([pow(2, i - 1) for i in range(begin, end + 1)])\n\n return img_arr & bits_values\n\n\ndef gray_to_mscn(image):\n \"\"\"Convert Grayscale Image into Mean Subtracted Contrast Normalized (MSCN)\n\n Args:\n image: grayscale image\n\n Returns:\n MSCN matrix obtained from transformation\n\n Usage:\n\n >>> from PIL import Image\n >>> from ipfml.processing import transform\n >>> img = Image.open('.\/images\/test_img.png')\n >>> img = transform.get_LAB_L(img)\n >>> img_mscn = transform.gray_to_mscn(img)\n >>> img_mscn.shape\n (200, 200)\n \"\"\"\n\n s = 7 \/ 6\n blurred = cv2.GaussianBlur(image, (7, 7),\n s) # apply gaussian blur to the image\n blurred_sq = blurred * blurred\n sigma = cv2.GaussianBlur(image * image, (7, 7), s)\n sigma = abs(sigma - blurred_sq)**0.5\n sigma = sigma + 1.0 \/ 255 # avoid DivideByZero Exception\n mscn = (image - blurred) \/ sigma # MSCN(i, j) image\n\n return mscn\n\n\ndef rgb_to_mscn(image):\n \"\"\"Convert RGB Image into Mean Subtracted Contrast Normalized (MSCN)\n\n Args:\n image: 3D RGB image Numpy array or PIL RGB image\n\n Returns:\n 2D Numpy array with MSCN information\n\n Example:\n\n >>> from PIL import Image\n >>> from ipfml.processing import transform\n >>> img = Image.open('.\/images\/test_img.png')\n >>> img_mscn = transform.rgb_to_mscn(img)\n >>> img_mscn.shape\n (200, 200)\n \"\"\"\n\n # check if PIL image or not\n img_arr = np.array(image)\n\n # convert rgb image to gray\n im = np.array(color.rgb2gray(img_arr) * 255, 'uint8')\n\n return gray_to_mscn(im)\n\n\ndef get_mscn_coefficients(image):\n \"\"\"Compute the Mean Substracted Constrast Normalized coefficients of an image\n\n Args:\n image: PIL Image, Numpy array or path of image\n\n Returns:\n MSCN coefficients\n\n Raises:\n FileNotFoundError: If `image` is set as str path and image was not found\n ValueError: If `image` numpy shape are not correct\n\n Example:\n\n >>> from PIL import Image\n >>> import numpy as np\n >>> from ipfml.processing import transform\n >>> image_values = Image.open('.\/images\/test_img.png')\n >>> mscn_coefficients = transform.get_mscn_coefficients(image_values)\n >>> mscn_coefficients.shape\n (200, 200)\n \"\"\"\n\n if isinstance(image, str):\n if os.path.exists(image):\n # open image directly as grey level image\n imdist = cv2.imread(image, 0)\n else:\n raise FileNotFoundError('Image not found in your system')\n\n elif isinstance(image, np.ndarray):\n # convert if necessary to grey level numpy array\n if image.ndim == 2:\n imdist = image\n if image.ndim == 3:\n imdist = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n else:\n raise ValueError('Incorrect image shape')\n else:\n # if PIL Image\n image = np.asarray(image)\n\n if image.ndim == 2:\n imdist = image\n if image.ndim == 3:\n imdist = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n else:\n raise ValueError('Incorrect image shape')\n\n imdist = imdist.astype(np.float64)\n imdist = imdist \/ 255.0\n\n # calculating MSCN coefficients\n mu = cv2.GaussianBlur(imdist, (7, 7),\n 7 \/ 6,\n borderType=cv2.BORDER_CONSTANT)\n mu_sq = mu * mu\n sigma = cv2.GaussianBlur(imdist * imdist, (7, 7),\n 7 \/ 6,\n borderType=cv2.BORDER_CONSTANT)\n sigma = np.sqrt(abs((sigma - mu_sq)))\n structdis = (imdist - mu) \/ (sigma + 1)\n return structdis\n\n\ndef get_LAB_L_SVD(image):\n \"\"\"Returns Singular values from LAB L Image information\n\n Args:\n image: PIL Image or Numpy array\n\n Returns:\n U, s, V 
information obtained from SVD compression using Lab\n\n Example:\n\n >>> from PIL import Image\n >>> from ipfml.processing import transform\n >>> img = Image.open('.\/images\/test_img.png')\n >>> U, s, V = transform.get_LAB_L_SVD(img)\n >>> U.shape\n (200, 200)\n >>> len(s)\n 200\n >>> V.shape\n (200, 200)\n \"\"\"\n L = get_LAB_L(image)\n return compression.get_SVD(L)\n\n\ndef get_LAB_L_SVD_s(image):\n \"\"\"Returns s (Singular values) SVD from L of LAB Image information\n\n Args:\n image: PIL Image or Numpy array\n\n Returns:\n vector of singular values\n\n Example:\n\n >>> from PIL import Image\n >>> from ipfml.processing import transform\n >>> img = Image.open('.\/images\/test_img.png')\n >>> s = transform.get_LAB_L_SVD_s(img)\n >>> len(s)\n 200\n \"\"\"\n L = get_LAB_L(image)\n return compression.get_SVD_s(L)\n\n\ndef get_LAB_L_SVD_U(image):\n \"\"\"Returns U SVD from L of LAB Image information\n\n Args:\n image: PIL Image or Numpy array\n\n Returns:\n U matrix of SVD compression\n\n Example:\n\n >>> from PIL import Image\n >>> from ipfml.processing import transform\n >>> img = Image.open('.\/images\/test_img.png')\n >>> U = transform.get_LAB_L_SVD_U(img)\n >>> U.shape\n (200, 200)\n \"\"\"\n L = get_LAB_L(image)\n return compression.get_SVD_U(L)\n\n\ndef get_LAB_L_SVD_V(image):\n \"\"\"Returns V SVD from L of LAB Image information\n\n Args:\n image: PIL Image or Numpy array\n\n Returns:\n V matrix of SVD compression\n\n Example:\n\n >>> from PIL import Image\n >>> from ipfml.processing import transform\n >>> img = Image.open('.\/images\/test_img.png')\n >>> V = transform.get_LAB_L_SVD_V(img)\n >>> V.shape\n (200, 200)\n \"\"\"\n\n L = get_LAB_L(image)\n return compression.get_SVD_V(L)\n\n\ndef rgb_to_grey_low_bits(image, nb_bits=4):\n \"\"\"Convert RGB Image into grey image using only 4 low bits values\n\n Args:\n image: 3D RGB image Numpy array or PIL RGB image\n nb_bits: optional parameter which indicates the number of bits to keep (default 4)\n\n Returns:\n 2D Numpy array with low bits information kept\n\n Example:\n\n >>> from PIL import Image\n >>> from ipfml.processing import transform\n >>> img = Image.open('.\/images\/test_img.png')\n >>> low_bits_grey_img = transform.rgb_to_grey_low_bits(img, 5)\n >>> low_bits_grey_img.shape\n (200, 200)\n \"\"\"\n\n img_arr = np.array(image)\n grey_block = np.array(color.rgb2gray(img_arr) * 255, 'uint8')\n\n return get_low_bits_img(grey_block, nb_bits)\n\n\ndef rgb_to_LAB_L_low_bits(image, nb_bits=4):\n \"\"\"Convert RGB Image into Lab L channel image using only 4 low bits values\n\n Args:\n image: 3D RGB image Numpy array or PIL RGB image\n nb_bits: optional parameter which indicates the number of bits to keep (default 4)\n\n Returns:\n 2D Numpy array with low bits information kept\n\n Example:\n\n >>> from PIL import Image\n >>> from ipfml.processing import transform\n >>> img = Image.open('.\/images\/test_img.png')\n >>> low_bits_Lab_l_img = transform.rgb_to_LAB_L_low_bits(img, 5)\n >>> low_bits_Lab_l_img.shape\n (200, 200)\n \"\"\"\n\n L_block = np.asarray(get_LAB_L(image), 'uint8')\n\n return get_low_bits_img(L_block, nb_bits)\n\n\ndef rgb_to_LAB_L_bits(image, interval):\n \"\"\"Returns only bits from LAB L canal specified into the interval\n\n Args:\n image: image to convert using this interval of bits value to keep\n interval: (begin, end) of bits values\n\n Returns:\n 2D Numpy array with reduced values\n\n >>> from PIL import Image\n >>> from ipfml.processing import transform\n >>> img = Image.open('.\/images\/test_img.png')\n >>> 
bits_Lab_l_img = transform.rgb_to_LAB_L_bits(img, (2, 6))\n >>> bits_Lab_l_img.shape\n (200, 200)\n \"\"\"\n\n L_block = np.asarray(get_LAB_L(image), 'uint8')\n\n return get_bits_img(L_block, interval)\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_260","text":"import numpy as np\nfrom scipy.stats import multivariate_normal\nfrom to.probabilistic_model import ProbabilisticModel\nfrom evolution import chromosome as evolution\nfrom copy import deepcopy\n\n\n\nclass MixtureModel(object):\n\n def __init__(self, allModels, alpha=False):\n self.model_list = allModels.copy()\n self.nModels = len(allModels)\n if alpha is False:\n self.alpha = (1\/self.nModels)*np.ones(self.nModels)\n else:\n self.alpha = alpha\n\n self.probTable = None\n self.nSol = None\n self.__target_model_added = False\n\n def add_target_solutions(self, solutions, modelType):\n if not self.__target_model_added:\n self.nModels = self.nModels + 1\n self.model_list.append(ProbabilisticModel(modelType=modelType))\n self.model_list[-1].buildModel(solutions)\n self.__target_model_added = True\n else:\n raise Exception('Target model is already added.')\n\n def add_target_model(self, target_model):\n if not self.__target_model_added:\n self.nModels = self.nModels + 1\n self.model_list.append(target_model)\n self.target_model_added = True\n else:\n raise Exception('Target model is already added.')\n\n def createTable(self, solutions, CV, modelType, probs_RL=None):\n if CV:\n self.add_target_solutions(solutions, modelType)\n self.alpha = (1\/self.nModels) * np.ones(self.nModels)\n nSol = solutions.shape[0]\n self.nSol = nSol\n self.probTable = np.ones([nSol, self.nModels])\n \n if probs_RL is None:\n for j in range(self.nModels-1):\n self.probTable[:, j] = self.model_list[j].pdfEval(solutions) \n else:\n for j in range(0, self.nModels-1):\n self.probTable[:, j] = self.model_list[j].pdfEval(solutions) # Time complexity: O(pd)\n\n for i in range(nSol): # Leave-one-out cross validation\n x = np.concatenate((solutions[:i, :], solutions[i+1:, :]))\n tModel = ProbabilisticModel(modelType=modelType)\n tModel.buildModel(x)\n self.probTable[i, -1] = tModel.pdfEval(solutions[[i], :])\n else:\n nSol = solutions.shape[0]\n self.probTable = np.ones([nSol, self.nModels])\n for j in range(self.nModels):\n self.probTable[:, j] = self.model_list[j].pdfEval(solutions)\n self.nSol = nSol\n\n def EMstacking(self, iterations=1):\n for t in range(iterations):\n print(t)\n talpha = self.alpha\n probVector = np.matmul(self.probTable, talpha.T)\n if any(probVector == 0):\n print('probVector: ', probVector)\n print('self.probTable: ', self.probTable)\n print('talpha: ', talpha)\n for i in range(self.nModels):\n talpha[i] = np.sum((1\/self.nSol)*talpha[i]*self.probTable[:, i]\/probVector)\n self.alpha = talpha\n\n if np.sum(np.isnan(self.alpha)) > 0:\n print('sanity check mutate')\n self.alpha = np.zeros(self.nModels)\n self.alpha[-1] = 1\n\n\n def mutate(self, version='normal'):\n modif_alpha = None\n \n modif_alpha = self.alpha + np.random.rand(self.nModels)*0.01\n\n total_alpha = np.sum(modif_alpha)\n if total_alpha == 0:\n self.alpha = np.zeros(self.nModels)\n self.alpha[-1] = 1\n else:\n self.alpha = modif_alpha\/total_alpha\n\n # Sanity check\n if np.sum(np.isnan(self.alpha)) > 0:\n print('sanity check mutate')\n self.alpha = np.zeros(self.nModels)\n self.alpha[-1] = 1\n\n\n def sample(self, nSol, samplesRL=None, preprocess=False):\n \n if preprocess:\n i = 0\n while any(self.alpha[self.alpha!=0]<(1\/nSol - 
np.finfo(np.float32).eps)):\n self.alpha[self.alpha<(1\/nSol - np.finfo(np.float32).eps)] = 0\n self.alpha = self.alpha\/np.sum(self.alpha)\n i += 1\n\n indSamples = np.ceil(nSol*self.alpha).astype(int)\n solutions = np.array([])\n for i in range(self.nModels):\n if indSamples[i] == 0:\n pass\n elif i == self.nModels - 2 and samplesRL is not None:\n solutions = np.vstack([solutions, samplesRL]) if solutions.size else samplesRL\n else:\n sols = self.model_list[i].sample(indSamples[i])\n solutions = np.vstack([solutions, sols]) if solutions.size else sols\n solutions = solutions[np.random.permutation(solutions.shape[0]), :]\n solutions = solutions[:nSol, :]\n return solutions\n\n\n def sample_enhanced(self, nSol, problem, mutation_strength, \n samples_count, max_sampling_num=None, solution_found=None,\n problem_type='knapsack', net=None, s_len=None, mutation=True):\n \"\"\"\n This sampling function only works for sTrEvo algorithm\n \"\"\"\n\n if max_sampling_num is None:\n max_sampling_num = nSol\n indSamples = np.ceil(nSol*self.alpha).astype(int)\n\n solutions = []\n added_solutions = []\n solutions_idx = []\n for i in range(self.nModels):\n if indSamples[i] == 0:\n pass\n else:\n \n sampling_size = min(max_sampling_num, indSamples[i])\n sols_idx = np.ones(sampling_size) * i\n sols = self.model_list[i].sample(sampling_size)\n\n solutions = np.append(solutions, sols, axis=0) if len(solutions) else deepcopy(sols)\n solutions_idx = np.append(solutions_idx, sols_idx, axis=0) if len(sols_idx) else deepcopy(sols_idx)\n\n \n perm_indexes = np.random.permutation(len(solutions))\n solutions_num = min(nSol, len(solutions))\n solutions = solutions[perm_indexes][:solutions_num]\n solutions_idx = solutions_idx[perm_indexes][:solutions_num].astype(np.int)\n \n\n # Fitness Evaluation + Mutation_strength Update\n offsprings = []\n fitness_mean = 0\n \n func_eval_num = 0\n for solution, src_idx in zip(solutions, solutions_idx):\n \n if problem_type == 'knapsack':\n offsprings.append(evolution.Chromosome(solution))\n fitness = offsprings[-1].fitness_calc(problem)\n elif problem_type == 'pole':\n offsprings.append(evolution.ChromosomePole(solution))\n fitness = offsprings[-1].fitness_calc(net, problem, s_len)\n if not solution_found.value:\n func_eval_num += 1\n if fitness - 2000 > -0.0001:\n solution_found.value = True\n elif problem_type == 'arm':\n offsprings.append(evolution.ChromosomeKA(solution))\n fitness = offsprings[-1].fitness_calc(*problem)\n else:\n raise ValueError('Problem_type is wrong')\n \n fitness_mean += fitness\n if src_idx != self.nModels-1:\n samples_count[src_idx] += 1\n mutation_strength[src_idx] += (1\/samples_count[src_idx])*(fitness - mutation_strength[src_idx])\n \n fitness_mean = fitness_mean\/solutions_num\n\n\n # Sanity check\n if len(offsprings) != solutions_num:\n raise ValueError('offsprings length does not match the number of solutions')\n \n if solution_found is not None: \n return offsprings, mutation_strength, samples_count, fitness_mean, func_eval_num\n else:\n return offsprings, mutation_strength, samples_count, fitness_mean\n\n def n_samples(self, ind, nSol):\n return np.ceil(nSol * self.alpha[ind]).astype(int)\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_261","text":"#!\/usr\/bin\/env python3\n# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.4'\n# jupytext_version: 1.1.4\n# kernelspec:\n# display_name: Python 3\n# language: python\n# 
name: python3\n# ---\n\n# # S_BackForwEwmaSD [](https:\/\/www.arpm.co\/lab\/redirect.php?code=S_BackForwEwmaSD&codeLang=Python)\n# For details, see [here](https:\/\/www.arpm.co\/lab\/redirect.php?permalink=eb-estimation-fwd-bwd-exp-smooth).\n\n# ## Prepare the environment\n\n# +\nimport os\nimport os.path as path\nimport sys\n\nsys.path.append(path.abspath('..\/..\/functions-legacy'))\n\nfrom numpy import arange, array, zeros, diff, abs, log, exp, sqrt, linspace\nfrom numpy import sum as npsum\n\nfrom scipy.io import loadmat\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import subplots, title\nimport matplotlib.dates as mdates\n\nplt.style.use('seaborn')\n\nfrom CONFIG import GLOBAL_DB, TEMPORARY_DB\nfrom ARPM_utils import save_plot, struct_to_dict, date_mtop\n\n# Parameters\ntau_HL = 30\nlam = log(2) \/ tau_HL\ni_ = 252\n# -\n\n# ## Upload database db_Stock SPX\n\n# +\ntry:\n db = loadmat(os.path.join(GLOBAL_DB, 'db_Stocks'), squeeze_me=True)\nexcept FileNotFoundError:\n db = loadmat(os.path.join(TEMPORARY_DB, 'db_Stocks'), squeeze_me=True)\n\nSPX = struct_to_dict(db['SPX'])\n# -\n\n# ## Compute the realized compounded returns\n\n# +\nv = SPX.Price_close\n\nx = log(v)\nepsi = diff(x, 1).reshape(1,-1)\ndate = SPX.Date[1:]\n\nt_ = epsi.shape[1]\n# -\n\n# ## Compute the backward-forward exponential decay probabilities\n\nedecayprobs = exp(-lam*(abs(arange(-i_, i_ + 1)))).reshape(1,-1)\ngamma = npsum(edecayprobs) # normalization coefficient\nedecayprobs = edecayprobs \/ gamma # decay factors\n\n# ## Compute the backward\/forward exponentially weighted moving standard deviations\n\ny = zeros(t_ - 2 * i_) # start from time= i_+1 and estimate up to time= t_end -i_ (so that i_ observations are always availabe both backward and forward)\nfor t in arange(i_,t_-i_):\n ret = epsi[[0],t - i_:t + i_+1]\n y[t - i_] = sqrt(edecayprobs@ret.T ** 2)\n\n# ## Display the compounded returns and the backward\/forward exponentially weighted moving standard deviations\n\n# +\ndate_dt = array([date_mtop(i) for i in date])\nmyFmt = mdates.DateFormatter('%d-%b-%Y')\n\nf, ax = subplots(2, 1)\ndate_est = date_dt[i_:t_- i_]\nax[0].plot(date_est, epsi[0,i_:t_ - i_], color='b',lw=1)\nax[0].set_xlim([date_est[0], date_est[-1]])\nax[0].xaxis.set_major_formatter(myFmt)\ntitle('Compounded returns')\n\ndate_est = date_dt[i_ :t_- i_]\nax[1].plot(date_est, y, color=[.9, .4, 0], lw = 1.5)\nax[1].set_xlim([date_est[0], date_est[-1]])\nax[1].xaxis.set_major_formatter(myFmt)\ntitle('Estimated Exponentially Weighted Moving St. 
Deviation')\nplt.tight_layout();\n# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])\n# -\n\n# ## Display the backward\/forward exponential decay probabilities\n\nf, ax = subplots(1, 1)\nax.bar(arange(edecayprobs.shape[1]),edecayprobs[0], facecolor=[.7, .7, .7], edgecolor=[.7, .7, .7])\nax.set_xlim([1, 2 * i_ + 1])\nplt.xticks(linspace(1,2*i_+1,3),[-252,0,252])\ntitle('Exponential decay factors profile');\n# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])\n\n"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_263","text":"ADBI-george2\/AnomalyDetection\nfrom __future__ import print_function, division\nfrom igraph import *\nimport numpy as np\nimport numpy.linalg as la\nfrom scipy.stats import pearsonr\nimport matplotlib.pyplot as plt\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n__author__ = 'panzer'\n\nFEATURES = [\"degree\", \"clustering_coefficient\", \"ego_net_edges\"]\n\ndef say(*lst):\n print(*lst, end=\"\")\n sys.stdout.flush()\n\ndef list_files(folder):\n \"\"\"\n List all files in a folder\n :param folder: Name of the folder\n :return: list of complete file names in folder\n \"\"\"\n return [\"%s\/%s\"%(folder, f) for f in os.listdir(folder) if f.endswith(\".txt\")]\n\ndef make_graph(file_name):\n \"\"\"\n Make graph from a file\n :param file_name:\n :return:\n \"\"\"\n with open(file_name, 'r') as f:\n lines = f.readlines()\n node_count, edge_count = map(int, lines[0].strip().split())\n edges = [map(int, line.strip().split()) for line in lines[1:]]\n graph = Graph()\n graph.add_vertices(node_count)\n graph.add_edges(edges)\n for vertex in graph.vs:\n assign_attributes(vertex, graph)\n return graph\n\n\ndef assign_attributes(vertex, graph):\n \"\"\"\n Assign Attributes for the vertex\n :param vertex: Vertex to be assigned attributes\n :param graph: Instance of graph to which the vertex belongs\n \"\"\"\n neighbors = graph.neighbors(vertex.index)\n ego_net = graph.subgraph([vertex.index]+neighbors)\n vertex[\"degree\"] = vertex.degree()\n cc = graph.transitivity_local_undirected([vertex.index])[0]\n vertex[\"clustering_coefficient\"] = 0 if np.isnan(cc) else cc\n vertex[\"ego_net_edges\"] = len(ego_net.es)\n\n\ndef get_feature_vector(graphs, vertex_id, feature):\n return [graph.vs[vertex_id][feature] for graph in graphs]\n\ndef pearson_rho(x_vector, y_vector):\n val, _ = pearsonr(x_vector, y_vector)\n return 0 if np.isnan(val) else val\n\ndef get_principal_eigen_vector(matrix):\n _, v = la.eig(matrix)\n return v[0]\n\ndef construct_correlation_matrix(all_graphs, feature, start, window=7):\n graphs = all_graphs[start:start+window]\n vertices = range(len(graphs[0].vs))\n matrix = []\n for x in vertices:\n x_vector = get_feature_vector(graphs, x, feature)\n covariance_vector = []\n for y in vertices:\n y_vector = get_feature_vector(graphs, y, feature)\n covariance_vector.append(pearson_rho(x_vector, y_vector))\n matrix.append(covariance_vector)\n return matrix\n\ndef vector_average(vectors):\n total = vectors[0]\n count = 1\n for vector in vectors[1:]:\n total = total + vector\n count += 1\n return total \/ count\n\ndef construct_correlation_matrices(all_graphs, window=7):\n feature_info = {}\n for feature in FEATURES:\n matrices = []\n eigens = []\n for start in range(len(all_graphs)-window):\n say(\".\")\n matrix = construct_correlation_matrix(all_graphs, feature, start, window)\n matrices.append(matrix)\n 
eigens.append(get_principal_eigen_vector(matrix))\n feature_info[feature] = {\n \"matrices\" : matrices,\n \"eigens\" : eigens\n }\n print(\"%s completed\"%feature)\n return feature_info\n\ndef compute_eigen_behaviour(feature_info, window=7):\n eigen_behaviours = {}\n for feature in FEATURES:\n eigens = feature_info[feature][\"eigens\"]\n eigen_behaviour = []\n for start in range(len(eigens)-window):\n u_t = eigens[start+window]\n r_t1 = vector_average(eigens[start:start+window])\n eigen_behaviour.append(round(np.dot(u_t, r_t1).real, 2))\n eigen_behaviours[feature] = eigen_behaviour\n return eigen_behaviours\n\ndef save_eigen_behaviours(eigen_behaviours, file_name):\n lines = [\" \".join(FEATURES)+\"\\n\"]\n vals = []\n for feature in FEATURES:\n vals.append(eigen_behaviours[feature])\n vals = zip(*vals)\n for line in vals:\n lines.append(\" \".join(map(str, line))+\"\\n\")\n with open(file_name, 'w') as f:\n f.writelines(lines)\n\ndef plot_eigen_behaviours(eigen_behaviours, file_name, window = 7):\n xs = range(window,len(eigen_behaviours.values()[0])+window)\n colors = [\"r\", \"g\", \"b\"]\n f, axis_arr = plt.subplots(3, sharex=True)\n for i, feature in enumerate(FEATURES):\n ys = eigen_behaviours[feature]\n axis_arr[i].plot(xs, ys, \"%s-\"%colors[i])\n axis_arr[i].set_ylabel(\"Z Score\")\n plt.xlabel(\"Time\")\n plt.xlim(0, xs[-1]+2)\n plt.savefig(file_name)\n plt.clf()\n\n\n\ndef _main(folder):\n graphs = []\n for f in list_files(folder):\n graphs.append(make_graph(f))\n print(\"Graphs Processed\")\n feature_info = construct_correlation_matrices(graphs)\n eigen_behaviours = compute_eigen_behaviour(feature_info)\n dataset = folder.split(\"\/\")[-1]\n ts_file_name = \"%s_time_series.txt\"%dataset\n ts_png_name = \"%s_time_series.png\"%dataset\n save_eigen_behaviours(eigen_behaviours, ts_file_name)\n plot_eigen_behaviours(eigen_behaviours, ts_png_name)\n\n\nif __name__ == \"__main__\":\n args = sys.argv\n if len(args) != 2:\n print(\"USE THE COMMAND : python anomaly.py \")\n exit()\n folder_name = args[1]\n _main(folder_name)"} +{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_264","text":"AndreasMadsen\/bachelor-code\n\nimport urllib.parse as url\nimport http.server\nimport os.path as path\nimport ujson as json\nimport numpy as np\nimport scipy.sparse\n\nimport time\n\nthisdir = path.dirname(path.realpath(__file__))\n\nclass npzToObj:\n def __init__(self, npz):\n \"\"\"\n npz are lazyloaded, this preloads it loads\n \"\"\"\n for name in npz.files:\n setattr(self, name, npz[name])\n\n\nclass GraphServer:\n def __init__(self, clusters, distance, connectivity, nodes, verbose=False):\n self._verbose = verbose\n if (self._verbose): print(\"Initializing graph server\")\n\n self._clusters = npzToObj(clusters)\n self._distance = scipy.sparse.csr_matrix(distance)\n self._connectivity = scipy.sparse.csr_matrix(connectivity)\n self._raw_nodes = nodes\n self._nodes = [[node['title'], node['website'], node['id']] for node in nodes]\n\n # Create a http server\n if (self._verbose): print(\"\\tCreating http server\")\n self._server = http.server.HTTPServer(('127.0.0.1', 8000), GraphServer.Responder)\n self._server._owner = self\n\n def listen(self):\n if (self._verbose): print(\"Server listening on http:\/\/127.0.0.1:8000\")\n self._server.serve_forever()\n\n def fetch_article(self, id):\n return self._raw_nodes[id]\n\n def fetch_compare(self, a, b):\n return {\n \"connecitivity\": bool(self._connectivity[min(a, b), max(a, b)]),\n \"distance\": float(self._distance[min(a, 
b), max(a, b)])\n }\n\n def _groups_from_title(self, search):\n if (self._verbose): print(\"\\tSearching for \\\"%s\\\"\" % (search))\n words = search.split()\n\n # Construct and execute SQL search query\n match = np.fromiter([\n np.all([(word in node[0]) for word in words])\n for node in self._nodes\n ], dtype='bool')\n\n # Fetch groups\n groups = set(int(group) for group in self._clusters.node_to_group[match])\n if (self._verbose): print(\"\\tSearch complete, found %d groups\" % len(groups))\n\n return groups\n\n def _fetch_single_group(self, group_id):\n if (self._verbose): print(\"\\tFetching group %d\" % group_id)\n\n # Create node info object\n nodes = self._clusters.group[group_id, 0:self._clusters.group_size[group_id]]\n node_info = [self._nodes[id] for id in nodes]\n\n # Create link info object\n if (self._verbose): print(\"\\tBuilding link object\")\n mask = np.any(self._clusters.connects_row[:, np.newaxis] == nodes, axis=1)\n if (np.sum(mask) == 0):\n link_info = []\n else:\n info = (\n self._clusters.connects_row[mask],\n self._clusters.connects_col[mask]\n )\n\n link_info = [\n [int(row), int(col), float(data)]\n for (row, col, data)\n in zip(info[0], info[1], self._distance[info].A1)\n ]\n\n # Send group info\n return (node_info, link_info)\n\n def fetch_graph(self, groups):\n if (self._verbose): print(\"Fetching groups\")\n\n # Validate groups\n max_group_size = int(self._clusters.group.shape[0])\n for group in groups:\n if (group >= max_group_size):\n if (self._verbose): print(\"\\tGroup with id %d do not exists\" % group)\n return None\n\n # Initialize info array\n info = []\n\n # Fetch group info\n for group in groups:\n (node_info, link_info) = self._fetch_single_group(group)\n info.append({\n \"group\": group,\n \"nodes\": node_info,\n \"links\": link_info\n })\n\n # Done return result\n return info\n\n class Responder(http.server.BaseHTTPRequestHandler):\n def __init__(self, *args, **kwargs):\n self._owner = args[2]._owner\n self._verbose = self._owner._verbose\n http.server.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)\n\n def do_GET(self):\n if self.path == '\/' : self.index_page()\n elif self.path == '\/details' : self.details_page()\n elif self.path == '\/d3.js' : self.d3_script()\n elif self.path == '\/view.js' : self.view_script()\n elif self.path == '\/style.css' : self.style_script()\n elif self.path[0:11] == '\/graph.json' : self.graph_data()\n elif self.path[0:13] == '\/article.json' : self.article_data()\n elif self.path[0:13] == '\/compare.json' : self.compare_data()\n else : self.otherwise()\n\n def index_page(self):\n self.send_response(200)\n self.send_header('Content-Type', 'text\/html; charset=UTF-8')\n self.end_headers()\n\n f = open(path.join(thisdir, 'public', 'index.html'), 'rb')\n self.wfile.write(f.read())\n f.close()\n\n def details_page(self):\n self.send_response(200)\n self.send_header('Content-Type', 'text\/html; charset=UTF-8')\n self.end_headers()\n\n f = open(path.join(thisdir, 'public', 'details.html'), 'rb')\n self.wfile.write(f.read())\n f.close()\n\n def d3_script(self):\n self.send_response(200)\n self.send_header('Content-Type', 'application\/javascript; charset=UTF-8')\n self.end_headers()\n\n f = open(path.join(thisdir, 'public', 'd3.js'), 'rb')\n self.wfile.write(f.read())\n f.close()\n\n def view_script(self):\n self.send_response(200)\n self.send_header('Content-Type', 'application\/javascript; charset=UTF-8')\n self.end_headers()\n\n f = open(path.join(thisdir, 'public', 'view.js'), 'rb')\n 
self.wfile.write(f.read())\n f.close()\n\n def style_script(self):\n self.send_response(200)\n self.send_header('Content-Type', 'text\/css; charset=UTF-8')\n self.end_headers()\n\n f = open(path.join(thisdir, 'public', 'style.css'), 'rb')\n self.wfile.write(f.read())\n f.close()\n\n def article_data(self):\n self.send_response(200)\n self.send_header('Content-Type', 'application\/json; charset=UTF-8')\n self.end_headers()\n\n query = url.parse_qs(url.urlparse(self.path).query)\n data = self._owner.fetch_article(int(query['id'][0]))\n self.wfile.write(bytes(json.dumps(data), 'ASCII'))\n\n def compare_data(self):\n self.send_response(200)\n self.send_header('Content-Type', 'application\/json; charset=UTF-8')\n self.end_headers()\n\n query = url.parse_qs(url.urlparse(self.path).query)\n data = self._owner.fetch_compare(int(query['a'][0]), int(query['b'][0]))\n self.wfile.write(bytes(json.dumps(data), 'ASCII'))\n\n def graph_data(self):\n self.send_response(200)\n self.send_header('Content-Type', 'application\/json; charset=UTF-8')\n self.end_headers()\n\n query = url.parse_qs(url.urlparse(self.path).query)\n\n # Convert input to group list\n if ('title' in query):\n groups = self._owner._groups_from_title(query['title'][0])\n elif ('groups' in query):\n groups = set(int(group) for group in query['groups'][0].split(\",\"))\n else:\n groups = None\n\n # Fetch nodes and links\n if (groups is None):\n data = None\n else:\n data = self._owner.fetch_graph(groups)\n\n # Send data\n if (data is None):\n if (self._verbose): print(\"\\tBad query input\")\n self.wfile.write(bytes('null', 'ASCII'))\n else:\n if (self._verbose): print(\"\\tSending result\")\n\n self.wfile.write(bytes('[', 'ASCII'))\n for index, group in enumerate(data):\n self.wfile.write(bytes('{\"group\":', 'ASCII'))\n self.wfile.write(bytes(json.dumps(group['group']), 'ASCII'))\n\n self.wfile.write(bytes(', \"nodes\":', 'ASCII'))\n self.wfile.write(bytes(json.dumps(group['nodes']), 'ASCII'))\n\n self.wfile.write(bytes(', \"links\":', 'ASCII'))\n self.wfile.write(bytes(json.dumps(group['links']), 'ASCII'))\n\n # Write } if last item otherwise write },\\n\n if (index == (len(data) - 1)):\n self.wfile.write(bytes('}', 'ASCII'))\n else:\n self.wfile.write(bytes('},\\n', 'ASCII'))\n self.wfile.write(bytes(']', 'ASCII'))\n\n if (self._verbose): print(\"\\tResult send\")\n\n def otherwise(self):\n self.send_response(404)\n self.send_header('Content-Type', 'text\/html; charset=UTF-8')\n self.end_headers()\n\n self.wfile.write(bytes('
<pre>Sorry invalid path (404)<\/pre>', 'UTF-8'))\n"}
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_265","text":"ACTCollaboration\/mnms\n#!\/usr\/bin\/env python3\nfrom pixell import enmap, utils\nfrom mnms import utils as tnu, mpi\n\nimport numpy as np\nfrom scipy.interpolate import RectBivariateSpline as rectinterp\n\n# Some utilities for calculating effective mode coupling matrices in 2D Fourier space\n# Currently, only supports matrices which are formed as outerproducts of two vectors\n# If want general mode coupling matrices, would need to write a C extension because things\n# won't separate nicely and you'll have to do a full sum\n\ndef get_outer_mask(arr1, arr2=None):\n    arr1 = np.atleast_1d(arr1)\n    if arr2 is None:\n        arr2 = arr1\n    else:\n        arr2 = np.atleast_1d(arr2)\n    assert arr1.ndim == 1 and arr2.ndim == 1 \n    \n    return np.einsum('y,x->yx',arr1,arr2)\n\ndef get_vecs_from_outer_mask(mask):\n    Ny, Nx = mask.shape\n    arr1 = mask[Ny\/\/2]\n    arr2 = mask[:, Nx\/\/2]\n    return arr1, arr2\n\ndef get_1d_kernel(arr1):\n    arr1 = np.atleast_1d(arr1)\n    assert arr1.ndim == 1\n    \n    fnorm = np.abs(np.fft.fft(arr1) \/ arr1.size)**2\n    kernel = np.zeros((fnorm.size, fnorm.size))\n    for i in range(fnorm.size):\n        for j in range(fnorm.size):\n            kernel[i, j] = fnorm[i - j]\n    return kernel\n\ndef get_binned_1d_kernel(kernel, bin_slices):\n    assert kernel.ndim == 2\n    assert kernel.shape[0] == kernel.shape[1]\n\n    nbins = len(bin_slices)\n    nperbin = kernel.shape[0] \/\/ nbins\n    binned_kernel = np.zeros((nbins, nbins))\n    for i in range(nbins):\n        for j in range(nbins):\n            binned_kernel[i, j] = kernel[bin_slices[i], bin_slices[j]].sum() \/ nperbin\n    return binned_kernel\n\ndef get_mcm(arr1, arr2=None, bin_slices1=None, bin_slices2=None):\n    arr1 = np.atleast_1d(arr1)\n    if arr2 is None:\n        square = True\n        assert arr1.ndim == 1\n    else:\n        arr2 = np.atleast_1d(arr2)\n        assert arr1.ndim == 1 and arr2.ndim == 1\n        if np.allclose(arr1, arr2, rtol=0):\n            square = True\n        else:\n            square = False\n    \n    kernel1 = get_1d_kernel(arr1)\n    if bin_slices1 is not None:\n        kernel1 = get_binned_1d_kernel(kernel1, bin_slices1)\n    if square:\n        kernel2 = kernel1\n    else:\n        kernel2 = get_1d_kernel(arr2)\n        if bin_slices2 is not None:\n            kernel2 = get_binned_1d_kernel(kernel2, bin_slices2)\n\n    # kernel1 is M_yy' and kernel2 is M_xx', we want M_yxy'x'\n    return np.einsum('Yy,Xx->YXyx', kernel1, kernel2)\n\ndef get_inv_mcm(arr1, arr2=None, bin_slices1=None, bin_slices2=None, verbose=False):\n    M = get_mcm(arr1, arr2=arr2, bin_slices1=bin_slices1, bin_slices2=bin_slices2)\n    assert M.ndim == 4\n    assert M.shape[0] == M.shape[2] and M.shape[1] == M.shape[3]\n    Ny = M.shape[0]\n    Nx = M.shape[1]\n    M = M.reshape(Ny*Nx, Ny*Nx)\n    if verbose:\n        print(f'Condition number of MCM is {np.round(np.linalg.cond(M), 3)}')\n    M = np.linalg.inv(M)\n    return M.reshape(Ny, Nx, Ny, Nx)\n\ndef get_uniform_bin_slices(arr1, nbins):\n    arr1 = np.atleast_1d(arr1)\n    assert arr1.ndim == 1\n\n    npix = arr1.size\n    counts, displs = mpi.mpi_distribute(npix, nbins)\n    slices = tuple(slice(d, d + counts[i]) for i, d in enumerate(displs))\n    return slices\n\n"}
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_266","text":"1400OS_09_Codes\/ceps.py\nimport os\nimport glob\n\nimport numpy as np\nimport scipy\nimport scipy.io.wavfile\nfrom librosa.feature import mfcc\n\nfrom utils import GENRE_DIR, CHART_DIR\n\n\ndef write_ceps(ceps, fn):\n    \"\"\"\n    Write the MFCC to separate files to speed up processing.\n    \"\"\"\n    base_fn, ext = os.path.splitext(fn)\n    data_fn = CHART_DIR + \"\/\" + base_fn + \".ceps\"\n    np.save(data_fn, ceps)\n    print(\"Written\", data_fn)\n\n\ndef create_ceps(fn):\n    sample_rate, X = scipy.io.wavfile.read(fn)\n\n    Y = X * 1.0\n\n    # ceps, mspec, spec = mfcc(Y)\n    ceps = mfcc(Y)\n    write_ceps(ceps, fn)\n\n\ndef read_ceps(genre_list, base_dir=GENRE_DIR):\n    X = []\n    y = []\n    for label, genre in enumerate(genre_list):\n        for fn in glob.glob(os.path.join(base_dir, genre, \"*.ceps.npy\")):\n            ceps = np.load(fn)\n            num_ceps = len(ceps)\n            X.append(np.mean(ceps[int(num_ceps \/ 10):int(num_ceps * 9 \/ 10)], axis=0))\n            y.append(label)\n\n    return np.array(X), np.array(y)\n\n\nif __name__ == \"__main__\":\n    # os.chdir(GENRE_DIR)\n    glob_wav = os.path.join(\".\", \"*.wav\")\n    print(glob_wav)\n    for fn in glob.glob(glob_wav):\n        create_ceps(fn)\n"}
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_267","text":"msc-acse\/acse-9-independent-research-project-Wade003\nfrom scipy import *\nfrom pylab import *\nnum_detectors = 100\nx = 0.5+0.25*arange(0,float(num_detectors))\/float(num_detectors)\ny = zeros(num_detectors) + 0.5\n\nt = 0.\nn_cycles = 1\ndt = 0.1\/n_cycles\ntmax = 8\n\ndef vel(x,y):\n    return [-(y-0.5),x-0.5]\n\nwhile(t pynini.Fst:\n    \"\"\"Constructs a (possibly pruned) weighted DFA of output strings.\n    Given an epsilon-free lattice of output strings (such as produced by\n    rewrite_lattice), attempts to determinize it, pruning non-optimal paths if\n    optimal_only is true. This is valid only in a semiring with the path property.\n    To prevent unexpected blowup during determinization, a state threshold is\n    also used and a warning is logged if this exact threshold is reached. The\n    threshold is a multiplier of the size of input lattice (by default, 4), plus\n    a small constant factor. This is intended by a sensible default and is not an\n    inherently meaningful value in and of itself.\n\n    Parameters\n    ----------\n    lattice: :class:`~pynini.Fst`\n        Epsilon-free non-deterministic finite acceptor.\n    threshold: float\n        Threshold for weights, 1.0 is optimal only, 0 is for all paths, greater than 1\n        prunes the lattice to include paths with costs less than the optimal path's score times the threshold\n    state_multiplier: int\n        Max ratio for the number of states in the DFA lattice to the NFA lattice; if exceeded, a warning is logged.\n\n    Returns\n    -------\n    :class:`~pynini.Fst`\n        Epsilon-free deterministic finite acceptor.\n    \"\"\"\n    weight_type = lattice.weight_type()\n    weight_threshold = pynini.Weight(weight_type, threshold)\n    state_threshold = 256 + state_multiplier * lattice.num_states()\n    lattice = pynini.determinize(lattice, nstate=state_threshold, weight=weight_threshold)\n    return lattice\n\n\ndef optimal_rewrites(\n    string: pynini.FstLike,\n    rule: pynini.Fst,\n    input_token_type: Optional[TokenType] = None,\n    output_token_type: Optional[TokenType] = None,\n    threshold: float = 1,\n) -> List[str]:\n    \"\"\"Returns all optimal rewrites.\n    Args:\n    string: Input string or FST.\n    rule: Input rule WFST.\n    input_token_type: Optional input token type, or symbol table.\n    output_token_type: Optional output token type, or symbol table.\n    threshold: Threshold for weights (1 is optimal only, 0 is for all paths)\n    Returns:\n    A tuple of output strings.\n    \"\"\"\n    lattice = rewrite.rewrite_lattice(string, rule, input_token_type)\n    lattice = threshold_lattice_to_dfa(lattice, threshold, 4)\n    return rewrite.lattice_to_strings(lattice, output_token_type)\n\n\nclass Rewriter:\n    \"\"\"\n    Helper object for rewriting\n\n    Parameters\n    ----------\n    fst: pynini.Fst\n        G2P FST model\n    input_token_type: pynini.TokenType\n        Grapheme symbol table or \"utf8\"\n    output_token_type: pynini.SymbolTable\n        Phone symbol table\n    num_pronunciations: int\n        Number of pronunciations, default to 0.  
If this is 0, thresholding is used\n    threshold: float\n        Threshold to use for pruning rewrite lattice, defaults to 1.5, only used if num_pronunciations is 0\n    \"\"\"\n\n    def __init__(\n        self,\n        fst: Fst,\n        input_token_type: TokenType,\n        output_token_type: SymbolTable,\n        num_pronunciations: int = 0,\n        threshold: float = 1,\n    ):\n        if num_pronunciations > 0:\n            self.rewrite = functools.partial(\n                rewrite.top_rewrites,\n                nshortest=num_pronunciations,\n                rule=fst,\n                input_token_type=input_token_type,\n                output_token_type=output_token_type,\n            )\n        else:\n            self.rewrite = functools.partial(\n                optimal_rewrites,\n                threshold=threshold,\n                rule=fst,\n                input_token_type=input_token_type,\n                output_token_type=output_token_type,\n            )\n\n    def __call__(self, i: str) -> List[Tuple[str, ...]]:  # pragma: no cover\n        \"\"\"Call the rewrite function\"\"\"\n        hypotheses = self.rewrite(i)\n        return [x for x in hypotheses if x]\n\n\nclass PhonetisaurusRewriter:\n    \"\"\"\n    Helper function for rewriting\n\n    Parameters\n    ----------\n    fst: pynini.Fst\n        G2P FST model\n    input_token_type: pynini.SymbolTable\n        Grapheme symbol table\n    output_token_type: pynini.SymbolTable\n    num_pronunciations: int\n        Number of pronunciations, default to 0.  If this is 0, thresholding is used\n    threshold: float\n        Threshold to use for pruning rewrite lattice, defaults to 1.5, only used if num_pronunciations is 0\n    grapheme_order: int\n        Maximum number of graphemes to consider single segment\n    seq_sep: str\n        Separator to use between grapheme symbols\n    \"\"\"\n\n    def __init__(\n        self,\n        fst: Fst,\n        input_token_type: SymbolTable,\n        output_token_type: SymbolTable,\n        num_pronunciations: int = 0,\n        threshold: float = 1.5,\n        grapheme_order: int = 2,\n        seq_sep: str = \"|\",\n    ):\n        self.fst = fst\n        self.seq_sep = seq_sep\n        self.input_token_type = input_token_type\n        self.output_token_type = output_token_type\n        self.grapheme_order = grapheme_order\n        if num_pronunciations > 0:\n            self.rewrite = functools.partial(\n                rewrite.top_rewrites,\n                nshortest=num_pronunciations,\n                rule=fst,\n                input_token_type=None,\n                output_token_type=output_token_type,\n            )\n        else:\n            self.rewrite = functools.partial(\n                optimal_rewrites,\n                threshold=threshold,\n                rule=fst,\n                input_token_type=None,\n                output_token_type=output_token_type,\n            )\n\n    def __call__(self, graphemes: str) -> List[Tuple[str, ...]]:  # pragma: no cover\n        \"\"\"Call the rewrite function\"\"\"\n        fst = pynini.Fst()\n        one = pynini.Weight.one(fst.weight_type())\n        max_state = 0\n        for i in range(len(graphemes)):\n            start_state = fst.add_state()\n            for j in range(1, self.grapheme_order + 1):\n                if i + j <= len(graphemes):\n                    substring = self.seq_sep.join(graphemes[i : i + j])\n                    state = self.input_token_type.find(substring)\n                    if state != 
pynini.NO_SYMBOL:\n                        fst.add_arc(start_state, pynini.Arc(state, state, one, i + j))\n                    if i + j >= max_state:\n                        max_state = i + j\n        for _ in range(fst.num_states(), max_state + 1):\n            fst.add_state()\n        fst.set_start(0)\n        fst.set_final(len(graphemes), one)\n        fst.set_input_symbols(self.input_token_type)\n        fst.set_output_symbols(self.input_token_type)\n        hypotheses = self.rewrite(fst)\n        hypotheses = [x.replace(self.seq_sep, \" \") for x in hypotheses if x]\n        return hypotheses\n\n\nclass RewriterWorker(mp.Process):\n    \"\"\"\n    Rewriter process\n\n    Parameters\n    ----------\n    job_queue: :class:`~multiprocessing.Queue`\n        Queue to pull words from\n    return_queue: :class:`~multiprocessing.Queue`\n        Queue to put pronunciations\n    rewriter: :class:`~montreal_forced_aligner.g2p.generator.Rewriter`\n        Function to generate pronunciations of words\n    stopped: :class:`~montreal_forced_aligner.utils.Stopped`\n        Stop check\n    \"\"\"\n\n    def __init__(\n        self,\n        job_queue: mp.Queue,\n        return_queue: mp.Queue,\n        rewriter: Rewriter,\n        stopped: Stopped,\n    ):\n        mp.Process.__init__(self)\n        self.job_queue = job_queue\n        self.return_queue = return_queue\n        self.rewriter = rewriter\n        self.stopped = stopped\n        self.finished = Stopped()\n\n    def run(self) -> None:\n        \"\"\"Run the rewriting function\"\"\"\n        while True:\n            try:\n                word = self.job_queue.get(timeout=1)\n            except queue.Empty:\n                break\n            if self.stopped.stop_check():\n                continue\n            try:\n                rep = self.rewriter(word)\n                self.return_queue.put((word, rep))\n            except rewrite.Error:\n                pass\n            except Exception as e:  # noqa\n                self.stopped.stop()\n                self.return_queue.put(e)\n                raise\n        self.finished.stop()\n        return\n\n\ndef clean_up_word(word: str, graphemes: Set[str]) -> Tuple[str, Set[str]]:\n    \"\"\"\n    Clean up word by removing graphemes not in a specified set\n\n    Parameters\n    ----------\n    word : str\n        Input string\n    graphemes: set[str]\n        Set of allowable graphemes\n\n    Returns\n    -------\n    str\n        Cleaned up word\n    Set[str]\n        Graphemes excluded\n    \"\"\"\n    new_word = []\n    missing_graphemes = set()\n    for c in word:\n        if c not in graphemes:\n            missing_graphemes.add(c)\n        else:\n            new_word.append(c)\n    return \"\".join(new_word), missing_graphemes\n\n\nclass OrthographyGenerator(G2PTopLevelMixin):\n    \"\"\"\n    Abstract mixin class for generating \"pronunciations\" based off the orthographic word\n\n    See Also\n    --------\n    :class:`~montreal_forced_aligner.g2p.mixins.G2PTopLevelMixin`\n        For top level G2P generation parameters\n    \"\"\"\n\n    def generate_pronunciations(self) -> Dict[str, List[str]]:\n        \"\"\"\n        Generate pronunciations for the word set\n\n        Returns\n        -------\n        dict[str, Word]\n            Mapping of words to their \"pronunciation\"\n        \"\"\"\n        pronunciations = {}\n        for word in self.words_to_g2p:\n            pronunciations[word] = [\" \".join(word)]\n        return pronunciations\n\n\nclass 
PyniniGenerator(G2PTopLevelMixin):\n    \"\"\"\n    Class for generating pronunciations from a Pynini G2P model\n\n    Parameters\n    ----------\n    g2p_model_path: str\n        Path to G2P model\n    strict_graphemes: bool\n        Flag for whether to be strict with missing graphemes and skip words containing new graphemes\n\n    See Also\n    --------\n    :class:`~montreal_forced_aligner.g2p.mixins.G2PTopLevelMixin`\n        For top level G2P generation parameters\n\n    Attributes\n    ----------\n    g2p_model: G2PModel\n        G2P model\n    \"\"\"\n\n    def __init__(self, g2p_model_path: str, strict_graphemes: bool = False, **kwargs):\n        self.strict_graphemes = strict_graphemes\n        super().__init__(**kwargs)\n        self.g2p_model = G2PModel(\n            g2p_model_path, root_directory=getattr(self, \"workflow_directory\", None)\n        )\n\n    def generate_pronunciations(self) -> Dict[str, List[str]]:\n        \"\"\"\n        Generate pronunciations\n\n        Returns\n        -------\n        dict[str, list[str]]\n            Mappings of keys to their generated pronunciations\n        \"\"\"\n\n        fst = pynini.Fst.read(self.g2p_model.fst_path)\n        if self.g2p_model.meta[\"architecture\"] == \"phonetisaurus\":\n            output_token_type = pynini.SymbolTable.read_text(self.g2p_model.sym_path)\n            input_token_type = pynini.SymbolTable.read_text(self.g2p_model.grapheme_sym_path)\n            fst.set_input_symbols(input_token_type)\n            fst.set_output_symbols(output_token_type)\n            rewriter = PhonetisaurusRewriter(\n                fst,\n                input_token_type,\n                output_token_type,\n                num_pronunciations=self.num_pronunciations,\n                threshold=self.g2p_threshold,\n            )\n        else:\n            output_token_type = \"utf8\"\n            input_token_type = \"utf8\"\n            if self.g2p_model.sym_path is not None and os.path.exists(self.g2p_model.sym_path):\n                output_token_type = pynini.SymbolTable.read_text(self.g2p_model.sym_path)\n            rewriter = Rewriter(\n                fst,\n                input_token_type,\n                output_token_type,\n                num_pronunciations=self.num_pronunciations,\n                threshold=self.g2p_threshold,\n            )\n\n        num_words = len(self.words_to_g2p)\n        begin = time.time()\n        missing_graphemes = set()\n        self.log_info(\"Generating pronunciations...\")\n        to_return = {}\n        skipped_words = 0\n        if num_words < 30 or self.num_jobs == 1:\n            with tqdm.tqdm(total=num_words, disable=getattr(self, \"quiet\", False)) as pbar:\n                for word in self.words_to_g2p:\n                    w, m = clean_up_word(word, self.g2p_model.meta[\"graphemes\"])\n                    pbar.update(1)\n                    missing_graphemes = missing_graphemes | m\n                    if self.strict_graphemes and m:\n                        skipped_words += 1\n                        continue\n                    if not w:\n                        skipped_words += 1\n                        continue\n                    try:\n                        prons = rewriter(w)\n                    except rewrite.Error:\n                        continue\n                    to_return[word] = prons\n                self.log_debug(\n                    f\"Skipping {skipped_words} words for containing the following graphemes: \"\n                    
f\"{comma_join(sorted(missing_graphemes))}\"\n                )\n        else:\n            stopped = Stopped()\n            job_queue = mp.Queue()\n            for word in self.words_to_g2p:\n                w, m = clean_up_word(word, self.g2p_model.meta[\"graphemes\"])\n                missing_graphemes = missing_graphemes | m\n                if self.strict_graphemes and m:\n                    skipped_words += 1\n                    continue\n                if not w:\n                    skipped_words += 1\n                    continue\n                job_queue.put(w)\n            self.log_debug(\n                f\"Skipping {skipped_words} words for containing the following graphemes: \"\n                f\"{comma_join(sorted(missing_graphemes))}\"\n            )\n            error_dict = {}\n            return_queue = mp.Queue()\n            procs = []\n            for _ in range(self.num_jobs):\n                p = RewriterWorker(\n                    job_queue,\n                    return_queue,\n                    rewriter,\n                    stopped,\n                )\n                procs.append(p)\n                p.start()\n            num_words -= skipped_words\n            with tqdm.tqdm(total=num_words, disable=getattr(self, \"quiet\", False)) as pbar:\n                while True:\n                    try:\n                        word, result = return_queue.get(timeout=1)\n                        if stopped.stop_check():\n                            continue\n                    except queue.Empty:\n                        for proc in procs:\n                            if not proc.finished.stop_check():\n                                break\n                        else:\n                            break\n                        continue\n                    pbar.update(1)\n                    if isinstance(result, Exception):\n                        error_dict[word] = result\n                        continue\n                    to_return[word] = result\n\n            for p in procs:\n                p.join()\n            if error_dict:\n                raise PyniniGenerationError(error_dict)\n        self.log_debug(f\"Processed {num_words} in {time.time() - begin} seconds\")\n        return to_return\n\n\nclass PyniniValidator(PyniniGenerator, TopLevelMfaWorker):\n    \"\"\"\n    Class for running validation for G2P model training\n\n    Parameters\n    ----------\n    word_list: list[str]\n        List of words to generate pronunciations\n\n    See Also\n    --------\n    :class:`~montreal_forced_aligner.g2p.generator.PyniniGenerator`\n        For parameters to generate pronunciations\n    \"\"\"\n\n    def __init__(self, word_list: List[str] = None, **kwargs):\n        super().__init__(**kwargs)\n        if word_list is None:\n            word_list = []\n        self.word_list = word_list\n\n    @property\n    def words_to_g2p(self) -> List[str]:\n        \"\"\"Words to produce pronunciations\"\"\"\n        return self.word_list\n\n    @property\n    def data_source_identifier(self) -> str:\n        \"\"\"Dummy \"validation\" data source\"\"\"\n        return \"validation\"\n\n    @property\n    def data_directory(self) -> str:\n        \"\"\"Data directory\"\"\"\n        return self.working_directory\n\n    @property\n    def evaluation_csv_path(self) -> str:\n        \"\"\"Path to working directory's CSV file\"\"\"\n        return os.path.join(self.working_directory, \"pronunciation_evaluation.csv\")\n\n    def setup(self) -> None:\n        \"\"\"Set up 
the G2P validator\"\"\"\n        if self.initialized:\n            return\n        self.g2p_model.validate(self.words_to_g2p)\n        self.initialized = True\n        self.wer = None\n        self.ler = None\n\n    def compute_validation_errors(\n        self,\n        gold_values: Dict[str, Set[str]],\n        hypothesis_values: Dict[str, List[str]],\n    ):\n        \"\"\"\n        Computes validation errors\n\n        Parameters\n        ----------\n        gold_values: dict[str, set[str]]\n            Gold pronunciations\n        hypothesis_values: dict[str, list[str]]\n            Hypothesis pronunciations\n        \"\"\"\n        begin = time.time()\n        # Word-level measures.\n        correct = 0\n        incorrect = 0\n        # Label-level measures.\n        total_edits = 0\n        total_length = 0\n        # Since the edit distance algorithm is quadratic, let's do this with\n        # multiprocessing.\n        self.log_debug(f\"Processing results for {len(hypothesis_values)} hypotheses\")\n        to_comp = []\n        indices = []\n        hyp_pron_count = 0\n        gold_pron_count = 0\n        output = []\n        for word, gold_pronunciations in gold_values.items():\n            if word not in hypothesis_values:\n                incorrect += 1\n                gold_length = statistics.mean(len(x.split()) for x in gold_pronunciations)\n                total_edits += gold_length\n                total_length += gold_length\n                output.append(\n                    {\n                        \"Word\": word,\n                        \"Gold pronunciations\": \", \".join(gold_pronunciations),\n                        \"Hypothesis pronunciations\": \"\",\n                        \"Accuracy\": 0,\n                        \"Error rate\": 1.0,\n                        \"Length\": gold_length,\n                    }\n                )\n                continue\n            hyp = hypothesis_values[word]\n            for h in hyp:\n                if h in gold_pronunciations:\n                    correct += 1\n                    total_length += len(h)\n                    output.append(\n                        {\n                            \"Word\": word,\n                            \"Gold pronunciations\": \", \".join(gold_pronunciations),\n                            \"Hypothesis pronunciations\": \", \".join(hyp),\n                            \"Accuracy\": 1,\n                            \"Error rate\": 0.0,\n                            \"Length\": len(h),\n                        }\n                    )\n                    break\n            else:\n                incorrect += 1\n                indices.append(word)\n                to_comp.append((gold_pronunciations, hyp))  # Multiple hypotheses to compare\n            self.log_debug(\n                f\"For the word {word}: gold is {gold_pronunciations}, hypothesized are: {hyp}\"\n            )\n            hyp_pron_count += len(hyp)\n            gold_pron_count += len(gold_pronunciations)\n        self.log_debug(\n            f\"Generated an average of {hyp_pron_count \/len(hypothesis_values)} variants \"\n            f\"The gold set had an average of {gold_pron_count\/len(hypothesis_values)} variants.\"\n        )\n        with mp.Pool(self.num_jobs) as pool:\n            gen = pool.starmap(score_g2p, to_comp)\n            for i, (edits, length) in enumerate(gen):\n                word = indices[i]\n                gold_pronunciations = gold_values[word]\n                hyp = hypothesis_values[word]\n      
          output.append(\n                    {\n                        \"Word\": word,\n                        \"Gold pronunciations\": \", \".join(gold_pronunciations),\n                        \"Hypothesis pronunciations\": \", \".join(hyp),\n                        \"Accuracy\": 1,\n                        \"Error rate\": edits \/ length,\n                        \"Length\": length,\n                    }\n                )\n                total_edits += edits\n                total_length += length\n        with open(self.evaluation_csv_path, \"w\", encoding=\"utf8\", newline=\"\") as f:\n            writer = csv.DictWriter(\n                f,\n                fieldnames=[\n                    \"Word\",\n                    \"Gold pronunciations\",\n                    \"Hypothesis pronunciations\",\n                    \"Accuracy\",\n                    \"Error rate\",\n                    \"Length\",\n                ],\n            )\n            writer.writeheader()\n            for line in output:\n                writer.writerow(line)\n        self.wer = 100 * incorrect \/ (correct + incorrect)\n        self.ler = 100 * total_edits \/ total_length\n        self.log_info(f\"WER:\\t{self.wer:.2f}\")\n        self.log_info(f\"LER:\\t{self.ler:.2f}\")\n        self.log_debug(\n            f\"Computation of errors for {len(gold_values)} words took {time.time() - begin} seconds\"\n        )\n\n    def evaluate_g2p_model(self, gold_pronunciations: Dict[str, Set[str]]) -> None:\n        \"\"\"\n        Evaluate a G2P model on the word list\n\n        Parameters\n        ----------\n        gold_pronunciations: dict[str, set[str]]\n            Gold pronunciations\n        \"\"\"\n        output = self.generate_pronunciations()\n        self.compute_validation_errors(gold_pronunciations, output)\n\n\nclass PyniniWordListGenerator(PyniniValidator):\n    \"\"\"\n    Top-level worker for generating pronunciations from a word list and a Pynini G2P model\n\n    Parameters\n    ----------\n    word_list_path: str\n        Path to word list file\n\n    See Also\n    --------\n    :class:`~montreal_forced_aligner.g2p.generator.PyniniGenerator`\n        For Pynini G2P generation parameters\n    :class:`~montreal_forced_aligner.abc.TopLevelMfaWorker`\n        For top-level parameters\n\n    Attributes\n    ----------\n    word_list: list[str]\n        Word list to generate pronunciations\n    \"\"\"\n\n    def __init__(self, word_list_path: str, **kwargs):\n        self.word_list_path = word_list_path\n        super().__init__(**kwargs)\n\n    @property\n    def data_directory(self) -> str:\n        \"\"\"Data directory\"\"\"\n        return self.working_directory\n\n    @property\n    def data_source_identifier(self) -> str:\n        \"\"\"Name of the word list file\"\"\"\n        return os.path.splitext(os.path.basename(self.word_list_path))[0]\n\n    def setup(self) -> None:\n        \"\"\"Set up the G2P generator\"\"\"\n        if self.initialized:\n            return\n        with open(self.word_list_path, \"r\", encoding=\"utf8\") as f:\n            for line in f:\n                self.word_list.extend(line.strip().split())\n        if not self.include_bracketed:\n            self.word_list = [x for x in self.word_list if not self.check_bracketed(x)]\n        self.g2p_model.validate(self.words_to_g2p)\n        self.initialized = True\n\n\nclass PyniniCorpusGenerator(PyniniGenerator, TextCorpusMixin, TopLevelMfaWorker):\n    \"\"\"\n    Top-level worker for generating pronunciations from a 
corpus and a Pynini G2P model\n\n    See Also\n    --------\n    :class:`~montreal_forced_aligner.g2p.generator.PyniniGenerator`\n        For Pynini G2P generation parameters\n    :class:`~montreal_forced_aligner.corpus.text_corpus.TextCorpusMixin`\n        For corpus parsing parameters\n    :class:`~montreal_forced_aligner.abc.TopLevelMfaWorker`\n        For top-level parameters\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n\n    def setup(self) -> None:\n        \"\"\"Set up the pronunciation generator\"\"\"\n        if self.initialized:\n            return\n        self._load_corpus()\n        self.calculate_word_counts()\n        self.g2p_model.validate(self.words_to_g2p)\n        self.initialized = True\n\n    @property\n    def words_to_g2p(self) -> List[str]:\n        \"\"\"Words to produce pronunciations\"\"\"\n        word_list = self.corpus_word_set\n        if not self.include_bracketed:\n            word_list = [x for x in word_list if not self.check_bracketed(x)]\n        return word_list\n\n\nclass OrthographicCorpusGenerator(OrthographyGenerator, TextCorpusMixin, TopLevelMfaWorker):\n    \"\"\"\n    Top-level class for generating \"pronunciations\" from the orthography of a corpus\n\n    See Also\n    --------\n    :class:`~montreal_forced_aligner.g2p.generator.OrthographyGenerator`\n        For orthography-based G2P generation parameters\n    :class:`~montreal_forced_aligner.corpus.text_corpus.TextCorpusMixin`\n        For corpus parsing parameters\n    :class:`~montreal_forced_aligner.abc.TopLevelMfaWorker`\n        For top-level parameters\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n\n    def setup(self) -> None:\n        \"\"\"Set up the pronunciation generator\"\"\"\n        if self.initialized:\n            return\n        self._load_corpus()\n        self.calculate_word_counts()\n        self.initialized = True\n\n    @property\n    def words_to_g2p(self) -> List[str]:\n        \"\"\"Words to produce pronunciations\"\"\"\n        word_list = self.corpus_word_set\n        if not self.include_bracketed:\n            word_list = [x for x in word_list if not self.check_bracketed(x)]\n        return word_list\n\n\nclass OrthographicWordListGenerator(OrthographyGenerator, TopLevelMfaWorker):\n    \"\"\"\n    Top-level class for generating \"pronunciations\" from the orthography of a corpus\n\n    Parameters\n    ----------\n    word_list_path: str\n        Path to word list file\n    See Also\n    --------\n    :class:`~montreal_forced_aligner.g2p.generator.OrthographyGenerator`\n        For orthography-based G2P generation parameters\n    :class:`~montreal_forced_aligner.abc.TopLevelMfaWorker`\n        For top-level parameters\n\n    Attributes\n    ----------\n    word_list: list[str]\n        Word list to generate pronunciations\n    \"\"\"\n\n    def __init__(self, word_list_path: str, **kwargs):\n        super().__init__(**kwargs)\n        self.word_list_path = word_list_path\n        self.word_list = []\n\n    def setup(self) -> None:\n        \"\"\"Set up the pronunciation generator\"\"\"\n        if self.initialized:\n            return\n        with open(self.word_list_path, \"r\", encoding=\"utf8\") as f:\n            for line in f:\n                self.word_list.extend(line.strip().split())\n        if not self.include_bracketed:\n            self.word_list = [x for x in self.word_list if not self.check_bracketed(x)]\n        self.initialized = True\n\n    @property\n    def 
words_to_g2p(self) -> List[str]:\n        \"\"\"Words to produce pronunciations\"\"\"\n        return self.word_list\n"}
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_269","text":"#!\/usr\/bin\/env python3\n# -*- coding: utf-8 -*-\nimport re\n\nfrom Commands.Keys import Button, Direction, Hat\nfrom Commands.PythonCommandBase import PythonCommand\n# import numpy as np\nfrom scipy.sparse.csgraph import shortest_path  # , floyd_warshall, dijkstra, bellman_ford, johnson\nfrom scipy.sparse import csr_matrix\n\nserial = {0: '-1',\n          1: '1', 2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: '0', 11: '@', 12: 'BS',\n          13: 'Q', 14: 'W', 15: 'E', 16: 'R', 17: 'T', 18: 'Y', 19: 'U', 20: 'I', 21: 'O', 22: 'P', 23: '=',\n          24: 'A', 25: 'S', 26: 'D', 27: 'F', 28: 'G', 29: 'H', 30: 'J', 31: 'K', 32: 'L', 33: '&', 34: ';',\n          35: 'Z', 36: 'X', 37: 'C', 38: 'V', 39: 'B', 40: 'N', 41: 'M', 42: '*', 43: '#', 44: '!', 45: '?',\n          46: 'SelectKeyboard', 47: 'Shift', 48: '#+=', 49: 'nl_1', 50: 'nl_2', 51: 'nl_3',\n          52: 'ok_1', 53: 'ok_2', 54: 'ok_3', 55: 'blank_1', 56: 'blank_2', 57: 'blank_3', 58: 'blank_4',\n          59: 'blank_5', 60: 'blank_6', 61: 'blank_7', 62: 'blank_8', }\nserial_inv = {v: k for k, v in serial.items()}\nserial_graph_list = [[],\n                     # 1-5\n                     [2, 13, 12], [1, 3, 14], [2, 4, 15], [3, 5, 16], [4, 6, 17],\n                     # 6-10\n                     [5, 7, 18], [6, 8, 19], [7, 9, 20], [8, 10, 21], [9, 11, 22],\n                     # 11-15 @ ~ E\n                     [10, 12, 23], [11, 49, 1], [1, 24, 14, 49], [2, 13, 15, 25], [3, 14, 16, 26],\n                     # 16-20 R ~ I\n                     [4, 15, 17, 27], [5, 16, 18, 28], [6, 17, 19, 29], [7, 18, 20, 30], [8, 19, 21, 31],\n                     # 21-25 O ~ S\n                     [9, 20, 22, 32], [10, 21, 23, 33], [11, 22, 34, 49], [13, 25, 35, 50], [14, 24, 26, 36],\n                     # 26-30 D ~ J\n                     [15, 25, 27, 37], [16, 26, 28, 38], [17, 27, 29, 39], [18, 28, 30, 40], [19, 29, 31, 41],\n                     # 31-35 J ~ Z\n                     [20, 30, 32, 42], [21, 31, 33, 43], [22, 32, 34, 44], [23, 33, 45, 50], [24, 46, 36, 53],\n                     # 36-40 X ~ N\n                     [25, 35, 37, 47], [26, 36, 38, 48], [27, 37, 39, 55], [28, 38, 40, 56], [29, 39, 41, 57],\n                     # 41-45 M ~ ?\n                     [30, 40, 42, 58], [31, 41, 43, 59], [32, 42, 44, 60], [33, 43, 45, 61], [34, 44, 62, 53],\n                     # 46-50\n                     [35, 47, 54], [36, 46, 48], [37, 47, 55], [12, 23, 13, 52], [12, 34, 24, 53],\n                     # 51-56\n                     [12, 34, 24, 54], [49, 45, 35], [45, 35, 50], [55, 46, 51], [38, 48, 54], [39, 48, 54],\n                     # 57-62\n                     [40, 48, 54], [41, 48, 54], [42, 48, 54], [43, 48, 54], [44, 48, 54], [45, 48, 54]]\n\n\nclass InputKeyboard(PythonCommand):\n    NAME = 'シリアル入力'\n\n    def __init__(self):\n        super().__init__()\n        self.s = 'F105LP98GMFCB3RA'  # 入力したい文字列\n        self.now_dict = serial_graph_list\n        self.now_dict_ = serial\n        self.now_dict_inv = serial_inv\n        self.graph = None\n        self.d = None\n        self.p = None\n        self.n = None\n        self.MakeGraph()\n        self.pos = 1  # 初期位置\n\n    def MakeGraph(self):\n        self.n = len(self.now_dict)\n        self.graph = [[0] * self.n for _ in range(self.n)]  # 隣接行列\n        for i, g_i in enumerate(self.now_dict):\n            for j in g_i:\n                self.graph[i][j] 
= 1\n        # for i in self.graph:\n        # print(\" \".join(list(map(str, i))))\n\n        a = csr_matrix(self.graph)\n        self.d, self.p = shortest_path(a, return_predecessors=True)\n\n    def do(self):\n        input_char = 0\n        for i in self.s:\n            print(self.now_dict_[self.now_dict_inv[i]])\n            t = GetPath(self.pos, self.now_dict_inv[i], self.p)\n            print(t)\n            stick = False\n            stick = self.Move(t, stick)\n            if not stick:\n                self.press(Button.A, wait=0.03, duration=0.05)\n            input_char += 1\n\n    def Move(self, t, stick):  # 移動のための関数\n        for j in range(len(t) - 1):\n            if t[j + 1] == 1 and t[j] == 12:\n                self.press(Direction.RIGHT, wait=0.03, duration=0.05)\n            if t[j + 1] == 12:\n                if t[j] in [49, 50, 51]:\n                    self.press(Direction.UP, wait=0.03, duration=0.05)\n                elif t[j] == 1:\n                    self.press(Direction.LEFT, wait=0.03, duration=0.05)\n                elif t[j] == 11:\n                    self.press(Direction.RIGHT, wait=0.03, duration=0.05)\n            elif t[j + 1] == 13:  # Q\n                if t[j] == 49:\n                    self.press(Direction.RIGHT, wait=0.1, duration=0.05)\n                elif t[j] == 1:\n                    self.press(Direction.DOWN, wait=0.03, duration=0.05)\n                elif t[j] == 14:\n                    self.press(Direction.LEFT, wait=0.03, duration=0.05)\n                elif t[j] == 24:\n                    self.press(Direction.UP, wait=0.03, duration=0.05)\n            elif t[j + 1] == 23:  # =\n                if t[j] == 22:\n                    self.press(Direction.RIGHT, wait=0.03, duration=0.05)\n                elif t[j] == 11:\n                    self.press(Direction.DOWN, wait=0.03, duration=0.05)\n                elif t[j] == 49:\n                    self.press(Direction.LEFT, wait=0.03, duration=0.05)\n                elif t[j] == 34:\n                    self.press(Direction.UP, wait=0.03, duration=0.05)\n            elif t[j + 1] == 24:  # A\n                if t[j] in [50, 51]:\n                    self.press(Direction.RIGHT, wait=0.1, duration=0.05)\n                elif t[j] == 13:\n                    self.press(Direction.DOWN, wait=0.03, duration=0.05)\n                elif t[j] == 25:\n                    self.press(Direction.LEFT, wait=0.03, duration=0.05)\n                elif t[j] == 35:\n                    self.press(Direction.UP, wait=0.03, duration=0.05)\n            elif t[j + 1] == 34:  # ;\n                if t[j] == 33:\n                    self.press(Direction.RIGHT, wait=0.03, duration=0.05)\n                elif t[j] == 23:\n                    self.press(Direction.DOWN, wait=0.03, duration=0.05)\n                elif t[j] in [50, 51]:\n                    self.press(Direction.LEFT, wait=0.03, duration=0.05)\n                elif t[j] == 45:\n                    self.press(Direction.UP, wait=0.03, duration=0.05)\n            elif t[j + 1] == 35:  # Z\n                if t[j] in [52, 53]:\n                    self.press(Direction.RIGHT, wait=0.1, duration=0.05)\n                elif t[j] == 24:\n                    self.press(Direction.DOWN, wait=0.03, duration=0.05)\n                elif t[j] == 36:\n                    self.press(Direction.LEFT, wait=0.03, duration=0.05)\n                elif t[j] == 46:\n                    self.press(Direction.UP, wait=0.03, duration=0.05)\n            elif t[j + 1] in [38, 39, 40, 
41, 42, 43, 44] and t[j + 1] - t[j] == -17:  # Z\n                self.press(Direction.UP, wait=0.03, duration=0.05)\n            elif t[j + 1] == 45:  # ?\n                if t[j] == 44:\n                    self.press(Direction.RIGHT, wait=0.03, duration=0.05)\n                elif t[j] == 34:\n                    self.press(Direction.DOWN, wait=0.03, duration=0.05)\n                elif t[j] in [52, 53]:\n                    self.press(Direction.LEFT, wait=0.03, duration=0.05)\n                elif t[j] == 62:\n                    self.press(Direction.UP, wait=0.03, duration=0.05)\n            elif t[j + 1] == 48 and t[j] in [55, 56, 57, 58, 59, 60, 61, 62]:\n                self.press(Direction.LEFT, wait=0.03, duration=0.05)\n\n            elif t[j + 1] == 49:\n                if t[j] == 12:\n                    self.press(Direction.DOWN, wait=0.03, duration=0.05)\n                elif t[j] == 23:\n                    self.press(Direction.RIGHT, wait=0.03, duration=0.05)\n                elif t[j] == 13:\n                    self.press(Direction.LEFT, wait=0.03, duration=0.05)\n                elif t[j] == 52:\n                    self.press(Direction.UP, wait=0.03, duration=0.05)\n            elif t[j + 1] == 50:\n                if t[j] == 34:\n                    self.press(Direction.RIGHT, wait=0.03, duration=0.05)\n                elif t[j] == 24:\n                    self.press(Direction.LEFT, wait=0.03, duration=0.05)\n                elif t[j] == 53:\n                    self.press(Direction.UP, wait=0.03, duration=0.05)\n            elif t[j + 1] == 51:\n                if t[j] == 54:\n                    self.press(Direction.UP, wait=0.03, duration=0.05)\n            elif t[j + 1] == 52:\n                if t[j] == 49:\n                    self.press(Direction.DOWN, wait=0.03, duration=0.05)\n            elif t[j + 1] == 53:\n                if t[j] == 45:\n                    self.press(Direction.RIGHT, wait=0.03, duration=0.05)\n                elif t[j] == 35:\n                    self.press(Direction.LEFT, wait=0.03, duration=0.05)\n                elif t[j] == 50:\n                    self.press(Direction.DOWN, wait=0.03, duration=0.05)\n            elif t[j + 1] == 54:\n                if t[j] in [55, 56, 57, 58, 59, 60, 61, 62]:\n                    self.press(Direction.RIGHT, wait=0.03, duration=0.05)\n                elif t[j] == 46:\n                    self.press(Direction.LEFT, wait=0.03, duration=0.05)\n                elif t[j] == 51:\n                    self.press(Direction.DOWN, wait=0.03, duration=0.05)\n            elif t[j + 1] == 55:\n                if t[j] == 48:\n                    self.press(Direction.RIGHT, wait=0.03, duration=0.05)\n                elif t[j] == 54:\n                    self.press(Direction.LEFT, wait=0.03, duration=0.05)\n                elif t[j] == 38:\n                    self.press(Direction.DOWN, wait=0.03, duration=0.05)\n            elif t[j + 1] in [56, 57, 58, 59, 60, 61, 62] and t[j + 1] - t[j] == 17:\n                self.press(Direction.DOWN, wait=0.03, duration=0.05)\n            elif t[j + 1] - t[j] == 1:\n                self.press(Direction.RIGHT, wait=0.03, duration=0.05)\n            elif t[j + 1] - t[j] == -1:\n                self.press(Direction.LEFT, wait=0.03, duration=0.05)\n            elif t[j + 1] - t[j] in [11, 12]:\n                self.press(Direction.DOWN, wait=0.03, duration=0.05)\n            elif t[j + 1] - t[j] in [-11, -12]:\n                self.press(Direction.UP, wait=0.03, duration=0.05)\n   
         if t[j + 1] not in list(range(67, self.n)):\n                self.pos = self.now_dict_inv[self.now_dict_[t[j + 1]]]\n        return stick\n\n\ndef GetPath(start, goal, pred):\n    return GetPathRow(start, goal, pred[start])\n\n\ndef GetPathRow(start, goal, pred_row):\n    path = []\n    i = goal\n    while i != start and i >= 0:\n        path.append(i)\n        i = pred_row[i]\n    if i < 0:\n        return []\n    path.append(i)\n    return path[::-1]\n"}
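A self-contained sketch of the predecessor-based path reconstruction used by GetPath/GetPathRow in the record above, run on a made-up 4-node graph rather than the keyboard layout, to show how scipy's return_predecessors output is walked back from goal to start.

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import shortest_path

# Made-up undirected graph: edges 0-1, 1-2, 2-3, 3-0.
adj = np.array([[0, 1, 0, 1],
                [1, 0, 1, 0],
                [0, 1, 0, 1],
                [1, 0, 1, 0]])
dist, pred = shortest_path(csr_matrix(adj), return_predecessors=True)

def reconstruct(start, goal, pred_row):
    # Follow predecessors from goal back to start, then reverse the chain.
    path = []
    i = goal
    while i != start and i >= 0:
        path.append(i)
        i = pred_row[i]
    if i < 0:
        return []  # goal not reachable from start
    path.append(i)
    return path[::-1]

print(reconstruct(0, 2, pred[0]))  # a two-edge path such as [0, 1, 2]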
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_270","text":"filter_and_merge_reads.py\nimport pysam\nimport sys\nimport os\nimport re\nimport argparse\nimport statistics\n\nparser=argparse.ArgumentParser()\nparser.add_argument(\"-f\",\"--file_in\",help=\"Input bam file name\", required=True)\nparser.add_argument(\"-o\",\"--file_out\",help=\"Output sam file name\", required=True)\nparser.add_argument(\"-c\",\"--overcovered_regions\",help=\"File with overcovered regions\")\n\nargs=parser.parse_args()\n\nraw_bam_file_name=args.file_in\ncollapsed_sam_file_name=args.file_out\n\novercovered_starts_dict={}\novercovered_ends_dict={}\nif args.overcovered_regions:\n    overcovered_file=open(args.overcovered_regions,'rt')\n    for line in overcovered_file:\n        overcovered_chr,overcovered_start,overcovered_end=line.split()\n        if overcovered_chr in overcovered_starts_dict:\n            overcovered_starts_dict[overcovered_chr].append(int(overcovered_start))\n            overcovered_ends_dict[overcovered_chr].append(int(overcovered_end))\n        else:\n            overcovered_starts_dict[overcovered_chr]=[int(overcovered_start),]\n            overcovered_ends_dict[overcovered_chr]=[int(overcovered_end),]\n\nsamfile=pysam.AlignmentFile(raw_bam_file_name, \"rb\")\ncollapsed_samfile=pysam.AlignmentFile(collapsed_sam_file_name, \"wb\", template=samfile)\ncurrent_read_name=\"\"\nreads_with_same_name=list()\n\ntlen_vect = []\ndef process_reads_with_same_name(reads_with_same_name, overcovered_starts_dict, overcovered_ends_dict):\n    read1=\"\"\n    read2=\"\"\n    global tlen_vect\n    \n    for same_read in reads_with_same_name:\n        #check if this is a primary read\n        if not same_read.is_secondary:\n            if same_read.is_read1:\n                if not read1:\n                    read1=same_read\n                else:\n                    #this read is probably split aligned\n                    return(\"Split-aligned R1 or R2\")\n            elif same_read.is_read2:\n                if not read2:\n                    read2=same_read\n                else:\n                    return(\"Split-aligned R1 or R2\")\n            else:\n                print(\"Strange: this is not read1 or read2\\n\")\n                print(same_read.tostring())\n\n    if not(read1 and read2):\n        return(\"No R1 or R2\")\n    elif (read1.mapping_quality<60 or read2.mapping_quality<60):\n        if (abs(read1.template_length)==abs(read2.template_length)):\n            tlen_vect.append(abs(read1.template_length))\n        return(\"R1 or R2 mapping quality lower than 60\")\n    elif (re.search('S',read1.cigarstring) or re.search('S',read2.cigarstring)):\n        return(\"Soft-clipped bases in R1 or R2\")\n    elif (read1.is_reverse and read2.is_reverse):\n        return(\"Both reads have reverse orientation\")\n    elif ((not read1.is_reverse) and (not read2.is_reverse)):\n        return(\"Both reads have forward orientation\")\n    elif (read1.template_length>1000 or read1.template_length<-1000):\n        return(\"Template length longer than 1000 bp\")\n    elif (read1.template_length==0 or read2.template_length==0):\n        return(\"Template length 0\")\n    elif (not read1.is_proper_pair or not read2.is_proper_pair):\n        return(\"R1 and R2 are not a proper pair\")\n    else:\n        if str(read1.reference_name)==str(read2.reference_name):\n            chrom=str(read1.reference_name)\n            if (abs(read1.template_length)==abs(read2.template_length)):\n     
           tlen_vect.append(abs(read1.template_length))\n        else:\n            return(\"R1 and R2 are not a proper pair\")\n        if read1.reference_start and read1.reference_end and read2.reference_start and read2.reference_end:\n            positions_list=[read1.reference_start,read1.reference_end,read2.reference_start,read2.reference_end]\n        else:\n            return(\"R1 and R2 are not a proper pair\")\n        pos_start=min(positions_list)\n        pos_end=max(positions_list)\n        in_overcovered=0\n        if chrom in overcovered_starts_dict:\n            for i in range(0,len(overcovered_starts_dict[chrom])):\n                if pos_start>overcovered_ends_dict[chrom][i]:\n                    i+=1\n                    continue\n                elif pos_end<=overcovered_ends_dict[chrom][i] and pos_end>=overcovered_starts_dict[chrom][i]:\n                    in_overcovered=1\n                    break\n                elif pos_start>=overcovered_starts_dict[chrom][i] and \\\n                    pos_start<=overcovered_ends_dict[chrom][i]:\n                    in_overcovered=1\n                    break\n        if in_overcovered:\n            return(\"Within overcovered region\")\n        elif not read1.is_reverse:\n            read1.tags += [('rr', '\"'+read2.tostring()+'\"')]\n            read1.tags += [('re', read2.reference_end)]\n            collapsed_samfile.write(read1)\n            return(\"Reads passed\")\n        else:\n            read2.tags += [('rr', '\"'+read1.tostring()+'\"')]\n            read2.tags += [('re', read1.reference_end)]\n            collapsed_samfile.write(read2)\n            return(\"Reads passed\")\n\nnumber_of_reads=0\n\nread_dict={\"Split-aligned R1 or R2\":0,\n           \"No R1 or R2\":0,\n           \"R1 or R2 mapping quality lower than 60\":0,\n           \"Soft-clipped bases in R1 or R2\":0,\n           \"Both reads have reverse orientation\":0,\n           \"Both reads have forward orientation\":0,\n           \"Template length longer than 1000 bp\":0,\n           \"Template length 0\":0,\n           \"R1 and R2 are not a proper pair\":0,\n           \"Within overcovered region\":0,\n           \"Reads passed\":0}\n\nfor read in samfile.fetch(until_eof=True):\n    if not current_read_name:\n        current_read_name=read.query_name\n        continue\n    else:\n        if read.query_name==current_read_name:\n            reads_with_same_name.append(read)\n            continue\n        else:\n            number_of_reads+=1\n            res=process_reads_with_same_name(reads_with_same_name, overcovered_starts_dict, overcovered_ends_dict)\n            read_dict[res]+=1\n            current_read_name=read.query_name\n            reads_with_same_name=[read,]\nelse:\n    number_of_reads+=1\n    res=process_reads_with_same_name(reads_with_same_name, overcovered_starts_dict, overcovered_ends_dict)\n    read_dict[res]+=1\n            \nprint(\"Overall number of reads or clusters in bam file\")\nprint(number_of_reads)\n\nfor each_reason in read_dict.keys():\n    print(each_reason,\": \",read_dict[each_reason])\n\nprint(\"Median template length: \",statistics.median(tlen_vect))\nsamfile.close()\ncollapsed_samfile.close()\n"}
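The overcovered-region filter in the record above reduces to a closed-interval overlap test between the read-pair span and each listed region; a standalone version of that test, with made-up coordinates, is sketched here.

def spans_overlap(pos_start, pos_end, region_start, region_end):
    # Closed intervals [pos_start, pos_end] and [region_start, region_end]
    # overlap unless one ends before the other begins.
    return pos_start <= region_end and pos_end >= region_start

# Made-up coordinates for illustration.
print(spans_overlap(100, 250, 200, 300))  # True: 200-250 is shared
print(spans_overlap(100, 150, 200, 300))  # False: the read pair ends before the region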
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_271","text":"from typing import Optional\n\nimport numpy\nfrom scipy.sparse import csr_matrix\nfrom scipy.special import expit\nfrom tqdm.auto import trange, tqdm\n\nfrom .matrix_factorization_base import MatrixFactorizationBase\n\n\nclass BPR(MatrixFactorizationBase):\n    def __init__(\n        self,\n        factors: int,\n        lr: float,\n        steps: int,\n        reg_lambda: float = 0.0,\n        random_state: Optional[int] = None,\n    ):\n        super().__init__(factors, random_state)\n        self.__lr = lr\n        self.__steps = steps\n        self.__lambda = reg_lambda\n\n    @staticmethod\n    def _sample_negative(user_id: int, user_item_csr: csr_matrix) -> int:\n        neg_sample = numpy.random.choice(user_item_csr.shape[1])\n        while user_item_csr[user_id, neg_sample] != 0:\n            neg_sample = numpy.random.choice(user_item_csr.shape[1])\n        return neg_sample\n\n    def _gradient_step(self, loss: float, user_id: int, pos_sample: int, neg_sample: int):\n        # [ d ]\n        du = loss * (self._V[pos_sample] - self._V[neg_sample]) + self.__lambda * self._U[user_id]\n        dpi = loss * self._U[user_id] + self.__lambda * self._V[pos_sample]\n        dni = loss * -self._U[user_id] + self.__lambda * self._V[neg_sample]\n\n        self._U[user_id] -= self.__lr * du\n        self._V[pos_sample] -= self.__lr * dpi\n        self._V[neg_sample] -= self.__lr * dni\n\n    def _step(self, user_id: int, pos_sample: int, user_item_csr: csr_matrix) -> float:\n        neg_sample = self._sample_negative(user_id, user_item_csr)\n\n        # [ 1 ]\n        r_uij = numpy.dot(self._U[user_id], self._V[pos_sample] - self._V[neg_sample])\n        sigmoid = expit(r_uij)\n\n        self._gradient_step(sigmoid, user_id, pos_sample, neg_sample)\n\n        return numpy.log(sigmoid)\n\n    def fit(self, user_item_csr: csr_matrix):\n        n_users, n_items = user_item_csr.shape\n        self._init_matrices(n_users, n_items)\n\n        user_item_coo = user_item_csr.tocoo()\n        n_samples = user_item_csr.count_nonzero()\n        assert len(user_item_coo.row) == len(user_item_coo.col) == n_samples\n\n        epoch_range = trange(self.__steps, desc=\"Epoch\")\n        for _ in epoch_range:\n            order = numpy.random.permutation(n_samples)\n            log_loss = 0\n            for user_id, pos_sample in tqdm(zip(user_item_coo.row[order], user_item_coo.col[order]), total=n_samples):\n                log_loss += self._step(user_id, pos_sample, user_item_csr)\n\n            epoch_range.set_postfix({\"log loss\": round(log_loss \/ n_samples, 3)})\n        epoch_range.close()\n"}
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_262","text":"#!\/usr\/bin\/env python3\n# ------------------------------------------------------------------------\n# Copyright (c) 2021 megvii-model. All Rights Reserved.\n# ------------------------------------------------------------------------\nimport os, sys\nfrom scipy.optimize import linear_sum_assignment\nimport numpy as np\n\ndef mask_minimumWeightMatching(costSet, mask) -> list:\n\n    m, n = costSet.shape\n    nMax = max(m, n)\n\n    costSet_ = np.full((nMax, nMax), np.inf)\n\n    mask = mask > 0\n    costSet_[:m, :n][mask] = costSet[mask]\n    assert costSet_.shape[0] == costSet_.shape[1]\n\n    if mask.sum():\n        practicalInfinity = 2 * costSet[costSet < np.inf].max() + 10\n    # except ValueError:\n    else:\n        practicalInfinity = 1\n\n    # Replace infinitites with our representation.\n    costSet_[costSet_ == np.inf] = practicalInfinity\n\n    # Find a pairing of minimum total cost between matching second-level contours.\n    iSet, jSet = linear_sum_assignment(costSet_)\n    assert len(iSet) == len(jSet)\n\n    # Return only pairs with finite cost.\n    indices = [(iSet[k], jSet[k]) for k in range(len(iSet)) \n                if costSet_[iSet[k], jSet[k]] != practicalInfinity]\n        \n    rows = np.array([i for i, _ in indices])\n    cols = np.array([j for _, j in indices])\n    return rows, cols\n\ndef minimumWeightMatching(costSet : np.ndarray) -> list:\n    '''\n    Computes a minimum-weight matching in a bipartite graph\n    (A union B, E).\n\n    costSet:\n    An (m x n)-matrix of real values, where costSet[i, j]\n    is the cost of matching the i:th vertex in A to the j:th \n    vertex of B. A value of numpy.inf is allowed, and is \n    interpreted as missing the (i, j)-edge.\n\n    returns:\n    A minimum-weight matching given as a list of pairs (i, j), \n    denoting that the i:th vertex of A be paired with the j:th \n    vertex of B.\n    '''\n\n    m, n = costSet.shape\n    nMax = max(m, n)\n\n    # Since the choice of infinity blocks later choices for that index, \n    # it is important that the cost matrix is square, so there\n    # is enough space to shift the choices for infinity to the unused \n    # part of the cost-matrix.\n    costSet_ = np.full((nMax, nMax), np.inf)\n\n    mask = costSet < 0\n    costSet_[:m, :n][mask] = costSet[mask]\n    assert costSet_.shape[0] == costSet_.shape[1]\n    \n    # We allow a cost to be infinity. Since scipy does not\n    # support this, we use a workaround. We represent infinity \n    # by M = 2 * maximum cost + 1. The point is to choose a distinct \n    # value, greater than any other cost, so that choosing an \n    # infinity-pair is the last resort. The 2 times is for large\n    # values for which x + 1 == x in floating point. 
The plus 1\n    # is for zero, for which 2 x == x.\n    try:\n        practicalInfinity = 2 * costSet[costSet < np.inf].max() + 10\n    except ValueError:\n        # This is thrown when the indexing set is empty;\n        # then all elements are infinities.\n        practicalInfinity = 1\n\n    # Replace infinitites with our representation.\n    costSet_[costSet_ == np.inf] = practicalInfinity\n\n    # Find a pairing of minimum total cost between matching second-level contours.\n    iSet, jSet = linear_sum_assignment(costSet_)\n    assert len(iSet) == len(jSet)\n\n    # Return only pairs with finite cost.\n    indices = [(iSet[k], jSet[k]) \n        for k in range(len(iSet)) \n        if costSet_[iSet[k], jSet[k]] != practicalInfinity]\n\n    return indices\n\ndef compute_lap(dtboxes, gtboxes, thr):\n\n    eps = 1e-7\n    n, k = dtboxes.shape[0], gtboxes.shape[0]\n    if k + n < 2:\n        m, n = np.array([]), np.array([])\n        return m, n\n        \n    overlaps = compute_iou_matrix(dtboxes, gtboxes)\n\n    if n < 2:\n        cols = np.argmax(overlaps, axis = 1)\n        rows = np.array([0])\n        m, n = (rows, cols) if thr - overlaps[rows, cols] < eps else (np.array([]), np.array([]))\n        return m, n\n\n    if k < 2:\n        \n        rows = np.argmax(overlaps, axis = 0)\n        cols = np.array([0])\n        m,n = (rows, cols) if thr - overlaps[rows, cols] < eps else (np.array([]), np.array([]))\n        return m, n\n            \n    ious = overlaps * (overlaps >= thr)\n        \n    matches = minimumWeightMatching(-ious)\n    m, n = np.array([i for i, _ in matches]).astype(np.int32), np.array([i for _, i in matches]).astype(np.int32)\n    indice = np.where(overlaps[m, n] < thr)[0]\n\n    if indice.size >= m.size:\n        m, n = np.array([]), np.array([])\n    else:\n        index = np.array(list(set(np.arange(m.size)) - set(indice))).astype(np.int)\n        m, n = m[index], n[index]\n    \n    return m, n\n\ndef compute_Jaccard(dtboxes, gtboxes, bm_thr):\n\n    rows, cols = compute_lap(dtboxes, gtboxes, bm_thr)\n    return [(i, j) for i, j in (rows, cols)]\n\ndef compute_JC(dtboxes, gtboxes, bm_thr):\n\n    rows, cols = compute_lap(dtboxes, gtboxes, bm_thr)\n    return [(i, j) for i, j in zip(rows, cols)]\n\ndef compute_ioa_matrix(dboxes: np.ndarray, gboxes: np.ndarray):\n\n    assert dboxes.shape[-1] >= 4 and gboxes.shape[-1] >= 4\n    N, K = dboxes.shape[0], gboxes.shape[0]\n    eps = 1e-6\n    dtboxes = np.tile(np.expand_dims(dboxes, axis = 1), (1, K, 1))\n    gtboxes = np.tile(np.expand_dims(gboxes, axis = 0), (N, 1, 1))\n\n    iw = np.minimum(dtboxes[:,:,2], gtboxes[:,:,2]) - np.maximum(dtboxes[:,:,0], gtboxes[:,:,0])\n    ih = np.minimum(dtboxes[:,:,3], gtboxes[:,:,3]) - np.maximum(dtboxes[:,:,1], gtboxes[:,:,1])\n    inter = np.maximum(0, iw) * np.maximum(0, ih)\n\n    dtarea = np.maximum(dtboxes[:,:,2] - dtboxes[:,:,0], 0) * np.maximum(dtboxes[:,:,3] - dtboxes[:,:,1], 0)   \n    ioas = inter \/ (dtarea + eps)\n    return ioas\n\ndef compute_iou_matrix(dboxes:np.ndarray, gboxes:np.ndarray):\n    \n    assert dboxes.shape[-1] >= 4 and gboxes.shape[-1] >= 4\n    eps = 1e-6\n    N, K = dboxes.shape[0], gboxes.shape[0]\n    dtboxes = np.tile(np.expand_dims(dboxes, axis = 1), (1, K, 1))\n    gtboxes = np.tile(np.expand_dims(gboxes, axis = 0), (N, 1, 1))\n\n    iw = np.minimum(dtboxes[:,:,2], gtboxes[:,:,2]) - np.maximum(dtboxes[:,:,0], gtboxes[:,:,0])\n    ih = np.minimum(dtboxes[:,:,3], gtboxes[:,:,3]) - np.maximum(dtboxes[:,:,1], gtboxes[:,:,1])\n    inter = 
np.maximum(0, iw) * np.maximum(0, ih)\n\n    dtarea = (dtboxes[:,:,2] - dtboxes[:,:,0]) * (dtboxes[:,:,3] - dtboxes[:,:,1])\n    gtarea = (gtboxes[:,:,2] - gtboxes[:,:,0]) * (gtboxes[:,:,3] - gtboxes[:,:,1])\n    ious = inter \/ (dtarea + gtarea - inter + eps)\n    return ious\n\ndef compute_maximal_iou(proposals:np.ndarray,gt:np.ndarray):\n    \n    ious = compute_iou_matrix(proposals, gt)\n    return np.max(ious, axis = 1)\n"}
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_273","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.stats import linregress\n\n# Set style\nplt.rcParams[\"font.family\"] = \"sans-serif\"\nplt.rcParams[\"figure.figsize\"] = (10, 4)\n\n# Data\nx = np.array([0.25, 0.286, 0.333, 0.4, 0.5])\ny = np.array([0.15, 0.3, 0.39, 0.54, 0.69])\n\nx_error = np.array([0.006, 0.008, 0.011, 0.016, 0.025])\ny_error = np.array([0.03, 0.01, 0.04, 0.02, 0.02])\n\n# Linear regression\nresult = linregress(x, y)\nx_conti = np.linspace(0.23, 0.57)\ny_conti = result.intercept + result.slope * x_conti\n\n# Plot\nfigure, axs = plt.subplots(1, 2)\naxs[0].errorbar(x, y, yerr=y_error, xerr=x_error, fmt=\"o\")\naxs[0].plot(x_conti, y_conti)\n\naxs[0].set_xlabel(\"1\/a (cm^-1)\")\naxs[0].set_ylabel(\"sin(theta)\")\naxs[0].set_ylim(0, None)\n\naxs[1].scatter(x, y - result.intercept - x * result.slope)\naxs[1].set_xlabel(\"1\/a (cm^-1)\")\naxs[1].set_ylabel(\"Residuals\")\n\n# Show\nplt.savefig(\"4a.png\")\nplt.show()\n"}
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_274","text":"code\/preprocess\/consumption\/sector\/tn\/tn_nm.py\n#! usr\/bin\/python3\n\nimport pandas as pd\nimport re\nimport numpy as np\nimport os\nimport sys\nfrom collections import OrderedDict, defaultdict\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n# import seaborn as sns\nfrom scipy import stats, integrate\n\n# sns.set() # switch to seaborn default\n# sns.set_style(\"whitegrid\")\n\n#load sector msncodes\ntn_msncodes = pd.read_csv(\"data\/csv\/consumption\/sector\/tn_sector.csv\", engine='c', low_memory=True)[\"MSN\"]\n#load state data\nnm_data = pd.read_csv(\"data\/csv\/state_data\/nm_data.csv\", engine='c', low_memory=True)\n\nnm_msn = []\nnm_year = []\nnm_value = []\n\nfor i in range(len(nm_data[\"MSN\"])):\n    for j in range(len(tn_msncodes)):\n        if nm_data[\"MSN\"][i] == tn_msncodes[j]:\n            nm_msn.append(nm_data[\"MSN\"][i])\n            nm_year.append(nm_data[\"Year\"][i])\n            nm_value.append(nm_data[\"Data\"][i])\n        else:\n            pass\n\nnm_tn = OrderedDict()\nnm_tn[\"MSN\"] = nm_msn\nnm_tn[\"Year\"] = nm_year\nnm_tn[\"Data\"] = nm_value\nnm_tn_data = pd.DataFrame(nm_tn)\n\nnm_tn_data.to_csv(\"data\/csv\/consumption\/sector\/nm\/nm_tn_data.csv\",\n                  index=False, index_label=False, sep=',')\n# print(nm_tn_data)\n\nsectors = [\"TNACB\", \"TNCCB\", \"TNICB\", \"TNRCB\"]\ntnacb = OrderedDict()\ntnacb[\"Year\"] = []\ntnacb[\"Data\"] = []\ntnccb = OrderedDict()\ntnccb[\"Year\"] = []\ntnccb[\"Data\"] = []\ntnicb = OrderedDict()\ntnicb[\"Year\"] = []\ntnicb[\"Data\"] = []\ntnrcb = OrderedDict()\ntnrcb[\"Year\"] = []\ntnrcb[\"Data\"] = []\n\n\nfor i in range(len(nm_tn_data[\"MSN\"])):\n    if nm_tn_data[\"MSN\"][i] == \"TNACB\":\n        tnacb[\"Year\"].append(nm_tn_data[\"Year\"][i])\n        tnacb[\"Data\"].append(nm_tn_data[\"Data\"][i])\n    elif nm_tn_data[\"MSN\"][i] == \"TNCCB\":\n        tnccb[\"Year\"].append(nm_tn_data[\"Year\"][i])\n        tnccb[\"Data\"].append(nm_tn_data[\"Data\"][i])\n    elif nm_tn_data[\"MSN\"][i] == \"TNICB\":\n        tnicb[\"Year\"].append(nm_tn_data[\"Year\"][i])\n        tnicb[\"Data\"].append(nm_tn_data[\"Data\"][i])\n    elif nm_tn_data[\"MSN\"][i] == \"TNRCB\":\n        tnrcb[\"Year\"].append(nm_tn_data[\"Year\"][i])\n        tnrcb[\"Data\"].append(nm_tn_data[\"Data\"][i])\n    else:\n        pass\n\ntnacb_data = pd.DataFrame(tnacb)\ntnacb_data.to_csv(\"data\/csv\/consumption\/sector\/nm\/tn\/tnacb.csv\",\n                  index=False, index_label=False, sep=',')\ntnccb_data = pd.DataFrame(tnccb)\ntnccb_data.to_csv(\"data\/csv\/consumption\/sector\/nm\/tn\/tnccb.csv\",\n                  index=False, index_label=False, sep=',')\ntnicb_data = pd.DataFrame(tnicb)\ntnicb_data.to_csv(\"data\/csv\/consumption\/sector\/nm\/tn\/tnicb.csv\",\n                  index=False, index_label=False, sep=',')\ntnrcb_data = pd.DataFrame(tnrcb)\ntnrcb_data.to_csv(\"data\/csv\/consumption\/sector\/nm\/tn\/tnrcb.csv\",\n                  index=False, index_label=False, sep=',')\n# print(tnacb_data)\n# print(tnccb_data)\n# print(tnicb_data)\n# print(tnrcb_data)\n"}
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_275","text":"r\"\"\"Solve Navier-Stokes equations for the lid driven cavity using a coupled\nformulation\n\nThe equations are in strong form\n\n.. math::\n\n    \\nu\\nabla^2 u - \\nabla p &= (u \\cdot \\nabla) u \\\\\n    \\nabla \\cdot u &= 0 \\\\\n    \\bs{u}(x, y=1) = (1, 0) \\, &\\text{ or }\\, \\bs{u}(x, y=1) = ((1-x)^2(1+x)^2, 0) \\\\\n    u(x, y=-1) &= 0 \\\\\n    u(x=\\pm 1, y) &= 0\n\nIn addition we require :math:`\\int p dx = 0`, which is achieved by\nfixing the coefficient :math:`\\hat{p}_{0, 0} = 0`.\n\nWe use a tensorproductspace with a composite Legendre for the Dirichlet space\nand a regular Legendre for the pressure space.\n\nTo remove all nullspaces we use a P_{N} x P_{N-2} basis, with P_{N-2} for the\npressure.\n\n\"\"\"\nimport os\nimport sys\nimport time\nimport numpy as np\nfrom scipy.sparse.linalg import splu\nimport sympy\nfrom shenfun import *\n\nassert comm.Get_size() == 1, \"Two non-periodic directions only have solver implemented for serial\"\n\nRe = 250.\nnu = 2.\/Re\nalfa = 0.1 # underrelaxation factor\nN = 64\nfamily = 'Chebyshev'\n#family = 'Legendre'\nx = sympy.symbols('x', real='True')\nD0 = FunctionSpace(N, family, bc=(0, 0))\n#D1 = FunctionSpace(N, family, bc=(0, 1))\nD1 = FunctionSpace(N, family, bc=(0, (1-x)**2*(1+x)**2))\n\n# Create tensor product spaces with different combination of bases\nV1 = TensorProductSpace(comm, (D0, D1))\nV0 = V1.get_homogeneous()\nP = V1.get_orthogonal()\n\n# To satisfy inf-sup for the Stokes problem, just pick the first N-2 items of the pressure basis\n# Note that this effectively sets P_{N-1} and P_{N-2} to zero, but still the basis uses\n# the same quadrature points as the Dirichlet basis, which is required for the inner products.\nP.bases[0].slice = lambda: slice(0, N-2)\nP.bases[1].slice = lambda: slice(0, N-2)\n\n# Create vector space for velocity and a mixed velocity-pressure space\nW1 = VectorSpace([V1, V0])\nVQ = CompositeSpace([W1, P])\n\n# Create space for nonlinearity\nS1 = TensorSpace(P)\n\nup = TrialFunction(VQ)\nvq = TestFunction(VQ)\n\nu, p = up\nv, q = vq\n\n# Assemble blocks of the complete block matrix\nif family.lower() == 'legendre':\n    A00 = inner(grad(v), -nu*grad(u))\n    A01 = inner(div(v), p)\nelse:\n    A00 = inner(v, nu*div(grad(u)))\n    A01 = inner(v, -grad(p))\n\nA10 = inner(q, div(u))\n\n# Create Block matrix solver. 
This also takes care of boundary conditions.\nsol = la.BlockMatrixSolver(A00+A01+A10)\n\n# Create Function to hold solution\nuh_hat = Function(VQ).set_boundary_dofs()\nui_hat = uh_hat[0]\n\n# New solution (iterative)\nuh_new = Function(VQ).set_boundary_dofs()\nui_new = uh_new[0]\n\n# Create regular work arrays for right hand side.\nbh_hat = Function(VQ)\n\n# Create arrays to hold velocity vector solution\nui = Array(W1)\n\n# Create work arrays for nonlinear part\nuiuj = Array(S1.get_dealiased())\nuiuj_hat = Function(S1)\nBS = BlockMatrix(inner(TestFunction(W1), div(TrialFunction(S1))))\n\ndef compute_rhs(ui_hat, bh_hat):\n    global uip, uiuj, uiuj_hat\n    bh_hat.fill(0)\n    bi_hat = bh_hat[0]\n    # Get convection\n    uip = ui_hat.backward(padding_factor=1.5)\n    uiuj = outer(uip, uip, uiuj)\n    uiuj_hat = uiuj.forward(uiuj_hat)\n    #bi_hat = inner(v, div(uiuj_hat), output_array=bi_hat)\n    bi_hat = BS.matvec(uiuj_hat, bi_hat) # fastest method\n    #bi_hat = inner(grad(v), -uiuj_hat, output_array=bi_hat) # only Legendre\n    #gradu = project(grad(ui_hat), S1)\n    #bi_hat = inner(v, dot(gradu, ui_hat), output_array=bi_hat)\n    return bh_hat\n\nconverged = False\ncount = 0\nmax_count = 1000\nif 'pytest' in os.environ:\n    max_count = 1\nt0 = time.time()\nwhile not converged:\n    count += 1\n    bh_hat = compute_rhs(ui_hat, bh_hat)\n    uh_new = sol(bh_hat, u=uh_new, constraints=((2, 0, 0),)) # Constraint for component 2 of mixed space\n    error = np.linalg.norm(ui_hat-ui_new)\n    uh_hat[:] = alfa*uh_new + (1-alfa)*uh_hat\n    converged = abs(error) < 1e-11 or count >= max_count\n    if count % 1 == 0:\n        print('Iteration %d Error %2.4e' %(count, error))\n\nprint('Time ', time.time()-t0)\n\n# Move solution to regular Function\nup = Array(VQ)\nup = uh_hat.backward(up)\nu_, p_ = up\n\nif 'pytest' in os.environ: sys.exit(0)\n\n# Postprocessing\n# Solve streamfunction\nr = TestFunction(V0)\ns = TrialFunction(V0)\nS = inner(r, div(grad(s)))\nh = inner(r, -curl(ui_hat))\nH = la.SolverGeneric2ND(S)\nphi_h = H(h)\nphi = phi_h.backward()\n# Compute vorticity\nP = V1.get_orthogonal()\nw_h = Function(P)\nw_h = project(curl(ui_hat), P, output_array=w_h)\n#p0 = np.array([[0.], [0.]])\n#print(w_h.eval(p0)*2)\n\n# Find minimal streamfunction value and position\n# by gradually zooming in on mesh\nW = 101\nconverged = False\nxmid, ymid = 0, 0\ndx = 1\npsi_old = 0\ncount = 0\ny, x = np.meshgrid(np.linspace(ymid-dx, ymid+dx, W), np.linspace(xmid-dx, xmid+dx, W))\npoints = np.vstack((x.flatten(), y.flatten()))\npp = phi_h.eval(points).reshape((W, W))\nwhile not converged:\n    yr, xr = np.meshgrid(np.linspace(ymid-dx, ymid+dx, W), np.linspace(xmid-dx, xmid+dx, W))\n    points = np.vstack((xr.flatten(), yr.flatten()))\n    pr = phi_h.eval(points).reshape((W, W))\n    xi, yi = pr.argmin()\/\/W, pr.argmin()%W\n    psi_min, xmid, ymid = pr.min()\/2, xr[xi, yi], yr[xi, yi]\n    err = abs(psi_min-psi_old)\n    converged = err < 1e-15 or count > 10\n    psi_old = psi_min\n    dx = dx\/4.\n    print(\"%d %d \" %(xi, yi) +(\"%+2.7e \"*4) %(xmid, ymid, psi_min, err))\n    count += 1\n\nimport matplotlib.pyplot as plt\n#f = open('plot_u_y_Ghia{}.csv'.format(int(Re)))\n#g = np.loadtxt(f, skiprows=1, delimiter=',')\n#plt.figure()\n#y = 2*(g[:, 0]-0.5)\n#plt.plot(y, g[:, 1], 'r+')\nX = V0.local_mesh(True)\n#x = np.vstack([np.zeros(N[0]), X[1][0]])\n#res = ui_hat[0].eval(x)\n#plt.plot(x[1], res)\n#res2 = ui_hat[0].eval(np.vstack([np.zeros(len(y)), y]))\n#plt.plot(y, res2, 'bs', 
mfc='None')\nplt.figure()\nplt.contourf(X[0], X[1], p_, 100)\nplt.figure()\nplt.quiver(X[0], X[1], u_[0], u_[1])\nplt.figure()\nplt.spy(sol.mat.diags())\nplt.figure()\nplt.contourf(X[0], X[1], u_[0], 100)\nplt.figure()\nplt.contourf(X[0], X[1], u_[1], 100)\nplt.figure()\nplt.contourf(X[0], X[1], phi, 100)\n#plt.title('Streamfunction')\n#plt.show()\n"}
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_276","text":"0\nimport numpy as np\r\nnp.set_printoptions(precision=4)\r\n\r\nimport time\r\nfrom scipy import spatial\r\n\r\ndef xy2theta(x, y):\r\n    if (x >= 0 and y >= 0): \r\n        theta = 180\/np.pi * np.arctan(y\/x);\r\n    if (x < 0 and y >= 0): \r\n        theta = 180 - ((180\/np.pi) * np.arctan(y\/(-x)));\r\n    if (x < 0 and y < 0): \r\n        theta = 180 + ((180\/np.pi) * np.arctan(y\/x));\r\n    if ( x >= 0 and y < 0):\r\n        theta = 360 - ((180\/np.pi) * np.arctan((-y)\/x));\r\n\r\n    return theta\r\n\r\n\r\ndef pt2rs(point, gap_ring, gap_sector, num_ring, num_sector):\r\n    x = point[0]\r\n    y = point[1]\r\n    # z = point[2]\r\n    \r\n    if(x == 0.0):\r\n        x = 0.001\r\n    if(y == 0.0):\r\n        y = 0.001\r\n    \r\n    theta = xy2theta(x, y)\r\n    faraway = np.sqrt(x*x + y*y)\r\n    \r\n    idx_ring = np.divmod(faraway, gap_ring)[0]       \r\n    idx_sector = np.divmod(theta, gap_sector)[0]\r\n\r\n    if(idx_ring >= num_ring):\r\n        idx_ring = num_ring-1 # python starts with 0 and ends with N-1\r\n    \r\n    return int(idx_ring), int(idx_sector)\r\n\r\n\r\ndef ptcloud2sc(ptcloud, sc_shape, max_length):\r\n    num_ring = sc_shape[0]\r\n    num_sector = sc_shape[1]\r\n\r\n    gap_ring = max_length\/num_ring\r\n    gap_sector = 360\/num_sector\r\n    \r\n    enough_large = 500\r\n    sc_storage = np.zeros([enough_large, num_ring, num_sector])\r\n    sc_counter = np.zeros([num_ring, num_sector])\r\n    \r\n    num_points = ptcloud.shape[0]\r\n    for pt_idx in range(num_points):\r\n        point = ptcloud[pt_idx, :]\r\n        point_height = point[2] + 2.0 # for setting ground is roughly zero \r\n        \r\n        idx_ring, idx_sector = pt2rs(point, gap_ring, gap_sector, num_ring, num_sector)\r\n        \r\n        if sc_counter[idx_ring, idx_sector] >= enough_large:\r\n            continue\r\n        sc_storage[int(sc_counter[idx_ring, idx_sector]), idx_ring, idx_sector] = point_height\r\n        sc_counter[idx_ring, idx_sector] = sc_counter[idx_ring, idx_sector] + 1\r\n\r\n    sc = np.amax(sc_storage, axis=0)\r\n        \r\n    return sc\r\n\r\n\r\ndef sc2rk(sc):\r\n    return np.mean(sc, axis=1)\r\n\r\ndef distance_sc(sc1, sc2):\r\n    num_sectors = sc1.shape[1]\r\n\r\n    # repeate to move 1 columns\r\n    _one_step = 1 # const\r\n    sim_for_each_cols = np.zeros(num_sectors)\r\n    for i in range(num_sectors):\r\n        # Shift\r\n        sc1 = np.roll(sc1, _one_step, axis=1) #  columne shift\r\n\r\n        #compare\r\n        sum_of_cossim = 0\r\n        num_col_engaged = 0\r\n        for j in range(num_sectors):\r\n            col_j_1 = sc1[:, j]\r\n            col_j_2 = sc2[:, j]\r\n            if (~np.any(col_j_1) or ~np.any(col_j_2)): \r\n                # to avoid being divided by zero when calculating cosine similarity\r\n                # - but this part is quite slow in python, you can omit it.\r\n                continue \r\n\r\n            cossim = np.dot(col_j_1, col_j_2) \/ (np.linalg.norm(col_j_1) * np.linalg.norm(col_j_2))\r\n            sum_of_cossim = sum_of_cossim + cossim\r\n\r\n            num_col_engaged = num_col_engaged + 1\r\n\r\n        # save \r\n        sim_for_each_cols[i] = sum_of_cossim \/ num_col_engaged\r\n\r\n    yaw_diff = np.argmax(sim_for_each_cols) + 1 # because python starts with 0 \r\n    sim = np.max(sim_for_each_cols)\r\n    dist = 1 - sim\r\n\r\n    return dist, yaw_diff\r\n\r\n    \r\nclass ScanContextManager:\r\n    def 
__init__(self, shape=[20,60], num_candidates=10, threshold=0.15): # defualt configs are same as the original paper \r\n        self.shape = shape\r\n        self.num_candidates = num_candidates\r\n        self.threshold = threshold\r\n\r\n        self.max_length = 80 # recommended but other (e.g., 100m) is also ok.\r\n\r\n        self.ENOUGH_LARGE = 15000 # capable of up to ENOUGH_LARGE number of nodes \r\n        self.ptclouds = [None] * self.ENOUGH_LARGE\r\n        self.scancontexts = [None] * self.ENOUGH_LARGE\r\n        self.ringkeys = [None] * self.ENOUGH_LARGE\r\n\r\n        self.curr_node_idx = 0\r\n       \r\n\r\n    def addNode(self, node_idx, ptcloud):\r\n        sc = ptcloud2sc(ptcloud, self.shape, self.max_length)\r\n        rk = sc2rk(sc)\r\n\r\n        self.curr_node_idx = node_idx\r\n        self.ptclouds[node_idx] = ptcloud\r\n        self.scancontexts[node_idx] = sc\r\n        self.ringkeys[node_idx] = rk\r\n        \r\n\r\n    def getPtcloud(self, node_idx):\r\n        return self.ptclouds[node_idx]\r\n\r\n\r\n    def detectLoop(self):        \r\n        exclude_recent_nodes = 30\r\n        valid_recent_node_idx = self.curr_node_idx - exclude_recent_nodes\r\n\r\n        if(valid_recent_node_idx < 1):\r\n            return None, None, None\r\n        else:\r\n            # step 1\r\n            ringkey_history = np.array(self.ringkeys[:valid_recent_node_idx])\r\n            ringkey_tree = spatial.KDTree(ringkey_history)\r\n\r\n            ringkey_query = self.ringkeys[self.curr_node_idx]\r\n            _, nncandidates_idx = ringkey_tree.query(ringkey_query, k=self.num_candidates)\r\n\r\n            # step 2\r\n            query_sc = self.scancontexts[self.curr_node_idx]\r\n            \r\n            nn_dist = 1.0 # initialize with the largest value of distance\r\n            nn_idx = None\r\n            nn_yawdiff = None\r\n            for ith in range(self.num_candidates):\r\n                candidate_idx = nncandidates_idx[ith]\r\n                candidate_sc = self.scancontexts[candidate_idx]\r\n                dist, yaw_diff = distance_sc(candidate_sc, query_sc)\r\n                if(dist < nn_dist):\r\n                    nn_dist = dist\r\n                    nn_yawdiff = yaw_diff\r\n                    nn_idx = candidate_idx\r\n\r\n            if(nn_dist < self.threshold):\r\n                nn_yawdiff_deg = nn_yawdiff * (360\/self.shape[1])\r\n                return nn_idx, nn_dist, nn_yawdiff_deg # loop detected!\r\n            else:\r\n                return None, None, None\r\n"}
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_277","text":"\"\"\"\nCCT 建模优化代码\n束线\n\n作者:赵润晓\n日期:2021年5月1日\n\"\"\"\n\nimport multiprocessing  # since v0.1.1 多线程计算\nimport time  # since v0.1.1 统计计算时长\nfrom typing import Callable, Dict, Generic, Iterable, List, NoReturn, Optional, Tuple, TypeVar, Union\nimport matplotlib.pyplot as plt\nimport math\nimport random  # since v0.1.1 随机数\nimport sys\nimport os  # since v0.1.1 查看CPU核心数\nimport numpy\nfrom scipy.integrate import solve_ivp  # since v0.1.1 ODE45\nimport warnings  # since v0.1.1 提醒方法过时\nfrom packages.point import *\nfrom packages.constants import *\nfrom packages.base_utils import BaseUtils\nfrom packages.local_coordinate_system import LocalCoordinateSystem\nfrom packages.line2s import *\nfrom packages.trajectory import Trajectory\nfrom packages.particles import *\nfrom packages.magnets import *\nfrom packages.cct import CCT\n\n\nclass Beamline(Line2, Magnet, ApertureObject):\n    def __init__(self, trajectory: Trajectory) -> None:\n        \"\"\"\n        不要直接调用构造器\n        请使用 set_start_point\n        \"\"\"\n        self.magnets: List[Magnet] = []\n        self.trajectory: Trajectory = trajectory\n\n        # 2021年3月18日 新增,表示元件。List 中每个元素表示一个元件\n        # 元件由三部分组成,位置、元件自身、长度\n        # 其中位置表示沿着 Beamline 的长度\n        # 元件自身,使用 None 表示漂移段。\n        self.elements: List[Tuple[float, Magnet, float]] = []\n\n    def magnetic_field_at(self, point: P3) -> P3:\n        \"\"\"\n        返回 Beamline 在全局坐标系点 P3 处产生的磁场\n        \"\"\"\n        b: P3 = P3.zeros()\n        for m in self.magnets:\n            b += m.magnetic_field_at(point)\n        return b\n\n    # from Magnet\n    def magnetic_field_along(\n            self,\n            line2: Optional[Line2] = None,\n            p2_t0_p3: Callable[[P2], P3] = lambda p2: P3(p2.x, p2.y, 0.0),\n            step: float = 1 * MM,\n    ) -> List[ValueWithDistance[P3]]:\n        \"\"\"\n        计算本对象在二维曲线 line2 上的磁场分布(line2 为 None 时,默认为 self.trajectory)\n        p2_t0_p3 是一个函数,用于把 line2 上的二维点转为三维,默认转为 z=0 的三维点\n        step 表示 line2 分段长度\n        -------\n        \"\"\"\n        if line2 is None:\n            line2 = self.trajectory\n\n        return super(Beamline, self).magnetic_field_along(\n            line2=line2, p2_t0_p3=p2_t0_p3, step=step\n        )\n\n    def magnetic_field_bz_along(\n            self,\n            line2: Optional[Line2] = None,\n            p2_t0_p3: Callable[[P2], P3] = lambda p2: P3(p2.x, p2.y, 0.0),\n            step: float = 1 * MM,\n    ) -> List[P2]:\n        \"\"\"\n        计算本对象在二维曲线 line (line2 为 None 时,默认为 self.trajectory)上的磁场 Z 方向分量的分布\n        因为磁铁一般放置在 XY 平面,所以 Bz 一般可以看作自然坐标系下 By,也就是二级场大小\n        p2_t0_p3 是一个函数,用于把 line2 上的二维点转为三维,默认转为 z=0 的三维点\n        step 表示 line2 分段长度\n\n        返回 P2 的数组,P2 中 x 表示曲线 line2 上距离 s,y 表示前述距离对应的点的磁场 bz\n        \"\"\"\n        if line2 is None:\n            line2 = self.trajectory\n\n        return super(Beamline, self).magnetic_field_bz_along(\n            line2=line2, p2_t0_p3=p2_t0_p3, step=step\n        )\n\n    def graident_field_along(\n            self,\n            line2: Optional[Line2] = None,\n            good_field_area_width: float = 10 * MM,\n            step: float = 1 * MM,\n            point_number: int = 4,\n    ) -> List[P2]:\n        \"\"\"\n        计算本对象在二维曲线 line2 (line2 为 None 时,默认为 self.trajectory)上的磁场梯度的分布\n        每一点的梯度,采用这点水平垂线上 Bz 的多项式拟合得到\n        good_field_area_width:水平垂线的长度,注意应小于等于好场区范围\n        step:line2 上取点间距\n        point_number:水平垂线上取点数目,越多则拟合越精确\n        
\"\"\"\n        if line2 is None:\n            line2 = self.trajectory\n\n        return super(Beamline, self).graident_field_along(\n            line2=line2, good_field_area_width=good_field_area_width, step=step, point_number=point_number\n        )\n\n    def second_graident_field_along(\n            self,\n            line2: Optional[Line2] = None,\n            good_field_area_width: float = 10 * MM,\n            step: float = 1 * MM,\n            point_number: int = 4,\n    ) -> List[P2]:\n        \"\"\"\n        计算本对象在二维曲线 line2 (line2 为 None 时,默认为 self.trajectory)上的磁场二阶梯度的分布(六极场)\n        每一点的梯度,采用这点水���垂线上 Bz 的多项式拟合得到\n        good_field_area_width:水平垂线的长度,注意应小于等于好场区范围\n        step:line2 上取点间距\n        point_number:水平垂线上取点数目,越多则拟合越精确\n        \"\"\"\n        if line2 is None:\n            line2 = self.trajectory\n\n        return super(Beamline, self).second_graident_field_along(\n            line2=line2, good_field_area_width=good_field_area_width, step=step, point_number=point_number\n        )\n\n    def track_ideal_particle(\n            self,\n            kinetic_MeV: float,\n            s: float = 0.0,\n            length: Optional[float] = None,\n            footstep: float = 5 * MM,\n    ) -> List[P3]:\n        \"\"\"\n        束流跟踪,运行一个理想粒子,返回轨迹\n        kinetic_MeV 粒子动能,单位 MeV\n        s 起点位置\n        length 粒子运行长度,默认运动到束线尾部\n        footstep 粒子运动步长\n        \"\"\"\n        if length is None:\n            length = self.trajectory.get_length() - s\n        ip = ParticleFactory.create_proton_along(\n            self.trajectory, s, kinetic_MeV)\n        return ParticleRunner.run_get_trajectory(ip, self, length, footstep)\n\n    def track_phase_space_particle(\n        self,\n        x_mm: float,\n        xp_mrad: float,\n        y_mm: float,\n        yp_mrad,\n        delta: float,\n        kinetic_MeV: float,\n        s: float = 0.0,\n        length: Optional[float] = None,\n        footstep: float = 10 * MM,\n    ) -> List[ValueWithDistance[PhaseSpaceParticle]]:\n        \"\"\"\n        运行一个相空间粒子\n        x_mm 相空间坐标 x,单位 mm\n        xp_mrad 相空间坐标 xp,单位 mrad\n        y_mm 相空间坐标 y,单位 mm\n        yp_mrad 相空间坐标 yp,单位 mrad\n        delta 动量分散\n        kinetic_MeV 正则动能,单位 MeV\n        s 在束线上的起点,默认 0.0\n        length 运动长度,如果为空则运行到束线尾\n        footstep 运动步长,默认 10*MM\n\n        返回值是一个 List[ValueWithDistance[PhaseSpaceParticle]]\n        即一个数组,数组元素是 ValueWithDistance\n        即对应运动位置的粒子的相空间坐标信息\n        \"\"\"\n        if length is None:\n            length = self.trajectory.get_length() - s\n        pp = PhaseSpaceParticle(\n            x=x_mm * MM,\n            xp=xp_mrad * MM,\n            y=y_mm * MM,\n            yp=yp_mrad * MM,\n            z=0.0,\n            delta=delta\n        )\n        # ip, distence = 0.0\n        ip = ParticleFactory.create_proton_along(\n            self.trajectory, s, kinetic_MeV)\n        # to rp, distence = 0.0\n        rp = ParticleFactory.create_from_phase_space_particle(\n            ideal_particle=ip,\n            coordinate_system=ip.get_natural_coordinate_system(),\n            phase_space_particle=pp\n        )\n        # run all info, distence from 0.0\n        all_info = ParticleRunner.run_get_all_info(\n            p=rp,\n            m=self,\n            length=length,\n            footstep=footstep\n        )\n        # for cp\n        ret: List[ValueWithDistance[PhaseSpaceParticle]] = []\n        for cp in all_info:\n            d = cp.distance  # , distence from 0.0\n            cip = ParticleFactory.create_proton_along(\n            
    self.trajectory, d + s, kinetic_MeV)  # 所以这里是 d + s\n            cpp = PhaseSpaceParticle.create_from_running_particle(\n                ideal_particle=cip,\n                coordinate_system=cip.get_natural_coordinate_system(),\n                running_particle=cp\n            )\n            ret.append(ValueWithDistance(\n                value=cpp, distance=d\n            ))\n\n        return ret\n\n    def track_phase_ellipse(\n            self,\n            x_sigma_mm: float,\n            xp_sigma_mrad: float,\n            y_sigma_mm: float,\n            yp_sigma_mrad,\n            delta: float,\n            particle_number: int,\n            kinetic_MeV: float,\n            s: float = 0.0,\n            length: Optional[float] = None,\n            footstep: float = 10 * MM,\n            concurrency_level: int = 1,\n            report: bool = True\n    ) -> Tuple[List[P2], List[P2]]:\n        \"\"\"\n        束流跟踪,运行两个相椭圆边界上的粒子,\n        返回一个长度 2 的元组,表示相空间 x-xp 平面和 y-yp 平面上粒子投影(单位 mm \/ mrad)\n        两个相椭圆,一个位于 xxp 平面,参数为 σx 和 σxp ,动量分散为 delta\n        另一个位于 yyp 平面,参数为 σy 和 σyp ,动量分散为 delta\n        x_sigma_mm σx 单位 mm\n        xp_sigma_mrad σxp 单位 mrad\n        y_sigma_mm σy 单位 mm\n        yp_sigma_mrad σyp 单位 mrad\n        delta 动量分散 单位 1\n        particle_number 粒子数目\n        kinetic_MeV 动能 单位 MeV\n        s 起点位置\n        length 粒子运行长度,默认运行到束线尾部\n        footstep 粒子运动步长\n        concurrency_level 并行等级(使用多少个核心进行粒子跟踪)\n        report 是否打印并行任务计划\n        \"\"\"\n        if length is None:\n            length = self.trajectory.get_length() - s\n        ip_start = ParticleFactory.create_proton_along(\n            self.trajectory, s, kinetic_MeV)\n        ip_end = ParticleFactory.create_proton_along(\n            self.trajectory, s + length, kinetic_MeV\n        )\n\n        pp_x = PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_xxp_plane(\n            xMax=x_sigma_mm * MM,\n            xpMax=xp_sigma_mrad * MRAD,\n            delta=delta,\n            number=particle_number,\n        )\n\n        pp_y = PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_yyp_plane(\n            yMax=y_sigma_mm * MM,\n            ypMax=yp_sigma_mrad * MRAD,\n            delta=delta,\n            number=particle_number,\n        )\n\n        rp_x = ParticleFactory.create_from_phase_space_particles(\n            ideal_particle=ip_start,\n            coordinate_system=ip_start.get_natural_coordinate_system(),\n            phase_space_particles=pp_x,\n        )\n\n        rp_y = ParticleFactory.create_from_phase_space_particles(\n            ideal_particle=ip_start,\n            coordinate_system=ip_start.get_natural_coordinate_system(),\n            phase_space_particles=pp_y,\n        )\n\n        # run\n        # refactor v0.1.1 合并计算\n        ParticleRunner.run_only(\n            p=rp_x + rp_y, m=self, length=length, footstep=footstep, concurrency_level=concurrency_level,\n            report=report\n        )\n\n        pp_x_end = PhaseSpaceParticle.create_from_running_particles(\n            ideal_particle=ip_end,\n            coordinate_system=ip_end.get_natural_coordinate_system(),\n            running_particles=rp_x,\n        )\n\n        pp_y_end = PhaseSpaceParticle.create_from_running_particles(\n            ideal_particle=ip_end,\n            coordinate_system=ip_end.get_natural_coordinate_system(),\n            running_particles=rp_y,\n        )\n\n        xs = [pp.project_to_xxp_plane() \/ MM for pp in pp_x_end]\n        ys = [pp.project_to_yyp_plane() \/ MM 
for pp in pp_y_end]\n\n        s: BaseUtils.Statistic = BaseUtils.Statistic()\n\n        print(\n            f\"delta={delta},\" +\n            f\"avg_size_x={s.clear().add_all(P2.extract(xs)[0]).half_width()}mm,\" +\n            f\"avg_size_y={s.clear().add_all(P2.extract(ys)[0]).half_width()}mm\"\n        )\n\n        return (xs, ys)\n\n    # from ApertureObject\n    def is_out_of_aperture(self, point: P3) -> bool:\n        \"\"\"\n        判断点 point 是否超出 Beamline 的任意一个元件的孔径\n        只有当粒子轴向投影在元件内部时,才会进行判断,\n        否则即时粒子距离轴线很远,也认为粒子没有超出孔径,\n        这是因为粒子不在元件内时,很可能处于另一个大孔径元件中,这样会造成误判。\n\n        注意:这个函数的效率极低!\n        \"\"\"\n        for m in self.magnets:\n            if isinstance(m, ApertureObject) and m.is_out_of_aperture(point):\n                print(f\"beamline在{m}位置超出孔径\")\n                return True\n\n        return False\n\n    def trace_is_out_of_aperture(\n            self, trace_with_distance: List[ValueWithDistance[P3]]\n    ) -> bool:\n        \"\"\"\n        判断一条粒子轨迹是否超出孔径\n\n        注意:这个函数的效率极低!\n        \"\"\"\n        for pd in trace_with_distance:\n            if self.is_out_of_aperture(pd.value):\n                return True\n\n        return False\n\n    def get_length(self) -> float:\n        \"\"\"\n        获得 Beamline 的长度\n        \"\"\"\n        return self.trajectory.get_length()\n\n    def point_at(self, s: float) -> P2:\n        \"\"\"\n        获得 Beamline s 位置处的点 (x,y)\n        -------\n\n        \"\"\"\n        return self.trajectory.point_at(s)\n\n    def direct_at(self, s: float) -> P2:\n        \"\"\"\n        获得 Beamline s 位置处的方向\n        \"\"\"\n        return self.trajectory.direct_at(s)\n\n    class __BeamlineBuilder:\n        \"\"\"\n        构建 Beamline 的中间产物\n        \"\"\"\n\n        def __init__(self, start_point: P2) -> None:\n            self.start_point = start_point\n\n        def first_drift(self, direct: P2 = P2.x_direct(), length: float = 1.0) -> \"Beamline\":\n            \"\"\"\n            为 Beamline 添加第一个 drift\n            正如 Trajectory 的第一个曲线段必须是是直线一样\n            Beamline 中第一个元件必须是 drift\n            \"\"\"\n            bl = Beamline(\n                Trajectory.set_start_point(self.start_point).first_line(\n                    direct=direct, length=length\n                )\n            )\n            bl.elements.append((0, None, length))\n            return bl\n\n    @staticmethod\n    # -> \"Beamline.__BeamlineBuilder\"\n    def set_start_point(start_point: P2 = P2.origin()):\n        \"\"\"\n        设置束线起点\n        \"\"\"\n        return Beamline.__BeamlineBuilder(start_point)\n\n    def append_drift(self, length: float) -> \"Beamline\":\n        \"\"\"\n        尾加漂移段\n        length 漂移段长度\n        \"\"\"\n        old_len = self.trajectory.get_length()\n        self.trajectory.add_strait_line(length=length)\n        self.elements.append((old_len, None, length))\n\n        return self\n\n    def append_straight_dipole_magnet(\n            self,\n            magnetic_field: float,\n            length: float,\n            aperture_radius: float,\n            # field_direct: P2 = P2.y_direct()\n    ) -> \"Beamline\":\n        \"\"\"\n        尾加直线二极铁\n        \"\"\"\n        old_length = self.trajectory.get_length()\n        self.trajectory.add_strait_line(length=length)\n\n        lum = LocalUniformMagnet.create_local_uniform_magnet_along(\n            trajectory=self.trajectory,\n            s=old_length,\n            length=length,\n            magnetic_field=magnetic_field,\n            aperture_radius=aperture_radius,\n      
  )\n\n        self.magnets.append(lum)\n        self.elements.append((old_length, lum, length))\n\n        return self\n\n    def append_qs(\n            self,\n            length: float,\n            gradient: float,\n            second_gradient: float,\n            aperture_radius: float,\n    ) -> \"Beamline\":\n        \"\"\"\n        尾加 QS 磁铁\n\n        length: float QS 磁铁长度\n        gradient: float 梯度 T\/m\n        second_gradient: float 二阶梯度(六极场) T\/m^2\n        aperture_radius: float 半孔径 单位 m\n        \"\"\"\n        old_length = self.trajectory.get_length()\n        self.trajectory.add_strait_line(length=length)\n\n        qs = QS.create_qs_along(\n            trajectory=self.trajectory,\n            s=old_length,\n            length=length,\n            gradient=gradient,\n            second_gradient=second_gradient,\n            aperture_radius=aperture_radius,\n        )\n\n        self.magnets.append(qs)\n        self.elements.append((old_length, qs, length))\n\n        return self\n\n    def append_q(\n            self,\n            length: float,\n            gradient: float,\n            aperture_radius: float,\n    ) -> \"Beamline\":\n        \"\"\"\n        尾加 Q 磁铁\n\n        length: float QS 磁铁长度\n        gradient: float 梯度 T\/m\n        aperture_radius: float 半孔径 单位 m\n        \"\"\"\n        old_length = self.trajectory.get_length()\n        self.trajectory.add_strait_line(length=length)\n\n        q = Q.create_q_along(\n            trajectory=self.trajectory,\n            s=old_length,\n            length=length,\n            gradient=gradient,\n            aperture_radius=aperture_radius,\n        )\n\n        self.magnets.append(q)\n        self.elements.append((old_length, q, length))\n\n        return self\n\n    def append_dipole_cct(\n            self,\n            big_r: float,\n            small_r_inner: float,\n            small_r_outer: float,\n            bending_angle: float,\n            tilt_angles: List[float],\n            winding_number: int,\n            current: float,\n            disperse_number_per_winding: int = 120,\n    ) -> \"Beamline\":\n        \"\"\"\n        尾加二极CCT\n\n        big_r: float 偏转半径\n        small_r_inner: float 内层半孔径\n        small_r_outer: float 外层半孔径\n        bending_angle: float 偏转角度(正数表示逆时针、负数表示顺时针)\n        tilt_angles: List[float] 各极倾斜角\n        winding_number: int 匝数\n        current: float 电流\n        disperse_number_per_winding: int 每匝分段数目,越大计算越精确\n        \"\"\"\n        old_length = self.trajectory.get_length()\n        cct_length = big_r * abs(BaseUtils.angle_to_radian(bending_angle))\n        self.trajectory.add_arc_line(\n            radius=big_r, clockwise=bending_angle < 0, angle_deg=abs(bending_angle)\n        )\n\n        cct_inner = CCT.create_cct_along(\n            trajectory=self.trajectory,\n            s=old_length,\n            big_r=big_r,\n            small_r=small_r_inner,\n            bending_angle=abs(bending_angle),\n            tilt_angles=tilt_angles,\n            winding_number=winding_number,\n            current=current,\n            starting_point_in_ksi_phi_coordinate=P2.origin(),\n            end_point_in_ksi_phi_coordinate=P2(\n                2 * math.pi * winding_number,\n                BaseUtils.angle_to_radian(bending_angle),\n            ),\n            disperse_number_per_winding=disperse_number_per_winding,\n        )\n        self.magnets.append(cct_inner)\n        self.elements.append((old_length, cct_inner, cct_length))\n\n        cct_outer = CCT.create_cct_along(\n          
  trajectory=self.trajectory,\n            s=old_length,\n            big_r=big_r,\n            small_r=small_r_outer,\n            bending_angle=abs(bending_angle),\n            tilt_angles=BaseUtils.list_multiply(tilt_angles, -1),\n            winding_number=winding_number,\n            current=current,\n            starting_point_in_ksi_phi_coordinate=P2.origin(),\n            end_point_in_ksi_phi_coordinate=P2(\n                -2 * math.pi * winding_number,\n                BaseUtils.angle_to_radian(bending_angle),\n            ),\n            disperse_number_per_winding=disperse_number_per_winding,\n        )\n        self.magnets.append(cct_outer)\n        self.elements.append((old_length, cct_outer, cct_length))\n\n        return self\n\n    def append_agcct(\n            self,\n            big_r: float,\n            small_rs: List[float],\n            bending_angles: List[float],\n            tilt_angles: List[List[float]],\n            winding_numbers: List[List[int]],\n            currents: List[float],\n            disperse_number_per_winding: int = 120,\n    ) -> \"Beamline\":\n        \"\"\"\n        尾加 agcct\n        本质是两层二极 CCT 和两层交变四极 CCT\n\n        big_r: float 偏转半径,单位 m\n        small_rs: List[float] 各层 CCT 的孔径,一共四层,从大到小排列。分别是二极CCT外层、内层,四极CCT外层、内层\n        bending_angles: List[float] 交变四极 CCT 每个 part 的偏转半径(正数表示逆时针、负数表示顺时针),要么全正数,要么全负数。不需要传入二极 CCT 偏转半径,因为它就是 sum(bending_angles)\n        tilt_angles: List[List[float]] 二极 CCT 和四极 CCT 的倾斜角,典型值 [[30],[90,30]],只有两个元素的二维数组\n        winding_numbers: List[List[int]], 二极 CCT 和四极 CCT 的匝数,典型值 [[128],[21,50,50]] 表示二极 CCT 128匝,四极交变 CCT 为 21、50、50 匝\n        currents: List[float] 二极 CCT 和四极 CCT 的电流,典型值 [8000,9000]\n        disperse_number_per_winding: int 每匝分段数目,越大计算越精确\n\n        添加 CCT 的顺序为:\n        外层二极 CCT\n        内层二极 CCT\n        part1 四极 CCT 内层\n        part1 四极 CCT 外层\n        part2 四极 CCT 内层\n        part2 四极 CCT 外层\n        ... ... 
\n        \"\"\"\n        if len(small_rs) != 4:\n            raise ValueError(\n                f\"small_rs({small_rs}),长度应为4,分别是二极CCT外层、内层,四极CCT外层、内层\")\n        if not BaseUtils.is_sorted(small_rs[::-1]):\n            raise ValueError(\n                f\"small_rs({small_rs}),应从大到小排列,分别是二极CCT外层、内层,四极CCT外层、内层\")\n\n        total_bending_angle = sum(bending_angles)\n        old_length = self.trajectory.get_length()\n        cct_length = big_r * \\\n            abs(BaseUtils.angle_to_radian(total_bending_angle))\n        self.trajectory.add_arc_line(\n            radius=big_r,\n            clockwise=total_bending_angle < 0,\n            angle_deg=abs(total_bending_angle),\n        )\n\n        # 构建二极 CCT 外层\n        cct2_outer = CCT.create_cct_along(\n            trajectory=self.trajectory,\n            s=old_length,\n            big_r=big_r,\n            small_r=small_rs[0],\n            bending_angle=abs(total_bending_angle),\n            tilt_angles=BaseUtils.list_multiply(tilt_angles[0], -1),\n            winding_number=winding_numbers[0][0],\n            current=currents[0],\n            starting_point_in_ksi_phi_coordinate=P2.origin(),\n            end_point_in_ksi_phi_coordinate=P2(\n                -2 * math.pi * winding_numbers[0][0],\n                BaseUtils.angle_to_radian(total_bending_angle),\n            ),\n            disperse_number_per_winding=disperse_number_per_winding,\n        )\n        self.magnets.append(cct2_outer)\n        self.elements.append((old_length, cct2_outer, cct_length))\n\n        # 构建二极 CCT 内层\n        cct2_innter = CCT.create_cct_along(\n            trajectory=self.trajectory,\n            s=old_length,\n            big_r=big_r,\n            small_r=small_rs[1],\n            bending_angle=abs(total_bending_angle),\n            tilt_angles=tilt_angles[0],\n            winding_number=winding_numbers[0][0],\n            current=currents[0],\n            starting_point_in_ksi_phi_coordinate=P2.origin(),\n            end_point_in_ksi_phi_coordinate=P2(\n                2 * math.pi * winding_numbers[0][0],\n                BaseUtils.angle_to_radian(total_bending_angle),\n            ),\n            disperse_number_per_winding=disperse_number_per_winding,\n        )\n        self.magnets.append(cct2_innter)\n        self.elements.append((old_length, cct2_innter, cct_length))\n\n        # 构建内外侧四极交变 CCT\n        # 提取参数\n        agcct_small_r_out = small_rs[2]\n        agcct_small_r_in = small_rs[3]\n        agcct_winding_nums: List[int] = winding_numbers[1]\n        agcct_bending_angles: List[float] = bending_angles\n        agcct_bending_angles_rad: List[float] = BaseUtils.angle_to_radian(\n            agcct_bending_angles\n        )\n        agcct_tilt_angles: List[float] = tilt_angles[1]\n        agcct_current: float = currents[1]\n\n        # 构建 part1\n        agcct_index = 0\n        agcct_start_in = P2.origin()\n        agcct_start_out = P2.origin()\n        agcct_end_in = P2(\n            ((-1.0) ** agcct_index) * 2 * math.pi *\n            agcct_winding_nums[agcct_index],\n            agcct_bending_angles_rad[agcct_index],\n        )\n        agcct_end_out = P2(\n            ((-1.0) ** (agcct_index + 1))\n            * 2\n            * math.pi\n            * agcct_winding_nums[agcct_index],\n            agcct_bending_angles_rad[agcct_index],\n        )\n        agcct_part1_inner = CCT.create_cct_along(\n            trajectory=self.trajectory,\n            s=old_length,\n            big_r=big_r,\n            small_r=agcct_small_r_in,\n            
bending_angle=abs(agcct_bending_angles[agcct_index]),\n            tilt_angles=BaseUtils.list_multiply(agcct_tilt_angles, -1),\n            winding_number=agcct_winding_nums[agcct_index],\n            current=agcct_current,\n            starting_point_in_ksi_phi_coordinate=agcct_start_in,\n            end_point_in_ksi_phi_coordinate=agcct_end_in,\n            disperse_number_per_winding=disperse_number_per_winding,\n        )\n        agcct_part1_length = big_r * \\\n            BaseUtils.angle_to_radian(abs(agcct_bending_angles[agcct_index]))\n        self.magnets.append(agcct_part1_inner)\n        self.elements.append(\n            (old_length, agcct_part1_inner, agcct_part1_length))\n\n        agcct_part1_outer = CCT.create_cct_along(\n            trajectory=self.trajectory,\n            s=old_length,\n            big_r=big_r,\n            small_r=agcct_small_r_out,\n            bending_angle=abs(agcct_bending_angles[agcct_index]),\n            tilt_angles=agcct_tilt_angles,\n            winding_number=agcct_winding_nums[agcct_index],\n            current=agcct_current,\n            starting_point_in_ksi_phi_coordinate=agcct_start_out,\n            end_point_in_ksi_phi_coordinate=agcct_end_out,\n            disperse_number_per_winding=disperse_number_per_winding,\n        )\n        self.magnets.append(agcct_part1_outer)\n        self.elements.append(\n            (old_length, agcct_part1_outer, agcct_part1_length))\n\n        old_length_i = old_length + agcct_part1_length\n        # 构建 part2 和之后的 part\n        for ignore in range(len(agcct_bending_angles) - 1):\n            agcct_index += 1\n            agcct_start_in = agcct_end_in + P2(\n                0,\n                agcct_bending_angles_rad[agcct_index - 1]\n                \/ agcct_winding_nums[agcct_index - 1],\n            )\n            agcct_start_out = agcct_end_out + P2(\n                0,\n                agcct_bending_angles_rad[agcct_index - 1]\n                \/ agcct_winding_nums[agcct_index - 1],\n            )\n            agcct_end_in = agcct_start_in + P2(\n                ((-1) ** agcct_index) * 2 * math.pi *\n                agcct_winding_nums[agcct_index],\n                agcct_bending_angles_rad[agcct_index],\n            )\n            agcct_end_out = agcct_start_out + P2(\n                ((-1) ** (agcct_index + 1))\n                * 2\n                * math.pi\n                * agcct_winding_nums[agcct_index],\n                agcct_bending_angles_rad[agcct_index],\n            )\n            agcct_parti_inner = CCT.create_cct_along(\n                trajectory=self.trajectory,\n                s=old_length,\n                big_r=big_r,\n                small_r=agcct_small_r_in,\n                bending_angle=abs(agcct_bending_angles[agcct_index]),\n                tilt_angles=BaseUtils.list_multiply(agcct_tilt_angles, -1),\n                winding_number=agcct_winding_nums[agcct_index],\n                current=agcct_current,\n                starting_point_in_ksi_phi_coordinate=agcct_start_in,\n                end_point_in_ksi_phi_coordinate=agcct_end_in,\n                disperse_number_per_winding=disperse_number_per_winding,\n            )\n            agcct_parti_length = big_r * \\\n                BaseUtils.angle_to_radian(\n                    abs(agcct_bending_angles[agcct_index]))\n            self.magnets.append(agcct_parti_inner)\n            self.elements.append(\n                (old_length_i, agcct_parti_inner, agcct_parti_length))\n\n            agcct_parti_outer = 
CCT.create_cct_along(\n                trajectory=self.trajectory,\n                s=old_length,\n                big_r=big_r,\n                small_r=agcct_small_r_out,\n                bending_angle=abs(agcct_bending_angles[agcct_index]),\n                tilt_angles=agcct_tilt_angles,\n                winding_number=agcct_winding_nums[agcct_index],\n                current=agcct_current,\n                starting_point_in_ksi_phi_coordinate=agcct_start_out,\n                end_point_in_ksi_phi_coordinate=agcct_end_out,\n                disperse_number_per_winding=disperse_number_per_winding,\n            )\n            self.magnets.append(agcct_parti_outer)\n            self.elements.append(\n                (old_length_i, agcct_parti_outer, agcct_parti_length))\n\n            old_length_i += agcct_parti_length\n\n        return self\n\n    def get_magnets(self) -> List[Magnet]:\n        return self.magnets\n\n    def get_trajectory(self) -> Trajectory:\n        return self.trajectory\n\n    def __str__(self) -> str:\n        return f\"beamline(magnet_size={len(self.magnets)}, traj_len={self.trajectory.get_length()})\"\n\n    def __repr__(self) -> str:\n        return self.__str__()"}
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_278","text":"# -*- coding: UTF-8 -*-\n\nfrom mpi4py import MPI\nfrom sympy import pi, cos, sin\nimport pytest\nimport os\n\nfrom sympde.calculus import grad, dot\nfrom sympde.topology import ScalarFunctionSpace, VectorFunctionSpace\nfrom sympde.topology import ProductSpace\nfrom sympde.topology import element_of\nfrom sympde.topology import NormalVector\nfrom sympde.topology import Union\nfrom sympde.topology import Domain\nfrom sympde.expr     import BilinearForm, LinearForm, integral\nfrom sympde.expr     import Norm\nfrom sympde.expr     import find, EssentialBC\n\nfrom psydac.fem.basic          import FemField\nfrom psydac.api.discretization import discretize\n\n# ... get the mesh directory\ntry:\n    mesh_dir = os.environ['PSYDAC_MESH_DIR']\n\nexcept:\n    base_dir = os.path.dirname(os.path.realpath(__file__))\n    base_dir = os.path.join(base_dir, '..', '..', '..')\n    mesh_dir = os.path.join(base_dir, 'mesh')\n# ...\n\n#==============================================================================\ndef run_poisson_3d_dir(filename, solution, f, comm=None):\n\n    # ... abstract model\n    domain = Domain.from_file(filename)\n\n    V = ScalarFunctionSpace('V', domain)\n\n    x,y,z = domain.coordinates\n\n    F = element_of(V, name='F')\n\n    v = element_of(V, name='v')\n    u = element_of(V, name='u')\n\n    int_0 = lambda expr: integral(domain , expr)\n\n    expr = dot(grad(v), grad(u))\n    a = BilinearForm((v,u), int_0(expr))\n\n    expr = f*v\n    l = LinearForm(v, int_0(expr))\n\n    error = F - solution\n    l2norm = Norm(error, domain, kind='l2')\n    h1norm = Norm(error, domain, kind='h1')\n\n    bc = EssentialBC(u, 0, domain.boundary)\n    equation = find(u, forall=v, lhs=a(u,v), rhs=l(v), bc=bc)\n    # ...\n\n    # ... create the computational domain from a topological domain\n    domain_h = discretize(domain, filename=filename, comm=comm)\n    # ...\n\n    # ... discrete spaces\n    Vh = discretize(V, domain_h)\n    # ...\n\n    # ... dsicretize the equation using Dirichlet bc\n    equation_h = discretize(equation, domain_h, [Vh, Vh])\n    # ...\n\n    # ... discretize norms\n    l2norm_h = discretize(l2norm, domain_h, Vh)\n    h1norm_h = discretize(h1norm, domain_h, Vh)\n    # ...\n\n    # ... solve the discrete equation\n    x = equation_h.solve()\n    # ...\n\n    # ...\n    phi = FemField( Vh, x )\n    # ...\n\n    # ... compute norms\n    l2_error = l2norm_h.assemble(F=phi)\n    h1_error = h1norm_h.assemble(F=phi)\n    # ...\n\n    return l2_error, h1_error\n\n#==============================================================================\ndef run_poisson_3d_dirneu(filename, solution, f, boundary, comm=None):\n\n    assert( isinstance(boundary, (list, tuple)) )\n\n    # ... 
abstract model\n    domain = Domain.from_file(filename)\n\n    V = ScalarFunctionSpace('V', domain)\n\n    B_neumann = [domain.get_boundary(**kw) for kw in boundary]\n    if len(B_neumann) == 1:\n        B_neumann = B_neumann[0]\n\n    else:\n        B_neumann = Union(*B_neumann)\n\n    x,y,z = domain.coordinates\n\n    F = element_of(V, name='F')\n\n    v = element_of(V, name='v')\n    u = element_of(V, name='u')\n\n    nn = NormalVector('nn')\n\n    int_0 = lambda expr: integral(domain , expr)\n    int_1 = lambda expr: integral(B_neumann , expr)\n\n    expr = dot(grad(v), grad(u))\n    a = BilinearForm((v,u), int_0(expr))\n\n    expr = f*v\n    l0 = LinearForm(v, int_0(expr))\n\n    expr = v*dot(grad(solution), nn)\n    l_B_neumann = LinearForm(v, int_1(expr))\n\n    expr = l0(v) + l_B_neumann(v)\n    l = LinearForm(v, expr)\n\n    error = F-solution\n    l2norm = Norm(error, domain, kind='l2')\n    h1norm = Norm(error, domain, kind='h1')\n\n    B_dirichlet = domain.boundary.complement(B_neumann)\n    bc = EssentialBC(u, 0, B_dirichlet)\n\n    equation = find(u, forall=v, lhs=a(u,v), rhs=l(v), bc=bc)\n    # ...\n\n    # ... create the computational domain from a topological domain\n    domain_h = discretize(domain, filename=filename, comm=comm)\n    # ...\n\n    # ... discrete spaces\n    Vh = discretize(V, domain_h)\n    # ...\n\n    # ... dsicretize the equation using Dirichlet bc\n    equation_h = discretize(equation, domain_h, [Vh, Vh])\n    # ...\n\n    # ... discretize norms\n    l2norm_h = discretize(l2norm, domain_h, Vh)\n    h1norm_h = discretize(h1norm, domain_h, Vh)\n    # ...\n\n    # ... solve the discrete equation\n    x = equation_h.solve()\n    # ...\n\n    # ...\n    phi = FemField( Vh, x )\n    # ...\n\n    # ... compute norms\n    l2_error = l2norm_h.assemble(F=phi)\n    h1_error = h1norm_h.assemble(F=phi)\n    # ...\n\n    return l2_error, h1_error\n\n#==============================================================================\ndef run_laplace_3d_neu(filename, solution, f, comm=None):\n\n    # ... abstract model\n    domain = Domain.from_file(filename)\n\n    V = ScalarFunctionSpace('V', domain)\n\n    B_neumann = domain.boundary\n\n    x,y,z = domain.coordinates\n\n    F = element_of(V, name='F')\n\n    v = element_of(V, name='v')\n    u = element_of(V, name='u')\n\n    nn = NormalVector('nn')\n\n    int_0 = lambda expr: integral(domain , expr)\n    int_1 = lambda expr: integral(B_neumann , expr)\n\n    expr = dot(grad(v), grad(u)) + v*u\n    a = BilinearForm((v,u), int_0(expr))\n\n    expr = f*v\n    l0 = LinearForm(v, int_0(expr))\n\n    expr = v*dot(grad(solution), nn)\n    l_B_neumann = LinearForm(v, int_1(expr))\n\n    expr = l0(v) + l_B_neumann(v)\n    l = LinearForm(v, expr)\n\n    error = F-solution\n    l2norm = Norm(error, domain, kind='l2')\n    h1norm = Norm(error, domain, kind='h1')\n\n    equation = find(u, forall=v, lhs=a(u,v), rhs=l(v))\n    # ...\n\n    # ... create the computational domain from a topological domain\n    domain_h = discretize(domain, filename=filename, comm=comm)\n    # ...\n\n    # ... discrete spaces\n    Vh = discretize(V, domain_h)\n    # ...\n\n    # ... dsicretize the equation using Dirichlet bc\n    equation_h = discretize(equation, domain_h, [Vh, Vh])\n    # ...\n\n    # ... discretize norms\n    l2norm_h = discretize(l2norm, domain_h, Vh)\n    h1norm_h = discretize(h1norm, domain_h, Vh)\n    # ...\n\n    # ... 
solve the discrete equation\n    x = equation_h.solve()\n    # ...\n\n    # ...\n    phi = FemField( Vh, x )\n    # ...\n\n    # ... compute norms\n    l2_error = l2norm_h.assemble(F=phi)\n    h1_error = h1norm_h.assemble(F=phi)\n    # ...\n\n    return l2_error, h1_error\n\n\n###############################################################################\n#            SERIAL TESTS\n###############################################################################\n\n#==============================================================================\ndef test_api_poisson_3d_dir_collela():\n\n    filename = os.path.join(mesh_dir, 'collela_3d.h5')\n\n    from sympy.abc import x,y,z\n\n    solution = sin(pi*x)*sin(pi*y)*sin(pi*z)\n    f        = 3*pi**2*sin(pi*x)*sin(pi*y)*sin(pi*z)\n\n    l2_error, h1_error = run_poisson_3d_dir(filename, solution, f)\n\n    expected_l2_error =  0.15687494944868827\n    expected_h1_error =  1.518006054794389\n\n    assert( abs(l2_error - expected_l2_error) < 1.e-7)\n    assert( abs(h1_error - expected_h1_error) < 1.e-7)\n\n\n#==============================================================================\ndef test_api_poisson_3d_dirneu_identity_2():\n    filename = os.path.join(mesh_dir, 'identity_3d.h5')\n\n    from sympy.abc import x,y,z\n\n    solution = sin(0.5*pi*x)*sin(pi*y)*sin(pi*z)\n    f        = (9.\/4.)*pi**2*solution\n\n    l2_error, h1_error = run_poisson_3d_dirneu(filename, solution, f, [{'axis': 0, 'ext': 1}])\n\n    expected_l2_error =  0.001438835012218704\n    expected_h1_error =  0.03929404299152016\n\n    assert( abs(l2_error - expected_l2_error) < 1.e-7)\n    assert( abs(h1_error - expected_h1_error) < 1.e-7)\n\n\n#==============================================================================\ndef test_api_poisson_3d_dirneu_identity_13():\n    filename = os.path.join(mesh_dir, 'identity_3d.h5')\n\n    from sympy.abc import x,y,z\n\n    solution = cos(0.5*pi*x)*cos(0.5*pi*y)*sin(pi*z)\n    f        = (3.\/2.)*pi**2*solution\n\n    l2_error, h1_error = run_poisson_3d_dirneu(filename, solution, f,\n                                               [{'axis': 0, 'ext': -1},\n                                                {'axis': 1, 'ext': -1}])\n\n    expected_l2_error =  0.0010275451113313282\n    expected_h1_error =  0.027938446826372126\n\n    assert( abs(l2_error - expected_l2_error) < 1.e-7)\n    assert( abs(h1_error - expected_h1_error) < 1.e-7)\n\n#==============================================================================\ndef test_api_poisson_3d_dirneu_identity_24():\n    filename = os.path.join(mesh_dir, 'identity_3d.h5')\n\n    from sympy.abc import x,y,z\n\n    solution = sin(0.5*pi*x)*sin(0.5*pi*y)*sin(pi*z)\n    f        = (3.\/2.)*pi**2*solution\n\n    l2_error, h1_error = run_poisson_3d_dirneu(filename, solution, f,\n                                               [{'axis': 0, 'ext': 1},\n                                                {'axis': 1, 'ext': 1}])\n\n    expected_l2_error =  0.001027545111330973\n    expected_h1_error =  0.027938446826371813\n\n    assert( abs(l2_error - expected_l2_error) < 1.e-7)\n    assert( abs(h1_error - expected_h1_error) < 1.e-7)\n\n##==============================================================================\n## TODO DEBUG, not working since merge with devel\n#def test_api_poisson_3d_dirneu_identity_123():\n#    filename = os.path.join(mesh_dir, 'identity_3d.h5')\n#\n#    from sympy.abc import x,y,z\n#\n#    solution = cos(0.25*pi*x)*cos(0.5*pi*y)*sin(pi*z)\n#    f        = 
(21.\/16.)*pi**2*solution\n#\n#    l2_error, h1_error = run_poisson_3d_dirneu(filename, solution, f,\n#                                               [{'axis': 0, 'ext': -1},\n#                                                {'axis': 0, 'ext': 1},\n#                                                {'axis': 1, 'ext': -1}])\n#\n#    expected_l2_error =  0.0013124098938804697\n#    expected_h1_error =  0.035441679549890456\n#\n#    assert( abs(l2_error - expected_l2_error) < 1.e-7)\n#    assert( abs(h1_error - expected_h1_error) < 1.e-7)\n\n##==============================================================================\n## TODO DEBUG, not working since merge with devel\n#def test_api_poisson_3d_dirneu_identity_1235():\n#    filename = os.path.join(mesh_dir, 'identity_3d.h5')\n#\n#    from sympy.abc import x,y,z\n#\n#    solution = cos(0.25*pi*x)*cos(0.5*pi*y)*cos(0.5*pi*z)\n#    f        = (9.\/16.)*pi**2*solution\n#\n#    l2_error, h1_error = run_poisson_3d_dirneu(filename, solution, f,\n#                                               [{'axis': 0, 'ext': -1},\n#                                                {'axis': 0, 'ext': 1},\n#                                                {'axis': 1, 'ext': -1},\n#                                                {'axis': 2, 'ext': -1}])\n#\n#    expected_l2_error =  0.00019677816039781896\n#    expected_h1_error =  0.0058786142515790405\n#\n#    assert( abs(l2_error - expected_l2_error) < 1.e-7)\n#    assert( abs(h1_error - expected_h1_error) < 1.e-7)\n\n\n#==============================================================================\ndef test_api_poisson_3d_dirneu_collela_2():\n    filename = os.path.join(mesh_dir, 'collela_3d.h5')\n\n    from sympy.abc import x,y,z\n\n    solution = sin(0.25*pi*(x+1.))*sin(pi*y)*sin(pi*z)\n    f        = (33.\/16.)*pi**2*solution\n\n    l2_error, h1_error = run_poisson_3d_dirneu(filename, solution, f,\n                                               [{'axis': 0, 'ext': 1}])\n\n    expected_l2_error =  0.06091240085930318\n    expected_h1_error =  0.6380043932563333\n\n    assert( abs(l2_error - expected_l2_error) < 1.e-7)\n    assert( abs(h1_error - expected_h1_error) < 1.e-7)\n\n\n##==============================================================================\n## TODO DEBUG, not working since merge with devel\n#def test_api_poisson_3d_dirneu_collela_13():\n#    filename = os.path.join(mesh_dir, 'collela_3d.h5')\n#\n#    from sympy.abc import x,y,z\n#\n#    solution = sin(0.25*pi*(1.-x))*sin(0.25*pi*(1.-y))*sin(pi*z)\n#    f        = (9.\/8.)*pi**2*solution\n#\n#    l2_error, h1_error = run_poisson_3d_dirneu(filename, solution, f,\n#                                               [{'axis': 0, 'ext': -1},\n#                                                {'axis': 1, 'ext': -1}])\n#\n#    expected_l2_error =  0.03786854933218588\n#    expected_h1_error =  0.38437667047918933\n#\n#    assert( abs(l2_error - expected_l2_error) < 1.e-7)\n#    assert( abs(h1_error - expected_h1_error) < 1.e-7)\n\n#==============================================================================\ndef test_api_poisson_3d_dirneu_collela_24():\n    filename = os.path.join(mesh_dir, 'collela_3d.h5')\n\n    from sympy.abc import x,y,z\n\n    solution = sin(0.25*pi*(x+1.))*sin(0.25*pi*(y+1.))*sin(pi*z)\n    f        = (9.\/8.)*pi**2*solution\n\n    l2_error, h1_error = run_poisson_3d_dirneu(filename, solution, f,\n                                               [{'axis': 0, 'ext': 1},\n                                                {'axis': 1, 
'ext': 1}])\n\n    expected_l2_error =  0.03793880183960465\n    expected_h1_error =  0.38439642303250143\n\n    assert( abs(l2_error - expected_l2_error) < 1.e-7)\n    assert( abs(h1_error - expected_h1_error) < 1.e-7)\n\n##==============================================================================\n## TODO DEBUG, not working since merge with devel\n#def test_api_poisson_3d_dirneu_collela_123():\n#    filename = os.path.join(mesh_dir, 'collela_3d.h5')\n#\n#    from sympy.abc import x,y,z\n#\n#    solution = cos(pi*x)*sin(0.25*pi*(1.-y))*sin(pi*z)\n#    f        = (33.\/16.)*pi**2*solution\n#\n#    l2_error, h1_error = run_poisson_3d_dirneu(filename, solution, f,\n#                                               [{'axis': 0, 'ext': -1},\n#                                                {'axis': 0, 'ext': 1},\n#                                                {'axis': 1, 'ext': -1}])\n#\n#    expected_l2_error =  0.11963989196330076\n#    expected_h1_error =  1.1267766354124575\n#\n#    assert( abs(l2_error - expected_l2_error) < 1.e-7)\n#    assert( abs(h1_error - expected_h1_error) < 1.e-7)\n#\n##==============================================================================\n## TODO DEBUG, not working since merge with devel\n#def test_api_poisson_3d_dirneu_collela_1235():\n#    filename = os.path.join(mesh_dir, 'collela_3d.h5')\n#\n#    from sympy.abc import x,y,z\n#\n#    solution = cos(pi*x)*sin(0.25*pi*(1.-y))*sin(0.25*pi*(1.-z))\n#    f        = (9.\/8.)*pi**2*solution\n#\n#    l2_error, h1_error = run_poisson_3d_dirneu(filename, solution, f,\n#                                               [{'axis': 0, 'ext': -1},\n#                                                {'axis': 0, 'ext': 1},\n#                                                {'axis': 1, 'ext': -1},\n#                                                {'axis': 2, 'ext': -1}])\n#\n#    expected_l2_error =  0.13208728319093133\n#    expected_h1_error =  0.9964934429086868\n#\n#    assert( abs(l2_error - expected_l2_error) < 1.e-7)\n#    assert( abs(h1_error - expected_h1_error) < 1.e-7)\n\n#==============================================================================\ndef test_api_laplace_3d_neu_identity():\n    filename = os.path.join(mesh_dir, 'identity_3d.h5')\n\n    from sympy.abc import x,y,z\n\n    solution = cos(pi*x)*cos(pi*y)*cos(pi*z)\n    f        = (3.*pi**2 + 1.)*solution\n\n    l2_error, h1_error = run_laplace_3d_neu(filename, solution, f)\n\n    expected_l2_error =  0.0016975430150953524\n    expected_h1_error =  0.047009063231215\n\n    assert( abs(l2_error - expected_l2_error) < 1.e-7)\n    assert( abs(h1_error - expected_h1_error) < 1.e-7)\n\n##==============================================================================\n## TODO DEBUG, not working since merge with devel\n#def test_api_laplace_3d_neu_collela():\n#    filename = os.path.join(mesh_dir, 'collela_3d.h5')\n#\n#    from sympy.abc import x,y,z\n#\n#    solution = cos(pi*x)*cos(pi*y)*cos(pi*z)\n#    f        = (3.*pi**2 + 1.)*solution\n#\n#    l2_error, h1_error = run_laplace_3d_neu(filename, solution, f)\n#\n#    expected_l2_error =  0.1768000505351402\n#    expected_h1_error =  1.7036022067226382\n#\n#    assert( abs(l2_error - expected_l2_error) < 1.e-7)\n#    assert( abs(h1_error - expected_h1_error) < 1.e-7)\n\n###############################################################################\n#            PARALLEL 
TESTS\n###############################################################################\n\n#==============================================================================\n@pytest.mark.parallel\ndef test_api_poisson_3d_dir_collela_parallel():\n\n    filename = os.path.join(mesh_dir, 'collela_3d.h5')\n\n    from sympy.abc import x,y,z\n\n    solution = sin(pi*x)*sin(pi*y)*sin(pi*z)\n    f        = 3*pi**2*sin(pi*x)*sin(pi*y)*sin(pi*z)\n\n    l2_error, h1_error = run_poisson_3d_dir(filename, solution, f,\n                                            comm=MPI.COMM_WORLD)\n\n    expected_l2_error =  0.15687494944868827\n    expected_h1_error =  1.518006054794389\n\n    assert( abs(l2_error - expected_l2_error) < 1.e-7)\n    assert( abs(h1_error - expected_h1_error) < 1.e-7)\n\n\n#==============================================================================\n# CLEAN UP SYMPY NAMESPACE\n#==============================================================================\n\ndef teardown_module():\n    from sympy import cache\n    cache.clear_cache()\n\ndef teardown_function():\n    from sympy import cache\n    cache.clear_cache()\n"}
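Every Poisson test in the record above pairs a manufactured solution with the source term f = -Laplacian(u), so each hard-coded prefactor such as (9./4.)*pi**2 or (3./2.)*pi**2 is simply what the negative Laplacian produces when applied to the chosen solution. A minimal sympy sketch (not part of the test file above, only assuming sympy is available) that recovers one of these prefactors:

import sympy as sp

x, y, z = sp.symbols('x y z')

# Manufactured solution from test_api_poisson_3d_dirneu_identity_2 above.
u = sp.sin(sp.pi * x / 2) * sp.sin(sp.pi * y) * sp.sin(sp.pi * z)

# Source term of the Poisson problem -Laplacian(u) = f.
f = -(sp.diff(u, x, 2) + sp.diff(u, y, 2) + sp.diff(u, z, 2))

# Reduces to 9*pi**2/4, matching the (9./4.)*pi**2 factor in the test.
print(sp.simplify(f / u))

The same check reproduces the (3./2.)*pi**2 and (33./16.)*pi**2 factors used by the other Dirichlet/Neumann tests.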
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_279","text":"modules\/math-codes\/modules\/statistics-and-probability\/src\/skew_kurt-v1.py\n########################################################\n#  - drigols                              #\n# Last update: 17\/12\/2021                              #\n########################################################\n\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport scipy.stats as stats\n\ndf = pd.DataFrame({\n  'Name': ['Dan', 'Joann', 'Pedro', 'Rosie', 'Ethan', 'Vicky', 'Frederic'],\n  'Salary':[50000, 54000, 50000, 189000, 55000, 40000, 59000],\n  'Hours':[41, 40, 36, 30, 35, 39, 40],\n  'Grade':[50, 50, 46, 95, 50, 5, 57]\n})\n\n# Create a list holding the labels (columns) of our DataFrame:\n# - Salary;\n# - Hours;\n# - Grade.\nnumcols = ['Salary', 'Hours', 'Grade'] \n\n# The for loop goes through each item in our list of labels (numcols) and does the following:\n# - Prints the skewness;\n# - Prints the kurtosis;\n# - Gets the density of the label;\n# - Creates a histogram for the label;\n# - Adds the density line to the plot\/histogram.\nfor col in numcols:\n  print(df[col].name + ' skewness: ' + str(df[col].skew())) # Print the skewness of the label\/column in the loop.\n  print(df[col].name + ' kurtosis: ' + str(df[col].kurt())) # Print the kurtosis of the label\/column in the loop.\n  density = stats.gaussian_kde(df[col]) # Get the density of the label\/column in the loop.\n  n, x, _ = plt.hist(df[col], histtype='step', density=True, bins=25) # Create the histogram of the label\/column in the loop.\n  plt.plot(x, density(x)*6) # Draw the density line of the label in the loop.\n  plt.show()\n  print('\\n')\n"}
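One caveat worth noting about the statistics printed by the script above: pandas' Series.skew() and Series.kurt() return bias-corrected sample estimates, with kurt() reporting excess kurtosis, while scipy.stats.skew() and scipy.stats.kurtosis() default to the uncorrected estimators. A short sketch (not part of the original file; it simply reuses the Salary column as example data) showing how to make the two libraries agree:

import pandas as pd
import scipy.stats as stats

salary = pd.Series([50000, 54000, 50000, 189000, 55000, 40000, 59000])

# pandas: bias-corrected sample skewness and excess kurtosis
print(salary.skew(), salary.kurt())

# scipy matches pandas once the same bias correction is requested
print(stats.skew(salary, bias=False), stats.kurtosis(salary, bias=False))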
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_280","text":"samcom12\/anuga_core\n\"\"\"\n\nFunction which can be useful when setting quantities\n\n\"\"\"\n\nimport copy\nimport os\nimport anuga.utilities.spatialInputUtil as su\n\n\ndef make_nearestNeighbour_quantity_function(\n    quantity_xyValueIn,\n    domain,\n    threshold_distance=9.0e+100,\n    background_value=9.0e+100,\n    k_nearest_neighbours=1,\n    method='average'\n):\n    \"\"\"\n    Function which makes another function, which can be used in set_quantity\n    Idea: For every point x,y in the domain, we want to set a quantity based on\n          the 'nearest-neighbours' from quantity_xyValue (a 3 column array with\n          x,y,quantity-value),\n          UNLESS the distance from x,y to the nearest-neighbour is >\n            threshold_distance.\n          In the latter case, we want to set the quantity value to\n            'background_value'\n\n          We need a function f(x,y) to do that. This routine makes the\n          function, with the desired quantity_xyValue points,\n          threshold_distance, and background_value\n    INPUTS:\n        @param quantity_xyValueIn -- A 3 column array with 'x,y, Value'\n            defining the points used to set the new quantity values in\n            georeferenced coordinates\n        @param domain -- The ANUGA domain\n        @param k_nearest_neighbors --- Number of nearest neighbours used in calculation\n        @param threshold_distance -- Points greater than this distance from\n            their nearest quantity_xyValue point are set to background_value\n        @param background_value -- see 'threshold_distance'\n        @param method -- Three methods; 'average' uses an inverse-distance-weighted average\n               of the k nearest neighbours is used:\n               'min' the minimum of the k nearest neighbours is used:\n               'max' the maximum of the k nearest neighbours is used.\n    OUTPUTS:\n        A function f which can be passed to domain.set_quantity('myQuantity', f)\n    \"\"\"\n\n    import scipy\n    import scipy.interpolate\n    import scipy.spatial\n\n    if(len(quantity_xyValueIn.shape) > 1):\n        quantity_xyValue = quantity_xyValueIn\n    else:\n        # Treat the single-point case\n        quantity_xyValue = quantity_xyValueIn.reshape((1, 3))\n\n    # Make a function which gives us the ROW-INDEX of the nearest xy point in\n    # quantity_xyValue\n    # quantity_xy_interpolator = scipy.interpolate.NearestNDInterpolator(\n    #    quantity_xyValue[:,0:2],\n    #    scipy.arange(len(quantity_xyValue[:,2])))\n\n    # Make a function which returns k-nearest-neighbour indices + distances\n    quantity_xy_interpolator = scipy.spatial.cKDTree(quantity_xyValue[:, 0:2])\n\n    # Make a function of x,y which we can pass to domain.set_quantity\n    def quant_NN_fun(x, y):\n        \"\"\"\n        Function to assign quantity from the nearest point in quantity_xyValue,\n        UNLESS the point is more than 'threshold_distance' away from the\n        nearest point, in which case the background value is used\n        \"\"\"\n\n        import scipy\n        import scipy.interpolate\n        import scipy.spatial\n        import numpy as np\n\n        x = np.asarray(x).reshape(1, -1)[0, :]\n        y = np.asarray(y).reshape(1, -1)[0, :]\n\n        # Since ANUGA stores x,y internally in non-georeferenced coordinates,\n        # we adjust them here\n        xll = domain.geo_reference.xllcorner\n        yll = 
domain.geo_reference.yllcorner\n        z = np.zeros(shape=(len(x), 2))\n        z[:, 0] = x+xll\n        z[:, 1] = y+yll\n\n        # This will hold the quantity values\n        quantity_output = x*0. + background_value\n        # Compute the index of the nearest-neighbour in quantity_xyValue\n        neighbour_data = quantity_xy_interpolator.query(z,\n                                                        k=k_nearest_neighbours)\n\n        # Next find indices with distance < threshold_distance\n        if(k_nearest_neighbours == 1):\n            dist_lt_thresh = neighbour_data[0] < threshold_distance\n        else:\n            dist_lt_thresh = neighbour_data[0][:, 0] < threshold_distance\n\n        dist_lt_thresh = dist_lt_thresh.nonzero()[0]\n\n        # Initialise output\n        quantity_output = x*0 + background_value\n\n        # Interpolate\n        if len(dist_lt_thresh) > 0:\n            if method == 'min':\n                numerator = 9.0e+100\n                for i in range(k_nearest_neighbours):\n                    if(k_nearest_neighbours == 1):\n                        distances = neighbour_data[0][dist_lt_thresh]\n                        indices = neighbour_data[1][dist_lt_thresh]\n                        values = quantity_xyValue[indices, 2]\n                        numerator = np.minimum(numerator, values)\n                    else:\n                        distances = neighbour_data[0][dist_lt_thresh, i]\n                        indices = neighbour_data[1][dist_lt_thresh, i]\n                        values = quantity_xyValue[indices, 2]\n                        numerator = np.minimum(numerator, values)\n                quantity_output[dist_lt_thresh] = numerator\n            elif method == 'max':\n                numerator = -9.0e+100\n                for i in range(k_nearest_neighbours):\n                    if(k_nearest_neighbours == 1):\n                        distances = neighbour_data[0][dist_lt_thresh]\n                        indices = neighbour_data[1][dist_lt_thresh]\n                        values = quantity_xyValue[indices, 2]\n                        numerator = np.maximum(numerator, values)\n                    else:\n                        distances = neighbour_data[0][dist_lt_thresh, i]\n                        indices = neighbour_data[1][dist_lt_thresh, i]\n                        values = quantity_xyValue[indices, 2]\n                        numerator = np.maximum(numerator, values)\n                quantity_output[dist_lt_thresh] = numerator\n            else:\n                numerator = 0\n                denominator = 0\n                for i in range(k_nearest_neighbours):\n                    if(k_nearest_neighbours == 1):\n                        distances = neighbour_data[0][dist_lt_thresh]\n                        indices = neighbour_data[1][dist_lt_thresh]\n                    else:\n                        distances = neighbour_data[0][dist_lt_thresh, i]\n                        indices = neighbour_data[1][dist_lt_thresh, i]\n\n                    inverse_distance = 1.0\/(distances+1.0e-100)\n                    values = quantity_xyValue[indices, 2]\n                    numerator += values*inverse_distance\n                    denominator += inverse_distance\n\n                quantity_output[dist_lt_thresh] = numerator \/ denominator\n\n        return quantity_output\n\n    # Return the quantity function\n    return quant_NN_fun\n\n\n###############################################################################\n\ndef 
composite_quantity_setting_function(poly_fun_pairs,\n                                        domain,\n                                        clip_range=None,\n                                        nan_treatment='exception',\n                                        nan_interpolation_region_polygon=None,\n                                        default_k_nearest_neighbours=1,\n                                        default_raster_interpolation='pixel',\n                                        verbose=True):\n    \"\"\" Make a 'composite function' to set quantities -- applies different\n        functions inside different polygon regions.\n\n        poly_fun_pairs = [ [p0, f0], [p1, f1], ...]\n\n        Where:\n\n          fi is a function,\n             or a constant,\n             or a '.txt' or '.csv' file with comma separated xyz data\n                and an optional header row which contains letters,\n             or the name of a gdal-compatible rasterFile\n                (not ending in .txt or .csv),\n             or a numpy array with 3 columns\n\n          pi is a polygon (anuga polygon format),\n            or a polygon filename (shapefile or a csv format that\n                                    anuga.read_polygon will read),\n            or None ( equivalent to a polygon with zero area),\n            or 'All' (equivalent to a polygon covering everything)\n            or 'Extent' in the case that fi is a rasterFile name\n                (equivalent to a polygon with the same extent as the raster)\n\n        IMPORTANT: When polygons overlap, the first elements of the list are\n                   given priority. The approach is:\n                   First f0 is applied to all points in p0, and we record\n                     that these points have been 'set'\n                   Next f1 is applied to all points in p1 which have not\n                     been 'set', and then we record those points as being 'set'\n                   Next f2 is applied to all points in p2 which have not\n                     been 'set', and then we record those points as being 'set'\n                   ... etc\n\n        INPUT:\n          @param poly_fun_pairs = [ [p0, f0], [p1, f1], ...]\n\n              where fi(x,y) is a function returning quantity values at points,\n                or any of the special cases below\n\n              SPECIAL fi CASES:\n              fi = a constant in which case points in the polygon are\n                   set to that value,\n              fi = a .txt or .csv file name containing x, y, z data,\n                     with comma separators and an optional header row\n                     containing letters (nearest neighbour interpolation is used)\n              fi = a string rasterFile name (not ending in .txt or .csv)\n                    which can be passed to quantityRasterFun to make a function\n              fi = a numpy array with 3 columns (x,y,Value) in which case\n                   nearest-neighbour interpolation is used on the points\n\n              pi are polygons where we want to use fi inside\n              (anuga polygon format) or any of the special cases below\n              SPECIAL pi CASES:\n              If pi is a filename ending in .shp or a csv format that\n                anuga.read_polygon can read, we assume it contains a polygon\n                we have to read\n              If any pi = 'All', then we assume that ALL unset points are set\n                 using the function. 
This CAN ONLY happen in the last [fi,pi]\n                 pair where pi is not None (since fi will be applied to\n                 all remaining points -- so anything else is probably an\n                 input mistake)\n              If any pi = None, then that [fi,pi] pair is skipped\n              If pi = 'Extent' and fi is the name of a raster file, then the\n                extent of the raster file is used to define the polygon\n\n          @param domain = ANUGA domain object\n\n          @param clip_range = List with the same length as poly_fun_pairs,\n                 of the form:\n                 [ [min0, max0], [min1, max1], ...]\n                 After f0 is applied in p0, its values will be 'clipped' to the\n                 range\n                    [min0, max0]\n                 , and similarly for the other fi\n\n          @param nan_treatment = 'exception' or 'fall_through' -- string determining\n                what to do if F(x,y) is nan. The default 'exception' raises an exception.\n                The value 'fall_through' allows the function to try lower-priority\n                poly,fun pairs (in sequence) to set the value.\n\n          @param nan_interpolation_region_polygon = None, or 'All', or a list\n                of csv or shp filenames containing polygons, or a list of\n                anuga polygon objects.\n\n                If it is not None, then all x,y points which evaluate to nan\n                on their **first preference** dataset are recorded, and as a\n                final step, the values at these x,y points\n                **which are inside the nan_interpolation_region_polygon**\n                are interpolated from the other x,y,F(x,y) values.\n\n                Nearest neighbour interpolation is used, with\n                k_nearest_neighbours taken from default_k_nearest_neighbours.\n\n                Note that if nan_treatment = 'exception', then nan's will cause\n                exceptions earlier on in this routine, so you will need\n                nan_treatment = 'fall_through' to use this option.\n\n                Example of why you might want this:\n                    Say you have 2 elevation datasets (one defining the\n                    topography above MSL, and the other defining the topography\n                    below MSL). There might be small nan gaps between them,\n                    which you would like to fill with interpolation. That\n                    can be done with this option, by including the nan regions\n                    in one of the elevation-dataset-polygons pi.\n\n          @param default_k_nearest_neighbours = integer >=1 . The value of\n                k_nearest_neighbours passed to\n                make_nearestNeighbour_quantity_function when a 'special_case'\n                value of fi is passed in (either a point array or a .txt or\n                .csv point file), or when nan_interpolation_region_polygon is\n                not None\n\n          @param default_raster_interpolation = 'pixel' or 'bilinear'. The value of\n                'interpolation' passed to quantityRasterFun if a raster filename\n                is passed as one of the fi.\n\n          @param verbose TRUE\/FALSE Print more information\n\n        OUTPUT: A function F(x,y) which can be used e.g. 
to set the quantity\n                domain.set_quantity('elevation', F)\n\n    \"\"\"\n    import os\n    import numpy\n    from anuga.geometry.polygon import inside_polygon\n\n    # Check that clip_range has the right form\n    if clip_range is not None:\n        if len(clip_range) != len(poly_fun_pairs):\n            msg = ' clip_range must be the same ' +\\\n                  'length as poly_fun_pairs, or None'\n            raise ValueError(msg)\n        # Check that min < = max\n        for i in range(len(clip_range)):\n            if clip_range[i][0] > clip_range[i][1]:\n                raise Exception('clip_range minima must be less than maxima')\n\n    def F(x, y):\n        \"\"\"This is the function returned by composite_quantity_setting_function\n           It can be passed to set_quantity\n        \"\"\"\n        isSet = numpy.zeros(len(x))  # 0\/1 - record if each point has been set\n        quantityVal = x*0 + numpy.nan  # Function return value\n\n        # Record points which evaluated to nan on their first preference\n        # dataset.\n        was_ever_nan = (x*0).astype(int)\n\n        lpf = len(poly_fun_pairs)\n        if(lpf <= 0):\n            raise Exception('Must have at least 1 fun-poly-pair')\n\n        # Make an array of 'transformed' spatial coordinates, for checking\n        # polygon inclusion\n        xll = domain.geo_reference.xllcorner\n        yll = domain.geo_reference.yllcorner\n        xy_array_trans = numpy.vstack([x+xll, y+yll]).transpose()\n\n        # Check that none of the pi polygons [except perhaps the last] is 'All'\n        for i in range(lpf-1):\n            if(poly_fun_pairs[i][0] == 'All'):\n                # This is only ok if all the othe poly_fun_pairs are None\n                remaining_poly_fun_pairs_are_None = \\\n                    [poly_fun_pairs[j][0] is None for j in range(i+1, lpf)]\n                if(not all(remaining_poly_fun_pairs_are_None)):\n                    raise Exception('Can only have the last polygon = All')\n\n        # Main Loop\n        # Apply the fi inside the pi\n        for i in range(lpf):\n            fi = poly_fun_pairs[i][1]  # The function\n            pi = poly_fun_pairs[i][0]  # The polygon\n\n            # Quick exit\n            if(pi is None):\n                continue\n\n            ###################################################################\n            # Get indices fInds of points in polygon pi which are not already\n            # set\n            ###################################################################\n            if(pi == 'All'):\n                # Get all unset points\n                fInside = (1 - isSet)\n                fInds = (fInside == 1).nonzero()[0]\n\n            else:\n                if pi == 'Extent':\n                    # Here fi MUST be a gdal-compatible raster\n                    if not isinstance(fi, str):\n                        msg = ' pi = \"Extent\" can only be used when fi is a' +\\\n                              ' raster file name'\n                        raise Exception(msg)\n\n                    if not os.path.exists(fi):\n                        msg = 'fi ' + str(fi) + ' is supposed to be a ' +\\\n                              ' raster filename, but it could not be found'\n                        raise Exception(msg)\n\n                    # Then we get the extent from the raster itself\n                    pi_path = su.getRasterExtent(fi, asPolygon=True)\n\n                    if verbose:\n                        print('Extracting extent from 
raster: ', fi)\n                        print('Extent: ', pi_path)\n\n                elif (type(pi) == str) and os.path.isfile(pi):\n                    # pi is a file\n                    pi_path = su.read_polygon(pi)\n\n                else:\n                    # pi is the actual polygon data\n                    pi_path = pi\n\n                # Get the insides of unset points inside pi_path\n                notSet = (isSet == 0.).nonzero()[0]\n                fInds = inside_polygon(xy_array_trans[notSet, :], pi_path)\n                fInds = notSet[fInds]\n\n            if len(fInds) == 0:\n                # No points found, move on\n                continue\n\n            ###################################################################\n            # Evaluate fi at the points inside pi\n            ###################################################################\n\n            # We use various tricks to infer whether fi is a function,\n            # a constant, a file (raster or csv), or an array\n            if hasattr(fi, '__call__'):\n                # fi is a function or a callable object\n                quantityVal[fInds] = fi(x[fInds], y[fInds])\n\n            elif isinstance(fi, (int, int, float)):\n                # fi is a numerical constant\n                quantityVal[fInds] = fi*1.0\n\n            elif type(fi) is str and os.path.exists(fi):\n                # fi is a file which is assumed to be\n                # a gdal-compatible raster OR an x,y,z elevation file\n                if os.path.splitext(fi)[1] in ['.txt', '.csv']:\n                    fi_array = su.read_csv_optional_header(fi)\n                    # Check the results\n                    if fi_array.shape[1] != 3:\n                        print('Treated input file ' + fi +\n                              ' as xyz array with an optional header')\n                        msg = 'Array should have 3 columns -- x,y,value'\n                        raise Exception(msg)\n\n                    newfi = make_nearestNeighbour_quantity_function(\n                        fi_array, domain,\n                        k_nearest_neighbours=default_k_nearest_neighbours)\n                    quantityVal[fInds] = newfi(x[fInds], y[fInds])\n\n                else:\n                    # Treating input file as a raster\n                    newfi = quantityRasterFun(domain, fi,\n                                              interpolation=default_raster_interpolation)\n                    quantityVal[fInds] = newfi(x[fInds], y[fInds])\n\n            elif type(fi) is numpy.ndarray:\n                if fi.shape[1] != 3:\n                    msg = 'Array should have 3 columns -- x,y,value'\n                    raise Exception(msg)\n                newfi = make_nearestNeighbour_quantity_function(fi, domain,\n                                                                k_nearest_neighbours=default_k_nearest_neighbours)\n                quantityVal[fInds] = newfi(x[fInds], y[fInds])\n\n            else:\n                print('ERROR: with function from ' + fi)\n                msg = 'Cannot make function from type ' + str(type(fi))\n                raise Exception(msg)\n\n            ###################################################################\n            # Check for nan values\n            ###################################################################\n            #nan_flag = (quantityVal[fInds] != quantityVal[fInds])\n            nan_flag = 1*numpy.isnan(quantityVal[fInds])\n            nan_inds = 
nan_flag.nonzero()[0]\n            was_ever_nan[fInds[nan_inds]] = 1\n\n            if len(nan_inds) > 0:\n                if nan_treatment == 'exception':\n                    msg = 'nan values generated by the poly_fun_pair at '\\\n                          'index ' + str(i) + ' '\\\n                          'in composite_quantity_setting_function. ' + \\\n                          'To allow these values to be set by later ' + \\\n                          'poly_fun pairs, pass the argument ' + \\\n                          'nan_treatment=\"fall_through\" ' + \\\n                          'to composite_quantity_setting_function'\n                    raise Exception(msg)\n\n                elif nan_treatment == 'fall_through':\n                    msg = 'WARNING: nan values generated by the ' + \\\n                          'poly_fun_pair at index ' + str(i) + ' '\\\n                          'in composite_quantity_setting_function. ' + \\\n                          'They will be passed to later poly_fun_pairs'\n                    if verbose:\n                        print(msg)\n                    not_nan_inds = (1-nan_flag).nonzero()[0]\n\n                    if len(not_nan_inds) > 0:\n                        fInds = fInds[not_nan_inds]\n                    else:\n                        # All values are nan\n                        msg = '( Actually all the values were nan - ' + \\\n                              'Are you sure they should be? Possible error?)'\n                        if verbose:\n                            print(msg)\n                        continue\n\n                else:\n                    msg = 'Found nan values in ' + \\\n                          'composite_quantity_setting_function but ' + \\\n                          'nan_treatment is not a recognized value'\n                    raise Exception(msg)\n\n            # Record that the points have been set\n            isSet[fInds] = 1\n\n            # Enforce clip_range\n            if clip_range is not None:\n                lower_bound = clip_range[i][0]\n                upper_bound = clip_range[i][1]\n                quantityVal[fInds] = numpy.maximum(\n                    quantityVal[fInds], lower_bound)\n                quantityVal[fInds] = numpy.minimum(\n                    quantityVal[fInds], upper_bound)\n\n        # End of loop\n\n        # Find points which were nan on their first preference dataset + are\n        # inside nan_interpolation_region_polygon. 
Then reinterpolate their\n        # values from the other x,y, quantityVal points.\n        if (nan_interpolation_region_polygon is not None) &\\\n           (was_ever_nan.sum() > 0):\n            if nan_interpolation_region_polygon == 'All':\n                points_to_reinterpolate = was_ever_nan.nonzero()[0]\n            else:\n                # nan_interpolation_region_polygon contains information on 1 or\n                # more polygons\n                # Inside those polygons, we need to re-interpolate points which\n                # first evaluted to na\n                possible_points_to_reint = was_ever_nan.nonzero()[0]\n                points_to_reinterpolate = numpy.array([]).astype(int)\n\n                for i in range(len(nan_interpolation_region_polygon)):\n                    nan_pi = nan_interpolation_region_polygon[i]\n\n                    # Ensure nan_pi = list of x,y points making a polygon\n                    if(type(nan_pi) == str):\n                        nan_pi = su.read_polygon(nan_pi)\n\n                    points_in_nan_pi = inside_polygon(\n                        xy_array_trans[possible_points_to_reint, :],\n                        nan_pi)\n\n                    if len(points_in_nan_pi) > 0:\n                        points_to_reinterpolate = numpy.hstack(\n                            [points_to_reinterpolate,\n                             possible_points_to_reint[points_in_nan_pi]])\n\n            if verbose:\n                print('Re-interpolating ', len(points_to_reinterpolate),\n                      ' points which were nan under their',\n                      ' first-preference and are inside the',\n                      ' nan_interpolation_region_polygon')\n\n            if len(points_to_reinterpolate) > 0:\n                msg = 'WARNING: nan interpolation is being applied. This ',\\\n                      'should be done in serial prior to distributing the ',\\\n                      'domain, as there is no parallel communication ',\\\n                      'implemented yet [so parallel results might depend on ',\\\n                      'the number of processes]'\n                if verbose:\n                    print(msg)\n\n            # Find the interpolation points = points not needing reinterpolation\n            ip = x*0 + 1\n            ip[points_to_reinterpolate] = 0\n            number_of_ip = ip.sum()\n            ip = ip.nonzero()[0]\n\n            # Check that none of the ip points has an nan value\n            nan_ip = (quantityVal[ip] != quantityVal[ip]).nonzero()[0]\n\n            if len(nan_ip) > 0:\n                print('There are ', len(nan_ip), ' points outside the ',\n                      'nan_interpolation_region_polygon have nan values.')\n                print('The user should ensure this does not happen.')\n                print('The points have the following coordinates:')\n                print(xy_array_trans[ip[nan_ip], :])\n                msg = \"There are nan points outside of \" +\\\n                      \"nan_interpolation_region_polygon, even after all \" +\\\n                      \"fall-through's\"\n                raise Exception(msg)\n\n            if(number_of_ip < default_k_nearest_neighbours):\n                raise Exception('Too few non-nan points to interpolate from')\n\n            # Make function for re-interpolation. 
Note this requires\n            # x,y,z in georeferenced coordinates, whereas x,y are ANUGA\n            # coordinates\n            reinterp_F = make_nearestNeighbour_quantity_function(\n                numpy.vstack([xy_array_trans[ip, 0], xy_array_trans[ip, 1],\n                              quantityVal[ip]]).transpose(),\n                domain,\n                k_nearest_neighbours=default_k_nearest_neighbours)\n\n            # re-interpolate\n            quantityVal[points_to_reinterpolate] = reinterp_F(\n                x[points_to_reinterpolate], y[points_to_reinterpolate])\n\n            isSet[points_to_reinterpolate] = 1\n\n        # Check there are no remaining nan values\n        if(min(isSet) != 1):\n            print('Some points remain as nan, which is not allowed')\n            unset_inds = (isSet != 1).nonzero()[0]\n            lui = min(5, len(unset_inds))\n            print('There are ', len(unset_inds), ' such points')\n            print('Here are a few:')\n            for i in range(lui):\n                print(x[unset_inds[i]] + xll, y[unset_inds[i]] + yll)\n            raise Exception('It seems the input data needs to be fixed')\n\n        return quantityVal\n        # END OF FUNCTION F(x,y)\n\n    return F\n\n##############################################################################\n\n\ndef quantityRasterFun(domain, rasterFile, interpolation='pixel'):\n    \"\"\"\n    Make a function whick takes x,y in ANUGA coordinates, and returns the values\n    on a raster rasterFile\n\n    This can be used to set a quantity, and takes care of the manual conversion\n    from ANUGA coordinates to spatial coordinates.\n\n    INPUTS: @param domain = ANUGA domain\n            @param rasterFile = Filename of the raster to extract point values\n                    from\n            @param interpolation = 'pixel' (in which case the point value is\n                    set from the pixel it is on) or 'bilinear' in which case\n                    the point value is set from bilinear interpolation of\n                    pixels.\n\n    OUTPUT: Function which takes x,y in ANUGA coordinates, and outputs their\n            corresponding raster values\n    \"\"\"\n    import scipy\n    # import numpy as NearestNDInterpolator  # FIXME (Ole): What?\n    import numpy as np\n\n    from anuga.utilities.spatialInputUtil import rasterValuesAtPoints\n\n    def QFun(x, y):\n        xll = domain.geo_reference.xllcorner\n        yll = domain.geo_reference.yllcorner\n        inDat = np.vstack([x+xll, y+yll]).transpose()\n        return rasterValuesAtPoints(xy=inDat, rasterFile=rasterFile,\n                                    interpolation=interpolation)\n\n    return QFun\n\n#################################################################################\n\n\ndef quantity_from_Pt_Pol_Data_and_Raster(Pt_Pol_Data, quantity_raster, domain):\n    \"\"\"\n        Function to make a function F(x,y) which returns the corresponding\n        values on quantity_raster, except if x,y is inside the polygon associated with\n        any element of Pt_Pol_Data, in which case a Pt_Pol_-specific nearest neighbour\n        interpolator is used.\n\n        This has been superceeded by composite_quantity_setting_function\n\n        INPUT:\n            @param Pt_Pol_Data = a list with [ [ Polygon_0, Pt_XYZ_0],\n                                               [ Polygon_1, Pt_XYZ_1],\n                                               ...\n                                             ]\n                    Here Polygon_i is a 
polygon in ANUGA format,\n                    and Pt_XYZ_i is a 3 column array of x,y,Value points\n            @param quantity_raster = A GDAL-compatible quantity raster\n            @param domain = ANUGA domain\n    \"\"\"\n\n    # Function to set quantity from raster\n    qFun1 = quantityRasterFun(domain, rasterFile=quantity_raster)\n\n    # List of function\/polygon pairs defining the Pt_Pol_ quantity data\n    qFunChanList = []\n    for i in range(len(Pt_Pol_Data)):\n        qFunChanList.append([\n            Pt_Pol_Data[i][0],\n            make_nearestNeighbour_quantity_function(Pt_Pol_Data[i][1], domain)\n        ])\n\n    #\n    qFun = composite_quantity_setting_function(\n        qFunChanList+[['All', qFun1]], domain)\n\n    return qFun\n"}
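The helpers in the module above are built around one core pattern: query a cKDTree for the k nearest data points, combine their values with inverse-distance weights, and fall back to a background value when even the nearest point lies beyond a threshold distance. A self-contained sketch of that pattern (the sample points are hypothetical and not taken from the module; the 1.0e-100 regulariser mirrors the one in make_nearestNeighbour_quantity_function, and k >= 2 is assumed so that cKDTree.query returns 2-D arrays):

import numpy as np
from scipy.spatial import cKDTree

# Hypothetical scattered data: x, y coordinates and an associated value.
data_xy  = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
data_val = np.array([10.0, 20.0, 30.0])
tree = cKDTree(data_xy)

def idw_lookup(x, y, k=2, threshold_distance=5.0, background_value=np.nan):
    pts = np.column_stack([np.atleast_1d(x), np.atleast_1d(y)])
    dist, idx = tree.query(pts, k=k)      # k nearest neighbours per query point
    weights = 1.0 / (dist + 1.0e-100)     # inverse-distance weights
    out = (data_val[idx] * weights).sum(axis=1) / weights.sum(axis=1)
    out[dist[:, 0] > threshold_distance] = background_value
    return out

# First point is interpolated; the second lies beyond the threshold distance.
print(idw_lookup([0.1, 10.0], [0.1, 10.0]))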
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_281","text":"nkran\/malariagen-data-python\nimport os\nimport random\nimport shutil\n\nimport dask.array as da\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport scipy.stats\nimport xarray as xr\nimport zarr\nfrom numpy.testing import assert_allclose, assert_array_equal\nfrom pandas.testing import assert_frame_equal\n\nfrom malariagen_data import Ag3, Region\nfrom malariagen_data.ag3 import _cn_mode\nfrom malariagen_data.util import locate_region, resolve_region\n\nexpected_species_legacy = {\n    \"gambiae\",\n    \"coluzzii\",\n    \"arabiensis\",\n    \"intermediate_arabiensis_gambiae\",\n    \"intermediate_gambiae_coluzzii\",\n}\n\nexpected_species = {\n    \"gambiae\",\n    \"coluzzii\",\n    \"arabiensis\",\n    \"intermediate_gambcolu_arabiensis\",\n    \"intermediate_gambiae_coluzzii\",\n}\n\ncontigs = \"2R\", \"2L\", \"3R\", \"3L\", \"X\"\n\ncohort_cols = (\n    \"country_iso\",\n    \"admin1_name\",\n    \"admin1_iso\",\n    \"admin2_name\",\n    \"taxon\",\n    \"cohort_admin1_year\",\n    \"cohort_admin1_month\",\n    \"cohort_admin2_year\",\n    \"cohort_admin2_month\",\n)\n\n\ndef setup_ag3(url=\"simplecache::gs:\/\/vo_agam_release\/\", **kwargs):\n    kwargs.setdefault(\"check_location\", False)\n    kwargs.setdefault(\"show_progress\", False)\n    if url is None:\n        # test default URL\n        return Ag3(**kwargs)\n    if url.startswith(\"simplecache::\"):\n        # configure the directory on the local file system to cache data\n        kwargs[\"simplecache\"] = dict(cache_storage=\"gcs_cache\")\n    return Ag3(url, **kwargs)\n\n\n@pytest.mark.parametrize(\n    \"url\",\n    [\n        None,\n        \"gs:\/\/vo_agam_release\/\",\n        \"gcs:\/\/vo_agam_release\/\",\n        \"gs:\/\/vo_agam_release\",\n        \"gcs:\/\/vo_agam_release\",\n        \"simplecache::gs:\/\/vo_agam_release\/\",\n        \"simplecache::gcs:\/\/vo_agam_release\/\",\n    ],\n)\ndef test_sample_sets(url):\n\n    ag3 = setup_ag3(url)\n    df_sample_sets_v3 = ag3.sample_sets(release=\"3.0\")\n    assert isinstance(df_sample_sets_v3, pd.DataFrame)\n    assert len(df_sample_sets_v3) == 28\n    assert tuple(df_sample_sets_v3.columns) == (\"sample_set\", \"sample_count\", \"release\")\n\n    # test duplicates not allowed\n    with pytest.raises(ValueError):\n        ag3.sample_sets(release=[\"3.0\", \"3.0\"])\n\n    # test default is all public releases\n    df_default = ag3.sample_sets()\n    df_all = ag3.sample_sets(release=ag3.releases)\n    assert_frame_equal(df_default, df_all)\n\n\ndef test_releases():\n\n    ag3 = setup_ag3()\n    assert isinstance(ag3.releases, tuple)\n    assert ag3.releases == (\"3.0\",)\n\n    ag3 = setup_ag3(pre=True)\n    assert isinstance(ag3.releases, tuple)\n    assert len(ag3.releases) > 1\n    assert all([r.startswith(\"3.\") for r in ag3.releases])\n\n\ndef test_sample_metadata():\n\n    ag3 = setup_ag3()\n    df_sample_sets_v3 = ag3.sample_sets(release=\"3.0\")\n\n    expected_cols = (\n        \"sample_id\",\n        \"partner_sample_id\",\n        \"contributor\",\n        \"country\",\n        \"location\",\n        \"year\",\n        \"month\",\n        \"latitude\",\n        \"longitude\",\n        \"sex_call\",\n        \"sample_set\",\n        \"release\",\n    )\n\n    # all v3\n    df_samples_v3 = ag3.sample_metadata(sample_sets=\"3.0\")\n    assert tuple(df_samples_v3.columns[: len(expected_cols)]) == expected_cols\n    expected_len = 
df_sample_sets_v3[\"sample_count\"].sum()\n    assert len(df_samples_v3) == expected_len\n\n    # single sample set\n    df_samples_x = ag3.sample_metadata(sample_sets=\"AG1000G-X\")\n    assert tuple(df_samples_x.columns[: len(expected_cols)]) == expected_cols\n    expected_len = df_sample_sets_v3.query(\"sample_set == 'AG1000G-X'\")[\n        \"sample_count\"\n    ].sum()\n    assert len(df_samples_x) == expected_len\n\n    # multiple sample sets\n    sample_sets = [\"AG1000G-BF-A\", \"AG1000G-BF-B\", \"AG1000G-BF-C\"]\n    df_samples_bf = ag3.sample_metadata(sample_sets=sample_sets)\n    assert tuple(df_samples_bf.columns[: len(expected_cols)]) == expected_cols\n    loc_sample_sets = df_sample_sets_v3[\"sample_set\"].isin(sample_sets)\n    expected_len = df_sample_sets_v3.loc[loc_sample_sets][\"sample_count\"].sum()\n    assert len(df_samples_bf) == expected_len\n\n    # duplicate sample sets\n    with pytest.raises(ValueError):\n        ag3.sample_metadata(sample_sets=[\"3.0\", \"3.0\"])\n    with pytest.raises(ValueError):\n        ag3.sample_metadata(sample_sets=[\"AG1000G-UG\", \"AG1000G-UG\"])\n    with pytest.raises(ValueError):\n        ag3.sample_metadata(sample_sets=[\"AG1000G-UG\", \"3.0\"])\n\n    # default is all public releases\n    df_default = ag3.sample_metadata()\n    df_all = ag3.sample_metadata(sample_sets=ag3.releases)\n    assert_frame_equal(df_default, df_all)\n\n\ndef test_sample_metadata_with_aim_species():\n    ag3 = setup_ag3(species_analysis=\"aim_20220528\")\n\n    expected_cols = (\n        \"sample_id\",\n        \"partner_sample_id\",\n        \"contributor\",\n        \"country\",\n        \"location\",\n        \"year\",\n        \"month\",\n        \"latitude\",\n        \"longitude\",\n        \"sex_call\",\n        \"sample_set\",\n        \"release\",\n        \"aim_species_fraction_arab\",\n        \"aim_species_fraction_colu\",\n        \"aim_species_fraction_colu_no2l\",\n        \"aim_species_gambcolu_arabiensis\",\n        \"aim_species_gambiae_coluzzii\",\n        \"aim_species\",\n    )\n\n    # AIM species calls, included by default\n    df_samples_aim = ag3.sample_metadata(sample_sets=\"3.0\")\n    assert tuple(df_samples_aim.columns[: len(expected_cols)]) == expected_cols\n    assert set(df_samples_aim[\"aim_species\"].dropna()) == expected_species\n\n\ndef test_sample_metadata_with_aim_species_legacy():\n    # TODO this is legacy, deprecate at some point\n    ag3 = setup_ag3(species_analysis=\"aim_20200422\")\n\n    expected_cols = (\n        \"sample_id\",\n        \"partner_sample_id\",\n        \"contributor\",\n        \"country\",\n        \"location\",\n        \"year\",\n        \"month\",\n        \"latitude\",\n        \"longitude\",\n        \"sex_call\",\n        \"sample_set\",\n        \"release\",\n        \"aim_species_fraction_colu\",\n        \"aim_species_fraction_arab\",\n        \"aim_species_gambcolu_arabiensis\",\n        \"aim_species_gambiae_coluzzii\",\n        \"aim_species\",\n    )\n\n    # AIM species calls, included by default\n    df_samples_aim = ag3.sample_metadata(sample_sets=\"3.0\")\n    assert tuple(df_samples_aim.columns[: len(expected_cols)]) == expected_cols\n    assert set(df_samples_aim[\"aim_species\"].dropna()) == expected_species_legacy\n\n\ndef test_sample_metadata_with_pca_species():\n    # TODO this is legacy, deprecate at some point\n    ag3 = setup_ag3(species_analysis=\"pca_20200422\")\n\n    expected_cols = (\n        \"sample_id\",\n        \"partner_sample_id\",\n        
\"contributor\",\n        \"country\",\n        \"location\",\n        \"year\",\n        \"month\",\n        \"latitude\",\n        \"longitude\",\n        \"sex_call\",\n        \"sample_set\",\n        \"release\",\n        \"pca_species_pc1\",\n        \"pca_species_pc2\",\n        \"pca_species_gambcolu_arabiensis\",\n        \"pca_species_gambiae_coluzzii\",\n        \"pca_species\",\n    )\n\n    # PCA species calls\n    df_samples_pca = ag3.sample_metadata(sample_sets=\"3.0\")\n    assert tuple(df_samples_pca.columns[: len(expected_cols)]) == expected_cols\n    assert (\n        set(df_samples_pca[\"pca_species\"].dropna()).difference(expected_species_legacy)\n        == set()\n    )\n\n\ndef test_sample_metadata_with_cohorts():\n    ag3 = setup_ag3()\n    df_samples_coh = ag3.sample_metadata(sample_sets=\"3.0\")\n    for c in cohort_cols:\n        assert c in df_samples_coh\n\n\ndef test_sample_metadata_without_cohorts():\n    working_dir = os.path.dirname(os.path.abspath(__file__))\n    test_data_path = os.path.join(working_dir, \"anopheles_test_data\")\n    ag3 = Ag3(test_data_path)\n    df_samples_coh = ag3.sample_metadata(sample_sets=\"3.0\")\n    for c in cohort_cols:\n        assert c in df_samples_coh\n        assert df_samples_coh[c].isnull().all()\n\n\n@pytest.mark.parametrize(\n    \"sample_sets\",\n    [\n        \"AG1000G-AO\",\n        \"AG1000G-X\",\n        [\"AG1000G-BF-A\", \"AG1000G-BF-B\"],\n        \"3.0\",\n        None,\n    ],\n)\n@pytest.mark.parametrize(\"analysis\", [\"aim_20220528\", \"aim_20200422\", \"pca_20200422\"])\ndef test_species_calls(sample_sets, analysis):\n    ag3 = setup_ag3(species_analysis=analysis)\n    df_samples = ag3.sample_metadata(sample_sets=sample_sets)\n    df_species = ag3.species_calls(sample_sets=sample_sets)\n    assert len(df_species) == len(df_samples)\n    assert_array_equal(df_samples[\"sample_id\"], df_species[\"sample_id\"])\n    if analysis == \"aim_20220528\":\n        assert (\n            set(df_species[\"aim_species\"].dropna()).difference(expected_species)\n            == set()\n        )\n    if analysis == \"aim_20200422\":\n        assert (\n            set(df_species[\"aim_species\"].dropna()).difference(expected_species_legacy)\n            == set()\n        )\n    if analysis == \"pca_20200422\":\n        assert (\n            set(df_species[\"pca_species\"].dropna()).difference(expected_species_legacy)\n            == set()\n        )\n\n\n@pytest.mark.parametrize(\"mask\", [\"gamb_colu_arab\", \"gamb_colu\", \"arab\"])\ndef test_open_site_filters(mask):\n    # check can open the zarr directly\n    ag3 = setup_ag3()\n    root = ag3.open_site_filters(mask=mask)\n    assert isinstance(root, zarr.hierarchy.Group)\n    for contig in ag3.contigs:\n        assert contig in root\n\n\n@pytest.mark.parametrize(\"mask\", [\"gamb_colu_arab\", \"gamb_colu\", \"arab\"])\n@pytest.mark.parametrize(\n    \"region\", [\"2R\", [\"3R\", \"3L\", \"2R:48,714,463-48,715,355\", \"AGAP007280\"]]\n)\ndef test_site_filters(mask, region):\n    ag3 = setup_ag3()\n    filter_pass = ag3.site_filters(region=region, mask=mask)\n    assert isinstance(filter_pass, da.Array)\n    assert filter_pass.ndim == 1\n    assert filter_pass.dtype == bool\n\n\ndef test_open_snp_sites():\n    ag3 = setup_ag3()\n    root = ag3.open_snp_sites()\n    assert isinstance(root, zarr.hierarchy.Group)\n    for contig in ag3.contigs:\n        assert contig in root\n\n\n@pytest.mark.parametrize(\"chunks\", [\"auto\", \"native\"])\n@pytest.mark.parametrize(\n    
\"region\", [\"2R\", [\"3R\", \"2R:48,714,463-48,715,355\", \"AGAP007280\"]]\n)\ndef test_snp_sites(chunks, region):\n\n    ag3 = setup_ag3()\n\n    pos = ag3.snp_sites(region=region, field=\"POS\", chunks=chunks)\n    ref = ag3.snp_sites(region=region, field=\"REF\", chunks=chunks)\n    alt = ag3.snp_sites(region=region, field=\"ALT\", chunks=chunks)\n    assert isinstance(pos, da.Array)\n    assert pos.ndim == 1\n    assert pos.dtype == \"i4\"\n    assert isinstance(ref, da.Array)\n    assert ref.ndim == 1\n    assert ref.dtype == \"S1\"\n    assert isinstance(alt, da.Array)\n    assert alt.ndim == 2\n    assert alt.dtype == \"S1\"\n    assert pos.shape[0] == ref.shape[0] == alt.shape[0]\n\n    # apply site mask\n    filter_pass = ag3.site_filters(region=region, mask=\"gamb_colu_arab\").compute()\n    n_pass = np.count_nonzero(filter_pass)\n    pos_pass = ag3.snp_sites(\n        region=region, field=\"POS\", site_mask=\"gamb_colu_arab\", chunks=chunks\n    )\n    assert isinstance(pos_pass, da.Array)\n    assert pos_pass.ndim == 1\n    assert pos_pass.dtype == \"i4\"\n    assert pos_pass.shape[0] == n_pass\n    assert pos_pass.compute().shape == pos_pass.shape\n    for f in \"POS\", \"REF\", \"ALT\":\n        d = ag3.snp_sites(\n            region=region, site_mask=\"gamb_colu_arab\", field=f, chunks=chunks\n        )\n        assert isinstance(d, da.Array)\n        assert d.shape[0] == n_pass\n        assert d.shape == d.compute().shape\n\n\ndef test_open_snp_genotypes():\n    # check can open the zarr directly\n    ag3 = setup_ag3()\n    root = ag3.open_snp_genotypes(sample_set=\"AG1000G-AO\")\n    assert isinstance(root, zarr.hierarchy.Group)\n    for contig in ag3.contigs:\n        assert contig in root\n\n\n@pytest.mark.parametrize(\"chunks\", [\"auto\", \"native\"])\n@pytest.mark.parametrize(\n    \"sample_sets\",\n    [None, \"AG1000G-X\", [\"AG1000G-BF-A\", \"AG1000G-BF-B\"], \"3.0\"],\n)\n@pytest.mark.parametrize(\n    \"region\", [\"2R\", [\"3R\", \"2R:48,714,463-48,715,355\", \"AGAP007280\"]]\n)\ndef test_snp_genotypes(chunks, sample_sets, region):\n\n    ag3 = setup_ag3()\n\n    df_samples = ag3.sample_metadata(sample_sets=sample_sets)\n    gt = ag3.snp_genotypes(region=region, sample_sets=sample_sets, chunks=chunks)\n    assert isinstance(gt, da.Array)\n    assert gt.ndim == 3\n    assert gt.dtype == \"i1\"\n    assert gt.shape[1] == len(df_samples)\n\n    # specific fields\n    x = ag3.snp_genotypes(\n        region=region, sample_sets=sample_sets, field=\"GT\", chunks=chunks\n    )\n    assert isinstance(x, da.Array)\n    assert x.ndim == 3\n    assert x.dtype == \"i1\"\n    x = ag3.snp_genotypes(\n        region=region, sample_sets=sample_sets, field=\"GQ\", chunks=chunks\n    )\n    assert isinstance(x, da.Array)\n    assert x.ndim == 2\n    assert x.dtype == \"i2\"\n    x = ag3.snp_genotypes(\n        region=region, sample_sets=sample_sets, field=\"MQ\", chunks=chunks\n    )\n    assert isinstance(x, da.Array)\n    assert x.ndim == 2\n    assert x.dtype == \"i2\"\n    x = ag3.snp_genotypes(\n        region=region, sample_sets=sample_sets, field=\"AD\", chunks=chunks\n    )\n    assert isinstance(x, da.Array)\n    assert x.ndim == 3\n    assert x.dtype == \"i2\"\n\n    # site mask\n    filter_pass = ag3.site_filters(region=region, mask=\"gamb_colu_arab\").compute()\n    gt_pass = ag3.snp_genotypes(\n        region=region,\n        sample_sets=sample_sets,\n        site_mask=\"gamb_colu_arab\",\n        chunks=chunks,\n    )\n    assert isinstance(gt_pass, da.Array)\n    
assert gt_pass.ndim == 3\n    assert gt_pass.dtype == \"i1\"\n    assert gt_pass.shape[0] == np.count_nonzero(filter_pass)\n    assert gt_pass.shape[1] == len(df_samples)\n    assert gt_pass.shape[2] == 2\n\n\n@pytest.mark.parametrize(\n    \"sample_sets\",\n    [None, \"AG1000G-X\", [\"AG1000G-BF-A\", \"AG1000G-BF-B\"], \"3.0\"],\n)\n@pytest.mark.parametrize(\n    \"region\", [\"2R\", [\"3R\", \"2R:48,714,463-48,715,355\", \"AGAP007280\"]]\n)\ndef test_snp_genotypes_chunks(sample_sets, region):\n\n    ag3 = setup_ag3()\n    gt_native = ag3.snp_genotypes(\n        region=region, sample_sets=sample_sets, chunks=\"native\"\n    )\n    gt_auto = ag3.snp_genotypes(region=region, sample_sets=sample_sets, chunks=\"auto\")\n    gt_manual = ag3.snp_genotypes(\n        region=region, sample_sets=sample_sets, chunks=(100_000, 10, 2)\n    )\n\n    assert gt_native.chunks != gt_auto.chunks\n    assert gt_auto.chunks != gt_manual.chunks\n    assert gt_manual.chunks != gt_native.chunks\n    assert gt_manual.chunks[0][0] == 100_000\n    assert gt_manual.chunks[1][0] == 10\n    assert gt_manual.chunks[2][0] == 2\n\n\ndef test_genome():\n\n    ag3 = setup_ag3()\n\n    # test the open_genome() method to access as zarr\n    genome = ag3.open_genome()\n    assert isinstance(genome, zarr.hierarchy.Group)\n    for contig in ag3.contigs:\n        assert contig in genome\n        assert genome[contig].dtype == \"S1\"\n\n    # test the genome_sequence() method to access sequences\n    for contig in ag3.contigs:\n        seq = ag3.genome_sequence(contig)\n        assert isinstance(seq, da.Array)\n        assert seq.dtype == \"S1\"\n\n\ndef test_geneset():\n\n    ag3 = setup_ag3()\n\n    # default\n    df = ag3.geneset()\n    assert isinstance(df, pd.DataFrame)\n    gff3_cols = [\n        \"contig\",\n        \"source\",\n        \"type\",\n        \"start\",\n        \"end\",\n        \"score\",\n        \"strand\",\n        \"phase\",\n    ]\n    expected_cols = gff3_cols + [\"ID\", \"Parent\", \"Name\", \"description\"]\n    assert df.columns.tolist() == expected_cols\n\n    # don't unpack attributes\n    df = ag3.geneset(attributes=None)\n    assert isinstance(df, pd.DataFrame)\n    expected_cols = gff3_cols + [\"attributes\"]\n    assert df.columns.tolist() == expected_cols\n\n\n@pytest.mark.parametrize(\n    \"region\",\n    [\"AGAP007280\", \"3R:28,000,000-29,000,000\", \"2R\", \"X\", [\"3R\", \"3L\"]],\n)\ndef test_geneset_region(region):\n\n    ag3 = setup_ag3()\n\n    df = ag3.geneset(region=region)\n    assert isinstance(df, pd.DataFrame)\n    gff3_cols = [\n        \"contig\",\n        \"source\",\n        \"type\",\n        \"start\",\n        \"end\",\n        \"score\",\n        \"strand\",\n        \"phase\",\n    ]\n    expected_cols = gff3_cols + [\"ID\", \"Parent\", \"Name\", \"description\"]\n    assert df.columns.tolist() == expected_cols\n    assert len(df) > 0\n\n    # check region\n    region = ag3.resolve_region(region)\n    if isinstance(region, Region):\n        assert np.all(df[\"contig\"].values == region.contig)\n        if region.start and region.end:\n            assert np.all(df.eval(f\"start <= {region.end} and end >= {region.start}\"))\n\n\n@pytest.mark.parametrize(\n    \"region\",\n    [\"AGAP007280\", \"2R:48714463-48715355\", \"2R\", \"X\"],\n)\n@pytest.mark.parametrize(\"mask\", [\"gamb_colu_arab\", \"gamb_colu\", \"arab\"])\ndef test_is_accessible(region, mask):\n\n    ag3 = setup_ag3()\n    # run a couple of tests\n    is_accessible = ag3.is_accessible(region=region, 
site_mask=mask)\n    assert isinstance(is_accessible, np.ndarray)\n    assert is_accessible.ndim == 1\n    assert is_accessible.shape[0] == ag3.genome_sequence(region).shape[0]\n\n\ndef test_cross_metadata():\n\n    ag3 = setup_ag3()\n    df_crosses = ag3.cross_metadata()\n    assert isinstance(df_crosses, pd.DataFrame)\n    expected_cols = [\"cross\", \"sample_id\", \"father_id\", \"mother_id\", \"sex\", \"role\"]\n    assert df_crosses.columns.tolist() == expected_cols\n\n    # check samples are in AG1000G-X\n    df_samples = ag3.sample_metadata(sample_sets=\"AG1000G-X\")\n    assert set(df_crosses[\"sample_id\"]) == set(df_samples[\"sample_id\"])\n\n    # check values\n    expected_role_values = [\"parent\", \"progeny\"]\n    assert df_crosses[\"role\"].unique().tolist() == expected_role_values\n    expected_sex_values = [\"F\", \"M\"]\n    assert df_crosses[\"sex\"].unique().tolist() == expected_sex_values\n\n\ndef test_site_annotations():\n\n    ag3 = setup_ag3()\n\n    # test access as zarr\n    root = ag3.open_site_annotations()\n    assert isinstance(root, zarr.hierarchy.Group)\n    for f in (\n        \"codon_degeneracy\",\n        \"codon_nonsyn\",\n        \"codon_position\",\n        \"seq_cls\",\n        \"seq_flen\",\n        \"seq_relpos_start\",\n        \"seq_relpos_stop\",\n    ):\n        assert f in root\n        for contig in contigs:\n            assert contig in root[f]\n\n    # test access as dask arrays\n    for region in \"2R\", \"X\", \"AGAP007280\", \"2R:48714463-48715355\":\n        for site_mask in None, \"gamb_colu_arab\":\n            pos = ag3.snp_sites(region=region, field=\"POS\", site_mask=site_mask)\n            for field in \"codon_degeneracy\", \"seq_cls\":\n                d = ag3.site_annotations(\n                    region=region, field=field, site_mask=site_mask\n                )\n                assert isinstance(d, da.Array)\n                assert d.ndim == 1\n                assert d.shape == pos.shape\n\n\n@pytest.mark.parametrize(\n    \"sample_sets\",\n    [None, \"AG1000G-X\", [\"AG1000G-BF-A\", \"AG1000G-BF-B\"], \"3.0\"],\n)\n@pytest.mark.parametrize(\n    \"region\", [\"2R\", [\"3R\", \"2R:48,714,463-48,715,355\", \"AGAP007280\"]]\n)\n@pytest.mark.parametrize(\"site_mask\", [None, \"gamb_colu_arab\"])\ndef test_snp_calls(sample_sets, region, site_mask):\n\n    ag3 = setup_ag3()\n\n    ds = ag3.snp_calls(region=region, sample_sets=sample_sets, site_mask=site_mask)\n    assert isinstance(ds, xr.Dataset)\n\n    # check fields\n    expected_data_vars = {\n        \"variant_allele\",\n        \"variant_filter_pass_gamb_colu_arab\",\n        \"variant_filter_pass_gamb_colu\",\n        \"variant_filter_pass_arab\",\n        \"call_genotype\",\n        \"call_genotype_mask\",\n        \"call_GQ\",\n        \"call_AD\",\n        \"call_MQ\",\n    }\n    assert set(ds.data_vars) == expected_data_vars\n\n    expected_coords = {\n        \"variant_contig\",\n        \"variant_position\",\n        \"sample_id\",\n    }\n    assert set(ds.coords) == expected_coords\n\n    # check dimensions\n    assert set(ds.dims) == {\"alleles\", \"ploidy\", \"samples\", \"variants\"}\n\n    # check dim lengths\n    pos = ag3.snp_sites(region=region, field=\"POS\", site_mask=site_mask)\n    n_variants = len(pos)\n    df_samples = ag3.sample_metadata(sample_sets=sample_sets)\n    n_samples = len(df_samples)\n    assert ds.dims[\"variants\"] == n_variants\n    assert ds.dims[\"samples\"] == n_samples\n    assert ds.dims[\"ploidy\"] == 2\n    assert 
ds.dims[\"alleles\"] == 4\n\n    # check shapes\n    for f in expected_coords | expected_data_vars:\n        x = ds[f]\n        assert isinstance(x, xr.DataArray)\n        assert isinstance(x.data, da.Array)\n\n        if f == \"variant_allele\":\n            assert x.ndim == 2\n            assert x.shape == (n_variants, 4)\n            assert x.dims == (\"variants\", \"alleles\")\n        elif f.startswith(\"variant_\"):\n            assert x.ndim == 1\n            assert x.shape == (n_variants,)\n            assert x.dims == (\"variants\",)\n        elif f in {\"call_genotype\", \"call_genotype_mask\"}:\n            assert x.ndim == 3\n            assert x.dims == (\"variants\", \"samples\", \"ploidy\")\n            assert x.shape == (n_variants, n_samples, 2)\n        elif f == \"call_AD\":\n            assert x.ndim == 3\n            assert x.dims == (\"variants\", \"samples\", \"alleles\")\n            assert x.shape == (n_variants, n_samples, 4)\n        elif f.startswith(\"call_\"):\n            assert x.ndim == 2\n            assert x.dims == (\"variants\", \"samples\")\n            assert x.shape == (n_variants, n_samples)\n        elif f.startswith(\"sample_\"):\n            assert x.ndim == 1\n            assert x.dims == (\"samples\",)\n            assert x.shape == (n_samples,)\n\n    # check samples\n    expected_samples = df_samples[\"sample_id\"].tolist()\n    assert ds[\"sample_id\"].values.tolist() == expected_samples\n\n    # check attributes\n    assert \"contigs\" in ds.attrs\n    assert ds.attrs[\"contigs\"] == (\"2R\", \"2L\", \"3R\", \"3L\", \"X\")\n\n    # check can set up computations\n    d1 = ds[\"variant_position\"] > 10_000\n    assert isinstance(d1, xr.DataArray)\n    d2 = ds[\"call_AD\"].sum(axis=(1, 2))\n    assert isinstance(d2, xr.DataArray)\n\n    # check compress bug\n    pos = ds[\"variant_position\"].data\n    assert pos.shape == pos.compute().shape\n\n\n@pytest.mark.parametrize(\n    \"sample_query\",\n    [None, \"taxon == 'coluzzii'\", \"taxon == 'robot'\"],\n)\ndef test_snp_calls__sample_query(sample_query):\n    ag3 = setup_ag3()\n\n    sample_sets = \"AG1000G-BF-A\"\n    df_samples = ag3.sample_metadata(sample_sets=sample_sets)\n    if sample_query is not None:\n        df_samples = df_samples.query(sample_query)\n\n    if len(df_samples) == 0:\n        with pytest.raises(ValueError):\n            ag3.snp_calls(\n                region=\"3L\", sample_sets=sample_sets, sample_query=sample_query\n            )\n\n    else:\n        ds = ag3.snp_calls(\n            region=\"3L\", sample_sets=sample_sets, sample_query=sample_query\n        )\n        assert ds.dims[\"samples\"] == len(df_samples)\n        assert_array_equal(ds[\"sample_id\"].values, df_samples[\"sample_id\"].values)\n\n\ndef test_snp_effects():\n    ag3 = setup_ag3()\n    gste2 = \"AGAP009194-RA\"\n    site_mask = \"gamb_colu\"\n    expected_fields = [\n        \"contig\",\n        \"position\",\n        \"ref_allele\",\n        \"alt_allele\",\n        \"pass_gamb_colu_arab\",\n        \"pass_gamb_colu\",\n        \"pass_arab\",\n        \"transcript\",\n        \"effect\",\n        \"impact\",\n        \"ref_codon\",\n        \"alt_codon\",\n        \"aa_pos\",\n        \"ref_aa\",\n        \"alt_aa\",\n        \"aa_change\",\n    ]\n\n    df = ag3.snp_effects(transcript=gste2, site_mask=site_mask)\n    assert isinstance(df, pd.DataFrame)\n    assert df.columns.tolist() == expected_fields\n\n    # reverse strand gene\n    assert df.shape == (2838, len(expected_fields))\n    # 
check first, second, third codon position non-syn\n    assert df.iloc[1454].aa_change == \"I114L\"\n    assert df.iloc[1446].aa_change == \"I114M\"\n    # while we are here, check all columns for a position\n    assert df.iloc[1451].position == 28598166\n    assert df.iloc[1451].ref_allele == \"A\"\n    assert df.iloc[1451].alt_allele == \"G\"\n    assert df.iloc[1451].effect == \"NON_SYNONYMOUS_CODING\"\n    assert df.iloc[1451].impact == \"MODERATE\"\n    assert df.iloc[1451].ref_codon == \"aTt\"\n    assert df.iloc[1451].alt_codon == \"aCt\"\n    assert df.iloc[1451].aa_pos == 114\n    assert df.iloc[1451].ref_aa == \"I\"\n    assert df.iloc[1451].alt_aa == \"T\"\n    assert df.iloc[1451].aa_change == \"I114T\"\n    # check syn\n    assert df.iloc[1447].aa_change == \"I114I\"\n    # check intronic\n    assert df.iloc[1197].effect == \"INTRONIC\"\n    # check 5' utr\n    assert df.iloc[2661].effect == \"FIVE_PRIME_UTR\"\n    # check 3' utr\n    assert df.iloc[0].effect == \"THREE_PRIME_UTR\"\n\n    # test forward strand gene gste6\n    gste6 = \"AGAP009196-RA\"\n    df = ag3.snp_effects(transcript=gste6, site_mask=site_mask)\n    assert isinstance(df, pd.DataFrame)\n    assert df.columns.tolist() == expected_fields\n    assert df.shape == (2829, len(expected_fields))\n\n    # check first, second, third codon position non-syn\n    assert df.iloc[701].aa_change == \"E35*\"\n    assert df.iloc[703].aa_change == \"E35V\"\n    # while we are here, check all columns for a position\n    assert df.iloc[706].position == 28600605\n    assert df.iloc[706].ref_allele == \"G\"\n    assert df.iloc[706].alt_allele == \"C\"\n    assert df.iloc[706].effect == \"NON_SYNONYMOUS_CODING\"\n    assert df.iloc[706].impact == \"MODERATE\"\n    assert df.iloc[706].ref_codon == \"gaG\"\n    assert df.iloc[706].alt_codon == \"gaC\"\n    assert df.iloc[706].aa_pos == 35\n    assert df.iloc[706].ref_aa == \"E\"\n    assert df.iloc[706].alt_aa == \"D\"\n    assert df.iloc[706].aa_change == \"E35D\"\n    # check syn\n    assert df.iloc[705].aa_change == \"E35E\"\n    # check intronic\n    assert df.iloc[900].effect == \"INTRONIC\"\n    # check 5' utr\n    assert df.iloc[0].effect == \"FIVE_PRIME_UTR\"\n    # check 3' utr\n    assert df.iloc[2828].effect == \"THREE_PRIME_UTR\"\n\n    # check 5' utr intron and the different intron effects\n    utr_intron5 = \"AGAP004679-RB\"\n    df = ag3.snp_effects(transcript=utr_intron5, site_mask=site_mask)\n    assert isinstance(df, pd.DataFrame)\n    assert df.columns.tolist() == expected_fields\n    assert df.shape == (7686, len(expected_fields))\n    assert df.iloc[180].effect == \"SPLICE_CORE\"\n    assert df.iloc[198].effect == \"SPLICE_REGION\"\n    assert df.iloc[202].effect == \"INTRONIC\"\n\n    # check 3' utr intron\n    utr_intron3 = \"AGAP000689-RA\"\n    df = ag3.snp_effects(transcript=utr_intron3, site_mask=site_mask)\n    assert isinstance(df, pd.DataFrame)\n    assert df.columns.tolist() == expected_fields\n    assert df.shape == (5397, len(expected_fields))\n    assert df.iloc[646].effect == \"SPLICE_CORE\"\n    assert df.iloc[652].effect == \"SPLICE_REGION\"\n    assert df.iloc[674].effect == \"INTRONIC\"\n\n\ndef test_snp_allele_frequencies__str_cohorts():\n    ag3 = setup_ag3(cohorts_analysis=\"20211101\")\n    cohorts = \"admin1_month\"\n    min_cohort_size = 10\n    universal_fields = [\n        \"pass_gamb_colu_arab\",\n        \"pass_gamb_colu\",\n        \"pass_arab\",\n        \"label\",\n    ]\n    df = ag3.snp_allele_frequencies(\n        
transcript=\"AGAP004707-RD\",\n        cohorts=cohorts,\n        min_cohort_size=min_cohort_size,\n        site_mask=\"gamb_colu\",\n        sample_sets=\"3.0\",\n        drop_invariant=True,\n        effects=False,\n    )\n    df_coh = ag3.sample_cohorts(sample_sets=\"3.0\")\n    coh_nm = \"cohort_\" + cohorts\n    coh_counts = df_coh[coh_nm].dropna().value_counts().to_frame()\n    cohort_labels = coh_counts[coh_counts[coh_nm] >= min_cohort_size].index.to_list()\n    frq_cohort_labels = [\"frq_\" + s for s in cohort_labels]\n    expected_fields = universal_fields + frq_cohort_labels + [\"max_af\"]\n\n    assert isinstance(df, pd.DataFrame)\n    assert sorted(df.columns.tolist()) == sorted(expected_fields)\n    assert df.index.names == [\"contig\", \"position\", \"ref_allele\", \"alt_allele\"]\n    assert len(df) == 16526\n\n\ndef test_snp_allele_frequencies__dict_cohorts():\n    ag3 = setup_ag3(cohorts_analysis=\"20211101\")\n    cohorts = {\n        \"ke\": \"country == 'Kenya'\",\n        \"bf_2012_col\": \"country == 'Burkina Faso' and year == 2012 and aim_species == 'coluzzii'\",\n    }\n    universal_fields = [\n        \"pass_gamb_colu_arab\",\n        \"pass_gamb_colu\",\n        \"pass_arab\",\n        \"label\",\n    ]\n\n    # test drop invariants\n    df = ag3.snp_allele_frequencies(\n        transcript=\"AGAP009194-RA\",\n        cohorts=cohorts,\n        site_mask=\"gamb_colu\",\n        sample_sets=\"3.0\",\n        drop_invariant=True,\n        effects=False,\n    )\n\n    assert isinstance(df, pd.DataFrame)\n    frq_columns = [\"frq_\" + s for s in list(cohorts.keys())]\n    expected_fields = universal_fields + frq_columns + [\"max_af\"]\n    assert sorted(df.columns.tolist()) == sorted(expected_fields)\n    assert df.shape == (133, len(expected_fields))\n    assert df.iloc[3].frq_ke == 0\n    assert df.iloc[4].frq_bf_2012_col == pytest.approx(0.006097, abs=1e-6)\n    assert df.iloc[4].max_af == pytest.approx(0.006097, abs=1e-6)\n    # check invariant have been dropped\n    assert df.max_af.min() > 0\n\n    # test keep invariants\n    df = ag3.snp_allele_frequencies(\n        transcript=\"AGAP004707-RD\",\n        cohorts=cohorts,\n        site_mask=\"gamb_colu\",\n        sample_sets=\"3.0\",\n        drop_invariant=False,\n        effects=False,\n    )\n    assert isinstance(df, pd.DataFrame)\n    assert sorted(df.columns.tolist()) == sorted(expected_fields)\n    assert df.shape == (132306, len(expected_fields))\n    # check invariant positions are still present\n    assert np.any(df.max_af == 0)\n\n\ndef test_snp_allele_frequencies__str_cohorts__effects():\n    ag3 = setup_ag3(cohorts_analysis=\"20211101\")\n    cohorts = \"admin1_month\"\n    min_cohort_size = 10\n    universal_fields = [\n        \"pass_gamb_colu_arab\",\n        \"pass_gamb_colu\",\n        \"pass_arab\",\n        \"label\",\n    ]\n    effects_fields = [\n        \"transcript\",\n        \"effect\",\n        \"impact\",\n        \"ref_codon\",\n        \"alt_codon\",\n        \"aa_pos\",\n        \"ref_aa\",\n        \"alt_aa\",\n    ]\n    df = ag3.snp_allele_frequencies(\n        transcript=\"AGAP004707-RD\",\n        cohorts=cohorts,\n        min_cohort_size=min_cohort_size,\n        site_mask=\"gamb_colu\",\n        sample_sets=\"3.0\",\n        drop_invariant=True,\n        effects=True,\n    )\n    df_coh = ag3.sample_cohorts(sample_sets=\"3.0\")\n    coh_nm = \"cohort_\" + cohorts\n    coh_counts = df_coh[coh_nm].dropna().value_counts().to_frame()\n    cohort_labels = 
coh_counts[coh_counts[coh_nm] >= min_cohort_size].index.to_list()\n    frq_cohort_labels = [\"frq_\" + s for s in cohort_labels]\n    expected_fields = universal_fields + frq_cohort_labels + [\"max_af\"] + effects_fields\n\n    assert isinstance(df, pd.DataFrame)\n    assert len(df) == 16526\n    assert sorted(df.columns.tolist()) == sorted(expected_fields)\n    assert df.index.names == [\n        \"contig\",\n        \"position\",\n        \"ref_allele\",\n        \"alt_allele\",\n        \"aa_change\",\n    ]\n\n\ndef test_snp_allele_frequencies__query():\n    ag3 = setup_ag3(cohorts_analysis=\"20211101\")\n    cohorts = \"admin1_year\"\n    min_cohort_size = 10\n    expected_columns = [\n        \"pass_gamb_colu_arab\",\n        \"pass_gamb_colu\",\n        \"pass_arab\",\n        \"frq_AO-LUA_colu_2009\",\n        \"max_af\",\n        \"label\",\n    ]\n\n    df = ag3.snp_allele_frequencies(\n        transcript=\"AGAP004707-RD\",\n        cohorts=cohorts,\n        sample_query=\"country == 'Angola'\",\n        min_cohort_size=min_cohort_size,\n        site_mask=\"gamb_colu\",\n        sample_sets=\"3.0\",\n        drop_invariant=True,\n        effects=False,\n    )\n\n    assert isinstance(df, pd.DataFrame)\n    assert sorted(df.columns) == sorted(expected_columns)\n    assert len(df) == 695\n\n\ndef test_snp_allele_frequencies__dup_samples():\n    ag3 = setup_ag3()\n    with pytest.raises(ValueError):\n        ag3.snp_allele_frequencies(\n            transcript=\"AGAP004707-RD\",\n            cohorts=\"admin1_year\",\n            sample_sets=[\"AG1000G-FR\", \"AG1000G-FR\"],\n        )\n\n\n@pytest.mark.parametrize(\n    \"sample_sets\",\n    [\"AG1000G-AO\", [\"AG1000G-AO\", \"AG1000G-UG\"], \"3.0\", None],\n)\n@pytest.mark.parametrize(\"region\", [\"2R\", [\"3L\", \"X\"], \"3R:28,000,000-29,000,000\"])\ndef test_cnv_hmm(sample_sets, region):\n    ag3 = setup_ag3()\n    ds = ag3.cnv_hmm(region=region, sample_sets=sample_sets, max_coverage_variance=None)\n    assert isinstance(ds, xr.Dataset)\n\n    # check fields\n    expected_data_vars = {\n        \"call_CN\",\n        \"call_NormCov\",\n        \"call_RawCov\",\n        \"sample_coverage_variance\",\n        \"sample_is_high_variance\",\n    }\n    assert set(ds.data_vars) == expected_data_vars\n\n    expected_coords = {\n        \"variant_contig\",\n        \"variant_position\",\n        \"variant_end\",\n        \"sample_id\",\n    }\n    assert set(ds.coords) == expected_coords\n\n    # check dimensions\n    assert set(ds.dims) == {\"samples\", \"variants\"}\n\n    # check dim lengths\n    if region in ag3.contigs:\n        n_variants_expected = 1 + len(ag3.genome_sequence(region=region)) \/\/ 300\n    elif isinstance(region, (tuple, list)) and all([r in ag3.contigs for r in region]):\n        n_variants_expected = sum(\n            [1 + len(ag3.genome_sequence(region=c)) \/\/ 300 for c in region]\n        )\n    else:\n        # test part of a contig region\n        region = ag3.resolve_region(region)\n        variant_contig = ds[\"variant_contig\"].values\n        contig_index = ds.attrs[\"contigs\"].index(region.contig)\n        assert np.all(variant_contig == contig_index)\n        variant_position = ds[\"variant_position\"].values\n        variant_end = ds[\"variant_end\"].values\n        assert variant_position[0] <= region.start\n        assert variant_end[0] >= region.start\n        assert variant_position[-1] <= region.end\n        assert variant_end[-1] >= region.end\n        assert np.all(variant_position <= 
region.end)\n        assert np.all(variant_end >= region.start)\n        n_variants_expected = 1 + (region.end - region.start) \/\/ 300\n\n    df_samples = ag3.sample_metadata(sample_sets=sample_sets)\n    n_samples_expected = len(df_samples)\n    assert ds.dims[\"variants\"] == n_variants_expected\n    assert ds.dims[\"samples\"] == n_samples_expected\n\n    # check sample IDs\n    assert ds[\"sample_id\"].values.tolist() == df_samples[\"sample_id\"].tolist()\n\n    # check shapes\n    for f in expected_coords | expected_data_vars:\n        x = ds[f]\n        assert isinstance(x, xr.DataArray)\n        assert isinstance(x.data, da.Array)\n\n        if f.startswith(\"variant_\"):\n            assert x.ndim == 1\n            assert x.shape == (n_variants_expected,)\n            assert x.dims == (\"variants\",)\n        elif f.startswith(\"call_\"):\n            assert x.ndim == 2\n            assert x.dims == (\"variants\", \"samples\")\n            assert x.shape == (n_variants_expected, n_samples_expected)\n        elif f.startswith(\"sample_\"):\n            assert x.ndim == 1\n            assert x.dims == (\"samples\",)\n            assert x.shape == (n_samples_expected,)\n\n    # check attributes\n    assert \"contigs\" in ds.attrs\n    assert ds.attrs[\"contigs\"] == (\"2R\", \"2L\", \"3R\", \"3L\", \"X\")\n\n    # check can set up computations\n    d1 = ds[\"variant_position\"] > 10_000\n    assert isinstance(d1, xr.DataArray)\n    d2 = ds[\"call_CN\"].sum(axis=1)\n    assert isinstance(d2, xr.DataArray)\n\n\n@pytest.mark.parametrize(\n    \"sample_query\",\n    [\n        \"taxon == 'coluzzii' and location == 'Bana Village'\",\n        \"taxon == 'gambiae' and location == 'Pala'\",\n    ],\n)\ndef test_cnv_hmm__sample_query(sample_query):\n\n    sample_sets = \"AG1000G-BF-B\"\n    region = \"3L\"\n    ag3 = setup_ag3()\n    ds = ag3.cnv_hmm(\n        region=region,\n        sample_sets=sample_sets,\n        sample_query=sample_query,\n        max_coverage_variance=None,\n    )\n    assert isinstance(ds, xr.Dataset)\n\n    # check fields\n    expected_data_vars = {\n        \"call_CN\",\n        \"call_NormCov\",\n        \"call_RawCov\",\n        \"sample_coverage_variance\",\n        \"sample_is_high_variance\",\n    }\n    assert set(ds.data_vars) == expected_data_vars\n\n    expected_coords = {\n        \"variant_contig\",\n        \"variant_position\",\n        \"variant_end\",\n        \"sample_id\",\n    }\n    assert set(ds.coords) == expected_coords\n\n    # check dimensions\n    assert set(ds.dims) == {\"samples\", \"variants\"}\n\n    # check expected samples\n    df_samples = ag3.sample_metadata(sample_sets=sample_sets).query(sample_query)\n    expected_samples = df_samples[\"sample_id\"].tolist()\n    n_samples_expected = len(expected_samples)\n    assert ds.dims[\"samples\"] == n_samples_expected\n\n    # check sample IDs\n    assert ds[\"sample_id\"].values.tolist() == df_samples[\"sample_id\"].tolist()\n\n\n@pytest.mark.parametrize(\n    \"max_coverage_variance\",\n    [0, 0.1, 0.2, 1],\n)\ndef test_cnv_hmm__max_coverage_variance(max_coverage_variance):\n\n    sample_sets = \"AG1000G-CI\"\n    region = \"3L\"\n    ag3 = setup_ag3()\n    ds = ag3.cnv_hmm(\n        region=region,\n        sample_sets=sample_sets,\n        max_coverage_variance=max_coverage_variance,\n    )\n    assert isinstance(ds, xr.Dataset)\n\n    # check fields\n    expected_data_vars = {\n        \"call_CN\",\n        \"call_NormCov\",\n        \"call_RawCov\",\n        
\"sample_coverage_variance\",\n        \"sample_is_high_variance\",\n    }\n    assert set(ds.data_vars) == expected_data_vars\n\n    expected_coords = {\n        \"variant_contig\",\n        \"variant_position\",\n        \"variant_end\",\n        \"sample_id\",\n    }\n    assert set(ds.coords) == expected_coords\n\n    # check dimensions\n    assert set(ds.dims) == {\"samples\", \"variants\"}\n\n    # check expected samples\n    cov_var = ds[\"sample_coverage_variance\"].values\n    assert np.all(cov_var <= max_coverage_variance)\n\n\n@pytest.mark.parametrize(\"sample_set\", [\"AG1000G-AO\", \"AG1000G-UG\", \"AG1000G-X\"])\n@pytest.mark.parametrize(\"analysis\", [\"gamb_colu\", \"arab\", \"crosses\"])\n@pytest.mark.parametrize(\n    \"region\", [\"3L\", \"X\", [\"2R\", \"2L\"], \"3R:28,000,000-29,000,000\"]\n)\ndef test_cnv_coverage_calls(sample_set, analysis, region):\n\n    ag3 = setup_ag3()\n\n    expected_analyses = {\n        \"AG1000G-AO\": {\"gamb_colu\"},\n        \"AG1000G-UG\": {\"gamb_colu\", \"arab\"},\n        \"AG1000G-X\": {\"crosses\"},\n    }\n    if analysis not in expected_analyses[sample_set]:\n        with pytest.raises(ValueError):\n            ag3.cnv_coverage_calls(\n                region=region, analysis=analysis, sample_set=sample_set\n            )\n        return\n\n    ds = ag3.cnv_coverage_calls(region=region, analysis=analysis, sample_set=sample_set)\n    assert isinstance(ds, xr.Dataset)\n\n    # check fields\n    expected_data_vars = {\n        \"variant_CIPOS\",\n        \"variant_CIEND\",\n        \"variant_filter_pass\",\n        \"call_genotype\",\n    }\n    assert set(ds.data_vars) == expected_data_vars\n\n    expected_coords = {\n        \"variant_contig\",\n        \"variant_position\",\n        \"variant_end\",\n        \"variant_id\",\n        \"sample_id\",\n    }\n    assert set(ds.coords) == expected_coords\n\n    # check dimensions\n    assert set(ds.dims) == {\"samples\", \"variants\"}\n\n    # check sample IDs\n    df_samples = ag3.sample_metadata(sample_sets=sample_set)\n    sample_id = pd.Series(ds[\"sample_id\"].values)\n    assert sample_id.isin(df_samples[\"sample_id\"]).all()\n\n    # check shapes\n    for f in expected_coords | expected_data_vars:\n        x = ds[f]\n        assert isinstance(x, xr.DataArray)\n        assert isinstance(x.data, da.Array)\n\n        if f.startswith(\"variant_\"):\n            assert x.ndim == 1\n            assert x.dims == (\"variants\",)\n        elif f.startswith(\"call_\"):\n            assert x.ndim == 2\n            assert x.dims == (\"variants\", \"samples\")\n        elif f.startswith(\"sample_\"):\n            assert x.ndim == 1\n            assert x.dims == (\"samples\",)\n\n    # check attributes\n    assert \"contigs\" in ds.attrs\n    assert ds.attrs[\"contigs\"] == (\"2R\", \"2L\", \"3R\", \"3L\", \"X\")\n\n    # check region\n    region = ag3.resolve_region(region)\n    if (\n        isinstance(region, Region)\n        and region.start is not None\n        and region.end is not None\n    ):\n        variant_position = ds[\"variant_position\"].values\n        variant_end = ds[\"variant_end\"].values\n        assert np.all(variant_position <= region.end)\n        assert np.all(variant_end >= region.start)\n\n    # check can set up computations\n    d1 = ds[\"variant_position\"] > 10_000\n    assert isinstance(d1, xr.DataArray)\n    d2 = ds[\"call_genotype\"].sum(axis=1)\n    assert isinstance(d2, xr.DataArray)\n\n\n@pytest.mark.parametrize(\n    \"sample_sets\",\n    [\n        
\"AG1000G-AO\",\n        \"AG1000G-UG\",\n        [\"AG1000G-AO\", \"AG1000G-UG\"],\n        \"3.0\",\n        None,\n    ],\n)\n@pytest.mark.parametrize(\"contig\", [\"2R\", \"3R\", \"X\", [\"2R\", \"3R\"]])\ndef test_cnv_discordant_read_calls(sample_sets, contig):\n\n    ag3 = setup_ag3()\n\n    if contig == \"3L\":\n        with pytest.raises(ValueError):\n            ag3.cnv_discordant_read_calls(contig=contig, sample_sets=sample_sets)\n        return\n\n    ds = ag3.cnv_discordant_read_calls(contig=contig, sample_sets=sample_sets)\n    assert isinstance(ds, xr.Dataset)\n\n    # check fields\n    expected_data_vars = {\n        \"variant_Region\",\n        \"variant_StartBreakpointMethod\",\n        \"variant_EndBreakpointMethod\",\n        \"call_genotype\",\n        \"sample_coverage_variance\",\n        \"sample_is_high_variance\",\n    }\n    assert set(ds.data_vars) == expected_data_vars\n\n    expected_coords = {\n        \"variant_contig\",\n        \"variant_position\",\n        \"variant_end\",\n        \"variant_id\",\n        \"sample_id\",\n    }\n    assert set(ds.coords) == expected_coords\n\n    # check dimensions\n    assert set(ds.dims) == {\"samples\", \"variants\"}\n\n    # check dim lengths\n    df_samples = ag3.sample_metadata(sample_sets=sample_sets)\n    n_samples = len(df_samples)\n    assert ds.dims[\"samples\"] == n_samples\n\n    expected_variants = {\"2R\": 40, \"3R\": 29, \"X\": 29}\n    if isinstance(contig, str):\n        n_variants = expected_variants[contig]\n    elif isinstance(contig, (list, tuple)):\n        n_variants = sum([expected_variants[c] for c in contig])\n    else:\n        raise NotImplementedError\n\n    assert ds.dims[\"variants\"] == n_variants\n\n    # check sample IDs\n    assert ds[\"sample_id\"].values.tolist() == df_samples[\"sample_id\"].tolist()\n\n    # check shapes\n    for f in expected_coords | expected_data_vars:\n        x = ds[f]\n        assert isinstance(x, xr.DataArray)\n        assert isinstance(x.data, da.Array)\n\n        if f.startswith(\"variant_\"):\n            assert x.ndim == 1\n            assert x.dims == (\"variants\",)\n        elif f.startswith(\"call_\"):\n            assert x.ndim == 2\n            assert x.dims == (\"variants\", \"samples\")\n        elif f.startswith(\"sample_\"):\n            assert x.ndim == 1\n            assert x.dims == (\"samples\",)\n            assert x.shape == (n_samples,)\n\n    # check attributes\n    assert \"contigs\" in ds.attrs\n    assert ds.attrs[\"contigs\"] == (\"2R\", \"2L\", \"3R\", \"3L\", \"X\")\n\n    # check can set up computations\n    d1 = ds[\"variant_position\"] > 10_000\n    assert isinstance(d1, xr.DataArray)\n    d2 = ds[\"call_genotype\"].sum(axis=1)\n    assert isinstance(d2, xr.DataArray)\n\n\n@pytest.mark.parametrize(\n    \"sample_sets\",\n    [\"AG1000G-AO\", [\"AG1000G-AO\", \"AG1000G-UG\"], \"3.0\", None],\n)\n@pytest.mark.parametrize(\"contig\", [\"2L\", \"3L\"])\ndef test_cnv_discordant_read_calls__no_calls(sample_sets, contig):\n\n    ag3 = setup_ag3()\n\n    with pytest.raises(ValueError):\n        ag3.cnv_discordant_read_calls(contig=contig, sample_sets=sample_sets)\n    return\n\n\n@pytest.mark.parametrize(\"rows\", [10, 100, 1000])\n@pytest.mark.parametrize(\"cols\", [10, 100, 1000])\n@pytest.mark.parametrize(\"vmax\", [2, 12, 100])\ndef test_cn_mode(rows, cols, vmax):\n    \"\"\"Test the numba-optimised function for computing modal copy number.\"\"\"\n\n    a = np.random.randint(0, vmax, size=(rows * cols), dtype=\"i1\").reshape(rows, 
cols)\n    expect = scipy.stats.mode(a, axis=0)\n    modes, counts = _cn_mode(a, vmax)\n    assert_array_equal(expect.mode.squeeze(), modes)\n    assert_array_equal(expect.count.squeeze(), counts)\n\n\n# noinspection PyArgumentList\n@pytest.mark.parametrize(\n    \"sample_sets\",\n    [\"AG1000G-AO\", (\"AG1000G-TZ\", \"AG1000G-UG\"), \"3.0\", None],\n)\n@pytest.mark.parametrize(\n    \"region\", [\"2R\", \"X\", [\"2R\", \"3R\"], \"3R:28,000,000-29,000,000\"]\n)\ndef test_gene_cnv(region, sample_sets):\n    ag3 = setup_ag3()\n\n    ds = ag3.gene_cnv(\n        region=region, sample_sets=sample_sets, max_coverage_variance=None\n    )\n\n    assert isinstance(ds, xr.Dataset)\n\n    # check fields\n    expected_data_vars = {\n        \"CN_mode\",\n        \"CN_mode_count\",\n        \"gene_windows\",\n        \"gene_contig\",\n        \"gene_start\",\n        \"gene_end\",\n        \"gene_name\",\n        \"gene_description\",\n        \"gene_strand\",\n        \"sample_coverage_variance\",\n        \"sample_is_high_variance\",\n    }\n    assert set(ds.data_vars) == expected_data_vars\n\n    expected_coords = {\n        \"gene_id\",\n        \"sample_id\",\n    }\n    assert set(ds.coords) == expected_coords\n\n    # check dimensions\n    assert set(ds.dims) == {\"samples\", \"genes\"}\n\n    # check dim lengths\n    df_samples = ag3.sample_metadata(sample_sets=sample_sets)\n    n_samples = len(df_samples)\n    assert ds.dims[\"samples\"] == n_samples\n    df_geneset = ag3.geneset(region=region)\n    df_genes = df_geneset.query(\"type == 'gene'\")\n    n_genes = len(df_genes)\n    assert ds.dims[\"genes\"] == n_genes\n\n    # check IDs\n    assert ds[\"sample_id\"].values.tolist() == df_samples[\"sample_id\"].tolist()\n    assert ds[\"gene_id\"].values.tolist() == df_genes[\"ID\"].tolist()\n\n    # check shapes\n    for f in expected_coords | expected_data_vars:\n        x = ds[f]\n        assert isinstance(x, xr.DataArray)\n        assert isinstance(x.data, np.ndarray)\n\n        if f.startswith(\"gene_\"):\n            assert x.ndim == 1\n            assert x.dims == (\"genes\",)\n        elif f.startswith(\"CN\"):\n            assert x.ndim == 2\n            assert x.dims == (\"genes\", \"samples\")\n        elif f.startswith(\"sample_\"):\n            assert x.ndim == 1\n            assert x.dims == (\"samples\",)\n            assert x.shape == (n_samples,)\n\n    # check can set up computations\n    d1 = ds[\"gene_start\"] > 10_000\n    assert isinstance(d1, xr.DataArray)\n    d2 = ds[\"CN_mode\"].max(axis=1)\n    assert isinstance(d2, xr.DataArray)\n\n    # sanity checks\n    x = ds[\"gene_windows\"].values\n    y = ds[\"CN_mode_count\"].values.max(axis=1)\n    assert np.all(x >= y)\n    z = ds[\"CN_mode\"].values\n    assert np.max(z) <= 12\n    assert np.min(z) >= -1\n\n\n@pytest.mark.parametrize(\n    \"sample_sets\",\n    [\"AG1000G-AO\", (\"AG1000G-TZ\", \"AG1000G-UG\"), \"3.0\", None],\n)\n@pytest.mark.parametrize(\"region\", [\"2R\", \"X\", \"3R:28,000,000-29,000,000\"])\ndef test_gene_cnv_xarray_indexing(region, sample_sets):\n    ag3 = setup_ag3()\n\n    ds = ag3.gene_cnv(\n        region=region, sample_sets=sample_sets, max_coverage_variance=None\n    )\n\n    # check label-based indexing\n    # pick a random gene and sample ID\n\n    # check dim lengths\n    df_samples = ag3.sample_metadata(sample_sets=sample_sets)\n    df_geneset = ag3.geneset(region=region)\n    df_genes = df_geneset.query(\"type == 'gene'\")\n    gene = random.choice(df_genes[\"ID\"].tolist())\n    sample 
= random.choice(df_samples[\"sample_id\"].tolist())\n    ds = ds.set_index(genes=\"gene_id\", samples=\"sample_id\")\n    o = ds.sel(genes=gene)\n    assert isinstance(o, xr.Dataset)\n    assert set(o.dims) == {\"samples\"}\n    assert o.dims[\"samples\"] == ds.dims[\"samples\"]\n    o = ds.sel(samples=sample)\n    assert isinstance(o, xr.Dataset)\n    assert set(o.dims) == {\"genes\"}\n    assert o.dims[\"genes\"] == ds.dims[\"genes\"]\n    o = ds.sel(genes=gene, samples=sample)\n    assert isinstance(o, xr.Dataset)\n    assert set(o.dims) == set()\n\n\ndef _check_frequency(x):\n    loc_nan = np.isnan(x)\n    assert np.all(x[~loc_nan] >= 0)\n    assert np.all(x[~loc_nan] <= 1)\n\n\n@pytest.mark.parametrize(\n    \"region\", [\"2R\", \"X\", [\"2R\", \"3R\"], \"3R:28,000,000-29,000,000\"]\n)\n@pytest.mark.parametrize(\n    \"cohorts\",\n    [\n        {\n            \"ke\": \"country == 'Kenya'\",\n            \"bf_2012_col\": \"country == 'Burkina Faso' and year == 2012 and aim_species == 'coluzzii'\",\n        },\n        \"admin1_month\",\n    ],\n)\ndef test_gene_cnv_frequencies(region, cohorts):\n    ag3 = setup_ag3(cohorts_analysis=\"20211101\")\n\n    universal_fields = [\n        \"contig\",\n        \"start\",\n        \"end\",\n        \"windows\",\n        \"max_af\",\n        \"gene_strand\",\n        \"gene_description\",\n        \"label\",\n    ]\n    df_genes = ag3.geneset(region=region).query(\"type == 'gene'\")\n\n    df_cnv_frq = ag3.gene_cnv_frequencies(\n        region=region,\n        sample_sets=\"3.0\",\n        cohorts=cohorts,\n        min_cohort_size=1,\n        drop_invariant=False,\n        max_coverage_variance=None,\n    )\n\n    assert isinstance(df_cnv_frq, pd.DataFrame)\n    assert len(df_cnv_frq) == len(df_genes) * 2\n    assert df_cnv_frq.index.names == [\"gene_id\", \"gene_name\", \"cnv_type\"]\n\n    # sanity checks\n    frq_cols = None\n    if isinstance(cohorts, dict):\n        frq_cols = [\"frq_\" + s for s in cohorts.keys()]\n    if isinstance(cohorts, str):\n        df_coh = ag3.sample_cohorts(sample_sets=\"3.0\")\n        coh_nm = \"cohort_\" + cohorts\n        frq_cols = [\"frq_\" + s for s in list(df_coh[coh_nm].dropna().unique())]\n\n    # check frequencies are within sensible range\n    for f in frq_cols:\n        _check_frequency(df_cnv_frq[f].values)\n\n    # check amp and del frequencies are within sensible range\n    df_frq_amp = df_cnv_frq[frq_cols].xs(\"amp\", level=\"cnv_type\")\n    df_frq_del = df_cnv_frq[frq_cols].xs(\"del\", level=\"cnv_type\")\n    df_frq_sum = df_frq_amp + df_frq_del\n    for f in frq_cols:\n        _check_frequency(df_frq_sum[f].values)\n    expected_fields = universal_fields + frq_cols\n    assert sorted(df_cnv_frq.columns.tolist()) == sorted(expected_fields)\n\n\ndef test_gene_cnv_frequencies__query():\n    ag3 = setup_ag3(cohorts_analysis=\"20211101\")\n\n    region = \"3L\"\n\n    expected_columns = [\n        \"contig\",\n        \"start\",\n        \"end\",\n        \"windows\",\n        \"max_af\",\n        \"gene_strand\",\n        \"gene_description\",\n        \"label\",\n        \"frq_AO-LUA_colu_2009\",\n    ]\n\n    df = ag3.gene_cnv_frequencies(\n        region=region,\n        sample_sets=\"3.0\",\n        cohorts=\"admin1_year\",\n        min_cohort_size=10,\n        sample_query=\"country == 'Angola'\",\n        drop_invariant=False,\n    )\n\n    assert isinstance(df, pd.DataFrame)\n    assert sorted(df.columns) == sorted(expected_columns)\n    df_genes = ag3.geneset(region=region).query(\"type 
== 'gene'\")\n    assert len(df) == len(df_genes) * 2\n\n\ndef test_gene_cnv_frequencies__max_coverage_variance():\n    ag3 = setup_ag3(cohorts_analysis=\"20211101\")\n    region = \"3L\"\n    df_genes = ag3.geneset(region=region).query(\"type == 'gene'\")\n\n    base_columns = [\n        \"contig\",\n        \"start\",\n        \"end\",\n        \"windows\",\n        \"max_af\",\n        \"gene_strand\",\n        \"gene_description\",\n        \"label\",\n    ]\n\n    # run without a threshold on coverage variance\n    df = ag3.gene_cnv_frequencies(\n        region=region,\n        sample_sets=[\"AG1000G-GM-A\", \"AG1000G-GM-B\", \"AG1000G-GM-C\"],\n        cohorts=\"admin1_year\",\n        min_cohort_size=10,\n        max_coverage_variance=None,\n        drop_invariant=False,\n    )\n    expected_frq_columns = [\n        \"frq_GM-L_gcx2_2012\",\n        \"frq_GM-M_gcx2_2012\",\n        \"frq_GM-N_gcx1_2011\",\n    ]\n    expected_columns = base_columns + expected_frq_columns\n    assert isinstance(df, pd.DataFrame)\n    assert sorted(df.columns) == sorted(expected_columns)\n    assert len(df) == len(df_genes) * 2\n\n    # Run with a threshold on coverage variance - this will remove samples,\n    # which in turn will drop one of the cohorts below the min_cohort_size,\n    # and so we can check that we have lost a cohort.\n    df = ag3.gene_cnv_frequencies(\n        region=region,\n        sample_sets=[\"AG1000G-GM-A\", \"AG1000G-GM-B\", \"AG1000G-GM-C\"],\n        cohorts=\"admin1_year\",\n        min_cohort_size=10,\n        max_coverage_variance=0.2,\n        drop_invariant=False,\n    )\n    expected_frq_columns = [\n        \"frq_GM-M_gcx2_2012\",\n        \"frq_GM-N_gcx1_2011\",\n    ]\n    expected_columns = base_columns + expected_frq_columns\n    assert isinstance(df, pd.DataFrame)\n    assert sorted(df.columns) == sorted(expected_columns)\n    assert len(df) == len(df_genes) * 2\n\n\ndef test_gene_cnv_frequencies__drop_invariant():\n    ag3 = setup_ag3(cohorts_analysis=\"20211101\")\n    region = \"3L\"\n\n    expected_columns = [\n        \"contig\",\n        \"start\",\n        \"end\",\n        \"windows\",\n        \"max_af\",\n        \"gene_strand\",\n        \"gene_description\",\n        \"label\",\n        \"frq_AO-LUA_colu_2009\",\n    ]\n\n    df = ag3.gene_cnv_frequencies(\n        region=region,\n        sample_sets=\"3.0\",\n        cohorts=\"admin1_year\",\n        min_cohort_size=10,\n        sample_query=\"country == 'Angola'\",\n        drop_invariant=True,\n    )\n\n    assert isinstance(df, pd.DataFrame)\n    assert sorted(df.columns) == sorted(expected_columns)\n    assert np.all(df[\"max_af\"] > 0)\n    df_genes = ag3.geneset(region=region).query(\"type == 'gene'\")\n    assert len(df) < len(df_genes) * 2\n\n\ndef test_gene_cnv_frequencies__dup_samples():\n    ag3 = setup_ag3(cohorts_analysis=\"20211101\")\n    with pytest.raises(ValueError):\n        ag3.gene_cnv_frequencies(\n            region=\"3L\",\n            cohorts=\"admin1_year\",\n            sample_sets=[\"AG1000G-FR\", \"AG1000G-FR\"],\n        )\n\n\ndef test_gene_cnv_frequencies__multi_contig_x():\n    # https:\/\/github.com\/malariagen\/malariagen-data-python\/issues\/166\n\n    ag3 = setup_ag3(cohorts_analysis=\"20211101\")\n\n    df1 = ag3.gene_cnv_frequencies(\n        region=\"X\",\n        sample_sets=\"AG1000G-BF-B\",\n        cohorts=\"admin1_year\",\n        min_cohort_size=10,\n        drop_invariant=False,\n        max_coverage_variance=None,\n    )\n\n    df2 = 
ag3.gene_cnv_frequencies(\n        region=[\"2R\", \"X\"],\n        sample_sets=\"AG1000G-BF-B\",\n        cohorts=\"admin1_year\",\n        min_cohort_size=10,\n        drop_invariant=False,\n        max_coverage_variance=None,\n    ).query(\"contig == 'X'\")\n\n    assert_frame_equal(df1, df2)\n\n\ndef test_gene_cnv_frequencies__missing_samples():\n    # https:\/\/github.com\/malariagen\/malariagen-data-python\/issues\/183\n\n    ag3 = setup_ag3(cohorts_analysis=\"20211101\", pre=True)\n\n    df = ag3.gene_cnv_frequencies(\n        region=\"3L\",\n        sample_sets=\"1190-VO-GH-AMENGA-ETEGO-VMF00013\",\n        cohorts=\"admin1_year\",\n    )\n    assert isinstance(df, pd.DataFrame)\n\n\n@pytest.mark.parametrize(\n    \"sample_sets\",\n    [\"AG1000G-BF-A\", (\"AG1000G-TZ\", \"AG1000G-UG\"), \"3.0\", None],\n)\n@pytest.mark.parametrize(\n    \"region\", [\"2R\", [\"3R\", \"2R:48,714,463-48,715,355\", \"AGAP007280\"]]\n)\n@pytest.mark.parametrize(\"analysis\", [\"arab\", \"gamb_colu\", \"gamb_colu_arab\"])\ndef test_haplotypes(sample_sets, region, analysis):\n\n    ag3 = setup_ag3()\n\n    # check expected samples\n    phased_samples_query = None\n    if analysis == \"arab\":\n        phased_samples_query = (\n            \"aim_species == 'arabiensis' and sample_set != 'AG1000G-X'\"\n        )\n    elif analysis == \"gamb_colu\":\n        phased_samples_query = (\n            \"aim_species in ['gambiae', 'coluzzii', 'intermediate_gambiae_coluzzii'] and \"\n            \"sample_set != 'AG1000G-X'\"\n        )\n    elif analysis == \"gamb_colu_arab\":\n        phased_samples_query = \"sample_set != 'AG1000G-X'\"\n    df_samples = ag3.sample_metadata(sample_sets=sample_sets)\n    expected_samples = df_samples.query(phased_samples_query)[\"sample_id\"].tolist()\n    n_samples = len(expected_samples)\n\n    # check if any samples\n    if n_samples == 0:\n        ds = ag3.haplotypes(region=region, sample_sets=sample_sets, analysis=analysis)\n        assert ds is None\n        return\n\n    ds = ag3.haplotypes(region=region, sample_sets=sample_sets, analysis=analysis)\n    assert isinstance(ds, xr.Dataset)\n\n    # check fields\n    expected_data_vars = {\n        \"variant_allele\",\n        \"call_genotype\",\n    }\n    assert set(ds.data_vars) == expected_data_vars\n\n    expected_coords = {\n        \"variant_contig\",\n        \"variant_position\",\n        \"sample_id\",\n    }\n    assert set(ds.coords) == expected_coords\n\n    # check dimensions\n    assert set(ds.dims) == {\"alleles\", \"ploidy\", \"samples\", \"variants\"}\n\n    # check samples\n    samples = ds[\"sample_id\"].values\n    assert set(samples) == set(expected_samples)\n\n    # check dim lengths\n    assert ds.dims[\"samples\"] == n_samples\n    assert ds.dims[\"ploidy\"] == 2\n    assert ds.dims[\"alleles\"] == 2\n\n    # check shapes\n    for f in expected_coords | expected_data_vars:\n        x = ds[f]\n        assert isinstance(x, xr.DataArray)\n        assert isinstance(x.data, da.Array)\n\n        if f == \"variant_allele\":\n            assert x.ndim == 2\n            assert x.shape[1] == 2\n            assert x.dims == (\"variants\", \"alleles\")\n        elif f.startswith(\"variant_\"):\n            assert x.ndim == 1\n            assert x.dims == (\"variants\",)\n        elif f == \"call_genotype\":\n            assert x.ndim == 3\n            assert x.dims == (\"variants\", \"samples\", \"ploidy\")\n            assert x.shape[1] == n_samples\n            assert x.shape[2] == 2\n\n    # check attributes\n   
 assert \"contigs\" in ds.attrs\n    assert ds.attrs[\"contigs\"] == (\"2R\", \"2L\", \"3R\", \"3L\", \"X\")\n\n    # check can set up computations\n    d1 = ds[\"variant_position\"] > 10_000\n    assert isinstance(d1, xr.DataArray)\n    d2 = ds[\"call_genotype\"].sum(axis=(1, 2))\n    assert isinstance(d2, xr.DataArray)\n\n\n@pytest.mark.parametrize(\n    \"sample_query\",\n    [\n        \"taxon == 'coluzzii' and location == 'Bana Village'\",\n        \"taxon == 'gambiae' and location == 'Pala'\",\n    ],\n)\ndef test_haplotypes__sample_query(sample_query):\n\n    sample_sets = \"AG1000G-BF-B\"\n    region = \"3L\"\n    analysis = \"gamb_colu_arab\"\n\n    ag3 = setup_ag3()\n\n    # check expected samples\n    df_samples = ag3.sample_metadata(sample_sets=sample_sets)\n    expected_samples = df_samples.query(sample_query)[\"sample_id\"].tolist()\n    n_samples = len(expected_samples)\n\n    ds = ag3.haplotypes(\n        region=region,\n        sample_sets=sample_sets,\n        analysis=analysis,\n        sample_query=sample_query,\n    )\n    assert isinstance(ds, xr.Dataset)\n\n    # check fields\n    expected_data_vars = {\n        \"variant_allele\",\n        \"call_genotype\",\n    }\n    assert set(ds.data_vars) == expected_data_vars\n\n    expected_coords = {\n        \"variant_contig\",\n        \"variant_position\",\n        \"sample_id\",\n    }\n    assert set(ds.coords) == expected_coords\n\n    # check dimensions\n    assert set(ds.dims) == {\"alleles\", \"ploidy\", \"samples\", \"variants\"}\n\n    # check samples\n    samples = ds[\"sample_id\"].values\n    assert set(samples) == set(expected_samples)\n\n    # check dim lengths\n    assert ds.dims[\"samples\"] == n_samples\n    assert ds.dims[\"ploidy\"] == 2\n    assert ds.dims[\"alleles\"] == 2\n\n    # check shapes\n    for f in expected_coords | expected_data_vars:\n        x = ds[f]\n        assert isinstance(x, xr.DataArray)\n        assert isinstance(x.data, da.Array)\n\n        if f == \"variant_allele\":\n            assert x.ndim == 2\n            assert x.shape[1] == 2\n            assert x.dims == (\"variants\", \"alleles\")\n        elif f.startswith(\"variant_\"):\n            assert x.ndim == 1\n            assert x.dims == (\"variants\",)\n        elif f == \"call_genotype\":\n            assert x.ndim == 3\n            assert x.dims == (\"variants\", \"samples\", \"ploidy\")\n            assert x.shape[1] == n_samples\n            assert x.shape[2] == 2\n\n    # check attributes\n    assert \"contigs\" in ds.attrs\n    assert ds.attrs[\"contigs\"] == (\"2R\", \"2L\", \"3R\", \"3L\", \"X\")\n\n\n# test v3 sample sets\n@pytest.mark.parametrize(\n    \"sample_sets\",\n    [\"3.0\", \"AG1000G-UG\", [\"AG1000G-AO\", \"AG1000G-FR\"]],\n)\ndef test_sample_cohorts(sample_sets):\n    ag3 = setup_ag3(cohorts_analysis=\"20211101\")\n\n    expected_cols = (\n        \"sample_id\",\n        \"country_iso\",\n        \"admin1_name\",\n        \"admin1_iso\",\n        \"admin2_name\",\n        \"taxon\",\n        \"cohort_admin1_year\",\n        \"cohort_admin1_month\",\n        \"cohort_admin2_year\",\n        \"cohort_admin2_month\",\n    )\n\n    df_coh = ag3.sample_cohorts(sample_sets=sample_sets)\n    df_meta = ag3.sample_metadata(sample_sets=sample_sets)\n\n    assert tuple(df_coh.columns) == expected_cols\n    assert len(df_coh) == len(df_meta)\n    assert df_coh.sample_id.tolist() == df_meta.sample_id.tolist()\n    if sample_sets == \"AG1000G-UG\":\n        assert df_coh.sample_id[0] == \"AC0007-C\"\n        assert 
df_coh.cohort_admin1_year[23] == \"UG-E_arab_2012\"\n        assert df_coh.cohort_admin1_month[37] == \"UG-E_arab_2012_10\"\n        assert df_coh.cohort_admin2_year[42] == \"UG-E_Tororo_arab_2012\"\n        assert df_coh.cohort_admin2_month[49] == \"UG-E_Tororo_arab_2012_10\"\n    if sample_sets == [\"AG1000G-AO\", \"AG1000G-FR\"]:\n        assert df_coh.sample_id[0] == \"AR0047-C\"\n        assert df_coh.sample_id[103] == \"AP0017-Cx\"\n\n\n@pytest.mark.parametrize(\n    \"region_raw\",\n    [\n        \"AGAP007280\",\n        \"3L\",\n        \"2R:48714463-48715355\",\n        \"2L:24,630,355-24,633,221\",\n        Region(\"2R\", 48714463, 48715355),\n    ],\n)\ndef test_locate_region(region_raw):\n\n    ag3 = setup_ag3()\n    gene_annotation = ag3.geneset(attributes=[\"ID\"])\n    region = resolve_region(ag3, region_raw)\n    pos = ag3.snp_sites(region=region.contig, field=\"POS\")\n    ref = ag3.snp_sites(region=region.contig, field=\"REF\")\n    loc_region = locate_region(region, pos)\n\n    # check types\n    assert isinstance(loc_region, slice)\n    assert isinstance(region, Region)\n\n    # check Region with contig\n    if region_raw == \"3L\":\n        assert region.contig == \"3L\"\n        assert region.start is None\n        assert region.end is None\n\n    # check that Region goes through unchanged\n    if isinstance(region_raw, Region):\n        assert region == region_raw\n\n    # check that gene name matches coordinates from the geneset and matches gene sequence\n    if region_raw == \"AGAP007280\":\n        gene = gene_annotation.query(\"ID == 'AGAP007280'\").squeeze()\n        assert region == Region(gene.contig, gene.start, gene.end)\n        assert pos[loc_region][0] == gene.start\n        assert pos[loc_region][-1] == gene.end\n        assert (\n            ref[loc_region][:5].compute()\n            == np.array([\"A\", \"T\", \"G\", \"G\", \"C\"], dtype=\"S1\")\n        ).all()\n\n    # check string parsing\n    if region_raw == \"2R:48714463-48715355\":\n        assert region == Region(\"2R\", 48714463, 48715355)\n    if region_raw == \"2L:24,630,355-24,633,221\":\n        assert region == Region(\"2L\", 24630355, 24633221)\n\n\ndef test_aa_allele_frequencies():\n    ag3 = setup_ag3(cohorts_analysis=\"20211101\")\n\n    expected_fields = [\n        \"transcript\",\n        \"aa_pos\",\n        \"ref_allele\",\n        \"alt_allele\",\n        \"ref_aa\",\n        \"alt_aa\",\n        \"effect\",\n        \"impact\",\n        \"frq_BF-09_gamb_2012\",\n        \"frq_BF-09_colu_2012\",\n        \"frq_BF-09_colu_2014\",\n        \"frq_BF-09_gamb_2014\",\n        \"frq_BF-07_gamb_2004\",\n        \"max_af\",\n        \"label\",\n    ]\n\n    df = ag3.aa_allele_frequencies(\n        transcript=\"AGAP004707-RD\",\n        cohorts=\"admin1_year\",\n        min_cohort_size=10,\n        site_mask=\"gamb_colu\",\n        sample_sets=(\"AG1000G-BF-A\", \"AG1000G-BF-B\", \"AG1000G-BF-C\"),\n        drop_invariant=True,\n    )\n\n    assert sorted(df.columns.tolist()) == sorted(expected_fields)\n    assert isinstance(df, pd.DataFrame)\n    assert df.index.names == [\"aa_change\", \"contig\", \"position\"]\n    assert df.shape == (61, len(expected_fields))\n    assert df.loc[\"V402L\"].max_af[0] == pytest.approx(0.121951, abs=1e-6)\n\n\ndef test_aa_allele_frequencies__dup_samples():\n    ag3 = setup_ag3(cohorts_analysis=\"20211101\")\n    with pytest.raises(ValueError):\n        ag3.aa_allele_frequencies(\n            transcript=\"AGAP004707-RD\",\n            
cohorts=\"admin1_year\",\n            sample_sets=[\"AG1000G-FR\", \"AG1000G-FR\"],\n        )\n\n\n# noinspection PyDefaultArgument\ndef _check_snp_allele_frequencies_advanced(\n    transcript=\"AGAP004707-RD\",\n    area_by=\"admin1_iso\",\n    period_by=\"year\",\n    sample_sets=[\"AG1000G-BF-A\", \"AG1000G-ML-A\", \"AG1000G-UG\"],\n    sample_query=None,\n    min_cohort_size=10,\n    nobs_mode=\"called\",\n    variant_query=\"max_af > 0.02\",\n):\n    ag3 = setup_ag3(cohorts_analysis=\"20211101\")\n\n    ds = ag3.snp_allele_frequencies_advanced(\n        transcript=transcript,\n        area_by=area_by,\n        period_by=period_by,\n        sample_sets=sample_sets,\n        sample_query=sample_query,\n        min_cohort_size=min_cohort_size,\n        nobs_mode=nobs_mode,\n        variant_query=variant_query,\n    )\n\n    assert isinstance(ds, xr.Dataset)\n\n    # noinspection PyTypeChecker\n    assert sorted(ds.dims) == [\"cohorts\", \"variants\"]\n\n    expected_variant_vars = (\n        \"variant_label\",\n        \"variant_contig\",\n        \"variant_position\",\n        \"variant_ref_allele\",\n        \"variant_alt_allele\",\n        \"variant_max_af\",\n        \"variant_pass_gamb_colu_arab\",\n        \"variant_pass_gamb_colu\",\n        \"variant_pass_arab\",\n        \"variant_transcript\",\n        \"variant_effect\",\n        \"variant_impact\",\n        \"variant_ref_codon\",\n        \"variant_alt_codon\",\n        \"variant_ref_aa\",\n        \"variant_alt_aa\",\n        \"variant_aa_pos\",\n        \"variant_aa_change\",\n    )\n    for v in expected_variant_vars:\n        a = ds[v]\n        assert isinstance(a, xr.DataArray)\n        assert a.dims == (\"variants\",)\n\n    expected_cohort_vars = (\n        \"cohort_label\",\n        \"cohort_size\",\n        \"cohort_taxon\",\n        \"cohort_area\",\n        \"cohort_period\",\n        \"cohort_period_start\",\n        \"cohort_period_end\",\n        \"cohort_lat_mean\",\n        \"cohort_lat_min\",\n        \"cohort_lat_max\",\n        \"cohort_lon_mean\",\n        \"cohort_lon_min\",\n        \"cohort_lon_max\",\n    )\n    for v in expected_cohort_vars:\n        a = ds[v]\n        assert isinstance(a, xr.DataArray)\n        assert a.dims == (\"cohorts\",)\n\n    expected_event_vars = (\n        \"event_count\",\n        \"event_nobs\",\n        \"event_frequency\",\n        \"event_frequency_ci_low\",\n        \"event_frequency_ci_upp\",\n    )\n    for v in expected_event_vars:\n        a = ds[v]\n        assert isinstance(a, xr.DataArray)\n        assert a.dims == (\"variants\", \"cohorts\")\n\n    # sanity checks for area values\n    df_samples = ag3.sample_metadata(sample_sets=sample_sets)\n    if sample_query is not None:\n        df_samples = df_samples.query(sample_query)\n    expected_area = np.unique(df_samples[area_by].dropna().values)\n    area = ds[\"cohort_area\"].values\n    # N.B., some areas may not end up in final dataset if cohort\n    # size is too small, so do a set membership test\n    for a in area:\n        assert a in expected_area\n\n    # sanity checks for period values\n    period = ds[\"cohort_period\"].values\n    if period_by == \"year\":\n        expected_freqstr = \"A-DEC\"\n    elif period_by == \"month\":\n        expected_freqstr = \"M\"\n    elif period_by == \"quarter\":\n        expected_freqstr = \"Q-DEC\"\n    else:\n        assert False, \"not implemented\"\n    for p in period:\n        assert isinstance(p, pd.Period)\n        assert p.freqstr == expected_freqstr\n\n    
# sanity check cohort size\n    size = ds[\"cohort_size\"].values\n    for s in size:\n        assert s >= min_cohort_size\n\n    if area_by == \"admin1_iso\" and period_by == \"year\" and nobs_mode == \"called\":\n\n        # Here we test the behaviour of the function when grouping by admin level\n        # 1 and year. We can do some more in-depth testing in this case because\n        # we can compare results directly against the simpler snp_allele_frequencies()\n        # function with the admin1_year cohorts.\n\n        # check consistency with the basic snp allele frequencies method\n        df_af = ag3.snp_allele_frequencies(\n            transcript=transcript,\n            cohorts=\"admin1_year\",\n            sample_sets=sample_sets,\n            sample_query=sample_query,\n            min_cohort_size=min_cohort_size,\n        )\n        df_af = df_af.reset_index()  # make sure all variables available to check\n        if variant_query is not None:\n            df_af = df_af.query(variant_query)\n\n        # check cohorts are consistent\n        expect_cohort_labels = sorted(\n            [c.split(\"frq_\")[1] for c in df_af.columns if c.startswith(\"frq_\")]\n        )\n        cohort_labels = sorted(ds[\"cohort_label\"].values)\n        assert cohort_labels == expect_cohort_labels\n\n        # check variants are consistent\n        assert ds.dims[\"variants\"] == len(df_af)\n        for v in expected_variant_vars:\n            c = v.split(\"variant_\")[1]\n            actual = ds[v]\n            expect = df_af[c]\n            _compare_series_like(actual, expect)\n\n        # check frequencies are consistent\n        for cohort_index, cohort_label in enumerate(ds[\"cohort_label\"].values):\n            actual_frq = ds[\"event_frequency\"].values[:, cohort_index]\n            expect_frq = df_af[f\"frq_{cohort_label}\"].values\n            assert_allclose(actual_frq, expect_frq)\n\n\n# noinspection PyDefaultArgument\ndef _check_aa_allele_frequencies_advanced(\n    transcript=\"AGAP004707-RD\",\n    area_by=\"admin1_iso\",\n    period_by=\"year\",\n    sample_sets=[\"AG1000G-BF-A\", \"AG1000G-ML-A\", \"AG1000G-UG\"],\n    sample_query=None,\n    min_cohort_size=10,\n    nobs_mode=\"called\",\n    variant_query=\"max_af > 0.02\",\n):\n    ag3 = setup_ag3(cohorts_analysis=\"20211101\")\n\n    ds = ag3.aa_allele_frequencies_advanced(\n        transcript=transcript,\n        area_by=area_by,\n        period_by=period_by,\n        sample_sets=sample_sets,\n        sample_query=sample_query,\n        min_cohort_size=min_cohort_size,\n        nobs_mode=nobs_mode,\n        variant_query=variant_query,\n    )\n\n    assert isinstance(ds, xr.Dataset)\n\n    # noinspection PyTypeChecker\n    assert sorted(ds.dims) == [\"cohorts\", \"variants\"]\n\n    expected_variant_vars = (\n        \"variant_label\",\n        \"variant_contig\",\n        \"variant_position\",\n        \"variant_max_af\",\n        \"variant_transcript\",\n        \"variant_effect\",\n        \"variant_impact\",\n        \"variant_ref_aa\",\n        \"variant_alt_aa\",\n        \"variant_aa_pos\",\n        \"variant_aa_change\",\n    )\n    for v in expected_variant_vars:\n        a = ds[v]\n        assert isinstance(a, xr.DataArray)\n        assert a.dims == (\"variants\",)\n\n    expected_cohort_vars = (\n        \"cohort_label\",\n        \"cohort_size\",\n        \"cohort_taxon\",\n        \"cohort_area\",\n        \"cohort_period\",\n        \"cohort_period_start\",\n        \"cohort_period_end\",\n        
\"cohort_lat_mean\",\n        \"cohort_lat_min\",\n        \"cohort_lat_max\",\n        \"cohort_lon_mean\",\n        \"cohort_lon_min\",\n        \"cohort_lon_max\",\n    )\n    for v in expected_cohort_vars:\n        a = ds[v]\n        assert isinstance(a, xr.DataArray)\n        assert a.dims == (\"cohorts\",)\n\n    expected_event_vars = (\n        \"event_count\",\n        \"event_nobs\",\n        \"event_frequency\",\n        \"event_frequency_ci_low\",\n        \"event_frequency_ci_upp\",\n    )\n    for v in expected_event_vars:\n        a = ds[v]\n        assert isinstance(a, xr.DataArray)\n        assert a.dims == (\"variants\", \"cohorts\")\n\n    # sanity checks for area values\n    df_samples = ag3.sample_metadata(sample_sets=sample_sets)\n    if sample_query is not None:\n        df_samples = df_samples.query(sample_query)\n    expected_area = np.unique(df_samples[area_by].dropna().values)\n    area = ds[\"cohort_area\"].values\n    # N.B., some areas may not end up in final dataset if cohort\n    # size is too small, so do a set membership test\n    for a in area:\n        assert a in expected_area\n\n    # sanity checks for period values\n    period = ds[\"cohort_period\"].values\n    if period_by == \"year\":\n        expected_freqstr = \"A-DEC\"\n    elif period_by == \"month\":\n        expected_freqstr = \"M\"\n    elif period_by == \"quarter\":\n        expected_freqstr = \"Q-DEC\"\n    else:\n        assert False, \"not implemented\"\n    for p in period:\n        assert isinstance(p, pd.Period)\n        assert p.freqstr == expected_freqstr\n\n    # sanity check cohort size\n    size = ds[\"cohort_size\"].values\n    for s in size:\n        assert s >= min_cohort_size\n\n    if area_by == \"admin1_iso\" and period_by == \"year\" and nobs_mode == \"called\":\n\n        # Here we test the behaviour of the function when grouping by admin level\n        # 1 and year. 
We can do some more in-depth testing in this case because\n        # we can compare results directly against the simpler aa_allele_frequencies()\n        # function with the admin1_year cohorts.\n\n        # check consistency with the basic amino acid allele frequencies method\n        df_af = ag3.aa_allele_frequencies(\n            transcript=transcript,\n            cohorts=\"admin1_year\",\n            sample_sets=sample_sets,\n            sample_query=sample_query,\n            min_cohort_size=min_cohort_size,\n        )\n        df_af = df_af.reset_index()  # make sure all variables available to check\n        if variant_query is not None:\n            df_af = df_af.query(variant_query)\n\n        # check cohorts are consistent\n        expect_cohort_labels = sorted(\n            [c.split(\"frq_\")[1] for c in df_af.columns if c.startswith(\"frq_\")]\n        )\n        cohort_labels = sorted(ds[\"cohort_label\"].values)\n        assert cohort_labels == expect_cohort_labels\n\n        # check variants are consistent\n        assert ds.dims[\"variants\"] == len(df_af)\n        for v in expected_variant_vars:\n            c = v.split(\"variant_\")[1]\n            actual = ds[v]\n            expect = df_af[c]\n            _compare_series_like(actual, expect)\n\n        # check frequencies are consistent\n        for cohort_index, cohort_label in enumerate(ds[\"cohort_label\"].values):\n            actual_frq = ds[\"event_frequency\"].values[:, cohort_index]\n            expect_frq = df_af[f\"frq_{cohort_label}\"].values\n            assert_allclose(actual_frq, expect_frq)\n\n\n# Here we don't explore the full matrix, but vary one parameter at a time, otherwise\n# the test suite would take too long to run.\n\n\n@pytest.mark.parametrize(\"transcript\", [\"AGAP004707-RD\", \"AGAP006028-RA\"])\ndef test_allele_frequencies_advanced__transcript(transcript):\n    _check_snp_allele_frequencies_advanced(\n        transcript=transcript,\n    )\n    _check_aa_allele_frequencies_advanced(\n        transcript=transcript,\n    )\n\n\n@pytest.mark.parametrize(\"area_by\", [\"country\", \"admin1_iso\", \"admin2_name\"])\ndef test_allele_frequencies_advanced__area_by(area_by):\n    _check_snp_allele_frequencies_advanced(\n        area_by=area_by,\n    )\n    _check_aa_allele_frequencies_advanced(\n        area_by=area_by,\n    )\n\n\n@pytest.mark.parametrize(\"period_by\", [\"year\", \"quarter\", \"month\"])\ndef test_allele_frequencies_advanced__period_by(period_by):\n    _check_snp_allele_frequencies_advanced(\n        period_by=period_by,\n    )\n    _check_aa_allele_frequencies_advanced(\n        period_by=period_by,\n    )\n\n\n@pytest.mark.parametrize(\n    \"sample_sets\", [\"AG1000G-BF-A\", [\"AG1000G-BF-A\", \"AG1000G-ML-A\"], \"3.0\"]\n)\ndef test_allele_frequencies_advanced__sample_sets(sample_sets):\n    _check_snp_allele_frequencies_advanced(\n        sample_sets=sample_sets,\n    )\n    _check_aa_allele_frequencies_advanced(\n        sample_sets=sample_sets,\n    )\n\n\n@pytest.mark.parametrize(\n    \"sample_query\",\n    [\n        \"taxon in ['gambiae', 'coluzzii'] and country == 'Mali'\",\n        \"taxon == 'arabiensis' and country in ['Uganda', 'Tanzania']\",\n    ],\n)\ndef test_allele_frequencies_advanced__sample_query(sample_query):\n    _check_snp_allele_frequencies_advanced(\n        sample_query=sample_query,\n    )\n    # noinspection PyTypeChecker\n    _check_aa_allele_frequencies_advanced(\n        sample_query=sample_query,\n        
variant_query=None,\n    )\n\n\n@pytest.mark.parametrize(\"min_cohort_size\", [10, 100])\ndef test_allele_frequencies_advanced__min_cohort_size(min_cohort_size):\n    _check_snp_allele_frequencies_advanced(\n        min_cohort_size=min_cohort_size,\n    )\n    _check_aa_allele_frequencies_advanced(\n        min_cohort_size=min_cohort_size,\n    )\n\n\n@pytest.mark.parametrize(\n    \"variant_query\",\n    [\n        None,\n        \"effect == 'NON_SYNONYMOUS_CODING' and max_af > 0.05\",\n        \"effect == 'foobar'\",  # no variants\n    ],\n)\ndef test_allele_frequencies_advanced__variant_query(variant_query):\n    _check_snp_allele_frequencies_advanced(\n        variant_query=variant_query,\n    )\n    _check_aa_allele_frequencies_advanced(\n        variant_query=variant_query,\n    )\n\n\n@pytest.mark.parametrize(\"nobs_mode\", [\"called\", \"fixed\"])\ndef test_allele_frequencies_advanced__nobs_mode(nobs_mode):\n    _check_snp_allele_frequencies_advanced(\n        nobs_mode=nobs_mode,\n    )\n    _check_aa_allele_frequencies_advanced(\n        nobs_mode=nobs_mode,\n    )\n\n\n# noinspection PyDefaultArgument\ndef _check_gene_cnv_frequencies_advanced(\n    region=\"2L\",\n    area_by=\"admin1_iso\",\n    period_by=\"year\",\n    sample_sets=[\"AG1000G-BF-A\", \"AG1000G-ML-A\", \"AG1000G-UG\"],\n    sample_query=None,\n    min_cohort_size=10,\n    variant_query=\"max_af > 0.02\",\n    drop_invariant=True,\n    max_coverage_variance=0.2,\n):\n    ag3 = setup_ag3(cohorts_analysis=\"20211101\")\n\n    ds = ag3.gene_cnv_frequencies_advanced(\n        region=region,\n        area_by=area_by,\n        period_by=period_by,\n        sample_sets=sample_sets,\n        sample_query=sample_query,\n        min_cohort_size=min_cohort_size,\n        variant_query=variant_query,\n        drop_invariant=drop_invariant,\n        max_coverage_variance=max_coverage_variance,\n    )\n\n    assert isinstance(ds, xr.Dataset)\n\n    # noinspection PyTypeChecker\n    assert sorted(ds.dims) == [\"cohorts\", \"variants\"]\n\n    expected_variant_vars = (\n        \"variant_label\",\n        \"variant_contig\",\n        \"variant_start\",\n        \"variant_end\",\n        \"variant_windows\",\n        \"variant_cnv_type\",\n        \"variant_gene_id\",\n        \"variant_gene_name\",\n        \"variant_gene_strand\",\n        \"variant_max_af\",\n    )\n    for v in expected_variant_vars:\n        a = ds[v]\n        assert isinstance(a, xr.DataArray)\n        assert a.dims == (\"variants\",)\n\n    expected_cohort_vars = (\n        \"cohort_label\",\n        \"cohort_size\",\n        \"cohort_taxon\",\n        \"cohort_area\",\n        \"cohort_period\",\n        \"cohort_period_start\",\n        \"cohort_period_end\",\n        \"cohort_lat_mean\",\n        \"cohort_lat_min\",\n        \"cohort_lat_max\",\n        \"cohort_lon_mean\",\n        \"cohort_lon_min\",\n        \"cohort_lon_max\",\n    )\n    for v in expected_cohort_vars:\n        a = ds[v]\n        assert isinstance(a, xr.DataArray)\n        assert a.dims == (\"cohorts\",)\n\n    expected_event_vars = (\n        \"event_count\",\n        \"event_nobs\",\n        \"event_frequency\",\n        \"event_frequency_ci_low\",\n        \"event_frequency_ci_upp\",\n    )\n    for v in expected_event_vars:\n        a = ds[v]\n        assert isinstance(a, xr.DataArray)\n        assert a.dims == (\"variants\", \"cohorts\")\n\n    # sanity checks for area values\n    df_samples = ag3.sample_metadata(sample_sets=sample_sets)\n    if sample_query is not None:\n     
   df_samples = df_samples.query(sample_query)\n    expected_area = np.unique(df_samples[area_by].dropna().values)\n    area = ds[\"cohort_area\"].values\n    # N.B., some areas may not end up in final dataset if cohort\n    # size is too small, so do a set membership test\n    for a in area:\n        assert a in expected_area\n\n    # sanity checks for period values\n    period = ds[\"cohort_period\"].values\n    if period_by == \"year\":\n        expected_freqstr = \"A-DEC\"\n    elif period_by == \"month\":\n        expected_freqstr = \"M\"\n    elif period_by == \"quarter\":\n        expected_freqstr = \"Q-DEC\"\n    else:\n        assert False, \"not implemented\"\n    for p in period:\n        assert isinstance(p, pd.Period)\n        assert p.freqstr == expected_freqstr\n\n    # sanity check cohort size\n    size = ds[\"cohort_size\"].values\n    for s in size:\n        assert s >= min_cohort_size\n\n    if area_by == \"admin1_iso\" and period_by == \"year\":\n\n        # Here we test the behaviour of the function when grouping by admin level\n        # 1 and year. We can do some more in-depth testing in this case because\n        # we can compare results directly against the simpler gene_cnv_frequencies()\n        # function with the admin1_year cohorts.\n\n        # check consistency with the basic gene CNV frequencies method\n        df_af = ag3.gene_cnv_frequencies(\n            region=region,\n            cohorts=\"admin1_year\",\n            sample_sets=sample_sets,\n            sample_query=sample_query,\n            min_cohort_size=min_cohort_size,\n            drop_invariant=drop_invariant,\n            max_coverage_variance=max_coverage_variance,\n        )\n        df_af = df_af.reset_index()  # make sure all variables available to check\n        if variant_query is not None:\n            df_af = df_af.query(variant_query)\n\n        # check cohorts are consistent\n        expect_cohort_labels = sorted(\n            [c.split(\"frq_\")[1] for c in df_af.columns if c.startswith(\"frq_\")]\n        )\n        cohort_labels = sorted(ds[\"cohort_label\"].values)\n        assert cohort_labels == expect_cohort_labels\n\n        # check variants are consistent\n        assert ds.dims[\"variants\"] == len(df_af)\n        for v in expected_variant_vars:\n            c = v.split(\"variant_\")[1]\n            actual = ds[v]\n            expect = df_af[c]\n            _compare_series_like(actual, expect)\n\n        # check frequencies are consistent\n        for cohort_index, cohort_label in enumerate(ds[\"cohort_label\"].values):\n            actual_frq = ds[\"event_frequency\"].values[:, cohort_index]\n            expect_frq = df_af[f\"frq_{cohort_label}\"].values\n            assert_allclose(actual_frq, expect_frq)\n\n\n@pytest.mark.parametrize(\"region\", [\"2R\", \"X\", [\"3R\", \"X\"], \"3R:28,000,000-29,000,000\"])\ndef test_gene_cnv_frequencies_advanced__region(region):\n    _check_gene_cnv_frequencies_advanced(\n        region=region,\n    )\n\n\n@pytest.mark.parametrize(\"area_by\", [\"country\", \"admin1_iso\", \"admin2_name\"])\ndef test_gene_cnv_frequencies_advanced__area_by(area_by):\n    _check_gene_cnv_frequencies_advanced(\n        area_by=area_by,\n    )\n\n\n@pytest.mark.parametrize(\"period_by\", [\"year\", \"quarter\", \"month\"])\ndef test_gene_cnv_frequencies_advanced__period_by(period_by):\n    _check_gene_cnv_frequencies_advanced(\n        period_by=period_by,\n    )\n\n\n@pytest.mark.parametrize(\n    \"sample_sets\", [\"AG1000G-BF-A\", [\"AG1000G-BF-A\", 
\"AG1000G-ML-A\"], \"3.0\"]\n)\ndef test_gene_cnv_frequencies_advanced__sample_sets(sample_sets):\n    _check_gene_cnv_frequencies_advanced(\n        sample_sets=sample_sets,\n    )\n\n\n@pytest.mark.parametrize(\n    \"sample_query\",\n    [\n        \"taxon in ['gambiae', 'coluzzii'] and country == 'Mali'\",\n        \"taxon == 'arabiensis' and country in ['Uganda', 'Tanzania']\",\n    ],\n)\ndef test_gene_cnv_frequencies_advanced__sample_query(sample_query):\n    _check_gene_cnv_frequencies_advanced(\n        sample_query=sample_query,\n    )\n\n\n@pytest.mark.parametrize(\"min_cohort_size\", [10, 100])\ndef test_gene_cnv_frequencies_advanced__min_cohort_size(min_cohort_size):\n    _check_gene_cnv_frequencies_advanced(\n        min_cohort_size=min_cohort_size,\n    )\n\n\n@pytest.mark.parametrize(\n    \"variant_query\",\n    [\n        None,\n        \"cnv_type == 'amp' and max_af > 0.05\",\n    ],\n)\ndef test_gene_cnv_frequencies_advanced__variant_query(variant_query):\n    _check_gene_cnv_frequencies_advanced(\n        variant_query=variant_query,\n    )\n\n\n@pytest.mark.parametrize(\n    \"drop_invariant\",\n    [\n        False,\n        True,\n    ],\n)\ndef test_gene_cnv_frequencies_advanced__drop_invariant(drop_invariant):\n    # noinspection PyTypeChecker\n    _check_gene_cnv_frequencies_advanced(\n        variant_query=None,\n        drop_invariant=drop_invariant,\n    )\n\n\n@pytest.mark.parametrize(\n    \"max_coverage_variance\",\n    [None, 0.2],\n)\ndef test_gene_cnv_frequencies_advanced__max_coverage_variance(max_coverage_variance):\n    _check_gene_cnv_frequencies_advanced(\n        max_coverage_variance=max_coverage_variance,\n        sample_sets=[\"AG1000G-GM-A\", \"AG1000G-GM-B\", \"AG1000G-GM-C\"],\n    )\n\n\ndef test_gene_cnv_frequencies_advanced__multi_contig_x():\n    # https:\/\/github.com\/malariagen\/malariagen-data-python\/issues\/166\n\n    ag3 = setup_ag3(cohorts_analysis=\"20211101\")\n\n    ds1 = ag3.gene_cnv_frequencies_advanced(\n        region=\"X\",\n        area_by=\"admin1_iso\",\n        period_by=\"year\",\n        sample_sets=\"AG1000G-BF-B\",\n        sample_query=None,\n        min_cohort_size=10,\n        variant_query=None,\n        drop_invariant=False,\n        max_coverage_variance=None,\n    )\n\n    ds2 = ag3.gene_cnv_frequencies_advanced(\n        region=[\"2R\", \"X\"],\n        area_by=\"admin1_iso\",\n        period_by=\"year\",\n        sample_sets=\"AG1000G-BF-B\",\n        sample_query=None,\n        min_cohort_size=10,\n        variant_query=None,\n        drop_invariant=False,\n        max_coverage_variance=None,\n    )\n    loc_x = ds2[\"variant_contig\"].values == \"X\"\n    ds2 = ds2.isel(variants=loc_x)\n\n    for v in ds1:\n        a = ds1[v]\n        b = ds2[v]\n        _compare_series_like(a, b)\n\n\ndef test_gene_cnv_frequencies_advanced__missing_samples():\n    # https:\/\/github.com\/malariagen\/malariagen-data-python\/issues\/183\n\n    ag3 = setup_ag3(cohorts_analysis=\"20211101\", pre=True)\n\n    ds = ag3.gene_cnv_frequencies_advanced(\n        region=\"3L\",\n        sample_sets=\"1190-VO-GH-AMENGA-ETEGO-VMF00013\",\n        area_by=\"admin1_iso\",\n        period_by=\"year\",\n    )\n    assert isinstance(ds, xr.Dataset)\n\n\ndef test_snp_allele_frequencies_advanced__dup_samples():\n    ag3 = setup_ag3(cohorts_analysis=\"20211101\")\n    with pytest.raises(ValueError):\n        ag3.snp_allele_frequencies_advanced(\n            transcript=\"AGAP004707-RD\",\n            area_by=\"admin1_iso\",\n            
period_by=\"year\",\n            sample_sets=[\"AG1000G-BF-A\", \"AG1000G-BF-A\"],\n        )\n\n\ndef test_aa_allele_frequencies_advanced__dup_samples():\n    ag3 = setup_ag3(cohorts_analysis=\"20211101\")\n    with pytest.raises(ValueError):\n        ag3.aa_allele_frequencies_advanced(\n            transcript=\"AGAP004707-RD\",\n            area_by=\"admin1_iso\",\n            period_by=\"year\",\n            sample_sets=[\"AG1000G-BF-A\", \"AG1000G-BF-A\"],\n        )\n\n\ndef test_gene_cnv_frequencies_advanced__dup_samples():\n    ag3 = setup_ag3(cohorts_analysis=\"20211101\")\n    with pytest.raises(ValueError):\n        ag3.gene_cnv_frequencies_advanced(\n            region=\"3L\",\n            area_by=\"admin1_iso\",\n            period_by=\"year\",\n            sample_sets=[\"AG1000G-BF-A\", \"AG1000G-BF-A\"],\n        )\n\n\n@pytest.mark.parametrize(\"region\", [\"2R:1,000,000-2,000,000\", \"AGAP004707\"])\n@pytest.mark.parametrize(\n    \"sample_sets\", [\"AG1000G-AO\", [\"AG1000G-BF-A\", \"AG1000G-BF-B\"]]\n)\n@pytest.mark.parametrize(\"sample_query\", [None, \"taxon == 'coluzzii'\"])\n@pytest.mark.parametrize(\"site_mask\", [None, \"gamb_colu_arab\"])\ndef test_snp_allele_counts(region, sample_sets, sample_query, site_mask):\n\n    results_cache = \"..\/results_cache\"\n    shutil.rmtree(results_cache, ignore_errors=True)\n    ag3 = setup_ag3(results_cache=results_cache)\n\n    ac = ag3.snp_allele_counts(\n        region=region,\n        sample_sets=sample_sets,\n        sample_query=sample_query,\n        site_mask=site_mask,\n    )\n    assert isinstance(ac, np.ndarray)\n    pos = ag3.snp_sites(region=region, field=\"POS\", site_mask=site_mask)\n    assert ac.shape == (pos.shape[0], 4)\n\n    ac2 = ag3.snp_allele_counts(\n        region=region,\n        sample_sets=sample_sets,\n        sample_query=sample_query,\n        site_mask=site_mask,\n    )\n    assert_array_equal(ac, ac2)\n\n\n@pytest.mark.parametrize(\"region\", [\"2R:1,000,000-2,000,000\", \"AGAP004707\"])\n@pytest.mark.parametrize(\n    \"sample_sets\", [\"AG1000G-AO\", [\"AG1000G-BF-A\", \"AG1000G-BF-B\"]]\n)\n@pytest.mark.parametrize(\"sample_query\", [None, \"taxon == 'coluzzii'\"])\n@pytest.mark.parametrize(\"site_mask\", [None, \"gamb_colu_arab\"])\ndef test_pca(region, sample_sets, sample_query, site_mask):\n\n    results_cache = \"..\/results_cache\"\n    shutil.rmtree(results_cache, ignore_errors=True)\n    ag3 = setup_ag3(results_cache=results_cache)\n\n    n_components = 8\n    df_pca, evr = ag3.pca(\n        region=region,\n        n_snps=100,\n        sample_sets=sample_sets,\n        sample_query=sample_query,\n        site_mask=site_mask,\n        n_components=n_components,\n    )\n\n    df_samples = ag3.sample_metadata(\n        sample_sets=sample_sets,\n        sample_query=sample_query,\n    )\n\n    assert isinstance(df_pca, pd.DataFrame)\n    assert len(df_pca) == len(df_samples)\n    expected_columns = df_samples.columns.tolist() + [\n        f\"PC{n+1}\" for n in range(n_components)\n    ]\n    assert df_pca.columns.tolist() == expected_columns\n    assert_frame_equal(df_samples, df_pca[df_samples.columns.tolist()])\n    assert isinstance(evr, np.ndarray)\n    assert evr.shape == (n_components,)\n\n    df_pca2, evr2 = ag3.pca(\n        region=region,\n        n_snps=100,\n        sample_sets=sample_sets,\n        sample_query=sample_query,\n        site_mask=site_mask,\n        n_components=n_components,\n    )\n    assert_frame_equal(df_pca, df_pca2)\n    assert_array_equal(evr, 
evr2)\n\n\ndef _compare_series_like(actual, expect):\n\n    # compare pandas series-like objects for equality or floating point\n    # similarity, handling missing values appropriately\n\n    # handle object arrays, these don't get nans compared properly\n    t = actual.dtype\n    if t == object:\n        expect = expect.fillna(\"NA\")\n        actual = actual.fillna(\"NA\")\n\n    if t.kind == \"f\":\n        assert_allclose(actual.values, expect.values)\n    else:\n        assert_array_equal(actual.values, expect.values)\n\n\n@pytest.mark.parametrize(\"aims\", [\"gamb_vs_colu\", \"gambcolu_vs_arab\"])\ndef test_aim_variants(aims):\n    ag3 = setup_ag3()\n    ds = ag3.aim_variants(aims=aims)\n\n    # check dataset\n    assert isinstance(ds, xr.Dataset)\n\n    # check variables\n    expected_data_vars = {\"variant_allele\"}\n    assert set(ds.data_vars) == expected_data_vars\n\n    # check coordinates\n    expected_coords = {\"variant_contig\", \"variant_position\"}\n    assert set(ds.coords) == expected_coords\n\n    # check dimensions\n    expected_dims = {\"variants\", \"alleles\"}\n    assert set(ds.dims) == expected_dims\n\n    # check variant_contig\n    x = ds[\"variant_contig\"]\n    assert x.dims == (\"variants\",)\n    assert x.dtype == \"uint8\"\n\n    # check variant_position\n    x = ds[\"variant_position\"]\n    assert x.dims == (\"variants\",)\n    assert x.dtype == \"int64\" or \"int32\"\n\n    # check variant_allele\n    x = ds[\"variant_allele\"]\n    assert x.dims == (\"variants\", \"alleles\")\n    assert x.dtype == \"S1\"\n\n    # check attributes\n    assert ds.attrs[\"contigs\"] == [\"2R\", \"2L\", \"3R\", \"3L\", \"X\"]\n\n    # check dimension lengths\n    assert ds.dims[\"alleles\"] == 2\n    if aims == \"gamb_vs_colu\":\n        assert ds.dims[\"variants\"] == 700\n    elif aims == \"gambcolu_vs_arab\":\n        assert ds.dims[\"variants\"] == 2612\n\n\n@pytest.mark.parametrize(\n    \"sample_sets\",\n    [None, \"AG1000G-UG\", [\"AG1000G-BF-A\", \"AG1000G-BF-B\"], \"3.0\"],\n)\n@pytest.mark.parametrize(\n    \"sample_query\",\n    [None, \"aim_species != 'arabiensis'\"],\n)\n@pytest.mark.parametrize(\"aims\", [\"gamb_vs_colu\", \"gambcolu_vs_arab\"])\ndef test_aim_calls(sample_sets, sample_query, aims):\n    ag3 = setup_ag3()\n    ds = ag3.aim_calls(aims=aims, sample_sets=sample_sets, sample_query=sample_query)\n\n    # check dataset\n    assert isinstance(ds, xr.Dataset)\n\n    # check variables\n    expected_data_vars = {\"variant_allele\", \"call_genotype\"}\n    assert set(ds.data_vars) == expected_data_vars\n\n    # check coordinates\n    expected_coords = {\"variant_contig\", \"variant_position\", \"sample_id\"}\n    assert set(ds.coords) == expected_coords\n\n    # check dimensions\n    expected_dims = {\"variants\", \"alleles\", \"samples\", \"ploidy\"}\n    assert set(ds.dims) == expected_dims\n\n    # check variant_contig\n    x = ds[\"variant_contig\"]\n    assert x.dims == (\"variants\",)\n    assert x.dtype == \"uint8\"\n\n    # check variant_position\n    x = ds[\"variant_position\"]\n    assert x.dims == (\"variants\",)\n    assert (x.dtype == \"int32\") or (x.dtype == \"int64\")\n\n    # check variant_allele\n    x = ds[\"variant_allele\"]\n    assert x.dims == (\"variants\", \"alleles\")\n    assert x.dtype == \"S1\"\n\n    # check variant_allele\n    x = ds[\"call_genotype\"]\n    assert x.dims == (\"variants\", \"samples\", \"ploidy\")\n    assert x.dtype == \"int8\"\n\n    # check attributes\n    assert ds.attrs[\"contigs\"] == [\"2R\", 
\"2L\", \"3R\", \"3L\", \"X\"]\n\n    # check dimension lengths\n    df_samples = ag3.sample_metadata(sample_sets=sample_sets, sample_query=sample_query)\n    assert_array_equal(df_samples[\"sample_id\"].values, ds[\"sample_id\"].values)\n    assert ds.dims[\"samples\"] == len(df_samples)\n    assert ds.dims[\"alleles\"] == 2\n    assert ds.dims[\"ploidy\"] == 2\n    if aims == \"gamb_vs_colu\":\n        assert ds.dims[\"variants\"] == 700\n    elif aims == \"gambcolu_vs_arab\":\n        assert ds.dims[\"variants\"] == 2612\n"}
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_282","text":"Melanee-Melanee\/X-Caps\n'''\nEncoding Visual Attributes in Capsules for Explainable Medical Diagnoses (X-Caps)\nOriginal Paper by , , and  (https:\/\/arxiv.org\/abs\/1909.05926)\nCode written by: \nIf you use significant portions of this code or the ideas from our paper, please cite it :)\nIf you have any questions, please email me at .\n\nThis file contains the functions needed to convert the LIDC-IDRI dataset to the expected format.\n'''\n\nimport os\nfrom fnmatch import filter as fnf\nfrom glob import glob\nimport xml.etree.ElementTree as ET\n\nfrom tqdm import tqdm\ntry:\n    import pydicom as pydcm\nexcept:\n    import dicom as pydcm\nimport numpy as np\nfrom scipy.ndimage.morphology import binary_fill_holes\nfrom PIL import Image\n\nfrom utils import safe_mkdir\n\n\ndef create_cropped_nodules(IMG_ROOT, OUT_ROOT):\n    safe_mkdir(OUT_ROOT)\n\n    DEFAULT_PIXEL_SPACING = 0.787109\n    DEFAULT_SLICE_THICKNESS = 2.5\n    CROP_EXTRA_AMT = (np.sqrt(2)-1)\/2\n\n    print('Finding all xml files in LIDC-IDRI')\n    matches = []\n    for d1 in tqdm(sorted(os.listdir(IMG_ROOT))):\n        for d2 in sorted(os.listdir(os.path.join(IMG_ROOT, d1))):\n            if d2 == 'AdamsMasks':\n                continue\n            for d3 in sorted(os.listdir(os.path.join(IMG_ROOT, d1, d2))):\n                for f in fnf(os.listdir(os.path.join(IMG_ROOT, d1, d2, d3)), '*.xml'):\n                    matches.append(os.path.join(IMG_ROOT, d1, d2, d3, f))\n\n    print('\\nCreating cropped images of all nodules in LIDC-IDRI')\n    for xml_file in tqdm(matches):\n        # Load dicom image\n        img_path = os.path.dirname(xml_file)\n        dcm_imgs = []\n        for dir, _, files in os.walk(img_path):\n            for file in fnf(files, '*.dcm'):\n                dcm_imgs.append(os.path.join(dir, file))\n\n        # Get ref file\n        RefDs = pydcm.read_file(dcm_imgs[0])\n\n        # Load dimensions based on the number of rows, columns, and slices (along the Z axis)\n        ConstPixelDims = (int(RefDs.Rows), int(RefDs.Columns), len(dcm_imgs))\n        if int(RefDs.Rows) > 512 or int(RefDs.Columns) > 512:\n            continue\n\n        # Load spacing values (in mm)\n        try:\n            pixel_space = [float(RefDs.PixelSpacing[0]), float(RefDs.PixelSpacing[1])]\n        except AttributeError as e:\n            if str(e) == \"'FileDataset' object has no attribute 'PixelSpacing'\":\n                pixel_space = [DEFAULT_PIXEL_SPACING, DEFAULT_PIXEL_SPACING]\n            else:\n                raise NotImplementedError('Unhandled exception in pixel spacing.')\n\n        # Load slice thickness (in mm)\n        try:\n            slice_thick = float(RefDs.SliceThickness)\n        except AttributeError as e:\n            if str(e) == \"'FileDataset' object has no attribute 'SliceThickness'\":\n                if os.path.basename(xml_file)[:-4] == '243' or os.path.basename(xml_file)[:-4] == '244' or \\\n                                os.path.basename(xml_file)[:-4] == '070':\n                    slice_thick = 2.5\n                elif os.path.basename(xml_file)[:-4] == '135':\n                    slice_thick = 2.0\n                elif os.path.basename(xml_file)[:-4] == '043':\n                    slice_thick = 1.8\n                else:\n                    slice_thick = DEFAULT_SLICE_THICKNESS\n            else:\n                raise NotImplementedError('Unhandled exception in slice thickness.')\n\n        
ConstPixelSpacing = (pixel_space[0], pixel_space[1], slice_thick)\n\n        x = np.arange(0.0, (ConstPixelDims[0] + 1) * ConstPixelSpacing[0], ConstPixelSpacing[0])\n        y = np.arange(0.0, (ConstPixelDims[1] + 1) * ConstPixelSpacing[1], ConstPixelSpacing[1])\n        z = np.arange(0.0, (ConstPixelDims[2] + 1) * ConstPixelSpacing[2], ConstPixelSpacing[2])\n\n        # The array is sized based on 'ConstPixelDims'\n        ArrayDicom = np.zeros(ConstPixelDims, dtype=RefDs.pixel_array.dtype)\n\n        # loop through all the DICOM files\n        sop_ids = dict()\n        for filenameDCM in dcm_imgs:\n            # read the file\n            ds = pydcm.read_file(filenameDCM)\n            # store the raw image data\n            ArrayDicom[:, :, dcm_imgs.index(filenameDCM)] = ds.pixel_array\n            sop_ids[ds.SOPInstanceUID] = filenameDCM\n\n        # Load attributes\n        tree = ET.parse(xml_file)\n        root = tree.getroot()\n\n        unique_nodule_list = []\n        curr_nodule = -1\n        for s in root.findall('{http:\/\/www.nih.gov}ResponseHeader'):\n            try:\n                study_id = s.find('{http:\/\/www.nih.gov}StudyInstanceUID').text\n            except:\n                study_id = -1\n        for r_num, rad in enumerate(root.findall('{http:\/\/www.nih.gov}readingSession')):\n            try:\n                rad_id = rad.find('{http:\/\/www.nih.gov}servicingRadiologistID').text\n                if rad_id == 'anon':\n                    rad_id = 'anon-{:02d}'.format(r_num)\n            except:\n                rad_id = -1\n            for nodule in rad.findall('{http:\/\/www.nih.gov}unblindedReadNodule'):\n                nodule_id = nodule.find('{http:\/\/www.nih.gov}noduleID').text\n                sub = -1; ist = -1; cal = -1; sph = -1; mar = -1; lob = -1; spi = -1; tex = -1; mal = -1\n                for charac in nodule.findall('{http:\/\/www.nih.gov}characteristics'):\n                    try:\n                        sub = int(charac.find('{http:\/\/www.nih.gov}subtlety').text)\n                    except:\n                        pass\n                    try:\n                        ist = int(charac.find('{http:\/\/www.nih.gov}internalStructure').text)\n                    except:\n                        pass\n                    try:\n                        cal = int(charac.find('{http:\/\/www.nih.gov}calcification').text)\n                    except:\n                        pass\n                    try:\n                        sph = int(charac.find('{http:\/\/www.nih.gov}sphericity').text)\n                    except:\n                        pass\n                    try:\n                        mar = int(charac.find('{http:\/\/www.nih.gov}margin').text)\n                    except:\n                        pass\n                    try:\n                        lob = int(charac.find('{http:\/\/www.nih.gov}lobulation').text)\n                    except:\n                        pass\n                    try:\n                        spi = int(charac.find('{http:\/\/www.nih.gov}spiculation').text)\n                    except:\n                        pass\n                    try:\n                        tex = int(charac.find('{http:\/\/www.nih.gov}texture').text)\n                    except:\n                        pass\n                    try:\n                        mal = int(charac.find('{http:\/\/www.nih.gov}malignancy').text)\n                    except:\n                        pass\n                slices = []\n                
x_min = 999999; x_max = -9999999; y_min = 999999; y_max = -9999999\n                slice_list = nodule.findall('{http:\/\/www.nih.gov}roi')\n                GT = np.zeros((ConstPixelDims[0], ConstPixelDims[1], len(slice_list)), dtype=np.uint8)\n                for i, roi in enumerate(slice_list):\n                    z_pos = -1*float(roi.find('{http:\/\/www.nih.gov}imageZposition').text)\n                    sop_id = roi.find('{http:\/\/www.nih.gov}imageSOP_UID').text\n                    for edges in roi.findall('{http:\/\/www.nih.gov}edgeMap'):\n                        x_pos = int(edges.find('{http:\/\/www.nih.gov}xCoord').text)\n                        y_pos = int(edges.find('{http:\/\/www.nih.gov}yCoord').text)\n                        GT[y_pos,x_pos, i] = 1\n                        if x_pos < x_min:\n                            x_min = x_pos\n                        if x_pos > x_max:\n                            x_max = x_pos\n                        if y_pos < y_min:\n                            y_min = y_pos\n                        if y_pos > y_max:\n                            y_max = y_pos\n                    slices.append([sop_id, z_pos])\n                    GT[:,:,i] = binary_fill_holes(GT[:,:,i])\n\n                np_slices = np.asarray(slices)\n                sorted_slices = np_slices[np_slices[:, 1].argsort()]\n                sorted_GT = GT[:,:,np_slices[:, 1].argsort()]\n\n                mean_x = np.mean((x_min, x_max))\n                mean_y = np.mean((y_min, y_max))\n                mean_z = np.mean((float(sorted_slices[0][1]), float(sorted_slices[-1][1])))\n                width = abs(x_max - x_min)\n                height = abs(y_max - y_min)\n                depth = abs(float(sorted_slices[-1][1]) - float(sorted_slices[0][1]))\n                this_nodule = -1\n                matched_list = []\n                for i, nod_coords in enumerate(unique_nodule_list):\n                    if (abs(nod_coords[0] - mean_x) < (nod_coords[3]+width)\/4 or abs(nod_coords[0] - mean_x) <= 3) and \\\n                       (abs(nod_coords[1] - mean_y) < (nod_coords[4]+height)\/4 or abs(nod_coords[1] - mean_y) <= 3) and \\\n                       (abs(nod_coords[2] - mean_z) < (nod_coords[5]+depth)\/4 or abs(nod_coords[2] - mean_z) <= 3*slice_thick):\n                        # Check for multiple matches\n                        matched_list.append([i, np.sqrt((nod_coords[0] - mean_x)**2 + (nod_coords[1] - mean_y)**2 +\n                                                        (nod_coords[2] - mean_z)**2)])\n                if matched_list:\n                    matched_list = np.asarray(matched_list)\n                    for match in matched_list[matched_list[:, 1].argsort()]:\n                        if not glob(os.path.join(OUT_ROOT, '{}_{}'.format(os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(xml_file)))), study_id),\n                                   'nodule_{:03d}'.format(int(match[0])), 'rad-{}_*'.format(rad_id))):\n                            this_nodule = int(match[0])\n                            break\n                if this_nodule == -1:\n                    unique_nodule_list.append([mean_x, mean_y, mean_z, width, height, depth])\n                    curr_nodule += 1\n                    this_nodule = curr_nodule\n\n                out_dir = os.path.join(OUT_ROOT, '{}_{}'.format(os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(xml_file)))), study_id),\n                               'nodule_{:03d}'.format(this_nodule), 
'rad-{}_sub-{}_ist-{}_cal-{}_sph-{}_mar-{}_lob-{}_'\n                               'spi-{}_tex-{}_mal-{}'.format(rad_id, sub, ist, cal, sph, mar, lob, spi, tex, mal))\n                safe_mkdir(out_dir)\n\n                for i, box in enumerate(sorted_slices):\n                    try:\n                        dcm_slice = int(dcm_imgs.index(sop_ids[box[0]]))\n                    except Exception as e:\n                        box[0] = correct_images(box[0])\n                        if box[0] != '-1' and box[0] != '-2':\n                            dcm_slice = int(dcm_imgs.index(sop_ids[box[0]]))\n                        elif box[0] == '-2':\n                            continue # This option is for images which cannot be corrected but are unimportant\n                        else:\n                            print('Unable to locate correct .dcm slice for {}: {}. Please correct by hand.'.format(out_dir, box[0]))\n                            print(e)\n                            continue\n\n                    h_extra = int(height * CROP_EXTRA_AMT)\n                    w_extra = int(width * CROP_EXTRA_AMT)\n                    # These are to handle single pixel annotations\n                    if h_extra < 2:\n                        h_extra = 2\n                    if w_extra < 2:\n                        w_extra = 2\n                    crop = ArrayDicom[y_min-h_extra:y_max+h_extra, x_min-w_extra:x_max+w_extra, dcm_slice]\n                    crop_GT = sorted_GT[y_min-h_extra:y_max+h_extra, x_min-w_extra:x_max+w_extra, i]\n\n                    try:\n                        # NOTE: Make sure to change values back to int16 from uint16 when reading the images in!!!\n                        im = Image.fromarray(crop.astype('= {} radiologists\\' characteristics data'.format(MIN_RADS))\n    matches = []\n    nodules_total = 0\n    nodules_to_use = 0\n    non_mal_count = 0\n    mal_count = 0\n\n    for study_dir in tqdm(sorted(os.listdir(IMG_ROOT))):\n        nodule_list = sorted(os.listdir(os.path.join(IMG_ROOT, study_dir)))\n        nodules_total += len(nodule_list)\n        for nodule_dir in nodule_list:\n            rad_dirs = sorted(os.listdir(os.path.join(IMG_ROOT, study_dir, nodule_dir)))\n            rads = len(rad_dirs)\n            if rads < MIN_RADS:\n                continue\n            temp_list = []\n            temp_char_data = np.zeros((9,6))\n            for rad_dir in rad_dirs:\n                if not os.listdir(os.path.join(IMG_ROOT, study_dir, nodule_dir, rad_dir)):\n                    rads -= 1 # Make sure there is actually image(s) for this rad\n                else:\n                    split_names = rad_dir.split('_')\n                    all_chars = True\n                    for i in range(1,10):\n                        if split_names[i][-3:] == '--1':\n                            all_chars = False\n                        else:\n                            temp_char_data[i-1][int(split_names[i][-1])-1] += 1\n                    if not all_chars:\n                        rads -= 1\n                    else:\n                        temp_list.append(os.path.join(IMG_ROOT, study_dir, nodule_dir, rad_dir))\n\n            if rads >= MIN_RADS:\n                # Compute mean mal score\n                char_data_totals = np.zeros((9,rads))\n                char_data_stats = np.zeros((9,2))\n\n                for i in range(temp_char_data.shape[0]):\n                    c = 0\n                    for j in range(temp_char_data.shape[1]):\n                        for k in 
range(int(temp_char_data[i,j])):\n                            char_data_totals[i,c] = (j + 1)\n                            c += 1\n                char_data_stats[:,0] = np.mean(char_data_totals, axis=1)\n                char_data_stats[:, 1] = np.std(char_data_totals, axis=1)\n                mean_mal = char_data_stats[8,0]\n\n                if mean_mal != 3.:\n                    if rads > 4:\n                        print('Encountered rads > 4: {}'.format(os.path.join(IMG_ROOT, study_dir, nodule_dir))) # Sanity check for nodule matching\n                    if mean_mal > 3.:\n                        mal_count += 1\n                    else:\n                        non_mal_count += 1\n                    num_chars_data += temp_char_data\n                    nodules_to_use += 1\n                    for file_path in temp_list:\n                        matches.append([file_path] + list(np.ndarray.flatten(char_data_stats)))\n\n    print('Found {} total nodules.'.format(nodules_total))\n    print('Found {} nodules with characteristics and determinable (not score 3) avg malignancy scores.'.format(nodules_to_use))\n    print('{} nodules average score below 3.0, {} nodules above 3.0.'.format(non_mal_count, mal_count))\n\n    np.savetxt(os.path.join(OUT_ROOT, 'nodule_characteristics_counts.csv'),\n               np.concatenate((np.expand_dims(np.asarray(['sub', 'ist', 'cal', 'sph', 'mar', 'lob', 'spi',\n               'tex', 'mal', 'Totals:']), axis=1), np.vstack((num_chars_data.astype(np.int64),\n               np.asarray(['Nodules', nodules_to_use, 'Benign', non_mal_count, 'Malig', mal_count])))), axis=1),\n               fmt='%s,%s,%s,%s,%s,%s,%s', delimiter=',', header=\"Characteristics,1,2,3,4,5,6\")\n\n    np.savetxt(os.path.join(OUT_ROOT, 'master_nodule_list.csv'), np.asarray(matches),\n               fmt='%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s')\n\n\ndef correct_images(sname):\n    # This function contains a list of images which previously required manual correction within the LIDC-IDRI Dataset.\n\n    # Manually correct for image 0017\n    if sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.305973183883758685859912046949':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.139636132253744151113715840194'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.975363198401611311891539311888':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.225900589792147134785051710110'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.153194632177600377201998652445':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.222098252047357192090439228841'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.329142402178255247031380957411':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.276070543618203204841799986172'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.109012962923033337571132618784':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.201369290021439277502674762620'\n    # Manually correct for image 0365\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.112512486762301518180007539984':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.249086187399161659167414756279'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.448378396789516014605561762604':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.216758182207805904911618558070'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.303256875597167746646589593562':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.180833422259316497536094826188'\n    elif sname == 
'1.3.6.1.4.1.14519.5.2.1.6279.6001.261962165647171557143883123825':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.680655051010882131364380217685'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.428441304577336024295581627835':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.282568083479753958511921318301'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.329221218419947342986803210392':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.130147884776737463511106208477'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.162079731049618854270820976684':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.317603920309971419052997711476'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.785736194417664146622972784664':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.180833422259316497536094826188'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.652444697985639935050732394135':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.247436296095192529771061686046'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.126539353916279887972936951408':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.119827225411893011639439591713'\n    # Manually correct for image 0566\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.514074599988412913277488312051':\n        sname = '-2'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.292826962115570345472638642623':\n        sname = '-2'\n    # Manually correct for image 0659\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.294658615382614203741435957661':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.248517083496561594434577071132'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.883951945165118277793500546792':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.218208995904859324781331654067'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.227260896757583835259462034815':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.183101167412396355129144409796'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.739565975013005403715405771404':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.276779570196705787348278946110'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.244522908325555679363936146772':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.576955311322527292170312066972'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.125776849447531170933991444187':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.944721105102210115761068591710'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.156959022761131412720241221222':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.685786061228252640465903515314'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.157441085111648851876365968475':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.218208995904859324781331654067'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.137224073243631437732289379681':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.327828856516446064398338817575'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.196786590005502760794118627532':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.205919555392658132555723231924'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.271749899549008749493412118500':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.467531607505823612652093494995'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.132283326090716626749170288137':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.715346929996135559455398127585'\n    elif sname == 
'1.3.6.1.4.1.14519.5.2.1.6279.6001.292014563425807316410737237443':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.251646556878192917000905983161'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.281350813740489812658551562167':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.188830155395223944149966050821'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.316094521169588935447289217773':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.318926701435673382024116339995'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.400249926333575297612413406645':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.247894320876850135016381965868'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.381007938661788498734279329156':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.263902120137234774391883090194'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.158922411981395099005780254611':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.266102462639631998797024975317'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.153968349496782778041856013116':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.313130758239406881022967921981'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.199135326994407563129497784698':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.137821430143892810553323149499'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.328454607276840155088910752459':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.277464141419855638903368659937'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.105901452377957975094355467039':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.270022323575518362032565947858'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.230181645068532680519497368825':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.101175636735586811268012081787'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.173090595736867429956574661962':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.311147794796034131535570099457'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.426419361480558838333009902353':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.283215380710563114133061955920'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.149783315493297937843600113966':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.211773626788832944113459632641'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.333349896902589057387703875126':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.324942356299228484760469569592'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.259329619426001073359049716159':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.218894908479906137103265765511'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.204211053191501804382709873157':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.219041363289039597488091781264'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.365700870941618176907116849738':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.269625131313796127254468189745'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.174649660921460497526396207837':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.499186182774918820678569631767'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.628619440608225619886544814747':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.288908400826505634061200144991'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.165071066866482679435986323504':\n        sname = 
'1.3.6.1.4.1.14519.5.2.1.6279.6001.138555834428706707378735123427'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.932783428097248153076463331304':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.403741639352083297611557443868'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.232252382783080336041314614357':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.242386520761336203399531222995'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.304249587531812156369799852687':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.233668843426769210066014174740'\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.547584917033319141420515123587':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.502898181085822091725452394574'\n    # Manually correct for image 0931\n    elif sname == '1.3.6.1.4.1.14519.5.2.1.6279.6001.265313295605480688537936547605':\n        sname = '1.3.6.1.4.1.14519.5.2.1.6279.6001.290994721708875046196354781651'\n    else:\n        sname = '-1'\n    return sname"}
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_283","text":"import math\nimport numpy as np\nimport pylab as py\nfrom astropy.table import Table\nfrom astropy.io import fits\nimport pickle, glob\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nimport os\nfrom scipy import interpolate\nimport kai\n\ndef setup_phot(imageRoot, silent=False,\n               apertures=[25,50,75,100,125,150,175,200],\n               sky_annulus=200, sky_dannulus=50, zmag=0):\n\n    from pyraf import iraf as ir\n    \n    # Load image header\n    hdr = fits.getheader(imageRoot + '.fits')\n\n    ir.digiphot()\n    ir.daophot()\n    ir.unlearn('phot')\n    ir.unlearn('datapars')\n    ir.unlearn('centerpars')\n    ir.unlearn('fitskypars')\n    ir.unlearn('photpars')\n\n    ##########\n    # Set up datapars\n    ##########\n    ir.datapars.fwhmpsf = 5.0 # shouldn't really matter\n    ir.datapars.sigma = 'INDEF'\n    ir.datapars.datamin = 'INDEF'\n\n    if os.path.exists(imageRoot + '.max'):\n        max_file = open(imageRoot + '.max', 'r')\n        max_line = max_file.readline()\n        max = float(max_line)\n        ir.datapars.datamax = max\n\n        if not silent:\n            print( 'Set ir.datapars.datamax = %d' % max)\n\n    # Pull gain from the header\n    ir.datapars.gain = 'GAIN'\n    ir.datapars.epadu = 'INDEF'\n\n    # Assumes 43.1 electrons per read of noise\n    nreads = 1.0\n    if int(hdr['SAMPMODE']) == 3:\n        nreads = int(hdr['MULTISAM'])\n    \n    ir.datapars.ccdread = ''\n    ir.datapars.readnoise = 43.1 * math.sqrt(2.0) \/ math.sqrt(nreads)\n\n    # Get exposure times from header\n    ir.datapars.exposure = ''\n    ir.datapars.itime = float(hdr['ITIME']) * int(hdr['COADDS'])\n\n    # Other Header keywords\n    ir.datapars.airmass = 'AIRMASS'\n    ir.datapars.filter = 'FWINAME'\n    ir.datapars.obstime = 'EXPSTART'\n\n    \n    ##########\n    # Setup centerpars. We will use *.coo file for initial guess.\n    ##########\n    ir.centerpars.calgorithm = 'centroid'\n\n    ##########\n    # Setup fitskypars\n    ##########\n    ir.fitskypars.salgorithm = 'centroid'\n    ir.fitskypars.annulus = sky_annulus\n    ir.fitskypars.dannulus = sky_dannulus\n\n    ##########\n    # Setup photpars\n    ##########\n    # Setup a zeropoint... 
this assumes Strehl = 1, but good enough for now.\n    ir.photpars.zmag = zmag\n    ir.photpars.apertures = ','.join([str(aa) for aa in apertures])\n    \n    ##########\n    # Setup phot\n    ##########\n    ir.phot.interactive = 'no'\n    ir.phot.radplots = 'no'\n    ir.phot.verify = 'No'\n\n    if silent:\n        ir.phot.verbose = 'no'\n    else:\n        ir.phot.verbose = 'yes'\n\ndef run_phot(imageRoot, silent=False,\n             apertures=[25,50,75,100,125,150,175,200],\n             sky_annulus=200, sky_dannulus=50, zmag=0):\n\n    from pyraf import iraf as ir\n    \n    setup_phot(imageRoot, apertures=apertures, zmag=zmag, silent=silent,\n               sky_annulus=sky_annulus, sky_dannulus=sky_dannulus)\n\n    image = imageRoot + '.fits'\n    coords = imageRoot + '.coo'\n\n    # Output into current directory, not data directory\n    rootSplit = imageRoot.split('\/')\n    output = rootSplit[-1] + '.phot.mag'\n\n    ir.phot(image, coords, output)\n\n    (radius, flux, mag, merr) = get_phot_output(output, silent=silent)\n\n    return (radius, flux, mag, merr)\n\ndef get_phot_output(output, silent=False):\n    from pyraf import iraf as ir\n    \n    # Now get the results using txdump\n    radStr = ir.txdump(output, 'RAPERT', 'yes', Stdout=1)\n    fluxStr = ir.txdump(output, 'FLUX', 'yes', Stdout=1)\n    magStr = ir.txdump(output, 'MAG', 'yes', Stdout=1)\n    merrStr = ir.txdump(output, 'MERR', 'yes', Stdout=1)\n    pierStr = ir.txdump(output, 'PIER', 'yes', Stdout=1)\n\n    radFields = radStr[0].split()\n    fluxFields = fluxStr[0].split()\n    magFields = magStr[0].split()\n    merrFields = merrStr[0].split()\n    pierFields = pierStr[0].split()\n\n    count = len(radFields)\n\n    radius = np.zeros(count, dtype=float)\n    flux = np.zeros(count, dtype=float)\n    mag = np.zeros(count, dtype=float)\n    merr = np.zeros(count, dtype=float)\n\n    for rr in range(count):\n        radius[rr] = float(radFields[rr])\n\n        if (int(pierFields[rr]) != 0 or magFields[rr] == 'INDEF' or\n            merrFields[rr] == 'INDEF'):\n            print( 'Problem in image: ' + output)\n\n            # Error\n            flux[rr] = 0\n            mag[rr] = 0\n            merr[rr] = 0\n        else:\n            flux[rr] = float(fluxFields[rr])\n            mag[rr] = float(magFields[rr])\n            merr[rr] = float(merrFields[rr])\n\n    if not silent:\n        print( '%6s  %10s  %6s  %6s' % ('Radius', 'Flux', 'Mag', 'MagErr'))\n        for ii in range(count):\n            print( '%8.1f  %10d  %6.3f  %6.3f' % \\\n                (radius[ii], flux[ii], mag[ii], merr[ii]))\n    \n    return (radius, flux, mag, merr)\n\ndef get_filter_profile(filter):\n    \"\"\"\n    Returns the wavelength (in microns) and the transmission for \n    the specified NIRC2 filter.\n\n    Example: \n    (wave, trans) = kai.photometry.get_filter_profile('Kp')\n    py.clf()\n    py.plot(wave, trans)\n    py.xlabel('Wavelength (microns)')\n    py.ylabel('Transmission')\n    \"\"\"\n    base_path = os.path.dirname(kai.__file__)\n    rootDir = base_path + '\/filters\/'\n\n    filters = ['J', 'H', 'K', 'Kcont', 'Kp', 'Ks', 'Lp', 'Ms',\n               'Hcont', 'Brgamma', 'FeII']\n\n    if filter not in filters:\n        print( 'Could not find profile for filter %s.' 
% filter)\n        print( 'Choices are: ', filters)\n        return\n\n    table = Table.read(rootDir + filter + '.dat', format='ascii')\n\n    wavelength = table[table.colnames[0]]\n    transmission = table[table.colnames[1]]\n\n    # Lets fix wavelength array for duplicate values\n    diff = np.diff(wavelength)\n    idx = np.where(diff <= 0)[0]\n    wavelength[idx+1] += 1.0e-7\n\n    # Get rid of all entries with negative transmission\n    idx = np.where(transmission > 1)[0]\n    wavelength = wavelength[idx]\n    transmission = transmission[idx] \/ 100.0 # convert from % to ratio\n\n    return (wavelength, transmission)\n\ndef test_filter_profile_interp():\n    \"\"\"\n    Plot up the filter transmission curves and their interpolations\n    for the three K-band filters (K, Kp, Ks).\n    \"\"\"\n    # Get the transmission curve for NIRC2 filters and atmosphere.\n    K_wave, K_trans = get_filter_profile('K')\n    Kp_wave, Kp_trans = get_filter_profile('Kp')\n    Ks_wave, Ks_trans = get_filter_profile('Ks')\n    J_wave, J_trans = get_filter_profile('J')\n    H_wave, H_trans = get_filter_profile('H')\n    Lp_wave, Lp_trans = get_filter_profile('Lp')\n\n    # We will need to resample these transmission curves.\n    print( 'Creating interp object')\n    K_interp = interpolate.splrep(K_wave, K_trans, k=1, s=0)\n    Kp_interp = interpolate.splrep(Kp_wave, Kp_trans, k=1, s=0)\n    Ks_interp = interpolate.splrep(Ks_wave, Ks_trans, k=1, s=0)\n    J_interp = interpolate.splrep(J_wave, J_trans, k=1, s=0)\n    H_interp = interpolate.splrep(H_wave, H_trans, k=1, s=0)\n    Lp_interp = interpolate.splrep(Lp_wave, Lp_trans, k=1, s=0)\n\n    K_wave_new = np.arange(K_wave.min(), K_wave.max(), 0.0005)\n    Kp_wave_new = np.arange(Kp_wave.min(), Kp_wave.max(), 0.0005)\n    Ks_wave_new = np.arange(Ks_wave.min(), Ks_wave.max(), 0.0005)\n    J_wave_new = np.arange(J_wave.min(), J_wave.max(), 0.0005)\n    H_wave_new = np.arange(H_wave.min(), H_wave.max(), 0.0005)\n    Lp_wave_new = np.arange(Lp_wave.min(), Lp_wave.max(), 0.0005)\n\n    print( 'Interpolating')\n    K_trans_new = interpolate.splev(K_wave_new, K_interp)\n    Kp_trans_new = interpolate.splev(Kp_wave_new, Kp_interp)\n    Ks_trans_new = interpolate.splev(Ks_wave_new, Ks_interp)\n    J_trans_new = interpolate.splev(J_wave_new, J_interp)\n    H_trans_new = interpolate.splev(H_wave_new, H_interp)\n    Lp_trans_new = interpolate.splev(Lp_wave_new, Lp_interp)\n\n    print( 'Plotting')\n#     py.figure(2, figsize=(4,4))\n#     py.subplots_adjust(left=0.2, bottom=0.14, top=0.95, right=0.94)\n    py.clf()\n    py.plot(K_wave, K_trans, 'bo', ms=4, label='_nolegend_', mec='blue')\n    py.plot(K_wave_new, K_trans_new, 'b-', label='K', linewidth=2)\n\n    py.plot(Kp_wave, Kp_trans, 'ro', ms=4, label='_nolegend_', mec='red')\n    py.plot(Kp_wave_new, Kp_trans_new, 'r-', label='Kp', linewidth=2)\n\n    py.plot(Ks_wave, Ks_trans, 'go', ms=4, label='_nolegend_', mec='green')\n    py.plot(Ks_wave_new, Ks_trans_new, 'g-', label='Ks', linewidth=2)\n\n    py.plot(J_wave, J_trans, 'go', ms=4, label='_nolegend_', mec='green')\n    py.plot(J_wave_new, J_trans_new, 'g-', label='J', linewidth=2)\n\n    py.plot(H_wave, H_trans, 'go', ms=4, label='_nolegend_', mec='green')\n    py.plot(H_wave_new, H_trans_new, 'g-', label='H', linewidth=2)\n\n    py.plot(Lp_wave, Lp_trans, 'go', ms=4, label='_nolegend_', mec='green')\n    py.plot(Lp_wave_new, Lp_trans_new, 'g-', label='Lp', linewidth=2)\n    \n    py.legend(loc='lower right', numpoints=1, markerscale=0.1)\n    
py.xlabel('Wavelength (microns)')\n    py.ylabel('Transmission (%)')\n\n#     py.axis([2.110, 2.120, 0.928, 0.945])\n\ndef test_atmosphere_profile_interp():\n    atmDir = '\/u\/jlu\/data\/w51\/09jun26\/weather\/atmosphere_transmission.dat'\n    atmData = Table.read(atmDir, format='ascii')\n    atm_wave = atmData[atmData.colnames[0]]\n    atm_trans = atmData[atmData.colnames[1]]\n\n    atm_interp = interpolate.splrep(atm_wave, atm_trans, k=1, s=1)\n\n    atm_wave_new = np.arange(2.0, 2.4, 0.0005)\n    atm_trans_new = interpolate.splev(atm_wave_new, atm_interp)\n\n    py.clf()\n    py.plot(atm_wave, atm_trans, 'r.', ms=2)\n    py.plot(atm_wave_new, atm_trans_new, 'b-')\n    py.xlabel('Wavelength (microns)')\n    py.ylabel('Transmission (%)')\n    py.xlim(2, 2.4)\n"}
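A small usage sketch for get_filter_profile defined above: the returned (wavelength, transmission) arrays can be reduced to a transmission-weighted effective wavelength with plain numpy. The helper below is hypothetical and for illustration only; it is not part of the kai package.

import numpy as np

def effective_wavelength(wave, trans):
    # Transmission-weighted mean wavelength of a filter profile.
    # Hypothetical helper; assumes wave is monotonically increasing.
    wave = np.asarray(wave, dtype=float)
    trans = np.asarray(trans, dtype=float)
    return np.trapz(wave * trans, wave) / np.trapz(trans, wave)

# wave, trans = get_filter_profile('Kp')
# print(effective_wavelength(wave, trans))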
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_284","text":"from pathlib import Path\nfrom random import choice\nfrom statistics import mean\nfrom typing import Any, Counter\n\nimport typer\nfrom click import Choice as click_Choice\nfrom tqdm import tqdm  # type: ignore\nfrom typer import echo as typer_echo\nfrom typer import prompt as typer_prompt\nfrom typer import secho as typer_secho\nfrom typer import style as typer_style\n\nfrom wordle_solver.solver import (\n    calculate_entropies,\n    get_initial_hints_dict,\n    map_all_hints,\n)\nfrom wordle_solver.utils import DEFAULT_WORDLIST_PATH, calculate_hints, load_word_list\n\napp = typer.Typer(add_completion=False)  # pragma: no cover\nstate: dict[Any, Any] = {}  # pragma: no cover\n\n\ndef get_hint_feedback(chosen_word: str) -> tuple[int, ...]:  # pragma: no cover\n    while True:\n        typer_echo(\"b = black (grey) \/ y = yellow \/ g = green\")\n\n        hint_input = typer_prompt(\n            \"Please provide the result for '\"\n            + typer_style(chosen_word, fg=\"bright_magenta\")\n            + \"'\"\n        )\n\n        if not all(char in set(\"byg\") for char in hint_input):\n            typer_secho(\n                \"Invalid input. Please only use the characters 'b', 'y', or 'g'\",\n                fg=\"red\",\n            )\n            typer_echo(\"Try again...\")\n            continue\n\n        if len(hint_input) != 5:\n            typer_secho(\"Invalid input. Input must be 5 characters long.\", fg=\"red\")\n            typer_echo(\"Try again...\")\n            continue\n\n        break\n\n    input_to_output: dict[str, int] = {\"g\": 2, \"y\": 1, \"b\": 0}\n\n    return tuple(input_to_output[input_char] for input_char in hint_input)\n\n\n@app.command()  # pragma: no cover\ndef stats(\n    word_list: Path = typer.Option(\n        DEFAULT_WORDLIST_PATH,\n        \"--word-list\",\n        \"-w\",\n        help=\"The directory location containing the word list files.\",\n    ),\n) -> None:\n    _, all_guesses, all_solutions = load_word_list(word_list)\n    initial_hint_dict = get_initial_hints_dict(all_guesses, all_solutions)\n\n    temp_entropies = calculate_entropies(all_guesses, all_solutions, initial_hint_dict)\n    top_initial_guesses: list[str] = list(temp_entropies.keys())[0:10]\n\n    for initial_guess in top_initial_guesses:\n        guesses_to_solved: list[int] = []\n\n        for secret in tqdm(all_solutions):\n            solved = False\n            iteration_count = 1\n\n            # The first guess is the most computational, so try to keep it fast\n            hint = calculate_hints(initial_guess, secret)\n            local_solutions = list(initial_hint_dict[initial_guess][hint])\n            local_hint_dict = map_all_hints(all_guesses, local_solutions)\n\n            while not solved:\n                iteration_count += 1\n                total_remaining_solutions = len(local_solutions)\n\n                if total_remaining_solutions == 1:\n                    chosen_guess = local_solutions[0]\n                elif total_remaining_solutions == 2:\n                    chosen_guess = choice(local_solutions)\n                else:\n                    entropies = calculate_entropies(\n                        all_guesses, local_solutions, local_hint_dict\n                    )\n                    chosen_guess = list(entropies.keys())[0]\n\n                # typer_echo(f\"{chosen_guess=}\")\n                hint = calculate_hints(chosen_guess, secret)\n                
if hint == (2, 2, 2, 2, 2):\n                    solved = True\n                    guesses_to_solved.append(iteration_count)\n                    break\n\n                local_solutions = list(local_hint_dict[chosen_guess][hint])\n                local_hint_dict = map_all_hints(all_guesses, local_solutions)\n\n        typer_echo(f\"Using guess: {initial_guess}\")\n        typer_echo(f\"Total words solved: {len(guesses_to_solved)}\")\n        typer_echo(f\"Average guess: {round(mean(guesses_to_solved), 3)}\")\n        typer_echo(f\"Total counts: {Counter(guesses_to_solved)}\")\n\n\n@app.command()  # pragma: no cover\ndef solver(\n    word_list: Path = typer.Option(\n        DEFAULT_WORDLIST_PATH,\n        \"--word-list\",\n        \"-w\",\n        help=\"The directory location containing the word list files.\",\n    ),\n) -> None:\n    \"\"\"Solve a word on Wordle.\"\"\"\n    _, guesses, solutions = load_word_list(word_list)\n    hint_dict = get_initial_hints_dict(guesses, solutions)\n\n    for _ in range(6):\n        total_remaining_solutions = len(solutions)\n        if total_remaining_solutions == 0:\n            # TODO: Error out?\n            pass\n        elif total_remaining_solutions == 1:\n            typer_echo(\"There is only 1 remaining possible answer:\")\n            typer_echo(typer_style(solutions, bg=\"magenta\"))\n\n            chosen_guess = solutions[0]\n        elif total_remaining_solutions == 2:\n            typer_echo(\"There are 2 remaining possible answers:\")\n            typer_echo(typer_style(solutions, bg=\"magenta\"))\n\n            chosen_guess = typer_prompt(\n                \"Please select a guess word:\",\n                type=click_Choice(solutions),\n            )\n        else:\n            entropies = calculate_entropies(guesses, solutions, hint_dict)\n            top_8_entropies = dict(list(entropies.items())[0:8])\n\n            typer_echo(\n                f\"There are {total_remaining_solutions} remaining possible answers.\"\n            )\n            if total_remaining_solutions < 8:\n                typer_echo(typer_style(solutions, bg=\"magenta\"))\n\n            typer_echo(\"The top 8 guesses are:\")\n            typer_echo(typer_style(top_8_entropies, bg=\"blue\"))\n\n            chosen_guess = typer_prompt(\n                \"Please select a guess word:\",\n                type=click_Choice(list(top_8_entropies.keys())),\n            )\n\n        hint_feedback = get_hint_feedback(chosen_guess)\n        if hint_feedback == (2, 2, 2, 2, 2):\n            typer_secho(\"Victory!\", fg=\"bright_green\")\n            return\n\n        solutions = list(hint_dict[chosen_guess][hint_feedback])\n        hint_dict = map_all_hints(guesses, solutions)\n\n\n@app.callback()  # pragma: no cover\ndef main() -> None:\n    # \"\"\"\n    # Manage users in the awesome CLI app.\n    # \"\"\"\n    pass\n\n\nif __name__ == \"__main__\":\n    app()\n"}
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_285","text":"0\n# -*- coding: utf-8 -*-\n\"\"\"Approval-based committee (ABC) voting rules\"\"\"\n\n\nfrom __future__ import print_function\nimport sys\nimport functools\nfrom itertools import combinations\ntry:\n    from gmpy2 import mpq as Fraction\nexcept ImportError:\n    print(\"Warning: module gmpy2 not found, \"\n          + \"resorting to Python's fractions.Fraction\")\n    from fractions import Fraction\nfrom abcvoting import abcrules_gurobi\nfrom abcvoting.misc import sort_committees\nfrom abcvoting.misc import hamming\nfrom abcvoting.misc import enough_approved_candidates\nfrom abcvoting.misc import str_committees_header\nfrom abcvoting.misc import str_candset, str_candsets\nfrom abcvoting.misc import header\nfrom abcvoting import scores\n\n\n########################################################################\n\n\nclass UnknownRuleIDError(ValueError):\n    \"\"\"Exception raised if unknown rule id is used\"\"\"\n\n    def __init__(self, expression, message):\n        self.expression = expression\n        self.message = \"Rule ID \\\"\" + str(message) + \"\\\" is not known.\"\n\n\nclass ABCRule():\n    \"\"\"Class for ABC rules containing basic information and function call\"\"\"\n    def __init__(self, rule_id, shortname, longname, fct,\n                 algorithms=[\"standard\"], resolute=[True, False]):\n        self.rule_id = rule_id\n        self.shortname = shortname\n        self.longname = longname\n        self.fct = fct\n        self.algorithms = algorithms\n        # algorithms should be sorted by speed (fastest first)\n        self.resolute = resolute\n\n        assert len(resolute) > 0\n        assert len(algorithms) > 0\n\n    def compute(self, profile, committeesize, **kwargs):\n        return self.fct(profile, committeesize, **kwargs)\n\n    def fastest_algo(self):\n        for algo in self.algorithms:\n            if algo == \"gurobi\" and not abcrules_gurobi.available:\n                continue\n            return algo\n\n\n########################################################################\n\n\ndef compute(rule_id, profile, committeesize, **kwargs):\n    try:\n        return rules[rule_id].compute(profile, committeesize, **kwargs)\n    except KeyError:\n        raise NotImplementedError(\"ABC rule \" + str(rule_id) + \" not known.\")\n\n\n# computes arbitrary Thiele methods via branch-and-bound\ndef compute_thiele_method(scorefct_str, profile, committeesize,\n                          algorithm=\"branch-and-bound\",\n                          resolute=False, verbose=0):\n    \"\"\"Thiele methods\n\n    Compute winning committees of the Thiele method specified\n    by the score function (scorefct_str)\n    \"\"\"\n    enough_approved_candidates(profile, committeesize)\n    scorefct = scores.get_scorefct(scorefct_str, committeesize)\n\n    # optional output\n    if verbose:\n        print(header(rules[scorefct_str].longname))\n        if resolute:\n            print(\"Computing only one winning committee (resolute=True)\\n\")\n    if verbose >= 3:\n        if algorithm == \"gurobi\":\n            print(\"Using the Gurobi ILP solver\\n\")\n        if algorithm == \"branch-and-bound\":\n            print(\"Using a branch-and-bound algorithm\\n\")\n    # end of optional output\n\n    if algorithm == \"gurobi\":\n        committees = abcrules_gurobi.__gurobi_thiele_methods(\n            profile, committeesize, scorefct, resolute)\n\n        committees = 
sort_committees(committees)\n    elif algorithm == \"branch-and-bound\":\n        committees = __thiele_methods_branchandbound(\n            profile, committeesize, scorefct_str, resolute)\n    else:\n        raise NotImplementedError(\n            \"Algorithm \" + str(algorithm)\n            + \" not specified for compute_thiele_method\")\n\n    # optional output\n    if verbose >= 2:\n        print(\"Optimal \" + scorefct_str.upper() + \"-score: \"\n              + str(scores.thiele_score(scorefct_str, profile, committees[0])))\n        print()\n    if verbose:\n        print(str_committees_header(committees, winning=True))\n        print(str_candsets(committees, names=profile.names))\n    # end of optional output\n\n    return committees\n\n\n# computes arbitrary Thiele methods via branch-and-bound\ndef __thiele_methods_branchandbound(profile, committeesize,\n                                    scorefct_str, resolute):\n    \"\"\"Branch-and-bound algorithm to compute winning committees\n    for Thiele methods\"\"\"\n    enough_approved_candidates(profile, committeesize)\n    scorefct = scores.get_scorefct(scorefct_str, committeesize)\n\n    best_committees = []\n    init_com = compute_seq_thiele_method(\n        profile, committeesize, scorefct_str, resolute=True)[0]\n    best_score = scores.thiele_score(scorefct_str, profile, init_com)\n    part_coms = [[]]\n    while part_coms:\n        part_com = part_coms.pop(0)\n        # potential committee, check if at least as good\n        # as previous best committee\n        if len(part_com) == committeesize:\n            score = scores.thiele_score(scorefct_str, profile, part_com)\n            if score == best_score:\n                best_committees.append(part_com)\n            elif score > best_score:\n                best_committees = [part_com]\n                best_score = score\n        else:\n            if len(part_com) > 0:\n                largest_cand = part_com[-1]\n            else:\n                largest_cand = -1\n            missing = committeesize - len(part_com)\n            marg_util_cand = scores.marginal_thiele_scores_add(\n                scorefct, profile, part_com)\n            upper_bound = (\n                sum(sorted(marg_util_cand[largest_cand + 1:])[-missing:])\n                + scores.thiele_score(scorefct_str, profile, part_com))\n            if upper_bound >= best_score:\n                for c in range(largest_cand + 1,\n                               profile.num_cand - missing + 1):\n                    part_coms.insert(0, part_com + [c])\n\n    committees = sort_committees(best_committees)\n    if resolute:\n        committees = [committees[0]]\n\n    return committees\n\n\n# Sequential PAV\ndef compute_seqpav(profile, committeesize, algorithm=\"standard\",\n                   resolute=True, verbose=0):\n    \"\"\"Sequential PAV (seq-PAV)\"\"\"\n    return compute_seq_thiele_method(\n        profile, committeesize, 'pav', algorithm=algorithm,\n        resolute=resolute, verbose=verbose)\n\n\ndef compute_seqslav(profile, committeesize, algorithm=\"standard\",\n                    resolute=True, verbose=0):\n    \"\"\"Sequential Sainte-Lague Approval Voting (SLAV)\"\"\"\n    return compute_seq_thiele_method(\n        profile, committeesize, \"slav\", algorithm=algorithm,\n        resolute=resolute, verbose=verbose)\n\n\n# Reverse Sequential PAV\ndef compute_revseqpav(profile, committeesize, algorithm=\"standard\",\n                      resolute=True, verbose=0):\n    \"\"\"Reverse sequential PAV 
(revseq-PAV)\"\"\"\n    return compute_revseq_thiele_method(\n        profile, committeesize, 'pav', algorithm=algorithm,\n        resolute=resolute, verbose=verbose)\n\n\ndef compute_seqcc(profile, committeesize, algorithm=\"standard\",\n                  resolute=True, verbose=0):\n    \"\"\"Sequential Chamberlin-Courant (seq-CC)\"\"\"\n    return compute_seq_thiele_method(\n        profile, committeesize, 'cc', algorithm=algorithm,\n        resolute=resolute, verbose=verbose)\n\n\ndef compute_sav(profile, committeesize, algorithm=\"standard\",\n                resolute=False, verbose=0):\n    \"\"\"Satisfaction Approval Voting (SAV)\"\"\"\n    if algorithm == \"standard\":\n        return __separable(\"sav\", profile, committeesize, resolute, verbose)\n    else:\n            raise NotImplementedError(\n                \"Algorithm \" + str(algorithm)\n                + \" not specified for compute_sav\")\n\n\n# Approval Voting (AV)\ndef compute_av(profile, committeesize, algorithm=\"standard\",\n               resolute=False, verbose=0):\n    \"\"\"Approval Voting\"\"\"\n    if algorithm == \"standard\":\n        return __separable(\"av\", profile, committeesize, resolute, verbose)\n    else:\n        raise NotImplementedError(\n            \"Algorithm \" + str(algorithm)\n            + \" not specified for compute_av\")\n\n\ndef __separable(rule_id, profile, committeesize, resolute, verbose):\n    enough_approved_candidates(profile, committeesize)\n\n    appr_scores = [0] * profile.num_cand\n    for pref in profile:\n        for cand in pref:\n            if rule_id == \"sav\":\n                # Satisfaction Approval Voting\n                appr_scores[cand] += Fraction(pref.weight, len(pref))\n            elif rule_id == \"av\":\n                # (Classic) Approval Voting\n                appr_scores[cand] += pref.weight\n            else:\n                raise UnknownRuleIDError(rule_id)\n\n    # smallest score to be in the committee\n    cutoff = sorted(appr_scores)[-committeesize]\n\n    certain_cands = [c for c in range(profile.num_cand)\n                     if appr_scores[c] > cutoff]\n    possible_cands = [c for c in range(profile.num_cand)\n                      if appr_scores[c] == cutoff]\n    missing = committeesize - len(certain_cands)\n    if len(possible_cands) == missing:\n        # candidates with appr_scores[c] == cutoff\n        # are also certain candidates because all these candidates\n        # are required to fill the committee\n        certain_cands = sorted(certain_cands + possible_cands)\n        possible_cands = []\n        missing = 0\n\n    if resolute:\n        committees = sort_committees(\n            [(certain_cands + possible_cands[:missing])])\n    else:\n        committees = sort_committees(\n            [(certain_cands + list(selection))\n             for selection\n             in combinations(possible_cands, missing)])\n\n    # optional output\n    if verbose:\n        print(header(rules[rule_id].longname))\n        if resolute:\n            print(\"Computing only one winning committee (resolute=True)\\n\")\n    if verbose >= 2:\n        print(\"Scores of candidates:\")\n        for c in range(profile.num_cand):\n            print(profile.names[c] + \": \" + str(appr_scores[c]))\n\n        print(\"\\nCandidates are contained in winning committees\")\n        print(\"if their score is >= \" + str(cutoff) + \".\")\n\n        if len(certain_cands) > 0:\n            print(\"\\nThe following candidates are contained in\")\n            print(\"every 
winning committee:\")\n            namedset = [profile.names[c] for c in certain_cands]\n            print(\" \" + \", \".join(map(str, namedset)))\n            print()\n\n        if len(possible_cands) > 0:\n            print(\"The following candidates are contained in\")\n            print(\"some of the winning committees:\")\n            namedset = [profile.names[c] for c in possible_cands]\n            print(\" \" + \", \".join(map(str, namedset)))\n            print(\"(\" + str(missing) + \" of those candidates is contained\\n\"\n                  + \" in every winning committee.)\\n\")\n    if verbose:\n        print(str_committees_header(committees, winning=True))\n        print(str_candsets(committees, names=profile.names))\n    # end of optional output\n\n    return committees\n\n\ndef __seq_thiele_resolute(profile, committeesize, scorefct_str, verbose):\n    \"\"\"Compute a *resolute* sequential Thiele method\n\n    Tiebreaking between candidates in favor of candidate with smaller\n    number\/index (candidates with smaller numbers are added first).\n    \"\"\"\n    committee = []\n\n    scorefct = scores.get_scorefct(scorefct_str, committeesize)\n\n    # optional output\n    if verbose >= 2:\n        output = \"starting with the empty committee (score = \"\n        output += str(scores.thiele_score(\n            scorefct_str, profile, committee)) + \")\"\n        print(output + \"\\n\")\n    # end of optional output\n\n    # build a committee starting with the empty set\n    for _ in range(committeesize):\n        additional_score_cand = scores.marginal_thiele_scores_add(\n            scorefct, profile, committee)\n        next_cand = additional_score_cand.index(max(additional_score_cand))\n        committee.append(next_cand)\n        # optional output\n        if verbose >= 2:\n            output = \"adding candidate number \"\n            output += str(len(committee)) + \": \"\n            output += profile.names[next_cand] + \"\\n\"\n            output += \" score increases by \"\n            output += str(max(additional_score_cand))\n            output += \" to a total of \"\n            output += str(scores.thiele_score(\n                scorefct_str, profile, committee))\n            tied_cands = [c for c in range(len(additional_score_cand))\n                          if (c > next_cand and\n                              (additional_score_cand[c]\n                               == max(additional_score_cand)))]\n            if len(tied_cands) > 0:\n                output += \" tie broken in favor of \" + str(next_cand)\n                output += \" candidates \" + str_candset(tied_cands)\n                output += \" would increase the score by the same amount (\"\n                output += str(max(additional_score_cand)) + \")\"\n            print(output + \"\\n\")\n        # end of optional output\n    return [sorted(committee)]\n\n\ndef __seq_thiele_irresolute(profile, committeesize, scorefct_str):\n    \"\"\"Compute an *irresolute* sequential Thiele method\n\n    Consider all possible ways to break ties between candidates\n    (aka parallel universe tiebreaking)\n    \"\"\"\n    scorefct = scores.get_scorefct(scorefct_str, committeesize)\n\n    comm_scores = {(): 0}\n    # build committees starting with the empty set\n    for _ in range(committeesize):\n        comm_scores_next = {}\n        for committee, score in comm_scores.items():\n            # marginal utility gained by adding candidate to the committee\n            additional_score_cand = 
scores.marginal_thiele_scores_add(\n                scorefct, profile, committee)\n            for c in range(profile.num_cand):\n                if additional_score_cand[c] >= max(additional_score_cand):\n                    next_comm = tuple(sorted(committee + (c,)))\n                    comm_scores_next[next_comm] = (\n                        score + additional_score_cand[c])\n        comm_scores = comm_scores_next\n    return sort_committees(list(comm_scores.keys()))\n\n\ndef compute_seq_thiele_method(profile, committeesize, scorefct_str,\n                              algorithm=\"standard\", resolute=True, verbose=0):\n    \"\"\"Sequential Thiele methods\"\"\"\n\n    enough_approved_candidates(profile, committeesize)\n\n    if algorithm != \"standard\":\n        raise NotImplementedError(\n            \"Algorithm \" + str(algorithm)\n            + \" not specified for compute_seq_thiele_method\")\n\n    # optional output\n    if verbose:\n        print(header(rules[\"seq\" + scorefct_str].longname))\n        if resolute:\n            print(\"Computing only one winning committee (resolute=True)\\n\")\n    # end of optional output\n\n    if resolute:\n        committees = __seq_thiele_resolute(\n            profile, committeesize, scorefct_str, verbose=verbose)\n    else:\n        committees = __seq_thiele_irresolute(\n            profile, committeesize, scorefct_str)\n\n    # optional output\n    if verbose:\n        print(str_committees_header(committees, winning=True))\n        print(str_candsets(committees, names=profile.names))\n        if resolute or len(committees) == 1:\n            print(scorefct_str.upper() + \"-score of winning committee:\",\n                  end=\"\")\n        else:\n            print(scorefct_str.upper() + \"-score of winning committees:\")\n        for comm in committees:\n            print(\" \" + str(scores.thiele_score(scorefct_str, profile, comm)))\n        print()\n    # end of optional output\n\n    return committees\n\n\ndef __revseq_thiele_irresolute(profile, committeesize, scorefct_str):\n    \"\"\"Compute an *irresolute* reverse sequential Thiele method\n\n    Consider all possible ways to break ties between candidates\n    (aka parallel universe tiebreaking)\n    \"\"\"\n    scorefct = scores.get_scorefct(scorefct_str, committeesize)\n\n    allcandcomm = tuple(range(profile.num_cand))\n    comm_scores = {allcandcomm: scores.thiele_score(\n        scorefct_str, profile, allcandcomm)}\n\n    for _ in range(profile.num_cand - committeesize):\n        comm_scores_next = {}\n        for committee, score in comm_scores.items():\n            marg_util_cand = scores.marginal_thiele_scores_remove(\n                scorefct, profile, committee)\n            score_reduction = min(marg_util_cand)\n            # find smallest elements in marg_util_cand and return indices\n            cands_to_remove = [cand for cand in range(profile.num_cand)\n                               if marg_util_cand[cand] == min(marg_util_cand)]\n            for c in cands_to_remove:\n                next_comm = tuple(set(committee) - set([c]))\n                comm_scores_next[next_comm] = score - score_reduction\n        comm_scores = comm_scores_next\n    return sort_committees(list(comm_scores.keys()))\n\n\ndef __revseq_thiele_resolute(profile, committeesize, scorefct_str, verbose):\n    \"\"\"Compute a *resolute* reverse sequential Thiele method\n\n    Tiebreaking between candidates in favor of candidate with smaller\n    number\/index (candidates with larger numbers are removed 
first).\n    \"\"\"\n    scorefct = scores.get_scorefct(scorefct_str, committeesize)\n\n    committee = set(range(profile.num_cand))\n\n    # optional output\n    if verbose >= 2:\n        output = \"full committee (\" + str(len(committee))\n        output += \" candidates) has a total score of \"\n        output += str(scores.thiele_score(\n            scorefct_str, profile, committee))\n        print(output + \"\\n\")\n    # end of optional output\n\n    for _ in range(profile.num_cand - committeesize):\n        marg_util_cand = scores.marginal_thiele_scores_remove(\n            scorefct, profile, committee)\n        score_reduction = min(marg_util_cand)\n        # find smallest elements in marg_util_cand and return indices\n        cands_to_remove = [cand for cand in range(profile.num_cand)\n                           if marg_util_cand[cand] == min(marg_util_cand)]\n        committee.remove(cands_to_remove[-1])\n\n        # optional output\n        if verbose >= 2:\n            rem_cand = cands_to_remove[-1]\n            output = \"removing candidate number \"\n            output += str(profile.num_cand - len(committee)) + \": \"\n            output += profile.names[rem_cand] + \"\\n\"\n            output += \" score decreases by \"\n            output += str(score_reduction)\n            output += \" to a total of \"\n            output += str(scores.thiele_score(\n                scorefct_str, profile, committee))\n            if len(cands_to_remove) > 1:\n                output += \" (tie between candidates \"\n                output += str_candset(cands_to_remove) + \")\\n\"\n            print(output + \"\\n\")\n        # end of optional output\n\n    return [sorted(list(committee))]\n\n\ndef compute_revseq_thiele_method(profile, committeesize,\n                                 scorefct_str, algorithm=\"standard\",\n                                 resolute=True, verbose=0):\n    \"\"\"Reverse sequential Thiele methods\"\"\"\n    enough_approved_candidates(profile, committeesize)\n\n    if algorithm != \"standard\":\n        raise NotImplementedError(\n            \"Algorithm \" + str(algorithm)\n            + \" not specified for compute_revseq_thiele_method\")\n\n    # optional output\n    if verbose:\n        print(header(rules[\"revseq\" + scorefct_str].longname))\n        if resolute:\n            print(\"Computing only one winning committee (resolute=True)\\n\")\n    # end of optional output\n\n    if resolute:\n        committees = __revseq_thiele_resolute(\n            profile, committeesize, scorefct_str, verbose=verbose)\n    else:\n        committees = __revseq_thiele_irresolute(\n            profile, committeesize, scorefct_str)\n\n    # optional output\n    if verbose:\n        print(str_committees_header(committees, winning=True))\n        print(str_candsets(committees, names=profile.names))\n    if verbose >= 2:\n        if resolute or len(committees) == 1:\n            print(\"PAV-score of winning committee:\", end=\"\")\n        else:\n            print(\"PAV-score of winning committees:\")\n        for comm in committees:\n            print(\" \" + str(scores.thiele_score(scorefct_str, profile, comm)))\n        print()\n    # end of optional output\n\n    return committees\n\n\ndef __minimaxav_bruteforce(profile, committeesize):\n    \"\"\"Brute-force algorithm for computing Minimax AV (MAV)\"\"\"\n    opt_committees = []\n    opt_mavscore = profile.num_cand + 1\n    for comm in combinations(list(range(profile.num_cand)), committeesize):\n        score = 
scores.mavscore(profile, comm)\n        if score < opt_mavscore:\n            opt_committees = [comm]\n            opt_mavscore = score\n        elif scores.mavscore(profile, comm) == opt_mavscore:\n            opt_committees.append(comm)\n\n    committees = sort_committees(opt_committees)\n\n    return committees\n\n\n# Minimax Approval Voting\ndef compute_mav(profile, committeesize, algorithm=\"brute-force\",\n                resolute=False, verbose=0):\n    \"\"\"Minimax AV (MAV)\"\"\"\n    enough_approved_candidates(profile, committeesize)\n\n    # optional output\n    if verbose:\n        print(header(rules[\"mav\"].longname))\n        if resolute:\n            print(\"Computing only one winning committee (resolute=True)\\n\")\n    if verbose >= 3:\n        if algorithm == \"gurobi\":\n            print(\"Using the Gurobi ILP solver\\n\")\n        if algorithm == \"brute-force\":\n            print(\"Using a brute-force algorithm\\n\")\n    # end of optional output\n\n    if algorithm == \"gurobi\":\n        committees = abcrules_gurobi.__gurobi_minimaxav(\n            profile, committeesize, resolute)\n        committees = sort_committees(committees)\n    elif algorithm == \"brute-force\":\n        committees = __minimaxav_bruteforce(profile, committeesize)\n        if resolute:\n            committees = [committees[0]]\n    else:\n        raise NotImplementedError(\"Algorithm \" + str(algorithm)\n                                  + \" not specified for compute_mav\")\n\n    opt_mavscore = scores.mavscore(profile, committees[0])\n\n    # optional output\n    if verbose:\n        print(\"Minimum maximal distance: \" + str(opt_mavscore))\n\n        print(str_committees_header(committees, winning=True))\n        print(str_candsets(committees, names=profile.names))\n\n        print(\"Corresponding distances to voters:\")\n        for comm in committees:\n            print([hamming(pref, comm) for pref in profile])\n        print()\n    # end of optional output\n\n    return committees\n\n\n# Lexicographic Minimax Approval Voting\ndef compute_lexmav(profile, committeesize, algorithm=\"brute-force\",\n                   resolute=False, verbose=0):\n    \"\"\"Lexicographic Minimax AV\"\"\"\n    enough_approved_candidates(profile, committeesize)\n\n    if not profile.has_unit_weights():\n        raise ValueError(rules[\"lexmav\"].shortname +\n                         \" is only defined for unit weights (weight=1)\")\n\n    if algorithm != \"brute-force\":\n        raise NotImplementedError(\n            \"Algorithm \" + str(algorithm)\n            + \" not specified for compute_lexmav\")\n\n    opt_committees = []\n    opt_distances = [profile.num_cand + 1] * len(profile)\n    for comm in combinations(list(range(profile.num_cand)), committeesize):\n        distances = sorted([hamming(pref, comm)\n                            for pref in profile],\n                           reverse=True)\n        for i in range(len(distances)):\n            if opt_distances[i] < distances[i]:\n                break\n            if opt_distances[i] > distances[i]:\n                opt_distances = distances\n                opt_committees = [comm]\n                break\n        else:\n            opt_committees.append(comm)\n\n    committees = sort_committees(opt_committees)\n    if resolute:\n        committees = [committees[0]]\n\n    # optional output\n    if verbose:\n        print(header(rules[\"lexmav\"].longname))\n        if resolute:\n            print(\"Computing only one winning committee 
(resolute=True)\\n\")\n\n        print(\"Minimum maximal distance: \" + str(max(opt_distances)))\n\n        print(str_committees_header(committees, winning=True))\n        print(str_candsets(committees, names=profile.names))\n\n        print(\"Corresponding distances to voters:\")\n        for comm in committees:\n            print([hamming(pref, comm) for pref in profile])\n        print()\n    # end of optional output\n\n    return committees\n\n\n# Proportional Approval Voting\ndef compute_pav(profile, committeesize, algorithm=\"branch-and-bound\",\n                resolute=False, verbose=0):\n    \"\"\"Proportional Approval Voting (PAV)\"\"\"\n    return compute_thiele_method(\n        'pav', profile, committeesize, algorithm=algorithm,\n        resolute=resolute, verbose=verbose)\n\n\n# Sainte-Lague Approval Voting\ndef compute_slav(profile, committeesize, algorithm=\"branch-and-bound\",\n                 resolute=False, verbose=0):\n    \"\"\"Sainte-Lague Approval Voting (SLAV)\"\"\"\n    return compute_thiele_method(\n        'slav', profile, committeesize, algorithm=algorithm,\n        resolute=resolute, verbose=verbose)\n\n\n# Chamberlin-Courant\ndef compute_cc(profile, committeesize, algorithm=\"branch-and-bound\",\n               resolute=False, verbose=0):\n    \"\"\"Approval Chamberlin-Courant (CC)\"\"\"\n    return compute_thiele_method(\n        'cc', profile, committeesize, algorithm=algorithm,\n        resolute=resolute, verbose=verbose)\n\n\ndef compute_monroe(profile, committeesize, algorithm=\"brute-force\",\n                   resolute=False, verbose=0):\n    \"\"\"Monroe's rule\"\"\"\n    enough_approved_candidates(profile, committeesize)\n\n    # optional output\n    if verbose:\n        print(header(rules[\"monroe\"].longname))\n        if resolute:\n            print(\"Computing only one winning committee (resolute=True)\\n\")\n    if verbose >= 3:\n        if algorithm == \"gurobi\":\n            print(\"Using the Gurobi ILP solver\\n\")\n        if algorithm == \"brute-force\":\n            print(\"Using a brute-force algorithm\\n\")\n    # end of optional output\n\n    if not profile.has_unit_weights():\n        raise ValueError(rules[\"monroe\"].shortname +\n                         \" is only defined for unit weights (weight=1)\")\n\n    if algorithm == \"gurobi\":\n        committees = abcrules_gurobi.__gurobi_monroe(\n            profile, committeesize, resolute)\n        committees = sort_committees(committees)\n    elif algorithm == \"brute-force\":\n        committees = __monroe_bruteforce(\n            profile, committeesize, resolute)\n    else:\n        raise NotImplementedError(\n            \"Algorithm \" + str(algorithm)\n            + \" not specified for compute_monroe\")\n\n    # optional output\n    if verbose:\n        print(\"Optimal Monroe score: \"\n              + str(scores.monroescore(profile, committees[0])) + \"\\n\")\n\n        print(str_committees_header(committees, winning=True))\n        print(str_candsets(committees, names=profile.names))\n    # end of optional output\n\n    return committees\n\n\n# Monroe's rule, computed via (brute-force) matching\ndef __monroe_bruteforce(profile, committeesize, resolute):\n    \"\"\"Brute-force computation of Monroe's rule\"\"\"\n    opt_committees = []\n    opt_monroescore = -1\n    for comm in combinations(list(range(profile.num_cand)), committeesize):\n        score = scores.monroescore(profile, comm)\n        if score > opt_monroescore:\n            opt_committees = [comm]\n            
opt_monroescore = score\n        elif scores.monroescore(profile, comm) == opt_monroescore:\n            opt_committees.append(comm)\n\n    committees = sort_committees(opt_committees)\n    if resolute:\n        committees = [committees[0]]\n\n    return committees\n\n\ndef compute_greedy_monroe(profile, committeesize,\n                          algorithm=\"standard\", resolute=True, verbose=0):\n    \"\"\"Greedy Monroe\"\"\"\n    enough_approved_candidates(profile, committeesize)\n    if not profile.has_unit_weights():\n        raise ValueError(rules[\"greedy-monroe\"].shortname +\n                         \" is only defined for unit weights (weight=1)\")\n\n    if not resolute:\n        raise NotImplementedError(\n            \"compute_greedy_monroe does not support resolute=False.\")\n\n    if algorithm != \"standard\":\n        raise NotImplementedError(\n            \"Algorithm \" + str(algorithm)\n            + \" not specified for compute_greedy_monroe\")\n\n    num_voters = len(profile)\n    committee = []\n\n    # remaining voters\n    remaining_voters = list(range(num_voters))\n    remaining_cands = set(range(profile.num_cand))\n\n    assignment = []\n    for t in range(committeesize):\n        maxapprovals = -1\n        selected = None\n        for c in remaining_cands:\n            approvals = len([i for i in remaining_voters\n                             if c in profile[i]])\n            if approvals > maxapprovals:\n                maxapprovals = approvals\n                selected = c\n\n        # determine how many voters are removed (at most)\n        if t < num_voters - committeesize * (num_voters \/\/ committeesize):\n            num_remove = num_voters \/\/ committeesize + 1\n        else:\n            num_remove = num_voters \/\/ committeesize\n\n        # only voters that approve the chosen candidate\n        # are removed\n        to_remove = [i for i in remaining_voters\n                     if selected in profile[i]]\n        if len(to_remove) > num_remove:\n            to_remove = to_remove[:num_remove]\n        assignment.append((selected, to_remove))\n        remaining_voters = [i for i in remaining_voters\n                            if i not in to_remove]\n        committee.append(selected)\n        remaining_cands.remove(selected)\n\n    committees = sort_committees([committee])\n\n    # optional output\n    if verbose:\n        print(header(rules[\"greedy-monroe\"].longname))\n\n    if verbose >= 2:\n        score1 = scores.monroescore(profile, committees[0])\n\n        score2 = len(profile) - len(remaining_voters)\n        print(\"The Monroe assignment computed by Greedy Monroe\")\n        print(\"has a Monroe score of \" + str(score2) + \".\")\n\n        if score1 > score2:\n            print(\"Monroe assignment found by Greedy Monroe is not \"\n                  + \"optimal for the winning committee,\")\n            print(\"i.e., by redistributing voters to candidates a higher \"\n                  + \"satisfaction is possible \"\n                  + \"(without changing the committee).\")\n            print(\"Optimal Monroe score of the winning committee is \"\n                  + str(score1) + \".\")\n\n        # build actual Monroe assignment for winning committee\n        for t, district in enumerate(assignment):\n            cand, voters = district\n            if t < num_voters - committeesize * (num_voters \/\/ committeesize):\n                missing = num_voters \/\/ committeesize + 1 - len(voters)\n            else:\n                missing = num_voters 
\/\/ committeesize - len(voters)\n            for _ in range(missing):\n                v = remaining_voters.pop()\n                voters.append(v)\n\n        print(\"Assignment (unsatisfied voters marked with *):\\n\")\n        for cand, voters in assignment:\n            print(\" candidate \" + profile.names[cand]\n                  + \" assigned to: \", end=\"\")\n            output = \"\"\n            for v in sorted(voters):\n                output += str(v)\n                if cand not in profile[v].approved:\n                    output += \"*\"\n                output += \", \"\n            print(output[:-2])\n        print()\n\n    if verbose:\n        print(str_committees_header(committees, winning=True))\n        print(str_candsets(committees, names=profile.names))\n    # end of optional output\n\n    return committees\n\n\ndef __seqphragmen_resolute(profile, committeesize, verbose,\n                           start_load=None, partial_committee=None):\n    \"\"\"Algorithm for computing resolute seq-Phragmen  (1 winning committee)\"\"\"\n    approvers_weight = {}\n    for c in range(profile.num_cand):\n        approvers_weight[c] = sum(pref.weight for pref in profile if c in pref)\n\n    load = start_load\n    if load is None:\n        load = {v: 0 for v, _ in enumerate(profile)}\n\n    committee = partial_committee\n    if partial_committee is None:\n        committee = []  # build committees starting with the empty set\n\n    for _ in range(len(committee), committeesize):\n        approvers_load = {}\n        for c in range(profile.num_cand):\n            approvers_load[c] = sum(pref.weight * load[v]\n                                    for v, pref in enumerate(profile)\n                                    if c in pref)\n        new_maxload = [Fraction(approvers_load[c] + 1, approvers_weight[c])\n                       if approvers_weight[c] > 0 else committeesize + 1\n                       for c in range(profile.num_cand)]\n        # exclude candidates already in the committee\n        large = max(new_maxload) + 1\n        for c in range(profile.num_cand):\n            if c in committee:\n                new_maxload[c] = large\n        # find smallest maxload\n        opt = min(new_maxload)\n        next_cand = new_maxload.index(opt)\n        # compute new loads and add new candidate\n        for v, pref in enumerate(profile):\n            if next_cand in pref:\n                load[v] = new_maxload[next_cand]\n            else:\n                load[v] = load[v]\n        committee = sorted(committee + [next_cand])\n\n        # optional output\n        if verbose >= 2:\n            output = \"adding candidate number \"\n            output += str(len(committee)) + \": \"\n            output += profile.names[next_cand] + \"\\n\"\n            output += \" maximum load increased to \"\n            output += str(opt)\n            print(output)\n            print(\" load distribution:\")\n            output = \"  (\"\n            for v, _ in enumerate(profile):\n                output += str(load[v]) + \", \"\n            print(output[:-2] + \")\")\n            tied_cands = [c for c in range(profile.num_cand)\n                          if (c > next_cand and\n                              (new_maxload[c] == opt))]\n            if len(tied_cands) > 0:\n                output = \" tie broken in favor of \" + profile.names[next_cand]\n                output += \",\\n candidates \" + str_candset(tied_cands)\n                output += \" would increase the load to the same amount (\"
\n                output += str(opt) + \")\"\n                print(output)\n            print()\n        # end of optional output\n\n    comm_loads = {tuple(committee): load}\n    return [committee], comm_loads\n\n\ndef __seqphragmen_irresolute(profile, committeesize,\n                             start_load=None, partial_committee=None):\n    \"\"\"Algorithm for computing irresolute seq-Phragmen (>=1 winning committees)\n    \"\"\"\n    approvers_weight = {}\n    for c in range(profile.num_cand):\n        approvers_weight[c] = sum(pref.weight for pref in profile if c in pref)\n\n    load = start_load\n    if load is None:\n        load = {v: 0 for v, _ in enumerate(profile)}\n\n    if partial_committee is None:\n        partial_committee = []  # build committees starting with the empty set\n    comm_loads = {tuple(partial_committee): load}\n\n    for _ in range(len(partial_committee), committeesize):\n        comm_loads_next = {}\n        for committee, load in comm_loads.items():\n            approvers_load = {}\n            for c in range(profile.num_cand):\n                approvers_load[c] = sum(pref.weight * load[v]\n                                        for v, pref in enumerate(profile)\n                                        if c in pref)\n            new_maxload = [\n                Fraction(approvers_load[c] + 1, approvers_weight[c])\n                if approvers_weight[c] > 0 else committeesize + 1\n                for c in range(profile.num_cand)]\n            # exclude candidates already in the committee\n            for c in range(profile.num_cand):\n                if c in committee:\n                    new_maxload[c] = sys.maxsize\n            # compute new loads\n            # and add new committees\n            for c in range(profile.num_cand):\n                if new_maxload[c] <= min(new_maxload):\n                    new_load = {}\n                    for v, pref in enumerate(profile):\n                        if c in pref:\n                            new_load[v] = new_maxload[c]\n                        else:\n                            new_load[v] = load[v]\n                    new_comm = tuple(sorted(committee + (c,)))\n                    comm_loads_next[new_comm] = new_load\n        comm_loads = comm_loads_next\n\n    committees = sort_committees(list(comm_loads.keys()))\n    return committees, comm_loads\n\n\ndef compute_seqphragmen(profile, committeesize, algorithm=\"standard\",\n                        resolute=True, verbose=False):\n    \"\"\"Phragmen's sequential rule (seq-Phragmen)\"\"\"\n    enough_approved_candidates(profile, committeesize)\n\n    if algorithm != \"standard\":\n        raise NotImplementedError(\n            \"Algorithm \" + str(algorithm)\n            + \" not specified for compute_seqphragmen\")\n\n    # optional output\n    if verbose:\n        print(header(rules[\"seqphrag\"].longname))\n        if resolute:\n            print(\"Computing only one winning committee (resolute=True)\\n\")\n    # end of optional output\n\n    if resolute:\n        committees, comm_loads = __seqphragmen_resolute(\n            profile, committeesize, verbose)\n    else:\n        committees, comm_loads = __seqphragmen_irresolute(\n            profile, committeesize)\n\n    # optional output\n    if verbose:\n        print(str_committees_header(committees, winning=True))\n        print(str_candsets(committees, names=profile.names))\n    if verbose >= 2:\n        if resolute or len(committees) == 1:\n            print(\"corresponding 
load distribution:\")\n        else:\n            print(\"corresponding load distributions:\")\n        for comm in committees:\n            output = \"(\"\n            for v, _ in enumerate(profile):\n                output += str(comm_loads[tuple(comm)][v]) + \", \"\n            print(output[:-2] + \")\")\n    # end of optional output\n\n    return committees\n\n\ndef __rule_x_get_min_q(profile, budget, cand):\n    rich = set([v for v, pref in enumerate(profile)\n                if cand in pref])\n    poor = set()\n\n    while len(rich) > 0:\n        poor_budget = sum(budget[v] for v in poor)\n        q = Fraction(1 - poor_budget, len(rich))\n        new_poor = set([v for v in rich\n                        if budget[v] < q])\n        if len(new_poor) == 0:\n            return q\n        rich -= new_poor\n        poor.update(new_poor)\n\n    return None  # not sufficient budget available\n\n\ndef compute_rule_x(profile, committeesize, algorithm=\"standard\",\n                   resolute=True, verbose=0):\n    \"\"\"Rule X\n\n    See https:\/\/arxiv.org\/pdf\/1911.11747.pdf, page 7\n    \"\"\"\n    enough_approved_candidates(profile, committeesize)\n    if not profile.has_unit_weights():\n        raise ValueError(rules[\"rule-x\"].shortname +\n                         \" is only defined for unit weights (weight=1)\")\n\n    if algorithm != \"standard\":\n        raise NotImplementedError(\n            \"Algorithm \" + str(algorithm)\n            + \" not specified for compute_rule_x\")\n\n    # optional output\n    if verbose:\n        print(header(rules[\"rule-x\"].longname))\n        if resolute:\n            print(\"Computing only one winning committee (resolute=True)\\n\")\n    # end of optional output\n\n    start_budget = {v: Fraction(committeesize, len(profile))\n                    for v, _ in enumerate(profile)}\n    cands = range(profile.num_cand)\n    commbugdets = [(set(), start_budget)]\n    final_committees = set()\n\n    # optional output\n    if resolute and verbose >= 2:\n        print(\"Phase 1:\\n\")\n        print(\"starting budget:\")\n        output = \"  (\"\n        for v, _ in enumerate(profile):\n            output += str(start_budget[v]) + \", \"\n        print(output[:-2] + \")\\n\")\n    # end of optional output\n\n    for _ in range(committeesize):\n        next_commbudgets = []\n        for committee, budget in commbugdets:\n\n            curr_cands = set(cands) - committee\n            min_q = {}\n            for c in curr_cands:\n                q = __rule_x_get_min_q(profile, budget, c)\n                if q is not None:\n                    min_q[c] = q\n\n            if len(min_q) > 0:  # one or more candidates are affordable\n                next_cands = [c for c in min_q.keys()\n                              if min_q[c] == min(min_q.values())]\n                for next_cand in next_cands:\n                    new_budget = dict(budget)\n                    for v, pref in enumerate(profile):\n                        if next_cand in pref:\n                            new_budget[v] -= min(budget[v], min_q[next_cand])\n                    new_comm = set(committee)\n                    new_comm.add(next_cand)\n                    next_commbudgets.append((new_comm, new_budget))\n\n                    # optional output\n                    if resolute and verbose >= 2:\n                        output = \"adding candidate number \"\n                        output += str(len(committee)) + \": \"\n                        output += profile.names[next_cand] + 
\"\\n\"\n                        output += \" with maxmimum cost per voter q = \"\n                        output += str(min(min_q.values()))\n                        print(output)\n                        print(\" remaining budget:\")\n                        output = \"  (\"\n                        for v, _ in enumerate(profile):\n                            output += str(new_budget[v]) + \", \"\n                        print(output[:-2] + \")\")\n                        if len(next_cands) > 1:\n                            output = \" tie broken in favor of \"\n                            output += profile.names[next_cand] + \",\"\n                            output += \"\\n candidates \"\n                            output += str_candset(next_cands[1:])\n                            output += \" are tied\"\n                            print(output)\n                        print()\n                    # end of optional output\n\n                    if resolute:\n                        break\n\n            else:  # no affordable candidates remain\n                # fill committee via seq-Phragmen\n\n                # optional output\n                if resolute and verbose >= 2:\n                    print(\"Phase 2 (seq-Phragmén):\\n\")\n                # end of optional output\n\n                start_load = {}\n                # translate budget to loads\n                for v in range(len(profile)):\n                    start_load[v] = (Fraction(committeesize, len(profile))\n                                     - budget[v])\n\n                # optional output\n                if resolute and verbose >= 2:\n                    print(\"starting loads (= budget spent):\")\n                    output = \"  (\"\n                    for v, _ in enumerate(profile):\n                        output += str(start_load[v]) + \", \"\n                    print(output[:-2] + \")\\n\")\n                # end of optional output\n\n                if resolute:\n                    committees, _ = __seqphragmen_resolute(\n                        profile, committeesize, verbose=verbose,\n                        partial_committee=list(committee),\n                        start_load=start_load)\n                else:\n                    committees, _ = __seqphragmen_irresolute(\n                        profile, committeesize,\n                        partial_committee=list(committee),\n                        start_load=start_load)\n                final_committees.update([tuple(comm) for comm in committees])\n                # after filling the remaining spots these committees\n                # have size committeesize\n\n            commbugdets = next_commbudgets\n\n    final_committees.update([tuple(comm) for comm, _ in commbugdets])\n\n    committees = sort_committees(final_committees)\n    if resolute:\n        committees = committees[:1]\n\n    # optional output\n    if verbose:\n        print(str_committees_header(committees, winning=True))\n        print(str_candsets(committees, names=profile.names))\n    # end of optional output\n\n    return committees\n\n\ndef compute_optphragmen(profile, committeesize,\n                        algorithm=\"gurobi\", resolute=False, verbose=0):\n    enough_approved_candidates(profile, committeesize)\n\n    # optional output\n    if verbose:\n        print(header(rules[\"optphrag\"].longname))\n        if resolute:\n            print(\"Computing only one winning committee (resolute=True)\\n\")\n    if verbose >= 3:\n        if algorithm == \"gurobi\":\n         
   print(\"Using the Gurobi ILP solver\")\n    # end of optional output\n\n    if algorithm != \"gurobi\":\n        raise NotImplementedError(\"Algorithm \" + str(algorithm)\n                                  + \" not specified for compute_optphragmen\")\n\n    committees = abcrules_gurobi.__gurobi_optphragmen(\n        profile, committeesize, resolute=resolute, verbose=verbose)\n    committees = sort_committees(committees)\n\n    # optional output\n    if verbose:\n        print(str_committees_header(committees, winning=True))\n        print(str_candsets(committees, names=profile.names))\n    # end of optional output\n\n    return committees\n\n\ndef compute_phragmen_enestroem(profile, committeesize, algorithm=\"standard\",\n                               resolute=True, verbose=0):\n    \"\"\"\"Phragmen-Enestroem (aka Phragmen's first method, Enestroem's method)\n\n    In every round the candidate with the highest combined budget of\n    their supporters is put in the committee.\n    Method described in:\n    https:\/\/arxiv.org\/pdf\/1611.08826.pdf (Section 18.5, Page 59)\n    \"\"\"\n    enough_approved_candidates(profile, committeesize)\n    if not profile.has_unit_weights():\n        raise ValueError(rules[\"phrag-enestr\"].shortname +\n                         \" is only defined for unit weights (weight=1)\")\n\n    if algorithm != \"standard\":\n        raise NotImplementedError(\n            \"Algorithm \" + str(algorithm)\n            + \" not specified for compute_phragmen_enestroem\")\n\n    num_voters = len(profile)\n\n    start_budget = {i: Fraction(profile[i].weight)\n                    for i in range(num_voters)}\n    price = Fraction(sum(start_budget.values()), committeesize)\n\n    cands = range(profile.num_cand)\n\n    committees = [(start_budget, set())]\n    for _ in range(committeesize):\n        # here the committees with i+1 candidates are\n        # stored (together with budget)\n        next_committees = []\n        # loop in case multiple possible committees\n        # with i filled candidates\n        for committee in committees:\n            budget, comm = committee\n            curr_cands = set(cands) - comm\n            support = {c: 0 for c in curr_cands}\n            for nr, pref in enumerate(profile):\n                voting_power = budget[nr]\n                if voting_power <= 0:\n                    continue\n                for cand in pref:\n                    if cand in curr_cands:\n                        support[cand] += voting_power\n            max_support = max(support.values())\n            winners = [c for c, s in support.items()\n                       if s == max_support]\n            for cand in winners:\n                b = dict(budget)  # copy of budget\n                if max_support > price:  # supporters can afford it\n                    # (voting_power - price) \/ voting_power\n                    multiplier = Fraction(max_support - price,\n                                          max_support)\n                else:  # set supporters to 0\n                    multiplier = 0\n                for nr, pref in enumerate(profile):\n                    if cand in pref:\n                        b[nr] *= multiplier\n                c = comm.union([cand])  # new committee with candidate\n                next_committees.append((b, c))\n\n        if resolute:\n            committees = [next_committees[0]]\n        else:\n            committees = next_committees\n    committees = [comm for b, comm in committees]\n    committees = 
sort_committees(committees)\n    if resolute:\n        committees = [committees[0]]\n\n    # optional output\n    if verbose:\n        print(header(rules[\"phrag-enestr\"].longname))\n\n        print(str_committees_header(committees, winning=True))\n        print(str_candsets(committees, names=profile.names))\n    # end of optional output\n\n    return committees\n\n\n__RULESINFO = [\n    (\"av\", \"AV\", \"Approval Voting (AV)\", compute_av,\n     [\"standard\"], [True, False]),\n    (\"sav\", \"SAV\", \"Satisfaction Approval Voting (SAV)\", compute_sav,\n     [\"standard\"], [True, False]),\n    (\"pav\", \"PAV\", \"Proportional Approval Voting (PAV)\", compute_pav,\n     [\"gurobi\", \"branch-and-bound\"], [True, False]),\n    (\"slav\", \"SLAV\", \"Sainte-Laguë Approval Voting (SLAV)\", compute_slav,\n     [\"gurobi\", \"branch-and-bound\"], [True, False]),\n    (\"cc\", \"CC\", \"Approval Chamberlin-Courant (CC)\", compute_cc,\n     [\"gurobi\", \"branch-and-bound\"], [True, False]),\n    (\"geom2\", \"2-Geometric\", \"2-Geometric Rule\",\n     functools.partial(compute_thiele_method, \"geom2\"),\n     [\"gurobi\", \"branch-and-bound\"], [True, False]),\n    (\"seqpav\", \"seq-PAV\", \"Sequential Proportional Approval Voting (seq-PAV)\",\n     compute_seqpav, [\"standard\"], [True, False]),\n    (\"revseqpav\", \"revseq-PAV\",\n     \"Reverse Sequential Proportional Approval Voting (revseq-PAV)\",\n     compute_revseqpav, [\"standard\"], [True, False]),\n    (\"seqslav\", \"seq-SLAV\",\n     \"Sequential Sainte-Laguë Approval Voting (seq-SLAV)\",\n     compute_seqslav, [\"standard\"], [True, False]),\n    (\"seqcc\", \"seq-CC\", \"Sequential Approval Chamberlin-Courant (seq-CC)\",\n     compute_seqcc, [\"standard\"], [True, False]),\n    (\"seqphrag\", \"seq-Phragmén\", \"Phragmén's Sequential Rule (seq-Phragmén)\",\n     compute_seqphragmen, [\"standard\"], [True, False]),\n    (\"optphrag\", \"opt-Phragmén\", \"Phragmén's Optimization Rule (opt-Phragmén)\",\n     compute_optphragmen, [\"gurobi\"], [True, False]),\n    (\"monroe\", \"Monroe\", \"Monroe's Approval Rule (Monroe)\",\n     compute_monroe, [\"gurobi\", \"brute-force\"], [True, False]),\n    (\"greedy-monroe\", \"Greedy Monroe\", \"Greedy Monroe\",\n     compute_greedy_monroe, [\"standard\"], [True]),\n    (\"mav\", \"MAV\", \"Minimax Approval Voting (MAV)\",\n     compute_mav, [\"gurobi\", \"brute-force\"], [True, False]),\n    (\"lexmav\", \"lex-MAV\", \"Lexicographic Minimax Approval Voting (lex-MAV)\",\n     compute_lexmav, [\"brute-force\"], [True, False]),\n    (\"rule-x\", \"Rule X\", \"Rule X\",\n     compute_rule_x, [\"standard\"], [True, False]),\n    (\"phrag-enestr\", \"Phragmén-Eneström\", \"Method of Phragmén-Eneström\",\n     compute_phragmen_enestroem, [\"standard\"], [True, False])]\nrules = {}\nfor ruleinfo in __RULESINFO:\n    rules[ruleinfo[0]] = ABCRule(*ruleinfo)\n# TODO: add other thiele methods\n"}
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_286","text":"drtaiga\/Thermistor_Notebook\n#Import the schemdraw modules for drawing circuit schematics\nimport schemdraw.elements as elm\nimport schemdraw\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom ipywidgets import interact, interactive, fixed, interact_manual\nimport ipywidgets as widgets\n\n#import scipy.optimize as fsolve\nfrom scipy.optimize import fsolve \nimport scipy.optimize as opt\n\n#---------------------------------\n#-- Voltage divider schematics\ndef draw_dividers():\n\n    v_divider = schemdraw.Drawing(inches_per_unit=.5, unit=2)   \n    v_divider.add(elm.DOT,open='true',label='$V_o$')\n    vdr1 = v_divider.add(elm.RES, d='down', label='$R_0$')\n    v_divider.add(elm.DOT)\n    vdr2 = v_divider.add(elm.RES, d='down', label='$R_1$')\n    v_divider.add(elm.GND, botlabel='Fig. 1A',lblofst=1)\n    v_divider.add(elm.LINE, d='right', xy=vdr1.end, l=v_divider.unit)\n    v_divider.add(elm.DOT, open='true', label='$V^{(A)}$')\n    v_divider.here = [v_divider.here[0]+2*v_divider.unit, v_divider.here[1]]\n    vdr3=v_divider.add(elm.DOT)\n    v_divider.add(elm.RES, d='up', label='$R_1$')\n    v_divider.add(elm.DOT, open='true', label='$V_o$')\n    v_divider.add(elm.RES,d='down',xy=vdr3.start, label='$R_0$')\n    v_divider.add(elm.GND, botlabel='Fig. 1B',lblofst=1)\n    v_divider.add(elm.LINE, d='right', xy=vdr3.end, l=v_divider.unit)\n    v_divider.add(elm.DOT, open='true', label='$V^{(B)}$')\n    return v_divider.draw()\n\ndef draw_dividers2():\n    \n    #SchemDraw schematic of a generic voltage divider circuit\n    v2_divider = schemdraw.Drawing(inches_per_unit=.5, unit=2)\n    v2_divider.add(elm.DOT,open='true',label='$V_o$')\n    v2dr1 = v2_divider.add(elm.RES, d='down', label='$R_0$')\n    v2_divider.add(elm.DOT)\n    v2dr2 = v2_divider.add(elm.RES_VAR, reverse='true',d='down', label='$R(T)$')\n    v2_divider.add(elm.GND, botlabel='Fig. 2A',lblofst=1)\n    v2_divider.add(elm.LINE, d='right', xy=v2dr1.end, l=v2_divider.unit)\n    v2_divider.add(elm.DOT, open='true', label='$V^{(2A)}$')\n    v2_divider.here = [v2_divider.here[0]+2*v2_divider.unit, v2_divider.here[1]]\n    v2dr3=v2_divider.add(elm.DOT)\n    v2_divider.add(elm.RES_VAR, d='up', flip='true',label='$R(T)$')\n    v2_divider.add(elm.DOT, open='true', label='$V_o$')\n    v2_divider.add(elm.RES,d='down',xy=v2dr3.start, label='$R_0$')\n    v2_divider.add(elm.GND, botlabel='Fig. 
2B',lblofst=1)\n    v2_divider.add(elm.LINE, d='right', xy=v2dr3.end, l=v2_divider.unit)\n    v2_divider.add(elm.DOT, open='true', label='$V^{(2B)}$')\n    return v2_divider.draw()\n\ndef draw_divamp():\n    \n    div_amp = schemdraw.Drawing(inches_per_unit=.5, unit=2)\n    \n    op = div_amp.add(elm.Opamp,flip='true')\n    div_amp.add(elm.LINE, d='left', xy=op.in2, l=div_amp.unit*.75)\n    p1=div_amp.add(elm.DOT)\n    div_amp.add(elm.LINE,l=1.0, d='left')\n    div_amp.add(elm.RES_VAR,d='left',label=\"$R(T)$\",reverse='true',flip='true')\n    div_amp.add(elm.LINE,d='left',l=0.5)\n    div_amp.add(elm.DOT,open='true',label='$V_o$')\n    div_amp.add(elm.LINE,d='down', l=div_amp.unit*1, xy=p1.start)\n    div_amp.add(elm.GND)   \n    p3=div_amp.add(elm.LINE,d='left', xy=op.in1, l=div_amp.unit\/4)\n    div_amp.add(elm.LINE,d='right', xy=op.out,l=1)\n    div_amp.add(elm.DOT,open='true',label='$V$')\n    div_amp.add(elm.LINE,xy=p3.end,d='down',l=.75)\n    div_amp.add(elm.GND)\n    div_amp.add(elm.LINE,d='down', xy=op.vd, l=.5)\n    div_amp.add(elm.GND)\n    div_amp.add(elm.LINE,d='up', xy=op.vs, l=.5)\n    div_amp.add(elm.VDD,label='$V_0$')\n    div_amp.add(elm.LINE,d='down', xy=op.n2, l=.5)\n    div_amp.add(elm.LINE,d='right',l=.25)\n    div_amp.add(elm.DOT,rgtlabel='$V_{ref}$')\n    return div_amp.draw()    \n#--- Voltage divider schematics    \n#---------------------------------\n\n\n#---------------------------------\n#--- Wheatstone bridge schematics\ndef draw_bridge():\n    \n    wbridge = schemdraw.Drawing(inches_per_unit=.5, unit=3)\n    br1 = wbridge.add(elm.RES,theta=45, toplabel='$R_1$')\n    br_top=wbridge.add(elm.DOT)\n    br2 = wbridge.add(elm.RES,theta=-45, toplabel='$R_3$')\n    br_right=wbridge.add(elm.DOT)\n    br3 = wbridge.add(elm.RES_VAR, theta=-135, botlabel='$R(T)$', flip='true',reverse='true')\n    br_bot=wbridge.add(elm.DOT,botlabel='Fig. 3A',lblofst=1)\n    br4 = wbridge.add(elm.RES,theta=135, botlabel='$R_2$')\n    br_left=wbridge.add(elm.DOT)\n    wbridge.add(elm.LINE,d='right',xy=br_top.start,l=wbridge.unit*1.25)\n    wbridge.add(elm.DOT, open=True, label='$V_T^{(3A)}$')\n    wbridge.add(elm.LINE,d='right',xy=br_bot.start,l=wbridge.unit*1.25)\n    wbridge.add(elm.DOT,open=True, label='$V_B^{(3A)}$')\n    wbridge.add(elm.LINE,d='left',xy=br_left.start,l=wbridge.unit\/4)\n    wbridge.add(elm.VDD,label='$V_0$')\n    wbridge.add(elm.LINE,d='right',xy=br_right.start,l=wbridge.unit\/4)\n    wbridge.add(elm.GND)\n\n    wbridge.here = [wbridge.here[0]+1.5*wbridge.unit, wbridge.here[1]]\n    br2_left=wbridge.add(elm.DOT)\n    br5=wbridge.add(elm.RES,theta=45, toplabel='$R_1$')\n    br2_top=wbridge.add(elm.DOT)\n    br6 = wbridge.add(elm.Resistor(theta=-45, toplabel='$R_3$'))\n    br2_right=wbridge.add(elm.Dot())\n    br7 = wbridge.add(elm.RES, theta=-135, botlabel='$R_2$', flip='true',reverse='true')\n    br2_bot=wbridge.add(elm.DOT,botlabel='Fig. 
3B',lblofst=1)\n    br8 = wbridge.add(elm.RES_VAR,theta=135, flip='true',botlabel='$R(T)$')\n\n    wbridge.add(elm.LINE,d='left',xy=br2_left.start,l=wbridge.unit\/4)\n    wbridge.add(elm.VDD,label='$V_0$')\n    wbridge.add(elm.LINE,d='right',xy=br2_right.start,l=wbridge.unit\/4)\n    wbridge.add(elm.GND)\n    wbridge.add(elm.LINE,d='right',xy=br2_top.start,l=wbridge.unit*1.25)\n    wbridge.add(elm.DOT, open=True, label='$V_T^{(3B)}$')\n    wbridge.add(elm.LINE,d='right',xy=br2_bot.start,l=wbridge.unit*1.25)\n    wbridge.add(elm.DOT, open=True, label='$V_B^{(3B)}$')\n\n    return wbridge.draw()\n\n\ndef draw_bridgeamp():\n    \n    wwbridge = schemdraw.Drawing(inches_per_unit=.5, unit=3)\n    wbr1 = wwbridge.add(elm.RES,theta=45, toplabel='$R_1$')\n    wbr_top=wwbridge.add(elm.DOT)\n    #wbridge.add(elm.Vdd(label='$V_0$'))\n    wbr2 = wwbridge.add(elm.RES,theta=-45, toplabel='$R_3$')\n    wbr_right=wwbridge.add(elm.DOT)\n    wbr3 = wwbridge.add(elm.RES_VAR,theta=-135,flip='true', botlabel='$R(T)$')\n    wbr_bot=wwbridge.add(elm.DOT)\n    #wbridge.add(elm.Ground())\n    wbr4 = wwbridge.add(elm.RES,theta=135, botlabel='$R_2$')\n    wbr_left=wwbridge.add(elm.DOT)\n    wwbridge.add(elm.LINE,d='right',xy=wbr_top.start,l=wwbridge.unit*1.25)\n    rn1=wwbridge.add(elm.DOT,open=True, label='$V_T$')\n    wwbridge.add(elm.LINE,d='right',xy=wbr_bot.start,l=wwbridge.unit*1.25)\n    rn2=wwbridge.add(elm.DOT, open=True, botlabel='$V_B$')\n    wwbridge.add(elm.LINE,d='left',xy=wbr_left.start,l=wwbridge.unit\/4)\n    wwbridge.add(elm.VDD,label='$V_0$')\n    wwbridge.add(elm.LINE,d='right',xy=wbr_right.start,l=wwbridge.unit\/4)\n    wwbridge.add(elm.GND)\n    wwbridge.add(elm.LINE,d='down',xy=rn1.start,l=wwbridge.unit*0.5)\n    wwbridge.add(elm.LINE,d='right',l=wwbridge.unit*0.5)\n    O1=wwbridge.add(elm.OPAMP,anchor='in2',flip='true')\n    wwbridge.add(elm.LINE,d='up',xy=rn2.start,l=wwbridge.unit*0.5)\n    wwbridge.add(elm.LINE,d='left', l=wwbridge.unit*0.5, xy=O1.in1)\n    wwbridge.add(elm.LINE,d='up', xy=O1.vs, l=1\/2)\n    wwbridge.add(elm.VDD,label='$V_0$')\n    wwbridge.add(elm.LINE,d='down', xy=O1.vd, l=1\/2)\n    wwbridge.add(elm.GND)\n    wwbridge.add(elm.LINE,d='right', xy=O1.out,l=1)\n    wwbridge.add(elm.DOT,open='true',label='$V$')\n    wwbridge.add(elm.LINE,d='down', xy=O1.n2, l=.5)\n    wwbridge.add(elm.LINE,d='right',l=.25)\n    wwbridge.add(elm.DOT,rgtlabel='$V_{ref}$')\n    \n    return wwbridge.draw()    \n    \n#--- Wheatstone bridge schematics\n#---------------------------------\n\n\n#---------------------------------\n# Voltage divider: thermistor-to-ground configuration (Configuration 'A')\ndef div_tg(T, Vin, res0, B, R25, T25):\n    #R25 = 1.0e4\n    #B = 3977.0\n    res = R25 * np.exp(B*(1.0\/T-1.0\/T25))\n    f=res\/(res0+res) * Vin\n    return f\n\ndef ddt_div_tp(T, Vin, res0, B, R25, T25):\n    \n    res = R25 * np.exp(B*(1.0\/T-1.0\/T25))\n    f=Vin*B*res0\/T**2.0\/(res0+res)**2.0 * res\n\n    return f\n\ndef ddt_div_tg(T, Vin, res0, B, R25, T25):\n    \n    res = R25 * np.exp(B*(1.0\/T-1.0\/T25))\n    f= Vin*(B*res**2.0 \/(T**2.0 * (res0+res)**2.0) - \\\n        B*res\/(T**2.0 * (res0+res)))\n\n    return f\n\n# Voltage divider: thermistor-to-power configuration (Configuration 'B')\ndef div_tp(T, Vin, res0, B, R25, T25):\n    res = R25 * np.exp(B*(1.0\/T-1.0\/T25))\n    f = res0\/(res0+res) * Vin\n    return f\n\n# Bridge: thermistor-to-ground configuration\ndef b_tg(T, Vin, res0, B, R25, T25, rho):\n    res = R25 * np.exp(B*(1.0\/T-1.0\/T25))\n    f = -(1\/(1.0 + 
rho)-res\/(res0+res)) * Vin\n    return  f\n\n# Bridge: thermistor-to-power configuration\ndef b_tp(T, Vin, res0, B, R25, T25, rho):\n    res = R25 * np.exp(B*(1.0\/T-1.0\/T25))\n    f = -(rho\/(1.0+rho) - res0\/(res0+res)) * Vin\n    return f\n\ndef div_plot(R0):\n    R25 = 1.0e4\n    B = 3977.0\n    T25 = 273.15 + 25.0\n    V_in=3.3\n    # Input temperatures in Kelvin\n    T25 = 273.15 + 25.0\n    temp_K = np.arange(250.0, 310.0, 0.2)\n    fig, ax = plt.subplots(1, 1, figsize=(6, 4))\n    out_tg=np.clip(div_tg(temp_K, V_in, R0, B, R25, T25),0,V_in)\n    out_tp=np.clip(div_tp(temp_K, V_in, R0, B, R25, T25),0,V_in)\n    \n    t_infl = opt.fsolve(f_inflection, 280., args=R0)\n    \n    slope_tg=ddt_div_tg(t_infl, V_in, R0, B, R25, T25)\n    slope_tp=ddt_div_tp(t_infl, V_in, R0, B, R25, T25)\n    \n    lin_tg = div_tg(t_infl, V_in, R0, B, R25, T25) + \\\n        slope_tg*(temp_K - t_infl)\n    lin_tp = div_tp(t_infl, V_in, R0, B, R25, T25) + \\\n        slope_tp*(temp_K - t_infl)\n    \n    \n    ax.plot(temp_K,out_tg,label='$V^{(2A)}$')\n    ax.plot(temp_K,out_tp,label='$V^{(2B)}$')\n    plt.plot(t_infl, np.clip(div_tg(t_infl, V_in, R0, B, R25, T25),0,V_in), \\\n             marker='o',color=\"gray\" )\n    plt.plot(t_infl, np.clip(div_tp(t_infl, V_in, R0, B, R25, T25),0,V_in), \\\n             marker='o', color=\"gray\" )\n    plt.plot(temp_K,lin_tg,':', color=\"gray\")\n    plt.plot(temp_K,lin_tp,':', color=\"gray\")\n    plt.ylim(0.,4.0)\n    plt.legend()\n    plt.title('Outputs from the Thermistor Voltage Divider Configurations \\n (10K Thermistor: Vishay Model Booo )')\n    plt.ylabel('Divider Circuit Output (Volts)')\n    plt.xlabel('Temperature (Kelvin)')\n    return plt.show()\n\ndef divider_plot():\n\n    return widgets.interact(div_plot, \\\n        R0=widgets.IntSlider(min=5000, max=100000, step=500, value=17900.,description=r'\\(R_0 (\\Omega) \\)'))\n\n\n# Bridge: thermistor-to-ground configuration\ndef bridge_tg(T, Vin, res0, B, R25, T25,rho):\n    res = R25 * np.exp(B*(1.0\/T-1.0\/T25))\n    f=(1.0\/(rho+1.0)-res\/(res0+res)) * Vin\n    return  f\n\n# Bridge: thermistor-to-power configuration\ndef bridge_tp(T, Vin, res0, B, R25, T25,rho):\n    res = R25 * np.exp(B*(1.0\/T-1.0\/T25))\n    f=(1.0\/(rho+1.0) - res0\/(res0+res)) * Vin\n    return f\n\n\n\ndef br_plot(R0, RHO):\n    \n    R25 = 1.0e4\n    B = 3977.0\n    T25 = 273.15 + 25.0\n    V_in=3.3\n    \n    temp_K = np.arange(250.0, 310.0, 0.2)\n    \n    fig, ax = plt.subplots(1, 1, figsize=(6, 4))\n    \n    out_tg=np.clip(bridge_tg(temp_K, V_in, R0, \\\n                                 B, R25, T25,RHO), 0,V_in)\n    out_tp=np.clip(bridge_tp(temp_K, V_in, R0, \\\n                                 B, R25, T25,RHO),0,V_in)\n\n\n    \n    t_infl = opt.fsolve(f_inflection, 280., args=R0)\n    \n    slope_tg=-ddt_div_tg(t_infl, V_in, R0, B, R25, T25)\n    slope_tp=-ddt_div_tp(t_infl, V_in, R0, B, R25, T25)\n    \n    lin_tg = bridge_tg(t_infl, V_in, R0, B, R25, T25, RHO) + \\\n        slope_tg*(temp_K - t_infl)\n    lin_tp = bridge_tp(t_infl, V_in, R0, B, R25, T25, RHO) + \\\n        slope_tp*(temp_K - t_infl)\n\n    ax.plot(temp_K,out_tg,label='$V^{(3A)}$')\n    ax.plot(temp_K,out_tp,label='$V^{(3B)}$')\n    \n    plt.plot(t_infl, np.clip(bridge_tg(t_infl, V_in, R0, B, R25, T25,RHO),0,V_in), \\\n             marker='o',color=\"gray\" )\n    plt.plot(t_infl, np.clip(bridge_tp(t_infl, V_in, R0, B, R25, T25, RHO),0,V_in), \\\n             marker='o', color=\"gray\" )\n    plt.plot(temp_K,lin_tg,':', color=\"gray\")\n    
plt.plot(temp_K,lin_tp,':', color=\"gray\")\n    #ax.plot(temp_K,out_div_tp,label='$V_{div}$')\n    plt.ylim(0.,3.5)\n    plt.legend()\n    plt.title('Outputs from the Amplified Voltage Divider Configurations')\n    plt.ylabel('Divider Circuit Output (Volts)')\n    plt.xlabel('Temperature (Kelvin)')\n    \n    return plt.show()\n\ndef bridge_plot2():\n\n    return widgets.interact(br_plot, \\\n        R0=widgets.IntSlider(min=5000, max=100000, step=100, value=17500.,description=r'\\(R_0 (\\Omega) \\)'), \\\n        RHO=widgets.FloatSlider(min=0, max=0.5, step=.005, value=0,description=r'\\(\\rho\\)'))\n\n\ndef amp_plot(R0,V_ref,RHO,A_G):\n    R25 = 1.0e4\n    B = 3977.0\n    T25 = 273.15 + 25.0\n    V_in=3.3\n    temp_K = np.arange(250.0, 310.0, 0.2)\n    #temp_K = np.arange(250.0, 310.0, 0.2)\n    fig, ax = plt.subplots(1, 1, figsize=(6,4))\n    out_tg=np.clip(A_G*b_tg(temp_K, V_in, R0, \\\n                                 B, R25, T25,RHO)+V_ref,0.0,V_in)\n    out_tp=np.clip(A_G*b_tp(temp_K, V_in, R0, \\\n                                 B, R25, T25,RHO)+V_ref,0.0,V_in)\n    out_div_tp=np.clip(A_G*div_tp(temp_K, V_in, R0, B, R25, T25)+V_ref,0,V_in)\n    \n    #ax.plot(temp_K,out_tg,label='$V_{TG}$')\n    ax.plot(temp_K,out_tp,label='$V_{br}$')\n    ax.plot(temp_K,out_div_tp,label='$V_{div}$')\n    plt.ylim(0.,3.5)\n    plt.legend()\n    plt.title('Outputs from the Amplified Voltage Divider Configurations')\n    plt.ylabel('Divider Circuit Output (Volts)')\n    plt.xlabel('Temperature (Kelvin)')\n    \n    return plt.show()\n\ndef ampcircuits_plot():\n    \n    return widgets.interact(amp_plot, \\\n        R0=widgets.IntSlider(min=5000, max=100000, step=100,value=17500.,description=r'\\(R_0 (\\Omega) \\)'), \\\n        V_ref=widgets.FloatSlider(min=0, max=3.3, step=.01,value=0,description=r'\\(V_{\\it ref} (V)\\)'), \\\n        RHO=widgets.FloatSlider(min=0, max=1, step=.01,value=0,description=r'\\(\\rho\\)'), \\\n        A_G=widgets.FloatSlider(min=1, max=2, step=.01,value=0,description=r'\\(A_{G}\\)'))\n\n\n#---------------------------------\n\ndef therm_res(T, B, R25, T25):\n    \n    #B-Parameter equation for thermistor resistance\n    \n    res = R25 * np.exp(B*(1.0\/T-1.0\/T25))\n    \n    return res\n\ndef d_therm_res(T, B, R25, T25):\n    \n    f= - B\/T**2.0 * therm_res(T,B,R25,T25)\n    \n    return f\n\ndef thermres_plot():\n    \n    #Define the temperature range in Kelvin\n    temp_K = np.arange(250.0, 310.0, 0.2)\n\n    #Define thermistor parameters\n\n    R25 = 1.0e4\n    T25 = 273.15 + 25.0\n    B = 3977.0\n    \n    r_t = therm_res(temp_K, B, R25, T25)\n    \n    fig, ax = plt.subplots(1, 1, figsize=(6, 4))\n    ax.plot(temp_K,r_t\/1000.)\n    plt.title('Temperature Dependence of Thermistor Resistance \\n (10K Thermistor: Vishay Model )');\n    plt.ylabel('Thermistor Resistance ($k\\Omega$)');\n    plt.xlabel('Temperature (Kelvin)');   \n    \n    return plt.show()\n\ndef inflection():\n    \n    R25 = 1.0e4\n    T25 = 273.15 + 25.0\n    B = 3977.0\n    \n    #Define the temperature range in Kelvin\n    temp_K = np.arange(250.0, 310.0, 0.2)\n\n    \n    res0= (B-2.0*temp_K)\/(B+2.0*temp_K)*therm_res(temp_K,B,R25,T25)\n    \n    fig, ax = plt.subplots(1, 1, figsize=(6, 4))\n    ax.plot(temp_K,res0\/1000.,temp_K,therm_res(temp_K,B,R25,T25)\/1000.)\n    plt.title('Temperature Dependence of Thermistor Resistance \\n (10K Thermistor: Vishay Model )');\n    plt.ylabel('Thermistor Resistance ($k\\Omega$)');\n    plt.xlabel('Temperature (Kelvin)');   \n    \n    return 
plt.show()\n\ndef f_inflection(tkel, *data):\n    \n    R0=data\n    \n    R25 = 1.0e4\n    T25 = 273.15 + 25.0\n    B = 3977.0\n    \n    f = (B-2.0*tkel)\/(B+2.0*tkel)*therm_res(tkel,B,R25,T25)-R0\n    \n    return f\n\ndef f_prime(tkel):\n\n    R25 = 1.0e4\n    T25 = 273.15 + 25.0\n    B = 3977.0\n    \n    fp = - (2.0 + 2.0*(B-2.0*tkel)\/(B+2.0*tkel) + B*(B-2.0*tkel)\/tkel**2.0) \\\n        * therm_res(tkel,B,R25,T25) \/ (B+2.0*tkel)\n    \n    return fp\n    \n\n\n"}
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_287","text":"#!\/usr\/bin\/env python\r\n\r\n\"\"\" Assemble the specified FEM matrix.\r\n\"\"\"\r\n\r\nimport argparse\r\nimport pymesh\r\nimport scipy.sparse\r\n\r\ndef parse_args():\r\n    parser = argparse.ArgumentParser(__doc__);\r\n    parser.add_argument(\"--type\", \"-t\", help=\"The type of matrix\",\r\n            choices=[\"stiffness\", \"mass\", \"lumped_mass\", \"laplacian\",\r\n                \"displacement_strain\", \"elasticity_tensor\",\r\n                \"engineer_strain_stress\", \"rigid_motion\", \"gradient\"]);\r\n    parser.add_argument(\"input_mesh\");\r\n    parser.add_argument(\"output_matrix\");\r\n    return parser.parse_args();\r\n\r\ndef main():\r\n    args = parse_args();\r\n    mesh = pymesh.load_mesh(args.input_mesh);\r\n    assembler = pymesh.Assembler(mesh);\r\n    M = assembler.assemble(args.type);\r\n    scipy.sparse.save_npz(args.output_matrix, M);\r\n\r\nif __name__ == \"__main__\":\r\n    main();\r\n"}
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_272","text":"pypower\/qps_pips.py\n# Copyright (c) 1996-2015 PSERC. All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file.\n\n\"\"\"Uses the Python Interior Point Solver (PIPS) to solve QP (quadratic\nprogramming) problems.\n\"\"\"\n\nfrom numpy import Inf, ones, zeros, dot\n\nfrom scipy.sparse import csr_matrix as sparse\n\nfrom pypower.pips import pips\n\n\ndef qps_pips(H, c, A, l, u, xmin=None, xmax=None, x0=None, opt=None):\n    \"\"\"Uses the Python Interior Point Solver (PIPS) to solve the following\n    QP (quadratic programming) problem::\n\n            min 1\/2 x'*H*x + C'*x\n             x\n\n    subject to::\n\n            l <= A*x <= u       (linear constraints)\n            xmin <= x <= xmax   (variable bounds)\n\n    Note the calling syntax is almost identical to that of QUADPROG from\n    MathWorks' Optimization Toolbox. The main difference is that the linear\n    constraints are specified with C{A}, C{L}, C{U} instead of C{A}, C{B},\n    C{Aeq}, C{Beq}.\n\n    Example from U{http:\/\/www.uc.edu\/sashtml\/iml\/chap8\/sect12.htm}:\n\n        >>> from numpy import array, zeros, Inf\n        >>> from scipy.sparse import csr_matrix\n        >>> H = csr_matrix(array([[1003.1,  4.3,     6.3,     5.9],\n        ...                       [4.3,     2.2,     2.1,     3.9],\n        ...                       [6.3,     2.1,     3.5,     4.8],\n        ...                       [5.9,     3.9,     4.8,     10 ]]))\n        >>> c = zeros(4)\n        >>> A = csr_matrix(array([[1,       1,       1,       1   ],\n        ...                       [0.17,    0.11,    0.10,    0.18]]))\n        >>> l = array([1, 0.10])\n        >>> u = array([1, Inf])\n        >>> xmin = zeros(4)\n        >>> xmax = None\n        >>> x0 = array([1, 0, 0, 1])\n        >>> solution = qps_pips(H, c, A, l, u, xmin, xmax, x0)\n        >>> round(solution[\"f\"], 11) == 1.09666678128\n        True\n        >>> solution[\"converged\"]\n        True\n        >>> solution[\"output\"][\"iterations\"]\n        10\n\n    All parameters are optional except C{H}, C{c}, C{A} and C{l} or C{u}.\n    @param H: Quadratic cost coefficients.\n    @type H: csr_matrix\n    @param c: vector of linear cost coefficients\n    @type c: array\n    @param A: Optional linear constraints.\n    @type A: csr_matrix\n    @param l: Optional linear constraints. Default values are M{-Inf}.\n    @type l: array\n    @param u: Optional linear constraints. 
Default values are M{Inf}.\n    @type u: array\n    @param xmin: Optional lower bounds on the M{x} variables, defaults are\n                 M{-Inf}.\n    @type xmin: array\n    @param xmax: Optional upper bounds on the M{x} variables, defaults are\n                 M{Inf}.\n    @type xmax: array\n    @param x0: Starting value of optimization vector M{x}.\n    @type x0: array\n    @param opt: optional options dictionary with the following keys, all of\n                which are also optional (default values shown in parentheses)\n                  - C{verbose} (False) - Controls level of progress output\n                    displayed\n                  - C{feastol} (1e-6) - termination tolerance for feasibility\n                    condition\n                  - C{gradtol} (1e-6) - termination tolerance for gradient\n                    condition\n                  - C{comptol} (1e-6) - termination tolerance for\n                    complementarity condition\n                  - C{costtol} (1e-6) - termination tolerance for cost\n                    condition\n                  - C{max_it} (150) - maximum number of iterations\n                  - C{step_control} (False) - set to True to enable step-size\n                    control\n                  - C{max_red} (20) - maximum number of step-size reductions if\n                    step-control is on\n                  - C{cost_mult} (1.0) - cost multiplier used to scale the\n                    objective function for improved conditioning. Note: The\n                    same value must also be passed to the Hessian evaluation\n                    function so that it can appropriately scale the objective\n                    function term in the Hessian of the Lagrangian.\n    @type opt: dict\n\n    @rtype: dict\n    @return: The solution dictionary has the following keys:\n               - C{x} - solution vector\n               - C{f} - final objective function value\n               - C{converged} - exit status\n                   - True = first order optimality conditions satisfied\n                   - False = maximum number of iterations reached\n                   - None = numerically failed\n               - C{output} - output dictionary with keys:\n                   - C{iterations} - number of iterations performed\n                   - C{hist} - dictionary of arrays with trajectories of the\n                     following: feascond, gradcond, coppcond, costcond, gamma,\n                     stepsize, obj, alphap, alphad\n                   - C{message} - exit message\n               - C{lmbda} - dictionary containing the Langrange and Kuhn-Tucker\n                 multipliers on the constraints, with keys:\n                   - C{eqnonlin} - nonlinear equality constraints\n                   - C{ineqnonlin} - nonlinear inequality constraints\n                   - C{mu_l} - lower (left-hand) limit on linear constraints\n                   - C{mu_u} - upper (right-hand) limit on linear constraints\n                   - C{lower} - lower bound on optimization variables\n                   - C{upper} - upper bound on optimization variables\n\n    @see: L{pips}\n\n    @author:  (PSERC Cornell)\n    \"\"\"\n    if isinstance(H, dict):\n        p = H\n    else:\n        p = {'H': H, 'c': c, 'A': A, 'l': l, 'u': u}\n        if xmin is not None: p['xmin'] = xmin\n        if xmax is not None: p['xmax'] = xmax\n        if x0 is not None: p['x0'] = x0\n        if opt is not None: p['opt'] = opt\n\n    if 'H' not in p or p['H'] == 
None:#p['H'].nnz == 0:\n        if p['A'] is None or p['A'].nnz == 0 and \\\n           'xmin' not in p and \\\n           'xmax' not in p:\n#           'xmin' not in p or len(p['xmin']) == 0 and \\\n#           'xmax' not in p or len(p['xmax']) == 0:\n            print('qps_pips: LP problem must include constraints or variable bounds')\n            return\n        else:\n            if p['A'] is not None and p['A'].nnz >= 0:\n                nx = p['A'].shape[1]\n            elif 'xmin' in p and len(p['xmin']) > 0:\n                nx = p['xmin'].shape[0]\n            elif 'xmax' in p and len(p['xmax']) > 0:\n                nx = p['xmax'].shape[0]\n        p['H'] = sparse((nx, nx))\n    else:\n        nx = p['H'].shape[0]\n\n    p['xmin'] = -Inf * ones(nx) if 'xmin' not in p else p['xmin']\n    p['xmax'] =  Inf * ones(nx) if 'xmax' not in p else p['xmax']\n\n    p['c'] = zeros(nx) if p['c'] is None else p['c']\n\n    p['x0'] = zeros(nx) if 'x0' not in p else p['x0']\n\n    def qp_f(x, return_hessian=False):\n        f = 0.5 * dot(x * p['H'], x) + dot(p['c'], x)\n        df = p['H'] * x + p['c']\n        if not return_hessian:\n            return f, df\n        d2f = p['H']\n        return f, df, d2f\n\n    p['f_fcn'] = qp_f\n\n    sol = pips(p)\n\n    return sol[\"x\"], sol[\"f\"], sol[\"eflag\"], sol[\"output\"], sol[\"lmbda\"]\n\n\nif __name__ == \"__main__\":\n    import doctest\n    doctest.testmod()\n"}
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_160","text":"import torch\nfrom torch import nn, Tensor\nfrom scipy.spatial.distance import cosine\nfrom typing import Optional\ndef mean(x : Tensor, dim : int = 1, weights: Optional[ None, Tensor ] = None) -> Tensor:\n    '''\n        Apply the mean to the index that represents the number of words\n    '''\n    return x.mean(dim=dim) if weights is not None else x.mean(dim=dim) * weights\n\ndef centroid(x : Tensor, index: int = 1) -> Tensor: \n    '''\n        Calculate the mean vector and return the one closest to the center\n    '''\n    from utils.funcs import normedChebyshev\n    mu = mean( x , index )\n    best = torch.argmin([ normedChebyshev( mu, x_i ) for x_i in x[ :, index ] ])\n    return x[:, [best], :]\n\ndef diff(x : Tensor) -> Tensor:\n    '''\n        Take the difference between the min and max of each dimension.\n        This will produce only positive values.\n\n        NOTE: This kind of seems dumb, but I'm leaving it for now.\n    '''\n    pass\n\n"}
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_161","text":"gemelli\/rpca.py\n# ----------------------------------------------------------------------------\n# Copyright (c) 2019--, gemelli development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n# ----------------------------------------------------------------------------\n\nimport biom\nimport skbio\nimport numpy as np\nimport pandas as pd\nfrom typing import Union\nfrom gemelli.matrix_completion import MatrixCompletion\nfrom gemelli.preprocessing import matrix_rclr\nfrom gemelli._defaults import (DEFAULT_COMP,\n                               DEFAULT_MSC, DEFAULT_MFC,\n                               DEFAULT_OPTSPACE_ITERATIONS,\n                               DEFAULT_MFF)\nfrom scipy.linalg import svd\n\n\ndef rpca(table: biom.Table,\n         n_components: Union[int, str] = DEFAULT_COMP,\n         min_sample_count: int = DEFAULT_MSC,\n         min_feature_count: int = DEFAULT_MFC,\n         min_feature_frequency: float = DEFAULT_MFF,\n         max_iterations: int = DEFAULT_OPTSPACE_ITERATIONS) -> (\n        skbio.OrdinationResults,\n        skbio.DistanceMatrix):\n    \"\"\"Runs RPCA with an matrix_rclr preprocessing step.\n\n       This code will be run by both the standalone and QIIME 2 versions of\n       gemelli.\n    \"\"\"\n    # get shape of table\n    n_features, n_samples = table.shape\n\n    # filter sample to min seq. depth\n    def sample_filter(val, id_, md):\n        return sum(val) > min_sample_count\n\n    # filter features to min total counts\n    def observation_filter(val, id_, md):\n        return sum(val) > min_feature_count\n\n    # filter features by N samples presence\n    def frequency_filter(val, id_, md):\n        return (np.sum(val > 0) \/ n_samples) > (min_feature_frequency \/ 100)\n\n    # filter and import table for each filter above\n    table = table.filter(observation_filter, axis='observation')\n    table = table.filter(frequency_filter, axis='observation')\n    table = table.filter(sample_filter, axis='sample')\n    # table to dataframe\n    table = pd.DataFrame(table.matrix_data.toarray(),\n                         table.ids('observation'),\n                         table.ids('sample')).T\n    # check the table after filtering\n    if len(table.index) != len(set(table.index)):\n        raise ValueError('Data-table contains duplicate indices')\n    if len(table.columns) != len(set(table.columns)):\n        raise ValueError('Data-table contains duplicate columns')\n    # Robust-clt (matrix_rclr) preprocessing and OptSpace (RPCA)\n    opt = MatrixCompletion(n_components=n_components,\n                           max_iterations=max_iterations).fit(\n                               matrix_rclr(table))\n    # get new n-comp when applicable\n    n_components = opt.s.shape[0]\n    # get PC column labels for the skbio OrdinationResults\n    rename_cols = ['PC' + str(i + 1) for i in range(n_components)]\n    # get completed matrix for centering\n    X = opt.sample_weights @ opt.s @ opt.feature_weights.T\n    # center again around zero after completion\n    X = X - X.mean(axis=0)\n    X = X - X.mean(axis=1).reshape(-1, 1)\n    # re-factor the data\n    u, s, v = svd(X)\n    # only take n-components\n    u = u[:, :n_components]\n    v = v.T[:, :n_components]\n    # calc. 
the new variance using projection\n    p = s**2 \/ np.sum(s**2)\n    p = p[:n_components]\n    s = s[:n_components]\n    # save the loadings\n    feature_loading = pd.DataFrame(v, index=table.columns,\n                                   columns=rename_cols)\n    sample_loading = pd.DataFrame(u, index=table.index,\n                                  columns=rename_cols)\n    # % var explained\n    proportion_explained = pd.Series(p, index=rename_cols)\n    # get eigenvalues\n    eigvals = pd.Series(s, index=rename_cols)\n\n    # if the n_components is two add PC3 of zeros\n    # this is referenced as in issue in\n    # \n    # discussed in gemelli -- PR#29\n    if n_components == 2:\n        feature_loading['PC3'] = [0] * len(feature_loading.index)\n        sample_loading['PC3'] = [0] * len(sample_loading.index)\n        eigvals.loc['PC3'] = 0\n        proportion_explained.loc['PC3'] = 0\n\n    # save ordination results\n    short_method_name = 'rpca_biplot'\n    long_method_name = '(Robust Aitchison) RPCA Biplot'\n    ord_res = skbio.OrdinationResults(\n        short_method_name,\n        long_method_name,\n        eigvals.copy(),\n        samples=sample_loading.copy(),\n        features=feature_loading.copy(),\n        proportion_explained=proportion_explained.copy())\n    # save distance matrix\n    dist_res = skbio.stats.distance.DistanceMatrix(\n        opt.distance, ids=sample_loading.index)\n\n    return ord_res, dist_res\n\n\ndef auto_rpca(table: biom.Table,\n              min_sample_count: int = DEFAULT_MSC,\n              min_feature_count: int = DEFAULT_MFC,\n              min_feature_frequency: float = DEFAULT_MFF,\n              max_iterations: int = DEFAULT_OPTSPACE_ITERATIONS) -> (\n        skbio.OrdinationResults,\n        skbio.DistanceMatrix):\n    \"\"\"Runs RPCA but with auto estimation of the\n       rank parameter.\n    \"\"\"\n    ord_res, dist_res = rpca(table,\n                             n_components='auto',\n                             min_sample_count=min_sample_count,\n                             min_feature_count=min_feature_count,\n                             min_feature_frequency=min_feature_frequency,\n                             max_iterations=max_iterations)\n    return ord_res, dist_res\n"}
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_162","text":"import matplotlib\nmatplotlib.rcParams['pdf.fonttype'] = 42\nmatplotlib.rcParams['ps.fonttype'] = 42\n# matplotlib.rcParams['ps.useafm'] = True\n# matplotlib.rcParams['pdf.use14corefonts'] = True\n# matplotlib.rcParams['text.usetex'] = True\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plot\nimport matplotlib.cm as cm # cm.rainbow\nfrom random import expovariate\nimport sys, pprint, math, numpy, simpy, getopt, itertools\n\nfrom simplex_sim import *\nfrom simplex_models import *\nfrom mds_models import mds_exactbound_on_ar\n# from mds_exp import sim_mds_nk\n\nfrom scipy.interpolate import UnivariateSpline\n\ndef plot_reptoall_steadystate_probhist():\n  t, r, k = 1, 2, 2\n  def get_state_prob_m(ar):\n    log(WARNING, \"ar= {}, t= {}, r= {}, k= {}\".format(ar, t, r, k) )\n    env = simpy.Environment()\n    pg = PG(env, \"pg\", ar)\n    avq = AVQ(\"avq\", env, t, r, k, serv=\"Exp\", servdist_m={'mu': 1} )\n    # monitor = AVQMonitor(env, avq, poll_dist=lambda: 0.1)\n    # avq.join_q.out_m = monitor\n    pg.out = avq\n    env.run(until=50000)\n    \n    # print(\"monitor.polled_state__counter_map= {}\".format(pprint.pformat(monitor.polled_state__counter_map) ) )\n    total_counter = sum([c for rs, c in monitor.polled_state__counter_map.items() ] )\n    state_prob_m = {rs:float(c)\/total_counter for rs, c in monitor.polled_state__counter_map.items() }\n    # print(\"polled_state__counter_map= {}\".format(pprint.pformat(polled_state__counter_map) ) )\n    return state_prob_m # ['0,(0,0)']\n  # for ar in numpy.arange(0.05, 1.2, 0.1):\n  color = iter(cm.rainbow(numpy.linspace(0, 1, 20) ) )\n  plot.figure(figsize=(20,10) )\n  for ar in numpy.arange(0.05, 1.3, 0.1):\n  # for ar in numpy.arange(0.05, 0.1, 0.1):\n    state_prob_m = get_state_prob_m(ar)\n    \n    def state(kp, i, j):\n      return \"{},({},{})\".format(kp, i, j)\n    i__tau_l_map = {}\n    for i in range(10):\n      if i not in i__tau_l_map:\n        i__tau_l_map[i] = []\n      for kp in range(i, 10):\n        s_u, s_l = state(kp, i, 0), state(kp+1, i, 0)\n        if s_u in state_prob_m and s_l in state_prob_m:\n          i__tau_l_map[i].append(state_prob_m[s_l]\/state_prob_m[s_u] )\n        # if state(k+1, 0, i) in state_prob_m:\n        #   i__tau_l_map[i].append(state_prob_m[state(k+1, 0, i) ] \/state_prob_m[state(k, 0, i) ] )\n    log(WARNING, \"i__tau_l_map=\\n {}\".format(pprint.pformat(i__tau_l_map) ) )\n    #\n    wing_cutoff_i = 2\n    wing_cutoff_sum = 0\n    for s, p in state_prob_m.items():\n      split_l = s.split(\",\")\n      if int(split_l[1].split(\"(\")[1] ) > wing_cutoff_i or int(split_l[2].split(\")\")[0] ) > wing_cutoff_i:\n        wing_cutoff_sum += p\n      \n    s_l, p_l = [], []\n    for s, p in state_prob_m.items():\n      if p > 0.01:\n        s_l.append(s)\n        p_l.append(p)\n    plot.bar(range(len(p_l) ), p_l, color=next(color) )\n    plot.xticks([i+0.5 for i in range(len(s_l) ) ], s_l, size='small')\n    plot.xlabel(\"State\")\n    plot.ylabel(\"Steady-state probability\")\n    plot.title(r't= {}, $\\lambda$= {}, sum_on_plot= {}, wing_cutoff_sum= {}'. 
\\\n      format(t, \"{0:.2f}\".format(ar), \"{0:.2f}\".format(sum(p_l)), \"{0:.2f}\".format(wing_cutoff_sum) ) )\n    plot.savefig(\"plot_reptoall_steadystate_probhist_ar_{0:.2f}.png\".format(ar) )\n    plot.clf()\n\ndef test_avq(nf, ar, t, r, k, serv=\"Exp\", servdist_m=None,\n             w_sys=True, mixed_traff=False, sching=\"rep-to-all\", p_i_l= [] ):\n  E_T_f_sum = 0\n  for f in range(nf):\n    log(WARNING, \"ar= {}, t= {}, r= {}, k= {}, servdist_m= {}, w_sys= {}, mixed_traff= {}, sching= {}\". \\\n        format(ar, t, r, k, servdist_m, w_sys, mixed_traff, sching) )\n    \n    env = simpy.Environment()\n    if mixed_traff:\n      sym_l, sym__rgroup_l_m = simplex_sym_l__sym__rgroup_l_m(t)\n      log(WARNING, \"sym__rgroup_l_m=\\n {}\".format(pprint.pformat(sym__rgroup_l_m) ) )\n      pg = MT_PG(env, \"pg\", ar, sym_l)\n      avq = MT_AVQ(\"mt_avq\", env, t, sym__rgroup_l_m, serv, servdist_m)\n      # monitor = AVQMonitor(env, aq=avq, poll_dist=lambda: 0.1)\n      # avq.join_q.out_m = monitor\n    else:\n      psize = None\n      if serv == \"Bern*Pareto\":\n        psize = \"Pareto\"\n        serv = \"Bern\"\n      pg = PG(env, \"pg\", ar, psize=psize, psize_dist_m=servdist_m)\n      avq = AVQ(\"avq\", env, t, r, k, servdist_m, sching, w_sys=w_sys)\n      # monitor = AVQMonitor(env, aq=avq, poll_dist=lambda: 0.1)\n      # avq.join_q.out_m = monitor\n    pg.out = avq\n    pg.init()\n    c = 3 if serv == \"Pareto\" or serv == \"Bern\" else 1\n    env.run(until=c*50000) # 20\n    \n    if mixed_traff:\n      print(\"pg.sym__n_sent= {}\".format(pprint.pformat(pg.sym__n_sent) ) )\n    st_l = avq.jsink.st_l\n    if len(st_l) > 0:\n      E_T_f_sum += float(sum(st_l) )\/len(st_l)\n      # continue\n    # print(\"avq.jsink.qid__num_win_map= {}\".format(pprint.pformat(avq.jsink.qid__num_win_map) ) )\n    total_n_wins = sum([n for i, n in avq.jsink.qid__num_win_map.items() ] )\n    print(\"pg.n_sent= {}, total_n_wins= {}\".format(pg.n_sent, total_n_wins) )\n    qid_winfreq_map = {i:float(n)\/total_n_wins for i, n in avq.jsink.qid__num_win_map.items() }\n    print(\"qid_winfreq_map= {}\".format(pprint.pformat(qid_winfreq_map) ) )\n    # if not mixed_traff:\n    #   total_n_types = sum(avq.servtype__num_m)\n    #   p_i_l[:] = [n\/total_n_types for t, n in enumerate(avq.servtype__num_m) ]\n    #   print(\"p_i_l= {}\".format(p_i_l) )\n    \"\"\"\n    print(\"\\n\")\n    # print(\"avq.join_q.state__num_found_map= {}\".format(pprint.pformat(avq.join_q.state__num_found_map) ) )\n    # total_num_founds = sum([n for s, n in avq.join_q.state__num_found_map.items() ] )\n    # state__found_freq_map = {s:float(n)\/total_num_founds for s, n in avq.join_q.state__num_found_map.items() }\n    # print(\"state__found_freq_map= {}\".format(pprint.pformat(state__found_freq_map) ) )\n    \n    print(\"\\n\")\n    # print(\"monitor.polled_state__counter_map= {}\".format(pprint.pformat(monitor.polled_state__counter_map) ) )\n    total_counter = sum([c for rs, c in monitor.polled_state__counter_map.items() ] )\n    polled_state__counter_map = {rs:float(c)\/total_counter for rs, c in monitor.polled_state__counter_map.items() }\n    print(\"polled_state__counter_map= {}\".format(pprint.pformat(polled_state__counter_map) ) )\n    \n    print(\"\\n\")\n    # print(\"monitor.state__num_found_by_job_departed_map= {}\".format(pprint.pformat(monitor.state__num_found_by_job_departed_map) ) )\n    total_counter = sum([c for rs, c in monitor.state__num_found_by_job_departed_map.items() ] )\n    
state__freq_found_by_job_departed_map = {rs:float(c)\/total_counter for rs, c in monitor.state__num_found_by_job_departed_map.items() }\n    print(\"state__freq_found_by_job_departed_map= {}\".format(pprint.pformat(state__freq_found_by_job_departed_map) ) )\n    \n    print(\"\\n\")\n    # print(\"monitor.start_setup__num_found_by_job_departed_map= {}\".format(pprint.pformat(monitor.start_setup__num_found_by_job_departed_map) ) )\n    total_counter = sum([c for rs, c in monitor.start_setup__num_found_by_job_departed_map.items() ] )\n    start_setup__freq_found_by_job_departed_map = {rs:float(c)\/total_counter for rs, c in monitor.start_setup__num_found_by_job_departed_map.items() }\n    print(\"start_setup__freq_found_by_job_departed_map= {}\".format(pprint.pformat(start_setup__freq_found_by_job_departed_map) ) )\n    \"\"\"\n  E_T = E_T_f_sum\/nf\n  print(\">> E_T= {}\".format(E_T) )\n  if E_T > 100: return None\n  return E_T\n  \ndef plot_winning_freqs():\n  t, r, k = 1, 2, 2\n  mu = 1\n  servdist_m = {'dist': 'Exp', 'mu': mu}\n  ar_ub = reptoall_innerbound_on_ar(t, servdist_m)\n  log(WARNING, \"t= {}, servdist_m= {}, ar_ub={}\".format(t, servdist_m, ar_ub) )\n  ar_l = []\n  qid__winfreq_l_map = {}\n  for ar in numpy.linspace(0.05, ar_ub*1.1, 20):\n    env = simpy.Environment()\n    pg = PG(env, \"pg\", ar)\n    avq = AVQ(\"avq\", env, t, r, k, servdist_m, \"rep-to-all\")\n    pg.out = avq\n    pg.init()\n    # monitor = AVQMonitor(env, aq=avq, poll_dist=lambda: 1)\n    env.run(until=50000)\n    \n    total_n_wins = sum([n for i, n in avq.jsink.qid__num_win_map.items() ] )\n    qid_winfreq_map = {i:float(n)\/total_n_wins for i, n in avq.jsink.qid__num_win_map.items() }\n    print(\"ar= {}, qid_winfreq_map= {}\".format(ar, pprint.pformat(qid_winfreq_map) ) )\n    \n    ar_l.append(ar)\n    for qid, win_freq in qid_winfreq_map.items():\n      if qid not in qid__winfreq_l_map:\n        qid__winfreq_l_map[qid] = []\n      qid__winfreq_l_map[qid].append(win_freq)\n  \n  plot.axhline(y=0.6, label=r'Lower-bound, $w_s$', c=next(dark_color), lw=2, ls='--')\n  plot.axhline(y=0.4, label=r'Upper-bound, $w_r$', c=next(dark_color), lw=2, ls='--')\n  counter = 0\n  for qid, win_freq_l in qid__winfreq_l_map.items():\n    if counter == 0:\n      plot.plot(ar_l, win_freq_l, label=r'Simulation, $w_s$', color=next(dark_color), marker=next(marker), ms=8, mew=2, ls=':')\n    else:\n      plot.plot(ar_l, win_freq_l, label=r'Simulation, $w_r$', color=next(dark_color), marker=next(marker), ms=8, mew=2, ls=':')\n    counter += 1\n\n  fontsize = 16\n  plot.legend(fontsize=13)\n  plot.xlabel(r'Arrival rate $\\lambda$', fontsize=fontsize)\n  plot.ylabel(\"Fraction of request completions\", fontsize=fontsize)\n  plot.title(r'Replicate-to-all $t=1$, $\\gamma=\\alpha=\\beta= {}$'.format(mu), fontsize=fontsize)\n  fig = plot.gcf()\n  # def_size = fig.get_size_inches()\n  # fig.set_size_inches(def_size[0]\/1.4, def_size[1]\/1.4)\n  fig.set_size_inches(6, 4)\n  fig.tight_layout()\n  # plot.savefig(\"plot_winning_freqs.png\", bbox_inches='tight')\n  plot.savefig(\"plot_winning_freqs.pdf\", dpi=fig.dpi)\n  plot.gcf().clear()\n  log(WARNING, \"done.\")\n\ndef plot_simplex_vs_rep():\n  t, r, k = 3, 2, 2\n  serv = \"Exp\"\n  mu = 1\n  servdist_m['mu'] = mu\n  if t == 1: ar_ub = 1.6\n  elif t == 3: ar_ub = 2.4\n  elif t == 7:\n    ar_ub = float(1.1*reptoall_innerbound_on_ar(mu, t, r, w_sys=True) )\n  mixed_traff = False\n  if mixed_traff: ar_ub = 1.1*ar_ub\n  log(WARNING, \"t= {}, ar_ub= {}, serv= {}, servdist_m= {}, 
mixed_traff= {}\".format(t, ar_ub, serv, servdist_m, mixed_traff) )\n  \n  n = 2*t + 1\n  n_sym = int(numpy.log2(n+1) )\n  # # Same distance\n  # n_rep = t + 1\n  # n_total_rep = n_sym*n_rep\n  # mu_rep = n*mu\/n_total_rep\n  \n  # n_mds = n_sym + t\n  # k_mds = n_sym\n  # mu_mds = (2*t+1)*mu\/n_mds\n  # ar_ub_mds = None\n  # if t == 3 and not mixed_traff: ar_ub_mds = ar_ub + 0.15 # mds_exactbound_on_ar(mu_mds, n_mds, k_mds)\n  \n  # Preserving hot-cold data mix\n  # n_rep = t + 1\n  # n_total_rep = n_rep\n  # ar_ub_mds = None\n  \n  # Same repair bandwidth\n  n_rep = t + 1\n  n_total_rep = int(n_sym*(t+1)\/2)\n  mu_rep = n*mu\/n_total_rep if not mixed_traff else n*mu\/n_total_rep\/n_sym\n  ar_ub_mds = None\n  \n  ar_ub_rep = n_rep*mu_rep\n  \n  sim_simplex_reqed = False\n  ET_sim_l = []\n  if not mixed_traff and t == 1:\n    ET_sim_l= [\n      0.6775872854372559,\n      0.7909557937247363,\n      0.9486987202221493,\n      1.166209238915134,\n      1.5685720588787688,\n      2.478342315521276,\n      2.6376081306859107,\n      2.906788473547391,\n      3.263700392764921,\n      3.5974807041868426,\n      4.289127887822366,\n      4.794525358984301,\n      5.896928018871929,\n      8.099664758903687,\n      12.74155958739236]\n  elif mixed_traff and t == 1:\n    ET_sim_mixedtraff_l= [\n      0.6795142458623882,\n      0.7748927520953908,\n      0.9120551663968248,\n      1.1017354073281063,\n      1.4008309793905753,\n      2.0319166972531395,\n      2.3461415096416802,\n      2.617752845887241,\n      2.931842457820586,\n      3.3957906721917803,\n      4.275140545352988,\n      5.384652265631004,\n      8.289396804081276,\n      None, # 21.85423973012918,\n      None]\n  elif not mixed_traff and t == 3:\n    ET_sim_l= [\n      0.4676519075931255,\n      0.5247256264186801,\n      0.6230081386991332,\n      0.775814486873029,\n      1.0207917160021767,\n      1.6244613243247372,\n      1.7481208563178903,\n      1.9667165686859327,\n      2.163968348080258,\n      2.5923594863306776,\n      3.0700378671376627,\n      3.796384731111067,\n      4.841880170965622,\n      6.610367379250164,\n      13.559429107437742]\n  elif mixed_traff and t == 3:\n    ET_sim_mixedtraff_l= [\n      0.46628732795742817,\n      0.5184094604634668,\n      0.5975473670434864,\n      0.7272615729604553,\n      0.9228862984361961,\n      1.3432430706439402,\n      1.5297012938889547,\n      1.7382202900329649,\n      2.006828591863818,\n      2.409746021676913,\n      2.9987862815607667,\n      4.1494167022302415,\n      6.7589082110731376,\n      None,\n      None]\n  elif not mixed_traff and t == 7:\n    ET_sim_l= [\n      0.31868938934489865,\n      0.3650196292881234,\n      0.4281058344507201,\n      0.5206469367259021,\n      0.6957249200007437,\n      1.1325417176453465,\n      1.2307386079673424,\n      1.3867025010207843,\n      1.5768489395874896,\n      1.865829597118924,\n      2.1844400783734677,\n      2.89287730113055,\n      4.276904798075734,\n      6.184072327220002,\n      None]\n  else:\n    sim_simplex_reqed = True\n  \n  sim_mds_reqed = False\n  E_T_sim_mds_l = []\n  if t == 3:\n    E_T_sim_mds_l= [\n      0.4291382378049635,\n      0.4859752967032978,\n      0.5573834220518918,\n      0.6504572423217563,\n      0.7912534680581111,\n      1.0617796194912665,\n      1.1173955998468372,\n      1.1864819039768486,\n      1.3132561853089193,\n      1.4183354786680833,\n      1.5441924947724337,\n      1.6800188501504796,\n      1.97388257061194,\n      2.365205967704707,\n      
2.552714259149294]\n  else:\n    sim_mds_reqed = True\n  \n  sim_mds_split_to_one_reqed = False\n  E_T_sim_split_to_one_mds_l = []\n  if t == 3:\n    E_T_sim_split_to_one_mds_l= [\n      0.77365082603341717,\n      0.82440222647912942,\n      0.88499585518811741,\n      0.95059809100622572,\n      1.026735997953014,\n      1.1276811830357545,\n      1.2540326440649683,\n      1.4212608769595043,\n      1.6517287453133336,\n      1.9954850953566452,\n      2.5853499093220909,\n      3.8254183518878659,\n      8.5337611351281506,\n      None,\n      None]\n  else:\n    sim_mds_split_to_one_reqed = True\n  \n  mew, ms = 3, 8\n  nf = 2\n  def plot_reptoall():\n    # Simplex\n    ar_simplex_l = []\n    for ar in [*numpy.linspace(0.05, 0.8*ar_ub, 5, endpoint=False), *numpy.linspace(0.8*ar_ub, ar_ub, 10) ]:\n      ar_simplex_l.append(ar)\n      if sim_simplex_reqed:\n        ET_sim_l.append(test_avq(nf, ar, t, r, k, serv, servdist_m, w_sys=True, mixed_traff=mixed_traff) )\n    c = next(dark_color)\n    label = 'Simplex' # if t != 1 else 'Simplex or MDS'\n    print(\"ET_sim_l= {}\".format(pprint.pformat(ET_sim_l) ) )\n    plot.plot(ar_simplex_l, ET_sim_l, label=label, color=c, marker=next(marker), mew=mew, ms=ms, linestyle=':')\n    # stab_lim = ET_simplex_approx(t, ar, servdist_m, incremental=True, ar_ub=True)\n    # plot.axvline(stab_lim, label=\"Simplex stability\", color=c, linestyle='--')\n    # Rep\n    ar_rep_l, E_T_rep_n_1_l = [], []\n    for ar in numpy.linspace(0.05, ar_ub_rep-0.05, 20):\n      ar_rep_l.append(ar)\n      E_T_rep_n_1_l.append(E_T_rep_n_1(ar, mu_rep, n_rep) )\n    # E_T_rep_n_1_l = [e*n_rep for e in E_T_rep_n_1_l]\n    c = next(dark_color)\n    plot.plot(ar_rep_l, E_T_rep_n_1_l, label=r'Replication', color=c, marker=next(marker), mew=mew, ms=ms, linestyle=':')\n    # plot.axvline(ar_ub_rep, label=\"Rep stability\", color=c, linestyle='--')\n    # # MDS\n    # if ar_ub_mds is not None:\n    #   ar_mds_l = []\n    #   for ar in [*numpy.linspace(0.05, 0.7*ar_ub_mds, 5, endpoint=False), *numpy.linspace(0.7*ar_ub_mds, ar_ub, 10, endpoint=False) ]:\n    #   # for ar in numpy.linspace(ar_ub_mds, ar_ub_mds, 1):\n    #     ar_mds_l.append(ar)\n    #     if sim_mds_reqed:\n    #       E_T_sim_mds_l.append(test_avq(nf, ar, t=1, r, k, serv, {'mu': mu_mds}, w_sys=True) )\n    #   print(\"E_T_sim_mds_l= {}\".format(pprint.pformat(E_T_sim_mds_l) ) )\n    #   plot.plot(ar_mds_l, E_T_sim_mds_l, label=r'MDS', color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':')\n  def plot_selectone():\n    # Simplex\n    ar_ub = arub_simplex_selectone(t, mu) + 0.1\n    log(WARNING, \"ar_ub= {}\".format(ar_ub) )\n    ar_l, ET_l = [], []\n    for ar in numpy.linspace(0.05, ar_ub, 20):\n      ar_l.append(ar)\n      ET_l.append(ET_selectone(t, ar, mu) )\n    label = 'Simplex' # if t != 1 else 'Simplex or MDS'\n    plot.plot(ar_l, ET_l, label=label, color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':')\n    # Rep\n    ar_ub_rep = n_rep*mu_rep\n    ar_l, E_T_rep_l = [], []\n    for ar in numpy.linspace(0.05, ar_ub_rep-0.2, 20):\n      ar_l.append(ar)\n      E_T_rep_l.append(E_T_rep_n_1_split_to_one(ar, mu_rep, n_rep) )\n    plot.plot(ar_l, E_T_rep_l, label=r'Replication', color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':')\n  plot_reptoall()\n  scheduling = \"Replicate-to-all\"\n  # plot_selectone()\n  # scheduling = \"Split-to-one\"\n  plot.legend(prop={'size':12})\n  plot.xlabel(r'Arrival rate $\\lambda$ (Request\/s)', fontsize=12)\n  
plot.ylabel(r'Average download time (s)', fontsize=12)\n  # plot.title(r'$t={}, \\mu={}$'.format(t, mu) )\n  plot.title(r'{} scheduling, $t= {}$'.format(scheduling, t) )\n  fig = plot.gcf()\n  def_size = fig.get_size_inches()\n  fig.set_size_inches(def_size[0]\/1.4, def_size[1]\/1.4)\n  fig.tight_layout()\n  plot.savefig(\"plot_simplex_vs_rep_t_{}_{}.pdf\".format(t, scheduling) )\n  fig.clear()\n  # Energy\n  # ar_simplex_l, Energy_simplex_l = [], []\n  # for ar in numpy.linspace(0.1, ar_ub, 20):\n  #   ar_simplex_l.append(ar)\n  #   Energy_simplex_l.append(n\/ar)\n  # ar_rep_l, Energy_rep_l = [], []\n  # for ar in numpy.linspace(0.1, ar_ub_rep, 20):\n  #   ar_rep_l.append(ar)\n  #   Energy_rep_l.append(n_total_rep\/ar)\n  # plot.plot(ar_simplex_l, Energy_simplex_l, label='Simplex', color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':')\n  # plot.plot(ar_rep_l, Energy_rep_l, label='Rep', color=next(dark_color), marker=next(marker), mew=mew, ms=ms, linestyle=':')\n  # plot.legend()\n  # plot.xlabel(r'Arrival rate $\\lambda$', fontsize=12)\n  # plot.ylabel(r'Unit of energy per request', fontsize=12)\n  # plot.title(r'$t={}, \\mu={}$'.format(t, mu) )\n  # fig = plot.gcf()\n  # def_size = fig.get_size_inches()\n  # fig.set_size_inches(def_size[0]\/1., def_size[1]\/1.)\n  # fig.tight_layout()\n  # plot.savefig(\"plot_simplex_vs_rep_t_{}_energy.pdf\".format(t) )\n  # fig.clear()\n  log(WARNING, \"done; scheduling= {}, t= {}\".format(scheduling, t) )\n\ndef plot_reptoall():\n  mixed_traff, w_sys = False, True\n  t, r, k = 1, 2, 2\n  serv = \"Exp\" # \"Bern\" # \"Bern*Pareto\" # \"Pareto\" # \"Dolly\"\n  mu = 1\n  # loc, a = 1, 2\n  # U, L, p, loc, a = 1, 8, 0.2, 0.1, 1.5 # 1, 8, 0.2, 1, 3\n  U, L, p, loc, a = 1, 10, 0.3, 0.1, 1.5 # 1, 8, 0.2, 1, 3\n  # For rep-to-all\n  if serv == \"Exp\":\n    servdist_m = {'dist': serv, 'mu': mu}\n    if t == 1: ar_ub = 1.6\n    elif t == 3: ar_ub = 2.4\n    elif t == 7: ar_ub = float(1.1*reptoall_innerbound_on_ar(t, servdist_m) )\n    else: ar_ub = reptoall_innerbound_on_ar(t, servdist_m)\n  elif serv == \"Pareto\":\n    servdist_m = {'dist': serv, 'loc': loc, 'a': a}\n    ar_ub = reptoall_innerbound_on_ar(t, servdist_m)\n  elif serv == \"TPareto\":\n    servdist_m = {'dist': serv, 'l': l, 'u': u, 'a': a}\n    ar_ub = reptoall_innerbound_on_ar(t, servdist_m)\n  elif serv == \"Bern\" or serv == \"Bern*Pareto\":\n    servdist_m = {'dist': serv, 'U': U, 'L': L, 'p': p, 'loc': loc, 'a': a}\n    ar_ub = reptoall_innerbound_on_ar(t, servdist_m)\n  elif serv == \"Dolly\":\n    servdist_m = None\n    if t == 1: ar_ub = 0.28\n    elif t == 3: ar_ub = 0.4\n  log(WARNING, \"w_sys= {}, t= {}, r= {}, k= {}, servdist_m= {}, ar_ub= {}, mixed_traff= {}\".format(w_sys, t, r, k, servdist_m, ar_ub, mixed_traff) )\n  \n  ET_sm_l, ET_sim_l, ET_l, ET_lb_l = [], [], [], []\n  ET_alt_l, ET_matrixanalytic_l = [], []\n  ET_bestapprox_l, ET_betterapprox_l, ET_naiveapprox_l, ET_varkigauri_lb_l = [], [], [], []\n  ET_simbasedapprox_l = []\n  ET_sim_mixedtraff_l = []\n  \n  # All below w_sys=True\n  nf = 3\n  sim_simplex = False\n  if serv == \"Exp\":\n    if t == 1:\n      ET_sim_l= [\n        0.6775872854372559,\n        0.7909557937247363,\n        0.9486987202221493,\n        1.166209238915134,\n        1.5685720588787688,\n        2.478342315521276,\n        2.6376081306859107,\n        2.906788473547391,\n        3.263700392764921,\n        3.5974807041868426,\n        4.289127887822366,\n        4.794525358984301,\n        5.896928018871929,\n        
8.099664758903687,\n        12.74155958739236]\n    elif t == 3:\n      ET_sim_l= [\n        0.4676519075931255,\n        0.5247256264186801,\n        0.6230081386991332,\n        0.775814486873029,\n        1.0207917160021767,\n        1.6244613243247372,\n        1.7481208563178903,\n        1.9667165686859327,\n        2.163968348080258,\n        2.5923594863306776,\n        3.0700378671376627,\n        3.796384731111067,\n        4.841880170965622,\n        6.610367379250164,\n        13.559429107437742]\n    else: sim_simplex = True\n  elif serv == \"Pareto\":\n    if loc == 1 and a == 2:\n      if t == 1:\n        ET_sim_l= [\n          1.5299993522735693,\n          1.7233577876041122,\n          1.8952577131712123,\n          2.2418712080584897,\n          2.853623528849504,\n          4.2208097489868,\n          4.586420599121132,\n          5.191481636572133,\n          5.6340499086639815,\n          5.9712033727746,\n          7.94309766204549,\n          9.599736059102067,\n          13.280357368839619,\n          17.20104661693977,\n          25.449711725024084]\n      elif t == 3:\n        ET_sim_l= [\n          1.3221090353539466,\n          1.4459274633541828,\n          1.6229349092564267,\n          1.9043964678064051,\n          2.4154300633936936,\n          3.6666730405584844,\n          3.9217550909479577,\n          4.256167164955279,\n          4.717366068731679,\n          5.891743883842969,\n          6.04468767433355,\n          8.073514650754076,\n          9.880581947509592,\n          15.816118977624845,\n          28.433468299774272]\n      else: sim_simplex = True\n    elif loc == 1 and a == 5:\n      if t == 3:\n        ET_sim_l= [\n          1.1276007604818075,\n          1.240550592912947,\n          1.3862061325608057,\n          1.645653757532261,\n          2.0688083303883276,\n          3.2115831386711813,\n          3.2986018954384835,\n          3.8148027478966227,\n          4.033705086448495,\n          5.448028336643181,\n          5.697392211154507,\n          9.053323168666376,\n          10.17868048265699,\n          23.644561610837382,\n          None] # 93.02644300031747\n      else: sim_simplex = True\n    else: sim_simplex = True\n  elif serv == \"Bern\":\n    if U == 1 and L == 8 and p == 0.2:\n      if t == 1:\n        # nf = 3\n        ET_sim_l= [\n          1.6376474738985423,\n          1.9851446427827089,\n          2.4840795375267626,\n          3.1829054073054217,\n          4.39332366216294,\n          7.063110373762194,\n          7.4445330550351665,\n          8.208129233744382,\n          9.309321611480481,\n          10.747520637423975,\n          12.460023568734707,\n          15.038255521201348,\n          18.778687793661728,\n          23.582209372296532,\n          36.21619587757658]\n      elif t == 3:\n        # nf = 1\n        ET_sim_l= [\n          1.1072895175117927,\n          1.2582695204803385,\n          1.4572200912301614,\n          1.8340775367273732,\n          2.4430722742069184,\n          4.053853819806121,\n          4.4494192069988605,\n          5.061922101782603,\n          5.883304533639656,\n          6.705043861319703,\n          8.307668993372534,\n          11.041651319984396,\n          17.564101468045756,\n          33.184482866801716,\n          None]\n      else: sim_simplex = True\n    else: sim_simplex = True\n  elif serv == \"Bern*Pareto\":\n    if U == 1 and L == 8 and p == 0.2 and loc == 1 and a == 3:\n      if t == 11:\n        # nf = 3\n        ET_sim_l= [\n          2.142631836594827,\n 
         2.5302711620514966,\n          2.941315337537391,\n          3.8773353598252345,\n          4.550420407107853,\n          6.649089020276313,\n          7.000687768519389,\n          7.681497353358071,\n          8.058275694322152,\n          9.541434770613856,\n          10.136837383356713,\n          11.027889242435874,\n          14.072462480848941,\n          18.721889173565945,\n          29.85022801496356]\n      elif t == 33:\n        pass\n      else: sim_simplex = True\n    else: sim_simplex = True\n  else: sim_simplex = True\n  \n  # Mixed traff\n  sim_simplex_mixed_traff = False\n  if mixed_traff:\n    if serv == \"Exp\":\n      if t == 1:\n        ET_sim_mixedtraff_l= [\n          0.678978501641253,\n          0.7748022818617738,\n          0.9072886738372506,\n          1.0928902616368403,\n          1.43754904360929,\n          2.0810587767368154,\n          2.266461910378062,\n          2.5977047234601125,\n          3.2441553951140985,\n          3.585616438620215,\n          4.415600179701042,\n          6.099149242270735,\n          9.786138444920114,\n          None, # 21.631079441147904\n          None]\n      elif t == 3:\n        ET_sim_mixedtraff_l= [\n          0.46217641274184773,\n          0.5249541076176077,\n          0.6065798815902482,\n          0.7193352388312126,\n          0.9238674360581351,\n          1.363955390788439,\n          1.4654931553890183,\n          1.733811055160431,\n          2.0493965738680795,\n          2.479767271681704,\n          3.065826086322138,\n          4.300842192226751,\n          8.05986376865404,\n          None, # 35.70730644518723,\n          None]\n      else:\n        sim_simplex_mixed_traff = True\n  \n  ar_l = []\n  for ar in [*numpy.linspace(0.05, 0.8*ar_ub, 5, endpoint=False), *numpy.linspace(0.8*ar_ub, ar_ub, 10) ]:\n  # for ar in numpy.linspace(0.05, ar_ub, 2):\n    ar_l.append(ar)\n    \n    p_i_l = []\n    if sim_simplex:\n      ET_sim = test_avq(nf, ar, t, r, k, serv, servdist_m, w_sys=w_sys, p_i_l=p_i_l)\n      print(\"*** ET_sim= {}\".format(ET_sim) )\n      ET_sim_l.append(ET_sim)\n      # ET_sim_l.append(None)\n    \n    # ET_simbasedapprox_l.append(ET_simplex_approx(t, ar, servdist_m, p_i_l=p_i_l)[0] )\n    # if sim_simplex_mixed_traff:\n    #   ET_sim_mixedtraff_l.append(test_avq(nf, ar, t, r, k, serv, servdist_m, w_sys=w_sys, p_i_l=p_i_l, mixed_traff=True) )\n    \n    ET_sm_l.append(ET_simplex_sm(t, ar, servdist_m) )\n    ET_lb_l.append(ET_simplex_lb(t, ar, servdist_m) )\n    if serv == \"Exp\":\n      if t == 1:\n        ET_l.append(ET_reptoall_t1(ar, mu) )\n        ET_matrixanalytic_l.append(ET_reptoall_t1_matrixanalytic(t, ar, mu) )\n      elif t == 2:\n        if w_sys:\n          ET_alt_l.append(simplex_w_two_repair__E_T(ar, mu, M=2) )\n          ET_l.append(simplex_w_two_repair__E_T(ar, mu, M=5) )\n        else:\n          ET_l.append(simplex_wo_sys_w_two_repair__E_T(ar, mu) )\n    ET_naiveapprox_l.append(ET_simplex_approx(t, ar, servdist_m, naive=True)[0] )\n    ET_betterapprox_l.append(ET_simplex_approx(t, ar, servdist_m)[0] )\n    ET_bestapprox_l.append(ET_simplex_approx(t, ar, servdist_m, incremental=True)[0] )\n    # ET_varkigauri_lb_l.append(E_T_simplex_varki_gauri_lb(t, ar, gamma, mu)[0] )\n  \n  ar_mixed_traff_l = []\n  # for ar in numpy.linspace(0.2, 0.2, 1):\n  for ar in [*numpy.linspace(0.05, 0.8*ar_ub, 5, endpoint=False), *numpy.linspace(0.8*ar_ub, 1.1*ar_ub, 10) ]:\n    ar_mixed_traff_l.append(ar)\n    if sim_simplex_mixed_traff:\n      ET_sim_mixedtraff_l.append(test_avq(nf, 
ar, t, r, k, serv, servdist_m, w_sys=w_sys, mixed_traff=True) )\n  \n  # mew, ms = 0.1, 10\n  mew, ms = 2, 5\n  def plot_poster():\n    # for better looking plot\n    ar_approx_l = list(ar_l)\n    \n    ar = ar_ub + 0.03\n    ar_approx_l.append(ar)\n    ET_bestapprox_l.append(ET_simplex_approx(t, ar, servdist_m, incremental=True) )\n    \n    plot.plot(ar_l, ET_sim_l, label=\"FJ-FA, simulation\", marker=next(marker), zorder=1, color=next(dark_color), linestyle=':', mew=mew, ms=ms)\n    plot.plot(ar_approx_l, ET_bestapprox_l, label=\"FJ-FA, M\/G\/1 approximation\", zorder=2, marker=next(marker), color='black', linestyle=':', mew=mew, ms=ms)\n\n  def get_xs_l_ys_l(_x_l, _y_l):\n    x_l, y_l = [], []\n    for i, y in enumerate(_y_l):\n      if y is not None:\n        x_l.append(_x_l[i])\n        y_l.append(y)\n    \n    s = UnivariateSpline(x_l, y_l, s=0.001)\n    xs_l = np.linspace(min(x_l), max(x_l), 20)\n    ys_l = s(xs_l)\n    return xs_l, ys_l\n    \n  def plot_():\n    log(WARNING, \"ET_sim_l= {}\".format(pprint.pformat(ET_sim_l) ) )\n    # plot.plot(ar_l, ET_simbasedapprox_l, label=r'Sim-based approximation', marker=next(marker), zorder=1, color=next(dark_color), linestyle=':', mew=mew, ms=ms)\n    label = 'Simulation, fixed-arrivals' if mixed_traff else 'Simulation'\n\n    xs_l, ys_l = get_xs_l_ys_l(ar_l, ET_sim_l)\n    # plot.plot(ar_l, ET_sim_l, label=label, marker=next(marker), zorder=1, color=next(dark_color), linestyle=':', mew=mew, ms=ms)\n    plot.plot(xs_l, ys_l, label=label, marker=next(marker), zorder=1, color=next(dark_color), linestyle=':', mew=mew, ms=ms)\n    \n    if mixed_traff:\n      log(WARNING, \"ET_sim_mixedtraff_l= {}\".format(pprint.pformat(ET_sim_mixedtraff_l) ) )\n      plot.plot(ar_mixed_traff_l, ET_sim_mixedtraff_l, label=r'Simulation, mixed-arrivals', color=next(dark_color), marker=next(marker), linestyle=':', mew=mew, ms=ms)\n    else:\n      xs_l, ys_l = get_xs_l_ys_l(ar_l, ET_sm_l)\n      # plot.plot(ar_l, ET_sm_l, label=r'Split-Merge upper bound', color=next(dark_color), marker=next(marker), linestyle=':', mew=mew, ms=ms)\n      plot.plot(xs_l, ys_l, label=r'Split-Merge upper bound', color=next(dark_color), marker=next(marker), linestyle=':', mew=mew, ms=ms)\n      \n      # plot.plot(ar_l, ET_bestapprox_l, label=r'$M\/G\/1$ approximation', zorder=2, marker=next(marker), color='black', linestyle=':', mew=mew, ms=ms)\n      xs_l, ys_l = get_xs_l_ys_l(ar_l, ET_lb_l)\n      # plot.plot(ar_l, ET_lb_l, label=r'Fast-Split-Merge lower bound', color=next(dark_color), marker=next(marker), linestyle=':', mew=mew, ms=ms)\n      plot.plot(xs_l, ys_l, label=r'Fast-Split-Merge lower bound', color=next(dark_color), marker=next(marker), linestyle=':', mew=mew, ms=ms)\n      if t == 1:\n        xs_l, ys_l = get_xs_l_ys_l(ar_l, ET_matrixanalytic_l)\n        # plot.plot(ar_l, ET_matrixanalytic_l, label=r'Matrix-analytic upper-bound', color=next(dark_color), marker=next(marker), linestyle=':', mew=mew, ms=ms)\n        plot.plot(xs_l, ys_l, label=r'Matrix-analytic upper-bound', color=next(dark_color), marker=next(marker), linestyle=':', mew=mew, ms=ms)\n\n        xs_l, ys_l = get_xs_l_ys_l(ar_l, ET_l)\n        # plot.plot(ar_l, ET_l, label=r'High-traffic approximation', color=next(dark_color), marker=next(marker), linestyle=':', mew=mew, ms=ms)\n        plot.plot(xs_l, ys_l, label=r'High-traffic approximation', color=next(dark_color), marker=next(marker), linestyle=':', mew=mew, ms=ms)\n      # plot.plot(ar_l, ET_naiveapprox_l, label=r'Straightforward approximation', 
color=next(dark_color), marker=next(marker), linestyle=':', mew=mew, ms=ms)\n      # plot.plot(ar_l, ET_betterapprox_l, label=r'Better approximation', color=next(dark_color), marker=next(marker), linestyle=':', mew=mew, ms=ms)\n      # plot.plot(ar_l, ET_bestapprox_l, label=r'Fine-grained approximation', zorder=2, marker=next(marker), color='black', linestyle=':', mew=mew, ms=ms)\n      # plot.plot(ar_l, ET_varkigauri_lb_l, label=r'$E[\\hat{T}_{fast-serial}]$', color=next(dark_color), marker=next(marker), linestyle=':', mew=mew)\n    # stab_lim = ET_simplex_approx(t, ar, servdist_m, incremental=True, ar_ub=True)\n    # plot.axvline(stab_lim, label=\"Stability limit\", color='black', linestyle='--')\n    # plot.gca().set_xlim([0, stab_lim+0.1] )\n  \n  def plot_selectone():\n    ar_ub = 0.9*arub_simplex_selectone(t, serv, servdist_m)\n    log(WARNING, \"ar_ub={}\".format(ar_ub) )\n    ar_l, ET_l = [], []\n    for ar in numpy.linspace(0.05, ar_ub, 50):\n    # for ar in numpy.linspace(0.05, ar_ub, 2):\n      ar_l.append(ar)\n      # if sim:\n      #   ET_l.append(test_avq(nf, ar, t, r, k, serv, servdist_m, w_sys=w_sys, sching=\"select-one\") )\n      ET_l.append(ET_selectone(t, ar, mu) )\n    # log(WARNING, \"ET_l= {}\".format(pprint.pformat(ET_l) ) )\n    plot.plot(ar_l, ET_l, 'b', label=r'Select-one', linestyle='--', lw=3, mew=mew, ms=ms)\n  # plot_poster()\n  plot_()\n  \n  # plot.plot(ar_l, ET_sim_l, 'k', label=r'Replicate-to-all', linestyle='-', lw=3)\n  # plot_selectone()\n  fontsize = 16\n  plot.yscale('log')\n  plot.legend(loc='upper left', fontsize=13, framealpha=0.25)\n  plot.xlabel(r'Arrival rate $\\lambda$', fontsize=fontsize)\n  plot.ylabel(r'Average download time', fontsize=fontsize)\n  serv_in_latex = None\n  if serv == \"Exp\":\n    serv_in_latex = '\\mathrm{Exp}' + r'(\\mu={})'.format(mu)\n  elif serv == \"Pareto\":\n    serv_in_latex = r'Pareto(s={}, \\alpha={})'.format(loc, a)\n  elif serv == \"Bern\":\n    serv_in_latex = r'Bernoulli(U={}, L={}, p={})'.format(U, L, p)\n  elif serv == \"Dolly\":\n    serv_in_latex = r'Dolly'\n  plot.title(r'FJ-FA with $r= {}$, $t= {}$, $\\mu= {}$'.format(r, t, mu), fontsize=fontsize)\n  # plot.title(r'$t={}$, Servers $\\sim {}$'.format(t, serv_in_latex) )\n  fig = plot.gcf()\n  fig.set_size_inches(6, 4)\n  fig.tight_layout()\n  plot.savefig(\"plot_FJFA_r{}_t{}.pdf\".format(r, t) )\n  log(WARNING, \"done; t= {}, r= {}, k= {}\".format(t, r, k) )\n\ndef get_opts(argv):\n  opt_map = {}\n  try:\n    opts, args = getopt.getopt(argv, '', ['num_q='] )\n  except getopt.GetoptError:\n    log(ERROR, \"Unexpected command line arg, expecting: exp.py --num_q=<>\")\n    sys.exit(1)\n  \n  for opt, arg in opts:\n    opt_map[opt] = arg\n  return opt_map\n\nif __name__ == \"__main__\":\n  # opt_map = get_opts(sys.argv[1:] )\n  # log(WARNING, \"opt_map= {}\".format(pprint.pformat(opt_map) ) )\n  # num_q = int(opt_map[\"--num_q\"] )\n  \n  # plot_winning_freqs()\n  plot_reptoall()\n  # plot_simplex_vs_rep()\n  \n"}
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_163","text":"Fuligor\/Uczenie-sie-rekonstrukcji-rozdzielczosci-obrazow-za-pomoca-sieci-glebokich0\nimport numpy as np\nfrom scipy import signal\nfrom PIL import Image\n\ndef resample(hr_image):\n    return hr_image[range(0, hr_image.shape[0], 2)][:, range(0, hr_image.shape[1], 2)]\n\ndef downsample(hr_image, kernel):\n    temp = np.zeros_like(hr_image)\n\n    for i in range(temp.shape[2]):\n        temp[:, :, i] = signal.convolve2d(hr_image[:, :, i], kernel, mode=\"same\", boundary=\"symm\")\n        \n    if temp.dtype != np.uint8:\n        temp = (temp * 255).astype(np.uint8)\n        \n    temp = Image.fromarray(temp, mode='RGB')\n\n    size = (hr_image.shape[1]\/\/2, hr_image.shape[0]\/\/2)\n    lr_image = temp.resize(size=size, resample=Image.BOX)\n    lr_image = lr_image.resize(size=(hr_image.shape[1], hr_image.shape[0]), resample=Image.NEAREST)\n\n    return (np.array(lr_image) \/ 255).astype(np.float32)\n\n\ndef create_image_patches(image, patch_size, step):\n    patches = []\n\n    for i in range(0, image.shape[0] - patch_size[0], step):\n        for j in range(0, image.shape[1] - patch_size[1], step):\n            patch = image[i:i+patch_size[0], j:j+patch_size[1]]\n\n            patches.append(patch)\n\n    return patches\n"}
+{"id":"MathCode-Pile_decontaminated_package-filtered_train-00002-of-00059_doc_164","text":"guanyilun\/cosmo-codescosmoslib\/aps\/ps.py\n\"\"\"Reusable functions related to camb and power spectrum calculation\n\nThis module collects some reusable functions that I used when working\nwith camb, power spectrum and covariance matrix\n\"\"\"\n\nimport numpy as np\nfrom scipy import interpolate\nimport healpy as hp\nimport pickle\nfrom functools import reduce\nimport operator\n\n\nclass PS:\n    \"\"\"A container for CMB power spectrum.\"\"\"\n    def __init__(self, arg=None, order=('ell','TT','EE','BB','TE'), prefactor=False, verbose=False):\n        \"\"\"Simple power spectrum data wrapper\n\n        Args:\n            arg (str or ndarray): input data, can be a string to a file to load or\n                an np.ndarray that contains the power spectrum. The array has to have\n                a shape like [n_ell, n_spec].\n            order (tuple(str)): order of columns in the input ps. Follow the naming\n                convention like ell,TT,EE,BB,TE which is default\n            prefactor (bool): whether input array has l(l+1)\/2\\pi prefactor included\n        \"\"\"\n        self.ps = {}\n        self.order=order\n        self.prefactor=prefactor\n        self.verbose=False\n        # populate ps depending on the inputs\n        if type(arg) == str:\n            self.load_file(arg, order, prefactor)\n        elif type(arg) == np.ndarray:\n            self.load_arr(arg, order, prefactor)\n\n    def __getattr__(self, key):\n        if key in self.ps:\n            return self.ps[key]\n        return self.__dict__[key]\n\n    def load_arr(self, arr, order=('ell','TT','EE','BB','TE'), prefactor=True):\n        \"\"\"Load data from a given array\"\"\"\n        if arr.shape[-1] != len(order):\n            # see if we are missing ells\n            if arr.shape[-1] == len(order)-1:\n                print(\"Didn't find ell, generating based on the shape now...\")\n                ell = np.arange(arr.shape[0])\n                arr = np.pad(arr, ((0,0),(1,0)))\n                arr[:,0] = ell\n            else:\n                raise ValueError(\"provided order doesn't match the input array!\")\n        # now populate fields\n        self.order = order\n        for i,c in enumerate(order):\n            self.ps[c] = arr[:,i]\n        # by default keep the unprefactored version\n        self.prefactor = prefactor\n        if prefactor:\n            return self.remove_prefactor()\n        else:\n            return self\n\n    def load_file(self, infile, order=('ell','TT','EE','BB','TE'), prefactor=True):\n        \"\"\"load ps from a given file, will be read using np.readtxt\"\"\"\n        data = np.loadtxt(infile)\n        return self.load_arr(data, order, prefactor)\n\n    def __repr__(self):\n        order = str(self.order).replace(' ','')\n        return f\"PS(lmin={int(self.lmin)},lmax={int(self.lmax)},prefactor={self.prefactor},order={order})\"\n\n    def __add__(self, other):\n        if not issubclass(type(other), PS):\n            raise NotImplementedError(\"Currently only support PS type ops!\")\n        # check for ell mismatch\n        if np.any(self.ell != other.ell):\n            if self.verbose:\n                print(\"Warning: ells mismatch, interpolating...\")\n            return self.resample(other.ell) + other.resample(self.ell)\n        # find common specs\n        new_order = ['ell'] + [s for s in self.specs if s in other.specs]\n        if len(new_order) < 2: raise ValueError(\"No 
common specs!\")\n        if self.prefactor != other.prefactor:\n            # if prefactor mismatch, add prefactor to both of them\n            self.remove_prefactor()\n            other.remove_prefactor()\n        new_ps = PS(order=new_order, prefactor=self.prefactor)\n        assert np.all(self.ell == other.ell)\n        new_ps.ps['ell'] = self.ell\n        for s in new_ps.specs:\n            new_ps.ps[s] = self.ps[s] + other.ps[s]\n        return new_ps\n\n    def __sub__(self, other):\n        if not issubclass(type(other), PS):\n            raise NotImplementedError(\"Currently only support PS type ops!\")\n        # check for ell mismatch\n        if np.any(self.ell != other.ell):\n            if self.verbose:\n                print(\"Warning: ells mismatch, interpolating...\")\n            return self.resample(other.ell) - other.resample(self.ell)\n        # find common specs\n        new_order = ['ell'] + [s for s in self.specs if s in other.specs]\n        if len(new_order) < 2: raise ValueError(\"No common specs!\")\n        if self.prefactor != other.prefactor:\n            # if prefactor mismatch, add prefactor to both of them\n            self.remove_prefactor()\n            other.remove_prefactor()\n        new_ps = PS(order=new_order, prefactor=self.prefactor)\n        new_ps.ps['ell'] = self.ell\n        for s in new_ps.specs:\n            new_ps.ps[s] = self.ps[s] - other.ps[s]\n        return new_ps\n\n    def __mul__(self, other):\n        if issubclass(type(other), PS):\n            raise NotImplementedError(\"Currently only support number ops!\")\n        new_ps = PS(order=self.order, prefactor=self.prefactor)\n        new_ps.ps['ell'] = self.ell\n        for s in self.specs:\n            new_ps.ps[s] = self.ps[s] * other\n        return new_ps\n\n    def __rmul__(self, other):\n        return self.__mul__(other)\n\n    def __truediv__(self, other):\n        if issubclass(type(other), PS):\n            raise NotImplementedError(\"Currently only support number ops!\")\n        new_ps = PS(order=self.order, prefactor=self.prefactor)\n        new_ps.ps['ell'] = self.ell\n        for s in self.specs:\n            new_ps.ps[s] = self.ps[s] \/ other\n        return new_ps\n\n    def __getitem__(self, field):\n        if field not in self.order:\n            raise ValueError(f\"{field} not found!\")\n        return self.ps[field]\n\n    @classmethod\n    def from_arr(cls, arr, order=('ell','TT','EE','BB','TE'), prefactor=True):\n        return cls(arr, order, prefactor)\n\n    @property\n    def lmin(self):\n        return self.ps['ell'].min()\n\n    @property\n    def lmax(self):\n        return self.ps['ell'].max()\n\n    @property\n    def ell(self):\n        return self.ps['ell']\n\n    @property\n    def specs(self):\n        return [o for o in self.order if o != 'ell']\n\n    @property\n    def values(self):\n        # made sure ell starts from index 0\n        return np.vstack([self.ps[s] for s in self.order]).T\n\n    @property\n    def shape(self):\n        return self.values.shape\n\n    def add_prefactor(self, inplace=True):\n        if self.prefactor: return self\n        if inplace:\n            ell = self.ell\n            for c in self.specs:\n                self.ps[c] *= (ell+1)*ell\/(2*np.pi)\n            self.prefactor = True\n            return self\n        else:\n            return PS(self.values,self.order,prefactor=False).add_prefactor()\n\n    def remove_prefactor(self, inplace=True):\n        if not self.prefactor: return self\n        if inplace:\n  
          ell = self.ell\n            for c in self.specs:\n                self.ps[c] *= 2*np.pi\/(ell*(ell+1))\n            self.prefactor = False\n            return self\n        else:\n            return PS(self.values,self.order,prefactor=True).remove_refactor()\n\n    def resample(self, new_ell, **kwargs):\n        ell = self.ell\n        # make sure we are within interpolation range\n        m = np.logical_and(new_ell<=self.lmax,new_ell>=self.lmin)\n        # create a new ps object\n        new_ps = PS(order=self.order,prefactor=self.prefactor)\n        new_ps.ps['ell'] = new_ell[m]\n        for s in self.specs:\n            new_ps.ps[s] = interpolate.interp1d(ell,self.ps[s],**kwargs)(new_ell[m])\n        return new_ps\n\n    def plot(self, fmt=\"-\", name='C_\\ell', axes=None, ncol=2, figsize=(12,9),\n             legend=False, legend_below=True, anchor=(0.6,-0.2), filename=None, \n             prefactor=True, logx=True, logy=True, show_cov=False, loc='best',\n             cov=None, xlim=[], ylim=[], show_abs=True, tight=True, **kwargs):\n        \"\"\"Plot the power spectra\"\"\"\n        import matplotlib.pyplot as plt\n        ell = self.ell\n        nrow = int(np.ceil(len(self.specs)\/ncol))\n        if not np.any(axes):\n            fig, axes = plt.subplots(nrow, ncol,figsize=figsize)\n        for i,s in enumerate(self.specs):\n            spec = self.ps[s]\n            ax = axes[i\/\/ncol,i%ncol]\n            if prefactor:\n                spec_name = r'$\\ell(\\ell+1)%s^{\\rm %s}\/2\\pi$' % (name, s)\n            else:\n                spec_name = r'$%s^{\\rm %s}$' % (name, s)\n            if show_abs:\n                spec = np.abs(spec)\n            if prefactor and not self.prefactor:\n                spec = spec*ell*(ell+1)\/2\/np.pi\n            elif not prefactor and self.prefactor:\n                spec = spec*2*np.pi\/ell\/(ell+1)\n            if show_cov:\n                assert isinstance(cov, Covmat), \"covmat not provided or invalid\"\n                assert np.allclose(cov.ell, ell), 'ell mismatch in cov'\n                yerr = np.sqrt(cov[f'{s}{s}'])\n                if prefactor and not self.prefactor:\n                    yerr *= ell*(ell+1)\/2\/np.pi\n                ax.errorbar(ell, spec, yerr=yerr, fmt=fmt, **kwargs)\n            else:\n                ax.plot(ell, spec, fmt, **kwargs)\n            ax.set_xlabel(r'$\\ell$')\n            ax.set_ylabel(spec_name)\n            if logx:\n                ax.set_xscale('log')\n            if logy:\n                ax.set_yscale('log')\n            if len(xlim) == 2:\n                ax.set_xlim(xlim)\n            if len(ylim) == 2:\n                ax.set_ylim(ylim)\n            if legend and not legend_below:\n                ax.legend(loc=loc)\n        if tight: plt.tight_layout()\n        if legend and legend_below:\n            ax.legend(ncol=4, bbox_to_anchor=anchor, frameon=False)\n        if filename:\n            plt.savefig(filename, bbox_inches='tight')\n        return axes\n\n    def gen_sim(self):\n        \"\"\"Generate a sim realization of the power spectra, use internal version\"\"\"\n        # make sure we have everything we want\n        target = ['ell','TT','EE','BB','TE']\n        ok = [s for s in target if s in self.order] == target\n        if not ok:\n            raise ValueError(\"PS does not contain all of ell,TT,EE,BB,TE required\")\n        data = np.hstack([self.ps[s].reshape(-1,1) for s in target])\n        rdata = gen_ps_realization(data, self.prefactor)\n        new_ps = 
PS(order=target, prefactor=self.prefactor)\n        for i,s in enumerate(target): new_ps.ps[s] = rdata[:,i]\n        return new_ps\n\n    def gen_sim_hp(self):\n        \"\"\"Generate a sim realization of the power spectra, wrapped around healpy,\n        this is often 30% faster\"\"\"\n        alm = self.gen_alm_hp()\n        cl = hp.sphtfunc.alm2cl(alm)\n        ell = np.arange(cl.shape[1])\n        ps = PS(cl.T, order=('TT', 'EE', 'BB', 'TE', 'EB', 'TB'), prefactor=False)\n        ps.ps['ell'] = ell\n        ps.order += ('ell',)\n        return ps\n\n    def gen_alm_hp(self):\n        if self.prefactor:\n            self.remove_prefactor()\n        # healpy requires array starts from zero, fill will 0\n        ps = np.zeros((4,self.lmax+1))\n        ps[:,self.lmin:] = self.values[:,1:].T\n        alm = hp.synalm((ps[0],ps[1],ps[2],ps[3],np.zeros_like(ps[0]),np.zeros_like(ps[0])),\n                        lmax=self.lmax, verbose=False, new=True)\n        return alm\n\n    def gen_map(self, nside, n=1):\n        if n > 1:\n            return [self.gen_map(nside) for i in range(n)]\n        else:\n            alm = self.gen_alm_hp(nside)\n            return hp.alm2map(alm, nside)\n\n    def covmat(self, noise, f_sky=1):\n        \"\"\"get covariance matrix given a noise model\n        Args:\n            noise: noise model of PS class\n            f_sky: sky coverage fraction, 1 means full-sky coverage\n        Returns:\n            cov: a tensor of size [n_ell, n_ps, n_ps], for example with\n                 a lmax of 5000, the tensor size will be [5000, 4, 4]\n        \"\"\"\n        # assuming the beam is a gaussian beam with an ell dependent\n        # beam size\n        # ps_w_noise = self + noise\n        ps = self.resample(noise.ell)\n        ell, ClTT, ClEE, ClBB, ClTE = [ps.ps[spec]\n                                       for spec in ['ell', 'TT','EE','BB','TE']]\n        new_noise = noise.resample(ell)\n        NlTT, NlEE, NlBB, NlTE = [new_noise.ps[spec] for spec in ['TT','EE','BB','TE']]\n        # initialize empty covariance tensor. 
Since the covariance matrix\n        # depends on ell, we will make a higher dimensional array [n_ell,\n        # n_ps, n_ps] where the first index represents different ells, the\n        # second and third parameters represents different power spectra\n        n_ells = len(ell)\n        cov = np.zeros([n_ells, 4, 4])\n        cov[:,0,0] = 2\/(2*ell+1)*(ClTT+NlTT)**2\n        cov[:,1,1] = 2\/(2*ell+1)*(ClEE+NlEE)**2\n        cov[:,2,2] = 2\/(2*ell+1)*(ClBB+NlBB)**2\n        cov[:,3,3] = 1\/(2*ell+1)*(ClTE**2+(ClTT+NlTT)*(ClEE+NlEE))\n        cov[:,0,1] = 2\/(2*ell+1)*ClTE**2\n        cov[:,1,0] = 2\/(2*ell+1)*ClTE**2\n        cov[:,0,3] = 2\/(2*ell+1)*ClTE*(ClTT+NlTT)\n        cov[:,3,0] = 2\/(2*ell+1)*ClTE*(ClTT+NlTT)\n        cov[:,1,3] = 2\/(2*ell+1)*ClTE*(ClEE+NlEE)\n        cov[:,3,1] = 2\/(2*ell+1)*ClTE*(ClEE+NlEE)\n        # now we include the effect of partial sky coverage\n        cov \/= f_sky\n        covmat = Covmat(ell, cov)\n\n        return covmat\n\n    def save(self, filename):\n        np.savetxt(filename, self.values, comments=\",\".join(self.order))\n\n\nclass Noise(PS):\n    def __init__(self, lmin, lmax):\n        self.order = ('ell','TT','EE','BB','TE')\n        self.prefactor = False\n        ell = np.arange(lmin, lmax+1)\n        self.ps = {'ell': ell}\n\n\nclass SimpleNoise(Noise):\n    def __init__(self, nlev, fwhm, lmin, lmax):\n        super().__init__(lmin, lmax)\n        self.nlev = nlev\n        self.fwhm = fwhm\n        ell = self.ps['ell']\n        NlTT = nlev**2*np.exp(ell*(ell+1)*fwhm**2\/(8.*np.log(2)))\n        NlPP = 2*NlTT\n        self.ps.update({'TT': NlTT, 'EE': NlPP,\n                        'BB': NlPP, 'TE': np.zeros_like(ell)})\n\n\nclass Covmat:\n    \"\"\"Simple block diagonal covariance matrix\"\"\"\n    def __init__(self, ell, cov, order=('TT','EE','BB','TE')):\n        self.order = order\n        self.cov = cov\n        self.ell = ell\n    def inv(self):\n        icov = np.zeros_like(self.cov)\n        for i in range(len(self.ell)):\n            icov[i,:,:] = np.linalg.inv(self.cov[i,:,:])\n        return Covmat(self.ell, icov)\n    def save(self, filename):\n        with open(filename, 'wb') as f:\n            pickle.dump(self, f)\n    def __getitem__(self, field):\n        \"\"\"field can be of form TTTT,TTEE, etc\"\"\"\n        if len(field) !=4:\n            raise ValueError(\"Field has to be of form TTEE, TTBB, etc!\")\n        spec1 = field[:2]\n        spec2 = field[2:]\n        if (spec1 not in self.order) or (spec2 not in self.order):\n            raise ValueError(f\"{field} not found\")\n        idx1 = self.order.index(spec1)\n        idx2 = self.order.index(spec2)\n        return self.cov[:,idx1,idx2]\n\n    @classmethod\n    def from_file(cls, filename):\n        with open(filename, \"rb\") as f:\n            try: return pickle.load(f)\n            except UnicodeDecodeError:\n                return pickle.load(f, encoding='latin1')\n\n\ndef _check_ps(ps):\n    \"\"\"Check the type of power spectra\"\"\"\n    # if ps is a 2D array\n    if len(ps.shape)>1:\n        # if ps has five columns -> tensor-like\n        if ps.shape[-1] == 5:\n            return \"TENSOR\"\n        # if ps has four columns -> scaler-like\n        elif ps.shape[-1] == 4:\n            return \"SCALER\"\n        # not sure what's inside\n        else:\n            return None\n    else:\n        raise ValueError\n\ndef add_prefactor(ps):\n    \"\"\"Add the l(l+1)\/2\\pi prefactor in a power spectrum\"\"\"\n    # check the dimension of power spectra\n    
ells = ps[:, 0]\n    for i in range(1,ps.shape[1]):\n        ps[:,i] \/= 2*np.pi\/(ells*(ells+1))\n    return ps\n\ndef remove_prefactor(ps):\n    \"\"\"Remove the l(l+1)\/2\\pi prefactor in a power spectrum\"\"\"\n    ells = ps[:, 0]\n    for i in range(1,ps.shape[1]):\n        ps[:,i] *= 2*np.pi\/(ells*(ells+1))\n    return ps\n\ndef resample(ps, ell):\n    ell_old = ps[:, 0]\n\n    # interpolate into the theory,\n    tt_old = ps[:, 1]\n    ee_old = ps[:, 2]\n    bb_old = ps[:, 3]\n    te_old = ps[:, 4]\n\n    tt_predicted = interpolate.interp1d(ell_old, tt_old)(ell)\n    te_predicted = interpolate.interp1d(ell_old, te_old)(ell)\n    ee_predicted = interpolate.interp1d(ell_old, ee_old)(ell)\n    bb_predicted = interpolate.interp1d(ell_old, bb_old)(ell)\n\n    cl_predicted = np.stack([ell, tt_predicted, ee_predicted, bb_predicted, te_predicted], axis=1)\n\n    return cl_predicted\n\ndef join_noise_models(noise_models, method='min'):\n    \"\"\"join multiple noise models by a given method. Currently\n    only method that works is the min, which means choose the\n    noise_models with minimum noise in each ell.\n\n    Args:\n        noise_models: list of noise models\n        method: method used to combine\n    Returns:\n        A new noise model with the noise models combined\n    \"\"\"\n    # find lmin, lmax\n    lmin = min(nm.lmin for nm in noise_models)\n    lmax = max(nm.lmax for nm in noise_models)\n    # placeholder to find corresponding ells\n    ell = np.arange(0, lmax+1)\n    noise = Noise(lmin, lmax)\n    for spec in ['TT','EE','BB','TE']:\n        # place holder to find min noise\n        cl = np.zeros_like(ell).astype('float64')\n        for nm in noise_models:\n            nm_ell = nm.ell.astype(int)\n            mask = np.logical_or(nm[spec]= 0]\n    dnc = dncm + dncp\n    if debug:\n        print(f\"nuber of data columns: {len(dset)}, number of axes:{num_axes}, Rows excluded from adjustement:{dnc}\")\n    k = 0\n    for i, ax in enumerate(dset):\n        if not i in dnc:\n            for j, d in enumerate(ax['col']):\n                lmin = data[d].quantile(q=qmin) if j==0 else min(lmin, data[d].quantile(q=qmin))\n                lmax = data[d].quantile(q=qmax) if j==0 else max(lmax, data[d].quantile(q=qmax))\n                lmin = minfactor * lmin if lmin > 0.0 else maxfactor * lmin\n                lmax = minfactor * lmax if lmax < 0.0 else maxfactor * lmax \n                if debug:\n                    print(f\"{i} {d:20} min={lmin:8.2f}, max={lmax:8.2f}\")\n                smax = k*(lmax - lmin) + lmax\n                smin = lmin - (num_axes - (k + 1))*(lmax - lmin)\n                if (smax - smin) < 1.0:\n                    smin = smin - 5.0 - k; smax = smax + 5.0 - k\n                ax['ylim'] = (smin, smax)\n            k += 1\n    if debug:\n        print(\"\\nCalculated Axes:\")\n        debug_dset(dset)\n    return dset\n\ndef count_columns(dset):\n    cnt = 0\n    for c in dset:\n        cnt += len(c['col'])\n    return cnt\n\ndef _idx(n, s, e, x):\n    return int(n * (x - s) \/ (e - s)+1)\n\ndef add_vlines(lines, ax, *args, **kwargs):\n    for l in lines:\n        ax.axvline(l, *args, **kwargs)\n\ndef add_dbokeh_vlines(lines, fig, *args, **kwargs):\n    for l in lines:\n        fig.add_layout(Span(location=l,dimension='height', *args, **kwargs))   \n\ndef add_dbokeh_hlines(lines, fig, *args, **kwargs):\n    for l in lines:\n        fig.add_layout(Span(location=l,dimension='width', *args, **kwargs))   \n\ndef add_table(summary, ax, *args, **kwargs):\n    
\"\"\"\n    available options for loc:\n    best, upper right, upper left, lower left, lower right, center left, center right\n    lower center, upper center, center, top right,top left, bottom left, bottom right\n    right, left, top, bottom\n    \"\"\"\n    ax.table(\n        cellText=summary.values, \n        colWidths=[0.1]*len(summary.columns),\n        colLabels=summary.columns,\n        cellLoc='center', \n        rowLoc='center',\n        *args, **kwargs)\n        #loc='upper left')\n\ndef _plot(idf, x12='datetime', y1 = ['Various_Values_SpeedAct'], y2 = ['Power_PowerAct'], ylim2=(0,5000), *args, **kwargs):\n    ax = idf[[x12] + y1].plot(\n    x=x12,\n    y=y1,\n    kind='line',\n    grid=True, \n    *args, **kwargs)\n\n    ax2 = idf[[x12] + y2].plot(\n    x=x12,\n    y=y2,\n    secondary_y = True,\n    ax = ax,\n    kind='line', \n    grid=True, \n    *args, **kwargs)\n\n    ax2.set_ylim(ylim2)\n    return ax, ax2, idf\n\ndef dbokeh_chart(source, pltcfg, x='datetime', x_ax_unit=None, title=None, grid=True, legend=True, style='line', x_range=None, y_range=None, notebook=True, figsize=(8,6), *args, **kwargs):\n    \"\"\"wrapper function for bokeh_chart from Johannes\"\"\" \n    if notebook: output_notebook(hide_banner=True)\n    if title: title = str(title)\n    for col in pltcfg: \n        if not 'unit' in col: col['unit'] = ''\n    source = ColumnDataSource(source)   \n    fig = bokeh_chart(source, pltcfg, x, x_ax_unit, title, grid, legend, style, x_range, y_range, figsize, *args, **kwargs)\n    return fig\n\ndef bokeh_chart(source, pltcfg, x_ax='datetime', x_ax_unit=None, title=None, grid=True, legend=True, style='line', x_range=None, y_range=None, figsize=(8,6), *args, **kwargs):\n    \"\"\"Generate interactive Diane like chart with multiple axes\n\n    Args:\n        source (bokeh.ColumnDataSource): Data , e.g downloaded by engine.batch_hist_dataItems(...)\n        pltcfg ([list of dicts]): the source columns to plot, and range of y-axis\n        x_ax (str, optional): x-axis column as string. Defaults to 'datetime'.\n        x_ax_unit (str, optional): unit of x-axis as string. Defaults to None.\n        title (str, optional): Main Title of figure. Defaults to None.\n        grid (bool, optional): display grid. Defaults to True.\n        legend (bool, optional): legend. Defaults to True.  \n        style (str, optional): style of markers, options i.e. 
'line', 'circle'\n            circle necessary to enable linked brushing (selection of datapoints)\n        x_range (bokeh.figure.x_range, optional): x_range of different bokeh-plot; used to connect x-axis limits\n        y_range (bokeh.figure.y_range, optional): y_range of different bokeh-plot; used to connect y-axis limits\n\n\n    Returns:\n        bokeh.plotting.figure: Bokeh plot ready to plot or embed in a layout\n\n    example:\n    .....\n    from bokeh.io import push_notebook, show, output_notebook\n    from bokeh.plotting import figure, output_file, show\n    from bokeh.models import LinearAxis, Range1d, HoverTool\n    from bokeh.layouts import column, row, gridplot\n    from bokeh.models import ColumnDataSource\n    from itertools import cycle\n    import dmyplant2\n    import arrow\n\n    import pandas as pd\n    import numpy as np\n    import traceback\n    import matplotlib\n    import sys\n    import warnings\n    \n\n    dmyplant2.cred()\n    mp = dmyplant2.MyPlant(0)\n\n    # Version mittels Validation Instance \n    dval = dmyplant2.Validation.load_def_csv('input.csv')\n    vl = dmyplant2.Validation(mp, dval, cui_log=True)\n    e = vl.eng_serialNumber(1145166)\n\n    print(f\"{e} {e.id}\")\n    pltcfg=[]\n    pltcfg.append( [\n        {'col': ['Knock integrator cyl. 07']},\n        {'col': ['Ignition voltage cyl. 07']},\n        {'col': ['ITP cyl. 07']},\n        {'col': ['Exhaust temperature cyl. 07']},\n        {'col': ['Operating hours engine']}\n    ])\n\n    datastr=[]\n    for cfg in pltcfg:\n        for y in cfg:\n            datastr += y['col']\n\n    ans=datastr_to_dict(datastr)\n    dat=ans[0]\n\n    df = mp.hist_data(\n        e.id,\n        itemIds=dat,\n        p_from=arrow.get('2021-03-05 04:00').to('Europe\/Vienna'),\n        p_to=arrow.get('2021-03-05 05:30').to('Europe\/Vienna'),\n        timeCycle=1)\n\n    output_notebook()\n\n    df.loc['2021-03-05 05:00':'2021-03-05 06:00']\n\n    title=e._info.get('Validation Engine')\n\n    source = ColumnDataSource(df)\n    output_file(title+'.html')\n    p=bokeh_chart(source, pltcfg[0], title=title)\n\n    show(p)\n    \"\"\"\n\n    dpi = 66\n    mwidth = figsize[0] * dpi\n    mheight = figsize[1] * dpi\n\n    #dataitems=pd.read_csv('data\/dataitems.csv', sep=';')\n    dataitems=dmyplant2.MyPlant.get_dataitems()\n\n    TOOLS = 'pan, box_zoom, xwheel_zoom, box_select, undo, reset, save' #select Tools to display\n    colors = cycle(matplotlib.rcParams['axes.prop_cycle']) #colors to use for plot\n    linewidth = 2\n\n    if x_ax_unit is not None: #get unit of x_axis either from user or csv-file\n        x_unit=x_ax_unit\n    else:\n        if pd.Series(x_ax).isin(dataitems.myPlantName).any():\n            x_unit=dataitems.loc[dataitems.myPlantName==x_ax].iat[0,2]\n            if x_unit is np.nan: x_unit=''\n        elif x_ax=='Operating hours validation':\n            x_unit='h'\n        else:\n            x_unit=''\n\n    x_axis_label=(f'{x_ax} [{x_unit}]')\n\n    if (x_ax == 'datetime'): #seperate constructors for object for datetime or no datetime x-axis\n        p = figure( plot_width=mwidth, plot_height=mheight, x_axis_label=None, x_axis_type='datetime',\n        x_range=x_range, y_range=y_range, tools=TOOLS)\n    else:\n        p = figure( plot_width=mwidth, plot_height=mheight, x_axis_label=x_axis_label,\n            tools=TOOLS, x_range=x_range, y_range=y_range)\n\n    if grid==False: p.grid.grid_line_color = None\n        \n    p.yaxis.visible = False\n    if x_ax=='datetime':\n        tooltips = 
[('Datetime', '@'+x_ax+'{%F %T}')]\n    else:\n        tooltips = [(x_ax, '@{'+x_ax +'}{0.1 f} '+x_unit)]\n    for i, y in enumerate(pltcfg):\n        to_remove=[]\n        for col in y['col']: #checks if data is available\n            #if not pd.Series(col).isin(dataitems.myPlantName).any(): ### instead of comparing with dataitems compare with source\n            if col not in source.data: ### instead of comparing with dataitems compare with source\n                to_remove.append(col)\n                logging.info(f\"{col} not found.\")\n            elif source.data[col].all() == None: #remove of columns if no measurement taken\n                to_remove.append(col)\n                logging.info(f\"{col} not available\")\n        y['col'] = [e for e in y['col'] if e not in to_remove] #remove elements not contained in dataframe by assigning new list\n        if len(y['col'])==0: #jump to next iteration if no col remaining\n            continue\n        else:\n            color = next(cycle(colors))['color']\n\n        if y.get('ylim'):\n            ylim = list(y['ylim'])\n            p.extra_y_ranges[str(i)] = Range1d(start=ylim[0], end=ylim[1])\n        else: #if no ylim defined, use automatic Bokeh Range\n            p.extra_y_ranges[str(i)] = DataRange1d()\n\n        unit=[]\n        renderers=[]\n        for ii, col in enumerate(y['col']):\n            if not pd.Series(col).isin(dataitems.myPlantName).any(): #Additional if for handling new data rows generated by function, else is normal behaviour\n                if 'unit' in y:\n                    unit.append(y['unit'])\n                else:\n                    unit.append('')\n            else: \n                unit.append(dataitems.loc[dataitems.myPlantName==col].iat[0,2])\n\n            if pd.isna(unit[-1]): \n                unit[-1]=''\n\n            if 'color' in y:\n                if isinstance(y['color'], list):\n                    color = y['color'][ii]\n                else:\n                    color = y['color']\n            else:\n                color = next(cycle(colors))['color']\n\n            # func = getattr(p, style) #to choose between different plotting styles\n            # renderers.append(func(source=source, x=x_ax, y=col, #circle or line\n            # color=color, y_range_name=str(i), legend_label=col, line_width=linewidth))\n            if legend:\n                if style == 'line':\n                    func = getattr(p, 'line') #to choose between different plotting styles\n                    renderers.append(func(source=source, x=x_ax, y=col,  #circle or line\n                    color=color, y_range_name=str(i), legend_label=col, line_width=linewidth))\n                if style == 'circle':\n                    func = getattr(p, 'circle') #to choose between different plotting styles\n                    renderers.append(func(source=source, x=x_ax, y=col,  #circle or line\n                    color=color, y_range_name=str(i), legend_label=col, line_width=linewidth))\n                if style == 'both':\n                    func = getattr(p, 'line') #to choose between different plotting styles\n                    renderers.append(func(source=source, x=x_ax, y=col,  #circle or line\n                    color=color, y_range_name=str(i), legend_label=col, line_width=linewidth))\n                    func = getattr(p, 'circle') #to choose between different plotting styles\n                    renderers.append(func(source=source, x=x_ax, y=col,  #circle or line\n                    color=color, 
y_range_name=str(i), legend_label=col, line_width=linewidth))\n            else:\n                if style == 'line':\n                    func = getattr(p, 'line') #to choose between different plotting styles\n                    renderers.append(func(source=source, x=x_ax, y=col, #circle or line\n                    color=color, y_range_name=str(i), line_width=linewidth))\n                if style == 'circle':\n                    func = getattr(p, 'circle') #to choose between different plotting styles\n                    renderers.append(func(source=source, x=x_ax, y=col,  #circle or line\n                    color=color, y_range_name=str(i), line_width=linewidth))\n                if style == 'both':\n                    func = getattr(p, 'line') #to choose between different plotting styles\n                    renderers.append(func(source=source, x=x_ax, y=col, #circle or line\n                    color=color, y_range_name=str(i), line_width=linewidth))\n                    func = getattr(p, 'circle') #to choose between different plotting styles\n                    renderers.append(func(source=source, x=x_ax, y=col, #circle or line\n                    color=color, y_range_name=str(i), line_width=linewidth))\n\n            tooltips.append((col, '@{'+col +'}{0.2 f} '+unit[-1]))  # or 0.0 a\n\n        if not y.get('ylim'):  #only if y-limits not specified\n            p.extra_y_ranges[str(i)].renderers = renderers #only use axis specific renderers for calculation of limits\n\n        if len(unit)==1 or unit.count(unit[0]) == len(unit): #if only one entry or all have the same unit\n            llabel = ', '.join(y['col'])+' ['+unit[0]+']'\n        else:\n            llabel = ', '.join(y['col'])+' ['+', '.join(unit)+']'\n        \n        if len(llabel) > 90:\n                llabel = llabel[:86] + ' ...'\n        if len(y['col']) > 1:\n            color = 'black'\n        p.add_layout(LinearAxis(y_range_name=str(i),\n                            axis_label=llabel, axis_label_text_color=color), 'left')\n\n    callback = CustomJS(code='document.getElementsByClassName(\"bk-tooltip\")[0].style.backgroundColor=“transparent\";')\n    #callback = CustomJS(code='document.getElementsByClassName(\"bk-tooltip\")[0].style.backgroundColor=“rgba(255,255,255,0.2)\";')\n\n    p.add_tools(HoverTool(\n        tooltips=tooltips, \n        formatters={f'@datetime': 'datetime'}, # use 'datetime' formatter for '@date' field    \n        mode='mouse',\n        callback=callback))  # mode=vline -> display a tooltip whenever the cursor is vertically in line with a glyph\n\n    p.toolbar.active_drag = p.select_one('BoxZoomTool')\n    p.toolbar.active_scroll = p.select_one('WheelZoomTool')\n\n    p.legend.click_policy='hide' #hides graph when you click on legend, other option mute (makes them less visible)\n    p.legend.location = 'top_left'\n\n    try: #if legend has more than 10 entries reduce spacing\n        if len(p.legend.items)>10:\n            p.legend.spacing = 0\n    except:\n        pass\n\n    p.title.text = str(title)\n    p.title.text_font_size = '16px' \n\n    return p\n\ndef bokeh_chart_engine_comparison(source, pltcfg, variable, eng_names, x_ax='datetime', x_ax_unit=None, title=None, grid=True, legend=True, style='circle', x_range=None, y_range=None, figsize=(8,6), *args, **kwargs):\n    \"\"\"Generate interactive Diane like chart with multiple axes\n\n    Args:\n        source (bokeh.ColumnDataSource): Data , e.g downloaded by engine.batch_hist_dataItems(...)\n        pltcfg ([list of dicts]): 
the source columns to plot, and range of y-axis\n        variable (String): Variable name to plot\n        eng_names (list of strings): Engine names\n        x_ax (str, optional): x-axis column as string. Defaults to 'datetime'.\n        x_ax_unit (str, optional): unit of x-axis as string. Defaults to None.\n        title (str, optional): Main Title of figure. Defaults to None.\n        grid (bool, optional): display grid. Defaults to True.\n        legend (bool, optional): legend. Defaults to True.  \n        style (str, optional): style of markers, options i.e. 'line', 'circle'\n            circle necessary to enable linked brushing (selection of datapoints)\n        x_range (bokeh.figure.x_range, optional): x_range of different bokeh-plot; used to connect x-axis limits\n        y_range (bokeh.figure.y_range, optional): y_range of different bokeh-plot; used to connect y-axis limits\n\n\n    Returns:\n        bokeh.plotting.figure: Bokeh plot ready to plot or embed in a layout\n\n\n    Example:\n    pltcfg=[{'col': ['BMW REGENSBURG 5_@_Starts', 'ALPRO M2 616F412 BE_@_Starts', 'BMW REGENSBURG_@_Starts']}]\n    Variable='Starts'\n    eng_names=['BMW REGENSBURG 5', 'ALPRO M2 616F412 BE', 'BMW REGENSBURG']\n    \"\"\"\n\n    dpi = 80\n    mwidth = figsize[0] * dpi\n    mheight = figsize[1] * dpi\n\n    dataitems=pd.read_csv('data\/dataitems.csv', sep=';')\n\n    TOOLS = 'pan, box_zoom, xwheel_zoom, box_select, undo, reset, save' #select Tools to display\n    colors = cycle(matplotlib.rcParams['axes.prop_cycle']) #colors to use for plot\n    linewidth = 1\n    hovers=[]\n\n    if x_ax_unit is not None: #get unit of x_axis either from user or csv-file\n        x_unit=x_ax_unit\n    else:\n        if pd.Series(x_ax).isin(dataitems.myPlantName).any():\n            x_unit=dataitems.loc[dataitems.myPlantName==x_ax].iat[0,2]\n            if x_unit is np.nan: x_unit=''\n        elif x_ax=='Operating hours validation':\n            x_unit='h'\n        else:\n            x_unit=''\n\n    x_axis_label=(f'{x_ax} [{x_unit}]')\n\n    if (x_ax == 'datetime'): #seperate constructors for object for datetime or no datetime x-axis\n        p = figure(\n        plot_width=mwidth,\n        plot_height=mheight,\n        x_axis_label=None,#'datetime',\n        x_axis_type='datetime',\n        x_range=x_range,\n        y_range=y_range,\n        tools=TOOLS\n        )\n    else:\n        p = figure(\n            plot_width=mwidth,\n            plot_height=mheight,\n            x_axis_label=x_axis_label,\n            tools=TOOLS,\n            x_range=x_range,\n            y_range=y_range\n        )\n\n    if grid==False:\n        p.grid.grid_line_color = None\n        \n    p.yaxis.visible = False\n    tooltips = []\n    for i, y in enumerate(pltcfg):\n        to_remove=[]\n        for col in y['col']: #checks if data is available\n            #if not pd.Series(col).isin(dataitems.myPlantName).any(): ### instead of comparing with dataitems compare with source\n            if col not in source.data: ### instead of comparing with dataitems compare with source\n                to_remove.append(col)\n                print (col +' not available! Please check spelling! Not plotted!')\n            elif source.data[col].all()==None: #remove of columns if no measurement taken\n                to_remove.append(col)\n                print (col +' not measured! 
Can´t be plotted!')\n        y['col'] = [e for e in y['col'] if e not in to_remove] #remove elements not contained in dataframe by assigning new list\n        if len(y['col'])==0: #jump to next iteration if no col remaining\n            continue\n        else:\n            color = next(cycle(colors))['color']\n\n        if y.get('ylim'):\n            ylim = list(y['ylim'])\n            p.extra_y_ranges[str(i)] = Range1d(start=ylim[0], end=ylim[1])\n        else: #if no ylim defined, use automatic Bokeh Range\n            p.extra_y_ranges[str(i)] = DataRange1d()\n\n        unit=[]\n        renderers=[]\n\n        for col in y['col']:\n            eng_name=col.split('_@_')[0]\n            if not pd.Series(variable).isin(dataitems.myPlantName).any(): #Additional if for handling new data rows generated by function, else is normal behaviour\n                if 'unit' in y:\n                    unit.append(y['unit'])\n                else:\n                    unit.append('')\n            else: \n                unit.append(dataitems.loc[dataitems.myPlantName==variable].iat[0,2])\n\n            if unit[-1] is np.nan: unit[-1]=''\n\n            if 'color' in y:\n                color = y['color']\n            else:\n                color = next(cycle(colors))['color']\n\n            func = getattr(p, style) #to choose between different plotting styles\n            if style=='circle':\n                renderers.append(func(source=source, x=x_ax, y=col, #circle or line\n            color=color, y_range_name=str(i), legend_label=eng_name, line_width=linewidth, size=2))\n            else:\n                renderers.append(func(source=source, x=x_ax, y=col, #circle or line\n            color=color, y_range_name=str(i), legend_label=eng_name, line_width=linewidth))        \n            p.add_tools(HoverTool(tooltips=[(eng_name, '@{'+col +'}{0.2 f} '+unit[-1])], renderers=[renderers[-1]],toggleable=False))\n\n        if not y.get('ylim'):  #only if y-limits not specified\n            p.extra_y_ranges[str(i)].renderers = renderers #only use axis specific renderers for calculation of limits\n\n        llabel = variable+' ['+unit[0]+']'\n        \n        p.add_layout(LinearAxis(y_range_name=str(i),\n                            axis_label=llabel), 'left')\n\n        \n    p.toolbar.active_scroll = p.select_one('WheelZoomTool')\n\n    p.legend.click_policy='hide' #hides graph when you click on legend, other option mute (makes them less visible)\n    p.legend.location = 'top_left'\n\n\n    p.title.text = str(title)\n    p.title.text_font_size = '20px' \n\n    return p\n\ndef datastr_to_dict (datastr):\n    \"\"\"Generate dict from myPlantNames\n    In case name is not valid it gets ignored\n\n    Args:\n        datastr (list of str): myPlantNames to be transformed\n\n    Returns:\n        dat (dict): dictionary of dataitems\n        rename (dict): dict of type {name:myPlantName}\n\n    example:\n    .....\n    datastr_to_dict(['test123','Exhaust temperature cyl. 23'])\n\n        Output: \n        test123 not available! Please check spelling.\n\n        dat={191: ['Exhaust_TempCyl23', 'C (high)']},\n        rename={'Exhaust_TempCyl23': 'Exhaust temperature cyl. 
23'}\"\"\"\n\n    #updated version, can transform myPlantNames from different languages\n    data=np.unique(datastr).tolist()\n\n    Request_Ids = pd.read_csv('data\/dataitems.csv', sep=';')\n    rel_data=pd.DataFrame()\n\n    rename={}\n    for da in data:\n        # try: \n        #     new=dataitems_df.loc[dataitems_df.myPlantName==da]['dataitem'].values[0]\n        #     rename [new]=da\n        #     da=new\n        # except Exception:\n        #     pass\n\n        data_id=Request_Ids.loc[Request_Ids['myPlantName']==da]\n        if not data_id.empty:\n            new=Request_Ids.loc[Request_Ids.myPlantName==da]['name'].values[0]\n            rename [new]=da\n            rel_data=rel_data.append(data_id)\n\n        #else: #uncommented for less output messages\n            #print(da+' not available! Please check spelling.')\n            #warnings.warn(da+' not available! Please check spelling.')\n\n    dat = {rec['id']:[rec['name'], rec['unit']] for rec in rel_data.to_dict('records')}\n    return dat, rename\n\ndef expand_cylinder (y, rel_cyl=all, engi=0):\n    \"\"\"Check if parameter cylinder specific and expand if aplicable\n\n    Args:\n        y (dict): one line of a single pltcfg\n        rel_cyl (list, optional): Defines relevant cylinders, defaults to all\n        engi (dmyplant2.engine, optional): Engine instance to get number of cylinders from\n\n    Returns:\n        y (dict): line of a single pltcfg with expanded parameters\n\n    example:\n    .....\n    \"\"\"\n\n    if rel_cyl is all:\n        if engi != 0:\n            e_type=engi.get_property('Engine Type')\n            rel_cyl=list(range(1, int(e_type[1:3])+1))\n        else:\n            rel_cyl=list(range(1, 25))\n\n    add_cyl_short_num=['Inlet valve closure noise', 'Outlet valve closure noise']\n    add_cyl_num=['Exhaust temperature','Exhaust temperature delta', 'Ignition voltage', 'ITP','Knock integrator','Knock noise', #'Exhaust temperature delta' added for delta to mean value\n    'Pressure 49° before TDC', 'Mechanical noise', 'Cylinder state', 'Close current gradient',\n     'Inlet valve closure timing', 'Outlet valve closure timing']\n    add_num=['Knock signal','P-max','AI','IMEP','Duration of opening','Conrod bearing temperature','CQ max','CQ','Slow down time']\n    add_mid=[]#talk with Sebastian what is looked at analyzis\n\n    to_remove=[]\n    for col in y['col']:\n        if col in add_cyl_short_num and not col in to_remove:\n            for cyl in rel_cyl:\n                y['col'].append(f'{col} cyl. {cyl}')\n                to_remove.append (col)\n\n        if col in add_cyl_num and not col in to_remove:\n            for cyl in rel_cyl:\n                y['col'].append(f'{col} cyl. {cyl:02d}')\n                to_remove.append (col)\n\n        if col in add_num and not col in to_remove:\n            for cyl in rel_cyl:\n                y['col'].append(f'{col} {cyl:02d}')\n                to_remove.append (col)\n\n        if col in add_mid and not col in to_remove:\n            for cyl in rel_cyl:\n                y['col'].append(f'{col} cyl. 
{cyl:02d}')\n                to_remove.append (col)\n\n    y['col']=[i for i in y['col'] if not i in to_remove ] #remove original column\n    return y\n\ndef shrink_cylinder (y, rel_cyl=list(range(1, 25))):\n    \"\"\"Sort out some cylinder specific parameters, so that only the ones interested in are displayed\n        The rest is loaded beforehand for shorter overall loading time\n\n    Args:\n        y (dict): one line of a single pltcfg\n        rel_cyl (list, optional): Defines relevant cylinders, defaults to list:[1,2...,23,24]\n\n    Returns:\n        y (dict): line of a single pltcfg with eventually less parameters\n\n    example:\n    .....\n    \"\"\"\n\n    rel_cyl=[str(cyl).zfill(2) for cyl in rel_cyl]\n    add_cyl_short_num=['Inlet valve closure noise', 'Outlet valve closure noise']\n    add_cyl_num=['Exhaust temperature','Exhaust temperature delta', 'Ignition voltage', 'ITP','Knock integrator','Knock noise', #'Exhaust temperature delta' added for delta to mean value\n    'Pressure 49° before TDC', 'Mechanical noise', 'Cylinder state', 'Close current gradient',\n     'Inlet valve closure timing', 'Outlet valve closure timing']\n    add_num=['Knock signal','P-max','AI','IMEP','Duration of opening','Conrod bearing temperature','CQ max','CQ','Slow down time']\n    add_mid=[]#talk with Sebastian what is looked at analyzis\n    to_check=add_cyl_num+add_num+add_mid\n\n    to_remove=[]\n    for col in y['col']:\n        if (any(ele in col for ele in to_check) and not col[-2:] in rel_cyl): #check if elemt in expanded elements and not in rel_cyl\n            #bug with add_cyl_short_num, exception would need to be added\n            to_remove.append (col)\n\n    y['col']=[i for i in y['col'] if not i in to_remove ] #remove original column\n    return y\n\ndef load_pltcfg_from_excel ():\n    \"\"\"Load plotconfig from Excel Sheet \"Input\" necessary in same folder\n\n    Returns:\n        pltcfg (list of dicts): pltcfg with list of dicts\n        plt_titles (list of String): titles of plots\n    .....\n    \"\"\"\n\n    import math\n    def is_number(s):\n        \"\"\" Returns True is string is a number. 
\"\"\"\n        try:\n            float(s)\n            return math.isfinite(s)\n        except ValueError:\n            return False\n\n    df_cfg=pd.read_excel('Input_validation_dashboard.xlsx', sheet_name='Pltcfg', usecols=['Plot_Nr', 'Axis_Nr', 'Name', 'Unit', 'y-lim min', 'y-lim max'])\n    df_cfg.sort_values(by=['Plot_Nr','Axis_Nr'], inplace=True)\n    df_cfg.dropna(subset=['Plot_Nr', 'Axis_Nr', 'Name'], inplace=True)\n    df_cfg['p_equal'] = df_cfg.Plot_Nr.eq(df_cfg.Plot_Nr.shift())\n    df_cfg['a_equal'] = df_cfg.Axis_Nr.eq(df_cfg.Axis_Nr.shift())\n\n    pltcfg=[]\n    plt_titles=[]\n    for i in range(len(df_cfg)):\n        if df_cfg.p_equal.iloc[i]==False:\n            pltcfg.append([]) #new plot\n            if df_cfg.Axis_Nr.iloc[i]==0: #append title if axis=0\n                plt_titles.append(df_cfg.Name.iloc[i]) #append title\n            else: \n                plt_titles.append('')\n\n        if df_cfg.Axis_Nr.iloc[i]!=0:\n            if df_cfg.a_equal.iloc[i]==False or df_cfg.p_equal.iloc[i]==False:\n                pltcfg[-1].append(dict()) #new axis\n\n            y=pltcfg[-1][-1]\n            if type(df_cfg.Name.iloc[i])==str:\n                if 'col' in y:\n                    y['col'].append(df_cfg.Name.iloc[i].replace('\\xa0', ' '))\n                else:\n                    y['col']=[df_cfg.Name.iloc[i].replace('\\xa0', ' ')]\n                if 'unit' not in y and type(df_cfg.Unit.iloc[i])==str: #take first occurance of unit\n                    y['unit']=df_cfg.Unit.iloc[i].replace('\\xa0', ' ')\n\n                lim_min=df_cfg['y-lim min'].iloc[i]\n                lim_max=df_cfg['y-lim max'].iloc[i]\n                if 'ylim' not in y and is_number(lim_min) and is_number(lim_max):\n                    y['ylim']=(lim_min, lim_max) #add tuple y lim\n    return pltcfg, plt_titles\n\ndef show_val_stats (vl, df_loadrange=None, df_starts_oph=None):\n    \"\"\"\n    Calculates\n    Sort out some cylinder specific parameters, so that only the ones interested in are displayed\n    The rest is loaded beforehand for shorter overall loading time\n\n    Args:\n        vl (dmyplant2.Validation): Validation Objekt\n        df_loadrange (pd.DataFrame) (optional): Dataframe with load information \n        df_starts_oph (pd-DatFrame) (optional): DataFrame with information about oph per start\n\n    Returns:\n        text_lay (bokeh.models.layouts.Column): Bokeh Column, can be displayed directly with show() or used further in a sheet or tab\n\n    example:\n    .....\n    \"\"\"\n    from bokeh.models.widgets import DataTable, DateFormatter, TableColumn\n    elements=[]\n    #### loadrange\n    if not df_loadrange.empty:\n        loadrange_info=Div(text=\"
<h3>Power load (P\/Pnom)<\/h3>(Valid for displayed data)\")\n\n        df_loadrange=df_loadrange*100\n        for col in df_loadrange.columns: df_loadrange[col]=df_loadrange[col].map(\"{:,.1f}%\".format)\n        df_loadrange.insert(0, 'Engine', df_loadrange.index)\n        Columns = [TableColumn(field=Ci, title=Ci) for Ci in df_loadrange.columns] # bokeh columns\n        loadrange = DataTable(columns=Columns, source=ColumnDataSource(df_loadrange), autosize_mode='fit_columns', height=30*(len(df_loadrange.index)+1), index_position=None) # bokeh table\n        elements+=[loadrange_info, loadrange]#, loadrange_info2]\n\n    #### starts_oph\n    if not df_starts_oph.empty:\n        starts_oph_info=Div(text=\"
<h3>OPH and Starts<\/h3>(Valid for displayed data)\")\n\n        df_starts_oph['OPH']=df_starts_oph['OPH'].map(\"{:,.1f}\".format)\n        df_starts_oph['OPH\/ Start']=df_starts_oph['OPH\/ Start'].map(\"{:,.1f}\".format)\n        df_starts_oph.insert(0, 'Engine', df_starts_oph.index)\n        Columns = [TableColumn(field=Ci, title=Ci) for Ci in df_starts_oph.columns] # bokeh columns\n        starts_oph = DataTable(columns=Columns, source=ColumnDataSource(df_starts_oph), autosize_mode='fit_columns', height=30*(len(df_starts_oph.index)+1), index_position=None) # bokeh table\n        elements+=[starts_oph_info, starts_oph]#, starts_oph_info2]\n\n    d=vl.dashboard\n    # Read Values defined in tdef from Myplant into a pd.dataframe\n    tdef = {161: 'Count_OpHour', 102: 'Power_PowerAct', 1258: 'OperationalCondition', 19074: 'Various_Bits_CollAlarm'}\n    ntable = [[e] + [e.get_dataItem(v) for v in tdef.values()] for e in vl.engines]\n    dft = pd.DataFrame(ntable, columns=['Name'] + list(tdef.values()))\n\n    info_text=Div(text=\"