title (stringclasses: 1 value) | text (stringlengths: 46 to 1.11M) | id (stringlengths: 27 to 30)
---|---|---|
examples/advanced/relativity.py/eq1
def eq1():
    r = Symbol("r")
    e = Rmn.dd(0, 0)
    e = e.subs(nu(r), -lam(r))
    pprint(dsolve(e, lam(r)))
|
negative_train_query0_00099
|
|
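For reference, eq1 through eq4 each take one diagonal component of the Ricci tensor, substitute the ansatz nu(r) = -lam(r), and hand the resulting ODE to dsolve; they are components of the vacuum Einstein field equations,

    R_{\mu\nu} = 0.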
examples/advanced/relativity.py/eq2
def eq2():
    r = Symbol("r")
    e = Rmn.dd(1, 1)
    C = Symbol("CC")
    e = e.subs(nu(r), -lam(r))
    pprint(dsolve(e, lam(r)))
|
negative_train_query0_00100
|
|
examples/advanced/relativity.py/eq3
def eq3():
    r = Symbol("r")
    e = Rmn.dd(2, 2)
    e = e.subs(nu(r), -lam(r))
    pprint(dsolve(e, lam(r)))
|
negative_train_query0_00101
|
|
examples/advanced/relativity.py/eq4
def eq4():
    r = Symbol("r")
    e = Rmn.dd(3, 3)
    e = e.subs(nu(r), -lam(r))
    pprint(dsolve(e, lam(r)))
    pprint(dsolve(e, lam(r), 'best'))
|
negative_train_query0_00102
|
|
examples/advanced/relativity.py/main
def main():
    print("Initial metric:")
    pprint(gdd)
    print("-"*40)
    print("Christoffel symbols:")
    pprint_Gamma_udd(0, 1, 0)
    pprint_Gamma_udd(0, 0, 1)
    print()
    pprint_Gamma_udd(1, 0, 0)
    pprint_Gamma_udd(1, 1, 1)
    pprint_Gamma_udd(1, 2, 2)
    pprint_Gamma_udd(1, 3, 3)
    print()
    pprint_Gamma_udd(2, 2, 1)
    pprint_Gamma_udd(2, 1, 2)
    pprint_Gamma_udd(2, 3, 3)
    print()
    pprint_Gamma_udd(3, 2, 3)
    pprint_Gamma_udd(3, 3, 2)
    pprint_Gamma_udd(3, 1, 3)
    pprint_Gamma_udd(3, 3, 1)
    print("-"*40)
    print("Ricci tensor:")
    pprint_Rmn_dd(0, 0)
    e = Rmn.dd(1, 1)
    pprint_Rmn_dd(1, 1)
    pprint_Rmn_dd(2, 2)
    pprint_Rmn_dd(3, 3)
    print("-"*40)
    print("Solve Einstein's equations:")
    e = e.subs(nu(r), -lam(r)).doit()
    l = dsolve(e, lam(r))
    pprint(l)
    lamsol = solve(l, lam(r))[0]
    metric = gdd.subs(lam(r), lamsol).subs(nu(r), -lamsol)  # .combine()
    print("metric:")
    pprint(metric)
|
negative_train_query0_00103
|
|
examples/advanced/relativity.py/MT/__init__
class MT:
    def __init__(self, m):
        self.gdd = m
        self.guu = m.inv()
|
negative_train_query0_00104
|
|
examples/advanced/relativity.py/MT/__str__
class MT:
    def __str__(self):
        return "g_dd =\n" + str(self.gdd)
|
negative_train_query0_00105
|
|
examples/advanced/relativity.py/MT/dd
class MT:
    def dd(self, i, j):
        return self.gdd[i, j]
|
negative_train_query0_00106
|
|
examples/advanced/relativity.py/MT/uu
class MT:
    def uu(self, i, j):
        return self.guu[i, j]
|
negative_train_query0_00107
|
|
examples/advanced/relativity.py/G/__init__
class G:
    def __init__(self, g, x):
        self.g = g
        self.x = x
|
negative_train_query0_00108
|
|
examples/advanced/relativity.py/G/udd
class G:
    def udd(self, i, k, l):
        g = self.g
        x = self.x
        r = 0
        for m in [0, 1, 2, 3]:
            r += g.uu(i, m)/2 * (g.dd(m, k).diff(x[l]) + g.dd(m, l).diff(x[k])
                                 - g.dd(k, l).diff(x[m]))
        return r
|
negative_train_query0_00109
|
|
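For reference, G.udd(i, k, l) implements the Christoffel symbols of the second kind, summing over the dummy index m = 0..3:

    \Gamma^{i}_{kl} = \tfrac{1}{2} g^{im} \left( \partial_l g_{mk} + \partial_k g_{ml} - \partial_m g_{kl} \right).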
examples/advanced/relativity.py/Riemann/__init__
class Riemann:
    def __init__(self, G, x):
        self.G = G
        self.x = x
|
negative_train_query0_00110
|
|
examples/advanced/relativity.py/Riemann/uddd
class Riemann:
    def uddd(self, rho, sigma, mu, nu):
        G = self.G
        x = self.x
        r = G.udd(rho, nu, sigma).diff(x[mu]) - G.udd(rho, mu, sigma).diff(x[nu])
        for lam in [0, 1, 2, 3]:
            r += G.udd(rho, mu, lam)*G.udd(lam, nu, sigma) \
                - G.udd(rho, nu, lam)*G.udd(lam, mu, sigma)
        return r
|
negative_train_query0_00111
|
|
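Riemann.uddd follows the standard component formula for the curvature tensor in terms of the Christoffel symbols:

    R^{\rho}{}_{\sigma\mu\nu} = \partial_\mu \Gamma^{\rho}_{\nu\sigma} - \partial_\nu \Gamma^{\rho}_{\mu\sigma} + \Gamma^{\rho}_{\mu\lambda} \Gamma^{\lambda}_{\nu\sigma} - \Gamma^{\rho}_{\nu\lambda} \Gamma^{\lambda}_{\mu\sigma}.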
examples/advanced/relativity.py/Ricci/__init__
class Ricci:
    def __init__(self, R, x):
        self.R = R
        self.x = x
        self.g = R.G.g
|
negative_train_query0_00112
|
|
examples/advanced/relativity.py/Ricci/dd
class Ricci:
    def dd(self, mu, nu):
        R = self.R
        x = self.x
        r = 0
        for lam in [0, 1, 2, 3]:
            r += R.uddd(lam, mu, lam, nu)
        return r
|
negative_train_query0_00113
|
|
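Ricci.dd contracts the Riemann tensor over its first and third indices, and Ricci.ud (next entry) raises one index with the inverse metric:

    R_{\mu\nu} = R^{\lambda}{}_{\mu\lambda\nu}, \qquad R^{\mu}{}_{\nu} = g^{\mu\lambda} R_{\lambda\nu}.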
examples/advanced/relativity.py/Ricci/ud
class Ricci:
    def ud(self, mu, nu):
        r = 0
        for lam in [0, 1, 2, 3]:
            r += self.g.uu(mu, lam)*self.dd(lam, nu)
        return r.expand()
|
negative_train_query0_00114
|
|
examples/advanced/gibbs_phenomenon.py/l2_norm
def l2_norm(f, lim):
    """
    Calculates the L2 norm of the function "f" over the domain lim = (x, a, b).

    x ...... the independent variable in f over which to integrate
    a, b ... the limits of the interval

    Examples
    ========

    >>> from sympy import Symbol
    >>> from gibbs_phenomenon import l2_norm
    >>> x = Symbol('x', real=True)
    >>> l2_norm(1, (x, -1, 1))
    sqrt(2)
    >>> l2_norm(x, (x, -1, 1))
    sqrt(6)/3
    """
    return sqrt(integrate(Abs(f)**2, lim))
|
negative_train_query0_00115
|
|
examples/advanced/gibbs_phenomenon.py/l2_inner_product
def l2_inner_product(a, b, lim):
    """
    Calculates the L2 inner product (a, b) over the domain lim.
    """
    return integrate(conjugate(a)*b, lim)
|
negative_train_query0_00116
|
|
examples/advanced/gibbs_phenomenon.py/l2_projection
def l2_projection(f, basis, lim):
    """
    L2 projects the function f on the basis over the domain lim.
    """
    r = 0
    for b in basis:
        r += l2_inner_product(f, b, lim) * b
    return r
|
negative_train_query0_00117
|
|
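With the inner product used here, <a, b> = integrate(conjugate(a)*b, lim), l2_projection computes the partial sum

    P f = \sum_{b \in \text{basis}} \langle f, b \rangle \, b,

which is the orthogonal projection of f onto the span of an orthonormal basis (the bases used in this example are real, so the ordering inside the inner product does not matter).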
examples/advanced/gibbs_phenomenon.py/l2_gram_schmidt
def l2_gram_schmidt(list, lim):
    """
    Orthonormalizes the "list" of functions using the Gram-Schmidt process.

    Examples
    ========

    >>> from sympy import Symbol
    >>> from gibbs_phenomenon import l2_gram_schmidt
    >>> x = Symbol('x', real=True)    # perform computations over reals to save time
    >>> l2_gram_schmidt([1, x, x**2], (x, -1, 1))
    [sqrt(2)/2, sqrt(6)*x/2, 3*sqrt(10)*(x**2 - 1/3)/4]
    """
    r = []
    for a in list:
        if r == []:
            v = a
        else:
            v = a - l2_projection(a, r, lim)
        v_norm = l2_norm(v, lim)
        if v_norm == 0:
            raise ValueError("The sequence is not linearly independent.")
        r.append(v/v_norm)
    return r
|
negative_train_query0_00118
|
|
examples/advanced/gibbs_phenomenon.py/integ
def integ(f):
    return integrate(f, (x, -pi, 0)) + integrate(-f, (x, 0, pi))
|
negative_train_query0_00119
|
|
examples/advanced/gibbs_phenomenon.py/series
def series(L):
    """
    Normalizes the series.

    Concretely, this returns sum(integ(b)*b for b in L), i.e. the expansion
    of the step function in the orthonormal basis L.
    """
    r = 0
    for b in L:
        r += integ(b)*b
    return r
|
negative_train_query0_00120
|
|
examples/advanced/gibbs_phenomenon.py/msolve
def msolve(f, x):
    """
    Finds the first root of f(x) to the left of 0.

    The x0 and dx below are tailored to get the correct result for our
    particular function --- the general solver often overshoots the first
    solution.
    """
    f = lambdify(x, f)
    x0 = -0.001
    dx = 0.001
    while f(x0 - dx) * f(x0) > 0:
        x0 = x0 - dx
    x_max = x0 - dx
    x_min = x0
    assert f(x_max) > 0
    assert f(x_min) < 0
    for n in range(100):
        x0 = (x_max + x_min)/2
        if f(x0) > 0:
            x_max = x0
        else:
            x_min = x0
    return x0
|
negative_train_query0_00121
|
|
examples/advanced/gibbs_phenomenon.py/main
def main():
    L = [1]
    for i in range(1, 100):
        L.append(cos(i*x))
        L.append(sin(i*x))
    # next 2 lines equivalent to L = l2_gram_schmidt(L, (x, -pi, pi)), but faster:
    L[0] /= sqrt(2)
    L = [f/sqrt(pi) for f in L]
    f = series(L)
    print("Fourier series of the step function")
    pprint(f)
    x0 = msolve(f.diff(x), x)
    print("x-value of the maximum:", x0)
    max = f.subs(x, x0).evalf()
    print("y-value of the maximum:", max)
    g = max*pi/2
    print("Wilbraham-Gibbs constant        :", g.evalf())
    print("Wilbraham-Gibbs constant (exact):",
          Integral(sin(x)/x, (x, 0, pi)).evalf())
|
negative_train_query0_00122
|
|
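The two values printed at the end are the numerical estimate max*pi/2 and the exact Wilbraham-Gibbs constant,

    \int_0^{\pi} \frac{\sin x}{x} \, dx = \mathrm{Si}(\pi) \approx 1.8519,

which the overshoot of the truncated Fourier series approaches as more terms are included.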
examples/advanced/hydrogen.py/main
def main():
    print("Hydrogen radial wavefunctions:")
    a, r = symbols("a r")
    print("R_{21}:")
    pprint(R_nl(2, 1, a, r))
    print("R_{60}:")
    pprint(R_nl(6, 0, a, r))
    print("Normalization:")
    i = Integral(R_nl(1, 0, 1, r)**2 * r**2, (r, 0, oo))
    pprint(Eq(i, i.doit()))
    i = Integral(R_nl(2, 0, 1, r)**2 * r**2, (r, 0, oo))
    pprint(Eq(i, i.doit()))
    i = Integral(R_nl(2, 1, 1, r)**2 * r**2, (r, 0, oo))
    pprint(Eq(i, i.doit()))
|
negative_train_query0_00123
|
|
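Each pprint(Eq(i, i.doit())) above checks the radial normalization of the hydrogen wavefunctions (with the Bohr radius set to 1):

    \int_0^{\infty} R_{nl}(r)^2 \, r^2 \, dr = 1.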
examples/advanced/curvilinear_coordinates.py/laplace
def laplace(f, g_inv, g_det, X):
    """
    Calculates Laplace(f), using the inverse metric g_inv, the determinant of
    the metric g_det, all in variables X.
    """
    r = 0
    for i in range(len(X)):
        for j in range(len(X)):
            r += g_inv[i, j]*f.diff(X[i]).diff(X[j])
    for sigma in range(len(X)):
        for alpha in range(len(X)):
            r += g_det.diff(X[sigma]) * g_inv[sigma, alpha] * \
                f.diff(X[alpha]) / (2*g_det)
    return r
|
negative_train_query0_00124
|
|
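For reference (a standard formula, not taken from the example itself), the operator being assembled here is the Laplacian of a scalar in curvilinear coordinates, i.e. the Laplace-Beltrami operator

    \Delta f = \frac{1}{\sqrt{|g|}} \, \partial_i \left( \sqrt{|g|} \, g^{ij} \, \partial_j f \right),

with g^{ij} the inverse metric (g_inv) and |g| the metric determinant (g_det).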
examples/advanced/curvilinear_coordinates.py/transform
def transform(name, X, Y, g_correct=None, recursive=False):
    """
    Transforms from cartesian coordinates X to any curvilinear coordinates Y.

    It prints useful information, like the Jacobian, the metric tensor, the
    determinant of the metric, the Laplace operator in the new coordinates, ...

    g_correct ... if not None, it will be taken as the metric --- this is
                  useful if sympy's trigsimp() is not powerful enough to
                  simplify the metric so that it is usable for later
                  calculation. Leave it as None unless the metric that
                  transform() prints is not simplified; in that case you can
                  help it by specifying the correct one.
    recursive ... apply recursive trigonometric simplification (use only when
                  needed, as it is an expensive operation)
    """
    print("_"*80)
    print("Transformation:", name)
    for x, y in zip(X, Y):
        pprint(Eq(y, x))
    J = X.jacobian(Y)
    print("Jacobian:")
    pprint(J)
    g = J.T*eye(J.shape[0])*J
    g = g.applyfunc(expand)
    print("metric tensor g_{ij}:")
    pprint(g)
    if g_correct is not None:
        g = g_correct
        print("metric tensor g_{ij} specified by hand:")
        pprint(g)
    print("inverse metric tensor g^{ij}:")
    g_inv = g.inv(method="ADJ")
    g_inv = g_inv.applyfunc(simplify)
    pprint(g_inv)
    print("det g_{ij}:")
    g_det = g.det()
    pprint(g_det)
    f = Function("f")(*list(Y))
    print("Laplace:")
    pprint(laplace(f, g_inv, g_det, Y))
|
negative_train_query0_00125
|
|
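In transform(), the metric in the new coordinates Y is the pullback of the Euclidean metric through the coordinate map X(Y); with the Jacobian J = X.jacobian(Y) this is exactly the g = J.T*eye(J.shape[0])*J line above:

    g_{ij} = \sum_k \frac{\partial X^k}{\partial Y^i} \, \frac{\partial X^k}{\partial Y^j}, \qquad g = J^{T} J.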
examples/advanced/curvilinear_coordinates.py/main
def main():
    mu, nu, rho, theta, phi, sigma, tau, a, t, x, y, z, w = symbols(
        "mu, nu, rho, theta, phi, sigma, tau, a, t, x, y, z, w")
    transform("polar", Matrix([rho*cos(phi), rho*sin(phi)]), [rho, phi])
    transform("cylindrical", Matrix([rho*cos(phi), rho*sin(phi), z]),
              [rho, phi, z])
    transform("spherical",
              Matrix([rho*sin(theta)*cos(phi), rho*sin(theta)*sin(phi),
                      rho*cos(theta)]),
              [rho, theta, phi],
              recursive=True
              )
    transform("rotating disk",
              Matrix([t,
                      x*cos(w*t) - y*sin(w*t),
                      x*sin(w*t) + y*cos(w*t),
                      z]),
              [t, x, y, z])
    transform("parabolic",
              Matrix([sigma*tau, (tau**2 - sigma**2) / 2]),
              [sigma, tau])
    transform("bipolar",
              Matrix([a*sinh(tau)/(cosh(tau)-cos(sigma)),
                      a*sin(sigma)/(cosh(tau)-cos(sigma))]),
              [sigma, tau]
              )
    transform("elliptic",
              Matrix([a*cosh(mu)*cos(nu), a*sinh(mu)*sin(nu)]),
              [mu, nu]
              )
|
negative_train_query0_00126
|
|
bin/coverage_report.py/generate_covered_files
def generate_covered_files(top_dir):
    for dirpath, dirnames, filenames in os.walk(top_dir):
        omit_dirs = [dirn for dirn in dirnames if omit_dir_re.match(dirn)]
        for x in omit_dirs:
            dirnames.remove(x)
        for filename in filenames:
            if source_re.match(filename):
                yield os.path.join(dirpath, filename)
|
negative_train_query0_00127
|
|
bin/coverage_report.py/make_report
def make_report(source_dir, report_dir, use_cache=False, slow=False):
    # code adapted from /bin/test
    bin_dir = os.path.abspath(os.path.dirname(__file__))  # bin/
    sympy_top = os.path.split(bin_dir)[0]  # ../
    sympy_dir = os.path.join(sympy_top, 'sympy')  # ../sympy/
    if os.path.isdir(sympy_dir):
        sys.path.insert(0, sympy_top)
    os.chdir(sympy_top)
    cov = coverage.coverage()
    cov.exclude("raise NotImplementedError")
    cov.exclude("def canonize")  # this should be "@decorated"
    cov.exclude("def __mathml__")
    if use_cache:
        cov.load()
    else:
        cov.erase()
        cov.start()
        import sympy
        sympy.test(source_dir, subprocess=False)
        if slow:
            sympy.test(source_dir, subprocess=False, slow=slow)
        #sympy.doctest()  # coverage doesn't play well with doctests
        cov.stop()
        cov.save()
    covered_files = list(generate_covered_files(source_dir))
    if report_dir in os.listdir(os.curdir):
        for f in os.listdir(report_dir):
            if f.split('.')[-1] in ['html', 'css', 'js']:
                os.remove(os.path.join(report_dir, f))
    cov.html_report(morfs=covered_files, directory=report_dir)
|
negative_train_query0_00128
|
|
bin/sympy_time_cache.py/new_import
def new_import(name, globals={}, locals={}, fromlist=[]):
    global pp
    if name in seen:
        return old_import(name, globals, locals, fromlist)
    seen.add(name)
    node = TreeNode(name)
    pp.add_child(node)
    old_pp = pp
    pp = node
    # Do the actual import
    t1 = timeit.default_timer()
    module = old_import(name, globals, locals, fromlist)
    t2 = timeit.default_timer()
    node.set_time(int(1000000*(t2 - t1)))
    pp = old_pp
    return module
|
negative_train_query0_00129
|
|
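The hook above relies on module-level state (seen, old_import, pp) and on being installed as the builtin importer. A minimal sketch of that wiring, assuming it lives in the same module as new_import and TreeNode (the name timed_import and the "sympy (root)" label are illustrative, not from the script):

    import builtins
    import timeit  # new_import above uses timeit.default_timer

    seen = set()                      # modules that were already timed once
    old_import = builtins.__import__  # keep a handle on the real importer
    pp = TreeNode("sympy (root)")     # root of the timing tree

    def timed_import(name, globals=None, locals=None, fromlist=(), level=0):
        # Python 3 adapter: new_import predates the ``level`` argument, so
        # relative imports are forwarded to the real importer untimed.
        if level:
            return old_import(name, globals, locals, fromlist, level)
        return new_import(name, globals or {}, locals or {}, fromlist or ())

    builtins.__import__ = timed_import
    import sympy                      # noqa: F401 -- the import being timed
    builtins.__import__ = old_import  # always restore the real importer

    pp.print_tree(max_depth=1)
    pp.print_slowest()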
bin/sympy_time_cache.py/TreeNode/__init__
class TreeNode:
    def __init__(self, name):
        self._name = name
        self._children = []
        self._time = 0
|
negative_train_query0_00130
|
|
bin/sympy_time_cache.py/TreeNode/__str__
class TreeNode:
    def __str__(self):
        return "%s: %s" % (self._name, self._time)
|
negative_train_query0_00131
|
|
bin/sympy_time_cache.py/TreeNode/add_child
class TreeNode:
    def add_child(self, node):
        self._children.append(node)
|
negative_train_query0_00132
|
|
bin/sympy_time_cache.py/TreeNode/children
class TreeNode:
    def children(self):
        return self._children
|
negative_train_query0_00133
|
|
bin/sympy_time_cache.py/TreeNode/child
class TreeNode:
    def child(self, i):
        return self.children()[i]
|
negative_train_query0_00134
|
|
bin/sympy_time_cache.py/TreeNode/set_time
class TreeNode:
    def set_time(self, time):
        self._time = time
|
negative_train_query0_00135
|
|
bin/sympy_time_cache.py/TreeNode/time
class TreeNode:
    def time(self):
        return self._time
|
negative_train_query0_00136
|
|
bin/sympy_time_cache.py/TreeNode/exclusive_time
class TreeNode:
    def exclusive_time(self):
        return self.total_time() - sum(child.time() for child in self.children())
|
negative_train_query0_00137
|
|
bin/sympy_time_cache.py/TreeNode/name
class TreeNode:
    def name(self):
        return self._name
|
negative_train_query0_00138
|
|
bin/sympy_time_cache.py/TreeNode/linearize
class TreeNode:
    def linearize(self):
        res = [self]
        for child in self.children():
            res.extend(child.linearize())
        return res
|
negative_train_query0_00139
|
|
bin/sympy_time_cache.py/TreeNode/print_tree
class TreeNode:
    def print_tree(self, level=0, max_depth=None):
        print(" "*level + str(self))
        if max_depth is not None and max_depth <= level:
            return
        for child in self.children():
            child.print_tree(level + 1, max_depth=max_depth)
|
negative_train_query0_00140
|
|
bin/sympy_time_cache.py/TreeNode/print_generic
class TreeNode:
    def print_generic(self, n=50, method="time"):
        slowest = sorted((getattr(node, method)(), node.name()) for node in self.linearize())[-n:]
        for time, name in slowest[::-1]:
            print("%s %s" % (time, name))
|
negative_train_query0_00141
|
|
bin/sympy_time_cache.py/TreeNode/print_slowest
class TreeNode:
    def print_slowest(self, n=50):
        # pass n through instead of hard-coding 50, so the argument is honored
        self.print_generic(n=n, method="time")
|
negative_train_query0_00142
|
|
bin/sympy_time_cache.py/TreeNode/print_slowest_exclusive
class TreeNode:
    def print_slowest_exclusive(self, n=50):
        self.print_generic(n, method="exclusive_time")
|
negative_train_query0_00143
|
|
bin/sympy_time_cache.py/TreeNode/write_cachegrind
class TreeNode:
    def write_cachegrind(self, f):
        if isinstance(f, str):
            f = open(f, "w")
            f.write("events: Microseconds\n")
            f.write("fl=sympyallimport\n")
            must_close = True
        else:
            must_close = False
        f.write("fn=%s\n" % self.name())
        f.write("1 %s\n" % self.exclusive_time())
        counter = 2
        for child in self.children():
            f.write("cfn=%s\n" % child.name())
            f.write("calls=1 1\n")
            f.write("%s %s\n" % (counter, child.time()))
            counter += 1
        f.write("\n\n")
        for child in self.children():
            child.write_cachegrind(f)
        if must_close:
            f.close()
|
negative_train_query0_00144
|
|
bin/sympy_time.py/new_import
def new_import(name, globals={}, locals={}, fromlist=[]):
    global level, parent
    if name in seen:
        return old_import(name, globals, locals, fromlist)
    seen.add(name)
    import_order.append((name, level, parent))
    t1 = time.time()
    old_parent = parent
    parent = name
    level += 1
    module = old_import(name, globals, locals, fromlist)
    level -= 1
    parent = old_parent
    t2 = time.time()
    elapsed_times[name] = t2 - t1
    return module
|
negative_train_query0_00145
|
|
bin/mailmap_update.py/red
def red(text):
    return "\033[31m%s\033[0m" % text
|
negative_train_query0_00146
|
|
bin/mailmap_update.py/yellow
def yellow(text):
    return "\033[33m%s\033[0m" % text
|
negative_train_query0_00147
|
|
bin/mailmap_update.py/blue
def blue(text):
    return "\033[34m%s\033[0m" % text
|
negative_train_query0_00148
|
|
bin/mailmap_update.py/author_name
def author_name(line):
    assert line.count("<") == line.count(">") == 1
    assert line.endswith(">")
    return line.split("<", 1)[0].strip()
|
negative_train_query0_00149
|
|
bin/mailmap_update.py/key
def key(line):
    # return lower case first address on line or
    # raise an error if not an entry
    if '#' in line:
        line = line.split('#')[0]
    L, R = line.count("<"), line.count(">")
    assert L == R and L in (1, 2)
    return line.split(">", 1)[0].split("<")[1].lower()
|
negative_train_query0_00150
|
|
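A quick illustrative call (the input line is hypothetical, not taken from a real .mailmap): key() returns the lower-cased first e-mail address on an entry line.

    >>> key("Proper Name <Commit@Email.xx> alias <other@email.yy>")
    'commit@email.xx'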
bin/mailmap_update.py/short_entry
def short_entry(line):
    if line.count('<') == 2:
        if line.split('>', 1)[1].split('<')[0].strip():
            return False
    return True
|
negative_train_query0_00151
|
|
bin/generate_module_list.py/get_paths
def get_paths(level=15):
    """
    Generates a set of paths for modules searching.

    Examples
    ========

    >>> get_paths(2)
    ['sympy/__init__.py', 'sympy/*/__init__.py', 'sympy/*/*/__init__.py']
    >>> get_paths(6)
    ['sympy/__init__.py', 'sympy/*/__init__.py', 'sympy/*/*/__init__.py',
    'sympy/*/*/*/__init__.py', 'sympy/*/*/*/*/__init__.py',
    'sympy/*/*/*/*/*/__init__.py', 'sympy/*/*/*/*/*/*/__init__.py']
    """
    wildcards = ["/"]
    for i in range(level):
        wildcards.append(wildcards[-1] + "*/")
    p = ["sympy" + x + "__init__.py" for x in wildcards]
    return p
|
negative_train_query0_00152
|
|
bin/generate_module_list.py/generate_module_list
def generate_module_list():
    g = []
    for x in get_paths():
        g.extend(glob(x))
    g = [".".join(x.split("/")[:-1]) for x in g]
    g = [i for i in g if not i.endswith('.tests')]
    g.remove('sympy')
    g = list(set(g))
    g.sort()
    return g
|
negative_train_query0_00153
|
|
bin/get_sympy.py/path_hack
def path_hack():
    """
    Hack sys.path to import correct (local) sympy.
    """
    this_file = os.path.abspath(__file__)
    sympy_dir = os.path.join(os.path.dirname(this_file), "..")
    sympy_dir = os.path.normpath(sympy_dir)
    sys.path.insert(0, sympy_dir)
    return sympy_dir
|
negative_train_query0_00154
|
|
bin/authors_update.py/red
def red(text):
    return "\033[31m%s\033[0m" % text
|
negative_train_query0_00155
|
|
bin/authors_update.py/yellow
def yellow(text):
    return "\033[33m%s\033[0m" % text
|
negative_train_query0_00156
|
|
bin/authors_update.py/green
def green(text):
    return "\033[32m%s\033[0m" % text
|
negative_train_query0_00157
|
|
bin/authors_update.py/author_name
def author_name(line):
    assert line.count("<") == line.count(">") == 1
    assert line.endswith(">")
    return line.split("<", 1)[0].strip()
|
negative_train_query0_00158
|
|
bin/authors_update.py/move
def move(l, i1, i2, who):
    x = l.pop(i1)
    # this will fail if the .mailmap is not right
    assert who == author_name(x), \
        '%s was not found at line %i' % (who, i1)
    l.insert(i2, x)
|
negative_train_query0_00159
|
|
doc/generate_logos.py/main
def main():
    options, args = parser.parse_args()
    if options.debug:
        logging.basicConfig(level=logging.DEBUG)
    fn_source = os.path.join(options.source_dir, options.source_svg)
    if options.generate_svg or options.generate_all:
        generate_notail_notext_versions(fn_source, options.output_dir)
    if options.generate_png or options.generate_all:
        sizes = options.sizes.split(",")
        sizes = [int(s) for s in sizes]
        convert_to_png(fn_source, options.output_dir, sizes)
    if options.generate_ico or options.generate_all:
        sizes = options.icon_sizes.split(",")
        sizes = [int(s) for s in sizes]
        convert_to_ico(fn_source, options.output_dir, sizes)
|
negative_train_query0_00160
|
|
doc/generate_logos.py/generate_notail_notext_versions
def generate_notail_notext_versions(fn_source, output_dir):
    for ver in versions:
        properties = svg_sizes[ver]
        doc = load_svg(fn_source)
        (notail, notext) = versionkey_to_boolean_tuple(ver)
        g_tail = searchElementById(doc, "SnakeTail", "g")
        if notail:
            g_tail.setAttribute("display", "none")
        g_text = searchElementById(doc, "SymPy_text", "g")
        if notext:
            g_text.setAttribute("display", "none")
        g_logo = searchElementById(doc, "SympyLogo", "g")
        dx = properties["dx"]
        dy = properties["dy"]
        transform = "translate(%d,%d)" % (dx, dy)
        g_logo.setAttribute("transform", transform)
        svg = searchElementById(doc, "svg_SympyLogo", "svg")
        newsize = properties["size"]
        svg.setAttribute("width", "%d" % newsize)
        svg.setAttribute("height", "%d" % newsize)
        title = svg.getElementsByTagName("title")[0]
        title.firstChild.data = properties["title"]
        desc = svg.getElementsByTagName("desc")[0]
        desc.appendChild(doc.createTextNode("\n\nThis file is generated from %s !" % fn_source))
        fn_out = get_svg_filename_from_versionkey(fn_source, ver)
        fn_out = os.path.join(output_dir, fn_out)
        save_svg(fn_out, doc)
|
negative_train_query0_00161
|
|
doc/generate_logos.py/convert_to_png
def convert_to_png(fn_source, output_dir, sizes):
    svgs = list(versions)
    svgs.insert(0, '')
    cmd = "rsvg-convert"
    p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 127:
        logging.error("%s: command not found" % cmd)
        sys.exit(p.returncode)
    for ver in svgs:
        if ver == '':
            fn_svg = fn_source
        else:
            fn_svg = get_svg_filename_from_versionkey(fn_source, ver)
            fn_svg = os.path.join(output_dir, fn_svg)
        basename = os.path.basename(fn_svg)
        name, ext = os.path.splitext(basename)
        for size in sizes:
            fn_out = "%s-%dpx.png" % (name, size)
            fn_out = os.path.join(output_dir, fn_out)
            cmd = "rsvg-convert %s -f png -o %s -h %d -w %d" % (fn_svg, fn_out,
                                                                size, size)
            p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT)
            p.communicate()
            if p.returncode != 0:
                logging.error("Return code is not 0: Command: %s" % cmd)
                logging.error("return code: %s" % p.returncode)
                sys.exit(p.returncode)
            else:
                logging.debug("command: %s" % cmd)
                logging.debug("return code: %s" % p.returncode)
|
negative_train_query0_00162
|
|
doc/generate_logos.py/convert_to_ico
def convert_to_ico(fn_source, output_dir, sizes):
    # firstly prepare *.png files, which will be embedded
    # into the *.ico files.
    convert_to_png(fn_source, output_dir, sizes)
    svgs = list(versions)
    svgs.insert(0, '')
    cmd = "convert"
    p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 127:
        logging.error("%s: command not found" % cmd)
        sys.exit(p.returncode)
    for ver in svgs:
        if ver == '':
            fn_svg = fn_source
        else:
            fn_svg = get_svg_filename_from_versionkey(fn_source, ver)
            fn_svg = os.path.join(output_dir, fn_svg)
        basename = os.path.basename(fn_svg)
        name, ext = os.path.splitext(basename)
        # calculate the list of *.png files
        pngs = []
        for size in sizes:
            fn_png = "%s-%dpx.png" % (name, size)
            fn_png = os.path.join(output_dir, fn_png)
            pngs.append(fn_png)
        # convert them to *.ico
        fn_out = "%s-favicon.ico" % name
        fn_out = os.path.join(output_dir, fn_out)
        cmd = "convert %s %s" % (" ".join(pngs), fn_out)
        p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        p.communicate()
        if p.returncode != 0:
            logging.error("Return code is not 0: Command: %s" % cmd)
            logging.error("return code: %s" % p.returncode)
            sys.exit(p.returncode)
        else:
            logging.debug("command: %s" % cmd)
            logging.debug("return code: %s" % p.returncode)
|
negative_train_query0_00163
|
|
doc/generate_logos.py/versionkey_to_boolean_tuple
def versionkey_to_boolean_tuple(ver):
    notail = False
    notext = False
    vers = ver.split("-")
    notail = 'notail' in vers
    notext = 'notext' in vers
    return (notail, notext)
|
negative_train_query0_00164
|
|
doc/generate_logos.py/get_svg_filename_from_versionkey
def get_svg_filename_from_versionkey(fn_source, ver):
    basename = os.path.basename(fn_source)
    if ver == '':
        return basename
    name, ext = os.path.splitext(basename)
    prefix = svg_sizes[ver]["prefix"]
    fn_out = "%s-%s.svg" % (name, prefix)
    return fn_out
|
negative_train_query0_00165
|
|
doc/generate_logos.py/searchElementById
def searchElementById(node, Id, tagname):
    """
    Search element by id in all the children and descendants of node.

    id is lower case, not ID which is usually used for getElementById
    """
    nodes = node.getElementsByTagName(tagname)
    for node in nodes:
        an = node.getAttributeNode('id')
        if an and an.nodeValue == Id:
            return node
|
negative_train_query0_00166
|
|
doc/generate_logos.py/load_svg
def load_svg(fn):
    doc = xml.dom.minidom.parse(fn)
    return doc
|
negative_train_query0_00167
|
|
doc/generate_logos.py/save_svg
def save_svg(fn, doc):
    with open(fn, "wb") as f:
        xmlstr = doc.toxml("utf-8")
        f.write(xmlstr)
        logging.info(" File saved: %s" % fn)
|
negative_train_query0_00168
|
|
doc/ext/docscrape_sphinx.py/get_doc_object
def get_doc_object(obj, what=None, doc=None, config={}):
    if inspect.isclass(obj):
        what = 'class'
    elif inspect.ismodule(obj):
        what = 'module'
    elif callable(obj):
        what = 'function'
    else:
        what = 'object'
    if what == 'class':
        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
                              config=config)
    elif what in ('function', 'method'):
        return SphinxFunctionDoc(obj, doc=doc, config=config)
    else:
        if doc is None:
            doc = pydoc.getdoc(obj)
        return SphinxObjDoc(obj, doc, config=config)
|
negative_train_query0_00169
|
|
doc/ext/docscrape_sphinx.py/SphinxDocString/__init__
class SphinxDocString:
    def __init__(self, docstring, config={}):
        NumpyDocString.__init__(self, docstring, config=config)
        self.load_config(config)
|
negative_train_query0_00170
|
|
doc/ext/docscrape_sphinx.py/SphinxDocString/load_config
class SphinxDocString:
    def load_config(self, config):
        self.use_plots = config.get('use_plots', False)
        self.class_members_toctree = config.get('class_members_toctree', True)
|
negative_train_query0_00171
|
|
doc/ext/docscrape_sphinx.py/SphinxDocString/_str_header
class SphinxDocString:
    def _str_header(self, name, symbol='`'):
        return ['.. rubric:: ' + name, '']
|
negative_train_query0_00172
|
|
doc/ext/docscrape_sphinx.py/SphinxDocString/_str_field_list
class SphinxDocString:
    def _str_field_list(self, name):
        return [':' + name + ':']
|
negative_train_query0_00173
|
|
doc/ext/docscrape_sphinx.py/SphinxDocString/_str_indent
class SphinxDocString:
    def _str_indent(self, doc, indent=4):
        out = []
        for line in doc:
            out += [' '*indent + line]
        return out
|
negative_train_query0_00174
|
|
doc/ext/docscrape_sphinx.py/SphinxDocString/_str_signature
class SphinxDocString:
    def _str_signature(self):
        return ['']
        # unreachable: signature output is disabled by the early return above
        if self['Signature']:
            return ['``%s``' % self['Signature']] + ['']
        else:
            return ['']
|
negative_train_query0_00175
|
|
doc/ext/docscrape_sphinx.py/SphinxDocString/_str_summary
class SphinxDocString:
    def _str_summary(self):
        return self['Summary'] + ['']
|
negative_train_query0_00176
|
|
doc/ext/docscrape_sphinx.py/SphinxDocString/_str_extended_summary
class SphinxDocString:
    def _str_extended_summary(self):
        return self['Extended Summary'] + ['']
|
negative_train_query0_00177
|
|
doc/ext/docscrape_sphinx.py/SphinxDocString/_str_returns
class SphinxDocString:
    def _str_returns(self, name='Returns'):
        out = []
        if self[name]:
            out += self._str_field_list(name)
            out += ['']
            for param, param_type, desc in self[name]:
                if param_type:
                    out += self._str_indent(['**%s** : %s' % (param.strip(),
                                                              param_type)])
                else:
                    out += self._str_indent([param.strip()])
                if desc:
                    out += ['']
                    out += self._str_indent(desc, 8)
                out += ['']
        return out
|
negative_train_query0_00178
|
|
doc/ext/docscrape_sphinx.py/SphinxDocString/_str_param_list
class SphinxDocString:
    def _str_param_list(self, name):
        out = []
        if self[name]:
            out += self._str_field_list(name)
            out += ['']
            for param, param_type, desc in self[name]:
                if param_type:
                    out += self._str_indent(['**%s** : %s' % (param.strip(),
                                                              param_type)])
                else:
                    out += self._str_indent(['**%s**' % param.strip()])
                if desc:
                    out += ['']
                    out += self._str_indent(desc, 8)
                out += ['']
        return out
|
negative_train_query0_00179
|
|
doc/ext/docscrape_sphinx.py/SphinxDocString/_obj
class SphinxDocString:
    @property
    def _obj(self):
        # exposed as a property: _str_member_list reads self._obj without calling it
        if hasattr(self, '_cls'):
            return self._cls
        elif hasattr(self, '_f'):
            return self._f
        return None
|
negative_train_query0_00180
|
|
doc/ext/docscrape_sphinx.py/SphinxDocString/_str_member_list
class SphinxDocString:
    def _str_member_list(self, name):
        """
        Generate a member listing, autosummary:: table where possible,
        and a table where not.
        """
        out = []
        if self[name]:
            out += ['.. rubric:: %s' % name, '']
            prefix = getattr(self, '_name', '')
            if prefix:
                prefix = '~%s.' % prefix
            # Lines that are commented out are used to make the
            # autosummary:: table. Since SymPy does not use the
            # autosummary:: functionality, it is easiest to just comment it
            # out.
            # autosum = []
            others = []
            for param, param_type, desc in self[name]:
                param = param.strip()
                # Check if the referenced member can have a docstring or not
                param_obj = getattr(self._obj, param, None)
                if not (callable(param_obj)
                        or isinstance(param_obj, property)
                        or inspect.isgetsetdescriptor(param_obj)):
                    param_obj = None
                # if param_obj and (pydoc.getdoc(param_obj) or not desc):
                #     # Referenced object has a docstring
                #     autosum += [" %s%s" % (prefix, param)]
                # else:
                others.append((param, param_type, desc))
            # if autosum:
            #     out += ['.. autosummary::']
            #     if self.class_members_toctree:
            #         out += [' :toctree:']
            #     out += [''] + autosum
            if others:
                maxlen_0 = max(3, max([len(x[0]) for x in others]))
                hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10
                fmt = sixu('%%%ds %%s ') % (maxlen_0,)
                out += ['', '', hdr]
                for param, param_type, desc in others:
                    desc = sixu(" ").join(x.strip() for x in desc).strip()
                    if param_type:
                        desc = "(%s) %s" % (param_type, desc)
                    out += [fmt % (param.strip(), desc)]
                out += [hdr]
            out += ['']
        return out
|
negative_train_query0_00181
|
|
doc/ext/docscrape_sphinx.py/SphinxDocString/_str_section
class SphinxDocString:
    def _str_section(self, name):
        out = []
        if self[name]:
            out += self._str_header(name)
            out += ['']
            content = textwrap.dedent("\n".join(self[name])).split("\n")
            out += content
            out += ['']
        return out
|
negative_train_query0_00182
|
|
doc/ext/docscrape_sphinx.py/SphinxDocString/_str_see_also
class SphinxDocString:
    def _str_see_also(self, func_role):
        out = []
        if self['See Also']:
            see_also = super(SphinxDocString, self)._str_see_also(func_role)
            out = ['.. seealso::', '']
            out += self._str_indent(see_also[2:])
        return out
|
negative_train_query0_00183
|
|
doc/ext/docscrape_sphinx.py/SphinxDocString/_str_warnings
class SphinxDocString:
    def _str_warnings(self):
        out = []
        if self['Warnings']:
            out = ['.. warning::', '']
            out += self._str_indent(self['Warnings'])
        return out
|
negative_train_query0_00184
|
|
doc/ext/docscrape_sphinx.py/SphinxDocString/_str_index
class SphinxDocString:
    def _str_index(self):
        idx = self['index']
        out = []
        if len(idx) == 0:
            return out
        out += ['.. index:: %s' % idx.get('default', '')]
        for section, references in idx.items():
            if section == 'default':
                continue
            elif section == 'refguide':
                out += [' single: %s' % (', '.join(references))]
            else:
                out += [' %s: %s' % (section, ','.join(references))]
        return out
|
negative_train_query0_00185
|
|
doc/ext/docscrape_sphinx.py/SphinxDocString/_str_references
class SphinxDocString:
    def _str_references(self):
        out = []
        if self['References']:
            out += self._str_header('References')
            if isinstance(self['References'], str):
                self['References'] = [self['References']]
            out.extend(self['References'])
            out += ['']
            # Latex collects all references to a separate bibliography,
            # so we need to insert links to it
            if sphinx.__version__ >= "0.6":
                out += ['.. only:: latex', '']
            else:
                out += ['.. latexonly::', '']
            items = []
            for line in self['References']:
                m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
                if m:
                    items.append(m.group(1))
            out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
        return out
|
negative_train_query0_00186
|
|
doc/ext/docscrape_sphinx.py/SphinxDocString/_str_examples
class SphinxDocString:
    def _str_examples(self):
        examples_str = "\n".join(self['Examples'])
        if (self.use_plots and 'import matplotlib' in examples_str
                and 'plot::' not in examples_str):
            out = []
            out += self._str_header('Examples')
            out += ['.. plot::', '']
            out += self._str_indent(self['Examples'])
            out += ['']
            return out
        else:
            return self._str_section('Examples')
|
negative_train_query0_00187
|
|
doc/ext/docscrape_sphinx.py/SphinxDocString/__str__
class SphinxDocString:
    def __str__(self, indent=0, func_role="obj"):
        out = []
        out += self._str_signature()
        out += self._str_index() + ['']
        out += self._str_summary()
        out += self._str_extended_summary()
        out += self._str_param_list('Parameters')
        out += self._str_returns('Returns')
        out += self._str_returns('Yields')
        for param_list in ('Other Parameters', 'Raises', 'Warns'):
            out += self._str_param_list(param_list)
        out += self._str_warnings()
        out += self._str_see_also(func_role)
        out += self._str_section('Notes')
        out += self._str_references()
        out += self._str_examples()
        for s in self._other_keys:
            out += self._str_section(s)
        out += self._str_member_list('Attributes')
        out = self._str_indent(out, indent)
        return '\n'.join(out)
|
negative_train_query0_00188
|
|
doc/ext/docscrape_sphinx.py/SphinxFunctionDoc/__init__
class SphinxFunctionDoc:
    def __init__(self, obj, doc=None, config={}):
        self.load_config(config)
        FunctionDoc.__init__(self, obj, doc=doc, config=config)
|
negative_train_query0_00189
|
|
doc/ext/docscrape_sphinx.py/SphinxClassDoc/__init__
class SphinxClassDoc:
    def __init__(self, obj, doc=None, func_doc=None, config={}):
        self.load_config(config)
        ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
|
negative_train_query0_00190
|
|
doc/ext/docscrape_sphinx.py/SphinxObjDoc/__init__
class SphinxObjDoc:
    def __init__(self, obj, doc=None, config={}):
        self._f = obj
        self.load_config(config)
        SphinxDocString.__init__(self, doc, config=config)
|
negative_train_query0_00191
|
|
doc/ext/numpydoc.py/mangle_docstrings
def mangle_docstrings(app, what, name, obj, options, lines,
                      reference_offset=[0]):
    cfg = {'use_plots': app.config.numpydoc_use_plots,
           'show_class_members': app.config.numpydoc_show_class_members,
           'show_inherited_class_members':
           app.config.numpydoc_show_inherited_class_members,
           'class_members_toctree': app.config.numpydoc_class_members_toctree}
    u_NL = sixu('\n')
    if what == 'module':
        # Strip top title
        pattern = '^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*'
        title_re = re.compile(sixu(pattern), re.I | re.S)
        lines[:] = title_re.sub(sixu(''), u_NL.join(lines)).split(u_NL)
    else:
        doc = get_doc_object(obj, what, u_NL.join(lines), config=cfg)
        if sys.version_info[0] >= 3:
            doc = str(doc)
        else:
            doc = unicode(doc)
        lines[:] = doc.split(u_NL)
    if (app.config.numpydoc_edit_link and hasattr(obj, '__name__') and
            obj.__name__):
        if hasattr(obj, '__module__'):
            v = dict(full_name=sixu("%s.%s") % (obj.__module__, obj.__name__))
        else:
            v = dict(full_name=obj.__name__)
        lines += [sixu(''), sixu('.. htmlonly::'), sixu('')]
        lines += [sixu(' %s') % x for x in
                  (app.config.numpydoc_edit_link % v).split("\n")]
    # replace reference numbers so that there are no duplicates
    references = []
    for line in lines:
        line = line.strip()
        m = re.match(sixu('^.. \\[([a-z0-9_.-])\\]'), line, re.I)
        if m:
            references.append(m.group(1))
    # start renaming from the longest string, to avoid overwriting parts
    references.sort(key=lambda x: -len(x))
    if references:
        for i, line in enumerate(lines):
            for r in references:
                if re.match(sixu('^\\d+$'), r):
                    new_r = sixu("R%d") % (reference_offset[0] + int(r))
                else:
                    new_r = sixu("%s%d") % (r, reference_offset[0])
                lines[i] = lines[i].replace(sixu('[%s]_') % r,
                                            sixu('[%s]_') % new_r)
                lines[i] = lines[i].replace(sixu('.. [%s]') % r,
                                            sixu('.. [%s]') % new_r)
    reference_offset[0] += len(references)
|
negative_train_query0_00192
|
|
doc/ext/numpydoc.py/mangle_signature
def mangle_signature(app, what, name, obj, options, sig, retann):
    # Do not try to inspect classes that don't define `__init__`
    if (inspect.isclass(obj) and
            (not hasattr(obj, '__init__') or
             'initializes x; see ' in pydoc.getdoc(obj.__init__))):
        return '', ''
    if not (isinstance(obj, Callable) or
            hasattr(obj, '__argspec_is_invalid_')):
        return
    if not hasattr(obj, '__doc__'):
        return
    doc = SphinxDocString(pydoc.getdoc(obj))
    if doc['Signature']:
        sig = re.sub(sixu("^[^(]*"), sixu(""), doc['Signature'])
    return sig, sixu('')
|
negative_train_query0_00193
|
|
doc/ext/numpydoc.py/setup
def setup(app, get_doc_object_=get_doc_object):
    if not hasattr(app, 'add_config_value'):
        return  # probably called by nose, better bail out
    global get_doc_object
    get_doc_object = get_doc_object_
    app.connect('autodoc-process-docstring', mangle_docstrings)
    app.connect('autodoc-process-signature', mangle_signature)
    app.add_config_value('numpydoc_edit_link', None, False)
    app.add_config_value('numpydoc_use_plots', None, False)
    app.add_config_value('numpydoc_show_class_members', True, True)
    app.add_config_value('numpydoc_show_inherited_class_members', True, True)
    app.add_config_value('numpydoc_class_members_toctree', True, True)
    # Extra mangling domains
    app.add_domain(NumpyPythonDomain)
    app.add_domain(NumpyCDomain)
|
negative_train_query0_00194
|
|
doc/ext/numpydoc.py/wrap_mangling_directive
def wrap_mangling_directive(base_directive, objtype):
    class directive(base_directive):
        def run(self):
            env = self.state.document.settings.env
            name = None
            if self.arguments:
                m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0])
                name = m.group(2).strip()
            if not name:
                name = self.arguments[0]
            lines = list(self.content)
            mangle_docstrings(env.app, objtype, name, None, None, lines)
            self.content = ViewList(lines, self.content.parent)
            return base_directive.run(self)
    return directive
|
negative_train_query0_00195
|
|
doc/ext/numpydoc.py/ManglingDomainBase/__init__
class ManglingDomainBase:
    def __init__(self, *a, **kw):
        super(ManglingDomainBase, self).__init__(*a, **kw)
        self.wrap_mangling_directives()
|
negative_train_query0_00196
|
|
doc/ext/numpydoc.py/ManglingDomainBase/wrap_mangling_directives
class ManglingDomainBase:
    def wrap_mangling_directives(self):
        for name, objtype in list(self.directive_mangling_map.items()):
            self.directives[name] = wrap_mangling_directive(
                self.directives[name], objtype)
|
negative_train_query0_00197
|
|
doc/ext/numpydoc.py/directive/run
class directive:
    def run(self):
        env = self.state.document.settings.env
        name = None
        if self.arguments:
            m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0])
            name = m.group(2).strip()
        if not name:
            name = self.arguments[0]
        lines = list(self.content)
        mangle_docstrings(env.app, objtype, name, None, None, lines)
        self.content = ViewList(lines, self.content.parent)
        return base_directive.run(self)
|
negative_train_query0_00198
|