message | diff
---|---|
Create ObservableMixin
Summary: Prep for migrating to Lightning | import functools
import logging
-from typing import List
+from typing import Dict, List, Type
import torch
@@ -41,33 +41,16 @@ class Aggregator:
pass
-def observable(cls=None, **kwargs): # noqa: C901
- """
- Decorator to mark a class as producing observable values. The names of the
- observable values are the names of keyword arguments. The values of keyword
- arguments are the types of the value. The type is currently not used for
- anything.
- """
- assert kwargs
- observable_value_types = kwargs
-
- def wrap(cls):
- assert not hasattr(cls, "add_observer")
- assert not hasattr(cls, "notify_observers")
-
- original_init = cls.__init__
-
- @functools.wraps(original_init)
- def new_init(self, *args, **kwargs):
- original_init(self, *args, **kwargs)
- assert not hasattr(self, "_observable_value_types")
- assert not hasattr(self, "_observers")
- self._observable_value_types = observable_value_types
- self._observers = {v: [] for v in observable_value_types}
+class ObservableMixin:
+ def __init__(self):
+ super().__init__()
+ self._observers = {v: [] for v in self._observable_value_types}
- cls.__init__ = new_init
+ @property
+ def _observable_value_types(self) -> Dict[str, Type]:
+ raise NotImplementedError
- def add_observer(self, observer: Observer) -> None:
+ def add_observer(self, observer: Observer):
observing_keys = observer.get_observing_keys()
unknown_keys = [
k for k in observing_keys if k not in self._observable_value_types
@@ -79,15 +62,11 @@ def observable(cls=None, **kwargs): # noqa: C901
self._observers[k].append(observer)
return self
- cls.add_observer = add_observer
-
- def add_observers(self, observers: List[Observer]) -> None:
+ def add_observers(self, observers: List[Observer]):
for observer in observers:
self.add_observer(observer)
return self
- cls.add_observers = add_observers
-
def notify_observers(self, **kwargs):
for key, value in kwargs.items():
if value is None:
@@ -107,7 +86,38 @@ def observable(cls=None, **kwargs): # noqa: C901
for observer in self._observers[key]:
observer.update(key, value)
- cls.notify_observers = notify_observers
+
+def observable(cls=None, **kwargs): # noqa: C901
+ """
+ Decorator to mark a class as producing observable values. The names of the
+ observable values are the names of keyword arguments. The values of keyword
+ arguments are the types of the value. The type is currently not used for
+ anything.
+ """
+ assert kwargs
+ observable_value_types = kwargs
+
+ def wrap(cls):
+ assert not hasattr(cls, "add_observer")
+ assert not hasattr(cls, "notify_observers")
+
+ original_init = cls.__init__
+
+ @functools.wraps(original_init)
+ def new_init(self, *args, **kwargs):
+ original_init(self, *args, **kwargs)
+ assert not hasattr(self, "_observable_value_types")
+ assert not hasattr(self, "_observers")
+ self._observable_value_types = observable_value_types
+ self._observers = {v: [] for v in observable_value_types}
+
+ cls.__init__ = new_init
+
+ cls.add_observer = ObservableMixin.add_observer
+
+ cls.add_observers = ObservableMixin.add_observers
+
+ cls.notify_observers = ObservableMixin.notify_observers
return cls
|
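A minimal usage sketch for the new mixin (editorial illustration; the `Counter` class and its `count` key are hypothetical, and `ObservableMixin` is assumed importable from the module patched above):

```python
class Counter(ObservableMixin):
    # Hypothetical subclass: declare the observable keys via the
    # property, then emit values through notify_observers().
    def __init__(self):
        super().__init__()  # builds self._observers from the property below
        self.count = 0

    @property
    def _observable_value_types(self):
        return {"count": int}

    def increment(self):
        self.count += 1
        self.notify_observers(count=self.count)
```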
Update trickbot.txt
minus ```Google```-related IPs. If something is wrong, please do not close the entire PR; just mark what to clean off. Thank you. | @@ -1215,6 +1215,40 @@ insiderppe.cloudapp.net
5.182.210.226:443
64.44.133.156:447
+# Reference: https://blog.talosintelligence.com/2020/03/threat-roundup-0228-0306.html (# Win.Malware.Trickbot-7603048-1)
+
+107.181.246.213:443
+185.86.150.89:443
+191.7.30.30:443
+193.124.117.189:443
+193.124.117.189:447
+194.87.144.16:443
+194.87.92.113:443
+195.62.52.96:443
+37.59.183.142:443
+67.21.90.106:443
+67.21.90.109:443
+87.121.76.172:443
+87.121.76.172:449
+91.219.28.58:443
+91.219.28.80:443
+http://107.181.246.213
+http://116.203.16.95
+http://185.86.150.89
+http://191.7.30.30
+http://193.124.117.189
+http://194.87.144.16
+http://194.87.92.113
+http://195.62.52.96
+http://37.59.183.142
+http://51.254.164.249
+http://67.21.90.106
+http://67.21.90.109
+http://84.238.198.166
+http://87.121.76.172
+http://91.219.28.58
+http://91.219.28.80
+
# Generic trails
/karlmarks.php
|
[varLib.models] Add optional rounding to VariationModel() relevant methods
Users to be updated to benefit.
Part of | @@ -5,6 +5,7 @@ __all__ = ['nonNone', 'allNone', 'allEqual', 'allEqualTo', 'subList',
'supportScalar',
'VariationModel']
+from fontTools.misc.roundTools import noRound
from .errors import VariationModelError
@@ -358,7 +359,7 @@ class VariationModel(object):
self.supports = supports
self.deltaWeights = deltaWeights
- def getDeltas(self, masterValues):
+ def getDeltas(self, masterValues, round=noRound):
assert len(masterValues) == len(self.deltaWeights)
mapping = self.reverseMapping
out = []
@@ -366,12 +367,12 @@ class VariationModel(object):
delta = masterValues[mapping[i]]
for j,weight in weights.items():
delta -= out[j] * weight
- out.append(delta)
+ out.append(round(delta))
return out
- def getDeltasAndSupports(self, items):
+ def getDeltasAndSupports(self, items, round=noRound):
model, items = self.getSubModel(items)
- return model.getDeltas(items), model.supports
+ return model.getDeltas(items, round=round), model.supports
def getScalars(self, loc):
return [supportScalar(loc, support) for support in self.supports]
@@ -393,12 +394,12 @@ class VariationModel(object):
scalars = self.getScalars(loc)
return self.interpolateFromDeltasAndScalars(deltas, scalars)
- def interpolateFromMasters(self, loc, masterValues):
- deltas = self.getDeltas(masterValues)
+ def interpolateFromMasters(self, loc, masterValues, round=noRound):
+ deltas = self.getDeltas(masterValues, round=round)
return self.interpolateFromDeltas(loc, deltas)
- def interpolateFromMastersAndScalars(self, masterValues, scalars):
- deltas = self.getDeltas(masterValues)
+ def interpolateFromMastersAndScalars(self, masterValues, scalars, round=noRound):
+ deltas = self.getDeltas(masterValues, round=round)
return self.interpolateFromDeltasAndScalars(deltas, scalars)
|
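A hedged usage sketch of the new `round=` parameter (master locations and values are made up; `otRound` is the standard rounding helper from the same `fontTools.misc.roundTools` module the diff imports `noRound` from):

```python
from fontTools.misc.roundTools import otRound
from fontTools.varLib.models import VariationModel

# Two hypothetical masters along a single weight axis.
model = VariationModel([{}, {'wght': 1.0}])

# The default (noRound) can produce float deltas; a rounding function
# quantizes each delta as it is computed.
print(model.getDeltas([100, 135.5]))                 # e.g. [100, 35.5]
print(model.getDeltas([100, 135.5], round=otRound))  # e.g. [100, 36]
```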
Screengrab App : Use `ContextAlgo` for expansion and selection.
Note this also fixes a bug whereby the paths to expand were computed in the wrong context. | @@ -318,24 +318,19 @@ class screengrab( Gaffer.Application ) :
# Set up the scene expansion and selection.
- pathsToExpand = GafferScene.PathMatcher()
+ GafferSceneUI.ContextAlgo.clearExpansion( script.context() )
- for path in list( args["scene"]["fullyExpandedPaths"] ) + list( args["scene"]["expandedPaths"] ) :
- # Add paths and all their ancestors.
- while path :
- pathsToExpand.addPath( path )
- path = path.rpartition( "/" )[0]
+ pathsToExpand = GafferScene.PathMatcher( list( args["scene"]["fullyExpandedPaths"] ) + list( args["scene"]["expandedPaths"] ) )
+ GafferSceneUI.ContextAlgo.expand( script.context(), pathsToExpand )
- fullyExpandedPathsFilter = GafferScene.PathFilter()
- fullyExpandedPathsFilter["paths"].setValue(
- IECore.StringVectorData( [ path + "/..." for path in args["scene"]["fullyExpandedPaths"] ] )
- )
+ pathsToFullyExpand = GafferScene.PathMatcher( list( args["scene"]["fullyExpandedPaths"] ) )
+
+ with script.context() :
for node in script.selection() :
for scenePlug in [ p for p in node.children( GafferScene.ScenePlug ) if p.direction() == Gaffer.Plug.Direction.Out ] :
- GafferScene.SceneAlgo.matchingPaths( fullyExpandedPathsFilter, scenePlug, pathsToExpand )
+ GafferSceneUI.ContextAlgo.expandDescendants( script.context(), pathsToFullyExpand, scenePlug )
- script.context()["ui:scene:expandedPaths"] = GafferScene.PathMatcherData( pathsToExpand )
- script.context()["ui:scene:selectedPaths"] = args["scene"]["selectedPaths"]
+ GafferSceneUI.ContextAlgo.setSelectedPaths( script.context(), GafferScene.PathMatcher( args["scene"]["selectedPaths"] ) )
# Add a delay.
|
Don't redefine unnecessary type stub.
Summary: Pull Request resolved:
Test Plan: Imported from OSS | @@ -33,7 +33,7 @@ struct ScalarTypeToCType<at::ScalarType::Half> {
// due to ambiguous reference which can't to be resolved. For some reason it cant pick between at::detail and at::cuda::detail.
// For repro example, please see: https://gist.github.com/izdeby/952ae7cf256ddb740a73776d39a7e7ba
// TODO: remove once the bug is fixed.
- static at::Half t;
+ static type t;
};
template<>
@@ -44,7 +44,7 @@ struct ScalarTypeToCType<at::ScalarType::BFloat16> {
// due to ambiguous reference which can't to be resolved. For some reason it cant pick between at::detail and at::cuda::detail.
// For repro example, please see: https://gist.github.com/izdeby/952ae7cf256ddb740a73776d39a7e7ba
// TODO: remove once the bug is fixed.
- static at::BFloat16 t;
+ static type t;
};
template<>
@@ -55,7 +55,7 @@ struct ScalarTypeToCType<at::ScalarType::Bool> {
// due to ambiguous reference which can't to be resolved. For some reason it cant pick between at::detail and at::cuda::detail.
// For repro example, please see: https://gist.github.com/izdeby/952ae7cf256ddb740a73776d39a7e7ba
// TODO: remove once the bug is fixed.
- static bool t;
+ static type t;
};
template<>
|
Only show evaluation stats if enabled
Closes | <dd>{{ challenge.cached_num_participants|intcomma }}</dd>
{% endif %}
- {% if challenge.cached_num_results %}
+ {% if challenge.use_evaluation and challenge.cached_num_results %}
<dt>Results</dt>
<dd>
<a href="{% url 'evaluation:result-list' challenge.short_name %}">{{ challenge.cached_num_results }}</a>
|
Add export of cluster templates
Partially-Implements: bp portable-node-group-and-cluster-templates
This change adds functions to sahara to enable export of ct to JSON. | @@ -143,6 +143,29 @@ def cluster_templates_delete(cluster_template_id):
return u.render()
+def _cluster_template_export_helper(template):
+ template.pop('id')
+ template.pop('updated_at')
+ template.pop('created_at')
+ template.pop('tenant_id')
+ template.pop('is_default')
+ template['default_image_id'] = '{default_image_id}'
+ template['node_groups'] = '{node_groups}'
+
+
[email protected]('/cluster-templates/<cluster_template_id>/export')
[email protected]("data-processing:cluster-templates:get")
[email protected]_exists(api.get_cluster_template, 'cluster_template_id')
+def cluster_template_export(cluster_template_id):
+ content = u.to_wrapped_dict_no_render(
+ api.get_cluster_template, cluster_template_id)
+ _cluster_template_export_helper(content['cluster_template'])
+ res = u.render(content)
+ res.headers.add('Content-Disposition', 'attachment',
+ filename='cluster_template.json')
+ return res
+
+
# NodeGroupTemplate ops
@rest.get('/node-group-templates')
|
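To make the helper's effect concrete, a small self-contained sketch with a made-up template record (all field values are hypothetical):

```python
def _cluster_template_export_helper(template):
    # Strip instance-specific fields and swap environment-specific ones
    # for placeholders so the JSON can be re-imported elsewhere.
    for key in ('id', 'updated_at', 'created_at', 'tenant_id', 'is_default'):
        template.pop(key)
    template['default_image_id'] = '{default_image_id}'
    template['node_groups'] = '{node_groups}'

ct = {'id': 'abc123', 'updated_at': None, 'created_at': '2017-01-01',
      'tenant_id': 't1', 'is_default': False, 'name': 'my-template',
      'default_image_id': 'img-42', 'node_groups': [{'name': 'worker'}]}
_cluster_template_export_helper(ct)
print(ct)
# {'name': 'my-template', 'default_image_id': '{default_image_id}',
#  'node_groups': '{node_groups}'}
```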
Update menus.py
Fix incorrect warning message in get_instance_for_rendering() | @@ -266,8 +266,10 @@ class Menu:
def get_instance_for_rendering(cls, contextual_vals, option_vals):
warnings.warn(
'The get_instance_for_rendering() class method is deprecated in '
- 'v2.12 and will be removed in v3. Use create_relevant_object_from_values() '
- 'instead.', category=RemovedInWagtailMenus3Warning
+ 'v2.12 and will be removed in v3. For model-based menu classes, '
+ 'use get_from_collected_values() instead, and for non model-based '
+ 'menu classes, use create_from_collected_values().',
+ category=RemovedInWagtailMenus3Warning
)
if issubclass(cls, models.Model):
return cls.get_from_collected_values(contextual_vals, option_vals)
|
Disable bulk editing of all new metadata types.
Prevents overriding of individual metadata. | @focus="trackClick('Description')"
/>
</VFlex>
- <VFlex xs12 md6 :class="{ 'pl-2': $vuetify.breakpoint.mdAndUp }">
+ <VFlex xs12 :[mdValue]="true" :class="{ 'pl-2': $vuetify.breakpoint.mdAndUp }">
<!-- Learning activity -->
<LearningActivityOptions
+ v-if="oneSelected"
ref="learning_activities"
v-model="contentLearningActivities"
:disabled="anyIsTopic"
/>
<!-- Level -->
<LevelsOptions
+ v-if="oneSelected"
ref="contentLevel"
v-model="contentLevel"
@focus="trackClick('Levels dropdown')"
/>
<!-- What you will need -->
<ResourcesNeededOptions
+ v-if="oneSelected"
ref="resourcesNeeded"
v-model="resourcesNeeded"
@focus="trackClick('What you will need')"
</VFlex>
</VLayout>
<!-- Category -->
- <CategoryOptions ref="categories" v-model="categories" />
+ <CategoryOptions v-if="oneSelected" ref="categories" v-model="categories" />
</VFlex>
</VLayout>
<!-- For Beginners -->
<KCheckbox
+ v-if="oneSelected"
id="beginners"
ref="beginners"
:checked="forBeginners"
return this.firstNode.original_channel_name;
},
requiresAccessibility() {
- return this.nodes.every(
+ return (
+ this.oneSelected &&
+ this.nodes.every(
node => node.kind !== ContentKindsNames.AUDIO && node.kind !== ContentKindsNames.TOPIC
+ )
);
},
audioAccessibility() {
videoSelected() {
return this.oneSelected && this.firstNode.kind === ContentKindsNames.VIDEO;
},
+ // Dynamically compute the size of the VFlex used
+ /* eslint-disable-next-line kolibri/vue-no-unused-properties */
+ mdValue() {
+ return this.oneSelected ? 'md6' : 'md12';
+ },
},
watch: {
nodes: {
|
Add Blender code to robot visualisation example
This may be more involved than necessary at this point in the Tutorial, but it should be there for completeness IMO. | @@ -148,8 +148,29 @@ Visualizing Robots
Before jumping into how to build a robot model, let's first see how to visualize
one. This can be done with Blender, Rhino or Grasshopper using one of COMPAS's
artists. The basic procedure is the same in
-any of the CAD software (aside from the import statement), so for simplicity we
-will demonstrate the use of :class:`compas_rhino.artists.RobotModelArtist` in Rhino.
+any of the CAD software (aside from the import statement). Below you can find an example code for both Rhino and Blender.
+
+
+.. raw:: html
+
+ <div class="card">
+ <div class="card-header">
+ <ul class="nav nav-tabs card-header-tabs">
+ <li class="nav-item">
+ <a class="nav-link active" data-toggle="tab" href="#visualise_robot_rhino">Rhino</a>
+ </li>
+ <li class="nav-item">
+ <a class="nav-link" data-toggle="tab" href="#visualise_robot_blender">Blender</a>
+ </li>
+ </ul>
+ </div>
+ <div class="card-body">
+ <div class="tab-content">
+
+.. raw:: html
+
+ <div class="tab-pane active" id="visualise_robot_rhino">
+
Be sure to first install COMPAS for Rhino. While the following code is incomplete,
it can be used as a scaffolding for code to be run in a Python script editor within Rhino.
@@ -167,8 +188,45 @@ it can be used as a scaffolding for code to be run in a Python script editor wit
artist.clear_layer()
artist.draw_visual()
+.. raw:: html
+
+ </div>
+ <div class="tab-pane" id="visualise_robot_blender">
+
+.. code-block:: python
+
+ import compas
+ from compas.robots import GithubPackageMeshLoader
+ from compas.robots import RobotModel
+ import compas_blender
+ from compas_blender.artists import RobotModelArtist, BaseArtist
+
+ compas_blender.clear() # Delete all objects in the scene
+
+ compas.PRECISION = '12f'
+ # Load the urdf-file from Github
+ github = GithubPackageMeshLoader('ros-industrial/abb', 'abb_irb6600_support', 'kinetic-devel')
+ model = RobotModel.from_urdf_file(github.load_urdf('irb6640.urdf'))
+ model.load_geometry(github)
+
+ # Load the robot meshes into the blender scene
+ artist = RobotModelArtist(model, collection='COMPAS FAB::Example')
+
+Note that the Blender ``RobotModelArtist`` is not as developed as the one for Rhino.
+
+.. raw:: html
+
+ </div>
+
+.. raw:: html
+
+ </div>
+ </div>
+ </div>
+
+
-See below for a complete example.
+See below for a complete example in Rhino.
Building robots models
|
Clarify documentation for complex.rst - Ensuring Object Creation
Updated documentation per issue
Updated complex.rst Ensuring Object Creation section to make the behavior clearer. Explicitly calling out which method raises the error. | @@ -187,8 +187,9 @@ The above example only works if there was an outer command that created a
``Repo`` object and stored it in the context. For some more advanced use
cases, this might become a problem. The default behavior of
:func:`make_pass_decorator` is to call :meth:`Context.find_object`
-which will find the object. If it can't find the object, it will raise an
-error. The alternative behavior is to use :meth:`Context.ensure_object`
+which will find the object. If it can't find the object,
+:meth:`make_pass_decorator` will raise an error.
+The alternative behavior is to use :meth:`Context.ensure_object`
which will find the object, and if it cannot find it, will create one and
store it in the innermost context. This behavior can also be enabled for
:func:`make_pass_decorator` by passing ``ensure=True``:
|
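A short illustration of the distinction the reworded docs draw (the `Repo` class is hypothetical; `ensure=True` is the real flag that switches the decorator from `Context.find_object` to `Context.ensure_object`):

```python
import click

class Repo:
    def __init__(self):
        self.home = '.'

# With ensure=True the decorator creates a Repo in the innermost context
# when no outer command has stored one; without it, find_object() comes
# up empty and make_pass_decorator raises an error instead.
pass_repo = click.make_pass_decorator(Repo, ensure=True)

@click.command()
@pass_repo
def cli(repo):
    click.echo(repo.home)

if __name__ == '__main__':
    cli()
```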
enc2gen.py: remove sib/synth-disp from create_evex_evex_mask_dest_reg_only
* the reg-only encoding functions (mod=3) could never need a sib or a
synthetic-zero-valued displacement. | @@ -4580,8 +4580,8 @@ def create_evex_evex_mask_dest_reg_only(env, ii): # allows optional imm8
fo.add_code_eol('emit_evex(r)')
emit_opcode(ii,fo)
emit_modrm(fo)
- emit_sib(fo)
- emit_synthetic_disp(fo)
+ #emit_sib(fo) # FIXME: 2019-07-24 THIS APPEARS EXTRANEOUS, REG ONLY
+ #emit_synthetic_disp(fo) # FIXME: 2019-07-24 THIS APPEARS EXTRANEOUS, REG ONLY
cond_emit_imm8(ii,fo)
add_enc_func(ii,fo)
|
Update apt_gorgon.txt
Fixes: . Full-path detection is added for sample | # See the file 'LICENSE' for copying permission
# Reference: https://github.com/pan-unit42/iocs/blob/master/gorgon/domains.txt
+# Reference: https://www.virustotal.com/gui/file/24adef104d6f177525f24c927e764cf8e53c0ce50fbdd1c414305d5fc8b15116/detection
0-day.us
acorn-paper.com
@@ -18,9 +19,9 @@ securebotnetpanel.tk
stemtopx.com
stevemike-fireforce.info
stevemikeforce.com
-t2m.io
xyz-storez.xyz
zupaservices.info
+t2m.io/GbiSgY
# Reference: https://twitter.com/h4ckak/status/1145984273985331200
# Reference: https://otx.alienvault.com/pulse/5d1b49a55c01f486b6ff8cf2
|
Use one channel in Conda to speed up the build
This PR targets to deal with | # Spark 3 will support it so we can remove this entire file
# when we support Spark 3.
channels:
- - bioconda
- conda-forge
dependencies:
- - java-jdk=8
+ - openjdk=8
- pip
- pip:
# In Read the Docs, seems installing 'requirements-dev.txt' seems ignored when Conda is used.
|
Fix AtspiElementInfo.visible for iconified windows
Any element with STATE_ICONIFIED should not be considered visible | @@ -207,7 +207,7 @@ class AtspiElementInfo(ElementInfo):
states = children[0].get_state_set()
else:
return False
- return "STATE_VISIBLE" in states and "STATE_SHOWING" in states
+ return "STATE_VISIBLE" in states and "STATE_SHOWING" in states and "STATE_ICONIFIED" not in states
def set_cache_strategy(self, cached):
"""Set a cache strategy for frequently used attributes of the element"""
|
Catch exception caused by empty images in pageseg
Return an empty segmentation instead of just crashing when given empty
pages. | @@ -376,10 +376,15 @@ def segment(im, text_direction='horizontal-tb', scale=None, maxcolseps=2, black_
scale = estimate_scale(binary)
binary = remove_hlines(binary, scale)
+ # emptyish images will cause exceptions here.
+ try:
if black_colseps:
colseps, binary = compute_black_colseps(binary, scale, maxcolseps)
else:
colseps = compute_white_colseps(binary, scale, maxcolseps)
+ except ValueError:
+ return {'text_direction': text_direction, 'boxes': []}
+
bottom, top, boxmap = compute_gradmaps(binary, scale)
seeds = compute_line_seeds(binary, bottom, top, colseps, scale)
llabels = morph.propagate_labels(boxmap, seeds, conflict=0)
|
Changed _check_inputs to _parse_inputs.
This was suggested in the PR to avoid running _check_inputs without
parsing them first. | @@ -17,9 +17,13 @@ __all__ = [
'ptrace', 'ptrace_csr', 'ptrace_dense', 'ptrace_csr_dense',
]
-cdef tuple _check_inputs(tuple shape, object dims, object sel):
+cdef tuple _parse_inputs(object dims, object sel, tuple shape):
cdef Py_ssize_t i
+ dims = np.atleast_1d(dims).astype(idxint_dtype).ravel()
+ sel = np.atleast_1d(sel).astype(idxint_dtype)
+ sel.sort()
+
if shape[0] != shape[1]:
raise ValueError("ptrace is only defined for square density matrices")
@@ -40,6 +44,8 @@ cdef tuple _check_inputs(tuple shape, object dims, object sel):
if i > 0 and sel[i] == sel[i - 1]:
raise ValueError("Duplicate selection index in ptrace.")
+ return dims, sel
+
cdef idxint _populate_tensor_table(dims, sel, idxint[:, ::1] tensor_table) except -1:
"""
Populate the helper structure `tensor_table`. Returns the size (number of
@@ -87,11 +93,7 @@ cdef inline void _i2_k_t(idxint N, idxint[:, ::1] tensor_table, idxint out[2]):
cpdef CSR ptrace_csr(CSR matrix, object dims, object sel):
- dims = np.atleast_1d(dims).astype(idxint_dtype).ravel()
- sel = np.atleast_1d(sel).astype(idxint_dtype)
- sel.sort()
-
- _check_inputs(matrix.shape, dims, sel)
+ dims, sel = _parse_inputs(dims, sel, matrix.shape)
if len(sel) == len(dims):
return matrix.copy()
@@ -121,11 +123,7 @@ cpdef CSR ptrace_csr(CSR matrix, object dims, object sel):
cpdef Dense ptrace_csr_dense(CSR matrix, object dims, object sel):
- dims = np.atleast_1d(dims).astype(idxint_dtype).ravel()
- sel = np.atleast_1d(sel).astype(idxint_dtype)
- sel.sort()
-
- _check_inputs(matrix.shape, dims, sel)
+ dims, sel = _parse_inputs(dims, sel, matrix.shape)
if len(sel) == len(dims):
return dense.from_csr(matrix)
@@ -146,11 +144,7 @@ cpdef Dense ptrace_csr_dense(CSR matrix, object dims, object sel):
cpdef Dense ptrace_dense(Dense matrix, object dims, object sel):
- dims = np.atleast_1d(dims).astype(idxint_dtype).ravel()
- sel = np.atleast_1d(sel).astype(idxint_dtype)
- sel.sort()
-
- _check_inputs(matrix.shape, dims, sel)
+ dims, sel = _parse_inputs(dims, sel, matrix.shape)
if len(sel) == len(dims):
return matrix.copy()
|
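The normalization the renamed helper now performs up front, sketched in plain NumPy (sample `dims`/`sel` values are made up, and `idxint_dtype` is replaced by a plain integer dtype for illustration):

```python
import numpy as np

dims, sel = [2, 2, 2], 2              # scalars and lists are both accepted
dims = np.atleast_1d(dims).astype(np.int64).ravel()
sel = np.atleast_1d(sel).astype(np.int64)
sel.sort()                            # validation assumes a sorted selection
print(dims, sel)                      # [2 2 2] [2]
```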
25% faster DAG.add_wire
* do less lookups
* a little less lookup
* Add comment
Slightly slower but not a big deal. Indeed, it looks like there is not much that can be done to improve this function | @@ -188,20 +188,26 @@ class DAGCircuit:
if wire not in self.wires:
self.wires.append(wire)
self._max_node_id += 1
- self.input_map[wire] = self._max_node_id
+ input_map_wire = self.input_map[wire] = self._max_node_id
+
self._max_node_id += 1
- self.output_map[wire] = self._max_node_id
- in_node = self.input_map[wire]
- out_node = self.output_map[wire]
- self.multi_graph.add_edge(in_node, out_node)
- self.multi_graph.node[in_node]["type"] = "in"
- self.multi_graph.node[out_node]["type"] = "out"
- self.multi_graph.node[in_node]["name"] = "%s[%s]" % (wire[0].name, wire[1])
- self.multi_graph.node[out_node]["name"] = "%s[%s]" % (wire[0].name, wire[1])
- self.multi_graph.node[in_node]["wire"] = wire
- self.multi_graph.node[out_node]["wire"] = wire
- self.multi_graph.adj[in_node][out_node][0]["name"] = "%s[%s]" % (wire[0].name, wire[1])
- self.multi_graph.adj[in_node][out_node][0]["wire"] = wire
+ output_map_wire = self.output_map[wire] = self._max_node_id
+
+ self.multi_graph.add_edge(input_map_wire,
+ output_map_wire)
+
+ wire_name = "%s[%s]" % (wire[0].name, wire[1])
+
+ self.multi_graph.add_nodes_from([(input_map_wire, {'type': 'in'}),
+ (output_map_wire, {'type': 'out'})
+ ],
+ name=wire_name,
+ wire=wire,
+ )
+ self.multi_graph.adj[input_map_wire][output_map_wire][0]["name"] \
+ = "%s[%s]" % (wire[0].name, wire[1])
+ self.multi_graph.adj[input_map_wire][output_map_wire][0]["wire"] \
+ = wire
else:
raise DAGCircuitError("duplicate wire %s" % (wire,))
|
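The central micro-optimization, reduced to a generic sketch (names are hypothetical): chaining the assignment binds the stored value to a local, so later uses skip a second dictionary lookup.

```python
d = {}

def compute():
    return 42

# Before: one lookup to store the value, another to read it back.
d['key'] = compute()
value = d['key']

# After: the chained assignment stores and binds in one statement.
value = d['key'] = compute()
```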
zilencer: Make /billing appear without i18n prefix.
This copies what we do in `zproject/urls.py` for pages in the
main Django app. | @@ -19,7 +19,10 @@ v1_api_and_json_patterns = [
{'POST': 'zilencer.views.remote_server_notify_push'}),
]
-urlpatterns = [
+# Make a copy of i18n_urlpatterns so that they appear without prefix for English
+urlpatterns = list(i18n_urlpatterns)
+
+urlpatterns += [
url(r'^api/v1/', include(v1_api_and_json_patterns)),
url(r'^json/', include(v1_api_and_json_patterns)),
]
|
Update README.rst
line break before paper | @@ -8,8 +8,9 @@ OceanSpy - A Python package to facilitate ocean model data analysis and visualiz
|version| |conda forge| |docs| |travis| |codecov| |license| |doi| |JOSS|
-| For publications, please cite the following paper:
-| Almansi, M., R. Gelderloos, T. W. N. Haine, A. Saberi, and A. H. Siddiqui (2019). OceanSpy: A Python package to facilitate ocean model data analysis and visualization. *Journal of Open Source Software*, 4(39), 1506, doi: https://doi.org/10.21105/joss.01506 .
+For publications, please cite the following paper:
+
+Almansi, M., R. Gelderloos, T. W. N. Haine, A. Saberi, and A. H. Siddiqui (2019). OceanSpy: A Python package to facilitate ocean model data analysis and visualization. *Journal of Open Source Software*, 4(39), 1506, doi: https://doi.org/10.21105/joss.01506 .
What is OceanSpy?
-----------------
|
nested containers: elusive ssh race condition
In very rare cases, after having stopped SSH in the VM,
it would come back to life right before the container
starts.
This patch catches those cases. | @@ -1397,7 +1397,8 @@ function replicate_to_container_if_nested {
# the correct priveleges, then run the entrypoint script already provided
# which then starts SSH on its own. Because we're using host networking, everything
# works as-is without any changes.
- sudo docker run -u ${username} -it -d --name cbnested --privileged --net host --env CB_SSH_PUB_KEY="$(cat ~/.ssh/id_rsa.pub)" -v ~/:${NEST_EXPORTED_HOME} ${image} bash -c "sudo mkdir ${userpath}/$username; sudo cp -a ${NEST_EXPORTED_HOME}/* ${NEST_EXPORTED_HOME}/.* ${userpath}/${username}/; sudo chmod 755 ${userpath}/${username}; sudo chown -R ${username}:${username} ${userpath}/${username}; sudo bash /etc/my_init.d/inject_pubkey_and_start_ssh.sh"
+ CMD='sudo docker run -u ${username} -it -d --name cbnested --privileged --net host --env CB_SSH_PUB_KEY="$(cat ~/.ssh/id_rsa.pub)" -v ~/:${NEST_EXPORTED_HOME} ${image} bash -c "sudo mkdir ${userpath}/$username; sudo cp -a ${NEST_EXPORTED_HOME}/* ${NEST_EXPORTED_HOME}/.* ${userpath}/${username}/; sudo chmod 755 ${userpath}/${username}; sudo chown -R ${username}:${username} ${userpath}/${username}; sudo bash /etc/my_init.d/inject_pubkey_and_start_ssh.sh"'
+ eval $CMD
syslog_netcat "Container started, settling..."
@@ -1411,6 +1412,14 @@ function replicate_to_container_if_nested {
((ATTEMPTS=ATTEMPTS-1))
if [ ${ATTEMPTS} -gt 0 ] ; then
syslog_netcat "Still waiting on container startup. Attempts left: ${ATTEMPTS}"
+ if [ x"$rc" == x"1" ] ; then
+ syslog_netcat "Recreating container..."
+ # Sometimes ssh doesn't go down or gets restarted. Try again.
+ service_stop_disable sshd
+ sudo docker rm cbnested
+ eval $CMD
+ syslog_netcat "Recreated."
+ fi
sleep 2
continue
else
|
Fix up some typos and dead links
And reword the description around datacube-ows | @@ -6,7 +6,7 @@ Data Cube Ecosystem
API Access from Jupyter
-----------------------
-One of the most comment ways to use Open Data Cube is through interactively
+One of the most common ways to use Open Data Cube is through interactively
writing Python code within a Jupyter Notebook. This allows dynamically loading
data, performing analysis and developing scientific algorithms.
@@ -17,8 +17,8 @@ OGC Web Services
----------------
The datacube-ows_ server allows users to interact with
-an Open Data Cube through Open Goespatial Consortium Web Service Clients
-such as QGIS and Web Maps. It supports the WMS and WCS standards.
+an Open Data Cube using client software (such as QGIS, or the OpenLayers web mapping library),
+through the use of Web Map Service (WMS) and Web Coverage Service (WCS) standards from the Open Geospatial Consortium.
.. _datacube-ows: https://github.com/opendatacube/datacube-ows
@@ -37,8 +37,8 @@ available data extents, and can be used to browse the provence of indexed data.
See the `Digital Earth Australia Explorer`_ for an example deployment showing the power of this tool.
-.. _`Data Cube Explorer`: https://github.com/opendatacube/dea-dashboard
-.. _`Digital Earth Australia Explorer`: https://data.dea.ga.gov.au/
+.. _`Data Cube Explorer`: https://github.com/opendatacube/datacube-explorer
+.. _`Digital Earth Australia Explorer`: https://explorer.sandbox.dea.ga.gov.au
Data Cube UI
@@ -65,7 +65,7 @@ data range and statistics to calculate.
Cube in a Box
-------------
-Cube in a Box provides everything needed to get up and running quickly with Open Data Cube inside
+`Cube in a Box` provides everything needed to get up and running quickly with Open Data Cube inside
an Amazon Web Services Environment.
.. _`Cube in a Box`: https://github.com/crc-si/cube-in-a-box
|
Update parse_zone_file.py
See internal ICM
We are failing to import zones when the domain name in $origin contains hyphens.
This is because we don't parse it correctly in the regex here | @@ -59,7 +59,7 @@ date_regex_dict = {
_REGEX = {
'ttl': r'(?P<delim>\$ttl)\s+(?P<val>\d+\w*)',
- 'origin': r'(?P<delim>\$origin)\s+(?P<val>[\w\.]+)',
+ 'origin': r'(?P<delim>\$origin)\s+(?P<val>[\w\.-]+)',
'soa': r'(?P<name>[@\*\w\.-]*)\s+(?:(?P<ttl>\d+\w*)\s+)?(?:(?P<class>in)\s+)?(?P<delim>soa)\s+(?P<host>[\w\.-]+)\s+(?P<email>[\w\.-]+)\s+(?P<serial>\d*)\s+(?P<refresh>\w*)\s+(?P<retry>\w*)\s+(?P<expire>\w*)\s+(?P<minimum>\w*)?',
'a': r'(?P<name>[@\*\w\.-]*)\s+(?:(?P<ttl>\d+\w*)\s+)?(?:(?P<class>in)\s+)?(?P<delim>a)\s+(?P<ip>[\d\.]+)',
'ns': r'(?P<name>[@\*\w\.-]*)\s+(?:(?P<ttl>\d+\w*)\s+)?(?:(?P<class>in)\s+)?(?P<delim>ns)\s+(?P<host>[\w\.-]+)',
|
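A quick check (with a hypothetical zone line) of why the added `-` in the character class matters:

```python
import re

old = r'(?P<delim>\$origin)\s+(?P<val>[\w\.]+)'
new = r'(?P<delim>\$origin)\s+(?P<val>[\w\.-]+)'

line = '$origin my-domain.example.com.'
print(re.match(old, line).group('val'))  # 'my' -- stops at the hyphen
print(re.match(new, line).group('val'))  # 'my-domain.example.com.'
```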
billing: Only fetch customers with stripe customer id in autodowngrade.
The customers without a value for stripe_customer_id never had an active
plan. So we don't have to consider them for autodowngrade. | @@ -993,7 +993,7 @@ def void_all_open_invoices(realm: Realm) -> int:
def customer_has_last_n_invoices_open(customer: Customer, n: int) -> bool:
- if customer.stripe_customer_id is None:
+ if customer.stripe_customer_id is None: # nocoverage
return False
open_invoice_count = 0
@@ -1004,7 +1004,7 @@ def customer_has_last_n_invoices_open(customer: Customer, n: int) -> bool:
def downgrade_small_realms_behind_on_payments_as_needed() -> None:
- customers = Customer.objects.all()
+ customers = Customer.objects.all().exclude(stripe_customer_id=None)
for customer in customers:
realm = customer.realm
|
Update index.md
Change the RTMP restream to RTSP | @@ -15,7 +15,7 @@ Use of a [Google Coral Accelerator](https://coral.ai/products/) is optional, but
- Object detection with TensorFlow runs in separate processes for maximum FPS
- Communicates over MQTT for easy integration into other systems
- Recording with retention based on detected objects
-- Re-streaming via RTMP to reduce the number of connections to your camera
+- Re-streaming via RTSP to reduce the number of connections to your camera
- A dynamic combined camera view of all tracked cameras.
## Screenshots
|
Add some docs
For new grid functionality | @@ -26,13 +26,17 @@ class PrecipitationDistribution(Component):
Construction::
- PrecipitationDistribution(mean_storm_duration=0.0,
+ PrecipitationDistribution(grid=None,
+ mean_storm_duration=0.0,
mean_interstorm_duration=0.0,
mean_storm_depth=0.0, total_t=0.0,
delta_t=0.0, random_seed=0)
Parameters
----------
+ grid : ModelGrid
+ A Landlab grid (optional). If provided, storm intensities will be
+ stored as a grid scalar field as the component simulates storms.
mean_storm_duration : float
Average duration of a precipitation event.
mean_interstorm_duration : float
@@ -88,6 +92,9 @@ class PrecipitationDistribution(Component):
Parameters
----------
+ grid : ModelGrid
+ A Landlab grid (optional). If provided, storm intensities will be
+ stored as a grid scalar field as the component simulates storms.
mean_storm_duration : float
Average duration of a precipitation event.
mean_interstorm_duration : float
|
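A hedged usage sketch of the new optional grid argument (grid shape and storm statistics are made up, assuming a landlab version that includes this change):

```python
from landlab import RasterModelGrid
from landlab.components import PrecipitationDistribution

grid = RasterModelGrid((5, 5))
# With a grid supplied, storm intensity is also recorded on the grid
# as a scalar field while storms are simulated.
precip = PrecipitationDistribution(grid=grid,
                                   mean_storm_duration=2.0,
                                   mean_interstorm_duration=10.0,
                                   mean_storm_depth=0.5,
                                   total_t=100.0)
```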
Added pretty frames in AskText dialog (GTK version only)
HG--
branch : feature_global_scalefactor | @@ -567,7 +567,7 @@ if TOOLKIT in (GTK, GTKSOURCEVIEW):
:return: the created window
"""
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
- window.set_border_width(0)
+ window.set_border_width(2)
window.set_title('Enter LaTeX Formula - TexText')
# File chooser and Scale Adjustment
@@ -583,15 +583,15 @@ if TOOLKIT in (GTK, GTKSOURCEVIEW):
preamble_delete.connect('clicked', self.clear_preamble)
preamble_delete.set_tooltip_text("Clear the preamble file setting")
+ preamble_frame = gtk.Frame("Preamble File")
preamble_box = gtk.HBox(homogeneous=False, spacing=2)
- preamble_label = gtk.Label("Preamble File")
- preamble_box.pack_start(preamble_label, False, False, 2)
+ preamble_frame.add(preamble_box)
preamble_box.pack_start(self._preamble_widget, True, True, 2)
preamble_box.pack_start(preamble_delete, False, False, 2)
+ scale_frame = gtk.Frame("Scale Factor")
scale_box = gtk.HBox(homogeneous=False, spacing=2)
- scale_label = gtk.Label("Scale Factor")
- scale_box.pack_start(scale_label, False, False, 2)
+ scale_frame.add(scale_box)
self._scale_adj = gtk.Adjustment(lower=0.1, upper=10, step_incr=0.1, page_incr=1)
self._scale = gtk.HScale(self._scale_adj)
self._scale.set_digits(1)
@@ -688,9 +688,9 @@ if TOOLKIT in (GTK, GTKSOURCEVIEW):
window.add(vbox)
vbox.pack_start(menu, False, False, 0)
- vbox.pack_start(preamble_box, False, False, 0)
+ vbox.pack_start(preamble_frame, False, False, 0)
if self.global_scale_factor:
- vbox.pack_start(scale_box, False, False, 0)
+ vbox.pack_start(scale_frame, False, False, 0)
vbox.pack_start(scroll_window, True, True, 0)
vbox.pack_start(pos_label, False, False, 0)
vbox.pack_start(self._preview, False, False, 0)
|
test: qb parameterization
where conditions
set update conditions
where with a function | @@ -66,24 +66,47 @@ class TestBuilderBase(object):
self.assertIsInstance(query.run, Callable)
self.assertIsInstance(data, list)
- def test_walk(self):
+
+class TestParameterization(unittest.TestCase):
+ def test_where_conditions(self):
DocType = frappe.qb.DocType("DocType")
query = (
frappe.qb.from_(DocType)
.select(DocType.name)
- .where(
- (DocType.owner == "Administrator' --")
- & (Coalesce(DocType.search_fields == "subject"))
- )
+ .where((DocType.owner == "Administrator' --"))
)
self.assertTrue("walk" in dir(query))
query, params = query.walk()
self.assertIn("%(param1)s", query)
- self.assertIn("%(param2)s", query)
self.assertIn("param1", params)
self.assertEqual(params["param1"], "Administrator' --")
- self.assertEqual(params["param2"], "subject")
+
+ def test_set_conditions(self):
+ DocType = frappe.qb.DocType("DocType")
+ query = frappe.qb.update(DocType).set(DocType.value, "some_value")
+
+ self.assertTrue("walk" in dir(query))
+ query, params = query.walk()
+
+ self.assertIn("%(param1)s", query)
+ self.assertIn("param1", params)
+ self.assertEqual(params["param1"], "some_value")
+
+ def test_where_conditions_functions(self):
+ DocType = frappe.qb.DocType("DocType")
+ query = (
+ frappe.qb.from_(DocType)
+ .select(DocType.name)
+ .where(Coalesce(DocType.search_fields == "subject"))
+ )
+
+ self.assertTrue("walk" in dir(query))
+ query, params = query.walk()
+
+ self.assertIn("%(param1)s", query)
+ self.assertIn("param1", params)
+ self.assertEqual(params["param1"], "subject")
@run_only_if(db_type_is.MARIADB)
|
Update List.py
documentation for better understanding. | List = []
# List is Muteable
# means value can be change
-List.insert(0, 5)
+List.insert(0, 5) # insertion takes place at the given index
List.insert(1, 10)
List.insert(0, 6)
print(List)
List.remove(6)
-List.append(9)
+List.append(9) # insertion takes place at the end
List.append(1)
-List.sort()
+List.sort() # arranges elements in ascending order
print(List)
List.pop()
List.reverse()
|
Reduce code duplication in Emboss
This commit decreases code duplication in the
parameter parsing of
augmenters.convolutional.Emboss by using
the parameter handling functions in
parameters.py.
Additionally, alpha and strength now support
lists, which are interpreted as Choices. | @@ -256,17 +256,19 @@ def Emboss(alpha=0, strength=1, name=None, deterministic=False, random_state=Non
Parameters
----------
- alpha : int or float or tuple of two ints/floats or StochasticParameter, optional(default=0)
+ alpha : number or tuple of number or list of number or StochasticParameter, optional(default=0)
Visibility of the sharpened image. At 0, only the original image is
visible, at 1.0 only its sharpened version is visible.
* If an int or float, exactly that value will be used.
* If a tuple (a, b), a random value from the range a <= x <= b will
be sampled per image.
+ * If a list, then a random value will be sampled from that list
+ per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
- strength : int or float or tuple of two ints/floats or StochasticParameter, optional(default=1)
+ strength : number or tuple of number or list of number or StochasticParameter, optional(default=1)
Parameter that controls the strength of the embossing.
Sane values are somewhere in the range (0, 2) with 1 being the standard
embossing effect. Default value is 1.
@@ -274,6 +276,8 @@ def Emboss(alpha=0, strength=1, name=None, deterministic=False, random_state=Non
* If an int or float, exactly that value will be used.
* If a tuple (a, b), a random value from the range a <= x <= b will
be sampled per image.
+ * If a list, then a random value will be sampled from that list
+ per image.
* If a StochasticParameter, a value will be sampled from the
parameter per image.
@@ -295,26 +299,8 @@ def Emboss(alpha=0, strength=1, name=None, deterministic=False, random_state=Non
over the old image.
"""
-
- if ia.is_single_number(alpha):
- alpha_param = Deterministic(alpha)
- elif ia.is_iterable(alpha):
- ia.do_assert(len(alpha) == 2, "Expected tuple/list with 2 entries, got %d entries." % (len(alpha),))
- alpha_param = Uniform(alpha[0], alpha[1])
- elif isinstance(alpha, StochasticParameter):
- alpha_param = alpha
- else:
- raise Exception("Expected float, int, tuple/list with 2 entries or StochasticParameter. Got %s." % (type(alpha),))
-
- if ia.is_single_number(strength):
- strength_param = Deterministic(strength)
- elif ia.is_iterable(strength):
- ia.do_assert(len(strength) == 2, "Expected tuple/list with 2 entries, got %d entries." % (len(strength),))
- strength_param = Uniform(strength[0], strength[1])
- elif isinstance(strength, StochasticParameter):
- strength_param = strength
- else:
- raise Exception("Expected float, int, tuple/list with 2 entries or StochasticParameter. Got %s." % (type(strength),))
+ alpha_param = iap.handle_continuous_param(alpha, "alpha", value_range=(0, 1.0), tuple_to_uniform=True, list_to_choice=True)
+ strength_param = iap.handle_continuous_param(strength, "strength", value_range=(0, None), tuple_to_uniform=True, list_to_choice=True)
def create_matrices(image, nb_channels, random_state_func):
alpha_sample = alpha_param.draw_sample(random_state=random_state_func)
|
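What the consolidated helper does with each accepted input form, as a hedged sketch (assuming `handle_continuous_param` is exposed from imgaug's `parameters` module as the diff uses it):

```python
from imgaug import parameters as iap

common = dict(value_range=(0, 1.0), tuple_to_uniform=True, list_to_choice=True)
print(iap.handle_continuous_param(0.5, "alpha", **common))         # Deterministic
print(iap.handle_continuous_param((0.0, 1.0), "alpha", **common))  # Uniform
print(iap.handle_continuous_param([0.2, 0.8], "alpha", **common))  # Choice
```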
Update help msg for --song
--song accepts YouTube URLs. | @@ -106,7 +106,7 @@ def get_arguments(config_base=_CONFIG_BASE):
"-s",
"--song",
nargs="+",
- help="download track(s) by spotify link or name"
+ help="download track(s) by spotify link, name, or youtube url."
)
group.add_argument(
"-l",
|
assign to consistent QueueRecv out tensor
avoid calling self.update* functions since those replace underlying tensors
MKL expects output tensor of an op to be consistent from call to call
use the [()]= operator instead of [:]= to avoid a 0d-array slicing error | @@ -595,8 +595,7 @@ class CPUCodeGenerator(PyGen):
def generate_op(self, op, out, *args):
recv_id = len(self.recv_nodes)
self.recv_nodes.append(op)
- self.append("update_a_{}(self.recv_from_queue_send({}))",
- out.tensor_description.name, recv_id)
+ self.append("{}[()] = self.recv_from_queue_send({})", out, recv_id)
@generate_op.on_type(CPUQueueGatherSendOp)
def generate_op(self, op, out, *args):
|
Update OpenAPI spec
* Update OpenAPI spec
Update OpenAPI specification to include test server url
* fix url typo | @@ -3,9 +3,10 @@ info:
title: Respa
description: The Respa API provides categorized data on resources available for reservation within a city or metropolitan area
and enables the reservation of these resources. The API provides data in the JSON format, in a RESTful fashion.
- version: 1.5.1
+ version: 1.6.1
servers:
- url: https://api.hel.fi/respa/v1
+- url: https://respa.koe.hel.ninja/v1
tags:
- name: resource
description: Look for available resources
|
[varLib.mutator] Correctly unset Device entries that are None
Part of fixing | @@ -721,7 +721,11 @@ def merge(merger, self, lst):
instancer = merger.instancer
for v in "XY":
- dev = getattr(self, v+'DeviceTable')
+ tableName = v+'DeviceTable'
+ if not hasattr(self, tableName):
+ continue
+ dev = getattr(self, tableName)
+ delattr(self, tableName)
if dev is None:
continue
@@ -732,7 +736,6 @@ def merge(merger, self, lst):
attr = v+'Coordinate'
setattr(self, attr, getattr(self, attr) + delta)
- del self.XDeviceTable, self.YDeviceTable
self.Format = 1
@MutatorMerger.merger(otBase.ValueRecord)
@@ -744,7 +747,10 @@ def merge(merger, self, lst):
('XPlacement','XPlaDevice'),
('YPlacement','YPlaDevice')]:
- dev = getattr(self, tableName, None)
+ if not hasattr(self, tableName):
+ continue
+ dev = getattr(self, tableName)
+ delattr(self, tableName)
if dev is None:
continue
@@ -754,8 +760,6 @@ def merge(merger, self, lst):
setattr(self, name, getattr(self, name) + delta)
- delattr(self, tableName)
-
#
# VariationMerger
|
Update apt_unclassified.txt
Moved from | @@ -74,3 +74,17 @@ photogram.ga
tibct.net
tibct.org
tracking.dgip.gov.pk
+
+# Reference: https://twitter.com/ClearskySec/status/1055404788635103232
+# Reference: https://www.clearskysec.com/iec/
+
+host-gv.appspot.com
+journey-in-israel.com
+iecr.co
+iec-co-il.com
+israelalerts.us
+israelalert.us
+pokemonisrael.yolasite.com
+sourcefarge.net
+users-management.com
+ynetnewes.com
|
refactor: Prepare test_list_projects for pre-prov
This is part 1 of 2 commits that aim to move tests from only being
executed with the dynamic creds credential provider.
Part 1 does some initial refactoring and Part 2 will move the tests
and finish any other refactoring that's needed. | @@ -18,7 +18,19 @@ from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
-class ListProjectsTestJSON(base.BaseIdentityV3AdminTest):
+class BaseListProjectsTestJSON(base.BaseIdentityV3AdminTest):
+
+ def _list_projects_with_params(self, included, excluded, params, key):
+ # Validate that projects in ``included`` belongs to the projects
+ # returned that match ``params`` but not projects in ``excluded``
+ body = self.projects_client.list_projects(params)['projects']
+ for p in included:
+ self.assertIn(p[key], map(lambda x: x[key], body))
+ for p in excluded:
+ self.assertNotIn(p[key], map(lambda x: x[key], body))
+
+
+class ListProjectsTestJSON(BaseListProjectsTestJSON):
@classmethod
def resource_setup(cls):
@@ -61,17 +73,20 @@ class ListProjectsTestJSON(base.BaseIdentityV3AdminTest):
def test_list_projects_with_domains(self):
# List projects with domain
self._list_projects_with_params(
- {'domain_id': self.domain['id']}, 'domain_id')
+ [self.p1], [self.p2, self.p3], {'domain_id': self.domain['id']},
+ 'domain_id')
@decorators.idempotent_id('0fe7a334-675a-4509-b00e-1c4b95d5dae8')
def test_list_projects_with_enabled(self):
# List the projects with enabled
- self._list_projects_with_params({'enabled': False}, 'enabled')
+ self._list_projects_with_params(
+ [self.p1], [self.p2, self.p3], {'enabled': False}, 'enabled')
@decorators.idempotent_id('fa178524-4e6d-4925-907c-7ab9f42c7e26')
def test_list_projects_with_name(self):
# List projects with name
- self._list_projects_with_params({'name': self.p1_name}, 'name')
+ self._list_projects_with_params(
+ [self.p1], [self.p2, self.p3], {'name': self.p1_name}, 'name')
@decorators.idempotent_id('6edc66f5-2941-4a17-9526-4073311c1fac')
def test_list_projects_with_parent(self):
@@ -82,8 +97,3 @@ class ListProjectsTestJSON(base.BaseIdentityV3AdminTest):
self.assertNotEmpty(fetched_projects)
for project in fetched_projects:
self.assertEqual(self.p3['parent_id'], project['parent_id'])
-
- def _list_projects_with_params(self, params, key):
- body = self.projects_client.list_projects(params)['projects']
- self.assertIn(self.p1[key], map(lambda x: x[key], body))
- self.assertNotIn(self.p2[key], map(lambda x: x[key], body))
|
Prepare `2.12.1rc0`.
[ci skip-rust]
[ci skip-build-wheels] | # 2.12.x Release Series
+## 2.12.1rc0 (Jul 14, 2022)
+
+### User API Changes
+
+* Fix poetry locks missing hashes. (Cherry-pick of #16112) ([#16114](https://github.com/pantsbuild/pants/pull/16114))
+
+### Bug fixes
+
+* Fix defaulting of parameters in explicitly specified deps on `parametrize`d targets for AsyncFieldMixin (Cherry-pick of #16176) ([#16179](https://github.com/pantsbuild/pants/pull/16179))
+
+* Improve memoization of interpreter constraints, Python parsing, and request classes (Cherry-pick of #16141) ([#16153](https://github.com/pantsbuild/pants/pull/16153))
+
+* Fix pantsd TTY handling. (Cherry-pick of #16038) ([#16047](https://github.com/pantsbuild/pants/pull/16047))
+
+### Documentation
+
+* Updates installation docs to refer to the 1-step setup script (Cherry-picks of #16149) ([#16157](https://github.com/pantsbuild/pants/pull/16157))
+
+* Add 2.12 changelog to docsite (cherrypick #16136) ([#16138](https://github.com/pantsbuild/pants/pull/16138))
+
+* Update docs with 2.12 changes: default Pex lockfile generator, `pants.toml` env var interpolation, pip VCS requirements, and Python interpreter constraints (Cherry-pick of #16000) ([#16059](https://github.com/pantsbuild/pants/pull/16059))
+
+* Fix CI provider table rendering. (cherrypick of #16051) ([#16061](https://github.com/pantsbuild/pants/pull/16061))
+
+* Document Docker registries configuration options for `skip_push` and `extra_image_tags`. (Cherry-pick of #16015) ([#16021](https://github.com/pantsbuild/pants/pull/16021))
+
+* Bump 2.12 install version ([#16002](https://github.com/pantsbuild/pants/pull/16002))
+
## 2.12.0 (Jun 28, 2022)
The first stable release of the `2.12.x` series, with only documentation changes since the previous `rc`!
|
Updating cooked to 1.7.0
The change in the FlatImages data model version from `1.1.0` to `1.1.1`
required rebuilding the Cooked/ directory in the dev-suite. Per KBW, the new
version is 1.7.0.
I apparently need permissions to upload the new Cooked_...tar.gz file to the
Google Drive.
modified: pypeit/tests/test_cooked.py | @@ -20,6 +20,6 @@ def test_cooked_version():
v_file = os.path.join(os.getenv('PYPEIT_DEV'), 'Cooked', 'version')
with open(v_file) as f:
tmp = f.readlines()
- assert tmp[-1].strip() == '1.6.0'
+ assert tmp[-1].strip() == '1.7.0'
|
Update phishing.txt
[0]
While [1] hxxp://145.249.105.19/ leads to [2] hxxp://145.249.105.19/demo/usa/ and [3] hxxp://145.249.105.19/demo/uk/ , the root address should be added as well. | @@ -26,8 +26,12 @@ premierstl.com/level3-network/
centralmissouriwhitetails.com
gtu382gs.f8e2assh3.2018ssl.acritas-energy.com
-=======
+
# Reference: https://twitter.com/malwrhunterteam/status/1032543718623125504
receiptinvoice-format.com
mrofficepanel.receiptinvoice-format.com
+
+# Reference: https://twitter.com/PhishingAi/status/1032693998702321664
+
+http://145.249.105.19/
|
[ci] redirect to a wait page after a new form submit
Also, when you `GET /new`, we delete your old pod if it exists | @@ -134,6 +134,9 @@ def root():
@app.route('/new', methods=['GET'])
def new_get():
+ pod_name = session.get('pod_name')
+ if pod_name:
+ delete_worker_pod(pod_name)
session.clear()
return redirect(external_url_for('/'))
@@ -148,8 +151,11 @@ def new_post():
session['svc_name'] = svc.metadata.name
session['pod_name'] = pod.metadata.name
session['jupyter_token'] = jupyter_token
- return render_template('wait.html')
+ return redirect(external_url_for(f'wait'))
[email protected]('/wait', methods=['GET'])
+def wait_webpage():
+ return render_template('wait.html')
@app.route('/auth/<requested_svc_name>')
def auth(requested_svc_name):
@@ -184,6 +190,11 @@ def workers():
@app.route('/workers/<pod_name>/delete')
def workers_delete(pod_name):
+ delete_worker_pod(pod_name)
+ return redirect(external_url_for('workers'))
+
+
+def delete_worker_pod(pod_name):
if not session.get('admin'):
return redirect(external_url_for('admin-login'))
pod = k8s.read_namespaced_pod(pod_name, 'default')
@@ -193,7 +204,6 @@ def workers_delete(pod_name):
label_selector='uuid='+uuid).items
assert(len(svcs) == 1)
k8s.delete_namespaced_service(svcs[0].metadata.name, 'default', kube.client.V1DeleteOptions())
- return redirect(external_url_for('workers'))
@app.route('/admin-login', methods=['GET'])
@@ -216,7 +226,7 @@ def worker_image():
@sockets.route('/wait')
-def wait(ws):
+def wait_websocket(ws):
pod_name = session['pod_name']
svc_name = session['svc_name']
jupyter_token = session['jupyter_token']
|
Clarify density documentation
This commit adds a note that virtual temperature may be used as input
to this function and rewrites the mathematical expression to put it in
terms of the inputs to this function. | @@ -1759,7 +1759,7 @@ def density(pressure, temperature, mixing_ratio, molecular_weight_ratio=mpconsts
Total atmospheric pressure
temperature: `pint.Quantity`
- Air temperature
+ Air temperature (or the virtual temperature if the mixing ratio is set to 0)
mixing_ratio : `pint.Quantity`
Mass mixing ratio (dimensionless)
@@ -1783,7 +1783,7 @@ def density(pressure, temperature, mixing_ratio, molecular_weight_ratio=mpconsts
Notes
-----
- .. math:: \rho = \frac{p}{R_dT_v}
+ .. math:: \rho = \frac{\epsilon p\,(1+w)}{R_dT\,(w+\epsilon)}
.. versionchanged:: 1.0
Renamed ``mixing`` parameter to ``mixing_ratio``
|
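A hedged usage sketch of the documented behavior (values are made up): passing a zero mixing ratio makes the temperature argument act as virtual temperature, per the added note.

```python
from metpy.calc import density
from metpy.units import units

p = 1000 * units.hPa
T = 290 * units.kelvin
w = 0.01 * units.dimensionless

print(density(p, T, w))                        # moist-air density
print(density(p, T, 0 * units.dimensionless))  # T treated as virtual temperature
```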
inte-tests: reset subgraph_retries to 0
this is the default value, so reset-storage should reset it back to
the default value, not to something else | @@ -11,7 +11,7 @@ MANAGER_CONFIG = {
'workflow': {
'task_retries': 5,
'task_retry_interval': 1,
- 'subgraph_retries': 5,
+ 'subgraph_retries': 0,
},
}
|
Ensure the dynamically generated pubkeys are present
There are some tests that expect them to exist.
This commit is a temporary stop gap until a cleaner design is
implemented. | @@ -64,6 +64,7 @@ def fork_choice_scoring():
def test_demo(base_db,
validator_count,
keymap,
+ pubkeys,
fork_choice_scoring):
slots_per_epoch = 8
config = SERENITY_CONFIG._replace(
@@ -83,6 +84,10 @@ def test_demo(base_db,
genesis_epoch = config.GENESIS_EPOCH
chaindb = BeaconChainDB(base_db, config)
+ # TODO(ralexstokes) clean up how the cache is populated
+ for i in range(validator_count):
+ pubkeys[i]
+
genesis_state, genesis_block = create_mock_genesis(
num_validators=validator_count,
config=config,
|
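Why a bare `pubkeys[i]` has any effect: the fixture is presumably a lazily-populated mapping, so indexing it fills the cache as a side effect. A generic sketch of that pattern (the class and key derivation are hypothetical):

```python
class LazyPubkeys(dict):
    # Hypothetical lazily-filled mapping: deriving a pubkey is expensive,
    # so entries are generated only on first access.
    def __missing__(self, i):
        value = f'pubkey-{i}'   # stand-in for real key derivation
        self[i] = value
        return value

pubkeys = LazyPubkeys()
pubkeys[3]          # a bare lookup populates the cache as a side effect
print(pubkeys)      # {3: 'pubkey-3'}
```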
Update jpeg_compression.py
fix line breaks, fix consistency | @@ -20,7 +20,9 @@ This module implements the JPEG compression defence `JpegCompression`.
| Paper link: https://arxiv.org/abs/1705.02900, https://arxiv.org/abs/1608.00853
-| Please keep in mind the limitations of defences. For more information on the limitations of this defence, see https://arxiv.org/abs/1802.00420 . For details on how to evaluate classifier security in general, see https://arxiv.org/abs/1902.06705
+| Please keep in mind the limitations of defences. For more information on the limitations of this defence, see
+https://arxiv.org/abs/1802.00420 . For details on how to evaluate classifier security in general, see
+https://arxiv.org/abs/1902.06705
"""
from __future__ import absolute_import, division, print_function, unicode_literals
@@ -42,7 +44,9 @@ class JpegCompression(Preprocessor):
| Paper link: https://arxiv.org/abs/1705.02900, https://arxiv.org/abs/1608.00853
- | Please keep in mind the limitations of defences. For more information on the limitations of this defence, see https://arxiv.org/abs/1802.00420
+ | Please keep in mind the limitations of defences. For more information on the limitations of this defence,
+ see https://arxiv.org/abs/1802.00420 . For details on how to evaluate classifier security in general, see
+ https://arxiv.org/abs/1902.06705
"""
params = ['quality', 'channel_index', 'clip_values']
|
*actually* fixed issue from last commit.
I finally found a way to test things with pytest properly on my end | @@ -423,7 +423,7 @@ def perform_ping(started, server=DEFAULT_SERVER_URL):
language = get_device_setting("language_id", "")
- if not isinstance(started, datetime):
+ if not isinstance(started, datetime.datetime):
started = datetime.datetime.strptime(started)
try:
|
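The underlying gotcha, isolated: with `import datetime`, the bare name is the module, not the class, so `isinstance(x, datetime)` raises TypeError.

```python
import datetime

now = datetime.datetime.now()
print(isinstance(now, datetime.datetime))  # True

try:
    isinstance(now, datetime)  # modules are not types
except TypeError as exc:
    print(exc)
```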