| code (string, length 3–1.05M) | repo_name (string, length 4–116) | path (string, length 3–942) | language (string, 30 classes) | license (string, 15 classes) | size (int32, 3–1.05M) | line_mean (float64, 0.5–100) | line_max (int64, 1–1k) | alpha_frac (float64, 0.25–1) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|
import * as THREE from "three";
import _ from 'lodash';
import STORE from "store";
import Text3D from "renderer/text3d";
import { copyProperty, hideArrayObjects, calculateLaneMarkerPoints } from "utils/misc";
import { drawSegmentsFromPoints, drawDashedLineFromPoints,
drawBox, drawDashedBox, drawArrow, drawImage } from "utils/draw";
import iconObjectYield from "assets/images/decision/object-yield.png";
const DEFAULT_HEIGHT = 1.5;
export const DEFAULT_COLOR = 0xFF00FC;
export const ObstacleColorMapping = {
PEDESTRIAN: 0xFFEA00,
BICYCLE: 0x00DCEB,
VEHICLE: 0x00FF3C,
VIRTUAL: 0x800000,
CIPV: 0xFF9966
};
const LINE_THICKNESS = 1.5;
export default class PerceptionObstacles {
constructor() {
this.textRender = new Text3D();
this.arrows = []; // for indication of direction of moving obstacles
this.ids = []; // for obstacle id labels
this.solidCubes = []; // for obstacles with only length/width/height
this.dashedCubes = []; // for obstacles with only length/width/height
this.extrusionSolidFaces = []; // for obstacles with polygon points
this.extrusionDashedFaces = []; // for obstacles with polygon points
this.laneMarkers = []; // for lane markers
this.icons = [];
this.trafficCones = []; // for traffic cone meshes
}
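// Note: most of the arrays above act as mesh pools. updateObjects() reuses meshes by index each
// frame and then calls hideArrayObjects(pool, usedCount) so leftover meshes from the previous
// frame are hidden rather than destroyed and re-created; id labels and lane markers are instead
// rebuilt on every update.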
update(world, coordinates, scene, isBirdView) {
this.updateObjects(world, coordinates, scene, isBirdView);
this.updateLaneMarkers(world, coordinates, scene);
}
updateObjects(world, coordinates, scene, isBirdView) {
// Id meshes need to be recreated every time.
// Each text mesh needs to be removed from the scene,
// and its char meshes need to be hidden so they can be reused.
if (!_.isEmpty(this.ids)) {
this.ids.forEach(t => {
t.children.forEach(c => c.visible = false);
scene.remove(t);
});
this.ids = [];
}
this.textRender.reset();
const objects = world.object;
if (_.isEmpty(objects)) {
hideArrayObjects(this.arrows);
hideArrayObjects(this.solidCubes);
hideArrayObjects(this.dashedCubes);
hideArrayObjects(this.extrusionSolidFaces);
hideArrayObjects(this.extrusionDashedFaces);
hideArrayObjects(this.icons);
hideArrayObjects(this.trafficCones);
return;
}
const adc = coordinates.applyOffset({
x: world.autoDrivingCar.positionX,
y: world.autoDrivingCar.positionY,
});
adc.heading = world.autoDrivingCar.heading;
let arrowIdx = 0;
let cubeIdx = 0;
let extrusionFaceIdx = 0;
let iconIdx = 0;
let trafficConeIdx = 0;
for (let i = 0; i < objects.length; i++) {
const obstacle = objects[i];
if (!STORE.options['showObstacles' + _.upperFirst(_.camelCase(obstacle.type))]
|| !_.isNumber(obstacle.positionX) || !_.isNumber(obstacle.positionY)) {
continue;
}
const position = coordinates.applyOffset(
new THREE.Vector3(obstacle.positionX,
obstacle.positionY,
(obstacle.height || DEFAULT_HEIGHT) / 2));
const color = ObstacleColorMapping[obstacle.type] || DEFAULT_COLOR;
if (STORE.options.showObstaclesVelocity && obstacle.type &&
obstacle.type !== 'UNKNOWN_UNMOVABLE' && obstacle.speed > 0.5) {
const arrowMesh = this.updateArrow(position,
obstacle.speedHeading, color, arrowIdx++, scene);
const scale = 1 + Math.log2(obstacle.speed);
arrowMesh.scale.set(scale, scale, scale);
arrowMesh.visible = true;
}
if (STORE.options.showObstaclesHeading) {
const arrowMesh = this.updateArrow(position, obstacle.heading,
0xFFFFFF, arrowIdx++, scene);
arrowMesh.scale.set(1, 1, 1);
arrowMesh.visible = true;
}
this.updateTexts(adc, obstacle, position, scene, isBirdView);
// get the confidence and validate its range
let confidence = obstacle.confidence;
confidence = Math.max(0.0, confidence);
confidence = Math.min(1.0, confidence);
const polygon = obstacle.polygonPoint;
if (obstacle.subType === "ST_TRAFFICCONE") {
this.updateTrafficCone(position, trafficConeIdx, scene);
trafficConeIdx++;
} else if (polygon !== undefined && polygon.length > 0) {
this.updatePolygon(polygon, obstacle.height, color, coordinates, confidence,
extrusionFaceIdx, scene);
extrusionFaceIdx += polygon.length;
} else if (obstacle.length && obstacle.width && obstacle.height) {
this.updateCube(obstacle.length, obstacle.width, obstacle.height, position,
obstacle.heading, color, confidence, cubeIdx++, scene);
}
// draw a yield sign to indicate ADC is yielding to this obstacle
if (obstacle.yieldedObstacle) {
const iconPosition = {
x: position.x,
y: position.y,
z: position.z + obstacle.height + 0.5,
};
this.updateIcon(iconPosition, world.autoDrivingCar.heading, iconIdx, scene);
iconIdx++;
}
}
hideArrayObjects(this.arrows, arrowIdx);
hideArrayObjects(this.solidCubes, cubeIdx);
hideArrayObjects(this.dashedCubes, cubeIdx);
hideArrayObjects(this.extrusionSolidFaces, extrusionFaceIdx);
hideArrayObjects(this.extrusionDashedFaces, extrusionFaceIdx);
hideArrayObjects(this.icons, iconIdx);
hideArrayObjects(this.trafficCones, trafficConeIdx);
}
updateArrow(position, heading, color, arrowIdx, scene) {
const arrowMesh = this.getArrow(arrowIdx, scene);
copyProperty(arrowMesh.position, position);
arrowMesh.material.color.setHex(color);
arrowMesh.rotation.set(0, 0, -(Math.PI / 2 - heading));
return arrowMesh;
}
updateTexts(adc, obstacle, obstaclePosition, scene, isBirdView) {
const initPosition = {
x: obstaclePosition.x,
y: obstaclePosition.y,
z: obstacle.height || 3
};
const lineSpacing = 0.5;
const deltaX = isBirdView ? 0.0 : lineSpacing * Math.cos(adc.heading);
const deltaY = isBirdView ? 0.7 : lineSpacing * Math.sin(adc.heading);
const deltaZ = isBirdView ? 0.0 : lineSpacing;
let lineCount = 0;
if (STORE.options.showObstaclesInfo) {
const distance = adc.distanceTo(obstaclePosition).toFixed(1);
const speed = obstacle.speed.toFixed(1);
this.drawTexts(`(${distance}m, ${speed}m/s)`, initPosition, scene);
lineCount++;
}
if (STORE.options.showObstaclesId) {
const textPosition = {
x: initPosition.x + (lineCount * deltaX),
y: initPosition.y + (lineCount * deltaY),
z: initPosition.z + (lineCount * deltaZ),
};
this.drawTexts(obstacle.id, textPosition, scene);
lineCount++;
}
if (STORE.options.showPredictionPriority) {
const priority = _.get(obstacle, 'obstaclePriority.priority');
if (priority && priority !== "NORMAL") {
const textPosition = {
x: initPosition.x + (lineCount * deltaX),
y: initPosition.y + (lineCount * deltaY),
z: initPosition.z + (lineCount * deltaZ),
};
this.drawTexts(priority, textPosition, scene);
}
}
}
updatePolygon(points, height, color, coordinates, confidence, extrusionFaceIdx, scene) {
for (let i = 0; i < points.length; i++) {
// Get cached face mesh.
const solidFaceMesh = this.getFace(extrusionFaceIdx + i, scene, true);
const dashedFaceMesh = this.getFace(extrusionFaceIdx + i, scene, false);
// Get the adjacent point.
const next = (i === points.length - 1) ? 0 : i + 1;
const v = new THREE.Vector3(points[i].x, points[i].y, points[i].z);
const vNext = new THREE.Vector3(points[next].x, points[next].y, points[next].z);
// Set position.
const facePosition = coordinates.applyOffset(
new THREE.Vector2((v.x + vNext.x) / 2.0, (v.y + vNext.y) / 2.0));
if (facePosition === null) {
continue;
}
solidFaceMesh.position.set(facePosition.x, facePosition.y, 0);
dashedFaceMesh.position.set(facePosition.x, facePosition.y, height*confidence);
// Set face scale.
const edgeDistance = v.distanceTo(vNext);
if (edgeDistance === 0) {
console.warn("Cannot display obstacle with an edge length 0!");
continue;
}
solidFaceMesh.scale.set(edgeDistance, 1, height*confidence);
dashedFaceMesh.scale.set(edgeDistance, 1, height*(1 - confidence));
solidFaceMesh.material.color.setHex(color);
solidFaceMesh.rotation.set(0, 0, Math.atan2(vNext.y - v.y, vNext.x - v.x));
solidFaceMesh.visible = (confidence !== 0.0);
dashedFaceMesh.material.color.setHex(color);
dashedFaceMesh.rotation.set(0, 0, Math.atan2(vNext.y - v.y, vNext.x - v.x));
dashedFaceMesh.visible = (confidence !== 1.0);
}
}
updateCube(length, width, height, position, heading, color, confidence, cubeIdx, scene) {
if (confidence > 0) {
const solidCubeMesh = this.getCube(cubeIdx, scene, true);
solidCubeMesh.position.set(position.x, position.y, position.z+height*(confidence-1)/2 );
solidCubeMesh.scale.set(length, width, height*confidence);
solidCubeMesh.material.color.setHex(color);
solidCubeMesh.rotation.set(0, 0, heading);
solidCubeMesh.visible = true;
}
if (confidence < 1) {
const dashedCubeMesh = this.getCube(cubeIdx, scene, false);
dashedCubeMesh.position.set(position.x, position.y, position.z+height*confidence/2 );
dashedCubeMesh.scale.set(length, width, height*(1-confidence));
dashedCubeMesh.material.color.setHex(color);
dashedCubeMesh.rotation.set(0, 0, heading);
dashedCubeMesh.visible = true;
}
}
updateIcon(position, heading, iconIdx, scene) {
const icon = this.getIcon(iconIdx, scene);
copyProperty(icon.position, position);
icon.rotation.set(Math.PI / 2, heading - Math.PI / 2, 0);
icon.visible = true;
}
updateTrafficCone(position, coneIdx, scene) {
const cone = this.getTrafficCone(coneIdx, scene);
cone.position.setX(position.x);
cone.position.setY(position.y);
cone.visible = true;
}
getArrow(index, scene) {
if (index < this.arrows.length) {
return this.arrows[index];
}
const arrowMesh = drawArrow(1.5, LINE_THICKNESS, 0.5, 0.5, DEFAULT_COLOR);
arrowMesh.rotation.set(0, 0, -Math.PI / 2);
arrowMesh.visible = false;
this.arrows.push(arrowMesh);
scene.add(arrowMesh);
return arrowMesh;
}
getFace(index, scene, solid = true) {
const extrusionFaces = solid ? this.extrusionSolidFaces : this.extrusionDashedFaces;
if (index < extrusionFaces.length) {
return extrusionFaces[index];
}
const points = [
new THREE.Vector3(-0.5, 0, 0),
new THREE.Vector3(0.5, 0, 0),
new THREE.Vector3(0.5, 0, 1),
new THREE.Vector3(-0.5, 0, 1)
];
const extrusionFace = solid
? drawSegmentsFromPoints(points, DEFAULT_COLOR, LINE_THICKNESS)
: drawDashedLineFromPoints(points, DEFAULT_COLOR, LINE_THICKNESS, 0.1, 0.1);
extrusionFace.visible = false;
extrusionFaces.push(extrusionFace);
scene.add(extrusionFace);
return extrusionFace;
}
getCube(index, scene, solid = true) {
const cubes = solid ? this.solidCubes : this.dashedCubes;
if (index < cubes.length) {
return cubes[index];
}
const cubeSize = new THREE.Vector3(1, 1, 1);
const cubeMesh = solid
? drawBox(cubeSize, DEFAULT_COLOR, LINE_THICKNESS)
: drawDashedBox(cubeSize, DEFAULT_COLOR, LINE_THICKNESS, 0.1, 0.1);
cubeMesh.visible = false;
cubes.push(cubeMesh);
scene.add(cubeMesh);
return cubeMesh;
}
getIcon(index, scene) {
if (index < this.icons.length) {
return this.icons[index];
}
const icon = drawImage(iconObjectYield, 1, 1, 3, 3.6, 0);
icon.rotation.set(0, 0, -Math.PI / 2);
icon.visible = false;
this.icons.push(icon);
scene.add(icon);
return icon;
}
drawTexts(content, position, scene) {
const text = this.textRender.drawText(content, scene);
if (text) {
text.position.set(position.x, position.y, position.z);
this.ids.push(text);
scene.add(text);
}
}
updateLaneMarkers(world, coordinates, scene) {
if (!_.isEmpty(this.laneMarkers)) {
this.laneMarkers.forEach((laneMesh) => {
scene.remove(laneMesh);
laneMesh.geometry.dispose();
laneMesh.material.dispose();
});
this.laneMarkers = [];
}
if (STORE.options.showPerceptionLaneMarker) {
const adc = world.autoDrivingCar;
for (const name in world.laneMarker) {
const absolutePoints = calculateLaneMarkerPoints(adc, world.laneMarker[name]);
if (absolutePoints.length) {
const offsetPoints = absolutePoints.map((point) => {
return coordinates.applyOffset(point);
});
const mesh = drawSegmentsFromPoints(offsetPoints, 0x006AFF, 2, 4, false);
scene.add(mesh);
this.laneMarkers.push(mesh);
}
}
}
}
getTrafficCone(index, scene) {
if (index < this.trafficCones.length) {
return this.trafficCones[index];
}
const height = 0.914;
const geometry = new THREE.CylinderGeometry(0.1, 0.25, height, 32);
const material = new THREE.MeshBasicMaterial({
color: 0xE1601C,
transparent: true,
opacity: 0.65,
});
const cone = new THREE.Mesh(geometry, material);
cone.rotation.set(Math.PI / 2, 0, 0);
cone.position.set(0, 0, height/2);
this.trafficCones.push(cone);
scene.add(cone);
return cone;
}
}
| msbeta/apollo | modules/dreamview/frontend/src/renderer/obstacles.js | JavaScript | apache-2.0 | 15,435 | 39.725594 | 100 | 0.574409 | false |
/*
* Copyright 2016 LINE Corporation
*
* LINE Corporation licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.linecorp.armeria.server.docs;
import static java.util.Objects.requireNonNull;
import java.util.Comparator;
import java.util.Objects;
import java.util.Set;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonInclude.Include;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.MoreObjects;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableSortedSet;
import com.linecorp.armeria.common.MediaType;
import com.linecorp.armeria.common.annotation.Nullable;
import com.linecorp.armeria.common.annotation.UnstableApi;
import com.linecorp.armeria.server.Service;
/**
* Metadata about the endpoints exposed by a {@link Service}.
*/
@UnstableApi
@JsonInclude(Include.NON_NULL)
public final class EndpointInfo {
/**
* Returns a newly created {@link EndpointInfoBuilder} that builds the {@link EndpointInfo} with
* the specified {@code hostnamePattern} and {@code pathMapping}.
*/
public static EndpointInfoBuilder builder(String hostnamePattern, String pathMapping) {
return new EndpointInfoBuilder(hostnamePattern, pathMapping);
}
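// Illustrative usage sketch (hedged): the setter names below are assumptions based on this
// class's fields; EndpointInfoBuilder itself is not shown in this file.
//
//   EndpointInfo info = EndpointInfo.builder("*.example.com", "/items/{id}")
//           .fragment("summary")
//           .defaultMimeType(MediaType.JSON_UTF_8)
//           .availableMimeTypes(MediaType.JSON_UTF_8, MediaType.PLAIN_TEXT_UTF_8)
//           .build();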
private final String hostnamePattern;
private final String pathMapping;
@Nullable
private final String regexPathPrefix;
@Nullable
private final String fragment;
@Nullable
private final MediaType defaultMimeType;
private final Set<MediaType> availableMimeTypes;
/**
* Creates a new instance.
*/
EndpointInfo(String hostnamePattern, String pathMapping, @Nullable String regexPathPrefix,
@Nullable String fragment, @Nullable MediaType defaultMimeType,
Iterable<MediaType> availableMimeTypes) {
this.hostnamePattern = requireNonNull(hostnamePattern, "hostnamePattern");
this.pathMapping = requireNonNull(pathMapping, "pathMapping");
this.regexPathPrefix = Strings.emptyToNull(regexPathPrefix);
this.fragment = Strings.emptyToNull(fragment);
this.defaultMimeType = defaultMimeType;
this.availableMimeTypes = ImmutableSortedSet.copyOf(
Comparator.comparing(MediaType::toString),
requireNonNull(availableMimeTypes, "availableMimeTypes"));
}
/**
* Returns the hostname pattern of this endpoint.
*/
@JsonProperty
public String hostnamePattern() {
return hostnamePattern;
}
/**
* Returns the path mapping of this endpoint.
*/
@JsonProperty
public String pathMapping() {
return pathMapping;
}
/**
* Returns the prefix of this endpoint if the {@link #pathMapping()} returns a regular expression string
* of endpoint and the prefix exists, otherwise {@code null}.
*/
@JsonProperty
@Nullable
public String regexPathPrefix() {
return regexPathPrefix;
}
/**
* Returns the URI fragment of this endpoint.
*/
@JsonProperty
@Nullable
public String fragment() {
return fragment;
}
/**
* Returns the default MIME type of this endpoint.
*/
@JsonProperty
@Nullable
public MediaType defaultMimeType() {
return defaultMimeType;
}
/**
* Returns the set of available MIME types of this endpoint.
*/
@JsonProperty
public Set<MediaType> availableMimeTypes() {
return availableMimeTypes;
}
@Override
public int hashCode() {
return Objects.hash(hostnamePattern, pathMapping, regexPathPrefix, fragment,
defaultMimeType, availableMimeTypes);
}
@Override
public boolean equals(@Nullable Object obj) {
if (!(obj instanceof EndpointInfo)) {
return false;
}
if (this == obj) {
return true;
}
final EndpointInfo that = (EndpointInfo) obj;
return hostnamePattern.equals(that.hostnamePattern) &&
pathMapping.equals(that.pathMapping) &&
Objects.equals(regexPathPrefix, that.regexPathPrefix) &&
Objects.equals(fragment, that.fragment) &&
Objects.equals(defaultMimeType, that.defaultMimeType) &&
availableMimeTypes.equals(that.availableMimeTypes);
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this).omitNullValues()
.add("hostnamePattern", hostnamePattern)
.add("pathMapping", pathMapping)
.add("regexPathPrefix", regexPathPrefix)
.add("fragment", fragment)
.add("defaultMimeType", defaultMimeType)
.add("availableMimeTypes", availableMimeTypes)
.toString();
}
}
| line/armeria | core/src/main/java/com/linecorp/armeria/server/docs/EndpointInfo.java | Java | apache-2.0 | 5,484 | 31.449704 | 108 | 0.668855 | false |
// Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Immutable;
using Microsoft.CodeAnalysis.Diagnostics.Analyzers.NamingStyles;
using Microsoft.CodeAnalysis.NamingStyles;
using static Microsoft.CodeAnalysis.Diagnostics.Analyzers.NamingStyles.SymbolSpecification;
namespace Microsoft.CodeAnalysis.Shared.Naming
{
internal static class FallbackNamingRules
{
/// <summary>
/// Standard field/property names a refactoring looks for, given a named symbol that is the subject of the refactoring.
/// The refactoring will try to find an existing matching symbol and, if none is found, will generate one.
/// </summary>
internal static readonly ImmutableArray<NamingRule> RefactoringMatchLookupRules = ImmutableArray.Create(
new NamingRule(
new SymbolSpecification(Guid.NewGuid(), "Property", ImmutableArray.Create(new SymbolKindOrTypeKind(SymbolKind.Property))),
new NamingStyle(Guid.NewGuid(), capitalizationScheme: Capitalization.PascalCase),
enforcementLevel: ReportDiagnostic.Hidden),
new NamingRule(
new SymbolSpecification(Guid.NewGuid(), "Field", ImmutableArray.Create(new SymbolKindOrTypeKind(SymbolKind.Field))),
new NamingStyle(Guid.NewGuid(), capitalizationScheme: Capitalization.CamelCase),
enforcementLevel: ReportDiagnostic.Hidden),
new NamingRule(
new SymbolSpecification(Guid.NewGuid(), "FieldWithUnderscore", ImmutableArray.Create(new SymbolKindOrTypeKind(SymbolKind.Field))),
new NamingStyle(Guid.NewGuid(), prefix: "_", capitalizationScheme: Capitalization.CamelCase),
enforcementLevel: ReportDiagnostic.Hidden));
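// For illustration (a hedged reading of the three rules above, not part of the original source):
// when refactoring around a symbol named "totalCount", the lookup would accept an existing
// property "TotalCount" (PascalCase), a field "totalCount" (camelCase), or a field "_totalCount"
// ("_" prefix + camelCase), and would generate one of those spellings if no match exists.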
/// <summary>
/// Standard name rules for name suggestion/completion utilities.
/// </summary>
internal static readonly ImmutableArray<NamingRule> CompletionOfferingRules = ImmutableArray.Create(
CreateCamelCaseFieldsAndParametersRule(),
CreateEndWithAsyncRule(),
CreateGetAsyncRule(),
CreateMethodStartsWithGetRule());
private static NamingRule CreateGetAsyncRule()
{
var kinds = ImmutableArray.Create(new SymbolKindOrTypeKind(MethodKind.Ordinary));
var modifiers = ImmutableArray.Create(new ModifierKind(ModifierKindEnum.IsAsync));
return new NamingRule(
new SymbolSpecification(Guid.NewGuid(), "endswithasync", kinds, accessibilityList: default, modifiers),
new NamingStyle(Guid.NewGuid(), prefix: "Get", suffix: "Async"),
ReportDiagnostic.Info);
}
private static NamingRule CreateCamelCaseFieldsAndParametersRule()
{
var kinds = ImmutableArray.Create(new SymbolKindOrTypeKind(SymbolKind.Field), new SymbolKindOrTypeKind(SymbolKind.Parameter), new SymbolKindOrTypeKind(SymbolKind.Local));
return new NamingRule(
new SymbolSpecification(Guid.NewGuid(), "camelcasefields", kinds, accessibilityList: default, modifiers: default),
new NamingStyle(Guid.NewGuid(), capitalizationScheme: Capitalization.CamelCase),
ReportDiagnostic.Info);
}
private static NamingRule CreateEndWithAsyncRule()
{
var kinds = ImmutableArray.Create(new SymbolKindOrTypeKind(MethodKind.Ordinary));
var modifiers = ImmutableArray.Create(new ModifierKind(ModifierKindEnum.IsAsync));
return new NamingRule(
new SymbolSpecification(Guid.NewGuid(), "endswithasynct", kinds, accessibilityList: default, modifiers),
new NamingStyle(Guid.NewGuid(), suffix: "Async"),
ReportDiagnostic.Info);
}
private static NamingRule CreateMethodStartsWithGetRule()
{
var kinds = ImmutableArray.Create(new SymbolKindOrTypeKind(MethodKind.Ordinary));
return new NamingRule(
new SymbolSpecification(Guid.NewGuid(), "startswithget", kinds, accessibilityList: default, modifiers: default),
new NamingStyle(Guid.NewGuid(), prefix: "Get"),
ReportDiagnostic.Info);
}
}
}
| DustinCampbell/roslyn | src/Features/Core/Portable/Shared/Naming/FallbackNamingRules.cs | C# | apache-2.0 | 4,409 | 55.5 | 182 | 0.6855 | false |
/*
http://www.apache.org/licenses/LICENSE-2.0.txt
Copyright 2015 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package core
import "errors"
var (
// ErrSubscriptionGroupAlreadyExists - error message when the subscription
// group already exists
ErrSubscriptionGroupAlreadyExists = errors.New("Subscription already exists")
// ErrSubscriptionGroupDoesNotExist - error message when the subscription
// group does not exist
ErrSubscriptionGroupDoesNotExist = errors.New("Subscription does not exist")
)
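// Illustrative usage sketch (hedged): the manager value and its Add method are hypothetical; only
// the two sentinel errors above are defined in this file. Since they are created with errors.New,
// callers compare against them directly:
//
//	if err := manager.Add(group); err == ErrSubscriptionGroupAlreadyExists {
//		// the subscription group was registered earlier; handle the conflict
//	}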
| intelsdi-x/snap | core/subscription_group.go | GO | apache-2.0 | 1,012 | 30.625 | 78 | 0.791502 | false |
require 'test/unit'
require 'stringio'
class TestParse < Test::Unit::TestCase
def setup
@verbose = $VERBOSE
$VERBOSE = nil
end
def teardown
$VERBOSE = @verbose
end
def test_else_without_rescue
x = eval <<-END
begin
else
42
end
END
assert_equal(42, x)
end
def test_alias_backref
assert_raise(SyntaxError) do
eval <<-END
alias $foo $1
END
end
end
def test_command_call
t = Object.new
def t.foo(x); x; end
a = false
b = c = d = true
assert_nothing_raised do
eval <<-END
a &&= t.foo 42
b &&= t.foo 42
c &&= t.foo nil
d &&= t.foo false
END
end
assert_equal([false, 42, nil, false], [a, b, c, d])
a = 3
assert_nothing_raised { eval("a &= t.foo 5") }
assert_equal(1, a)
a = [nil, nil, true, true]
assert_nothing_raised do
eval <<-END
a[0] ||= t.foo 42
a[1] &&= t.foo 42
a[2] ||= t.foo 42
a[3] &&= t.foo 42
END
end
assert_equal([42, nil, true, 42], a)
o = Object.new
class << o
attr_accessor :foo, :bar, :Foo, :Bar, :baz, :qux
end
o.foo = o.Foo = o::baz = nil
o.bar = o.Bar = o::qux = 1
assert_nothing_raised do
eval <<-END
o.foo ||= t.foo 42
o.bar &&= t.foo 42
o.Foo ||= t.foo 42
o.Bar &&= t.foo 42
o::baz ||= t.foo 42
o::qux &&= t.foo 42
END
end
assert_equal([42, 42], [o.foo, o.bar])
assert_equal([42, 42], [o.Foo, o.Bar])
assert_equal([42, 42], [o::baz, o::qux])
assert_raise(SyntaxError) do
eval <<-END
$1 ||= t.foo 42
END
end
def t.bar(x); x + yield; end
a = b = nil
assert_nothing_raised do
eval <<-END
a = t.bar "foo" do
"bar"
end.gsub "ob", "OB"
b = t.bar "foo" do
"bar"
end::gsub "ob", "OB"
END
end
assert_equal("foOBar", a)
assert_equal("foOBar", b)
a = nil
assert_nothing_raised do
t.instance_eval <<-END
a = bar "foo" do "bar" end
END
end
assert_equal("foobar", a)
a = nil
assert_nothing_raised do
eval <<-END
a = t::bar "foo" do "bar" end
END
end
assert_equal("foobar", a)
def t.baz(*r)
@baz = r + (block_given? ? [yield] : [])
end
assert_nothing_raised do
t.instance_eval "baz (1), 2"
end
assert_equal([1, 2], t.instance_eval { @baz })
end
def test_mlhs_node
c = Class.new
class << c
attr_accessor :foo, :bar, :Foo, :Bar
FOO = BAR = nil
end
assert_nothing_raised do
eval <<-END
c::foo, c::bar = 1, 2
c.Foo, c.Bar = 1, 2
c::FOO, c::BAR = 1, 2
END
end
assert_equal([1, 2], [c::foo, c::bar])
assert_equal([1, 2], [c.Foo, c.Bar])
assert_equal([1, 2], [c::FOO, c::BAR])
end
def test_dynamic_constant_assignment
assert_raise(SyntaxError) do
Object.new.instance_eval <<-END
def foo
self::FOO, self::BAR = 1, 2
::FOO, ::BAR = 1, 2
end
END
end
assert_raise(SyntaxError) do
eval <<-END
$1, $2 = 1, 2
END
end
assert_raise(SyntaxError) do
Object.new.instance_eval <<-END
def foo
::FOO = 1
end
END
end
c = Class.new
assert_raise(SyntaxError) do
eval <<-END
c::FOO &= 1
::FOO &= 1
END
end
c = Class.new
assert_raise(SyntaxError) do
eval <<-END
$1 &= 1
END
end
end
def test_class_module
assert_raise(SyntaxError) do
eval <<-END
class foo; end
END
end
assert_raise(SyntaxError) do
eval <<-END
def foo
class Foo; end
module Bar; end
end
END
end
assert_raise(SyntaxError) do
eval <<-END
class Foo Bar; end
END
end
end
def test_op_name
o = Object.new
def o.>(x); x; end
def o./(x); x; end
a = nil
assert_nothing_raised do
o.instance_eval <<-END
undef >, /
END
end
end
def test_arg
o = Object.new
class << o
attr_accessor :foo, :bar, :Foo, :Bar, :baz, :qux
end
o.foo = o.Foo = o::baz = nil
o.bar = o.Bar = o::qux = 1
assert_nothing_raised do
eval <<-END
o.foo ||= 42
o.bar &&= 42
o.Foo ||= 42
o.Bar &&= 42
o::baz ||= 42
o::qux &&= 42
END
end
assert_equal([42, 42], [o.foo, o.bar])
assert_equal([42, 42], [o.Foo, o.Bar])
assert_equal([42, 42], [o::baz, o::qux])
a = nil
assert_nothing_raised do
eval <<-END
a = -2.0 ** 2
END
end
assert_equal(-4.0, a)
end
def test_block_variable
o = Object.new
def o.foo(*r); yield(*r); end
a = nil
assert_nothing_raised do
eval <<-END
o.foo 1 do|; a| a = 42 end
END
end
assert_nil(a)
end
def test_bad_arg
assert_raise(SyntaxError) do
eval <<-END
def foo(FOO); end
END
end
assert_raise(SyntaxError) do
eval <<-END
def foo(@foo); end
END
end
assert_raise(SyntaxError) do
eval <<-END
def foo($foo); end
END
end
assert_raise(SyntaxError) do
eval <<-END
def foo(@@foo); end
END
end
o = Object.new
def o.foo(*r); yield(*r); end
assert_raise(SyntaxError) do
eval <<-END
o.foo 1 {|; @a| @a = 42 }
END
end
end
def test_do_lambda
a = b = nil
assert_nothing_raised do
eval <<-END
a = -> do
b = 42
end
END
end
a.call
assert_equal(42, b)
end
def test_block_call_colon2
o = Object.new
def o.foo(x); x + yield; end
a = b = nil
assert_nothing_raised do
o.instance_eval <<-END
a = foo 1 do 42 end.to_s
b = foo 1 do 42 end::to_s
END
end
assert_equal("43", a)
assert_equal("43", b)
end
def test_call_method
a = b = nil
assert_nothing_raised do
eval <<-END
a = proc {|x| x + "bar" }.("foo")
b = proc {|x| x + "bar" }::("foo")
END
end
assert_equal("foobar", a)
assert_equal("foobar", b)
end
def test_xstring
assert_raise(Errno::ENOENT) do
eval("``")
end
end
def test_words
assert_equal([], %W( ))
end
def test_dstr
@@foo = 1
assert_equal("foo 1 bar", "foo #@@foo bar")
"1" =~ /(.)/
assert_equal("foo 1 bar", "foo #$1 bar")
end
def test_dsym
assert_nothing_raised { eval(':""') }
end
def test_arg2
o = Object.new
assert_nothing_raised do
eval <<-END
def o.foo(a=42,*r,z,&b); b.call(r.inject(a*1000+z*100, :+)); end
END
end
assert_equal(-1405, o.foo(1,2,3,4) {|x| -x })
assert_equal(-1302, o.foo(1,2,3) {|x| -x })
assert_equal(-1200, o.foo(1,2) {|x| -x })
assert_equal(-42100, o.foo(1) {|x| -x })
assert_raise(ArgumentError) { o.foo() }
assert_nothing_raised do
eval <<-END
def o.foo(a=42,z,&b); b.call(a*1000+z*100); end
END
end
assert_equal(-1200, o.foo(1,2) {|x| -x } )
assert_equal(-42100, o.foo(1) {|x| -x } )
assert_raise(ArgumentError) { o.foo() }
assert_nothing_raised do
eval <<-END
def o.foo(*r,z,&b); b.call(r.inject(z*100, :+)); end
END
end
assert_equal(-303, o.foo(1,2,3) {|x| -x } )
assert_equal(-201, o.foo(1,2) {|x| -x } )
assert_equal(-100, o.foo(1) {|x| -x } )
assert_raise(ArgumentError) { o.foo() }
end
def test_duplicate_argument
assert_raise(SyntaxError) do
eval <<-END
1.times {|&b?| }
END
end
assert_raise(SyntaxError) do
eval <<-END
1.times {|a, a|}
END
end
assert_raise(SyntaxError) do
eval <<-END
def foo(a, a); end
END
end
end
def test_define_singleton_error
assert_raise(SyntaxError) do
eval <<-END
def ("foo").foo; end
END
end
end
def test_backquote
t = Object.new
assert_nothing_raised do
eval <<-END
def t.`(x); "foo" + x + "bar"; end
END
end
a = b = nil
assert_nothing_raised do
eval <<-END
a = t.` "zzz"
1.times {|;z| t.` ("zzz") }
END
t.instance_eval <<-END
b = `zzz`
END
end
assert_equal("foozzzbar", a)
assert_equal("foozzzbar", b)
end
def test_carrige_return
assert_equal(2, eval("1 +\r\n1"))
end
def test_string
assert_raise(SyntaxError) do
eval '"\xg1"'
end
assert_raise(SyntaxError) do
eval '"\u{1234"'
end
assert_raise(SyntaxError) do
eval '"\M1"'
end
assert_raise(SyntaxError) do
eval '"\C1"'
end
assert_equal("\x81", eval('"\C-\M-a"'))
assert_equal("\177", eval('"\c?"'))
end
def test_question
assert_raise(SyntaxError) { eval('?') }
assert_raise(SyntaxError) { eval('? ') }
assert_raise(SyntaxError) { eval("?\n") }
assert_raise(SyntaxError) { eval("?\t") }
assert_raise(SyntaxError) { eval("?\v") }
assert_raise(SyntaxError) { eval("?\r") }
assert_raise(SyntaxError) { eval("?\f") }
assert_equal("\u{1234}", eval("?\u{1234}"))
assert_equal("\u{1234}", eval('?\u{1234}'))
end
def test_percent
assert_equal(:foo, eval('%s(foo)'))
assert_raise(SyntaxError) { eval('%s') }
assert_raise(SyntaxError) { eval('%ss') }
assert_raise(SyntaxError) { eval('%z()') }
end
def test_symbol
bug = '[ruby-dev:41447]'
sym = "foo\0bar".to_sym
assert_nothing_raised(SyntaxError, bug) do
assert_equal(sym, eval(":'foo\0bar'"))
end
assert_nothing_raised(SyntaxError, bug) do
assert_equal(sym, eval(':"foo\u0000bar"'))
end
assert_nothing_raised(SyntaxError, bug) do
assert_equal(sym, eval(':"foo\u{0}bar"'))
end
assert_raise(SyntaxError) do
eval ':"foo\u{}bar"'
end
end
def test_parse_string
assert_raise(SyntaxError) do
eval <<-END
/
END
end
end
def test_here_document
x = nil
assert_raise(SyntaxError) do
eval %Q(
<\<FOO
)
end
assert_raise(SyntaxError) do
eval %q(
<<FOO
#$
FOO
)
end
assert_raise(SyntaxError) do
eval %Q(
<\<\"
)
end
assert_raise(SyntaxError) do
eval %q(
<<``
)
end
assert_raise(SyntaxError) do
eval %q(
<<--
)
end
assert_raise(SyntaxError) do
eval %q(
<<FOO
#$
foo
FOO
)
end
assert_nothing_raised do
eval "x = <<""FOO\r\n1\r\nFOO"
end
assert_equal("1\n", x)
end
def test_magic_comment
x = nil
assert_nothing_raised do
eval <<-END
# coding = utf-8
x = __ENCODING__
END
end
assert_equal(Encoding.find("UTF-8"), x)
assert_raise(ArgumentError) do
eval <<-END
# coding = foobarbazquxquux_dummy_enconding
x = __ENCODING__
END
end
end
def test_utf8_bom
x = nil
assert_nothing_raised do
eval "\xef\xbb\xbf x = __ENCODING__"
end
assert_equal(Encoding.find("UTF-8"), x)
assert_raise(NameError) { eval "\xef" }
end
def test_dot_in_next_line
x = nil
assert_nothing_raised do
eval <<-END
x = 1
.to_s
END
end
assert_equal("1", x)
end
def test_pow_asgn
x = 3
assert_nothing_raised { eval("x **= 2") }
assert_equal(9, x)
end
def test_embedded_rd
assert_raise(SyntaxError) do
eval <<-END
=begin
END
end
end
def test_float
assert_equal(1.0/0, eval("1e10000"))
assert_raise(SyntaxError) { eval('1_E') }
assert_raise(SyntaxError) { eval('1E1E1') }
end
def test_global_variable
assert_equal(nil, eval('$-x'))
assert_equal(nil, eval('alias $preserve_last_match $&'))
assert_equal(nil, eval('alias $& $test_parse_foobarbazqux'))
$test_parse_foobarbazqux = nil
assert_equal(nil, $&)
assert_equal(nil, eval('alias $& $preserve_last_match'))
assert_raise(SyntaxError) { eval('$#') }
end
def test_invalid_instance_variable
assert_raise(SyntaxError) { eval('@#') }
end
def test_invalid_class_variable
assert_raise(SyntaxError) { eval('@@1') }
end
def test_invalid_char
x = 1
assert_equal(1, eval("\x01x"))
assert_equal(nil, eval("\x04x"))
end
def test_literal_concat
x = "baz"
assert_equal("foobarbaz", eval('"foo" "bar#{x}"'))
end
def test_unassignable
assert_raise(SyntaxError) do
eval %q(self = 1)
end
assert_raise(SyntaxError) do
eval %q(nil = 1)
end
assert_raise(SyntaxError) do
eval %q(true = 1)
end
assert_raise(SyntaxError) do
eval %q(false = 1)
end
assert_raise(SyntaxError) do
eval %q(__FILE__ = 1)
end
assert_raise(SyntaxError) do
eval %q(__LINE__ = 1)
end
assert_raise(SyntaxError) do
eval %q(__ENCODING__ = 1)
end
assert_raise(SyntaxError) do
eval <<-END
def foo
FOO = 1
end
END
end
end
def test_block_dup
assert_raise(SyntaxError) do
eval <<-END
foo(&proc{}) {}
END
end
end
def test_set_backref
assert_raise(SyntaxError) do
eval <<-END
$& = 1
END
end
end
def test_arg_concat
o = Object.new
class << o; self; end.instance_eval do
define_method(:[]=) {|*r, &b| b.call(r) }
end
r = nil
assert_nothing_raised do
eval <<-END
o[&proc{|x| r = x }] = 1
END
end
assert_equal([1], r)
end
def test_void_expr_stmts_value
# This test checks if void contexts are warned correctly.
# Thus, warnings MUST NOT be suppressed.
$VERBOSE = true
stderr = $stderr
$stderr = StringIO.new("")
x = 1
assert_nil eval("x; nil")
assert_nil eval("1+1; nil")
assert_nil eval("TestParse; nil")
assert_nil eval("::TestParse; nil")
assert_nil eval("x..x; nil")
assert_nil eval("x...x; nil")
assert_nil eval("self; nil")
assert_nil eval("nil; nil")
assert_nil eval("true; nil")
assert_nil eval("false; nil")
assert_nil eval("defined?(1); nil")
assert_raise(SyntaxError) do
eval %q(1; next; 2)
end
o = Object.new
assert_nothing_raised do
eval <<-END
x = def o.foo; end
END
end
assert_equal($stderr.string.lines.to_a.size, 14)
$stderr = stderr
end
def test_assign_in_conditional
assert_raise(SyntaxError) do
eval <<-END
(x, y = 1, 2) ? 1 : 2
END
end
assert_nothing_raised do
eval <<-END
if @x = true
1
else
2
end
END
end
end
def test_literal_in_conditional
assert_nothing_raised do
eval <<-END
"foo" ? 1 : 2
END
end
assert_nothing_raised do
x = "bar"
eval <<-END
/foo#{x}baz/ ? 1 : 2
END
end
assert_nothing_raised do
eval <<-END
(true..false) ? 1 : 2
END
end
assert_nothing_raised do
eval <<-END
("foo".."bar") ? 1 : 2
END
end
assert_nothing_raised do
x = "bar"
eval <<-END
:"foo#{"x"}baz" ? 1 : 2
END
end
end
def test_no_blockarg
assert_raise(SyntaxError) do
eval <<-END
yield(&:+)
END
end
end
def test_intern
assert_equal(':""', ''.intern.inspect)
assert_equal(':$foo', '$foo'.intern.inspect)
assert_equal(':"!foo"', '!foo'.intern.inspect)
assert_equal(':"foo=="', "foo==".intern.inspect)
end
def test_all_symbols
x = Symbol.all_symbols
assert_kind_of(Array, x)
assert(x.all? {|s| s.is_a?(Symbol) })
end
def test_is_class_id
c = Class.new
assert_raise(NameError) do
c.instance_eval { remove_class_variable(:@var) }
end
end
end
| racker/omnibus | source/ruby-1.9.2-p180/test/ruby/test_parse.rb | Ruby | apache-2.0 | 16,055 | 18.390097 | 72 | 0.522329 | false |
/*
* Copyright (c) 2012 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.eclipse.jetty.spdy;
import java.nio.channels.SocketChannel;
import org.eclipse.jetty.io.AsyncEndPoint;
import org.eclipse.jetty.io.nio.AsyncConnection;
public interface AsyncConnectionFactory
{
public AsyncConnection newAsyncConnection(SocketChannel channel, AsyncEndPoint endPoint, Object attachment);
}
| jamiepg1/jetty.project | jetty-spdy/spdy-jetty/src/main/java/org/eclipse/jetty/spdy/AsyncConnectionFactory.java | Java | apache-2.0 | 944 | 33.962963 | 112 | 0.769068 | false |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_CPU_VECTOR_SUPPORT_LIBRARY_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_CPU_VECTOR_SUPPORT_LIBRARY_H_
#include <string>
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Value.h"
#include "tensorflow/compiler/xla/primitive_util.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
namespace xla {
namespace cpu {
// Simple wrappers around llvm::APFloat::APFloat to make the calling code more
// obvious.
inline llvm::APFloat GetIeeeF32(float f) { return llvm::APFloat(f); }
inline llvm::APFloat GetIeeeF32FromBitwiseRep(int32 bitwise_value) {
return llvm::APFloat(llvm::APFloat::IEEEsingle(),
llvm::APInt(/*numBits=*/32, /*val=*/bitwise_value));
}
// A thin wrapper around llvm_util.h to make code generating vector math flow
// more readable.
class VectorSupportLibrary {
public:
// This VectorSupportLibrary instance remembers `primitive_type` and
// `vector_size`, and these are implicitly used by the methods on this
// instance (i.e. LoadVector will load a vector of type <`vector_size` x
// `primitive_type`>).
VectorSupportLibrary(PrimitiveType primitive_type, int64 vector_size,
llvm::IRBuilder<>* ir_builder, std::string name);
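// Illustrative usage sketch (hedged): `b` (an llvm::IRBuilder<>*) and `ptr` are hypothetical values
// supplied by the caller; this only demonstrates the implicit <vector_size x primitive_type> typing
// described above.
//
//   VectorSupportLibrary vsl(F32, /*vector_size=*/8, b, "dot_product");
//   llvm::Value* v = vsl.LoadVector(ptr);                 // loads an <8 x float>
//   llvm::Value* doubled = vsl.Mul(GetIeeeF32(2.0f), v);  // splat-multiply by 2.0f
//   vsl.StoreVector(doubled, ptr);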
llvm::Value* Mul(llvm::Value* lhs, llvm::Value* rhs);
llvm::Value* Mul(int64 lhs, llvm::Value* rhs) {
return Mul(ir_builder()->getInt64(lhs), rhs);
}
llvm::Value* Mul(const llvm::APFloat& lhs, llvm::Value* rhs) {
return Mul(GetConstantFloat(rhs->getType(), lhs), rhs);
}
// If your call resolved to these then you probably wanted the versions taking
// APFloat.
llvm::Value* Mul(double lhs, llvm::Value* rhs) = delete;
llvm::Value* Mul(float lhs, llvm::Value* rhs) = delete;
llvm::Value* Add(llvm::Value* lhs, llvm::Value* rhs);
llvm::Value* Add(int64 lhs, llvm::Value* rhs) {
return Add(ir_builder()->getInt64(lhs), rhs);
}
llvm::Value* Add(const llvm::APFloat& lhs, llvm::Value* rhs) {
return Add(GetConstantFloat(rhs->getType(), lhs), rhs);
}
// If your call resolved to these then you probably wanted the versions taking
// APFloat.
llvm::Value* Add(double lhs, llvm::Value* rhs) = delete;
llvm::Value* Add(float lhs, llvm::Value* rhs) = delete;
llvm::Value* Sub(llvm::Value* lhs, llvm::Value* rhs);
llvm::Value* Sub(llvm::Value* lhs, const llvm::APFloat& rhs) {
return Sub(lhs, GetConstantFloat(lhs->getType(), rhs));
}
llvm::Value* Max(llvm::Value* lhs, llvm::Value* rhs);
llvm::Value* Max(const llvm::APFloat& lhs, llvm::Value* rhs) {
return Max(GetConstantFloat(rhs->getType(), lhs), rhs);
}
llvm::Value* Div(llvm::Value* lhs, llvm::Value* rhs);
llvm::Value* MulAdd(llvm::Value* a, llvm::Value* b, llvm::Value* c) {
return Add(c, Mul(a, b));
}
llvm::Value* MulAdd(llvm::Value* a, llvm::Value* b, const llvm::APFloat& c) {
return Add(GetConstantFloat(vector_type(), c), Mul(a, b));
}
llvm::Value* MulAdd(llvm::Value* a, const llvm::APFloat& b,
const llvm::APFloat& c) {
return Add(GetConstantFloat(a->getType(), c),
Mul(a, GetConstantFloat(a->getType(), b)));
}
llvm::Value* Floor(llvm::Value* a);
llvm::Value* Clamp(llvm::Value* a, const llvm::APFloat& low,
const llvm::APFloat& high);
llvm::Value* SplatFloat(const llvm::APFloat& d) {
return GetConstantFloat(vector_type(), d);
}
// These compare instructions return a floating point typed mask instead of an
// i1. For instance, on a vector typed input, lanes where the predicate is
// true get a float with all ones and other lanes get a float with all zeros.
// This is slightly odd from the perspective of LLVM's type system, but it
// makes kernel IR generation code written using VectorSupportLibrary (its
// raison d'etre) less cluttered.
llvm::Value* FCmpEQMask(llvm::Value* lhs, llvm::Value* rhs);
llvm::Value* FCmpULEMask(llvm::Value* lhs, llvm::Value* rhs);
llvm::Value* FCmpOLTMask(llvm::Value* lhs, llvm::Value* rhs);
llvm::Value* FCmpOLTMask(llvm::Value* lhs, const llvm::APFloat& rhs) {
return FCmpOLTMask(lhs, GetConstantFloat(lhs->getType(), rhs));
}
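// E.g. (hedged sketch, using the FloatAndNot helper declared below): a branch-free
// "zero out negative lanes" can be expressed as
//   llvm::Value* negative_mask = FCmpOLTMask(x, GetIeeeF32(0.0f));  // all-ones where x < 0
//   llvm::Value* result = FloatAndNot(negative_mask, x);            // keep lanes where x >= 0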
// These boolean operations operate on the bitwise values of the floating
// point inputs. They return a (vector of) float(s) but like in the mask
// generating predicates above this type system oddity makes the kernel IR
// generation code less cluttered.
llvm::Value* FloatAnd(llvm::Value* lhs, llvm::Value* rhs);
llvm::Value* FloatAnd(llvm::Value* lhs, const llvm::APFloat& rhs) {
return FloatAnd(lhs, GetConstantFloat(lhs->getType(), rhs));
}
llvm::Value* FloatOr(llvm::Value* lhs, llvm::Value* rhs);
llvm::Value* FloatOr(llvm::Value* lhs, const llvm::APFloat& rhs) {
return FloatOr(lhs, GetConstantFloat(lhs->getType(), rhs));
}
llvm::Value* FloatNot(llvm::Value* lhs);
llvm::Value* FloatAndNot(llvm::Value* lhs, llvm::Value* rhs) {
return FloatAnd(FloatNot(lhs), rhs);
}
llvm::Value* BroadcastScalar(llvm::Value* x);
llvm::Value* BroadcastScalar(const llvm::APFloat& d) {
return BroadcastScalar(GetConstantFloat(scalar_type(), d));
}
llvm::Value* ComputeOffsetPointer(llvm::Value* base_pointer,
llvm::Value* offset_elements);
llvm::Value* ComputeOffsetPointer(llvm::Value* base_pointer,
llvm::Value* offset_elements, int64 scale) {
return ComputeOffsetPointer(
base_pointer,
ir_builder_->CreateMul(ir_builder_->getInt64(scale), offset_elements));
}
llvm::Value* ComputeOffsetPointer(llvm::Value* base_pointer,
int64 offset_elements) {
return ComputeOffsetPointer(base_pointer,
ir_builder()->getInt64(offset_elements));
}
llvm::Value* LoadVector(llvm::Value* pointer);
llvm::Value* LoadVector(llvm::Value* base_pointer,
llvm::Value* offset_elements) {
return LoadVector(ComputeOffsetPointer(base_pointer, offset_elements));
}
llvm::Value* LoadVector(llvm::Value* base_pointer, int64 offset_elements) {
return LoadVector(base_pointer, ir_builder()->getInt64(offset_elements));
}
llvm::Value* LoadScalar(llvm::Value* pointer);
llvm::Value* LoadScalar(llvm::Value* base_pointer,
llvm::Value* offset_elements) {
return LoadScalar(ComputeOffsetPointer(base_pointer, offset_elements));
}
llvm::Value* LoadScalar(llvm::Value* base_pointer, int64 offset_elements) {
return LoadScalar(base_pointer, ir_builder()->getInt64(offset_elements));
}
void StoreVector(llvm::Value* value, llvm::Value* pointer);
void StoreVector(llvm::Value* value, llvm::Value* base_pointer,
llvm::Value* offset_elements) {
StoreVector(value, ComputeOffsetPointer(base_pointer, offset_elements));
}
void StoreVector(llvm::Value* value, llvm::Value* base_pointer,
int64 offset_elements) {
StoreVector(value, base_pointer, ir_builder()->getInt64(offset_elements));
}
void StoreScalar(llvm::Value* value, llvm::Value* pointer);
void StoreScalar(llvm::Value* value, llvm::Value* base_pointer,
llvm::Value* offset_elements) {
StoreScalar(value, ComputeOffsetPointer(base_pointer, offset_elements));
}
void StoreScalar(llvm::Value* value, llvm::Value* base_pointer,
int64 offset_elements) {
StoreScalar(value, base_pointer, ir_builder()->getInt64(offset_elements));
}
llvm::Value* LoadBroadcast(llvm::Value* pointer);
llvm::Value* LoadBroadcast(llvm::Value* base_pointer,
llvm::Value* offset_elements) {
return LoadBroadcast(ComputeOffsetPointer(base_pointer, offset_elements));
}
llvm::Value* LoadBroadcast(llvm::Value* base_pointer, int64 offset_elements) {
return LoadBroadcast(base_pointer, ir_builder()->getInt64(offset_elements));
}
// Compute the horizontal sum of each vector in `vectors`. The i'th element
// in the result vector is the (scalar) horizontal sum of the i'th vector in
// `vectors`. If `init_values` is not nullptr then the value in the i'th lane
// in `init_values` is added to the i'th horizontal sum.
std::vector<llvm::Value*> ComputeHorizontalSums(
std::vector<llvm::Value*> vectors, llvm::Value* init_values = nullptr);
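// E.g. (hedged sketch): with vector_size() == 4 and vectors = {a, b}, the call returns two scalar
// values, a[0]+a[1]+a[2]+a[3] and b[0]+b[1]+b[2]+b[3]; if `init_values` is given, lane 0 and lane 1
// of it are added to the first and second sum respectively.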
llvm::Value* GetZeroVector();
llvm::Value* GetZeroScalar();
llvm::IRBuilder<>* ir_builder() const { return ir_builder_; }
int64 vector_size() const { return vector_size_; }
llvm::Type* vector_type() const { return vector_type_; }
llvm::Type* vector_pointer_type() const { return vector_pointer_type_; }
llvm::Type* scalar_type() const { return scalar_type_; }
llvm::Type* scalar_pointer_type() const { return scalar_pointer_type_; }
int64 scalar_byte_size() const {
return primitive_util::BitWidth(primitive_type_) / 8;
}
const std::string& name() const { return name_; }
private:
llvm::Value* ExtractLowHalf(llvm::Value*);
llvm::Value* ExtractHighHalf(llvm::Value*);
llvm::Value* MulInternal(llvm::Value* lhs, llvm::Value* rhs);
llvm::Value* AddInternal(llvm::Value* lhs, llvm::Value* rhs);
llvm::Value* AddReduce(llvm::Value* vector);
// Checks that each value in `values` is either of type scalar_type() or
// vector_type(). This LOG(FATAL)'s so it should only be called in cases
// where a mismatching type is a programmer bug.
void AssertCorrectTypes(std::initializer_list<llvm::Value*> values);
// Perform an X86 AVX style horizontal add between `lhs` and `rhs`. The
// resulting IR for an 8-float wide vector is expected to lower to a single
// vhaddps instruction on a CPU that supports vhaddps, and not be too bad in
// other cases.
//
// For a vector width of 8, the result vector is computed as:
// Result[0] = Lhs[0] + Lhs[1]
// Result[1] = Lhs[2] + Lhs[3]
// Result[2] = Rhs[0] + Rhs[1]
// Result[3] = Rhs[2] + Rhs[3]
// Result[4] = Lhs[4] + Lhs[5]
// Result[5] = Lhs[6] + Lhs[7]
// Result[6] = Rhs[4] + Rhs[5]
// Result[7] = Rhs[6] + Rhs[7]
llvm::Value* AvxStyleHorizontalAdd(llvm::Value* lhs, llvm::Value* rhs);
std::vector<llvm::Value*> ComputeAvxOptimizedHorizontalSums(
std::vector<llvm::Value*> vectors, llvm::Value* init_values);
llvm::Type* IntegerTypeForFloatSize(bool vector);
llvm::Value* I1ToFloat(llvm::Value* i1);
llvm::Value* GetConstantFloat(llvm::Type* type, const llvm::APFloat& f) {
llvm::Constant* scalar_value = llvm::ConstantFP::get(type->getContext(), f);
if (llvm::isa<llvm::VectorType>(type)) {
return llvm::ConstantVector::getSplat(vector_size(), scalar_value);
}
return scalar_value;
}
int64 vector_size_;
PrimitiveType primitive_type_;
llvm::IRBuilder<>* ir_builder_;
llvm::Type* vector_type_;
llvm::Type* vector_pointer_type_;
llvm::Type* scalar_type_;
llvm::Type* scalar_pointer_type_;
std::string name_;
};
// This wraps an alloca-backed stack variable which LLVM's SSA construction pass
// can later convert to a SSA value.
class LlvmVariable {
public:
LlvmVariable(llvm::Type*, llvm::IRBuilder<>* ir_builder);
llvm::Value* Get() const;
void Set(llvm::Value* new_value);
private:
llvm::AllocaInst* alloca_;
llvm::IRBuilder<>* ir_builder_;
};
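// Illustrative usage sketch (hedged): `vsl` and `ptr` are hypothetical, and ScalarVariable is the
// convenience subclass defined below. Because the variable is alloca-backed, it can be reassigned
// inside generated loop bodies and later promoted to an SSA value as described above.
//
//   ScalarVariable accumulator(&vsl, vsl.GetZeroScalar());
//   // ... inside a generated loop body ...
//   accumulator.Set(vsl.Add(accumulator.Get(), vsl.LoadScalar(ptr)));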
class VectorVariable : public LlvmVariable {
public:
VectorVariable(VectorSupportLibrary* vector_support,
llvm::Value* initial_value)
: LlvmVariable(vector_support->vector_type(),
vector_support->ir_builder()) {
Set(initial_value);
}
};
class ScalarVariable : public LlvmVariable {
public:
ScalarVariable(VectorSupportLibrary* vector_support,
llvm::Value* initial_value)
: LlvmVariable(vector_support->scalar_type(),
vector_support->ir_builder()) {
Set(initial_value);
}
};
} // namespace cpu
} // namespace xla
#endif // TENSORFLOW_COMPILER_XLA_SERVICE_CPU_VECTOR_SUPPORT_LIBRARY_H_
| nburn42/tensorflow | tensorflow/compiler/xla/service/cpu/vector_support_library.h | C | apache-2.0 | 12,751 | 38.47678 | 80 | 0.670457 | false |
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using Google.Cloud.Diagnostics.Common;
using Microsoft.AspNetCore.Hosting;
using Microsoft.AspNetCore.Http;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using System;
#if NETCOREAPP3_1
namespace Google.Cloud.Diagnostics.AspNetCore3
#elif NETSTANDARD2_0
namespace Google.Cloud.Diagnostics.AspNetCore
#else
#error unknown target framework
#endif
{
/// <summary>
/// Extensions for configuring Google Cloud Trace in ASP.NET Core applications.
/// </summary>
public static class AspNetCoreTraceExtensions
{
/// <summary>
/// Configures Google Cloud Tracing for ASP.NET Core applications.
/// </summary>
public static IServiceCollection AddGoogleTraceForAspNetCore(this IServiceCollection services, AspNetCoreTraceOptions options = null) =>
#pragma warning disable CS0618 // Type or member is obsolete
services.AddGoogleTraceForAspNetCore(true, options);
#pragma warning restore CS0618 // Type or member is obsolete
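// Illustrative registration sketch (hedged): the Startup method shape, the TraceServiceOptions type
// name, and the ProjectId value are assumptions for demonstration, not taken from this file.
//
// public void ConfigureServices(IServiceCollection services) =>
//     services.AddGoogleTraceForAspNetCore(new AspNetCoreTraceOptions
//     {
//         ServiceOptions = new TraceServiceOptions { ProjectId = "my-gcp-project" }
//     });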
[Obsolete("This was added to avoid code duplication between the obsolete extension methods and the new one.")]
internal static IServiceCollection AddGoogleTraceForAspNetCore(this IServiceCollection services, bool registerMiddleware, AspNetCoreTraceOptions options)
{
services.AddGoogleTrace(options?.ServiceOptions);
services.AddSingleton(options?.TraceFallbackPredicate ?? TraceDecisionPredicate.Default);
// We use TryAdd... here to allow user code to inject their own trace context provider
// and matching trace context response propagator. We use Google trace header otherwise.
services.TryAddGoogleTraceContextProvider();
services.TryAddSingleton<Action<HttpResponse, ITraceContext>>(PropagateGoogleTraceHeaders);
// Obsolete: Adding this for backwards compatibility in case someone is using the old factory type.
// The new and prefered factory type is Func<ITraceContext, IManagedTracer> which is being added by Common.
services.AddSingleton<Func<TraceHeaderContext, IManagedTracer>>(sp => sp.GetRequiredService<Func<ITraceContext, IManagedTracer>>());
services.AddHttpContextAccessor();
services.AddTransient<ICloudTraceNameProvider, DefaultCloudTraceNameProvider>();
if (registerMiddleware)
{
// This registers the trace middleware so users don't have to.
services.AddSingleton<IStartupFilter, AspNetCoreTraceStartupFilter>();
}
return services;
}
/// <summary>
/// Adds the services needed for obtaining the trace context from Google's own trace header,
/// but only if no other trace context provider is registered.
/// If you are using <see cref="AddGoogleTraceForAspNetCore(IServiceCollection, AspNetCoreTraceOptions)"/>
/// you don't need to call this method. Only use this method if you want to extract the trace context
/// information from Google's own header for your own code to use, or if you are not using the tracing
/// component of this library but are using the logging component and want the trace context information
/// to be associated with the log entries.
/// </summary>
public static IServiceCollection TryAddGoogleTraceContextProvider(this IServiceCollection services)
{
// We use TryAdd... here to allow user code to inject their own trace context provider
// and matching trace context response propagator. We use Google trace header otherwise.
services.TryAddScoped<ITraceContext>(ProvideGoogleTraceHeaderContext);
return services;
}
/// <summary>
/// Creates an <see cref="TraceHeaderContext"/> based on the current <see cref="HttpContext"/>
/// and a <see cref="TraceDecisionPredicate"/>.
/// Used by default to obtain trace context, if user code has not specified a trace context provider.
/// </summary>
internal static TraceHeaderContext ProvideGoogleTraceHeaderContext(IServiceProvider serviceProvider)
{
var accessor = serviceProvider.GetRequiredService<IHttpContextAccessor>();
string header = accessor.HttpContext?.Request?.Headers[TraceHeaderContext.TraceHeader];
return TraceHeaderContext.FromHeader(header);
}
/// <summary>
/// Propagates Google trace context information to the response.
/// Used by default if user code has not specified a propagator of their own.
/// </summary>
internal static void PropagateGoogleTraceHeaders(HttpResponse response, ITraceContext traceContext)
{
var googleHeader = TraceHeaderContext.Create(traceContext.TraceId, traceContext.SpanId ?? 0, traceContext.ShouldTrace);
response.Headers.Add(TraceHeaderContext.TraceHeader, googleHeader.ToString());
}
}
}
| googleapis/google-cloud-dotnet | apis/Google.Cloud.Diagnostics.AspNetCore/Google.Cloud.Diagnostics.AspNetCore/Trace/AspNetCoreTraceExtensions.cs | C# | apache-2.0 | 5,657 | 50.409091 | 161 | 0.710168 | false |
/*
* Copyright (c) 2015 AsyncHttpClient Project. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package org.asynchttpclient.extras.rxjava;
import org.asynchttpclient.AsyncCompletionHandler;
import org.asynchttpclient.BoundRequestBuilder;
import org.asynchttpclient.Response;
import rx.Observable;
import rx.Subscriber;
import rx.functions.Func0;
import rx.subjects.ReplaySubject;
/**
* Provide RxJava support for executing requests. Request can be subscribed to and manipulated as needed.
*
* @see <a href="https://github.com/ReactiveX/RxJava">https://github.com/ReactiveX/RxJava</a>
*/
public class AsyncHttpObservable {
/**
* Observe a request execution and emit the response to the observer.
*
* @param supplier the supplier
* @return The cold observable (must be subscribed to in order to execute).
*/
public static Observable<Response> toObservable(final Func0<BoundRequestBuilder> supplier) {
//Get the builder from the function
final BoundRequestBuilder builder = supplier.call();
//create the observable from scratch
return Observable.create(new Observable.OnSubscribe<Response>() {
@Override
public void call(final Subscriber<? super Response> subscriber) {
try {
AsyncCompletionHandler<Void> handler = new AsyncCompletionHandler<Void>() {
@Override
public Void onCompleted(Response response) throws Exception {
subscriber.onNext(response);
subscriber.onCompleted();
return null;
}
@Override
public void onThrowable(Throwable t) {
subscriber.onError(t);
}
};
//execute the request
builder.execute(handler);
} catch (Throwable t) {
subscriber.onError(t);
}
}
});
}
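// Illustrative usage sketch (hedged): the client variable is hypothetical and its setup is elided;
// this only shows how the cold observable returned above is typically subscribed to.
//
//   AsyncHttpClient client = ...;
//   AsyncHttpObservable
//           .toObservable(() -> client.prepareGet("https://example.org/"))
//           .subscribe(response -> System.out.println(response.getStatusCode()));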
/**
* Observe a request execution and emit the response to the observer.
*
* @param supplier the supplier
* @return The hot observable (eagerly executes).
*/
public static Observable<Response> observe(final Func0<BoundRequestBuilder> supplier) {
//use a ReplaySubject to buffer the eagerly subscribed-to Observable
ReplaySubject<Response> subject = ReplaySubject.create();
//eagerly kick off subscription
toObservable(supplier).subscribe(subject);
//return the subject that can be subscribed to later while the execution has already started
return subject;
}
}
| bomgar/async-http-client | extras/rxjava/src/main/java/org/asynchttpclient/extras/rxjava/AsyncHttpObservable.java | Java | apache-2.0 | 3,358 | 37.597701 | 114 | 0.639071 | false |
#!/bin/bash
# Usage: build-trik-studio-pioneer.sh <path-to-qt> <path-to-qt-installer-framework> [tag]. See build-installer.sh for more info.
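# Example invocation (the paths and tag below are illustrative placeholders, not project defaults):
#   ./build-trik-studio-pioneer.sh ~/Qt/5.12/gcc_64 ~/Qt/QtIFW-3.2.2 my-build-tag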
set -euo pipefail
cd "$(dirname "$0")"
export QREAL_BUILD_TAG=${3:-}
[ $OSTYPE == "linux-gnu" ] && EXCLUDE="-e ru.qreal.root.associations" || EXCLUDE=
grep -q "darwin" <<< $OSTYPE && EXCLUDE="-e ru.qreal.root.associations" || :
grep -q "darwin" <<< $OSTYPE && export PRODUCT_DISPLAYED_NAME="TRIK Studio PIONEER" || :
chmod +x $PWD/build-installer.sh && $PWD/build-installer.sh $1 $2 trik-studio-pioneer $EXCLUDE
| iakov/qreal | installer/build-trik-studio-pioneer.sh | Shell | apache-2.0 | 557 | 41.846154 | 128 | 0.67684 | false |
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// min-lldb-version: 310
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:run
// STACK BY REF
// gdb-command:print *self
// gdb-check:$1 = {x = 100}
// gdb-command:print arg1
// gdb-check:$2 = -1
// gdb-command:print arg2
// gdb-check:$3 = -2
// gdb-command:continue
// STACK BY VAL
// gdb-command:print self
// gdb-check:$4 = {x = 100}
// gdb-command:print arg1
// gdb-check:$5 = -3
// gdb-command:print arg2
// gdb-check:$6 = -4
// gdb-command:continue
// OWNED BY REF
// gdb-command:print *self
// gdb-check:$7 = {x = 200}
// gdb-command:print arg1
// gdb-check:$8 = -5
// gdb-command:print arg2
// gdb-check:$9 = -6
// gdb-command:continue
// OWNED BY VAL
// gdb-command:print self
// gdb-check:$10 = {x = 200}
// gdb-command:print arg1
// gdb-check:$11 = -7
// gdb-command:print arg2
// gdb-check:$12 = -8
// gdb-command:continue
// OWNED MOVED
// gdb-command:print *self
// gdb-check:$13 = {x = 200}
// gdb-command:print arg1
// gdb-check:$14 = -9
// gdb-command:print arg2
// gdb-check:$15 = -10
// gdb-command:continue
// === LLDB TESTS ==================================================================================
// lldb-command:run
// STACK BY REF
// lldb-command:print *self
// lldb-check:[...]$0 = Struct { x: 100 }
// lldb-command:print arg1
// lldb-check:[...]$1 = -1
// lldb-command:print arg2
// lldb-check:[...]$2 = -2
// lldb-command:continue
// STACK BY VAL
// lldb-command:print self
// lldb-check:[...]$3 = Struct { x: 100 }
// lldb-command:print arg1
// lldb-check:[...]$4 = -3
// lldb-command:print arg2
// lldb-check:[...]$5 = -4
// lldb-command:continue
// OWNED BY REF
// lldb-command:print *self
// lldb-check:[...]$6 = Struct { x: 200 }
// lldb-command:print arg1
// lldb-check:[...]$7 = -5
// lldb-command:print arg2
// lldb-check:[...]$8 = -6
// lldb-command:continue
// OWNED BY VAL
// lldb-command:print self
// lldb-check:[...]$9 = Struct { x: 200 }
// lldb-command:print arg1
// lldb-check:[...]$10 = -7
// lldb-command:print arg2
// lldb-check:[...]$11 = -8
// lldb-command:continue
// OWNED MOVED
// lldb-command:print *self
// lldb-check:[...]$12 = Struct { x: 200 }
// lldb-command:print arg1
// lldb-check:[...]$13 = -9
// lldb-command:print arg2
// lldb-check:[...]$14 = -10
// lldb-command:continue
#![feature(box_syntax)]
#![omit_gdb_pretty_printer_section]
#[derive(Copy)]
struct Struct {
x: int
}
trait Trait : Sized {
fn self_by_ref(&self, arg1: int, arg2: int) -> int {
zzz(); // #break
arg1 + arg2
}
fn self_by_val(self, arg1: int, arg2: int) -> int {
zzz(); // #break
arg1 + arg2
}
fn self_owned(self: Box<Self>, arg1: int, arg2: int) -> int {
zzz(); // #break
arg1 + arg2
}
}
impl Trait for Struct {}
fn main() {
let stack = Struct { x: 100 };
let _ = stack.self_by_ref(-1, -2);
let _ = stack.self_by_val(-3, -4);
let owned: Box<_> = box Struct { x: 200 };
let _ = owned.self_by_ref(-5, -6);
let _ = owned.self_by_val(-7, -8);
let _ = owned.self_owned(-9, -10);
}
fn zzz() {()}
|
AerialX/rust-rt-minimal
|
src/test/debuginfo/self-in-default-method.rs
|
Rust
|
apache-2.0
| 3,622 | 22.673203 | 100 | 0.575925 | false |
import { Subject } from 'rxjs/Subject';
import { AfterContentInit, Component, ViewChild, Input, Output } from '@angular/core';
declare var monaco;
@Component({
selector: 'text-editor',
templateUrl: './text-editor.component.html',
styleUrls: ['./text-editor.component.scss'],
})
export class TextEditorComponent implements AfterContentInit {
@ViewChild('textEditorContainer')
container;
@Output()
public onContentChanged = new Subject<string>();
private _editor: any;
private _language: string;
private _content = '{}';
private _disabled = false;
private _theme = 'vs';
private _silent = false; // When true, editor doesn't emit events on content change
public opacity = '1';
constructor() {}
ngAfterContentInit() {
this._init();
}
private _init() {
let onGotAmdLoader = () => {
(<any>window).require.config({ paths: { vs: 'assets/monaco/min/vs' } });
(<any>window).require(['vs/editor/editor.main'], () => {
let that = this;
if (that._editor) {
that._editor.dispose();
}
// compiler options
monaco.languages.typescript.javascriptDefaults.setCompilerOptions({
target: monaco.languages.typescript.ScriptTarget.ES2015,
});
that._editor = monaco.editor.create(that.container.nativeElement, {
value: that._content,
language: that._language,
readOnly: that._disabled,
lineHeight: 17,
theme: this._theme === 'dark' ? 'vs-dark' : 'vs',
minimap: {
enabled: false,
},
});
this.opacity = this._disabled ? '0.5' : '1';
that._editor.onDidChangeModelContent(() => {
if (!that._silent) {
that.onContentChanged.next(that._editor.getValue());
}
});
that.resize();
});
};
// Load AMD loader if necessary
if (!(<any>window).require) {
let loaderScript = document.createElement('script');
loaderScript.type = 'text/javascript';
loaderScript.src = 'assets/monaco/vs/loader.js';
loaderScript.addEventListener('load', onGotAmdLoader);
document.body.appendChild(loaderScript);
} else {
onGotAmdLoader();
}
}
@Input()
set content(str: string) {
if (!str) {
str = '';
}
if (this._editor && this._editor.getValue() === str) {
return;
}
this._content = str;
if (this._editor) {
this._silent = true;
this._editor.setValue(this._content);
this._silent = false;
}
}
@Input('fileName')
set fileName(filename: string) {
const extension = filename
.split('.')
.pop()
.toLocaleLowerCase();
switch (extension) {
case 'bat':
this._language = 'bat';
break;
case 'csx':
this._language = 'csharp';
break;
case 'fsx':
this._language = 'fsharp';
break;
case 'js':
this._language = 'javascript';
break;
case 'json':
this._language = 'json';
break;
case 'ps1':
this._language = 'powershell';
break;
case 'py':
this._language = 'python';
break;
case 'ts':
this._language = 'typescript';
break;
// Monaco does not have sh, php
default:
this._language = undefined;
break;
}
if (this._editor) {
this._init();
// This does not work for JSON
// monaco.editor.setModelLanguage(this._editor.getModel(), this._language);
}
}
private _setLayout(width?: number, height?: number) {
if (this._editor) {
let layout = this._editor.getLayoutInfo();
this._editor.layout({
width: width ? width : layout.width,
height: height ? height : layout.height,
});
}
}
public resize() {
this._setLayout(100, 100);
setTimeout(() => {
const width = this.container.nativeElement.clientWidth;
const height = this.container.nativeElement.clientHeight;
this._setLayout(width - 4, height - 4);
});
}
}
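// Usage sketch (hypothetical host template, not part of this component):
//
//   <text-editor fileName="sample.ts"
//                [content]="code"
//                (onContentChanged)="code = $event">
//   </text-editor>
//
// The host is expected to call resize() after layout changes so Monaco re-measures its container.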
|
projectkudu/WebJobsPortal
|
client/src/app/controls/text-editor/text-editor.component.ts
|
TypeScript
|
apache-2.0
| 4,106 | 24.036585 | 86 | 0.566245 | false |
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// no-pretty-expanded FIXME #15189
// ignore-windows FIXME #13259
#![feature(unboxed_closures)]
#![feature(unsafe_destructor)]
use std::os;
use std::io::process::Command;
use std::str;
use std::ops::{Drop, FnMut, FnOnce};
#[inline(never)]
fn foo() {
let _v = vec![1i, 2, 3];
if os::getenv("IS_TEST").is_some() {
panic!()
}
}
#[inline(never)]
fn double() {
struct Double;
impl Drop for Double {
fn drop(&mut self) { panic!("twice") }
}
let _d = Double;
panic!("once");
}
fn runtest(me: &str) {
let mut template = Command::new(me);
template.env("IS_TEST", "1");
// Make sure that the stack trace is printed
let p = template.clone().arg("fail").env("RUST_BACKTRACE", "1").spawn().unwrap();
let out = p.wait_with_output().unwrap();
assert!(!out.status.success());
let s = str::from_utf8(out.error.as_slice()).unwrap();
assert!(s.contains("stack backtrace") && s.contains("foo::h"),
"bad output: {}", s);
// Make sure the stack trace is *not* printed
let p = template.clone().arg("fail").spawn().unwrap();
let out = p.wait_with_output().unwrap();
assert!(!out.status.success());
let s = str::from_utf8(out.error.as_slice()).unwrap();
assert!(!s.contains("stack backtrace") && !s.contains("foo::h"),
"bad output2: {}", s);
// Make sure a stack trace is printed
let p = template.clone().arg("double-fail").spawn().unwrap();
let out = p.wait_with_output().unwrap();
assert!(!out.status.success());
let s = str::from_utf8(out.error.as_slice()).unwrap();
// loosened the following from double::h to double:: due to
// spurious failures on mac, 32bit, optimized
assert!(s.contains("stack backtrace") && s.contains("double::"),
"bad output3: {}", s);
// Make sure a stack trace isn't printed too many times
let p = template.clone().arg("double-fail")
.env("RUST_BACKTRACE", "1").spawn().unwrap();
let out = p.wait_with_output().unwrap();
assert!(!out.status.success());
let s = str::from_utf8(out.error.as_slice()).unwrap();
let mut i = 0;
for _ in range(0i, 2) {
i += s.slice_from(i + 10).find_str("stack backtrace").unwrap() + 10;
}
assert!(s.slice_from(i + 10).find_str("stack backtrace").is_none(),
"bad output4: {}", s);
}
fn main() {
let args = os::args();
let args = args.as_slice();
if args.len() >= 2 && args[1].as_slice() == "fail" {
foo();
} else if args.len() >= 2 && args[1].as_slice() == "double-fail" {
double();
} else {
runtest(args[0].as_slice());
}
}
|
defuz/rust
|
src/test/run-pass/backtrace.rs
|
Rust
|
apache-2.0
| 3,146 | 31.43299 | 85 | 0.591545 | false |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/iot/model/UpdateDomainConfigurationRequest.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::IoT::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
UpdateDomainConfigurationRequest::UpdateDomainConfigurationRequest() :
m_domainConfigurationNameHasBeenSet(false),
m_authorizerConfigHasBeenSet(false),
m_domainConfigurationStatus(DomainConfigurationStatus::NOT_SET),
m_domainConfigurationStatusHasBeenSet(false),
m_removeAuthorizerConfig(false),
m_removeAuthorizerConfigHasBeenSet(false)
{
}
Aws::String UpdateDomainConfigurationRequest::SerializePayload() const
{
JsonValue payload;
if(m_authorizerConfigHasBeenSet)
{
payload.WithObject("authorizerConfig", m_authorizerConfig.Jsonize());
}
if(m_domainConfigurationStatusHasBeenSet)
{
payload.WithString("domainConfigurationStatus", DomainConfigurationStatusMapper::GetNameForDomainConfigurationStatus(m_domainConfigurationStatus));
}
if(m_removeAuthorizerConfigHasBeenSet)
{
payload.WithBool("removeAuthorizerConfig", m_removeAuthorizerConfig);
}
return payload.View().WriteReadable();
}
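// Example of the JSON produced by SerializePayload() (field values are illustrative):
//   {"domainConfigurationStatus":"ENABLED","removeAuthorizerConfig":false}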
|
awslabs/aws-sdk-cpp
|
aws-cpp-sdk-iot/source/model/UpdateDomainConfigurationRequest.cpp
|
C++
|
apache-2.0
| 1,303 | 24.509804 | 150 | 0.782475 | false |
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
// General Information about an assembly is controlled through the following
// set of attributes. Change these attribute values to modify the information
// associated with an assembly.
[assembly: AssemblyTitle("Commands.Intune")]
[assembly: AssemblyDescription("")]
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("")]
[assembly: AssemblyProduct("Commands.Intune")]
[assembly: AssemblyCopyright("Copyright © 2015")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
// Setting ComVisible to false makes the types in this assembly not visible
// to COM components. If you need to access a type in this assembly from
// COM, set the ComVisible attribute to true on that type.
[assembly: ComVisible(false)]
// The following GUID is for the ID of the typelib if this project is exposed to COM
[assembly: Guid("8d29ab35-83dd-42a6-a10e-6c146ed64425")]
// Version information for an assembly consists of the following four values:
//
// Major Version
// Minor Version
// Build Number
// Revision
//
// You can specify all the values or you can default the Build and Revision Numbers
// by using the '*' as shown below:
[assembly: AssemblyVersion("1.0.10")]
[assembly: AssemblyFileVersion("1.0.10")]
|
hovsepm/azure-powershell
|
src/ResourceManager/Intune/Commands.Intune/Properties/AssemblyInfo.cs
|
C#
|
apache-2.0
| 1,365 | 36.833333 | 84 | 0.748899 | false |
/*
* Copyright 2004,2005 The Apache Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.rahas.impl;
import org.apache.axiom.om.OMElement;
import org.apache.axiom.om.OMNode;
import org.apache.axiom.soap.SOAPEnvelope;
import org.apache.axis2.context.MessageContext;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.rahas.RahasConstants;
import org.apache.rahas.RahasData;
import org.apache.rahas.Token;
import org.apache.rahas.TokenIssuer;
import org.apache.rahas.TrustException;
import org.apache.rahas.TrustUtil;
import org.apache.rahas.impl.util.*;
import org.apache.ws.security.WSSecurityException;
import org.apache.ws.security.WSUsernameTokenPrincipal;
import org.apache.ws.security.components.crypto.Crypto;
import org.apache.ws.security.util.Loader;
import org.apache.ws.security.util.XmlSchemaDateFormat;
import org.joda.time.DateTime;
import org.opensaml.common.SAMLException;
import org.opensaml.saml1.core.*;
import org.opensaml.xml.signature.KeyInfo;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import java.security.Principal;
import java.security.SecureRandom;
import java.security.cert.X509Certificate;
import java.text.DateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
* Issuer to issue SAMl tokens
*/
public class SAMLTokenIssuer implements TokenIssuer {
private String configParamName;
private OMElement configElement;
private String configFile;
private static final Log log = LogFactory.getLog(SAMLTokenIssuer.class);
public SOAPEnvelope issue(RahasData data) throws TrustException {
MessageContext inMsgCtx = data.getInMessageContext();
SAMLTokenIssuerConfig tokenIssuerConfiguration = CommonUtil.getTokenIssuerConfiguration(this.configElement,
this.configFile, inMsgCtx.getParameter(this.configParamName));
if (tokenIssuerConfiguration == null) {
if (log.isDebugEnabled()) {
String parameterName;
if (this.configElement != null) {
parameterName = "OMElement - " + this.configElement.toString();
} else if (this.configFile != null) {
parameterName = "File - " + this.configFile;
} else if (this.configParamName != null) {
parameterName = "With message context parameter name - " + this.configParamName;
} else {
parameterName = "No method to build configurations";
}
log.debug("Unable to build token configurations, " + parameterName);
}
throw new TrustException("configurationIsNull");
}
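        // Overall flow: build the response envelope, create a SAML 1.1 assertion (holder-of-key for
        // SymmetricKey/PublicKey key types, bearer otherwise), add token references and the lifetime to
        // the RSTR, store the issued token, and attach a RequestedProofToken when the issuer generated
        // a symmetric key.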
SOAPEnvelope env = TrustUtil.createSOAPEnvelope(inMsgCtx
.getEnvelope().getNamespace().getNamespaceURI());
Crypto crypto = tokenIssuerConfiguration.getIssuerCrypto(inMsgCtx
.getAxisService().getClassLoader());
// Creation and expiration times
DateTime creationTime = new DateTime();
DateTime expirationTime = new DateTime(creationTime.getMillis() + tokenIssuerConfiguration.getTtl());
// Get the document
Document doc = ((Element) env).getOwnerDocument();
// Get the key size and create a new byte array of that size
int keySize = data.getKeysize();
keySize = (keySize == -1) ? tokenIssuerConfiguration.getKeySize() : keySize;
/*
* Find the KeyType If the KeyType is SymmetricKey or PublicKey,
* issue a SAML HoK assertion. - In the case of the PublicKey, in
* coming security header MUST contain a certificate (maybe via
* signature)
*
* If the KeyType is Bearer then issue a Bearer assertion
*
* If the key type is missing we will issue a HoK assertion
*/
String keyType = data.getKeyType();
Assertion assertion;
if (keyType == null) {
throw new TrustException(TrustException.INVALID_REQUEST,
new String[] { "Requested KeyType is missing" });
}
if (keyType.endsWith(RahasConstants.KEY_TYPE_SYMM_KEY)
|| keyType.endsWith(RahasConstants.KEY_TYPE_PUBLIC_KEY)) {
assertion = createHoKAssertion(tokenIssuerConfiguration, doc, crypto,
creationTime, expirationTime, data);
} else if (keyType.endsWith(RahasConstants.KEY_TYPE_BEARER)) {
assertion = createBearerAssertion(tokenIssuerConfiguration, doc, crypto,
creationTime, expirationTime, data);
} else {
throw new TrustException("unsupportedKeyType");
}
OMElement rstrElem;
int wstVersion = data.getVersion();
if (RahasConstants.VERSION_05_02 == wstVersion) {
rstrElem = TrustUtil.createRequestSecurityTokenResponseElement(
wstVersion, env.getBody());
} else {
OMElement rstrcElem = TrustUtil
.createRequestSecurityTokenResponseCollectionElement(
wstVersion, env.getBody());
rstrElem = TrustUtil.createRequestSecurityTokenResponseElement(
wstVersion, rstrcElem);
}
TrustUtil.createTokenTypeElement(wstVersion, rstrElem).setText(
RahasConstants.TOK_TYPE_SAML_10);
if (keyType.endsWith(RahasConstants.KEY_TYPE_SYMM_KEY)) {
TrustUtil.createKeySizeElement(wstVersion, rstrElem, keySize);
}
if (tokenIssuerConfiguration.isAddRequestedAttachedRef()) {
TrustUtil.createRequestedAttachedRef(rstrElem, assertion.getID(),wstVersion);
}
if (tokenIssuerConfiguration.isAddRequestedUnattachedRef()) {
TrustUtil.createRequestedUnattachedRef(rstrElem, assertion.getID(),wstVersion);
}
if (data.getAppliesToAddress() != null) {
TrustUtil.createAppliesToElement(rstrElem, data
.getAppliesToAddress(), data.getAddressingNs());
}
// Use GMT time in milliseconds
DateFormat zulu = new XmlSchemaDateFormat();
// Add the Lifetime element
TrustUtil.createLifetimeElement(wstVersion, rstrElem, zulu
.format(creationTime.toDate()), zulu.format(expirationTime.toDate()));
// Create the RequestedSecurityToken element and add the SAML token
// to it
OMElement reqSecTokenElem = TrustUtil
.createRequestedSecurityTokenElement(wstVersion, rstrElem);
Token assertionToken;
//try {
Node tempNode = assertion.getDOM();
reqSecTokenElem.addChild((OMNode) ((Element) rstrElem)
.getOwnerDocument().importNode(tempNode, true));
// Store the token
assertionToken = new Token(assertion.getID(),
(OMElement) assertion.getDOM(), creationTime.toDate(),
expirationTime.toDate());
// At this point we definitely have the secret
// Otherwise it should fail with an exception earlier
assertionToken.setSecret(data.getEphmeralKey());
TrustUtil.getTokenStore(inMsgCtx).add(assertionToken);
/* } catch (SAMLException e) {
throw new TrustException("samlConverstionError", e);
}*/
if (keyType.endsWith(RahasConstants.KEY_TYPE_SYMM_KEY)
&& tokenIssuerConfiguration.getKeyComputation() != SAMLTokenIssuerConfig.KeyComputation.KEY_COMP_USE_REQ_ENT) {
// Add the RequestedProofToken
TokenIssuerUtil.handleRequestedProofToken(data, wstVersion,
tokenIssuerConfiguration, rstrElem, assertionToken, doc);
}
return env;
}
private Assertion createBearerAssertion(SAMLTokenIssuerConfig config,
Document doc, Crypto crypto, DateTime creationTime,
DateTime expirationTime, RahasData data) throws TrustException {
Principal principal = data.getPrincipal();
Assertion assertion;
// In the case where the principal is a UT
if (principal instanceof WSUsernameTokenPrincipal) {
NameIdentifier nameId = null;
if (config.getCallbackHandler() != null) {
SAMLNameIdentifierCallback cb = new SAMLNameIdentifierCallback(data);
cb.setUserId(principal.getName());
SAMLCallbackHandler callbackHandler = config.getCallbackHandler();
try {
callbackHandler.handle(cb);
} catch (SAMLException e) {
throw new TrustException("unableToRetrieveCallbackHandler", e);
}
nameId = cb.getNameId();
} else {
nameId = SAMLUtils.createNamedIdentifier(principal.getName(), NameIdentifier.EMAIL);
}
assertion = createAuthAssertion(RahasConstants.SAML11_SUBJECT_CONFIRMATION_BEARER,
nameId, null, config, crypto, creationTime,
expirationTime, data);
return assertion;
} else {
throw new TrustException("samlUnsupportedPrincipal",
new String[]{principal.getClass().getName()});
}
}
private Assertion createHoKAssertion(SAMLTokenIssuerConfig config,
Document doc, Crypto crypto, DateTime creationTime,
DateTime expirationTime, RahasData data) throws TrustException {
if (data.getKeyType().endsWith(RahasConstants.KEY_TYPE_SYMM_KEY)) {
X509Certificate serviceCert = null;
try {
// TODO what if principal is null ?
NameIdentifier nameIdentifier = null;
if (data.getPrincipal() != null) {
String subjectNameId = data.getPrincipal().getName();
nameIdentifier =SAMLUtils.createNamedIdentifier(subjectNameId, NameIdentifier.EMAIL);
}
                /**
                 * In this case we need to create a KeyInfo similar to the following:
                 *   <KeyInfo xmlns="http://www.w3.org/2000/09/xmldsig#">
                 *     <xenc:EncryptedKey xmlns:xenc="http://www.w3.org/2001/04/xmlenc#">
                 *       ....
                 *     </xenc:EncryptedKey>
                 *   </KeyInfo>
                 */
// Get ApliesTo to figure out which service to issue the token
// for
serviceCert = getServiceCert(config, crypto, data
.getAppliesToAddress());
// set keySize
int keySize = data.getKeysize();
keySize = (keySize != -1) ? keySize : config.getKeySize();
// Create the encrypted key
KeyInfo encryptedKeyInfoElement
= CommonUtil.getSymmetricKeyBasedKeyInfo(doc, data, serviceCert, keySize,
crypto, config.getKeyComputation());
return this.createAttributeAssertion(data, encryptedKeyInfoElement, nameIdentifier, config,
crypto, creationTime, expirationTime);
} catch (WSSecurityException e) {
if (serviceCert != null) {
throw new TrustException(
"errorInBuildingTheEncryptedKeyForPrincipal",
new String[]{serviceCert.getSubjectDN().getName()},
e);
} else {
throw new TrustException(
"trustedCertNotFoundForEPR",
new String[]{data.getAppliesToAddress()},
e);
}
}
} else {
try {
/**
* In this case we need to create KeyInfo as follows,
* <KeyInfo xmlns="http://www.w3.org/2000/09/xmldsig#">
* <X509Data xmlns:xenc="http://www.w3.org/2001/04/xmlenc#"
* xmlns:ds="http://www.w3.org/2000/09/xmldsig#">
* <X509Certificate>
* MIICNTCCAZ6gAwIBAgIES343....
* </X509Certificate>
* </X509Data>
* </KeyInfo>
*/
String subjectNameId = data.getPrincipal().getName();
NameIdentifier nameId = SAMLUtils.createNamedIdentifier(subjectNameId, NameIdentifier.EMAIL);
// Create the ds:KeyValue element with the ds:X509Data
X509Certificate clientCert = data.getClientCert();
if(clientCert == null) {
                    clientCert = CommonUtil.getCertificateByAlias(crypto, data.getPrincipal().getName());
}
KeyInfo keyInfo = CommonUtil.getCertificateBasedKeyInfo(clientCert);
return this.createAuthAssertion(RahasConstants.SAML11_SUBJECT_CONFIRMATION_HOK, nameId, keyInfo,
config, crypto, creationTime, expirationTime, data);
} catch (Exception e) {
throw new TrustException("samlAssertionCreationError", e);
}
}
}
/**
* Uses the <code>wst:AppliesTo</code> to figure out the certificate to
* encrypt the secret in the SAML token
*
* @param config Token issuer configuration.
* @param crypto Crypto properties.
* @param serviceAddress
* The address of the service
* @return The X509 certificate.
* @throws org.apache.rahas.TrustException If an error occurred while retrieving certificate from crypto.
*/
private X509Certificate getServiceCert(SAMLTokenIssuerConfig config,
Crypto crypto, String serviceAddress) throws TrustException {
// TODO a duplicate method !!
if (serviceAddress != null && !"".equals(serviceAddress)) {
String alias = (String) config.getTrustedServices().get(serviceAddress);
if (alias != null) {
return CommonUtil.getCertificateByAlias(crypto,alias);
} else {
alias = (String) config.getTrustedServices().get("*");
return CommonUtil.getCertificateByAlias(crypto,alias);
}
} else {
String alias = (String) config.getTrustedServices().get("*");
return CommonUtil.getCertificateByAlias(crypto,alias);
}
}
/**
* Create the SAML assertion with the secret held in an
* <code>xenc:EncryptedKey</code>
* @param data The Rahas configurations, this is needed to get the callbacks.
* @param keyInfo OpenSAML KeyInfo representation.
* @param subjectNameId Principal as an OpenSAML Subject
* @param config SAML Token issuer configurations.
* @param crypto To get certificate information.
* @param notBefore Validity period start.
* @param notAfter Validity period end
* @return OpenSAML Assertion object.
* @throws TrustException If an error occurred while creating the Assertion.
*/
private Assertion createAttributeAssertion(RahasData data,
KeyInfo keyInfo, NameIdentifier subjectNameId,
SAMLTokenIssuerConfig config,
Crypto crypto, DateTime notBefore, DateTime notAfter) throws TrustException {
try {
Subject subject
= SAMLUtils.createSubject(subjectNameId, RahasConstants.SAML11_SUBJECT_CONFIRMATION_HOK, keyInfo);
Attribute[] attributes;
SAMLCallbackHandler handler = CommonUtil.getSAMLCallbackHandler(config, data);
SAMLAttributeCallback cb = new SAMLAttributeCallback(data);
if (handler != null) {
handler.handle(cb);
attributes = cb.getAttributes();
} else {
//TODO Remove this after discussing
Attribute attribute = SAMLUtils.createAttribute("Name", "https://rahas.apache.org/saml/attrns",
"Colombo/Rahas");
attributes = new Attribute[]{attribute};
}
AttributeStatement attributeStatement = SAMLUtils.createAttributeStatement(subject, Arrays.asList(attributes));
List<Statement> attributeStatements = new ArrayList<Statement>();
attributeStatements.add(attributeStatement);
Assertion assertion = SAMLUtils.createAssertion(config.getIssuerName(), notBefore,
notAfter, attributeStatements);
SAMLUtils.signAssertion(assertion, crypto, config.getIssuerKeyAlias(), config.getIssuerKeyPassword());
return assertion;
} catch (Exception e) {
throw new TrustException("samlAssertionCreationError", e);
}
}
/**
* Creates an authentication assertion.
* @param confirmationMethod The confirmation method. (HOK, Bearer ...)
* @param subjectNameId The principal name.
* @param keyInfo OpenSAML representation of KeyInfo.
* @param config Rahas configurations.
* @param crypto Certificate information.
* @param notBefore Validity start.
* @param notAfter Validity end.
* @param data Other Rahas data.
* @return An openSAML Assertion.
* @throws TrustException If an exception occurred while creating the Assertion.
*/
private Assertion createAuthAssertion(String confirmationMethod,
NameIdentifier subjectNameId, KeyInfo keyInfo,
SAMLTokenIssuerConfig config, Crypto crypto, DateTime notBefore,
DateTime notAfter, RahasData data) throws TrustException {
try {
Subject subject = SAMLUtils.createSubject(subjectNameId,confirmationMethod, keyInfo);
AuthenticationStatement authenticationStatement
= SAMLUtils.createAuthenticationStatement(subject, RahasConstants.AUTHENTICATION_METHOD_PASSWORD,
notBefore);
List<Statement> statements = new ArrayList<Statement>();
if (data.getClaimDialect() != null && data.getClaimElem() != null) {
Statement attrStatement = createSAMLAttributeStatement(
SAMLUtils.createSubject(subject.getNameIdentifier(),
confirmationMethod, keyInfo), data, config);
statements.add(attrStatement);
}
statements.add(authenticationStatement);
Assertion assertion = SAMLUtils.createAssertion(config.getIssuerName(),
notBefore, notAfter, statements);
// Signing the assertion
// The <ds:Signature>...</ds:Signature> element appears only after
// signing.
SAMLUtils.signAssertion(assertion, crypto, config.getIssuerKeyAlias(), config.getIssuerKeyPassword());
return assertion;
} catch (Exception e) {
throw new TrustException("samlAssertionCreationError", e);
}
}
/**
* {@inheritDoc}
*/
public String getResponseAction(RahasData data) throws TrustException {
return TrustUtil.getActionValue(data.getVersion(),
RahasConstants.RSTR_ACTION_ISSUE);
}
/**
* Create an ephemeral key
*
* @return The generated key as a byte array
* @throws TrustException
*/
protected byte[] generateEphemeralKey(int keySize) throws TrustException {
try {
SecureRandom random = SecureRandom.getInstance("SHA1PRNG");
byte[] temp = new byte[keySize / 8];
random.nextBytes(temp);
return temp;
} catch (Exception e) {
throw new TrustException("Error in creating the ephemeral key", e);
}
}
/**
* {@inheritDoc}
*/
public void setConfigurationFile(String configFile) {
this.configFile = configFile;
}
/**
* {@inheritDoc}
*/
public void setConfigurationElement(OMElement configElement) {
this.configElement = configElement;
}
/**
* {@inheritDoc}
*/
public void setConfigurationParamName(String configParamName) {
this.configParamName = configParamName;
}
private AttributeStatement createSAMLAttributeStatement(Subject subject,
RahasData rahasData,
SAMLTokenIssuerConfig config)
throws TrustException {
Attribute[] attrs = null;
if (config.getCallbackHandler() != null) {
SAMLAttributeCallback cb = new SAMLAttributeCallback(rahasData);
SAMLCallbackHandler handler = config.getCallbackHandler();
try {
handler.handle(cb);
attrs = cb.getAttributes();
} catch (SAMLException e) {
throw new TrustException("unableToRetrieveCallbackHandler", e);
}
} else if (config.getCallbackHandlerName() != null
&& config.getCallbackHandlerName().trim().length() > 0) {
SAMLAttributeCallback cb = new SAMLAttributeCallback(rahasData);
SAMLCallbackHandler handler = null;
MessageContext msgContext = rahasData.getInMessageContext();
ClassLoader classLoader = msgContext.getAxisService().getClassLoader();
Class cbClass = null;
try {
cbClass = Loader.loadClass(classLoader, config.getCallbackHandlerName());
} catch (ClassNotFoundException e) {
throw new TrustException("cannotLoadPWCBClass",
new String[]{config.getCallbackHandlerName()}, e);
}
try {
handler = (SAMLCallbackHandler) cbClass.newInstance();
} catch (Exception e) {
throw new TrustException("cannotCreatePWCBInstance",
new String[]{config.getCallbackHandlerName()}, e);
}
try {
handler.handle(cb);
} catch (SAMLException e) {
throw new TrustException("unableToRetrieveCallbackHandler", e);
}
attrs = cb.getAttributes();
} else {
//TODO Remove this after discussing
Attribute attribute =
SAMLUtils.createAttribute("Name", "https://rahas.apache.org/saml/attrns", "Colombo/Rahas");
attrs = new Attribute[]{attribute};
}
AttributeStatement attributeStatement = SAMLUtils.createAttributeStatement(subject, Arrays.asList(attrs));
return attributeStatement;
}
}
|
apache/rampart
|
modules/rampart-trust/src/main/java/org/apache/rahas/impl/SAMLTokenIssuer.java
|
Java
|
apache-2.0
| 23,614 | 39.854671 | 127 | 0.607817 | false |
package cgeo.geocaching.filter;
import cgeo.geocaching.R;
import cgeo.geocaching.models.Geocache;
import android.os.Parcel;
import android.os.Parcelable;
import androidx.annotation.NonNull;
import java.util.Collections;
import java.util.List;
class ModifiedFilter extends AbstractFilter implements IFilterFactory {
public static final Creator<ModifiedFilter> CREATOR
= new Parcelable.Creator<ModifiedFilter>() {
@Override
public ModifiedFilter createFromParcel(final Parcel in) {
return new ModifiedFilter(in);
}
@Override
public ModifiedFilter[] newArray(final int size) {
return new ModifiedFilter[size];
}
};
ModifiedFilter() {
super(R.string.caches_filter_modified);
}
protected ModifiedFilter(final Parcel in) {
super(in);
}
@Override
public boolean accepts(@NonNull final Geocache cache) {
        // treat a cache as modified if it has user-modified coordinates or a user-defined final waypoint
        return cache.hasUserModifiedCoords() || cache.hasFinalDefined();
}
@Override
@NonNull
public List<IFilter> getFilters() {
return Collections.singletonList(this);
}
}
|
S-Bartfast/cgeo
|
main/src/cgeo/geocaching/filter/ModifiedFilter.java
|
Java
|
apache-2.0
| 1,168 | 22.836735 | 72 | 0.671233 | false |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.mapper;
import java.util.List;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermInSetQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.lucene.BytesRefs;
import org.elasticsearch.index.query.QueryShardContext;
/** Base {@link MappedFieldType} implementation for a field that is indexed
* with the inverted index. */
abstract class TermBasedFieldType extends MappedFieldType {
TermBasedFieldType() {}
protected TermBasedFieldType(MappedFieldType ref) {
super(ref);
}
/** Returns the indexed value used to construct search "values".
* This method is used for the default implementations of most
* query factory methods such as {@link #termQuery}. */
protected BytesRef indexedValueForSearch(Object value) {
return BytesRefs.toBytesRef(value);
}
@Override
public Query termQuery(Object value, QueryShardContext context) {
failIfNotIndexed();
Query query = new TermQuery(new Term(name(), indexedValueForSearch(value)));
if (boost() != 1f) {
query = new BoostQuery(query, boost());
}
return query;
}
@Override
public Query termsQuery(List<?> values, QueryShardContext context) {
failIfNotIndexed();
BytesRef[] bytesRefs = new BytesRef[values.size()];
for (int i = 0; i < bytesRefs.length; i++) {
bytesRefs[i] = indexedValueForSearch(values.get(i));
}
return new TermInSetQuery(name(), bytesRefs);
}
}
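// Concrete field types typically extend this class and only override indexedValueForSearch() to
// normalize or re-encode values, inheriting the TermQuery/TermInSetQuery construction above.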
|
fuchao01/elasticsearch
|
core/src/main/java/org/elasticsearch/index/mapper/TermBasedFieldType.java
|
Java
|
apache-2.0
| 2,487 | 34.528571 | 84 | 0.713309 | false |
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ilm;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.Version;
import org.elasticsearch.client.AdminClient;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.IndicesAdminClient;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.component.Lifecycle.State;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.core.ilm.IndexLifecycleMetadata;
import org.elasticsearch.xpack.core.ilm.LifecycleExecutionState;
import org.elasticsearch.xpack.core.ilm.LifecyclePolicy;
import org.elasticsearch.xpack.core.ilm.LifecyclePolicyMetadata;
import org.elasticsearch.xpack.core.ilm.LifecycleSettings;
import org.elasticsearch.xpack.core.ilm.MockAction;
import org.elasticsearch.xpack.core.ilm.OperationMode;
import org.elasticsearch.xpack.core.ilm.Phase;
import org.elasticsearch.xpack.core.ilm.ShrinkAction;
import org.elasticsearch.xpack.core.ilm.ShrinkStep;
import org.elasticsearch.xpack.core.ilm.Step;
import org.elasticsearch.xpack.core.scheduler.SchedulerEngine;
import org.hamcrest.Description;
import org.junit.After;
import org.junit.Before;
import org.mockito.ArgumentMatcher;
import org.mockito.Mockito;
import java.time.Clock;
import java.time.Instant;
import java.time.ZoneId;
import java.util.Collections;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.UUID;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.node.Node.NODE_MASTER_SETTING;
import static org.elasticsearch.xpack.core.ilm.AbstractStepTestCase.randomStepKey;
import static org.elasticsearch.xpack.core.ilm.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY;
import static org.elasticsearch.xpack.core.ilm.LifecyclePolicyTestsUtils.newTestLifecyclePolicy;
import static org.hamcrest.Matchers.equalTo;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.argThat;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class IndexLifecycleServiceTests extends ESTestCase {
private ClusterService clusterService;
private IndexLifecycleService indexLifecycleService;
private String nodeId;
private DiscoveryNode masterNode;
private IndicesAdminClient indicesClient;
private long now;
private ThreadPool threadPool;
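    // The fixture wires a real IndexLifecycleService to mocked cluster/client plumbing and a fixed clock,
    // so each test can hand-build a ClusterState and trigger policies deterministically.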
@Before
public void prepareServices() {
nodeId = randomAlphaOfLength(10);
ExecutorService executorService = mock(ExecutorService.class);
clusterService = mock(ClusterService.class);
masterNode = DiscoveryNode.createLocal(settings(Version.CURRENT)
.put(NODE_MASTER_SETTING.getKey(), true).build(),
new TransportAddress(TransportAddress.META_ADDRESS, 9300), nodeId);
now = randomNonNegativeLong();
Clock clock = Clock.fixed(Instant.ofEpochMilli(now), ZoneId.of(randomFrom(ZoneId.getAvailableZoneIds())));
doAnswer(invocationOnMock -> null).when(clusterService).addListener(any());
doAnswer(invocationOnMock -> {
Runnable runnable = (Runnable) invocationOnMock.getArguments()[0];
runnable.run();
return null;
}).when(executorService).execute(any());
Settings settings = Settings.builder().put(LifecycleSettings.LIFECYCLE_POLL_INTERVAL, "1s").build();
when(clusterService.getClusterSettings()).thenReturn(new ClusterSettings(settings,
Collections.singleton(LifecycleSettings.LIFECYCLE_POLL_INTERVAL_SETTING)));
when(clusterService.lifecycleState()).thenReturn(State.STARTED);
Client client = mock(Client.class);
AdminClient adminClient = mock(AdminClient.class);
indicesClient = mock(IndicesAdminClient.class);
when(client.admin()).thenReturn(adminClient);
when(adminClient.indices()).thenReturn(indicesClient);
when(client.settings()).thenReturn(Settings.EMPTY);
threadPool = new TestThreadPool("test");
indexLifecycleService = new IndexLifecycleService(Settings.EMPTY, client, clusterService, threadPool,
clock, () -> now, null, null);
Mockito.verify(clusterService).addListener(indexLifecycleService);
Mockito.verify(clusterService).addStateApplier(indexLifecycleService);
}
@After
public void cleanup() {
when(clusterService.lifecycleState()).thenReturn(randomFrom(State.STOPPED, State.CLOSED));
indexLifecycleService.close();
threadPool.shutdownNow();
}
public void testStoppedModeSkip() {
String policyName = randomAlphaOfLengthBetween(1, 20);
IndexLifecycleRunnerTests.MockClusterStateActionStep mockStep =
new IndexLifecycleRunnerTests.MockClusterStateActionStep(randomStepKey(), randomStepKey());
MockAction mockAction = new MockAction(Collections.singletonList(mockStep));
Phase phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", mockAction));
LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Collections.singletonMap(phase.getName(), phase));
SortedMap<String, LifecyclePolicyMetadata> policyMap = new TreeMap<>();
policyMap.put(policyName, new LifecyclePolicyMetadata(policy, Collections.emptyMap(),
randomNonNegativeLong(), randomNonNegativeLong()));
Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
IndexMetadata indexMetadata = IndexMetadata.builder(index.getName())
.settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey(), policyName))
.numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build();
ImmutableOpenMap.Builder<String, IndexMetadata> indices = ImmutableOpenMap.<String, IndexMetadata> builder()
.fPut(index.getName(), indexMetadata);
Metadata metadata = Metadata.builder()
.putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(policyMap, OperationMode.STOPPED))
.indices(indices.build())
.persistentSettings(settings(Version.CURRENT).build())
.build();
ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT)
.metadata(metadata)
.nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build())
.build();
ClusterChangedEvent event = new ClusterChangedEvent("_source", currentState, ClusterState.EMPTY_STATE);
indexLifecycleService.applyClusterState(event);
indexLifecycleService.triggerPolicies(currentState, randomBoolean());
assertThat(mockStep.getExecuteCount(), equalTo(0L));
}
public void testRequestedStopOnShrink() {
Step.StepKey mockShrinkStep = new Step.StepKey(randomAlphaOfLength(4), ShrinkAction.NAME, ShrinkStep.NAME);
String policyName = randomAlphaOfLengthBetween(1, 20);
IndexLifecycleRunnerTests.MockClusterStateActionStep mockStep =
new IndexLifecycleRunnerTests.MockClusterStateActionStep(mockShrinkStep, randomStepKey());
MockAction mockAction = new MockAction(Collections.singletonList(mockStep));
Phase phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", mockAction));
LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Collections.singletonMap(phase.getName(), phase));
SortedMap<String, LifecyclePolicyMetadata> policyMap = new TreeMap<>();
policyMap.put(policyName, new LifecyclePolicyMetadata(policy, Collections.emptyMap(),
randomNonNegativeLong(), randomNonNegativeLong()));
Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
lifecycleState.setPhase(mockShrinkStep.getPhase());
lifecycleState.setAction(mockShrinkStep.getAction());
lifecycleState.setStep(mockShrinkStep.getName());
IndexMetadata indexMetadata = IndexMetadata.builder(index.getName())
.settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey(), policyName))
.putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap())
.numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build();
ImmutableOpenMap.Builder<String, IndexMetadata> indices = ImmutableOpenMap.<String, IndexMetadata> builder()
.fPut(index.getName(), indexMetadata);
Metadata metadata = Metadata.builder()
.putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(policyMap, OperationMode.STOPPING))
.indices(indices.build())
.persistentSettings(settings(Version.CURRENT).build())
.build();
ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT)
.metadata(metadata)
.nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build())
.build();
ClusterChangedEvent event = new ClusterChangedEvent("_source", currentState, ClusterState.EMPTY_STATE);
SetOnce<Boolean> changedOperationMode = new SetOnce<>();
doAnswer(invocationOnMock -> {
changedOperationMode.set(true);
return null;
}).when(clusterService).submitStateUpdateTask(eq("ilm_operation_mode_update"), any(OperationModeUpdateTask.class));
indexLifecycleService.applyClusterState(event);
indexLifecycleService.triggerPolicies(currentState, true);
assertNull(changedOperationMode.get());
}
public void testRequestedStopInShrinkActionButNotShrinkStep() {
// test all the shrink action steps that ILM can be stopped during (basically all of them minus the actual shrink)
ShrinkAction action = new ShrinkAction(1);
action.toSteps(mock(Client.class), "warm", randomStepKey()).stream()
.map(sk -> sk.getKey().getName())
.filter(name -> name.equals(ShrinkStep.NAME) == false)
.forEach(this::verifyCanStopWithStep);
}
// Check that ILM can stop when in the shrink action on the provided step
private void verifyCanStopWithStep(String stoppableStep) {
Step.StepKey mockShrinkStep = new Step.StepKey(randomAlphaOfLength(4), ShrinkAction.NAME, stoppableStep);
String policyName = randomAlphaOfLengthBetween(1, 20);
IndexLifecycleRunnerTests.MockClusterStateActionStep mockStep =
new IndexLifecycleRunnerTests.MockClusterStateActionStep(mockShrinkStep, randomStepKey());
MockAction mockAction = new MockAction(Collections.singletonList(mockStep));
Phase phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", mockAction));
LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Collections.singletonMap(phase.getName(), phase));
SortedMap<String, LifecyclePolicyMetadata> policyMap = new TreeMap<>();
policyMap.put(policyName, new LifecyclePolicyMetadata(policy, Collections.emptyMap(),
randomNonNegativeLong(), randomNonNegativeLong()));
Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
lifecycleState.setPhase(mockShrinkStep.getPhase());
lifecycleState.setAction(mockShrinkStep.getAction());
lifecycleState.setStep(mockShrinkStep.getName());
IndexMetadata indexMetadata = IndexMetadata.builder(index.getName())
.settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey(), policyName))
.putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap())
.numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build();
ImmutableOpenMap.Builder<String, IndexMetadata> indices = ImmutableOpenMap.<String, IndexMetadata> builder()
.fPut(index.getName(), indexMetadata);
Metadata metadata = Metadata.builder()
.putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(policyMap, OperationMode.STOPPING))
.indices(indices.build())
.persistentSettings(settings(Version.CURRENT).build())
.build();
ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT)
.metadata(metadata)
.nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build())
.build();
ClusterChangedEvent event = new ClusterChangedEvent("_source", currentState, ClusterState.EMPTY_STATE);
SetOnce<Boolean> changedOperationMode = new SetOnce<>();
doAnswer(invocationOnMock -> {
changedOperationMode.set(true);
return null;
}).when(clusterService).submitStateUpdateTask(eq("ilm_operation_mode_update {OperationMode STOPPED}"),
any(OperationModeUpdateTask.class));
indexLifecycleService.applyClusterState(event);
indexLifecycleService.triggerPolicies(currentState, true);
assertTrue(changedOperationMode.get());
}
public void testRequestedStopOnSafeAction() {
String policyName = randomAlphaOfLengthBetween(1, 20);
Step.StepKey currentStepKey = randomStepKey();
IndexLifecycleRunnerTests.MockClusterStateActionStep mockStep =
new IndexLifecycleRunnerTests.MockClusterStateActionStep(currentStepKey, randomStepKey());
MockAction mockAction = new MockAction(Collections.singletonList(mockStep));
Phase phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", mockAction));
LifecyclePolicy policy = newTestLifecyclePolicy(policyName, Collections.singletonMap(phase.getName(), phase));
SortedMap<String, LifecyclePolicyMetadata> policyMap = new TreeMap<>();
policyMap.put(policyName, new LifecyclePolicyMetadata(policy, Collections.emptyMap(),
randomNonNegativeLong(), randomNonNegativeLong()));
Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
lifecycleState.setPhase(currentStepKey.getPhase());
lifecycleState.setAction(currentStepKey.getAction());
lifecycleState.setStep(currentStepKey.getName());
IndexMetadata indexMetadata = IndexMetadata.builder(index.getName())
.settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey(), policyName))
.putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap())
.numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build();
ImmutableOpenMap.Builder<String, IndexMetadata> indices = ImmutableOpenMap.<String, IndexMetadata> builder()
.fPut(index.getName(), indexMetadata);
Metadata metadata = Metadata.builder()
.putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(policyMap, OperationMode.STOPPING))
.indices(indices.build())
.persistentSettings(settings(Version.CURRENT).build())
.build();
ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT)
.metadata(metadata)
.nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build())
.build();
ClusterChangedEvent event = new ClusterChangedEvent("_source", currentState, ClusterState.EMPTY_STATE);
SetOnce<Boolean> ranPolicy = new SetOnce<>();
SetOnce<Boolean> moveToMaintenance = new SetOnce<>();
doAnswer(invocationOnMock -> {
ranPolicy.set(true);
throw new AssertionError("invalid invocation");
}).when(clusterService).submitStateUpdateTask(anyString(), any(ExecuteStepsUpdateTask.class));
doAnswer(invocationOnMock -> {
OperationModeUpdateTask task = (OperationModeUpdateTask) invocationOnMock.getArguments()[1];
assertThat(task.getILMOperationMode(), equalTo(OperationMode.STOPPED));
moveToMaintenance.set(true);
return null;
}).when(clusterService).submitStateUpdateTask(eq("ilm_operation_mode_update {OperationMode STOPPED}"),
any(OperationModeUpdateTask.class));
indexLifecycleService.applyClusterState(event);
indexLifecycleService.triggerPolicies(currentState, randomBoolean());
assertNull(ranPolicy.get());
assertTrue(moveToMaintenance.get());
}
public void testExceptionStillProcessesOtherIndices() {
doTestExceptionStillProcessesOtherIndices(false);
}
public void testExceptionStillProcessesOtherIndicesOnMaster() {
doTestExceptionStillProcessesOtherIndices(true);
}
public void testOperationModeUpdateTaskPriority() {
indexLifecycleService.submitOperationModeUpdate(OperationMode.STOPPING);
verifyOperationModeUpdateTaskPriority(OperationMode.STOPPING, Priority.IMMEDIATE);
indexLifecycleService.submitOperationModeUpdate(OperationMode.STOPPED);
verifyOperationModeUpdateTaskPriority(OperationMode.STOPPED, Priority.IMMEDIATE);
indexLifecycleService.submitOperationModeUpdate(OperationMode.RUNNING);
verifyOperationModeUpdateTaskPriority(OperationMode.RUNNING, Priority.NORMAL);
}
private void verifyOperationModeUpdateTaskPriority(OperationMode mode, Priority expectedPriority) {
verify(clusterService).submitStateUpdateTask(
Mockito.eq("ilm_operation_mode_update {OperationMode " + mode.name() +"}"),
argThat(new ArgumentMatcher<OperationModeUpdateTask>() {
Priority actualPriority = null;
@Override
public boolean matches(Object argument) {
if (argument instanceof OperationModeUpdateTask == false) {
return false;
}
actualPriority = ((OperationModeUpdateTask) argument).priority();
return actualPriority == expectedPriority;
}
@Override
public void describeTo(Description description) {
description.appendText("the cluster state update task priority must be "+ expectedPriority+" but got: ")
.appendText(actualPriority.name());
}
})
);
}
@SuppressWarnings("unchecked")
public void doTestExceptionStillProcessesOtherIndices(boolean useOnMaster) {
String policy1 = randomAlphaOfLengthBetween(1, 20);
Step.StepKey i1currentStepKey = randomStepKey();
final Step i1mockStep;
if (useOnMaster) {
i1mockStep = new IndexLifecycleRunnerTests.MockAsyncActionStep(i1currentStepKey, randomStepKey());
} else {
i1mockStep = new IndexLifecycleRunnerTests.MockClusterStateActionStep(i1currentStepKey, randomStepKey());
}
MockAction i1mockAction = new MockAction(Collections.singletonList(i1mockStep));
Phase i1phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", i1mockAction));
LifecyclePolicy i1policy = newTestLifecyclePolicy(policy1, Collections.singletonMap(i1phase.getName(), i1phase));
Index index1 = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
LifecycleExecutionState.Builder i1lifecycleState = LifecycleExecutionState.builder();
i1lifecycleState.setPhase(i1currentStepKey.getPhase());
i1lifecycleState.setAction(i1currentStepKey.getAction());
i1lifecycleState.setStep(i1currentStepKey.getName());
String policy2 = randomValueOtherThan(policy1, () -> randomAlphaOfLengthBetween(1, 20));
Step.StepKey i2currentStepKey = randomStepKey();
final Step i2mockStep;
if (useOnMaster) {
i2mockStep = new IndexLifecycleRunnerTests.MockAsyncActionStep(i2currentStepKey, randomStepKey());
} else {
i2mockStep = new IndexLifecycleRunnerTests.MockClusterStateActionStep(i2currentStepKey, randomStepKey());
}
MockAction mockAction = new MockAction(Collections.singletonList(i2mockStep));
Phase i2phase = new Phase("phase", TimeValue.ZERO, Collections.singletonMap("action", mockAction));
LifecyclePolicy i2policy = newTestLifecyclePolicy(policy1, Collections.singletonMap(i2phase.getName(), i1phase));
Index index2 = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20));
LifecycleExecutionState.Builder i2lifecycleState = LifecycleExecutionState.builder();
i2lifecycleState.setPhase(i2currentStepKey.getPhase());
i2lifecycleState.setAction(i2currentStepKey.getAction());
i2lifecycleState.setStep(i2currentStepKey.getName());
CountDownLatch stepLatch = new CountDownLatch(2);
boolean failStep1 = randomBoolean();
if (useOnMaster) {
((IndexLifecycleRunnerTests.MockAsyncActionStep) i1mockStep).setLatch(stepLatch);
((IndexLifecycleRunnerTests.MockAsyncActionStep) i1mockStep)
.setException(failStep1 ? new IllegalArgumentException("forcing a failure for index 1") : null);
((IndexLifecycleRunnerTests.MockAsyncActionStep) i2mockStep).setLatch(stepLatch);
((IndexLifecycleRunnerTests.MockAsyncActionStep) i2mockStep)
.setException(failStep1 ? null : new IllegalArgumentException("forcing a failure for index 2"));
} else {
((IndexLifecycleRunnerTests.MockClusterStateActionStep) i1mockStep).setLatch(stepLatch);
((IndexLifecycleRunnerTests.MockClusterStateActionStep) i1mockStep)
.setException(failStep1 ? new IllegalArgumentException("forcing a failure for index 1") : null);
            // the second latch/exception pair belongs to the mock step of index 2
            ((IndexLifecycleRunnerTests.MockClusterStateActionStep) i2mockStep).setLatch(stepLatch);
            ((IndexLifecycleRunnerTests.MockClusterStateActionStep) i2mockStep)
                .setException(failStep1 ? null : new IllegalArgumentException("forcing a failure for index 2"));
}
SortedMap<String, LifecyclePolicyMetadata> policyMap = new TreeMap<>();
policyMap.put(policy1, new LifecyclePolicyMetadata(i1policy, Collections.emptyMap(),
randomNonNegativeLong(), randomNonNegativeLong()));
policyMap.put(policy2, new LifecyclePolicyMetadata(i2policy, Collections.emptyMap(),
randomNonNegativeLong(), randomNonNegativeLong()));
IndexMetadata i1indexMetadata = IndexMetadata.builder(index1.getName())
.settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey(), policy1))
.putCustom(ILM_CUSTOM_METADATA_KEY, i1lifecycleState.build().asMap())
.numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build();
IndexMetadata i2indexMetadata = IndexMetadata.builder(index2.getName())
.settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey(), policy1))
.putCustom(ILM_CUSTOM_METADATA_KEY, i2lifecycleState.build().asMap())
.numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build();
ImmutableOpenMap.Builder<String, IndexMetadata> indices = ImmutableOpenMap.<String, IndexMetadata> builder()
.fPut(index1.getName(), i1indexMetadata)
.fPut(index2.getName(), i2indexMetadata);
Metadata metadata = Metadata.builder()
.putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(policyMap, OperationMode.RUNNING))
.indices(indices.build())
.persistentSettings(settings(Version.CURRENT).build())
.build();
ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT)
.metadata(metadata)
.nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build())
.build();
if (useOnMaster) {
when(clusterService.state()).thenReturn(currentState);
indexLifecycleService.onMaster();
} else {
indexLifecycleService.triggerPolicies(currentState, randomBoolean());
}
try {
assertTrue("both steps should have been executed, even with an exception",
                stepLatch.await(5, TimeUnit.SECONDS));
} catch (InterruptedException e) {
logger.error("failure while waiting for step execution", e);
fail("both steps should have been executed, even with an exception");
}
}
public void testTriggeredDifferentJob() {
Mockito.reset(clusterService);
SchedulerEngine.Event schedulerEvent = new SchedulerEngine.Event("foo", randomLong(), randomLong());
indexLifecycleService.triggered(schedulerEvent);
Mockito.verifyZeroInteractions(indicesClient, clusterService);
}
public void testParsingOriginationDateBeforeIndexCreation() {
Settings indexSettings = Settings.builder().put(LifecycleSettings.LIFECYCLE_PARSE_ORIGINATION_DATE, true).build();
Index index = new Index("invalid_index_name", UUID.randomUUID().toString());
expectThrows(IllegalArgumentException.class,
"The parse origination date setting was configured for index " + index.getName() +
" but the index name did not match the expected format",
() -> indexLifecycleService.beforeIndexAddedToCluster(index, indexSettings)
);
// disabling the parsing origination date setting should prevent the validation from throwing exception
try {
indexLifecycleService.beforeIndexAddedToCluster(index, Settings.EMPTY);
} catch (Exception e) {
fail("Did not expect the before index validation to throw an exception as the parse origination date setting was not set");
}
}
}
| HonzaKral/elasticsearch | x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleServiceTests.java | Java | apache-2.0 | 27,545 | 57.111814 | 135 | 0.72318 | false |
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <algorithm>
#include <cctype>
#include <fstream>
#include <sstream>
#include <string>
#include "src/v8.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/interpreter.h"
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/interpreter/bytecode-expectations-printer.h"
#include "test/cctest/test-feedback-vector.h"
namespace v8 {
namespace internal {
namespace interpreter {
#define XSTR(A) #A
#define STR(A) XSTR(A)
#define UNIQUE_VAR() "var a" STR(__COUNTER__) " = 0;\n"
#define LOAD_UNIQUE_PROPERTY() " b.name" STR(__COUNTER__) ";\n"
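// The REPEAT_* macros below textually duplicate their arguments the given
// number of times (built up by doubling); snippets use them to repeat
// statements enough times to exercise wide operands (see ContextVariables).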
#define REPEAT_2(...) __VA_ARGS__ __VA_ARGS__
#define REPEAT_4(...) REPEAT_2(__VA_ARGS__) REPEAT_2(__VA_ARGS__)
#define REPEAT_8(...) REPEAT_4(__VA_ARGS__) REPEAT_4(__VA_ARGS__)
#define REPEAT_16(...) REPEAT_8(__VA_ARGS__) REPEAT_8(__VA_ARGS__)
#define REPEAT_32(...) REPEAT_16(__VA_ARGS__) REPEAT_16(__VA_ARGS__)
#define REPEAT_64(...) REPEAT_32(__VA_ARGS__) REPEAT_32(__VA_ARGS__)
#define REPEAT_128(...) REPEAT_64(__VA_ARGS__) REPEAT_64(__VA_ARGS__)
#define REPEAT_256(...) REPEAT_128(__VA_ARGS__) REPEAT_128(__VA_ARGS__)
#define REPEAT_127(...) \
REPEAT_64(__VA_ARGS__) \
REPEAT_32(__VA_ARGS__) \
REPEAT_16(__VA_ARGS__) \
REPEAT_8(__VA_ARGS__) \
REPEAT_4(__VA_ARGS__) \
REPEAT_2(__VA_ARGS__) \
__VA_ARGS__
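// 127 + 64 + 32 + 16 + 8 + 2 = 249 repetitions.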
#define REPEAT_249(...) \
REPEAT_127(__VA_ARGS__) \
REPEAT_64(__VA_ARGS__) \
REPEAT_32(__VA_ARGS__) \
REPEAT_16(__VA_ARGS__) \
REPEAT_8(__VA_ARGS__) \
REPEAT_2(__VA_ARGS__)
#define REPEAT_2_UNIQUE_VARS() UNIQUE_VAR() UNIQUE_VAR()
#define REPEAT_4_UNIQUE_VARS() REPEAT_2_UNIQUE_VARS() REPEAT_2_UNIQUE_VARS()
#define REPEAT_8_UNIQUE_VARS() REPEAT_4_UNIQUE_VARS() REPEAT_4_UNIQUE_VARS()
#define REPEAT_16_UNIQUE_VARS() REPEAT_8_UNIQUE_VARS() REPEAT_8_UNIQUE_VARS()
#define REPEAT_32_UNIQUE_VARS() REPEAT_16_UNIQUE_VARS() REPEAT_16_UNIQUE_VARS()
#define REPEAT_64_UNIQUE_VARS() REPEAT_32_UNIQUE_VARS() REPEAT_32_UNIQUE_VARS()
#define REPEAT_128_UNIQUE_VARS() REPEAT_64_UNIQUE_VARS() REPEAT_64_UNIQUE_VARS()
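// 128 + 64 + 32 + 16 + 8 + 1 + 1 = 250 unique variables.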
#define REPEAT_250_UNIQUE_VARS() \
REPEAT_128_UNIQUE_VARS() \
REPEAT_64_UNIQUE_VARS() \
REPEAT_32_UNIQUE_VARS() \
REPEAT_16_UNIQUE_VARS() \
REPEAT_8_UNIQUE_VARS() \
UNIQUE_VAR() \
UNIQUE_VAR()
#define REPEAT_2_LOAD_UNIQUE_PROPERTY() \
LOAD_UNIQUE_PROPERTY() LOAD_UNIQUE_PROPERTY()
#define REPEAT_4_LOAD_UNIQUE_PROPERTY() \
REPEAT_2_LOAD_UNIQUE_PROPERTY() REPEAT_2_LOAD_UNIQUE_PROPERTY()
#define REPEAT_8_LOAD_UNIQUE_PROPERTY() \
REPEAT_4_LOAD_UNIQUE_PROPERTY() REPEAT_4_LOAD_UNIQUE_PROPERTY()
#define REPEAT_16_LOAD_UNIQUE_PROPERTY() \
REPEAT_8_LOAD_UNIQUE_PROPERTY() REPEAT_8_LOAD_UNIQUE_PROPERTY()
#define REPEAT_32_LOAD_UNIQUE_PROPERTY() \
REPEAT_16_LOAD_UNIQUE_PROPERTY() REPEAT_16_LOAD_UNIQUE_PROPERTY()
#define REPEAT_64_LOAD_UNIQUE_PROPERTY() \
REPEAT_32_LOAD_UNIQUE_PROPERTY() REPEAT_32_LOAD_UNIQUE_PROPERTY()
#define REPEAT_128_LOAD_UNIQUE_PROPERTY() \
REPEAT_64_LOAD_UNIQUE_PROPERTY() REPEAT_64_LOAD_UNIQUE_PROPERTY()
static const char* kGoldenFileDirectory =
"test/cctest/interpreter/bytecode_expectations/";
class InitializedIgnitionHandleScope : public InitializedHandleScope {
public:
InitializedIgnitionHandleScope() {
i::FLAG_always_opt = false;
i::FLAG_allow_natives_syntax = true;
i::FLAG_enable_lazy_source_positions = false;
}
};
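// Golden files start with a header delimited by two "---" lines; skip past the
// second separator so only the expected bytecode listing remains.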
void SkipGoldenFileHeader(std::istream& stream) { // NOLINT
std::string line;
int separators_seen = 0;
while (std::getline(stream, line)) {
if (line == "---") separators_seen += 1;
if (separators_seen == 2) return;
}
}
std::string LoadGolden(const std::string& golden_filename) {
std::ifstream expected_file((kGoldenFileDirectory + golden_filename).c_str());
CHECK(expected_file.is_open());
SkipGoldenFileHeader(expected_file);
std::ostringstream expected_stream;
// Restore the first separator, which was consumed by SkipGoldenFileHeader
expected_stream << "---\n" << expected_file.rdbuf();
return expected_stream.str();
}
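// Builds the "actual" text by wrapping each snippet in the optional prologue
// and epilogue and printing its bytecode expectations into a single stream.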
template <size_t N>
std::string BuildActual(const BytecodeExpectationsPrinter& printer,
const char* (&snippet_list)[N],
const char* prologue = nullptr,
const char* epilogue = nullptr) {
std::ostringstream actual_stream;
for (const char* snippet : snippet_list) {
std::string source_code;
if (prologue) source_code += prologue;
source_code += snippet;
if (epilogue) source_code += epilogue;
printer.PrintExpectation(actual_stream, source_code);
}
return actual_stream.str();
}
// inplace left trim
static inline void ltrim(std::string& str) {
str.erase(str.begin(),
std::find_if(str.begin(), str.end(),
[](unsigned char ch) { return !std::isspace(ch); }));
}
// inplace right trim
static inline void rtrim(std::string& str) {
str.erase(std::find_if(str.rbegin(), str.rend(),
[](unsigned char ch) { return !std::isspace(ch); })
.base(),
str.end());
}
static inline std::string trim(std::string& str) {
ltrim(str);
rtrim(str);
return str;
}
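// Compares generated and expected text line by line, ignoring leading and
// trailing whitespace; logs each mismatching line and any extra lines.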
bool CompareTexts(const std::string& generated, const std::string& expected) {
std::istringstream generated_stream(generated);
std::istringstream expected_stream(expected);
std::string generated_line;
std::string expected_line;
// Line number does not include golden file header.
int line_number = 0;
bool strings_match = true;
do {
std::getline(generated_stream, generated_line);
std::getline(expected_stream, expected_line);
if (!generated_stream.good() && !expected_stream.good()) {
return strings_match;
}
if (!generated_stream.good()) {
std::cerr << "Expected has extra lines after line " << line_number
<< "\n";
std::cerr << " Expected: '" << expected_line << "'\n";
return false;
} else if (!expected_stream.good()) {
std::cerr << "Generated has extra lines after line " << line_number
<< "\n";
std::cerr << " Generated: '" << generated_line << "'\n";
return false;
}
if (trim(generated_line) != trim(expected_line)) {
std::cerr << "Inputs differ at line " << line_number << "\n";
std::cerr << " Generated: '" << generated_line << "'\n";
std::cerr << " Expected: '" << expected_line << "'\n";
strings_match = false;
}
line_number++;
} while (true);
}
TEST(PrimitiveReturnStatements) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"",
"return;\n",
"return null;\n",
"return true;\n",
"return false;\n",
"return 0;\n",
"return +1;\n",
"return -1;\n",
"return +127;\n",
"return -128;\n",
"return 2.0;\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("PrimitiveReturnStatements.golden")));
}
TEST(PrimitiveExpressions) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var x = 0; return x;\n",
"var x = 0; return x + 3;\n",
"var x = 0; return 3 + x;\n",
"var x = 0; return x - 3;\n",
"var x = 0; return 3 - x;\n",
"var x = 4; return x * 3;\n",
"var x = 4; return 3 * x;\n",
"var x = 4; return x / 3;\n",
"var x = 4; return 3 / x;\n",
"var x = 4; return x % 3;\n",
"var x = 4; return 3 % x;\n",
"var x = 1; return x | 2;\n",
"var x = 1; return 2 | x;\n",
"var x = 1; return x ^ 2;\n",
"var x = 1; return 2 ^ x;\n",
"var x = 1; return x & 2;\n",
"var x = 1; return 2 & x;\n",
"var x = 10; return x << 3;\n",
"var x = 10; return 3 << x;\n",
"var x = 10; return x >> 3;\n",
"var x = 10; return 3 >> x;\n",
"var x = 10; return x >>> 3;\n",
"var x = 10; return 3 >>> x;\n",
"var x = 0; return (x, 3);\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("PrimitiveExpressions.golden")));
}
TEST(LogicalExpressions) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var x = 0; return x || 3;\n",
"var x = 0; return (x == 1) || 3;\n",
"var x = 0; return x && 3;\n",
"var x = 0; return (x == 0) && 3;\n",
"var x = 0; return x || (1, 2, 3);\n",
"var a = 2, b = 3, c = 4; return a || (a, b, a, b, c = 5, 3);\n",
"var x = 1; var a = 2, b = 3; return x || (" //
REPEAT_32("\n a = 1, b = 2, ") //
"3);\n",
"var x = 0; var a = 2, b = 3; return x && (" //
REPEAT_32("\n a = 1, b = 2, ") //
"3);\n",
"var x = 1; var a = 2, b = 3; return (x > 3) || (" //
REPEAT_32("\n a = 1, b = 2, ") //
"3);\n",
"var x = 0; var a = 2, b = 3; return (x < 5) && (" //
REPEAT_32("\n a = 1, b = 2, ") //
"3);\n",
"return 0 && 3;\n",
"return 1 || 3;\n",
"var x = 1; return x && 3 || 0, 1;\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("LogicalExpressions.golden")));
}
TEST(Parameters) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"function f() { return this; }",
"function f(arg1) { return arg1; }",
"function f(arg1) { return this; }",
"function f(arg1, arg2, arg3, arg4, arg5, arg6, arg7) { return arg4; }",
"function f(arg1, arg2, arg3, arg4, arg5, arg6, arg7) { return this; }",
"function f(arg1) { arg1 = 1; }",
"function f(arg1, arg2, arg3, arg4) { arg2 = 1; }",
};
CHECK(CompareTexts(BuildActual(printer, snippets, "", "\nf();"),
LoadGolden("Parameters.golden")));
}
TEST(IntegerConstants) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"return 12345678;\n",
"var a = 1234; return 5678;\n",
"var a = 1234; return 1234;\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("IntegerConstants.golden")));
}
TEST(HeapNumberConstants) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"return 1.2;\n",
"var a = 1.2; return 2.6;\n",
"var a = 3.14; return 3.14;\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("HeapNumberConstants.golden")));
}
TEST(StringConstants) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"return \"This is a string\";\n",
"var a = \"First string\"; return \"Second string\";\n",
"var a = \"Same string\"; return \"Same string\";\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("StringConstants.golden")));
}
TEST(PropertyLoads) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"function f(a) { return a.name; }\n"
"f({name : \"test\"});\n",
"function f(a) { return a[\"key\"]; }\n"
"f({key : \"test\"});\n",
"function f(a) { return a[100]; }\n"
"f({100 : \"test\"});\n",
"function f(a, b) { return a[b]; }\n"
"f({arg : \"test\"}, \"arg\");\n",
"function f(a) { var b = a.name; return a[-124]; }\n"
"f({\"-124\" : \"test\", name : 123 })",
"function f(a) {\n"
" var b = {};\n"
REPEAT_128_LOAD_UNIQUE_PROPERTY()
" return a.name;\n"
"}\n"
"f({name : \"test\"})\n",
"function f(a, b) {\n"
" var c;\n"
" c = a[b];\n"
REPEAT_127(" c = a[b];\n")
" return a[b];\n"
"}\n"
"f({name : \"test\"}, \"name\")\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("PropertyLoads.golden")));
}
TEST(PropertyLoadStoreOneShot) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_top_level(true);
printer.set_oneshot_opt(true);
const char* snippets[] = {
R"(
l = {
'a': 1,
'b': 2
};
v = l['a'] + l['b'];
l['b'] = 7;
l['a'] = l['b'];
)",
R"(
l = {
'a': 1.1,
'b': 2.2
};
for (i = 0; i < 5; ++i) {
l['a'] = l['a'] + l['b'];
l['b'] = l['a'] + l['b'];
}
)",
R"(
l = {
'a': 1.1,
'b': 2.2
};
while (s > 0) {
l['a'] = l['a'] - l['b'];
l['b'] = l['b'] - l['a'];
}
)",
R"(
l = {
'a': 1.1,
'b': 2.2
};
s = 10;
do {
l['a'] = l['b'] - l['a'];
} while (s < 10);
)",
R"(
l = {
'c': 1.1,
'd': 2.2
};
if (l['c'] < 3) {
l['c'] = 3;
} else {
l['d'] = 3;
}
)",
R"(
a = [1.1, [2.2, 4.5]];
)",
R"(
b = [];
)",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("PropertyLoadStoreOneShot.golden")));
}
TEST(PropertyLoadStoreWithoutOneShot) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_top_level(true);
const char* snippets[] = {
R"(
l = {
'aa': 1.1,
'bb': 2.2
};
v = l['aa'] + l['bb'];
l['bb'] = 7;
l['aa'] = l['bb'];
)",
R"(
l = {
'cc': 3.1,
'dd': 4.2
};
if (l['cc'] < 3) {
l['cc'] = 3;
} else {
l['dd'] = 3;
}
)",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("PropertyLoadStoreWithoutOneShot.golden")));
}
TEST(IIFEWithOneshotOpt) {
InitializedIgnitionHandleScope scope;
v8::Isolate* isolate = CcTest::isolate();
BytecodeExpectationsPrinter printer(isolate);
printer.set_wrap(false);
printer.set_top_level(true);
printer.set_print_callee(true);
printer.set_oneshot_opt(true);
const char* snippets[] = {
      // No feedback vectors for top-level named property loads/stores in an IIFE
R"(
(function() {
l = {};
l.aa = 2;
l.bb = l.aa;
return arguments.callee;
})();
)",
// Normal load/store within loops of an IIFE
R"(
(function() {
l = {};
for (i = 0; i < 5; ++i) {
l.aa = 2;
l.bb = l.aa;
}
return arguments.callee;
})();
)",
R"(
(function() {
l = {};
c = 4;
while(c > 4) {
l.aa = 2;
l.bb = l.aa;
c--;
}
return arguments.callee;
})();
)",
R"(
(function() {
l = {};
c = 4;
do {
l.aa = 2;
l.bb = l.aa;
c--;
} while(c > 4)
return arguments.callee;
})();
)",
// No feedback vectors for loads/stores in conditionals
R"(
(function() {
l = {
'aa': 3.3,
'bb': 4.4
};
if (l.aa < 3) {
l.aa = 3;
} else {
l.aa = l.bb;
}
return arguments.callee;
})();
)",
R"(
(function() {
a = [0, [1, 1,2,], 3];
return arguments.callee;
})();
)",
R"(
(function() {
a = [];
return arguments.callee;
})();
)",
// CallNoFeedback instead of CallProperty
R"(
this.f0 = function() {};
this.f1 = function(a) {};
this.f2 = function(a, b) {};
this.f3 = function(a, b, c) {};
this.f4 = function(a, b, c, d) {};
this.f5 = function(a, b, c, d, e) {};
(function() {
this.f0();
this.f1(1);
this.f2(1, 2);
this.f3(1, 2, 3);
this.f4(1, 2, 3, 4);
this.f5(1, 2, 3, 4, 5);
return arguments.callee;
})();
)",
// CallNoFeedback instead of CallUndefinedReceiver
R"(
function f0() {}
function f1(a) {}
function f2(a, b) {}
function f3(a, b, c) {}
function f4(a, b, c, d) {}
function f5(a, b, c, d, e) {}
(function() {
f0();
f1(1);
f2(1, 2);
f3(1, 2, 3);
f4(1, 2, 3, 4);
f5(1, 2, 3, 4, 5);
return arguments.callee;
})();
)",
// TODO(rmcilroy): Make this function produce one-shot code.
R"(
var t = 0;
function f2() {};
if (t == 0) {
(function(){
l = {};
l.a = 3;
l.b = 4;
f2();
return arguments.callee;
})();
}
)",
      // No one-shot opt for IIFEs within a function
R"(
function f2() {};
function f() {
return (function(){
l = {};
l.a = 3;
l.b = 4;
f2();
return arguments.callee;
})();
}
f();
)",
R"(
var f = function(l) { l.a = 3; return l; };
f({});
f;
)",
// No one-shot opt for top-level functions enclosed in parentheses
R"(
var f = (function(l) { l.a = 3; return l; });
f;
)",
R"(
var f = (function foo(l) { l.a = 3; return l; });
f;
)",
R"(
var f = function foo(l) { l.a = 3; return l; };
f({});
f;
)",
R"(
l = {};
var f = (function foo(l) { l.a = 3; return arguments.callee; })(l);
f;
)",
R"(
var f = (function foo(l) { l.a = 3; return arguments.callee; })({});
f;
)",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("IIFEWithOneshotOpt.golden")));
}
TEST(IIFEWithoutOneshotOpt) {
InitializedIgnitionHandleScope scope;
v8::Isolate* isolate = CcTest::isolate();
BytecodeExpectationsPrinter printer(isolate);
printer.set_wrap(false);
printer.set_top_level(true);
printer.set_print_callee(true);
const char* snippets[] = {
R"(
(function() {
l = {};
l.a = 2;
l.b = l.a;
return arguments.callee;
})();
)",
R"(
(function() {
l = {
'a': 4.3,
'b': 3.4
};
if (l.a < 3) {
l.a = 3;
} else {
l.a = l.b;
}
return arguments.callee;
})();
)",
R"(
this.f0 = function() {};
this.f1 = function(a) {};
this.f2 = function(a, b) {};
this.f3 = function(a, b, c) {};
this.f4 = function(a, b, c, d) {};
this.f5 = function(a, b, c, d, e) {};
(function() {
this.f0();
this.f1(1);
this.f2(1, 2);
this.f3(1, 2, 3);
this.f4(1, 2, 3, 4);
this.f5(1, 2, 3, 4, 5);
return arguments.callee;
})();
)",
R"(
function f0() {}
function f1(a) {}
function f2(a, b) {}
function f3(a, b, c) {}
function f4(a, b, c, d) {}
function f5(a, b, c, d, e) {}
(function() {
f0();
f1(1);
f2(1, 2);
f3(1, 2, 3);
f4(1, 2, 3, 4);
f5(1, 2, 3, 4, 5);
return arguments.callee;
})();
)",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("IIFEWithoutOneshotOpt.golden")));
}
TEST(PropertyStores) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"function f(a) { a.name = \"val\"; }\n"
"f({name : \"test\"})",
"function f(a) { a[\"key\"] = \"val\"; }\n"
"f({key : \"test\"})",
"function f(a) { a[100] = \"val\"; }\n"
"f({100 : \"test\"})",
"function f(a, b) { a[b] = \"val\"; }\n"
"f({arg : \"test\"}, \"arg\")",
"function f(a) { a.name = a[-124]; }\n"
"f({\"-124\" : \"test\", name : 123 })",
"function f(a) { \"use strict\"; a.name = \"val\"; }\n"
"f({name : \"test\"})",
"function f(a, b) { \"use strict\"; a[b] = \"val\"; }\n"
"f({arg : \"test\"}, \"arg\")",
"function f(a) {\n"
" a.name = 1;\n"
" var b = {};\n"
REPEAT_128_LOAD_UNIQUE_PROPERTY()
" a.name = 2;\n"
"}\n"
"f({name : \"test\"})\n",
"function f(a) {\n"
" 'use strict';\n"
" a.name = 1;\n"
" var b = {};\n"
REPEAT_128_LOAD_UNIQUE_PROPERTY()
" a.name = 2;\n"
"}\n"
"f({name : \"test\"})\n",
"function f(a, b) {\n"
" a[b] = 1;\n"
REPEAT_127(" a[b] = 1;\n")
" a[b] = 2;\n"
"}\n"
"f({name : \"test\"})\n",
"function f(a, b) {\n"
" 'use strict';\n"
" a[b] = 1;\n"
REPEAT_127(" a[b] = 1;\n")
" a[b] = 2;\n"
"}\n"
"f({name : \"test\"})\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("PropertyStores.golden")));
}
#define FUNC_ARG "new (function Obj() { this.func = function() { return; }})()"
TEST(PropertyCall) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"function f(a) { return a.func(); }\n"
"f(" FUNC_ARG ")",
"function f(a, b, c) { return a.func(b, c); }\n"
"f(" FUNC_ARG ", 1, 2)",
"function f(a, b) { return a.func(b + b, b); }\n"
"f(" FUNC_ARG ", 1)",
"function f(a) {\n"
" var b = {};\n"
REPEAT_128_LOAD_UNIQUE_PROPERTY()
" a.func;\n" //
" return a.func(); }\n"
"f(" FUNC_ARG ")",
"function f(a) { return a.func(1).func(2).func(3); }\n"
"f(new (function Obj() { this.func = function(a) { return this; }})())",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("PropertyCall.golden")));
}
TEST(LoadGlobal) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"var a = 1;\n"
"function f() { return a; }\n"
"f()",
"function t() { }\n"
"function f() { return t; }\n"
"f()",
"a = 1;\n"
"function f() { return a; }\n"
"f()",
"a = 1;\n"
"function f(c) {\n"
" var b = {};\n"
REPEAT_128_LOAD_UNIQUE_PROPERTY()
" return a;\n"
"}\n"
"f({name: 1});\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("LoadGlobal.golden")));
}
TEST(StoreGlobal) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"var a = 1;\n"
"function f() { a = 2; }\n"
"f();\n",
"var a = \"test\"; function f(b) { a = b; }\n"
"f(\"global\");\n",
"'use strict'; var a = 1;\n"
"function f() { a = 2; }\n"
"f();\n",
"a = 1;\n"
"function f() { a = 2; }\n"
"f();\n",
"a = 1;\n"
"function f(c) {\n"
" var b = {};\n"
REPEAT_128_LOAD_UNIQUE_PROPERTY()
" a = 2;\n"
"}\n"
"f({name: 1});\n",
"a = 1;\n"
"function f(c) {\n"
" 'use strict';\n"
" var b = {};\n"
REPEAT_128_LOAD_UNIQUE_PROPERTY()
" a = 2;\n"
"}\n"
"f({name: 1});\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("StoreGlobal.golden")));
}
TEST(CallGlobal) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"function t() { }\n"
"function f() { return t(); }\n"
"f();\n",
"function t(a, b, c) { }\n"
"function f() { return t(1, 2, 3); }\n"
"f();\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("CallGlobal.golden")));
}
TEST(CallRuntime) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"function f() { %TheHole() }\n"
"f();\n",
"function f(a) { return %IsArray(a) }\n"
"f(undefined);\n",
"function f() { return %Add(1, 2) }\n"
"f();\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("CallRuntime.golden")));
}
TEST(IfConditions) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"function f() {\n"
" if (0) {\n"
" return 1;\n"
" } else {\n"
" return -1;\n"
" }\n"
"};\n"
"f();\n",
"function f() {\n"
" if ('lucky') {\n"
" return 1;\n"
" } else {\n"
" return -1;\n"
" }\n"
"};\n"
"f();\n",
"function f() {\n"
" if (false) {\n"
" return 1;\n"
" } else {\n"
" return -1;\n"
" }\n"
"};\n"
"f();\n",
"function f() {\n"
" if (false) {\n"
" return 1;\n"
" }\n"
"};\n"
"f();\n",
"function f() {\n"
" var a = 1;\n"
" if (a) {\n"
" a += 1;\n"
" } else {\n"
" return 2;\n"
" }\n"
"};\n"
"f();\n",
"function f(a) {\n"
" if (a <= 0) {\n"
" return 200;\n"
" } else {\n"
" return -200;\n"
" }\n"
"};\n"
"f(99);\n",
"function f(a, b) { if (a in b) { return 200; } }"
"f('prop', { prop: 'yes'});\n",
"function f(z) { var a = 0; var b = 0; if (a === 0.01) {\n"
REPEAT_64(" b = a; a = b;\n")
" return 200; } else { return -200; } } f(0.001);\n",
"function f() {\n"
" var a = 0; var b = 0;\n"
" if (a) {\n"
REPEAT_64(" b = a; a = b;\n")
" return 200; } else { return -200; }\n"
"};\n"
"f();\n",
"function f(a, b) {\n"
" if (a == b) { return 1; }\n"
" if (a === b) { return 1; }\n"
" if (a < b) { return 1; }\n"
" if (a > b) { return 1; }\n"
" if (a <= b) { return 1; }\n"
" if (a >= b) { return 1; }\n"
" if (a in b) { return 1; }\n"
" if (a instanceof b) { return 1; }\n"
" return 0;\n"
"}\n"
"f(1, 1);\n",
"function f() {\n"
" var a = 0;\n"
" if (a) {\n"
" return 20;\n"
" } else {\n"
" return -20;\n"
" }\n"
"};\n"
"f();\n",
"function f(a, b) {\n"
" if (a == b || a < 0) {\n"
" return 1;\n"
" } else if (a > 0 && b > 0) {\n"
" return 0;\n"
" } else {\n"
" return -1;\n"
" }\n"
"};\n"
"f(-1, 1);\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("IfConditions.golden")));
}
TEST(DeclareGlobals) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
printer.set_top_level(true);
const char* snippets[] = {
"var a = 1;\n",
"function f() {}\n",
"var a = 1;\n"
"a=2;\n",
"function f() {}\n"
"f();\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("DeclareGlobals.golden")));
}
TEST(BreakableBlocks) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var x = 0;\n"
"label: {\n"
" x = x + 1;\n"
" break label;\n"
" x = x + 1;\n"
"}\n"
"return x;\n",
"var sum = 0;\n"
"outer: {\n"
" for (var x = 0; x < 10; ++x) {\n"
" for (var y = 0; y < 3; ++y) {\n"
" ++sum;\n"
" if (x + y == 12) { break outer; }\n"
" }\n"
" }\n"
"}\n"
"return sum;\n",
"outer: {\n"
" let y = 10;\n"
" function f() { return y; }\n"
" break outer;\n"
"}\n",
"let x = 1;\n"
"outer: {\n"
" inner: {\n"
" let y = 2;\n"
" function f() { return x + y; }\n"
" if (y) break outer;\n"
" y = 3;\n"
" }\n"
"}\n"
"x = 4;\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("BreakableBlocks.golden")));
}
TEST(BasicLoops) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var x = 0;\n"
"while (false) { x = 99; break; continue; }\n"
"return x;\n",
"var x = 0;\n"
"while (false) {\n"
" x = x + 1;\n"
"};\n"
"return x;\n",
"var x = 0;\n"
"var y = 1;\n"
"while (x < 10) {\n"
" y = y * 12;\n"
" x = x + 1;\n"
" if (x == 3) continue;\n"
" if (x == 4) break;\n"
"}\n"
"return y;\n",
"var i = 0;\n"
"while (true) {\n"
" if (i < 0) continue;\n"
" if (i == 3) break;\n"
" if (i == 4) break;\n"
" if (i == 10) continue;\n"
" if (i == 5) break;\n"
" i = i + 1;\n"
"}\n"
"return i;\n",
"var i = 0;\n"
"while (true) {\n"
" while (i < 3) {\n"
" if (i == 2) break;\n"
" i = i + 1;\n"
" }\n"
" i = i + 1;\n"
" break;\n"
"}\n"
"return i;\n",
"var x = 10;\n"
"var y = 1;\n"
"while (x) {\n"
" y = y * 12;\n"
" x = x - 1;\n"
"}\n"
"return y;\n",
"var x = 0; var y = 1;\n"
"do {\n"
" y = y * 10;\n"
" if (x == 5) break;\n"
" if (x == 6) continue;\n"
" x = x + 1;\n"
"} while (x < 10);\n"
"return y;\n",
"var x = 10;\n"
"var y = 1;\n"
"do {\n"
" y = y * 12;\n"
" x = x - 1;\n"
"} while (x);\n"
"return y;\n",
"var x = 0; var y = 1;\n"
"do {\n"
" y = y * 10;\n"
" if (x == 5) break;\n"
" x = x + 1;\n"
" if (x == 6) continue;\n"
"} while (false);\n"
"return y;\n",
"var x = 0; var y = 1;\n"
"do {\n"
" y = y * 10;\n"
" if (x == 5) break;\n"
" x = x + 1;\n"
" if (x == 6) continue;\n"
"} while (true);\n"
"return y;\n",
"var x = 0;\n"
"for (;;) {\n"
" if (x == 1) break;\n"
" if (x == 2) continue;\n"
" x = x + 1;\n"
"}\n",
"for (var x = 0;;) {\n"
" if (x == 1) break;\n"
" if (x == 2) continue;\n"
" x = x + 1;\n"
"}\n",
"var x = 0;\n"
"for (;; x = x + 1) {\n"
" if (x == 1) break;\n"
" if (x == 2) continue;\n"
"}\n",
"for (var x = 0;; x = x + 1) {\n"
" if (x == 1) break;\n"
" if (x == 2) continue;\n"
"}\n",
"var u = 0;\n"
"for (var i = 0; i < 100; i = i + 1) {\n"
" u = u + 1;\n"
" continue;\n"
"}\n",
"var y = 1;\n"
"for (var x = 10; x; --x) {\n"
" y = y * 12;\n"
"}\n"
"return y;\n",
"var x = 0;\n"
"for (var i = 0; false; i++) {\n"
" x = x + 1;\n"
"};\n"
"return x;\n",
"var x = 0;\n"
"for (var i = 0; true; ++i) {\n"
" x = x + 1;\n"
" if (x == 20) break;\n"
"};\n"
"return x;\n",
"var a = 0;\n"
"while (a) {\n"
" { \n"
" let z = 1;\n"
" function f() { z = 2; }\n"
" if (z) continue;\n"
" z++;\n"
" }\n"
"}\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("BasicLoops.golden")));
}
TEST(UnaryOperators) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var x = 0;\n"
"while (x != 10) {\n"
" x = x + 10;\n"
"}\n"
"return x;\n",
"var x = false;\n"
"do {\n"
" x = !x;\n"
"} while(x == false);\n"
"return x;\n",
"var x = 101;\n"
"return void(x * 3);\n",
"var x = 1234;\n"
"var y = void (x * x - 1);\n"
"return y;\n",
"var x = 13;\n"
"return ~x;\n",
"var x = 13;\n"
"return +x;\n",
"var x = 13;\n"
"return -x;\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("UnaryOperators.golden")));
}
TEST(Typeof) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"function f() {\n"
" var x = 13;\n"
" return typeof(x);\n"
"};",
"var x = 13;\n"
"function f() {\n"
" return typeof(x);\n"
"};",
};
CHECK(CompareTexts(BuildActual(printer, snippets, "", "\nf();"),
LoadGolden("Typeof.golden")));
}
TEST(CompareTypeOf) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"return typeof(1) === 'number';\n",
"return 'string' === typeof('foo');\n",
"return typeof(true) == 'boolean';\n",
"return 'string' === typeof(undefined);\n",
"return 'unknown' === typeof(undefined);\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("CompareTypeOf.golden")));
}
TEST(CompareNil) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var a = 1;\n"
"return a === null;\n",
"var a = undefined;\n"
"return undefined === a;\n",
"var a = undefined;\n"
"return undefined !== a;\n",
"var a = 2;\n"
"return a != null;\n",
"var a = undefined;\n"
"return undefined == a;\n",
"var a = undefined;\n"
"return undefined === a ? 1 : 2;\n",
"var a = 0;\n"
"return null == a ? 1 : 2;\n",
"var a = 0;\n"
"return undefined !== a ? 1 : 2;\n",
"var a = 0;\n"
"return a === null ? 1 : 2;\n",
"var a = 0;\n"
"if (a === null) {\n"
" return 1;\n"
"} else {\n"
" return 2;\n"
"}\n",
"var a = 0;\n"
"if (a != undefined) {\n"
" return 1;\n"
"}\n",
"var a = undefined;\n"
"var b = 0;\n"
"while (a !== undefined) {\n"
" b++;\n"
"}\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("CompareNil.golden")));
}
TEST(Delete) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var a = {x:13, y:14}; return delete a.x;\n",
"'use strict'; var a = {x:13, y:14}; return delete a.x;\n",
"var a = {1:13, 2:14}; return delete a[2];\n",
"var a = 10; return delete a;\n",
"'use strict';\n"
"var a = {1:10};\n"
"(function f1() {return a;});\n"
"return delete a[1];\n",
"return delete 'test';\n",
"return delete this;\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("Delete.golden")));
}
TEST(GlobalDelete) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"var a = {x:13, y:14};\n"
"function f() {\n"
" return delete a.x;\n"
"};\n"
"f();\n",
"a = {1:13, 2:14};\n"
"function f() {\n"
" 'use strict';\n"
" return delete a[1];\n"
"};\n"
"f();\n",
"var a = {x:13, y:14};\n"
"function f() {\n"
" return delete a;\n"
"};\n"
"f();\n",
"b = 30;\n"
"function f() {\n"
" return delete b;\n"
"};\n"
"f();\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("GlobalDelete.golden")));
}
TEST(FunctionLiterals) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"return function(){ }\n",
"return (function(){ })()\n",
"return (function(x){ return x; })(1)\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("FunctionLiterals.golden")));
}
TEST(RegExpLiterals) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"return /ab+d/;\n",
"return /(\\w+)\\s(\\w+)/i;\n",
"return /ab+d/.exec('abdd');\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("RegExpLiterals.golden")));
}
TEST(ArrayLiterals) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"return [ 1, 2 ];\n",
"var a = 1; return [ a, a + 1 ];\n",
"return [ [ 1, 2 ], [ 3 ] ];\n",
"var a = 1; return [ [ a, 2 ], [ a + 2 ] ];\n",
"var a = [ 1, 2 ]; return [ ...a ];\n",
"var a = [ 1, 2 ]; return [ 0, ...a ];\n",
"var a = [ 1, 2 ]; return [ ...a, 3 ];\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("ArrayLiterals.golden")));
}
TEST(ObjectLiterals) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"return { };\n",
"return { name: 'string', val: 9.2 };\n",
"var a = 1; return { name: 'string', val: a };\n",
"var a = 1; return { val: a, val: a + 1 };\n",
"return { func: function() { } };\n",
"return { func(a) { return a; } };\n",
"return { get a() { return 2; } };\n",
"return { get a() { return this.x; }, set a(val) { this.x = val } };\n",
"return { set b(val) { this.y = val } };\n",
"var a = 1; return { 1: a };\n",
"return { __proto__: null };\n",
"var a = 'test'; return { [a]: 1 };\n",
"var a = 'test'; return { val: a, [a]: 1 };\n",
"var a = 'test'; return { [a]: 1, __proto__: {} };\n",
"var n = 'name'; return { [n]: 'val', get a() { }, set a(b) {} };\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("ObjectLiterals.golden")));
}
TEST(TopLevelObjectLiterals) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
printer.set_top_level(true);
const char* snippets[] = {
"var a = { func: function() { } };\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("TopLevelObjectLiterals.golden")));
}
TEST(TryCatch) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"try { return 1; } catch(e) { return 2; }\n",
"var a;\n"
"try { a = 1 } catch(e1) {};\n"
"try { a = 2 } catch(e2) { a = 3 }\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("TryCatch.golden")));
}
TEST(TryFinally) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var a = 1;\n"
"try { a = 2; } finally { a = 3; }\n",
"var a = 1;\n"
"try { a = 2; } catch(e) { a = 20 } finally { a = 3; }\n",
"var a; try {\n"
" try { a = 1 } catch(e) { a = 2 }\n"
"} catch(e) { a = 20 } finally { a = 3; }\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("TryFinally.golden")));
}
TEST(Throw) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"throw 1;\n",
"throw 'Error';\n",
"var a = 1; if (a) { throw 'Error'; };\n",
};
CHECK(
CompareTexts(BuildActual(printer, snippets), LoadGolden("Throw.golden")));
}
TEST(CallNew) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"function bar() { this.value = 0; }\n"
"function f() { return new bar(); }\n"
"f();\n",
"function bar(x) { this.value = 18; this.x = x;}\n"
"function f() { return new bar(3); }\n"
"f();\n",
"function bar(w, x, y, z) {\n"
" this.value = 18;\n"
" this.x = x;\n"
" this.y = y;\n"
" this.z = z;\n"
"}\n"
"function f() { return new bar(3, 4, 5); }\n"
"f();\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("CallNew.golden")));
}
TEST(ContextVariables) {
  // The wide check below relies on MIN_CONTEXT_SLOTS + 3 + 249 == 256; if this
  // ever changes, the REPEAT_XXX macros should be changed to output the correct
  // number of unique variables to trigger the wide slot load / store.
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS + 3 + 249 == 256);
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var a; return function() { a = 1; };\n",
"var a = 1; return function() { a = 2; };\n",
"var a = 1; var b = 2; return function() { a = 2; b = 3 };\n",
"var a; (function() { a = 2; })(); return a;\n",
"'use strict';\n"
"let a = 1;\n"
"{ let b = 2; return function() { a + b; }; }\n",
"'use strict';\n"
REPEAT_250_UNIQUE_VARS()
"eval();\n"
"var b = 100;\n"
"return b\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("ContextVariables.golden")));
}
TEST(ContextParameters) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"function f(arg1) { return function() { arg1 = 2; }; }",
"function f(arg1) { var a = function() { arg1 = 2; }; return arg1; }",
"function f(a1, a2, a3, a4) { return function() { a1 = a3; }; }",
"function f() { var self = this; return function() { self = 2; }; }",
};
CHECK(CompareTexts(BuildActual(printer, snippets, "", "\nf();"),
LoadGolden("ContextParameters.golden")));
}
TEST(OuterContextVariables) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"function Outer() {\n"
" var outerVar = 1;\n"
" function Inner(innerArg) {\n"
" this.innerFunc = function() { return outerVar * innerArg; }\n"
" }\n"
" this.getInnerFunc = function() { return new Inner(1).innerFunc; }\n"
"}\n"
"var f = new Outer().getInnerFunc();",
"function Outer() {\n"
" var outerVar = 1;\n"
" function Inner(innerArg) {\n"
" this.innerFunc = function() { outerVar = innerArg; }\n"
" }\n"
" this.getInnerFunc = function() { return new Inner(1).innerFunc; }\n"
"}\n"
"var f = new Outer().getInnerFunc();",
};
CHECK(CompareTexts(BuildActual(printer, snippets, "", "\nf();"),
LoadGolden("OuterContextVariables.golden")));
}
TEST(CountOperators) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var a = 1; return ++a;\n",
"var a = 1; return a++;\n",
"var a = 1; return --a;\n",
"var a = 1; return a--;\n",
"var a = { val: 1 }; return a.val++;\n",
"var a = { val: 1 }; return --a.val;\n",
"var name = 'var'; var a = { val: 1 }; return a[name]--;\n",
"var name = 'var'; var a = { val: 1 }; return ++a[name];\n",
"var a = 1; var b = function() { return a }; return ++a;\n",
"var a = 1; var b = function() { return a }; return a--;\n",
"var idx = 1; var a = [1, 2]; return a[idx++] = 2;\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("CountOperators.golden")));
}
TEST(GlobalCountOperators) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"var global = 1;\n"
"function f() { return ++global; }\n"
"f();\n",
"var global = 1;\n"
"function f() { return global--; }\n"
"f();\n",
"unallocated = 1;\n"
"function f() { 'use strict'; return --unallocated; }\n"
"f();\n",
"unallocated = 1;\n"
"function f() { return unallocated++; }\n"
"f();\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("GlobalCountOperators.golden")));
}
TEST(CompoundExpressions) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var a = 1; a += 2;\n",
"var a = 1; a /= 2;\n",
"var a = { val: 2 }; a.name *= 2;\n",
"var a = { 1: 2 }; a[1] ^= 2;\n",
"var a = 1; (function f() { return a; }); a |= 24;\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("CompoundExpressions.golden")));
}
TEST(GlobalCompoundExpressions) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"var global = 1;\n"
"function f() { return global &= 1; }\n"
"f();\n",
"unallocated = 1;\n"
"function f() { return unallocated += 1; }\n"
"f();\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("GlobalCompoundExpressions.golden")));
}
TEST(CreateArguments) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"function f() { return arguments; }",
"function f() { return arguments[0]; }",
"function f() { 'use strict'; return arguments; }",
"function f(a) { return arguments[0]; }",
"function f(a, b, c) { return arguments; }",
"function f(a, b, c) { 'use strict'; return arguments; }",
};
CHECK(CompareTexts(BuildActual(printer, snippets, "", "\nf();"),
LoadGolden("CreateArguments.golden")));
}
TEST(CreateRestParameter) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"function f(...restArgs) { return restArgs; }",
"function f(a, ...restArgs) { return restArgs; }",
"function f(a, ...restArgs) { return restArgs[0]; }",
"function f(a, ...restArgs) { return restArgs[0] + arguments[0]; }",
};
CHECK(CompareTexts(BuildActual(printer, snippets, "", "\nf();"),
LoadGolden("CreateRestParameter.golden")));
}
TEST(ForIn) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"for (var p in null) {}\n",
"for (var p in undefined) {}\n",
"for (var p in undefined) {}\n",
"var x = 'potatoes';\n"
"for (var p in x) { return p; }\n",
"var x = 0;\n"
"for (var p in [1,2,3]) { x += p; }\n",
"var x = { 'a': 1, 'b': 2 };\n"
"for (x['a'] in [10, 20, 30]) {\n"
" if (x['a'] == 10) continue;\n"
" if (x['a'] == 20) break;\n"
"}\n",
"var x = [ 10, 11, 12 ] ;\n"
"for (x[0] in [1,2,3]) { return x[3]; }\n",
};
CHECK(
CompareTexts(BuildActual(printer, snippets), LoadGolden("ForIn.golden")));
}
TEST(ForOf) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"for (var p of [0, 1, 2]) {}\n",
"var x = 'potatoes';\n"
"for (var p of x) { return p; }\n",
"for (var x of [10, 20, 30]) {\n"
" if (x == 10) continue;\n"
" if (x == 20) break;\n"
"}\n",
"var x = { 'a': 1, 'b': 2 };\n"
"for (x['a'] of [1,2,3]) { return x['a']; }\n",
};
CHECK(
CompareTexts(BuildActual(printer, snippets), LoadGolden("ForOf.golden")));
}
TEST(Conditional) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"return 1 ? 2 : 3;\n",
"return 1 ? 2 ? 3 : 4 : 5;\n",
"return 0 < 1 ? 2 : 3;\n",
"var x = 0;\n"
"return x ? 2 : 3;\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("Conditional.golden")));
}
TEST(Switch) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var a = 1;\n"
"switch(a) {\n"
" case 1: return 2;\n"
" case 2: return 3;\n"
"}\n",
"var a = 1;\n"
"switch(a) {\n"
" case 1: a = 2; break;\n"
" case 2: a = 3; break;\n"
"}\n",
"var a = 1;\n"
"switch(a) {\n"
" case 1: a = 2; // fall-through\n"
" case 2: a = 3; break;\n"
"}\n",
"var a = 1;\n"
"switch(a) {\n"
" case 2: break;\n"
" case 3: break;\n"
" default: a = 1; break;\n"
"}\n",
"var a = 1;\n"
"switch(typeof(a)) {\n"
" case 2: a = 1; break;\n"
" case 3: a = 2; break;\n"
" default: a = 3; break;\n"
"}\n",
"var a = 1;\n"
"switch(a) {\n"
" case typeof(a): a = 1; break;\n"
" default: a = 2; break;\n"
"}\n",
"var a = 1;\n"
"switch(a) {\n"
" case 1:\n"
REPEAT_64(" a = 2;\n")
" break;\n"
" case 2:\n"
" a = 3;\n"
" break;\n"
"}\n",
"var a = 1;\n"
"switch(a) {\n"
" case 1: \n"
" switch(a + 1) {\n"
" case 2 : a = 1; break;\n"
" default : a = 2; break;\n"
" } // fall-through\n"
" case 2: a = 3;\n"
"}\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("Switch.golden")));
}
TEST(BasicBlockToBoolean) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var a = 1; if (a || a < 0) { return 1; }\n",
"var a = 1; if (a && a < 0) { return 1; }\n",
"var a = 1; a = (a || a < 0) ? 2 : 3;\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("BasicBlockToBoolean.golden")));
}
TEST(DeadCodeRemoval) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"return; var a = 1; a();\n",
"if (false) { return; }; var a = 1;\n",
"if (true) { return 1; } else { return 2; };\n",
"var a = 1; if (a) { return 1; }; return 2;\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("DeadCodeRemoval.golden")));
}
TEST(ThisFunction) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"var f;\n"
"f = function f() {};",
"var f;\n"
"f = function f() { return f; };",
};
CHECK(CompareTexts(BuildActual(printer, snippets, "", "\nf();"),
LoadGolden("ThisFunction.golden")));
}
TEST(NewTarget) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"return new.target;\n",
"new.target;\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("NewTarget.golden")));
}
TEST(RemoveRedundantLdar) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var ld_a = 1;\n" // This test is to check Ldar does not
"while(true) {\n" // get removed if the preceding Star is
" ld_a = ld_a + ld_a;\n" // in a different basicblock.
" if (ld_a > 10) break;\n"
"}\n"
"return ld_a;\n",
"var ld_a = 1;\n"
"do {\n"
" ld_a = ld_a + ld_a;\n"
" if (ld_a > 10) continue;\n"
"} while(false);\n"
"return ld_a;\n",
"var ld_a = 1;\n"
" ld_a = ld_a + ld_a;\n"
" return ld_a;\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("RemoveRedundantLdar.golden")));
}
TEST(GenerateTestUndetectable) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var obj_a = {val:1};\n"
"var b = 10;\n"
"if (obj_a == null) { b = 20;}\n"
"return b;\n",
"var obj_a = {val:1};\n"
"var b = 10;\n"
"if (obj_a == undefined) { b = 20;}\n"
"return b;\n",
"var obj_a = {val:1};\n"
"var b = 10;\n"
"if (obj_a != null) { b = 20;}\n"
"return b;\n",
"var obj_a = {val:1};\n"
"var b = 10;\n"
"if (obj_a != undefined) { b = 20;}\n"
"return b;\n",
"var obj_a = {val:1};\n"
"var b = 10;\n"
"if (obj_a === null) { b = 20;}\n"
"return b;\n",
"var obj_a = {val:1};\n"
"var b = 10;\n"
"if (obj_a === undefined) { b = 20;}\n"
"return b;\n",
"var obj_a = {val:1};\n"
"var b = 10;\n"
"if (obj_a !== null) { b = 20;}\n"
"return b;\n",
"var obj_a = {val:1};\n"
"var b = 10;\n"
"if (obj_a !== undefined) { b = 20;}\n"
"return b;\n"};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("GenerateTestUndetectable.golden")));
}
TEST(AssignmentsInBinaryExpression) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var x = 0, y = 1;\n"
"return (x = 2, y = 3, x = 4, y = 5);\n",
"var x = 55;\n"
"var y = (x = 100);\n"
"return y;\n",
"var x = 55;\n"
"x = x + (x = 100) + (x = 101);\n"
"return x;\n",
"var x = 55;\n"
"x = (x = 56) - x + (x = 57);\n"
"x++;\n"
"return x;\n",
"var x = 55;\n"
"var y = x + (x = 1) + (x = 2) + (x = 3);\n"
"return y;\n",
"var x = 55;\n"
"var x = x + (x = 1) + (x = 2) + (x = 3);\n"
"return x;\n",
"var x = 10, y = 20;\n"
"return x + (x = 1) + (x + 1) * (y = 2) + (y = 3) + (x = 4) + (y = 5) + "
"y;\n",
"var x = 17;\n"
"return 1 + x + (x++) + (++x);\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("AssignmentsInBinaryExpression.golden")));
}
TEST(DestructuringAssignment) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var x, a = [0,1,2,3];\n"
"[x] = a;\n",
"var x, y, a = [0,1,2,3];\n"
"[,x,...y] = a;\n",
"var x={}, y, a = [0];\n"
"[x.foo,y=4] = a;\n",
"var x, a = {x:1};\n"
"({x} = a);\n",
"var x={}, a = {y:1};\n"
"({y:x.foo} = a);\n",
"var x, a = {y:1, w:2, v:3};\n"
"({x=0,...y} = a);\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("DestructuringAssignment.golden")));
}
TEST(Eval) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"return eval('1;');\n",
};
CHECK(
CompareTexts(BuildActual(printer, snippets), LoadGolden("Eval.golden")));
}
TEST(LookupSlot) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_test_function_name("f");
// clang-format off
const char* snippets[] = {
"eval('var x = 10;'); return x;\n",
"eval('var x = 10;'); return typeof x;\n",
"x = 20; return eval('');\n",
"var x = 20;\n"
"f = function(){\n"
" eval('var x = 10');\n"
" return x;\n"
"}\n"
"f();\n",
"x = 20;\n"
"f = function(){\n"
" eval('var x = 10');\n"
" return x;\n"
"}\n"
"f();\n"
};
// clang-format on
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("LookupSlot.golden")));
}
TEST(CallLookupSlot) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"g = function(){}; eval(''); return g();\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("CallLookupSlot.golden")));
}
// TODO(mythria): tests for variable/function declaration in lookup slots.
TEST(LookupSlotInEval) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"return x;",
"x = 10;",
"'use strict'; x = 10;",
"return typeof x;",
};
std::string actual = BuildActual(printer, snippets,
"var f;\n"
"var x = 1;\n"
"function f1() {\n"
" eval(\"function t() { ",
" }; f = t; f();\");\n"
"}\n"
"f1();");
CHECK(CompareTexts(actual, LoadGolden("LookupSlotInEval.golden")));
}
TEST(DeleteLookupSlotInEval) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"delete x;",
"return delete y;",
"return delete z;",
};
std::string actual = BuildActual(printer, snippets,
"var f;\n"
"var x = 1;\n"
"z = 10;\n"
"function f1() {\n"
" var y;\n"
" eval(\"function t() { ",
" }; f = t; f();\");\n"
"}\n"
"f1();");
CHECK(CompareTexts(actual, LoadGolden("DeleteLookupSlotInEval.golden")));
}
TEST(WideRegisters) {
// Prepare prologue that creates frame for lots of registers.
std::ostringstream os;
for (size_t i = 0; i < 157; ++i) {
os << "var x" << i << " = 0;\n";
}
std::string prologue(os.str());
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"x0 = x127;\n"
"return x0;\n",
"x127 = x126;\n"
"return x127;\n",
"if (x2 > 3) { return x129; }\n"
"return x128;\n",
"var x0 = 0;\n"
"if (x129 == 3) { var x129 = x0; }\n"
"if (x2 > 3) { return x0; }\n"
"return x129;\n",
"var x0 = 0;\n"
"var x1 = 0;\n"
"for (x128 = 0; x128 < 64; x128++) {"
" x1 += x128;"
"}"
"return x128;\n",
"var x0 = 1234;\n"
"var x1 = 0;\n"
"for (x128 in x0) {"
" x1 += x128;"
"}"
"return x1;\n",
"x0 = %Add(x64, x63);\n"
"x1 = %Add(x27, x143);\n"
"%TheHole();\n"
"return x1;\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets, prologue.c_str()),
LoadGolden("WideRegisters.golden")));
}
TEST(ConstVariable) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"const x = 10;\n",
"const x = 10; return x;\n",
"const x = ( x = 20);\n",
"const x = 10; x = 20;\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("ConstVariable.golden")));
}
TEST(LetVariable) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"let x = 10;\n",
"let x = 10; return x;\n",
"let x = (x = 20);\n",
"let x = 10; x = 20;\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("LetVariable.golden")));
}
TEST(ConstVariableContextSlot) {
// TODO(mythria): Add tests for initialization of this via super calls.
// TODO(mythria): Add tests that walk the context chain.
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"const x = 10; function f1() {return x;}\n",
"const x = 10; function f1() {return x;} return x;\n",
"const x = (x = 20); function f1() {return x;}\n",
"const x = 10; x = 20; function f1() {return x;}\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("ConstVariableContextSlot.golden")));
}
TEST(LetVariableContextSlot) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"let x = 10; function f1() {return x;}\n",
"let x = 10; function f1() {return x;} return x;\n",
"let x = (x = 20); function f1() {return x;}\n",
"let x = 10; x = 20; function f1() {return x;}\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("LetVariableContextSlot.golden")));
}
TEST(WithStatement) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"with ({x:42}) { return x; }\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("WithStatement.golden")));
}
TEST(DoDebugger) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"debugger;\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("DoDebugger.golden")));
}
TEST(ClassDeclarations) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"class Person {\n"
" constructor(name) { this.name = name; }\n"
" speak() { console.log(this.name + ' is speaking.'); }\n"
"}\n",
"class person {\n"
" constructor(name) { this.name = name; }\n"
" speak() { console.log(this.name + ' is speaking.'); }\n"
"}\n",
"var n0 = 'a';\n"
"var n1 = 'b';\n"
"class N {\n"
" [n0]() { return n0; }\n"
" static [n1]() { return n1; }\n"
"}\n",
"var count = 0;\n"
"class C { constructor() { count++; }}\n"
"return new C();\n",
"(class {})\n"
"class E { static name () {}}\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("ClassDeclarations.golden")));
}
TEST(ClassAndSuperClass) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("test");
const char* snippets[] = {
"var test;\n"
"(function() {\n"
" class A {\n"
" method() { return 2; }\n"
" }\n"
" class B extends A {\n"
" method() { return super.method() + 1; }\n"
" }\n"
" test = new B().method;\n"
" test();\n"
"})();\n",
"var test;\n"
"(function() {\n"
" class A {\n"
" get x() { return 1; }\n"
" set x(val) { return; }\n"
" }\n"
" class B extends A {\n"
" method() { super.x = 2; return super.x; }\n"
" }\n"
" test = new B().method;\n"
" test();\n"
"})();\n",
"var test;\n"
"(function() {\n"
" class A {\n"
" constructor(x) { this.x_ = x; }\n"
" }\n"
" class B extends A {\n"
" constructor() { super(1); this.y_ = 2; }\n"
" }\n"
" test = new B().constructor;\n"
"})();\n",
"var test;\n"
"(function() {\n"
" class A {\n"
" constructor() { this.x_ = 1; }\n"
" }\n"
" class B extends A {\n"
" constructor() { super(); this.y_ = 2; }\n"
" }\n"
" test = new B().constructor;\n"
"})();\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("ClassAndSuperClass.golden")));
}
TEST(PublicClassFields) {
bool old_flag = i::FLAG_harmony_public_fields;
i::FLAG_harmony_public_fields = true;
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"{\n"
" class A {\n"
" a;\n"
" ['b'];\n"
" }\n"
"\n"
" class B {\n"
" a = 1;\n"
" ['b'] = this.a;\n"
" }\n"
" new A;\n"
" new B;\n"
"}\n",
"{\n"
" class A extends class {} {\n"
" a;\n"
" ['b'];\n"
" }\n"
"\n"
" class B extends class {} {\n"
" a = 1;\n"
" ['b'] = this.a;\n"
" foo() { return 1; }\n"
" constructor() {\n"
" super();\n"
" }\n"
" }\n"
"\n"
" class C extends B {\n"
" a = 1;\n"
" ['b'] = this.a;\n"
" constructor() {\n"
" (() => super())();\n"
" }\n"
" }\n"
"\n"
" new A;\n"
" new B;\n"
" new C;\n"
"}\n"};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("PublicClassFields.golden")));
i::FLAG_harmony_public_fields = old_flag;
}
TEST(PrivateClassFields) {
bool old_flag = i::FLAG_harmony_private_fields;
i::FLAG_harmony_private_fields = true;
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"{\n"
" class A {\n"
" #a;\n"
" constructor() {\n"
" this.#a = 1;\n"
" }\n"
" }\n"
"\n"
" class B {\n"
" #a = 1;\n"
" }\n"
" new A;\n"
" new B;\n"
"}\n",
"{\n"
" class A extends class {} {\n"
" #a;\n"
" constructor() {\n"
" super();\n"
" this.#a = 1;\n"
" }\n"
" }\n"
"\n"
" class B extends class {} {\n"
" #a = 1;\n"
" #b = this.#a;\n"
" foo() { return this.#a; }\n"
" bar(v) { this.#b = v; }\n"
" constructor() {\n"
" super();\n"
" this.foo();\n"
" this.bar(3);\n"
" }\n"
" }\n"
"\n"
" class C extends B {\n"
" #a = 2;\n"
" constructor() {\n"
" (() => super())();\n"
" }\n"
" }\n"
"\n"
" new A;\n"
" new B;\n"
" new C;\n"
"};\n"};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("PrivateClassFields.golden")));
i::FLAG_harmony_private_fields = old_flag;
}
TEST(StaticClassFields) {
bool old_flag = i::FLAG_harmony_public_fields;
bool old_static_flag = i::FLAG_harmony_static_fields;
i::FLAG_harmony_public_fields = true;
i::FLAG_harmony_static_fields = true;
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"{\n"
" class A {\n"
" a;\n"
" ['b'];\n"
" static c;\n"
" static ['d'];\n"
" }\n"
"\n"
" class B {\n"
" a = 1;\n"
" ['b'] = this.a;\n"
" static c = 3;\n"
" static ['d'] = this.c;\n"
" }\n"
" new A;\n"
" new B;\n"
"}\n",
"{\n"
" class A extends class {} {\n"
" a;\n"
" ['b'];\n"
" static c;\n"
" static ['d'];\n"
" }\n"
"\n"
" class B extends class {} {\n"
" a = 1;\n"
" ['b'] = this.a;\n"
" static c = 3;\n"
" static ['d'] = this.c;\n"
" foo() { return 1; }\n"
" constructor() {\n"
" super();\n"
" }\n"
" }\n"
"\n"
" class C extends B {\n"
" a = 1;\n"
" ['b'] = this.a;\n"
" static c = 3;\n"
" static ['d'] = super.foo();\n"
" constructor() {\n"
" (() => super())();\n"
" }\n"
" }\n"
"\n"
" new A;\n"
" new B;\n"
" new C;\n"
"}\n"};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("StaticClassFields.golden")));
i::FLAG_harmony_public_fields = old_flag;
i::FLAG_harmony_static_fields = old_static_flag;
}
TEST(Generators) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"function* f() { }\n"
"f();\n",
"function* f() { yield 42 }\n"
"f();\n",
"function* f() { for (let x of [42]) yield x }\n"
"f();\n",
"function* g() { yield 42 }\n"
"function* f() { yield* g() }\n"
"f();\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("Generators.golden")));
}
TEST(AsyncGenerators) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"async function* f() { }\n"
"f();\n",
"async function* f() { yield 42 }\n"
"f();\n",
"async function* f() { for (let x of [42]) yield x }\n"
"f();\n",
"function* g() { yield 42 }\n"
"async function* f() { yield* g() }\n"
"f();\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("AsyncGenerators.golden")));
}
TEST(Modules) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_module(true);
printer.set_top_level(true);
const char* snippets[] = {
"import \"bar\";\n",
"import {foo} from \"bar\";\n",
"import {foo as goo} from \"bar\";\n"
"goo(42);\n"
"{ let x; { goo(42) } };\n",
"export var foo = 42;\n"
"foo++;\n"
"{ let x; { foo++ } };\n",
"export let foo = 42;\n"
"foo++;\n"
"{ let x; { foo++ } };\n",
"export const foo = 42;\n"
"foo++;\n"
"{ let x; { foo++ } };\n",
"export default (function () {});\n",
"export default (class {});\n",
"export {foo as goo} from \"bar\"\n",
"export * from \"bar\"\n",
"import * as foo from \"bar\"\n"
"foo.f(foo, foo.x);\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("Modules.golden")));
}
TEST(SuperCallAndSpread) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("test");
const char* snippets[] = {
"var test;\n"
"(function() {\n"
" class A {\n"
" constructor(...args) { this.baseArgs = args; }\n"
" }\n"
" class B extends A {}\n"
" test = new B(1, 2, 3).constructor;\n"
"})();\n",
"var test;\n"
"(function() {\n"
" class A {\n"
" constructor(...args) { this.baseArgs = args; }\n"
" }\n"
" class B extends A {\n"
" constructor(...args) { super(1, ...args); }\n"
" }\n"
" test = new B(1, 2, 3).constructor;\n"
"})();\n",
"var test;\n"
"(function() {\n"
" class A {\n"
" constructor(...args) { this.baseArgs = args; }\n"
" }\n"
" class B extends A {\n"
" constructor(...args) { super(1, ...args, 1); }\n"
" }\n"
" test = new B(1, 2, 3).constructor;\n"
"})();\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("SuperCallAndSpread.golden")));
}
TEST(CallAndSpread) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {"Math.max(...[1, 2, 3]);\n",
"Math.max(0, ...[1, 2, 3]);\n",
"Math.max(0, ...[1, 2, 3], 4);\n"};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("CallAndSpread.golden")));
}
TEST(NewAndSpread) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"class A { constructor(...args) { this.args = args; } }\n"
"new A(...[1, 2, 3]);\n",
"class A { constructor(...args) { this.args = args; } }\n"
"new A(0, ...[1, 2, 3]);\n",
"class A { constructor(...args) { this.args = args; } }\n"
"new A(0, ...[1, 2, 3], 4);\n"};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("NewAndSpread.golden")));
}
TEST(ForAwaitOf) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"async function f() {\n"
" for await (let x of [1, 2, 3]) {}\n"
"}\n"
"f();\n",
"async function f() {\n"
" for await (let x of [1, 2, 3]) { return x; }\n"
"}\n"
"f();\n",
"async function f() {\n"
" for await (let x of [10, 20, 30]) {\n"
" if (x == 10) continue;\n"
" if (x == 20) break;\n"
" }\n"
"}\n"
"f();\n",
"async function f() {\n"
" var x = { 'a': 1, 'b': 2 };\n"
" for (x['a'] of [1,2,3]) { return x['a']; }\n"
"}\n"
"f();\n"};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("ForAwaitOf.golden")));
}
TEST(StandardForLoop) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"function f() {\n"
" for (let x = 0; x < 10; ++x) { let y = x; }\n"
"}\n"
"f();\n",
"function f() {\n"
" for (let x = 0; x < 10; ++x) { eval('1'); }\n"
"}\n"
"f();\n",
"function f() {\n"
" for (let x = 0; x < 10; ++x) { (function() { return x; })(); }\n"
"}\n"
"f();\n",
"function f() {\n"
" for (let { x, y } = { x: 0, y: 3 }; y > 0; --y) { let z = x + y; }\n"
"}\n"
"f();\n",
"function* f() {\n"
" for (let x = 0; x < 10; ++x) { let y = x; }\n"
"}\n"
"f();\n",
"function* f() {\n"
" for (let x = 0; x < 10; ++x) yield x;\n"
"}\n"
"f();\n",
"async function f() {\n"
" for (let x = 0; x < 10; ++x) { let y = x; }\n"
"}\n"
"f();\n",
"async function f() {\n"
" for (let x = 0; x < 10; ++x) await x;\n"
"}\n"
"f();\n"};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("StandardForLoop.golden")));
}
TEST(ForOfLoop) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
printer.set_wrap(false);
printer.set_test_function_name("f");
const char* snippets[] = {
"function f(arr) {\n"
" for (let x of arr) { let y = x; }\n"
"}\n"
"f([1, 2, 3]);\n",
"function f(arr) {\n"
" for (let x of arr) { eval('1'); }\n"
"}\n"
"f([1, 2, 3]);\n",
"function f(arr) {\n"
" for (let x of arr) { (function() { return x; })(); }\n"
"}\n"
"f([1, 2, 3]);\n",
"function f(arr) {\n"
" for (let { x, y } of arr) { let z = x + y; }\n"
"}\n"
"f([{ x: 0, y: 3 }, { x: 1, y: 9 }, { x: -12, y: 17 }]);\n",
"function* f(arr) {\n"
" for (let x of arr) { let y = x; }\n"
"}\n"
"f([1, 2, 3]);\n",
"function* f(arr) {\n"
" for (let x of arr) yield x;\n"
"}\n"
"f([1, 2, 3]);\n",
"async function f(arr) {\n"
" for (let x of arr) { let y = x; }\n"
"}\n"
"f([1, 2, 3]);\n",
"async function f(arr) {\n"
" for (let x of arr) await x;\n"
"}\n"
"f([1, 2, 3]);\n"};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("ForOfLoop.golden")));
}
TEST(StringConcat) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var a = 1;\n"
"var b = 2;\n"
"return a + b + 'string';\n",
"var a = 1;\n"
"var b = 2;\n"
"return 'string' + a + b;\n",
"var a = 1;\n"
"var b = 2;\n"
"return a + 'string' + b;\n",
"var a = 1;\n"
"var b = 2;\n"
"return 'foo' + a + 'bar' + b + 'baz' + 1;\n",
"var a = 1;\n"
"var b = 2;\n"
"return (a + 'string') + ('string' + b);\n",
"var a = 1;\n"
"var b = 2;\n"
"function foo(a, b) { };\n"
"return 'string' + foo(a, b) + a + b;\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("StringConcat.golden")));
}
TEST(TemplateLiterals) {
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
const char* snippets[] = {
"var a = 1;\n"
"var b = 2;\n"
"return `${a}${b}string`;\n",
"var a = 1;\n"
"var b = 2;\n"
"return `string${a}${b}`;\n",
"var a = 1;\n"
"var b = 2;\n"
"return `${a}string${b}`;\n",
"var a = 1;\n"
"var b = 2;\n"
"return `foo${a}bar${b}baz${1}`;\n",
"var a = 1;\n"
"var b = 2;\n"
"return `${a}string` + `string${b}`;\n",
"var a = 1;\n"
"var b = 2;\n"
"function foo(a, b) { };\n"
"return `string${foo(a, b)}${a}${b}`;\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("TemplateLiterals.golden")));
}
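// Example (hypothetical sketch): the shape of one more golden-file test. It is
// left commented out because it would need a matching NewSnippet.golden
// expectations file (normally produced with the generate-bytecode-expectations
// tool) checked in alongside the others.
//
// TEST(NewSnippet) {
//   InitializedIgnitionHandleScope scope;
//   BytecodeExpectationsPrinter printer(CcTest::isolate());
//   const char* snippets[] = {
//       "return 1 + 2;\n",
//   };
//   CHECK(CompareTexts(BuildActual(printer, snippets),
//                      LoadGolden("NewSnippet.golden")));
// }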
#undef XSTR
#undef STR
#undef UNIQUE_VAR
#undef REPEAT_2
#undef REPEAT_4
#undef REPEAT_8
#undef REPEAT_16
#undef REPEAT_32
#undef REPEAT_64
#undef REPEAT_128
#undef REPEAT_256
#undef REPEAT_127
#undef REPEAT_249
#undef REPEAT_2_UNIQUE_VARS
#undef REPEAT_4_UNIQUE_VARS
#undef REPEAT_8_UNIQUE_VARS
#undef REPEAT_16_UNIQUE_VARS
#undef REPEAT_32_UNIQUE_VARS
#undef REPEAT_64_UNIQUE_VARS
#undef REPEAT_128_UNIQUE_VARS
#undef REPEAT_250_UNIQUE_VARS
#undef LOAD_UNIQUE_PROPERTY
#undef REPEAT_2_LOAD_UNIQUE_PROPERTY
#undef REPEAT_4_LOAD_UNIQUE_PROPERTY
#undef REPEAT_8_LOAD_UNIQUE_PROPERTY
#undef REPEAT_16_LOAD_UNIQUE_PROPERTY
#undef REPEAT_32_LOAD_UNIQUE_PROPERTY
#undef REPEAT_64_LOAD_UNIQUE_PROPERTY
#undef REPEAT_128_LOAD_UNIQUE_PROPERTY
#undef FUNC_ARG
} // namespace interpreter
} // namespace internal
} // namespace v8
|
weolar/miniblink49
|
v8_7_5/test/cctest/interpreter/test-bytecode-generator.cc
|
C++
|
apache-2.0
| 80,931 | 24.001854 | 80 | 0.496954 | false |
/* CSS for plugin: Debug Bar Post Types */
#debug-menu-target-Debug_Bar_Post_Types h3 {
font-family: georgia, times, serif;
font-size: 22px;
margin-top: 1.2em;
clear: both;
}
#debug-menu-target-Debug_Bar_Post_Types table,
#debug-menu-target-Debug_Bar_Post_Types th,
#debug-menu-target-Debug_Bar_Post_Types td {
border: 0px none;
}
#debug-menu-target-Debug_Bar_Post_Types table {
table-layout: auto;
}
#debug-menu-target-Debug_Bar_Post_Types table.debug-bar-post-types {
width: 100%;
border-collapse: collapse;
	/* 'cell-padding' is not a valid CSS property; cell padding is handled by the th/td rules below */
clear: both;
line-height: 1.5;
font-size: 12px;
}
#debug-menu-target-Debug_Bar_Post_Types .debug-bar-post-types thead th,
#debug-menu-target-Debug_Bar_Post_Types .debug-bar-post-types tfoot th {
padding: 0.7em 5px;
border-bottom: 1px solid #ccc;
}
#debug-menu-target-Debug_Bar_Post_Types .debug-bar-post-types th,
#debug-menu-target-Debug_Bar_Post_Types .debug-bar-post-types td {
min-width: 60px;
}
#debug-menu-target-Debug_Bar_Post_Types .debug-bar-post-types th {
font-weight: bold;
padding: 2px 1em 2px 5px;
vertical-align: top;
}
#debug-menu-target-Debug_Bar_Post_Types .debug-bar-post-types td {
padding: 2px 5px;
vertical-align: top;
}
#debug-menu-target-Debug_Bar_Post_Types .debug-bar-post-types tr:nth-child(2n+1) {
background-color: #E8E8E8;
}
#debug-menu-target-Debug_Bar_Post_Types .debug-bar-post-types-caps thead th,
#debug-menu-target-Debug_Bar_Post_Types .debug-bar-post-types-caps tfoot th,
#debug-menu-target-Debug_Bar_Post_Types .debug-bar-post-types-caps td {
text-align: center;
}
#debug-menu-target-Debug_Bar_Post_Types ul.debug-bar-post-types {
list-style-type: square !important;
padding-left: 20px !important;
}
#debug-menu-target-Debug_Bar_Post_Types .debug-bar-post-types-table-end {
text-align: right;
}
|
noowaay/wordpress
|
wp-content/plugins/debug-bar-post-types/css/debug-bar-post-types.css
|
CSS
|
apache-2.0
| 1,805 | 29.1 | 82 | 0.728532 | false |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/ds/DirectoryService_EXPORTS.h>
namespace Aws
{
template<typename RESULT_TYPE>
class AmazonWebServiceResult;
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace DirectoryService
{
namespace Model
{
/**
* <p>Contains the results of the <a>EnableRadius</a> operation.</p><p><h3>See
* Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/ds-2015-04-16/EnableRadiusResult">AWS
* API Reference</a></p>
*/
class AWS_DIRECTORYSERVICE_API EnableRadiusResult
{
public:
EnableRadiusResult();
EnableRadiusResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
EnableRadiusResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result);
};
} // namespace Model
} // namespace DirectoryService
} // namespace Aws
|
jt70471/aws-sdk-cpp
|
aws-cpp-sdk-ds/include/aws/ds/model/EnableRadiusResult.h
|
C
|
apache-2.0
| 1,004 | 22.857143 | 106 | 0.716567 | false |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ml.dmlc.mxnet
import ml.dmlc.mxnet.Base._
object Profiler {
val state2Int = Map("stop" -> 0, "run" -> 1)
/**
   * Set up the configuration of the profiler.
   * @param kwargs configuration key-value pairs, typically:
   *             "mode" (optional): indicating whether to enable the profiler,
   *             can be "symbolic" or "all". Default is "symbolic".
   *             "fileName" (optional): the name of the output trace file.
   *             Default is "profile.json".
*/
def profilerSetConfig(kwargs: Map[String, String]): Unit = {
val keys = kwargs.keys.toArray
val vals = kwargs.values.toArray
checkCall(_LIB.mxSetProfilerConfig(keys, vals))
}
/**
   * Set the profiler state to record operators.
   * @param state, optional
   *             Indicating whether to run the profiler, can
* be "stop" or "run". Default is "stop".
*/
def profilerSetState(state: String = "stop"): Unit = {
require(state2Int.contains(state))
checkCall(_LIB.mxSetProfilerState(state2Int(state)))
}
/**
* Dump profile and stop profiler. Use this to save profile
* in advance in case your program cannot exit normally.
*/
def dumpProfile(finished: Int = 1): Unit = {
checkCall(_LIB.mxDumpProfile(finished))
}
}
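/**
 * Example (hypothetical sketch, not part of the MXNet API above): a typical
 * profiling session. The exact config keys ("mode", "filename") are assumed
 * from the documentation above and may differ between MXNet versions.
 */
object ProfilerExample {
  def main(args: Array[String]): Unit = {
    Profiler.profilerSetConfig(Map("mode" -> "all", "filename" -> "profile.json"))
    Profiler.profilerSetState("run")
    // ... run the workload to be profiled here ...
    Profiler.profilerSetState("stop")
    Profiler.dumpProfile()
  }
}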
|
jiajiechen/mxnet
|
scala-package/core/src/main/scala/ml/dmlc/mxnet/Profiler.scala
|
Scala
|
apache-2.0
| 2,044 | 34.241379 | 79 | 0.672701 | false |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.drill.exec.physical.impl.unnest;
import org.apache.drill.common.expression.SchemaPath;
import org.apache.drill.exec.ops.FragmentContext;
import org.apache.drill.exec.ops.OperatorContext;
import org.apache.drill.exec.physical.base.LateralContract;
import org.apache.drill.exec.physical.impl.sort.RecordBatchData;
import org.apache.drill.exec.record.BatchSchema;
import org.apache.drill.exec.record.CloseableRecordBatch;
import org.apache.drill.exec.record.RecordBatch;
import org.apache.drill.exec.record.TypedFieldId;
import org.apache.drill.exec.record.VectorContainer;
import org.apache.drill.exec.record.VectorWrapper;
import org.apache.drill.exec.record.WritableBatch;
import org.apache.drill.exec.record.selection.SelectionVector2;
import org.apache.drill.exec.record.selection.SelectionVector4;
import org.apache.drill.exec.vector.ValueVector;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
/**
* A mock lateral join implementation for testing unnest. This ignores all the other input and
* simply puts the unnest output into a results hypervector.
* Since Unnest returns an empty batch when it encounters a schema change, this implementation
* will also output an empty batch when it sees a schema change from unnest
*/
public class MockLateralJoinBatch implements LateralContract, CloseableRecordBatch {
private RecordBatch incoming;
private int recordIndex = 0;
private RecordBatch unnest;
  private int unnestLimit = -1; // Unnest will EMIT if the number of records crosses this limit
private boolean isDone;
private IterOutcome currentLeftOutcome = IterOutcome.NOT_YET;
private final FragmentContext context;
private final OperatorContext oContext;
private List<ValueVector> resultList = new ArrayList<>();
public MockLateralJoinBatch(FragmentContext context, OperatorContext oContext, RecordBatch incoming) {
this.context = context;
this.oContext = oContext;
this.incoming = incoming;
this.isDone = false;
}
@Override public RecordBatch getIncoming() {
return incoming; // don't need this
}
@Override public int getRecordIndex() {
return recordIndex;
}
@Override public IterOutcome getLeftOutcome() {
return currentLeftOutcome;
}
public void moveToNextRecord() {
recordIndex++;
}
public void reset() {
recordIndex = 0;
}
public void setUnnest(RecordBatch unnest){
this.unnest = unnest;
}
public void setUnnestLimit(int limit){
this.unnestLimit = limit;
}
public RecordBatch getUnnest() {
return unnest;
}
public IterOutcome next() {
IterOutcome currentOutcome = incoming.next();
currentLeftOutcome = currentOutcome;
recordIndex = 0;
switch (currentOutcome) {
case OK_NEW_SCHEMA:
// Nothing to do for this.
case OK:
IterOutcome outcome;
        // consume all the output from unnest until EMIT or end of
// incoming data
int unnestCount = 0; // number of values unnested for current record
while (recordIndex < incoming.getRecordCount()) {
outcome = unnest.next();
if (outcome == IterOutcome.OK_NEW_SCHEMA) {
            // setupSchema() does nothing (this is just a placeholder)
setupSchema();
// however unnest is also expected to return an empty batch
// which we will add to our output
}
// We put each batch output from unnest into a hypervector
// the calling test can match this against the baseline
//unnestCount +=
// unnest.getOutgoingContainer().hasRecordCount() ? unnest.getOutgoingContainer().getRecordCount() : 0;
unnestCount += addBatchToHyperContainer(unnest);
if (outcome == IterOutcome.EMIT) {
// reset unnest count
unnestCount = 0;
moveToNextRecord();
}
// Pretend that an operator somewhere between lateral and unnest
// wants to terminate processing of the record.
if(unnestLimit > 0 && unnestCount >= unnestLimit) {
unnest.kill(true);
}
}
return currentOutcome;
case NONE:
case STOP:
case OUT_OF_MEMORY:
isDone = true;
return currentOutcome;
case NOT_YET:
return currentOutcome;
default:
throw new UnsupportedOperationException("This state is not supported");
}
}
@Override public WritableBatch getWritableBatch() {
return null;
}
public List<ValueVector> getResultList() {
return resultList;
}
@Override
public void close() throws Exception {
}
@Override public int getRecordCount() {
return 0;
}
@Override
public SelectionVector2 getSelectionVector2() {
return null;
}
@Override
public SelectionVector4 getSelectionVector4() {
return null;
}
@Override
public FragmentContext getContext() {
return context;
}
@Override public BatchSchema getSchema() {
return null;
}
@Override public void kill(boolean sendUpstream) {
unnest.kill(sendUpstream);
}
@Override public VectorContainer getOutgoingContainer() {
return null;
}
@Override public TypedFieldId getValueVectorId(SchemaPath path) {
return null;
}
@Override public VectorWrapper<?> getValueAccessorById(Class<?> clazz, int... ids) {
return null;
}
private void setupSchema(){
// Nothing to do in this test
return;
}
public boolean isCompleted() {
return isDone;
}
// returns number of records added to output hyper container
private int addBatchToHyperContainer(RecordBatch inputBatch) {
int count = 0;
final RecordBatchData batchCopy = new RecordBatchData(inputBatch, oContext.getAllocator());
boolean success = false;
try {
for (VectorWrapper<?> w : batchCopy.getContainer()) {
ValueVector vv = w.getValueVector();
count += vv.getAccessor().getValueCount();
resultList.add(vv);
}
success = true;
} finally {
if (!success) {
batchCopy.clear();
}
}
return count;
}
@Override
public VectorContainer getContainer() { return null; }
@Override public Iterator<VectorWrapper<?>> iterator() {
return null;
}
}
|
pwong-mapr/incubator-drill
|
exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/unnest/MockLateralJoinBatch.java
|
Java
|
apache-2.0
| 7,094 | 28.682008 | 116 | 0.696504 | false |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.rya.kafka.connect.client;
import com.beust.jcommander.JCommander;
import com.beust.jcommander.Parameter;
import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
import edu.umd.cs.findbugs.annotations.NonNull;
/**
* A command that may be executed by the Rya Kafka Connect Client {@link CLIDriver}.
*/
@DefaultAnnotation(NonNull.class)
public interface RyaKafkaClientCommand {
/**
* Command line parameters that are used by all commands that interact with Kafka.
*/
class KafkaParameters {
@Parameter(names = { "--bootstrapServers", "-b" }, description =
"A list of host/port pairs to use for establishing the initial connection to the Kafka cluster.")
public String bootstrapServers = "localhost:9092";
@Parameter(names = { "--topic", "-t" }, required = true, description = "The Kafka topic that will be interacted with.")
public String topic;
}
/**
* @return What a user would type into the command line to indicate
* they want to execute this command.
*/
public String getCommand();
/**
* @return Briefly describes what the command does.
*/
public String getDescription();
/**
* @return Describes what arguments may be provided to the command.
*/
default public String getUsage() {
final JCommander parser = new JCommander(new KafkaParameters());
final StringBuilder usage = new StringBuilder();
parser.usage(usage);
return usage.toString();
}
/**
* Validates a set of arguments that may be passed into the command.
*
* @param args - The arguments that will be validated. (not null)
* @return {@code true} if the arguments are valid, otherwise {@code false}.
*/
public boolean validArguments(String[] args);
/**
* Execute the command using the command line arguments.
*
* @param args - Command line arguments that configure how the command will execute. (not null)
     * @throws ArgumentsException if there was a problem with the provided arguments.
     * @throws ExecutionException if there was a problem while executing the command.
*/
public void execute(final String[] args) throws ArgumentsException, ExecutionException;
/**
* A {@link RyaKafkaClientCommand} could not be executed because of a problem with
* the arguments that were provided to it.
*/
public static final class ArgumentsException extends Exception {
private static final long serialVersionUID = 1L;
public ArgumentsException(final String message) {
super(message);
}
public ArgumentsException(final String message, final Throwable cause) {
super(message, cause);
}
}
/**
* A {@link RyaKafkaClientCommand} could not be executed.
*/
public static final class ExecutionException extends Exception {
private static final long serialVersionUID = 1L;
public ExecutionException(final String message) {
super(message);
}
public ExecutionException(final String message, final Throwable cause) {
super(message, cause);
}
}
}
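// Example (hypothetical sketch, not part of Rya): a minimal command showing how
// the interface above is typically implemented. The command name and output are
// illustrative only.
class EchoTopicCommand implements RyaKafkaClientCommand {
    @Override
    public String getCommand() {
        return "echo-topic";
    }

    @Override
    public String getDescription() {
        return "Prints the configured Kafka topic and bootstrap servers.";
    }

    @Override
    public boolean validArguments(final String[] args) {
        return args != null;
    }

    @Override
    public void execute(final String[] args) {
        // Reuse the shared Kafka parameters defined on the interface above.
        final RyaKafkaClientCommand.KafkaParameters params = new RyaKafkaClientCommand.KafkaParameters();
        new JCommander(params).parse(args);
        System.out.println("Topic: " + params.topic + " via " + params.bootstrapServers);
    }
}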
|
kchilton2/incubator-rya
|
extras/kafka.connect/client/src/main/java/org/apache/rya/kafka/connect/client/RyaKafkaClientCommand.java
|
Java
|
apache-2.0
| 4,038 | 34.121739 | 127 | 0.682268 | false |
echo "build: Build started"
Push-Location $PSScriptRoot
if(Test-Path .\artifacts) {
echo "build: Cleaning .\artifacts"
Remove-Item .\artifacts -Force -Recurse
}
& dotnet restore --no-cache
$branch = @{ $true = $env:APPVEYOR_REPO_BRANCH; $false = $(git symbolic-ref --short -q HEAD) }[$env:APPVEYOR_REPO_BRANCH -ne $NULL];
$revision = @{ $true = "{0:00000}" -f [convert]::ToInt32("0" + $env:APPVEYOR_BUILD_NUMBER, 10); $false = "local" }[$env:APPVEYOR_BUILD_NUMBER -ne $NULL];
$suffix = @{ $true = ""; $false = "$($branch.Substring(0, [math]::Min(10,$branch.Length)))-$revision"}[$branch -eq "master" -and $revision -ne "local"]
echo "build: Version suffix is $suffix"
foreach ($src in ls src/*) {
Push-Location $src
echo "build: Packaging project in $src"
& dotnet pack -c Release -o ..\..\artifacts --version-suffix=$suffix --include-source
if($LASTEXITCODE -ne 0) { exit 1 }
Pop-Location
}
foreach ($test in ls test/*.PerformanceTests) {
Push-Location $test
echo "build: Building performance test project in $test"
& dotnet build -c Release
if($LASTEXITCODE -ne 0) { exit 2 }
Pop-Location
}
foreach ($test in ls test/*.Tests) {
Push-Location $test
echo "build: Testing project in $test"
& dotnet test -c Release
if($LASTEXITCODE -ne 0) { exit 3 }
Pop-Location
}
Pop-Location
|
nblumhardt/autofac-serilog-integration
|
Build.ps1
|
PowerShell
|
apache-2.0
| 1,352 | 25.509804 | 153 | 0.650888 | false |
package com.camunda.demo.connector.file;
import org.camunda.connect.spi.Connector;
public interface FileConnector extends Connector<FileRequest> {
public static final String ID = "file-connector";
}
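// Example (hypothetical sketch): resolving the connector through the
// camunda-connect registry by the ID defined above. The
// org.camunda.connect.Connectors#getConnector lookup is assumed to be available
// on the classpath; the cast is kept in case the registry returns the raw
// Connector type.
class FileConnectorLookupExample {
  static FileConnector lookup() {
    return (FileConnector) org.camunda.connect.Connectors.getConnector(FileConnector.ID);
  }
}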
|
plexiti/camunda-consulting
|
one-time-examples/2015-01-webinars/webinar-file-connector/src/main/java/com/camunda/demo/connector/file/FileConnector.java
|
Java
|
apache-2.0
| 205 | 21.777778 | 63 | 0.790244 | false |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_LAYOUT_ASSIGNMENT_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_LAYOUT_ASSIGNMENT_H_
#include <iosfwd>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "tensorflow/compiler/xla/service/computation_layout.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/service/hlo_pass_interface.h"
#include "tensorflow/compiler/xla/service/logical_buffer.h"
#include "tensorflow/compiler/xla/service/tuple_points_to_analysis.h"
#include "tensorflow/compiler/xla/shape_layout.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/types.h"
namespace xla {
// Abstract base class for layout constraints. These constraint objects are
// gathered together in a LayoutConstraints object.
class LayoutConstraint {
public:
LayoutConstraint(bool mandatory) : mandatory_(mandatory) {}
virtual ~LayoutConstraint() = default;
virtual string ToString() const = 0;
// True if this constraint cannot be overwritten by a different constraint.
bool mandatory() const { return mandatory_; }
private:
bool mandatory_;
};
std::ostream& operator<<(std::ostream& out, const LayoutConstraint& constraint);
// Layout constraint on a single LogicalBuffer. This constrains the layout of an
// array produced by a particular instruction.
class BufferLayoutConstraint : public LayoutConstraint {
public:
BufferLayoutConstraint(const Layout& layout, const LogicalBuffer& buffer,
bool mandatory);
const LogicalBuffer& buffer() const { return *buffer_; }
const Layout& layout() const { return layout_; }
string ToString() const override;
private:
Layout layout_;
const LogicalBuffer* buffer_;
};
// Constraint on the layout of the operand of an instruction. The constrained
// shape can be arbitrarily shaped (array or tuple). This is a constraint on the
// use of a shaped value and is not a hard constraint on the instruction(s)
// which define the value as copies may be inserted between the definition and
// use.
class OperandLayoutConstraint : public LayoutConstraint {
public:
OperandLayoutConstraint(const ShapeLayout& shape_layout,
const HloInstruction* instruction, int64 operand_no,
bool mandatory);
const ShapeLayout& shape_layout() const { return shape_layout_; }
const HloInstruction* instruction() const { return instruction_; }
const int64 operand_no() const { return operand_no_; }
const HloInstruction* operand() const {
return instruction_->operand(operand_no_);
}
string ToString() const override;
private:
ShapeLayout shape_layout_;
const HloInstruction* instruction_;
int64 operand_no_;
};
// Constraint on the layout of the result of the entry computation.
class ResultLayoutConstraint : public LayoutConstraint {
public:
explicit ResultLayoutConstraint(const ShapeLayout& shape_layout)
: LayoutConstraint(/*mandatory=*/true), shape_layout_(shape_layout) {}
const ShapeLayout& shape_layout() const { return shape_layout_; }
string ToString() const override;
private:
const ShapeLayout shape_layout_;
};
// Class encapsulating the layout constraints of the values in a HLO
// computation.
class LayoutConstraints {
public:
LayoutConstraints(const TuplePointsToAnalysis& points_to_analysis,
HloComputation* computation);
~LayoutConstraints() = default;
const HloComputation* computation() const { return computation_; }
HloComputation* computation() { return computation_; }
const TuplePointsToAnalysis& points_to_analysis() const {
return points_to_analysis_;
}
// Return a vector containing the constraints which have been added to the
// LayoutConstraints object since the construction of the object or since the
// last time ConsumeAddedConstraints() has been called. This is used to
// identify newly added constraints when propagating layouts.
std::vector<const LayoutConstraint*> ConsumeAddedConstraints() {
std::vector<const LayoutConstraint*> ret_vec(std::move(added_constraints_));
added_constraints_.clear();
return ret_vec;
}
void ClearAddedConstraints() { added_constraints_.clear(); }
// Returns the layout of a LogicalBuffer, the layout of the operand of the
// instruction, or the layout of the result of the computation, respectively,
// if it has been constrained. Otherwise return nullptr.
const Layout* BufferLayout(const LogicalBuffer& buffer) const;
const BufferLayoutConstraint* GetBufferLayoutConstraint(
const LogicalBuffer& buffer) const;
const ShapeLayout* OperandLayout(const HloInstruction* instruction,
int64 operand_no) const;
const OperandLayoutConstraint* GetOperandLayoutConstraint(
const HloInstruction* instruction, int64 operand_no) const;
const ShapeLayout* ResultLayout() const;
// Add a constraint on the layout of a LogicalBuffer, the layout of the
// operand of the instruction, or the layout of the result of the computation,
// respectively.
Status SetBufferLayout(const Layout& layout, const LogicalBuffer& buffer,
bool mandatory = true);
Status SetOperandLayout(const Shape& shape_with_layout,
const HloInstruction* instruction, int64 operand_no,
bool mandatory = true);
Status SetResultLayout(const Shape& shape_with_layout);
// Convenience wrapper around SetOperandLayout for setting the layout of a
// operand using a Layout object. The operand must be array-shaped.
Status SetArrayOperandLayout(const Layout& layout,
const HloInstruction* instruction,
int64 operand_no, bool mandatory = true);
// Convenience wrapper around SetBufferLayout. Sets the layouts of all buffers
// created by the instruction to the layouts in the given shape. The
// instruction must define every logical buffer in its output.
Status SetInstructionLayout(const Shape& shape_with_layout,
const HloInstruction* instruction);
// Returns true if any buffer in the given operand is forwarded to the output
// of the given instruction. For example, the Tuple instruction forwards the
// buffers of its operands and would return true for each of its operands.
bool OperandBufferForwarded(const HloInstruction* instruction,
int64 operand_no) const;
  // Returns the set of logical buffers (by LogicalBuffer::Id) which do not
// yet have a layout constraint
const std::set<LogicalBuffer::Id>& unconstrained_buffer_ids() const {
return unconstrained_buffer_ids_;
}
string ToString() const;
private:
// The set of BufferLayoutConstraints applied to the computation.
std::unordered_map<const LogicalBuffer*, BufferLayoutConstraint>
buffer_constraints_;
// The set of OperandLayoutConstraints applied to the computation.
using OperandConstraintKey = std::pair<const HloInstruction*, int64>;
std::map<OperandConstraintKey, OperandLayoutConstraint> operand_constraints_;
// The result constraint for the computation (can be null).
std::unique_ptr<ResultLayoutConstraint> result_constraint_;
// A vector which holds constraints as they are added. Can be cleared with
// ClearAddedConstraints.
std::vector<const LayoutConstraint*> added_constraints_;
// Points-to analysis for the module. Used to propagate constraints through
// the HLO graph.
const TuplePointsToAnalysis& points_to_analysis_;
// Array-shaped buffers which have not yet been constrained.
std::set<LogicalBuffer::Id> unconstrained_buffer_ids_;
HloComputation* computation_;
};
// HLO pass which assigns layouts to all instructions in the HLO module while
// satisfying all necessary invariants and minimizing cost.
class LayoutAssignment : public HloPassInterface {
public:
// entry_computation_layout is modified to populate a layout for the result in
// the case that no particular layout is requested.
explicit LayoutAssignment(ComputationLayout* entry_computation_layout);
~LayoutAssignment() override {}
tensorflow::StringPiece name() const override { return "layout-assignment"; }
// Assign layouts to the given module. Returns whether the module was changed
// (any layouts were changed).
StatusOr<bool> Run(HloModule* module) override;
protected:
// These methods, invoked by PropagateConstraints, propagate a layout
// constraint to its neighbors (i.e. operands and users) in order to minimize
  // the cost of the instructions being constrained. New constraints are
// added to the given constraint set.
//
// Backends can override these methods with backend-specific propagation
// rules.
virtual Status PropagateBufferConstraint(
const BufferLayoutConstraint& layout_constraint,
LayoutConstraints* constraints);
virtual Status PropagateOperandConstraint(
const OperandLayoutConstraint& layout_constraint,
LayoutConstraints* constraints);
virtual Status PropagateResultConstraint(
const ResultLayoutConstraint& layout_constraint,
LayoutConstraints* constraints);
  // By default LayoutAssignment ensures that inputs and outputs of CustomCalls
// have the "major-first" layout (i.e. {n, n-1, ..., 0}).
//
// If this function returns true, LayoutAssignment does not set a layout for
// the given CustomCall. It's up to the backend to set one in
// AddBackendConstraints, if necessary.
//
// Precondition: instruction->opcode() == HloOpcode::kCustomCall.
virtual bool CustomCallRequiresMajorFirstLayout(
const HloInstruction* /*instruction*/) {
return true;
}
// Called after layouts of an instruction have been finalized to allow
// subclasses to check for platform specific assumptions.
virtual Status Verify(const HloInstruction* instruction) {
return Status::OK();
}
// Propagates a buffer layout constraint into the operands that use it.
Status PropagateBufferConstraintToUses(
const BufferLayoutConstraint& layout_constraint,
LayoutConstraints* constraints);
// Propagates a layout constraint on the use of the result of the given
// instruction to the definitions of the LogicalBuffers which make up the
// result.
Status PropagateUseConstraintToDefs(const ShapeLayout& shape_layout,
const HloInstruction* instruction,
LayoutConstraints* constraints);
// Chooses a layout of operand `operand_no` of `instruction` that minimizes
// the cost of `instruction`. `output_layout` is the layout of `instruction`.
// Returns null if it can't decide the best layout.
// Precondition: `instruction` and the operand are array-shaped.
std::unique_ptr<Layout> ChooseOperandLayoutFromOutputLayout(
const Layout& output_layout, const HloInstruction* instruction,
int64 operand_no);
// Given the layout of `user`'s `operand_no`-th operand, chooses a layout of
// `user` that minimizes its cost on that operand. Returns null if it can't
// decide the best layout.
// Precondition: `user` and the operand are array-shaped.
std::unique_ptr<Layout> ChooseOutputLayoutFromOperandLayout(
const Layout& operand_layout, const HloInstruction* user,
int64 operand_no);
private:
// Adds constraints which must be satisfied for correctness on all
// backends. Called once prior to propagating constraints.
Status AddMandatoryConstraints(const ComputationLayout& computation_layout,
HloComputation* computation,
LayoutConstraints* constraints);
// This method can be overridden to add backend-specific constraints to the
// layout of the instructions of a computation. This method is called after
// all mandatory constraints have been added via AddMandatoryConstraints
// and before propagating constraints.
virtual Status AddBackendConstraints(LayoutConstraints* constraints) {
return Status::OK();
}
  // Construct constraints and assign layouts to all instructions in the
// computation satisfying the given ComputationLayout. Layouts constraints are
// added, then propagated until all LogicalBuffers in the computation are
// constrained.
Status RunOnComputation(const ComputationLayout& computation_layout,
const TuplePointsToAnalysis& points_to_analysis,
HloComputation* computation);
// Assign layouts to the instructions of a computation which satisfy the given
// layout constraints. Copies may be added to satisfy the constraints. The
  // given LayoutConstraints must have layout constraints for every logical buffer
// in the computation.
Status AssignLayouts(const LayoutConstraints& constraints,
HloComputation* computation);
// Propagates layout constraints from a set of initial constraints in order to
// minimize the local cost of the computation. This propagation is *not*
// required for correctness.
Status PropagateConstraints(LayoutConstraints* constraints);
// Check that all layouts in the module have been set and satisfy all
// necessary conditions.
Status CheckLayouts(HloModule* module);
ComputationLayout* entry_computation_layout_;
protected:
// Map containing the layouts of all computations assigned so
// far. Computations are handled in a topological sort where computations are
// handled before their caller instructions so the layouts of caller
// instructions can be set to match the computation.
std::map<HloComputation*, ComputationLayout> computation_layouts_;
};
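// Example (hypothetical sketch): how a caller typically drives this pass. The
// entry computation layout is assumed to be owned elsewhere (e.g. by the
// backend or a test harness) and to outlive the pass.
inline StatusOr<bool> RunLayoutAssignmentSketch(HloModule* module,
                                                ComputationLayout* entry_layout) {
  LayoutAssignment layout_assignment(entry_layout);
  // Returns true if any layouts were changed while satisfying the constraints.
  return layout_assignment.Run(module);
}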
} // namespace xla
#endif // TENSORFLOW_COMPILER_XLA_SERVICE_LAYOUT_ASSIGNMENT_H_
|
eadgarchen/tensorflow
|
tensorflow/compiler/xla/service/layout_assignment.h
|
C
|
apache-2.0
| 14,880 | 41.881844 | 80 | 0.734812 | false |
// Package github provides authentication strategies using GitHub.
package github
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/github"
"github.com/Sirupsen/logrus"
"github.com/coreos/dex/connector"
)
const (
baseURL = "https://api.github.com"
scopeEmail = "user:email"
scopeOrgs = "read:org"
)
// Config holds configuration options for github logins.
type Config struct {
ClientID string `json:"clientID"`
ClientSecret string `json:"clientSecret"`
RedirectURI string `json:"redirectURI"`
Org string `json:"org"`
}
// Open returns a strategy for logging in through GitHub.
func (c *Config) Open(logger logrus.FieldLogger) (connector.Connector, error) {
return &githubConnector{
redirectURI: c.RedirectURI,
org: c.Org,
clientID: c.ClientID,
clientSecret: c.ClientSecret,
logger: logger,
}, nil
}
type connectorData struct {
// GitHub's OAuth2 tokens never expire. We don't need a refresh token.
AccessToken string `json:"accessToken"`
}
var (
_ connector.CallbackConnector = (*githubConnector)(nil)
_ connector.RefreshConnector = (*githubConnector)(nil)
)
type githubConnector struct {
redirectURI string
org string
clientID string
clientSecret string
logger logrus.FieldLogger
}
func (c *githubConnector) oauth2Config(scopes connector.Scopes) *oauth2.Config {
var githubScopes []string
if scopes.Groups {
githubScopes = []string{scopeEmail, scopeOrgs}
} else {
githubScopes = []string{scopeEmail}
}
return &oauth2.Config{
ClientID: c.clientID,
ClientSecret: c.clientSecret,
Endpoint: github.Endpoint,
Scopes: githubScopes,
}
}
func (c *githubConnector) LoginURL(scopes connector.Scopes, callbackURL, state string) (string, error) {
if c.redirectURI != callbackURL {
return "", fmt.Errorf("expected callback URL did not match the URL in the config")
}
return c.oauth2Config(scopes).AuthCodeURL(state), nil
}
type oauth2Error struct {
error string
errorDescription string
}
func (e *oauth2Error) Error() string {
if e.errorDescription == "" {
return e.error
}
return e.error + ": " + e.errorDescription
}
func (c *githubConnector) HandleCallback(s connector.Scopes, r *http.Request) (identity connector.Identity, err error) {
q := r.URL.Query()
if errType := q.Get("error"); errType != "" {
return identity, &oauth2Error{errType, q.Get("error_description")}
}
oauth2Config := c.oauth2Config(s)
ctx := r.Context()
token, err := oauth2Config.Exchange(ctx, q.Get("code"))
if err != nil {
return identity, fmt.Errorf("github: failed to get token: %v", err)
}
client := oauth2Config.Client(ctx, token)
user, err := c.user(ctx, client)
if err != nil {
return identity, fmt.Errorf("github: get user: %v", err)
}
username := user.Name
if username == "" {
username = user.Login
}
identity = connector.Identity{
UserID: strconv.Itoa(user.ID),
Username: username,
Email: user.Email,
EmailVerified: true,
}
if s.Groups && c.org != "" {
groups, err := c.teams(ctx, client, c.org)
if err != nil {
return identity, fmt.Errorf("github: get teams: %v", err)
}
identity.Groups = groups
}
if s.OfflineAccess {
data := connectorData{AccessToken: token.AccessToken}
connData, err := json.Marshal(data)
if err != nil {
return identity, fmt.Errorf("marshal connector data: %v", err)
}
identity.ConnectorData = connData
}
return identity, nil
}
func (c *githubConnector) Refresh(ctx context.Context, s connector.Scopes, ident connector.Identity) (connector.Identity, error) {
if len(ident.ConnectorData) == 0 {
return ident, errors.New("no upstream access token found")
}
var data connectorData
if err := json.Unmarshal(ident.ConnectorData, &data); err != nil {
return ident, fmt.Errorf("github: unmarshal access token: %v", err)
}
client := c.oauth2Config(s).Client(ctx, &oauth2.Token{AccessToken: data.AccessToken})
user, err := c.user(ctx, client)
if err != nil {
return ident, fmt.Errorf("github: get user: %v", err)
}
username := user.Name
if username == "" {
username = user.Login
}
ident.Username = username
ident.Email = user.Email
if s.Groups && c.org != "" {
groups, err := c.teams(ctx, client, c.org)
if err != nil {
return ident, fmt.Errorf("github: get teams: %v", err)
}
ident.Groups = groups
}
return ident, nil
}
type user struct {
Name string `json:"name"`
Login string `json:"login"`
ID int `json:"id"`
Email string `json:"email"`
}
// user queries the GitHub API for profile information using the provided client. The HTTP
// client is expected to be constructed by the golang.org/x/oauth2 package, which inserts
// a bearer token as part of the request.
func (c *githubConnector) user(ctx context.Context, client *http.Client) (user, error) {
var u user
req, err := http.NewRequest("GET", baseURL+"/user", nil)
if err != nil {
return u, fmt.Errorf("github: new req: %v", err)
}
req = req.WithContext(ctx)
resp, err := client.Do(req)
if err != nil {
return u, fmt.Errorf("github: get URL %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return u, fmt.Errorf("github: read body: %v", err)
}
return u, fmt.Errorf("%s: %s", resp.Status, body)
}
if err := json.NewDecoder(resp.Body).Decode(&u); err != nil {
return u, fmt.Errorf("failed to decode response: %v", err)
}
return u, nil
}
// teams queries the GitHub API for team membership within a specific organization.
//
// The passed HTTP client is expected to be constructed by the golang.org/x/oauth2 package,
// which inserts a bearer token as part of the request.
func (c *githubConnector) teams(ctx context.Context, client *http.Client, org string) ([]string, error) {
req, err := http.NewRequest("GET", baseURL+"/user/teams", nil)
if err != nil {
return nil, fmt.Errorf("github: new req: %v", err)
}
req = req.WithContext(ctx)
resp, err := client.Do(req)
if err != nil {
return nil, fmt.Errorf("github: get teams: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("github: read body: %v", err)
}
return nil, fmt.Errorf("%s: %s", resp.Status, body)
}
// https://developer.github.com/v3/orgs/teams/#response-12
var teams []struct {
Name string `json:"name"`
Org struct {
Login string `json:"login"`
} `json:"organization"`
}
if err := json.NewDecoder(resp.Body).Decode(&teams); err != nil {
return nil, fmt.Errorf("github: unmarshal groups: %v", err)
}
groups := []string{}
for _, team := range teams {
if team.Org.Login == org {
groups = append(groups, team.Name)
}
}
return groups, nil
}
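// Example (hypothetical sketch): constructing the connector from a Config. The
// field values are placeholders; error handling is left to the caller.
func exampleOpenConnector(logger logrus.FieldLogger) (connector.Connector, error) {
	cfg := &Config{
		ClientID:     "client-id",
		ClientSecret: "client-secret",
		RedirectURI:  "https://dex.example.com/callback",
		Org:          "my-org", // optional: enables team-based group claims
	}
	return cfg.Open(logger)
}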
|
xeonx/dex
|
connector/github/github.go
|
GO
|
apache-2.0
| 6,914 | 25.592308 | 130 | 0.67602 | false |
/*
Copyright 2012 - 2014 Jerome Leleu
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.pac4j.core.kryo;
import java.nio.ByteBuffer;
import java.util.Date;
import java.util.Locale;
import org.pac4j.core.profile.FormattedDate;
import com.esotericsoftware.kryo.serialize.LongSerializer;
import com.esotericsoftware.kryo.serialize.SimpleSerializer;
import com.esotericsoftware.kryo.serialize.StringSerializer;
/**
* This class is a Kryo serializer for {@link FormattedDate}.
*
* @author Jerome Leleu
* @since 1.4.0
*/
public class FormattedDateSerializer extends SimpleSerializer<FormattedDate> {
private final LongSerializer longSerializer = new LongSerializer();
private final StringSerializer stringSerializer = new StringSerializer();
private final LocaleSerializer localeSerializer = new LocaleSerializer();
@Override
public FormattedDate read(final ByteBuffer buffer) {
final Long time = this.longSerializer.readObject(buffer, Long.class);
final String format = this.stringSerializer.readObject(buffer, String.class);
final Locale locale = this.localeSerializer.readObject(buffer, Locale.class);
return new FormattedDate(new Date(time), format, locale);
}
@Override
public void write(final ByteBuffer buffer, final FormattedDate object) {
this.longSerializer.writeObject(buffer, object.getTime());
this.stringSerializer.writeObject(buffer, object.getFormat());
this.localeSerializer.writeObject(buffer, object.getLocale());
}
}
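// Example (hypothetical sketch): registering this serializer with a Kryo 1.x
// instance so that FormattedDate values round-trip correctly. The
// Kryo#register(Class, Serializer) call is assumed from the Kryo 1.x API that
// the imports above target.
class FormattedDateSerializerRegistration {

    static void register(final com.esotericsoftware.kryo.Kryo kryo) {
        kryo.register(FormattedDate.class, new FormattedDateSerializer());
    }
}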
|
F0REacH/pac4j-1.5.1
|
pac4j-core/src/main/java/org/pac4j/core/kryo/FormattedDateSerializer.java
|
Java
|
apache-2.0
| 2,078 | 36.107143 | 85 | 0.746391 | false |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional information regarding
* copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License. You may obtain a
* copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.geode.internal.cache;
import org.apache.geode.internal.offheap.StoredObject;
import org.apache.geode.internal.offheap.annotations.Unretained;
/**
* Used to fetch a record's raw bytes and user bits. The actual data length in byte array may be
* less than the size of the byte array itself. An integer field contains the valid length. This
* class is used exclusively by the Oplog Compactor for rolling the entries. The reason for this
* class is to reuse the underlying byte array for rolling multiple entries there by reducing the
* garbage.
*
* @since GemFire 5.5
*/
public class BytesAndBitsForCompactor {
/**
* If offHeapData is set then ignore the "data" and "validLength" fields. The offHeapData field is
* unretained so it can only be used while the RegionEntry is still synced. When done with the
* offHeapData, null it out if you want to reuse the byte[] later.
*/
private @Unretained StoredObject offHeapData;
private byte[] data;
private byte userBits = 0;
// length of the data present in the byte array
private int validLength;
private static final byte[] INIT_FOR_WRAPPER = new byte[0];
// boolean indicating if the object can be reused.
// Typically if the data stores the reference of a value byte [] directly
// from the RegionEntry than this byte array cannot be reused for
// storing another entry's data
private boolean isReusable;
public BytesAndBitsForCompactor() {
this.data = INIT_FOR_WRAPPER;
// this.userBits = userBits;
this.validLength = INIT_FOR_WRAPPER.length;
this.isReusable = true;
}
public StoredObject getOffHeapData() {
return this.offHeapData;
}
public byte[] getBytes() {
return this.data;
}
public byte getBits() {
return this.userBits;
}
public int getValidLength() {
return this.validLength;
}
public boolean isReusable() {
return this.isReusable;
}
/**
*
* @param data byte array storing the data
* @param userBits byte with appropriate bits set
* @param validLength The number of bytes representing the data , starting from 0 as offset
* @param isReusable true if this object is safe for reuse as a data holder
*/
public void setData(byte[] data, byte userBits, int validLength, boolean isReusable) {
this.data = data;
this.userBits = userBits;
this.validLength = validLength;
this.isReusable = isReusable;
}
public void setOffHeapData(StoredObject so, byte userBits) {
this.offHeapData = so;
this.userBits = userBits;
}
}
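// Example (hypothetical sketch, not part of Geode): how a compactor-style caller
// reuses a holder instance. The byte[] contents and lengths below are
// illustrative only.
class BytesAndBitsForCompactorExample {
  static void demo() {
    BytesAndBitsForCompactor holder = new BytesAndBitsForCompactor();
    byte[] buffer = new byte[] {1, 2, 3, 4};
    // Only the first 3 bytes are valid data; the array itself may be longer.
    holder.setData(buffer, (byte) 0, 3, true /* reusable */);
    if (holder.isReusable()) {
      // A reusable holder's byte[] can be refilled for the next entry.
      holder.setData(buffer, (byte) 0, holder.getValidLength(), true);
    }
  }
}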
|
pdxrunner/geode
|
geode-core/src/main/java/org/apache/geode/internal/cache/BytesAndBitsForCompactor.java
|
Java
|
apache-2.0
| 3,341 | 34.924731 | 100 | 0.731218 | false |
//Copyright © 2014 Sony Computer Entertainment America LLC. See License.txt.
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.ComponentModel.Composition;
using System.Drawing;
using System.Xml;
using Sce.Atf.Controls.PropertyEditing;
using Sce.Atf.Input;
namespace Sce.Atf.Applications
{
/// <summary>
/// Service to handle commands in menus and toolbars</summary>
public abstract class CommandServiceBase : ICommandService, ICommandClient, IInitializable
{
/// <summary>
/// Constructor</summary>
public CommandServiceBase()
{
// create built-in menus and commands first
RegisterMenuInfo(MenuInfo.File);
RegisterMenuInfo(MenuInfo.Edit);
RegisterMenuInfo(MenuInfo.View);
RegisterMenuInfo(MenuInfo.Modify);
RegisterMenuInfo(MenuInfo.Format);
RegisterMenuInfo(MenuInfo.Window);
RegisterMenuInfo(MenuInfo.Help);
}
#region IInitializable Members
/// <summary>
/// Finishes initializing component by setting up settings service and registering command</summary>
public virtual void Initialize()
{
if (m_settingsService != null)
{
// create setting to store command shortcuts
m_settingsService.RegisterSettings(this,
new BoundPropertyDescriptor(
this, () => CommandShortcuts, "Keyboard Shortcuts".Localize(), null, null)
);
PropertyDescriptor[] userPrefs = new PropertyDescriptor[]
{
// setting that allows user to set toolbar image size.
new BoundPropertyDescriptor(
this, () => UserSelectedImageSize, "Command Icon Size".Localize(), null, "Size of icons on Toolbar buttons".Localize())
};
m_settingsService.RegisterSettings(this, userPrefs);
SettingsServices.RegisterUserSettings(m_settingsService, "Application".Localize(), userPrefs);
}
// Register our own "edit keyboard" command
this.RegisterCommand(
CommandId.EditKeyboard,
StandardMenu.Edit,
StandardCommandGroup.EditPreferences,
"Keyboard Shortcuts".Localize() + " ...",
"Customize keyboard shortcuts".Localize(),
this);
}
#endregion
#region ICommandService Members
/// <summary>
/// Registers the menu for the application and gives it a tool strip</summary>
/// <param name="menuInfo">Menu description; standard menus are defined as static members
/// of the MenuInfo class</param>
public void RegisterMenu(MenuInfo menuInfo)
{
RegisterMenuInfo(menuInfo);
}
/// <summary>
/// Registers a command for a command client</summary>
/// <param name="info">Command description; standard commands are defined as static
/// members of the CommandInfo class</param>
/// <param name="client">Client that handles the command</param>
public virtual void RegisterCommand(CommandInfo info, ICommandClient client)
{
if (client == null)
throw new InvalidOperationException("Command has no client");
CommandInfo duplicate = GetCommandInfo(info.CommandTag);
if (duplicate == null)
{
if (!CommandIsUnique(info.MenuTag, info.MenuText))
{
throw new InvalidOperationException(
string.Format(
"Duplicate menu/command combination. CommandTag: {0}, MenuTag: {1}, GroupTag: {2}, MenuText: {3}",
info.CommandTag, info.MenuTag, info.GroupTag, info.MenuText));
}
RegisterCommandInfo(info);
// Only increment the menu count for unique commands, because it's legal to
// call RegisterCommand multiple times with the same CommandInfo.
IncrementMenuCommandCount(info.MenuTag);
}
m_commandClients.Add(info.CommandTag, client);
}
/// <summary>
/// Unregisters a command for a command client</summary>
/// <param name="commandTag">Command tag that identifies CommandInfo used to register
/// the command</param>
/// <param name="client">Client that handles the command</param>
public virtual void UnregisterCommand(object commandTag, ICommandClient client)
{
if (client == null)
m_commandClients.Remove(commandTag);
else
m_commandClients.Remove(commandTag, client);
CommandInfo info = GetCommandInfo(commandTag);
if (info == null)
return;
UnregisterCommandInfo(info);
if (info.MenuTag != null)
{
MenuInfo menuInfo = GetMenuInfo(info.MenuTag);
if (menuInfo != null)
DecrementMenuCommandCount(menuInfo);
}
}
/// <summary>
/// Displays a context (right-click popup) menu at the given screen point. Raises
/// the ContextMenuClosed events.</summary>
/// <param name="commandTags">Commands in menu, nulls indicate separators</param>
/// <param name="screenPoint">Point in screen coordinates</param>
public abstract void RunContextMenu(IEnumerable<object> commandTags, Point screenPoint);
/// <summary>
/// Sets the active client that receives a command for the case when multiple
/// ICommandClient objects have registered for the same command tag (such as the
/// StandardCommand.EditCopy enum, for example). Set to null to reduce the priority
/// of the previously active client.</summary>
/// <param name="client">Command client, null if client is deactivated</param>
public void SetActiveClient(ICommandClient client)
{
var commandTags = new List<object>(m_commandClients.Keys);
var commandTagsToUpdate = new List<object>();
// 'client' being null is an indication to pop the most recently active client
if (client == null && m_activeClient != null)
{
// make sure previous client will NOT be the last registered for its command tags
foreach (object commandTag in commandTags)
{
if (m_commandClients.ContainsKeyValue(commandTag, m_activeClient))
{
m_commandClients.AddFirst(commandTag, m_activeClient);
commandTagsToUpdate.Add(commandTag);
}
}
}
m_activeClient = client;
if (m_activeClient != null)
{
// make sure client will be the last registered for its command tags
foreach (object commandTag in commandTags)
{
if (m_commandClients.ContainsKeyValue(commandTag, client))
{
m_commandClients.Add(commandTag, client);
commandTagsToUpdate.Add(commandTag);
}
}
}
foreach (object commandTag in commandTagsToUpdate)
UpdateCommand(commandTag);
}
/// <summary>
/// Forces an update for the command associated with the given tag.</summary>
/// <param name="commandTag">Command's tag object</param>
protected virtual void UpdateCommand(object commandTag)
{
}
/// <summary>
/// Reserves a shortcut key, so it is not available as command shortcut</summary>
/// <param name="key">Reserved key</param>
/// <param name="reason">Reason why key is reserved to display to user</param>
public void ReserveKey(Keys key, string reason)
{
if (key == Keys.None)
throw new ArgumentException("key");
if (reason == null)
throw new ArgumentNullException("reason");
// add or update key to reserved keys.
key = KeysUtil.NumPadToNum(key);
if (m_reservedKeys.ContainsKey(key))
{
m_reservedKeys[key] = reason;
}
else
{
m_reservedKeys[key] = reason;
EraseShortcut(key);
}
}
/// <summary>
/// Processes the key as a command shortcut</summary>
/// <param name="key">Key to process</param>
/// <returns>True iff the key was processed as a command shortcut</returns>
public virtual bool ProcessKey(Keys key)
{
KeyEventArgs keyEventArgs = new KeyEventArgs(key);
ProcessingKey.Raise(this, keyEventArgs);
if (keyEventArgs.Handled)
return true;
Keys shortcut = KeysUtil.NumPadToNum(key);
//if there is no key, return
if (shortcut == Keys.None)
return false;
//if the key is not a registered shortcut, return
object tag;
if (!m_shortcuts.TryGetValue(shortcut, out tag))
return false;
//Is there a client, and if so, can the client do the command?
ICommandClient client = GetClient(tag);
if (client == null)
client = m_activeClient;
if (client == null || !client.CanDoCommand(tag))
return false;
// do the command
client.DoCommand(tag);
return true;
}
/// <summary>
/// Event that is raised when processing a key; clients can subscribe to this event
/// to intercept certain hot keys for custom handling</summary>
public event EventHandler<KeyEventArgs> ProcessingKey;
#endregion
#region ICommandClient Members
/// <summary>
/// Checks whether the client can do the command, if it handles it</summary>
/// <param name="commandTag">Command to be done</param>
/// <returns>True iff client can do the command</returns>
public virtual bool CanDoCommand(object commandTag) { return false; }
/// <summary>
/// Does the command</summary>
/// <param name="commandTag">Command to be done</param>
public virtual void DoCommand(object commandTag) { }
/// <summary>
/// Updates command state for given command</summary>
/// <param name="commandTag">Command's tag object</param>
/// <param name="state">Command state to update</param>
public virtual void UpdateCommand(object commandTag, CommandState state) { }
#endregion
/// <summary>
/// Image sizes for toolbar icons</summary>
public enum ImageSizes
{
/// <summary>
/// 16 x 16 Icon</summary>
Size16x16,
/// <summary>
/// 24 x 24 Icon</summary>
Size24x24,
/// <summary>
/// 32 x 32 Icon</summary>
Size32x32
}
/// <summary>
/// Gets or sets toolbar image size</summary>
[DefaultValue(ImageSizes.Size24x24)]
public ImageSizes UserSelectedImageSize
{
get { return m_imageSize; }
set
{
if (m_imageSize != value)
{
m_imageSize = value;
OnImageSizeChanged();
}
}
}
/// <summary>
/// Handler for image size changed event for derived classes to override</summary>
protected virtual void OnImageSizeChanged() { }
/// <summary>
/// Gets or sets XML string representing command/shortcut pairs</summary>
public string CommandShortcuts
{
get
{
// generate xml string consisting of command, shortcut pairs
// use menu text as a unique id since it is more easily serialized
XmlDocument xmlDoc = new XmlDocument();
xmlDoc.AppendChild(xmlDoc.CreateXmlDeclaration("1.0", "utf-8", "yes"));
XmlElement root = xmlDoc.CreateElement("Shortcuts");
xmlDoc.AppendChild(root);
foreach (CommandInfo info in m_commands)
{
if (IsUnregistered(info))
continue;
// We don't want to save shortcuts that are at their default value since this
// prevents the default from being changed programmatically or via DefaultSettings.xml.
// http://forums.ship.scea.com/jive/thread.jspa?messageID=51034
if (info.ShortcutsAreDefault)
continue;
string commandPath = GetCommandPath(info);
int numShortcuts = 0;
foreach (Keys k in info.Shortcuts)
{
XmlElement elem = xmlDoc.CreateElement("shortcut");
elem.SetAttribute("name", commandPath);
elem.SetAttribute("value", k.ToString());
root.AppendChild(elem);
numShortcuts++;
}
if (numShortcuts < 1)
{
XmlElement elem = xmlDoc.CreateElement("shortcut");
elem.SetAttribute("name", commandPath);
elem.SetAttribute("value", Keys.None.ToString());
root.AppendChild(elem);
}
}
return xmlDoc.InnerXml;
}
set
{
XmlDocument xmlDoc = new XmlDocument();
xmlDoc.LoadXml(value);
XmlNodeList nodes = xmlDoc.DocumentElement.SelectNodes("shortcut");
if (nodes == null || nodes.Count == 0)
return;
Dictionary<string, CommandInfo> commands = new Dictionary<string, CommandInfo>(m_commands.Count);
foreach (CommandInfo info in m_commands)
{
if (IsUnregistered(info))
continue;
string commandPath = GetCommandPath(info);
commands.Add(commandPath, info);
}
Dictionary<CommandInfo, CommandInfo> changedCommands = new Dictionary<CommandInfo, CommandInfo>(m_commands.Count);
// m_shortcuts contains the default shortcuts currently. We need to override the defaults with
// the user's preferences. The preference file does not necessarily contain all shortcuts and
// some of the shortcuts may be blank (i.e., Keys.None).
foreach (XmlElement elem in nodes)
{
string strCmdTag = elem.GetAttribute("name"); //the command tag or "path", made up of menu and command name
string strShortcut = elem.GetAttribute("value");
if (commands.ContainsKey(strCmdTag))
{
// Blow away any old shortcuts before adding the first new one
CommandInfo cmdInfo = commands[strCmdTag];
if (!changedCommands.ContainsKey(cmdInfo))
{
List<Keys> shortcuts = new List<Keys>(cmdInfo.Shortcuts);
foreach (Keys k in shortcuts)
EraseShortcut(k);
changedCommands.Add(cmdInfo, cmdInfo);
}
Keys shortcut = (Keys)Enum.Parse(typeof(Keys), strShortcut);
shortcut = KeysUtil.NumPadToNum(shortcut);
SetShortcut(shortcut, commands[strCmdTag]);
}
}
}
}
/// <summary>
/// Gets or sets whether the context menu should automatically remove disabled items</summary>
public bool ContextMenuAutoCompact
{
get { return m_contextMenuAutoCompact; }
set { m_contextMenuAutoCompact = value; }
}
/// <summary>
/// Obtains the registered menu info whose menu tag object equals the menuTag parameter</summary>
/// <param name="menuTag">Menu's unique ID to compare against known menu tags</param>
/// <returns>The corresponding matching menu info, or null, if no match was found</returns>
public MenuInfo GetMenuInfo(object menuTag)
{
MenuInfo result = null;
foreach (MenuInfo menuInfo in m_menus)
{
if (menuInfo.MenuTag.Equals(menuTag))
{
result = menuInfo;
break;
}
}
return result;
}
/// <summary>
/// Obtains menu info for a context menu if the menu tag is not null and can be converted
/// to a non-empty string</summary>
/// <remarks>The menu tag is optional for context menus, but defining one allows the client
/// application to register commands with the same menu texts in different contexts.</remarks>
/// <param name="menuTag">Menu's unique ID (convertable to non-empty string)</param>
/// <returns>Menu info for a context menu</returns>
private MenuInfo GetContextMenuInfo(object menuTag)
{
MenuInfo result = null;
if (menuTag != null)
{
string menuText = menuTag.ToString();
if (!string.IsNullOrEmpty(menuText))
result = new MenuInfo(menuTag, menuText, string.Empty);
}
return result;
}
/// <summary>
/// Returns the registered CommandInfo whose command tag equals a given object</summary>
/// <param name="commandTag">The object to compare against known CommandInfo objects</param>
/// <returns>The corresponding registered CommandInfo, or null, if no match was found</returns>
public CommandInfo GetCommandInfo(object commandTag)
{
CommandInfo result = null;
if (commandTag != null)
{
m_commandsById.TryGetValue(commandTag, out result);
}
return result;
}
/// <summary>
/// Obtains the registered commands' info objects</summary>
/// <returns>Enumeration of CommandInfo for registered commands</returns>
public IEnumerable<CommandInfo> GetCommandInfos()
{
return m_commands;
}
/// <summary>
/// Gets the command client for the given command tag, or null if none exists. If multiple
/// command clients registered a command with this tag, the most recent command client
/// is returned.</summary>
/// <param name="commandTag">Command tag</param>
/// <returns>Command client for given command tag</returns>
public ICommandClient GetClient(object commandTag)
{
ICommandClient client;
m_commandClients.TryGetLast(commandTag, out client);
return client;
}
/// <summary>
/// Gets the command client for the given command tag, or the active client if none exists.
/// See SetActiveClient(). If multiple command clients registered a command with this tag,
/// the most recent command client is returned.</summary>
/// <param name="commandTag">Command tag</param>
/// <returns>Command client for given command tag</returns>
public ICommandClient GetClientOrActiveClient(object commandTag)
{
return GetClient(commandTag) ?? m_activeClient;
}
/// <summary>
/// Registers menu info</summary>
/// <param name="info">MenuInfo to register</param>
/// <remarks>Adds this MenuInfo object to m_menus field and creates a tool strip for it.
/// WARNING: This virtual method is called within the constructor. ONLY
/// CLASSES THAT DIRECTLY DERIVE FROM COMMANDSERVICEBASE SHOULD OVERRIDE IT
/// AND SHOULD DO SO USING THE 'SEALED' KEYWORD.</remarks>
protected virtual void RegisterMenuInfo(MenuInfo info)
{
MenuInfo addedInfo = GetMenuInfo(info.MenuTag);
if (addedInfo != null)
throw new InvalidOperationException("Menu object '" + info.MenuTag + "' was already added");
if (info.MenuTag is StandardMenu)
m_menus.Add(info);
else
m_menus.Insert(m_menus.Count - 2, info); // insert custom menus before Window, Help
}
/// <summary>
/// Registers a unique CommandInfo object</summary>
/// <param name="info">CommandInfo to register</param>
protected virtual void RegisterCommandInfo(CommandInfo info)
{
string menuText = info.MenuText;
if (string.IsNullOrEmpty(menuText))
throw new ArgumentException("menuText is null or empty");
int textStart = 1;
// for non-literal menu text, get last segment of path
if (menuText[0] != '@')
{
// a little subtle here, if there's no separator, -1 bumps textStart back to 0
textStart += menuText.LastIndexOfAny(s_pathDelimiters);
}
string displayedMenuText = menuText.Substring(textStart, menuText.Length - textStart);
info.DisplayedMenuText = displayedMenuText;
m_commands.Add(info);
m_commandsById[info.CommandTag] = info;
foreach (Keys k in info.Shortcuts)
SetShortcut(k, info);
}
/// <summary>
/// Unregisters a unique CommandInfo object</summary>
/// <param name="info">CommandInfo to unregister</param>
protected virtual void UnregisterCommandInfo(CommandInfo info)
{
m_commandsById.Remove(info.CommandTag);
m_commands.Remove(info);
}
/// <summary>
/// Sets shortcut</summary>
/// <param name="shortcut">Shortcut keys</param>
/// <param name="info">CommandInfo corresponding to shortcut keys</param>
/// <remarks>Keeps m_shortcuts field, the menu item, and the CommandInfo in sync with regards to shortcuts
/// and ensures that each shortcut is unique</remarks>
protected void SetShortcut(Keys shortcut, CommandInfo info)
{
shortcut = KeysUtil.NumPadToNum(shortcut);
// if shortcut is reserved then do not set it.
if (m_reservedKeys.ContainsKey(shortcut))
{
Outputs.WriteLine(OutputMessageType.Warning, "cannot assign " + KeysUtil.KeysToString(shortcut, true) +
" to " + GetCommandPath(info) + " it is reserved for " + m_reservedKeys[shortcut]);
info.RemoveShortcut(shortcut);
// erase the shortcut if it already exists.
EraseShortcut(shortcut);
return;
}
info.AddShortcut(shortcut);
if (shortcut != Keys.None)
{
// If the shortcut already exists for a different command, then erase the old command's shortcut.
if (m_shortcuts.ContainsKey(shortcut) &&
m_shortcuts[shortcut] != info.CommandTag)
{
object existingCommandTag = m_shortcuts[shortcut];
if (m_commandsById.ContainsKey(existingCommandTag))
{
CommandInfo existingInfo = m_commandsById[existingCommandTag];
existingInfo.RemoveShortcut(shortcut);
}
}
m_shortcuts[shortcut] = info.CommandTag;
}
}
private void EraseShortcut(Keys shortcut)
{
// If the shortcut already exists then erase it.
if (m_shortcuts.ContainsKey(shortcut))
{
object existingCommandTag = m_shortcuts[shortcut];
if (m_commandsById.ContainsKey(existingCommandTag))
{
CommandInfo existingInfo = m_commandsById[existingCommandTag];
existingInfo.RemoveShortcut(shortcut);
m_shortcuts.Remove(shortcut);
}
}
}
private bool CommandIsUnique(object menuTag, string menuText)
{
// check for the same menu tag and menu text, which should catch most accidental
// duplication
foreach (CommandInfo info in m_commands)
{
if (IsUnregistered(info))
continue;
if (TagsEqual(info.MenuTag, menuTag) && info.MenuText == menuText)
return false;
}
return true;
}
/// <summary>
/// Increments menu command count</summary>
/// <param name="menuTag">Menu's unique ID tag. Is null if there is no menu item.</param>
/// <returns>MenuInfo object corresponding to menu tag</returns>
protected virtual MenuInfo IncrementMenuCommandCount(object menuTag)
{
MenuInfo menuInfo = null;
// update menu's command count
if (menuTag != null)
{
menuInfo = GetMenuInfo(menuTag);
if (menuInfo != null)
menuInfo.Commands++;
}
return menuInfo;
}
/// <summary>
/// Decrements the count of commands associated with the specified MenuInfo</summary>
/// <param name="menuInfo">MenuInfo for menu's command count to decrement</param>
protected virtual void DecrementMenuCommandCount(MenuInfo menuInfo)
{
menuInfo.Commands--;
}
/// <summary>
/// Gets command path string for given command</summary>
/// <param name="commandInfo">CommandInfo for command</param>
/// <returns>String representing command path in menu hierarchy</returns>
public string GetCommandPath(CommandInfo commandInfo)
{
string result = commandInfo.MenuText;
MenuInfo menuInfo = GetMenuInfo(commandInfo.MenuTag);
if (menuInfo == null)
menuInfo = GetContextMenuInfo(commandInfo.MenuTag);
if (menuInfo != null)
result = menuInfo.MenuText + "/" + result;
return result;
}
/// <summary>
/// Tests if command is unregistered</summary>
/// <param name="info">CommandInfo for command</param>
/// <returns>True iff command is unregistered</returns>
protected bool IsUnregistered(CommandInfo info)
{
return GetClient(info.CommandTag) == null;
}
private static int Compare(CommandInfo x, CommandInfo y)
{
int result = CompareTags(x.MenuTag, y.MenuTag);
if (result == 0)
result = CompareTags(x.GroupTag, y.GroupTag);
if (result == 0)
result = CompareTags(x.CommandTag, y.CommandTag);
// finally use either the displayed menu text or the registration index to ensure a stable sort
if (result == 0)
{
if (x.GroupTag != null && m_defaultSortByMenuLabel.Contains(x.GroupTag))
result = CompareTags(x.DisplayedMenuText, y.DisplayedMenuText);
else
result = x.Index - y.Index;
}
return result;
}
private static int CompareTags(object tag1, object tag2)
{
bool tag1First = false, tag2First = false, tag1Last = false, tag2Last = false;
if (tag1 != null)
{
tag1First = m_beginningTags.Contains(tag1);
tag1Last = m_endingTags.Contains(tag1);
}
if (tag2 != null)
{
tag2First = m_beginningTags.Contains(tag2);
tag2Last = m_endingTags.Contains(tag2);
}
if (tag1First && !tag2First)
return -1;
if (tag2First && !tag1First)
return 1;
if (tag1Last && !tag2Last)
return 1;
if (tag2Last && !tag1Last)
return -1;
if (tag1 is Enum && tag2 is Enum)
return ((int)tag1).CompareTo((int)tag2);
if (tag1 is Enum)
return -1;
if (tag2 is Enum)
return 1;
if (tag1 is string && tag2 is string)
return StringUtil.CompareNaturalOrder((string) tag1, (string) tag2);
IComparable comparable1 = tag1 as IComparable;
IComparable comparable2 = tag2 as IComparable;
if (comparable1 != null)
{
int result = comparable1.CompareTo(tag2);
if (result != 0)
return result;
}
if (comparable2 != null)
{
int result = comparable2.CompareTo(tag1);
if (result != 0)
return result;
}
return 0;
}
/// <summary>
/// Tests equality of menu tags</summary>
/// <param name="tag1">Menu 1 tag</param>
/// <param name="tag2">Menu 2 tag</param>
/// <returns>True iff tags are equal</returns>
protected static bool TagsEqual(object tag1, object tag2)
{
if (tag1 == null)
return tag2 == null;
return tag1.Equals(tag2);
}
/// <summary>
/// Comparer for sorting commands by menu, group, and command tags</summary>
protected class CommandComparer : IComparer<CommandInfo>
{
#region IComparer<CommandInfo> Members
/// <summary>
/// Compare method for commands</summary>
/// <param name="x">Command 1 CommandInfo</param>
/// <param name="y">Command 2 CommandInfo</param>
/// <returns>-1 if Command 1 before Command 2, 0 if commands identical, 1 if Command 1 after Command 2</returns>
public int Compare(CommandInfo x, CommandInfo y)
{
return CommandServiceBase.Compare(x, y);
}
#endregion
}
/// <summary>
/// Derived classes can override this to redraw the menu and toolbar icons. This is useful if any
/// registered command's icon can be changed at runtime.</summary>
public virtual void RefreshImages() { }
/// <summary>
/// Derived classes can override this to redraw a particular menu item's icon.</summary>
/// <param name="commandInfo">The command whose icon needs a refresh</param>
public virtual void RefreshImage(CommandInfo commandInfo) { }
/// <summary>
/// Indicates whether the user clicked the icon/image portion of the menu item.</summary>
public bool IconClicked { get; set; }
/// <summary>
/// Indicates which command's icon the mouse is currently over, or null if none. This can be used to
/// modify a menu icon on mouseover.</summary>
public virtual CommandInfo MouseIsOverCommandIcon { get; private set; }
/// <summary>
/// Class constructor. Does standard command ordering.</summary>
static CommandServiceBase()
{
// force standard and framework items into their places at beginning or end
m_endingTags.Add(StandardCommand.FileClose);
m_beginningTags.Add(StandardCommand.FileSave);
m_beginningTags.Add(StandardCommand.FileSaveAs);
m_beginningTags.Add(StandardCommand.FileSaveAll);
m_beginningTags.Add(StandardCommand.EditUndo);
m_beginningTags.Add(StandardCommand.EditRedo);
m_beginningTags.Add(StandardCommand.EditCut);
m_beginningTags.Add(StandardCommand.EditCopy);
m_beginningTags.Add(StandardCommand.EditPaste);
m_endingTags.Add(StandardCommand.EditDelete);
m_beginningTags.Add(StandardCommand.EditSelectAll);
m_beginningTags.Add(StandardCommand.EditDeselectAll);
m_beginningTags.Add(StandardCommand.EditInvertSelection);
m_beginningTags.Add(StandardCommand.EditGroup);
m_beginningTags.Add(StandardCommand.EditUngroup);
m_beginningTags.Add(StandardCommand.EditLock);
m_beginningTags.Add(StandardCommand.EditUnlock);
m_beginningTags.Add(StandardCommand.ViewZoomIn);
m_beginningTags.Add(StandardCommand.ViewZoomOut);
m_beginningTags.Add(StandardCommand.ViewZoomExtents);
m_beginningTags.Add(StandardCommand.WindowSplitHoriz);
m_beginningTags.Add(StandardCommand.WindowSplitVert);
m_beginningTags.Add(StandardCommand.WindowRemoveSplit);
m_endingTags.Add(StandardCommand.HelpAbout);
m_beginningTags.Add(StandardCommandGroup.FileNew);
m_beginningTags.Add(StandardCommandGroup.FileSave);
m_beginningTags.Add(StandardCommandGroup.FileOther);
m_endingTags.Add(StandardCommandGroup.FileRecentlyUsed);
m_endingTags.Add(StandardCommandGroup.FileExit);
m_beginningTags.Add(StandardCommandGroup.EditUndo);
m_beginningTags.Add(StandardCommandGroup.EditCut);
m_beginningTags.Add(StandardCommandGroup.EditSelectAll);
m_beginningTags.Add(StandardCommandGroup.EditGroup);
m_beginningTags.Add(StandardCommandGroup.EditOther);
m_endingTags.Add(StandardCommandGroup.EditPreferences);
m_beginningTags.Add(StandardCommandGroup.ViewZoomIn);
m_beginningTags.Add(StandardCommandGroup.ViewControls);
m_beginningTags.Add(StandardCommandGroup.WindowLayout);
m_beginningTags.Add(StandardCommandGroup.WindowSplit);
m_endingTags.Add(StandardCommandGroup.WindowDocuments);
m_endingTags.Add(StandardCommandGroup.HelpAbout);
m_beginningTags.Add(CommandId.FileRecentlyUsed1);
m_beginningTags.Add(CommandId.FileRecentlyUsed2);
m_beginningTags.Add(CommandId.FileRecentlyUsed3);
m_beginningTags.Add(CommandId.FileRecentlyUsed4);
m_endingTags.Add(StandardCommand.FileExit);
m_endingTags.Add(CommandId.EditPreferences);
m_endingTags.Add(CommandId.EditDocumentPreferences);
// Force subitems with the same menu and group to sort themselves by menu name, rather than creation index
m_defaultSortByMenuLabel.Add(StandardCommandGroup.WindowDocuments);
}
/// <summary>
/// Sets StatusService</summary>
/// <param name="statusService">IStatusService to be set</param>
/// <remarks>Used in ATF2.9. There could be a cleaner way to do that.</remarks>
public void SetStatusService(IStatusService statusService)
{
if (m_statusService != null)
return;
m_statusService = statusService;
}
/// <summary>
/// Imported IStatusService</summary>
[Import(AllowDefault = true)]
protected IStatusService m_statusService;
/// <summary>
/// Imported ISettingsService</summary>
[Import(AllowDefault = true)]
protected ISettingsService m_settingsService;
/// <summary>
/// Toolbar image size</summary>
protected ImageSizes m_imageSize = ImageSizes.Size24x24;
/// <summary>
/// List of menus</summary>
protected List<MenuInfo> m_menus =
new List<MenuInfo>();
/// <summary>
/// Registered commands' info objects</summary>
protected List<CommandInfo> m_commands =
new List<CommandInfo>();
/// <summary>
/// Dictionary for CommandInfo, keyed by command tag</summary>
protected Dictionary<object, CommandInfo> m_commandsById =
new Dictionary<object, CommandInfo>();
/// <summary>
/// Multimap of command clients, keyed by command tag</summary>
protected Multimap<object, ICommandClient> m_commandClients =
new Multimap<object, ICommandClient>();
/// <summary>
/// Dictionary for command tags, keyed by command shortcut</summary>
protected Dictionary<Keys, object> m_shortcuts =
new Dictionary<Keys, object>();
/// <summary>
/// Dictionary for reasons why shortcut key is not available, keyed by shortcut key</summary>
protected Dictionary<Keys, string> m_reservedKeys =
new Dictionary<Keys, string>();
/// <summary>
/// Active command client</summary>
protected ICommandClient m_activeClient;
private bool m_contextMenuAutoCompact = true;
/// <summary>
/// Path delimiter strings</summary>
protected static char[] s_pathDelimiters = new[] { '/', '\\' };
private static readonly HashSet<object> m_beginningTags = new HashSet<object>();
private static readonly HashSet<object> m_endingTags = new HashSet<object>();
private static readonly HashSet<object> m_defaultSortByMenuLabel = new HashSet<object>();
}
}
|
ylyking/LevelEditor
|
ATF/Framework/Atf.Gui/Applications/CommandServiceBase.cs
|
C#
|
apache-2.0
| 38,228 | 38.987448 | 143 | 0.569713 | false |
using System;
using System.Text;
using Microsoft.Practices.RecipeFramework;
using EnvDTE;
using System.IO;
using System.Xml;
using System.Reflection;
using System.Security.Cryptography;
using System.Collections;
using System.Diagnostics;
using Microsoft.Practices.RecipeFramework.Library;
using Microsoft.Practices.ComponentModel;
using Microsoft.Practices.RecipeFramework.Services;
namespace SPALM.SPSF.Library.ValueProviders
{
[ServiceDependency(typeof(DTE))]
public class AssemblyShortNameProvider : ValueProvider
{
protected string GetBasePath()
{
return base.GetService<IConfigurationService>(true).BasePath;
}
public override bool OnBeforeActions(object currentValue, out object newValue)
{
return SetValue(currentValue, out newValue);
}
public override bool OnBeginRecipe(object currentValue, out object newValue)
{
return SetValue(currentValue, out newValue);
}
private bool SetValue(object currentValue, out object newValue)
{
if (currentValue != null)
{
// Do not assign a new value, and return false to flag that
// we don't want the current value to be changed.
newValue = null;
return false;
}
DTE service = (DTE)this.GetService(typeof(DTE));
try
{
string assembly = Helpers.GetOutputName(Helpers.GetSelectedProject(service));
if (assembly != "")
{
if (assembly.EndsWith(".dll"))
{
assembly = assembly.Replace(".dll", "");
}
newValue = assembly;
return true;
}
}
catch (Exception)
{
}
newValue = "";
return true;
}
}
}
|
Gergues/SPSF
|
SPALM.SPSF.Library/ValueProviders/AssemblyShortNameProvider.cs
|
C#
|
apache-2.0
| 1,985 | 27.768116 | 93 | 0.565239 | false |
var x;
// valid left operands
// the left operand is required to be of type Any, the String primitive type, or the Number primitive type
var a1;
var a2;
var ra1 = x in x;
var ra2 = a1 in x;
var ra3 = a2 in x;
var ra4 = '' in x;
var ra5 = 0 in x;
// valid right operands
// the right operand is required to be of type Any, an object type, or a type parameter type
var b1;
var rb1 = x in b1;
var rb2 = x in {};
function foo(t) {
var rb3 = x in t;
}
|
hippich/typescript
|
tests/baselines/reference/inOperatorWithValidOperands.js
|
JavaScript
|
apache-2.0
| 479 | 18.826087 | 106 | 0.626305 | false |
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
import React from 'react';
import Helpers from "../../helpers";
import FauxtonAPI from "../../core/api";
import {OnePane, OnePaneHeader, OnePaneContent} from '../components/layouts';
import ActiveTasksController from "./components/controller";
import ActiveTasksPollingWidgetController from './components/polling';
const crumbs = [
{'name': 'Active Tasks'}
];
export const ActiveTasksLayout = (props) => {
return (
<OnePane>
<OnePaneHeader
crumbs={crumbs}
docURL={FauxtonAPI.constants.DOC_URLS.ACTIVE_TASKS}
endpoint={Helpers.getApiUrl('/_active_tasks')}
>
<ActiveTasksPollingWidgetController {...props} />
</OnePaneHeader>
<OnePaneContent>
<ActiveTasksController {...props} />
</OnePaneContent>
</OnePane>
);
};
export default ActiveTasksLayout;
|
apache/couchdb-fauxton
|
app/addons/activetasks/layout.js
|
JavaScript
|
apache-2.0
| 1,388 | 32.853659 | 80 | 0.704611 | false |
---
id: io-cli
title: Connector Admin CLI
sidebar_label: "CLI"
original_id: io-cli
---
The `pulsar-admin` tool helps you manage Pulsar connectors.
## `sources`
An interface for managing Pulsar IO sources (ingress data into Pulsar).
```bash
$ pulsar-admin sources subcommands
```
Subcommands are:
* `create`
* `update`
* `delete`
* `get`
* `status`
* `list`
* `stop`
* `start`
* `restart`
* `localrun`
* `available-sources`
* `reload`
### `create`
Submit a Pulsar IO source connector to run in a Pulsar cluster.
#### Usage
```bash
$ pulsar-admin sources create options
```
#### Options
|Flag|Description|
|----|---|
| `-a`, `--archive` | The path to the NAR archive for the source. <br /> It also supports url-path (http/https/file [file protocol assumes that file already exists on worker host]) from which worker can download the package.
| `--classname` | The source's class name if `archive` is file-url-path (file://).
| `--cpu` | The CPU (in cores) that needs to be allocated per source instance (applicable only to Docker runtime).
| `--deserialization-classname` | The SerDe classname for the source.
| `--destination-topic-name` | The Pulsar topic to which data is sent.
| `--disk` | The disk (in bytes) that needs to be allocated per source instance (applicable only to Docker runtime).
|`--name` | The source's name.
| `--namespace` | The source's namespace.
| ` --parallelism` | The source's parallelism factor, that is, the number of source instances to run.
| `--processing-guarantees` | The processing guarantees (aka delivery semantics) applied to the source.<br />Possible Values: ATLEAST_ONCE, ATMOST_ONCE, EFFECTIVELY_ONCE.
| `--ram` | The RAM (in bytes) that needs to be allocated per source instance (applicable only to the process and Docker runtimes).
| `-st`, `--schema-type` | The schema type.<br /> Either a builtin schema (for example, AVRO and JSON) or custom schema class name to be used to encode messages emitted from source.
| `--source-config` | Source config key/values. The `source-type` parameter of the currently built-in connectors is determined by the setting of the `name` parameter specified in the pulsar-io.yaml file.
| `--source-config-file` | The path to a YAML config file specifying the source's configuration.
| `-t`, `--source-type` | The source's connector provider. The `source-type` parameter of the currently built-in connectors is determined by the setting of the `name` parameter specified in the pulsar-io.yaml file.
| `--tenant` | The source's tenant.
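#### Example
A minimal sketch of submitting a source connector. The tenant, namespace, connector type, topic, and config file path below are placeholder values for illustration, not part of the option reference above.
```bash
$ pulsar-admin sources create \
  --tenant public \
  --namespace default \
  --name my-kafka-source \
  --source-type kafka \
  --destination-topic-name my-topic \
  --parallelism 1 \
  --source-config-file conf/kafka-source.yaml
```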
### `update`
Update an already submitted Pulsar IO source connector.
#### Usage
```bash
$ pulsar-admin sources update options
```
#### Options
|Flag|Description|
|----|---|
| `-a`, `--archive` | The path to the NAR archive for the source. <br /> It also supports url-path (http/https/file [file protocol assumes that file already exists on worker host]) from which worker can download the package.
| `--classname` | The source's class name if `archive` is file-url-path (file://).
| `--cpu` | The CPU (in cores) that needs to be allocated per source instance (applicable only to Docker runtime).
| `--deserialization-classname` | The SerDe classname for the source.
| `--destination-topic-name` | The Pulsar topic to which data is sent.
| `--disk` | The disk (in bytes) that needs to be allocated per source instance (applicable only to Docker runtime).
|`--name` | The source's name.
| `--namespace` | The source's namespace.
| ` --parallelism` | The source's parallelism factor, that is, the number of source instances to run.
| `--processing-guarantees` | The processing guarantees (aka delivery semantics) applied to the source.<br />Possible Values: ATLEAST_ONCE, ATMOST_ONCE, EFFECTIVELY_ONCE.
| `--ram` | The RAM (in bytes) that needs to be allocated per source instance (applicable only to the process and Docker runtimes).
| `-st`, `--schema-type` | The schema type.<br /> Either a builtin schema (for example, AVRO and JSON) or custom schema class name to be used to encode messages emitted from source.
| `--source-config` | Source config key/values.
| `--source-config-file` | The path to a YAML config file specifying the source's configuration.
| `-t`, `--source-type` | The source's connector provider. The `source-type` parameter of the currently built-in connectors is determined by the setting of the `name` parameter specified in the pulsar-io.yaml file.
| `--tenant` | The source's tenant.
| `--update-auth-data` | Whether or not to update the auth data.<br />**Default value: false.**
### `delete`
Delete a Pulsar IO source connector.
#### Usage
```bash
$ pulsar-admin sources delete options
```
#### Option
|Flag|Description|
|---|---|
|`--name`|The source's name.|
|`--namespace`|The source's namespace.|
|`--tenant`|The source's tenant.|
### `get`
Get the information about a Pulsar IO source connector.
#### Usage
```bash
$ pulsar-admin sources get options
```
#### Options
|Flag|Description|
|---|---|
|`--name`|The source's name.|
|`--namespace`|The source's namespace.|
|`--tenant`|The source's tenant.|
### `status`
Check the current status of a Pulsar Source.
#### Usage
```bash
$ pulsar-admin sources status options
```
#### Options
|Flag|Description|
|---|---|
|`--instance-id`|The source ID.<br />If `instance-id` is not provided, Pulsar gets the status of all instances.|
|`--name`|The source's name.|
|`--namespace`|The source's namespace.|
|`--tenant`|The source's tenant.|
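#### Example
For instance, checking a single source by name (tenant, namespace, and source name are placeholder values):
```bash
$ pulsar-admin sources status \
  --tenant public \
  --namespace default \
  --name my-kafka-source
```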
### `list`
List all running Pulsar IO source connectors.
#### Usage
```bash
$ pulsar-admin sources list options
```
#### Options
|Flag|Description|
|---|---|
|`--namespace`|The source's namespace.|
|`--tenant`|The source's tenant.|
### `stop`
Stop a source instance.
#### Usage
```bash
$ pulsar-admin sources stop options
```
#### Options
|Flag|Description|
|---|---|
|`--instance-id`|The source instanceID.<br />If `instance-id` is not provided, Pulsar stops all instances.|
|`--name`|The source's name.|
|`--namespace`|The source's namespace.|
|`--tenant`|The source's tenant.|
### `start`
Start a source instance.
#### Usage
```bash
$ pulsar-admin sources start options
```
#### Options
|Flag|Description|
|---|---|
|`--instance-id`|The source instanceID.<br />If `instance-id` is not provided, Pulsar starts all instances.|
|`--name`|The source's name.|
|`--namespace`|The source's namespace.|
|`--tenant`|The source's tenant.|
### `restart`
Restart a source instance.
#### Usage
```bash
$ pulsar-admin sources restart options
```
#### Options
|Flag|Description|
|---|---|
|`--instance-id`|The source instanceID.<br />If `instance-id` is not provided, Pulsar restarts all instances.
|`--name`|The source's name.|
|`--namespace`|The source's namespace.|
|`--tenant`|The source's tenant.|
### `localrun`
Run a Pulsar IO source connector locally rather than deploying it to the Pulsar cluster.
#### Usage
```bash
$ pulsar-admin sources localrun options
```
#### Options
|Flag|Description|
|----|---|
| `-a`, `--archive` | The path to the NAR archive for the Source. <br /> It also supports url-path (http/https/file [file protocol assumes that file already exists on worker host]) from which worker can download the package.
| `--broker-service-url` | The URL for the Pulsar broker.
|`--classname`|The source's class name if `archive` is file-url-path (file://).
| `--client-auth-params` | Client authentication parameter.
| `--client-auth-plugin` | Client authentication plugin using which function-process can connect to broker.
|`--cpu`|The CPU (in cores) that needs to be allocated per source instance (applicable only to the Docker runtime).|
|`--deserialization-classname`|The SerDe classname for the source.
|`--destination-topic-name`|The Pulsar topic to which data is sent.
|`--disk`|The disk (in bytes) that needs to be allocated per source instance (applicable only to the Docker runtime).|
|`--hostname-verification-enabled`|Enable hostname verification.<br />**Default value: false**.
|`--name`|The source’s name.|
|`--namespace`|The source’s namespace.|
|`--parallelism`|The source’s parallelism factor, that is, the number of source instances to run.|
|`--processing-guarantees`|The processing guarantees (aka delivery semantics) applied to the source. <br />Available values: ATLEAST_ONCE, ATMOST_ONCE, EFFECTIVELY_ONCE.
|`--ram`|The RAM (in bytes) that needs to be allocated per source instance (applicable only to the Docker runtime).|
| `-st`, `--schema-type` | The schema type.<br /> Either a builtin schema (for example, AVRO and JSON) or custom schema class name to be used to encode messages emitted from source.
|`--source-config`|Source config key/values.
|`--source-config-file`|The path to a YAML config file specifying the source’s configuration.
|`--source-type`|The source's connector provider. The `source-type` parameter of the currently built-in connectors is determined by the setting of the `name` parameter specified in the pulsar-io.yaml file.
|`--tenant`|The source’s tenant.
|`--tls-allow-insecure`|Allow insecure tls connection.<br />**Default value: false**.
|`--tls-trust-cert-path`|The tls trust cert file path.
|`--use-tls`|Use tls connection.<br />**Default value: false**.
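#### Example
A hypothetical local run against a broker on localhost; the NAR path, broker URL, topic, and config file are assumptions for illustration only:
```bash
$ pulsar-admin sources localrun \
  --broker-service-url pulsar://localhost:6650 \
  --archive connectors/pulsar-io-kafka.nar \
  --tenant public \
  --namespace default \
  --name my-kafka-source \
  --destination-topic-name my-topic \
  --source-config-file conf/kafka-source.yaml
```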
### `available-sources`
Get the list of Pulsar IO connector sources supported by Pulsar cluster.
#### Usage
```bash
$ pulsar-admin sources available-sources
```
### `reload`
Reload the available built-in connectors.
#### Usage
```bash
$ pulsar-admin sources reload
```
## `sinks`
An interface for managing Pulsar IO sinks (egress data from Pulsar).
```bash
$ pulsar-admin sinks subcommands
```
Subcommands are:
* `create`
* `update`
* `delete`
* `get`
* `status`
* `list`
* `stop`
* `start`
* `restart`
* `localrun`
* `available-sinks`
* `reload`
### `create`
Submit a Pulsar IO sink connector to run in a Pulsar cluster.
#### Usage
```bash
$ pulsar-admin sinks create options
```
#### Options
|Flag|Description|
|----|---|
| `-a`, `--archive` | The path to the archive file for the sink. <br /> It also supports url-path (http/https/file [file protocol assumes that file already exists on worker host]) from which worker can download the package.
| `--auto-ack` | Whether or not the framework will automatically acknowledge messages.
| `--classname` | The sink's class name if `archive` is file-url-path (file://).
| `--cpu` | The CPU (in cores) that needs to be allocated per sink instance (applicable only to Docker runtime).
| `--custom-schema-inputs` | The map of input topics to schema types or class names (as a JSON string).
| `--custom-serde-inputs` | The map of input topics to SerDe class names (as a JSON string).
| `--disk` | The disk (in bytes) that needs to be allocated per sink instance (applicable only to Docker runtime).
|`-i, --inputs` | The sink's input topic or topics (multiple topics can be specified as a comma-separated list).
|`--name` | The sink's name.
| `--namespace` | The sink's namespace.
| ` --parallelism` | The sink's parallelism factor, that is, the number of sink instances to run.
| `--processing-guarantees` | The processing guarantees (aka delivery semantics) applied to the sink.<br />Possible Values: ATLEAST_ONCE, ATMOST_ONCE, EFFECTIVELY_ONCE.
| `--ram` | The RAM (in bytes) that needs to be allocated per sink instance (applicable only to the process and Docker runtimes).
| `--retain-ordering` | Sink consumes and sinks messages in order.
| `--sink-config` | sink config key/values.
| `--sink-config-file` | The path to a YAML config file specifying the sink's configuration.
| `-t`, `--sink-type` | The sink's connector provider. The `sink-type` parameter of the currently built-in connectors is determined by the setting of the `name` parameter specified in the pulsar-io.yaml file.
| `--subs-name` | Pulsar source subscription name if user wants a specific subscription-name for input-topic consumer.
| `--tenant` | The sink's tenant.
| `--timeout-ms` | The message timeout in milliseconds.
| `--topics-pattern` | TopicsPattern to consume from list of topics under a namespace that match the pattern. <br />`--input` and `--topics-Pattern` are mutually exclusive. <br />Add SerDe class name for a pattern in `--customSerdeInputs` (supported for Java functions only).
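#### Example
A minimal sketch of submitting a sink connector; the connector type, input topic, and config file path are placeholder values:
```bash
$ pulsar-admin sinks create \
  --tenant public \
  --namespace default \
  --name my-jdbc-sink \
  --sink-type jdbc \
  --inputs my-topic \
  --parallelism 1 \
  --sink-config-file conf/jdbc-sink.yaml
```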
### `update`
Update a Pulsar IO sink connector.
#### Usage
```bash
$ pulsar-admin sinks update options
```
#### Options
|Flag|Description|
|----|---|
| `-a`, `--archive` | The path to the archive file for the sink. <br /> It also supports url-path (http/https/file [file protocol assumes that file already exists on worker host]) from which worker can download the package.
| `--auto-ack` | Whether or not the framework will automatically acknowledge messages.
| `--classname` | The sink's class name if `archive` is file-url-path (file://).
| `--cpu` | The CPU (in cores) that needs to be allocated per sink instance (applicable only to Docker runtime).
| `--custom-schema-inputs` | The map of input topics to schema types or class names (as a JSON string).
| `--custom-serde-inputs` | The map of input topics to SerDe class names (as a JSON string).
| `--disk` | The disk (in bytes) that needs to be allocated per sink instance (applicable only to Docker runtime).
|`-i, --inputs` | The sink's input topic or topics (multiple topics can be specified as a comma-separated list).
|`--name` | The sink's name.
| `--namespace` | The sink's namespace.
| ` --parallelism` | The sink's parallelism factor, that is, the number of sink instances to run.
| `--processing-guarantees` | The processing guarantees (aka delivery semantics) applied to the sink.<br />Possible Values: ATLEAST_ONCE, ATMOST_ONCE, EFFECTIVELY_ONCE.
| `--ram` | The RAM (in bytes) that needs to be allocated per sink instance (applicable only to the process and Docker runtimes).
| `--retain-ordering` | Sink consumes and sinks messages in order.
| `--sink-config` | sink config key/values.
| `--sink-config-file` | The path to a YAML config file specifying the sink's configuration.
| `-t`, `--sink-type` | The sink's connector provider. The `sink-type` parameter of the currently built-in connectors is determined by the setting of the `name` parameter specified in the pulsar-io.yaml file.
| `--subs-name` | Pulsar source subscription name if user wants a specific subscription-name for input-topic consumer.
| `--tenant` | The sink's tenant.
| `--timeout-ms` | The message timeout in milliseconds.
| `--topics-pattern` | TopicsPattern to consume from list of topics under a namespace that match the pattern. <br />`--input` and `--topics-Pattern` are mutually exclusive. <br />Add SerDe class name for a pattern in `--customSerdeInputs` (supported for Java functions only).
| `--update-auth-data` | Whether or not to update the auth data.<br />**Default value: false.**
### `delete`
Delete a Pulsar IO sink connector.
#### Usage
```bash
$ pulsar-admin sinks delete options
```
#### Option
|Flag|Description|
|---|---|
|`--name`|The sink's name.|
|`--namespace`|The sink's namespace.|
|`--tenant`|The sink's tenant.|
### `get`
Get the information about a Pulsar IO sink connector.
#### Usage
```bash
$ pulsar-admin sinks get options
```
#### Options
|Flag|Description|
|---|---|
|`--name`|The sink's name.|
|`--namespace`|The sink's namespace.|
|`--tenant`|The sink's tenant.|
### `status`
Check the current status of a Pulsar sink.
#### Usage
```bash
$ pulsar-admin sinks status options
```
#### Options
|Flag|Description|
|---|---|
|`--instance-id`|The sink ID.<br />If `instance-id` is not provided, Pulsar gets the status of all instances.|
|`--name`|The sink's name.|
|`--namespace`|The sink's namespace.|
|`--tenant`|The sink's tenant.|
### `list`
List all running Pulsar IO sink connectors.
#### Usage
```bash
$ pulsar-admin sinks list options
```
#### Options
|Flag|Description|
|---|---|
|`--namespace`|The sink's namespace.|
|`--tenant`|The sink's tenant.|
### `stop`
Stop a sink instance.
#### Usage
```bash
$ pulsar-admin sinks stop options
```
#### Options
|Flag|Description|
|---|---|
|`--instance-id`|The sink instanceID.<br />If `instance-id` is not provided, Pulsar stops all instances.|
|`--name`|The sink's name.|
|`--namespace`|The sink's namespace.|
|`--tenant`|The sink's tenant.|
### `start`
Start a sink instance.
#### Usage
```bash
$ pulsar-admin sinks start options
```
#### Options
|Flag|Description|
|---|---|
|`--instance-id`|The sink instanceID.<br />If `instance-id` is not provided, Pulsar starts all instances.|
|`--name`|The sink's name.|
|`--namespace`|The sink's namespace.|
|`--tenant`|The sink's tenant.|
### `restart`
Restart a sink instance.
#### Usage
```bash
$ pulsar-admin sinks restart options
```
#### Options
|Flag|Description|
|---|---|
|`--instance-id`|The sink instanceID.<br />If `instance-id` is not provided, Pulsar restarts all instances.
|`--name`|The sink's name.|
|`--namespace`|The sink's namespace.|
|`--tenant`|The sink's tenant.|
### `localrun`
Run a Pulsar IO sink connector locally rather than deploying it to the Pulsar cluster.
#### Usage
```bash
$ pulsar-admin sinks localrun options
```
#### Options
|Flag|Description|
|----|---|
| `-a`, `--archive` | The path to the archive file for the sink. <br /> It also supports url-path (http/https/file [file protocol assumes that file already exists on worker host]) from which worker can download the package.
| `--auto-ack` | Whether or not the framework will automatically acknowledge messages.
| `--broker-service-url` | The URL for the Pulsar broker.
|`--classname`|The sink's class name if `archive` is file-url-path (file://).
| `--client-auth-params` | Client authentication parameter.
| `--client-auth-plugin` | Client authentication plugin using which function-process can connect to broker.
|`--cpu`|The CPU (in cores) that needs to be allocated per sink instance (applicable only to the Docker runtime).
| `--custom-schema-inputs` | The map of input topics to Schema types or class names (as a JSON string).
| `--custom-serde-inputs` | The map of input topics to SerDe class names (as a JSON string).
|`--disk`|The disk (in bytes) that needs to be allocated per sink instance (applicable only to the Docker runtime).|
|`--hostname-verification-enabled`|Enable hostname verification.<br />**Default value: false**.
| `-i`, `--inputs` | The sink's input topic or topics (multiple topics can be specified as a comma-separated list).
|`--name`|The sink’s name.|
|`--namespace`|The sink’s namespace.|
|`--parallelism`|The sink’s parallelism factor, that is, the number of sink instances to run.|
|`--processing-guarantees`|The processing guarantees (aka delivery semantics) applied to the sink. <br />Available values: ATLEAST_ONCE, ATMOST_ONCE, EFFECTIVELY_ONCE.
|`--ram`|The RAM (in bytes) that needs to be allocated per sink instance (applicable only to the Docker runtime).|
|`--retain-ordering` | Sink consumes and sinks messages in order.
|`--sink-config`|sink config key/values.
|`--sink-config-file`|The path to a YAML config file specifying the sink’s configuration.
|`--sink-type`|The sink's connector provider. The `sink-type` parameter of the currently built-in connectors is determined by the setting of the `name` parameter specified in the pulsar-io.yaml file.
|`--subs-name` | Pulsar source subscription name if user wants a specific subscription-name for input-topic consumer.
|`--tenant`|The sink’s tenant.
| `--timeout-ms` | The message timeout in milliseconds.
|`--tls-allow-insecure`|Allow insecure tls connection.<br />**Default value: false**.
|`--tls-trust-cert-path`|The tls trust cert file path.
| `--topics-pattern` | TopicsPattern to consume from list of topics under a namespace that match the pattern. <br />`--input` and `--topics-Pattern` are mutually exclusive. <br />Add SerDe class name for a pattern in `--customSerdeInputs` (supported for Java functions only).
|`--use-tls`|Use tls connection.<br />**Default value: false**.
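#### Example
A hypothetical local run of a sink; the NAR path, broker URL, input topic, and config file are illustrative assumptions:
```bash
$ pulsar-admin sinks localrun \
  --broker-service-url pulsar://localhost:6650 \
  --archive connectors/pulsar-io-jdbc.nar \
  --tenant public \
  --namespace default \
  --name my-jdbc-sink \
  --inputs my-topic \
  --sink-config-file conf/jdbc-sink.yaml
```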
### `available-sinks`
Get the list of Pulsar IO connector sinks supported by Pulsar cluster.
#### Usage
```bash
$ pulsar-admin sinks available-sinks
```
### `reload`
Reload the available built-in connectors.
#### Usage
```bash
$ pulsar-admin sinks reload
```
|
massakam/pulsar
|
site2/website-next/versioned_docs/version-2.6.0/io-cli.md
|
Markdown
|
apache-2.0
| 20,332 | 30.105666 | 269 | 0.700473 | false |
/**
* The BIP70 payment protocol wraps Bitcoin transactions and adds various useful features like memos, refund addresses
* and authentication.
*/
package io.xpydev.paycoinj.protocols.payments;
|
ligerzero459/paycoinj
|
core/src/main/java/io/xpydev/paycoinj/protocols/payments/package-info.java
|
Java
|
apache-2.0
| 196 | 38.4 | 118 | 0.795918 | false |
#!/bin/bash
# root
sed -i 's/^PermitRootLogin.\+$/PermitRootLogin no/g' /etc/ssh/sshd_config
usermod -p '$1$super$440quZi/kSzmHnTJR1j3a.' root
# opuser
useradd -m -p '$1$super$uXdjVfWjBYtxOPjOXZb3k0' -s /bin/bash opuser
usermod -aG sudo opuser
# boot ssh service
sed -i 's/^SSHD_OPTS=$/SSHD_OPTS=-u0/' /etc/default/ssh
sed -i '/^exit 0$/ i\service ssh start' /etc/rc.local
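# password aging: force change at next login, min 1 day, max 90 days, warn 7 days before expiry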
chage -d 0 root
chage -m 1 root
chage -M 90 root
chage -W 7 root
chage -d 0 opuser
chage -m 1 opuser
chage -M 90 opuser
chage -W 7 opuser
|
linzhaolover/myansible
|
makeDockerImage/devops_ubuntu1604/setpass.sh
|
Shell
|
apache-2.0
| 518 | 20.583333 | 73 | 0.698842 | false |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/backup/model/GetBackupPlanFromJSONResult.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <aws/core/AmazonWebServiceResult.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/UnreferencedParam.h>
#include <utility>
using namespace Aws::Backup::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
using namespace Aws;
GetBackupPlanFromJSONResult::GetBackupPlanFromJSONResult()
{
}
GetBackupPlanFromJSONResult::GetBackupPlanFromJSONResult(const Aws::AmazonWebServiceResult<JsonValue>& result)
{
*this = result;
}
GetBackupPlanFromJSONResult& GetBackupPlanFromJSONResult::operator =(const Aws::AmazonWebServiceResult<JsonValue>& result)
{
JsonView jsonValue = result.GetPayload().View();
if(jsonValue.ValueExists("BackupPlan"))
{
m_backupPlan = jsonValue.GetObject("BackupPlan");
}
return *this;
}
|
jt70471/aws-sdk-cpp
|
aws-cpp-sdk-backup/source/model/GetBackupPlanFromJSONResult.cpp
|
C++
|
apache-2.0
| 1,000 | 23.95 | 122 | 0.770541 | false |
package integration_test
import (
"path/filepath"
"github.com/cloudfoundry/libbuildpack/cutlass"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("CF NodeJS Buildpack", func() {
var app *cutlass.App
AfterEach(func() {
if app != nil {
app.Destroy()
}
app = nil
})
Context("deploying a NodeJS app with NewRelic", func() {
Context("when New Relic environment variables are set", func() {
BeforeEach(func() {
app = cutlass.New(filepath.Join(bpDir, "fixtures", "with_newrelic"))
})
It("tries to talk to NewRelic with the license key from the env vars", func() {
PushAppAndConfirm(app)
Expect(app.Stdout.String()).To(ContainSubstring("&license_key=fake_new_relic_key2"))
Expect(app.Stdout.String()).ToNot(ContainSubstring("&license_key=fake_new_relic_key1"))
})
})
Context("when newrelic.js sets license_key", func() {
BeforeEach(func() {
app = cutlass.New(filepath.Join(bpDir, "fixtures", "with_newrelic_js"))
})
It("tries to talk to NewRelic with the license key from newrelic.js", func() {
PushAppAndConfirm(app)
Expect(app.Stdout.String()).ToNot(ContainSubstring("&license_key=fake_new_relic_key2"))
Expect(app.Stdout.String()).To(ContainSubstring("&license_key=fake_new_relic_key1"))
})
})
})
})
|
pkumar-appd/nodejs-buildpack
|
src/nodejs/integration/nodejs_app_with_newrelic_test.go
|
GO
|
apache-2.0
| 1,309 | 27.456522 | 91 | 0.676089 | false |
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package job
import (
"reflect"
"sort"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/record"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/workqueue"
"k8s.io/kubernetes/pkg/watch"
)
type JobController struct {
kubeClient client.Interface
podControl controller.PodControlInterface
// To allow injection of updateJobStatus for testing.
updateHandler func(job *extensions.Job) error
syncHandler func(jobKey string) error
// podStoreSynced returns true if the pod store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
podStoreSynced func() bool
// A TTLCache of pod creates/deletes each rc expects to see
expectations controller.ControllerExpectationsInterface
// A store of job, populated by the jobController
jobStore cache.StoreToJobLister
// Watches changes to all jobs
jobController *framework.Controller
// A store of pods, populated by the podController
podStore cache.StoreToPodLister
// Watches changes to all pods
podController *framework.Controller
// Jobs that need to be updated
queue *workqueue.Type
}
func NewJobController(kubeClient client.Interface, resyncPeriod controller.ResyncPeriodFunc) *JobController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
jm := &JobController{
kubeClient: kubeClient,
podControl: controller.RealPodControl{
KubeClient: kubeClient,
Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "job"}),
},
expectations: controller.NewControllerExpectations(),
queue: workqueue.New(),
}
jm.jobStore.Store, jm.jobController = framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return jm.kubeClient.Extensions().Jobs(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return jm.kubeClient.Extensions().Jobs(api.NamespaceAll).Watch(options)
},
},
&extensions.Job{},
// TODO: Can we have much longer period here?
replicationcontroller.FullControllerResyncPeriod,
framework.ResourceEventHandlerFuncs{
AddFunc: jm.enqueueController,
UpdateFunc: func(old, cur interface{}) {
if job := cur.(*extensions.Job); !isJobFinished(job) {
jm.enqueueController(job)
}
},
DeleteFunc: jm.enqueueController,
},
)
jm.podStore.Store, jm.podController = framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return jm.kubeClient.Pods(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return jm.kubeClient.Pods(api.NamespaceAll).Watch(options)
},
},
&api.Pod{},
resyncPeriod(),
framework.ResourceEventHandlerFuncs{
AddFunc: jm.addPod,
UpdateFunc: jm.updatePod,
DeleteFunc: jm.deletePod,
},
)
jm.updateHandler = jm.updateJobStatus
jm.syncHandler = jm.syncJob
jm.podStoreSynced = jm.podController.HasSynced
return jm
}
// Run the main goroutine responsible for watching and syncing jobs.
func (jm *JobController) Run(workers int, stopCh <-chan struct{}) {
defer util.HandleCrash()
go jm.jobController.Run(stopCh)
go jm.podController.Run(stopCh)
for i := 0; i < workers; i++ {
go util.Until(jm.worker, time.Second, stopCh)
}
<-stopCh
glog.Infof("Shutting down Job Manager")
jm.queue.ShutDown()
}
// getPodJob returns the job managing the given pod.
func (jm *JobController) getPodJob(pod *api.Pod) *extensions.Job {
jobs, err := jm.jobStore.GetPodJobs(pod)
if err != nil {
glog.V(4).Infof("No jobs found for pod %v, job controller will avoid syncing", pod.Name)
return nil
}
if len(jobs) > 1 {
glog.Errorf("user error! more than one job is selecting pods with labels: %+v", pod.Labels)
sort.Sort(byCreationTimestamp(jobs))
}
return &jobs[0]
}
// When a pod is created, enqueue the controller that manages it and update its expectations.
func (jm *JobController) addPod(obj interface{}) {
pod := obj.(*api.Pod)
if pod.DeletionTimestamp != nil {
// on a restart of the controller manager, it's possible a new pod shows up in a state that
// is already pending deletion. Prevent the pod from being a creation observation.
jm.deletePod(pod)
return
}
if job := jm.getPodJob(pod); job != nil {
jobKey, err := controller.KeyFunc(job)
if err != nil {
glog.Errorf("Couldn't get key for job %#v: %v", job, err)
return
}
jm.expectations.CreationObserved(jobKey)
jm.enqueueController(job)
}
}
// When a pod is updated, figure out what job/s manage it and wake them up.
// If the labels of the pod have changed we need to awaken both the old
// and new job. old and cur must be *api.Pod types.
func (jm *JobController) updatePod(old, cur interface{}) {
if api.Semantic.DeepEqual(old, cur) {
// A periodic relist will send update events for all known pods.
return
}
curPod := cur.(*api.Pod)
if curPod.DeletionTimestamp != nil {
// when a pod is deleted gracefully its deletion timestamp is first modified to reflect a grace period,
// and after such time has passed, the kubelet actually deletes it from the store. We receive an update
// for modification of the deletion timestamp and expect a job to create more pods asap, not wait
// until the kubelet actually deletes the pod.
jm.deletePod(curPod)
return
}
if job := jm.getPodJob(curPod); job != nil {
jm.enqueueController(job)
}
oldPod := old.(*api.Pod)
// Only need to get the old job if the labels changed.
if !reflect.DeepEqual(curPod.Labels, oldPod.Labels) {
// If the old and new job are the same, the first one that syncs
// will set expectations preventing any damage from the second.
if oldJob := jm.getPodJob(oldPod); oldJob != nil {
jm.enqueueController(oldJob)
}
}
}
// When a pod is deleted, enqueue the job that manages the pod and update its expectations.
// obj could be an *api.Pod, or a DeletionFinalStateUnknown marker item.
func (jm *JobController) deletePod(obj interface{}) {
pod, ok := obj.(*api.Pod)
// When a delete is dropped, the relist will notice a pod in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the pod
// changed labels the new job will not be woken up till the periodic resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("Couldn't get object from tombstone %+v, could take up to %v before a job recreates a pod", obj, controller.ExpectationsTimeout)
return
}
pod, ok = tombstone.Obj.(*api.Pod)
if !ok {
glog.Errorf("Tombstone contained object that is not a pod %+v, could take up to %v before job recreates a pod", obj, controller.ExpectationsTimeout)
return
}
}
if job := jm.getPodJob(pod); job != nil {
jobKey, err := controller.KeyFunc(job)
if err != nil {
glog.Errorf("Couldn't get key for job %#v: %v", job, err)
return
}
jm.expectations.DeletionObserved(jobKey)
jm.enqueueController(job)
}
}
// obj could be an *extensions.Job, or a DeletionFinalStateUnknown marker item.
func (jm *JobController) enqueueController(obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
glog.Errorf("Couldn't get key for object %+v: %v", obj, err)
return
}
// TODO: Handle overlapping controllers better. Either disallow them at admission time or
// deterministically avoid syncing controllers that fight over pods. Currently, we only
// ensure that the same controller is synced for a given pod. When we periodically relist
// all controllers there will still be some replica instability. One way to handle this is
// by querying the store for all controllers that this rc overlaps, as well as all
// controllers that overlap this rc, and sorting them.
jm.queue.Add(key)
}
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (jm *JobController) worker() {
for {
func() {
key, quit := jm.queue.Get()
if quit {
return
}
defer jm.queue.Done(key)
err := jm.syncHandler(key.(string))
if err != nil {
glog.Errorf("Error syncing job: %v", err)
}
}()
}
}
// syncJob will sync the job with the given key if it has had its expectations fulfilled, meaning
// it did not expect to see any more of its pods created or deleted. This function is not meant to be invoked
// concurrently with the same key.
func (jm *JobController) syncJob(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing job %q (%v)", key, time.Now().Sub(startTime))
}()
obj, exists, err := jm.jobStore.Store.GetByKey(key)
if !exists {
glog.V(4).Infof("Job has been deleted: %v", key)
jm.expectations.DeleteExpectations(key)
return nil
}
if err != nil {
glog.Errorf("Unable to retrieve job %v from store: %v", key, err)
jm.queue.Add(key)
return err
}
job := *obj.(*extensions.Job)
if !jm.podStoreSynced() {
// Sleep so we give the pod reflector goroutine a chance to run.
time.Sleep(replicationcontroller.PodStoreSyncedPollPeriod)
glog.V(4).Infof("Waiting for pods controller to sync, requeuing job %v", job.Name)
jm.enqueueController(&job)
return nil
}
// Check the expectations of the job before counting active pods, otherwise a new pod can sneak in
// and update the expectations after we've retrieved active pods from the store. If a new pod enters
// the store after we've checked the expectation, the job sync is just deferred till the next relist.
jobKey, err := controller.KeyFunc(&job)
if err != nil {
glog.Errorf("Couldn't get key for job %#v: %v", job, err)
return err
}
jobNeedsSync := jm.expectations.SatisfiedExpectations(jobKey)
selector, _ := extensions.LabelSelectorAsSelector(job.Spec.Selector)
podList, err := jm.podStore.Pods(job.Namespace).List(selector)
if err != nil {
glog.Errorf("Error getting pods for job %q: %v", key, err)
jm.queue.Add(key)
return err
}
activePods := controller.FilterActivePods(podList.Items)
active := len(activePods)
succeeded, failed := getStatus(podList.Items)
if jobNeedsSync {
active = jm.manageJob(activePods, succeeded, &job)
}
completions := succeeded
if completions == *job.Spec.Completions {
job.Status.Conditions = append(job.Status.Conditions, newCondition())
}
// no need to update the job if the status hasn't changed since last time
if job.Status.Active != active || job.Status.Succeeded != succeeded || job.Status.Failed != failed {
job.Status.Active = active
job.Status.Succeeded = succeeded
job.Status.Failed = failed
if err := jm.updateHandler(&job); err != nil {
glog.Errorf("Failed to update job %v, requeuing. Error: %v", job.Name, err)
jm.enqueueController(&job)
}
}
return nil
}
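// newCondition returns a JobComplete condition with status ConditionTrue and the current time as
// both the probe and transition timestamps.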
func newCondition() extensions.JobCondition {
return extensions.JobCondition{
Type: extensions.JobComplete,
Status: api.ConditionTrue,
LastProbeTime: unversioned.Now(),
LastTransitionTime: unversioned.Now(),
}
}
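// getStatus returns the number of succeeded and failed pods in the given list.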
func getStatus(pods []api.Pod) (succeeded, failed int) {
succeeded = filterPods(pods, api.PodSucceeded)
failed = filterPods(pods, api.PodFailed)
return
}
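// manageJob reconciles the number of active pods with the job's parallelism: surplus active pods
// are deleted (preferring pods in earlier lifecycle stages) and missing pods are created, bounded
// by the remaining completions. Expectations are recorded before the create/delete calls and
// adjusted again when a call fails. It returns the resulting number of active pods.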
func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int, job *extensions.Job) int {
var activeLock sync.Mutex
active := len(activePods)
parallelism := *job.Spec.Parallelism
jobKey, err := controller.KeyFunc(job)
if err != nil {
glog.Errorf("Couldn't get key for job %#v: %v", job, err)
return 0
}
if active > parallelism {
diff := active - parallelism
jm.expectations.ExpectDeletions(jobKey, diff)
glog.V(4).Infof("Too many pods running job %q, need %d, deleting %d", jobKey, parallelism, diff)
// Sort the pods in the order such that not-ready < ready, unscheduled
// < scheduled, and pending < running. This ensures that we delete pods
// in the earlier stages whenever possible.
sort.Sort(controller.ActivePods(activePods))
active -= diff
wait := sync.WaitGroup{}
wait.Add(diff)
for i := 0; i < diff; i++ {
go func(ix int) {
defer wait.Done()
if err := jm.podControl.DeletePod(job.Namespace, activePods[ix].Name); err != nil {
defer util.HandleError(err)
// Decrement the expected number of deletes because the informer won't observe this deletion
jm.expectations.DeletionObserved(jobKey)
activeLock.Lock()
active++
activeLock.Unlock()
}
}(i)
}
wait.Wait()
} else if active < parallelism {
// how many executions are left to run
diff := *job.Spec.Completions - succeeded
// limit to parallelism and count active pods as well
if diff > parallelism {
diff = parallelism
}
diff -= active
jm.expectations.ExpectCreations(jobKey, diff)
glog.V(4).Infof("Too few pods running job %q, need %d, creating %d", jobKey, parallelism, diff)
active += diff
wait := sync.WaitGroup{}
wait.Add(diff)
for i := 0; i < diff; i++ {
go func() {
defer wait.Done()
if err := jm.podControl.CreatePods(job.Namespace, &job.Spec.Template, job); err != nil {
defer util.HandleError(err)
// Decrement the expected number of creates because the informer won't observe this pod
jm.expectations.CreationObserved(jobKey)
activeLock.Lock()
active--
activeLock.Unlock()
}
}()
}
wait.Wait()
}
return active
}
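// updateJobStatus writes the job's status back through the extensions API client.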
func (jm *JobController) updateJobStatus(job *extensions.Job) error {
_, err := jm.kubeClient.Extensions().Jobs(job.Namespace).UpdateStatus(job)
return err
}
// filterPods returns pods based on their phase.
func filterPods(pods []api.Pod, phase api.PodPhase) int {
result := 0
for i := range pods {
if phase == pods[i].Status.Phase {
result++
}
}
return result
}
func isJobFinished(j *extensions.Job) bool {
for _, c := range j.Status.Conditions {
if c.Type == extensions.JobComplete && c.Status == api.ConditionTrue {
return true
}
}
return false
}
// byCreationTimestamp sorts a list by creation timestamp, using their names as a tie breaker.
type byCreationTimestamp []extensions.Job
func (o byCreationTimestamp) Len() int { return len(o) }
func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o byCreationTimestamp) Less(i, j int) bool {
if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) {
return o[i].Name < o[j].Name
}
return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
}
|
cameronbrunner/kubernetes
|
pkg/controller/job/controller.go
|
GO
|
apache-2.0
| 15,629 | 32.395299 | 151 | 0.717192 | false |
/* Copyright (C) 1995-1998 Eric Young ([email protected])
* All rights reserved.
*
* This package is an SSL implementation written
* by Eric Young ([email protected]).
* The implementation was written so as to conform with Netscapes SSL.
*
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson ([email protected]).
*
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* "This product includes cryptographic software written by
* Eric Young ([email protected])"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
* 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson ([email protected])"
*
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.]
*/
/* ====================================================================
* Copyright 2005 Nokia. All rights reserved.
*
* The portions of the attached software ("Contribution") is developed by
* Nokia Corporation and is licensed pursuant to the OpenSSL open source
* license.
*
* The Contribution, originally written by Mika Kousa and Pasi Eronen of
* Nokia Corporation, consists of the "PSK" (Pre-Shared Key) ciphersuites
* support (see RFC 4279) to OpenSSL.
*
* No patent licenses or other rights except those expressly stated in
* the OpenSSL open source license shall be deemed granted or received
* expressly, by implication, estoppel, or otherwise.
*
* No assurances are provided by Nokia that the Contribution does not
* infringe the patent or other intellectual property rights of any third
* party or that the license provides you with all the necessary rights
* to make use of the Contribution.
*
* THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND. IN
* ADDITION TO THE DISCLAIMERS INCLUDED IN THE LICENSE, NOKIA
* SPECIFICALLY DISCLAIMS ANY LIABILITY FOR CLAIMS BROUGHT BY YOU OR ANY
* OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS OR
* OTHERWISE.
*/
#include <openssl/ssl.h>
#include "internal.h"
const char *SSL_state_string_long(const SSL *ssl) {
switch (ssl->state) {
case SSL_ST_ACCEPT:
return "before accept initialization";
case SSL_ST_CONNECT:
return "before connect initialization";
case SSL_ST_OK:
return "SSL negotiation finished successfully";
case SSL_ST_RENEGOTIATE:
return "SSL renegotiate ciphers";
/* SSLv3 additions */
case SSL3_ST_CW_CLNT_HELLO_A:
return "SSLv3 write client hello A";
case SSL3_ST_CW_CLNT_HELLO_B:
return "SSLv3 write client hello B";
case SSL3_ST_CR_SRVR_HELLO_A:
return "SSLv3 read server hello A";
case SSL3_ST_CR_CERT_A:
return "SSLv3 read server certificate A";
case SSL3_ST_CR_KEY_EXCH_A:
return "SSLv3 read server key exchange A";
case SSL3_ST_CR_CERT_REQ_A:
return "SSLv3 read server certificate request A";
case SSL3_ST_CR_SESSION_TICKET_A:
return "SSLv3 read server session ticket A";
case SSL3_ST_CR_SRVR_DONE_A:
return "SSLv3 read server done A";
case SSL3_ST_CW_CERT_A:
return "SSLv3 write client certificate A";
case SSL3_ST_CW_CERT_B:
return "SSLv3 write client certificate B";
case SSL3_ST_CW_CERT_C:
return "SSLv3 write client certificate C";
case SSL3_ST_CW_KEY_EXCH_A:
return "SSLv3 write client key exchange A";
case SSL3_ST_CW_KEY_EXCH_B:
return "SSLv3 write client key exchange B";
case SSL3_ST_CW_CERT_VRFY_A:
return "SSLv3 write certificate verify A";
case SSL3_ST_CW_CERT_VRFY_B:
return "SSLv3 write certificate verify B";
case SSL3_ST_CW_CHANGE:
case SSL3_ST_SW_CHANGE:
return "SSLv3 write change cipher spec";
case SSL3_ST_CW_FINISHED_A:
case SSL3_ST_SW_FINISHED_A:
return "SSLv3 write finished A";
case SSL3_ST_CW_FINISHED_B:
case SSL3_ST_SW_FINISHED_B:
return "SSLv3 write finished B";
case SSL3_ST_CR_CHANGE:
case SSL3_ST_SR_CHANGE:
return "SSLv3 read change cipher spec";
case SSL3_ST_CR_FINISHED_A:
case SSL3_ST_SR_FINISHED_A:
return "SSLv3 read finished A";
case SSL3_ST_CW_FLUSH:
case SSL3_ST_SW_FLUSH:
return "SSLv3 flush data";
case SSL3_ST_SR_CLNT_HELLO_A:
return "SSLv3 read client hello A";
case SSL3_ST_SR_CLNT_HELLO_B:
return "SSLv3 read client hello B";
case SSL3_ST_SR_CLNT_HELLO_C:
return "SSLv3 read client hello C";
case SSL3_ST_SW_HELLO_REQ_A:
return "SSLv3 write hello request A";
case SSL3_ST_SW_HELLO_REQ_B:
return "SSLv3 write hello request B";
case SSL3_ST_SW_HELLO_REQ_C:
return "SSLv3 write hello request C";
case SSL3_ST_SW_SRVR_HELLO_A:
return "SSLv3 write server hello A";
case SSL3_ST_SW_SRVR_HELLO_B:
return "SSLv3 write server hello B";
case SSL3_ST_SW_CERT_A:
return "SSLv3 write certificate A";
case SSL3_ST_SW_CERT_B:
return "SSLv3 write certificate B";
case SSL3_ST_SW_KEY_EXCH_A:
return "SSLv3 write key exchange A";
case SSL3_ST_SW_KEY_EXCH_B:
return "SSLv3 write key exchange B";
case SSL3_ST_SW_CERT_REQ_A:
return "SSLv3 write certificate request A";
case SSL3_ST_SW_CERT_REQ_B:
return "SSLv3 write certificate request B";
case SSL3_ST_SW_SESSION_TICKET_A:
return "SSLv3 write session ticket A";
case SSL3_ST_SW_SESSION_TICKET_B:
return "SSLv3 write session ticket B";
case SSL3_ST_SW_SRVR_DONE_A:
return "SSLv3 write server done A";
case SSL3_ST_SW_SRVR_DONE_B:
return "SSLv3 write server done B";
case SSL3_ST_SR_CERT_A:
return "SSLv3 read client certificate A";
case SSL3_ST_SR_KEY_EXCH_A:
return "SSLv3 read client key exchange A";
case SSL3_ST_SR_KEY_EXCH_B:
return "SSLv3 read client key exchange B";
case SSL3_ST_SR_CERT_VRFY_A:
return "SSLv3 read certificate verify A";
/* DTLS */
case DTLS1_ST_CR_HELLO_VERIFY_REQUEST_A:
return "DTLS1 read hello verify request A";
default:
return "unknown state";
}
}
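/* SSL_state_string returns a six-character abbreviation of the current handshake state of |ssl|. */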
const char *SSL_state_string(const SSL *ssl) {
switch (ssl->state) {
case SSL_ST_ACCEPT:
return "AINIT ";
case SSL_ST_CONNECT:
return "CINIT ";
case SSL_ST_OK:
return "SSLOK ";
/* SSLv3 additions */
case SSL3_ST_SW_FLUSH:
case SSL3_ST_CW_FLUSH:
return "3FLUSH";
case SSL3_ST_CW_CLNT_HELLO_A:
return "3WCH_A";
case SSL3_ST_CW_CLNT_HELLO_B:
return "3WCH_B";
case SSL3_ST_CR_SRVR_HELLO_A:
return "3RSH_A";
case SSL3_ST_CR_CERT_A:
return "3RSC_A";
case SSL3_ST_CR_KEY_EXCH_A:
return "3RSKEA";
case SSL3_ST_CR_CERT_REQ_A:
return "3RCR_A";
case SSL3_ST_CR_SRVR_DONE_A:
return "3RSD_A";
case SSL3_ST_CW_CERT_A:
return "3WCC_A";
case SSL3_ST_CW_CERT_B:
return "3WCC_B";
case SSL3_ST_CW_CERT_C:
return "3WCC_C";
case SSL3_ST_CW_KEY_EXCH_A:
return "3WCKEA";
case SSL3_ST_CW_KEY_EXCH_B:
return "3WCKEB";
case SSL3_ST_CW_CERT_VRFY_A:
return "3WCV_A";
case SSL3_ST_CW_CERT_VRFY_B:
return "3WCV_B";
case SSL3_ST_SW_CHANGE:
case SSL3_ST_CW_CHANGE:
return "3WCCS_";
case SSL3_ST_SW_FINISHED_A:
case SSL3_ST_CW_FINISHED_A:
return "3WFINA";
case SSL3_ST_SW_FINISHED_B:
case SSL3_ST_CW_FINISHED_B:
return "3WFINB";
case SSL3_ST_CR_CHANGE:
case SSL3_ST_SR_CHANGE:
return "3RCCS_";
case SSL3_ST_SR_FINISHED_A:
case SSL3_ST_CR_FINISHED_A:
return "3RFINA";
case SSL3_ST_SW_HELLO_REQ_A:
return "3WHR_A";
case SSL3_ST_SW_HELLO_REQ_B:
return "3WHR_B";
case SSL3_ST_SW_HELLO_REQ_C:
return "3WHR_C";
case SSL3_ST_SR_CLNT_HELLO_A:
return "3RCH_A";
case SSL3_ST_SR_CLNT_HELLO_B:
return "3RCH_B";
case SSL3_ST_SR_CLNT_HELLO_C:
return "3RCH_C";
case SSL3_ST_SW_SRVR_HELLO_A:
return "3WSH_A";
case SSL3_ST_SW_SRVR_HELLO_B:
return "3WSH_B";
case SSL3_ST_SW_CERT_A:
return "3WSC_A";
case SSL3_ST_SW_CERT_B:
return "3WSC_B";
case SSL3_ST_SW_KEY_EXCH_A:
return "3WSKEA";
case SSL3_ST_SW_KEY_EXCH_B:
return "3WSKEB";
case SSL3_ST_SW_CERT_REQ_A:
return "3WCR_A";
case SSL3_ST_SW_CERT_REQ_B:
return "3WCR_B";
case SSL3_ST_SW_SRVR_DONE_A:
return "3WSD_A";
case SSL3_ST_SW_SRVR_DONE_B:
return "3WSD_B";
case SSL3_ST_SR_CERT_A:
return "3RCC_A";
case SSL3_ST_SR_KEY_EXCH_A:
return "3RCKEA";
case SSL3_ST_SR_CERT_VRFY_A:
return "3RCV_A";
/* DTLS */
case DTLS1_ST_CR_HELLO_VERIFY_REQUEST_A:
return "DRCHVA";
default:
return "UNKWN ";
}
}
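/* SSL_alert_type_string_long returns "warning", "fatal", or "unknown" for the alert level encoded
 * in the high byte of |value|. */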
const char *SSL_alert_type_string_long(int value) {
value >>= 8;
if (value == SSL3_AL_WARNING) {
return "warning";
} else if (value == SSL3_AL_FATAL) {
return "fatal";
}
return "unknown";
}
const char *SSL_alert_type_string(int value) {
return "!";
}
const char *SSL_alert_desc_string(int value) {
return "!!";
}
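/* SSL_alert_desc_string_long maps the alert description in the low byte of |value| to a
 * human-readable string. */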
const char *SSL_alert_desc_string_long(int value) {
switch (value & 0xff) {
case SSL3_AD_CLOSE_NOTIFY:
return "close notify";
case SSL3_AD_UNEXPECTED_MESSAGE:
return "unexpected_message";
case SSL3_AD_BAD_RECORD_MAC:
return "bad record mac";
case SSL3_AD_DECOMPRESSION_FAILURE:
return "decompression failure";
case SSL3_AD_HANDSHAKE_FAILURE:
return "handshake failure";
case SSL3_AD_NO_CERTIFICATE:
return "no certificate";
case SSL3_AD_BAD_CERTIFICATE:
return "bad certificate";
case SSL3_AD_UNSUPPORTED_CERTIFICATE:
return "unsupported certificate";
case SSL3_AD_CERTIFICATE_REVOKED:
return "certificate revoked";
case SSL3_AD_CERTIFICATE_EXPIRED:
return "certificate expired";
case SSL3_AD_CERTIFICATE_UNKNOWN:
return "certificate unknown";
case SSL3_AD_ILLEGAL_PARAMETER:
return "illegal parameter";
case TLS1_AD_DECRYPTION_FAILED:
return "decryption failed";
case TLS1_AD_RECORD_OVERFLOW:
return "record overflow";
case TLS1_AD_UNKNOWN_CA:
return "unknown CA";
case TLS1_AD_ACCESS_DENIED:
return "access denied";
case TLS1_AD_DECODE_ERROR:
return "decode error";
case TLS1_AD_DECRYPT_ERROR:
return "decrypt error";
case TLS1_AD_EXPORT_RESTRICTION:
return "export restriction";
case TLS1_AD_PROTOCOL_VERSION:
return "protocol version";
case TLS1_AD_INSUFFICIENT_SECURITY:
return "insufficient security";
case TLS1_AD_INTERNAL_ERROR:
return "internal error";
case SSL3_AD_INAPPROPRIATE_FALLBACK:
return "inappropriate fallback";
case TLS1_AD_USER_CANCELLED:
return "user canceled";
case TLS1_AD_NO_RENEGOTIATION:
return "no renegotiation";
case TLS1_AD_UNSUPPORTED_EXTENSION:
return "unsupported extension";
case TLS1_AD_CERTIFICATE_UNOBTAINABLE:
return "certificate unobtainable";
case TLS1_AD_UNRECOGNIZED_NAME:
return "unrecognized name";
case TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE:
return "bad certificate status response";
case TLS1_AD_BAD_CERTIFICATE_HASH_VALUE:
return "bad certificate hash value";
case TLS1_AD_UNKNOWN_PSK_IDENTITY:
return "unknown PSK identity";
case TLS1_AD_CERTIFICATE_REQUIRED:
return "certificate required";
default:
return "unknown";
}
}
|
radubozga/Freedom
|
speech/Swift/Speech-gRPC-Streaming/Pods/BoringSSL/ssl/ssl_stat.c
|
C
|
apache-2.0
| 14,023 | 26.388672 | 80 | 0.666405 | false |
/**
* @license
* Copyright 2016 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
foam.CLASS({
package: 'com.google.dxf.model',
name: 'Point2',
properties: [
{ class: 'Float', name: 'x' },
{ class: 'Float', name: 'y' }
]
});
foam.CLASS({
package: 'com.google.dxf.model',
name: 'Point3',
properties: [
{ class: 'Float', name: 'x' },
{ class: 'Float', name: 'y' },
{ class: 'Float', name: 'z' }
]
});
foam.CLASS({
package: 'com.google.dxf.model',
name: 'Entity',
imports: [
'layerColors',
'layers',
'dxfScale',
'doTransform'
],
ids: [ 'handle' ],
properties: [
{
class: 'String',
name: 'handle'
},
{
class: 'String',
name: 'type'
},
{
class: 'String',
name: 'name'
},
{
class: 'String',
name: 'ownerHandle'
},
{
class: 'String',
name: 'layer'
}
],
methods: [
function render() {
throw "Can't render a default Entity.";
}
]
});
foam.CLASS({
package: 'com.google.dxf.model',
name: 'Insert',
extends: 'com.google.dxf.model.Entity',
documentation: 'An INSERT is a nested entity. It references by name an ' +
'object described in the BLOCKS table. Each block contains a list of ' +
'inner entities.',
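// Illustrative example (the block name is hypothetical, not from this file): an INSERT named
// 'DOOR-36' renders by looking up dxfBlocks['DOOR-36'], inflating each of that block's entities,
// and adding their rendered views to a containing CView.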
requires: [
'foam.graphics.CView'
],
imports: [
'doTransform',
'dxfBlocks',
'inflateEntity'
],
properties: [
{
class: 'FObjectProperty',
of: 'com.google.dxf.model.Point3',
name: 'position',
adapt: function(old, nu) {
this.doTransform(nu);
return nu;
}
},
{
class: 'FObjectProperty',
of: 'com.google.dxf.model.Point3',
name: 'extrusionDirection'
},
{
class: 'Float',
name: 'rotation'
},
{ class: 'Float', name: 'xScale' },
{ class: 'Float', name: 'yScale' },
{ class: 'Float', name: 'zScale' }
],
methods: [
function render() {
// Look up the blocks entry.
if ( ! this.layers[this.layer].visible ) return;
var block = this.dxfBlocks[this.name];
var entities = block.entities.map(this.inflateEntity);
var cview = this.CView.create();
for ( var i = 0; i < entities.length; i++ ) {
var e = entities[i].render();
if ( e ) cview.add(e);
}
return cview;
}
]
});
foam.CLASS({
package: 'com.google.dxf.model',
name: 'Line',
extends: 'com.google.dxf.model.Entity',
imports: [
'doTransform'
],
properties: [
{
class: 'FObjectArray',
of: 'com.google.dxf.model.Point3',
name: 'vertices',
adaptArrayElement: function(nu, obj) {
obj.doTransform(nu);
return nu;
}
},
{
class: 'Float',
name: 'lineweight',
value: 1
},
{
class: 'String',
name: 'lineType'
}
],
methods: [
function render() {
if ( ! this.layers[this.layer].visible ) return;
return foam.graphics.Line.create({
startX: this.vertices[0].x,
startY: this.vertices[0].y,
endX: this.vertices[1].x,
endY: this.vertices[1].y,
color: this.layerColors[this.layer],
lineWidth: this.lineweight
});
}
]
});
foam.CLASS({
package: 'com.google.dxf.model',
name: 'Arc',
extends: 'com.google.dxf.model.Entity',
imports: [
'doTransform'
],
properties: [
{ class: 'Float', name: 'angleLength' },
{ class: 'Float', name: 'startAngle' },
{ class: 'Float', name: 'endAngle' },
{ class: 'Float', name: 'radius' },
{
class: 'FObjectProperty',
of: 'com.google.dxf.model.Point3',
name: 'center',
adapt: function(old, nu) {
this.doTransform(nu);
return nu;
}
}
],
methods: [
function render() {
if ( ! this.layers[this.layer].visible ) return;
return foam.graphics.Arc.create({
x: this.center.x,
y: this.center.y,
radius: this.radius * this.dxfScale,
start: this.startAngle,
end: this.endAngle,
arcWidth: 1,
border: this.layerColors[this.layer]
});
}
]
});
foam.CLASS({
package: 'com.google.dxf.model',
name: 'Polygon',
extends: 'com.google.dxf.model.Entity',
imports: [
'doTransform'
],
properties: [
{
class: 'Boolean',
name: 'shape',
documentation: 'No idea what this means.'
},
{
class: 'Float',
name: 'lineweight',
units: '10um',
documentation: 'The width of the line in 100ths of mm.'
},
{
class: 'FObjectArray',
of: 'com.google.dxf.model.Point3',
name: 'vertices',
adaptArrayElement: function(nu, obj) {
obj.doTransform(nu);
return nu;
}
}
],
methods: [
function render() {
if ( ! this.layers[this.layer].visible ) return;
return foam.graphics.Polygon.create({
xCoordinates: this.vertices.map(function(v) { return v.x; }),
yCoordinates: this.vertices.map(function(v) { return v.y; }),
color: this.layerColors[this.layer]
});
}
]
});
foam.CLASS({
package: 'com.google.dxf.ui',
name: 'DXFDiagram',
extends: 'foam.graphics.CView',
requires: [
'com.google.dxf.model.Entity',
'foam.dao.MDAO',
'foam.dao.PromisedDAO',
'foam.graphics.CView'
],
imports: [
'document',
'window'
],
exports: [
'as data',
'doTransform',
'dxfBlocks',
'dxfScale',
'inflateEntity',
'layerColors',
'layers'
],
constants: {
ENTITY_TYPES: {
ARC: 'com.google.dxf.model.Arc',
INSERT: 'com.google.dxf.model.Insert',
LINE: 'com.google.dxf.model.Line',
LWPOLYLINE: 'com.google.dxf.model.Polygon'
}
},
axioms: [
foam.u2.CSS.create({
code: function CSS() {/*
^layers {
display: inline-block;
}
^ canvas {
}
*/}
})
],
properties: [
{
name: 'autoRepaint',
value: false
},
{
class: 'String',
name: 'dxfUrl',
value: '../dxf/sample.dxf'
},
{
name: 'dxfPromise',
hidden: true,
factory: function() {
var self = this;
return this.window.fetch(this.dxfUrl).then(function(resp) {
return resp.text();
}).then(function(text) {
var parser = new self.window.DxfParser();
return parser.parseSync(text);
}).then(function(tree) { // TODO: Debugging, remove this.
self.window.__tree = tree;
var colors = {};
// Map from layers to colours.
foam.Object.forEach(tree.tables.layer.layers, function(o, k) {
colors[k] = '#' + o.color.toString(16);
});
self.layerColors = colors;
self.layers = tree.tables.layer.layers;
self.dxfBlocks = tree.blocks;
var topRight = tree.header.$EXTMAX;
var bottomLeft = tree.header.$EXTMIN;
self.translateX = -bottomLeft.x;
self.translateY = -topRight.y;
console.log('Translation: ' + self.translateX + ' by ' +
self.translateY);
return tree;
});
}
},
{
name: 'layerColors',
hidden: true
},
{
name: 'layers',
hidden: true
},
{
name: 'sideCanvas',
hidden: true
},
{
name: 'dxfBlocks',
hidden: true
},
{
class: 'Float',
name: 'dxfScale',
documentation: 'CAD drawings are often huge (many thousands of ' +
'pixels). Give a scale factor here to shrink the diagram.',
value: 0.1,
postSet: function(old, nu) {
this.invalidate();
}
},
{
class: 'Float',
name: 'translateX',
hidden: true,
value: 0
},
{
class: 'Float',
name: 'translateY',
hidden: true,
value: 0
},
{
name: 'entityDAO',
hidden: true,
factory: function() {
var self = this;
var inner = this.MDAO.create({ of: this.Entity });
return this.PromisedDAO.create({
promise: this.dxfPromise.then(function(tree) {
var ps = [];
for ( var i = 0; i < tree.entities.length; i++ ) {
ps.push(inner.put(self.inflateEntity(tree.entities[i])));
}
return Promise.all(ps).then(function() { return inner; });
})
});
}
}
],
methods: [
function paintSelf(x) {
this.SUPER(x);
if ( this.sideCanvas ) {
x.drawImage(this.sideCanvas, this.x, this.y);
} else {
this.sideCanvas = this.document.createElement('canvas');
var self = this;
this.dxfPromise.then(function(tree) {
var topRight = tree.header.$EXTMAX;
var bottomLeft = tree.header.$EXTMIN;
self.sideCanvas.width = self.width = topRight.x - bottomLeft.x;
self.sideCanvas.height = self.height = topRight.y - bottomLeft.y;
self.renderDiagram();
});
}
},
function doTransform(pos) {
// Converts the coordinates in pos based on dxfScale and translateX/Y.
// Negating the scale for Y-coordinates, to convert +Y from up (CAD) to
// down (canvas).
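// Worked example (numbers are illustrative only): with translateX = -100, translateY = -200 and
// dxfScale = 0.1, a DXF point (150, 250) maps to canvas coordinates (5, -5).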
pos.x = (pos.x + this.translateX) * this.dxfScale;
pos.y = (pos.y + this.translateY) * (-this.dxfScale);
},
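// inflateEntity maps the raw DXF entity's type to one of the ENTITY_TYPES models and parses the
// entity into an instance of that model.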
function inflateEntity(entity) {
var model = foam.lookup(this.ENTITY_TYPES[entity.type]);
return foam.json.parse(entity, model, this.__subContext__);
}
],
listeners: [
{
name: 'renderDiagram',
isFramed: true,
code: function() {
// Render everything into the CView.
var self = this;
console.time('render');
// TODO(braden): Remove hard-coded set of layers; demo purposes only.
var visibleLayers = [
'0',
'I-WALL',
'A-GLAZ-CURT',
'A-GLAZ-CWMG',
'A-WALL',
'A-DOOR',
'S-BEAM',
'A-FLOR',
'S-STRS',
'S-STRS-MBND',
'L-PLNT',
'A-DETL',
'Q-CASE',
'Q-SPCQ',
'A-FLOR-HRAL',
'L-SITE',
'A-DETL-GENF',
'A-WALL-PATT',
'A-FLOR-LEVL',
'S-STRS-ANNO',
'A-GENM',
'S-COLS',
'A-AREA-PATT',
'S-GRID_IDEN',
'X-BLOCKS'
];
/*
foam.Object.forEach(this.layers, function(l, k) {
if ( l.visible ) visibleLayers.push(k);
});
*/
this.entityDAO.where(foam.mlang.predicate.In.create({
arg1: this.Entity.LAYER,
arg2: visibleLayers
})).select().then(function(a) {
var entities = a.array;
var cview = self.CView.create({
canvas: self.sideCanvas
});
for ( var i = 0; i < entities.length; i++ ) {
var e = entities[i].render();
if ( e ) cview.add(e);
}
cview.paint(self.sideCanvas.getContext('2d'));
console.log('Rendering complete');
self.renderEnd();
});
}
},
{
name: 'renderEnd',
isFramed: true,
code: function() {
console.timeEnd('render');
this.invalidate();
}
}
]
});
|
TanayParikh/foam2
|
src/com/google/dxf/dxf.js
|
JavaScript
|
apache-2.0
| 11,935 | 21.561437 | 78 | 0.526351 | false |
import pytest
from osf.models import Session
from osf.modm_compat import Q
@pytest.mark.django_db
class TestSession:
def test_is_authenticated(self):
session = Session(data={'auth_user_id': 'abc12'})
assert session.is_authenticated
session2 = Session()
assert session2.is_authenticated is False
def test_loading_by_id(self):
session = Session()
session.save()
assert Session.load(session._id)
def test_remove(self):
session, session2 = Session(data={'auth_user_id': '123ab'}), Session(data={'auth_user_id': 'ab123'})
session.save()
session2.save()
assert Session.objects.count() == 2 # sanity check
Session.remove(Q('data.auth_user_id', 'eq', '123ab'))
assert Session.objects.count() == 1
|
mluo613/osf.io
|
osf_tests/test_session.py
|
Python
|
apache-2.0
| 814 | 27.068966 | 108 | 0.628993 | false |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.operator;
import com.facebook.presto.spi.type.Type;
import com.facebook.presto.sql.planner.Symbol;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.List;
import java.util.Map;
import java.util.OptionalInt;
import java.util.function.Supplier;
import static com.google.common.util.concurrent.Futures.immediateFuture;
import static java.util.Collections.emptyList;
public interface LookupSourceFactory
extends JoinBridge
{
List<Type> getTypes();
List<Type> getOutputTypes();
ListenableFuture<LookupSourceProvider> createLookupSourceProvider();
int partitions();
default ListenableFuture<PartitionedConsumption<Supplier<LookupSource>>> finishProbeOperator(OptionalInt lookupJoinsCount)
{
return immediateFuture(new PartitionedConsumption<>(
1,
emptyList(),
i -> {
throw new UnsupportedOperationException();
},
i -> {}));
}
/**
* Can be called only after {@link #createLookupSourceProvider()} is done and all users of {@link LookupSource}-s finished.
*/
@Override
OuterPositionIterator getOuterPositionIterator();
Map<Symbol, Integer> getLayout();
// this is only here for the index lookup source
default void setTaskContext(TaskContext taskContext) {}
@Override
void destroy();
default ListenableFuture<?> isDestroyed()
{
throw new UnsupportedOperationException();
}
}
|
stewartpark/presto
|
presto-main/src/main/java/com/facebook/presto/operator/LookupSourceFactory.java
|
Java
|
apache-2.0
| 2,101 | 29.897059 | 127 | 0.704902 | false |
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"os"
"sort"
"strconv"
"sync"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/cache"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/watch"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Latency [Skipped]", func() {
var c *client.Client
var nodeCount int
var additionalPodsPrefix string
var ns string
var uuid string
AfterEach(func() {
By("Removing additional pods if any")
for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(i)
c.Pods(ns).Delete(name, nil)
}
expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "after"))
// Verify latency metrics
highLatencyRequests, err := HighLatencyRequests(c)
expectNoError(err)
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
})
framework := NewFramework("latency")
framework.NamespaceDeletionTimeout = time.Hour
BeforeEach(func() {
c = framework.Client
ns = framework.Namespace.Name
nodes := ListSchedulableNodesOrDie(framework.Client)
nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
// Terminating a namespace (deleting the remaining objects from it - which
// generally means events) can affect the current run. Thus we wait for all
// terminating namespaces to be finally deleted before starting this test.
expectNoError(checkTestingNSDeletedExcept(c, ns))
uuid = string(util.NewUUID())
expectNoError(resetMetrics(c))
expectNoError(os.Mkdir(fmt.Sprintf(testContext.OutputDir+"/%s", uuid), 0777))
expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "before"))
Logf("Listing nodes for easy debugging:\n")
for _, node := range nodes.Items {
for _, address := range node.Status.Addresses {
if address.Type == api.NodeInternalIP {
Logf("Name: %v IP: %v", node.ObjectMeta.Name, address.Address)
}
}
}
})
It("pod start latency should be acceptable", func() {
runLatencyTest(nodeCount, c, ns)
})
})
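// runLatencyTest creates one pause pod per node (throttled to roughly 5 pods/sec), records the
// create, schedule, run and watch timestamps for each pod, and verifies that the resulting latency
// percentiles stay within the expected ceilings.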
func runLatencyTest(nodeCount int, c *client.Client, ns string) {
var (
nodes = make(map[string]string, 0) // pod name -> node name
createTimestamps = make(map[string]unversioned.Time, 0) // pod name -> create time
scheduleTimestamps = make(map[string]unversioned.Time, 0) // pod name -> schedule time
startTimestamps = make(map[string]unversioned.Time, 0) // pod name -> time to run
watchTimestamps = make(map[string]unversioned.Time, 0) // pod name -> time to read from informer
additionalPodsPrefix = "latency-pod-" + string(util.NewUUID())
)
var mutex sync.Mutex
readPodInfo := func(p *api.Pod) {
mutex.Lock()
defer mutex.Unlock()
defer GinkgoRecover()
if p.Status.Phase == api.PodRunning {
if _, found := watchTimestamps[p.Name]; !found {
watchTimestamps[p.Name] = unversioned.Now()
createTimestamps[p.Name] = p.CreationTimestamp
nodes[p.Name] = p.Spec.NodeName
var startTimestamp unversioned.Time
for _, cs := range p.Status.ContainerStatuses {
if cs.State.Running != nil {
if startTimestamp.Before(cs.State.Running.StartedAt) {
startTimestamp = cs.State.Running.StartedAt
}
}
}
if startTimestamp != unversioned.NewTime(time.Time{}) {
startTimestamps[p.Name] = startTimestamp
} else {
Failf("Pod %v is reported to be running, but none of its containers are", p.Name)
}
}
}
}
// Create an informer to read timestamps for each pod
stopCh := make(chan struct{})
_, informer := framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"name": additionalPodsPrefix})
return c.Pods(ns).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"name": additionalPodsPrefix})
return c.Pods(ns).Watch(options)
},
},
&api.Pod{},
0,
framework.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
p, ok := obj.(*api.Pod)
Expect(ok).To(Equal(true))
go readPodInfo(p)
},
UpdateFunc: func(oldObj, newObj interface{}) {
p, ok := newObj.(*api.Pod)
Expect(ok).To(Equal(true))
go readPodInfo(p)
},
},
)
go informer.Run(stopCh)
// Create additional pods with throughput ~5 pods/sec.
var wg sync.WaitGroup
wg.Add(nodeCount)
podLabels := map[string]string{
"name": additionalPodsPrefix,
}
for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(i)
go createRunningPod(&wg, c, name, ns, "gcr.io/google_containers/pause:go", podLabels)
time.Sleep(200 * time.Millisecond)
}
wg.Wait()
Logf("Waiting for all Pods begin observed by the watch...")
for start := time.Now(); len(watchTimestamps) < nodeCount; time.Sleep(10 * time.Second) {
if time.Since(start) > timeout {
Failf("Timeout reached waiting for all Pods being observed by the watch.")
}
}
close(stopCh)
// Read the schedule timestamp by checking the scheduler event for each pod
selector := fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.namespace": ns,
"source": "scheduler",
}.AsSelector()
options := api.ListOptions{FieldSelector: selector}
schedEvents, err := c.Events(ns).List(options)
expectNoError(err)
for k := range createTimestamps {
for _, event := range schedEvents.Items {
if event.InvolvedObject.Name == k {
scheduleTimestamps[k] = event.FirstTimestamp
break
}
}
}
var (
scheduleLatencies = make([]podLatencyData, 0)
startLatencies = make([]podLatencyData, 0)
watchLatencies = make([]podLatencyData, 0)
scheduleToWatchLatencies = make([]podLatencyData, 0)
e2eLatencies = make([]podLatencyData, 0)
)
for name, podNode := range nodes {
createTs, ok := createTimestamps[name]
Expect(ok).To(Equal(true))
scheduleTs, ok := scheduleTimestamps[name]
Expect(ok).To(Equal(true))
runTs, ok := startTimestamps[name]
Expect(ok).To(Equal(true))
watchTs, ok := watchTimestamps[name]
Expect(ok).To(Equal(true))
var (
scheduleLatency = podLatencyData{name, podNode, scheduleTs.Time.Sub(createTs.Time)}
startLatency = podLatencyData{name, podNode, runTs.Time.Sub(scheduleTs.Time)}
watchLatency = podLatencyData{name, podNode, watchTs.Time.Sub(runTs.Time)}
scheduleToWatchLatency = podLatencyData{name, podNode, watchTs.Time.Sub(scheduleTs.Time)}
e2eLatency = podLatencyData{name, podNode, watchTs.Time.Sub(createTs.Time)}
)
scheduleLatencies = append(scheduleLatencies, scheduleLatency)
startLatencies = append(startLatencies, startLatency)
watchLatencies = append(watchLatencies, watchLatency)
scheduleToWatchLatencies = append(scheduleToWatchLatencies, scheduleToWatchLatency)
e2eLatencies = append(e2eLatencies, e2eLatency)
}
sort.Sort(latencySlice(scheduleLatencies))
sort.Sort(latencySlice(startLatencies))
sort.Sort(latencySlice(watchLatencies))
sort.Sort(latencySlice(scheduleToWatchLatencies))
sort.Sort(latencySlice(e2eLatencies))
printLatencies(scheduleLatencies, "worst schedule latencies")
printLatencies(startLatencies, "worst run-after-schedule latencies")
printLatencies(watchLatencies, "worst watch latencies")
printLatencies(scheduleToWatchLatencies, "worst scheduled-to-end total latencies")
printLatencies(e2eLatencies, "worst e2e total latencies")
// Ensure all scheduleLatencies are under expected ceilings.
// These numbers were guessed based on numerous Jenkins e2e runs.
testMaximumLatencyValue(scheduleLatencies, 1*time.Second, "scheduleLatencies")
testMaximumLatencyValue(startLatencies, 15*time.Second, "startLatencies")
testMaximumLatencyValue(watchLatencies, 8*time.Second, "watchLatencies")
testMaximumLatencyValue(scheduleToWatchLatencies, 5*time.Second, "scheduleToWatchLatencies")
testMaximumLatencyValue(e2eLatencies, 5*time.Second, "e2eLatencies")
// Test whether e2e pod startup time is acceptable.
podStartupLatency := PodStartupLatency{Latency: extractLatencyMetrics(e2eLatencies)}
expectNoError(VerifyPodStartupLatency(podStartupLatency))
// Log suspicious latency metrics/docker errors from all nodes that had slow startup times
logSuspiciousLatency(startLatencies, nil, nodeCount, c)
}
|
ruizeng/kubernetes
|
test/e2e/latency.go
|
GO
|
apache-2.0
| 9,283 | 33.63806 | 102 | 0.71938 | false |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Graph debug results dumping class."""
import collections
import json
import os
import numpy as np
import tvm
GRAPH_DUMP_FILE_NAME = "_tvmdbg_graph_dump.json"
CHROME_TRACE_FILE_NAME = "_tvmdbg_execution_trace.json"
ChromeTraceEvent = collections.namedtuple("ChromeTraceEvent", ["ts", "tid", "pid", "name", "ph"])
class DebugResult(object):
"""Graph debug data module.
Data dump module manages all the debug data formatting.
Output data and input graphs are formatted and dumped to file.
The frontend reads this data and graph for visualization.
Parameters
----------
graph_json : str
The graph to be deployed in json format output by graph compiler. Each operator (tvm_op)
in the graph will have a one to one mapping with the symbol in libmod which is used
to construct a "PackedFunc" .
dump_path : str
Output data path is read/provided from frontend
"""
def __init__(self, graph_json, dump_path):
self._dump_path = dump_path
self._output_tensor_list = []
self._time_list = []
json_obj = self._parse_graph(graph_json)
# dump the json information
self._dump_graph_json(json_obj)
def _parse_graph(self, graph_json):
"""Parse and extract the JSON graph and update the nodes, shapes and dltype.
Parameters
----------
graph_json : str or graph class
The graph to be deployed in json format output by JSON graph.
"""
json_obj = json.loads(graph_json)
self._nodes_list = json_obj["nodes"]
self._shapes_list = json_obj["attrs"]["shape"]
self._dtype_list = json_obj["attrs"]["dltype"]
self._update_graph_json()
return json_obj
def _update_graph_json(self):
"""update the nodes_list with name, shape and data type,
for temporarily storing the output.
"""
nodes_len = len(self._nodes_list)
for i in range(nodes_len):
node = self._nodes_list[i]
input_list = []
for input_node in node["inputs"]:
input_list.append(self._nodes_list[input_node[0]]["name"])
node["inputs"] = input_list
dtype = str("type: " + self._dtype_list[1][i])
if "attrs" not in node:
node["attrs"] = {}
node["op"] = "param"
else:
node["op"] = node["attrs"]["func_name"]
node["attrs"].update({"T": dtype})
node["shape"] = self._shapes_list[1][i]
def _cleanup_tensors(self):
"""Remove the tensor dump file (graph wont be removed)"""
for filename in os.listdir(self._dump_path):
if os.path.isfile(filename) and not filename.endswith(".json"):
os.remove(filename)
def get_graph_nodes(self):
"""Return the nodes list"""
return self._nodes_list
def get_graph_node_shapes(self):
"""Return the nodes shapes list"""
return self._shapes_list
def get_graph_node_output_num(self, node):
"""Return the number of outputs of a node"""
return 1 if node["op"] == "param" else int(node["attrs"]["num_outputs"])
def get_graph_node_dtypes(self):
"""Return the nodes dtype list"""
return self._dtype_list
def get_output_tensors(self):
"""Get the output tensors of each operation in numpy format"""
eid = 0
order = 0
output_tensors = {}
for i, (node, time) in enumerate(zip(self._nodes_list, self._time_list)):
num_outputs = self.get_graph_node_output_num(node)
for j in range(num_outputs):
order += time[0]
# the node name is not unique, so we need a consistent
# indexing based on the list ordering in the nodes
key = f"{node['name']}____topo-index:{i}____output-num:{j}"
output_tensors[key] = self._output_tensor_list[eid]
eid += 1
return output_tensors
def update_output_tensors(self, tensors):
"""Update output tensors list
Parameters
----------
tensors : list[NDArray]
"""
if not isinstance(tensors, list):
raise AttributeError("tensors with incorrect type.")
for output_array in tensors:
self._output_tensor_list.append(output_array)
def dump_output_tensor(self):
"""Dump the outputs to a temporary folder, the tensors are in numpy format"""
# cleanup existing tensors before dumping
self._cleanup_tensors()
output_tensors = self.get_output_tensors()
with open(os.path.join(self._dump_path, "output_tensors.params"), "wb") as param_f:
param_f.write(save_tensors(output_tensors))
def dump_chrome_trace(self):
"""Dump the trace to the Chrome trace.json format."""
def s_to_us(t):
return t * 10 ** 6
starting_times = np.zeros(len(self._time_list) + 1)
starting_times[1:] = np.cumsum([times[0] for times in self._time_list])
def node_to_events(node, times, starting_time):
return [
ChromeTraceEvent(
ts=s_to_us(starting_time),
tid=1,
pid=1,
ph="B",
name=node["name"],
),
ChromeTraceEvent(
# Use start + duration instead of end to ensure precise timings.
ts=s_to_us(times[0] + starting_time),
tid=1,
pid=1,
ph="E",
name=node["name"],
),
]
events = [
e
for (node, times, starting_time) in zip(
self._nodes_list, self._time_list, starting_times
)
for e in node_to_events(node, times, starting_time)
]
result = dict(displayTimeUnit="ns", traceEvents=[e._asdict() for e in events])
with open(os.path.join(self._dump_path, CHROME_TRACE_FILE_NAME), "w") as trace_f:
json.dump(result, trace_f)
def _dump_graph_json(self, graph):
"""Dump json formatted graph.
Parameters
----------
graph : json format
json formatted JSON graph contain list of each node's
name, shape and type.
"""
graph_dump_file_name = GRAPH_DUMP_FILE_NAME
with open(os.path.join(self._dump_path, graph_dump_file_name), "w") as outfile:
json.dump(graph, outfile, indent=4, sort_keys=False)
def get_debug_result(self, sort_by_time=True):
"""Return the debugger result"""
header = ["Node Name", "Ops", "Time(us)", "Time(%)", "Shape", "Inputs", "Outputs"]
lines = ["---------", "---", "--------", "-------", "-----", "------", "-------"]
eid = 0
data = []
total_time = sum(time[0] for time in self._time_list)
for node, time in zip(self._nodes_list, self._time_list):
num_outputs = self.get_graph_node_output_num(node)
for j in range(num_outputs):
op = node["op"]
if node["op"] == "param":
eid += 1
continue
name = node["name"]
shape = str(self._output_tensor_list[eid].shape)
time_us = round(time[0] * 1e6, 3)
time_percent = round(((time[0] / total_time) * 100), 3)
inputs = str(node["attrs"]["num_inputs"])
outputs = str(node["attrs"]["num_outputs"])
node_data = [name, op, time_us, time_percent, shape, inputs, outputs]
data.append(node_data)
eid += 1
if sort_by_time:
# Sort on the basis of execution time. Prints the most expensive ops in the start.
data = sorted(data, key=lambda x: x[2], reverse=True)
# Insert a row for total time at the end.
rounded_total_time_us = round(total_time * 1e6, 3)
data.append(["Total_time", "-", rounded_total_time_us, "-", "-", "-", "-", "-"])
fmt = ""
for i, _ in enumerate(header):
max_len = len(header[i])
for j, _ in enumerate(data):
item_len = len(str(data[j][i]))
if item_len > max_len:
max_len = item_len
fmt = fmt + "{:<" + str(max_len + 2) + "}"
log = [fmt.format(*header)]
log.append(fmt.format(*lines))
for row in data:
log.append(fmt.format(*row))
return "\n".join(log)
def display_debug_result(self, sort_by_time=True):
"""Displays the debugger result"""
print(self.get_debug_result(sort_by_time))
def save_tensors(params):
"""Save parameter dictionary to binary bytes.
The result binary bytes can be loaded by the
GraphModule with API "load_params".
Parameters
----------
params : dict of str to NDArray
The parameter dictionary.
Returns
-------
param_bytes: bytearray
Serialized parameters.
"""
_save_tensors = tvm.get_global_func("tvm.relay._save_param_dict")
return _save_tensors(params)
|
Laurawly/tvm-1
|
python/tvm/contrib/debugger/debug_result.py
|
Python
|
apache-2.0
| 10,104 | 35.875912 | 97 | 0.559283 | false |
/*
* repos_diff_summarize.c -- The diff callbacks for summarizing
* the differences of two repository versions
*
* ====================================================================
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
* ====================================================================
*/
#include "svn_dirent_uri.h"
#include "svn_hash.h"
#include "svn_path.h"
#include "svn_props.h"
#include "svn_pools.h"
#include "private/svn_wc_private.h"
#include "client.h"
/* Diff callbacks baton. */
struct summarize_baton_t {
apr_pool_t *baton_pool; /* For allocating skip_path */
/* The target path of the diff, relative to the anchor; "" if target == anchor. */
const char *skip_relpath;
/* The summarize callback passed down from the API */
svn_client_diff_summarize_func_t summarize_func;
/* The summarize callback baton */
void *summarize_func_baton;
};
/* Call B->summarize_func with B->summarize_func_baton, passing it a
* summary object composed from PATH (but made to be relative to the target
* of the diff), SUMMARIZE_KIND, PROP_CHANGED (or FALSE if the action is an
* add or delete) and NODE_KIND. */
static svn_error_t *
send_summary(struct summarize_baton_t *b,
const char *path,
svn_client_diff_summarize_kind_t summarize_kind,
svn_boolean_t prop_changed,
svn_node_kind_t node_kind,
apr_pool_t *scratch_pool)
{
svn_client_diff_summarize_t *sum = apr_pcalloc(scratch_pool, sizeof(*sum));
SVN_ERR_ASSERT(summarize_kind != svn_client_diff_summarize_kind_normal
|| prop_changed);
/* PATH is relative to the anchor of the diff, but SUM->path needs to be
relative to the target of the diff. */
sum->path = svn_relpath_skip_ancestor(b->skip_relpath, path);
sum->summarize_kind = summarize_kind;
if (summarize_kind == svn_client_diff_summarize_kind_modified
|| summarize_kind == svn_client_diff_summarize_kind_normal)
sum->prop_changed = prop_changed;
sum->node_kind = node_kind;
SVN_ERR(b->summarize_func(sum, b->summarize_func_baton, scratch_pool));
return SVN_NO_ERROR;
}
/* Are there any changes to relevant (normal) props in PROPS? */
static svn_boolean_t
props_changed_hash(apr_hash_t *props,
apr_pool_t *scratch_pool)
{
apr_hash_index_t *hi;
if (!props)
return FALSE;
for (hi = apr_hash_first(scratch_pool, props); hi; hi = apr_hash_next(hi))
{
const char *name = apr_hash_this_key(hi);
if (svn_property_kind2(name) == svn_prop_regular_kind)
{
return TRUE;
}
}
return FALSE;
}
/* Are there any changes to relevant (normal) props in PROPCHANGES? */
static svn_boolean_t
props_changed(const apr_array_header_t *propchanges,
apr_pool_t *scratch_pool)
{
apr_array_header_t *props;
svn_error_clear(svn_categorize_props(propchanges, NULL, NULL, &props,
scratch_pool));
return (props->nelts != 0);
}
/* svn_diff_tree_processor_t callback */
static svn_error_t *
diff_dir_opened(void **new_dir_baton,
svn_boolean_t *skip,
svn_boolean_t *skip_children,
const char *relpath,
const svn_diff_source_t *left_source,
const svn_diff_source_t *right_source,
const svn_diff_source_t *copyfrom_source,
void *parent_dir_baton,
const struct svn_diff_tree_processor_t *processor,
apr_pool_t *result_pool,
apr_pool_t *scratch_pool)
{
/* struct summarize_baton_t *b = processor->baton; */
/* ### Send here instead of from dir_added() ? */
/*if (!left_source)
{
SVN_ERR(send_summary(b, relpath, svn_client_diff_summarize_kind_added,
FALSE, svn_node_dir, scratch_pool));
}*/
return SVN_NO_ERROR;
}
/* svn_diff_tree_processor_t callback */
static svn_error_t *
diff_dir_changed(const char *relpath,
const svn_diff_source_t *left_source,
const svn_diff_source_t *right_source,
/*const*/ apr_hash_t *left_props,
/*const*/ apr_hash_t *right_props,
const apr_array_header_t *prop_changes,
void *dir_baton,
const struct svn_diff_tree_processor_t *processor,
apr_pool_t *scratch_pool)
{
struct summarize_baton_t *b = processor->baton;
SVN_ERR(send_summary(b, relpath, svn_client_diff_summarize_kind_normal,
TRUE, svn_node_dir, scratch_pool));
return SVN_NO_ERROR;
}
/* svn_diff_tree_processor_t callback */
static svn_error_t *
diff_dir_added(const char *relpath,
const svn_diff_source_t *copyfrom_source,
const svn_diff_source_t *right_source,
/*const*/ apr_hash_t *copyfrom_props,
/*const*/ apr_hash_t *right_props,
void *dir_baton,
const struct svn_diff_tree_processor_t *processor,
apr_pool_t *scratch_pool)
{
struct summarize_baton_t *b = processor->baton;
/* ### Send from dir_opened without prop info? */
SVN_ERR(send_summary(b, relpath, svn_client_diff_summarize_kind_added,
props_changed_hash(right_props, scratch_pool),
svn_node_dir, scratch_pool));
return SVN_NO_ERROR;
}
/* svn_diff_tree_processor_t callback */
static svn_error_t *
diff_dir_deleted(const char *relpath,
const svn_diff_source_t *left_source,
/*const*/ apr_hash_t *left_props,
void *dir_baton,
const struct svn_diff_tree_processor_t *processor,
apr_pool_t *scratch_pool)
{
struct summarize_baton_t *b = processor->baton;
SVN_ERR(send_summary(b, relpath, svn_client_diff_summarize_kind_deleted,
FALSE, svn_node_dir, scratch_pool));
return SVN_NO_ERROR;
}
/* svn_diff_tree_processor_t callback */
static svn_error_t *
diff_file_added(const char *relpath,
const svn_diff_source_t *copyfrom_source,
const svn_diff_source_t *right_source,
const char *copyfrom_file,
const char *right_file,
/*const*/ apr_hash_t *copyfrom_props,
/*const*/ apr_hash_t *right_props,
void *file_baton,
const struct svn_diff_tree_processor_t *processor,
apr_pool_t *scratch_pool)
{
struct summarize_baton_t *b = processor->baton;
SVN_ERR(send_summary(b, relpath, svn_client_diff_summarize_kind_added,
props_changed_hash(right_props, scratch_pool),
svn_node_file, scratch_pool));
return SVN_NO_ERROR;
}
/* svn_diff_tree_processor_t callback */
static svn_error_t *
diff_file_changed(const char *relpath,
const svn_diff_source_t *left_source,
const svn_diff_source_t *right_source,
const char *left_file,
const char *right_file,
/*const*/ apr_hash_t *left_props,
/*const*/ apr_hash_t *right_props,
svn_boolean_t file_modified,
const apr_array_header_t *prop_changes,
void *file_baton,
const struct svn_diff_tree_processor_t *processor,
apr_pool_t *scratch_pool)
{
struct summarize_baton_t *b = processor->baton;
SVN_ERR(send_summary(b, relpath,
file_modified ? svn_client_diff_summarize_kind_modified
: svn_client_diff_summarize_kind_normal,
props_changed(prop_changes, scratch_pool),
svn_node_file, scratch_pool));
return SVN_NO_ERROR;
}
/* svn_diff_tree_processor_t callback */
static svn_error_t *
diff_file_deleted(const char *relpath,
const svn_diff_source_t *left_source,
const char *left_file,
/*const*/ apr_hash_t *left_props,
void *file_baton,
const struct svn_diff_tree_processor_t *processor,
apr_pool_t *scratch_pool)
{
struct summarize_baton_t *b = processor->baton;
SVN_ERR(send_summary(b, relpath, svn_client_diff_summarize_kind_deleted,
FALSE, svn_node_file, scratch_pool));
return SVN_NO_ERROR;
}
svn_error_t *
svn_client__get_diff_summarize_callbacks(
const svn_diff_tree_processor_t **diff_processor,
const char ***p_root_relpath,
svn_client_diff_summarize_func_t summarize_func,
void *summarize_baton,
const char *original_target,
apr_pool_t *result_pool,
apr_pool_t *scratch_pool)
{
svn_diff_tree_processor_t *dp;
struct summarize_baton_t *b = apr_pcalloc(result_pool, sizeof(*b));
b->baton_pool = result_pool;
b->summarize_func = summarize_func;
b->summarize_func_baton = summarize_baton;
dp = svn_diff__tree_processor_create(b, result_pool);
/*dp->file_opened = diff_file_opened;*/
dp->file_added = diff_file_added;
dp->file_deleted = diff_file_deleted;
dp->file_changed = diff_file_changed;
dp->dir_opened = diff_dir_opened;
dp->dir_changed = diff_dir_changed;
dp->dir_deleted = diff_dir_deleted;
dp->dir_added = diff_dir_added;
*diff_processor = dp;
*p_root_relpath = &b->skip_relpath;
return SVN_NO_ERROR;
}
svn_client_diff_summarize_t *
svn_client_diff_summarize_dup(const svn_client_diff_summarize_t *diff,
apr_pool_t *pool)
{
svn_client_diff_summarize_t *dup_diff = apr_palloc(pool, sizeof(*dup_diff));
*dup_diff = *diff;
if (diff->path)
dup_diff->path = apr_pstrdup(pool, diff->path);
return dup_diff;
}
|
YueLinHo/Subversion
|
subversion/libsvn_client/diff_summarize.c
|
C
|
apache-2.0
| 10,730 | 33.28115 | 84 | 0.595899 | false |
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flocker
import (
"fmt"
"os"
"testing"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"github.com/stretchr/testify/assert"
)
func newTestableProvisioner(t *testing.T, assert *assert.Assertions, options volume.VolumeOptions) (string, volume.Provisioner) {
tmpDir, err := utiltesting.MkTmpdir("flockervolumeTest")
assert.NoError(err, fmt.Sprintf("can't make a temp dir: %v", err))
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName(pluginName)
assert.NoError(err, "Can't find the plugin by name")
provisioner, err := plug.(*flockerPlugin).newProvisionerInternal(options, &fakeFlockerUtil{})
assert.NoError(err, fmt.Sprintf("Can't create new provisioner:%v", err))
return tmpDir, provisioner
}
func TestProvision(t *testing.T) {
assert := assert.New(t)
pvc := volumetest.CreateTestPVC("3Gi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
options := volume.VolumeOptions{
PVC: pvc,
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
dir, provisioner := newTestableProvisioner(t, assert, options)
defer os.RemoveAll(dir)
persistentSpec, err := provisioner.Provision(nil, nil)
assert.NoError(err, "Provision() failed: ", err)
cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
assert.Equal(int64(3*1024*1024*1024), cap.Value())
assert.Equal(
"test-flocker-volume-uuid",
persistentSpec.Spec.PersistentVolumeSource.Flocker.DatasetUUID,
)
assert.Equal(
map[string]string{"fakeflockerutil": "yes"},
persistentSpec.Labels,
)
// parameters are not supported
options = volume.VolumeOptions{
PVC: pvc,
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
Parameters: map[string]string{
"not-supported-params": "test123",
},
}
dir, provisioner = newTestableProvisioner(t, assert, options)
defer os.RemoveAll(dir)
persistentSpec, err = provisioner.Provision(nil, nil)
assert.Error(err, "Provision() did not fail with Parameters specified")
// selectors are not supported
pvc.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"key": "value"}}
options = volume.VolumeOptions{
PVC: pvc,
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
dir, provisioner = newTestableProvisioner(t, assert, options)
defer os.RemoveAll(dir)
persistentSpec, err = provisioner.Provision(nil, nil)
assert.Error(err, "Provision() did not fail with Selector specified")
}
|
frodenas/kubernetes
|
pkg/volume/flocker/flocker_volume_test.go
|
GO
|
apache-2.0
| 3,335 | 31.696078 | 129 | 0.743028 | false |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.security.configuration;
import java.util.Objects;
/**
* KeyStore Configuration properties
*/
public class KeyStoreConfiguration {
private final String location;
private final String password;
private final String keyStoreType;
public KeyStoreConfiguration(final String location, final String password, final String keyStoreType) {
this.location = Objects.requireNonNull(location, "Location required");
this.password = Objects.requireNonNull(password, "Password required");
this.keyStoreType = Objects.requireNonNull(keyStoreType, "KeyStore Type required");
}
public String getLocation() {
return location;
}
public String getPassword() {
return password;
}
public String getKeyStoreType() {
return keyStoreType;
}
}
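// Illustrative usage (not part of the original source; the values below are
// placeholders):
//   KeyStoreConfiguration configuration =
//           new KeyStoreConfiguration("/etc/nifi/keystore.p12", "changeit", "PKCS12");
//   String type = configuration.getKeyStoreType(); // "PKCS12"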
|
MikeThomsen/nifi
|
nifi-commons/nifi-security-utils/src/main/java/org/apache/nifi/security/configuration/KeyStoreConfiguration.java
|
Java
|
apache-2.0
| 1,644 | 33.25 | 107 | 0.73236 | false |
"""The tests for the DD-WRT device tracker platform."""
import os
import unittest
from unittest import mock
import logging
import re
import requests
import requests_mock
from homeassistant import config
from homeassistant.setup import setup_component
from homeassistant.components import device_tracker
from homeassistant.const import (
CONF_PLATFORM, CONF_HOST, CONF_PASSWORD, CONF_USERNAME)
from homeassistant.components.device_tracker import DOMAIN
from homeassistant.util import slugify
from tests.common import (
get_test_home_assistant, assert_setup_component, load_fixture,
mock_component)
from ...test_util.aiohttp import mock_aiohttp_client
TEST_HOST = '127.0.0.1'
_LOGGER = logging.getLogger(__name__)
class TestDdwrt(unittest.TestCase):
"""Tests for the Ddwrt device tracker platform."""
hass = None
def run(self, result=None):
"""Mock out http calls to macvendor API for whole test suite."""
with mock_aiohttp_client() as aioclient_mock:
macvendor_re = re.compile('http://api.macvendors.com/.*')
aioclient_mock.get(macvendor_re, text='')
super().run(result)
def setup_method(self, _):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
mock_component(self.hass, 'zone')
def teardown_method(self, _):
"""Stop everything that was started."""
self.hass.stop()
try:
os.remove(self.hass.config.path(device_tracker.YAML_DEVICES))
except FileNotFoundError:
pass
@mock.patch('homeassistant.components.device_tracker.ddwrt._LOGGER.error')
def test_login_failed(self, mock_error):
"""Create a Ddwrt scanner with wrong credentials."""
with requests_mock.Mocker() as mock_request:
mock_request.register_uri(
'GET', r'http://%s/Status_Wireless.live.asp' % TEST_HOST,
status_code=401)
with assert_setup_component(1):
assert setup_component(
self.hass, DOMAIN, {DOMAIN: {
CONF_PLATFORM: 'ddwrt',
CONF_HOST: TEST_HOST,
CONF_USERNAME: 'fake_user',
CONF_PASSWORD: '0'
}})
self.assertTrue(
'Failed to authenticate' in
str(mock_error.call_args_list[-1]))
@mock.patch('homeassistant.components.device_tracker.ddwrt._LOGGER.error')
def test_invalid_response(self, mock_error):
"""Test error handling when response has an error status."""
with requests_mock.Mocker() as mock_request:
mock_request.register_uri(
'GET', r'http://%s/Status_Wireless.live.asp' % TEST_HOST,
status_code=444)
with assert_setup_component(1):
assert setup_component(
self.hass, DOMAIN, {DOMAIN: {
CONF_PLATFORM: 'ddwrt',
CONF_HOST: TEST_HOST,
CONF_USERNAME: 'fake_user',
CONF_PASSWORD: '0'
}})
self.assertTrue(
'Invalid response from DD-WRT' in
str(mock_error.call_args_list[-1]))
@mock.patch('homeassistant.components.device_tracker._LOGGER.error')
@mock.patch('homeassistant.components.device_tracker.'
'ddwrt.DdWrtDeviceScanner.get_ddwrt_data', return_value=None)
def test_no_response(self, data_mock, error_mock):
"""Create a Ddwrt scanner with no response in init, should fail."""
with assert_setup_component(1):
assert setup_component(
self.hass, DOMAIN, {DOMAIN: {
CONF_PLATFORM: 'ddwrt',
CONF_HOST: TEST_HOST,
CONF_USERNAME: 'fake_user',
CONF_PASSWORD: '0'
}})
self.assertTrue(
'Error setting up platform' in
str(error_mock.call_args_list[-1]))
@mock.patch('homeassistant.components.device_tracker.ddwrt.requests.get',
side_effect=requests.exceptions.Timeout)
@mock.patch('homeassistant.components.device_tracker.ddwrt._LOGGER.error')
def test_get_timeout(self, mock_error, mock_request):
"""Test get Ddwrt data with request time out."""
with assert_setup_component(1):
assert setup_component(
self.hass, DOMAIN, {DOMAIN: {
CONF_PLATFORM: 'ddwrt',
CONF_HOST: TEST_HOST,
CONF_USERNAME: 'fake_user',
CONF_PASSWORD: '0'
}})
self.assertTrue(
'Connection to the router timed out' in
str(mock_error.call_args_list[-1]))
def test_scan_devices(self):
"""Test creating device info (MAC, name) from response.
The created known_devices.yaml device info is compared
to the DD-WRT Lan Status request response fixture.
This effectively checks the data parsing functions.
"""
with requests_mock.Mocker() as mock_request:
mock_request.register_uri(
'GET', r'http://%s/Status_Wireless.live.asp' % TEST_HOST,
text=load_fixture('Ddwrt_Status_Wireless.txt'))
mock_request.register_uri(
'GET', r'http://%s/Status_Lan.live.asp' % TEST_HOST,
text=load_fixture('Ddwrt_Status_Lan.txt'))
with assert_setup_component(1):
assert setup_component(
self.hass, DOMAIN, {DOMAIN: {
CONF_PLATFORM: 'ddwrt',
CONF_HOST: TEST_HOST,
CONF_USERNAME: 'fake_user',
CONF_PASSWORD: '0'
}})
self.hass.block_till_done()
path = self.hass.config.path(device_tracker.YAML_DEVICES)
devices = config.load_yaml_config_file(path)
for device in devices:
self.assertIn(
devices[device]['mac'],
load_fixture('Ddwrt_Status_Lan.txt'))
self.assertIn(
slugify(devices[device]['name']),
load_fixture('Ddwrt_Status_Lan.txt'))
def test_device_name_no_data(self):
"""Test creating device info (MAC only) when no response."""
with requests_mock.Mocker() as mock_request:
mock_request.register_uri(
'GET', r'http://%s/Status_Wireless.live.asp' % TEST_HOST,
text=load_fixture('Ddwrt_Status_Wireless.txt'))
mock_request.register_uri(
'GET', r'http://%s/Status_Lan.live.asp' % TEST_HOST, text=None)
with assert_setup_component(1):
assert setup_component(
self.hass, DOMAIN, {DOMAIN: {
CONF_PLATFORM: 'ddwrt',
CONF_HOST: TEST_HOST,
CONF_USERNAME: 'fake_user',
CONF_PASSWORD: '0'
}})
self.hass.block_till_done()
path = self.hass.config.path(device_tracker.YAML_DEVICES)
devices = config.load_yaml_config_file(path)
for device in devices:
_LOGGER.error(devices[device])
self.assertIn(
devices[device]['mac'],
load_fixture('Ddwrt_Status_Lan.txt'))
def test_device_name_no_dhcp(self):
"""Test creating device info (MAC) when missing dhcp response."""
with requests_mock.Mocker() as mock_request:
mock_request.register_uri(
'GET', r'http://%s/Status_Wireless.live.asp' % TEST_HOST,
text=load_fixture('Ddwrt_Status_Wireless.txt'))
mock_request.register_uri(
'GET', r'http://%s/Status_Lan.live.asp' % TEST_HOST,
text=load_fixture('Ddwrt_Status_Lan.txt').
replace('dhcp_leases', 'missing'))
with assert_setup_component(1):
assert setup_component(
self.hass, DOMAIN, {DOMAIN: {
CONF_PLATFORM: 'ddwrt',
CONF_HOST: TEST_HOST,
CONF_USERNAME: 'fake_user',
CONF_PASSWORD: '0'
}})
self.hass.block_till_done()
path = self.hass.config.path(device_tracker.YAML_DEVICES)
devices = config.load_yaml_config_file(path)
for device in devices:
_LOGGER.error(devices[device])
self.assertIn(
devices[device]['mac'],
load_fixture('Ddwrt_Status_Lan.txt'))
def test_update_no_data(self):
"""Test error handling of no response when active devices checked."""
with requests_mock.Mocker() as mock_request:
mock_request.register_uri(
'GET', r'http://%s/Status_Wireless.live.asp' % TEST_HOST,
# First request has to work to setup connection
[{'text': load_fixture('Ddwrt_Status_Wireless.txt')},
# Second request to get active devices fails
{'text': None}])
mock_request.register_uri(
'GET', r'http://%s/Status_Lan.live.asp' % TEST_HOST,
text=load_fixture('Ddwrt_Status_Lan.txt'))
with assert_setup_component(1):
assert setup_component(
self.hass, DOMAIN, {DOMAIN: {
CONF_PLATFORM: 'ddwrt',
CONF_HOST: TEST_HOST,
CONF_USERNAME: 'fake_user',
CONF_PASSWORD: '0'
}})
def test_update_wrong_data(self):
"""Test error handling of bad response when active devices checked."""
with requests_mock.Mocker() as mock_request:
mock_request.register_uri(
'GET', r'http://%s/Status_Wireless.live.asp' % TEST_HOST,
text=load_fixture('Ddwrt_Status_Wireless.txt').
replace('active_wireless', 'missing'))
mock_request.register_uri(
'GET', r'http://%s/Status_Lan.live.asp' % TEST_HOST,
text=load_fixture('Ddwrt_Status_Lan.txt'))
with assert_setup_component(1):
assert setup_component(
self.hass, DOMAIN, {DOMAIN: {
CONF_PLATFORM: 'ddwrt',
CONF_HOST: TEST_HOST,
CONF_USERNAME: 'fake_user',
CONF_PASSWORD: '0'
}})
|
JshWright/home-assistant
|
tests/components/device_tracker/test_ddwrt.py
|
Python
|
apache-2.0
| 10,863 | 40.942085 | 79 | 0.536408 | false |
/* $OpenBSD: history.c,v 1.41 2015/09/01 13:12:31 tedu Exp $ */
/* $OpenBSD: trap.c,v 1.23 2010/05/19 17:36:08 jasper Exp $ */
/*-
* Copyright (c) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
* 2011, 2012, 2014, 2015, 2016
* mirabilos <[email protected]>
*
* Provided that these terms and disclaimer and all copyright notices
* are retained or reproduced in an accompanying document, permission
* is granted to deal in this work without restriction, including un-
* limited rights to use, publicly perform, distribute, sell, modify,
* merge, give away, or sublicence.
*
* This work is provided "AS IS" and WITHOUT WARRANTY of any kind, to
* the utmost extent permitted by applicable law, neither express nor
* implied; without malicious intent or gross negligence. In no event
* may a licensor, author or contributor be held liable for indirect,
* direct, other damage, loss, or other issues arising in any way out
* of dealing in the work, even if advised of the possibility of such
* damage or existence of a defect, except proven that it results out
* of said person's immediate fault when using the work as intended.
*/
#include "sh.h"
#if HAVE_SYS_FILE_H
#include <sys/file.h>
#endif
__RCSID("$MirOS: src/bin/mksh/histrap.c,v 1.152 2016/01/14 23:18:08 tg Exp $");
Trap sigtraps[ksh_NSIG + 1];
static struct sigaction Sigact_ign;
#if HAVE_PERSISTENT_HISTORY
static int histload(Source *, unsigned char *, size_t);
static int writehistline(int, int, const char *);
static void writehistfile(int, const char *);
#endif
static int hist_execute(char *, Area *);
static char **hist_get(const char *, bool, bool);
static char **hist_get_oldest(void);
static bool hstarted; /* set after hist_init() called */
static Source *hist_source;
#if HAVE_PERSISTENT_HISTORY
/*XXX imake style */
#if defined(__linux)
#define caddr_cast(x) ((void *)(x))
#else
#define caddr_cast(x) ((caddr_t)(x))
#endif
/* several OEs do not have these constants */
#ifndef MAP_FAILED
#define MAP_FAILED caddr_cast(-1)
#endif
/* some OEs need the default mapping type specified */
#ifndef MAP_FILE
#define MAP_FILE 0
#endif
/* current history file: name, fd, size */
static char *hname;
static int histfd = -1;
static off_t histfsize;
#endif
static const char Tnot_in_history[] = "not in history";
#define Thistory (Tnot_in_history + 7)
static const char TFCEDIT_dollaru[] = "${FCEDIT:-/bin/ed} $_";
#define Tspdollaru (TFCEDIT_dollaru + 18)
/* HISTSIZE default: size of saved history, persistent or standard */
#ifdef MKSH_SMALL
#define MKSH_DEFHISTSIZE 255
#else
#define MKSH_DEFHISTSIZE 2047
#endif
/* maximum considered size of persistent history file */
#define MKSH_MAXHISTFSIZE ((off_t)1048576 * 96)
/* hidden option */
#define HIST_DISCARD 5
int
c_fc(const char **wp)
{
struct shf *shf;
struct temp *tf;
bool gflag = false, lflag = false, nflag = false, rflag = false,
sflag = false;
int optc;
const char *p, *first = NULL, *last = NULL;
char **hfirst, **hlast, **hp, *editor = NULL;
if (!Flag(FTALKING_I)) {
bi_errorf("history %ss not available", Tfunction);
return (1);
}
while ((optc = ksh_getopt(wp, &builtin_opt,
"e:glnrs0,1,2,3,4,5,6,7,8,9,")) != -1)
switch (optc) {
case 'e':
p = builtin_opt.optarg;
if (ksh_isdash(p))
sflag = true;
else {
size_t len = strlen(p);
/* almost certainly not overflowing */
editor = alloc(len + 4, ATEMP);
memcpy(editor, p, len);
memcpy(editor + len, Tspdollaru, 4);
}
break;
/* non-AT&T ksh */
case 'g':
gflag = true;
break;
case 'l':
lflag = true;
break;
case 'n':
nflag = true;
break;
case 'r':
rflag = true;
break;
/* POSIX version of -e - */
case 's':
sflag = true;
break;
/* kludge city - accept -num as -- -num (kind of) */
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
p = shf_smprintf("-%c%s",
optc, builtin_opt.optarg);
if (!first)
first = p;
else if (!last)
last = p;
else {
bi_errorf("too many arguments");
return (1);
}
break;
case '?':
return (1);
}
wp += builtin_opt.optind;
/* Substitute and execute command */
if (sflag) {
char *pat = NULL, *rep = NULL, *line;
if (editor || lflag || nflag || rflag) {
bi_errorf("can't use -e, -l, -n, -r with -s (-e -)");
return (1);
}
/* Check for pattern replacement argument */
if (*wp && **wp && (p = cstrchr(*wp + 1, '='))) {
strdupx(pat, *wp, ATEMP);
rep = pat + (p - *wp);
*rep++ = '\0';
wp++;
}
/* Check for search prefix */
if (!first && (first = *wp))
wp++;
if (last || *wp) {
bi_errorf("too many arguments");
return (1);
}
hp = first ? hist_get(first, false, false) :
hist_get_newest(false);
if (!hp)
return (1);
/* hist_replace */
if (!pat)
strdupx(line, *hp, ATEMP);
else {
char *s, *s1;
size_t len, pat_len, rep_len;
XString xs;
char *xp;
bool any_subst = false;
pat_len = strlen(pat);
rep_len = strlen(rep);
Xinit(xs, xp, 128, ATEMP);
for (s = *hp; (s1 = strstr(s, pat)) &&
(!any_subst || gflag); s = s1 + pat_len) {
any_subst = true;
len = s1 - s;
XcheckN(xs, xp, len + rep_len);
				/* first part */
memcpy(xp, s, len);
xp += len;
/* replacement */
memcpy(xp, rep, rep_len);
xp += rep_len;
}
if (!any_subst) {
bi_errorf("bad substitution");
return (1);
}
len = strlen(s) + 1;
XcheckN(xs, xp, len);
memcpy(xp, s, len);
xp += len;
line = Xclose(xs, xp);
}
return (hist_execute(line, ATEMP));
}
if (editor && (lflag || nflag)) {
bi_errorf("can't use -l, -n with -e");
return (1);
}
if (!first && (first = *wp))
wp++;
if (!last && (last = *wp))
wp++;
if (*wp) {
bi_errorf("too many arguments");
return (1);
}
if (!first) {
hfirst = lflag ? hist_get("-16", true, true) :
hist_get_newest(false);
if (!hfirst)
return (1);
/* can't fail if hfirst didn't fail */
hlast = hist_get_newest(false);
} else {
/*
* POSIX says not an error if first/last out of bounds
* when range is specified; AT&T ksh and pdksh allow out
* of bounds for -l as well.
*/
hfirst = hist_get(first, tobool(lflag || last), lflag);
if (!hfirst)
return (1);
hlast = last ? hist_get(last, true, lflag) :
(lflag ? hist_get_newest(false) : hfirst);
if (!hlast)
return (1);
}
if (hfirst > hlast) {
char **temp;
temp = hfirst; hfirst = hlast; hlast = temp;
/* POSIX */
rflag = !rflag;
}
/* List history */
if (lflag) {
char *s, *t;
for (hp = rflag ? hlast : hfirst;
hp >= hfirst && hp <= hlast; hp += rflag ? -1 : 1) {
if (!nflag)
shf_fprintf(shl_stdout, "%lu",
(unsigned long)hist_source->line -
(unsigned long)(histptr - hp));
shf_putc('\t', shl_stdout);
/* print multi-line commands correctly */
s = *hp;
while ((t = strchr(s, '\n'))) {
*t = '\0';
shf_fprintf(shl_stdout, "%s\n\t", s);
*t++ = '\n';
s = t;
}
shf_fprintf(shl_stdout, "%s\n", s);
}
shf_flush(shl_stdout);
return (0);
}
/* Run editor on selected lines, then run resulting commands */
tf = maketemp(ATEMP, TT_HIST_EDIT, &e->temps);
if (!(shf = tf->shf)) {
bi_errorf("can't %s temporary file %s: %s",
"create", tf->tffn, cstrerror(errno));
return (1);
}
for (hp = rflag ? hlast : hfirst;
hp >= hfirst && hp <= hlast; hp += rflag ? -1 : 1)
shf_fprintf(shf, "%s\n", *hp);
if (shf_close(shf) == -1) {
bi_errorf("can't %s temporary file %s: %s",
"write", tf->tffn, cstrerror(errno));
return (1);
}
/* Ignore setstr errors here (arbitrary) */
setstr(local("_", false), tf->tffn, KSH_RETURN_ERROR);
/* XXX: source should not get trashed by this.. */
{
Source *sold = source;
int ret;
ret = command(editor ? editor : TFCEDIT_dollaru, 0);
source = sold;
if (ret)
return (ret);
}
{
struct stat statb;
XString xs;
char *xp;
ssize_t n;
if (!(shf = shf_open(tf->tffn, O_RDONLY, 0, 0))) {
bi_errorf("can't %s temporary file %s: %s",
"open", tf->tffn, cstrerror(errno));
return (1);
}
if (stat(tf->tffn, &statb) < 0)
n = 128;
else if ((off_t)statb.st_size > MKSH_MAXHISTFSIZE) {
bi_errorf("%s %s too large: %lu", Thistory,
"file", (unsigned long)statb.st_size);
goto errout;
} else
n = (size_t)statb.st_size + 1;
Xinit(xs, xp, n, hist_source->areap);
while ((n = shf_read(xp, Xnleft(xs, xp), shf)) > 0) {
xp += n;
if (Xnleft(xs, xp) <= 0)
XcheckN(xs, xp, Xlength(xs, xp));
}
if (n < 0) {
bi_errorf("can't %s temporary file %s: %s",
"read", tf->tffn, cstrerror(shf_errno(shf)));
errout:
shf_close(shf);
return (1);
}
shf_close(shf);
*xp = '\0';
strip_nuls(Xstring(xs, xp), Xlength(xs, xp));
return (hist_execute(Xstring(xs, xp), hist_source->areap));
}
}
/* save cmd in history, execute cmd (cmd gets afree’d) */
static int
hist_execute(char *cmd, Area *areap)
{
static int last_line = -1;
Source *sold;
int ret;
/* Back up over last histsave */
if (histptr >= history && last_line != hist_source->line) {
hist_source->line--;
afree(*histptr, APERM);
histptr--;
last_line = hist_source->line;
}
histsave(&hist_source->line, cmd, HIST_STORE, true);
/* now *histptr == cmd without all trailing newlines */
afree(cmd, areap);
cmd = *histptr;
/* pdksh says POSIX doesn’t say this is done, testsuite needs it */
shellf("%s\n", cmd);
/*-
* Commands are executed here instead of pushing them onto the
* input 'cause POSIX says the redirection and variable assignments
* in
* X=y fc -e - 42 2> /dev/null
* are to effect the repeated commands environment.
*/
/* XXX: source should not get trashed by this.. */
sold = source;
ret = command(cmd, 0);
source = sold;
return (ret);
}
/*
* get pointer to history given pattern
* pattern is a number or string
*/
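/*-
 * Illustrative examples (not part of the original source): a positive STR
 * such as "5" is an absolute history line number, a negative one such as
 * "-2" counts back from the newest entry, and "?foo" matches the most
 * recent line containing "foo" (a leading '?' makes the search unanchored).
 */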
static char **
hist_get(const char *str, bool approx, bool allow_cur)
{
char **hp = NULL;
int n;
if (getn(str, &n)) {
hp = histptr + (n < 0 ? n : (n - hist_source->line));
if ((size_t)hp < (size_t)history) {
if (approx)
hp = hist_get_oldest();
else {
bi_errorf("%s: %s", str, Tnot_in_history);
hp = NULL;
}
} else if ((size_t)hp > (size_t)histptr) {
if (approx)
hp = hist_get_newest(allow_cur);
else {
bi_errorf("%s: %s", str, Tnot_in_history);
hp = NULL;
}
} else if (!allow_cur && hp == histptr) {
bi_errorf("%s: %s", str, "invalid range");
hp = NULL;
}
} else {
bool anchored = *str == '?' ? (++str, false) : true;
/* the -1 is to avoid the current fc command */
if ((n = findhist(histptr - history - 1, 0, str, anchored)) < 0)
bi_errorf("%s: %s", str, Tnot_in_history);
else
hp = &history[n];
}
return (hp);
}
/* Return a pointer to the newest command in the history */
char **
hist_get_newest(bool allow_cur)
{
if (histptr < history || (!allow_cur && histptr == history)) {
bi_errorf("no history (yet)");
return (NULL);
}
return (allow_cur ? histptr : histptr - 1);
}
/* Return a pointer to the oldest command in the history */
static char **
hist_get_oldest(void)
{
if (histptr <= history) {
bi_errorf("no history (yet)");
return (NULL);
}
return (history);
}
#if !defined(MKSH_NO_CMDLINE_EDITING) && !MKSH_S_NOVI
/* current position in history[] */
static char **current;
/*
* Return the current position.
*/
char **
histpos(void)
{
return (current);
}
int
histnum(int n)
{
int last = histptr - history;
if (n < 0 || n >= last) {
current = histptr;
return (last);
} else {
current = &history[n];
return (n);
}
}
#endif
/*
* This will become unnecessary if hist_get is modified to allow
* searching from positions other than the end, and in either
* direction.
*/
int
findhist(int start, int fwd, const char *str, bool anchored)
{
char **hp;
int maxhist = histptr - history;
int incr = fwd ? 1 : -1;
size_t len = strlen(str);
if (start < 0 || start >= maxhist)
start = maxhist;
hp = &history[start];
for (; hp >= history && hp <= histptr; hp += incr)
if ((anchored && strncmp(*hp, str, len) == 0) ||
(!anchored && strstr(*hp, str)))
return (hp - history);
return (-1);
}
/*
* set history; this means reallocating the dataspace
*/
void
sethistsize(mksh_ari_t n)
{
if (n > 0 && n != histsize) {
int cursize = histptr - history;
/* save most recent history */
if (n < cursize) {
memmove(history, histptr - n + 1, n * sizeof(char *));
cursize = n - 1;
}
history = aresize2(history, n, sizeof(char *), APERM);
histsize = n;
histptr = history + cursize;
}
}
#if HAVE_PERSISTENT_HISTORY
/*
* set history file; this can mean reloading/resetting/starting
* history file maintenance
*/
void
sethistfile(const char *name)
{
/* if not started then nothing to do */
if (hstarted == false)
return;
/* if the name is the same as the name we have */
if (hname && name && !strcmp(hname, name))
return;
/*
* it's a new name - possibly
*/
if (histfd != -1) {
/* yes the file is open */
(void)close(histfd);
histfd = -1;
histfsize = 0;
afree(hname, APERM);
hname = NULL;
/* let's reset the history */
histsave(NULL, NULL, HIST_DISCARD, true);
histptr = history - 1;
hist_source->line = 0;
}
if (name)
hist_init(hist_source);
}
#endif
/*
* initialise the history vector
*/
void
init_histvec(void)
{
if (history == (char **)NULL) {
histsize = MKSH_DEFHISTSIZE;
history = alloc2(histsize, sizeof(char *), APERM);
histptr = history - 1;
}
}
/*
* It turns out that there is a lot of ghastly hackery here
*/
#if !defined(MKSH_SMALL) && HAVE_PERSISTENT_HISTORY
/* do not save command in history but possibly sync */
bool
histsync(void)
{
bool changed = false;
/* called by histsave(), may not HIST_DISCARD, caller should flush */
if (histfd != -1) {
int lno = hist_source->line;
hist_source->line++;
writehistfile(0, NULL);
hist_source->line--;
if (lno != hist_source->line)
changed = true;
}
return (changed);
}
#endif
/*
* save command in history
*/
void
histsave(int *lnp, const char *cmd, int svmode, bool ignoredups)
{
static char *enqueued = NULL;
char **hp, *c;
const char *ccp;
if (svmode == HIST_DISCARD) {
afree(enqueued, APERM);
enqueued = NULL;
return;
}
if (svmode == HIST_APPEND) {
if (!enqueued)
svmode = HIST_STORE;
} else if (enqueued) {
c = enqueued;
enqueued = NULL;
--*lnp;
histsave(lnp, c, HIST_STORE, true);
afree(c, APERM);
}
if (svmode == HIST_FLUSH)
return;
ccp = cmd + strlen(cmd);
while (ccp > cmd && ccp[-1] == '\n')
--ccp;
strndupx(c, cmd, ccp - cmd, APERM);
if (svmode != HIST_APPEND) {
if (ignoredups && !strcmp(c, *histptr)
#if !defined(MKSH_SMALL) && HAVE_PERSISTENT_HISTORY
&& !histsync()
#endif
) {
afree(c, APERM);
return;
}
++*lnp;
}
#if HAVE_PERSISTENT_HISTORY
if (svmode == HIST_STORE && histfd != -1)
writehistfile(*lnp, c);
#endif
if (svmode == HIST_QUEUE || svmode == HIST_APPEND) {
size_t nenq, ncmd;
if (!enqueued) {
if (*c)
enqueued = c;
else
afree(c, APERM);
return;
}
nenq = strlen(enqueued);
ncmd = strlen(c);
enqueued = aresize(enqueued, nenq + 1 + ncmd + 1, APERM);
enqueued[nenq] = '\n';
memcpy(enqueued + nenq + 1, c, ncmd + 1);
afree(c, APERM);
return;
}
hp = histptr;
if (++hp >= history + histsize) {
/* remove oldest command */
afree(*history, APERM);
for (hp = history; hp < history + histsize - 1; hp++)
hp[0] = hp[1];
}
*hp = c;
histptr = hp;
}
/*
* Write history data to a file nominated by HISTFILE;
* if HISTFILE is unset then history still happens, but
* the data is not written to a file. All copies of ksh
* looking at the file will maintain the same history.
* This is ksh behaviour.
*
* This stuff uses mmap()
*
* This stuff is so totally broken it must eventually be
* redesigned, without mmap, better checks, support for
* larger files, etc. and handle partially corrupted files
*/
/*-
* Open a history file
* Format is:
* Bytes 1, 2:
* HMAGIC - just to check that we are dealing with the correct object
* Then follows a number of stored commands
* Each command is
* <command byte><command number(4 octets, big endian)><bytes><NUL>
*/
#define HMAGIC1 0xAB
#define HMAGIC2 0xCD
#define COMMAND 0xFF
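/*-
 * Illustrative example (not part of the original source): the command
 * "echo hi" saved as history line 3 is stored on disk as the bytes
 *	0xFF 0x00 0x00 0x00 0x03 'e' 'c' 'h' 'o' ' ' 'h' 'i' 0x00
 * i.e. COMMAND, the line number as four big-endian octets, the command
 * text, then a terminating NUL (see writehistline() below).
 */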
#if HAVE_PERSISTENT_HISTORY
static const unsigned char sprinkle[2] = { HMAGIC1, HMAGIC2 };
#endif
void
hist_init(Source *s)
{
#if HAVE_PERSISTENT_HISTORY
unsigned char *base;
int lines, fd;
enum { hist_init_first, hist_init_retry, hist_init_restore } hs;
#endif
histsave(NULL, NULL, HIST_DISCARD, true);
if (Flag(FTALKING) == 0)
return;
hstarted = true;
hist_source = s;
#if HAVE_PERSISTENT_HISTORY
if (((hname = str_val(global("HISTFILE"))) == NULL) || !*hname) {
hname = NULL;
return;
}
strdupx(hname, hname, APERM);
hs = hist_init_first;
retry:
/* we have a file and are interactive */
if ((fd = binopen3(hname, O_RDWR | O_CREAT | O_APPEND, 0600)) < 0)
return;
histfd = savefd(fd);
if (histfd != fd)
close(fd);
mksh_lockfd(histfd);
histfsize = lseek(histfd, (off_t)0, SEEK_END);
if (histfsize > MKSH_MAXHISTFSIZE || hs == hist_init_restore) {
/* we ignore too large files but still append to them */
/* we also don't need to re-read after truncation */
goto hist_init_tail;
} else if (histfsize > 2) {
/* we have some data, check its validity */
base = (void *)mmap(NULL, (size_t)histfsize, PROT_READ,
MAP_FILE | MAP_PRIVATE, histfd, (off_t)0);
if (base == (unsigned char *)MAP_FAILED)
goto hist_init_fail;
if (base[0] != HMAGIC1 || base[1] != HMAGIC2) {
munmap(caddr_cast(base), (size_t)histfsize);
goto hist_init_fail;
}
/* load _all_ data */
lines = histload(hist_source, base + 2, (size_t)histfsize - 2);
munmap(caddr_cast(base), (size_t)histfsize);
/* check if the file needs to be truncated */
if (lines > histsize && histptr >= history) {
/* you're fucked up with the current code, trust me */
char *nhname, **hp;
struct stat sb;
/* create temporary file */
nhname = shf_smprintf("%s.%d", hname, (int)procpid);
if ((fd = binopen3(nhname, O_RDWR | O_CREAT | O_TRUNC |
O_EXCL, 0600)) < 0) {
/* just don't truncate then, meh. */
goto hist_trunc_dont;
}
if (fstat(histfd, &sb) >= 0 &&
chown(nhname, sb.st_uid, sb.st_gid)) {
/* abort the truncation then, meh. */
goto hist_trunc_abort;
}
/* we definitively want some magic in that file */
if (write(fd, sprinkle, 2) != 2)
goto hist_trunc_abort;
/* and of course the entries */
hp = history;
while (hp < histptr) {
if (!writehistline(fd,
s->line - (histptr - hp), *hp))
goto hist_trunc_abort;
++hp;
}
/* now unlock, close both, rename, rinse, repeat */
close(fd);
fd = -1;
hist_finish();
if (rename(nhname, hname) < 0) {
hist_trunc_abort:
if (fd != -1)
close(fd);
unlink(nhname);
if (fd != -1)
goto hist_trunc_dont;
/* darn! restore histfd and pray */
}
hs = hist_init_restore;
hist_trunc_dont:
afree(nhname, ATEMP);
if (hs == hist_init_restore)
goto retry;
}
} else if (histfsize != 0) {
/* negative or too small... */
hist_init_fail:
/* ... or mmap failed or illegal */
hist_finish();
/* nuke the bogus file then retry, at most once */
if (!unlink(hname) && hs != hist_init_retry) {
hs = hist_init_retry;
goto retry;
}
if (hs != hist_init_retry)
bi_errorf("can't %s %s: %s",
"unlink HISTFILE", hname, cstrerror(errno));
histfsize = 0;
return;
} else {
/* size 0, add magic to the history file */
if (write(histfd, sprinkle, 2) != 2) {
hist_finish();
return;
}
}
histfsize = lseek(histfd, (off_t)0, SEEK_END);
hist_init_tail:
mksh_unlkfd(histfd);
#endif
}
#if HAVE_PERSISTENT_HISTORY
/*
* load the history structure from the stored data
*/
static int
histload(Source *s, unsigned char *base, size_t bytes)
{
int lno = 0, lines = 0;
unsigned char *cp;
histload_loop:
/* !bytes check as some systems (older FreeBSDs) have buggy memchr */
if (!bytes || (cp = memchr(base, COMMAND, bytes)) == NULL)
return (lines);
/* advance base pointer past COMMAND byte */
bytes -= ++cp - base;
base = cp;
/* if there is no full string left, don't bother with the rest */
if (bytes < 5 || (cp = memchr(base + 4, '\0', bytes - 4)) == NULL)
return (lines);
/* load the stored line number */
lno = ((base[0] & 0xFF) << 24) | ((base[1] & 0xFF) << 16) |
((base[2] & 0xFF) << 8) | (base[3] & 0xFF);
/* store away the found line (@base[4]) */
++lines;
if (histptr >= history && lno - 1 != s->line) {
/* a replacement? */
char **hp;
if (lno >= s->line - (histptr - history) && lno <= s->line) {
hp = &histptr[lno - s->line];
afree(*hp, APERM);
strdupx(*hp, (char *)(base + 4), APERM);
}
} else {
s->line = lno--;
histsave(&lno, (char *)(base + 4), HIST_NOTE, false);
}
/* advance base pointer past NUL */
bytes -= ++cp - base;
base = cp;
/* repeat until no more */
goto histload_loop;
}
/*
* write a command to the end of the history file
*
* This *MAY* seem easy but it's also necessary to check
* that the history file has not changed in size.
* If it has - then some other shell has written to it and
* we should (re)read those commands to update our history
*/
static void
writehistfile(int lno, const char *cmd)
{
off_t sizenow;
size_t bytes;
unsigned char *base, *news;
mksh_lockfd(histfd);
sizenow = lseek(histfd, (off_t)0, SEEK_END);
if (sizenow < histfsize) {
/* the file has shrunk; give up */
goto bad;
}
if (
/* ignore changes when the file is too large */
sizenow <= MKSH_MAXHISTFSIZE
&&
/* the size has changed, we need to do read updates */
sizenow > histfsize
) {
/* both sizenow and histfsize are <= MKSH_MAXHISTFSIZE */
bytes = (size_t)(sizenow - histfsize);
base = (void *)mmap(NULL, (size_t)sizenow, PROT_READ,
MAP_FILE | MAP_PRIVATE, histfd, (off_t)0);
if (base == (unsigned char *)MAP_FAILED)
goto bad;
news = base + (size_t)histfsize;
if (*news == COMMAND) {
hist_source->line--;
histload(hist_source, news, bytes);
hist_source->line++;
lno = hist_source->line;
} else
bytes = 0;
munmap(caddr_cast(base), (size_t)sizenow);
if (!bytes)
goto bad;
}
if (cmd && !writehistline(histfd, lno, cmd)) {
bad:
hist_finish();
return;
}
histfsize = lseek(histfd, (off_t)0, SEEK_END);
mksh_unlkfd(histfd);
}
static int
writehistline(int fd, int lno, const char *cmd)
{
ssize_t n;
unsigned char hdr[5];
hdr[0] = COMMAND;
hdr[1] = (lno >> 24) & 0xFF;
hdr[2] = (lno >> 16) & 0xFF;
hdr[3] = (lno >> 8) & 0xFF;
hdr[4] = lno & 0xFF;
n = strlen(cmd) + 1;
return (write(fd, hdr, 5) == 5 && write(fd, cmd, n) == n);
}
void
hist_finish(void)
{
if (histfd >= 0) {
mksh_unlkfd(histfd);
(void)close(histfd);
}
histfd = -1;
}
#endif
#if !HAVE_SYS_SIGNAME
static const struct mksh_sigpair {
const char * const name;
int nr;
} mksh_sigpairs[] = {
#include "signames.inc"
{ NULL, 0 }
};
#endif
#if HAVE_SYS_SIGLIST
#if !HAVE_SYS_SIGLIST_DECL
extern const char * const sys_siglist[];
#endif
#endif
void
inittraps(void)
{
int i;
const char *cs;
trap_exstat = -1;
/* Populate sigtraps based on sys_signame and sys_siglist. */
for (i = 1; i < ksh_NSIG; i++) {
sigtraps[i].signal = i;
#if HAVE_SYS_SIGNAME
cs = sys_signame[i];
#else
const struct mksh_sigpair *pair = mksh_sigpairs;
while ((pair->nr != i) && (pair->name != NULL))
++pair;
cs = pair->name;
#endif
if ((cs == NULL) ||
(cs[0] == '\0'))
sigtraps[i].name = shf_smprintf("%d", i);
else {
char *s;
/* this is not optimal, what about SIGSIG1? */
if (ksh_eq(cs[0], 'S', 's') &&
ksh_eq(cs[1], 'I', 'i') &&
ksh_eq(cs[2], 'G', 'g') &&
cs[3] != '\0') {
/* skip leading "SIG" */
cs += 3;
}
strdupx(s, cs, APERM);
sigtraps[i].name = s;
while ((*s = ksh_toupper(*s)))
++s;
}
#if HAVE_SYS_SIGLIST
sigtraps[i].mess = sys_siglist[i];
#elif HAVE_STRSIGNAL
sigtraps[i].mess = strsignal(i);
#else
sigtraps[i].mess = NULL;
#endif
if ((sigtraps[i].mess == NULL) ||
(sigtraps[i].mess[0] == '\0'))
sigtraps[i].mess = shf_smprintf("%s %d",
"Signal", i);
}
sigtraps[ksh_SIGEXIT].signal = ksh_SIGEXIT;
sigtraps[ksh_SIGEXIT].name = "EXIT";
sigtraps[ksh_SIGEXIT].mess = "Exit trap";
sigtraps[ksh_SIGERR].signal = ksh_SIGERR;
sigtraps[ksh_SIGERR].name = "ERR";
sigtraps[ksh_SIGERR].mess = "Error handler";
(void)sigemptyset(&Sigact_ign.sa_mask);
Sigact_ign.sa_flags = 0; /* interruptible */
Sigact_ign.sa_handler = SIG_IGN;
sigtraps[SIGINT].flags |= TF_DFL_INTR | TF_TTY_INTR;
sigtraps[SIGQUIT].flags |= TF_DFL_INTR | TF_TTY_INTR;
/* SIGTERM is not fatal for interactive */
sigtraps[SIGTERM].flags |= TF_DFL_INTR;
sigtraps[SIGHUP].flags |= TF_FATAL;
sigtraps[SIGCHLD].flags |= TF_SHELL_USES;
/* these are always caught so we can clean up any temporary files. */
setsig(&sigtraps[SIGINT], trapsig, SS_RESTORE_ORIG);
setsig(&sigtraps[SIGQUIT], trapsig, SS_RESTORE_ORIG);
setsig(&sigtraps[SIGTERM], trapsig, SS_RESTORE_ORIG);
setsig(&sigtraps[SIGHUP], trapsig, SS_RESTORE_ORIG);
}
static void alarm_catcher(int sig);
void
alarm_init(void)
{
sigtraps[SIGALRM].flags |= TF_SHELL_USES;
setsig(&sigtraps[SIGALRM], alarm_catcher,
SS_RESTORE_ORIG|SS_FORCE|SS_SHTRAP);
}
/* ARGSUSED */
static void
alarm_catcher(int sig MKSH_A_UNUSED)
{
/* this runs inside interrupt context, with errno saved */
if (ksh_tmout_state == TMOUT_READING) {
int left = alarm(0);
if (left == 0) {
ksh_tmout_state = TMOUT_LEAVING;
intrsig = 1;
} else
alarm(left);
}
}
Trap *
gettrap(const char *cs, bool igncase, bool allsigs)
{
int i;
Trap *p;
char *as;
/* signal number (1..ksh_NSIG) or 0? */
if (ksh_isdigit(*cs))
return ((getn(cs, &i) && 0 <= i && i < ksh_NSIG) ?
(&sigtraps[i]) : NULL);
/* do a lookup by name then */
/* this breaks SIGSIG1, but we do that above anyway */
if (ksh_eq(cs[0], 'S', 's') &&
ksh_eq(cs[1], 'I', 'i') &&
ksh_eq(cs[2], 'G', 'g') &&
cs[3] != '\0') {
/* skip leading "SIG" */
cs += 3;
}
if (igncase) {
char *s;
strdupx(as, cs, ATEMP);
cs = s = as;
while ((*s = ksh_toupper(*s)))
++s;
} else
as = NULL;
/* this is idiotic, we really want a hashtable here */
p = sigtraps;
i = ksh_NSIG + 1;
do {
if (!strcmp(p->name, cs))
goto found;
++p;
} while (--i);
goto notfound;
found:
if (!allsigs) {
if (p->signal == ksh_SIGEXIT || p->signal == ksh_SIGERR) {
notfound:
p = NULL;
}
}
afree(as, ATEMP);
return (p);
}
/*
* trap signal handler
*/
void
trapsig(int i)
{
Trap *p = &sigtraps[i];
int eno = errno;
trap = p->set = 1;
if (p->flags & TF_DFL_INTR)
intrsig = 1;
if ((p->flags & TF_FATAL) && !p->trap) {
fatal_trap = 1;
intrsig = 1;
}
if (p->shtrap)
(*p->shtrap)(i);
errno = eno;
}
/*
* called when we want to allow the user to ^C out of something - won't
* work if user has trapped SIGINT.
*/
void
intrcheck(void)
{
if (intrsig)
runtraps(TF_DFL_INTR|TF_FATAL);
}
/*
 * called after EINTR to check if a signal which normally causes process
 * termination has been received.
*/
int
fatal_trap_check(void)
{
Trap *p = sigtraps;
int i = ksh_NSIG + 1;
/* todo: should check if signal is fatal, not the TF_DFL_INTR flag */
do {
if (p->set && (p->flags & (TF_DFL_INTR|TF_FATAL)))
/* return value is used as an exit code */
return (ksh_sigmask(p->signal));
++p;
} while (--i);
return (0);
}
/*
* Returns the signal number of any pending traps: ie, a signal which has
* occurred for which a trap has been set or for which the TF_DFL_INTR flag
* is set.
*/
int
trap_pending(void)
{
Trap *p = sigtraps;
int i = ksh_NSIG + 1;
do {
if (p->set && ((p->trap && p->trap[0]) ||
((p->flags & (TF_DFL_INTR|TF_FATAL)) && !p->trap)))
return (p->signal);
++p;
} while (--i);
return (0);
}
/*
* run any pending traps. If intr is set, only run traps that
* can interrupt commands.
*/
void
runtraps(int flag)
{
Trap *p = sigtraps;
int i = ksh_NSIG + 1;
if (ksh_tmout_state == TMOUT_LEAVING) {
ksh_tmout_state = TMOUT_EXECUTING;
warningf(false, "timed out waiting for input");
unwind(LEXIT);
} else
/*
* XXX: this means the alarm will have no effect if a trap
* is caught after the alarm() was started...not good.
*/
ksh_tmout_state = TMOUT_EXECUTING;
if (!flag)
trap = 0;
if (flag & TF_DFL_INTR)
intrsig = 0;
if (flag & TF_FATAL)
fatal_trap = 0;
++trap_nested;
do {
if (p->set && (!flag ||
((p->flags & flag) && p->trap == NULL)))
runtrap(p, false);
++p;
} while (--i);
if (!--trap_nested)
runtrap(NULL, true);
}
void
runtrap(Trap *p, bool is_last)
{
int old_changed = 0, i;
char *trapstr;
if (p == NULL)
/* just clean up, see runtraps() above */
goto donetrap;
i = p->signal;
trapstr = p->trap;
p->set = 0;
if (trapstr == NULL) {
/* SIG_DFL */
if (p->flags & (TF_FATAL | TF_DFL_INTR)) {
exstat = (int)(128U + (unsigned)i);
if ((unsigned)exstat > 255U)
exstat = 255;
}
/* e.g. SIGHUP */
if (p->flags & TF_FATAL)
unwind(LLEAVE);
/* e.g. SIGINT, SIGQUIT, SIGTERM, etc. */
if (p->flags & TF_DFL_INTR)
unwind(LINTR);
goto donetrap;
}
if (trapstr[0] == '\0')
/* SIG_IGN */
goto donetrap;
if (i == ksh_SIGEXIT || i == ksh_SIGERR) {
/* avoid recursion on these */
old_changed = p->flags & TF_CHANGED;
p->flags &= ~TF_CHANGED;
p->trap = NULL;
}
if (trap_exstat == -1)
trap_exstat = exstat & 0xFF;
/*
* Note: trapstr is fully parsed before anything is executed, thus
* no problem with afree(p->trap) in settrap() while still in use.
*/
command(trapstr, current_lineno);
if (i == ksh_SIGEXIT || i == ksh_SIGERR) {
if (p->flags & TF_CHANGED)
/* don't clear TF_CHANGED */
afree(trapstr, APERM);
else
p->trap = trapstr;
p->flags |= old_changed;
}
donetrap:
/* we're the last trap of a sequence executed */
if (is_last && trap_exstat != -1) {
exstat = trap_exstat;
trap_exstat = -1;
}
}
/* clear pending traps and reset user's trap handlers; used after fork(2) */
void
cleartraps(void)
{
Trap *p = sigtraps;
int i = ksh_NSIG + 1;
trap = 0;
intrsig = 0;
fatal_trap = 0;
do {
p->set = 0;
if ((p->flags & TF_USER_SET) && (p->trap && p->trap[0]))
settrap(p, NULL);
++p;
} while (--i);
}
/* restore signals just before an exec(2) */
void
restoresigs(void)
{
Trap *p = sigtraps;
int i = ksh_NSIG + 1;
do {
if (p->flags & (TF_EXEC_IGN|TF_EXEC_DFL))
setsig(p, (p->flags & TF_EXEC_IGN) ? SIG_IGN : SIG_DFL,
SS_RESTORE_CURR|SS_FORCE);
++p;
} while (--i);
}
void
settrap(Trap *p, const char *s)
{
sig_t f;
afree(p->trap, APERM);
/* handles s == NULL */
strdupx(p->trap, s, APERM);
p->flags |= TF_CHANGED;
f = !s ? SIG_DFL : s[0] ? trapsig : SIG_IGN;
p->flags |= TF_USER_SET;
if ((p->flags & (TF_DFL_INTR|TF_FATAL)) && f == SIG_DFL)
f = trapsig;
else if (p->flags & TF_SHELL_USES) {
if (!(p->flags & TF_ORIG_IGN) || Flag(FTALKING)) {
/* do what user wants at exec time */
p->flags &= ~(TF_EXEC_IGN|TF_EXEC_DFL);
if (f == SIG_IGN)
p->flags |= TF_EXEC_IGN;
else
p->flags |= TF_EXEC_DFL;
}
/*
* assumes handler already set to what shell wants it
* (normally trapsig, but could be j_sigchld() or SIG_IGN)
*/
return;
}
/* todo: should we let user know signal is ignored? how? */
setsig(p, f, SS_RESTORE_CURR|SS_USER);
}
/*
* Called by c_print() when writing to a co-process to ensure SIGPIPE won't
* kill shell (unless user catches it and exits)
*/
int
block_pipe(void)
{
int restore_dfl = 0;
Trap *p = &sigtraps[SIGPIPE];
if (!(p->flags & (TF_ORIG_IGN|TF_ORIG_DFL))) {
setsig(p, SIG_IGN, SS_RESTORE_CURR);
if (p->flags & TF_ORIG_DFL)
restore_dfl = 1;
} else if (p->cursig == SIG_DFL) {
setsig(p, SIG_IGN, SS_RESTORE_CURR);
/* restore to SIG_DFL */
restore_dfl = 1;
}
return (restore_dfl);
}
/* Called by c_print() to undo whatever block_pipe() did */
void
restore_pipe(int restore_dfl)
{
if (restore_dfl)
setsig(&sigtraps[SIGPIPE], SIG_DFL, SS_RESTORE_CURR);
}
/*
* Set action for a signal. Action may not be set if original
* action was SIG_IGN, depending on the value of flags and FTALKING.
*/
int
setsig(Trap *p, sig_t f, int flags)
{
struct sigaction sigact;
if (p->signal == ksh_SIGEXIT || p->signal == ksh_SIGERR)
return (1);
memset(&sigact, 0, sizeof(sigact));
/*
* First time setting this signal? If so, get and note the current
* setting.
*/
if (!(p->flags & (TF_ORIG_IGN|TF_ORIG_DFL))) {
sigaction(p->signal, &Sigact_ign, &sigact);
p->flags |= sigact.sa_handler == SIG_IGN ?
TF_ORIG_IGN : TF_ORIG_DFL;
p->cursig = SIG_IGN;
}
/*-
* Generally, an ignored signal stays ignored, except if
* - the user of an interactive shell wants to change it
 * - the shell wants to force a change
*/
if ((p->flags & TF_ORIG_IGN) && !(flags & SS_FORCE) &&
(!(flags & SS_USER) || !Flag(FTALKING)))
return (0);
setexecsig(p, flags & SS_RESTORE_MASK);
/*
* This is here 'cause there should be a way of clearing
* shtraps, but don't know if this is a sane way of doing
* it. At the moment, all users of shtrap are lifetime
* users (SIGALRM, SIGCHLD, SIGWINCH).
*/
if (!(flags & SS_USER))
p->shtrap = (sig_t)NULL;
if (flags & SS_SHTRAP) {
p->shtrap = f;
f = trapsig;
}
if (p->cursig != f) {
p->cursig = f;
(void)sigemptyset(&sigact.sa_mask);
/* interruptible */
sigact.sa_flags = 0;
sigact.sa_handler = f;
sigaction(p->signal, &sigact, NULL);
}
return (1);
}
/* control what signal is set to before an exec() */
void
setexecsig(Trap *p, int restore)
{
/* XXX debugging */
if (!(p->flags & (TF_ORIG_IGN|TF_ORIG_DFL)))
internal_errorf("setexecsig: unset signal %d(%s)",
p->signal, p->name);
/* restore original value for exec'd kids */
p->flags &= ~(TF_EXEC_IGN|TF_EXEC_DFL);
switch (restore & SS_RESTORE_MASK) {
case SS_RESTORE_CURR:
/* leave things as they currently are */
break;
case SS_RESTORE_ORIG:
p->flags |= p->flags & TF_ORIG_IGN ? TF_EXEC_IGN : TF_EXEC_DFL;
break;
case SS_RESTORE_DFL:
p->flags |= TF_EXEC_DFL;
break;
case SS_RESTORE_IGN:
p->flags |= TF_EXEC_IGN;
break;
}
}
#if HAVE_PERSISTENT_HISTORY || defined(DF)
/*
* File descriptor locking and unlocking functions.
* Could use some error handling, but hey, this is only
* advisory locking anyway, will often not work over NFS,
* and you are SOL if this fails...
*/
void
mksh_lockfd(int fd)
{
#if defined(__OpenBSD__)
/* flock is not interrupted by signals */
(void)flock(fd, LOCK_EX);
#elif HAVE_FLOCK
int rv;
/* e.g. on Linux */
do {
rv = flock(fd, LOCK_EX);
	} while (rv == -1 && errno == EINTR);
#elif HAVE_LOCK_FCNTL
int rv;
struct flock lks;
memset(&lks, 0, sizeof(lks));
lks.l_type = F_WRLCK;
do {
rv = fcntl(fd, F_SETLKW, &lks);
	} while (rv == -1 && errno == EINTR);
#endif
}
/* designed to not define mksh_unlkfd if none triggered */
#if HAVE_FLOCK
void
mksh_unlkfd(int fd)
{
(void)flock(fd, LOCK_UN);
}
#elif HAVE_LOCK_FCNTL
void
mksh_unlkfd(int fd)
{
struct flock lks;
memset(&lks, 0, sizeof(lks));
lks.l_type = F_UNLCK;
(void)fcntl(fd, F_SETLKW, &lks);
}
#endif
#endif
|
atrsoftgmbh/atrshmlog
|
mksh/mksh.logging/src.1mai/histrap.c
|
C
|
apache-2.0
| 35,454 | 21.479391 | 79 | 0.605303 | false |
package client
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"time"
"github.com/gorilla/websocket"
"github.com/pkg/errors"
)
const (
SELF = "self"
COLLECTION = "collection"
)
var (
debug = false
dialer = &websocket.Dialer{}
privateFieldRegex = regexp.MustCompile("^[[:lower:]]")
)
type ClientOpts struct {
Url string
AccessKey string
SecretKey string
Timeout time.Duration
}
type ApiError struct {
StatusCode int
Url string
Msg string
Status string
Body string
}
func (e *ApiError) Error() string {
return e.Msg
}
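// Illustrative usage of IsNotFound (not part of the original source;
// `resource` and `out` are placeholders):
//
//	if err := rancherClient.GetLink(resource, "self", &out); IsNotFound(err) {
//		// the resource no longer exists on the server
//	}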
func IsNotFound(err error) bool {
apiError, ok := err.(*ApiError)
if !ok {
return false
}
return apiError.StatusCode == http.StatusNotFound
}
func newApiError(resp *http.Response, url string) *ApiError {
contents, err := ioutil.ReadAll(resp.Body)
var body string
if err != nil {
body = "Unreadable body."
} else {
body = string(contents)
}
data := map[string]interface{}{}
if json.Unmarshal(contents, &data) == nil {
delete(data, "id")
delete(data, "links")
delete(data, "actions")
delete(data, "type")
delete(data, "status")
buf := &bytes.Buffer{}
for k, v := range data {
if v == nil {
continue
}
if buf.Len() > 0 {
buf.WriteString(", ")
}
fmt.Fprintf(buf, "%s=%v", k, v)
}
body = buf.String()
}
formattedMsg := fmt.Sprintf("Bad response statusCode [%d]. Status [%s]. Body: [%s] from [%s]",
resp.StatusCode, resp.Status, body, url)
return &ApiError{
Url: url,
Msg: formattedMsg,
StatusCode: resp.StatusCode,
Status: resp.Status,
Body: body,
}
}
func contains(array []string, item string) bool {
for _, check := range array {
if check == item {
return true
}
}
return false
}
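// appendFilters: illustrative example (not part of the original source):
//
//	appendFilters("http://host/v2-beta/hosts", map[string]interface{}{"state": "active"})
//	// -> "http://host/v2-beta/hosts?state=active", nil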
func appendFilters(urlString string, filters map[string]interface{}) (string, error) {
if len(filters) == 0 {
return urlString, nil
}
u, err := url.Parse(urlString)
if err != nil {
return "", err
}
q := u.Query()
for k, v := range filters {
if l, ok := v.([]string); ok {
for _, v := range l {
q.Add(k, v)
}
} else {
q.Add(k, fmt.Sprintf("%v", v))
}
}
u.RawQuery = q.Encode()
return u.String(), nil
}
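// NormalizeUrl: illustrative examples (not part of the original source):
//
//	NormalizeUrl("http://host:8080")        // -> "http://host:8080/v2-beta"
//	NormalizeUrl("http://host:8080/v1/foo") // -> "http://host:8080/v2-beta/foo"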
func NormalizeUrl(existingUrl string) (string, error) {
u, err := url.Parse(existingUrl)
if err != nil {
return "", err
}
if u.Path == "" || u.Path == "/" {
u.Path = "v2-beta"
} else if u.Path == "/v1" || strings.HasPrefix(u.Path, "/v1/") {
u.Path = strings.Replace(u.Path, "/v1", "/v2-beta", 1)
}
return u.String(), nil
}
func setupRancherBaseClient(rancherClient *RancherBaseClientImpl, opts *ClientOpts) error {
var err error
opts.Url, err = NormalizeUrl(opts.Url)
if err != nil {
return err
}
if opts.Timeout == 0 {
opts.Timeout = time.Second * time.Duration(defaultTimeout())
}
client := &http.Client{Timeout: opts.Timeout}
req, err := http.NewRequest("GET", opts.Url, nil)
if err != nil {
return err
}
req.SetBasicAuth(opts.AccessKey, opts.SecretKey)
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return newApiError(resp, opts.Url)
}
schemasUrls := resp.Header.Get("X-API-Schemas")
if len(schemasUrls) == 0 {
return errors.New("Failed to find schema at [" + opts.Url + "]")
}
if schemasUrls != opts.Url {
req, err = http.NewRequest("GET", schemasUrls, nil)
req.SetBasicAuth(opts.AccessKey, opts.SecretKey)
if err != nil {
return err
}
resp, err = client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return newApiError(resp, opts.Url)
}
}
var schemas Schemas
bytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
err = json.Unmarshal(bytes, &schemas)
if err != nil {
return err
}
rancherClient.Opts = opts
rancherClient.Schemas = &schemas
for _, schema := range schemas.Data {
rancherClient.Types[schema.Id] = schema
}
return nil
}
func NewListOpts() *ListOpts {
return &ListOpts{
Filters: map[string]interface{}{},
}
}
func (rancherClient *RancherBaseClientImpl) setupRequest(req *http.Request) {
req.SetBasicAuth(rancherClient.Opts.AccessKey, rancherClient.Opts.SecretKey)
}
func (rancherClient *RancherBaseClientImpl) newHttpClient() *http.Client {
if rancherClient.Opts.Timeout == 0 {
rancherClient.Opts.Timeout = time.Second * time.Duration(defaultTimeout())
}
return &http.Client{Timeout: rancherClient.Opts.Timeout}
}
func (rancherClient *RancherBaseClientImpl) doDelete(url string) error {
client := rancherClient.newHttpClient()
req, err := http.NewRequest("DELETE", url, nil)
if err != nil {
return err
}
rancherClient.setupRequest(req)
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
io.Copy(ioutil.Discard, resp.Body)
if resp.StatusCode >= 300 {
return newApiError(resp, url)
}
return nil
}
func (rancherClient *RancherBaseClientImpl) Websocket(url string, headers map[string][]string) (*websocket.Conn, *http.Response, error) {
	httpHeaders := http.Header{}
	// copy the caller-supplied headers into the request header set
	for k, v := range headers {
		httpHeaders[k] = v
	}
if rancherClient.Opts != nil {
s := rancherClient.Opts.AccessKey + ":" + rancherClient.Opts.SecretKey
httpHeaders.Add("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(s)))
}
return dialer.Dial(url, http.Header(httpHeaders))
}
func (rancherClient *RancherBaseClientImpl) doGet(url string, opts *ListOpts, respObject interface{}) error {
if opts == nil {
opts = NewListOpts()
}
url, err := appendFilters(url, opts.Filters)
if err != nil {
return err
}
if debug {
fmt.Println("GET " + url)
}
client := rancherClient.newHttpClient()
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return err
}
rancherClient.setupRequest(req)
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return newApiError(resp, url)
}
byteContent, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if debug {
fmt.Println("Response <= " + string(byteContent))
}
if err := json.Unmarshal(byteContent, respObject); err != nil {
return errors.Wrap(err, fmt.Sprintf("Failed to parse: %s", byteContent))
}
return nil
}
func (rancherClient *RancherBaseClientImpl) List(schemaType string, opts *ListOpts, respObject interface{}) error {
return rancherClient.doList(schemaType, opts, respObject)
}
func (rancherClient *RancherBaseClientImpl) doList(schemaType string, opts *ListOpts, respObject interface{}) error {
schema, ok := rancherClient.Types[schemaType]
if !ok {
return errors.New("Unknown schema type [" + schemaType + "]")
}
if !contains(schema.CollectionMethods, "GET") {
return errors.New("Resource type [" + schemaType + "] is not listable")
}
collectionUrl, ok := schema.Links[COLLECTION]
if !ok {
return errors.New("Failed to find collection URL for [" + schemaType + "]")
}
return rancherClient.doGet(collectionUrl, opts, respObject)
}
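// Illustrative usage only (not part of the upstream client): NewListOpts and
// List are defined in this file; the "container" schema type, the filter key,
// and the generic map response below are assumptions standing in for the typed
// wrappers generated elsewhere in this package.
func exampleListRunningContainers(rancherClient *RancherBaseClientImpl) (map[string]interface{}, error) {
opts := NewListOpts()
opts.Filters["state"] = "running" // hypothetical filter key
var collection map[string]interface{}
if err := rancherClient.List("container", opts, &collection); err != nil {
return nil, err
}
return collection, nil
}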
func (rancherClient *RancherBaseClientImpl) doNext(nextUrl string, respObject interface{}) error {
return rancherClient.doGet(nextUrl, nil, respObject)
}
func (rancherClient *RancherBaseClientImpl) Post(url string, createObj interface{}, respObject interface{}) error {
return rancherClient.doModify("POST", url, createObj, respObject)
}
func (rancherClient *RancherBaseClientImpl) GetLink(resource Resource, link string, respObject interface{}) error {
url := resource.Links[link]
if url == "" {
return fmt.Errorf("Failed to find link: %s", link)
}
return rancherClient.doGet(url, &ListOpts{}, respObject)
}
func (rancherClient *RancherBaseClientImpl) doModify(method string, url string, createObj interface{}, respObject interface{}) error {
bodyContent, err := json.Marshal(createObj)
if err != nil {
return err
}
if debug {
fmt.Println(method + " " + url)
fmt.Println("Request => " + string(bodyContent))
}
client := rancherClient.newHttpClient()
req, err := http.NewRequest(method, url, bytes.NewBuffer(bodyContent))
if err != nil {
return err
}
rancherClient.setupRequest(req)
req.Header.Set("Content-Type", "application/json")
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode >= 300 {
return newApiError(resp, url)
}
byteContent, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if len(byteContent) > 0 {
if debug {
fmt.Println("Response <= " + string(byteContent))
}
return json.Unmarshal(byteContent, respObject)
}
return nil
}
func (rancherClient *RancherBaseClientImpl) Create(schemaType string, createObj interface{}, respObject interface{}) error {
return rancherClient.doCreate(schemaType, createObj, respObject)
}
func (rancherClient *RancherBaseClientImpl) doCreate(schemaType string, createObj interface{}, respObject interface{}) error {
if createObj == nil {
createObj = map[string]string{}
}
if respObject == nil {
respObject = &map[string]interface{}{}
}
schema, ok := rancherClient.Types[schemaType]
if !ok {
return errors.New("Unknown schema type [" + schemaType + "]")
}
if !contains(schema.CollectionMethods, "POST") {
return errors.New("Resource type [" + schemaType + "] is not creatable")
}
var collectionUrl string
collectionUrl, ok = schema.Links[COLLECTION]
if !ok {
// return errors.New("Failed to find collection URL for [" + schemaType + "]")
// This is a hack to address https://github.com/rancher/cattle/issues/254
re := regexp.MustCompile("schemas.*")
collectionUrl = re.ReplaceAllString(schema.Links[SELF], schema.PluralName)
}
return rancherClient.doModify("POST", collectionUrl, createObj, respObject)
}
func (rancherClient *RancherBaseClientImpl) Update(schemaType string, existing *Resource, updates interface{}, respObject interface{}) error {
return rancherClient.doUpdate(schemaType, existing, updates, respObject)
}
func (rancherClient *RancherBaseClientImpl) doUpdate(schemaType string, existing *Resource, updates interface{}, respObject interface{}) error {
if existing == nil {
return errors.New("Existing object is nil")
}
selfUrl, ok := existing.Links[SELF]
if !ok {
return errors.New(fmt.Sprintf("Failed to find self URL of [%v]", existing))
}
if updates == nil {
updates = map[string]string{}
}
if respObject == nil {
respObject = &map[string]interface{}{}
}
schema, ok := rancherClient.Types[schemaType]
if !ok {
return errors.New("Unknown schema type [" + schemaType + "]")
}
if !contains(schema.ResourceMethods, "PUT") {
return errors.New("Resource type [" + schemaType + "] is not updatable")
}
return rancherClient.doModify("PUT", selfUrl, updates, respObject)
}
func (rancherClient *RancherBaseClientImpl) ById(schemaType string, id string, respObject interface{}) error {
return rancherClient.doById(schemaType, id, respObject)
}
func (rancherClient *RancherBaseClientImpl) doById(schemaType string, id string, respObject interface{}) error {
schema, ok := rancherClient.Types[schemaType]
if !ok {
return errors.New("Unknown schema type [" + schemaType + "]")
}
if !contains(schema.ResourceMethods, "GET") {
return errors.New("Resource type [" + schemaType + "] can not be looked up by ID")
}
collectionUrl, ok := schema.Links[COLLECTION]
if !ok {
return errors.New("Failed to find collection URL for [" + schemaType + "]")
}
err := rancherClient.doGet(collectionUrl+"/"+id, nil, respObject)
//TODO check for 404 and return nil, nil
return err
}
func (rancherClient *RancherBaseClientImpl) Delete(existing *Resource) error {
if existing == nil {
return nil
}
return rancherClient.doResourceDelete(existing.Type, existing)
}
func (rancherClient *RancherBaseClientImpl) doResourceDelete(schemaType string, existing *Resource) error {
schema, ok := rancherClient.Types[schemaType]
if !ok {
return errors.New("Unknown schema type [" + schemaType + "]")
}
if !contains(schema.ResourceMethods, "DELETE") {
return errors.New("Resource type [" + schemaType + "] can not be deleted")
}
selfUrl, ok := existing.Links[SELF]
if !ok {
return errors.New(fmt.Sprintf("Failed to find self URL of [%v]", existing))
}
return rancherClient.doDelete(selfUrl)
}
func (rancherClient *RancherBaseClientImpl) Reload(existing *Resource, output interface{}) error {
selfUrl, ok := existing.Links[SELF]
if !ok {
return errors.New(fmt.Sprintf("Failed to find self URL of [%v]", existing))
}
return rancherClient.doGet(selfUrl, NewListOpts(), output)
}
func (rancherClient *RancherBaseClientImpl) Action(schemaType string, action string,
existing *Resource, inputObject, respObject interface{}) error {
return rancherClient.doAction(schemaType, action, existing, inputObject, respObject)
}
func (rancherClient *RancherBaseClientImpl) doAction(schemaType string, action string,
existing *Resource, inputObject, respObject interface{}) error {
if existing == nil {
return errors.New("Existing object is nil")
}
actionUrl, ok := existing.Actions[action]
if !ok {
return errors.New(fmt.Sprintf("Action [%v] not available on [%v]", action, existing))
}
_, ok = rancherClient.Types[schemaType]
if !ok {
return errors.New("Unknown schema type [" + schemaType + "]")
}
var input io.Reader
if inputObject != nil {
bodyContent, err := json.Marshal(inputObject)
if err != nil {
return err
}
if debug {
fmt.Println("Request => " + string(bodyContent))
}
input = bytes.NewBuffer(bodyContent)
}
client := rancherClient.newHttpClient()
req, err := http.NewRequest("POST", actionUrl, input)
if err != nil {
return err
}
rancherClient.setupRequest(req)
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Content-Length", "0")
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode >= 300 {
return newApiError(resp, actionUrl)
}
byteContent, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if debug {
fmt.Println("Response <= " + string(byteContent))
}
return json.Unmarshal(byteContent, respObject)
}
func (rancherClient *RancherBaseClientImpl) GetOpts() *ClientOpts {
return rancherClient.Opts
}
func (rancherClient *RancherBaseClientImpl) GetSchemas() *Schemas {
return rancherClient.Schemas
}
func (rancherClient *RancherBaseClientImpl) GetTypes() map[string]Schema {
return rancherClient.Types
}
func init() {
debug = os.Getenv("RANCHER_CLIENT_DEBUG") == "true"
if debug {
fmt.Println("Rancher client debug on")
}
}
func defaultTimeout() int {
defaultTimeout, _ := strconv.Atoi(os.Getenv("RANCHER_CLIENT_TIMEOUT"))
if defaultTimeout == 0 {
defaultTimeout = 10
}
return defaultTimeout
}
|
camptocamp/conplicity
|
vendor/github.com/rancher/go-rancher/v2/common.go
|
GO
|
apache-2.0
| 14,904 | 22.694754 | 144 | 0.690083 | false |
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=US-ASCII">
<title>buffered_read_stream::async_write_some</title>
<link rel="stylesheet" href="../../../../../doc/src/boostbook.css" type="text/css">
<meta name="generator" content="DocBook XSL Stylesheets V1.78.1">
<link rel="home" href="../../../boost_asio.html" title="Boost.Asio">
<link rel="up" href="../buffered_read_stream.html" title="buffered_read_stream">
<link rel="prev" href="async_read_some.html" title="buffered_read_stream::async_read_some">
<link rel="next" href="buffered_read_stream.html" title="buffered_read_stream::buffered_read_stream">
</head>
<body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF">
<table cellpadding="2" width="100%"><tr>
<td valign="top"><img alt="Boost C++ Libraries" width="277" height="86" src="../../../../../boost.png"></td>
<td align="center"><a href="../../../../../index.html">Home</a></td>
<td align="center"><a href="../../../../../libs/libraries.htm">Libraries</a></td>
<td align="center"><a href="http://www.boost.org/users/people.html">People</a></td>
<td align="center"><a href="http://www.boost.org/users/faq.html">FAQ</a></td>
<td align="center"><a href="../../../../../more/index.htm">More</a></td>
</tr></table>
<hr>
<div class="spirit-nav">
<a accesskey="p" href="async_read_some.html"><img src="../../../../../doc/src/images/prev.png" alt="Prev"></a><a accesskey="u" href="../buffered_read_stream.html"><img src="../../../../../doc/src/images/up.png" alt="Up"></a><a accesskey="h" href="../../../boost_asio.html"><img src="../../../../../doc/src/images/home.png" alt="Home"></a><a accesskey="n" href="buffered_read_stream.html"><img src="../../../../../doc/src/images/next.png" alt="Next"></a>
</div>
<div class="section">
<div class="titlepage"><div><div><h4 class="title">
<a name="boost_asio.reference.buffered_read_stream.async_write_some"></a><a class="link" href="async_write_some.html" title="buffered_read_stream::async_write_some">buffered_read_stream::async_write_some</a>
</h4></div></div></div>
<p>
<a class="indexterm" name="idp79751832"></a>
Start an asynchronous write. The data
being written must be valid for the lifetime of the asynchronous operation.
</p>
<pre class="programlisting"><span class="keyword">template</span><span class="special"><</span>
<span class="keyword">typename</span> <a class="link" href="../ConstBufferSequence.html" title="Constant buffer sequence requirements">ConstBufferSequence</a><span class="special">,</span>
<span class="keyword">typename</span> <a class="link" href="../WriteHandler.html" title="Write handler requirements">WriteHandler</a><span class="special">></span>
<a class="link" href="../asynchronous_operations.html#boost_asio.reference.asynchronous_operations.return_type_of_an_initiating_function"><span class="emphasis"><em>void-or-deduced</em></span></a> <span class="identifier">async_write_some</span><span class="special">(</span>
<span class="keyword">const</span> <span class="identifier">ConstBufferSequence</span> <span class="special">&</span> <span class="identifier">buffers</span><span class="special">,</span>
<span class="identifier">WriteHandler</span> <span class="identifier">handler</span><span class="special">);</span>
</pre>
</div>
<table xmlns:rev="http://www.cs.rpi.edu/~gregod/boost/tools/doc/revision" width="100%"><tr>
<td align="left"></td>
<td align="right"><div class="copyright-footer">Copyright © 2003-2013 Christopher M. Kohlhoff<p>
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at <a href="http://www.boost.org/LICENSE_1_0.txt" target="_top">http://www.boost.org/LICENSE_1_0.txt</a>)
</p>
</div></td>
</tr></table>
<hr>
<div class="spirit-nav">
<a accesskey="p" href="async_read_some.html"><img src="../../../../../doc/src/images/prev.png" alt="Prev"></a><a accesskey="u" href="../buffered_read_stream.html"><img src="../../../../../doc/src/images/up.png" alt="Up"></a><a accesskey="h" href="../../../boost_asio.html"><img src="../../../../../doc/src/images/home.png" alt="Home"></a><a accesskey="n" href="buffered_read_stream.html"><img src="../../../../../doc/src/images/next.png" alt="Next"></a>
</div>
</body>
</html>
|
NixaSoftware/CVis
|
venv/bin/doc/html/boost_asio/reference/buffered_read_stream/async_write_some.html
|
HTML
|
apache-2.0
| 4,326 | 77.654545 | 453 | 0.65927 | false |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.applicationhistoryservice;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse;
import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.server.timeline.security.authorize.TimelinePolicyProvider;
import com.google.common.base.Preconditions;
public class ApplicationHistoryClientService extends AbstractService {
private static final Log LOG = LogFactory
.getLog(ApplicationHistoryClientService.class);
private ApplicationHistoryManager history;
private ApplicationHistoryProtocol protocolHandler;
private Server server;
private InetSocketAddress bindAddress;
public ApplicationHistoryClientService(ApplicationHistoryManager history) {
super("ApplicationHistoryClientService");
this.history = history;
this.protocolHandler = new ApplicationHSClientProtocolHandler();
}
protected void serviceStart() throws Exception {
Configuration conf = getConfig();
YarnRPC rpc = YarnRPC.create(conf);
InetSocketAddress address = conf.getSocketAddr(
YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
YarnConfiguration.TIMELINE_SERVICE_ADDRESS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ADDRESS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_PORT);
Preconditions.checkArgument(conf.getInt(
YarnConfiguration.TIMELINE_SERVICE_HANDLER_THREAD_COUNT,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_CLIENT_THREAD_COUNT) > 0,
"%s property value should be greater than zero",
YarnConfiguration.TIMELINE_SERVICE_HANDLER_THREAD_COUNT);
server =
rpc.getServer(ApplicationHistoryProtocol.class, protocolHandler,
address, conf, null, conf.getInt(
YarnConfiguration.TIMELINE_SERVICE_HANDLER_THREAD_COUNT,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_CLIENT_THREAD_COUNT));
// Enable service authorization?
if (conf.getBoolean(
CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
refreshServiceAcls(conf, new TimelinePolicyProvider());
}
server.start();
this.bindAddress =
conf.updateConnectAddr(YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
YarnConfiguration.TIMELINE_SERVICE_ADDRESS,
YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ADDRESS,
server.getListenerAddress());
LOG.info("Instantiated ApplicationHistoryClientService at "
+ this.bindAddress);
super.serviceStart();
}
@Override
protected void serviceStop() throws Exception {
if (server != null) {
server.stop();
}
super.serviceStop();
}
@Private
public ApplicationHistoryProtocol getClientHandler() {
return this.protocolHandler;
}
@Private
public InetSocketAddress getBindAddress() {
return this.bindAddress;
}
private void refreshServiceAcls(Configuration configuration,
PolicyProvider policyProvider) {
this.server.refreshServiceAcl(configuration, policyProvider);
}
private class ApplicationHSClientProtocolHandler implements
ApplicationHistoryProtocol {
@Override
public CancelDelegationTokenResponse cancelDelegationToken(
CancelDelegationTokenRequest request) throws YarnException, IOException {
// TODO Auto-generated method stub
return null;
}
@Override
public GetApplicationAttemptReportResponse getApplicationAttemptReport(
GetApplicationAttemptReportRequest request) throws YarnException,
IOException {
try {
GetApplicationAttemptReportResponse response =
GetApplicationAttemptReportResponse.newInstance(history
.getApplicationAttempt(request.getApplicationAttemptId()));
return response;
} catch (IOException e) {
throw new ApplicationAttemptNotFoundException(e.getMessage());
}
}
@Override
public GetApplicationAttemptsResponse getApplicationAttempts(
GetApplicationAttemptsRequest request) throws YarnException,
IOException {
GetApplicationAttemptsResponse response =
GetApplicationAttemptsResponse
.newInstance(new ArrayList<ApplicationAttemptReport>(history
.getApplicationAttempts(request.getApplicationId()).values()));
return response;
}
@Override
public GetApplicationReportResponse getApplicationReport(
GetApplicationReportRequest request) throws YarnException, IOException {
try {
ApplicationId applicationId = request.getApplicationId();
GetApplicationReportResponse response =
GetApplicationReportResponse.newInstance(history
.getApplication(applicationId));
return response;
} catch (IOException e) {
throw new ApplicationNotFoundException(e.getMessage());
}
}
@Override
public GetApplicationsResponse getApplications(
GetApplicationsRequest request) throws YarnException, IOException {
GetApplicationsResponse response =
GetApplicationsResponse.newInstance(new ArrayList<ApplicationReport>(
history.getAllApplications().values()));
return response;
}
@Override
public GetContainerReportResponse getContainerReport(
GetContainerReportRequest request) throws YarnException, IOException {
try {
GetContainerReportResponse response =
GetContainerReportResponse.newInstance(history.getContainer(request
.getContainerId()));
return response;
} catch (IOException e) {
throw new ContainerNotFoundException(e.getMessage());
}
}
@Override
public GetContainersResponse getContainers(GetContainersRequest request)
throws YarnException, IOException {
GetContainersResponse response =
GetContainersResponse.newInstance(new ArrayList<ContainerReport>(
history.getContainers(request.getApplicationAttemptId()).values()));
return response;
}
@Override
public GetDelegationTokenResponse getDelegationToken(
GetDelegationTokenRequest request) throws YarnException, IOException {
// TODO Auto-generated method stub
return null;
}
@Override
public RenewDelegationTokenResponse renewDelegationToken(
RenewDelegationTokenRequest request) throws YarnException, IOException {
// TODO Auto-generated method stub
return null;
}
}
}
|
oza/hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
|
Java
|
apache-2.0
| 9,788 | 40.474576 | 88 | 0.764712 | false |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Spotify AB
from __future__ import absolute_import, division, print_function
import click
from .tree import tree as ttree
from .errors import InvalidRAMLError
from .utils import update_mime_types as umt
from ._helpers import load_file
from ramlfications import validate as vvalidate
@click.group()
def main():
"""Yet Another RAML Parser"""
# Needed to collect the validate & tree commands
@main.command(help="Validate a RAML file.")
@click.argument("ramlfile", type=click.Path(exists=True))
@click.option("--config", "-c", type=click.Path(exists=True),
help="Additionally supported items beyond RAML spec.")
def validate(ramlfile, config):
"""Validate a given RAML file."""
try:
vvalidate(ramlfile, config)
click.secho("Success! Valid RAML file: {0}".format(ramlfile),
fg="green")
except InvalidRAMLError as e:
msg = "Error validating file {0}: \n{1}".format(ramlfile, e)
click.secho(msg, fg="red", err=True)
raise SystemExit(1)
@main.command(help="Visualize the RAML file as a tree.")
@click.argument('ramlfile', type=click.Path(exists=True))
@click.option("-C", "--color", type=click.Choice(['dark', 'light']),
default=None,
help=("Color theme 'light' for dark-screened backgrounds"))
@click.option("-o", "--output", type=click.File('w'),
help=("Save tree output to file"))
@click.option("-v", "--verbose", default=0, count=True,
help="Include methods for each endpoint")
@click.option("-V", "--validate", default=False, is_flag=True,
help="Validate RAML file")
@click.option("-c", "--config", type=click.Path(exists=True),
help="Additionally supported items beyond RAML spec.")
def tree(ramlfile, color, output, verbose, validate, config):
"""Pretty-print a tree of the RAML-defined API."""
try:
load_obj = load_file(ramlfile)
ttree(load_obj, color, output, verbose, validate, config)
except InvalidRAMLError as e:
msg = '"{0}" is not a valid RAML file: {1}'.format(
click.format_filename(ramlfile), e)
click.secho(msg, fg="red", err=True)
raise SystemExit(1)
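# Illustrative invocations (a sketch: the file names are placeholders, and
# whether an installed console-script alias exists depends on packaging;
# running the module directly with "python -m ramlfications" matches this
# __main__.py):
#
#   python -m ramlfications validate api.raml
#   python -m ramlfications validate api.raml --config extra-support.cfg
#   python -m ramlfications tree api.raml -v --color light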
@main.command(help="Update RAMLfications' supported MIME types from IANA.")
def update():
umt()
if __name__ == "__main__":
main()
|
jasonrhaas/ramlfications
|
ramlfications/__main__.py
|
Python
|
apache-2.0
| 2,425 | 33.15493 | 75 | 0.640412 | false |
streams-persist-riak
=====================
Read/write to/from Riak
## Configuration
| Schema |
|--------|
| [RiakConfiguration.json](org/apache/streams/riak/RiakConfiguration.json "RiakConfiguration.json") [RiakConfiguration.html](apidocs/org/apache/streams/riak/RiakConfiguration.html "javadoc") |
## Components

| Class | Configuration |
|-------|---------------|
| RiakBinaryPersistWriter [RiakBinaryPersistWriter.html](apidocs/org/apache/streams/riak/RiakBinaryPersistWriter.html "javadoc") | [RiakConfiguration.json](org/apache/streams/riak/RiakConfiguration.json "RiakConfiguration.json") [RiakConfiguration.html](apidocs/org/apache/streams/riak/RiakConfiguration.html "javadoc") |
| RiakBinaryPersistReader [RiakBinaryPersistReader.html](apidocs/org/apache/streams/riak/RiakBinaryPersistReader.html "javadoc") | [RiakConfiguration.json](org/apache/streams/riak/RiakConfiguration.json "RiakConfiguration.json") [RiakConfiguration.html](apidocs/org/apache/streams/riak/RiakConfiguration.html "javadoc") |
| RiakHttpPersistWriter [RiakHttpPersistWriter.html](apidocs/org/apache/streams/riak/RiakHttpPersistWriter "javadoc") | [RiakConfiguration.json](org/apache/streams/riak/RiakConfiguration.json "RiakConfiguration.json") [RiakConfiguration.html](apidocs/org/apache/streams/riak/RiakConfiguration.html "javadoc") |
| RiakHttpPersistReader [RiakHttpPersistReader.html](apidocs/org/apache/streams/riak/RiakHttpPersistReader "javadoc") | [RiakConfiguration.json](org/apache/streams/riak/RiakConfiguration.json "RiakConfiguration.json") [RiakConfiguration.html](apidocs/org/apache/streams/riak/RiakConfiguration.html "javadoc") |
[JavaDocs](apidocs/index.html "JavaDocs")
###### Licensed under Apache License 2.0 - http://www.apache.org/licenses/LICENSE-2.0
|
apache/streams
|
streams-contrib/streams-persist-riak/src/site/markdown/index.md
|
Markdown
|
apache-2.0
| 1,817 | 71.68 | 322 | 0.788112 | false |
#ifndef SF1R_DISTRIBUTE_DRIVER_H
#define SF1R_DISTRIBUTE_DRIVER_H
#include <util/driver/Router.h>
#include <util/driver/Request.h>
#include <util/singleton.h>
#include <util/concurrent_queue.h>
#include <boost/shared_ptr.hpp>
#include <boost/scoped_ptr.hpp>
#include <boost/function.hpp>
namespace sf1r
{
class DistributeDriver
{
public:
typedef boost::shared_ptr<izenelib::driver::Router> RouterPtr;
typedef boost::function<bool(int)> CBWriteHandlerT;
static DistributeDriver* get()
{
return izenelib::util::Singleton<DistributeDriver>::get();
}
DistributeDriver();
void init(const RouterPtr& router);
void stop();
bool on_new_req_available();
bool handleReqFromPrimaryInAsyncMode(int reqtype, const std::string& reqjsondata, const std::string& packed_data);
bool handleReqFromPrimary(int reqtype, const std::string& reqjsondata, const std::string& packed_data);
bool handleReqFromLog(int reqtype, const std::string& reqjsondata, const std::string& packed_data);
bool addCallbackWriteHandler(const std::string& name, const CBWriteHandlerT& handler);
void removeCallbackWriteHandler(const std::string& name);
bool pushCallbackWrite(const std::string& name, const std::string& packed_data);
private:
bool handleRequest(const std::string& reqjsondata, const std::string& packed_data, izenelib::driver::Request::kCallType calltype, bool call_sync = false);
void run();
std::map<std::string, CBWriteHandlerT> callback_handlers_;
RouterPtr router_;
boost::thread async_task_worker_;
izenelib::util::concurrent_queue<boost::function<bool()> > asyncWriteTasks_;
};
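// Usage sketch (illustrative): only the singleton accessor and the member
// functions declared above come from this header; constructing the Router with
// its default constructor is an assumption.
//
// DistributeDriver::RouterPtr router(new izenelib::driver::Router);
// DistributeDriver::get()->init(router);
// ... handle requests ...
// DistributeDriver::get()->stop();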
}
#endif
|
izenecloud/sf1r-ad-delivery
|
source/core/node-manager/DistributeDriver.h
|
C
|
apache-2.0
| 1,670 | 33.791667 | 158 | 0.732335 | false |
package httpclient_test
import (
"net/http"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/cloudfoundry/bosh-utils/httpclient"
"github.com/cloudfoundry/bosh-utils/httpclient/fakes"
"net"
"os"
"syscall"
)
var _ HTTPClient = &fakes.FakeHTTPClient{}
var _ = Describe("Linux-specific tests", func() {
It("enables TCP (socket) keepalive with an appropriate interval", func() {
// to test keepalive, we need a socket. A socket is an _active_ TCP connection to a server.
// we make our own server, connect to it, and make our assertions against the socket
laddr := "127.0.0.1:19642" // unlikely-to-be-used port number, unprivileged (1964, Feb, my birth)
readyToAccept := make(chan bool, 1)
go func() {
defer GinkgoRecover()
defer func() {
readyToAccept <- true
}()
ln, err := net.Listen("tcp", laddr)
Expect(err).ToNot(HaveOccurred())
readyToAccept <- true
_, err = ln.Accept()
Expect(err).ToNot(HaveOccurred())
}()
<-readyToAccept
client := CreateDefaultClient(nil)
connection, err := client.Transport.(*http.Transport).Dial("tcp", laddr)
Expect(err).ToNot(HaveOccurred())
tcpConn, ok := connection.(*net.TCPConn)
Expect(ok).To(BeTrue())
f, err := tcpConn.File()
Expect(err).ToNot(HaveOccurred())
sockoptValue, err := syscall.GetsockoptInt(int(f.Fd()), syscall.SOL_SOCKET, syscall.SO_KEEPALIVE)
err = os.NewSyscallError("getsockopt", err)
Expect(err).ToNot(HaveOccurred())
Expect(sockoptValue).To(Equal(0x1))
sockoptValue, err = syscall.GetsockoptInt(int(f.Fd()), syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL)
err = os.NewSyscallError("getsockopt", err)
Expect(err).ToNot(HaveOccurred())
Expect(sockoptValue).To(Equal(30))
})
})
|
monkeyherder/moirai
|
vendor/github.com/cloudfoundry/bosh-utils/httpclient/keepalive_syscall_linux_test.go
|
GO
|
apache-2.0
| 1,739 | 27.048387 | 100 | 0.689477 | false |
/*
* Copyright (C) 2007 Rob Manning
* [email protected]
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
package net.sourceforge.squirrel_sql.client.session.mainpanel;
import java.awt.Component;
import net.sourceforge.squirrel_sql.client.session.SQLExecutionInfo;
import net.sourceforge.squirrel_sql.fw.datasetviewer.DataSetException;
import net.sourceforge.squirrel_sql.fw.datasetviewer.ResultSetDataSet;
import net.sourceforge.squirrel_sql.fw.datasetviewer.ResultSetMetaDataDataSet;
import net.sourceforge.squirrel_sql.fw.datasetviewer.TableState;
import net.sourceforge.squirrel_sql.fw.id.IHasIdentifier;
import net.sourceforge.squirrel_sql.fw.id.IIdentifier;
public interface IResultTab {
/**
* Show the results from the passed <TT>IDataSet</TT>.
*
* @param rsds <TT>ResultSetDataSet</TT> to show results for.
* @param mdds <TT>ResultSetMetaDataDataSet</TT> for rsds.
* @param exInfo Execution info.
*
* @throws IllegalArgumentException
* Thrown if <tt>null</tt> <tt>SQLExecutionInfo</tt> passed.
*
* @throws DataSetException
* Thrown if an error occurred while processing the dataset.
*/
void showResults(ResultSetDataSet rsds, ResultSetMetaDataDataSet mdds,
SQLExecutionInfo exInfo) throws DataSetException;
/**
* Clear results and current SQL script.
*/
void clear();
/**
* Return the current SQL script.
*
* @return Current SQL script.
*/
String getSqlString();
/**
* Return the current SQL script with control characters removed.
*
* @return Current SQL script.
*/
String getViewableSqlString();
/**
* Return the title for this tab.
*/
String getTitle();
void closeTab();
void returnToTabbedPane();
Component getOutputComponent();
void reRunSQL();
/**
* @see IHasIdentifier#getIdentifier()
*/
IIdentifier getIdentifier();
TableState getResultSortableTableState();
void toggleShowFindPanel();
}
|
sdgdsffdsfff/bigtable-sql
|
src/main/java/net/sourceforge/squirrel_sql/client/session/mainpanel/IResultTab.java
|
Java
|
apache-2.0
| 2,730 | 29.685393 | 78 | 0.708791 | false |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<title>Uses of Class org.apache.poi.ss.formula.function.FunctionMetadataRegistry (POI API Documentation)</title>
<link rel="stylesheet" type="text/css" href="../../../../../../../stylesheet.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class org.apache.poi.ss.formula.function.FunctionMetadataRegistry (POI API Documentation)";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../../../org/apache/poi/ss/formula/function/FunctionMetadataRegistry.html" title="class in org.apache.poi.ss.formula.function">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>PREV</li>
<li>NEXT</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../../index.html?org/apache/poi/ss/formula/function//class-useFunctionMetadataRegistry.html" target="_top">FRAMES</a></li>
<li><a href="FunctionMetadataRegistry.html" target="_top">NO FRAMES</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h2 title="Uses of Class org.apache.poi.ss.formula.function.FunctionMetadataRegistry" class="title">Uses of Class<br>org.apache.poi.ss.formula.function.FunctionMetadataRegistry</h2>
</div>
<div class="classUseContainer">No usage of org.apache.poi.ss.formula.function.FunctionMetadataRegistry</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../../../org/apache/poi/ss/formula/function/FunctionMetadataRegistry.html" title="class in org.apache.poi.ss.formula.function">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>PREV</li>
<li>NEXT</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../../index.html?org/apache/poi/ss/formula/function//class-useFunctionMetadataRegistry.html" target="_top">FRAMES</a></li>
<li><a href="FunctionMetadataRegistry.html" target="_top">NO FRAMES</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<p class="legalCopy"><small>
<i>Copyright 2014 The Apache Software Foundation or
its licensors, as applicable.</i>
</small></p>
</body>
</html>
|
RyoSaeba69/Bio-info
|
mylib/poi-3.11/docs/apidocs/org/apache/poi/ss/formula/function/class-use/FunctionMetadataRegistry.html
|
HTML
|
apache-2.0
| 4,555 | 37.931624 | 181 | 0.614709 | false |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!--NewPage-->
<HTML>
<HEAD>
<!-- Generated by javadoc (build 1.6.0_20) on Wed Mar 30 21:34:42 CST 2011 -->
<TITLE>
org.apache.hadoop.util.bloom (Facebook's realtime distributed database, powered by Apache Hadoop based on 0.20-append branch 0.20.1-dev API)
</TITLE>
<META NAME="date" CONTENT="2011-03-30">
<LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../../stylesheet.css" TITLE="Style">
</HEAD>
<BODY BGCOLOR="white">
<FONT size="+1" CLASS="FrameTitleFont">
<A HREF="../../../../../org/apache/hadoop/util/bloom/package-summary.html" target="classFrame">org.apache.hadoop.util.bloom</A></FONT>
<TABLE BORDER="0" WIDTH="100%" SUMMARY="">
<TR>
<TD NOWRAP><FONT size="+1" CLASS="FrameHeadingFont">
Interfaces</FONT>
<FONT CLASS="FrameItemFont">
<BR>
<A HREF="RemoveScheme.html" title="interface in org.apache.hadoop.util.bloom" target="classFrame"><I>RemoveScheme</I></A></FONT></TD>
</TR>
</TABLE>
<TABLE BORDER="0" WIDTH="100%" SUMMARY="">
<TR>
<TD NOWRAP><FONT size="+1" CLASS="FrameHeadingFont">
Classes</FONT>
<FONT CLASS="FrameItemFont">
<BR>
<A HREF="BloomFilter.html" title="class in org.apache.hadoop.util.bloom" target="classFrame">BloomFilter</A>
<BR>
<A HREF="CountingBloomFilter.html" title="class in org.apache.hadoop.util.bloom" target="classFrame">CountingBloomFilter</A>
<BR>
<A HREF="DynamicBloomFilter.html" title="class in org.apache.hadoop.util.bloom" target="classFrame">DynamicBloomFilter</A>
<BR>
<A HREF="Filter.html" title="class in org.apache.hadoop.util.bloom" target="classFrame">Filter</A>
<BR>
<A HREF="HashFunction.html" title="class in org.apache.hadoop.util.bloom" target="classFrame">HashFunction</A>
<BR>
<A HREF="Key.html" title="class in org.apache.hadoop.util.bloom" target="classFrame">Key</A>
<BR>
<A HREF="RetouchedBloomFilter.html" title="class in org.apache.hadoop.util.bloom" target="classFrame">RetouchedBloomFilter</A></FONT></TD>
</TR>
</TABLE>
</BODY>
</HTML>
|
submergerock/avatar-hadoop
|
docs/api/org/apache/hadoop/util/bloom/package-frame.html
|
HTML
|
apache-2.0
| 2,040 | 36.090909 | 140 | 0.709314 | false |
/*
patternLock.js v 1.0.1
Author: Sudhanshu Yadav
Copyright (c) 2016 Sudhanshu Yadav - ignitersworld.com , released under the MIT license.
Demo and documentation on: ignitersworld.com/lab/patternLock.html
*/
.patt-holder{background:#3382c0; -ms-touch-action: none;}
.patt-wrap{position:relative; cursor:pointer;}
.patt-wrap ul, .patt-wrap li{
list-style: none;
margin:0;
padding: 0;
}
.patt-circ{
position:relative;
float: left;
box-sizing: border-box;
-moz-box-sizing: border-box;
}
.patt-circ.hovered{
border:3px solid #009900;
}
.patt-error .patt-circ.hovered{
border:3px solid #BA1B26;
}
.patt-hidden .patt-circ.hovered{border:0;}
.patt-dots{
background: #FFF;
width: 10px;height: 10px;
border-radius:5px;
position:absolute;
top:50%;
left:50%;
margin-top:-5px;
margin-left:-5px;
}
.patt-lines{
border-radius:5px;
height:10px;
background:rgba(255,255,255,.7);
position:absolute;
transform-origin:5px 5px;
-ms-transform-origin:5px 5px; /* IE 9 */
-webkit-transform-origin:5px 5px;
}
.patt-hidden .patt-lines{
display:none;
}
|
godmeir/frontWidget
|
lock/patternLock-master/patternLock.css
|
CSS
|
apache-2.0
| 1,062 | 19.037736 | 89 | 0.713748 | false |
/*
* Copyright (c) 2013-2016, ARM Limited, All Rights Reserved
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <ctype.h>
#include <cstdio>
#include <string.h>
#include "greentea-client/test_env.h"
#include "greentea-client/greentea_metrics.h"
#include "mbed_trace.h"
#include "platform/mbed_retarget.h"
/**
* Generic test suite transport protocol keys
*/
const char* GREENTEA_TEST_ENV_END = "end";
const char* GREENTEA_TEST_ENV_EXIT = "__exit";
const char* GREENTEA_TEST_ENV_SYNC = "__sync";
const char* GREENTEA_TEST_ENV_TIMEOUT = "__timeout";
const char* GREENTEA_TEST_ENV_HOST_TEST_NAME = "__host_test_name";
const char* GREENTEA_TEST_ENV_HOST_TEST_VERSION = "__version";
/**
* Test suite success code strings
*/
const char* GREENTEA_TEST_ENV_SUCCESS = "success";
const char* GREENTEA_TEST_ENV_FAILURE = "failure";
/**
* Test case transport protocol start/finish keys
*/
const char* GREENTEA_TEST_ENV_TESTCASE_NAME = "__testcase_name";
const char* GREENTEA_TEST_ENV_TESTCASE_COUNT = "__testcase_count";
const char* GREENTEA_TEST_ENV_TESTCASE_START = "__testcase_start";
const char* GREENTEA_TEST_ENV_TESTCASE_FINISH = "__testcase_finish";
const char* GREENTEA_TEST_ENV_TESTCASE_SUMMARY = "__testcase_summary";
// Code Coverage (LCOV) transport protocol keys
const char* GREENTEA_TEST_ENV_LCOV_START = "__coverage_start";
/**
* Auxiliary functions
*/
static void greentea_notify_timeout(const int);
static void greentea_notify_hosttest(const char *);
static void greentea_notify_completion(const int);
static void greentea_notify_version();
/** \brief Handle the handshake with the host
* \details This contains the shared handshake functionality that is used by both
* GREENTEA_SETUP and GREENTEA_SETUP_UUID.
* This function is blocking.
*/
void _GREENTEA_SETUP_COMMON(const int timeout, const char *host_test_name, char *buffer, size_t size) {
greentea_metrics_setup();
// Key-value protocol handshake function. Waits for {{__sync;...}} message
// Sync preamble: "{{__sync;0dad4a9d-59a3-4aec-810d-d5fb09d852c1}}"
// Example value of sync_uuid == "0dad4a9d-59a3-4aec-810d-d5fb09d852c1"
char _key[8] = {0};
while (1) {
greentea_parse_kv(_key, buffer, sizeof(_key), size);
greentea_write_string("mbedmbedmbedmbedmbedmbedmbedmbed\r\n");
if (strcmp(_key, GREENTEA_TEST_ENV_SYNC) == 0) {
// Found correct __sync message
greentea_send_kv(_key, buffer);
break;
}
}
#ifdef MBED_CONF_MBED_TRACE_ENABLE
mbed_trace_init();
#endif
greentea_notify_version();
greentea_notify_timeout(timeout);
greentea_notify_hosttest(host_test_name);
}
/** \brief Handshake with host and send setup data (timeout and host test name)
* \details This function will send the preamble to the master.
* After the host test name is received, the master will invoke the host test script
* and add the host test's callback handlers to the main event loop.
* This function is blocking.
*/
extern "C" void GREENTEA_SETUP(const int timeout, const char *host_test_name) {
#if ! defined(NO_GREENTEA)
char _value[GREENTEA_UUID_LENGTH] = {0};
_GREENTEA_SETUP_COMMON(timeout, host_test_name, _value, GREENTEA_UUID_LENGTH);
#endif
}
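/**
* Illustrative call site (a sketch, not part of this file). A test binary
* performs the handshake once at start-up and reports the suite result at the
* end; the 20-second timeout and the "default_auto" host test name below are
* assumptions taken from common Greentea examples.
*
* GREENTEA_SETUP(20, "default_auto");
* // ... run test cases ...
* GREENTEA_TESTSUITE_RESULT(1); // non-zero result means success, 0 means failure
*/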
/** \brief Handshake with host and send setup data (timeout and host test name). Allows you to preserve sync UUID.
* \details This function will send the preamble to the master.
* After the host test name is received, the master will invoke the host test script
* and add the host test's callback handlers to the main event loop.
* This function is blocking.
* This function differs from GREENTEA_SETUP because it allows you to
* preserve the UUID sent during the sync process.
*/
void GREENTEA_SETUP_UUID(const int timeout, const char *host_test_name, char *buffer, size_t size) {
_GREENTEA_SETUP_COMMON(timeout, host_test_name, buffer, size);
}
/** \brief Notify host (__exit message) side that test suite execution was complete
* \result Test suite result
* \details If __exit is not received by host side we will assume TIMEOUT
*/
void GREENTEA_TESTSUITE_RESULT(const int result) {
greentea_notify_completion(result);
}
/**
* Test Case support
*/
/** \brief Notify host side that test case started
* \details test_case_name Test case name
*/
void GREENTEA_TESTCASE_START(const char *test_case_name) {
greentea_send_kv(GREENTEA_TEST_ENV_TESTCASE_START, test_case_name);
}
/** \brief Notify host side that test case finished
* \details test_case_name Test case name
* \details result Test case result (0 -OK, non zero...)
*/
void GREENTEA_TESTCASE_FINISH(const char *test_case_name, const size_t passes, const size_t failed) {
greentea_send_kv(GREENTEA_TEST_ENV_TESTCASE_FINISH, test_case_name, passes, failed);
}
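/**
* Illustrative pairing (a sketch; the case name and counts are placeholders):
*
* GREENTEA_TESTCASE_START("simple-addition");
* // ... run the case, counting passed and failed checks ...
* GREENTEA_TESTCASE_FINISH("simple-addition", 1, 0); // 1 pass, 0 failures
*/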
/**
*****************************************************************************
* Auxiliary functions and key-value protocol support
*****************************************************************************
*/
/**
*****************************************************************************
* LCOV support
*****************************************************************************
*/
#ifdef MBED_CFG_DEBUG_OPTIONS_COVERAGE
extern "C" void __gcov_flush(void);
extern bool coverage_report;
/**
* \brief Send code coverage (gcov/LCOV) notification to master
*
* Generates preamble of message sent to notify host about code coverage data dump.
*
* This function is used by Mbed OS
* (see: mbed-os/platform/mbed_retarget.cpp) to generate code coverage
* messages to host. When code coverage feature is turned on slave will
* print-out code coverage data in form of key-value protocol.
* Message with code coverage data will contain message name, path to code
* coverage output file host will touch and fill with code coverage binary
* payload. Coverage payload is encoded as stream of ASCII coded bytes ("%02X").
*
* \param path to file with code coverage payload (set by gcov instrumentation)
*
*/
void greentea_notify_coverage_start(const char *path) {
printf("{{%s;%s;", GREENTEA_TEST_ENV_LCOV_START, path);
}
/**
* \brief Suffix for code coverage message to master (closing statement)
*
* This function is used by Mbed OS
* (see: mbed-os/platform/mbed_retarget.cpp) to generate code coverage
* messages to host. When code coverage feature is turned on slave will
* print-out code coverage data in form of key-value protocol.
* Message with code coverage data will contain message name, path to code
* coverage output file host will touch and fill with code coverage binary
* payload. Coverage payload is encoded as stream of ASCII coded bytes ("%02X").
*
* Companion function greentea_notify_coverage_start() defines code coverage message structure
*
*/
void greentea_notify_coverage_end() {
printf("}}" NL);
}
#endif
/**
*****************************************************************************
* Key-value protocol support
*****************************************************************************
*/
/**
* \brief Write the preamble characters to the serial port
*
* This function writes the preamble "{{" which is required
* for key-value communication between the target and the host.
* This uses greentea_putc which allows the direct writing of characters
* using the write() method.
* This suite of functions are provided to allow for serial communication
* to the host from within a thread/ISR.
*/
static void greentea_write_preamble()
{
greentea_putc('{');
greentea_putc('{');
}
/**
* \brief Write the postamble characters to the serial port
*
* This function writes the postamble "}}\r\n" which is required
* for key-value communication between the target and the host.
* This uses greentea_putc which allows the direct writing of characters
* using the write() method.
* This suite of functions are provided to allow for serial communication
* to the host from within a thread/ISR.
*
*/
static void greentea_write_postamble()
{
greentea_putc('}');
greentea_putc('}');
greentea_putc('\r');
greentea_putc('\n');
}
/**
* \brief Write a string to the serial port
*
* This function writes a '\0' terminated string from the target
* to the host. It writes directly to the serial port using the
* the write() method.
*
* \param str - string value
*
*/
void greentea_write_string(const char *str)
{
write(STDOUT_FILENO, str, strlen(str));
}
/**
* \brief Write an int to the serial port
*
* This function writes an integer value from the target
* to the host. The integer value is converted to a string and
* and then written character by character directly to the serial
* port using the console.
* sprintf() is used to convert the int to a string; sprintf() is
* inherently thread safe, so it can be used here.
*
* \param val - integer value
*
*/
#define MAX_INT_STRING_LEN 15
static void greentea_write_int(const int val)
{
char intval[MAX_INT_STRING_LEN];
unsigned int i = 0;
sprintf(intval, "%d", val);
while (intval[i] != '\0') {
greentea_putc(intval[i]);
i++;
}
}
/**
* \brief Encapsulate and send key-value message from DUT to host
*
* This function uses underlying functions to write directly
* to the serial port, (USBTX). This allows KVs to be used
* from within interrupt context.
*
* \param key Message key (message/event name)
* \param value Message payload, string value
*
*/
extern "C" void greentea_send_kv(const char *key, const char *val) {
if (key && val) {
greentea_write_preamble();
greentea_write_string(key);
greentea_putc(';');
greentea_write_string(val);
greentea_write_postamble();
}
}
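/*
* For example, emitting a custom event for a host-side callback might look like
* this (the key and payload are illustrative, not protocol-defined messages):
*
* greentea_send_kv("battery_level", "87");
*
* which writes "{{battery_level;87}}\r\n" to the serial port.
*/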
/**
* \brief Encapsulate and send key-value message from DUT to host
*
* This function uses underlying functions to write directly
* to the serial port, (USBTX). This allows KVs to be used
* from within interrupt context.
* Last value is an integer to avoid integer to string conversion
* made by the user.
*
* \param key Message key (message/event name)
* \param value Message payload, integer value
*
*/
void greentea_send_kv(const char *key, const int val) {
if (key) {
greentea_write_preamble();
greentea_write_string(key);
greentea_putc(';');
greentea_write_int(val);
greentea_write_postamble();
}
}
/**
* \brief Encapsulate and send key-value-value message from DUT to host
*
* This function uses underlying functions to write directly
* to the serial port, (USBTX). This allows KVs to be used
* from within interrupt context.
* Last value is an integer to avoid integer to string conversion
* made by the user.
*
* \param key Message key (message/event name)
* \param value Message payload, string value
* \param result Send additional integer formatted data
*
*/
void greentea_send_kv(const char *key, const char *val, const int result) {
if (key) {
greentea_write_preamble();
greentea_write_string(key);
greentea_putc(';');
greentea_write_string(val);
greentea_putc(';');
greentea_write_int(result);
greentea_write_postamble();
}
}
/**
* \brief Encapsulate and send key-value-value-value message from DUT to host
*
* This function uses underlying functions to write directly
* to the serial port, (USBTX). This allows KVs to be used
* from within interrupt context.
* Last 2 values are integers to avoid integer to string conversion
* made by the user.
*
* Names of the parameters: this function is used to send test case
* name with number of passes and failures to host. But it can be used
* to send any key-value-value-value (string-string-integer-integer)
* set to host.
*
* \param key Message key (message/event name)
* \param value Message payload, string value
* \param passes Send additional integer formatted data
* \param failures Send additional integer formatted data
*
*/
void greentea_send_kv(const char *key, const char *val, const int passes, const int failures) {
if (key) {
greentea_write_preamble();
greentea_write_string(key);
greentea_putc(';');
greentea_write_string(val);
greentea_putc(';');
greentea_write_int(passes);
greentea_putc(';');
greentea_write_int(failures);
greentea_write_postamble();
}
}
/**
* \brief Encapsulate and send key-value-value message from DUT to host
*
* This function uses underlying functions to write directly
* to the serial port, (USBTX). This allows key-value-value to be used
* from within interrupt context.
* Both values are integers to avoid integer to string conversion
* made by the user.
*
* Names of the parameters: this function is used to send number
* of passes and failures to host. But it can be used to send any
* key-value-value (string-integer-integer) message to host.
*
* \param key Message key (message/event name)
* \param value Message payload, integer value
* \param passes Send additional integer formatted data
* \param failures Send additional integer formatted data
*
*/
void greentea_send_kv(const char *key, const int passes, const int failures) {
if (key) {
greentea_write_preamble();
greentea_write_string(key);
greentea_putc(';');
greentea_write_int(passes);
greentea_putc(';');
greentea_write_int(failures);
greentea_write_postamble();
}
}
/**
* \brief Send message with timeout to master in seconds
*
* GREENTEA_TEST_ENV_TIMEOUT message is part of preamble
* sent from DUT to host during synchronisation (beginning of test
* suite execution).
*
* Notification about total test suite timeout. Timeout is measured
* from the moment of GREENTEA_TEST_ENV_TIMEOUT reception by host.
* If timeout is reached host (and host test) will be stopped and
* control will return to Greentea.
*
* \param timeout Test suite timeout in seconds
*
*/
static void greentea_notify_timeout(const int timeout) {
greentea_send_kv(GREENTEA_TEST_ENV_TIMEOUT, timeout);
}
/**
* \brief Send host test name to master
*
* GREENTEA_TEST_ENV_HOST_TEST_NAME message is part of preamble
* sent from DUT to host during synchronisation (beginning of test
* suite execution).
*
* Host test Python script implements host side callbacks
* for key-value events sent from DUT to host. Host test's
* callbacks are registered after GREENTEA_TEST_ENV_HOST_TEST_NAME
* message reaches host.
*
* \param host_test_name Host test name, host test will be loaded by mbedhtrun
*/
static void greentea_notify_hosttest(const char *host_test_name) {
greentea_send_kv(GREENTEA_TEST_ENV_HOST_TEST_NAME, host_test_name);
}
/**
* \brief Send to master information that test suite finished its execution
*
* GREENTEA_TEST_ENV_END and GREENTEA_TEST_ENV_EXIT messages
* are sent just before test suite execution finishes (nothing
* else to do). You can place it just before you return from your
* main() function.
*
* Code coverage: If MBED_CFG_DEBUG_OPTIONS_COVERAGE is set in the
* project via build configuration function will output series
* of code coverage messages GREENTEA_TEST_ENV_LCOV_START with code
* coverage binary data. This data is captured by Greentea and can
* be used to generate LCOV reports.
*
* \param result Test suite result from DUT (0 - FAIl, !0 - SUCCESS)
*
*/
static void greentea_notify_completion(const int result) {
const char *val = result ? GREENTEA_TEST_ENV_SUCCESS : GREENTEA_TEST_ENV_FAILURE;
#ifdef MBED_CFG_DEBUG_OPTIONS_COVERAGE
coverage_report = true;
__gcov_flush();
coverage_report = false;
#endif
greentea_metrics_report();
greentea_send_kv(GREENTEA_TEST_ENV_END, val);
greentea_send_kv(GREENTEA_TEST_ENV_EXIT, 0);
}
/**
* \brief Send to master greentea-client version
*/
static void greentea_notify_version() {
greentea_send_kv(GREENTEA_TEST_ENV_HOST_TEST_VERSION, MBED_GREENTEA_CLIENT_VERSION_STRING);
}
/**
*****************************************************************************
* Parse engine for KV values which replaces scanf
*****************************************************************************
*
* Example usage:
*
* char key[10];
* char value[48];
*
* greentea_parse_kv(key, value, 10, 48);
* greentea_parse_kv(key, value, 10, 48);
*
*/
static int gettok(char *, const int);
static int getNextToken(char *, const int);
static int HandleKV(char *, char *, const int, const int);
static int isstring(int);
/**
* \brief Current token of key-value protocol's tokenizer
*/
static int CurTok = 0;
/**
* \enum Token enumeration for key-value protocol tokenizer
*
* This enum is used by key-value protocol tokenizer
* to detect parts of protocol in stream.
*
* tok_eof ::= EOF (end of file)
* tok_open ::= "{{"
* tok_close ::= "}}"
* tok_semicolon ::= ";"
* tok_string ::= [a-zA-Z0-9_-!@#$%^&*()]+ // See isstring() function
*
*/
enum Token {
tok_eof = -1,
tok_open = -2,
tok_close = -3,
tok_semicolon = -4,
tok_string = -5
};
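/*
 * Illustrative sketch (not part of the original file): with the grammar above,
 * an incoming stream such as "{{__sync;abc}}" is reduced to the token sequence
 *
 *   tok_open, tok_string("__sync"), tok_semicolon, tok_string("abc"), tok_close
 */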
/**
* \brief Read character from stream of data
*
* Closure for default "get character" function.
 * This function is used to read characters from the stream
 * (default is serial port RX). The key-value protocol tokenizer
 * builds a stream of tokens used by the key-value protocol to
 * detect valid messages.
 *
 * If EOF is received, the parser finishes parsing and stops. With
 * a serial port stream of data, parsing goes on forever.
*
* \return Next character from the stream or EOF if stream has ended.
*
*/
extern "C" int greentea_getc() {
uint8_t c;
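    // Note: on mbed targets the standard streams are typically retargeted to
    // the same serial interface, so reading from STDOUT_FILENO reads the
    // serial RX stream feeding this parser.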
read(STDOUT_FILENO, &c, 1);
return c;
}
/**
 * \brief Write a character to the stream of data
 *
 * \param c Character to write
*
*/
extern "C" void greentea_putc(int c) {
uint8_t _c = c;
write(STDOUT_FILENO, &_c, 1);
}
/**
* \brief parse input string for key-value pairs: {{key;value}}
* This function should replace scanf() used to
* check for incoming messages from master. All data
* parsed and rejected is discarded.
*
 * \param out_key Output buffer for the key
 * \param out_value Output buffer for the value
 * \param out_key_size Total size of the out_key buffer
 * \param out_value_size Total size of the out_value buffer
 *
 * \return != 0 when a key-value pair was found
 *         == 0 when the end of the stream was reached
*
*/
extern "C" int greentea_parse_kv(char *out_key,
char *out_value,
const int out_key_size,
const int out_value_size) {
getNextToken(0, 0);
while (1) {
switch (CurTok) {
case tok_eof:
return 0;
case tok_open:
if (HandleKV(out_key, out_value, out_key_size, out_value_size)) {
// We've found {{ KEY ; VALUE }} expression
return 1;
}
break;
default:
// Load next token and pray...
getNextToken(0, 0);
break;
}
}
}
/**
* \brief Get next token from stream
*
* Key-value TOKENIZER feature
*
 * This function is used by the key-value parser to determine
 * if a key-value message is embedded in the stream data.
*
* \param str Output parameters to store token string value
* \param str_size Size of 'str' parameter in bytes (characters)
*
*/
static int getNextToken(char *str, const int str_size) {
return CurTok = gettok(str, str_size);
}
/**
* \brief Check if character is punctuation character
*
 * Auxiliary key-value TOKENIZER function
*
* Defines if character is in subset of allowed punctuation
* characters which can be part of a key or value string.
* Not allowed characters are: ";{}"
*
* \param c Input character to check
 * \return Return 1 if character is an allowed punctuation character, otherwise return 0
*
*/
static int ispunctuation(int c) {
static const char punctuation[] = "_-!@#$%^&*()=+:<>,./?\\\"'"; // No ";{}"
    for (size_t i=0; i< sizeof(punctuation) - 1; ++i) {   // -1: skip the terminating NUL
if (c == punctuation[i]) {
return 1;
}
}
return 0;
}
/**
* \brief Check if character is string token character
*
 * Auxiliary key-value TOKENIZER function
*
* Defines if character is in subset of allowed string
* token characters.
* String defines set of characters which can be a key or value string.
*
 * Allowed subset includes:
 * - Alphabetic characters
 * - Digits
 * - White spaces and
 * - A subset of punctuation characters.
 *
 * \param c Input character to check
 * \return Return 1 if character is an allowed string token character, otherwise return 0
*
*/
static int isstring(int c) {
return (isalpha(c) ||
isdigit(c) ||
isspace(c) ||
ispunctuation(c));
}
/**
* \brief TOKENIZER of key-value protocol
*
* Actual key-value TOKENIZER engine
*
* TOKENIZER defines #Token enum to map recognized tokens to integer values.
*
* <TOK_EOF> ::= EOF (end of file)
* <TOK_OPEN> ::= "{{"
* <TOK_CLOSE> ::= "}}"
* <TOK_SEMICOLON> ::= ";"
 * <TOK_STRING> ::= [a-zA-Z0-9_-!@#$%^&*()]+ // See isstring() function
*
* \param out_str Output string with parsed token (string)
* \param str_size Size of str buffer we can use
*
* \return Return #Token enum value used by parser to check for key-value occurrences
*
*/
static int gettok(char *out_str, const int str_size) {
static int LastChar = '!';
static int str_idx = 0;
    // whitespace ::= skip any leading white space characters
while (isspace(LastChar)) {
LastChar = greentea_getc();
}
// string ::= [a-zA-Z0-9_-!@#$%^&*()]+
if (isstring(LastChar)) {
str_idx = 0;
if (out_str && str_idx < str_size - 1) {
out_str[str_idx++] = LastChar;
}
        while (isstring((LastChar = greentea_getc()))) {
            if (out_str && str_idx < str_size - 1) {
                out_str[str_idx++] = LastChar;
            }
        }
if (out_str && str_idx < str_size) {
out_str[str_idx] = '\0';
}
return tok_string;
}
// semicolon ::= ';'
if (LastChar == ';') {
LastChar = greentea_getc();
return tok_semicolon;
}
// open ::= '{{'
if (LastChar == '{') {
LastChar = greentea_getc();
if (LastChar == '{') {
LastChar = greentea_getc();
return tok_open;
}
}
    // close ::= '}}'
if (LastChar == '}') {
LastChar = greentea_getc();
if (LastChar == '}') {
            greentea_getc(); // Offset the extra '\n' sent by the Greentea python tool
LastChar = '!';
return tok_close;
}
}
    if (LastChar == EOF) {
        return tok_eof;
    }
// Otherwise, just return the character as its ascii value.
int ThisChar = LastChar;
LastChar = greentea_getc();
return ThisChar;
}
/**
* \brief Key-value parser
*
* Key-value message grammar
*
* <MESSAGE>: <TOK_OPEN> <TOK_STRING> <TOK_SEMICOLON> <TOK_STRING> <TOK_CLOSE>
*
* Examples:
* message: "{{__timeout; 1000}}"
* "{{__sync; 12345678-1234-5678-1234-567812345678}}"
*
* \param out_key Output buffer to store key string value
* \param out_value Output buffer to store value string value
 * \param out_key_size Size of the 'out_key' buffer
 * \param out_value_size Size of the 'out_value' buffer
* \return Returns 1 if key-value message was parsed successfully in stream of tokens from tokenizer
*
*/
static int HandleKV(char *out_key,
char *out_value,
const int out_key_size,
const int out_value_size) {
// We already started with <open>
if (getNextToken(out_key, out_key_size) == tok_string) {
if (getNextToken(0, 0) == tok_semicolon) {
if (getNextToken(out_value, out_value_size) == tok_string) {
if (getNextToken(0, 0) == tok_close) {
// <open> <string> <semicolon> <string> <close>
// Found "{{KEY;VALUE}}" expression
return 1;
}
}
}
}
getNextToken(0, 0);
return 0;
}
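/*
 * Illustrative sketch (not part of the original file): how DUT-side test code
 * typically drives the key-value protocol implemented above. The function and
 * key names used here ("example_wait_for_start", "start", "example_ack") are
 * assumptions for demonstration purposes only; the block is kept inside #if 0
 * so it is never compiled.
 */
#if 0
#include <string.h>
static void example_wait_for_start(void) {
    char key[16] = {0};
    char value[48] = {0};
    // Block until the host sends a {{start;<payload>}} message or the stream ends.
    while (greentea_parse_kv(key, value, sizeof(key), sizeof(value)) != 0) {
        if (strcmp(key, "start") == 0) {
            // Echo the payload back to the host as {{example_ack;<payload>}}.
            greentea_send_kv("example_ack", value);
            break;
        }
    }
}
#endif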
|
mbedmicro/mbed
|
features/frameworks/greentea-client/source/greentea_test_env.cpp
|
C++
|
apache-2.0
| 25,808 | 31.503778 | 114 | 0.620738 | false |
/*
* Copyright 2010-2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.storagegateway.model;
/**
* <p>
* A JSON object containing the following fields:
* </p>
*
* <ul>
* <li> DescribeCacheOutput$CacheAllocatedInBytes </li>
* <li> DescribeCacheOutput$CacheDirtyPercentage </li>
* <li> DescribeCacheOutput$CacheHitPercentage </li>
* <li> DescribeCacheOutput$CacheMissPercentage </li>
* <li> DescribeCacheOutput$CacheUsedPercentage </li>
* <li> DescribeCacheOutput$DiskIds </li>
* <li> DescribeCacheOutput$GatewayARN </li>
*
* </ul>
*/
public class DescribeCacheResult {
/**
* In response, AWS Storage Gateway returns the ARN of the activated
* gateway. If you don't remember the ARN of a gateway, you can use the
* List Gateways operations to return a list of gateways for your account
* and region.
* <p>
* <b>Constraints:</b><br/>
* <b>Length: </b>50 - 500<br/>
*/
private String gatewayARN;
/**
* An array of the gateway's local disk IDs that are configured as cache.
* Each local disk ID is specified as a string (minimum length of 1 and
* maximum length of 300). If no local disks are configured as cache,
* then the <code>DiskIds</code> array is empty.
*/
private java.util.List<String> diskIds;
/**
* The size allocated, in bytes, for the cache. If no cache is defined
* for the gateway, this field returns 0.
*/
private Long cacheAllocatedInBytes;
/**
     * The percentage (0 to 100) of the cache storage in use. If no cache is
* defined for the gateway, this field returns 0.
*/
private Double cacheUsedPercentage;
/**
* The percentage of the cache that contains data that has not yet been
     * persisted to Amazon S3. If no cache is defined for the gateway, this
* field returns 0.
*/
private Double cacheDirtyPercentage;
/**
* The percentage (0 to 100) of data read from the storage volume that
     * was read from cache. If no cache is defined for the gateway, this
* field returns 0.
*/
private Double cacheHitPercentage;
/**
     * The percentage (0 to 100) of data read from the storage volume that
     * was not read from the cache, but was read from Amazon S3. If no cache
* is defined for the gateway, this field returns 0.
*/
private Double cacheMissPercentage;
/**
* In response, AWS Storage Gateway returns the ARN of the activated
* gateway. If you don't remember the ARN of a gateway, you can use the
* List Gateways operations to return a list of gateways for your account
* and region.
* <p>
* <b>Constraints:</b><br/>
* <b>Length: </b>50 - 500<br/>
*
* @return In response, AWS Storage Gateway returns the ARN of the activated
* gateway. If you don't remember the ARN of a gateway, you can use the
* List Gateways operations to return a list of gateways for your account
* and region.
*/
public String getGatewayARN() {
return gatewayARN;
}
/**
* In response, AWS Storage Gateway returns the ARN of the activated
* gateway. If you don't remember the ARN of a gateway, you can use the
* List Gateways operations to return a list of gateways for your account
* and region.
* <p>
* <b>Constraints:</b><br/>
* <b>Length: </b>50 - 500<br/>
*
* @param gatewayARN In response, AWS Storage Gateway returns the ARN of the activated
* gateway. If you don't remember the ARN of a gateway, you can use the
* List Gateways operations to return a list of gateways for your account
* and region.
*/
public void setGatewayARN(String gatewayARN) {
this.gatewayARN = gatewayARN;
}
/**
* In response, AWS Storage Gateway returns the ARN of the activated
* gateway. If you don't remember the ARN of a gateway, you can use the
* List Gateways operations to return a list of gateways for your account
* and region.
* <p>
* Returns a reference to this object so that method calls can be chained together.
* <p>
* <b>Constraints:</b><br/>
* <b>Length: </b>50 - 500<br/>
*
* @param gatewayARN In response, AWS Storage Gateway returns the ARN of the activated
* gateway. If you don't remember the ARN of a gateway, you can use the
* List Gateways operations to return a list of gateways for your account
* and region.
*
* @return A reference to this updated object so that method calls can be chained
* together.
*/
public DescribeCacheResult withGatewayARN(String gatewayARN) {
this.gatewayARN = gatewayARN;
return this;
}
/**
* An array of the gateway's local disk IDs that are configured as cache.
* Each local disk ID is specified as a string (minimum length of 1 and
* maximum length of 300). If no local disks are configured as cache,
* then the <code>DiskIds</code> array is empty.
*
* @return An array of the gateway's local disk IDs that are configured as cache.
* Each local disk ID is specified as a string (minimum length of 1 and
* maximum length of 300). If no local disks are configured as cache,
* then the <code>DiskIds</code> array is empty.
*/
public java.util.List<String> getDiskIds() {
if (diskIds == null) {
diskIds = new java.util.ArrayList<String>();
}
return diskIds;
}
/**
* An array of the gateway's local disk IDs that are configured as cache.
* Each local disk ID is specified as a string (minimum length of 1 and
* maximum length of 300). If no local disks are configured as cache,
* then the <code>DiskIds</code> array is empty.
*
* @param diskIds An array of the gateway's local disk IDs that are configured as cache.
* Each local disk ID is specified as a string (minimum length of 1 and
* maximum length of 300). If no local disks are configured as cache,
* then the <code>DiskIds</code> array is empty.
*/
public void setDiskIds(java.util.Collection<String> diskIds) {
if (diskIds == null) {
this.diskIds = null;
return;
}
java.util.List<String> diskIdsCopy = new java.util.ArrayList<String>(diskIds.size());
diskIdsCopy.addAll(diskIds);
this.diskIds = diskIdsCopy;
}
/**
* An array of the gateway's local disk IDs that are configured as cache.
* Each local disk ID is specified as a string (minimum length of 1 and
* maximum length of 300). If no local disks are configured as cache,
* then the <code>DiskIds</code> array is empty.
* <p>
* Returns a reference to this object so that method calls can be chained together.
*
* @param diskIds An array of the gateway's local disk IDs that are configured as cache.
* Each local disk ID is specified as a string (minimum length of 1 and
* maximum length of 300). If no local disks are configured as cache,
* then the <code>DiskIds</code> array is empty.
*
* @return A reference to this updated object so that method calls can be chained
* together.
*/
public DescribeCacheResult withDiskIds(String... diskIds) {
if (getDiskIds() == null) setDiskIds(new java.util.ArrayList<String>(diskIds.length));
for (String value : diskIds) {
getDiskIds().add(value);
}
return this;
}
/**
* An array of the gateway's local disk IDs that are configured as cache.
* Each local disk ID is specified as a string (minimum length of 1 and
* maximum length of 300). If no local disks are configured as cache,
* then the <code>DiskIds</code> array is empty.
* <p>
* Returns a reference to this object so that method calls can be chained together.
*
* @param diskIds An array of the gateway's local disk IDs that are configured as cache.
* Each local disk ID is specified as a string (minimum length of 1 and
* maximum length of 300). If no local disks are configured as cache,
* then the <code>DiskIds</code> array is empty.
*
* @return A reference to this updated object so that method calls can be chained
* together.
*/
public DescribeCacheResult withDiskIds(java.util.Collection<String> diskIds) {
if (diskIds == null) {
this.diskIds = null;
} else {
java.util.List<String> diskIdsCopy = new java.util.ArrayList<String>(diskIds.size());
diskIdsCopy.addAll(diskIds);
this.diskIds = diskIdsCopy;
}
return this;
}
/**
* The size allocated, in bytes, for the cache. If no cache is defined
* for the gateway, this field returns 0.
*
* @return The size allocated, in bytes, for the cache. If no cache is defined
* for the gateway, this field returns 0.
*/
public Long getCacheAllocatedInBytes() {
return cacheAllocatedInBytes;
}
/**
* The size allocated, in bytes, for the cache. If no cache is defined
* for the gateway, this field returns 0.
*
* @param cacheAllocatedInBytes The size allocated, in bytes, for the cache. If no cache is defined
* for the gateway, this field returns 0.
*/
public void setCacheAllocatedInBytes(Long cacheAllocatedInBytes) {
this.cacheAllocatedInBytes = cacheAllocatedInBytes;
}
/**
* The size allocated, in bytes, for the cache. If no cache is defined
* for the gateway, this field returns 0.
* <p>
* Returns a reference to this object so that method calls can be chained together.
*
* @param cacheAllocatedInBytes The size allocated, in bytes, for the cache. If no cache is defined
* for the gateway, this field returns 0.
*
* @return A reference to this updated object so that method calls can be chained
* together.
*/
public DescribeCacheResult withCacheAllocatedInBytes(Long cacheAllocatedInBytes) {
this.cacheAllocatedInBytes = cacheAllocatedInBytes;
return this;
}
/**
     * The percentage (0 to 100) of the cache storage in use. If no cache is
     * defined for the gateway, this field returns 0.
     *
     * @return The percentage (0 to 100) of the cache storage in use. If no cache is
* defined for the gateway, this field returns 0.
*/
public Double getCacheUsedPercentage() {
return cacheUsedPercentage;
}
/**
     * The percentage (0 to 100) of the cache storage in use. If no cache is
     * defined for the gateway, this field returns 0.
     *
     * @param cacheUsedPercentage The percentage (0 to 100) of the cache storage in use. If no cache is
* defined for the gateway, this field returns 0.
*/
public void setCacheUsedPercentage(Double cacheUsedPercentage) {
this.cacheUsedPercentage = cacheUsedPercentage;
}
/**
     * The percentage (0 to 100) of the cache storage in use. If no cache is
     * defined for the gateway, this field returns 0.
     * <p>
     * Returns a reference to this object so that method calls can be chained together.
     *
     * @param cacheUsedPercentage The percentage (0 to 100) of the cache storage in use. If no cache is
* defined for the gateway, this field returns 0.
*
* @return A reference to this updated object so that method calls can be chained
* together.
*/
public DescribeCacheResult withCacheUsedPercentage(Double cacheUsedPercentage) {
this.cacheUsedPercentage = cacheUsedPercentage;
return this;
}
/**
* The percentage of the cache that contains data that has not yet been
     * persisted to Amazon S3. If no cache is defined for the gateway, this
     * field returns 0.
     *
     * @return The percentage of the cache that contains data that has not yet been
     * persisted to Amazon S3. If no cache is defined for the gateway, this
* field returns 0.
*/
public Double getCacheDirtyPercentage() {
return cacheDirtyPercentage;
}
/**
* The percentage of the cache that contains data that has not yet been
     * persisted to Amazon S3. If no cache is defined for the gateway, this
     * field returns 0.
     *
     * @param cacheDirtyPercentage The percentage of the cache that contains data that has not yet been
     * persisted to Amazon S3. If no cache is defined for the gateway, this
* field returns 0.
*/
public void setCacheDirtyPercentage(Double cacheDirtyPercentage) {
this.cacheDirtyPercentage = cacheDirtyPercentage;
}
/**
* The percentage of the cache that contains data that has not yet been
     * persisted to Amazon S3. If no cache is defined for the gateway, this
     * field returns 0.
     * <p>
     * Returns a reference to this object so that method calls can be chained together.
     *
     * @param cacheDirtyPercentage The percentage of the cache that contains data that has not yet been
     * persisted to Amazon S3. If no cache is defined for the gateway, this
* field returns 0.
*
* @return A reference to this updated object so that method calls can be chained
* together.
*/
public DescribeCacheResult withCacheDirtyPercentage(Double cacheDirtyPercentage) {
this.cacheDirtyPercentage = cacheDirtyPercentage;
return this;
}
/**
* The percentage (0 to 100) of data read from the storage volume that
     * was read from cache. If no cache is defined for the gateway, this
     * field returns 0.
     *
     * @return The percentage (0 to 100) of data read from the storage volume that
     * was read from cache. If no cache is defined for the gateway, this
* field returns 0.
*/
public Double getCacheHitPercentage() {
return cacheHitPercentage;
}
/**
* The percentage (0 to 100) of data read from the storage volume that
     * was read from cache. If no cache is defined for the gateway, this
     * field returns 0.
     *
     * @param cacheHitPercentage The percentage (0 to 100) of data read from the storage volume that
     * was read from cache. If no cache is defined for the gateway, this
* field returns 0.
*/
public void setCacheHitPercentage(Double cacheHitPercentage) {
this.cacheHitPercentage = cacheHitPercentage;
}
/**
* The percentage (0 to 100) of data read from the storage volume that
     * was read from cache. If no cache is defined for the gateway, this
     * field returns 0.
     * <p>
     * Returns a reference to this object so that method calls can be chained together.
     *
     * @param cacheHitPercentage The percentage (0 to 100) of data read from the storage volume that
     * was read from cache. If no cache is defined for the gateway, this
* field returns 0.
*
* @return A reference to this updated object so that method calls can be chained
* together.
*/
public DescribeCacheResult withCacheHitPercentage(Double cacheHitPercentage) {
this.cacheHitPercentage = cacheHitPercentage;
return this;
}
/**
     * The percentage (0 to 100) of data read from the storage volume that
     * was not read from the cache, but was read from Amazon S3. If no cache
     * is defined for the gateway, this field returns 0.
     *
     * @return The percentage (0 to 100) of data read from the storage volume that
     * was not read from the cache, but was read from Amazon S3. If no cache
* is defined for the gateway, this field returns 0.
*/
public Double getCacheMissPercentage() {
return cacheMissPercentage;
}
/**
     * The percentage (0 to 100) of data read from the storage volume that
     * was not read from the cache, but was read from Amazon S3. If no cache
     * is defined for the gateway, this field returns 0.
     *
     * @param cacheMissPercentage The percentage (0 to 100) of data read from the storage volume that
     * was not read from the cache, but was read from Amazon S3. If no cache
* is defined for the gateway, this field returns 0.
*/
public void setCacheMissPercentage(Double cacheMissPercentage) {
this.cacheMissPercentage = cacheMissPercentage;
}
/**
     * The percentage (0 to 100) of data read from the storage volume that
     * was not read from the cache, but was read from Amazon S3. If no cache
     * is defined for the gateway, this field returns 0.
     * <p>
     * Returns a reference to this object so that method calls can be chained together.
     *
     * @param cacheMissPercentage The percentage (0 to 100) of data read from the storage volume that
     * was not read from the cache, but was read from Amazon S3. If no cache
* is defined for the gateway, this field returns 0.
*
* @return A reference to this updated object so that method calls can be chained
* together.
*/
public DescribeCacheResult withCacheMissPercentage(Double cacheMissPercentage) {
this.cacheMissPercentage = cacheMissPercentage;
return this;
}
/**
* Returns a string representation of this object; useful for testing and
* debugging.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (gatewayARN != null) sb.append("GatewayARN: " + gatewayARN + ", ");
if (diskIds != null) sb.append("DiskIds: " + diskIds + ", ");
if (cacheAllocatedInBytes != null) sb.append("CacheAllocatedInBytes: " + cacheAllocatedInBytes + ", ");
if (cacheUsedPercentage != null) sb.append("CacheUsedPercentage: " + cacheUsedPercentage + ", ");
if (cacheDirtyPercentage != null) sb.append("CacheDirtyPercentage: " + cacheDirtyPercentage + ", ");
if (cacheHitPercentage != null) sb.append("CacheHitPercentage: " + cacheHitPercentage + ", ");
if (cacheMissPercentage != null) sb.append("CacheMissPercentage: " + cacheMissPercentage + ", ");
sb.append("}");
return sb.toString();
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getGatewayARN() == null) ? 0 : getGatewayARN().hashCode());
hashCode = prime * hashCode + ((getDiskIds() == null) ? 0 : getDiskIds().hashCode());
hashCode = prime * hashCode + ((getCacheAllocatedInBytes() == null) ? 0 : getCacheAllocatedInBytes().hashCode());
hashCode = prime * hashCode + ((getCacheUsedPercentage() == null) ? 0 : getCacheUsedPercentage().hashCode());
hashCode = prime * hashCode + ((getCacheDirtyPercentage() == null) ? 0 : getCacheDirtyPercentage().hashCode());
hashCode = prime * hashCode + ((getCacheHitPercentage() == null) ? 0 : getCacheHitPercentage().hashCode());
hashCode = prime * hashCode + ((getCacheMissPercentage() == null) ? 0 : getCacheMissPercentage().hashCode());
return hashCode;
}
@Override
public boolean equals(Object obj) {
if (this == obj) return true;
if (obj == null) return false;
if (obj instanceof DescribeCacheResult == false) return false;
DescribeCacheResult other = (DescribeCacheResult)obj;
if (other.getGatewayARN() == null ^ this.getGatewayARN() == null) return false;
if (other.getGatewayARN() != null && other.getGatewayARN().equals(this.getGatewayARN()) == false) return false;
if (other.getDiskIds() == null ^ this.getDiskIds() == null) return false;
if (other.getDiskIds() != null && other.getDiskIds().equals(this.getDiskIds()) == false) return false;
if (other.getCacheAllocatedInBytes() == null ^ this.getCacheAllocatedInBytes() == null) return false;
if (other.getCacheAllocatedInBytes() != null && other.getCacheAllocatedInBytes().equals(this.getCacheAllocatedInBytes()) == false) return false;
if (other.getCacheUsedPercentage() == null ^ this.getCacheUsedPercentage() == null) return false;
if (other.getCacheUsedPercentage() != null && other.getCacheUsedPercentage().equals(this.getCacheUsedPercentage()) == false) return false;
if (other.getCacheDirtyPercentage() == null ^ this.getCacheDirtyPercentage() == null) return false;
if (other.getCacheDirtyPercentage() != null && other.getCacheDirtyPercentage().equals(this.getCacheDirtyPercentage()) == false) return false;
if (other.getCacheHitPercentage() == null ^ this.getCacheHitPercentage() == null) return false;
if (other.getCacheHitPercentage() != null && other.getCacheHitPercentage().equals(this.getCacheHitPercentage()) == false) return false;
if (other.getCacheMissPercentage() == null ^ this.getCacheMissPercentage() == null) return false;
if (other.getCacheMissPercentage() != null && other.getCacheMissPercentage().equals(this.getCacheMissPercentage()) == false) return false;
return true;
}
}
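// Illustrative sketch (not part of the original file): because every withXxx()
// method returns "this", a result object can be populated fluently. The values
// below are placeholders for demonstration only.
//
//   DescribeCacheResult result = new DescribeCacheResult()
//           .withGatewayARN("example-gateway-arn")
//           .withDiskIds("example-disk-id-1", "example-disk-id-2")
//           .withCacheUsedPercentage(42.0);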
|
XidongHuang/aws-sdk-for-java
|
src/main/java/com/amazonaws/services/storagegateway/model/DescribeCacheResult.java
|
Java
|
apache-2.0
| 22,402 | 41.835564 | 153 | 0.644139 | false |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/sagemaker/SageMaker_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
namespace Aws
{
namespace SageMaker
{
namespace Model
{
enum class ModelPackageStatus
{
NOT_SET,
Pending,
InProgress,
Completed,
Failed,
Deleting
};
namespace ModelPackageStatusMapper
{
AWS_SAGEMAKER_API ModelPackageStatus GetModelPackageStatusForName(const Aws::String& name);
AWS_SAGEMAKER_API Aws::String GetNameForModelPackageStatus(ModelPackageStatus value);
} // namespace ModelPackageStatusMapper
} // namespace Model
} // namespace SageMaker
} // namespace Aws
|
awslabs/aws-sdk-cpp
|
aws-cpp-sdk-sagemaker/include/aws/sagemaker/model/ModelPackageStatus.h
|
C
|
apache-2.0
| 731 | 20.441176 | 91 | 0.750343 | false |
/*
* Copyright 2015, gRPC Authors All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.grpc;
import java.io.File;
import java.util.concurrent.Executor;
import javax.annotation.Nullable;
/**
* A builder for {@link Server} instances.
*
* @param <T> The concrete type of this builder.
* @since 1.0.0
*/
public abstract class ServerBuilder<T extends ServerBuilder<T>> {
/**
* Static factory for creating a new ServerBuilder.
*
* @param port the port to listen on
* @since 1.0.0
*/
public static ServerBuilder<?> forPort(int port) {
return ServerProvider.provider().builderForPort(port);
}
/**
* Execute application code directly in the transport thread.
*
* <p>Depending on the underlying transport, using a direct executor may lead to substantial
* performance improvements. However, it also requires the application to not block under
* any circumstances.
*
* <p>Calling this method is semantically equivalent to calling {@link #executor(Executor)} and
* passing in a direct executor. However, this is the preferred way as it may allow the transport
* to perform special optimizations.
*
* @return this
* @since 1.0.0
*/
public abstract T directExecutor();
/**
* Provides a custom executor.
*
* <p>It's an optional parameter. If the user has not provided an executor when the server is
* built, the builder will use a static cached thread pool.
*
* <p>The server won't take ownership of the given executor. It's caller's responsibility to
* shut down the executor when it's desired.
*
* @return this
* @since 1.0.0
*/
public abstract T executor(@Nullable Executor executor);
/**
* Adds a service implementation to the handler registry.
*
* @param service ServerServiceDefinition object
* @return this
* @since 1.0.0
*/
public abstract T addService(ServerServiceDefinition service);
/**
* Adds a service implementation to the handler registry. If bindableService implements
* {@link InternalNotifyOnServerBuild}, the service will receive a reference to the generated
* server instance upon build().
*
* @param bindableService BindableService object
* @return this
* @since 1.0.0
*/
public abstract T addService(BindableService bindableService);
/**
* Adds a {@link ServerInterceptor} that is run for all services on the server. Interceptors
* added through this method always run before per-service interceptors added through {@link
* ServerInterceptors}. Interceptors run in the reverse order in which they are added.
*
* @param interceptor the all-service interceptor
* @return this
* @since 1.5.0
*/
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/3117")
public T intercept(ServerInterceptor interceptor) {
throw new UnsupportedOperationException();
}
/**
* Adds a {@link ServerTransportFilter}. The order of filters being added is the order they will
* be executed.
*
* @return this
* @since 1.2.0
*/
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/2132")
public T addTransportFilter(ServerTransportFilter filter) {
throw new UnsupportedOperationException();
}
/**
* Adds a {@link ServerStreamTracer.Factory} to measure server-side traffic. The order of
* factories being added is the order they will be executed.
*
* @return this
* @since 1.3.0
*/
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/2861")
public T addStreamTracerFactory(ServerStreamTracer.Factory factory) {
throw new UnsupportedOperationException();
}
/**
   * Sets a fallback handler registry that will be consulted if a method is not found in the
* primary registry. The primary registry (configured via {@code addService()}) is faster but
* immutable. The fallback registry is more flexible and allows implementations to mutate over
* time and load services on-demand.
*
* @return this
* @since 1.0.0
*/
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/933")
public abstract T fallbackHandlerRegistry(@Nullable HandlerRegistry fallbackRegistry);
/**
* Makes the server use TLS.
*
* @param certChain file containing the full certificate chain
* @param privateKey file containing the private key
*
* @return this
* @throws UnsupportedOperationException if the server does not support TLS.
* @since 1.0.0
*/
public abstract T useTransportSecurity(File certChain, File privateKey);
/**
* Set the decompression registry for use in the channel. This is an advanced API call and
* shouldn't be used unless you are using custom message encoding. The default supported
* decompressors are in {@code DecompressorRegistry.getDefaultInstance}.
*
* @return this
* @since 1.0.0
*/
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/1704")
public abstract T decompressorRegistry(@Nullable DecompressorRegistry registry);
/**
* Set the compression registry for use in the channel. This is an advanced API call and
* shouldn't be used unless you are using custom message encoding. The default supported
* compressors are in {@code CompressorRegistry.getDefaultInstance}.
*
* @return this
* @since 1.0.0
*/
@ExperimentalApi("https://github.com/grpc/grpc-java/issues/1704")
public abstract T compressorRegistry(@Nullable CompressorRegistry registry);
/**
* Builds a server using the given parameters.
*
   * <p>The returned server will not have been started or be bound to a port. You will need to start it
* with {@link Server#start()}.
*
* @return a new Server
* @since 1.0.0
*/
public abstract Server build();
}
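// Illustrative sketch (not part of the original file): typical use of this
// builder. "MyServiceImpl", the port number and the certificate files are
// placeholders for demonstration only.
//
//   Server server = ServerBuilder.forPort(50051)
//       .addService(new MyServiceImpl())              // a BindableService
//       .useTransportSecurity(certChainFile, privateKeyFile)
//       .build()
//       .start();
//   server.awaitTermination();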
|
pieterjanpintens/grpc-java
|
core/src/main/java/io/grpc/ServerBuilder.java
|
Java
|
apache-2.0
| 6,274 | 32.913514 | 99 | 0.710392 | false |
package com.mapswithme.maps.bookmarks.data;
import android.os.Parcel;
import android.os.Parcelable;
import android.support.annotation.NonNull;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
public class CatalogCustomProperty implements Parcelable
{
@NonNull
private final String mKey;
@NonNull
private final String mLocalizedName;
private final boolean mRequired;
@NonNull
private final List<CatalogCustomPropertyOption> mOptions;
public CatalogCustomProperty(@NonNull String key, @NonNull String localizedName,
boolean required, @NonNull CatalogCustomPropertyOption[] options)
{
mKey = key;
mLocalizedName = localizedName;
mRequired = required;
mOptions = Collections.unmodifiableList(Arrays.asList(options));
}
protected CatalogCustomProperty(Parcel in)
{
mKey = in.readString();
mLocalizedName = in.readString();
mRequired = in.readByte() != 0;
mOptions = in.createTypedArrayList(CatalogCustomPropertyOption.CREATOR);
}
@Override
public void writeToParcel(Parcel dest, int flags)
{
dest.writeString(mKey);
dest.writeString(mLocalizedName);
dest.writeByte((byte) (mRequired ? 1 : 0));
dest.writeTypedList(mOptions);
}
@Override
public int describeContents()
{
return 0;
}
public static final Creator<CatalogCustomProperty> CREATOR = new Creator<CatalogCustomProperty>()
{
@Override
public CatalogCustomProperty createFromParcel(Parcel in)
{
return new CatalogCustomProperty(in);
}
@Override
public CatalogCustomProperty[] newArray(int size)
{
return new CatalogCustomProperty[size];
}
};
@NonNull
public String getKey() { return mKey; }
@NonNull
public String getLocalizedName() { return mLocalizedName; }
public boolean isRequired() { return mRequired; }
@NonNull
public List<CatalogCustomPropertyOption> getOptions() { return mOptions; }
}
|
alexzatsepin/omim
|
android/src/com/mapswithme/maps/bookmarks/data/CatalogCustomProperty.java
|
Java
|
apache-2.0
| 2,008 | 23.487805 | 99 | 0.72261 | false |
package driver
import (
"fmt"
"net/http"
"net/url"
"strings"
)
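// registerWithDNS adds a record mapping fqdn to ip for the given container ID
// by issuing an HTTP PUT against the weaveDNS API on port 6785.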
func (d *dockerer) registerWithDNS(ID string, fqdn string, ip string) error {
dnsip, err := d.getContainerBridgeIP(WeaveDNSContainer)
if err != nil {
return fmt.Errorf("nameserver not available: %s", err)
}
data := url.Values{}
data.Add("fqdn", fqdn)
req, err := http.NewRequest("PUT", fmt.Sprintf("http://%s:6785/name/%s/%s", dnsip, ID, ip), strings.NewReader(data.Encode()))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
cl := &http.Client{}
	res, err := cl.Do(req)
	if err != nil {
		return err
	}
	// Close the response body to avoid leaking the underlying connection.
	defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return fmt.Errorf("non-OK status from nameserver: %d", res.StatusCode)
}
return nil
}
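// deregisterWithDNS removes the DNS entries for the given container ID and IP
// by issuing an HTTP DELETE against the weaveDNS API.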
func (d *dockerer) deregisterWithDNS(ID string, ip string) error {
dnsip, err := d.getContainerBridgeIP(WeaveDNSContainer)
if err != nil {
return fmt.Errorf("nameserver not available: %s", err)
}
req, err := http.NewRequest("DELETE", fmt.Sprintf("http://%s:6785/name/%s/%s", dnsip, ID, ip), nil)
if err != nil {
return err
}
cl := &http.Client{}
	res, err := cl.Do(req)
	if err != nil {
		return err
	}
	// Close the response body to avoid leaking the underlying connection.
	defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return fmt.Errorf("non-OK status from nameserver: %d", res.StatusCode)
}
return nil
}
|
bboreham/docker-plugin
|
plugin/driver/dns.go
|
GO
|
apache-2.0
| 1,310 | 22.818182 | 126 | 0.661069 | false |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/dms/model/AddTagsToResourceRequest.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::DatabaseMigrationService::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
AddTagsToResourceRequest::AddTagsToResourceRequest() :
m_resourceArnHasBeenSet(false),
m_tagsHasBeenSet(false)
{
}
Aws::String AddTagsToResourceRequest::SerializePayload() const
{
JsonValue payload;
if(m_resourceArnHasBeenSet)
{
payload.WithString("ResourceArn", m_resourceArn);
}
if(m_tagsHasBeenSet)
{
Array<JsonValue> tagsJsonList(m_tags.size());
for(unsigned tagsIndex = 0; tagsIndex < tagsJsonList.GetLength(); ++tagsIndex)
{
tagsJsonList[tagsIndex].AsObject(m_tags[tagsIndex].Jsonize());
}
payload.WithArray("Tags", std::move(tagsJsonList));
}
return payload.View().WriteReadable();
}
Aws::Http::HeaderValueCollection AddTagsToResourceRequest::GetRequestSpecificHeaders() const
{
Aws::Http::HeaderValueCollection headers;
headers.insert(Aws::Http::HeaderValuePair("X-Amz-Target", "AmazonDMSv20160101.AddTagsToResource"));
return headers;
}
|
aws/aws-sdk-cpp
|
aws-cpp-sdk-dms/source/model/AddTagsToResourceRequest.cpp
|
C++
|
apache-2.0
| 1,272 | 22.090909 | 101 | 0.740945 | false |
/*
* #%L
* BroadleafCommerce Integration
* %%
* Copyright (C) 2009 - 2015 Broadleaf Commerce
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
/*
* Copyright 2008-2013 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.broadleafcommerce.core.payment.service;
import org.broadleafcommerce.common.payment.PaymentGatewayType;
import org.springframework.stereotype.Service;
/**
* @author gdiaz
*/
@Service("blNullPaymentGatewayHostedConfiguration")
public class NullPaymentGatewayHostedConfigurationImpl implements NullPaymentGatewayHostedConfiguration {
protected int failureReportingThreshold = 1;
protected boolean performAuthorizeAndCapture = true;
@Override
public String getHostedRedirectUrl() {
return "/hosted/null-checkout";
}
@Override
public String getHostedRedirectReturnUrl() {
return "/null-checkout/hosted/return";
}
@Override
public boolean isPerformAuthorizeAndCapture() {
        return performAuthorizeAndCapture;
}
@Override
public void setPerformAuthorizeAndCapture(boolean performAuthorizeAndCapture) {
this.performAuthorizeAndCapture = performAuthorizeAndCapture;
}
@Override
public int getFailureReportingThreshold() {
return failureReportingThreshold;
}
@Override
public void setFailureReportingThreshold(int failureReportingThreshold) {
this.failureReportingThreshold = failureReportingThreshold;
}
@Override
public boolean handlesAuthorize() {
return true;
}
@Override
public boolean handlesCapture() {
return false;
}
@Override
public boolean handlesAuthorizeAndCapture() {
return true;
}
@Override
public boolean handlesReverseAuthorize() {
return false;
}
@Override
public boolean handlesVoid() {
return false;
}
@Override
public boolean handlesRefund() {
return false;
}
@Override
public boolean handlesPartialCapture() {
return false;
}
@Override
public boolean handlesMultipleShipment() {
return false;
}
@Override
public boolean handlesRecurringPayment() {
return false;
}
@Override
public boolean handlesSavedCustomerPayment() {
return false;
}
@Override
public boolean handlesMultiplePayments() {
return false;
}
@Override
public PaymentGatewayType getGatewayType() {
return NullPaymentGatewayType.NULL_HOSTED_GATEWAY;
}
}
|
caosg/BroadleafCommerce
|
integration/src/test/java/org/broadleafcommerce/core/payment/service/NullPaymentGatewayHostedConfigurationImpl.java
|
Java
|
apache-2.0
| 3,607 | 24.764286 | 105 | 0.699473 | false |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!--NewPage-->
<HTML>
<HEAD>
<!-- Generated by javadoc (build 1.6.0_21) on Wed May 04 07:58:35 PDT 2011 -->
<TITLE>
Uses of Class org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater (Hadoop 0.20.203.0 API)
</TITLE>
<META NAME="date" CONTENT="2011-05-04">
<LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../../../../stylesheet.css" TITLE="Style">
<SCRIPT type="text/javascript">
function windowTitle()
{
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater (Hadoop 0.20.203.0 API)";
}
}
</SCRIPT>
<NOSCRIPT>
</NOSCRIPT>
</HEAD>
<BODY BGCOLOR="white" onload="windowTitle();">
<HR>
<!-- ========= START OF TOP NAVBAR ======= -->
<A NAME="navbar_top"><!-- --></A>
<A HREF="#skip-navbar_top" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_top_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.html" title="class in org.apache.hadoop.io.compress.zlib"><FONT CLASS="NavBarFont1"><B>Class</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Use</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV
NEXT</FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../../../index.html?org/apache/hadoop/io/compress/zlib//class-useBuiltInZlibDeflater.html" target="_top"><B>FRAMES</B></A>
<A HREF="BuiltInZlibDeflater.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_top"></A>
<!-- ========= END OF TOP NAVBAR ========= -->
<HR>
<CENTER>
<H2>
<B>Uses of Class<br>org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater</B></H2>
</CENTER>
No usage of org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater
<P>
<HR>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<A NAME="navbar_bottom"><!-- --></A>
<A HREF="#skip-navbar_bottom" title="Skip navigation links"></A>
<TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY="">
<TR>
<TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1">
<A NAME="navbar_bottom_firstrow"><!-- --></A>
<TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY="">
<TR ALIGN="center" VALIGN="top">
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.html" title="class in org.apache.hadoop.io.compress.zlib"><FONT CLASS="NavBarFont1"><B>Class</B></FONT></A> </TD>
<TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> <FONT CLASS="NavBarFont1Rev"><B>Use</B></FONT> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../package-tree.html"><FONT CLASS="NavBarFont1"><B>Tree</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A> </TD>
<TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A> </TD>
</TR>
</TABLE>
</TD>
<TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM>
</EM>
</TD>
</TR>
<TR>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
PREV
NEXT</FONT></TD>
<TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2">
<A HREF="../../../../../../../index.html?org/apache/hadoop/io/compress/zlib//class-useBuiltInZlibDeflater.html" target="_top"><B>FRAMES</B></A>
<A HREF="BuiltInZlibDeflater.html" target="_top"><B>NO FRAMES</B></A>
<SCRIPT type="text/javascript">
<!--
if(window==top) {
document.writeln('<A HREF="../../../../../../../allclasses-noframe.html"><B>All Classes</B></A>');
}
//-->
</SCRIPT>
<NOSCRIPT>
<A HREF="../../../../../../../allclasses-noframe.html"><B>All Classes</B></A>
</NOSCRIPT>
</FONT></TD>
</TR>
</TABLE>
<A NAME="skip-navbar_bottom"></A>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
<HR>
Copyright © 2009 The Apache Software Foundation
</BODY>
</HTML>
|
zincumyx/Mammoth
|
mammoth-src/docs/api/org/apache/hadoop/io/compress/zlib/class-use/BuiltInZlibDeflater.html
|
HTML
|
apache-2.0
| 6,266 | 42.513889 | 251 | 0.61331 | false |
<!DOCTYPE html>
<html>
<!--
File: rpi3-system-information.html
Description: Raspberry Pi 3 - System Information.
Author: Andreas Lundquist
Copyright (c) 2013-2016 Evothings AB
-->
<head>
<meta charset="utf-8">
<script src="../js/include-head.js"></script>
<title>Raspberry Pi 3 - System Information</title>
</head>
<body>
<div class="evo-page">
<script src="../js/include-page-header.js"></script>
<div class="evo-content">
<script src="../js/include-example-menu.js"></script>
<div class="evo-box">
<!-- Always use the Evothings Client icon, do not use the "any browser icon". -->
<h1>Raspberry Pi 3 - System Information</h1>
    <p>This example shows you how to connect to your Raspberry Pi 3 using Bluetooth Low Energy.</p>
<!-- Insert application screen shot here-->
<img class="img-example-screenshot" src="images/raspberrypi3-system-information-screenshot.png"/>
</div>
<div class="evo-box">
<!-- Explain application structure here, comment on main files and link to them -->
<h2>Source code</h2>
<p>You can browse the source code for this example at the <a target="_blank" href="https://github.com/evothings/evothings-examples/tree/master/examples/rpi3-system-information">Evothings GitHub repository</a></p>
    <p>The file <a target="_blank" href="https://github.com/evothings/evothings-examples/blob/master/examples/rpi3-system-information/app/index.html">index.html</a> contains the HTML markup of the app, and the business logic is placed in <a target="_blank" href="https://github.com/evothings/evothings-examples/blob/master/examples/rpi3-system-information/app/app.js">app.js</a>.</p>
<p>The Node.js application that you will run on the Raspberry Pi 3 can be found <a target="_blank" href="https://github.com/evothings/evothings-examples/tree/master/examples/rpi3-system-information/rpi3-application">here</a>.</p>
</div>
<div class="evo-box">
<!-- Explain what the user needs in terms of hardware as well as softare in order to run the example -->
<h2>What you need</h2>
<p>This example runs in Evothings Viewer on Android or iOS.</p>
<ul>
<li>A <a href="https://www.raspberrypi.org/products/raspberry-pi-3-model-b/" target="_blank">Raspberry Pi 3</a></li>
<li>An iOS or Android smartphone</li>
</ul>
<p>Note that this example will run on a properly configured Raspberry Pi 1 or 2 equipped with a supported Bluetooth Low Energy USB dongle.</p>
</div>
<div class="evo-box">
<!-- Explain briefly how to get up and running -->
<h2>How to get up and running</h2>
<p>Detailed instructions on how to get this example up and running are available in <a href="https://www.hackster.io/inmyorbit/connect-a-mobile-application-to-your-rpi-3-using-ble-7a7c2c" target="_blank">this</a> Hackster.io article.</p>
<p>Follow these steps to get started with this example:</p>
<ol>
<li>Clone this <a href="https://github.com/evothings/evothings-examples" target="_blank">repository</a> on to your Raspberry Pi 3.</li>
<li>Browse to <i>examples/rpi3-system-information/rpi3-application/</i> and run the following command: <i>npm install</i></li>
<li>Run application with this command: <i>sudo node index.js</i></li>
<li>Find the Raspberry Pi 3 - System Information application in Evothings Workbench and press the Run button.</li>
<li>Press the Scan button, find your Raspberry Pi and press on it to connect.</li>
</ol>
</div>
<script src="../js/include-page-footer.js"></script>
</div><!-- evo-page-content -->
</div><!-- evo-page -->
</body>
</html>
|
InMyOrbit/evothings-doc
|
examples/rpi3-system-information.html
|
HTML
|
apache-2.0
| 3,496 | 42.7 | 382 | 0.720824 | false |
{% assign tocsdir = site.data.tocs %}
{% assign tocfile = tocsdir.[page.tocdir].[page.tocfile] %}
<div class="home">
{% for entry in tocfile %}
<h1>{{ entry.name }}</h1>
<ul>
{% for child in entry.children %}
{% comment %}
skip sub-sections; they'll get their own sections below
{% endcomment %}
{% if child.children %}{% continue %}{% endif %}
<li>
<h2><a href="{{ child.url }}">{{ child.name }}</a></h2>
{% if child.description %}<span class="summary">{{ child.description }}</span>{% endif %}
</li>
{% endfor %}
</ul>
{% comment %}
add sections for sub-sections
{% endcomment %}
{% for child in entry.children %}
{% if child.children %}
<h2>{{ child.name }}</h2>
<ul>
{% for grandchild in child.children %}
<li>
<h2><a href="{{ grandchild.url }}">{{ grandchild.name }}</a></h2>
{% if grandchild.description %}<span class="summary">{{ grandchild.description }}</span>{% endif %}
</li>
{% endfor %}
</ul>
{% endif %}
{% endfor %}
{% endfor %}
</div>
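{% comment %}
Illustrative sketch (not part of the original file): the TOC data this template
expects, as YAML under site.data.tocs. Names and URLs are examples only.
  - name: Getting Started
    children:
      - name: Overview
        url: /docs/overview.html
        description: One-line summary shown under the link.
      - name: Advanced
        children:
          - name: Tuning
            url: /docs/tuning.html
{% endcomment %}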
|
automotive-grade-linux/docs-agl
|
webdocs-agl/site/_includes/generated_index.html
|
HTML
|
apache-2.0
| 1,331 | 33.128205 | 119 | 0.437265 | false |
<?php
/*
* firewall_shaper_wizards.php
*
* part of pfSense (https://www.pfsense.org)
* Copyright (c) 2004-2016 Electric Sheep Fencing, LLC
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
##|+PRIV
##|*IDENT=page-firewall-trafficshaper-wizard
##|*NAME=Firewall: Traffic Shaper: Wizard
##|*DESCR=Allow access to the 'Firewall: Traffic Shaper: Wizard' page.
##|*MATCH=firewall_shaper_wizards.php*
##|-PRIV
require_once("guiconfig.inc");
require_once("functions.inc");
require_once("filter.inc");
require_once("shaper.inc");
require_once("util.inc");
if ($_GET['reset'] != "") {
sigkillbyname('pfctl', SIGKILL);
exit;
}
if ($_POST['apply']) {
write_config();
$retval = 0;
/* Setup pf rules since the user may have changed the optimization value */
$retval = filter_configure();
if (stristr($retval, "error") <> true) {
$savemsg = get_std_save_message($retval);
$class = 'success';
} else {
$savemsg = $retval;
$class = 'warning';
}
/* reset rrd queues */
unlink_if_exists("/var/db/rrd/*queuedrops.rrd");
unlink_if_exists("/var/db/rrd/*queues.rrd");
enable_rrd_graphing();
clear_subsystem_dirty('shaper');
}
$pgtitle = array(gettext("Firewall"), gettext("Traffic Shaper"), gettext("Wizards"));
$shortcut_section = "trafficshaper";
$wizards = array(
gettext("Multiple Lan/Wan") => "traffic_shaper_wizard_multi_all.xml",
gettext("Dedicated Links") => "traffic_shaper_wizard_dedicated.xml",
);
include("head.inc");
if ($input_errors) {
print_input_errors($input_errors);
}
$tab_array = array();
$tab_array[] = array(gettext("By Interface"), false, "firewall_shaper.php");
$tab_array[] = array(gettext("By Queue"), false, "firewall_shaper_queues.php");
$tab_array[] = array(gettext("Limiters"), false, "firewall_shaper_vinterface.php");
$tab_array[] = array(gettext("Wizards"), true, "firewall_shaper_wizards.php");
display_top_tabs($tab_array);
if ($savemsg) {
print_info_box($savemsg, $class);
}
if (is_subsystem_dirty('shaper')) {
print_apply_box(gettext("The traffic shaper configuration has been changed.") . "<br />" . gettext("The changes must be applied for them to take effect."));
}
?>
<div class="panel panel-default">
<div class="panel-heading"><h2 class="panel-title"><?=gettext('Traffic Shaper Wizards')?></h2></div>
<div class="panel-body">
<dl class="dl-horizontal responsive">
<?php
foreach ($wizards as $key => $wizard):
?>
<dt>
<?=$key?>
</dt>
<dd>
<?='<a href="wizard.php?xml=' . $wizard . '">' . $wizard . '</a>'?>
</dd>
<?php
endforeach;
?>
</dl>
</div>
</div>
<?php
include("foot.inc");
|
BlackstarGroup/pfsense
|
src/usr/local/www/firewall_shaper_wizards.php
|
PHP
|
apache-2.0
| 3,108 | 26.75 | 157 | 0.672458 | false |
<!--
Copyright © 2015 Cask Data, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
-->
<div ng-controller="WorkersRunDetailLogController as LogsController">
<my-log-viewer data-params="LogsController.params"></my-log-viewer>
</div>
|
chtyim/cdap
|
cdap-ui/app/features/workers/templates/tabs/runs/tabs/log.html
|
HTML
|
apache-2.0
| 732 | 37.473684 | 79 | 0.759234 | false |
# -*- encoding: utf-8 -*-
#
# Copyright © 2013 eNovance
#
# Author: Julien Danjou <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/publisher/file.py
"""
import datetime
import logging
import logging.handlers
import os
import tempfile
from ceilometer.openstack.common import network_utils as utils
from ceilometer.openstack.common import test
from ceilometer.publisher import file
from ceilometer import sample
class TestFilePublisher(test.BaseTestCase):
test_data = [
sample.Sample(
name='test',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='test2',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
sample.Sample(
name='test2',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
),
]
def test_file_publisher_maxbytes(self):
# Test valid configurations
tempdir = tempfile.mkdtemp()
name = '%s/log_file' % tempdir
parsed_url = utils.urlsplit('file://%s?max_bytes=50&backup_count=3'
% name)
publisher = file.FilePublisher(parsed_url)
publisher.publish_samples(None,
self.test_data)
handler = publisher.publisher_logger.handlers[0]
self.assertIsInstance(handler,
logging.handlers.RotatingFileHandler)
self.assertEqual([50, name, 3], [handler.maxBytes,
handler.baseFilename,
handler.backupCount])
        # The rotated file gets created since each file only allows 50 bytes.
self.assertTrue(os.path.exists('%s.1' % name))
def test_file_publisher(self):
# Test missing max bytes, backup count configurations
tempdir = tempfile.mkdtemp()
name = '%s/log_file_plain' % tempdir
parsed_url = utils.urlsplit('file://%s' % name)
publisher = file.FilePublisher(parsed_url)
publisher.publish_samples(None,
self.test_data)
handler = publisher.publisher_logger.handlers[0]
self.assertIsInstance(handler,
logging.handlers.RotatingFileHandler)
self.assertEqual([0, name, 0], [handler.maxBytes,
handler.baseFilename,
handler.backupCount])
        # Test that the content is correctly saved in the file
self.assertTrue(os.path.exists(name))
with open(name, 'r') as f:
content = f.read()
for sample in self.test_data:
self.assertTrue(sample.id in content)
self.assertTrue(sample.timestamp in content)
def test_file_publisher_invalid(self):
# Test invalid max bytes, backup count configurations
tempdir = tempfile.mkdtemp()
parsed_url = utils.urlsplit(
'file://%s/log_file_bad'
'?max_bytes=yus&backup_count=5y' % tempdir)
publisher = file.FilePublisher(parsed_url)
publisher.publish_samples(None,
self.test_data)
self.assertIsNone(publisher.publisher_logger)
|
NeCTAR-RC/ceilometer
|
ceilometer/tests/publisher/test_file.py
|
Python
|
apache-2.0
| 4,511 | 35.666667 | 75 | 0.584701 | false |
#!/usr/bin/python
#
# create_kernel_modules_disabled.py
# automatically generate checks for disabled kernel modules
#
# NOTE: The file 'template_kernel_module_disabled' should be located in the
# same working directory as this script. The template contains the following
# tags that *must* be replaced successfully in order for the checks to work.
#
# KERNMODULE - the name of the kernel module that should be disabled
#
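# Example (illustrative): a CSV line consisting of "usb-storage" produces
#   ./output/kernel_module_usb-storage_disabled.xml
# with every KERNMODULE tag in the template replaced by "usb-storage".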
import sys, csv, re
def output_checkfile(kerninfo):
# get the items out of the list
kernmod = kerninfo[0]
with open("./template_kernel_module_disabled", 'r') as templatefile:
filestring = templatefile.read()
filestring = filestring.replace("KERNMODULE", kernmod)
with open("./output/kernel_module_" + kernmod + "_disabled.xml", 'wb+') as outputfile:
outputfile.write(filestring)
outputfile.close()
def main():
if len(sys.argv) < 2:
print "Provide a CSV file containing lines of the format: kernmod"
sys.exit(1)
with open(sys.argv[1], 'r') as f:
# put the CSV line's items into a list
lines = csv.reader(f)
for line in lines:
output_checkfile(line)
sys.exit(0)
if __name__ == "__main__":
main()
|
quark-pat/CLIP
|
packages/scap-security-guide/scap-security-guide/RHEL6/input/checks/templates/create_kernel_modules_disabled.py
|
Python
|
apache-2.0
| 1,252 | 30.3 | 94 | 0.658147 | false |
package codebase.util;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;
import java.security.KeyFactory;
import java.security.PrivateKey;
import java.security.PublicKey;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.security.interfaces.RSAPrivateKey;
import java.security.spec.KeySpec;
import java.security.spec.PKCS8EncodedKeySpec;
import java.security.spec.X509EncodedKeySpec;
import javax.crypto.Cipher;
import javax.naming.ldap.LdapName;
import javax.naming.ldap.Rdn;
import javax.xml.bind.DatatypeConverter;
import org.apache.commons.codec.binary.Base64;
public class CertificateManager {
public static X509Certificate getCertificate(File file) {
X509Certificate cert = null;
try {
CertificateFactory cf = CertificateFactory.getInstance("X.509");
FileInputStream finStream = new FileInputStream(file);
cert = (X509Certificate)cf.generateCertificate(finStream);
System.out.println("cert found: " + DatatypeConverter.printBase64Binary(cert.getEncoded()));
} catch(Exception e) {
e.printStackTrace();
}
return cert;
}
public static boolean validateCert(X509Certificate clientCert, String uid) {
boolean valid = false;
        try { // Check whether the certificate has been signed by our root CA
PublicKey rootCAPublicKey = getPublicKey("resources/keys/MyRoot_pub.pem");
clientCert.verify(rootCAPublicKey);
System.out.println("verified");
valid = true;
} catch(Exception e) {
System.out.println("Certificate not trusted");
return false;
}
try {
clientCert.checkValidity();
System.out.println("valid");
valid = true;
} catch(Exception e) {
System.out.println("Certificate not trusted. It has expired");
return false;
}
try {
String dn = clientCert.getSubjectX500Principal().getName();
LdapName ldapDN = new LdapName(dn);
String cn = null;
for(Rdn rdn: ldapDN.getRdns()) {
if("CN".equals(rdn.getType())) {
cn = (String) rdn.getValue();
}
}
if(cn == null) {
System.out.println("Certificate not trusted. CN could n't find!");
return false;
} else if (cn.equals(uid)) {
valid = true;
} else {
System.out.println("Certificate not trusted. CN didn't match!");
return false;
}
} catch(Exception e) {
System.out.println("Certificate not trusted. It has expired");
return false;
}
System.out.println("valid: " + valid);
return valid;
}
public static PublicKey getPublicKey(String path) {
PublicKey publicKey = null;
try {
InputStream is = new FileInputStream(path);
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
KeyFactory keyFactory = KeyFactory.getInstance("RSA");
int nRead;
byte [] pubKeyBytes = new byte[is.available()];
while ((nRead = is.read(pubKeyBytes, 0, pubKeyBytes.length)) != -1) {
buffer.write(pubKeyBytes, 0, nRead);
}
buffer.flush();
is.close();
String pubKey = new String(pubKeyBytes, "UTF-8");
pubKey = pubKey.replaceAll("(-+BEGIN PUBLIC KEY-+\\r?\\n|-+END PUBLIC KEY-+\\r?\\n?)", "");
X509EncodedKeySpec spec = new X509EncodedKeySpec(Base64.decodeBase64(pubKey));
publicKey = keyFactory.generatePublic(spec);
} catch(Exception e) {
e.printStackTrace();
}
return publicKey;
}
public static RSAPrivateKey getPrivateKey(File file) {
RSAPrivateKey privateKey = null;
try {
InputStream is = new FileInputStream(file);
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
int nRead;
byte[] privKeyBytes = new byte[is.available()];
while ((nRead = is.read(privKeyBytes, 0, privKeyBytes.length)) != -1) {
buffer.write(privKeyBytes, 0, nRead);
}
buffer.flush();
is.close();
KeyFactory keyFactory = KeyFactory.getInstance("RSA");
KeySpec ks = new PKCS8EncodedKeySpec(privKeyBytes);
privateKey = (RSAPrivateKey) keyFactory.generatePrivate(ks);
} catch(Exception e) {
e.printStackTrace();
}
return privateKey;
}
public static String getPublicKeyString(String path) {
String pubKey = null;
try {
InputStream is = new FileInputStream(path);
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
int nRead;
byte [] pubKeyBytes = new byte[is.available()];
while ((nRead = is.read(pubKeyBytes, 0, pubKeyBytes.length)) != -1) {
buffer.write(pubKeyBytes, 0, nRead);
}
buffer.flush();
is.close();
pubKey = new String(pubKeyBytes, "UTF-8");
pubKey = pubKey.replaceAll("(-+BEGIN PUBLIC KEY-+\\r?\\n|-+END PUBLIC KEY-+\\r?\\n?)", "");
} catch(Exception e) {
e.printStackTrace();
}
return pubKey;
}
public static byte[] encrypt(PublicKey publicKey, byte[] origin) {
byte[] encrypted = null;
try {
Cipher cipher = Cipher.getInstance("RSA/ECB/NoPadding");
cipher.init(Cipher.ENCRYPT_MODE, publicKey);
encrypted = cipher.doFinal(origin);
System.out.println("e: " + encrypted.length);
if(encrypted != null && encrypted.length > 0) {
encrypted = Base64.encodeBase64(encrypted);
}
// RSAPrivateKey pk = getPrivateKey(new File("resources/keys/server_prv.pk8"));
//
// byte []decrypted = decrypt(pk, encrypted);
//
// System.out.println("d: " + decrypted.length);
//
// System.out.println("decrypted: " + new String(decrypted));
} catch(Exception e) {
e.printStackTrace();
}
return encrypted;
}
public static byte[] encrypt(X509Certificate cert, byte[] origin) {
byte[] encrypted = null;
try {
byte[] encoded = cert.getPublicKey().getEncoded();
X509EncodedKeySpec spec = new X509EncodedKeySpec(encoded);
KeyFactory keyFactory = KeyFactory.getInstance("RSA");
PublicKey publicKey = keyFactory.generatePublic(spec);
Cipher cipher = Cipher.getInstance("RSA/ECB/NoPadding");
cipher.init(Cipher.ENCRYPT_MODE, publicKey);
encrypted = cipher.doFinal(origin);
System.out.println("encrypted msg: " + new String(encrypted));
if(encrypted != null && encrypted.length > 0) {
encrypted = Base64.encodeBase64(encrypted);
}
} catch(Exception e) {
e.printStackTrace();
}
return encrypted;
}
public static byte[] decrypt(PrivateKey privateKey, byte[] encrypted) {
byte[] decrypted = null;
try {
Cipher cipher = Cipher.getInstance("RSA/ECB/NoPadding");
cipher.init(Cipher.DECRYPT_MODE, privateKey);
decrypted = cipher.doFinal(Base64.decodeBase64(encrypted));
} catch(Exception e) {
e.printStackTrace();
}
return decrypted;
}
}
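// Illustrative usage sketch (not part of the original class; the file names
// and the "alice" uid are hypothetical):
//
//   X509Certificate cert = CertificateManager.getCertificate(new File("client.crt"));
//   boolean trusted = CertificateManager.validateCert(cert, "alice");
//   byte[] cipherText = CertificateManager.encrypt(cert, "hello".getBytes("UTF-8"));
//   RSAPrivateKey privKey = CertificateManager.getPrivateKey(new File("client_prv.pk8"));
//   byte[] plainText = CertificateManager.decrypt(privKey, cipherText);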
|
daniyar-artykov/demo-projects
|
secure-chat/src/codebase/util/CertificateManager.java
|
Java
|
apache-2.0
| 6,741 | 27.565789 | 95 | 0.671414 | false |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/glue/Glue_EXPORTS.h>
#include <aws/glue/GlueRequest.h>
#include <aws/glue/model/SchemaId.h>
#include <aws/glue/model/SchemaVersionNumber.h>
#include <aws/glue/model/Compatibility.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace Glue
{
namespace Model
{
/**
*/
class AWS_GLUE_API UpdateSchemaRequest : public GlueRequest
{
public:
UpdateSchemaRequest();
    // The service request name is the operation name that will send this request out.
    // Each operation should have a unique request name, so that we can get the operation's name from this request.
    // Note: this is not true for responses; multiple operations may have the same response name,
    // so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "UpdateSchema"; }
Aws::String SerializePayload() const override;
Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* <p>This is a wrapper structure to contain schema identity fields. The structure
* contains:</p> <ul> <li> <p>SchemaId$SchemaArn: The Amazon Resource Name (ARN) of
* the schema. One of <code>SchemaArn</code> or <code>SchemaName</code> has to be
* provided.</p> </li> <li> <p>SchemaId$SchemaName: The name of the schema. One of
* <code>SchemaArn</code> or <code>SchemaName</code> has to be provided.</p> </li>
* </ul>
*/
inline const SchemaId& GetSchemaId() const{ return m_schemaId; }
/**
* <p>This is a wrapper structure to contain schema identity fields. The structure
* contains:</p> <ul> <li> <p>SchemaId$SchemaArn: The Amazon Resource Name (ARN) of
* the schema. One of <code>SchemaArn</code> or <code>SchemaName</code> has to be
* provided.</p> </li> <li> <p>SchemaId$SchemaName: The name of the schema. One of
* <code>SchemaArn</code> or <code>SchemaName</code> has to be provided.</p> </li>
* </ul>
*/
inline bool SchemaIdHasBeenSet() const { return m_schemaIdHasBeenSet; }
/**
* <p>This is a wrapper structure to contain schema identity fields. The structure
* contains:</p> <ul> <li> <p>SchemaId$SchemaArn: The Amazon Resource Name (ARN) of
* the schema. One of <code>SchemaArn</code> or <code>SchemaName</code> has to be
* provided.</p> </li> <li> <p>SchemaId$SchemaName: The name of the schema. One of
* <code>SchemaArn</code> or <code>SchemaName</code> has to be provided.</p> </li>
* </ul>
*/
inline void SetSchemaId(const SchemaId& value) { m_schemaIdHasBeenSet = true; m_schemaId = value; }
/**
* <p>This is a wrapper structure to contain schema identity fields. The structure
* contains:</p> <ul> <li> <p>SchemaId$SchemaArn: The Amazon Resource Name (ARN) of
* the schema. One of <code>SchemaArn</code> or <code>SchemaName</code> has to be
* provided.</p> </li> <li> <p>SchemaId$SchemaName: The name of the schema. One of
* <code>SchemaArn</code> or <code>SchemaName</code> has to be provided.</p> </li>
* </ul>
*/
inline void SetSchemaId(SchemaId&& value) { m_schemaIdHasBeenSet = true; m_schemaId = std::move(value); }
/**
* <p>This is a wrapper structure to contain schema identity fields. The structure
* contains:</p> <ul> <li> <p>SchemaId$SchemaArn: The Amazon Resource Name (ARN) of
* the schema. One of <code>SchemaArn</code> or <code>SchemaName</code> has to be
* provided.</p> </li> <li> <p>SchemaId$SchemaName: The name of the schema. One of
* <code>SchemaArn</code> or <code>SchemaName</code> has to be provided.</p> </li>
* </ul>
*/
inline UpdateSchemaRequest& WithSchemaId(const SchemaId& value) { SetSchemaId(value); return *this;}
/**
* <p>This is a wrapper structure to contain schema identity fields. The structure
* contains:</p> <ul> <li> <p>SchemaId$SchemaArn: The Amazon Resource Name (ARN) of
* the schema. One of <code>SchemaArn</code> or <code>SchemaName</code> has to be
* provided.</p> </li> <li> <p>SchemaId$SchemaName: The name of the schema. One of
* <code>SchemaArn</code> or <code>SchemaName</code> has to be provided.</p> </li>
* </ul>
*/
inline UpdateSchemaRequest& WithSchemaId(SchemaId&& value) { SetSchemaId(std::move(value)); return *this;}
/**
* <p>Version number required for check pointing. One of <code>VersionNumber</code>
* or <code>Compatibility</code> has to be provided.</p>
*/
inline const SchemaVersionNumber& GetSchemaVersionNumber() const{ return m_schemaVersionNumber; }
/**
* <p>Version number required for check pointing. One of <code>VersionNumber</code>
* or <code>Compatibility</code> has to be provided.</p>
*/
inline bool SchemaVersionNumberHasBeenSet() const { return m_schemaVersionNumberHasBeenSet; }
/**
* <p>Version number required for check pointing. One of <code>VersionNumber</code>
* or <code>Compatibility</code> has to be provided.</p>
*/
inline void SetSchemaVersionNumber(const SchemaVersionNumber& value) { m_schemaVersionNumberHasBeenSet = true; m_schemaVersionNumber = value; }
/**
* <p>Version number required for check pointing. One of <code>VersionNumber</code>
* or <code>Compatibility</code> has to be provided.</p>
*/
inline void SetSchemaVersionNumber(SchemaVersionNumber&& value) { m_schemaVersionNumberHasBeenSet = true; m_schemaVersionNumber = std::move(value); }
/**
* <p>Version number required for check pointing. One of <code>VersionNumber</code>
* or <code>Compatibility</code> has to be provided.</p>
*/
inline UpdateSchemaRequest& WithSchemaVersionNumber(const SchemaVersionNumber& value) { SetSchemaVersionNumber(value); return *this;}
/**
* <p>Version number required for check pointing. One of <code>VersionNumber</code>
* or <code>Compatibility</code> has to be provided.</p>
*/
inline UpdateSchemaRequest& WithSchemaVersionNumber(SchemaVersionNumber&& value) { SetSchemaVersionNumber(std::move(value)); return *this;}
/**
* <p>The new compatibility setting for the schema.</p>
*/
inline const Compatibility& GetCompatibility() const{ return m_compatibility; }
/**
* <p>The new compatibility setting for the schema.</p>
*/
inline bool CompatibilityHasBeenSet() const { return m_compatibilityHasBeenSet; }
/**
* <p>The new compatibility setting for the schema.</p>
*/
inline void SetCompatibility(const Compatibility& value) { m_compatibilityHasBeenSet = true; m_compatibility = value; }
/**
* <p>The new compatibility setting for the schema.</p>
*/
inline void SetCompatibility(Compatibility&& value) { m_compatibilityHasBeenSet = true; m_compatibility = std::move(value); }
/**
* <p>The new compatibility setting for the schema.</p>
*/
inline UpdateSchemaRequest& WithCompatibility(const Compatibility& value) { SetCompatibility(value); return *this;}
/**
* <p>The new compatibility setting for the schema.</p>
*/
inline UpdateSchemaRequest& WithCompatibility(Compatibility&& value) { SetCompatibility(std::move(value)); return *this;}
/**
* <p>The new description for the schema.</p>
*/
inline const Aws::String& GetDescription() const{ return m_description; }
/**
* <p>The new description for the schema.</p>
*/
inline bool DescriptionHasBeenSet() const { return m_descriptionHasBeenSet; }
/**
* <p>The new description for the schema.</p>
*/
inline void SetDescription(const Aws::String& value) { m_descriptionHasBeenSet = true; m_description = value; }
/**
* <p>The new description for the schema.</p>
*/
inline void SetDescription(Aws::String&& value) { m_descriptionHasBeenSet = true; m_description = std::move(value); }
/**
* <p>The new description for the schema.</p>
*/
inline void SetDescription(const char* value) { m_descriptionHasBeenSet = true; m_description.assign(value); }
/**
* <p>The new description for the schema.</p>
*/
inline UpdateSchemaRequest& WithDescription(const Aws::String& value) { SetDescription(value); return *this;}
/**
* <p>The new description for the schema.</p>
*/
inline UpdateSchemaRequest& WithDescription(Aws::String&& value) { SetDescription(std::move(value)); return *this;}
/**
* <p>The new description for the schema.</p>
*/
inline UpdateSchemaRequest& WithDescription(const char* value) { SetDescription(value); return *this;}
private:
SchemaId m_schemaId;
bool m_schemaIdHasBeenSet;
SchemaVersionNumber m_schemaVersionNumber;
bool m_schemaVersionNumberHasBeenSet;
Compatibility m_compatibility;
bool m_compatibilityHasBeenSet;
Aws::String m_description;
bool m_descriptionHasBeenSet;
};
} // namespace Model
} // namespace Glue
} // namespace Aws
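// Illustrative usage sketch (not part of the generated header; assumes a
// configured Aws::Glue::GlueClient named "client" and a registry that already
// contains a schema called "my-schema"):
//
//   Aws::Glue::Model::SchemaId schemaId;
//   schemaId.SetSchemaName("my-schema");
//   Aws::Glue::Model::UpdateSchemaRequest request;
//   request.SetSchemaId(schemaId);
//   request.SetCompatibility(Aws::Glue::Model::Compatibility::BACKWARD);
//   request.SetDescription("Updated via the C++ SDK");
//   auto outcome = client.UpdateSchema(request);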
|
aws/aws-sdk-cpp
|
aws-cpp-sdk-glue/include/aws/glue/model/UpdateSchemaRequest.h
|
C
|
apache-2.0
| 9,180 | 39.610619 | 153 | 0.67433 | false |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.dsl.yaml.deserializers;
import java.util.List;
import org.apache.camel.dsl.yaml.common.YamlDeserializerBase;
import org.apache.camel.model.RouteDefinition;
import org.apache.camel.model.RouteTemplateBeanDefinition;
import org.apache.camel.model.RouteTemplateDefinition;
import org.apache.camel.model.RouteTemplateParameterDefinition;
import org.apache.camel.spi.annotations.YamlIn;
import org.apache.camel.spi.annotations.YamlProperty;
import org.apache.camel.spi.annotations.YamlType;
import org.snakeyaml.engine.v2.nodes.Node;
@YamlIn
@YamlType(
nodes = { "route-template", "routeTemplate" },
types = org.apache.camel.model.RouteTemplateDefinition.class,
order = org.apache.camel.dsl.yaml.common.YamlDeserializerResolver.ORDER_LOWEST - 1,
properties = {
@YamlProperty(name = "id",
type = "string",
required = true),
@YamlProperty(name = "from",
type = "object:org.apache.camel.model.FromDefinition",
required = true),
@YamlProperty(name = "parameters",
type = "array:org.apache.camel.model.RouteTemplateParameterDefinition"),
@YamlProperty(name = "beans",
type = "array:org.apache.camel.dsl.yaml.deserializers.NamedBeanDefinition")
})
public class RouteTemplateDefinitionDeserializer extends YamlDeserializerBase<RouteTemplateDefinition> {
public RouteTemplateDefinitionDeserializer() {
super(RouteTemplateDefinition.class);
}
@Override
protected RouteTemplateDefinition newInstance() {
return new RouteTemplateDefinition();
}
@Override
protected boolean setProperty(
RouteTemplateDefinition target, String propertyKey, String propertyName, Node node) {
switch (propertyKey) {
case "id": {
target.setId(asText(node));
break;
}
case "from": {
OutputAwareFromDefinition val = asType(node, OutputAwareFromDefinition.class);
RouteDefinition route = new RouteDefinition();
route.setInput(val.getDelegate());
route.setOutputs(val.getOutputs());
target.setRoute(route);
break;
}
case "parameters": {
List<RouteTemplateParameterDefinition> items = asFlatList(node, RouteTemplateParameterDefinition.class);
target.setTemplateParameters(items);
break;
}
case "beans": {
List<RouteTemplateBeanDefinition> items = asFlatList(node, RouteTemplateBeanDefinition.class);
target.setTemplateBeans(items);
break;
}
default: {
return false;
}
}
return true;
}
}
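// Illustrative YAML handled by this deserializer (a sketch only; the template
// id, parameter name and endpoint URIs are made up):
//
//   - route-template:
//       id: myTemplate
//       parameters:
//         - name: greeting
//       from:
//         uri: "timer:tick"
//         steps:
//           - log: "{{greeting}}"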
|
apache/camel
|
dsl/camel-yaml-dsl/camel-yaml-dsl-deserializers/src/main/java/org/apache/camel/dsl/yaml/deserializers/RouteTemplateDefinitionDeserializer.java
|
Java
|
apache-2.0
| 3,836 | 41.153846 | 120 | 0.634515 | false |
<!DOCTYPE html>
<html data-ng-app="">
<link rel="stylesheet"
href="https://stackpath.bootstrapcdn.com/bootswatch/4.3.1/minty/bootstrap.min.css">
<head>
<meta charset="US-ASCII">
<title>Vitess VSchema demo</title>
</head>
<body data-ng-controller="DemoController">
<div class="container-fluid text-dark">
<div class="row">
<h4 class="col-md-9">Vitess VSchema demo</h4>
</div>
<div class="row">
<div class="col-md-8">
<div class="card mt-3 mb-3">
<div class="card-heading">
<h4 class="text-center">customer</h4>
</div>
<div class="row">
<div class="col-md-6">
<div data-ng-show="result.customer0" data-ng-repeat="curResult in [result.customer0]"
data-ng-include="'result.html'"></div>
</div>
<div class="col-md-6">
<div data-ng-show="result.customer1" data-ng-repeat="curResult in [result.customer1]"
data-ng-include="'result.html'"></div>
</div>
</div>
<div class="row">
<div class="col-md-6">
<div data-ng-show="result.corder0" data-ng-repeat="curResult in [result.corder0]"
data-ng-include="'result.html'"></div>
</div>
<div class="col-md-6">
<div data-ng-show="result.corder1" data-ng-repeat="curResult in [result.corder1]"
data-ng-include="'result.html'"></div>
</div>
</div>
<div class="row">
<div class="col-md-6">
<div data-ng-show="result.corder_event0" data-ng-repeat="curResult in [result.corder_event0]"
data-ng-include="'result.html'"></div>
</div>
<div class="col-md-6">
<div data-ng-show="result.corder_event1" data-ng-repeat="curResult in [result.corder_event1]"
data-ng-include="'result.html'"></div>
</div>
</div>
<div class="row">
<div class="col-md-6">
<div data-ng-show="result.oname_keyspace_idx0" data-ng-repeat="curResult in [result.oname_keyspace_idx0]"
data-ng-include="'result.html'"></div>
</div>
<div class="col-md-6">
<div data-ng-show="result.oname_keyspace_idx1" data-ng-repeat="curResult in [result.oname_keyspace_idx1]"
data-ng-include="'result.html'"></div>
</div>
</div>
</div>
</div>
<div class="col-md-4">
<div class="card mt-3 mb-3">
<div class="card-heading">
<h4 class="text-center">product</h4>
</div>
<div class="row">
<div class="col-md-12">
<div data-ng-show="result.product" data-ng-repeat="curResult in [result.product]"
data-ng-include="'result.html'"></div>
<div data-ng-show="result.customer_seq" data-ng-repeat="curResult in [result.customer_seq]"
data-ng-include="'result.html'"></div>
<div data-ng-show="result.corder_keyspace_idx" data-ng-repeat="curResult in [result.corder_keyspace_idx]"
data-ng-include="'result.html'"></div>
</div>
</div>
</div>
</div>
</div>
<div class="row">
<div class="col-md-9">
<div class="card">
<form class="input-group" data-ng-submit="submitQuery()">
<div class="input-group-btn">
<button class="btn dropdown-toggle text-dark" type="button"
data-toggle="dropdown">
Samples <span class="caret"></span>
</button>
<ul class="dropdown-menu">
<li data-ng-class="{'divider': !sample}"
data-ng-repeat="sample in samples track by $index"
data-ng-click="setQuery(sample)"><a data-ng-show="sample"
href="#">{{sample}}</a></li>
</ul>
</div>
<input type="text" class="form-control" placeholder="Query"
data-ng-model="query" id="query_input">
</form>
</div>
</div>
</div>
<div class="row">
<div class="col-md-4 mt-3">
<div class="col-md-12">
<div data-ng-repeat="curResult in [result.result]"
data-ng-include="'result.html'"></div>
</div>
</div>
<div class="col-md-8">
<div class="card bg-primary mt-3 ml-1 mr-1" data-ng-show="result.queries">
<h5 class="card-title">Executed Queries
</h5>
<div data-ng-repeat="queryInfo in result.queries track by $index">
{{queryInfo}}<br>
</div>
</div>
</div>
</div>
<script src="https://code.jquery.com/jquery-3.3.1.slim.min.js" integrity="sha384-q8i/X+965DzO0rT7abK41JStQIAqVgRVzpbzo5smXKp4YfRvH+8abtTE1Pi6jizo" crossorigin="anonymous"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.7/umd/popper.min.js" integrity="sha384-UO2eT0CpHqdSJQ6hJty5KVphtPhzWj9WO1clHTMGa3JDZwrnQq4sF86dIHNDz0W1" crossorigin="anonymous"></script>
<script src="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/js/bootstrap.min.js" integrity="sha384-JjSmVgyd0p3pXB1rRibZUAYoIIy6OrQ6VrjIEaFf/nJGzIxFDsf4x0xIM+B07jRM" crossorigin="anonymous"></script>
<script src="https://ajax.googleapis.com/ajax/libs/angularjs/1.2.0/angular.js" type="text/javascript"></script>
<script src="index.js" type="text/javascript"></script>
</body>
</html>
|
tinyspeck/vitess
|
examples/demo/index.html
|
HTML
|
apache-2.0
| 5,550 | 43.047619 | 207 | 0.554234 | false |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.usergrid.query.validator;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.apache.usergrid.persistence.Entity;
import java.util.List;
/**
* @author Sungju Jin
*/
@Component
public class QueryValidator {
QueryValidationConfiguration configuration;
@Autowired
ApiServerRunner api;
@Autowired
SqliteRunner sql;
public QueryValidator() {
}
public QueryValidator(QueryValidationConfiguration configuration) {
this.configuration = configuration;
}
public boolean setup() {
return api.setup() && sql.setup();
}
public QueryResponse execute(QueryRequest request) {
return execute(request, new DefaultQueryResultsMatcher());
}
public QueryResponse execute(QueryRequest request, QueryResultsMatcher matcher) {
List<Entity> sqlEntities = sql.execute(request.getDbQuery());
List<Entity> apiEntities = api.execute(request.getApiQuery().getQuery(), request.getApiQuery().getLimit());
boolean equals = matcher.equals(sqlEntities, apiEntities);
QueryResponse response = new QueryResponse();
response.setResult(equals);
response.setExpacted(sqlEntities);
response.setActually(apiEntities);
return response;
}
public void setConfiguration(QueryValidationConfiguration configuration) {
this.configuration = configuration;
sql.setCollection(configuration.getCollection());
sql.setEntities(configuration.getEntities());
api.setOrg(configuration.getOrg());
api.setApp(configuration.getApp());
api.setBaseUri(configuration.getEndpointUri());
api.setCollection(configuration.getCollection());
api.setEntities(configuration.getEntities());
api.setEmail(configuration.getEmail());
api.setPassword(configuration.getPassword());
}
}
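// Illustrative usage sketch (not part of the original class). The validator is
// a Spring @Component, so in practice it would be injected rather than
// constructed directly; "configuration" and "request" are prepared objects:
//
//   validator.setConfiguration(configuration); // a QueryValidationConfiguration
//   if (validator.setup()) {
//       QueryResponse response = validator.execute(request); // a QueryRequest
//       // response carries the match result plus expected/actual entity lists
//   }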
|
mesosphere/usergrid
|
stack/query-validator/src/main/java/org/apache/usergrid/query/validator/QueryValidator.java
|
Java
|
apache-2.0
| 2,751 | 34.727273 | 115 | 0.724827 | false |
// This file was generated by Mendix Studio Pro.
//
// WARNING: Only the following code will be retained when actions are regenerated:
// - the import list
// - the code between BEGIN USER CODE and END USER CODE
// - the code between BEGIN EXTRA CODE and END EXTRA CODE
// Other code you write will be lost the next time you deploy the project.
// Special characters, e.g., é, ö, à, etc. are supported in comments.
package unittesting.actions;
import unittesting.TestManager;
import com.mendix.systemwideinterfaces.core.IContext;
import com.mendix.webui.CustomJavaAction;
public class ThrowAssertionFailed extends CustomJavaAction<java.lang.Boolean>
{
private java.lang.String message;
public ThrowAssertionFailed(IContext context, java.lang.String message)
{
super(context);
this.message = message;
}
@java.lang.Override
public java.lang.Boolean executeAction() throws Exception
{
// BEGIN USER CODE
throw new TestManager.AssertionException(message);
// END USER CODE
}
/**
* Returns a string representation of this action
*/
@java.lang.Override
public java.lang.String toString()
{
return "ThrowAssertionFailed";
}
// BEGIN EXTRA CODE
// END EXTRA CODE
}
|
mendix/UnitTesting
|
src/javasource/unittesting/actions/ThrowAssertionFailed.java
|
Java
|
apache-2.0
| 1,243 | 25.555556 | 82 | 0.724194 | false |
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/port_platform.h>
#include "src/core/ext/filters/client_channel/backup_poller.h"
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/global_config.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/gprpp/time.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/completion_queue.h"
#define DEFAULT_POLL_INTERVAL_MS 5000
namespace {
struct backup_poller {
grpc_timer polling_timer;
grpc_closure run_poller_closure;
grpc_closure shutdown_closure;
gpr_mu* pollset_mu;
grpc_pollset* pollset; // guarded by pollset_mu
bool shutting_down; // guarded by pollset_mu
gpr_refcount refs;
gpr_refcount shutdown_refs;
};
} // namespace
static gpr_once g_once = GPR_ONCE_INIT;
static gpr_mu g_poller_mu;
static backup_poller* g_poller = nullptr; // guarded by g_poller_mu
// g_poll_interval_ms is set only once at the first time
// grpc_client_channel_start_backup_polling() is called, after that it is
// treated as const.
static grpc_core::Duration g_poll_interval =
grpc_core::Duration::Milliseconds(DEFAULT_POLL_INTERVAL_MS);
GPR_GLOBAL_CONFIG_DEFINE_INT32(
grpc_client_channel_backup_poll_interval_ms, DEFAULT_POLL_INTERVAL_MS,
"Declares the interval in ms between two backup polls on client channels. "
"These polls are run in the timer thread so that gRPC can process "
"connection failures while there is no active polling thread. "
"They help reconnect disconnected client channels (mostly due to "
"idleness), so that the next RPC on this channel won't fail. Set to 0 to "
"turn off the backup polls.");
void grpc_client_channel_global_init_backup_polling() {
gpr_once_init(&g_once, [] { gpr_mu_init(&g_poller_mu); });
int32_t poll_interval_ms =
GPR_GLOBAL_CONFIG_GET(grpc_client_channel_backup_poll_interval_ms);
if (poll_interval_ms < 0) {
gpr_log(GPR_ERROR,
"Invalid GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS: %d, "
"default value %" PRId64 " will be used.",
poll_interval_ms, g_poll_interval.millis());
} else {
g_poll_interval = grpc_core::Duration::Milliseconds(poll_interval_ms);
}
}
static void backup_poller_shutdown_unref(backup_poller* p) {
if (gpr_unref(&p->shutdown_refs)) {
grpc_pollset_destroy(p->pollset);
gpr_free(p->pollset);
gpr_free(p);
}
}
static void done_poller(void* arg, grpc_error_handle /*error*/) {
backup_poller_shutdown_unref(static_cast<backup_poller*>(arg));
}
static void g_poller_unref() {
gpr_mu_lock(&g_poller_mu);
if (gpr_unref(&g_poller->refs)) {
backup_poller* p = g_poller;
g_poller = nullptr;
gpr_mu_unlock(&g_poller_mu);
gpr_mu_lock(p->pollset_mu);
p->shutting_down = true;
grpc_pollset_shutdown(
p->pollset, GRPC_CLOSURE_INIT(&p->shutdown_closure, done_poller, p,
grpc_schedule_on_exec_ctx));
gpr_mu_unlock(p->pollset_mu);
grpc_timer_cancel(&p->polling_timer);
backup_poller_shutdown_unref(p);
} else {
gpr_mu_unlock(&g_poller_mu);
}
}
static void run_poller(void* arg, grpc_error_handle error) {
backup_poller* p = static_cast<backup_poller*>(arg);
if (error != GRPC_ERROR_NONE) {
if (error != GRPC_ERROR_CANCELLED) {
GRPC_LOG_IF_ERROR("run_poller", GRPC_ERROR_REF(error));
}
backup_poller_shutdown_unref(p);
return;
}
gpr_mu_lock(p->pollset_mu);
if (p->shutting_down) {
gpr_mu_unlock(p->pollset_mu);
backup_poller_shutdown_unref(p);
return;
}
grpc_error_handle err =
grpc_pollset_work(p->pollset, nullptr, grpc_core::ExecCtx::Get()->Now());
gpr_mu_unlock(p->pollset_mu);
GRPC_LOG_IF_ERROR("Run client channel backup poller", err);
grpc_timer_init(&p->polling_timer,
grpc_core::ExecCtx::Get()->Now() + g_poll_interval,
&p->run_poller_closure);
}
static void g_poller_init_locked() {
if (g_poller == nullptr) {
g_poller = grpc_core::Zalloc<backup_poller>();
g_poller->pollset =
static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
g_poller->shutting_down = false;
grpc_pollset_init(g_poller->pollset, &g_poller->pollset_mu);
gpr_ref_init(&g_poller->refs, 0);
// one for timer cancellation, one for pollset shutdown, one for g_poller
gpr_ref_init(&g_poller->shutdown_refs, 3);
GRPC_CLOSURE_INIT(&g_poller->run_poller_closure, run_poller, g_poller,
grpc_schedule_on_exec_ctx);
grpc_timer_init(&g_poller->polling_timer,
grpc_core::ExecCtx::Get()->Now() + g_poll_interval,
&g_poller->run_poller_closure);
}
}
void grpc_client_channel_start_backup_polling(
grpc_pollset_set* interested_parties) {
if (g_poll_interval == grpc_core::Duration::Zero() ||
grpc_iomgr_run_in_background()) {
return;
}
gpr_mu_lock(&g_poller_mu);
g_poller_init_locked();
gpr_ref(&g_poller->refs);
/* Get a reference to g_poller->pollset before releasing g_poller_mu to make
* TSAN happy. Otherwise, reading from g_poller (i.e g_poller->pollset) after
* releasing the lock and setting g_poller to NULL in g_poller_unref() is
* being flagged as a data-race by TSAN */
grpc_pollset* pollset = g_poller->pollset;
gpr_mu_unlock(&g_poller_mu);
grpc_pollset_set_add_pollset(interested_parties, pollset);
}
void grpc_client_channel_stop_backup_polling(
grpc_pollset_set* interested_parties) {
if (g_poll_interval == grpc_core::Duration::Zero() ||
grpc_iomgr_run_in_background()) {
return;
}
grpc_pollset_set_del_pollset(interested_parties, g_poller->pollset);
g_poller_unref();
}
|
grpc/grpc
|
src/core/ext/filters/client_channel/backup_poller.cc
|
C++
|
apache-2.0
| 6,614 | 34.368984 | 79 | 0.675083 | false |
#!/usr/bin/env python
# Copyright 2014-2017, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import shutil
import tempfile
import yaml
__doc__ = """Script usage.
Export the environment variable "ROLE_REQUIREMENTS_FILE" with the path to a
known ansible role requirements file. The items within the file will be read,
and any item whose "scm" setting is "git" will be cloned so that the latest
"SHA" of the tracked branch can be extracted and recorded as its version. By
default this script will assume the repo within the role requirements file is
tracking the "master" branch; however, if a different branch is desired, set
the "branch" option in the role requirements file to whatever branch the
system should track.
Example entry:
- name: role-name
scm: git
src: https://github.com/rcbops/role-name.git
version: XXXX
Example entry setting the tracking branch:
- name: role-name
scm: git
src: https://github.com/rcbops/role-name.git
version: XXXX
branch: stable
"""
class TempDirMake(object):
def __init__(self):
"""Create a temp workspace and cleanup on exit.
This class creates a context manager which makes a temp workspace and
cleans up when the context manager exits.
Entering the context manager returns the temp workspace path as a
string.
"""
self.temp_dir = None
self.cwd = None
def __enter__(self):
self.temp_dir = tempfile.mkdtemp()
self.cwd = os.getcwd()
os.chdir(self.temp_dir)
return self.temp_dir
def __exit__(self, exc_type, exc_val, exc_tb):
shutil.rmtree(self.temp_dir)
os.chdir(self.cwd)
# Read the file contents
requirements_file = os.environ['ROLE_REQUIREMENTS_FILE']
with open(requirements_file) as f:
release_file_content = yaml.safe_load(f.read())
with TempDirMake() as mkd:
for item in release_file_content:
        if item['scm'] != 'git':
            continue
branch = item.get('branch', 'master')
src = item['src']
subprocess.call(
["git", "clone", src],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE
)
os.chdir(os.path.basename(src.split('.git')[0]))
subprocess.call(
["git", "checkout", branch],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE
)
if item['version'] != 'master':
p = subprocess.check_output(
['git', 'log', '-n', '1', '--format=%H'])
item['version'] = p.strip()
else:
item['version'] = 'master'
with open(requirements_file, 'w') as f:
f.write(
yaml.safe_dump(
release_file_content, default_flow_style=False, width=1000))
|
rcbops/rpc-openstack
|
gating/update_dependencies/role-requirements-update.py
|
Python
|
apache-2.0
| 3,268 | 28.709091 | 77 | 0.649021 | false |
/*
* Copyright 2015 JBoss Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jbpm.console.ng.wi.backend.server.dd;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import javax.enterprise.context.ApplicationScoped;
import javax.inject.Inject;
import javax.inject.Named;
import org.guvnor.common.services.backend.exceptions.ExceptionUtilities;
import org.guvnor.common.services.shared.message.Level;
import org.guvnor.common.services.shared.metadata.model.Metadata;
import org.guvnor.common.services.shared.metadata.model.Overview;
import org.guvnor.common.services.shared.validation.model.ValidationMessage;
import org.jboss.errai.bus.server.annotations.Service;
import org.jbpm.console.ng.wi.dd.model.DeploymentDescriptorModel;
import org.jbpm.console.ng.wi.dd.model.ItemObjectModel;
import org.jbpm.console.ng.wi.dd.model.Parameter;
import org.jbpm.console.ng.wi.dd.service.DDEditorService;
import org.jbpm.runtime.manager.impl.deploy.DeploymentDescriptorIO;
import org.jbpm.runtime.manager.impl.deploy.DeploymentDescriptorImpl;
import org.jbpm.runtime.manager.impl.deploy.DeploymentDescriptorManager;
import org.kie.internal.runtime.conf.AuditMode;
import org.kie.internal.runtime.conf.DeploymentDescriptor;
import org.kie.internal.runtime.conf.NamedObjectModel;
import org.kie.internal.runtime.conf.ObjectModel;
import org.kie.internal.runtime.conf.PersistenceMode;
import org.kie.internal.runtime.conf.RuntimeStrategy;
import org.kie.workbench.common.services.backend.service.KieService;
import org.uberfire.backend.server.util.Paths;
import org.uberfire.backend.vfs.Path;
import org.uberfire.io.IOService;
import org.uberfire.java.nio.base.options.CommentedOption;
@Service
@ApplicationScoped
public class DDEditorServiceImpl
extends KieService<DeploymentDescriptorModel>
implements DDEditorService {
@Inject
@Named("ioStrategy")
private IOService ioService;
@Inject
private DDConfigUpdaterHelper configUpdaterHelper;
@Override
public DeploymentDescriptorModel load(Path path) {
return super.loadContent(path);
}
@Override
protected DeploymentDescriptorModel constructContent(Path path, Overview overview) {
InputStream input = ioService.newInputStream(Paths.convert(path));
org.kie.internal.runtime.conf.DeploymentDescriptor originDD = DeploymentDescriptorIO.fromXml(input);
DeploymentDescriptorModel ddModel = marshal(originDD);
ddModel.setOverview(overview);
return ddModel;
}
@Override
public Path save(Path path, DeploymentDescriptorModel content, Metadata metadata, String comment) {
try {
save( path, content, metadata, makeCommentedOption( comment ) );
return path;
} catch (Exception e) {
throw ExceptionUtilities.handleException(e);
}
}
    // Don't expose this method in the service API, in case we want to remove the automatic updates for the descriptor.
public Path save( Path path, DeploymentDescriptorModel content, Metadata metadata, CommentedOption commentedOption ) {
try {
String deploymentContent = unmarshal(path, content).toXml();
Metadata currentMetadata = metadataService.getMetadata( path );
ioService.write(Paths.convert(path),
deploymentContent,
metadataService.setUpAttributes(path,
metadata),
commentedOption);
fireMetadataSocialEvents( path, currentMetadata, metadata );
return path;
} catch (Exception e) {
throw ExceptionUtilities.handleException(e);
}
}
@Override
public List<ValidationMessage> validate(Path path, DeploymentDescriptorModel content) {
final List<ValidationMessage> validationMessages = new ArrayList<ValidationMessage>();
try {
unmarshal(path, content).toXml();
} catch (Exception e) {
final ValidationMessage msg = new ValidationMessage();
msg.setPath(path);
msg.setLevel(Level.ERROR);
msg.setText(e.getMessage());
validationMessages.add(msg);
}
return validationMessages;
}
@Override
public String toSource(Path path, DeploymentDescriptorModel model) {
try {
return unmarshal(path, model).toXml();
} catch ( Exception e ) {
throw ExceptionUtilities.handleException( e );
}
}
// helper methods
private DeploymentDescriptorModel marshal(org.kie.internal.runtime.conf.DeploymentDescriptor originDD) {
DeploymentDescriptorModel ddModel = new DeploymentDescriptorModel();
ddModel.setPersistenceUnitName(originDD.getPersistenceUnit());
ddModel.setAuditPersistenceUnitName(originDD.getAuditPersistenceUnit());
ddModel.setAuditMode(originDD.getAuditMode().toString());
ddModel.setPersistenceMode(originDD.getPersistenceMode().toString());
ddModel.setRuntimeStrategy(originDD.getRuntimeStrategy().toString());
// marshaling strategies
List<ObjectModel> marshallingStrategies = originDD.getMarshallingStrategies();
ddModel.setMarshallingStrategies(processObjectModel(marshallingStrategies));
// event listeners
List<ObjectModel> eventListeners = originDD.getEventListeners();
ddModel.setEventListeners(processObjectModel(eventListeners));
// globals
List<NamedObjectModel> globals = originDD.getGlobals();
ddModel.setGlobals(processNamedObjectModel(globals));
// work item handlers
List<NamedObjectModel> workItemHandlers = originDD.getWorkItemHandlers();
ddModel.setWorkItemHandlers(processNamedObjectModel(workItemHandlers));
// event listeners
List<ObjectModel> taskEventListeners = originDD.getTaskEventListeners();
ddModel.setTaskEventListeners(processObjectModel(taskEventListeners));
// environment entries
List<NamedObjectModel> environmentEntries = originDD.getEnvironmentEntries();
ddModel.setEnvironmentEntries(processNamedObjectModel(environmentEntries));
// configuration
List<NamedObjectModel> configuration = originDD.getConfiguration();
ddModel.setConfiguration(processNamedObjectModel(configuration));
// required roles
ddModel.setRequiredRoles(originDD.getRequiredRoles());
// remoteable classes
ddModel.setRemotableClasses(originDD.getClasses());
return ddModel;
}
private org.kie.internal.runtime.conf.DeploymentDescriptor unmarshal(Path path, DeploymentDescriptorModel model) {
if (model == null) {
return new DeploymentDescriptorManager().getDefaultDescriptor();
}
DeploymentDescriptor updated = new DeploymentDescriptorImpl();
updated.getBuilder()
.persistenceUnit(model.getPersistenceUnitName())
.auditPersistenceUnit(model.getAuditPersistenceUnitName())
.auditMode(AuditMode.valueOf(model.getAuditMode()))
.persistenceMode(PersistenceMode.valueOf(model.getPersistenceMode()))
.runtimeStrategy(RuntimeStrategy.valueOf(model.getRuntimeStrategy()));
// marshalling strategies
List<ItemObjectModel> marshallingStrategies = model.getMarshallingStrategies();
updated.getBuilder().setMarshalingStrategies(processToObjectModel(marshallingStrategies));
// event listeners
List<ItemObjectModel> eventListeners = model.getEventListeners();
updated.getBuilder().setEventListeners(processToObjectModel(eventListeners));
// globals
List<ItemObjectModel> globals = model.getGlobals();
updated.getBuilder().setGlobals(processToNamedObjectModel(globals));
// work item handlers
List<ItemObjectModel> workItemHandlers = model.getWorkItemHandlers();
updated.getBuilder().setWorkItemHandlers(processToNamedObjectModel(workItemHandlers));
// task event listeners
List<ItemObjectModel> taskEventListeners = model.getTaskEventListeners();
updated.getBuilder().setTaskEventListeners(processToObjectModel(taskEventListeners));
// environment entries
List<ItemObjectModel> environmentEntries = model.getEnvironmentEntries();
updated.getBuilder().setEnvironmentEntries(processToNamedObjectModel(environmentEntries));
// configuration
List<ItemObjectModel> configuration = model.getConfiguration();
updated.getBuilder().setConfiguration(processToNamedObjectModel(configuration));
// required roles
updated.getBuilder().setRequiredRoles(model.getRequiredRoles());
// remoteable classes
updated.getBuilder().setClasses(model.getRemotableClasses());
return updated;
}
private List<ItemObjectModel> processNamedObjectModel(List<NamedObjectModel> data) {
List<ItemObjectModel> result = null;
if (data != null) {
result = new ArrayList<ItemObjectModel>();
for (NamedObjectModel orig : data) {
List<Parameter> parameters = collectParameters(orig.getParameters());
result.add(new ItemObjectModel(orig.getName(), orig.getIdentifier(), orig.getResolver(), parameters));
}
}
return result;
}
private List<ItemObjectModel> processObjectModel(List<ObjectModel> data) {
List<ItemObjectModel> result = null;
if (data != null) {
result = new ArrayList<ItemObjectModel>();
for (ObjectModel orig : data) {
List<Parameter> parameters = collectParameters(orig.getParameters());
result.add(new ItemObjectModel(null, orig.getIdentifier(), orig.getResolver(), parameters));
}
}
return result;
}
private List<Parameter> collectParameters(List<Object> parameters) {
List<Parameter> result = null;
if (parameters != null && !parameters.isEmpty()) {
result = new ArrayList<Parameter>();
for (Object param : parameters) {
if (param instanceof ObjectModel) {
ObjectModel model = (ObjectModel) param;
result.add(new Parameter(model.getIdentifier(), model.getParameters().get(0).toString()));
}
}
}
return result;
}
private List<ObjectModel> processToObjectModel(List<ItemObjectModel> data) {
List<ObjectModel> result = null;
if (data != null) {
result = new ArrayList<ObjectModel>();
for (ItemObjectModel item : data) {
ObjectModel ms = new ObjectModel(item.getResolver(), item.getValue());
if (item.getParameters() != null) {
for (Parameter param : item.getParameters()) {
ObjectModel p = new ObjectModel(item.getResolver(), param.getType(), param.getValue().trim());
ms.addParameter(p);
}
}
result.add(ms);
}
}
return result;
}
private List<NamedObjectModel> processToNamedObjectModel(List<ItemObjectModel> data) {
List<NamedObjectModel> result = null;
if (data != null) {
result = new ArrayList<NamedObjectModel>();
for (ItemObjectModel item : data) {
NamedObjectModel ms = new NamedObjectModel(item.getResolver(), item.getName(), item.getValue());
if (item.getParameters() != null) {
for (Parameter param : item.getParameters()) {
ObjectModel p = new ObjectModel(item.getResolver(), param.getType(), param.getValue().trim());
ms.addParameter(p);
}
}
result.add(ms);
}
}
return result;
}
@Override
public void createIfNotExists(Path path) {
org.uberfire.java.nio.file.Path converted = Paths.convert(path);
if (!ioService.exists(converted)) {
// create descriptor
DeploymentDescriptor dd = new DeploymentDescriptorManager( "org.jbpm.domain" ).getDefaultDescriptor();
if ( configUpdaterHelper.hasPersistenceFile( path ) ) {
//if current project has a persistence.xml file configured add the JPAMarshalling strategy.
configUpdaterHelper.addJPAMarshallingStrategy( dd, path );
}
String xmlDescriptor = dd.toXml();
ioService.write( converted, xmlDescriptor );
}
}
}
|
emilianoandre/jbpm-console-ng
|
jbpm-console-ng-workbench-integration/jbpm-console-ng-workbench-integration-backend/src/main/java/org/jbpm/console/ng/wi/backend/server/dd/DDEditorServiceImpl.java
|
Java
|
apache-2.0
| 13,292 | 38.094118 | 123 | 0.67582 | false |
package com.haskforce.parsing.srcExtsDatatypes;
import java.util.Arrays;
/**
* PXETag l (XName l) [PXAttr l] (Maybe (Pat l))
*/
public class PXETag extends PatTopType {
public SrcInfoSpan srcInfoSpan;
public XNameTopType xName;
public PXAttr[] pxAttrs;
public PatTopType patMaybe;
@Override
public String toString() {
return "PXETag{" +
"patMaybe=" + patMaybe +
", pxAttrs=" + Arrays.toString(pxAttrs) +
", xName=" + xName +
'}';
}
}
|
charleso/intellij-haskforce
|
src/com/haskforce/parsing/srcExtsDatatypes/PXETag.java
|
Java
|
apache-2.0
| 539 | 23.5 | 57 | 0.580705 | false |
# Encoding: utf-8
#
# Copyright:: Copyright 2010, Google Inc. All Rights Reserved.
#
# License:: Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code template for registry generation for Savon backend
require 'savon'
require 'ads_common/build/savon_abstract_generator'
require 'pp'
module AdsCommon
module Build
class SavonRegistryGenerator < SavonAbstractGenerator
REGISTRY_TEMPLATE = %q{<% %>
# Encoding: utf-8
#
# This is auto-generated code, changes will be overwritten.
#
# Copyright:: Copyright 2015, Google Inc. All Rights Reserved.
# License:: Licensed under the Apache License, Version 2.0.
#
# <%= @generator_stamp %>
require '<%= @api_name.snakecase %>/errors'
<%= @modules_open_string %>
class <%= @service_name %>Registry
<%= @service_name.upcase %>_METHODS = <%= format_signature(@methods) %>
<%= @service_name.upcase %>_TYPES = <%= format_signature(@types) %>
<%= @service_name.upcase %>_NAMESPACES = <%= format_array(@namespaces) %>
def self.get_method_signature(method_name)
return <%= @service_name.upcase %>_METHODS[method_name.to_sym]
end
def self.get_type_signature(type_name)
return <%= @service_name.upcase %>_TYPES[type_name.to_sym]
end
def self.get_namespace(index)
return <%= @service_name.upcase %>_NAMESPACES[index]
end
end
<% @exceptions.each do |exception| %>
<% array_fields = [] %>
<% doc_lines = format_doc(exception[:doc]) %>
<% doc_lines.each do |doc_line| %>
# <%= doc_line %>
<% end %>
<% base_text = (exception[:base].nil?) ? @default_exception_base : exception[:base] %>
class <%= exception[:name] %> < <%= base_text %>
<% exception[:fields].each do |field| %>
attr_reader :<%= field[:name] %> # <%= field[:type] %>
<% is_array_field = (field[:max_occurs].nil?) ? false :
((field[:max_occurs] == :unbounded) || (field[:max_occurs] > 1)) %>
<% array_fields << field[:name] if is_array_field %>
<% end %>
<% if !(array_fields.empty?) %>
def initialize(exception_fault)
@array_fields ||= []
<% array_fields.each do |field| %>
@array_fields << '<%= field.to_s %>'
<% end %>
super(exception_fault, <%= @service_name %>Registry)
end
<% end %>
end
<% end %>
<%= @modules_close_string %>
}.gsub(/^ /, '')
def initialize(args)
super(args)
@exceptions = []
@methods = []
@types = []
@namespaces = []
@default_exception_base = "%s::Errors::ApiException" % @api_name
end
def get_code_template()
REGISTRY_TEMPLATE
end
def add_exceptions(exceptions)
@exceptions += exceptions
end
def add_methods(methods)
@methods += methods
end
def add_types(types)
@types += types
end
def add_namespaces(namespaces)
@namespaces += namespaces
end
private
# Multi-line documentation formatter. Used to format text extracted from
# XML into stripped multi-line text.
def format_doc(doc)
res = []
doc.split(/\n/).each do |line|
line = line.strip();
res << line if !(line.empty?)
end
return res
end
# Prepares a hash string based on array of hashes passed.
def format_signature(objects_array)
objects_hash = get_hash_for_names_array(objects_array)
return PP.singleline_pp(objects_hash, '')
end
# Prepares string representing a simple array.
def format_array(objects_array)
return (objects_array.nil?) ? '[]' : PP.singleline_pp(objects_array, '')
end
# Converts an array of hashes to a hash based on ":name" fields:
# [{:name => 'foo', :data => 'bar'}] => {:foo => {:data => 'bar'}}
def get_hash_for_names_array(input)
return input.inject({}) do |output, e|
key = e[:name].to_sym
output[key] = e.reject {|k, v| k.equal?(:name)}
output
end
end
end
end
end
|
aditya01933/google-api-ads-ruby
|
ads_common/lib/ads_common/build/savon_registry_generator.rb
|
Ruby
|
apache-2.0
| 4,940 | 31.5 | 96 | 0.554251 | false |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <thrift/Thrift.h>
#include <thrift/TToString.h>
#include <cstring>
#include <cstdlib>
#include <stdarg.h>
#include <stdio.h>
namespace apache {
namespace thrift {
THRIFT_EXPORT TOutput GlobalOutput;
TOutput::TOutput() : f_(&errorTimeWrapper) {}
void TOutput::printf(const char* message, ...) {
#ifndef THRIFT_SQUELCH_CONSOLE_OUTPUT
// Try to reduce heap usage, even if printf is called rarely.
static const int STACK_BUF_SIZE = 256;
char stack_buf[STACK_BUF_SIZE];
va_list ap;
#ifdef _MSC_VER
va_start(ap, message);
int need = _vscprintf(message, ap);
va_end(ap);
if (need < STACK_BUF_SIZE) {
va_start(ap, message);
vsnprintf_s(stack_buf, STACK_BUF_SIZE, _TRUNCATE, message, ap);
va_end(ap);
f_(stack_buf);
return;
}
#else
va_start(ap, message);
int need = vsnprintf(stack_buf, STACK_BUF_SIZE, message, ap);
va_end(ap);
if (need < STACK_BUF_SIZE) {
f_(stack_buf);
return;
}
#endif
char* heap_buf = (char*)malloc((need + 1) * sizeof(char));
if (heap_buf == nullptr) {
#ifdef _MSC_VER
va_start(ap, message);
vsnprintf_s(stack_buf, STACK_BUF_SIZE, _TRUNCATE, message, ap);
va_end(ap);
#endif
// Malloc failed. We might as well print the stack buffer.
f_(stack_buf);
return;
}
va_start(ap, message);
int rval = vsnprintf(heap_buf, need + 1, message, ap);
va_end(ap);
// TODO(shigin): inform user
if (rval != -1) {
f_(heap_buf);
}
free(heap_buf);
#endif
}
void TOutput::errorTimeWrapper(const char* msg) {
#ifndef THRIFT_SQUELCH_CONSOLE_OUTPUT
time_t now;
char dbgtime[26];
time(&now);
THRIFT_CTIME_R(&now, dbgtime);
dbgtime[24] = 0;
fprintf(stderr, "Thrift: %s %s\n", dbgtime, msg);
#endif
}
void TOutput::perror(const char* message, int errno_copy) {
std::string out = message + std::string(": ") + strerror_s(errno_copy);
f_(out.c_str());
}
std::string TOutput::strerror_s(int errno_copy) {
#ifndef HAVE_STRERROR_R
return "errno = " + to_string(errno_copy);
#else // HAVE_STRERROR_R
char b_errbuf[1024] = {'\0'};
#ifdef STRERROR_R_CHAR_P
char* b_error = strerror_r(errno_copy, b_errbuf, sizeof(b_errbuf));
#else
char* b_error = b_errbuf;
int rv = strerror_r(errno_copy, b_errbuf, sizeof(b_errbuf));
if (rv == -1) {
    // strerror_r failed; fall back to reporting the raw errno value.
return "XSI-compliant strerror_r() failed with errno = "
+ to_string(errno_copy);
}
#endif
  // Construct the std::string explicitly so the characters are copied while
  // b_error (which may point into the local b_errbuf) is still valid.
return std::string(b_error);
#endif // HAVE_STRERROR_R
}
}
} // apache::thrift
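// Illustrative usage sketch, separate from the library code above: routing a
// formatted message and an errno report through the global TOutput instance.
// It assumes GlobalOutput is declared in thrift/Thrift.h (already included at
// the top of this file) and uses only the printf and perror members defined
// above; the handler defaults to errorTimeWrapper, which writes to stderr.
#include <cerrno>

static void demoGlobalOutput() {
  using apache::thrift::GlobalOutput;
  GlobalOutput.printf("connect to %s:%d failed", "localhost", 9090);
  GlobalOutput.perror("read failed", ECONNRESET);
}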
| eamosov/thrift | lib/cpp/src/thrift/TOutput.cpp | C++ | apache-2.0 | 3,458 | 26.015625 | 73 | 0.672643 | false |
/* Generated by camel build tools - do NOT edit this file! */
package org.apache.camel.component.openstack.swift;
import java.net.URISyntaxException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.camel.spi.EndpointUriFactory;
/**
* Generated by camel build tools - do NOT edit this file!
*/
public class SwiftEndpointUriFactory extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory {
private static final String BASE = ":host";
private static final Set<String> PROPERTY_NAMES;
private static final Set<String> SECRET_PROPERTY_NAMES;
private static final Set<String> MULTI_VALUE_PREFIXES;
static {
Set<String> props = new HashSet<>(10);
props.add("lazyStartProducer");
props.add("password");
props.add("apiVersion");
props.add("domain");
props.add("host");
props.add("project");
props.add("subsystem");
props.add("config");
props.add("operation");
props.add("username");
PROPERTY_NAMES = Collections.unmodifiableSet(props);
Set<String> secretProps = new HashSet<>(2);
secretProps.add("password");
secretProps.add("username");
SECRET_PROPERTY_NAMES = Collections.unmodifiableSet(secretProps);
MULTI_VALUE_PREFIXES = Collections.emptySet();
}
@Override
public boolean isEnabled(String scheme) {
return "openstack-swift".equals(scheme);
}
@Override
public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException {
String syntax = scheme + BASE;
String uri = syntax;
Map<String, Object> copy = new HashMap<>(properties);
uri = buildPathParameter(syntax, uri, "host", null, true, copy);
uri = buildQueryParameters(uri, copy, encode);
return uri;
}
@Override
public Set<String> propertyNames() {
return PROPERTY_NAMES;
}
@Override
public Set<String> secretPropertyNames() {
return SECRET_PROPERTY_NAMES;
}
@Override
public Set<String> multiValuePrefixes() {
return MULTI_VALUE_PREFIXES;
}
@Override
public boolean isLenientProperties() {
return false;
}
}
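// Illustrative, hand-written sketch (not generated code): building an endpoint
// URI from a property map with the factory above. The property values are
// hypothetical, whether a CamelContext must be injected before calling
// buildUri is not covered here, and the printed URI is only the expected shape.
class SwiftEndpointUriFactoryDemo {
    public static void main(String[] args) throws URISyntaxException {
        SwiftEndpointUriFactory factory = new SwiftEndpointUriFactory();
        Map<String, Object> props = new HashMap<>();
        props.put("host", "swift.example.org");  // required path parameter
        props.put("operation", "get");           // optional query parameter
        String uri = factory.buildUri("openstack-swift", props, true);
        System.out.println(uri);  // e.g. "openstack-swift:swift.example.org?operation=get"
    }
}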
| apache/camel | components/camel-openstack/src/generated/java/org/apache/camel/component/openstack/swift/SwiftEndpointUriFactory.java | Java | apache-2.0 | 2,377 | 28.7125 | 137 | 0.664283 | false |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.ankus.mapreduce.algorithms.correlation.stringset;
import java.io.IOException;
import org.ankus.io.TextFourWritableComparable;
import org.ankus.io.TextTwoWritableComparable;
import org.ankus.util.Constants;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.mapreduce.Reducer;
/**
* CalculationStringSetReducer
* @desc
 * Here's a version of the similarity coefficient and distance calculation.
* 1. Hamming distance 2. Edit distance
* @version 0.0.1
* @date : 2013.07.11
* @author Suhyun Jeon
*/
public class CalculationStringSetReducer extends Reducer<TextTwoWritableComparable, TextFourWritableComparable, TextTwoWritableComparable, DoubleWritable> {
private String algorithmOption;
@Override
protected void setup(Context context) throws IOException, InterruptedException {
Configuration configuration = context.getConfiguration();
this.algorithmOption = configuration.get(Constants.ALGORITHM_OPTION);
}
@Override
protected void reduce(TextTwoWritableComparable key, Iterable<TextFourWritableComparable> values, Context context) throws IOException, InterruptedException {
if(algorithmOption.equals(Constants.EDIT_DISTANCE)){
int editDistance = 0;
String wordUserID1 = null;
String wordUserID2 = null;
int[][] matrix = null;
for(TextFourWritableComparable textFourWritableComparable : values) {
wordUserID1 = textFourWritableComparable.getText2().toString();
wordUserID2 = textFourWritableComparable.getText4().toString();
char[] word1CharArray = wordUserID1.toCharArray();
char[] word2CharArray = wordUserID2.toCharArray();
int word1CharArrayLength = word1CharArray.length;
int word2CharArrayLength = word2CharArray.length;
// Degenerate cases
if(wordUserID1.equals(wordUserID2)){
editDistance = 0;
context.write(key, new DoubleWritable(editDistance));
}
if(word1CharArrayLength == 0){
editDistance = word2CharArrayLength;
context.write(key, new DoubleWritable(editDistance));
}
if(word2CharArrayLength == 0){
editDistance = word1CharArrayLength;
context.write(key, new DoubleWritable(editDistance));
}
// Create two work vectors of integer distance
matrix = new int[word1CharArrayLength + 1][word2CharArrayLength + 1];
for(int i=0; i<matrix.length; i++){
for(int j=0; j<matrix[i].length; j++){
// The edit distance between an empty string and the prefixes of word1
if(i == 0)
matrix[i][j] = j;
// The edit distance between an empty string and the prefixes of word2
else if(j == 0)
matrix[i][j] = i;
else {
if(wordUserID1.charAt(i-1) == wordUserID2.charAt(j-1)){
matrix[i][j] = matrix[i-1][j-1];
}else{
// Min of insertion, deletion, replacement
matrix[i][j] = 1 + Math.min(Math.min(matrix[i-1][j-1], matrix[i-1][j]), matrix[i][j-1]);
}
}
}
}
}
            // matrix is only populated when the values iterator was non-empty.
            if (matrix != null) {
                editDistance = matrix[wordUserID1.length()][wordUserID2.length()];
                context.write(key, new DoubleWritable(editDistance));
            }
}else if(algorithmOption.equals(Constants.HAMMING_DISTACNE)){
String wordUserID1 = null;
String wordUserID2 = null;
int hammingDistance = 0;
for(TextFourWritableComparable textFourWritableComparable : values) {
wordUserID1 = textFourWritableComparable.getText2().toString();
wordUserID2 = textFourWritableComparable.getText4().toString();
char[] word1CharArray = wordUserID1.toCharArray();
char[] word2CharArray = wordUserID2.toCharArray();
int word1CharArrayLength = word1CharArray.length;
int word2CharArrayLength = word2CharArray.length;
if(wordUserID1.equals(wordUserID2)) hammingDistance = 0;
// Input words should be of equal length
if(word1CharArrayLength != word2CharArrayLength){
hammingDistance = -1;
}else{
// Both are of the same length
for(int i=0; i<word1CharArrayLength; i++){
if(wordUserID1.charAt(i) == wordUserID2.charAt(i)){
hammingDistance += 0;
}else if(wordUserID1.charAt(i) != wordUserID2.charAt(i)){
hammingDistance += 1;
}
}
}
}
context.write(key, new DoubleWritable(hammingDistance));
}
}
@Override
protected void cleanup(Context context) throws IOException, InterruptedException {
}
}
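// Standalone sketch of the same Levenshtein recurrence used in the
// EDIT_DISTANCE branch above, without the Hadoop plumbing. Class and method
// names here are illustrative only.
class EditDistanceSketch {
    static int editDistance(String a, String b) {
        int[][] m = new int[a.length() + 1][b.length() + 1];
        for (int i = 0; i <= a.length(); i++) {
            for (int j = 0; j <= b.length(); j++) {
                if (i == 0) {
                    m[i][j] = j;                   // build b's prefix by insertions
                } else if (j == 0) {
                    m[i][j] = i;                   // erase a's prefix by deletions
                } else if (a.charAt(i - 1) == b.charAt(j - 1)) {
                    m[i][j] = m[i - 1][j - 1];     // characters match, no extra cost
                } else {
                    // 1 + minimum of replacement, deletion and insertion
                    m[i][j] = 1 + Math.min(Math.min(m[i - 1][j - 1], m[i - 1][j]), m[i][j - 1]);
                }
            }
        }
        return m[a.length()][b.length()];
    }

    public static void main(String[] args) {
        System.out.println(editDistance("kitten", "sitting"));  // prints 3
    }
}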
| Geenie-Lee/ankus | ankus-core/src/main/java/org/ankus/mapreduce/algorithms/correlation/stringset/CalculationStringSetReducer.java | Java | apache-2.0 | 5,961 | 39.557823 | 161 | 0.622882 | false |
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <limits.h> // For LONG_MIN, LONG_MAX.
#if V8_TARGET_ARCH_MIPS64
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/callable.h"
#include "src/code-stubs.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
#include "src/instruction-stream.h"
#include "src/mips64/assembler-mips64-inl.h"
#include "src/mips64/macro-assembler-mips64.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: TurboAssembler(isolate, buffer, size, create_code_object) {}
TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
CodeObjectRequired create_code_object)
: Assembler(isolate, buffer, buffer_size),
isolate_(isolate),
has_double_zero_reg_set_(false) {
if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
Handle<HeapObject>::New(isolate->heap()->undefined_value(), isolate);
}
}
static inline bool IsZero(const Operand& rt) {
if (rt.is_reg()) {
return rt.rm() == zero_reg;
} else {
return rt.immediate() == 0;
}
}
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
Register exclusion3) const {
int bytes = 0;
RegList exclusions = 0;
if (exclusion1 != no_reg) {
exclusions |= exclusion1.bit();
if (exclusion2 != no_reg) {
exclusions |= exclusion2.bit();
if (exclusion3 != no_reg) {
exclusions |= exclusion3.bit();
}
}
}
RegList list = kJSCallerSaved & ~exclusions;
bytes += NumRegs(list) * kPointerSize;
if (fp_mode == kSaveFPRegs) {
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
return bytes;
}
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
RegList exclusions = 0;
if (exclusion1 != no_reg) {
exclusions |= exclusion1.bit();
if (exclusion2 != no_reg) {
exclusions |= exclusion2.bit();
if (exclusion3 != no_reg) {
exclusions |= exclusion3.bit();
}
}
}
RegList list = kJSCallerSaved & ~exclusions;
MultiPush(list);
bytes += NumRegs(list) * kPointerSize;
if (fp_mode == kSaveFPRegs) {
MultiPushFPU(kCallerSavedFPU);
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
return bytes;
}
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
if (fp_mode == kSaveFPRegs) {
MultiPopFPU(kCallerSavedFPU);
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
RegList exclusions = 0;
if (exclusion1 != no_reg) {
exclusions |= exclusion1.bit();
if (exclusion2 != no_reg) {
exclusions |= exclusion2.bit();
if (exclusion3 != no_reg) {
exclusions |= exclusion3.bit();
}
}
}
RegList list = kJSCallerSaved & ~exclusions;
MultiPop(list);
bytes += NumRegs(list) * kPointerSize;
return bytes;
}
void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
Ld(destination, MemOperand(s6, index << kPointerSizeLog2));
}
void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
Condition cond, Register src1,
const Operand& src2) {
Branch(2, NegateCondition(cond), src1, src2);
Ld(destination, MemOperand(s6, index << kPointerSizeLog2));
}
void TurboAssembler::PushCommonFrame(Register marker_reg) {
if (marker_reg.is_valid()) {
Push(ra, fp, marker_reg);
Daddu(fp, sp, Operand(kPointerSize));
} else {
Push(ra, fp);
mov(fp, sp);
}
}
void TurboAssembler::PushStandardFrame(Register function_reg) {
int offset = -StandardFrameConstants::kContextOffset;
if (function_reg.is_valid()) {
Push(ra, fp, cp, function_reg);
offset += kPointerSize;
} else {
Push(ra, fp, cp);
}
Daddu(fp, sp, Operand(offset));
}
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of kNumSafepointRegisters values on the
// stack, so adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
DCHECK_GE(num_unsaved, 0);
if (num_unsaved > 0) {
Dsubu(sp, sp, Operand(num_unsaved * kPointerSize));
}
MultiPush(kSafepointSavedRegisters);
}
void MacroAssembler::PopSafepointRegisters() {
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
MultiPop(kSafepointSavedRegisters);
if (num_unsaved > 0) {
Daddu(sp, sp, Operand(num_unsaved * kPointerSize));
}
}
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// The registers are pushed starting with the highest encoding,
// which means that lowest encodings are closest to the stack pointer.
return kSafepointRegisterStackIndexMap[reg_code];
}
// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
// The register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWriteField(Register object, int offset,
Register value, Register dst,
RAStatus ra_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
DCHECK(!AreAliased(value, dst, t8, object));
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
// Skip barrier if writing a smi.
if (smi_check == INLINE_SMI_CHECK) {
JumpIfSmi(value, &done);
}
// Although the object register is tagged, the offset is relative to the start
  // of the object, so the offset must be a multiple of kPointerSize.
DCHECK(IsAligned(offset, kPointerSize));
Daddu(dst, object, Operand(offset - kHeapObjectTag));
if (emit_debug_code()) {
Label ok;
And(t8, dst, Operand(kPointerSize - 1));
Branch(&ok, eq, t8, Operand(zero_reg));
stop("Unaligned cell in write barrier");
bind(&ok);
}
RecordWrite(object, dst, value, ra_status, save_fp, remembered_set_action,
OMIT_SMI_CHECK);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
}
}
void TurboAssembler::SaveRegisters(RegList registers) {
DCHECK_GT(NumRegs(registers), 0);
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
regs |= Register::from_code(i).bit();
}
}
MultiPush(regs);
}
void TurboAssembler::RestoreRegisters(RegList registers) {
DCHECK_GT(NumRegs(registers), 0);
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
regs |= Register::from_code(i).bit();
}
}
MultiPop(regs);
}
void TurboAssembler::CallRecordWriteStub(
Register object, Register address,
RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
// TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
// i.e. always emit remember set and save FP registers in RecordWriteStub. If
// large performance regression is observed, we should use these values to
// avoid unnecessary work.
Callable const callable =
Builtins::CallableFor(isolate(), Builtins::kRecordWrite);
RegList registers = callable.descriptor().allocatable_registers();
SaveRegisters(registers);
Register object_parameter(callable.descriptor().GetRegisterParameter(
RecordWriteDescriptor::kObject));
Register slot_parameter(
callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
Register isolate_parameter(callable.descriptor().GetRegisterParameter(
RecordWriteDescriptor::kIsolate));
Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
RecordWriteDescriptor::kRememberedSet));
Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
RecordWriteDescriptor::kFPMode));
Push(object);
Push(address);
Pop(slot_parameter);
Pop(object_parameter);
li(isolate_parameter, Operand(ExternalReference::isolate_address(isolate())));
Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreRegisters(registers);
}
// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
// The register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
void MacroAssembler::RecordWrite(Register object, Register address,
Register value, RAStatus ra_status,
SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
DCHECK(!AreAliased(object, address, value, t8));
DCHECK(!AreAliased(object, address, value, t9));
if (emit_debug_code()) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Ld(scratch, MemOperand(address));
Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, scratch,
Operand(value));
}
if (remembered_set_action == OMIT_REMEMBERED_SET &&
!FLAG_incremental_marking) {
return;
}
// First, check if a write barrier is even needed. The tests below
// catch stores of smis and stores into the young generation.
Label done;
if (smi_check == INLINE_SMI_CHECK) {
DCHECK_EQ(0, kSmiTag);
JumpIfSmi(value, &done);
}
CheckPageFlag(value,
value, // Used as scratch.
MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask,
eq,
&done);
// Record the actual write.
if (ra_status == kRAHasNotBeenSaved) {
push(ra);
}
CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
if (ra_status == kRAHasNotBeenSaved) {
pop(ra);
}
bind(&done);
{
// Count number of write barriers in generated code.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
isolate()->counters()->write_barriers_static()->Increment();
IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1,
scratch, value);
}
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
}
}
// ---------------------------------------------------------------------------
// Instruction macros.
void TurboAssembler::Addu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
addu(rd, rs, rt.rm());
} else {
if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode())) {
addiu(rd, rs, static_cast<int32_t>(rt.immediate()));
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
addu(rd, rs, scratch);
}
}
}
void TurboAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
daddu(rd, rs, rt.rm());
} else {
if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode())) {
daddiu(rd, rs, static_cast<int32_t>(rt.immediate()));
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
daddu(rd, rs, scratch);
}
}
}
void TurboAssembler::Subu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
subu(rd, rs, rt.rm());
} else {
DCHECK(is_int32(rt.immediate()));
if (is_int16(-rt.immediate()) && !MustUseReg(rt.rmode())) {
addiu(rd, rs,
static_cast<int32_t>(
-rt.immediate())); // No subiu instr, use addiu(x, y, -imm).
} else {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
if (-rt.immediate() >> 16 == 0 && !MustUseReg(rt.rmode())) {
// Use load -imm and addu when loading -imm generates one instruction.
li(scratch, -rt.immediate());
addu(rd, rs, scratch);
} else {
// li handles the relocation.
li(scratch, rt);
subu(rd, rs, scratch);
}
}
}
}
void TurboAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
dsubu(rd, rs, rt.rm());
} else if (is_int16(-rt.immediate()) && !MustUseReg(rt.rmode())) {
daddiu(rd, rs,
static_cast<int32_t>(
-rt.immediate())); // No dsubiu instr, use daddiu(x, y, -imm).
} else {
DCHECK(rs != at);
int li_count = InstrCountForLi64Bit(rt.immediate());
int li_neg_count = InstrCountForLi64Bit(-rt.immediate());
if (li_neg_count < li_count && !MustUseReg(rt.rmode())) {
// Use load -imm and daddu when loading -imm generates one instruction.
DCHECK(rt.immediate() != std::numeric_limits<int32_t>::min());
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(-rt.immediate()));
Daddu(rd, rs, scratch);
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, rt);
dsubu(rd, rs, scratch);
}
}
}
void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
mul(rd, rs, rt.rm());
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
mul(rd, rs, scratch);
}
}
void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant != kMips64r6) {
mult(rs, rt.rm());
mfhi(rd);
} else {
muh(rd, rs, rt.rm());
}
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
if (kArchVariant != kMips64r6) {
mult(rs, scratch);
mfhi(rd);
} else {
muh(rd, rs, scratch);
}
}
}
void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant != kMips64r6) {
multu(rs, rt.rm());
mfhi(rd);
} else {
muhu(rd, rs, rt.rm());
}
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
if (kArchVariant != kMips64r6) {
multu(rs, scratch);
mfhi(rd);
} else {
muhu(rd, rs, scratch);
}
}
}
void TurboAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant == kMips64r6) {
dmul(rd, rs, rt.rm());
} else {
dmult(rs, rt.rm());
mflo(rd);
}
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
if (kArchVariant == kMips64r6) {
dmul(rd, rs, scratch);
} else {
dmult(rs, scratch);
mflo(rd);
}
}
}
void TurboAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant == kMips64r6) {
dmuh(rd, rs, rt.rm());
} else {
dmult(rs, rt.rm());
mfhi(rd);
}
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
if (kArchVariant == kMips64r6) {
dmuh(rd, rs, scratch);
} else {
dmult(rs, scratch);
mfhi(rd);
}
}
}
void TurboAssembler::Mult(Register rs, const Operand& rt) {
if (rt.is_reg()) {
mult(rs, rt.rm());
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
mult(rs, scratch);
}
}
void TurboAssembler::Dmult(Register rs, const Operand& rt) {
if (rt.is_reg()) {
dmult(rs, rt.rm());
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
dmult(rs, scratch);
}
}
void TurboAssembler::Multu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
multu(rs, rt.rm());
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
multu(rs, scratch);
}
}
void TurboAssembler::Dmultu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
dmultu(rs, rt.rm());
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
dmultu(rs, scratch);
}
}
void TurboAssembler::Div(Register rs, const Operand& rt) {
if (rt.is_reg()) {
div(rs, rt.rm());
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
div(rs, scratch);
}
}
void TurboAssembler::Div(Register res, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant != kMips64r6) {
div(rs, rt.rm());
mflo(res);
} else {
div(res, rs, rt.rm());
}
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
if (kArchVariant != kMips64r6) {
div(rs, scratch);
mflo(res);
} else {
div(res, rs, scratch);
}
}
}
void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant != kMips64r6) {
div(rs, rt.rm());
mfhi(rd);
} else {
mod(rd, rs, rt.rm());
}
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
if (kArchVariant != kMips64r6) {
div(rs, scratch);
mfhi(rd);
} else {
mod(rd, rs, scratch);
}
}
}
void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant != kMips64r6) {
divu(rs, rt.rm());
mfhi(rd);
} else {
modu(rd, rs, rt.rm());
}
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
if (kArchVariant != kMips64r6) {
divu(rs, scratch);
mfhi(rd);
} else {
modu(rd, rs, scratch);
}
}
}
void TurboAssembler::Ddiv(Register rs, const Operand& rt) {
if (rt.is_reg()) {
ddiv(rs, rt.rm());
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
ddiv(rs, scratch);
}
}
void TurboAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
if (kArchVariant != kMips64r6) {
if (rt.is_reg()) {
ddiv(rs, rt.rm());
mflo(rd);
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
ddiv(rs, scratch);
mflo(rd);
}
} else {
if (rt.is_reg()) {
ddiv(rd, rs, rt.rm());
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
ddiv(rd, rs, scratch);
}
}
}
void TurboAssembler::Divu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
divu(rs, rt.rm());
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
divu(rs, scratch);
}
}
void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant != kMips64r6) {
divu(rs, rt.rm());
mflo(res);
} else {
divu(res, rs, rt.rm());
}
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
if (kArchVariant != kMips64r6) {
divu(rs, scratch);
mflo(res);
} else {
divu(res, rs, scratch);
}
}
}
void TurboAssembler::Ddivu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
ddivu(rs, rt.rm());
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
ddivu(rs, scratch);
}
}
void TurboAssembler::Ddivu(Register res, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant != kMips64r6) {
ddivu(rs, rt.rm());
mflo(res);
} else {
ddivu(res, rs, rt.rm());
}
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
if (kArchVariant != kMips64r6) {
ddivu(rs, scratch);
mflo(res);
} else {
ddivu(res, rs, scratch);
}
}
}
void TurboAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
if (kArchVariant != kMips64r6) {
if (rt.is_reg()) {
ddiv(rs, rt.rm());
mfhi(rd);
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
ddiv(rs, scratch);
mfhi(rd);
}
} else {
if (rt.is_reg()) {
dmod(rd, rs, rt.rm());
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
dmod(rd, rs, scratch);
}
}
}
void TurboAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
if (kArchVariant != kMips64r6) {
if (rt.is_reg()) {
ddivu(rs, rt.rm());
mfhi(rd);
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
ddivu(rs, scratch);
mfhi(rd);
}
} else {
if (rt.is_reg()) {
dmodu(rd, rs, rt.rm());
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
dmodu(rd, rs, scratch);
}
}
}
void TurboAssembler::And(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
and_(rd, rs, rt.rm());
} else {
if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) {
andi(rd, rs, static_cast<int32_t>(rt.immediate()));
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
and_(rd, rs, scratch);
}
}
}
void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
or_(rd, rs, rt.rm());
} else {
if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) {
ori(rd, rs, static_cast<int32_t>(rt.immediate()));
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
or_(rd, rs, scratch);
}
}
}
void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
xor_(rd, rs, rt.rm());
} else {
if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) {
xori(rd, rs, static_cast<int32_t>(rt.immediate()));
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
xor_(rd, rs, scratch);
}
}
}
void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
nor(rd, rs, rt.rm());
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(rs != scratch);
li(scratch, rt);
nor(rd, rs, scratch);
}
}
void TurboAssembler::Neg(Register rs, const Operand& rt) {
dsubu(rs, zero_reg, rt.rm());
}
void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
slt(rd, rs, rt.rm());
} else {
if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode())) {
slti(rd, rs, static_cast<int32_t>(rt.immediate()));
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
DCHECK(rs != scratch);
li(scratch, rt);
slt(rd, rs, scratch);
}
}
}
void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sltu(rd, rs, rt.rm());
} else {
const uint64_t int16_min = std::numeric_limits<int16_t>::min();
if (is_uint15(rt.immediate()) && !MustUseReg(rt.rmode())) {
// Imm range is: [0, 32767].
sltiu(rd, rs, static_cast<int32_t>(rt.immediate()));
} else if (is_uint15(rt.immediate() - int16_min) &&
!MustUseReg(rt.rmode())) {
// Imm range is: [max_unsigned-32767,max_unsigned].
sltiu(rd, rs, static_cast<uint16_t>(rt.immediate()));
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
DCHECK(rs != scratch);
li(scratch, rt);
sltu(rd, rs, scratch);
}
}
}
void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
slt(rd, rt.rm(), rs);
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
DCHECK(rs != scratch);
li(scratch, rt);
slt(rd, scratch, rs);
}
xori(rd, rd, 1);
}
void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sltu(rd, rt.rm(), rs);
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
DCHECK(rs != scratch);
li(scratch, rt);
sltu(rd, scratch, rs);
}
xori(rd, rd, 1);
}
void TurboAssembler::Sge(Register rd, Register rs, const Operand& rt) {
Slt(rd, rs, rt);
xori(rd, rd, 1);
}
void TurboAssembler::Sgeu(Register rd, Register rs, const Operand& rt) {
Sltu(rd, rs, rt);
xori(rd, rd, 1);
}
void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
slt(rd, rt.rm(), rs);
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
DCHECK(rs != scratch);
li(scratch, rt);
slt(rd, scratch, rs);
}
}
void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sltu(rd, rt.rm(), rs);
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
DCHECK(rs != scratch);
li(scratch, rt);
sltu(rd, scratch, rs);
}
}
void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
rotrv(rd, rs, rt.rm());
} else {
int64_t ror_value = rt.immediate() % 32;
if (ror_value < 0) {
ror_value += 32;
}
rotr(rd, rs, ror_value);
}
}
void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
drotrv(rd, rs, rt.rm());
} else {
int64_t dror_value = rt.immediate() % 64;
if (dror_value < 0) dror_value += 64;
if (dror_value <= 31) {
drotr(rd, rs, dror_value);
} else {
drotr32(rd, rs, dror_value - 32);
}
}
}
void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
pref(hint, rs);
}
void TurboAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
Register scratch) {
DCHECK(sa >= 1 && sa <= 31);
if (kArchVariant == kMips64r6 && sa <= 4) {
lsa(rd, rt, rs, sa - 1);
} else {
Register tmp = rd == rt ? scratch : rd;
DCHECK(tmp != rt);
sll(tmp, rs, sa);
Addu(rd, rt, tmp);
}
}
void TurboAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
Register scratch) {
DCHECK(sa >= 1 && sa <= 31);
if (kArchVariant == kMips64r6 && sa <= 4) {
dlsa(rd, rt, rs, sa - 1);
} else {
Register tmp = rd == rt ? scratch : rd;
DCHECK(tmp != rt);
dsll(tmp, rs, sa);
Daddu(rd, rt, tmp);
}
}
void TurboAssembler::Bovc(Register rs, Register rt, Label* L) {
if (is_trampoline_emitted()) {
Label skip;
bnvc(rs, rt, &skip);
BranchLong(L, PROTECT);
bind(&skip);
} else {
bovc(rs, rt, L);
}
}
void TurboAssembler::Bnvc(Register rs, Register rt, Label* L) {
if (is_trampoline_emitted()) {
Label skip;
bovc(rs, rt, &skip);
BranchLong(L, PROTECT);
bind(&skip);
} else {
bnvc(rs, rt, L);
}
}
// ------------Pseudo-instructions-------------
// Change endianness
void TurboAssembler::ByteSwapSigned(Register dest, Register src,
int operand_size) {
DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4 ||
operand_size == 8);
DCHECK(kArchVariant == kMips64r6 || kArchVariant == kMips64r2);
if (operand_size == 1) {
seb(src, src);
sll(src, src, 0);
dsbh(dest, src);
dshd(dest, dest);
} else if (operand_size == 2) {
seh(src, src);
sll(src, src, 0);
dsbh(dest, src);
dshd(dest, dest);
} else if (operand_size == 4) {
sll(src, src, 0);
dsbh(dest, src);
dshd(dest, dest);
} else {
dsbh(dest, src);
dshd(dest, dest);
}
}
void TurboAssembler::ByteSwapUnsigned(Register dest, Register src,
int operand_size) {
DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4);
if (operand_size == 1) {
andi(src, src, 0xFF);
dsbh(dest, src);
dshd(dest, dest);
} else if (operand_size == 2) {
andi(src, src, 0xFFFF);
dsbh(dest, src);
dshd(dest, dest);
} else {
dsll32(src, src, 0);
dsrl32(src, src, 0);
dsbh(dest, src);
dshd(dest, dest);
}
}
void TurboAssembler::Ulw(Register rd, const MemOperand& rs) {
DCHECK(rd != at);
DCHECK(rs.rm() != at);
if (kArchVariant == kMips64r6) {
Lw(rd, rs);
} else {
DCHECK_EQ(kArchVariant, kMips64r2);
DCHECK(kMipsLwrOffset <= 3 && kMipsLwlOffset <= 3);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 3 fits into int16_t.
AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3);
if (rd != source.rm()) {
lwr(rd, MemOperand(source.rm(), source.offset() + kMipsLwrOffset));
lwl(rd, MemOperand(source.rm(), source.offset() + kMipsLwlOffset));
} else {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
lwr(scratch, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
lwl(scratch, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
mov(rd, scratch);
}
}
}
void TurboAssembler::Ulwu(Register rd, const MemOperand& rs) {
if (kArchVariant == kMips64r6) {
Lwu(rd, rs);
} else {
DCHECK_EQ(kArchVariant, kMips64r2);
Ulw(rd, rs);
Dext(rd, rd, 0, 32);
}
}
void TurboAssembler::Usw(Register rd, const MemOperand& rs) {
DCHECK(rd != at);
DCHECK(rs.rm() != at);
DCHECK(rd != rs.rm());
if (kArchVariant == kMips64r6) {
Sw(rd, rs);
} else {
DCHECK_EQ(kArchVariant, kMips64r2);
DCHECK(kMipsSwrOffset <= 3 && kMipsSwlOffset <= 3);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 3 fits into int16_t.
AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 3);
swr(rd, MemOperand(source.rm(), source.offset() + kMipsSwrOffset));
swl(rd, MemOperand(source.rm(), source.offset() + kMipsSwlOffset));
}
}
void TurboAssembler::Ulh(Register rd, const MemOperand& rs) {
DCHECK(rd != at);
DCHECK(rs.rm() != at);
if (kArchVariant == kMips64r6) {
Lh(rd, rs);
} else {
DCHECK_EQ(kArchVariant, kMips64r2);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 1 fits into int16_t.
AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
if (source.rm() == scratch) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
Lb(rd, MemOperand(source.rm(), source.offset() + 1));
Lbu(scratch, source);
#elif defined(V8_TARGET_BIG_ENDIAN)
Lb(rd, source);
Lbu(scratch, MemOperand(source.rm(), source.offset() + 1));
#endif
} else {
#if defined(V8_TARGET_LITTLE_ENDIAN)
Lbu(scratch, source);
Lb(rd, MemOperand(source.rm(), source.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
Lbu(scratch, MemOperand(source.rm(), source.offset() + 1));
Lb(rd, source);
#endif
}
dsll(rd, rd, 8);
or_(rd, rd, scratch);
}
}
void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) {
DCHECK(rd != at);
DCHECK(rs.rm() != at);
if (kArchVariant == kMips64r6) {
Lhu(rd, rs);
} else {
DCHECK_EQ(kArchVariant, kMips64r2);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 1 fits into int16_t.
AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
if (source.rm() == scratch) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
Lbu(rd, MemOperand(source.rm(), source.offset() + 1));
Lbu(scratch, source);
#elif defined(V8_TARGET_BIG_ENDIAN)
Lbu(rd, source);
Lbu(scratch, MemOperand(source.rm(), source.offset() + 1));
#endif
} else {
#if defined(V8_TARGET_LITTLE_ENDIAN)
Lbu(scratch, source);
Lbu(rd, MemOperand(source.rm(), source.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
Lbu(scratch, MemOperand(source.rm(), source.offset() + 1));
Lbu(rd, source);
#endif
}
dsll(rd, rd, 8);
or_(rd, rd, scratch);
}
}
void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
DCHECK(rd != at);
DCHECK(rs.rm() != at);
DCHECK(rs.rm() != scratch);
DCHECK(scratch != at);
if (kArchVariant == kMips64r6) {
Sh(rd, rs);
} else {
DCHECK_EQ(kArchVariant, kMips64r2);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 1 fits into int16_t.
AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 1);
if (scratch != rd) {
mov(scratch, rd);
}
#if defined(V8_TARGET_LITTLE_ENDIAN)
Sb(scratch, source);
srl(scratch, scratch, 8);
Sb(scratch, MemOperand(source.rm(), source.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
Sb(scratch, MemOperand(source.rm(), source.offset() + 1));
srl(scratch, scratch, 8);
Sb(scratch, source);
#endif
}
}
void TurboAssembler::Uld(Register rd, const MemOperand& rs) {
DCHECK(rd != at);
DCHECK(rs.rm() != at);
if (kArchVariant == kMips64r6) {
Ld(rd, rs);
} else {
DCHECK_EQ(kArchVariant, kMips64r2);
DCHECK(kMipsLdrOffset <= 7 && kMipsLdlOffset <= 7);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 7 fits into int16_t.
AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 7);
if (rd != source.rm()) {
ldr(rd, MemOperand(source.rm(), source.offset() + kMipsLdrOffset));
ldl(rd, MemOperand(source.rm(), source.offset() + kMipsLdlOffset));
} else {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
ldr(scratch, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
ldl(scratch, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
mov(rd, scratch);
}
}
}
// Load a consecutive 32-bit word pair into a 64-bit register, putting the
// first word in the low bits and the second word in the high bits.
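// For example, if the 32-bit word at rs holds 0x11223344 and the word at
// rs + 4 holds 0x55667788, rd ends up holding 0x5566778811223344.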
void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
Register scratch) {
Lwu(rd, rs);
Lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
dsll32(scratch, scratch, 0);
Daddu(rd, rd, scratch);
}
void TurboAssembler::Usd(Register rd, const MemOperand& rs) {
DCHECK(rd != at);
DCHECK(rs.rm() != at);
if (kArchVariant == kMips64r6) {
Sd(rd, rs);
} else {
DCHECK_EQ(kArchVariant, kMips64r2);
DCHECK(kMipsSdrOffset <= 7 && kMipsSdlOffset <= 7);
MemOperand source = rs;
// Adjust offset for two accesses and check if offset + 7 fits into int16_t.
AdjustBaseAndOffset(source, OffsetAccessType::TWO_ACCESSES, 7);
sdr(rd, MemOperand(source.rm(), source.offset() + kMipsSdrOffset));
sdl(rd, MemOperand(source.rm(), source.offset() + kMipsSdlOffset));
}
}
// Do a 64-bit store as two consecutive 32-bit stores to an unaligned address.
void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
Register scratch) {
Sw(rd, rs);
dsrl32(scratch, rd, 0);
Sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
}
void TurboAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
if (kArchVariant == kMips64r6) {
Lwc1(fd, rs);
} else {
DCHECK_EQ(kArchVariant, kMips64r2);
Ulw(scratch, rs);
mtc1(scratch, fd);
}
}
void TurboAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
if (kArchVariant == kMips64r6) {
Swc1(fd, rs);
} else {
DCHECK_EQ(kArchVariant, kMips64r2);
mfc1(scratch, fd);
Usw(scratch, rs);
}
}
void TurboAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK(scratch != at);
if (kArchVariant == kMips64r6) {
Ldc1(fd, rs);
} else {
DCHECK_EQ(kArchVariant, kMips64r2);
Uld(scratch, rs);
dmtc1(scratch, fd);
}
}
void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK(scratch != at);
if (kArchVariant == kMips64r6) {
Sdc1(fd, rs);
} else {
DCHECK_EQ(kArchVariant, kMips64r2);
dmfc1(scratch, fd);
Usd(scratch, rs);
}
}
void TurboAssembler::Lb(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(source);
lb(rd, source);
}
void TurboAssembler::Lbu(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(source);
lbu(rd, source);
}
void TurboAssembler::Sb(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(source);
sb(rd, source);
}
void TurboAssembler::Lh(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(source);
lh(rd, source);
}
void TurboAssembler::Lhu(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(source);
lhu(rd, source);
}
void TurboAssembler::Sh(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(source);
sh(rd, source);
}
void TurboAssembler::Lw(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(source);
lw(rd, source);
}
void TurboAssembler::Lwu(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(source);
lwu(rd, source);
}
void TurboAssembler::Sw(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(source);
sw(rd, source);
}
void TurboAssembler::Ld(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(source);
ld(rd, source);
}
void TurboAssembler::Sd(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(source);
sd(rd, source);
}
void TurboAssembler::Lwc1(FPURegister fd, const MemOperand& src) {
MemOperand tmp = src;
AdjustBaseAndOffset(tmp);
lwc1(fd, tmp);
}
void TurboAssembler::Swc1(FPURegister fs, const MemOperand& src) {
MemOperand tmp = src;
AdjustBaseAndOffset(tmp);
swc1(fs, tmp);
}
void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
MemOperand tmp = src;
AdjustBaseAndOffset(tmp);
ldc1(fd, tmp);
}
void TurboAssembler::Sdc1(FPURegister fs, const MemOperand& src) {
MemOperand tmp = src;
AdjustBaseAndOffset(tmp);
sdc1(fs, tmp);
}
void TurboAssembler::Ll(Register rd, const MemOperand& rs) {
bool is_one_instruction = (kArchVariant == kMips64r6) ? is_int9(rs.offset())
: is_int16(rs.offset());
if (is_one_instruction) {
ll(rd, rs);
} else {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, rs.offset());
daddu(scratch, scratch, rs.rm());
ll(rd, MemOperand(scratch, 0));
}
}
void TurboAssembler::Lld(Register rd, const MemOperand& rs) {
bool is_one_instruction = (kArchVariant == kMips64r6) ? is_int9(rs.offset())
: is_int16(rs.offset());
if (is_one_instruction) {
lld(rd, rs);
} else {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, rs.offset());
daddu(scratch, scratch, rs.rm());
lld(rd, MemOperand(scratch, 0));
}
}
void TurboAssembler::Sc(Register rd, const MemOperand& rs) {
bool is_one_instruction = (kArchVariant == kMips64r6) ? is_int9(rs.offset())
: is_int16(rs.offset());
if (is_one_instruction) {
sc(rd, rs);
} else {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, rs.offset());
daddu(scratch, scratch, rs.rm());
sc(rd, MemOperand(scratch, 0));
}
}
void TurboAssembler::Scd(Register rd, const MemOperand& rs) {
bool is_one_instruction = (kArchVariant == kMips64r6) ? is_int9(rs.offset())
: is_int16(rs.offset());
if (is_one_instruction) {
scd(rd, rs);
} else {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, rs.offset());
daddu(scratch, scratch, rs.rm());
scd(rd, MemOperand(scratch, 0));
}
}
void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
li(dst, Operand(value), mode);
}
static inline int InstrCountForLiLower32Bit(int64_t value) {
if (!is_int16(static_cast<int32_t>(value)) && (value & kUpper16MaskOf64) &&
(value & kImm16Mask)) {
return 2;
} else {
return 1;
}
}
void TurboAssembler::LiLower32BitHelper(Register rd, Operand j) {
if (is_int16(static_cast<int32_t>(j.immediate()))) {
daddiu(rd, zero_reg, (j.immediate() & kImm16Mask));
} else if (!(j.immediate() & kUpper16MaskOf64)) {
ori(rd, zero_reg, j.immediate() & kImm16Mask);
} else {
lui(rd, j.immediate() >> kLuiShift & kImm16Mask);
if (j.immediate() & kImm16Mask) {
ori(rd, rd, j.immediate() & kImm16Mask);
}
}
}
static inline int InstrCountForLoadReplicatedConst32(int64_t value) {
uint32_t x = static_cast<uint32_t>(value);
uint32_t y = static_cast<uint32_t>(value >> 32);
if (x == y) {
return (is_uint16(x) || is_int16(x) || (x & kImm16Mask) == 0) ? 2 : 3;
}
return INT_MAX;
}
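// Worked example for InstrCountForLoadReplicatedConst32: for the value
// 0x1234000012340000 both 32-bit halves are 0x12340000 and the low 16 bits are
// zero, so the count is 2 (one lui to build the low word, then a Dins to
// replicate it into the upper word).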
int TurboAssembler::InstrCountForLi64Bit(int64_t value) {
if (is_int32(value)) {
return InstrCountForLiLower32Bit(value);
} else {
int bit31 = value >> 31 & 0x1;
if ((value & kUpper16MaskOf64) == 0 && is_int16(value >> 32) &&
kArchVariant == kMips64r6) {
return 2;
} else if ((value & (kHigher16MaskOf64 | kUpper16MaskOf64)) == 0 &&
kArchVariant == kMips64r6) {
return 2;
} else if ((value & kImm16Mask) == 0 && is_int16((value >> 32) + bit31) &&
kArchVariant == kMips64r6) {
return 2;
} else if ((value & kImm16Mask) == 0 &&
((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF) &&
kArchVariant == kMips64r6) {
return 2;
} else if (is_int16(static_cast<int32_t>(value)) &&
is_int16((value >> 32) + bit31) && kArchVariant == kMips64r6) {
return 2;
} else if (is_int16(static_cast<int32_t>(value)) &&
((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF) &&
kArchVariant == kMips64r6) {
return 2;
} else if (base::bits::IsPowerOfTwo(value + 1) ||
value == std::numeric_limits<int64_t>::max()) {
return 2;
} else {
int shift_cnt = base::bits::CountTrailingZeros64(value);
int rep32_count = InstrCountForLoadReplicatedConst32(value);
int64_t tmp = value >> shift_cnt;
if (is_uint16(tmp)) {
return 2;
} else if (is_int16(tmp)) {
return 2;
} else if (rep32_count < 3) {
return 2;
} else if (is_int32(tmp)) {
return 3;
} else {
shift_cnt = 16 + base::bits::CountTrailingZeros64(value >> 16);
tmp = value >> shift_cnt;
if (is_uint16(tmp)) {
return 3;
} else if (is_int16(tmp)) {
return 3;
} else if (rep32_count < 4) {
return 3;
} else if (kArchVariant == kMips64r6) {
int64_t imm = value;
int count = InstrCountForLiLower32Bit(imm);
imm = (imm >> 32) + bit31;
if (imm & kImm16Mask) {
count++;
}
imm = (imm >> 16) + (imm >> 15 & 0x1);
if (imm & kImm16Mask) {
count++;
}
return count;
} else {
if (is_int48(value)) {
int64_t k = value >> 16;
int count = InstrCountForLiLower32Bit(k) + 1;
if (value & kImm16Mask) {
count++;
}
return count;
} else {
int64_t k = value >> 32;
int count = InstrCountForLiLower32Bit(k);
if ((value >> 16) & kImm16Mask) {
count += 3;
if (value & kImm16Mask) {
count++;
}
} else {
count++;
if (value & kImm16Mask) {
count++;
}
}
return count;
}
}
}
}
}
UNREACHABLE();
return INT_MAX;
}
// All changes to if...else conditions here must be added to
// InstrCountForLi64Bit as well.
void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
DCHECK(!j.is_reg());
DCHECK(!MustUseReg(j.rmode()));
DCHECK(mode == OPTIMIZE_SIZE);
BlockTrampolinePoolScope block_trampoline_pool(this);
// Normal load of an immediate value which does not need Relocation Info.
if (is_int32(j.immediate())) {
LiLower32BitHelper(rd, j);
} else {
int bit31 = j.immediate() >> 31 & 0x1;
if ((j.immediate() & kUpper16MaskOf64) == 0 &&
is_int16(j.immediate() >> 32) && kArchVariant == kMips64r6) {
// 64-bit value which consists of an unsigned 16-bit value in its
// least significant 32-bits, and a signed 16-bit value in its
// most significant 32-bits.
ori(rd, zero_reg, j.immediate() & kImm16Mask);
dahi(rd, j.immediate() >> 32 & kImm16Mask);
} else if ((j.immediate() & (kHigher16MaskOf64 | kUpper16MaskOf64)) == 0 &&
kArchVariant == kMips64r6) {
// 64-bit value which consists of an unsigned 16-bit value in its
// least significant 48-bits, and a signed 16-bit value in its
// most significant 16-bits.
ori(rd, zero_reg, j.immediate() & kImm16Mask);
dati(rd, j.immediate() >> 48 & kImm16Mask);
} else if ((j.immediate() & kImm16Mask) == 0 &&
is_int16((j.immediate() >> 32) + bit31) &&
kArchVariant == kMips64r6) {
// 16 LSBs (Least Significant Bits) all set to zero.
// 48 MSBs (Most Significant Bits) hold a signed 32-bit value.
lui(rd, j.immediate() >> kLuiShift & kImm16Mask);
dahi(rd, ((j.immediate() >> 32) + bit31) & kImm16Mask);
} else if ((j.immediate() & kImm16Mask) == 0 &&
((j.immediate() >> 31) & 0x1FFFF) ==
((0x20000 - bit31) & 0x1FFFF) &&
kArchVariant == kMips64r6) {
// 16 LSBs all set to zero.
// 48 MSBs hold a signed value which can't be represented by signed
// 32-bit number, and the middle 16 bits are all zero, or all one.
lui(rd, j.immediate() >> kLuiShift & kImm16Mask);
dati(rd, ((j.immediate() >> 48) + bit31) & kImm16Mask);
} else if (is_int16(static_cast<int32_t>(j.immediate())) &&
is_int16((j.immediate() >> 32) + bit31) &&
kArchVariant == kMips64r6) {
// 32 LSBs contain a signed 16-bit number.
// 32 MSBs contain a signed 16-bit number.
daddiu(rd, zero_reg, j.immediate() & kImm16Mask);
dahi(rd, ((j.immediate() >> 32) + bit31) & kImm16Mask);
} else if (is_int16(static_cast<int32_t>(j.immediate())) &&
((j.immediate() >> 31) & 0x1FFFF) ==
((0x20000 - bit31) & 0x1FFFF) &&
kArchVariant == kMips64r6) {
// 48 LSBs contain an unsigned 16-bit number.
// 16 MSBs contain a signed 16-bit number.
daddiu(rd, zero_reg, j.immediate() & kImm16Mask);
dati(rd, ((j.immediate() >> 48) + bit31) & kImm16Mask);
} else if (base::bits::IsPowerOfTwo(j.immediate() + 1) ||
j.immediate() == std::numeric_limits<int64_t>::max()) {
// 64-bit values which have their "n" LSBs set to one, and their
// "64-n" MSBs set to zero. "n" must meet the restrictions 0 < n < 64.
int shift_cnt = 64 - base::bits::CountTrailingZeros64(j.immediate() + 1);
daddiu(rd, zero_reg, -1);
if (shift_cnt < 32) {
dsrl(rd, rd, shift_cnt);
} else {
dsrl32(rd, rd, shift_cnt & 31);
}
} else {
int shift_cnt = base::bits::CountTrailingZeros64(j.immediate());
int rep32_count = InstrCountForLoadReplicatedConst32(j.immediate());
int64_t tmp = j.immediate() >> shift_cnt;
if (is_uint16(tmp)) {
// Value can be computed by loading a 16-bit unsigned value, and
// then shifting left.
ori(rd, zero_reg, tmp & kImm16Mask);
if (shift_cnt < 32) {
dsll(rd, rd, shift_cnt);
} else {
dsll32(rd, rd, shift_cnt & 31);
}
} else if (is_int16(tmp)) {
// Value can be computed by loading a 16-bit signed value, and
// then shifting left.
daddiu(rd, zero_reg, static_cast<int32_t>(tmp));
if (shift_cnt < 32) {
dsll(rd, rd, shift_cnt);
} else {
dsll32(rd, rd, shift_cnt & 31);
}
} else if (rep32_count < 3) {
// Value being loaded has 32 LSBs equal to the 32 MSBs, and the
// value loaded into the 32 LSBs can be loaded with a single
// MIPS instruction.
LiLower32BitHelper(rd, j);
Dins(rd, rd, 32, 32);
} else if (is_int32(tmp)) {
// Loads with 3 instructions.
// Value can be computed by loading a 32-bit signed value, and
// then shifting left.
lui(rd, tmp >> kLuiShift & kImm16Mask);
ori(rd, rd, tmp & kImm16Mask);
if (shift_cnt < 32) {
dsll(rd, rd, shift_cnt);
} else {
dsll32(rd, rd, shift_cnt & 31);
}
} else {
shift_cnt = 16 + base::bits::CountTrailingZeros64(j.immediate() >> 16);
tmp = j.immediate() >> shift_cnt;
if (is_uint16(tmp)) {
// Value can be computed by loading a 16-bit unsigned value,
// shifting left, and "or"ing in another 16-bit unsigned value.
ori(rd, zero_reg, tmp & kImm16Mask);
if (shift_cnt < 32) {
dsll(rd, rd, shift_cnt);
} else {
dsll32(rd, rd, shift_cnt & 31);
}
ori(rd, rd, j.immediate() & kImm16Mask);
} else if (is_int16(tmp)) {
// Value can be computed by loading a 16-bit signed value,
// shifting left, and "or"ing in a 16-bit unsigned value.
daddiu(rd, zero_reg, static_cast<int32_t>(tmp));
if (shift_cnt < 32) {
dsll(rd, rd, shift_cnt);
} else {
dsll32(rd, rd, shift_cnt & 31);
}
ori(rd, rd, j.immediate() & kImm16Mask);
} else if (rep32_count < 4) {
// Value being loaded has 32 LSBs equal to the 32 MSBs, and the
// value in the 32 LSBs requires 2 MIPS instructions to load.
LiLower32BitHelper(rd, j);
Dins(rd, rd, 32, 32);
} else if (kArchVariant == kMips64r6) {
// Loads with 3-4 instructions.
// Catch-all case to get any other 64-bit values which aren't
// handled by special cases above.
int64_t imm = j.immediate();
LiLower32BitHelper(rd, j);
imm = (imm >> 32) + bit31;
if (imm & kImm16Mask) {
dahi(rd, imm & kImm16Mask);
}
imm = (imm >> 16) + (imm >> 15 & 0x1);
if (imm & kImm16Mask) {
dati(rd, imm & kImm16Mask);
}
} else {
if (is_int48(j.immediate())) {
Operand k = Operand(j.immediate() >> 16);
LiLower32BitHelper(rd, k);
dsll(rd, rd, 16);
if (j.immediate() & kImm16Mask) {
ori(rd, rd, j.immediate() & kImm16Mask);
}
} else {
Operand k = Operand(j.immediate() >> 32);
LiLower32BitHelper(rd, k);
if ((j.immediate() >> 16) & kImm16Mask) {
dsll(rd, rd, 16);
ori(rd, rd, (j.immediate() >> 16) & kImm16Mask);
dsll(rd, rd, 16);
if (j.immediate() & kImm16Mask) {
ori(rd, rd, j.immediate() & kImm16Mask);
}
} else {
dsll32(rd, rd, 0);
if (j.immediate() & kImm16Mask) {
ori(rd, rd, j.immediate() & kImm16Mask);
}
}
}
}
}
}
}
}
void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
DCHECK(!j.is_reg());
BlockTrampolinePoolScope block_trampoline_pool(this);
if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) {
int li_count = InstrCountForLi64Bit(j.immediate());
int li_neg_count = InstrCountForLi64Bit(-j.immediate());
int li_not_count = InstrCountForLi64Bit(~j.immediate());
    // Negating MIN_INT64 would overflow, but MIN_INT64 itself loads in only
    // two instructions, so the negate/invert paths below are never chosen for
    // it (hence the DCHECKs).
if (li_neg_count <= li_not_count && li_neg_count < li_count - 1) {
DCHECK(j.immediate() != std::numeric_limits<int64_t>::min());
li_optimized(rd, Operand(-j.immediate()), mode);
Dsubu(rd, zero_reg, rd);
} else if (li_neg_count > li_not_count && li_not_count < li_count - 1) {
DCHECK(j.immediate() != std::numeric_limits<int64_t>::min());
li_optimized(rd, Operand(~j.immediate()), mode);
nor(rd, rd, rd);
} else {
li_optimized(rd, j, mode);
}
} else if (MustUseReg(j.rmode())) {
int64_t immediate;
if (j.IsHeapObjectRequest()) {
RequestHeapObject(j.heap_object_request());
immediate = 0;
} else {
immediate = j.immediate();
}
RecordRelocInfo(j.rmode(), immediate);
lui(rd, (immediate >> 32) & kImm16Mask);
ori(rd, rd, (immediate >> 16) & kImm16Mask);
dsll(rd, rd, 16);
ori(rd, rd, immediate & kImm16Mask);
} else if (mode == ADDRESS_LOAD) {
    // Always emit the same number of instructions, since this code may later
    // be patched to load a different value that needs all 4 instructions.
lui(rd, (j.immediate() >> 32) & kImm16Mask);
ori(rd, rd, (j.immediate() >> 16) & kImm16Mask);
dsll(rd, rd, 16);
ori(rd, rd, j.immediate() & kImm16Mask);
} else { // mode == CONSTANT_SIZE - always emit the same instruction
// sequence.
if (kArchVariant == kMips64r6) {
int64_t imm = j.immediate();
lui(rd, imm >> kLuiShift & kImm16Mask);
ori(rd, rd, (imm & kImm16Mask));
imm = (imm >> 32) + ((imm >> 31) & 0x1);
      dahi(rd, imm & kImm16Mask);
imm = (imm >> 16) + ((imm >> 15) & 0x1);
      dati(rd, imm & kImm16Mask);
} else {
lui(rd, (j.immediate() >> 48) & kImm16Mask);
ori(rd, rd, (j.immediate() >> 32) & kImm16Mask);
dsll(rd, rd, 16);
ori(rd, rd, (j.immediate() >> 16) & kImm16Mask);
dsll(rd, rd, 16);
ori(rd, rd, j.immediate() & kImm16Mask);
}
}
}
void TurboAssembler::MultiPush(RegList regs) {
int16_t num_to_push = base::bits::CountPopulation(regs);
int16_t stack_offset = num_to_push * kPointerSize;
Dsubu(sp, sp, Operand(stack_offset));
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kPointerSize;
Sd(ToRegister(i), MemOperand(sp, stack_offset));
}
}
}
void TurboAssembler::MultiPop(RegList regs) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
Ld(ToRegister(i), MemOperand(sp, stack_offset));
stack_offset += kPointerSize;
}
}
daddiu(sp, sp, stack_offset);
}
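// Illustrative reference only (guarded out of the build): a plain C++ sketch
// of the stack layout produced by MultiPush above. The lowest-numbered
// register in the list ends up at offset 0, which is why MultiPop walks the
// registers in increasing order. The Sketch* name is local to this example.
#if 0
static int SketchSlotOffset(uint32_t regs, int reg_code, int slot_size) {
  // Offset of 'reg_code' within the block pushed by MultiPush: one slot for
  // every lower-numbered register that is also in the list.
  int lower_regs = 0;
  for (int i = 0; i < reg_code; i++) lower_regs += (regs >> i) & 1;
  return lower_regs * slot_size;
}
#endif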
void TurboAssembler::MultiPushFPU(RegList regs) {
int16_t num_to_push = base::bits::CountPopulation(regs);
int16_t stack_offset = num_to_push * kDoubleSize;
Dsubu(sp, sp, Operand(stack_offset));
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kDoubleSize;
Sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
}
}
}
void TurboAssembler::MultiPopFPU(RegList regs) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
Ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
stack_offset += kDoubleSize;
}
}
daddiu(sp, sp, stack_offset);
}
void TurboAssembler::Ext(Register rt, Register rs, uint16_t pos,
uint16_t size) {
DCHECK_LT(pos, 32);
DCHECK_LT(pos + size, 33);
ext_(rt, rs, pos, size);
}
void TurboAssembler::Dext(Register rt, Register rs, uint16_t pos,
uint16_t size) {
DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size &&
pos + size <= 64);
if (size > 32) {
dextm_(rt, rs, pos, size);
} else if (pos >= 32) {
dextu_(rt, rs, pos, size);
} else {
dext_(rt, rs, pos, size);
}
}
void TurboAssembler::Ins(Register rt, Register rs, uint16_t pos,
uint16_t size) {
DCHECK_LT(pos, 32);
DCHECK_LE(pos + size, 32);
DCHECK_NE(size, 0);
ins_(rt, rs, pos, size);
}
void TurboAssembler::Dins(Register rt, Register rs, uint16_t pos,
uint16_t size) {
DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size &&
pos + size <= 64);
if (pos + size <= 32) {
dins_(rt, rs, pos, size);
} else if (pos < 32) {
dinsm_(rt, rs, pos, size);
} else {
dinsu_(rt, rs, pos, size);
}
}
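// Illustrative reference only (guarded out of the build): a plain C++ sketch
// of the generic bit-field insert that Dins above maps onto dins/dinsm/dinsu
// depending on where the field lies relative to bit 32. The Sketch* name is
// local to this example.
#if 0
static uint64_t SketchDins(uint64_t rt, uint64_t rs, unsigned pos,
                           unsigned size) {
  uint64_t mask = size == 64 ? ~uint64_t{0} : ((uint64_t{1} << size) - 1);
  return (rt & ~(mask << pos)) | ((rs & mask) << pos);
}
#endif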
void TurboAssembler::ExtractBits(Register dest, Register source, Register pos,
int size, bool sign_extend) {
srav(dest, source, pos);
Dext(dest, dest, 0, size);
if (sign_extend) {
switch (size) {
case 8:
seb(dest, dest);
break;
case 16:
seh(dest, dest);
break;
case 32:
// sign-extend word
sll(dest, dest, 0);
break;
default:
UNREACHABLE();
}
}
}
void TurboAssembler::InsertBits(Register dest, Register source, Register pos,
int size) {
Ror(dest, dest, pos);
Dins(dest, source, 0, size);
{
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Dsubu(scratch, pos, Operand(64));
Neg(scratch, Operand(scratch));
Ror(dest, dest, scratch);
}
}
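// Illustrative reference only (guarded out of the build): a plain C++ sketch
// of the rotate / insert / rotate-back trick used by InsertBits above.
// Rotating right by 'pos' brings the target field down to bit 0, the Dins
// step overwrites the low 'size' bits, and rotating right by 64 - pos
// restores the original bit positions. The Sketch* name is local to this
// example.
#if 0
static uint64_t SketchInsertBits(uint64_t dest, uint64_t source, unsigned pos,
                                 unsigned size) {
  auto rotr64 = [](uint64_t v, unsigned n) {
    n &= 63;
    return n == 0 ? v : (v >> n) | (v << (64 - n));
  };
  uint64_t mask = size == 64 ? ~uint64_t{0} : ((uint64_t{1} << size) - 1);
  uint64_t rotated = rotr64(dest, pos);
  rotated = (rotated & ~mask) | (source & mask);  // The Dins step.
  return rotr64(rotated, (64 - pos) & 63);        // Rotate back.
}
#endif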
void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) {
if (kArchVariant == kMips64r6) {
// r6 neg_s changes the sign for NaN-like operands as well.
neg_s(fd, fs);
} else {
DCHECK_EQ(kArchVariant, kMips64r2);
Label is_nan, done;
Register scratch1 = t8;
Register scratch2 = t9;
CompareIsNanF32(fs, fs);
BranchTrueShortF(&is_nan);
Branch(USE_DELAY_SLOT, &done);
    // For a NaN input, neg_s returns the same NaN value, so the sign has to
    // be changed separately.
neg_s(fd, fs); // In delay slot.
bind(&is_nan);
mfc1(scratch1, fs);
li(scratch2, kBinary32SignMask);
Xor(scratch1, scratch1, scratch2);
mtc1(scratch1, fd);
bind(&done);
}
}
void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) {
if (kArchVariant == kMips64r6) {
// r6 neg_d changes the sign for NaN-like operands as well.
neg_d(fd, fs);
} else {
DCHECK_EQ(kArchVariant, kMips64r2);
Label is_nan, done;
Register scratch1 = t8;
Register scratch2 = t9;
CompareIsNanF64(fs, fs);
BranchTrueShortF(&is_nan);
Branch(USE_DELAY_SLOT, &done);
    // For a NaN input, neg_d returns the same NaN value, so the sign has to
    // be changed separately.
neg_d(fd, fs); // In delay slot.
bind(&is_nan);
dmfc1(scratch1, fs);
li(scratch2, Double::kSignMask);
Xor(scratch1, scratch1, scratch2);
dmtc1(scratch1, fd);
bind(&done);
}
}
void TurboAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
// Move the data from fs to t8.
mfc1(t8, fs);
Cvt_d_uw(fd, t8);
}
void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
// Convert rs to a FP value in fd.
DCHECK(rs != t9);
DCHECK(rs != at);
// Zero extend int32 in rs.
Dext(t9, rs, 0, 32);
dmtc1(t9, fd);
cvt_d_l(fd, fd);
}
void TurboAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) {
// Move the data from fs to t8.
dmfc1(t8, fs);
Cvt_d_ul(fd, t8);
}
void TurboAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
// Convert rs to a FP value in fd.
DCHECK(rs != t9);
DCHECK(rs != at);
Label msb_clear, conversion_done;
Branch(&msb_clear, ge, rs, Operand(zero_reg));
// Rs >= 2^63
andi(t9, rs, 1);
dsrl(rs, rs, 1);
or_(t9, t9, rs);
dmtc1(t9, fd);
cvt_d_l(fd, fd);
Branch(USE_DELAY_SLOT, &conversion_done);
add_d(fd, fd, fd); // In delay slot.
bind(&msb_clear);
// Rs < 2^63, we can do simple conversion.
dmtc1(rs, fd);
cvt_d_l(fd, fd);
bind(&conversion_done);
}
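// Illustrative reference only (guarded out of the build): a plain C++ sketch
// of the conversion trick used by Cvt_d_ul above for inputs with the MSB
// set: halve the value while folding the lost bit back in as a sticky bit
// (so rounding is unaffected), convert the now-positive value, then double
// the result. The Sketch* name is local to this example.
#if 0
static double SketchUint64ToDouble(uint64_t u) {
  if (u < (uint64_t{1} << 63)) {
    return static_cast<double>(static_cast<int64_t>(u));  // Simple case.
  }
  uint64_t halved = (u >> 1) | (u & 1);  // Halve; keep bit 0 as a sticky bit.
  return static_cast<double>(static_cast<int64_t>(halved)) * 2.0;
}
#endif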
void TurboAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) {
// Move the data from fs to t8.
mfc1(t8, fs);
Cvt_s_uw(fd, t8);
}
void TurboAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
// Convert rs to a FP value in fd.
DCHECK(rs != t9);
DCHECK(rs != at);
// Zero extend int32 in rs.
Dext(t9, rs, 0, 32);
dmtc1(t9, fd);
cvt_s_l(fd, fd);
}
void TurboAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {
// Move the data from fs to t8.
dmfc1(t8, fs);
Cvt_s_ul(fd, t8);
}
void TurboAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
// Convert rs to a FP value in fd.
DCHECK(rs != t9);
DCHECK(rs != at);
Label positive, conversion_done;
Branch(&positive, ge, rs, Operand(zero_reg));
  // Rs >= 2^63.
andi(t9, rs, 1);
dsrl(rs, rs, 1);
or_(t9, t9, rs);
dmtc1(t9, fd);
cvt_s_l(fd, fd);
Branch(USE_DELAY_SLOT, &conversion_done);
add_s(fd, fd, fd); // In delay slot.
bind(&positive);
  // Rs < 2^63, we can do simple conversion.
dmtc1(rs, fd);
cvt_s_l(fd, fd);
bind(&conversion_done);
}
void MacroAssembler::Round_l_d(FPURegister fd, FPURegister fs) {
round_l_d(fd, fs);
}
void MacroAssembler::Floor_l_d(FPURegister fd, FPURegister fs) {
floor_l_d(fd, fs);
}
void MacroAssembler::Ceil_l_d(FPURegister fd, FPURegister fs) {
ceil_l_d(fd, fs);
}
void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) {
trunc_l_d(fd, fs);
}
void MacroAssembler::Trunc_l_ud(FPURegister fd,
FPURegister fs,
FPURegister scratch) {
// Load to GPR.
dmfc1(t8, fs);
// Reset sign bit.
{
UseScratchRegisterScope temps(this);
Register scratch1 = temps.Acquire();
li(scratch1, 0x7FFFFFFFFFFFFFFF);
and_(t8, t8, scratch1);
}
dmtc1(t8, fs);
trunc_l_d(fd, fs);
}
void TurboAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs,
FPURegister scratch) {
Trunc_uw_d(fs, t8, scratch);
mtc1(t8, fd);
}
void TurboAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
FPURegister scratch) {
Trunc_uw_s(fs, t8, scratch);
mtc1(t8, fd);
}
void TurboAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs,
FPURegister scratch, Register result) {
Trunc_ul_d(fs, t8, scratch, result);
dmtc1(t8, fd);
}
void TurboAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs,
FPURegister scratch, Register result) {
Trunc_ul_s(fs, t8, scratch, result);
dmtc1(t8, fd);
}
void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
trunc_w_d(fd, fs);
}
void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
round_w_d(fd, fs);
}
void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
floor_w_d(fd, fs);
}
void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
ceil_w_d(fd, fs);
}
void TurboAssembler::Trunc_uw_d(FPURegister fd, Register rs,
FPURegister scratch) {
DCHECK(fd != scratch);
DCHECK(rs != at);
{
    // Load 2^31 into scratch as its double representation.
UseScratchRegisterScope temps(this);
Register scratch1 = temps.Acquire();
li(scratch1, 0x41E00000);
mtc1(zero_reg, scratch);
mthc1(scratch1, scratch);
}
// Test if scratch > fd.
// If fd < 2^31 we can convert it normally.
Label simple_convert;
CompareF64(OLT, fd, scratch);
BranchTrueShortF(&simple_convert);
// First we subtract 2^31 from fd, then trunc it to rs
// and add 2^31 to rs.
sub_d(scratch, fd, scratch);
trunc_w_d(scratch, scratch);
mfc1(rs, scratch);
Or(rs, rs, 1 << 31);
Label done;
Branch(&done);
// Simple conversion.
bind(&simple_convert);
trunc_w_d(scratch, fd);
mfc1(rs, scratch);
bind(&done);
}
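// Illustrative reference only (guarded out of the build): a plain C++ sketch
// of the Trunc_uw_d strategy above. Values below 2^31 truncate directly;
// larger values are biased down by 2^31 before the signed truncation and the
// bit is restored afterwards. Inputs outside [0, 2^32) and NaNs are not
// handled here. The Sketch* name is local to this example.
#if 0
static uint32_t SketchTruncDoubleToUint32(double d) {
  const double k2_31 = 2147483648.0;  // 2^31, the 0x41E00000... constant.
  if (d < k2_31) return static_cast<uint32_t>(static_cast<int32_t>(d));
  int32_t biased = static_cast<int32_t>(d - k2_31);   // Now in int32 range.
  return static_cast<uint32_t>(biased) | (1u << 31);  // Restore the bit.
}
#endif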
void TurboAssembler::Trunc_uw_s(FPURegister fd, Register rs,
FPURegister scratch) {
DCHECK(fd != scratch);
DCHECK(rs != at);
{
// Load 2^31 into scratch as its float representation.
UseScratchRegisterScope temps(this);
Register scratch1 = temps.Acquire();
li(scratch1, 0x4F000000);
mtc1(scratch1, scratch);
}
// Test if scratch > fd.
// If fd < 2^31 we can convert it normally.
Label simple_convert;
CompareF32(OLT, fd, scratch);
BranchTrueShortF(&simple_convert);
// First we subtract 2^31 from fd, then trunc it to rs
// and add 2^31 to rs.
sub_s(scratch, fd, scratch);
trunc_w_s(scratch, scratch);
mfc1(rs, scratch);
Or(rs, rs, 1 << 31);
Label done;
Branch(&done);
// Simple conversion.
bind(&simple_convert);
trunc_w_s(scratch, fd);
mfc1(rs, scratch);
bind(&done);
}
void TurboAssembler::Trunc_ul_d(FPURegister fd, Register rs,
FPURegister scratch, Register result) {
DCHECK(fd != scratch);
DCHECK(!AreAliased(rs, result, at));
Label simple_convert, done, fail;
if (result.is_valid()) {
mov(result, zero_reg);
Move(scratch, -1.0);
    // If fd <= -1 or unordered, then the conversion fails.
CompareF64(OLE, fd, scratch);
BranchTrueShortF(&fail);
CompareIsNanF64(fd, scratch);
BranchTrueShortF(&fail);
}
// Load 2^63 into scratch as its double representation.
li(at, 0x43E0000000000000);
dmtc1(at, scratch);
// Test if scratch > fd.
// If fd < 2^63 we can convert it normally.
CompareF64(OLT, fd, scratch);
BranchTrueShortF(&simple_convert);
// First we subtract 2^63 from fd, then trunc it to rs
// and add 2^63 to rs.
sub_d(scratch, fd, scratch);
trunc_l_d(scratch, scratch);
dmfc1(rs, scratch);
Or(rs, rs, Operand(1UL << 63));
Branch(&done);
// Simple conversion.
bind(&simple_convert);
trunc_l_d(scratch, fd);
dmfc1(rs, scratch);
bind(&done);
if (result.is_valid()) {
    // The conversion fails if the result is negative.
{
UseScratchRegisterScope temps(this);
Register scratch1 = temps.Acquire();
addiu(scratch1, zero_reg, -1);
      dsrl(scratch1, scratch1, 1);  // Load 0x7FFFFFFFFFFFFFFF (2^63 - 1).
dmfc1(result, scratch);
xor_(result, result, scratch1);
}
Slt(result, zero_reg, result);
}
bind(&fail);
}
void TurboAssembler::Trunc_ul_s(FPURegister fd, Register rs,
FPURegister scratch, Register result) {
DCHECK(fd != scratch);
DCHECK(!AreAliased(rs, result, at));
Label simple_convert, done, fail;
if (result.is_valid()) {
mov(result, zero_reg);
Move(scratch, -1.0f);
    // If fd <= -1 or unordered, then the conversion fails.
CompareF32(OLE, fd, scratch);
BranchTrueShortF(&fail);
CompareIsNanF32(fd, scratch);
BranchTrueShortF(&fail);
}
{
// Load 2^63 into scratch as its float representation.
UseScratchRegisterScope temps(this);
Register scratch1 = temps.Acquire();
li(scratch1, 0x5F000000);
mtc1(scratch1, scratch);
}
// Test if scratch > fd.
// If fd < 2^63 we can convert it normally.
CompareF32(OLT, fd, scratch);
BranchTrueShortF(&simple_convert);
// First we subtract 2^63 from fd, then trunc it to rs
// and add 2^63 to rs.
sub_s(scratch, fd, scratch);
trunc_l_s(scratch, scratch);
dmfc1(rs, scratch);
Or(rs, rs, Operand(1UL << 63));
Branch(&done);
// Simple conversion.
bind(&simple_convert);
trunc_l_s(scratch, fd);
dmfc1(rs, scratch);
bind(&done);
if (result.is_valid()) {
    // The conversion fails if the result is negative or unordered.
{
UseScratchRegisterScope temps(this);
Register scratch1 = temps.Acquire();
addiu(scratch1, zero_reg, -1);
      dsrl(scratch1, scratch1, 1);  // Load 0x7FFFFFFFFFFFFFFF (2^63 - 1).
dmfc1(result, scratch);
xor_(result, result, scratch1);
}
Slt(result, zero_reg, result);
}
bind(&fail);
}
template <typename RoundFunc>
void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src,
FPURoundingMode mode, RoundFunc round) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = t8;
if (kArchVariant == kMips64r6) {
cfc1(scratch, FCSR);
li(at, Operand(mode));
ctc1(at, FCSR);
rint_d(dst, src);
ctc1(scratch, FCSR);
} else {
Label done;
mfhc1(scratch, src);
Ext(at, scratch, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
Branch(USE_DELAY_SLOT, &done, hs, at,
Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits));
mov_d(dst, src);
round(this, dst, src);
dmfc1(at, dst);
Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg));
cvt_d_l(dst, dst);
srl(at, scratch, 31);
sll(at, at, 31);
mthc1(at, dst);
bind(&done);
}
}
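// Illustrative reference only (guarded out of the build): a plain C++ sketch
// of the pre-r6 fast path in RoundDouble above. Once the biased exponent
// reaches kExponentBias + kMantissaBits the double is already integral (or
// Inf/NaN), so rounding must leave it untouched. Assumes <cstring> is
// available through the surrounding includes; the Sketch* name is local to
// this example.
#if 0
static bool SketchAlreadyIntegral(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  int biased_exponent = static_cast<int>((bits >> 52) & 0x7FF);
  return biased_exponent >= 1023 + 52;  // kExponentBias + kMantissaBits.
}
#endif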
void TurboAssembler::Floor_d_d(FPURegister dst, FPURegister src) {
RoundDouble(dst, src, mode_floor,
[](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
tasm->floor_l_d(dst, src);
});
}
void TurboAssembler::Ceil_d_d(FPURegister dst, FPURegister src) {
RoundDouble(dst, src, mode_ceil,
[](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
tasm->ceil_l_d(dst, src);
});
}
void TurboAssembler::Trunc_d_d(FPURegister dst, FPURegister src) {
RoundDouble(dst, src, mode_trunc,
[](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
tasm->trunc_l_d(dst, src);
});
}
void TurboAssembler::Round_d_d(FPURegister dst, FPURegister src) {
RoundDouble(dst, src, mode_round,
[](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
tasm->round_l_d(dst, src);
});
}
template <typename RoundFunc>
void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
FPURoundingMode mode, RoundFunc round) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = t8;
if (kArchVariant == kMips64r6) {
cfc1(scratch, FCSR);
li(at, Operand(mode));
ctc1(at, FCSR);
rint_s(dst, src);
ctc1(scratch, FCSR);
} else {
int32_t kFloat32ExponentBias = 127;
int32_t kFloat32MantissaBits = 23;
int32_t kFloat32ExponentBits = 8;
Label done;
mfc1(scratch, src);
Ext(at, scratch, kFloat32MantissaBits, kFloat32ExponentBits);
Branch(USE_DELAY_SLOT, &done, hs, at,
Operand(kFloat32ExponentBias + kFloat32MantissaBits));
mov_s(dst, src);
round(this, dst, src);
mfc1(at, dst);
Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg));
cvt_s_w(dst, dst);
srl(at, scratch, 31);
sll(at, at, 31);
mtc1(at, dst);
bind(&done);
}
}
void TurboAssembler::Floor_s_s(FPURegister dst, FPURegister src) {
RoundFloat(dst, src, mode_floor,
[](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
tasm->floor_w_s(dst, src);
});
}
void TurboAssembler::Ceil_s_s(FPURegister dst, FPURegister src) {
RoundFloat(dst, src, mode_ceil,
[](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
tasm->ceil_w_s(dst, src);
});
}
void TurboAssembler::Trunc_s_s(FPURegister dst, FPURegister src) {
RoundFloat(dst, src, mode_trunc,
[](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
tasm->trunc_w_s(dst, src);
});
}
void TurboAssembler::Round_s_s(FPURegister dst, FPURegister src) {
RoundFloat(dst, src, mode_round,
[](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
tasm->round_w_s(dst, src);
});
}
void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
DCHECK(fr != scratch && fs != scratch && ft != scratch);
mul_s(scratch, fs, ft);
add_s(fd, fr, scratch);
}
void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
DCHECK(fr != scratch && fs != scratch && ft != scratch);
mul_d(scratch, fs, ft);
add_d(fd, fr, scratch);
}
void MacroAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
DCHECK(fr != scratch && fs != scratch && ft != scratch);
mul_s(scratch, fs, ft);
sub_s(fd, scratch, fr);
}
void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
DCHECK(fr != scratch && fs != scratch && ft != scratch);
mul_d(scratch, fs, ft);
sub_d(fd, scratch, fr);
}
void TurboAssembler::CompareF(SecondaryField sizeField, FPUCondition cc,
FPURegister cmp1, FPURegister cmp2) {
if (kArchVariant == kMips64r6) {
sizeField = sizeField == D ? L : W;
DCHECK(cmp1 != kDoubleCompareReg && cmp2 != kDoubleCompareReg);
cmp(cc, sizeField, kDoubleCompareReg, cmp1, cmp2);
} else {
c(cc, sizeField, cmp1, cmp2);
}
}
void TurboAssembler::CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
FPURegister cmp2) {
CompareF(sizeField, UN, cmp1, cmp2);
}
void TurboAssembler::BranchTrueShortF(Label* target) {
if (kArchVariant == kMips64r6) {
bc1nez(target, kDoubleCompareReg);
nop();
} else {
bc1t(target);
nop();
}
}
void TurboAssembler::BranchFalseShortF(Label* target) {
if (kArchVariant == kMips64r6) {
bc1eqz(target, kDoubleCompareReg);
nop();
} else {
bc1f(target);
nop();
}
}
void TurboAssembler::BranchTrueF(Label* target) {
bool long_branch =
target->is_bound() ? !is_near(target) : is_trampoline_emitted();
if (long_branch) {
Label skip;
BranchFalseShortF(&skip);
BranchLong(target, PROTECT);
bind(&skip);
} else {
BranchTrueShortF(target);
}
}
void TurboAssembler::BranchFalseF(Label* target) {
bool long_branch =
target->is_bound() ? !is_near(target) : is_trampoline_emitted();
if (long_branch) {
Label skip;
BranchTrueShortF(&skip);
BranchLong(target, PROTECT);
bind(&skip);
} else {
BranchFalseShortF(target);
}
}
void TurboAssembler::BranchMSA(Label* target, MSABranchDF df,
MSABranchCondition cond, MSARegister wt,
BranchDelaySlot bd) {
{
BlockTrampolinePoolScope block_trampoline_pool(this);
if (target) {
bool long_branch =
target->is_bound() ? !is_near(target) : is_trampoline_emitted();
if (long_branch) {
Label skip;
MSABranchCondition neg_cond = NegateMSABranchCondition(cond);
BranchShortMSA(df, &skip, neg_cond, wt, bd);
BranchLong(target, bd);
bind(&skip);
} else {
BranchShortMSA(df, target, cond, wt, bd);
}
}
}
}
void TurboAssembler::BranchShortMSA(MSABranchDF df, Label* target,
MSABranchCondition cond, MSARegister wt,
BranchDelaySlot bd) {
if (kArchVariant == kMips64r6) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (target) {
switch (cond) {
case all_not_zero:
switch (df) {
case MSA_BRANCH_D:
bnz_d(wt, target);
break;
case MSA_BRANCH_W:
bnz_w(wt, target);
break;
case MSA_BRANCH_H:
bnz_h(wt, target);
break;
case MSA_BRANCH_B:
default:
bnz_b(wt, target);
}
break;
case one_elem_not_zero:
bnz_v(wt, target);
break;
case one_elem_zero:
switch (df) {
case MSA_BRANCH_D:
bz_d(wt, target);
break;
case MSA_BRANCH_W:
bz_w(wt, target);
break;
case MSA_BRANCH_H:
bz_h(wt, target);
break;
case MSA_BRANCH_B:
default:
bz_b(wt, target);
}
break;
case all_zero:
bz_v(wt, target);
break;
default:
UNREACHABLE();
}
}
}
if (bd == PROTECT) {
nop();
}
}
void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(src_low != scratch);
mfhc1(scratch, dst);
mtc1(src_low, dst);
mthc1(scratch, dst);
}
void TurboAssembler::Move(FPURegister dst, uint32_t src) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(static_cast<int32_t>(src)));
mtc1(scratch, dst);
}
void TurboAssembler::Move(FPURegister dst, uint64_t src) {
// Handle special values first.
if (src == bit_cast<uint64_t>(0.0) && has_double_zero_reg_set_) {
mov_d(dst, kDoubleRegZero);
} else if (src == bit_cast<uint64_t>(-0.0) && has_double_zero_reg_set_) {
Neg_d(dst, kDoubleRegZero);
} else {
uint32_t lo = src & 0xFFFFFFFF;
uint32_t hi = src >> 32;
    // Move the low part of the double into the low half of the corresponding
    // FPU register.
if (lo != 0) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(lo));
mtc1(scratch, dst);
} else {
mtc1(zero_reg, dst);
}
    // Move the high part of the double into the high half of the
    // corresponding FPU register.
if (hi != 0) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(hi));
mthc1(scratch, dst);
} else {
mthc1(zero_reg, dst);
}
if (dst == kDoubleRegZero) has_double_zero_reg_set_ = true;
}
}
void TurboAssembler::Movz(Register rd, Register rs, Register rt) {
if (kArchVariant == kMips64r6) {
Label done;
Branch(&done, ne, rt, Operand(zero_reg));
mov(rd, rs);
bind(&done);
} else {
movz(rd, rs, rt);
}
}
void TurboAssembler::Movn(Register rd, Register rs, Register rt) {
if (kArchVariant == kMips64r6) {
Label done;
Branch(&done, eq, rt, Operand(zero_reg));
mov(rd, rs);
bind(&done);
} else {
movn(rd, rs, rt);
}
}
void TurboAssembler::LoadZeroOnCondition(Register rd, Register rs,
const Operand& rt, Condition cond) {
switch (cond) {
case cc_always:
mov(rd, zero_reg);
break;
case eq:
if (rs == zero_reg) {
if (rt.is_reg()) {
LoadZeroIfConditionZero(rd, rt.rm());
} else {
if (rt.immediate() == 0) {
mov(rd, zero_reg);
} else {
nop();
}
}
} else if (IsZero(rt)) {
LoadZeroIfConditionZero(rd, rs);
} else {
Dsubu(t9, rs, rt);
LoadZeroIfConditionZero(rd, t9);
}
break;
case ne:
if (rs == zero_reg) {
if (rt.is_reg()) {
LoadZeroIfConditionNotZero(rd, rt.rm());
} else {
if (rt.immediate() != 0) {
mov(rd, zero_reg);
} else {
nop();
}
}
} else if (IsZero(rt)) {
LoadZeroIfConditionNotZero(rd, rs);
} else {
Dsubu(t9, rs, rt);
LoadZeroIfConditionNotZero(rd, t9);
}
break;
// Signed comparison.
case greater:
Sgt(t9, rs, rt);
LoadZeroIfConditionNotZero(rd, t9);
break;
case greater_equal:
Sge(t9, rs, rt);
LoadZeroIfConditionNotZero(rd, t9);
// rs >= rt
break;
case less:
Slt(t9, rs, rt);
LoadZeroIfConditionNotZero(rd, t9);
// rs < rt
break;
case less_equal:
Sle(t9, rs, rt);
LoadZeroIfConditionNotZero(rd, t9);
// rs <= rt
break;
// Unsigned comparison.
case Ugreater:
Sgtu(t9, rs, rt);
LoadZeroIfConditionNotZero(rd, t9);
// rs > rt
break;
case Ugreater_equal:
Sgeu(t9, rs, rt);
LoadZeroIfConditionNotZero(rd, t9);
// rs >= rt
break;
case Uless:
Sltu(t9, rs, rt);
LoadZeroIfConditionNotZero(rd, t9);
// rs < rt
break;
case Uless_equal:
Sleu(t9, rs, rt);
LoadZeroIfConditionNotZero(rd, t9);
// rs <= rt
break;
default:
UNREACHABLE();
}
}
void TurboAssembler::LoadZeroIfConditionNotZero(Register dest,
Register condition) {
if (kArchVariant == kMips64r6) {
seleqz(dest, dest, condition);
} else {
Movn(dest, zero_reg, condition);
}
}
void TurboAssembler::LoadZeroIfConditionZero(Register dest,
Register condition) {
if (kArchVariant == kMips64r6) {
selnez(dest, dest, condition);
} else {
Movz(dest, zero_reg, condition);
}
}
void TurboAssembler::Movt(Register rd, Register rs, uint16_t cc) {
movt(rd, rs, cc);
}
void TurboAssembler::Movf(Register rd, Register rs, uint16_t cc) {
movf(rd, rs, cc);
}
void TurboAssembler::Clz(Register rd, Register rs) { clz(rd, rs); }
void TurboAssembler::Ctz(Register rd, Register rs) {
if (kArchVariant == kMips64r6) {
// We don't have an instruction to count the number of trailing zeroes.
// Start by flipping the bits end-for-end so we can count the number of
// leading zeroes instead.
rotr(rd, rs, 16);
wsbh(rd, rd);
bitswap(rd, rd);
Clz(rd, rd);
} else {
// Convert trailing zeroes to trailing ones, and bits to their left
// to zeroes.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Daddu(scratch, rs, -1);
Xor(rd, scratch, rs);
And(rd, rd, scratch);
// Count number of leading zeroes.
Clz(rd, rd);
// Subtract number of leading zeroes from 32 to get number of trailing
// ones. Remember that the trailing ones were formerly trailing zeroes.
li(scratch, 32);
Subu(rd, scratch, rd);
}
}
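// Illustrative reference only (guarded out of the build): a plain C++ sketch
// of the pre-r6 Ctz strategy above. (x - 1) ^ x isolates the trailing zeros
// plus the lowest set bit, AND-ing with (x - 1) leaves exactly the
// trailing-zero positions set, and 32 minus the leading-zero count of that
// mask is the trailing-zero count. The Sketch* name is local to this example.
#if 0
static int SketchCtz32(uint32_t x) {
  uint32_t mask = ((x - 1) ^ x) & (x - 1);  // Trailing zeros become ones.
  int leading = 0;  // Portable stand-in for the clz instruction.
  for (uint32_t probe = 0x80000000u; probe != 0 && (mask & probe) == 0;
       probe >>= 1) {
    ++leading;
  }
  return 32 - leading;  // ctz(0) == 32, matching the MIPS sequence.
}
#endif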
void TurboAssembler::Dctz(Register rd, Register rs) {
if (kArchVariant == kMips64r6) {
// We don't have an instruction to count the number of trailing zeroes.
// Start by flipping the bits end-for-end so we can count the number of
// leading zeroes instead.
dsbh(rd, rs);
dshd(rd, rd);
dbitswap(rd, rd);
dclz(rd, rd);
} else {
// Convert trailing zeroes to trailing ones, and bits to their left
// to zeroes.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Daddu(scratch, rs, -1);
Xor(rd, scratch, rs);
And(rd, rd, scratch);
// Count number of leading zeroes.
dclz(rd, rd);
// Subtract number of leading zeroes from 64 to get number of trailing
// ones. Remember that the trailing ones were formerly trailing zeroes.
li(scratch, 64);
Dsubu(rd, scratch, rd);
}
}
void TurboAssembler::Popcnt(Register rd, Register rs) {
// https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
//
// A generalization of the best bit counting method to integers of
// bit-widths up to 128 (parameterized by type T) is this:
//
// v = v - ((v >> 1) & (T)~(T)0/3); // temp
// v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp
// v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp
// c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count
//
// For comparison, for 32-bit quantities, this algorithm can be executed
// using 20 MIPS instructions (the calls to LoadConst32() generate two
// machine instructions each for the values being used in this algorithm).
// A(n unrolled) loop-based algorithm requires 25 instructions.
//
// For a 64-bit operand this can be performed in 24 instructions compared
// to a(n unrolled) loop based algorithm which requires 38 instructions.
//
// There are algorithms which are faster in the cases where very few
// bits are set but the algorithm here attempts to minimize the total
// number of instructions executed even when a large number of bits
// are set.
uint32_t B0 = 0x55555555; // (T)~(T)0/3
uint32_t B1 = 0x33333333; // (T)~(T)0/15*3
uint32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
uint32_t value = 0x01010101; // (T)~(T)0/255
uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Register scratch2 = t8;
srl(scratch, rs, 1);
li(scratch2, B0);
And(scratch, scratch, scratch2);
Subu(scratch, rs, scratch);
li(scratch2, B1);
And(rd, scratch, scratch2);
srl(scratch, scratch, 2);
And(scratch, scratch, scratch2);
Addu(scratch, rd, scratch);
srl(rd, scratch, 4);
Addu(rd, rd, scratch);
li(scratch2, B2);
And(rd, rd, scratch2);
li(scratch, value);
Mul(rd, rd, scratch);
srl(rd, rd, shift);
}
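// Illustrative reference only (guarded out of the build): the parallel bit
// count described in the comment above, specialised to 32 bits in plain C++.
// It mirrors the B0/B1/B2 constants and the final multiply-and-shift used by
// Popcnt. The Sketch* name is local to this example.
#if 0
static uint32_t SketchPopcnt32(uint32_t v) {
  v = v - ((v >> 1) & 0x55555555u);                  // Pairs of bits.
  v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);  // Nibbles.
  v = (v + (v >> 4)) & 0x0F0F0F0Fu;                  // Bytes.
  return (v * 0x01010101u) >> 24;                    // Sum of the byte counts.
}
#endif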
void TurboAssembler::Dpopcnt(Register rd, Register rs) {
uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
uint64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
uint64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15
uint64_t value = 0x0101010101010101l; // (T)~(T)0/255
  uint64_t shift = 24;  // (sizeof(T) - 1) * BITS_PER_BYTE - 32; the final
                        // dsrl32 supplies the remaining 32-bit shift.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Register scratch2 = t8;
dsrl(scratch, rs, 1);
li(scratch2, B0);
And(scratch, scratch, scratch2);
Dsubu(scratch, rs, scratch);
li(scratch2, B1);
And(rd, scratch, scratch2);
dsrl(scratch, scratch, 2);
And(scratch, scratch, scratch2);
Daddu(scratch, rd, scratch);
dsrl(rd, scratch, 4);
Daddu(rd, rd, scratch);
li(scratch2, B2);
And(rd, rd, scratch2);
li(scratch, value);
Dmul(rd, rd, scratch);
dsrl32(rd, rd, shift);
}
void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
Register result,
DoubleRegister double_input,
Register scratch,
DoubleRegister double_scratch,
Register except_flag,
CheckForInexactConversion check_inexact) {
DCHECK(result != scratch);
DCHECK(double_input != double_scratch);
DCHECK(except_flag != scratch);
Label done;
// Clear the except flag (0 = no exception)
mov(except_flag, zero_reg);
// Test for values that can be exactly represented as a signed 32-bit integer.
cvt_w_d(double_scratch, double_input);
mfc1(result, double_scratch);
cvt_d_w(double_scratch, double_scratch);
CompareF64(EQ, double_input, double_scratch);
BranchTrueShortF(&done);
int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
if (check_inexact == kDontCheckForInexactConversion) {
// Ignore inexact exceptions.
except_mask &= ~kFCSRInexactFlagMask;
}
// Save FCSR.
cfc1(scratch, FCSR);
// Disable FPU exceptions.
ctc1(zero_reg, FCSR);
// Do operation based on rounding mode.
switch (rounding_mode) {
case kRoundToNearest:
Round_w_d(double_scratch, double_input);
break;
case kRoundToZero:
Trunc_w_d(double_scratch, double_input);
break;
case kRoundToPlusInf:
Ceil_w_d(double_scratch, double_input);
break;
case kRoundToMinusInf:
Floor_w_d(double_scratch, double_input);
break;
} // End of switch-statement.
// Retrieve FCSR.
cfc1(except_flag, FCSR);
// Restore FCSR.
ctc1(scratch, FCSR);
// Move the converted value into the result register.
mfc1(result, double_scratch);
// Check for fpu exceptions.
And(except_flag, except_flag, Operand(except_mask));
bind(&done);
}
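// Illustrative reference only (guarded out of the build): a plain C++ sketch
// of the fast path in EmitFPUTruncate above. If converting to int32 and back
// reproduces the input exactly, no rounding-mode handling or FCSR exception
// check is needed. The real code uses cvt_w_d (current rounding mode) rather
// than C++ truncation, and out-of-range inputs are not modelled here. The
// Sketch* name is local to this example.
#if 0
static bool SketchConvertsExactly(double input, int32_t* out) {
  int32_t converted = static_cast<int32_t>(input);  // Stand-in for cvt_w_d.
  *out = converted;
  return static_cast<double>(converted) == input;   // False for NaN as well.
}
#endif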
void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
DoubleRegister double_input,
Label* done) {
DoubleRegister single_scratch = kScratchDoubleReg.low();
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Register scratch2 = t9;
// Clear cumulative exception flags and save the FCSR.
cfc1(scratch2, FCSR);
ctc1(zero_reg, FCSR);
// Try a conversion to a signed integer.
trunc_w_d(single_scratch, double_input);
mfc1(result, single_scratch);
// Retrieve and restore the FCSR.
cfc1(scratch, FCSR);
ctc1(scratch2, FCSR);
// Check for overflow and NaNs.
And(scratch,
scratch,
kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
// If we had no exceptions we are done.
Branch(done, eq, scratch, Operand(zero_reg));
}
void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
DoubleRegister double_input) {
Label done;
TryInlineTruncateDoubleToI(result, double_input, &done);
// If we fell through then inline version didn't succeed - call stub instead.
push(ra);
Dsubu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
Sdc1(double_input, MemOperand(sp, 0));
CallStubDelayed(new (zone) DoubleToIStub(nullptr, result));
Daddu(sp, sp, Operand(kDoubleSize));
pop(ra);
bind(&done);
}
// Emulated conditional branches do not emit a nop in the branch delay slot.
//
// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
#define BRANCH_ARGS_CHECK(cond, rs, rt) \
DCHECK((cond == cc_always && rs == zero_reg && rt.rm() == zero_reg) || \
(cond != cc_always && (rs != zero_reg || rt.rm() != zero_reg)))
void TurboAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
  DCHECK(kArchVariant == kMips64r6 ? is_int26(offset) : is_int16(offset));
BranchShort(offset, bdslot);
}
void TurboAssembler::Branch(int32_t offset, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bdslot) {
bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
DCHECK(is_near);
USE(is_near);
}
void TurboAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
if (L->is_bound()) {
if (is_near_branch(L)) {
BranchShort(L, bdslot);
} else {
BranchLong(L, bdslot);
}
} else {
if (is_trampoline_emitted()) {
BranchLong(L, bdslot);
} else {
BranchShort(L, bdslot);
}
}
}
void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bdslot) {
if (L->is_bound()) {
if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
if (cond != cc_always) {
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
BranchLong(L, bdslot);
bind(&skip);
} else {
BranchLong(L, bdslot);
}
}
} else {
if (is_trampoline_emitted()) {
if (cond != cc_always) {
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
BranchLong(L, bdslot);
bind(&skip);
} else {
BranchLong(L, bdslot);
}
} else {
BranchShort(L, cond, rs, rt, bdslot);
}
}
}
void TurboAssembler::Branch(Label* L, Condition cond, Register rs,
Heap::RootListIndex index, BranchDelaySlot bdslot) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
LoadRoot(scratch, index);
Branch(L, cond, rs, Operand(scratch), bdslot);
}
void TurboAssembler::BranchShortHelper(int16_t offset, Label* L,
BranchDelaySlot bdslot) {
DCHECK(L == nullptr || offset == 0);
offset = GetOffset(offset, L, OffsetSize::kOffset16);
b(offset);
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
}
void TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
DCHECK(L == nullptr || offset == 0);
offset = GetOffset(offset, L, OffsetSize::kOffset26);
bc(offset);
}
void TurboAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
DCHECK(is_int26(offset));
BranchShortHelperR6(offset, nullptr);
} else {
DCHECK(is_int16(offset));
BranchShortHelper(offset, nullptr, bdslot);
}
}
void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
BranchShortHelperR6(0, L);
} else {
BranchShortHelper(0, L, bdslot);
}
}
int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
if (L) {
offset = branch_offset_helper(L, bits) >> 2;
} else {
DCHECK(is_intn(offset, bits));
}
return offset;
}
Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt,
Register scratch) {
Register r2 = no_reg;
if (rt.is_reg()) {
r2 = rt.rm();
} else {
r2 = scratch;
li(r2, rt);
}
return r2;
}
bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset,
OffsetSize bits) {
if (!is_near(L, bits)) return false;
offset = GetOffset(offset, L, bits);
return true;
}
bool TurboAssembler::CalculateOffset(Label* L, int32_t& offset, OffsetSize bits,
Register& scratch, const Operand& rt) {
if (!is_near(L, bits)) return false;
scratch = GetRtAsRegisterHelper(rt, scratch);
offset = GetOffset(offset, L, bits);
return true;
}
bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L,
Condition cond, Register rs,
const Operand& rt) {
DCHECK(L == nullptr || offset == 0);
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
  // Be careful to always use shifted_branch_offset only just before the
  // branch instruction, as the location will be remembered for patching the
  // target.
{
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
bc(offset);
break;
case eq:
if (rt.is_reg() && rs.code() == rt.rm().code()) {
          // The pre-R6 beq is used here to keep the code patchable; bc would
          // otherwise be preferable, but it has no condition field and so
          // cannot be patched.
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
beq(rs, scratch, offset);
nop();
} else if (IsZero(rt)) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
beqzc(rs, offset);
} else {
// We don't want any other register but scratch clobbered.
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
beqc(rs, scratch, offset);
}
break;
case ne:
if (rt.is_reg() && rs.code() == rt.rm().code()) {
          // The pre-R6 bne is used here to keep the code patchable; otherwise
          // no instruction would need to be emitted.
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
bne(rs, scratch, offset);
nop();
} else if (IsZero(rt)) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
bnezc(rs, offset);
} else {
// We don't want any other register but scratch clobbered.
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
bnec(rs, scratch, offset);
}
break;
// Signed comparison.
case greater:
// rs > rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
bltzc(scratch, offset);
} else if (IsZero(rt)) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
bgtzc(rs, offset);
} else {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
DCHECK(rs != scratch);
bltc(scratch, rs, offset);
}
break;
case greater_equal:
// rs >= rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
blezc(scratch, offset);
} else if (IsZero(rt)) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
bgezc(rs, offset);
} else {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
DCHECK(rs != scratch);
bgec(rs, scratch, offset);
}
break;
case less:
// rs < rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
bgtzc(scratch, offset);
} else if (IsZero(rt)) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
bltzc(rs, offset);
} else {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
DCHECK(rs != scratch);
bltc(rs, scratch, offset);
}
break;
case less_equal:
// rs <= rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
bgezc(scratch, offset);
} else if (IsZero(rt)) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
blezc(rs, offset);
} else {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
DCHECK(rs != scratch);
bgec(scratch, rs, offset);
}
break;
// Unsigned comparison.
case Ugreater:
// rs > rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
return false;
bnezc(scratch, offset);
} else if (IsZero(rt)) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
bnezc(rs, offset);
} else {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
DCHECK(rs != scratch);
bltuc(scratch, rs, offset);
}
break;
case Ugreater_equal:
// rs >= rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
return false;
beqzc(scratch, offset);
} else if (IsZero(rt)) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
DCHECK(rs != scratch);
bgeuc(rs, scratch, offset);
}
break;
case Uless:
// rs < rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset21, scratch, rt))
return false;
bnezc(scratch, offset);
} else if (IsZero(rt)) {
break; // No code needs to be emitted.
} else {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
DCHECK(rs != scratch);
bltuc(rs, scratch, offset);
}
break;
case Uless_equal:
// rs <= rt
if (rt.is_reg() && rs.code() == rt.rm().code()) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
bc(offset);
} else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset26, scratch, rt))
return false;
bc(offset);
} else if (IsZero(rt)) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset21)) return false;
beqzc(rs, offset);
} else {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
DCHECK(rs != scratch);
bgeuc(scratch, rs, offset);
}
break;
default:
UNREACHABLE();
}
}
CheckTrampolinePoolQuick(1);
return true;
}
bool TurboAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
Register rs, const Operand& rt,
BranchDelaySlot bdslot) {
DCHECK(L == nullptr || offset == 0);
if (!is_near(L, OffsetSize::kOffset16)) return false;
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
int32_t offset32;
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
    // target.
{
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
b(offset32);
break;
case eq:
if (IsZero(rt)) {
offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
beq(rs, zero_reg, offset32);
} else {
// We don't want any other register but scratch clobbered.
scratch = GetRtAsRegisterHelper(rt, scratch);
offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
beq(rs, scratch, offset32);
}
break;
case ne:
if (IsZero(rt)) {
offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
bne(rs, zero_reg, offset32);
} else {
// We don't want any other register but scratch clobbered.
scratch = GetRtAsRegisterHelper(rt, scratch);
offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
bne(rs, scratch, offset32);
}
break;
// Signed comparison.
case greater:
if (IsZero(rt)) {
offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
bgtz(rs, offset32);
} else {
Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
bne(scratch, zero_reg, offset32);
}
break;
case greater_equal:
if (IsZero(rt)) {
offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
bgez(rs, offset32);
} else {
Slt(scratch, rs, rt);
offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
beq(scratch, zero_reg, offset32);
}
break;
case less:
if (IsZero(rt)) {
offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
bltz(rs, offset32);
} else {
Slt(scratch, rs, rt);
offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
bne(scratch, zero_reg, offset32);
}
break;
case less_equal:
if (IsZero(rt)) {
offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
blez(rs, offset32);
} else {
Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
beq(scratch, zero_reg, offset32);
}
break;
// Unsigned comparison.
case Ugreater:
if (IsZero(rt)) {
offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
bne(rs, zero_reg, offset32);
} else {
Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
bne(scratch, zero_reg, offset32);
}
break;
case Ugreater_equal:
if (IsZero(rt)) {
offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
b(offset32);
} else {
Sltu(scratch, rs, rt);
offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
beq(scratch, zero_reg, offset32);
}
break;
case Uless:
if (IsZero(rt)) {
return true; // No code needs to be emitted.
} else {
Sltu(scratch, rs, rt);
offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
bne(scratch, zero_reg, offset32);
}
break;
case Uless_equal:
if (IsZero(rt)) {
offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
beq(rs, zero_reg, offset32);
} else {
Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
beq(scratch, zero_reg, offset32);
}
break;
default:
UNREACHABLE();
}
}
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
return true;
}
bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
Register rs, const Operand& rt,
BranchDelaySlot bdslot) {
BRANCH_ARGS_CHECK(cond, rs, rt);
if (!L) {
if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
DCHECK(is_int26(offset));
return BranchShortHelperR6(offset, nullptr, cond, rs, rt);
} else {
DCHECK(is_int16(offset));
return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
}
} else {
DCHECK_EQ(offset, 0);
if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
return BranchShortHelperR6(0, L, cond, rs, rt);
} else {
return BranchShortHelper(0, L, cond, rs, rt, bdslot);
}
}
return false;
}
void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bdslot) {
BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
}
void TurboAssembler::BranchShort(Label* L, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bdslot) {
BranchShortCheck(0, L, cond, rs, rt, bdslot);
}
void TurboAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
BranchAndLinkShort(offset, bdslot);
}
void TurboAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bdslot) {
bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
DCHECK(is_near);
USE(is_near);
}
void TurboAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
if (L->is_bound()) {
if (is_near_branch(L)) {
BranchAndLinkShort(L, bdslot);
} else {
BranchAndLinkLong(L, bdslot);
}
} else {
if (is_trampoline_emitted()) {
BranchAndLinkLong(L, bdslot);
} else {
BranchAndLinkShort(L, bdslot);
}
}
}
void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bdslot) {
if (L->is_bound()) {
if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
BranchAndLinkLong(L, bdslot);
bind(&skip);
}
} else {
if (is_trampoline_emitted()) {
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
BranchAndLinkLong(L, bdslot);
bind(&skip);
} else {
BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot);
}
}
}
void TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
BranchDelaySlot bdslot) {
DCHECK(L == nullptr || offset == 0);
offset = GetOffset(offset, L, OffsetSize::kOffset16);
bal(offset);
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
}
void TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
DCHECK(L == nullptr || offset == 0);
offset = GetOffset(offset, L, OffsetSize::kOffset26);
balc(offset);
}
void TurboAssembler::BranchAndLinkShort(int32_t offset,
BranchDelaySlot bdslot) {
if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
DCHECK(is_int26(offset));
BranchAndLinkShortHelperR6(offset, nullptr);
} else {
DCHECK(is_int16(offset));
BranchAndLinkShortHelper(offset, nullptr, bdslot);
}
}
void TurboAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
BranchAndLinkShortHelperR6(0, L);
} else {
BranchAndLinkShortHelper(0, L, bdslot);
}
}
bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
Condition cond, Register rs,
const Operand& rt) {
DCHECK(L == nullptr || offset == 0);
UseScratchRegisterScope temps(this);
Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
OffsetSize bits = OffsetSize::kOffset16;
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
switch (cond) {
case cc_always:
if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
balc(offset);
break;
case eq:
if (!is_near(L, bits)) return false;
Subu(scratch, rs, rt);
offset = GetOffset(offset, L, bits);
beqzalc(scratch, offset);
break;
case ne:
if (!is_near(L, bits)) return false;
Subu(scratch, rs, rt);
offset = GetOffset(offset, L, bits);
bnezalc(scratch, offset);
break;
// Signed comparison.
case greater:
// rs > rt
if (rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
bltzalc(scratch, offset);
} else if (IsZero(rt)) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
bgtzalc(rs, offset);
} else {
if (!is_near(L, bits)) return false;
Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
offset = GetOffset(offset, L, bits);
bnezalc(scratch, offset);
}
break;
case greater_equal:
// rs >= rt
if (rs.code() == rt.rm().code()) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
balc(offset);
} else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
blezalc(scratch, offset);
} else if (IsZero(rt)) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
bgezalc(rs, offset);
} else {
if (!is_near(L, bits)) return false;
Slt(scratch, rs, rt);
offset = GetOffset(offset, L, bits);
beqzalc(scratch, offset);
}
break;
case less:
// rs < rt
if (rs.code() == rt.rm().code()) {
break; // No code needs to be emitted.
} else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
bgtzalc(scratch, offset);
} else if (IsZero(rt)) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
bltzalc(rs, offset);
} else {
if (!is_near(L, bits)) return false;
Slt(scratch, rs, rt);
offset = GetOffset(offset, L, bits);
bnezalc(scratch, offset);
}
break;
case less_equal:
      // rs <= rt
if (rs.code() == rt.rm().code()) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset26)) return false;
balc(offset);
} else if (rs == zero_reg) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16, scratch, rt))
return false;
bgezalc(scratch, offset);
} else if (IsZero(rt)) {
if (!CalculateOffset(L, offset, OffsetSize::kOffset16)) return false;
blezalc(rs, offset);
} else {
if (!is_near(L, bits)) return false;
Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
offset = GetOffset(offset, L, bits);
beqzalc(scratch, offset);
}
break;
// Unsigned comparison.
case Ugreater:
      // rs > rt
if (!is_near(L, bits)) return false;
Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
offset = GetOffset(offset, L, bits);
bnezalc(scratch, offset);
break;
case Ugreater_equal:
      // rs >= rt
if (!is_near(L, bits)) return false;
Sltu(scratch, rs, rt);
offset = GetOffset(offset, L, bits);
beqzalc(scratch, offset);
break;
case Uless:
      // rs < rt
if (!is_near(L, bits)) return false;
Sltu(scratch, rs, rt);
offset = GetOffset(offset, L, bits);
bnezalc(scratch, offset);
break;
case Uless_equal:
      // rs <= rt
if (!is_near(L, bits)) return false;
Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
offset = GetOffset(offset, L, bits);
beqzalc(scratch, offset);
break;
default:
UNREACHABLE();
}
return true;
}
// Pre r6 we need to use a bgezal or bltzal, but they can't be used directly
// with the slt instructions. We could use sub or add instead but we would miss
// overflow cases, so we keep slt and add an intermediate third instruction.
bool TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
DCHECK(L == nullptr || offset == 0);
if (!is_near(L, OffsetSize::kOffset16)) return false;
Register scratch = t8;
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
offset = GetOffset(offset, L, OffsetSize::kOffset16);
bal(offset);
break;
case eq:
bne(rs, GetRtAsRegisterHelper(rt, scratch), 2);
nop();
offset = GetOffset(offset, L, OffsetSize::kOffset16);
bal(offset);
break;
case ne:
beq(rs, GetRtAsRegisterHelper(rt, scratch), 2);
nop();
offset = GetOffset(offset, L, OffsetSize::kOffset16);
bal(offset);
break;
// Signed comparison.
case greater:
Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
addiu(scratch, scratch, -1);
offset = GetOffset(offset, L, OffsetSize::kOffset16);
bgezal(scratch, offset);
break;
case greater_equal:
Slt(scratch, rs, rt);
addiu(scratch, scratch, -1);
offset = GetOffset(offset, L, OffsetSize::kOffset16);
bltzal(scratch, offset);
break;
case less:
Slt(scratch, rs, rt);
addiu(scratch, scratch, -1);
offset = GetOffset(offset, L, OffsetSize::kOffset16);
bgezal(scratch, offset);
break;
case less_equal:
Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
addiu(scratch, scratch, -1);
offset = GetOffset(offset, L, OffsetSize::kOffset16);
bltzal(scratch, offset);
break;
// Unsigned comparison.
case Ugreater:
Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
addiu(scratch, scratch, -1);
offset = GetOffset(offset, L, OffsetSize::kOffset16);
bgezal(scratch, offset);
break;
case Ugreater_equal:
Sltu(scratch, rs, rt);
addiu(scratch, scratch, -1);
offset = GetOffset(offset, L, OffsetSize::kOffset16);
bltzal(scratch, offset);
break;
case Uless:
Sltu(scratch, rs, rt);
addiu(scratch, scratch, -1);
offset = GetOffset(offset, L, OffsetSize::kOffset16);
bgezal(scratch, offset);
break;
case Uless_equal:
Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
addiu(scratch, scratch, -1);
offset = GetOffset(offset, L, OffsetSize::kOffset16);
bltzal(scratch, offset);
break;
default:
UNREACHABLE();
}
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
return true;
}
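// Illustrative reference only (guarded out of the build): a plain C++ sketch
// of the slt / addiu / bgezal pattern used above. Slt leaves 0 or 1 in the
// scratch register, subtracting 1 maps that to -1 or 0, and bgezal (branch
// and link if >= 0) then links exactly when the original comparison held.
// The Sketch* name is local to this example.
#if 0
static bool SketchBranchAndLinkTaken(bool slt_result) {
  int scratch = (slt_result ? 1 : 0) - 1;  // addiu scratch, scratch, -1
  return scratch >= 0;                     // bgezal takes the branch.
}
#endif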
bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
BRANCH_ARGS_CHECK(cond, rs, rt);
if (!L) {
if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
DCHECK(is_int26(offset));
return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt);
} else {
DCHECK(is_int16(offset));
return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
}
} else {
DCHECK_EQ(offset, 0);
if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
} else {
return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
}
}
return false;
}
void TurboAssembler::Jump(Register target, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (kArchVariant == kMips64r6 && bd == PROTECT) {
if (cond == cc_always) {
jic(target, 0);
} else {
BRANCH_ARGS_CHECK(cond, rs, rt);
Branch(2, NegateCondition(cond), rs, rt);
jic(target, 0);
}
} else {
if (cond == cc_always) {
jr(target);
} else {
BRANCH_ARGS_CHECK(cond, rs, rt);
Branch(2, NegateCondition(cond), rs, rt);
jr(target);
}
// Emit a nop in the branch delay slot if required.
if (bd == PROTECT) nop();
}
}
void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
Label skip;
if (cond != cc_always) {
Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
}
  // The first instruction of 'li' may be placed in the delay slot. This is
  // not an issue because t9 is expected to be clobbered anyway.
li(t9, Operand(target, rmode));
Jump(t9, al, zero_reg, Operand(zero_reg), bd);
bind(&skip);
}
void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
Register rs, const Operand& rt, BranchDelaySlot bd) {
DCHECK(!RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
}
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(code.address()), rmode, cond, rs, rt, bd);
}
int TurboAssembler::CallSize(Register target, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bd) {
int size = 0;
if (cond == cc_always) {
size += 1;
} else {
size += 3;
}
if (bd == PROTECT && kArchVariant != kMips64r6) size += 1;
return size * kInstrSize;
}
// Note: To call gcc-compiled C code on mips, you must call through t9.
void TurboAssembler::Call(Register target, Condition cond, Register rs,
const Operand& rt, BranchDelaySlot bd) {
#ifdef DEBUG
int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
#endif
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
if (kArchVariant == kMips64r6 && bd == PROTECT) {
if (cond == cc_always) {
jialc(target, 0);
} else {
BRANCH_ARGS_CHECK(cond, rs, rt);
Branch(2, NegateCondition(cond), rs, rt);
jialc(target, 0);
}
} else {
if (cond == cc_always) {
jalr(target);
} else {
BRANCH_ARGS_CHECK(cond, rs, rt);
Branch(2, NegateCondition(cond), rs, rt);
jalr(target);
}
// Emit a nop in the branch delay slot if required.
if (bd == PROTECT) nop();
}
#ifdef DEBUG
DCHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
#endif
}
int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
int size = CallSize(t9, cond, rs, rt, bd);
return size + 4 * kInstrSize;
}
void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
Register rs, const Operand& rt, BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
int64_t target_int = reinterpret_cast<int64_t>(target);
li(t9, Operand(target_int, rmode), ADDRESS_LOAD);
Call(t9, cond, rs, rt, bd);
DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
}
int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
return CallSize(code.address(), rmode, cond, rs, rt, bd);
}
void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
DCHECK(RelocInfo::IsCodeTarget(rmode));
Call(code.address(), rmode, cond, rs, rt, bd);
DCHECK_EQ(CallSize(code, rmode, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
}
void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
Jump(ra, cond, rs, rt, bd);
}
void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
(!L->is_bound() || is_near_r6(L))) {
BranchShortHelperR6(0, L);
} else {
EmitForbiddenSlotInstruction();
BlockTrampolinePoolScope block_trampoline_pool(this);
{
BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal references
// until associated instructions are emitted and available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
j(L);
}
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT) nop();
}
}
void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
(!L->is_bound() || is_near_r6(L))) {
BranchAndLinkShortHelperR6(0, L);
} else {
EmitForbiddenSlotInstruction();
BlockTrampolinePoolScope block_trampoline_pool(this);
{
BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal references
// until associated instructions are emitted and available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
jal(L);
}
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT) nop();
}
}
void TurboAssembler::DropAndRet(int drop) {
DCHECK(is_int16(drop * kPointerSize));
Ret(USE_DELAY_SLOT);
daddiu(sp, sp, drop * kPointerSize);
}
void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1,
const Operand& r2) {
// Both Drop and Ret need to be conditional.
Label skip;
if (cond != cc_always) {
Branch(&skip, NegateCondition(cond), r1, r2);
}
Drop(drop);
Ret();
if (cond != cc_always) {
bind(&skip);
}
}
void TurboAssembler::Drop(int count, Condition cond, Register reg,
const Operand& op) {
if (count <= 0) {
return;
}
Label skip;
if (cond != al) {
Branch(&skip, NegateCondition(cond), reg, op);
}
Daddu(sp, sp, Operand(count * kPointerSize));
if (cond != al) {
bind(&skip);
}
}
void MacroAssembler::Swap(Register reg1,
Register reg2,
Register scratch) {
if (scratch == no_reg) {
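    // No spare register was provided, so use the classic XOR swap. Note
    // (illustrative): this relies on reg1 and reg2 being distinct registers;
    // if both named the same register, the sequence would clear it.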
Xor(reg1, reg1, Operand(reg2));
Xor(reg2, reg2, Operand(reg1));
Xor(reg1, reg1, Operand(reg2));
} else {
mov(scratch, reg1);
mov(reg1, reg2);
mov(reg2, scratch);
}
}
void TurboAssembler::Call(Label* target) { BranchAndLink(target); }
void TurboAssembler::Push(Smi* smi) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(smi));
push(scratch);
}
void TurboAssembler::Push(Handle<HeapObject> handle) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(handle));
push(scratch);
}
void MacroAssembler::MaybeDropFrames() {
// Check whether we need to drop frames to restart a function on the stack.
ExternalReference restart_fp =
ExternalReference::debug_restart_fp_address(isolate());
li(a1, Operand(restart_fp));
Ld(a1, MemOperand(a1));
Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
ne, a1, Operand(zero_reg));
}
// ---------------------------------------------------------------------------
// Exception handling.
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
Push(Smi::kZero); // Padding.
// Link the current handler as the next handler.
li(a6,
Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
Ld(a5, MemOperand(a6));
push(a5);
// Set this new handler as the current one.
Sd(sp, MemOperand(a6));
}
void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(a1);
Daddu(sp, sp, Operand(static_cast<int64_t>(StackHandlerConstants::kSize -
kPointerSize)));
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch,
Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
Sd(a1, MemOperand(scratch));
}
void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
sub_d(dst, src, kDoubleRegZero);
}
void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
if (IsMipsSoftFloatABI) {
if (kArchEndian == kLittle) {
Move(dst, v0, v1);
} else {
Move(dst, v1, v0);
}
} else {
    Move(dst, f0);  // Reg f0 is n64 ABI FP return value.
}
}
void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
if (IsMipsSoftFloatABI) {
if (kArchEndian == kLittle) {
Move(dst, a0, a1);
} else {
Move(dst, a1, a0);
}
} else {
Move(dst, f12); // Reg f12 is n64 ABI FP first argument value.
}
}
void TurboAssembler::MovToFloatParameter(DoubleRegister src) {
if (!IsMipsSoftFloatABI) {
Move(f12, src);
} else {
if (kArchEndian == kLittle) {
Move(a0, a1, src);
} else {
Move(a1, a0, src);
}
}
}
void TurboAssembler::MovToFloatResult(DoubleRegister src) {
if (!IsMipsSoftFloatABI) {
Move(f0, src);
} else {
if (kArchEndian == kLittle) {
Move(v0, v1, src);
} else {
Move(v1, v0, src);
}
}
}
void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
DoubleRegister src2) {
if (!IsMipsSoftFloatABI) {
const DoubleRegister fparg2 = f13;
if (src2 == f12) {
DCHECK(src1 != fparg2);
Move(fparg2, src2);
Move(f12, src1);
} else {
Move(f12, src1);
Move(fparg2, src2);
}
} else {
if (kArchEndian == kLittle) {
Move(a0, a1, src1);
Move(a2, a3, src2);
} else {
Move(a1, a0, src1);
Move(a3, a2, src2);
}
}
}
// -----------------------------------------------------------------------------
// JavaScript invokes.
void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg,
Register scratch0, Register scratch1) {
#if DEBUG
if (callee_args_count.is_reg()) {
DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
scratch1));
} else {
DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
}
#endif
// Calculate the end of destination area where we will put the arguments
// after we drop current frame. We add kPointerSize to count the receiver
// argument which is not included into formal parameters count.
Register dst_reg = scratch0;
Dlsa(dst_reg, fp, caller_args_count_reg, kPointerSizeLog2);
Daddu(dst_reg, dst_reg,
Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
Register src_reg = caller_args_count_reg;
// Calculate the end of source area. +kPointerSize is for the receiver.
if (callee_args_count.is_reg()) {
Dlsa(src_reg, sp, callee_args_count.reg(), kPointerSizeLog2);
Daddu(src_reg, src_reg, Operand(kPointerSize));
} else {
Daddu(src_reg, sp,
Operand((callee_args_count.immediate() + 1) * kPointerSize));
}
if (FLAG_debug_code) {
Check(lo, AbortReason::kStackAccessBelowStackPointer, src_reg,
Operand(dst_reg));
}
// Restore caller's frame pointer and return address now as they will be
// overwritten by the copying loop.
Ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
Ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
// Now copy callee arguments to the caller frame going backwards to avoid
// callee arguments corruption (source and destination areas could overlap).
// Both src_reg and dst_reg are pointing to the word after the one to copy,
// so they must be pre-decremented in the loop.
Register tmp_reg = scratch1;
Label loop, entry;
Branch(&entry);
bind(&loop);
Dsubu(src_reg, src_reg, Operand(kPointerSize));
Dsubu(dst_reg, dst_reg, Operand(kPointerSize));
Ld(tmp_reg, MemOperand(src_reg));
Sd(tmp_reg, MemOperand(dst_reg));
bind(&entry);
Branch(&loop, ne, sp, Operand(src_reg));
// Leave current frame.
mov(sp, dst_reg);
}
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual, Label* done,
bool* definitely_mismatches,
InvokeFlag flag) {
bool definitely_matches = false;
*definitely_mismatches = false;
Label regular_invoke;
// Check whether the expected and actual arguments count match. If not,
// setup registers according to contract with ArgumentsAdaptorTrampoline:
// a0: actual arguments count
// a1: function (passed through to callee)
// a2: expected arguments count
// The code below is made a lot easier because the calling code already sets
// up actual and expected registers according to the contract if values are
// passed in registers.
DCHECK(actual.is_immediate() || actual.reg() == a0);
DCHECK(expected.is_immediate() || expected.reg() == a2);
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
li(a0, Operand(actual.immediate()));
if (expected.immediate() == actual.immediate()) {
definitely_matches = true;
} else {
const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
if (expected.immediate() == sentinel) {
// Don't worry about adapting arguments for builtins that
        // don't want that done. Skip adaptation code by making it look
// like we have a match between expected and actual number of
// arguments.
definitely_matches = true;
} else {
*definitely_mismatches = true;
li(a2, Operand(expected.immediate()));
}
}
} else if (actual.is_immediate()) {
li(a0, Operand(actual.immediate()));
    Branch(&regular_invoke, eq, expected.reg(), Operand(a0));
} else {
    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
}
if (!definitely_matches) {
Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
if (flag == CALL_FUNCTION) {
Call(adaptor);
if (!*definitely_mismatches) {
Branch(done);
}
} else {
Jump(adaptor, RelocInfo::CODE_TARGET);
}
    bind(&regular_invoke);
}
}
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_hook;
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
li(t0, Operand(debug_hook_active));
Lb(t0, MemOperand(t0));
Branch(&skip_hook, eq, t0, Operand(zero_reg));
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
if (expected.is_reg()) {
SmiTag(expected.reg());
Push(expected.reg());
}
if (actual.is_reg()) {
SmiTag(actual.reg());
Push(actual.reg());
}
if (new_target.is_valid()) {
Push(new_target);
}
Push(fun);
Push(fun);
CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
}
if (actual.is_reg()) {
Pop(actual.reg());
SmiUntag(actual.reg());
}
if (expected.is_reg()) {
Pop(expected.reg());
SmiUntag(expected.reg());
}
}
bind(&skip_hook);
}
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
DCHECK(function == a1);
DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
// On function call, call into the debugger if necessary.
CheckDebugHook(function, new_target, expected, actual);
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
LoadRoot(a3, Heap::kUndefinedValueRootIndex);
}
Label done;
bool definitely_mismatches = false;
InvokePrologue(expected, actual, &done, &definitely_mismatches, flag);
if (!definitely_mismatches) {
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
Ld(code, FieldMemOperand(function, JSFunction::kCodeOffset));
if (flag == CALL_FUNCTION) {
Daddu(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
Call(code);
} else {
DCHECK(flag == JUMP_FUNCTION);
Daddu(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
Jump(code);
}
// Continue here if InvokePrologue does handle the invocation due to
// mismatched parameter counts.
bind(&done);
}
}
void MacroAssembler::InvokeFunction(Register function, Register new_target,
const ParameterCount& actual,
InvokeFlag flag) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that function is passed in a1.
DCHECK(function == a1);
Register expected_reg = a2;
Register temp_reg = t0;
Ld(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// The argument count is stored as int32_t on 64-bit platforms.
// TODO(plind): Smi on 32-bit platforms.
Lw(expected_reg,
FieldMemOperand(temp_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount expected(expected_reg);
InvokeFunctionCode(a1, new_target, expected, actual, flag);
}
void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag) {
// You can't call a function without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that function is passed in a1.
DCHECK(function == a1);
// Get the function and setup the context.
Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
InvokeFunctionCode(a1, no_reg, expected, actual, flag);
}
// ---------------------------------------------------------------------------
// Support functions.
void MacroAssembler::GetObjectType(Register object,
Register map,
Register type_reg) {
Ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
Lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
}
// -----------------------------------------------------------------------------
// Runtime calls.
void MacroAssembler::CallStub(CodeStub* stub,
Condition cond,
Register r1,
const Operand& r2,
BranchDelaySlot bd) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
}
void TurboAssembler::CallStubDelayed(CodeStub* stub, Condition cond,
Register r1, const Operand& r2,
BranchDelaySlot bd) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
BlockTrampolinePoolScope block_trampoline_pool(this);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand::EmbeddedCode(stub));
Call(scratch);
}
void MacroAssembler::TailCallStub(CodeStub* stub,
Condition cond,
Register r1,
const Operand& r2,
BranchDelaySlot bd) {
Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
}
bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame() || !stub->SometimesSetsUpAFrame();
}
void TurboAssembler::DaddOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
Register right_reg = no_reg;
Register scratch = t8;
if (!right.is_reg()) {
li(at, Operand(right));
right_reg = at;
} else {
right_reg = right.rm();
}
DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
overflow != scratch);
DCHECK(overflow != left && overflow != right_reg);
if (dst == left || dst == right_reg) {
daddu(scratch, left, right_reg);
xor_(overflow, scratch, left);
xor_(at, scratch, right_reg);
and_(overflow, overflow, at);
mov(dst, scratch);
} else {
daddu(dst, left, right_reg);
xor_(overflow, dst, left);
xor_(at, dst, right_reg);
and_(overflow, overflow, at);
}
}
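// Note on the overflow checks above and below (illustrative): for two's
// complement addition, signed overflow occurs exactly when both operands have
// the same sign and the sum's sign differs, which is when the sign bit of
// (sum ^ left) & (sum ^ right) is set. For example, adding
// 0x4000000000000000 to itself yields 0x8000000000000000, and both xor terms
// have their sign bits set, flagging the overflow. DsubOverflow uses the
// analogous test (left ^ result) & (left ^ right).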
void TurboAssembler::DsubOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
Register right_reg = no_reg;
Register scratch = t8;
if (!right.is_reg()) {
li(at, Operand(right));
right_reg = at;
} else {
right_reg = right.rm();
}
DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
overflow != scratch);
DCHECK(overflow != left && overflow != right_reg);
if (dst == left || dst == right_reg) {
dsubu(scratch, left, right_reg);
xor_(overflow, left, scratch);
xor_(at, left, right_reg);
and_(overflow, overflow, at);
mov(dst, scratch);
} else {
dsubu(dst, left, right_reg);
xor_(overflow, left, dst);
xor_(at, left, right_reg);
and_(overflow, overflow, at);
}
}
void TurboAssembler::MulOverflow(Register dst, Register left,
const Operand& right, Register overflow) {
Register right_reg = no_reg;
Register scratch = t8;
if (!right.is_reg()) {
li(at, Operand(right));
right_reg = at;
} else {
right_reg = right.rm();
}
DCHECK(left != scratch && right_reg != scratch && dst != scratch &&
overflow != scratch);
DCHECK(overflow != left && overflow != right_reg);
if (dst == left || dst == right_reg) {
Mul(scratch, left, right_reg);
Mulh(overflow, left, right_reg);
mov(dst, scratch);
} else {
Mul(dst, left, right_reg);
Mulh(overflow, left, right_reg);
}
dsra32(scratch, dst, 0);
xor_(overflow, overflow, scratch);
}
void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles,
BranchDelaySlot bd) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
PrepareCEntryArgs(f->nargs);
PrepareCEntryFunction(ExternalReference(f, isolate()));
CallStubDelayed(new (zone) CEntryStub(nullptr, 1, save_doubles));
}
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles,
BranchDelaySlot bd) {
// All parameters are on the stack. v0 has the return value after call.
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
CHECK(f->nargs < 0 || f->nargs == num_arguments);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
PrepareCEntryArgs(num_arguments);
PrepareCEntryFunction(ExternalReference(f, isolate()));
CEntryStub stub(isolate(), 1, save_doubles);
CallStub(&stub, al, zero_reg, Operand(zero_reg), bd);
}
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
DCHECK_EQ(1, function->result_size);
if (function->nargs >= 0) {
PrepareCEntryArgs(function->nargs);
}
JumpToExternalReference(ExternalReference(fid, isolate()));
}
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
BranchDelaySlot bd,
bool builtin_exit_frame) {
PrepareCEntryFunction(builtin);
CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
builtin_exit_frame);
Jump(stub.GetCode(),
RelocInfo::CODE_TARGET,
al,
zero_reg,
Operand(zero_reg),
bd);
}
void MacroAssembler::JumpToInstructionStream(Address entry) {
li(kOffHeapTrampolineRegister,
Operand(reinterpret_cast<uint64_t>(entry), RelocInfo::OFF_HEAP_TARGET));
Jump(kOffHeapTrampolineRegister);
}
void MacroAssembler::LoadWeakValue(Register out, Register in,
Label* target_if_cleared) {
Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObject));
And(out, in, Operand(~kWeakHeapObjectMask));
}
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
li(scratch2, Operand(ExternalReference(counter)));
Lw(scratch1, MemOperand(scratch2));
Addu(scratch1, scratch1, Operand(value));
Sw(scratch1, MemOperand(scratch2));
}
}
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
li(scratch2, Operand(ExternalReference(counter)));
Lw(scratch1, MemOperand(scratch2));
Subu(scratch1, scratch1, Operand(value));
Sw(scratch1, MemOperand(scratch2));
}
}
// -----------------------------------------------------------------------------
// Debugging.
void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
Operand rt) {
if (emit_debug_code())
Check(cc, reason, rs, rt);
}
void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
Operand rt) {
Label L;
Branch(&L, cc, rs, rt);
Abort(reason);
// Will not return here.
bind(&L);
}
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
#ifdef DEBUG
const char* msg = GetAbortReason(reason);
if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
}
if (FLAG_trap_on_abort) {
stop(msg);
return;
}
#endif
Move(a0, Smi::FromInt(static_cast<int>(reason)));
// Disable stub call restrictions to always allow calls to abort.
if (!has_frame()) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
} else {
Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
}
// Will not return here.
if (is_trampoline_pool_blocked()) {
// If the calling code cares about the exact number of
// instructions generated, we insert padding here to keep the size
// of the Abort macro constant.
// Currently in debug mode with debug_code enabled the number of
// generated instructions is 10, so we use this as a maximum value.
static const int kExpectedAbortInstructions = 10;
int abort_instructions = InstructionsGeneratedSince(&abort_start);
DCHECK_LE(abort_instructions, kExpectedAbortInstructions);
while (abort_instructions++ < kExpectedAbortInstructions) {
nop();
}
}
}
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
Ld(dst, NativeContextMemOperand());
Ld(dst, ContextMemOperand(dst, index));
}
void TurboAssembler::StubPrologue(StackFrame::Type type) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(scratch);
}
void TurboAssembler::Prologue() { PushStandardFrame(a1); }
void TurboAssembler::EnterFrame(StackFrame::Type type) {
int stack_offset, fp_offset;
if (type == StackFrame::INTERNAL) {
stack_offset = -4 * kPointerSize;
fp_offset = 2 * kPointerSize;
} else {
stack_offset = -3 * kPointerSize;
fp_offset = 1 * kPointerSize;
}
daddiu(sp, sp, stack_offset);
stack_offset = -stack_offset - kPointerSize;
Sd(ra, MemOperand(sp, stack_offset));
stack_offset -= kPointerSize;
Sd(fp, MemOperand(sp, stack_offset));
stack_offset -= kPointerSize;
li(t9, Operand(StackFrame::TypeToMarker(type)));
Sd(t9, MemOperand(sp, stack_offset));
if (type == StackFrame::INTERNAL) {
DCHECK_EQ(stack_offset, kPointerSize);
li(t9, Operand(CodeObject()));
Sd(t9, MemOperand(sp, 0));
} else {
DCHECK_EQ(stack_offset, 0);
}
// Adjust FP to point to saved FP.
Daddu(fp, sp, Operand(fp_offset));
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
daddiu(sp, fp, 2 * kPointerSize);
Ld(ra, MemOperand(fp, 1 * kPointerSize));
Ld(fp, MemOperand(fp, 0 * kPointerSize));
}
void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
Register argc) {
Push(ra, fp);
Move(fp, sp);
Push(context, target, argc);
}
void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
Register argc) {
Pop(context, target, argc);
Pop(ra, fp);
}
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StackFrame::Type frame_type) {
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);
// Set up the frame structure on the stack.
STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
// This is how the stack will look:
// fp + 2 (==kCallerSPDisplacement) - old stack's end
// [fp + 1 (==kCallerPCOffset)] - saved old ra
// [fp + 0 (==kCallerFPOffset)] - saved old fp
  // [fp - 1] - StackFrame::EXIT Smi
// [fp - 2 (==kSPOffset)] - sp of the called function
// [fp - 3 (==kCodeOffset)] - CodeObject
// fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
// new stack (will contain saved ra)
// Save registers and reserve room for saved entry sp and code object.
daddiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
Sd(ra, MemOperand(sp, 4 * kPointerSize));
Sd(fp, MemOperand(sp, 3 * kPointerSize));
{
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch, Operand(StackFrame::TypeToMarker(frame_type)));
Sd(scratch, MemOperand(sp, 2 * kPointerSize));
}
// Set up new frame pointer.
daddiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
if (emit_debug_code()) {
Sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
// Accessed from ExitFrame::code_slot.
li(t8, Operand(CodeObject()), CONSTANT_SIZE);
Sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
// Save the frame pointer and the context in top.
li(t8,
Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress, isolate())));
Sd(fp, MemOperand(t8));
li(t8,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
Sd(cp, MemOperand(t8));
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
if (save_doubles) {
    // The stack is already 8-byte aligned, as required for sdc1 stores.
int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2;
int space = kNumOfSavedRegisters * kDoubleSize;
Dsubu(sp, sp, Operand(space));
// Remember: we only need to save every 2nd double FPU value.
for (int i = 0; i < kNumOfSavedRegisters; i++) {
FPURegister reg = FPURegister::from_code(2 * i);
Sdc1(reg, MemOperand(sp, i * kDoubleSize));
}
}
// Reserve place for the return address, stack space and an optional slot
// (used by the DirectCEntryStub to hold the return value if a struct is
// returned) and align the frame preparing for calling the runtime function.
DCHECK_GE(stack_space, 0);
Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize));
if (frame_alignment > 0) {
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
And(sp, sp, Operand(-frame_alignment)); // Align stack.
}
// Set the exit frame sp value to point just before the return address
// location.
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
daddiu(scratch, sp, kPointerSize);
Sd(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
bool do_return,
bool argument_count_is_length) {
// Optionally restore all double registers.
if (save_doubles) {
// Remember: we only need to restore every 2nd double FPU value.
int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2;
Dsubu(t8, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp +
kNumOfSavedRegisters * kDoubleSize));
for (int i = 0; i < kNumOfSavedRegisters; i++) {
FPURegister reg = FPURegister::from_code(2 * i);
Ldc1(reg, MemOperand(t8, i * kDoubleSize));
}
}
// Clear top frame.
li(t8,
Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress, isolate())));
Sd(zero_reg, MemOperand(t8));
// Restore current context from top and clear it in debug mode.
li(t8,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
Ld(cp, MemOperand(t8));
#ifdef DEBUG
li(t8,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
Sd(a3, MemOperand(t8));
#endif
// Pop the arguments, restore registers, and return.
mov(sp, fp); // Respect ABI stack constraint.
Ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
Ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
if (argument_count.is_valid()) {
if (argument_count_is_length) {
daddu(sp, sp, argument_count);
} else {
Dlsa(sp, sp, argument_count, kPointerSizeLog2, t8);
}
}
if (do_return) {
Ret(USE_DELAY_SLOT);
// If returning, the instruction in the delay slot will be the addiu below.
}
daddiu(sp, sp, 2 * kPointerSize);
}
int TurboAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
// Running on the real platform. Use the alignment as mandated by the local
// environment.
// Note: This will break if we ever start generating snapshots on one Mips
// platform for another Mips platform with a different alignment.
return base::OS::ActivationFrameAlignment();
#else // V8_HOST_ARCH_MIPS
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots we do not know
// if the target platform will need alignment, so this is controlled from a
// flag.
return FLAG_sim_stack_alignment;
#endif // V8_HOST_ARCH_MIPS
}
void MacroAssembler::AssertStackIsAligned() {
if (emit_debug_code()) {
const int frame_alignment = ActivationFrameAlignment();
const int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
Label alignment_as_expected;
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
{
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
andi(scratch, sp, frame_alignment_mask);
Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
}
// Don't use Check here, as it will call Runtime_Abort re-entering here.
stop("Unexpected stack alignment");
bind(&alignment_as_expected);
}
}
}
void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
if (SmiValuesAre32Bits()) {
Lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
} else {
Lw(dst, src);
SmiUntag(dst);
}
}
void MacroAssembler::UntagAndJumpIfSmi(Register dst,
Register src,
Label* smi_case) {
// DCHECK(dst!=src);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
JumpIfSmi(src, smi_case, scratch, USE_DELAY_SLOT);
SmiUntag(dst, src);
}
void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
Register scratch, BranchDelaySlot bd) {
DCHECK_EQ(0, kSmiTag);
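  // A Smi has its low tag bit(s) clear (see the DCHECK above), so masking
  // with kSmiTagMask yields zero exactly for Smis.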
andi(scratch, value, kSmiTagMask);
Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
}
void MacroAssembler::JumpIfNotSmi(Register value,
Label* not_smi_label,
Register scratch,
BranchDelaySlot bd) {
DCHECK_EQ(0, kSmiTag);
andi(scratch, value, kSmiTagMask);
Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
}
void MacroAssembler::JumpIfEitherSmi(Register reg1,
Register reg2,
Label* on_either_smi) {
STATIC_ASSERT(kSmiTag == 0);
  // TODO(plind): Find some better way to fix this assert issue.
#if defined(__APPLE__)
DCHECK_EQ(1, kSmiTagMask);
#else
DCHECK_EQ((int64_t)1, kSmiTagMask);
#endif
// Both Smi tags must be 1 (not Smi).
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
and_(scratch, reg1, reg2);
JumpIfSmi(scratch, on_either_smi);
}
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
andi(scratch, object, kSmiTagMask);
Check(ne, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
}
}
void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
andi(scratch, object, kSmiTagMask);
Check(eq, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
}
}
void MacroAssembler::AssertFixedArray(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
Check(ne, AbortReason::kOperandIsASmiAndNotAFixedArray, t8,
Operand(zero_reg));
GetObjectType(object, t8, t8);
Check(eq, AbortReason::kOperandIsNotAFixedArray, t8,
Operand(FIXED_ARRAY_TYPE));
}
}
void MacroAssembler::AssertConstructor(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, t8,
Operand(zero_reg));
ld(t8, FieldMemOperand(object, HeapObject::kMapOffset));
Lbu(t8, FieldMemOperand(t8, Map::kBitFieldOffset));
And(t8, t8, Operand(Map::IsConstructorBit::kMask));
Check(ne, AbortReason::kOperandIsNotAConstructor, t8, Operand(zero_reg));
}
}
void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
Operand(zero_reg));
GetObjectType(object, t8, t8);
Check(eq, AbortReason::kOperandIsNotAFunction, t8,
Operand(JS_FUNCTION_TYPE));
}
}
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, t8,
Operand(zero_reg));
GetObjectType(object, t8, t8);
Check(eq, AbortReason::kOperandIsNotABoundFunction, t8,
Operand(JS_BOUND_FUNCTION_TYPE));
}
}
void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, t8,
Operand(zero_reg));
GetObjectType(object, t8, t8);
Label done;
// Check if JSGeneratorObject
Branch(&done, eq, t8, Operand(JS_GENERATOR_OBJECT_TYPE));
// Check if JSAsyncGeneratorObject
Branch(&done, eq, t8, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
Abort(AbortReason::kOperandIsNotAGeneratorObject);
bind(&done);
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
Label done_checking;
AssertNotSmi(object);
LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
Branch(&done_checking, eq, object, Operand(scratch));
Ld(t8, FieldMemOperand(object, HeapObject::kMapOffset));
LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
Assert(eq, AbortReason::kExpectedUndefinedOrCell, t8, Operand(scratch));
bind(&done_checking);
}
}
void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
if (src1 == src2) {
Move_s(dst, src1);
return;
}
// Check if one of operands is NaN.
CompareIsNanF32(src1, src2);
BranchTrueF(out_of_line);
if (kArchVariant >= kMips64r6) {
max_s(dst, src1, src2);
} else {
Label return_left, return_right, done;
CompareF32(OLT, src1, src2);
BranchTrueShortF(&return_right);
CompareF32(OLT, src2, src1);
BranchTrueShortF(&return_left);
// Operands are equal, but check for +/-0.
mfc1(t8, src1);
dsll32(t8, t8, 0);
Branch(&return_left, eq, t8, Operand(zero_reg));
Branch(&return_right);
bind(&return_right);
if (src2 != dst) {
Move_s(dst, src2);
}
Branch(&done);
bind(&return_left);
if (src1 != dst) {
Move_s(dst, src1);
}
bind(&done);
}
}
void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
FPURegister src2) {
add_s(dst, src1, src2);
}
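// Note (illustrative): Float32Max/Min branch to out_of_line only when one of
// the operands is a NaN, so the *OutOfLine handlers can simply add the
// operands to produce the required NaN result. The same holds for the
// Float64 variants below.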
void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
if (src1 == src2) {
Move_s(dst, src1);
return;
}
// Check if one of operands is NaN.
CompareIsNanF32(src1, src2);
BranchTrueF(out_of_line);
if (kArchVariant >= kMips64r6) {
min_s(dst, src1, src2);
} else {
Label return_left, return_right, done;
CompareF32(OLT, src1, src2);
BranchTrueShortF(&return_left);
CompareF32(OLT, src2, src1);
BranchTrueShortF(&return_right);
// Left equals right => check for -0.
mfc1(t8, src1);
dsll32(t8, t8, 0);
Branch(&return_right, eq, t8, Operand(zero_reg));
Branch(&return_left);
bind(&return_right);
if (src2 != dst) {
Move_s(dst, src2);
}
Branch(&done);
bind(&return_left);
if (src1 != dst) {
Move_s(dst, src1);
}
bind(&done);
}
}
void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
FPURegister src2) {
add_s(dst, src1, src2);
}
void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
if (src1 == src2) {
Move_d(dst, src1);
return;
}
// Check if one of operands is NaN.
CompareIsNanF64(src1, src2);
BranchTrueF(out_of_line);
if (kArchVariant >= kMips64r6) {
max_d(dst, src1, src2);
} else {
Label return_left, return_right, done;
CompareF64(OLT, src1, src2);
BranchTrueShortF(&return_right);
CompareF64(OLT, src2, src1);
BranchTrueShortF(&return_left);
// Left equals right => check for -0.
dmfc1(t8, src1);
Branch(&return_left, eq, t8, Operand(zero_reg));
Branch(&return_right);
bind(&return_right);
if (src2 != dst) {
Move_d(dst, src2);
}
Branch(&done);
bind(&return_left);
if (src1 != dst) {
Move_d(dst, src1);
}
bind(&done);
}
}
void TurboAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1,
FPURegister src2) {
add_d(dst, src1, src2);
}
void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
if (src1 == src2) {
Move_d(dst, src1);
return;
}
// Check if one of operands is NaN.
CompareIsNanF64(src1, src2);
BranchTrueF(out_of_line);
if (kArchVariant >= kMips64r6) {
min_d(dst, src1, src2);
} else {
Label return_left, return_right, done;
CompareF64(OLT, src1, src2);
BranchTrueShortF(&return_left);
CompareF64(OLT, src2, src1);
BranchTrueShortF(&return_right);
// Left equals right => check for -0.
dmfc1(t8, src1);
Branch(&return_right, eq, t8, Operand(zero_reg));
Branch(&return_left);
bind(&return_right);
if (src2 != dst) {
Move_d(dst, src2);
}
Branch(&done);
bind(&return_left);
if (src1 != dst) {
Move_d(dst, src1);
}
bind(&done);
}
}
void TurboAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1,
FPURegister src2) {
add_d(dst, src1, src2);
}
static const int kRegisterPassedArguments = 8;
int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments) {
int stack_passed_words = 0;
num_reg_arguments += 2 * num_double_arguments;
// O32: Up to four simple arguments are passed in registers a0..a3.
// N64: Up to eight simple arguments are passed in registers a0..a7.
if (num_reg_arguments > kRegisterPassedArguments) {
stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
}
stack_passed_words += kCArgSlotCount;
return stack_passed_words;
}
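// Worked example (illustrative): 3 integer arguments and 2 double arguments
// count as 3 + 2 * 2 = 7 register arguments, which fit in a0..a7 on n64, so
// only kCArgSlotCount words are reserved; with 10 register arguments,
// 10 - 8 = 2 additional words are passed on the stack.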
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
int frame_alignment = ActivationFrameAlignment();
  // n64: Up to eight simple arguments are passed in registers a0..a7;
  // no argument slots are used.
// O32: Up to four simple arguments are passed in registers a0..a3.
// Those four arguments must have reserved argument slots on the stack for
// mips, even though those argument slots are not normally used.
// Both ABIs: Remaining arguments are pushed on the stack, above (higher
// address than) the (O32) argument slots. (arg slot calculation handled by
// CalculateStackPassedWords()).
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
if (frame_alignment > kPointerSize) {
// Make stack end at alignment and make room for num_arguments - 4 words
// and the original value of sp.
mov(scratch, sp);
Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
And(sp, sp, Operand(-frame_alignment));
Sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
}
}
void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
Register scratch) {
PrepareCallCFunction(num_reg_arguments, 0, scratch);
}
void TurboAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
li(t9, Operand(function));
CallCFunctionHelper(t9, num_reg_arguments, num_double_arguments);
}
void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
int num_double_arguments) {
CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}
void TurboAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
CallCFunction(function, num_arguments, 0);
}
void TurboAssembler::CallCFunction(Register function, int num_arguments) {
CallCFunction(function, num_arguments, 0);
}
void TurboAssembler::CallCFunctionHelper(Register function,
int num_reg_arguments,
int num_double_arguments) {
DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
DCHECK(has_frame());
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
// provides more information.
  // The argument slots are presumed to have been set up by
// PrepareCallCFunction. The C function must be called via t9, for mips ABI.
#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
if (emit_debug_code()) {
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
Label alignment_as_expected;
{
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
And(scratch, sp, Operand(frame_alignment_mask));
Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
}
// Don't use Check here, as it will call Runtime_Abort possibly
// re-entering here.
stop("Unexpected alignment in CallCFunction");
bind(&alignment_as_expected);
}
}
#endif // V8_HOST_ARCH_MIPS
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
// stays correct.
if (function != t9) {
mov(t9, function);
function = t9;
}
Call(function);
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
if (base::OS::ActivationFrameAlignment() > kPointerSize) {
Ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
Daddu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
}
}
#undef BRANCH_ARGS_CHECK
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met) {
And(scratch, object, Operand(~Page::kPageAlignmentMask));
Ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
And(scratch, scratch, Operand(mask));
Branch(condition_met, cc, scratch, Operand(zero_reg));
}
Register GetRegisterThatIsNotOneOf(Register reg1,
Register reg2,
Register reg3,
Register reg4,
Register reg5,
Register reg6) {
RegList regs = 0;
if (reg1.is_valid()) regs |= reg1.bit();
if (reg2.is_valid()) regs |= reg2.bit();
if (reg3.is_valid()) regs |= reg3.bit();
if (reg4.is_valid()) regs |= reg4.bit();
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
int code = config->GetAllocatableGeneralCode(i);
Register candidate = Register::from_code(code);
if (regs & candidate.bit()) continue;
return candidate;
}
UNREACHABLE();
}
bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
Register reg5, Register reg6, Register reg7, Register reg8,
Register reg9, Register reg10) {
int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
reg10.is_valid();
RegList regs = 0;
if (reg1.is_valid()) regs |= reg1.bit();
if (reg2.is_valid()) regs |= reg2.bit();
if (reg3.is_valid()) regs |= reg3.bit();
if (reg4.is_valid()) regs |= reg4.bit();
if (reg5.is_valid()) regs |= reg5.bit();
if (reg6.is_valid()) regs |= reg6.bit();
if (reg7.is_valid()) regs |= reg7.bit();
if (reg8.is_valid()) regs |= reg8.bit();
if (reg9.is_valid()) regs |= reg9.bit();
if (reg10.is_valid()) regs |= reg10.bit();
int n_of_non_aliasing_regs = NumRegs(regs);
return n_of_valid_regs != n_of_non_aliasing_regs;
}
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
// This push on ra and the pop below together ensure that we restore the
// register ra, which is needed while computing the code start address.
push(ra);
// The bal instruction puts the address of the current instruction into
// the return address (ra) register, which we can use later on.
Label current;
  bal(&current);
nop();
int pc = pc_offset();
  bind(&current);
li(dst, Operand(pc));
Dsubu(dst, ra, dst);
pop(ra); // Restore ra
}
void TurboAssembler::ResetSpeculationPoisonRegister() {
li(kSpeculationPoisonRegister, -1);
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_MIPS64
|
zero-rp/miniblink49
|
v8_6_7/src/mips64/macro-assembler-mips64.cc
|
C++
|
apache-2.0
| 178,453 | 29.992185 | 80 | 0.603593 | false |
/*******************************************************************************
* Copyright (c) 2009, 2020 IBM Corp.
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v2.0
* and Eclipse Distribution License v1.0 which accompany this distribution.
*
* The Eclipse Public License is available at
* https://www.eclipse.org/legal/epl-2.0/
* and the Eclipse Distribution License is available at
* http://www.eclipse.org/org/documents/edl-v10.php.
*
* Contributors:
* Ian Craggs - initial API and implementation and/or initial documentation
* Ian Craggs - updates for the async client
* Ian Craggs - change size types from int to size_t
*******************************************************************************/
#if !defined(LINKEDLIST_H)
#define LINKEDLIST_H
#include <stdlib.h> /* for size_t definition */
/*BE
defm defList(T)
def T concat Item
{
at 4
n32 ptr T concat Item suppress "next"
at 0
n32 ptr T concat Item suppress "prev"
at 8
n32 ptr T id2str(T)
}
def T concat List
{
n32 ptr T concat Item suppress "first"
n32 ptr T concat Item suppress "last"
n32 ptr T concat Item suppress "current"
n32 dec "count"
n32 suppress "size"
}
endm
defList(INT)
defList(STRING)
defList(TMP)
BE*/
/**
* Structure to hold all data for one list element
*/
typedef struct ListElementStruct
{
struct ListElementStruct *prev, /**< pointer to previous list element */
*next; /**< pointer to next list element */
void* content; /**< pointer to element content */
} ListElement;
/**
* Structure to hold all data for one list
*/
typedef struct
{
ListElement *first, /**< first element in the list */
*last, /**< last element in the list */
*current; /**< current element in the list, for iteration */
int count; /**< no of items */
size_t size; /**< heap storage used */
} List;
void ListZero(List*);
List* ListInitialize(void);
ListElement* ListAppend(List* aList, void* content, size_t size);
void ListAppendNoMalloc(List* aList, void* content, ListElement* newel, size_t size);
ListElement* ListInsert(List* aList, void* content, size_t size, ListElement* index);
int ListRemove(List* aList, void* content);
int ListRemoveItem(List* aList, void* content, int(*callback)(void*, void*));
void* ListDetachHead(List* aList);
int ListRemoveHead(List* aList);
void* ListPopTail(List* aList);
int ListDetach(List* aList, void* content);
int ListDetachItem(List* aList, void* content, int(*callback)(void*, void*));
void ListFree(List* aList);
void ListEmpty(List* aList);
void ListFreeNoContent(List* aList);
ListElement* ListNextElement(List* aList, ListElement** pos);
ListElement* ListPrevElement(List* aList, ListElement** pos);
ListElement* ListFind(List* aList, void* content);
ListElement* ListFindItem(List* aList, void* content, int(*callback)(void*, void*));
int intcompare(void* a, void* b);
int stringcompare(void* a, void* b);
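/**
 * Usage sketch (illustrative only; it assumes, as the function names suggest,
 * that ListFree releases both the elements and their heap-allocated content):
 *
 *   List* l = ListInitialize();
 *   int* v = malloc(sizeof(int));
 *   *v = 42;
 *   ListAppend(l, v, sizeof(int));
 *   ListElement* cur = NULL;
 *   while (ListNextElement(l, &cur) != NULL)
 *       printf("%d\n", *(int*)cur->content);
 *   ListFree(l);
 */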
#endif
|
macchina-io/macchina.io
|
protocols/MQTT/Paho/src/LinkedList.h
|
C
|
apache-2.0
| 3,007 | 27.638095 | 85 | 0.675091 | false |
/*
* Copyright 2012-2013 inBloom, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
using System.Runtime.Serialization;
namespace InBloomClient.Enum
{
/// <summary>
/// A school that has been designed: 1) to attract students of different racial/ethnic backgrounds for the purpose of reducing, preventing,
    /// or eliminating racial isolation; and/or 2) to provide an academic or social focus on a particular theme
/// (e.g., science/math, performing arts, gifted/talented, or foreign language).
/// </summary>
[DataContract]
public enum MagnetSpecialProgramEmphasisSchoolType
{
[EnumMember(Value = "All students participate")]
AllStudentsParticipate,
[EnumMember(Value = "No students participate")]
NoStudentsParticipate,
[EnumMember(Value = "Some, but not all, students participate")]
SomeButNotAllStudentsParticipate
}
}
|
inbloom/cookbook
|
hello-world-c/InBloomClient/InBloomClient/Enum/MagnetSpecialProgramEmphasisSchoolType.cs
|
C#
|
apache-2.0
| 1,531 | 37.25 | 144 | 0.724003 | false |
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=US-ASCII">
<title>Struct open_or_create_t</title>
<link rel="stylesheet" href="../../../../doc/src/boostbook.css" type="text/css">
<meta name="generator" content="DocBook XSL Stylesheets V1.78.1">
<link rel="home" href="../../index.html" title="The Boost C++ Libraries BoostBook Documentation Subset">
<link rel="up" href="../../interprocess/indexes_reference.html#header.boost.interprocess.creation_tags_hpp" title="Header <boost/interprocess/creation_tags.hpp>">
<link rel="prev" href="open_copy_on_write_t.html" title="Struct open_copy_on_write_t">
<link rel="next" href="create_only.html" title="Global create_only">
</head>
<body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF">
<table cellpadding="2" width="100%"><tr>
<td valign="top"><img alt="Boost C++ Libraries" width="277" height="86" src="../../../../boost.png"></td>
<td align="center"><a href="../../../../index.html">Home</a></td>
<td align="center"><a href="../../../../libs/libraries.htm">Libraries</a></td>
<td align="center"><a href="http://www.boost.org/users/people.html">People</a></td>
<td align="center"><a href="http://www.boost.org/users/faq.html">FAQ</a></td>
<td align="center"><a href="../../../../more/index.htm">More</a></td>
</tr></table>
<hr>
<div class="spirit-nav">
<a accesskey="p" href="open_copy_on_write_t.html"><img src="../../../../doc/src/images/prev.png" alt="Prev"></a><a accesskey="u" href="../../interprocess/indexes_reference.html#header.boost.interprocess.creation_tags_hpp"><img src="../../../../doc/src/images/up.png" alt="Up"></a><a accesskey="h" href="../../index.html"><img src="../../../../doc/src/images/home.png" alt="Home"></a><a accesskey="n" href="create_only.html"><img src="../../../../doc/src/images/next.png" alt="Next"></a>
</div>
<div class="refentry">
<a name="boost.interprocess.open_or_create_t"></a><div class="titlepage"></div>
<div class="refnamediv">
<h2><span class="refentrytitle">Struct open_or_create_t</span></h2>
<p>boost::interprocess::open_or_create_t</p>
</div>
<h2 xmlns:rev="http://www.cs.rpi.edu/~gregod/boost/tools/doc/revision" class="refsynopsisdiv-title">Synopsis</h2>
<div xmlns:rev="http://www.cs.rpi.edu/~gregod/boost/tools/doc/revision" class="refsynopsisdiv"><pre class="synopsis"><span class="comment">// In header: <<a class="link" href="../../interprocess/indexes_reference.html#header.boost.interprocess.creation_tags_hpp" title="Header <boost/interprocess/creation_tags.hpp>">boost/interprocess/creation_tags.hpp</a>>
</span>
<span class="keyword">struct</span> <a class="link" href="open_or_create_t.html" title="Struct open_or_create_t">open_or_create_t</a> <span class="special">{</span>
<span class="special">}</span><span class="special">;</span></pre></div>
<div class="refsect1">
<a name="idp112096200"></a><h2>Description</h2>
<p>Tag to indicate that the resource must be created. If already created, it must be opened. </p>
</div>
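<!-- Usage sketch (illustrative): the tag is normally passed via the global
     instance boost::interprocess::open_or_create, for example

       using namespace boost::interprocess;
       shared_memory_object shm(open_or_create, "MySharedMemory", read_write);

     which opens the shared memory segment if it already exists and creates it
     otherwise. -->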
</div>
<table xmlns:rev="http://www.cs.rpi.edu/~gregod/boost/tools/doc/revision" width="100%"><tr>
<td align="left"></td>
<td align="right"><div class="copyright-footer">Copyright © 2005-2012 Ion Gaztanaga<p>
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at <a href="http://www.boost.org/LICENSE_1_0.txt" target="_top">http://www.boost.org/LICENSE_1_0.txt</a>)
</p>
</div></td>
</tr></table>
<hr>
<div class="spirit-nav">
<a accesskey="p" href="open_copy_on_write_t.html"><img src="../../../../doc/src/images/prev.png" alt="Prev"></a><a accesskey="u" href="../../interprocess/indexes_reference.html#header.boost.interprocess.creation_tags_hpp"><img src="../../../../doc/src/images/up.png" alt="Up"></a><a accesskey="h" href="../../index.html"><img src="../../../../doc/src/images/home.png" alt="Home"></a><a accesskey="n" href="create_only.html"><img src="../../../../doc/src/images/next.png" alt="Next"></a>
</div>
</body>
</html>
|
NixaSoftware/CVis
|
venv/bin/doc/html/boost/interprocess/open_or_create_t.html
|
HTML
|
apache-2.0
| 4,029 | 72.254545 | 486 | 0.667659 | false |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.db;
import java.sql.Connection;
import java.sql.SQLException;
/**
* Similar to java.util.concurrent.Callable with a Connection as argument. Provides a functional
* interface for use with Java 8+. If no result needs to be returned, ConnectionRunnable can be used
* instead.
*
* <p>Vanilla Java: <code>
* new ConnectionCallable<A>() {
* public A call(Connection c) { return ...; }
* }
* </code> Java Lambda: <code>(Connection c) -> ...</code>
*/
public interface ConnectionCallable<A> {
A call(Connection connection) throws SQLException;
}
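// Usage sketch (illustrative; it assumes a play.db.Database instance "db"
// exposing a withConnection(ConnectionCallable<A>) method):
//
//   Long count = db.withConnection((Connection c) -> {
//     try (java.sql.Statement s = c.createStatement();
//          java.sql.ResultSet rs = s.executeQuery("SELECT COUNT(*) FROM users")) {
//       rs.next();
//       return rs.getLong(1);
//     }
//   });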
|
playframework/playframework
|
persistence/play-jdbc-api/src/main/java/play/db/ConnectionCallable.java
|
Java
|
apache-2.0
| 644 | 27 | 100 | 0.697205 | false |
// $Id$
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
using System;
using Org.Apache.Etch.Bindings.Csharp.Msg;
using Org.Apache.Etch.Bindings.Csharp.Support;
using Org.Apache.Etch.Bindings.Csharp.Transport;
using Org.Apache.Etch.Bindings.Csharp.Util;
using NUnit.Framework;
using org.apache.etch.tests;
using org.apache.etch.tests.types.Test1;
namespace etch.tests
{
[TestFixture]
public class TestStubTest1DotCsharp
{
[TestFixtureSetUp]
public void First()
{
Console.WriteLine();
Console.Write( "TestStub" );
}
[SetUp]
public void Setup()
{
test = new FakeTest1();
vf = new ValueFactoryTest1("none:");
pool = new FreePool();
src = new MyMessageSource( this );
stub = new StubTest1( src, test, pool, pool );
xreplyMon.Set( null );
}
Monitor<Message> xreplyMon = new Monitor<Message>( "Monitor for xreply" );
[Test]
public void method_nothing()
{
Message msg = new Message( ValueFactoryTest1._mt_org_apache_etch_tests_Test1_nothing, vf );
// msg.Add( ValueFactoryTest1._mf_x, 3 );
stub.SessionMessage( null, msg );
// check the result.
xreplyMon.WaitUntilNotEq( null );
src.xreply.CheckType( ValueFactoryTest1._mt_org_apache_etch_tests_Test1__result_nothing );
Assert.AreEqual( 0, src.xreply.Count );
}
[Test]
public void method_incr1()
{
Message msg = new Message( ValueFactoryTest1._mt_org_apache_etch_tests_Test1_incr, vf );
msg.Add( ValueFactoryTest1._mf_x, 3 );
stub.SessionMessage( null, msg );
// check the result.
xreplyMon.WaitUntilNotEq( null );
src.xreply.CheckType( ValueFactoryTest1._mt_org_apache_etch_tests_Test1__result_incr );
Assert.AreEqual(1, src.xreply.Count);
Assert.AreEqual( 4, src.xreply.Get( ValueFactoryTest1._mf_result ) );
}
[Test]
public void method_sub()
{
Message msg = new Message( ValueFactoryTest1._mt_org_apache_etch_tests_Test1_sub, vf );
msg.Add( ValueFactoryTest1._mf_x, 7 );
msg.Add( ValueFactoryTest1._mf_y, 3 );
stub.SessionMessage( null, msg );
// check the result.
src.xreply.CheckType( ValueFactoryTest1._mt_org_apache_etch_tests_Test1__result_sub );
Assert.AreEqual(1, src.xreply.Count);
Assert.AreEqual( 4, src.xreply.Get( ValueFactoryTest1._mf_result ) );
}
[Test]
public void method_sum()
{
Message msg = new Message( ValueFactoryTest1._mt_org_apache_etch_tests_Test1_sum, vf );
msg.Add( ValueFactoryTest1._mf_x, new int[] { 1, 2, 3, 7, 11 } );
stub.SessionMessage( null, msg );
// check the result.
src.xreply.CheckType( ValueFactoryTest1._mt_org_apache_etch_tests_Test1__result_sum );
Assert.AreEqual(1, src.xreply.Count);
Assert.AreEqual( 24, src.xreply.Get( ValueFactoryTest1._mf_result ) );
}
[Test]
public void method_trans1()
{
Message msg = new Message( ValueFactoryTest1._mt_org_apache_etch_tests_Test1_trans, vf );
msg.Add( ValueFactoryTest1._mf_e, E1.A );
msg.Add( ValueFactoryTest1._mf_x, 5 );
stub.SessionMessage( null, msg );
// check the result.
src.xreply.CheckType( ValueFactoryTest1._mt_org_apache_etch_tests_Test1__result_trans );
Assert.AreEqual(1, src.xreply.Count);
Assert.AreEqual( 2, src.xreply.Get( ValueFactoryTest1._mf_result ) );
}
[Test]
public void method_trans2()
{
Message msg = new Message( ValueFactoryTest1._mt_org_apache_etch_tests_Test1_trans, vf );
msg.Add( ValueFactoryTest1._mf_e, E1.B );
msg.Add( ValueFactoryTest1._mf_x, 5 );
stub.SessionMessage( null, msg );
// check the result.
src.xreply.CheckType( ValueFactoryTest1._mt_org_apache_etch_tests_Test1__result_trans );
Assert.AreEqual(1, src.xreply.Count);
Assert.AreEqual( 10, src.xreply.Get( ValueFactoryTest1._mf_result ) );
}
[Test]
public void method_trans3()
{
Message msg = new Message( ValueFactoryTest1._mt_org_apache_etch_tests_Test1_trans, vf );
msg.Add( ValueFactoryTest1._mf_e, E1.C );
msg.Add( ValueFactoryTest1._mf_x, 5 );
stub.SessionMessage( null, msg );
// check the result.
src.xreply.CheckType( ValueFactoryTest1._mt_org_apache_etch_tests_Test1__result_trans );
Assert.AreEqual(1, src.xreply.Count);
Assert.AreEqual( 12, src.xreply.Get( ValueFactoryTest1._mf_result ) );
}
[Test]
public void method_dist1()
{
Message msg = new Message( ValueFactoryTest1._mt_org_apache_etch_tests_Test1_dist, vf );
msg.Add( ValueFactoryTest1._mf_a, new S1( 1, 1, 1 ) );
msg.Add( ValueFactoryTest1._mf_b, new S1( 0, 0, 0 ) );
stub.SessionMessage( null, msg );
// check the result.
src.xreply.CheckType( ValueFactoryTest1._mt_org_apache_etch_tests_Test1__result_dist );
Assert.AreEqual( 1, src.xreply.Count );
Assert.AreEqual( Math.Sqrt( 3 ), src.xreply.Get( ValueFactoryTest1._mf_result ) );
}
[Test]
public void method_dist2()
{
Message msg = new Message( ValueFactoryTest1._mt_org_apache_etch_tests_Test1_dist, vf );
msg.Add( ValueFactoryTest1._mf_a, new S1( 1, 2, 3 ) );
msg.Add( ValueFactoryTest1._mf_b, new S1( 6, 5, 4 ) );
stub.SessionMessage( null, msg );
// check the result.
src.xreply.CheckType( ValueFactoryTest1._mt_org_apache_etch_tests_Test1__result_dist );
Assert.AreEqual( 1, src.xreply.Count );
Assert.AreEqual( Math.Sqrt( 35 ), src.xreply.Get( ValueFactoryTest1._mf_result ) );
}
[Test]
public void method_fill()
{
Message msg = new Message( ValueFactoryTest1._mt_org_apache_etch_tests_Test1_fill, vf );
msg.Add( ValueFactoryTest1._mf_n, 4 );
msg.Add( ValueFactoryTest1._mf_x, 3 );
stub.SessionMessage( null, msg );
// check the result.
src.xreply.CheckType( ValueFactoryTest1._mt_org_apache_etch_tests_Test1__result_fill );
Assert.AreEqual(1, src.xreply.Count);
int[] x = (int[]) src.xreply.Get( ValueFactoryTest1._mf_result );
Assert.IsNotNull( x );
Assert.AreEqual( 4, x.Length );
foreach (int y in x)
Assert.AreEqual( 3, y );
}
[Test]
public void method_blow()
{
Message msg = new Message( ValueFactoryTest1._mt_org_apache_etch_tests_Test1_blow, vf );
msg.Add( ValueFactoryTest1._mf_msg, "foo" );
msg.Add( ValueFactoryTest1._mf_code, 23 );
stub.SessionMessage( null, msg );
// check the result.
src.xreply.CheckType( ValueFactoryTest1._mt_org_apache_etch_tests_Test1__result_blow );
Assert.AreEqual(1, src.xreply.Count);
Excp1 e = (Excp1) src.xreply.Get( ValueFactoryTest1._mf_result );
Assert.IsNotNull( e );
Assert.AreEqual( "foo", e.msg );
Assert.AreEqual( 23, e.code );
}
[Test]
public void method_beets1()
{
Message msg = new Message( ValueFactoryTest1._mt_org_apache_etch_tests_Test1_beets, vf );
msg.Add( ValueFactoryTest1._mf_e, E1.A );
stub.SessionMessage( null, msg );
// check the result.
src.xreply.CheckType( ValueFactoryTest1._mt_org_apache_etch_tests_Test1__result_beets );
Assert.AreEqual( 1, src.xreply.Count );
Object o = src.xreply.Get( ValueFactoryTest1._mf_result );
Assert.AreEqual( 5, o );
}
[Test]
public void method_beets2()
{
Message msg = new Message( ValueFactoryTest1._mt_org_apache_etch_tests_Test1_beets, vf );
msg.Add( ValueFactoryTest1._mf_e, E1.B );
stub.SessionMessage( null, msg );
// check the result.
src.xreply.CheckType( ValueFactoryTest1._mt_org_apache_etch_tests_Test1__result_beets );
Assert.AreEqual( 1, src.xreply.Count );
Object o = src.xreply.Get( ValueFactoryTest1._mf_result );
Assert.AreEqual( typeof(Excp3), o.GetType() );
}
[Test]
public void method_beets3()
{
Message msg = new Message( ValueFactoryTest1._mt_org_apache_etch_tests_Test1_beets, vf );
msg.Add( ValueFactoryTest1._mf_e, E1.C );
stub.SessionMessage( null, msg );
// check the result.
src.xreply.CheckType( ValueFactoryTest1._mt_org_apache_etch_tests_Test1__result_beets );
Assert.AreEqual( 1, src.xreply.Count );
Object o = src.xreply.Get( ValueFactoryTest1._mf_result );
Assert.AreEqual( typeof(Excp4), o.GetType() );
}
//[Test]
//public void method_beets4()
//{
// Message msg = new Message( ValueFactoryTest1._mt_org_apache_etch_tests_Test1_beets, vf );
// msg.Add( ValueFactoryTest1._mf_e, null );
// stub.Message( null, msg );
// check the result.
// src.xreply.CheckType( ValueFactoryTest1._mt_org_apache_etch_tests_Test1__result_beets );
// Assert.AreEqual( 0, src.xreply.Count );
// Object o = src.xreply.Get( ValueFactoryTest1._mf_result );
// Assert.IsNull( o );
//}
[Test]
public void method_isTrue()
{
Message msg = new Message( ValueFactoryTest1._mt_org_apache_etch_tests_Test1_isTrue, vf );
stub.SessionMessage( null, msg );
// check the result.
src.xreply.CheckType( ValueFactoryTest1._mt_org_apache_etch_tests_Test1__result_isTrue );
Assert.AreEqual(1, src.xreply.Count);
Object o = src.xreply.Get( ValueFactoryTest1._mf_result );
Assert.AreEqual( true, o );
}
[Test]
public void method_isFalse()
{
Message msg = new Message( ValueFactoryTest1._mt_org_apache_etch_tests_Test1_isFalse, vf );
stub.SessionMessage( null, msg );
// check the result.
src.xreply.CheckType( ValueFactoryTest1._mt_org_apache_etch_tests_Test1__result_isFalse );
Assert.AreEqual(1, src.xreply.Count);
Object o = src.xreply.Get( ValueFactoryTest1._mf_result );
Assert.AreEqual( false, o );
}
[Test]
public void method_alwaysWorks()
{
Message msg = new Message( ValueFactoryTest1._mt_org_apache_etch_tests_Test1_alwaysWorks, vf );
stub.SessionMessage( null, msg );
// check the result.
src.xreply.CheckType(ValueFactoryTest1._mt_org_apache_etch_tests_Test1__result_alwaysWorks);
Assert.AreEqual(1, src.xreply.Count);
Object o = src.xreply.Get(ValueFactoryTest1._mf_result);
Assert.AreEqual( 23, o );
}
[Test]
public void method_alwaysFails()
{
Message msg = new Message( ValueFactoryTest1._mt_org_apache_etch_tests_Test1_alwaysFails, vf );
stub.SessionMessage( null, msg );
// check the result.
src.xreply.CheckType(ValueFactoryTest1._mt_org_apache_etch_tests_Test1__result_alwaysFails);
Assert.AreEqual(1, src.xreply.Count);
Object o = src.xreply.Get(ValueFactoryTest1._mf_result);
Assert.IsTrue( o is _Etch_AuthException );
_Etch_AuthException e = (_Etch_AuthException) o;
Assert.AreEqual( "alwaysFails", e.msg );
}
private FakeTest1 test;
private ValueFactoryTest1 vf;
private Pool pool;
private StubTest1 stub;
private MyMessageSource src;
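// Test double for the stub's delivery service: TransportMessage/Messagex do not send
// anything over a wire; they record the reply in xreply and signal xreplyMon so the
// tests above can wait for asynchronous dispatch. Most other members are unimplemented
// and simply throw.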
public class MyMessageSource : DeliveryService
{
private TestStubTest1DotCsharp _baseObj;
private SessionMessage session;
public MyMessageSource( TestStubTest1DotCsharp baseObj )
{
_baseObj = baseObj;
}
#region MessageSource Members
public Message xreply;
public void Messagex( Who recipient, Message msg )
{
Assert.IsNull( recipient );
Assert.IsNull( xreply );
xreply = msg;
_baseObj.xreplyMon.Set(new Message(new XType("dummy"), new ValueFactoryTest1("none:")));
}
#endregion
#region DeliveryService Members
public Mailbox BeginCall(Message msg)
{
throw new Exception("The method or operation is not implemented.");
}
public object EndCall(Mailbox mb, XType responseType)
{
throw new Exception("The method or operation is not implemented.");
}
#endregion
#region TransportMessage Members
public void TransportMessage(Who recipient, Message msg)
{
Assert.IsNull(recipient);
Assert.IsNull(xreply);
xreply = msg;
_baseObj.xreplyMon.Set(new Message(new XType("dummy"), new ValueFactoryTest1("none:")));
}
#endregion
#region Transport<SessionMessage> Members
public object TransportQuery(object query)
{
throw new Exception("The method or operation is not implemented.");
}
public void TransportControl(object control, object value)
{
throw new Exception("The method or operation is not implemented.");
}
public void TransportNotify(object eventObj)
{
throw new Exception("The method or operation is not implemented.");
}
public void SetSession(SessionMessage session)
{
this.session = session;
}
#endregion
#region SessionMessage Members
public bool SessionMessage(Who sender, Message msg)
{
throw new Exception("The method or operation is not implemented.");
}
#endregion
#region Session Members
public object SessionQuery(object query)
{
throw new Exception("The method or operation is not implemented.");
}
public void SessionControl(object control, object value)
{
throw new Exception("The method or operation is not implemented.");
}
public void SessionNotify(object eventObj)
{
throw new Exception("The method or operation is not implemented.");
}
#endregion
#region Transport<SessionMessage> Members
public SessionMessage GetSession()
{
throw new Exception("The method or operation is not implemented.");
}
#endregion
}
}
}
|
OBIGOGIT/etch
|
tests/src/test/csharp/etch.tests/TestStubTest1DotCsharp.cs
|
C#
|
apache-2.0
| 15,525 | 35.443662 | 104 | 0.61095 | false |
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>CTEST_SVN_COMMAND — CMake 3.7.1 Documentation</title>
<link rel="stylesheet" href="../_static/cmake.css" type="text/css" />
<link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '../',
VERSION: '3.7.1',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
};
</script>
<script type="text/javascript" src="../_static/jquery.js"></script>
<script type="text/javascript" src="../_static/underscore.js"></script>
<script type="text/javascript" src="../_static/doctools.js"></script>
<link rel="shortcut icon" href="../_static/cmake-favicon.ico"/>
<link rel="top" title="CMake 3.7.1 Documentation" href="../index.html" />
<link rel="up" title="cmake-variables(7)" href="../manual/cmake-variables.7.html" />
<link rel="next" title="CTEST_SVN_OPTIONS" href="CTEST_SVN_OPTIONS.html" />
<link rel="prev" title="CTEST_SOURCE_DIRECTORY" href="CTEST_SOURCE_DIRECTORY.html" />
</head>
<body role="document">
<div class="related" role="navigation" aria-label="related navigation">
<h3>Navigation</h3>
<ul>
<li class="right" style="margin-right: 10px">
<a href="../genindex.html" title="General Index"
accesskey="I">index</a></li>
<li class="right" >
<a href="CTEST_SVN_OPTIONS.html" title="CTEST_SVN_OPTIONS"
accesskey="N">next</a> |</li>
<li class="right" >
<a href="CTEST_SOURCE_DIRECTORY.html" title="CTEST_SOURCE_DIRECTORY"
accesskey="P">previous</a> |</li>
<li>
<img src="../_static/cmake-logo-16.png" alt=""
style="vertical-align: middle; margin-top: -2px" />
</li>
<li>
<a href="https://cmake.org/">CMake</a> »
</li>
<li>
<a href="../index.html">3.7.1 Documentation</a> »
</li>
<li class="nav-item nav-item-1"><a href="../manual/cmake-variables.7.html" accesskey="U">cmake-variables(7)</a> »</li>
</ul>
</div>
<div class="document">
<div class="documentwrapper">
<div class="bodywrapper">
<div class="body" role="main">
<div class="section" id="ctest-svn-command">
<span id="variable:CTEST_SVN_COMMAND"></span><h1>CTEST_SVN_COMMAND<a class="headerlink" href="#ctest-svn-command" title="Permalink to this headline">¶</a></h1>
<p>Specify the CTest <code class="docutils literal"><span class="pre">SVNCommand</span></code> setting
in a <span class="target" id="index-0-manual:ctest(1)"></span><a class="reference internal" href="../manual/ctest.1.html#manual:ctest(1)" title="ctest(1)"><code class="xref cmake cmake-manual docutils literal"><span class="pre">ctest(1)</span></code></a> dashboard client script.</p>
</div>
</div>
</div>
</div>
<div class="sphinxsidebar" role="navigation" aria-label="main navigation">
<div class="sphinxsidebarwrapper">
<h4>Previous topic</h4>
<p class="topless"><a href="CTEST_SOURCE_DIRECTORY.html"
title="previous chapter">CTEST_SOURCE_DIRECTORY</a></p>
<h4>Next topic</h4>
<p class="topless"><a href="CTEST_SVN_OPTIONS.html"
title="next chapter">CTEST_SVN_OPTIONS</a></p>
<div role="note" aria-label="source link">
<h3>This Page</h3>
<ul class="this-page-menu">
<li><a href="../_sources/variable/CTEST_SVN_COMMAND.txt"
rel="nofollow">Show Source</a></li>
</ul>
</div>
<div id="searchbox" style="display: none" role="search">
<h3>Quick search</h3>
<form class="search" action="../search.html" method="get">
<input type="text" name="q" />
<input type="submit" value="Go" />
<input type="hidden" name="check_keywords" value="yes" />
<input type="hidden" name="area" value="default" />
</form>
<p class="searchtip" style="font-size: 90%">
Enter search terms or a module, class or function name.
</p>
</div>
<script type="text/javascript">$('#searchbox').show(0);</script>
</div>
</div>
<div class="clearer"></div>
</div>
<div class="related" role="navigation" aria-label="related navigation">
<h3>Navigation</h3>
<ul>
<li class="right" style="margin-right: 10px">
<a href="../genindex.html" title="General Index"
>index</a></li>
<li class="right" >
<a href="CTEST_SVN_OPTIONS.html" title="CTEST_SVN_OPTIONS"
>next</a> |</li>
<li class="right" >
<a href="CTEST_SOURCE_DIRECTORY.html" title="CTEST_SOURCE_DIRECTORY"
>previous</a> |</li>
<li>
<img src="../_static/cmake-logo-16.png" alt=""
style="vertical-align: middle; margin-top: -2px" />
</li>
<li>
<a href="https://cmake.org/">CMake</a> »
</li>
<li>
<a href="../index.html">3.7.1 Documentation</a> »
</li>
<li class="nav-item nav-item-1"><a href="../manual/cmake-variables.7.html" >cmake-variables(7)</a> »</li>
</ul>
</div>
<div class="footer" role="contentinfo">
© Copyright 2000-2016 Kitware, Inc. and Contributors.
Created using <a href="http://sphinx-doc.org/">Sphinx</a> 1.4a0+.
</div>
</body>
</html>
|
jomof/cmake-server-java-bindings
|
prebuilts/cmake-3.7.1-Windows-x86_64/doc/cmake/html/variable/CTEST_SVN_COMMAND.html
|
HTML
|
apache-2.0
| 5,620 | 39.431655 | 283 | 0.592988 | false |
package com.planet_ink.coffee_mud.Abilities.Common;
import com.planet_ink.coffee_mud.core.interfaces.*;
import com.planet_ink.coffee_mud.core.*;
import com.planet_ink.coffee_mud.core.collections.*;
import com.planet_ink.coffee_mud.Abilities.Common.CraftingSkill.CraftParms;
import com.planet_ink.coffee_mud.Abilities.Common.CraftingSkill.CraftingActivity;
import com.planet_ink.coffee_mud.Abilities.interfaces.*;
import com.planet_ink.coffee_mud.Areas.interfaces.*;
import com.planet_ink.coffee_mud.Behaviors.interfaces.*;
import com.planet_ink.coffee_mud.CharClasses.interfaces.*;
import com.planet_ink.coffee_mud.Commands.interfaces.*;
import com.planet_ink.coffee_mud.Common.interfaces.*;
import com.planet_ink.coffee_mud.Exits.interfaces.*;
import com.planet_ink.coffee_mud.Items.interfaces.*;
import com.planet_ink.coffee_mud.Libraries.interfaces.ListingLibrary;
import com.planet_ink.coffee_mud.Locales.interfaces.*;
import com.planet_ink.coffee_mud.MOBS.interfaces.*;
import com.planet_ink.coffee_mud.Races.interfaces.*;
import java.util.*;
/*
Copyright 2005-2015 Bo Zimmerman
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
@SuppressWarnings({"unchecked","rawtypes"})
public class Costuming extends EnhancedCraftingSkill implements ItemCraftor, MendingSkill
{
@Override public String ID() { return "Costuming"; }
private final static String localizedName = CMLib.lang().L("Costuming");
@Override public String name() { return localizedName; }
private static final String[] triggerStrings =I(new String[] {"COSTUME","COSTUMING"});
@Override public String[] triggerStrings(){return triggerStrings;}
@Override public String supportedResourceString(){return "CLOTH";}
@Override
public String parametersFormat(){ return
"ITEM_NAME\tITEM_LEVEL\tBUILD_TIME_TICKS\tMATERIALS_REQUIRED\tITEM_BASE_VALUE\t"
+"ITEM_CLASS_ID\tWEAPON_CLASS||CODED_WEAR_LOCATION||RIDE_BASIS\t"
+"CONTAINER_CAPACITY||WEAPON_HANDS_REQUIRED\tBASE_ARMOR_AMOUNT||BASE_DAMAGE\t"
+"CONTAINER_TYPE\tCODED_SPELL_LIST";}
//protected static final int RCP_FINALNAME=0;
//protected static final int RCP_LEVEL=1;
//protected static final int RCP_TICKS=2;
protected static final int RCP_WOOD=3;
protected static final int RCP_VALUE=4;
protected static final int RCP_CLASSTYPE=5;
protected static final int RCP_MISCTYPE=6;
protected static final int RCP_CAPACITY=7;
protected static final int RCP_ARMORDMG=8;
protected static final int RCP_CONTAINMASK=9;
protected static final int RCP_SPELL=10;
@Override
public boolean tick(Tickable ticking, int tickID)
{
if((affected!=null)&&(affected instanceof MOB)&&(tickID==Tickable.TICKID_MOB))
{
if(buildingI==null)
unInvoke();
}
return super.tick(ticking,tickID);
}
@Override public String parametersFile(){ return "costume.txt";}
@Override protected List<List<String>> loadRecipes(){return super.loadRecipes(parametersFile());}
@Override
public double getItemWeightMultiplier(boolean bundling)
{
return bundling ? 1.0 : 0.5;
}
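	// Outcome summary for unInvoke() below: on a failed proficiency check the
	// activity-specific failure emote fires (and a learning attempt destroys the
	// work item); on success, mending restores the item's uses, learning
	// deconstructs the recipe into the recipe holder, refitting zeroes the item's
	// height, and plain crafting drops the finished item into the room.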
@Override
public void unInvoke()
{
if(canBeUninvoked())
{
if(affected instanceof MOB)
{
final MOB mob=(MOB)affected;
if((buildingI!=null)&&(!aborted))
{
if(messedUp)
{
if(activity == CraftingActivity.MENDING)
messedUpCrafting(mob);
else
if(activity == CraftingActivity.LEARNING)
{
commonEmote(mob,L("<S-NAME> fail(s) to learn how to make @x1.",buildingI.name()));
buildingI.destroy();
}
else
if(activity == CraftingActivity.REFITTING)
commonEmote(mob,L("<S-NAME> mess(es) up refitting @x1.",buildingI.name()));
else
commonEmote(mob,L("<S-NAME> mess(es) up knitting @x1.",buildingI.name()));
}
else
{
if(activity == CraftingActivity.MENDING)
buildingI.setUsesRemaining(100);
else
if(activity==CraftingActivity.LEARNING)
{
deconstructRecipeInto( buildingI, recipeHolder );
buildingI.destroy();
}
else
if(activity == CraftingActivity.REFITTING)
{
buildingI.basePhyStats().setHeight(0);
buildingI.recoverPhyStats();
}
else
dropAWinner(mob,buildingI);
}
}
buildingI=null;
activity = CraftingActivity.CRAFTING;
}
}
super.unInvoke();
}
protected boolean masterCraftCheck(final Item I)
{
if(I.basePhyStats().level()>31)
return false;
if(I.name().toUpperCase().startsWith("DESIGNER")||(I.name().toUpperCase().indexOf(" DESIGNER ")>0))
return false;
return true;
}
@Override
public boolean mayICraft(final Item I)
{
if(I==null)
return false;
if(!super.mayBeCrafted(I))
return false;
if((I.material()&RawMaterial.MATERIAL_MASK)!=RawMaterial.MATERIAL_CLOTH)
return false;
if(CMLib.flags().isDeadlyOrMaliciousEffect(I))
return false;
if(isANativeItem(I.Name()) && (I instanceof Armor))
return true;
if(I.baseGoldValue()<I.basePhyStats().level())
return false;
if(I instanceof Armor)
{
if(!masterCraftCheck(I))
return false;
if(I.baseGoldValue() < I.phyStats().level())
return false;
return true;
}
if(I instanceof Weapon)
{
if(I.basePhyStats().damage()!=0)
return false;
if(I.basePhyStats().attackAdjustment()!=0)
return false;
if(!masterCraftCheck(I))
return false;
return true;
}
return (isANativeItem(I.Name()));
}
@Override public boolean supportsMending(Physical item){ return canMend(null,item,true);}
@Override
protected boolean canMend(MOB mob, Environmental E, boolean quiet)
{
if(!super.canMend(mob,E,quiet))
return false;
if((!(E instanceof Item))||(!mayICraft((Item)E)))
{
if(!quiet)
commonTell(mob,L("That's not a @x1 item.",CMLib.english().startWithAorAn(Name().toLowerCase())));
return false;
}
return true;
}
@Override
public String getDecodedComponentsDescription(final MOB mob, final List<String> recipe)
{
return super.getComponentDescription( mob, recipe, RCP_WOOD );
}
@Override
public boolean invoke(MOB mob, Vector commands, Physical givenTarget, boolean auto, int asLevel)
{
if(super.checkStop(mob, commands))
return true;
final CraftParms parsedVars=super.parseAutoGenerate(auto,givenTarget,commands);
givenTarget=parsedVars.givenTarget;
final PairVector<Integer,Integer> enhancedTypes=enhancedTypes(mob,commands);
randomRecipeFix(mob,addRecipes(mob,loadRecipes()),commands,parsedVars.autoGenerate);
if(commands.size()==0)
{
commonTell(mob,L("Costume what? Enter \"costume list\" for a list, \"costume refit <item>\" to resize, \"costume learn <item>\", \"costume scan\", \"costume mend <item>\", or \"costume stop\" to cancel."));
return false;
}
if((!auto)
&&(commands.size()>0)
&&(((String)commands.firstElement()).equalsIgnoreCase("bundle")))
{
bundling=true;
if(super.invoke(mob,commands,givenTarget,auto,asLevel))
return super.bundle(mob,commands);
return false;
}
final List<List<String>> recipes=addRecipes(mob,loadRecipes());
final String str=(String)commands.elementAt(0);
String startStr=null;
bundling=false;
int duration=4;
final int[] cols={
ListingLibrary.ColFixer.fixColWidth(27,mob.session()),
ListingLibrary.ColFixer.fixColWidth(3,mob.session()),
ListingLibrary.ColFixer.fixColWidth(6,mob.session())
};
if(str.equalsIgnoreCase("list"))
{
String mask=CMParms.combine(commands,1);
boolean allFlag=false;
if(mask.equalsIgnoreCase("all"))
{
allFlag=true;
mask="";
}
final StringBuffer buf=new StringBuffer("");
int toggler=1;
final int toggleTop=2;
for(int r=0;r<toggleTop;r++)
buf.append((r>0?" ":"")+CMStrings.padRight(L("Item"),cols[0])+" "+CMStrings.padRight(L("Lvl"),cols[1])+" "+CMStrings.padRight(L("Cloth"),cols[2]));
buf.append("\n\r");
for(int r=0;r<recipes.size();r++)
{
final List<String> V=recipes.get(r);
if(V.size()>0)
{
final String item=replacePercent(V.get(RCP_FINALNAME),"");
final int level=CMath.s_int(V.get(RCP_LEVEL));
final String wood=getComponentDescription(mob,V,RCP_WOOD);
if(wood.length()>5)
{
if(toggler>1)
buf.append("\n\r");
toggler=toggleTop;
}
if(((level<=xlevel(mob))||allFlag)
&&((mask.length()==0)||mask.equalsIgnoreCase("all")||CMLib.english().containsString(item,mask)))
{
buf.append(CMStrings.padRight(item,cols[0])+" "+CMStrings.padRight(""+level,cols[1])+" "+CMStrings.padRightPreserve(""+wood,cols[2])+((toggler!=toggleTop)?" ":"\n\r"));
if(++toggler>toggleTop)
toggler=1;
}
}
}
if(toggler!=1)
buf.append("\n\r");
commonTell(mob,buf.toString());
enhanceList(mob);
return true;
}
else
if((commands.firstElement() instanceof String)&&(((String)commands.firstElement())).equalsIgnoreCase("learn"))
{
return doLearnRecipe(mob, commands, givenTarget, auto, asLevel);
}
else
if(str.equalsIgnoreCase("scan"))
return publicScan(mob,commands);
else
if(str.equalsIgnoreCase("mend"))
{
buildingI=null;
activity = CraftingActivity.CRAFTING;
messedUp=false;
final Vector newCommands=CMParms.parse(CMParms.combine(commands,1));
buildingI=getTarget(mob,mob.location(),givenTarget,newCommands,Wearable.FILTER_UNWORNONLY);
if(!canMend(mob,buildingI,false))
return false;
activity = CraftingActivity.MENDING;
if(!super.invoke(mob,commands,givenTarget,auto,asLevel))
return false;
startStr=L("<S-NAME> start(s) mending @x1.",buildingI.name());
displayText=L("You are mending @x1",buildingI.name());
verb=L("mending @x1",buildingI.name());
}
else
if(str.equalsIgnoreCase("refit"))
{
buildingI=null;
activity = CraftingActivity.CRAFTING;
messedUp=false;
final Vector newCommands=CMParms.parse(CMParms.combine(commands,1));
buildingI=getTarget(mob,mob.location(),givenTarget,newCommands,Wearable.FILTER_UNWORNONLY);
if(buildingI==null)
return false;
if((buildingI.material()&RawMaterial.MATERIAL_MASK)!=RawMaterial.MATERIAL_CLOTH)
{
commonTell(mob,L("That's not made of cloth. It can't be refitted."));
return false;
}
if(!(buildingI instanceof Armor))
{
commonTell(mob,L("You don't know how to refit that sort of thing."));
return false;
}
if(buildingI.phyStats().height()==0)
{
commonTell(mob,L("@x1 is already the right size.",buildingI.name(mob)));
return false;
}
activity = CraftingActivity.REFITTING;
if(!super.invoke(mob,commands,givenTarget,auto,asLevel))
return false;
startStr=L("<S-NAME> start(s) refitting @x1.",buildingI.name());
displayText=L("You are refitting @x1",buildingI.name());
verb=L("refitting @x1",buildingI.name());
}
else
{
buildingI=null;
activity = CraftingActivity.CRAFTING;
messedUp=false;
aborted=false;
int amount=-1;
if((commands.size()>1)&&(CMath.isNumber((String)commands.lastElement())))
{
amount=CMath.s_int((String)commands.lastElement());
commands.removeElementAt(commands.size()-1);
}
final String recipeName=CMParms.combine(commands,0);
List<String> foundRecipe=null;
final List<List<String>> matches=matchingRecipeNames(recipes,recipeName,true);
for(int r=0;r<matches.size();r++)
{
final List<String> V=matches.get(r);
if(V.size()>0)
{
final int level=CMath.s_int(V.get(RCP_LEVEL));
if((parsedVars.autoGenerate>0)||(level<=xlevel(mob)))
{
foundRecipe=V;
break;
}
}
}
if(foundRecipe==null)
{
commonTell(mob,L("You don't know how to make a '@x1'. Try \"@x2 list\" for a list.",recipeName,triggerStrings()[0].toLowerCase()));
return false;
}
final String woodRequiredStr = foundRecipe.get(RCP_WOOD);
final List<Object> componentsFoundList=getAbilityComponents(mob, woodRequiredStr, "make "+CMLib.english().startWithAorAn(recipeName),parsedVars.autoGenerate);
if(componentsFoundList==null)
return false;
int woodRequired=CMath.s_int(woodRequiredStr);
woodRequired=adjustWoodRequired(woodRequired,mob);
if(amount>woodRequired)
woodRequired=amount;
final String misctype=foundRecipe.get(RCP_MISCTYPE);
final int[] pm={RawMaterial.MATERIAL_CLOTH};
bundling=misctype.equalsIgnoreCase("BUNDLE");
final int[][] data=fetchFoundResourceData(mob,
woodRequired,"cloth",pm,
0,null,null,
bundling,
parsedVars.autoGenerate,
enhancedTypes);
if(data==null)
return false;
fixDataForComponents(data,componentsFoundList);
woodRequired=data[0][FOUND_AMT];
if(!super.invoke(mob,commands,givenTarget,auto,asLevel))
return false;
final int lostValue=parsedVars.autoGenerate>0?0:
CMLib.materials().destroyResourcesValue(mob.location(),woodRequired,data[0][FOUND_CODE],0,null)
+CMLib.ableMapper().destroyAbilityComponents(componentsFoundList);
buildingI=CMClass.getItem(foundRecipe.get(RCP_CLASSTYPE));
if(buildingI==null)
{
commonTell(mob,L("There's no such thing as a @x1!!!",foundRecipe.get(RCP_CLASSTYPE)));
return false;
}
duration=getDuration(CMath.s_int(foundRecipe.get(RCP_TICKS)),mob,CMath.s_int(foundRecipe.get(RCP_LEVEL)),4);
String itemName=replacePercent(foundRecipe.get(RCP_FINALNAME),RawMaterial.CODES.NAME(data[0][FOUND_CODE])).toLowerCase();
if(bundling)
itemName="a "+woodRequired+"# "+itemName;
else
if(itemName.endsWith("s"))
itemName="some "+itemName;
else
itemName=CMLib.english().startWithAorAn(itemName);
buildingI.setName(itemName);
startStr=L("<S-NAME> start(s) making @x1.",buildingI.name());
displayText=L("You are making @x1",buildingI.name());
playSound="scissor.wav";
verb=L("making @x1",buildingI.name());
buildingI.setDisplayText(L("@x1 lies here",itemName));
buildingI.setDescription(itemName+". ");
buildingI.basePhyStats().setWeight(getStandardWeight(woodRequired,bundling));
final int hardness=RawMaterial.CODES.HARDNESS(data[0][FOUND_CODE])-1;
buildingI.setBaseValue(CMath.s_int(foundRecipe.get(RCP_VALUE)));
buildingI.setMaterial(data[0][FOUND_CODE]);
buildingI.basePhyStats().setLevel(CMath.s_int(foundRecipe.get(RCP_LEVEL)));
buildingI.setSecretIdentity(getBrand(mob));
final int capacity=CMath.s_int(foundRecipe.get(RCP_CAPACITY));
final long canContain=getContainerType(foundRecipe.get(RCP_CONTAINMASK));
final int armordmg=CMath.s_int(foundRecipe.get(RCP_ARMORDMG));
final String spell=(foundRecipe.size()>RCP_SPELL)?foundRecipe.get(RCP_SPELL).trim():"";
if(bundling)
buildingI.setBaseValue(lostValue);
addSpells(buildingI,spell);
if(buildingI instanceof Weapon)
{
((Weapon)buildingI).setWeaponClassification(Weapon.CLASS_NATURAL);
setWeaponTypeClass((Weapon)buildingI,misctype);
buildingI.basePhyStats().setDamage(armordmg);
((Weapon)buildingI).setRawProperLocationBitmap(Wearable.WORN_WIELD|Wearable.WORN_HELD);
((Weapon)buildingI).setRawLogicalAnd((capacity>1));
}
if((buildingI instanceof Armor)&&(!(buildingI instanceof FalseLimb)))
{
if((capacity>0)&&(buildingI instanceof Container))
{
((Container)buildingI).setCapacity(capacity+woodRequired);
((Container)buildingI).setContainTypes(canContain);
}
((Armor)buildingI).basePhyStats().setArmor(0);
if(armordmg!=0)
((Armor)buildingI).basePhyStats().setArmor(armordmg+(abilityCode()-1)+hardness);
setWearLocation(buildingI,misctype,0);
}
if(buildingI instanceof Rideable)
{
setRideBasis((Rideable)buildingI,misctype);
}
buildingI.recoverPhyStats();
buildingI.text();
buildingI.recoverPhyStats();
}
messedUp=!proficiencyCheck(mob,0,auto);
if(bundling)
{
messedUp=false;
duration=1;
verb=L("bundling @x1",RawMaterial.CODES.NAME(buildingI.material()).toLowerCase());
startStr=L("<S-NAME> start(s) @x1.",verb);
displayText=L("You are @x1",verb);
}
if(parsedVars.autoGenerate>0)
{
commands.addElement(buildingI);
return true;
}
final CMMsg msg=CMClass.getMsg(mob,buildingI,this,getActivityMessageType(),startStr);
if(mob.location().okMessage(mob,msg))
{
mob.location().send(mob,msg);
buildingI=(Item)msg.target();
beneficialAffect(mob,mob,asLevel,duration);
enhanceItem(mob,buildingI,enhancedTypes);
}
else
if(bundling)
{
messedUp=false;
aborted=false;
unInvoke();
}
return true;
}
}
|
Tycheo/coffeemud
|
com/planet_ink/coffee_mud/Abilities/Common/Costuming.java
|
Java
|
apache-2.0
| 17,414 | 32.689243 | 209 | 0.67974 | false |