query_docstring (string, lengths 24–20.8k) | positive_code (string, lengths 17–325k) | hard_negative_code (string, lengths 17–325k) | similarity_score (float64, 0.3–1) | query_repo (string, 407 classes) | query_path (string, lengths 5–170) | hn_repo (string, 400 classes) | hn_path (string, lengths 5–170) | hn_license (string, 4 classes) | language (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|
ParseTimezone attempts to parse a timezone string that may not be tzdata-compliant.
Returns a *time.Location whose name is normalized to either of the following:
1. tzdata name, which may be canonical, aliased or deprecated (no normalization)
2. Specific UTC offset in the format "UTC-07:00".
This means that ParseTimezone should be more lenient than the list of canonical location names in tzdata,
and the list of tzdata names (e.g. from zone.tab) is not an exhaustive list of valid inputs to this function.
Note that not all timezone abbreviations can be parsed accurately; many of them are often ambiguous.
(e.g. CST could refer to any of China Standard Time, Cuban Standard Time or Central Standard Time).
See https://en.wikipedia.org/wiki/List_of_time_zone_abbreviations for more information. | func ParseTimezone(val string) (*time.Location, error) {
// If empty, uses a default timezone.
if val == "" || val == "Local" {
return defaultTimezone, nil
}
// Convert GMT -> UTC
if val == "GMT" {
val = "UTC"
}
// Try parse tzdata name.
if loc, err := time.LoadLocation(val); err == nil {
return loc, nil
}
// Try parse UTC/GMT offsets
if strings.HasPrefix(val, "UTC") || strings.HasPrefix(val, "GMT") {
offset := val[3:]
// Support formats without leading zeros.
if regexpNonLeadingTZ.MatchString(offset) {
offset = offset[0:1] + "0" + offset[1:]
} else if regexpNonLeadingColonTZ.MatchString(offset) {
offset = offset[0:1] + "0" + offset[1:]
}
// Use alternative time format parsing.
if t, err := time.Parse("-07", offset); err == nil {
return getFixedZoneForUTCOffset(t), nil
}
if t, err := time.Parse("-07:00", offset); err == nil {
return getFixedZoneForUTCOffset(t), nil
}
if t, err := time.Parse("-0700", offset); err == nil {
return getFixedZoneForUTCOffset(t), nil
}
}
return nil, fmt.Errorf("cannot parse \"%v\" as timezone", val)
} | func Parse(layout, value string) (Time, error) {
return parse(layout, value, UTC, Local)
} | 0.714314 | furiko-io/furiko | pkg/core/tzutils/parse.go | mit-pdos/biscuit | src/time/format.go | BSD-3-Clause | go |
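
For context on how the query function above is meant to be consumed, a minimal usage sketch follows. The import path is assumed from the furiko-io/furiko repo layout (pkg/core/tzutils); the sample inputs exercise both the tzdata and the UTC-offset branches, and the named zone still requires tzdata on the host.

```go
package main

import (
	"fmt"
	"log"

	// Assumed import path, inferred from the repo and file path above.
	"github.com/furiko-io/furiko/pkg/core/tzutils"
)

func main() {
	// Only the first input is a tzdata name; the rest hit the GMT alias
	// and the no-leading-zero UTC offset branches.
	for _, tz := range []string{"Asia/Singapore", "GMT", "UTC+8", "UTC-7:30"} {
		loc, err := tzutils.ParseTimezone(tz)
		if err != nil {
			log.Fatalf("parse %q: %v", tz, err)
		}
		fmt.Printf("%-10s -> %s\n", tz, loc)
	}
}
```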
ParseTimezone attempts to parse a timezone string that may not be tzdata-compliant.
Returns a *time.Location whose name is normalized to either of the following:
1. tzdata name, which may be canonical, aliased or deprecated (no normalization)
2. Specific UTC offset in the format "UTC-07:00".
This means that ParseTimezone should be more lenient than the list of canonical location names in tzdata,
and the list of tzdata names (e.g. from zone.tab) is not an exhaustive list of valid inputs to this function.
Note that not all timezone abbreviations can be parsed accurately; many of them are often ambiguous.
(e.g. CST could refer to any of China Standard Time, Cuban Standard Time or Central Standard Time).
See https://en.wikipedia.org/wiki/List_of_time_zone_abbreviations for more information. | func ParseTimezone(val string) (*time.Location, error) {
// If empty, uses a default timezone.
if val == "" || val == "Local" {
return defaultTimezone, nil
}
// Convert GMT -> UTC
if val == "GMT" {
val = "UTC"
}
// Try parse tzdata name.
if loc, err := time.LoadLocation(val); err == nil {
return loc, nil
}
// Try parse UTC/GMT offsets
if strings.HasPrefix(val, "UTC") || strings.HasPrefix(val, "GMT") {
offset := val[3:]
// Support formats without leading zeros.
if regexpNonLeadingTZ.MatchString(offset) {
offset = offset[0:1] + "0" + offset[1:]
} else if regexpNonLeadingColonTZ.MatchString(offset) {
offset = offset[0:1] + "0" + offset[1:]
}
// Use alternative time format parsing.
if t, err := time.Parse("-07", offset); err == nil {
return getFixedZoneForUTCOffset(t), nil
}
if t, err := time.Parse("-07:00", offset); err == nil {
return getFixedZoneForUTCOffset(t), nil
}
if t, err := time.Parse("-0700", offset); err == nil {
return getFixedZoneForUTCOffset(t), nil
}
}
return nil, fmt.Errorf("cannot parse \"%v\" as timezone", val)
} | func ParseInLocation(layout, value string, loc *Location) (Time, error) {
return parse(layout, value, loc, loc)
} | 0.699444 | furiko-io/furiko | pkg/core/tzutils/parse.go | mit-pdos/biscuit | src/time/format.go | BSD-3-Clause | go |
ParseTimezone attempts to parse a timezone string that may not be tzdata-compliant.
Returns a *time.Location whose name is normalized to either of the following:
1. tzdata name, which may be canonical, aliased or deprecated (no normalization)
2. Specific UTC offset in the format "UTC-07:00".
This means that ParseTimezone should be more lenient than the list of canonical location names in tzdata,
and the list of tzdata names (e.g. from zone.tab) is not an exhaustive list of valid inputs to this function.
Note that not all timezone abbreviations can be parsed accurately; many of them are often ambiguous.
(e.g. CST could refer to any of China Standard Time, Cuban Standard Time or Central Standard Time).
See https://en.wikipedia.org/wiki/List_of_time_zone_abbreviations for more information. | func ParseTimezone(val string) (*time.Location, error) {
// If empty, uses a default timezone.
if val == "" || val == "Local" {
return defaultTimezone, nil
}
// Convert GMT -> UTC
if val == "GMT" {
val = "UTC"
}
// Try parse tzdata name.
if loc, err := time.LoadLocation(val); err == nil {
return loc, nil
}
// Try parse UTC/GMT offsets
if strings.HasPrefix(val, "UTC") || strings.HasPrefix(val, "GMT") {
offset := val[3:]
// Support formats without leading zeros.
if regexpNonLeadingTZ.MatchString(offset) {
offset = offset[0:1] + "0" + offset[1:]
} else if regexpNonLeadingColonTZ.MatchString(offset) {
offset = offset[0:1] + "0" + offset[1:]
}
// Use alternative time format parsing.
if t, err := time.Parse("-07", offset); err == nil {
return getFixedZoneForUTCOffset(t), nil
}
if t, err := time.Parse("-07:00", offset); err == nil {
return getFixedZoneForUTCOffset(t), nil
}
if t, err := time.Parse("-0700", offset); err == nil {
return getFixedZoneForUTCOffset(t), nil
}
}
return nil, fmt.Errorf("cannot parse \"%v\" as timezone", val)
} | func tzset(s string, lastTxSec, sec int64) (name string, offset int, start, end int64, isDST, ok bool) {
var (
stdName, dstName string
stdOffset, dstOffset int
)
stdName, s, ok = tzsetName(s)
if ok {
stdOffset, s, ok = tzsetOffset(s)
}
if !ok {
return "", 0, 0, 0, false, false
}
// The numbers in the tzset string are added to local time to get UTC,
// but our offsets are added to UTC to get local time,
// so we negate the number we see here.
stdOffset = -stdOffset
if len(s) == 0 || s[0] == ',' {
// No daylight savings time.
return stdName, stdOffset, lastTxSec, omega, false, true
}
dstName, s, ok = tzsetName(s)
if ok {
if len(s) == 0 || s[0] == ',' {
dstOffset = stdOffset + secondsPerHour
} else {
dstOffset, s, ok = tzsetOffset(s)
dstOffset = -dstOffset // as with stdOffset, above
}
}
if !ok {
return "", 0, 0, 0, false, false
}
if len(s) == 0 {
// Default DST rules per tzcode.
s = ",M3.2.0,M11.1.0"
}
// The TZ definition does not mention ';' here but tzcode accepts it.
if s[0] != ',' && s[0] != ';' {
return "", 0, 0, 0, false, false
}
s = s[1:]
var startRule, endRule rule
startRule, s, ok = tzsetRule(s)
if !ok || len(s) == 0 || s[0] != ',' {
return "", 0, 0, 0, false, false
}
s = s[1:]
endRule, s, ok = tzsetRule(s)
if !ok || len(s) > 0 {
return "", 0, 0, 0, false, false
}
year, _, _, yday := absDate(uint64(sec+unixToInternal+internalToAbsolute), false)
ysec := int64(yday*secondsPerDay) + sec%secondsPerDay
// Compute start of year in seconds since Unix epoch.
d := daysSinceEpoch(year)
abs := int64(d * secondsPerDay)
abs += absoluteToInternal + internalToUnix
startSec := int64(tzruleTime(year, startRule, stdOffset))
endSec := int64(tzruleTime(year, endRule, dstOffset))
dstIsDST, stdIsDST := true, false
// Note: this is a flipping of "DST" and "STD" while retaining the labels
// This happens in southern hemispheres. The labelling here thus is a little
// inconsistent with the goal.
if endSec < startSec {
startSec, endSec = endSec, startSec
stdName, dstName = dstName, stdName
stdOffset, dstOffset = dstOffset, stdOffset
stdIsDST, dstIsDST = dstIsDST, stdIsDST
}
// The start and end values that we return are accurate
// close to a daylight savings transition, but are otherwise
// just the start and end of the year. That suffices for
// the only caller that cares, which is Date.
if ysec < startSec {
return stdName, stdOffset, abs, startSec + abs, stdIsDST, true
} else if ysec >= endSec {
return stdName, stdOffset, endSec + abs, abs + 365*secondsPerDay, stdIsDST, true
} else {
return dstName, dstOffset, startSec + abs, endSec + abs, dstIsDST, true
}
} | 0.698559 | furiko-io/furiko | pkg/core/tzutils/parse.go | goplus/llgo | runtime/internal/lib/time/zoneinfo.go | Apache-2.0 | go |
ParseTimezone attempts to parse a timezone string that may not be tzdata-compliant.
Returns a *time.Location whose name is normalized to either of the following:
1. tzdata name, which may be canonical, aliased or deprecated (no normalization)
2. Specific UTC offset in the format "UTC-07:00".
This means that ParseTimezone should be more lenient than the list of canonical location names in tzdata,
and the list of tzdata names (e.g. from zone.tab) is not an exhaustive list of valid inputs to this function.
Note that not all timezone abbreviations can be parsed accurately; many of them are often ambiguous.
(e.g. CST could refer to any of China Standard Time, Cuban Standard Time or Central Standard Time).
See https://en.wikipedia.org/wiki/List_of_time_zone_abbreviations for more information. | func ParseTimezone(val string) (*time.Location, error) {
// If empty, uses a default timezone.
if val == "" || val == "Local" {
return defaultTimezone, nil
}
// Convert GMT -> UTC
if val == "GMT" {
val = "UTC"
}
// Try parse tzdata name.
if loc, err := time.LoadLocation(val); err == nil {
return loc, nil
}
// Try parse UTC/GMT offsets
if strings.HasPrefix(val, "UTC") || strings.HasPrefix(val, "GMT") {
offset := val[3:]
// Support formats without leading zeros.
if regexpNonLeadingTZ.MatchString(offset) {
offset = offset[0:1] + "0" + offset[1:]
} else if regexpNonLeadingColonTZ.MatchString(offset) {
offset = offset[0:1] + "0" + offset[1:]
}
// Use alternative time format parsing.
if t, err := time.Parse("-07", offset); err == nil {
return getFixedZoneForUTCOffset(t), nil
}
if t, err := time.Parse("-07:00", offset); err == nil {
return getFixedZoneForUTCOffset(t), nil
}
if t, err := time.Parse("-0700", offset); err == nil {
return getFixedZoneForUTCOffset(t), nil
}
}
return nil, fmt.Errorf("cannot parse \"%v\" as timezone", val)
} | func Parse(layout, value string) (Time, error) {
/*
// Optimize for RFC3339 as it accounts for over half of all representations.
if layout == RFC3339 || layout == RFC3339Nano {
if t, ok := parseRFC3339(value, Local); ok {
return t, nil
}
}
return parse(layout, value, UTC, Local)
*/
panic("todo: time.Parse")
} | 0.682 | furiko-io/furiko | pkg/core/tzutils/parse.go | goplus/llgo | runtime/internal/lib/time/format.go | Apache-2.0 | go |
ParseTimezone attempts to parse a timezone string that may not be tzdata-compliant.
Returns a *time.Location whose name is normalized to either of the following:
1. tzdata name, which may be canonical, aliased or deprecated (no normalization)
2. Specific UTC offset in the format "UTC-07:00".
This means that ParseTimezone should be more lenient than the list of canonical location names in tzdata,
and the list of tzdata names (e.g. from zone.tab) is not an exhaustive list of valid inputs to this function.
Note that not all timezone abbreviations can be parsed accurately; many of them are often ambiguous.
(e.g. CST could refer to any of China Standard Time, Cuban Standard Time or Central Standard Time).
See https://en.wikipedia.org/wiki/List_of_time_zone_abbreviations for more information. | func ParseTimezone(val string) (*time.Location, error) {
// If empty, uses a default timezone.
if val == "" || val == "Local" {
return defaultTimezone, nil
}
// Convert GMT -> UTC
if val == "GMT" {
val = "UTC"
}
// Try parse tzdata name.
if loc, err := time.LoadLocation(val); err == nil {
return loc, nil
}
// Try parse UTC/GMT offsets
if strings.HasPrefix(val, "UTC") || strings.HasPrefix(val, "GMT") {
offset := val[3:]
// Support formats without leading zeros.
if regexpNonLeadingTZ.MatchString(offset) {
offset = offset[0:1] + "0" + offset[1:]
} else if regexpNonLeadingColonTZ.MatchString(offset) {
offset = offset[0:1] + "0" + offset[1:]
}
// Use alternative time format parsing.
if t, err := time.Parse("-07", offset); err == nil {
return getFixedZoneForUTCOffset(t), nil
}
if t, err := time.Parse("-07:00", offset); err == nil {
return getFixedZoneForUTCOffset(t), nil
}
if t, err := time.Parse("-0700", offset); err == nil {
return getFixedZoneForUTCOffset(t), nil
}
}
return nil, fmt.Errorf("cannot parse \"%v\" as timezone", val)
} | func parseTimeZone(value string) (length int, ok bool) {
if len(value) < 3 {
return 0, false
}
// Special case 1: ChST and MeST are the only zones with a lower-case letter.
if len(value) >= 4 && (value[:4] == "ChST" || value[:4] == "MeST") {
return 4, true
}
// Special case 2: GMT may have an hour offset; treat it specially.
if value[:3] == "GMT" {
length = parseGMT(value)
return length, true
}
// Special Case 3: Some time zones are not named, but have +/-00 format
if value[0] == '+' || value[0] == '-' {
length = parseSignedOffset(value)
return length, true
}
// How many upper-case letters are there? Need at least three, at most five.
var nUpper int
for nUpper = 0; nUpper < 6; nUpper++ {
if nUpper >= len(value) {
break
}
if c := value[nUpper]; c < 'A' || 'Z' < c {
break
}
}
switch nUpper {
case 0, 1, 2, 6:
return 0, false
case 5: // Must end in T to match.
if value[4] == 'T' {
return 5, true
}
case 4:
// Must end in T, except one special case.
if value[3] == 'T' || value[:4] == "WITA" {
return 4, true
}
case 3:
return 3, true
}
return 0, false
} | 0.65601 | furiko-io/furiko | pkg/core/tzutils/parse.go | mit-pdos/biscuit | src/time/format.go | BSD-3-Clause | go |
ListGroupAccessRequests gets a list of access requests
viewable by the authenticated user.
GitLab API docs:
https://docs.gitlab.com/ee/api/access_requests.html#list-access-requests-for-a-group-or-project | func (s *AccessRequestsService) ListGroupAccessRequests(gid interface{}, opt *ListAccessRequestsOptions, options ...RequestOptionFunc) ([]*AccessRequest, *Response, error) {
group, err := parseID(gid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("groups/%s/access_requests", PathEscape(group))
req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
if err != nil {
return nil, nil, err
}
var ars []*AccessRequest
resp, err := s.client.Do(req, &ars)
if err != nil {
return nil, resp, err
}
return ars, resp, nil
} | func (s *AccessRequestsService) ListProjectAccessRequests(pid interface{}, opt *ListAccessRequestsOptions, options ...RequestOptionFunc) ([]*AccessRequest, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("projects/%s/access_requests", PathEscape(project))
req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
if err != nil {
return nil, nil, err
}
var ars []*AccessRequest
resp, err := s.client.Do(req, &ars)
if err != nil {
return nil, resp, err
}
return ars, resp, nil
} | 0.921987 | tektoncd/cli | vendor/gitlab.com/gitlab-org/api/client-go/access_requests.go | tektoncd/cli | vendor/gitlab.com/gitlab-org/api/client-go/access_requests.go | Apache-2.0 | go |
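
A minimal sketch of calling the positive example above through client-go, assuming the module path shown in the vendor directory (gitlab.com/gitlab-org/api/client-go); the token and group path are placeholders.

```go
package main

import (
	"fmt"
	"log"
	"os"

	gitlab "gitlab.com/gitlab-org/api/client-go"
)

func main() {
	client, err := gitlab.NewClient(os.Getenv("GITLAB_TOKEN"))
	if err != nil {
		log.Fatal(err)
	}

	// gid may be a numeric group ID or a "group/subgroup" path string.
	ars, _, err := client.AccessRequests.ListGroupAccessRequests(
		"my-group", &gitlab.ListAccessRequestsOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, ar := range ars {
		fmt.Println(ar.ID, ar.Username)
	}
}
```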
ListGroupAccessRequests gets a list of access requests
viewable by the authenticated user.
GitLab API docs:
https://docs.gitlab.com/ee/api/access_requests.html#list-access-requests-for-a-group-or-project | func (s *AccessRequestsService) ListGroupAccessRequests(gid interface{}, opt *ListAccessRequestsOptions, options ...RequestOptionFunc) ([]*AccessRequest, *Response, error) {
group, err := parseID(gid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("groups/%s/access_requests", PathEscape(group))
req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
if err != nil {
return nil, nil, err
}
var ars []*AccessRequest
resp, err := s.client.Do(req, &ars)
if err != nil {
return nil, resp, err
}
return ars, resp, nil
} | func (s *AccessRequestsService) RequestGroupAccess(gid interface{}, options ...RequestOptionFunc) (*AccessRequest, *Response, error) {
group, err := parseID(gid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("groups/%s/access_requests", PathEscape(group))
req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
if err != nil {
return nil, nil, err
}
ar := new(AccessRequest)
resp, err := s.client.Do(req, ar)
if err != nil {
return nil, resp, err
}
return ar, resp, nil
} | 0.87547 | tektoncd/cli | vendor/gitlab.com/gitlab-org/api/client-go/access_requests.go | tektoncd/cli | vendor/gitlab.com/gitlab-org/api/client-go/access_requests.go | Apache-2.0 | go |
ListGroupAccessRequests gets a list of access requests
viewable by the authenticated user.
GitLab API docs:
https://docs.gitlab.com/ee/api/access_requests.html#list-access-requests-for-a-group-or-project | func (s *AccessRequestsService) ListGroupAccessRequests(gid interface{}, opt *ListAccessRequestsOptions, options ...RequestOptionFunc) ([]*AccessRequest, *Response, error) {
group, err := parseID(gid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("groups/%s/access_requests", PathEscape(group))
req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
if err != nil {
return nil, nil, err
}
var ars []*AccessRequest
resp, err := s.client.Do(req, &ars)
if err != nil {
return nil, resp, err
}
return ars, resp, nil
} | func (s *AccessRequestsService) RequestProjectAccess(pid interface{}, options ...RequestOptionFunc) (*AccessRequest, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("projects/%s/access_requests", PathEscape(project))
req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
if err != nil {
return nil, nil, err
}
ar := new(AccessRequest)
resp, err := s.client.Do(req, ar)
if err != nil {
return nil, resp, err
}
return ar, resp, nil
} | 0.804066 | tektoncd/cli | vendor/gitlab.com/gitlab-org/api/client-go/access_requests.go | tektoncd/cli | vendor/gitlab.com/gitlab-org/api/client-go/access_requests.go | Apache-2.0 | go |
ListGroupAccessRequests gets a list of access requests
viewable by the authenticated user.
GitLab API docs:
https://docs.gitlab.com/ee/api/access_requests.html#list-access-requests-for-a-group-or-project | func (s *AccessRequestsService) ListGroupAccessRequests(gid interface{}, opt *ListAccessRequestsOptions, options ...RequestOptionFunc) ([]*AccessRequest, *Response, error) {
group, err := parseID(gid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("groups/%s/access_requests", PathEscape(group))
req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
if err != nil {
return nil, nil, err
}
var ars []*AccessRequest
resp, err := s.client.Do(req, &ars)
if err != nil {
return nil, resp, err
}
return ars, resp, nil
} | func (s *GroupsService) ListGroupProjects(gid interface{}, opt *ListGroupProjectsOptions, options ...RequestOptionFunc) ([]*Project, *Response, error) {
group, err := parseID(gid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("groups/%s/projects", PathEscape(group))
req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
if err != nil {
return nil, nil, err
}
var ps []*Project
resp, err := s.client.Do(req, &ps)
if err != nil {
return nil, resp, err
}
return ps, resp, nil
} | 0.784897 | tektoncd/cli | vendor/gitlab.com/gitlab-org/api/client-go/access_requests.go | tektoncd/cli | vendor/gitlab.com/gitlab-org/api/client-go/groups.go | Apache-2.0 | go |
ListGroupAccessRequests gets a list of access requests
viewable by the authenticated user.
GitLab API docs:
https://docs.gitlab.com/ee/api/access_requests.html#list-access-requests-for-a-group-or-project | func (s *AccessRequestsService) ListGroupAccessRequests(gid interface{}, opt *ListAccessRequestsOptions, options ...RequestOptionFunc) ([]*AccessRequest, *Response, error) {
group, err := parseID(gid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("groups/%s/access_requests", PathEscape(group))
req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
if err != nil {
return nil, nil, err
}
var ars []*AccessRequest
resp, err := s.client.Do(req, &ars)
if err != nil {
return nil, resp, err
}
return ars, resp, nil
} | func (s *ProjectsService) ListProjectsGroups(pid interface{}, opt *ListProjectGroupOptions, options ...RequestOptionFunc) ([]*ProjectGroup, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("projects/%s/groups", PathEscape(project))
req, err := s.client.NewRequest(http.MethodGet, u, opt, options)
if err != nil {
return nil, nil, err
}
var p []*ProjectGroup
resp, err := s.client.Do(req, &p)
if err != nil {
return nil, resp, err
}
return p, resp, nil
} | 0.782707 | tektoncd/cli | vendor/gitlab.com/gitlab-org/api/client-go/access_requests.go | tektoncd/cli | vendor/gitlab.com/gitlab-org/api/client-go/projects.go | Apache-2.0 | go |
GetCostCategoriesWithContext is the same as GetCostCategories with the addition of
the ability to pass a context and additional request options.
See GetCostCategories for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *CostExplorer) GetCostCategoriesWithContext(ctx aws.Context, input *GetCostCategoriesInput, opts ...request.Option) (*GetCostCategoriesOutput, error) {
req, out := c.GetCostCategoriesRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *CostExplorer) GetCostCategoriesRequest(input *GetCostCategoriesInput) (req *request.Request, output *GetCostCategoriesOutput) {
op := &request.Operation{
Name: opGetCostCategories,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &GetCostCategoriesInput{}
}
output = &GetCostCategoriesOutput{}
req = c.newRequest(op, input, output)
return
} | 0.845114 | aws/aws-sdk-go | service/costexplorer/api.go | aws/aws-sdk-go | service/costexplorer/api.go | Apache-2.0 | go |
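
A short sketch of the context-aware call documented above, using the SDK v1 session and costexplorer packages; the time period values are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/costexplorer"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := costexplorer.New(sess)

	// The context must be non-nil; bound the call with a deadline.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	out, err := svc.GetCostCategoriesWithContext(ctx, &costexplorer.GetCostCategoriesInput{
		TimePeriod: &costexplorer.DateInterval{
			Start: aws.String("2024-01-01"), // placeholder range
			End:   aws.String("2024-02-01"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValueSlice(out.CostCategoryNames))
}
```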
GetCostCategoriesWithContext is the same as GetCostCategories with the addition of
the ability to pass a context and additional request options.
See GetCostCategories for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *CostExplorer) GetCostCategoriesWithContext(ctx aws.Context, input *GetCostCategoriesInput, opts ...request.Option) (*GetCostCategoriesOutput, error) {
req, out := c.GetCostCategoriesRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *CostExplorer) ListCostCategoryDefinitionsWithContext(ctx aws.Context, input *ListCostCategoryDefinitionsInput, opts ...request.Option) (*ListCostCategoryDefinitionsOutput, error) {
req, out := c.ListCostCategoryDefinitionsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.783876 | aws/aws-sdk-go | service/costexplorer/api.go | aws/aws-sdk-go | service/costexplorer/api.go | Apache-2.0 | go |
GetCostCategoriesWithContext is the same as GetCostCategories with the addition of
the ability to pass a context and additional request options.
See GetCostCategories for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *CostExplorer) GetCostCategoriesWithContext(ctx aws.Context, input *GetCostCategoriesInput, opts ...request.Option) (*GetCostCategoriesOutput, error) {
req, out := c.GetCostCategoriesRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *CostExplorer) CreateCostCategoryDefinitionWithContext(ctx aws.Context, input *CreateCostCategoryDefinitionInput, opts ...request.Option) (*CreateCostCategoryDefinitionOutput, error) {
req, out := c.CreateCostCategoryDefinitionRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.780038 | aws/aws-sdk-go | service/costexplorer/api.go | aws/aws-sdk-go | service/costexplorer/api.go | Apache-2.0 | go |
GetCostCategoriesWithContext is the same as GetCostCategories with the addition of
the ability to pass a context and additional request options.
See GetCostCategories for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *CostExplorer) GetCostCategoriesWithContext(ctx aws.Context, input *GetCostCategoriesInput, opts ...request.Option) (*GetCostCategoriesOutput, error) {
req, out := c.GetCostCategoriesRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *CostExplorer) DescribeCostCategoryDefinitionWithContext(ctx aws.Context, input *DescribeCostCategoryDefinitionInput, opts ...request.Option) (*DescribeCostCategoryDefinitionOutput, error) {
req, out := c.DescribeCostCategoryDefinitionRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.779517 | aws/aws-sdk-go | service/costexplorer/api.go | aws/aws-sdk-go | service/costexplorer/api.go | Apache-2.0 | go |
GetCostCategoriesWithContext is the same as GetCostCategories with the addition of
the ability to pass a context and additional request options.
See GetCostCategories for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *CostExplorer) GetCostCategoriesWithContext(ctx aws.Context, input *GetCostCategoriesInput, opts ...request.Option) (*GetCostCategoriesOutput, error) {
req, out := c.GetCostCategoriesRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *CostExplorer) DeleteCostCategoryDefinitionWithContext(ctx aws.Context, input *DeleteCostCategoryDefinitionInput, opts ...request.Option) (*DeleteCostCategoryDefinitionOutput, error) {
req, out := c.DeleteCostCategoryDefinitionRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.750269 | aws/aws-sdk-go | service/costexplorer/api.go | aws/aws-sdk-go | service/costexplorer/api.go | Apache-2.0 | go |
ChangeApprovalConfiguration updates the approval configuration of a merge request.
GitLab API docs:
https://docs.gitlab.com/ee/api/merge_request_approvals.html#change-approval-configuration-deprecated | func (s *MergeRequestApprovalsService) ChangeApprovalConfiguration(pid interface{}, mergeRequest int, opt *ChangeMergeRequestApprovalConfigurationOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("projects/%s/merge_requests/%d/approvals", PathEscape(project), mergeRequest)
req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
if err != nil {
return nil, nil, err
}
m := new(MergeRequest)
resp, err := s.client.Do(req, m)
if err != nil {
return nil, resp, err
}
return m, resp, nil
} | func (s *ProjectsService) ChangeApprovalConfiguration(pid interface{}, opt *ChangeApprovalConfigurationOptions, options ...RequestOptionFunc) (*ProjectApprovals, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("projects/%s/approvals", PathEscape(project))
req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
if err != nil {
return nil, nil, err
}
pa := new(ProjectApprovals)
resp, err := s.client.Do(req, pa)
if err != nil {
return nil, resp, err
}
return pa, resp, nil
} | 0.892628 | tektoncd/cli | vendor/gitlab.com/gitlab-org/api/client-go/merge_request_approvals.go | tektoncd/cli | vendor/gitlab.com/gitlab-org/api/client-go/projects.go | Apache-2.0 | go |
ChangeApprovalConfiguration updates the approval configuration of a merge request.
GitLab API docs:
https://docs.gitlab.com/ee/api/merge_request_approvals.html#change-approval-configuration-deprecated | func (s *MergeRequestApprovalsService) ChangeApprovalConfiguration(pid interface{}, mergeRequest int, opt *ChangeMergeRequestApprovalConfigurationOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("projects/%s/merge_requests/%d/approvals", PathEscape(project), mergeRequest)
req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
if err != nil {
return nil, nil, err
}
m := new(MergeRequest)
resp, err := s.client.Do(req, m)
if err != nil {
return nil, resp, err
}
return m, resp, nil
} | func (s *MergeRequestApprovalsService) ChangeAllowedApprovers(pid interface{}, mergeRequest int, opt *ChangeMergeRequestAllowedApproversOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("projects/%s/merge_requests/%d/approvers", PathEscape(project), mergeRequest)
req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
if err != nil {
return nil, nil, err
}
m := new(MergeRequest)
resp, err := s.client.Do(req, m)
if err != nil {
return nil, resp, err
}
return m, resp, nil
} | 0.800677 | tektoncd/cli | vendor/gitlab.com/gitlab-org/api/client-go/merge_request_approvals.go | tektoncd/cli | vendor/gitlab.com/gitlab-org/api/client-go/merge_request_approvals.go | Apache-2.0 | go |
ChangeApprovalConfiguration updates the approval configuration of a merge request.
GitLab API docs:
https://docs.gitlab.com/ee/api/merge_request_approvals.html#change-approval-configuration-deprecated | func (s *MergeRequestApprovalsService) ChangeApprovalConfiguration(pid interface{}, mergeRequest int, opt *ChangeMergeRequestApprovalConfigurationOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("projects/%s/merge_requests/%d/approvals", PathEscape(project), mergeRequest)
req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
if err != nil {
return nil, nil, err
}
m := new(MergeRequest)
resp, err := s.client.Do(req, m)
if err != nil {
return nil, resp, err
}
return m, resp, nil
} | func (s *MergeRequestApprovalsService) UpdateApprovalRule(pid interface{}, mergeRequest int, approvalRule int, opt *UpdateMergeRequestApprovalRuleOptions, options ...RequestOptionFunc) (*MergeRequestApprovalRule, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("projects/%s/merge_requests/%d/approval_rules/%d", PathEscape(project), mergeRequest, approvalRule)
req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
if err != nil {
return nil, nil, err
}
par := new(MergeRequestApprovalRule)
resp, err := s.client.Do(req, &par)
if err != nil {
return nil, resp, err
}
return par, resp, nil
} | 0.754812 | tektoncd/cli | vendor/gitlab.com/gitlab-org/api/client-go/merge_request_approvals.go | tektoncd/cli | vendor/gitlab.com/gitlab-org/api/client-go/merge_request_approvals.go | Apache-2.0 | go |
ChangeApprovalConfiguration updates the approval configuration of a merge request.
GitLab API docs:
https://docs.gitlab.com/ee/api/merge_request_approvals.html#change-approval-configuration-deprecated | func (s *MergeRequestApprovalsService) ChangeApprovalConfiguration(pid interface{}, mergeRequest int, opt *ChangeMergeRequestApprovalConfigurationOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("projects/%s/merge_requests/%d/approvals", PathEscape(project), mergeRequest)
req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
if err != nil {
return nil, nil, err
}
m := new(MergeRequest)
resp, err := s.client.Do(req, m)
if err != nil {
return nil, resp, err
}
return m, resp, nil
} | func (s *ProjectsService) ChangeAllowedApprovers(pid interface{}, opt *ChangeAllowedApproversOptions, options ...RequestOptionFunc) (*ProjectApprovals, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("projects/%s/approvers", PathEscape(project))
req, err := s.client.NewRequest(http.MethodPut, u, opt, options)
if err != nil {
return nil, nil, err
}
pa := new(ProjectApprovals)
resp, err := s.client.Do(req, pa)
if err != nil {
return nil, resp, err
}
return pa, resp, nil
} | 0.731553 | tektoncd/cli | vendor/gitlab.com/gitlab-org/api/client-go/merge_request_approvals.go | tektoncd/cli | vendor/gitlab.com/gitlab-org/api/client-go/projects.go | Apache-2.0 | go |
ChangeApprovalConfiguration updates the approval configuration of a merge request.
GitLab API docs:
https://docs.gitlab.com/ee/api/merge_request_approvals.html#change-approval-configuration-deprecated | func (s *MergeRequestApprovalsService) ChangeApprovalConfiguration(pid interface{}, mergeRequest int, opt *ChangeMergeRequestApprovalConfigurationOptions, options ...RequestOptionFunc) (*MergeRequest, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("projects/%s/merge_requests/%d/approvals", PathEscape(project), mergeRequest)
req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
if err != nil {
return nil, nil, err
}
m := new(MergeRequest)
resp, err := s.client.Do(req, m)
if err != nil {
return nil, resp, err
}
return m, resp, nil
} | func (s *ProjectsService) GetApprovalConfiguration(pid interface{}, options ...RequestOptionFunc) (*ProjectApprovals, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("projects/%s/approvals", PathEscape(project))
req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
if err != nil {
return nil, nil, err
}
pa := new(ProjectApprovals)
resp, err := s.client.Do(req, pa)
if err != nil {
return nil, resp, err
}
return pa, resp, nil
} | 0.717647 | tektoncd/cli | vendor/gitlab.com/gitlab-org/api/client-go/merge_request_approvals.go | tektoncd/cli | vendor/gitlab.com/gitlab-org/api/client-go/projects.go | Apache-2.0 | go |
UpdateFarm API operation for AWSDeadlineCloud.
Updates a farm.
Returns awserr.Error for service API and SDK errors. Use runtime type assertions
with awserr.Error's Code and Message methods to get detailed information about
the error.
See the AWS API reference guide for AWSDeadlineCloud's
API operation UpdateFarm for usage and error information.
Returned Error Types:
- AccessDeniedException
You don't have permission to perform the action.
- InternalServerErrorException
Deadline Cloud can't process your request right now. Try again later.
- ResourceNotFoundException
The requested resource can't be found.
- ThrottlingException
Your request exceeded a request rate quota.
- ValidationException
The request isn't valid. This can occur if your request contains malformed
JSON or unsupported characters.
See also, https://docs.aws.amazon.com/goto/WebAPI/deadline-2023-10-12/UpdateFarm | func (c *Deadline) UpdateFarm(input *UpdateFarmInput) (*UpdateFarmOutput, error) {
req, out := c.UpdateFarmRequest(input)
return out, req.Send()
} | func (c *Deadline) DeleteFarm(input *DeleteFarmInput) (*DeleteFarmOutput, error) {
req, out := c.DeleteFarmRequest(input)
return out, req.Send()
} | 0.822836 | aws/aws-sdk-go | service/deadline/api.go | aws/aws-sdk-go | service/deadline/api.go | Apache-2.0 | go |
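
A sketch of calling UpdateFarm and inspecting the error with awserr, as the generated docs above suggest; the farm ID and display name are placeholders, and the ErrCode constants are assumed to follow the SDK's usual generated naming for the error types listed.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/deadline"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := deadline.New(sess)

	_, err := svc.UpdateFarm(&deadline.UpdateFarmInput{
		FarmId:      aws.String("farm-1234567890abcdef"), // placeholder
		DisplayName: aws.String("render-farm"),
	})
	if err != nil {
		// Runtime type assertion to awserr.Error, per the docs above.
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case deadline.ErrCodeResourceNotFoundException:
				log.Fatalf("no such farm: %s", aerr.Message())
			case deadline.ErrCodeThrottlingException:
				log.Fatalf("throttled: %s", aerr.Message())
			}
		}
		log.Fatal(err)
	}
	fmt.Println("farm updated")
}
```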
UpdateFarm API operation for AWSDeadlineCloud.
Updates a farm.
Returns awserr.Error for service API and SDK errors. Use runtime type assertions
with awserr.Error's Code and Message methods to get detailed information about
the error.
See the AWS API reference guide for AWSDeadlineCloud's
API operation UpdateFarm for usage and error information.
Returned Error Types:
- AccessDeniedException
You don't have permission to perform the action.
- InternalServerErrorException
Deadline Cloud can't process your request right now. Try again later.
- ResourceNotFoundException
The requested resource can't be found.
- ThrottlingException
Your request exceeded a request rate quota.
- ValidationException
The request isn't valid. This can occur if your request contains malformed
JSON or unsupported characters.
See also, https://docs.aws.amazon.com/goto/WebAPI/deadline-2023-10-12/UpdateFarm | func (c *Deadline) UpdateFarm(input *UpdateFarmInput) (*UpdateFarmOutput, error) {
req, out := c.UpdateFarmRequest(input)
return out, req.Send()
} | func (c *Deadline) GetFarm(input *GetFarmInput) (*GetFarmOutput, error) {
req, out := c.GetFarmRequest(input)
return out, req.Send()
} | 0.809347 | aws/aws-sdk-go | service/deadline/api.go | aws/aws-sdk-go | service/deadline/api.go | Apache-2.0 | go |
UpdateFarm API operation for AWSDeadlineCloud.
Updates a farm.
Returns awserr.Error for service API and SDK errors. Use runtime type assertions
with awserr.Error's Code and Message methods to get detailed information about
the error.
See the AWS API reference guide for AWSDeadlineCloud's
API operation UpdateFarm for usage and error information.
Returned Error Types:
- AccessDeniedException
You don't have permission to perform the action.
- InternalServerErrorException
Deadline Cloud can't process your request right now. Try again later.
- ResourceNotFoundException
The requested resource can't be found.
- ThrottlingException
Your request exceeded a request rate quota.
- ValidationException
The request isn't valid. This can occur if your request contains malformed
JSON or unsupported characters.
See also, https://docs.aws.amazon.com/goto/WebAPI/deadline-2023-10-12/UpdateFarm | func (c *Deadline) UpdateFarm(input *UpdateFarmInput) (*UpdateFarmOutput, error) {
req, out := c.UpdateFarmRequest(input)
return out, req.Send()
} | func (c *Deadline) CreateFarm(input *CreateFarmInput) (*CreateFarmOutput, error) {
req, out := c.CreateFarmRequest(input)
return out, req.Send()
} | 0.783415 | aws/aws-sdk-go | service/deadline/api.go | aws/aws-sdk-go | service/deadline/api.go | Apache-2.0 | go |
UpdateFarm API operation for AWSDeadlineCloud.
Updates a farm.
Returns awserr.Error for service API and SDK errors. Use runtime type assertions
with awserr.Error's Code and Message methods to get detailed information about
the error.
See the AWS API reference guide for AWSDeadlineCloud's
API operation UpdateFarm for usage and error information.
Returned Error Types:
- AccessDeniedException
You don't have permission to perform the action.
- InternalServerErrorException
Deadline Cloud can't process your request right now. Try again later.
- ResourceNotFoundException
The requested resource can't be found.
- ThrottlingException
Your request exceeded a request rate quota.
- ValidationException
The request isn't valid. This can occur if your request contains malformed
JSON or unsupported characters.
See also, https://docs.aws.amazon.com/goto/WebAPI/deadline-2023-10-12/UpdateFarm | func (c *Deadline) UpdateFarm(input *UpdateFarmInput) (*UpdateFarmOutput, error) {
req, out := c.UpdateFarmRequest(input)
return out, req.Send()
} | func (c *Deadline) UpdateFarmRequest(input *UpdateFarmInput) (req *request.Request, output *UpdateFarmOutput) {
op := &request.Operation{
Name: opUpdateFarm,
HTTPMethod: "PATCH",
HTTPPath: "/2023-10-12/farms/{farmId}",
}
if input == nil {
input = &UpdateFarmInput{}
}
output = &UpdateFarmOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("management.", nil))
req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler)
return
} | 0.747723 | aws/aws-sdk-go | service/deadline/api.go | aws/aws-sdk-go | service/deadline/api.go | Apache-2.0 | go |
UpdateFarm API operation for AWSDeadlineCloud.
Updates a farm.
Returns awserr.Error for service API and SDK errors. Use runtime type assertions
with awserr.Error's Code and Message methods to get detailed information about
the error.
See the AWS API reference guide for AWSDeadlineCloud's
API operation UpdateFarm for usage and error information.
Returned Error Types:
- AccessDeniedException
You don't have permission to perform the action.
- InternalServerErrorException
Deadline Cloud can't process your request right now. Try again later.
- ResourceNotFoundException
The requested resource can't be found.
- ThrottlingException
Your request exceeded a request rate quota.
- ValidationException
The request isn't valid. This can occur if your request contains malformed
JSON or unsupported characters.
See also, https://docs.aws.amazon.com/goto/WebAPI/deadline-2023-10-12/UpdateFarm | func (c *Deadline) UpdateFarm(input *UpdateFarmInput) (*UpdateFarmOutput, error) {
req, out := c.UpdateFarmRequest(input)
return out, req.Send()
} | func (c *Deadline) UpdateFleet(input *UpdateFleetInput) (*UpdateFleetOutput, error) {
req, out := c.UpdateFleetRequest(input)
return out, req.Send()
} | 0.699182 | aws/aws-sdk-go | service/deadline/api.go | aws/aws-sdk-go | service/deadline/api.go | Apache-2.0 | go |
DefaultAbsPluginHome returns the absolute path in the given file
system to first directory that looks like a good candidate for
the home of kustomize plugins. | func DefaultAbsPluginHome(fSys filesys.FileSystem) (string, error) {
return FirstDirThatExistsElseError(
"plugin root", fSys, []NotedFunc{
{
Note: "homed in $" + KustomizePluginHomeEnv,
F: func() string {
return os.Getenv(KustomizePluginHomeEnv)
},
},
{
Note: "homed in $" + XdgConfigHomeEnv,
F: func() string {
if root := os.Getenv(XdgConfigHomeEnv); root != "" {
return filepath.Join(root, ProgramName, RelPluginHome)
}
// do not look in "kustomize/plugin" if XdgConfigHomeEnv is unset
return ""
},
},
{
Note: "homed in default value of $" + XdgConfigHomeEnv,
F: func() string {
return filepath.Join(
HomeDir(), XdgConfigHomeEnvDefault,
ProgramName, RelPluginHome)
},
},
{
Note: "homed in home directory",
F: func() string {
return filepath.Join(
HomeDir(), ProgramName, RelPluginHome)
},
},
})
} | func (l *Loader) absPluginHome() (string, error) {
// External plugins are disabled--return the dummy plugin root.
if l.pc.PluginRestrictions != types.PluginRestrictionsNone {
return konfig.NoPluginHomeSentinal, nil
}
// We've already determined plugin home--use the cached value.
if l.absolutePluginHome != "" {
return l.absolutePluginHome, nil
}
// Check default locations for a valid plugin root, and cache it if found.
dir, err := konfig.DefaultAbsPluginHome(l.fs)
if err != nil {
return "", err
}
l.absolutePluginHome = dir
return l.absolutePluginHome, nil
} | 0.584667 | tektoncd/cli | vendor/sigs.k8s.io/kustomize/api/konfig/plugins.go | tektoncd/cli | vendor/sigs.k8s.io/kustomize/api/internal/plugins/loader/loader.go | Apache-2.0 | go |
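
A small sketch of resolving the plugin home through the public entry point above; the filesys import path is an assumption (it differs between kustomize versions), and MakeFsOnDisk supplies the on-disk file system the candidate directories are probed against.

```go
package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/api/konfig"
	"sigs.k8s.io/kustomize/kyaml/filesys" // assumed location of filesys
)

func main() {
	// Probes $KUSTOMIZE_PLUGIN_HOME, $XDG_CONFIG_HOME, its default, and the
	// home directory in order; returns an error if none exists on disk.
	dir, err := konfig.DefaultAbsPluginHome(filesys.MakeFsOnDisk())
	if err != nil {
		fmt.Println("no plugin home found:", err)
		return
	}
	fmt.Println("plugin home:", dir)
}
```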
DefaultAbsPluginHome returns the absolute path in the given file
system to first directory that looks like a good candidate for
the home of kustomize plugins. | func DefaultAbsPluginHome(fSys filesys.FileSystem) (string, error) {
return FirstDirThatExistsElseError(
"plugin root", fSys, []NotedFunc{
{
Note: "homed in $" + KustomizePluginHomeEnv,
F: func() string {
return os.Getenv(KustomizePluginHomeEnv)
},
},
{
Note: "homed in $" + XdgConfigHomeEnv,
F: func() string {
if root := os.Getenv(XdgConfigHomeEnv); root != "" {
return filepath.Join(root, ProgramName, RelPluginHome)
}
// do not look in "kustomize/plugin" if XdgConfigHomeEnv is unset
return ""
},
},
{
Note: "homed in default value of $" + XdgConfigHomeEnv,
F: func() string {
return filepath.Join(
HomeDir(), XdgConfigHomeEnvDefault,
ProgramName, RelPluginHome)
},
},
{
Note: "homed in home directory",
F: func() string {
return filepath.Join(
HomeDir(), ProgramName, RelPluginHome)
},
},
})
} | func (p Paths) InstallPath() string { return filepath.Join(p.base, "store") } | 0.580073 | tektoncd/cli | vendor/sigs.k8s.io/kustomize/api/konfig/plugins.go | kubernetes-sigs/krew | internal/environment/environment.go | Apache-2.0 | go |
DefaultAbsPluginHome returns the absolute path in the given file
system to first directory that looks like a good candidate for
the home of kustomize plugins. | func DefaultAbsPluginHome(fSys filesys.FileSystem) (string, error) {
return FirstDirThatExistsElseError(
"plugin root", fSys, []NotedFunc{
{
Note: "homed in $" + KustomizePluginHomeEnv,
F: func() string {
return os.Getenv(KustomizePluginHomeEnv)
},
},
{
Note: "homed in $" + XdgConfigHomeEnv,
F: func() string {
if root := os.Getenv(XdgConfigHomeEnv); root != "" {
return filepath.Join(root, ProgramName, RelPluginHome)
}
// do not look in "kustomize/plugin" if XdgConfigHomeEnv is unset
return ""
},
},
{
Note: "homed in default value of $" + XdgConfigHomeEnv,
F: func() string {
return filepath.Join(
HomeDir(), XdgConfigHomeEnvDefault,
ProgramName, RelPluginHome)
},
},
{
Note: "homed in home directory",
F: func() string {
return filepath.Join(
HomeDir(), ProgramName, RelPluginHome)
},
},
})
} | func GetConfigHome() (string, error) {
return filepath.Join(Get(), ".config"), nil
} | 0.577687 | tektoncd/cli | vendor/sigs.k8s.io/kustomize/api/konfig/plugins.go | containers/podman-tui | vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go | Apache-2.0 | go |
DefaultAbsPluginHome returns the absolute path in the given file
system to first directory that looks like a good candidate for
the home of kustomize plugins. | func DefaultAbsPluginHome(fSys filesys.FileSystem) (string, error) {
return FirstDirThatExistsElseError(
"plugin root", fSys, []NotedFunc{
{
Note: "homed in $" + KustomizePluginHomeEnv,
F: func() string {
return os.Getenv(KustomizePluginHomeEnv)
},
},
{
Note: "homed in $" + XdgConfigHomeEnv,
F: func() string {
if root := os.Getenv(XdgConfigHomeEnv); root != "" {
return filepath.Join(root, ProgramName, RelPluginHome)
}
// do not look in "kustomize/plugin" if XdgConfigHomeEnv is unset
return ""
},
},
{
Note: "homed in default value of $" + XdgConfigHomeEnv,
F: func() string {
return filepath.Join(
HomeDir(), XdgConfigHomeEnvDefault,
ProgramName, RelPluginHome)
},
},
{
Note: "homed in home directory",
F: func() string {
return filepath.Join(
HomeDir(), ProgramName, RelPluginHome)
},
},
})
} | func (p Paths) PluginInstallPath(plugin string) string {
return filepath.Join(p.InstallPath(), plugin)
} | 0.542529 | tektoncd/cli | vendor/sigs.k8s.io/kustomize/api/konfig/plugins.go | kubernetes-sigs/krew | internal/environment/environment.go | Apache-2.0 | go |
DefaultAbsPluginHome returns the absolute path in the given file
system to first directory that looks like a good candidate for
the home of kustomize plugins. | func DefaultAbsPluginHome(fSys filesys.FileSystem) (string, error) {
return FirstDirThatExistsElseError(
"plugin root", fSys, []NotedFunc{
{
Note: "homed in $" + KustomizePluginHomeEnv,
F: func() string {
return os.Getenv(KustomizePluginHomeEnv)
},
},
{
Note: "homed in $" + XdgConfigHomeEnv,
F: func() string {
if root := os.Getenv(XdgConfigHomeEnv); root != "" {
return filepath.Join(root, ProgramName, RelPluginHome)
}
// do not look in "kustomize/plugin" if XdgConfigHomeEnv is unset
return ""
},
},
{
Note: "homed in default value of $" + XdgConfigHomeEnv,
F: func() string {
return filepath.Join(
HomeDir(), XdgConfigHomeEnvDefault,
ProgramName, RelPluginHome)
},
},
{
Note: "homed in home directory",
F: func() string {
return filepath.Join(
HomeDir(), ProgramName, RelPluginHome)
},
},
})
} | func Get() string {
home := os.Getenv(Key())
if home != "" {
return home
}
home, _ = os.UserHomeDir()
return home
} | 0.515822 | tektoncd/cli | vendor/sigs.k8s.io/kustomize/api/konfig/plugins.go | containers/podman-tui | vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go | Apache-2.0 | go |
MessagesGetConversationsByID returns conversations by their IDs.
extended=0
https://dev.vk.com/method/messages.getConversationsById | func (vk *VK) MessagesGetConversationsByID(params Params) (response MessagesGetConversationsByIDResponse, err error) {
err = vk.RequestUnmarshal("messages.getConversationsById", &response, params, Params{"extended": false})
return
} | func (vk *VK) MessagesGetConversationsByIDExtended(params Params) (
response MessagesGetConversationsByIDExtendedResponse,
err error,
) {
err = vk.RequestUnmarshal("messages.getConversationsById", &response, params, Params{"extended": true})
return
} | 0.89511 | 42wim/matterbridge | vendor/github.com/SevereCloud/vksdk/v2/api/messages.go | 42wim/matterbridge | vendor/github.com/SevereCloud/vksdk/v2/api/messages.go | Apache-2.0 | go |
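
A minimal sketch of the vksdk call above; the token comes from the environment and the peer ID is a placeholder.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/SevereCloud/vksdk/v2/api"
)

func main() {
	vk := api.NewVK(os.Getenv("VK_TOKEN"))

	// peer_ids accepts a comma-separated list of conversation peer IDs.
	resp, err := vk.MessagesGetConversationsByID(api.Params{
		"peer_ids": 2000000001, // placeholder chat peer ID
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Count)
}
```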
MessagesGetConversationsByID returns conversations by their IDs.
extended=0
https://dev.vk.com/method/messages.getConversationsById | func (vk *VK) MessagesGetConversationsByID(params Params) (response MessagesGetConversationsByIDResponse, err error) {
err = vk.RequestUnmarshal("messages.getConversationsById", &response, params, Params{"extended": false})
return
} | func (vk *VK) MessagesGetConversations(params Params) (response MessagesGetConversationsResponse, err error) {
err = vk.RequestUnmarshal("messages.getConversations", &response, params)
return
} | 0.813383 | 42wim/matterbridge | vendor/github.com/SevereCloud/vksdk/v2/api/messages.go | 42wim/matterbridge | vendor/github.com/SevereCloud/vksdk/v2/api/messages.go | Apache-2.0 | go |
MessagesGetConversationsByID returns conversations by their IDs.
extended=0
https://dev.vk.com/method/messages.getConversationsById | func (vk *VK) MessagesGetConversationsByID(params Params) (response MessagesGetConversationsByIDResponse, err error) {
err = vk.RequestUnmarshal("messages.getConversationsById", &response, params, Params{"extended": false})
return
} | func (vk *VK) MessagesGetByID(params Params) (response MessagesGetByIDResponse, err error) {
err = vk.RequestUnmarshal("messages.getById", &response, params, Params{"extended": false})
return
} | 0.760845 | 42wim/matterbridge | vendor/github.com/SevereCloud/vksdk/v2/api/messages.go | 42wim/matterbridge | vendor/github.com/SevereCloud/vksdk/v2/api/messages.go | Apache-2.0 | go |
MessagesGetConversationsByID returns conversations by their IDs.
extended=0
https://dev.vk.com/method/messages.getConversationsById | func (vk *VK) MessagesGetConversationsByID(params Params) (response MessagesGetConversationsByIDResponse, err error) {
err = vk.RequestUnmarshal("messages.getConversationsById", &response, params, Params{"extended": false})
return
} | func (vk *VK) MessagesGetByConversationMessageID(params Params) (
response MessagesGetByConversationMessageIDResponse,
err error,
) {
err = vk.RequestUnmarshal("messages.getByConversationMessageId", &response, params)
return
} | 0.747053 | 42wim/matterbridge | vendor/github.com/SevereCloud/vksdk/v2/api/messages.go | 42wim/matterbridge | vendor/github.com/SevereCloud/vksdk/v2/api/messages.go | Apache-2.0 | go |
MessagesGetConversationsByID returns conversations by their IDs.
extended=0
https://dev.vk.com/method/messages.getConversationsById | func (vk *VK) MessagesGetConversationsByID(params Params) (response MessagesGetConversationsByIDResponse, err error) {
err = vk.RequestUnmarshal("messages.getConversationsById", &response, params, Params{"extended": false})
return
} | func (vk *VK) MessagesSearchConversations(params Params) (response MessagesSearchConversationsResponse, err error) {
err = vk.RequestUnmarshal("messages.searchConversations", &response, params)
return
} | 0.707926 | 42wim/matterbridge | vendor/github.com/SevereCloud/vksdk/v2/api/messages.go | 42wim/matterbridge | vendor/github.com/SevereCloud/vksdk/v2/api/messages.go | Apache-2.0 | go |
decodeArray decodes an array and stores it in value.
The length is an unsigned integer preceding the elements. Even though the length is redundant
(it's part of the type), it's a useful check and is included in the encoding. | func (dec *Decoder) decodeArray(state *decoderState, value reflect.Value, elemOp decOp, length int, ovfl error, helper decHelper) {
if n := state.decodeUint(); n != uint64(length) {
errorf("length mismatch in decodeArray")
}
dec.decodeArrayHelper(state, value, elemOp, length, ovfl, helper)
} | func (dec *Decoder) DecodeArray(v UnmarshalerJSONArray) error {
if dec.isPooled == 1 {
panic(InvalidUsagePooledDecoderError("Invalid usage of pooled decoder"))
}
_, err := dec.decodeArray(v)
return err
} | 0.673278 | mit-pdos/biscuit | src/encoding/gob/decode.go | 42wim/matterbridge | vendor/github.com/francoispqt/gojay/decode_array.go | Apache-2.0 | go |
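
decodeArray is internal to encoding/gob, but the length check it performs is exercised through the ordinary Encoder/Decoder API, as in this small round trip:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"log"
)

func main() {
	var buf bytes.Buffer

	// Encoding writes the array length before the elements; decodeArray
	// later checks that value against the destination type's length.
	if err := gob.NewEncoder(&buf).Encode([3]string{"a", "b", "c"}); err != nil {
		log.Fatal(err)
	}

	var out [3]string
	if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // [a b c]
}
```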
decodeArray decodes an array and stores it in value.
The length is an unsigned integer preceding the elements. Even though the length is redundant
(it's part of the type), it's a useful check and is included in the encoding. | func (dec *Decoder) decodeArray(state *decoderState, value reflect.Value, elemOp decOp, length int, ovfl error, helper decHelper) {
if n := state.decodeUint(); n != uint64(length) {
errorf("length mismatch in decodeArray")
}
dec.decodeArrayHelper(state, value, elemOp, length, ovfl, helper)
} | func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.IsValid() || val.Kind() != reflect.Array {
return ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val}
}
switch vrType := vr.Type(); vrType {
case bsontype.Array:
case bsontype.Type(0), bsontype.EmbeddedDocument:
if val.Type().Elem() != tE {
return fmt.Errorf("cannot decode document into %s", val.Type())
}
case bsontype.Binary:
if val.Type().Elem() != tByte {
return fmt.Errorf("ArrayDecodeValue can only be used to decode binary into a byte array, got %v", vrType)
}
data, subtype, err := vr.ReadBinary()
if err != nil {
return err
}
if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld {
return fmt.Errorf("ArrayDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype)
}
if len(data) > val.Len() {
return fmt.Errorf("more elements returned in array than can fit inside %s", val.Type())
}
for idx, elem := range data {
val.Index(idx).Set(reflect.ValueOf(elem))
}
return nil
case bsontype.Null:
val.Set(reflect.Zero(val.Type()))
return vr.ReadNull()
case bsontype.Undefined:
val.Set(reflect.Zero(val.Type()))
return vr.ReadUndefined()
default:
return fmt.Errorf("cannot decode %v into an array", vrType)
}
var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error)
switch val.Type().Elem() {
case tE:
elemsFunc = dvd.decodeD
default:
elemsFunc = dvd.decodeDefault
}
elems, err := elemsFunc(dc, vr, val)
if err != nil {
return err
}
if len(elems) > val.Len() {
return fmt.Errorf("more elements returned in array than can fit inside %s, got %v elements", val.Type(), len(elems))
}
for idx, elem := range elems {
val.Index(idx).Set(elem)
}
return nil
} | 0.611626 | mit-pdos/biscuit | src/encoding/gob/decode.go | containers/podman-tui | vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go | Apache-2.0 | go |
decodeArray decodes an array and stores it in value.
The length is an unsigned integer preceding the elements. Even though the length is redundant
(it's part of the type), it's a useful check and is included in the encoding. | func (dec *Decoder) decodeArray(state *decoderState, value reflect.Value, elemOp decOp, length int, ovfl error, helper decHelper) {
if n := state.decodeUint(); n != uint64(length) {
errorf("length mismatch in decodeArray")
}
dec.decodeArrayHelper(state, value, elemOp, length, ovfl, helper)
} | func Decode(b []byte, size int) ([]types.Datum, error) {
if len(b) < 1 {
return nil, errors.New("invalid encoded key")
}
var (
err error
values = make([]types.Datum, 0, size)
)
for len(b) > 0 {
var d types.Datum
b, d, err = DecodeOne(b)
if err != nil {
return nil, errors.Trace(err)
}
values = append(values, d)
}
return values, nil
} | 0.594853 | mit-pdos/biscuit | src/encoding/gob/decode.go | talent-plan/tinysql | util/codec/codec.go | Apache-2.0 | go |
decodeArray decodes an array and stores it in value.
The length is an unsigned integer preceding the elements. Even though the length is redundant
(it's part of the type), it's a useful check and is included in the encoding. | func (dec *Decoder) decodeArray(state *decoderState, value reflect.Value, elemOp decOp, length int, ovfl error, helper decHelper) {
if n := state.decodeUint(); n != uint64(length) {
errorf("length mismatch in decodeArray")
}
dec.decodeArrayHelper(state, value, elemOp, length, ovfl, helper)
} | func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
var i64 int64
var err error
switch vr.Type() {
case bsontype.Int32:
i32, err := vr.ReadInt32()
if err != nil {
return err
}
i64 = int64(i32)
case bsontype.Int64:
i64, err = vr.ReadInt64()
if err != nil {
return err
}
case bsontype.Double:
f64, err := vr.ReadDouble()
if err != nil {
return err
}
if !dc.Truncate && math.Floor(f64) != f64 {
return errors.New("UintDecodeValue can only truncate float64 to an integer type when truncation is enabled")
}
if f64 > float64(math.MaxInt64) {
return fmt.Errorf("%g overflows int64", f64)
}
i64 = int64(f64)
case bsontype.Boolean:
b, err := vr.ReadBoolean()
if err != nil {
return err
}
if b {
i64 = 1
}
default:
return fmt.Errorf("cannot decode %v into an integer type", vr.Type())
}
if !val.CanSet() {
return ValueDecoderError{
Name: "UintDecodeValue",
Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
Received: val,
}
}
switch val.Kind() {
case reflect.Uint8:
if i64 < 0 || i64 > math.MaxUint8 {
return fmt.Errorf("%d overflows uint8", i64)
}
case reflect.Uint16:
if i64 < 0 || i64 > math.MaxUint16 {
return fmt.Errorf("%d overflows uint16", i64)
}
case reflect.Uint32:
if i64 < 0 || i64 > math.MaxUint32 {
return fmt.Errorf("%d overflows uint32", i64)
}
case reflect.Uint64:
if i64 < 0 {
return fmt.Errorf("%d overflows uint64", i64)
}
case reflect.Uint:
if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint
return fmt.Errorf("%d overflows uint", i64)
}
default:
return ValueDecoderError{
Name: "UintDecodeValue",
Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
Received: val,
}
}
val.SetUint(uint64(i64))
return nil
} | 0.571207 | mit-pdos/biscuit | src/encoding/gob/decode.go | containers/podman-tui | vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go | Apache-2.0 | go |
decodeArray decodes an array and stores it in value.
The length is an unsigned integer preceding the elements. Even though the length is redundant
(it's part of the type), it's a useful check and is included in the encoding. | func (dec *Decoder) decodeArray(state *decoderState, value reflect.Value, elemOp decOp, length int, ovfl error, helper decHelper) {
if n := state.decodeUint(); n != uint64(length) {
errorf("length mismatch in decodeArray")
}
dec.decodeArrayHelper(state, value, elemOp, length, ovfl, helper)
} | func (state *decoderState) getLength() (int, bool) {
n := int(state.decodeUint())
if n < 0 || state.b.Len() < n || tooBig <= n {
return 0, false
}
return n, true
} | 0.554799 | mit-pdos/biscuit | src/encoding/gob/decode.go | mit-pdos/biscuit | src/encoding/gob/decode.go | BSD-3-Clause | go |
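The four rows above all pair the same gob decodeArray docstring with different negatives; the behaviour it describes (a transmitted element count that must match the destination array's declared length) can be observed end to end with only the standard library. A minimal illustrative sketch, not taken from any of the repositories listed in these rows:

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"log"
)

func main() {
	// Encode a fixed-size array; gob writes the element count ahead of the elements.
	src := [3]int{10, 20, 30}
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(src); err != nil {
		log.Fatal(err)
	}

	// Decode into an array of the same length. If the transmitted count did not
	// match the destination type's length, the decodeArray check above would make
	// this call return an error instead.
	var dst [3]int
	if err := gob.NewDecoder(&buf).Decode(&dst); err != nil {
		log.Fatal(err)
	}
	fmt.Println(dst) // [10 20 30]
}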
C documentation
/*
** pIdx is an index that covers all of the low-number columns used by
** pWInfo->pSelect (columns from 0 through 62) or an index that has
** expression terms. Hence, we cannot determine whether or not it is
** a covering index by using the colUsed bitmasks. We have to do a search
** to see if the index is covering. This routine does that search.
**
** The return value is one of these:
**
** 0 The index is definitely not a covering index
**
** WHERE_IDX_ONLY The index is definitely a covering index
**
** WHERE_EXPRIDX The index is likely a covering index, but it is
** difficult to determine precisely because of the
** expressions that are indexed. Score it as a
** covering index, but still keep the main table open
** just in case we need it.
**
** This routine is an optimization. It is always safe to return zero.
** But returning one of the other two values when zero should have been
** returned can lead to incorrect bytecode and assertion faults.
*/ | func _whereIsCoveringIndex(tls *libc.TLS, pWInfo uintptr, pIdx uintptr, iTabCur int32) (r Tu32) {
bp := tls.Alloc(64)
defer tls.Free(64)
var i, rc int32
var _ /* ck at bp+0 */ TCoveringIndexCheck1
var _ /* w at bp+16 */ TWalker
_, _ = i, rc
if (*TWhereInfo)(unsafe.Pointer(pWInfo)).FpSelect == uintptr(0) {
/* We don't have access to the full query, so we cannot check to see
** if pIdx is covering. Assume it is not. */
return uint32(0)
}
if int32(uint32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x1000>>12)) == 0 {
i = 0
for {
if !(i < libc.Int32FromUint16((*TIndex)(unsafe.Pointer(pIdx)).FnColumn)) {
break
}
if int32(*(*Ti16)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(i)*2))) >= libc.Int32FromUint64(libc.Uint64FromInt64(8)*libc.Uint64FromInt32(8))-libc.Int32FromInt32(1) {
break
}
goto _1
_1:
;
i++
}
if i >= libc.Int32FromUint16((*TIndex)(unsafe.Pointer(pIdx)).FnColumn) {
/* pIdx does not index any columns greater than 62, but we know from
** colMask that columns greater than 62 are used, so this is not a
** covering index */
return uint32(0)
}
}
(*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FpIdx = pIdx
(*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FiTabCur = iTabCur
(*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FbExpr = uint8(0)
(*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FbUnidx = uint8(0)
libc.Xmemset(tls, bp+16, 0, uint64(48))
(*(*TWalker)(unsafe.Pointer(bp + 16))).FxExprCallback = __ccgo_fp(_whereIsCoveringIndexWalkCallback)
(*(*TWalker)(unsafe.Pointer(bp + 16))).FxSelectCallback = __ccgo_fp(_sqlite3SelectWalkNoop)
*(*uintptr)(unsafe.Pointer(bp + 16 + 40)) = bp
_sqlite3WalkSelect(tls, bp+16, (*TWhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)
if (*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FbUnidx != 0 {
rc = 0
} else {
if (*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FbExpr != 0 {
rc = int32(WHERE_EXPRIDX)
} else {
rc = int32(WHERE_IDX_ONLY)
}
}
return libc.Uint32FromInt32(rc)
} | func _wherePartIdxExpr(tls *libc.TLS, pParse uintptr, pIdx uintptr, pPart uintptr, pMask uintptr, iIdxCur int32, pItem uintptr) {
var aff Tu8
var bNullRow int32
var db, p, pArg, pLeft, pRight uintptr
_, _, _, _, _, _, _ = aff, bNullRow, db, p, pArg, pLeft, pRight
if libc.Int32FromUint8((*TExpr)(unsafe.Pointer(pPart)).Fop) == int32(TK_AND) {
_wherePartIdxExpr(tls, pParse, pIdx, (*TExpr)(unsafe.Pointer(pPart)).FpRight, pMask, iIdxCur, pItem)
pPart = (*TExpr)(unsafe.Pointer(pPart)).FpLeft
}
if libc.Int32FromUint8((*TExpr)(unsafe.Pointer(pPart)).Fop) == int32(TK_EQ) || libc.Int32FromUint8((*TExpr)(unsafe.Pointer(pPart)).Fop) == int32(TK_IS) {
pLeft = (*TExpr)(unsafe.Pointer(pPart)).FpLeft
pRight = (*TExpr)(unsafe.Pointer(pPart)).FpRight
if libc.Int32FromUint8((*TExpr)(unsafe.Pointer(pLeft)).Fop) != int32(TK_COLUMN) {
return
}
if !(_sqlite3ExprIsConstant(tls, uintptr(0), pRight) != 0) {
return
}
if !(_sqlite3IsBinary(tls, _sqlite3ExprCompareCollSeq(tls, pParse, pPart)) != 0) {
return
}
if int32((*TExpr)(unsafe.Pointer(pLeft)).FiColumn) < 0 {
return
}
aff = libc.Uint8FromInt8((*(*TColumn)(unsafe.Pointer((*TTable)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pIdx)).FpTable)).FaCol + uintptr((*TExpr)(unsafe.Pointer(pLeft)).FiColumn)*16))).Faffinity)
if libc.Int32FromUint8(aff) >= int32(SQLITE_AFF_TEXT) {
if pItem != 0 {
db = (*TParse)(unsafe.Pointer(pParse)).Fdb
p = _sqlite3DbMallocRaw(tls, db, uint64(32))
if p != 0 {
bNullRow = libc.BoolInt32(libc.Int32FromUint8((*TSrcItem)(unsafe.Pointer(pItem)).Ffg.Fjointype)&(libc.Int32FromInt32(JT_LEFT)|libc.Int32FromInt32(JT_LTORJ)) != 0)
(*TIndexedExpr)(unsafe.Pointer(p)).FpExpr = _sqlite3ExprDup(tls, db, pRight, 0)
(*TIndexedExpr)(unsafe.Pointer(p)).FiDataCur = (*TSrcItem)(unsafe.Pointer(pItem)).FiCursor
(*TIndexedExpr)(unsafe.Pointer(p)).FiIdxCur = iIdxCur
(*TIndexedExpr)(unsafe.Pointer(p)).FiIdxCol = int32((*TExpr)(unsafe.Pointer(pLeft)).FiColumn)
(*TIndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow = libc.Uint8FromInt32(bNullRow)
(*TIndexedExpr)(unsafe.Pointer(p)).FpIENext = (*TParse)(unsafe.Pointer(pParse)).FpIdxPartExpr
(*TIndexedExpr)(unsafe.Pointer(p)).Faff = aff
(*TParse)(unsafe.Pointer(pParse)).FpIdxPartExpr = p
if (*TIndexedExpr)(unsafe.Pointer(p)).FpIENext == uintptr(0) {
pArg = pParse + 104
_sqlite3ParserAddCleanup(tls, pParse, __ccgo_fp(_whereIndexedExprCleanup), pArg)
}
}
} else {
if int32((*TExpr)(unsafe.Pointer(pLeft)).FiColumn) < libc.Int32FromUint64(libc.Uint64FromInt64(8)*libc.Uint64FromInt32(8))-libc.Int32FromInt32(1) {
*(*TBitmask)(unsafe.Pointer(pMask)) &= ^(libc.Uint64FromInt32(1) << (*TExpr)(unsafe.Pointer(pLeft)).FiColumn)
}
}
}
}
} | 0.759268 | umputun/spot | vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go | umputun/spot | vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go | MIT | go |
C documentation
/*
** pIdx is an index that covers all of the low-number columns used by
** pWInfo->pSelect (columns from 0 through 62) or an index that has
** expression terms. Hence, we cannot determine whether or not it is
** a covering index by using the colUsed bitmasks. We have to do a search
** to see if the index is covering. This routine does that search.
**
** The return value is one of these:
**
** 0 The index is definitely not a covering index
**
** WHERE_IDX_ONLY The index is definitely a covering index
**
** WHERE_EXPRIDX The index is likely a covering index, but it is
** difficult to determine precisely because of the
** expressions that are indexed. Score it as a
** covering index, but still keep the main table open
** just in case we need it.
**
** This routine is an optimization. It is always safe to return zero.
** But returning one of the other two values when zero should have been
** returned can lead to incorrect bytecode and assertion faults.
*/ | func _whereIsCoveringIndex(tls *libc.TLS, pWInfo uintptr, pIdx uintptr, iTabCur int32) (r Tu32) {
bp := tls.Alloc(64)
defer tls.Free(64)
var i, rc int32
var _ /* ck at bp+0 */ TCoveringIndexCheck1
var _ /* w at bp+16 */ TWalker
_, _ = i, rc
if (*TWhereInfo)(unsafe.Pointer(pWInfo)).FpSelect == uintptr(0) {
/* We don't have access to the full query, so we cannot check to see
** if pIdx is covering. Assume it is not. */
return uint32(0)
}
if int32(uint32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x1000>>12)) == 0 {
i = 0
for {
if !(i < libc.Int32FromUint16((*TIndex)(unsafe.Pointer(pIdx)).FnColumn)) {
break
}
if int32(*(*Ti16)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(i)*2))) >= libc.Int32FromUint64(libc.Uint64FromInt64(8)*libc.Uint64FromInt32(8))-libc.Int32FromInt32(1) {
break
}
goto _1
_1:
;
i++
}
if i >= libc.Int32FromUint16((*TIndex)(unsafe.Pointer(pIdx)).FnColumn) {
/* pIdx does not index any columns greater than 62, but we know from
** colMask that columns greater than 62 are used, so this is not a
** covering index */
return uint32(0)
}
}
(*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FpIdx = pIdx
(*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FiTabCur = iTabCur
(*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FbExpr = uint8(0)
(*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FbUnidx = uint8(0)
libc.Xmemset(tls, bp+16, 0, uint64(48))
(*(*TWalker)(unsafe.Pointer(bp + 16))).FxExprCallback = __ccgo_fp(_whereIsCoveringIndexWalkCallback)
(*(*TWalker)(unsafe.Pointer(bp + 16))).FxSelectCallback = __ccgo_fp(_sqlite3SelectWalkNoop)
*(*uintptr)(unsafe.Pointer(bp + 16 + 40)) = bp
_sqlite3WalkSelect(tls, bp+16, (*TWhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)
if (*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FbUnidx != 0 {
rc = 0
} else {
if (*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FbExpr != 0 {
rc = int32(WHERE_EXPRIDX)
} else {
rc = int32(WHERE_IDX_ONLY)
}
}
return libc.Uint32FromInt32(rc)
} | func _sqlite3ExprCoveredByIndex(tls *libc.TLS, pExpr uintptr, iCur int32, pIdx uintptr) (r int32) {
bp := tls.Alloc(64)
defer tls.Free(64)
var _ /* w at bp+0 */ TWalker
var _ /* xcov at bp+48 */ TIdxCover
libc.Xmemset(tls, bp, 0, uint64(48))
(*(*TIdxCover)(unsafe.Pointer(bp + 48))).FiCur = iCur
(*(*TIdxCover)(unsafe.Pointer(bp + 48))).FpIdx = pIdx
(*(*TWalker)(unsafe.Pointer(bp))).FxExprCallback = __ccgo_fp(_exprIdxCover)
*(*uintptr)(unsafe.Pointer(bp + 40)) = bp + 48
_sqlite3WalkExpr(tls, bp, pExpr)
return libc.BoolInt32(!((*(*TWalker)(unsafe.Pointer(bp))).FeCode != 0))
} | 0.730125 | umputun/spot | vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go | umputun/spot | vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go | MIT | go |
C documentation
/*
** pIdx is an index that covers all of the low-number columns used by
** pWInfo->pSelect (columns from 0 through 62) or an index that has
** expression terms. Hence, we cannot determine whether or not it is
** a covering index by using the colUsed bitmasks. We have to do a search
** to see if the index is covering. This routine does that search.
**
** The return value is one of these:
**
** 0 The index is definitely not a covering index
**
** WHERE_IDX_ONLY The index is definitely a covering index
**
** WHERE_EXPRIDX The index is likely a covering index, but it is
** difficult to determine precisely because of the
** expressions that are indexed. Score it as a
** covering index, but still keep the main table open
** just in case we need it.
**
** This routine is an optimization. It is always safe to return zero.
** But returning one of the other two values when zero should have been
** returned can lead to incorrect bytecode and assertion faults.
*/ | func _whereIsCoveringIndex(tls *libc.TLS, pWInfo uintptr, pIdx uintptr, iTabCur int32) (r Tu32) {
bp := tls.Alloc(64)
defer tls.Free(64)
var i, rc int32
var _ /* ck at bp+0 */ TCoveringIndexCheck1
var _ /* w at bp+16 */ TWalker
_, _ = i, rc
if (*TWhereInfo)(unsafe.Pointer(pWInfo)).FpSelect == uintptr(0) {
/* We don't have access to the full query, so we cannot check to see
** if pIdx is covering. Assume it is not. */
return uint32(0)
}
if int32(uint32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x1000>>12)) == 0 {
i = 0
for {
if !(i < libc.Int32FromUint16((*TIndex)(unsafe.Pointer(pIdx)).FnColumn)) {
break
}
if int32(*(*Ti16)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(i)*2))) >= libc.Int32FromUint64(libc.Uint64FromInt64(8)*libc.Uint64FromInt32(8))-libc.Int32FromInt32(1) {
break
}
goto _1
_1:
;
i++
}
if i >= libc.Int32FromUint16((*TIndex)(unsafe.Pointer(pIdx)).FnColumn) {
/* pIdx does not index any columns greater than 62, but we know from
** colMask that columns greater than 62 are used, so this is not a
** covering index */
return uint32(0)
}
}
(*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FpIdx = pIdx
(*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FiTabCur = iTabCur
(*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FbExpr = uint8(0)
(*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FbUnidx = uint8(0)
libc.Xmemset(tls, bp+16, 0, uint64(48))
(*(*TWalker)(unsafe.Pointer(bp + 16))).FxExprCallback = __ccgo_fp(_whereIsCoveringIndexWalkCallback)
(*(*TWalker)(unsafe.Pointer(bp + 16))).FxSelectCallback = __ccgo_fp(_sqlite3SelectWalkNoop)
*(*uintptr)(unsafe.Pointer(bp + 16 + 40)) = bp
_sqlite3WalkSelect(tls, bp+16, (*TWhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)
if (*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FbUnidx != 0 {
rc = 0
} else {
if (*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FbExpr != 0 {
rc = int32(WHERE_EXPRIDX)
} else {
rc = int32(WHERE_IDX_ONLY)
}
}
return libc.Uint32FromInt32(rc)
} | func _whereLoopAddBtreeIndex(tls *libc.TLS, pBuilder uintptr, pSrc uintptr, pProbe uintptr, nInMul TLogEst) (r int32) {
bp := tls.Alloc(128)
defer tls.Free(128)
var M, logK, nIter, nOutUnadjusted, rCostIdx, rLogSize, rSize, saved_nOut, x TLogEst
var db, pBtm, pExpr, pExpr1, pNew, pParse, pTerm, pTop, pWInfo, v11, v24, v5, v8, v9, p12, p13, p14, p15, p16, p17, p18, p19, p2, p20, p25, p3 uintptr
var eOp, saved_nBtm, saved_nEq, saved_nLTerm, saved_nSkip, saved_nTop, v10, v23, v4, v7 Tu16
var i, iCol, nEq, nIn, nRecValid, nVecLen, opMask, rc, v21 int32
var saved_prereq TBitmask
var saved_wsFlags Tu32
var v22 bool
var _ /* nOut at bp+112 */ TtRowcnt
var _ /* scan at bp+0 */ TWhereScan
_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = M, db, eOp, i, iCol, logK, nEq, nIn, nIter, nOutUnadjusted, nRecValid, nVecLen, opMask, pBtm, pExpr, pExpr1, pNew, pParse, pTerm, pTop, pWInfo, rCostIdx, rLogSize, rSize, rc, saved_nBtm, saved_nEq, saved_nLTerm, saved_nOut, saved_nSkip, saved_nTop, saved_prereq, saved_wsFlags, x, v10, v11, v21, v22, v23, v24, v4, v5, v7, v8, v9, p12, p13, p14, p15, p16, p17, p18, p19, p2, p20, p25, p3
pWInfo = (*TWhereLoopBuilder)(unsafe.Pointer(pBuilder)).FpWInfo /* WHERE analyze context */
pParse = (*TWhereInfo)(unsafe.Pointer(pWInfo)).FpParse /* Parsing context */
db = (*TParse)(unsafe.Pointer(pParse)).Fdb /* Original value of pNew->nOut */
rc = SQLITE_OK /* Logarithm of table size */
pTop = uintptr(0)
pBtm = uintptr(0) /* Top and bottom range constraints */
pNew = (*TWhereLoopBuilder)(unsafe.Pointer(pBuilder)).FpNew
if (*TParse)(unsafe.Pointer(pParse)).FnErr != 0 {
return (*TParse)(unsafe.Pointer(pParse)).Frc
}
if (*TWhereLoop)(unsafe.Pointer(pNew)).FwsFlags&uint32(WHERE_BTM_LIMIT) != 0 {
opMask = libc.Int32FromInt32(WO_EQ)<<(libc.Int32FromInt32(TK_LT)-libc.Int32FromInt32(TK_EQ)) | libc.Int32FromInt32(WO_EQ)<<(libc.Int32FromInt32(TK_LE)-libc.Int32FromInt32(TK_EQ))
} else {
opMask = libc.Int32FromInt32(WO_EQ) | libc.Int32FromInt32(WO_IN) | libc.Int32FromInt32(WO_EQ)<<(libc.Int32FromInt32(TK_GT)-libc.Int32FromInt32(TK_EQ)) | libc.Int32FromInt32(WO_EQ)<<(libc.Int32FromInt32(TK_GE)-libc.Int32FromInt32(TK_EQ)) | libc.Int32FromInt32(WO_EQ)<<(libc.Int32FromInt32(TK_LT)-libc.Int32FromInt32(TK_EQ)) | libc.Int32FromInt32(WO_EQ)<<(libc.Int32FromInt32(TK_LE)-libc.Int32FromInt32(TK_EQ)) | libc.Int32FromInt32(WO_ISNULL) | libc.Int32FromInt32(WO_IS)
}
if int32(uint32(*(*uint16)(unsafe.Pointer(pProbe + 100))&0x4>>2)) != 0 || int32(uint32(*(*uint16)(unsafe.Pointer(pProbe + 100))&0x100>>8)) != 0 {
if int32(uint32(*(*uint16)(unsafe.Pointer(pProbe + 100))&0x4>>2)) != 0 {
opMask &= ^(libc.Int32FromInt32(WO_EQ)<<(libc.Int32FromInt32(TK_GT)-libc.Int32FromInt32(TK_EQ)) | libc.Int32FromInt32(WO_EQ)<<(libc.Int32FromInt32(TK_GE)-libc.Int32FromInt32(TK_EQ)) | libc.Int32FromInt32(WO_EQ)<<(libc.Int32FromInt32(TK_LT)-libc.Int32FromInt32(TK_EQ)) | libc.Int32FromInt32(WO_EQ)<<(libc.Int32FromInt32(TK_LE)-libc.Int32FromInt32(TK_EQ)))
}
if int32(uint32(*(*uint16)(unsafe.Pointer(pProbe + 100))&0x100>>8)) != 0 && int32(*(*uint32)(unsafe.Pointer(pSrc + 24 + 4))&0x2>>1) == 0 {
opMask &= ^(libc.Int32FromInt32(WO_EQ) | libc.Int32FromInt32(WO_IN) | libc.Int32FromInt32(WO_IS))
}
}
saved_nEq = (*(*struct {
FnEq Tu16
FnBtm Tu16
FnTop Tu16
FnDistinctCol Tu16
FpIndex uintptr
FpOrderBy uintptr
})(unsafe.Pointer(pNew + 24))).FnEq
saved_nBtm = (*(*struct {
FnEq Tu16
FnBtm Tu16
FnTop Tu16
FnDistinctCol Tu16
FpIndex uintptr
FpOrderBy uintptr
})(unsafe.Pointer(pNew + 24))).FnBtm
saved_nTop = (*(*struct {
FnEq Tu16
FnBtm Tu16
FnTop Tu16
FnDistinctCol Tu16
FpIndex uintptr
FpOrderBy uintptr
})(unsafe.Pointer(pNew + 24))).FnTop
saved_nSkip = (*TWhereLoop)(unsafe.Pointer(pNew)).FnSkip
saved_nLTerm = (*TWhereLoop)(unsafe.Pointer(pNew)).FnLTerm
saved_wsFlags = (*TWhereLoop)(unsafe.Pointer(pNew)).FwsFlags
saved_prereq = (*TWhereLoop)(unsafe.Pointer(pNew)).Fprereq
saved_nOut = (*TWhereLoop)(unsafe.Pointer(pNew)).FnOut
pTerm = _whereScanInit(tls, bp, (*TWhereLoopBuilder)(unsafe.Pointer(pBuilder)).FpWC, (*TSrcItem)(unsafe.Pointer(pSrc)).FiCursor, libc.Int32FromUint16(saved_nEq), libc.Uint32FromInt32(opMask), pProbe)
(*TWhereLoop)(unsafe.Pointer(pNew)).FrSetup = 0
rSize = *(*TLogEst)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pProbe)).FaiRowLogEst))
rLogSize = _estLog(tls, rSize)
for {
if !(rc == SQLITE_OK && pTerm != uintptr(0)) {
break
}
eOp = (*TWhereTerm)(unsafe.Pointer(pTerm)).FeOperator /* nOut before IN() and WHERE adjustments */
nIn = 0
nRecValid = (*TWhereLoopBuilder)(unsafe.Pointer(pBuilder)).FnRecValid
if (libc.Int32FromUint16(eOp) == int32(WO_ISNULL) || libc.Int32FromUint16((*TWhereTerm)(unsafe.Pointer(pTerm)).FwtFlags)&int32(TERM_VNULL) != 0) && _indexColumnNotNull(tls, pProbe, libc.Int32FromUint16(saved_nEq)) != 0 {
goto _1 /* ignore IS [NOT] NULL constraints on NOT NULL columns */
}
if (*TWhereTerm)(unsafe.Pointer(pTerm)).FprereqRight&(*TWhereLoop)(unsafe.Pointer(pNew)).FmaskSelf != 0 {
goto _1
}
/* Do not allow the upper bound of a LIKE optimization range constraint
** to mix with a lower range bound from some other source */
if libc.Int32FromUint16((*TWhereTerm)(unsafe.Pointer(pTerm)).FwtFlags)&int32(TERM_LIKEOPT) != 0 && libc.Int32FromUint16((*TWhereTerm)(unsafe.Pointer(pTerm)).FeOperator) == libc.Int32FromInt32(WO_EQ)<<(libc.Int32FromInt32(TK_LT)-libc.Int32FromInt32(TK_EQ)) {
goto _1
}
if libc.Int32FromUint8((*TSrcItem)(unsafe.Pointer(pSrc)).Ffg.Fjointype)&(libc.Int32FromInt32(JT_LEFT)|libc.Int32FromInt32(JT_LTORJ)|libc.Int32FromInt32(JT_RIGHT)) != 0 && !(_constraintCompatibleWithOuterJoin(tls, pTerm, pSrc) != 0) {
goto _1
}
if libc.Int32FromUint8((*TIndex)(unsafe.Pointer(pProbe)).FonError) != OE_None && libc.Int32FromUint16(saved_nEq) == libc.Int32FromUint16((*TIndex)(unsafe.Pointer(pProbe)).FnKeyCol)-int32(1) {
p2 = pBuilder + 44
*(*uint8)(unsafe.Pointer(p2)) = uint8(int32(*(*uint8)(unsafe.Pointer(p2))) | libc.Int32FromInt32(SQLITE_BLDF1_UNIQUE))
} else {
p3 = pBuilder + 44
*(*uint8)(unsafe.Pointer(p3)) = uint8(int32(*(*uint8)(unsafe.Pointer(p3))) | libc.Int32FromInt32(SQLITE_BLDF1_INDEXED))
}
(*TWhereLoop)(unsafe.Pointer(pNew)).FwsFlags = saved_wsFlags
(*(*struct {
FnEq Tu16
FnBtm Tu16
FnTop Tu16
FnDistinctCol Tu16
FpIndex uintptr
FpOrderBy uintptr
})(unsafe.Pointer(pNew + 24))).FnEq = saved_nEq
(*(*struct {
FnEq Tu16
FnBtm Tu16
FnTop Tu16
FnDistinctCol Tu16
FpIndex uintptr
FpOrderBy uintptr
})(unsafe.Pointer(pNew + 24))).FnBtm = saved_nBtm
(*(*struct {
FnEq Tu16
FnBtm Tu16
FnTop Tu16
FnDistinctCol Tu16
FpIndex uintptr
FpOrderBy uintptr
})(unsafe.Pointer(pNew + 24))).FnTop = saved_nTop
(*TWhereLoop)(unsafe.Pointer(pNew)).FnLTerm = saved_nLTerm
if libc.Int32FromUint16((*TWhereLoop)(unsafe.Pointer(pNew)).FnLTerm) >= libc.Int32FromUint16((*TWhereLoop)(unsafe.Pointer(pNew)).FnLSlot) && _whereLoopResize(tls, db, pNew, libc.Int32FromUint16((*TWhereLoop)(unsafe.Pointer(pNew)).FnLTerm)+int32(1)) != 0 {
break /* OOM while trying to enlarge the pNew->aLTerm array */
}
v5 = pNew + 52
v4 = *(*Tu16)(unsafe.Pointer(v5))
*(*Tu16)(unsafe.Pointer(v5))++
*(*uintptr)(unsafe.Pointer((*TWhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(v4)*8)) = pTerm
(*TWhereLoop)(unsafe.Pointer(pNew)).Fprereq = (saved_prereq | (*TWhereTerm)(unsafe.Pointer(pTerm)).FprereqRight) & ^(*TWhereLoop)(unsafe.Pointer(pNew)).FmaskSelf
if libc.Int32FromUint16(eOp)&int32(WO_IN) != 0 {
pExpr = (*TWhereTerm)(unsafe.Pointer(pTerm)).FpExpr
if (*TExpr)(unsafe.Pointer(pExpr)).Fflags&uint32(EP_xIsSelect) != uint32(0) {
nIn = int32(46)
/* The expression may actually be of the form (x, y) IN (SELECT...).
** In this case there is a separate term for each of (x) and (y).
** However, the nIn multiplier should only be applied once, not once
** for each such term. The following loop checks that pTerm is the
** first such term in use, and sets nIn back to 0 if it is not. */
i = 0
for {
if !(i < libc.Int32FromUint16((*TWhereLoop)(unsafe.Pointer(pNew)).FnLTerm)-int32(1)) {
break
}
if *(*uintptr)(unsafe.Pointer((*TWhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(i)*8)) != 0 && (*TWhereTerm)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer((*TWhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(i)*8)))).FpExpr == pExpr {
nIn = 0
}
goto _6
_6:
;
i++
}
} else {
if *(*uintptr)(unsafe.Pointer(pExpr + 32)) != 0 && (*TExprList)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pExpr + 32)))).FnExpr != 0 {
/* "x IN (value, value, ...)" */
nIn = int32(_sqlite3LogEst(tls, libc.Uint64FromInt32((*TExprList)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pExpr + 32)))).FnExpr)))
}
}
if int32(uint32(*(*uint16)(unsafe.Pointer(pProbe + 100))&0x80>>7)) != 0 && int32(rLogSize) >= int32(10) {
/* Let:
** N = the total number of rows in the table
** K = the number of entries on the RHS of the IN operator
** M = the number of rows in the table that match terms to the
** to the left in the same index. If the IN operator is on
** the left-most index column, M==N.
**
** Given the definitions above, it is better to omit the IN operator
** from the index lookup and instead do a scan of the M elements,
** testing each scanned row against the IN operator separately, if:
**
** M*log(K) < K*log(N)
**
** Our estimates for M, K, and N might be inaccurate, so we build in
** a safety margin of 2 (LogEst: 10) that favors using the IN operator
** with the index, as using an index has better worst-case behavior.
** If we do not have real sqlite_stat1 data, always prefer to use
** the index. Do not bother with this optimization on very small
** tables (less than 2 rows) as it is pointless in that case.
*/
M = *(*TLogEst)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pProbe)).FaiRowLogEst + uintptr(saved_nEq)*2))
logK = _estLog(tls, int16(nIn))
/* TUNING v----- 10 to bias toward indexed IN */
x = int16(int32(M) + int32(logK) + int32(10) - (nIn + int32(rLogSize)))
if int32(x) >= 0 {
} else {
if int32(nInMul) < int32(2) && (*Tsqlite3)(unsafe.Pointer(db)).FdbOptFlags&libc.Uint32FromInt32(libc.Int32FromInt32(SQLITE_SeekScan)) == uint32(0) {
*(*Tu32)(unsafe.Pointer(pNew + 48)) |= uint32(WHERE_IN_SEEKSCAN)
} else {
goto _1
}
}
}
*(*Tu32)(unsafe.Pointer(pNew + 48)) |= uint32(WHERE_COLUMN_IN)
} else {
if libc.Int32FromUint16(eOp)&(libc.Int32FromInt32(WO_EQ)|libc.Int32FromInt32(WO_IS)) != 0 {
iCol = int32(*(*Ti16)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pProbe)).FaiColumn + uintptr(saved_nEq)*2)))
*(*Tu32)(unsafe.Pointer(pNew + 48)) |= uint32(WHERE_COLUMN_EQ)
if iCol == -int32(1) || iCol >= 0 && int32(nInMul) == 0 && libc.Int32FromUint16(saved_nEq) == libc.Int32FromUint16((*TIndex)(unsafe.Pointer(pProbe)).FnKeyCol)-int32(1) {
if iCol == -int32(1) || int32(uint32(*(*uint16)(unsafe.Pointer(pProbe + 100))&0x8>>3)) != 0 || libc.Int32FromUint16((*TIndex)(unsafe.Pointer(pProbe)).FnKeyCol) == int32(1) && (*TIndex)(unsafe.Pointer(pProbe)).FonError != 0 && libc.Int32FromUint16(eOp)&int32(WO_EQ) != 0 {
*(*Tu32)(unsafe.Pointer(pNew + 48)) |= uint32(WHERE_ONEROW)
} else {
*(*Tu32)(unsafe.Pointer(pNew + 48)) |= uint32(WHERE_UNQ_WANTED)
}
}
if libc.Int32FromUint8((*(*TWhereScan)(unsafe.Pointer(bp))).FiEquiv) > int32(1) {
*(*Tu32)(unsafe.Pointer(pNew + 48)) |= uint32(WHERE_TRANSCONS)
}
} else {
if libc.Int32FromUint16(eOp)&int32(WO_ISNULL) != 0 {
*(*Tu32)(unsafe.Pointer(pNew + 48)) |= uint32(WHERE_COLUMN_NULL)
} else {
nVecLen = _whereRangeVectorLen(tls, pParse, (*TSrcItem)(unsafe.Pointer(pSrc)).FiCursor, pProbe, libc.Int32FromUint16(saved_nEq), pTerm)
if libc.Int32FromUint16(eOp)&(libc.Int32FromInt32(WO_EQ)<<(libc.Int32FromInt32(TK_GT)-libc.Int32FromInt32(TK_EQ))|libc.Int32FromInt32(WO_EQ)<<(libc.Int32FromInt32(TK_GE)-libc.Int32FromInt32(TK_EQ))) != 0 {
*(*Tu32)(unsafe.Pointer(pNew + 48)) |= libc.Uint32FromInt32(libc.Int32FromInt32(WHERE_COLUMN_RANGE) | libc.Int32FromInt32(WHERE_BTM_LIMIT))
(*(*struct {
FnEq Tu16
FnBtm Tu16
FnTop Tu16
FnDistinctCol Tu16
FpIndex uintptr
FpOrderBy uintptr
})(unsafe.Pointer(pNew + 24))).FnBtm = libc.Uint16FromInt32(nVecLen)
pBtm = pTerm
pTop = uintptr(0)
if libc.Int32FromUint16((*TWhereTerm)(unsafe.Pointer(pTerm)).FwtFlags)&int32(TERM_LIKEOPT) != 0 {
/* Range constraints that come from the LIKE optimization are
** always used in pairs. */
pTop = pTerm + 1*56
if _whereLoopResize(tls, db, pNew, libc.Int32FromUint16((*TWhereLoop)(unsafe.Pointer(pNew)).FnLTerm)+int32(1)) != 0 {
break
} /* OOM */
v8 = pNew + 52
v7 = *(*Tu16)(unsafe.Pointer(v8))
*(*Tu16)(unsafe.Pointer(v8))++
*(*uintptr)(unsafe.Pointer((*TWhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(v7)*8)) = pTop
*(*Tu32)(unsafe.Pointer(pNew + 48)) |= uint32(WHERE_TOP_LIMIT)
(*(*struct {
FnEq Tu16
FnBtm Tu16
FnTop Tu16
FnDistinctCol Tu16
FpIndex uintptr
FpOrderBy uintptr
})(unsafe.Pointer(pNew + 24))).FnTop = uint16(1)
}
} else {
*(*Tu32)(unsafe.Pointer(pNew + 48)) |= libc.Uint32FromInt32(libc.Int32FromInt32(WHERE_COLUMN_RANGE) | libc.Int32FromInt32(WHERE_TOP_LIMIT))
(*(*struct {
FnEq Tu16
FnBtm Tu16
FnTop Tu16
FnDistinctCol Tu16
FpIndex uintptr
FpOrderBy uintptr
})(unsafe.Pointer(pNew + 24))).FnTop = libc.Uint16FromInt32(nVecLen)
pTop = pTerm
if (*TWhereLoop)(unsafe.Pointer(pNew)).FwsFlags&uint32(WHERE_BTM_LIMIT) != uint32(0) {
v9 = *(*uintptr)(unsafe.Pointer((*TWhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(libc.Int32FromUint16((*TWhereLoop)(unsafe.Pointer(pNew)).FnLTerm)-int32(2))*8))
} else {
v9 = uintptr(0)
}
pBtm = v9
}
}
}
}
/* At this point pNew->nOut is set to the number of rows expected to
** be visited by the index scan before considering term pTerm, or the
** values of nIn and nInMul. In other words, assuming that all
** "x IN(...)" terms are replaced with "x = ?". This block updates
** the value of pNew->nOut to account for pTerm (but not nIn/nInMul). */
if (*TWhereLoop)(unsafe.Pointer(pNew)).FwsFlags&uint32(WHERE_COLUMN_RANGE) != 0 {
/* Adjust nOut using stat4 data. Or, if there is no stat4
** data, using some other estimate. */
_whereRangeScanEst(tls, pParse, pBuilder, pBtm, pTop, pNew)
} else {
v11 = pNew + 24
*(*Tu16)(unsafe.Pointer(v11))++
v10 = *(*Tu16)(unsafe.Pointer(v11))
nEq = libc.Int32FromUint16(v10)
if int32((*TWhereTerm)(unsafe.Pointer(pTerm)).FtruthProb) <= 0 && int32(*(*Ti16)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pProbe)).FaiColumn + uintptr(saved_nEq)*2))) >= 0 {
p12 = pNew + 22
*(*TLogEst)(unsafe.Pointer(p12)) = TLogEst(int32(*(*TLogEst)(unsafe.Pointer(p12))) + int32((*TWhereTerm)(unsafe.Pointer(pTerm)).FtruthProb))
p13 = pNew + 22
*(*TLogEst)(unsafe.Pointer(p13)) = TLogEst(int32(*(*TLogEst)(unsafe.Pointer(p13))) - nIn)
} else {
*(*TtRowcnt)(unsafe.Pointer(bp + 112)) = uint64(0)
if int32(nInMul) == 0 && (*TIndex)(unsafe.Pointer(pProbe)).FnSample != 0 && libc.Int32FromUint16((*(*struct {
FnEq Tu16
FnBtm Tu16
FnTop Tu16
FnDistinctCol Tu16
FpIndex uintptr
FpOrderBy uintptr
})(unsafe.Pointer(pNew + 24))).FnEq) <= (*TIndex)(unsafe.Pointer(pProbe)).FnSampleCol && (libc.Int32FromUint16(eOp)&int32(WO_IN) == 0 || (*TExpr)(unsafe.Pointer((*TWhereTerm)(unsafe.Pointer(pTerm)).FpExpr)).Fflags&uint32(EP_xIsSelect) == uint32(0)) && (*Tsqlite3)(unsafe.Pointer(db)).FdbOptFlags&libc.Uint32FromInt32(libc.Int32FromInt32(SQLITE_Stat4)) == uint32(0) {
pExpr1 = (*TWhereTerm)(unsafe.Pointer(pTerm)).FpExpr
if libc.Int32FromUint16(eOp)&(libc.Int32FromInt32(WO_EQ)|libc.Int32FromInt32(WO_ISNULL)|libc.Int32FromInt32(WO_IS)) != 0 {
rc = _whereEqualScanEst(tls, pParse, pBuilder, (*TExpr)(unsafe.Pointer(pExpr1)).FpRight, bp+112)
} else {
rc = _whereInScanEst(tls, pParse, pBuilder, *(*uintptr)(unsafe.Pointer(pExpr1 + 32)), bp+112)
}
if rc == int32(SQLITE_NOTFOUND) {
rc = SQLITE_OK
}
if rc != SQLITE_OK {
break
} /* Jump out of the pTerm loop */
if *(*TtRowcnt)(unsafe.Pointer(bp + 112)) != 0 {
(*TWhereLoop)(unsafe.Pointer(pNew)).FnOut = _sqlite3LogEst(tls, *(*TtRowcnt)(unsafe.Pointer(bp + 112)))
if nEq == int32(1) && int32((*TWhereLoop)(unsafe.Pointer(pNew)).FnOut)+int32(10) > int32(*(*TLogEst)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pProbe)).FaiRowLogEst))) {
p14 = pTerm + 18
*(*Tu16)(unsafe.Pointer(p14)) = Tu16(int32(*(*Tu16)(unsafe.Pointer(p14))) | libc.Int32FromInt32(TERM_HIGHTRUTH))
if libc.Int32FromUint16((*TWhereTerm)(unsafe.Pointer(pTerm)).FwtFlags)&int32(TERM_HEURTRUTH) != 0 {
/* If the term has previously been used with an assumption of
** higher selectivity, then set the flag to rerun the
** loop computations. */
p15 = pBuilder + 45
*(*uint8)(unsafe.Pointer(p15)) = uint8(int32(*(*uint8)(unsafe.Pointer(p15))) | libc.Int32FromInt32(SQLITE_BLDF2_2NDPASS))
}
}
if int32((*TWhereLoop)(unsafe.Pointer(pNew)).FnOut) > int32(saved_nOut) {
(*TWhereLoop)(unsafe.Pointer(pNew)).FnOut = saved_nOut
}
p16 = pNew + 22
*(*TLogEst)(unsafe.Pointer(p16)) = TLogEst(int32(*(*TLogEst)(unsafe.Pointer(p16))) - nIn)
}
}
if *(*TtRowcnt)(unsafe.Pointer(bp + 112)) == uint64(0) {
p17 = pNew + 22
*(*TLogEst)(unsafe.Pointer(p17)) = TLogEst(int32(*(*TLogEst)(unsafe.Pointer(p17))) + (int32(*(*TLogEst)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pProbe)).FaiRowLogEst + uintptr(nEq)*2))) - int32(*(*TLogEst)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pProbe)).FaiRowLogEst + uintptr(nEq-int32(1))*2)))))
if libc.Int32FromUint16(eOp)&int32(WO_ISNULL) != 0 {
/* TUNING: If there is no likelihood() value, assume that a
** "col IS NULL" expression matches twice as many rows
** as (col=?). */
p18 = pNew + 22
*(*TLogEst)(unsafe.Pointer(p18)) = TLogEst(int32(*(*TLogEst)(unsafe.Pointer(p18))) + libc.Int32FromInt32(10))
}
}
}
}
/* Set rCostIdx to the estimated cost of visiting selected rows in the
** index. The estimate is the sum of two values:
** 1. The cost of doing one search-by-key to find the first matching
** entry
** 2. Stepping forward in the index pNew->nOut times to find all
** additional matching entries.
*/
if int32(uint32(*(*uint16)(unsafe.Pointer(pProbe + 100))&0x3>>0)) == int32(SQLITE_IDXTYPE_IPK) {
/* The pProbe->szIdxRow is low for an IPK table since the interior
** pages are small. Thus szIdxRow gives a good estimate of seek cost.
** But the leaf pages are full-size, so pProbe->szIdxRow would badly
** under-estimate the scanning cost. */
rCostIdx = int16(int32((*TWhereLoop)(unsafe.Pointer(pNew)).FnOut) + int32(16))
} else {
rCostIdx = int16(int32((*TWhereLoop)(unsafe.Pointer(pNew)).FnOut) + int32(1) + int32(15)*int32((*TIndex)(unsafe.Pointer(pProbe)).FszIdxRow)/int32((*TTable)(unsafe.Pointer((*TSrcItem)(unsafe.Pointer(pSrc)).FpSTab)).FszTabRow))
}
rCostIdx = _sqlite3LogEstAdd(tls, rLogSize, rCostIdx)
/* Estimate the cost of running the loop. If all data is coming
** from the index, then this is just the cost of doing the index
** lookup and scan. But if some data is coming out of the main table,
** we also have to add in the cost of doing pNew->nOut searches to
** locate the row in the main table that corresponds to the index entry.
*/
(*TWhereLoop)(unsafe.Pointer(pNew)).FrRun = rCostIdx
if (*TWhereLoop)(unsafe.Pointer(pNew)).FwsFlags&libc.Uint32FromInt32(libc.Int32FromInt32(WHERE_IDX_ONLY)|libc.Int32FromInt32(WHERE_IPK)|libc.Int32FromInt32(WHERE_EXPRIDX)) == uint32(0) {
(*TWhereLoop)(unsafe.Pointer(pNew)).FrRun = _sqlite3LogEstAdd(tls, (*TWhereLoop)(unsafe.Pointer(pNew)).FrRun, int16(int32((*TWhereLoop)(unsafe.Pointer(pNew)).FnOut)+int32(16)))
}
nOutUnadjusted = (*TWhereLoop)(unsafe.Pointer(pNew)).FnOut
p19 = pNew + 20
*(*TLogEst)(unsafe.Pointer(p19)) = TLogEst(int32(*(*TLogEst)(unsafe.Pointer(p19))) + (int32(nInMul) + nIn))
p20 = pNew + 22
*(*TLogEst)(unsafe.Pointer(p20)) = TLogEst(int32(*(*TLogEst)(unsafe.Pointer(p20))) + (int32(nInMul) + nIn))
_whereLoopOutputAdjust(tls, (*TWhereLoopBuilder)(unsafe.Pointer(pBuilder)).FpWC, pNew, rSize)
rc = _whereLoopInsert(tls, pBuilder, pNew)
if (*TWhereLoop)(unsafe.Pointer(pNew)).FwsFlags&uint32(WHERE_COLUMN_RANGE) != 0 {
(*TWhereLoop)(unsafe.Pointer(pNew)).FnOut = saved_nOut
} else {
(*TWhereLoop)(unsafe.Pointer(pNew)).FnOut = nOutUnadjusted
}
if (*TWhereLoop)(unsafe.Pointer(pNew)).FwsFlags&uint32(WHERE_TOP_LIMIT) == uint32(0) && libc.Int32FromUint16((*(*struct {
FnEq Tu16
FnBtm Tu16
FnTop Tu16
FnDistinctCol Tu16
FpIndex uintptr
FpOrderBy uintptr
})(unsafe.Pointer(pNew + 24))).FnEq) < libc.Int32FromUint16((*TIndex)(unsafe.Pointer(pProbe)).FnColumn) && (libc.Int32FromUint16((*(*struct {
FnEq Tu16
FnBtm Tu16
FnTop Tu16
FnDistinctCol Tu16
FpIndex uintptr
FpOrderBy uintptr
})(unsafe.Pointer(pNew + 24))).FnEq) < libc.Int32FromUint16((*TIndex)(unsafe.Pointer(pProbe)).FnKeyCol) || int32(uint32(*(*uint16)(unsafe.Pointer(pProbe + 100))&0x3>>0)) != int32(SQLITE_IDXTYPE_PRIMARYKEY)) {
if libc.Int32FromUint16((*(*struct {
FnEq Tu16
FnBtm Tu16
FnTop Tu16
FnDistinctCol Tu16
FpIndex uintptr
FpOrderBy uintptr
})(unsafe.Pointer(pNew + 24))).FnEq) > int32(3) {
_sqlite3ProgressCheck(tls, pParse)
}
_whereLoopAddBtreeIndex(tls, pBuilder, pSrc, pProbe, int16(int32(nInMul)+nIn))
}
(*TWhereLoop)(unsafe.Pointer(pNew)).FnOut = saved_nOut
(*TWhereLoopBuilder)(unsafe.Pointer(pBuilder)).FnRecValid = nRecValid
goto _1
_1:
;
pTerm = _whereScanNext(tls, bp)
}
(*TWhereLoop)(unsafe.Pointer(pNew)).Fprereq = saved_prereq
(*(*struct {
FnEq Tu16
FnBtm Tu16
FnTop Tu16
FnDistinctCol Tu16
FpIndex uintptr
FpOrderBy uintptr
})(unsafe.Pointer(pNew + 24))).FnEq = saved_nEq
(*(*struct {
FnEq Tu16
FnBtm Tu16
FnTop Tu16
FnDistinctCol Tu16
FpIndex uintptr
FpOrderBy uintptr
})(unsafe.Pointer(pNew + 24))).FnBtm = saved_nBtm
(*(*struct {
FnEq Tu16
FnBtm Tu16
FnTop Tu16
FnDistinctCol Tu16
FpIndex uintptr
FpOrderBy uintptr
})(unsafe.Pointer(pNew + 24))).FnTop = saved_nTop
(*TWhereLoop)(unsafe.Pointer(pNew)).FnSkip = saved_nSkip
(*TWhereLoop)(unsafe.Pointer(pNew)).FwsFlags = saved_wsFlags
(*TWhereLoop)(unsafe.Pointer(pNew)).FnOut = saved_nOut
(*TWhereLoop)(unsafe.Pointer(pNew)).FnLTerm = saved_nLTerm
/* Consider using a skip-scan if there are no WHERE clause constraints
** available for the left-most terms of the index, and if the average
** number of repeats in the left-most terms is at least 18.
**
** The magic number 18 is selected on the basis that scanning 17 rows
** is almost always quicker than an index seek (even though if the index
** contains fewer than 2^17 rows we assume otherwise in other parts of
** the code). And, even if it is not, it should not be too much slower.
** On the other hand, the extra seeks could end up being significantly
** more expensive. */
if v22 = libc.Int32FromUint16(saved_nEq) == libc.Int32FromUint16(saved_nSkip) && libc.Int32FromUint16(saved_nEq)+int32(1) < libc.Int32FromUint16((*TIndex)(unsafe.Pointer(pProbe)).FnKeyCol) && libc.Int32FromUint16(saved_nEq) == libc.Int32FromUint16((*TWhereLoop)(unsafe.Pointer(pNew)).FnLTerm) && int32(uint32(*(*uint16)(unsafe.Pointer(pProbe + 100))&0x40>>6)) == 0 && int32(uint32(*(*uint16)(unsafe.Pointer(pProbe + 100))&0x80>>7)) != 0 && (*Tsqlite3)(unsafe.Pointer(db)).FdbOptFlags&libc.Uint32FromInt32(libc.Int32FromInt32(SQLITE_SkipScan)) == uint32(0) && int32(*(*TLogEst)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pProbe)).FaiRowLogEst + uintptr(libc.Int32FromUint16(saved_nEq)+int32(1))*2))) >= int32(42); v22 {
v21 = _whereLoopResize(tls, db, pNew, libc.Int32FromUint16((*TWhereLoop)(unsafe.Pointer(pNew)).FnLTerm)+int32(1))
rc = v21
}
if v22 && v21 == SQLITE_OK {
(*(*struct {
FnEq Tu16
FnBtm Tu16
FnTop Tu16
FnDistinctCol Tu16
FpIndex uintptr
FpOrderBy uintptr
})(unsafe.Pointer(pNew + 24))).FnEq++
(*TWhereLoop)(unsafe.Pointer(pNew)).FnSkip++
v24 = pNew + 52
v23 = *(*Tu16)(unsafe.Pointer(v24))
*(*Tu16)(unsafe.Pointer(v24))++
*(*uintptr)(unsafe.Pointer((*TWhereLoop)(unsafe.Pointer(pNew)).FaLTerm + uintptr(v23)*8)) = uintptr(0)
*(*Tu32)(unsafe.Pointer(pNew + 48)) |= uint32(WHERE_SKIPSCAN)
nIter = int16(int32(*(*TLogEst)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pProbe)).FaiRowLogEst + uintptr(saved_nEq)*2))) - int32(*(*TLogEst)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pProbe)).FaiRowLogEst + uintptr(libc.Int32FromUint16(saved_nEq)+int32(1))*2))))
p25 = pNew + 22
*(*TLogEst)(unsafe.Pointer(p25)) = TLogEst(int32(*(*TLogEst)(unsafe.Pointer(p25))) - int32(nIter))
/* TUNING: Because uncertainties in the estimates for skip-scan queries,
** add a 1.375 fudge factor to make skip-scan slightly less likely. */
nIter = TLogEst(int32(nIter) + libc.Int32FromInt32(5))
_whereLoopAddBtreeIndex(tls, pBuilder, pSrc, pProbe, int16(int32(nIter)+int32(nInMul)))
(*TWhereLoop)(unsafe.Pointer(pNew)).FnOut = saved_nOut
(*(*struct {
FnEq Tu16
FnBtm Tu16
FnTop Tu16
FnDistinctCol Tu16
FpIndex uintptr
FpOrderBy uintptr
})(unsafe.Pointer(pNew + 24))).FnEq = saved_nEq
(*TWhereLoop)(unsafe.Pointer(pNew)).FnSkip = saved_nSkip
(*TWhereLoop)(unsafe.Pointer(pNew)).FwsFlags = saved_wsFlags
}
return rc
} | 0.7285 | umputun/spot | vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go | umputun/spot | vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go | MIT | go |
C documentation
/*
** pIdx is an index that covers all of the low-number columns used by
** pWInfo->pSelect (columns from 0 through 62) or an index that has
** expression terms. Hence, we cannot determine whether or not it is
** a covering index by using the colUsed bitmasks. We have to do a search
** to see if the index is covering. This routine does that search.
**
** The return value is one of these:
**
** 0 The index is definitely not a covering index
**
** WHERE_IDX_ONLY The index is definitely a covering index
**
** WHERE_EXPRIDX The index is likely a covering index, but it is
** difficult to determine precisely because of the
** expressions that are indexed. Score it as a
** covering index, but still keep the main table open
** just in case we need it.
**
** This routine is an optimization. It is always safe to return zero.
** But returning one of the other two values when zero should have been
** returned can lead to incorrect bytecode and assertion faults.
*/ | func _whereIsCoveringIndex(tls *libc.TLS, pWInfo uintptr, pIdx uintptr, iTabCur int32) (r Tu32) {
bp := tls.Alloc(64)
defer tls.Free(64)
var i, rc int32
var _ /* ck at bp+0 */ TCoveringIndexCheck1
var _ /* w at bp+16 */ TWalker
_, _ = i, rc
if (*TWhereInfo)(unsafe.Pointer(pWInfo)).FpSelect == uintptr(0) {
/* We don't have access to the full query, so we cannot check to see
** if pIdx is covering. Assume it is not. */
return uint32(0)
}
if int32(uint32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x1000>>12)) == 0 {
i = 0
for {
if !(i < libc.Int32FromUint16((*TIndex)(unsafe.Pointer(pIdx)).FnColumn)) {
break
}
if int32(*(*Ti16)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(i)*2))) >= libc.Int32FromUint64(libc.Uint64FromInt64(8)*libc.Uint64FromInt32(8))-libc.Int32FromInt32(1) {
break
}
goto _1
_1:
;
i++
}
if i >= libc.Int32FromUint16((*TIndex)(unsafe.Pointer(pIdx)).FnColumn) {
/* pIdx does not index any columns greater than 62, but we know from
** colMask that columns greater than 62 are used, so this is not a
** covering index */
return uint32(0)
}
}
(*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FpIdx = pIdx
(*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FiTabCur = iTabCur
(*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FbExpr = uint8(0)
(*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FbUnidx = uint8(0)
libc.Xmemset(tls, bp+16, 0, uint64(48))
(*(*TWalker)(unsafe.Pointer(bp + 16))).FxExprCallback = __ccgo_fp(_whereIsCoveringIndexWalkCallback)
(*(*TWalker)(unsafe.Pointer(bp + 16))).FxSelectCallback = __ccgo_fp(_sqlite3SelectWalkNoop)
*(*uintptr)(unsafe.Pointer(bp + 16 + 40)) = bp
_sqlite3WalkSelect(tls, bp+16, (*TWhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)
if (*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FbUnidx != 0 {
rc = 0
} else {
if (*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FbExpr != 0 {
rc = int32(WHERE_EXPRIDX)
} else {
rc = int32(WHERE_IDX_ONLY)
}
}
return libc.Uint32FromInt32(rc)
} | func _exprIdxCover(tls *libc.TLS, pWalker uintptr, pExpr uintptr) (r int32) {
if libc.Int32FromUint8((*TExpr)(unsafe.Pointer(pExpr)).Fop) == int32(TK_COLUMN) && (*TExpr)(unsafe.Pointer(pExpr)).FiTable == (*TIdxCover)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pWalker + 40)))).FiCur && int32(_sqlite3TableColumnToIndex(tls, (*TIdxCover)(unsafe.Pointer(*(*uintptr)(unsafe.Pointer(pWalker + 40)))).FpIdx, (*TExpr)(unsafe.Pointer(pExpr)).FiColumn)) < 0 {
(*TWalker)(unsafe.Pointer(pWalker)).FeCode = uint16(1)
return int32(WRC_Abort)
}
return WRC_Continue
} | 0.726296 | umputun/spot | vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go | umputun/spot | vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go | MIT | go |
C documentation
/*
** pIdx is an index that covers all of the low-number columns used by
** pWInfo->pSelect (columns from 0 through 62) or an index that has
** expression terms. Hence, we cannot determine whether or not it is
** a covering index by using the colUsed bitmasks. We have to do a search
** to see if the index is covering. This routine does that search.
**
** The return value is one of these:
**
** 0 The index is definitely not a covering index
**
** WHERE_IDX_ONLY The index is definitely a covering index
**
** WHERE_EXPRIDX The index is likely a covering index, but it is
** difficult to determine precisely because of the
** expressions that are indexed. Score it as a
** covering index, but still keep the main table open
** just in case we need it.
**
** This routine is an optimization. It is always safe to return zero.
** But returning one of the other two values when zero should have been
** returned can lead to incorrect bytecode and assertion faults.
*/ | func _whereIsCoveringIndex(tls *libc.TLS, pWInfo uintptr, pIdx uintptr, iTabCur int32) (r Tu32) {
bp := tls.Alloc(64)
defer tls.Free(64)
var i, rc int32
var _ /* ck at bp+0 */ TCoveringIndexCheck1
var _ /* w at bp+16 */ TWalker
_, _ = i, rc
if (*TWhereInfo)(unsafe.Pointer(pWInfo)).FpSelect == uintptr(0) {
/* We don't have access to the full query, so we cannot check to see
** if pIdx is covering. Assume it is not. */
return uint32(0)
}
if int32(uint32(*(*uint16)(unsafe.Pointer(pIdx + 100))&0x1000>>12)) == 0 {
i = 0
for {
if !(i < libc.Int32FromUint16((*TIndex)(unsafe.Pointer(pIdx)).FnColumn)) {
break
}
if int32(*(*Ti16)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(i)*2))) >= libc.Int32FromUint64(libc.Uint64FromInt64(8)*libc.Uint64FromInt32(8))-libc.Int32FromInt32(1) {
break
}
goto _1
_1:
;
i++
}
if i >= libc.Int32FromUint16((*TIndex)(unsafe.Pointer(pIdx)).FnColumn) {
/* pIdx does not index any columns greater than 62, but we know from
** colMask that columns greater than 62 are used, so this is not a
** covering index */
return uint32(0)
}
}
(*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FpIdx = pIdx
(*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FiTabCur = iTabCur
(*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FbExpr = uint8(0)
(*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FbUnidx = uint8(0)
libc.Xmemset(tls, bp+16, 0, uint64(48))
(*(*TWalker)(unsafe.Pointer(bp + 16))).FxExprCallback = __ccgo_fp(_whereIsCoveringIndexWalkCallback)
(*(*TWalker)(unsafe.Pointer(bp + 16))).FxSelectCallback = __ccgo_fp(_sqlite3SelectWalkNoop)
*(*uintptr)(unsafe.Pointer(bp + 16 + 40)) = bp
_sqlite3WalkSelect(tls, bp+16, (*TWhereInfo)(unsafe.Pointer(pWInfo)).FpSelect)
if (*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FbUnidx != 0 {
rc = 0
} else {
if (*(*TCoveringIndexCheck1)(unsafe.Pointer(bp))).FbExpr != 0 {
rc = int32(WHERE_EXPRIDX)
} else {
rc = int32(WHERE_IDX_ONLY)
}
}
return libc.Uint32FromInt32(rc)
} | func _whereAddIndexedExpr(tls *libc.TLS, pParse uintptr, pIdx uintptr, iIdxCur int32, pTabItem uintptr) {
var i, j int32
var p, pArg, pExpr, pTab uintptr
_, _, _, _, _, _ = i, j, p, pArg, pExpr, pTab
pTab = (*TIndex)(unsafe.Pointer(pIdx)).FpTable
i = 0
for {
if !(i < libc.Int32FromUint16((*TIndex)(unsafe.Pointer(pIdx)).FnColumn)) {
break
}
j = int32(*(*Ti16)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pIdx)).FaiColumn + uintptr(i)*2)))
if j == -int32(2) {
pExpr = (*(*TExprList_item)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pIdx)).FaColExpr + 8 + uintptr(i)*32))).FpExpr
} else {
if j >= 0 && libc.Int32FromUint16((*(*TColumn)(unsafe.Pointer((*TTable)(unsafe.Pointer(pTab)).FaCol + uintptr(j)*16))).FcolFlags)&int32(COLFLAG_VIRTUAL) != 0 {
pExpr = _sqlite3ColumnExpr(tls, pTab, (*TTable)(unsafe.Pointer(pTab)).FaCol+uintptr(j)*16)
} else {
goto _1
}
}
if _sqlite3ExprIsConstant(tls, uintptr(0), pExpr) != 0 {
goto _1
}
p = _sqlite3DbMallocRaw(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, uint64(32))
if p == uintptr(0) {
break
}
(*TIndexedExpr)(unsafe.Pointer(p)).FpIENext = (*TParse)(unsafe.Pointer(pParse)).FpIdxEpr
(*TIndexedExpr)(unsafe.Pointer(p)).FpExpr = _sqlite3ExprDup(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, pExpr, 0)
(*TIndexedExpr)(unsafe.Pointer(p)).FiDataCur = (*TSrcItem)(unsafe.Pointer(pTabItem)).FiCursor
(*TIndexedExpr)(unsafe.Pointer(p)).FiIdxCur = iIdxCur
(*TIndexedExpr)(unsafe.Pointer(p)).FiIdxCol = i
(*TIndexedExpr)(unsafe.Pointer(p)).FbMaybeNullRow = libc.BoolUint8(libc.Int32FromUint8((*TSrcItem)(unsafe.Pointer(pTabItem)).Ffg.Fjointype)&(libc.Int32FromInt32(JT_LEFT)|libc.Int32FromInt32(JT_LTORJ)|libc.Int32FromInt32(JT_RIGHT)) != 0)
if _sqlite3IndexAffinityStr(tls, (*TParse)(unsafe.Pointer(pParse)).Fdb, pIdx) != 0 {
(*TIndexedExpr)(unsafe.Pointer(p)).Faff = libc.Uint8FromInt8(*(*int8)(unsafe.Pointer((*TIndex)(unsafe.Pointer(pIdx)).FzColAff + uintptr(i))))
}
(*TParse)(unsafe.Pointer(pParse)).FpIdxEpr = p
if (*TIndexedExpr)(unsafe.Pointer(p)).FpIENext == uintptr(0) {
pArg = pParse + 96
_sqlite3ParserAddCleanup(tls, pParse, __ccgo_fp(_whereIndexedExprCleanup), pArg)
}
goto _1
_1:
;
i++
}
} | 0.721195 | umputun/spot | vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go | umputun/spot | vendor/modernc.org/sqlite/lib/sqlite_freebsd_amd64.go | MIT | go |
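The five rows above share the whereIsCoveringIndex docstring, whose subject is whether a query can be answered from an index alone without opening the main table. As an illustrative sketch of the same idea from the SQL side, assuming the modernc.org/sqlite driver (which registers under the driver name "sqlite") and noting that the exact EXPLAIN QUERY PLAN wording varies between SQLite versions:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "modernc.org/sqlite" // assumed driver; registers itself as "sqlite"
)

func main() {
	db, err := sql.Open("sqlite", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// An index on (a, b) can answer "SELECT b FROM t WHERE a = ?" on its own,
	// i.e. it is a covering index for that query.
	for _, stmt := range []string{
		"CREATE TABLE t(a INTEGER, b INTEGER, c TEXT)",
		"CREATE INDEX t_ab ON t(a, b)",
	} {
		if _, err := db.Exec(stmt); err != nil {
			log.Fatal(err)
		}
	}

	rows, err := db.Query("EXPLAIN QUERY PLAN SELECT b FROM t WHERE a = 1")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	// EXPLAIN QUERY PLAN yields (id, parent, notused, detail); the detail text
	// typically reads "SEARCH t USING COVERING INDEX t_ab (a=?)" here.
	for rows.Next() {
		var id, parent, notused int
		var detail string
		if err := rows.Scan(&id, &parent, &notused, &detail); err != nil {
			log.Fatal(err)
		}
		fmt.Println(detail)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}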
CreateConfiguration API operation for AmazonMQ.
Creates a new configuration for the specified configuration name. Amazon
MQ uses the default configuration (the engine type and version).
Returns awserr.Error for service API and SDK errors. Use runtime type assertions
with awserr.Error's Code and Message methods to get detailed information about
the error.
See the AWS API reference guide for AmazonMQ's
API operation CreateConfiguration for usage and error information.
Returned Error Types:
- BadRequestException
Returns information about an error.
- InternalServerErrorException
Returns information about an error.
- ConflictException
Returns information about an error.
- ForbiddenException
Returns information about an error.
See also, https://docs.aws.amazon.com/goto/WebAPI/mq-2017-11-27/CreateConfiguration | func (c *MQ) CreateConfiguration(input *CreateConfigurationRequest) (*CreateConfigurationResponse, error) {
req, out := c.CreateConfigurationRequest(input)
return out, req.Send()
} | func (c *Kafka) CreateConfiguration(input *CreateConfigurationInput) (*CreateConfigurationOutput, error) {
req, out := c.CreateConfigurationRequest(input)
return out, req.Send()
} | 0.764275 | aws/aws-sdk-go | service/mq/api.go | aws/aws-sdk-go | service/kafka/api.go | Apache-2.0 | go |
CreateConfiguration API operation for AmazonMQ.
Creates a new configuration for the specified configuration name. Amazon
MQ uses the default configuration (the engine type and version).
Returns awserr.Error for service API and SDK errors. Use runtime type assertions
with awserr.Error's Code and Message methods to get detailed information about
the error.
See the AWS API reference guide for AmazonMQ's
API operation CreateConfiguration for usage and error information.
Returned Error Types:
- BadRequestException
Returns information about an error.
- InternalServerErrorException
Returns information about an error.
- ConflictException
Returns information about an error.
- ForbiddenException
Returns information about an error.
See also, https://docs.aws.amazon.com/goto/WebAPI/mq-2017-11-27/CreateConfiguration | func (c *MQ) CreateConfiguration(input *CreateConfigurationRequest) (*CreateConfigurationResponse, error) {
req, out := c.CreateConfigurationRequest(input)
return out, req.Send()
} | func (c *PrometheusService) CreateLoggingConfiguration(input *CreateLoggingConfigurationInput) (*CreateLoggingConfigurationOutput, error) {
req, out := c.CreateLoggingConfigurationRequest(input)
return out, req.Send()
} | 0.702397 | aws/aws-sdk-go | service/mq/api.go | aws/aws-sdk-go | service/prometheusservice/api.go | Apache-2.0 | go |
CreateConfiguration API operation for AmazonMQ.
Creates a new configuration for the specified configuration name. Amazon
MQ uses the default configuration (the engine type and version).
Returns awserr.Error for service API and SDK errors. Use runtime type assertions
with awserr.Error's Code and Message methods to get detailed information about
the error.
See the AWS API reference guide for AmazonMQ's
API operation CreateConfiguration for usage and error information.
Returned Error Types:
- BadRequestException
Returns information about an error.
- InternalServerErrorException
Returns information about an error.
- ConflictException
Returns information about an error.
- ForbiddenException
Returns information about an error.
See also, https://docs.aws.amazon.com/goto/WebAPI/mq-2017-11-27/CreateConfiguration | func (c *MQ) CreateConfiguration(input *CreateConfigurationRequest) (*CreateConfigurationResponse, error) {
req, out := c.CreateConfigurationRequest(input)
return out, req.Send()
} | func (c *MQ) DescribeConfiguration(input *DescribeConfigurationInput) (*DescribeConfigurationOutput, error) {
req, out := c.DescribeConfigurationRequest(input)
return out, req.Send()
} | 0.690939 | aws/aws-sdk-go | service/mq/api.go | aws/aws-sdk-go | service/mq/api.go | Apache-2.0 | go |
CreateConfiguration API operation for AmazonMQ.
Creates a new configuration for the specified configuration name. Amazon
MQ uses the default configuration (the engine type and version).
Returns awserr.Error for service API and SDK errors. Use runtime type assertions
with awserr.Error's Code and Message methods to get detailed information about
the error.
See the AWS API reference guide for AmazonMQ's
API operation CreateConfiguration for usage and error information.
Returned Error Types:
- BadRequestException
Returns information about an error.
- InternalServerErrorException
Returns information about an error.
- ConflictException
Returns information about an error.
- ForbiddenException
Returns information about an error.
See also, https://docs.aws.amazon.com/goto/WebAPI/mq-2017-11-27/CreateConfiguration | func (c *MQ) CreateConfiguration(input *CreateConfigurationRequest) (*CreateConfigurationResponse, error) {
req, out := c.CreateConfigurationRequest(input)
return out, req.Send()
} | func (c *GroundStation) CreateConfig(input *CreateConfigInput) (*CreateConfigOutput, error) {
req, out := c.CreateConfigRequest(input)
return out, req.Send()
} | 0.686673 | aws/aws-sdk-go | service/mq/api.go | aws/aws-sdk-go | service/groundstation/api.go | Apache-2.0 | go |
CreateConfiguration API operation for AmazonMQ.
Creates a new configuration for the specified configuration name. Amazon
MQ uses the default configuration (the engine type and version).
Returns awserr.Error for service API and SDK errors. Use runtime type assertions
with awserr.Error's Code and Message methods to get detailed information about
the error.
See the AWS API reference guide for AmazonMQ's
API operation CreateConfiguration for usage and error information.
Returned Error Types:
- BadRequestException
Returns information about an error.
- InternalServerErrorException
Returns information about an error.
- ConflictException
Returns information about an error.
- ForbiddenException
Returns information about an error.
See also, https://docs.aws.amazon.com/goto/WebAPI/mq-2017-11-27/CreateConfiguration | func (c *MQ) CreateConfiguration(input *CreateConfigurationRequest) (*CreateConfigurationResponse, error) {
req, out := c.CreateConfigurationRequest(input)
return out, req.Send()
} | func (c *AppTest) CreateTestConfiguration(input *CreateTestConfigurationInput) (*CreateTestConfigurationOutput, error) {
req, out := c.CreateTestConfigurationRequest(input)
return out, req.Send()
} | 0.677814 | aws/aws-sdk-go | service/mq/api.go | aws/aws-sdk-go | service/apptest/api.go | Apache-2.0 | go |
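The five CreateConfiguration rows above wrap the same request/send pattern that aws-sdk-go generates for every operation. A hedged usage sketch follows; the request fields (Name, EngineType, EngineVersion), the response fields (Id, Arn), and the sample engine version are assumptions based on the Amazon MQ API reference rather than values confirmed by these rows:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/mq"
)

func main() {
	// Credentials and region are resolved from the environment / shared config.
	sess := session.Must(session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
	}))
	svc := mq.New(sess)

	// CreateConfiguration takes *mq.CreateConfigurationRequest, as shown in the
	// rows above; the concrete field names used here are assumed.
	out, err := svc.CreateConfiguration(&mq.CreateConfigurationRequest{
		Name:          aws.String("my-broker-config"),
		EngineType:    aws.String("ACTIVEMQ"),
		EngineVersion: aws.String("5.17.6"),
	})
	if err != nil {
		// awserr.Error exposes Code()/Message() for BadRequest, Conflict, Forbidden, etc.
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Id), aws.StringValue(out.Arn))
}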
Difference reports whether two lists of lengths nx and ny are equal
given the definition of equality provided as f.
This function returns an edit-script, which is a sequence of operations
needed to convert one list into the other. The following invariants for
the edit-script are maintained:
- eq == (es.Dist()==0)
- nx == es.LenX()
- ny == es.LenY()
This algorithm is not guaranteed to be an optimal solution (i.e., one that
produces an edit-script with a minimal Levenshtein distance). This algorithm
favors performance over optimality. The exact output is not guaranteed to
be stable and may change over time. | func Difference(nx, ny int, f EqualFunc) (es EditScript) {
// This algorithm is based on traversing what is known as an "edit-graph".
// See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
// by Eugene W. Myers. Since D can be as large as N itself, this is
// effectively O(N^2). Unlike the algorithm from that paper, we are not
// interested in the optimal path, but at least some "decent" path.
//
// For example, let X and Y be lists of symbols:
// X = [A B C A B B A]
// Y = [C B A B A C]
//
// The edit-graph can be drawn as the following:
// A B C A B B A
// ┌─────────────┐
// C │_|_|\|_|_|_|_│ 0
// B │_|\|_|_|\|\|_│ 1
// A │\|_|_|\|_|_|\│ 2
// B │_|\|_|_|\|\|_│ 3
// A │\|_|_|\|_|_|\│ 4
// C │ | |\| | | | │ 5
// └─────────────┘ 6
// 0 1 2 3 4 5 6 7
//
// List X is written along the horizontal axis, while list Y is written
// along the vertical axis. At any point on this grid, if the symbol in
// list X matches the corresponding symbol in list Y, then a '\' is drawn.
// The goal of any minimal edit-script algorithm is to find a path from the
// top-left corner to the bottom-right corner, while traveling through the
// fewest horizontal or vertical edges.
// A horizontal edge is equivalent to inserting a symbol from list X.
// A vertical edge is equivalent to inserting a symbol from list Y.
// A diagonal edge is equivalent to a matching symbol between both X and Y.
// Invariants:
// - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
// - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
//
// In general:
// - fwdFrontier.X < revFrontier.X
// - fwdFrontier.Y < revFrontier.Y
//
// Unless, it is time for the algorithm to terminate.
fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
fwdFrontier := fwdPath.point // Forward search frontier
revFrontier := revPath.point // Reverse search frontier
// Search budget bounds the cost of searching for better paths.
// The longest sequence of non-matching symbols that can be tolerated is
// approximately the square-root of the search budget.
searchBudget := 4 * (nx + ny) // O(n)
// Running the tests with the "cmp_debug" build tag prints a visualization
// of the algorithm running in real-time. This is educational for
// understanding how the algorithm works. See debug_enable.go.
f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
// The algorithm below is a greedy, meet-in-the-middle algorithm for
// computing sub-optimal edit-scripts between two lists.
//
// The algorithm is approximately as follows:
// - Searching for differences switches back-and-forth between
// a search that starts at the beginning (the top-left corner), and
// a search that starts at the end (the bottom-right corner).
// The goal of the search is to connect with the search
// from the opposite corner.
// - As we search, we build a path in a greedy manner,
// where the first match seen is added to the path (this is sub-optimal,
// but provides a decent result in practice). When matches are found,
// we try the next pair of symbols in the lists and follow all matches
// as far as possible.
// - When searching for matches, we search along a diagonal going
// through the "frontier" point. If no matches are found,
// we advance the frontier towards the opposite corner.
// - This algorithm terminates when either the X coordinates or the
// Y coordinates of the forward and reverse frontier points ever intersect.
// This algorithm is correct even if searching only in the forward direction
// or in the reverse direction. We do both because it is commonly observed
// that two lists commonly differ because elements were added to the front
// or end of the other list.
//
// Non-deterministically start with either the forward or reverse direction
// to introduce some deliberate instability so that we have the flexibility
// to change this algorithm in the future.
if flags.Deterministic || randBool {
goto forwardSearch
} else {
goto reverseSearch
}
forwardSearch:
{
// Forward search from the beginning.
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
goto finishSearch
}
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
// Search in a diagonal pattern for a match.
z := zigzag(i)
p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
switch {
case p.X >= revPath.X || p.Y < fwdPath.Y:
stop1 = true // Hit top-right corner
case p.Y >= revPath.Y || p.X < fwdPath.X:
stop2 = true // Hit bottom-left corner
case f(p.X, p.Y).Equal():
// Match found, so connect the path to this point.
fwdPath.connect(p, f)
fwdPath.append(Identity)
// Follow sequence of matches as far as possible.
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
if !f(fwdPath.X, fwdPath.Y).Equal() {
break
}
fwdPath.append(Identity)
}
fwdFrontier = fwdPath.point
stop1, stop2 = true, true
default:
searchBudget-- // Match not found
}
debug.Update()
}
// Advance the frontier towards reverse point.
if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
fwdFrontier.X++
} else {
fwdFrontier.Y++
}
goto reverseSearch
}
reverseSearch:
{
// Reverse search from the end.
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
goto finishSearch
}
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
// Search in a diagonal pattern for a match.
z := zigzag(i)
p := point{revFrontier.X - z, revFrontier.Y + z}
switch {
case fwdPath.X >= p.X || revPath.Y < p.Y:
stop1 = true // Hit bottom-left corner
case fwdPath.Y >= p.Y || revPath.X < p.X:
stop2 = true // Hit top-right corner
case f(p.X-1, p.Y-1).Equal():
// Match found, so connect the path to this point.
revPath.connect(p, f)
revPath.append(Identity)
// Follow sequence of matches as far as possible.
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
if !f(revPath.X-1, revPath.Y-1).Equal() {
break
}
revPath.append(Identity)
}
revFrontier = revPath.point
stop1, stop2 = true, true
default:
searchBudget-- // Match not found
}
debug.Update()
}
// Advance the frontier towards forward point.
if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
revFrontier.X--
} else {
revFrontier.Y--
}
goto forwardSearch
}
finishSearch:
// Join the forward and reverse paths and then append the reverse path.
fwdPath.connect(revPath.point, f)
for i := len(revPath.es) - 1; i >= 0; i-- {
t := revPath.es[i]
revPath.es = revPath.es[:i]
fwdPath.append(t)
}
debug.Finish()
return fwdPath.es
} | func Difference(nx, ny int, f EqualFunc) (es EditScript) {
// This algorithm is based on traversing what is known as an "edit-graph".
// See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
// by Eugene W. Myers. Since D can be as large as N itself, this is
// effectively O(N^2). Unlike the algorithm from that paper, we are not
// interested in the optimal path, but at least some "decent" path.
//
// For example, let X and Y be lists of symbols:
// X = [A B C A B B A]
// Y = [C B A B A C]
//
// The edit-graph can be drawn as the following:
// A B C A B B A
// ┌─────────────┐
// C │_|_|\|_|_|_|_│ 0
// B │_|\|_|_|\|\|_│ 1
// A │\|_|_|\|_|_|\│ 2
// B │_|\|_|_|\|\|_│ 3
// A │\|_|_|\|_|_|\│ 4
// C │ | |\| | | | │ 5
// └─────────────┘ 6
// 0 1 2 3 4 5 6 7
//
// List X is written along the horizontal axis, while list Y is written
// along the vertical axis. At any point on this grid, if the symbol in
// list X matches the corresponding symbol in list Y, then a '\' is drawn.
// The goal of any minimal edit-script algorithm is to find a path from the
// top-left corner to the bottom-right corner, while traveling through the
// fewest horizontal or vertical edges.
// A horizontal edge is equivalent to inserting a symbol from list X.
// A vertical edge is equivalent to inserting a symbol from list Y.
// A diagonal edge is equivalent to a matching symbol between both X and Y.
// Invariants:
// • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
// • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
//
// In general:
// • fwdFrontier.X < revFrontier.X
// • fwdFrontier.Y < revFrontier.Y
// Unless, it is time for the algorithm to terminate.
fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
fwdFrontier := fwdPath.point // Forward search frontier
revFrontier := revPath.point // Reverse search frontier
// Search budget bounds the cost of searching for better paths.
// The longest sequence of non-matching symbols that can be tolerated is
// approximately the square-root of the search budget.
searchBudget := 4 * (nx + ny) // O(n)
// Running the tests with the "cmp_debug" build tag prints a visualization
// of the algorithm running in real-time. This is educational for
// understanding how the algorithm works. See debug_enable.go.
f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
// The algorithm below is a greedy, meet-in-the-middle algorithm for
// computing sub-optimal edit-scripts between two lists.
//
// The algorithm is approximately as follows:
// • Searching for differences switches back-and-forth between
// a search that starts at the beginning (the top-left corner), and
// a search that starts at the end (the bottom-right corner). The goal of
// the search is to connect with the search from the opposite corner.
// • As we search, we build a path in a greedy manner, where the first
// match seen is added to the path (this is sub-optimal, but provides a
// decent result in practice). When matches are found, we try the next pair
// of symbols in the lists and follow all matches as far as possible.
// • When searching for matches, we search along a diagonal going
// through the "frontier" point. If no matches are found, we advance the
// frontier towards the opposite corner.
// • This algorithm terminates when either the X coordinates or the
// Y coordinates of the forward and reverse frontier points ever intersect.
// This algorithm is correct even if searching only in the forward direction
// or in the reverse direction. We do both because it is commonly observed
// that two lists commonly differ because elements were added to the front
// or end of the other list.
//
// Non-deterministically start with either the forward or reverse direction
// to introduce some deliberate instability so that we have the flexibility
// to change this algorithm in the future.
if flags.Deterministic || randBool {
goto forwardSearch
} else {
goto reverseSearch
}
forwardSearch:
{
// Forward search from the beginning.
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
goto finishSearch
}
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
// Search in a diagonal pattern for a match.
z := zigzag(i)
p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
switch {
case p.X >= revPath.X || p.Y < fwdPath.Y:
stop1 = true // Hit top-right corner
case p.Y >= revPath.Y || p.X < fwdPath.X:
stop2 = true // Hit bottom-left corner
case f(p.X, p.Y).Equal():
// Match found, so connect the path to this point.
fwdPath.connect(p, f)
fwdPath.append(Identity)
// Follow sequence of matches as far as possible.
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
if !f(fwdPath.X, fwdPath.Y).Equal() {
break
}
fwdPath.append(Identity)
}
fwdFrontier = fwdPath.point
stop1, stop2 = true, true
default:
searchBudget-- // Match not found
}
debug.Update()
}
// Advance the frontier towards reverse point.
if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
fwdFrontier.X++
} else {
fwdFrontier.Y++
}
goto reverseSearch
}
reverseSearch:
{
// Reverse search from the end.
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
goto finishSearch
}
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
// Search in a diagonal pattern for a match.
z := zigzag(i)
p := point{revFrontier.X - z, revFrontier.Y + z}
switch {
case fwdPath.X >= p.X || revPath.Y < p.Y:
stop1 = true // Hit bottom-left corner
case fwdPath.Y >= p.Y || revPath.X < p.X:
stop2 = true // Hit top-right corner
case f(p.X-1, p.Y-1).Equal():
// Match found, so connect the path to this point.
revPath.connect(p, f)
revPath.append(Identity)
// Follow sequence of matches as far as possible.
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
if !f(revPath.X-1, revPath.Y-1).Equal() {
break
}
revPath.append(Identity)
}
revFrontier = revPath.point
stop1, stop2 = true, true
default:
searchBudget-- // Match not found
}
debug.Update()
}
// Advance the frontier towards forward point.
if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
revFrontier.X--
} else {
revFrontier.Y--
}
goto forwardSearch
}
finishSearch:
// Join the forward and reverse paths and then append the reverse path.
fwdPath.connect(revPath.point, f)
for i := len(revPath.es) - 1; i >= 0; i-- {
t := revPath.es[i]
revPath.es = revPath.es[:i]
fwdPath.append(t)
}
debug.Finish()
return fwdPath.es
} | 0.991896 | k8snetworkplumbingwg/multus-cni | vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go | appscode/osm | vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go | Apache-2.0 | go |
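The comments in the Difference implementations above describe probing "in a diagonal pattern" around the current frontier using zigzag(i). The following self-contained sketch is an illustration of that visiting order, not the go-cmp code itself: it expands indices 0, 1, 2, ... into offsets 0, -1, +1, -2, +2, ... and shows how they trace the anti-diagonal through a hypothetical frontier point.

package main

import "fmt"

// zigzagOffset maps 0,1,2,3,4,... to 0,-1,+1,-2,+2,... — the order in which
// points on the diagonal through the frontier are probed for a match.
func zigzagOffset(i int) int {
	z := i / 2
	if i%2 == 1 {
		z = -(z + 1)
	}
	return z
}

func main() {
	type point struct{ X, Y int }
	frontier := point{X: 3, Y: 2} // hypothetical forward frontier
	for i := 0; i < 5; i++ {
		z := zigzagOffset(i)
		p := point{frontier.X + z, frontier.Y - z} // same form as the forward search probe
		fmt.Printf("probe %d: offset %+d -> (%d,%d)\n", i, z, p.X, p.Y)
	}
}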
Difference reports whether two lists of lengths nx and ny are equal
given the definition of equality provided as f.
This function returns an edit-script, which is a sequence of operations
needed to convert one list into the other. The following invariants for
the edit-script are maintained:
- eq == (es.Dist()==0)
- nx == es.LenX()
- ny == es.LenY()
This algorithm is not guaranteed to be an optimal solution (i.e., one that
produces an edit-script with a minimal Levenshtein distance). This algorithm
favors performance over optimality. The exact output is not guaranteed to
be stable and may change over time. | func Difference(nx, ny int, f EqualFunc) (es EditScript) {
// This algorithm is based on traversing what is known as an "edit-graph".
// See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
// by Eugene W. Myers. Since D can be as large as N itself, this is
// effectively O(N^2). Unlike the algorithm from that paper, we are not
// interested in the optimal path, but at least some "decent" path.
//
// For example, let X and Y be lists of symbols:
// X = [A B C A B B A]
// Y = [C B A B A C]
//
// The edit-graph can be drawn as the following:
// A B C A B B A
// ┌─────────────┐
// C │_|_|\|_|_|_|_│ 0
// B │_|\|_|_|\|\|_│ 1
// A │\|_|_|\|_|_|\│ 2
// B │_|\|_|_|\|\|_│ 3
// A │\|_|_|\|_|_|\│ 4
// C │ | |\| | | | │ 5
// └─────────────┘ 6
// 0 1 2 3 4 5 6 7
//
// List X is written along the horizontal axis, while list Y is written
// along the vertical axis. At any point on this grid, if the symbol in
// list X matches the corresponding symbol in list Y, then a '\' is drawn.
// The goal of any minimal edit-script algorithm is to find a path from the
// top-left corner to the bottom-right corner, while traveling through the
// fewest horizontal or vertical edges.
// A horizontal edge is equivalent to inserting a symbol from list X.
// A vertical edge is equivalent to inserting a symbol from list Y.
// A diagonal edge is equivalent to a matching symbol between both X and Y.
// Invariants:
// - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
// - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
//
// In general:
// - fwdFrontier.X < revFrontier.X
// - fwdFrontier.Y < revFrontier.Y
//
// Unless, it is time for the algorithm to terminate.
fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
fwdFrontier := fwdPath.point // Forward search frontier
revFrontier := revPath.point // Reverse search frontier
// Search budget bounds the cost of searching for better paths.
// The longest sequence of non-matching symbols that can be tolerated is
// approximately the square-root of the search budget.
searchBudget := 4 * (nx + ny) // O(n)
// Running the tests with the "cmp_debug" build tag prints a visualization
// of the algorithm running in real-time. This is educational for
// understanding how the algorithm works. See debug_enable.go.
f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
// The algorithm below is a greedy, meet-in-the-middle algorithm for
// computing sub-optimal edit-scripts between two lists.
//
// The algorithm is approximately as follows:
// - Searching for differences switches back-and-forth between
// a search that starts at the beginning (the top-left corner), and
// a search that starts at the end (the bottom-right corner).
// The goal of the search is to connect with the search
// from the opposite corner.
// - As we search, we build a path in a greedy manner,
// where the first match seen is added to the path (this is sub-optimal,
// but provides a decent result in practice). When matches are found,
// we try the next pair of symbols in the lists and follow all matches
// as far as possible.
// - When searching for matches, we search along a diagonal going
// through the "frontier" point. If no matches are found,
// we advance the frontier towards the opposite corner.
// - This algorithm terminates when either the X coordinates or the
// Y coordinates of the forward and reverse frontier points ever intersect.
// This algorithm is correct even if searching only in the forward direction
// or in the reverse direction. We do both because it is commonly observed
// that two lists commonly differ because elements were added to the front
// or end of the other list.
//
// Non-deterministically start with either the forward or reverse direction
// to introduce some deliberate instability so that we have the flexibility
// to change this algorithm in the future.
if flags.Deterministic || randBool {
goto forwardSearch
} else {
goto reverseSearch
}
forwardSearch:
{
// Forward search from the beginning.
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
goto finishSearch
}
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
// Search in a diagonal pattern for a match.
z := zigzag(i)
p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
switch {
case p.X >= revPath.X || p.Y < fwdPath.Y:
stop1 = true // Hit top-right corner
case p.Y >= revPath.Y || p.X < fwdPath.X:
stop2 = true // Hit bottom-left corner
case f(p.X, p.Y).Equal():
// Match found, so connect the path to this point.
fwdPath.connect(p, f)
fwdPath.append(Identity)
// Follow sequence of matches as far as possible.
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
if !f(fwdPath.X, fwdPath.Y).Equal() {
break
}
fwdPath.append(Identity)
}
fwdFrontier = fwdPath.point
stop1, stop2 = true, true
default:
searchBudget-- // Match not found
}
debug.Update()
}
// Advance the frontier towards reverse point.
if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
fwdFrontier.X++
} else {
fwdFrontier.Y++
}
goto reverseSearch
}
reverseSearch:
{
// Reverse search from the end.
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
goto finishSearch
}
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
// Search in a diagonal pattern for a match.
z := zigzag(i)
p := point{revFrontier.X - z, revFrontier.Y + z}
switch {
case fwdPath.X >= p.X || revPath.Y < p.Y:
stop1 = true // Hit bottom-left corner
case fwdPath.Y >= p.Y || revPath.X < p.X:
stop2 = true // Hit top-right corner
case f(p.X-1, p.Y-1).Equal():
// Match found, so connect the path to this point.
revPath.connect(p, f)
revPath.append(Identity)
// Follow sequence of matches as far as possible.
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
if !f(revPath.X-1, revPath.Y-1).Equal() {
break
}
revPath.append(Identity)
}
revFrontier = revPath.point
stop1, stop2 = true, true
default:
searchBudget-- // Match not found
}
debug.Update()
}
// Advance the frontier towards forward point.
if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
revFrontier.X--
} else {
revFrontier.Y--
}
goto forwardSearch
}
finishSearch:
// Join the forward and reverse paths and then append the reverse path.
fwdPath.connect(revPath.point, f)
for i := len(revPath.es) - 1; i >= 0; i-- {
t := revPath.es[i]
revPath.es = revPath.es[:i]
fwdPath.append(t)
}
debug.Finish()
return fwdPath.es
} | func (iab *IAB) Cf(alt *IAB) (IABDiff, error) {
if err := iab.good(); err != nil {
return 0, err
}
if iab == alt {
return 0, nil
}
// Avoid holding two locks at once.
ref, err := alt.Dup()
if err != nil {
return 0, err
}
iab.mu.RLock()
defer iab.mu.RUnlock()
var cf IABDiff
for i := 0; i < words; i++ {
if iab.i[i] != ref.i[i] {
cf |= iBits
}
if iab.a[i] != ref.a[i] {
cf |= aBits
}
if iab.nb[i] != ref.nb[i] {
cf |= bBits
}
}
return cf, nil
} | 0.557884 | k8snetworkplumbingwg/multus-cni | vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go | moby/buildkit | vendor/kernel.org/pub/linux/libs/security/libcap/cap/iab.go | Apache-2.0 | go |
Difference reports whether two lists of lengths nx and ny are equal
given the definition of equality provided as f.
This function returns an edit-script, which is a sequence of operations
needed to convert one list into the other. The following invariants for
the edit-script are maintained:
- eq == (es.Dist()==0)
- nx == es.LenX()
- ny == es.LenY()
This algorithm is not guaranteed to be an optimal solution (i.e., one that
produces an edit-script with a minimal Levenshtein distance). This algorithm
favors performance over optimality. The exact output is not guaranteed to
be stable and may change over time. | func Difference(nx, ny int, f EqualFunc) (es EditScript) {
// This algorithm is based on traversing what is known as an "edit-graph".
// See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
// by Eugene W. Myers. Since D can be as large as N itself, this is
// effectively O(N^2). Unlike the algorithm from that paper, we are not
// interested in the optimal path, but at least some "decent" path.
//
// For example, let X and Y be lists of symbols:
// X = [A B C A B B A]
// Y = [C B A B A C]
//
// The edit-graph can be drawn as the following:
// A B C A B B A
// ┌─────────────┐
// C │_|_|\|_|_|_|_│ 0
// B │_|\|_|_|\|\|_│ 1
// A │\|_|_|\|_|_|\│ 2
// B │_|\|_|_|\|\|_│ 3
// A │\|_|_|\|_|_|\│ 4
// C │ | |\| | | | │ 5
// └─────────────┘ 6
// 0 1 2 3 4 5 6 7
//
// List X is written along the horizontal axis, while list Y is written
// along the vertical axis. At any point on this grid, if the symbol in
// list X matches the corresponding symbol in list Y, then a '\' is drawn.
// The goal of any minimal edit-script algorithm is to find a path from the
// top-left corner to the bottom-right corner, while traveling through the
// fewest horizontal or vertical edges.
// A horizontal edge is equivalent to inserting a symbol from list X.
// A vertical edge is equivalent to inserting a symbol from list Y.
// A diagonal edge is equivalent to a matching symbol between both X and Y.
// Invariants:
// - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
// - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
//
// In general:
// - fwdFrontier.X < revFrontier.X
// - fwdFrontier.Y < revFrontier.Y
//
// Unless, it is time for the algorithm to terminate.
fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
fwdFrontier := fwdPath.point // Forward search frontier
revFrontier := revPath.point // Reverse search frontier
// Search budget bounds the cost of searching for better paths.
// The longest sequence of non-matching symbols that can be tolerated is
// approximately the square-root of the search budget.
searchBudget := 4 * (nx + ny) // O(n)
// Running the tests with the "cmp_debug" build tag prints a visualization
// of the algorithm running in real-time. This is educational for
// understanding how the algorithm works. See debug_enable.go.
f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
// The algorithm below is a greedy, meet-in-the-middle algorithm for
// computing sub-optimal edit-scripts between two lists.
//
// The algorithm is approximately as follows:
// - Searching for differences switches back-and-forth between
// a search that starts at the beginning (the top-left corner), and
// a search that starts at the end (the bottom-right corner).
// The goal of the search is to connect with the search
// from the opposite corner.
// - As we search, we build a path in a greedy manner,
// where the first match seen is added to the path (this is sub-optimal,
// but provides a decent result in practice). When matches are found,
// we try the next pair of symbols in the lists and follow all matches
// as far as possible.
// - When searching for matches, we search along a diagonal going
// through the "frontier" point. If no matches are found,
// we advance the frontier towards the opposite corner.
// - This algorithm terminates when either the X coordinates or the
// Y coordinates of the forward and reverse frontier points ever intersect.
// This algorithm is correct even if searching only in the forward direction
// or in the reverse direction. We do both because it is commonly observed
// that two lists commonly differ because elements were added to the front
// or end of the other list.
//
// Non-deterministically start with either the forward or reverse direction
// to introduce some deliberate instability so that we have the flexibility
// to change this algorithm in the future.
if flags.Deterministic || randBool {
goto forwardSearch
} else {
goto reverseSearch
}
forwardSearch:
{
// Forward search from the beginning.
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
goto finishSearch
}
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
// Search in a diagonal pattern for a match.
z := zigzag(i)
p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
switch {
case p.X >= revPath.X || p.Y < fwdPath.Y:
stop1 = true // Hit top-right corner
case p.Y >= revPath.Y || p.X < fwdPath.X:
stop2 = true // Hit bottom-left corner
case f(p.X, p.Y).Equal():
// Match found, so connect the path to this point.
fwdPath.connect(p, f)
fwdPath.append(Identity)
// Follow sequence of matches as far as possible.
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
if !f(fwdPath.X, fwdPath.Y).Equal() {
break
}
fwdPath.append(Identity)
}
fwdFrontier = fwdPath.point
stop1, stop2 = true, true
default:
searchBudget-- // Match not found
}
debug.Update()
}
// Advance the frontier towards reverse point.
if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
fwdFrontier.X++
} else {
fwdFrontier.Y++
}
goto reverseSearch
}
reverseSearch:
{
// Reverse search from the end.
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
goto finishSearch
}
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
// Search in a diagonal pattern for a match.
z := zigzag(i)
p := point{revFrontier.X - z, revFrontier.Y + z}
switch {
case fwdPath.X >= p.X || revPath.Y < p.Y:
stop1 = true // Hit bottom-left corner
case fwdPath.Y >= p.Y || revPath.X < p.X:
stop2 = true // Hit top-right corner
case f(p.X-1, p.Y-1).Equal():
// Match found, so connect the path to this point.
revPath.connect(p, f)
revPath.append(Identity)
// Follow sequence of matches as far as possible.
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
if !f(revPath.X-1, revPath.Y-1).Equal() {
break
}
revPath.append(Identity)
}
revFrontier = revPath.point
stop1, stop2 = true, true
default:
searchBudget-- // Match not found
}
debug.Update()
}
// Advance the frontier towards forward point.
if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
revFrontier.X--
} else {
revFrontier.Y--
}
goto forwardSearch
}
finishSearch:
// Join the forward and reverse paths and then append the reverse path.
fwdPath.connect(revPath.point, f)
for i := len(revPath.es) - 1; i >= 0; i-- {
t := revPath.es[i]
revPath.es = revPath.es[:i]
fwdPath.append(t)
}
debug.Finish()
return fwdPath.es
} | func Difference(a, b []string, lowercase bool) []string {
if len(a) == 0 {
return a
}
if len(b) == 0 {
if !lowercase {
return a
}
newA := make([]string, len(a))
for i, v := range a {
newA[i] = strings.ToLower(v)
}
return newA
}
a = RemoveDuplicates(a, lowercase)
b = RemoveDuplicates(b, lowercase)
itemsMap := map[string]struct{}{}
for _, aVal := range a {
itemsMap[aVal] = struct{}{}
}
// Perform difference calculation
for _, bVal := range b {
if _, ok := itemsMap[bVal]; ok {
delete(itemsMap, bVal)
}
}
items := []string{}
for item := range itemsMap {
items = append(items, item)
}
sort.Strings(items)
return items
} | 0.55272 | k8snetworkplumbingwg/multus-cni | vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go | umputun/spot | vendor/github.com/hashicorp/go-secure-stdlib/strutil/strutil.go | MIT | go |
Difference reports whether two lists of lengths nx and ny are equal
given the definition of equality provided as f.
This function returns an edit-script, which is a sequence of operations
needed to convert one list into the other. The following invariants for
the edit-script are maintained:
- eq == (es.Dist()==0)
- nx == es.LenX()
- ny == es.LenY()
This algorithm is not guaranteed to be an optimal solution (i.e., one that
produces an edit-script with a minimal Levenshtein distance). This algorithm
favors performance over optimality. The exact output is not guaranteed to
be stable and may change over time. | func Difference(nx, ny int, f EqualFunc) (es EditScript) {
// This algorithm is based on traversing what is known as an "edit-graph".
// See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
// by Eugene W. Myers. Since D can be as large as N itself, this is
// effectively O(N^2). Unlike the algorithm from that paper, we are not
// interested in the optimal path, but at least some "decent" path.
//
// For example, let X and Y be lists of symbols:
// X = [A B C A B B A]
// Y = [C B A B A C]
//
// The edit-graph can be drawn as the following:
// A B C A B B A
// ┌─────────────┐
// C │_|_|\|_|_|_|_│ 0
// B │_|\|_|_|\|\|_│ 1
// A │\|_|_|\|_|_|\│ 2
// B │_|\|_|_|\|\|_│ 3
// A │\|_|_|\|_|_|\│ 4
// C │ | |\| | | | │ 5
// └─────────────┘ 6
// 0 1 2 3 4 5 6 7
//
// List X is written along the horizontal axis, while list Y is written
// along the vertical axis. At any point on this grid, if the symbol in
// list X matches the corresponding symbol in list Y, then a '\' is drawn.
// The goal of any minimal edit-script algorithm is to find a path from the
// top-left corner to the bottom-right corner, while traveling through the
// fewest horizontal or vertical edges.
// A horizontal edge is equivalent to inserting a symbol from list X.
// A vertical edge is equivalent to inserting a symbol from list Y.
// A diagonal edge is equivalent to a matching symbol between both X and Y.
// Invariants:
// - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
// - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
//
// In general:
// - fwdFrontier.X < revFrontier.X
// - fwdFrontier.Y < revFrontier.Y
//
// Unless, it is time for the algorithm to terminate.
fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
fwdFrontier := fwdPath.point // Forward search frontier
revFrontier := revPath.point // Reverse search frontier
// Search budget bounds the cost of searching for better paths.
// The longest sequence of non-matching symbols that can be tolerated is
// approximately the square-root of the search budget.
searchBudget := 4 * (nx + ny) // O(n)
// Running the tests with the "cmp_debug" build tag prints a visualization
// of the algorithm running in real-time. This is educational for
// understanding how the algorithm works. See debug_enable.go.
f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
// The algorithm below is a greedy, meet-in-the-middle algorithm for
// computing sub-optimal edit-scripts between two lists.
//
// The algorithm is approximately as follows:
// - Searching for differences switches back-and-forth between
// a search that starts at the beginning (the top-left corner), and
// a search that starts at the end (the bottom-right corner).
// The goal of the search is to connect with the search
// from the opposite corner.
// - As we search, we build a path in a greedy manner,
// where the first match seen is added to the path (this is sub-optimal,
// but provides a decent result in practice). When matches are found,
// we try the next pair of symbols in the lists and follow all matches
// as far as possible.
// - When searching for matches, we search along a diagonal going
// through the "frontier" point. If no matches are found,
// we advance the frontier towards the opposite corner.
// - This algorithm terminates when either the X coordinates or the
// Y coordinates of the forward and reverse frontier points ever intersect.
// This algorithm is correct even if searching only in the forward direction
// or in the reverse direction. We do both because it is commonly observed
// that two lists commonly differ because elements were added to the front
// or end of the other list.
//
// Non-deterministically start with either the forward or reverse direction
// to introduce some deliberate instability so that we have the flexibility
// to change this algorithm in the future.
if flags.Deterministic || randBool {
goto forwardSearch
} else {
goto reverseSearch
}
forwardSearch:
{
// Forward search from the beginning.
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
goto finishSearch
}
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
// Search in a diagonal pattern for a match.
z := zigzag(i)
p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
switch {
case p.X >= revPath.X || p.Y < fwdPath.Y:
stop1 = true // Hit top-right corner
case p.Y >= revPath.Y || p.X < fwdPath.X:
stop2 = true // Hit bottom-left corner
case f(p.X, p.Y).Equal():
// Match found, so connect the path to this point.
fwdPath.connect(p, f)
fwdPath.append(Identity)
// Follow sequence of matches as far as possible.
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
if !f(fwdPath.X, fwdPath.Y).Equal() {
break
}
fwdPath.append(Identity)
}
fwdFrontier = fwdPath.point
stop1, stop2 = true, true
default:
searchBudget-- // Match not found
}
debug.Update()
}
// Advance the frontier towards reverse point.
if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
fwdFrontier.X++
} else {
fwdFrontier.Y++
}
goto reverseSearch
}
reverseSearch:
{
// Reverse search from the end.
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
goto finishSearch
}
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
// Search in a diagonal pattern for a match.
z := zigzag(i)
p := point{revFrontier.X - z, revFrontier.Y + z}
switch {
case fwdPath.X >= p.X || revPath.Y < p.Y:
stop1 = true // Hit bottom-left corner
case fwdPath.Y >= p.Y || revPath.X < p.X:
stop2 = true // Hit top-right corner
case f(p.X-1, p.Y-1).Equal():
// Match found, so connect the path to this point.
revPath.connect(p, f)
revPath.append(Identity)
// Follow sequence of matches as far as possible.
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
if !f(revPath.X-1, revPath.Y-1).Equal() {
break
}
revPath.append(Identity)
}
revFrontier = revPath.point
stop1, stop2 = true, true
default:
searchBudget-- // Match not found
}
debug.Update()
}
// Advance the frontier towards forward point.
if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
revFrontier.X--
} else {
revFrontier.Y--
}
goto forwardSearch
}
finishSearch:
// Join the forward and reverse paths and then append the reverse path.
fwdPath.connect(revPath.point, f)
for i := len(revPath.es) - 1; i >= 0; i-- {
t := revPath.es[i]
revPath.es = revPath.es[:i]
fwdPath.append(t)
}
debug.Finish()
return fwdPath.es
} | func (s IPNetSet) Difference(s2 IPNetSet) IPNetSet {
result := make(IPNetSet)
for k, i := range s {
_, found := s2[k]
if found {
continue
}
result[k] = i
}
return result
} | 0.530795 | k8snetworkplumbingwg/multus-cni | vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go | k8snetworkplumbingwg/multus-cni | vendor/k8s.io/utils/net/ipnet.go | Apache-2.0 | go |
Difference reports whether two lists of lengths nx and ny are equal
given the definition of equality provided as f.
This function returns an edit-script, which is a sequence of operations
needed to convert one list into the other. The following invariants for
the edit-script are maintained:
- eq == (es.Dist()==0)
- nx == es.LenX()
- ny == es.LenY()
This algorithm is not guaranteed to be an optimal solution (i.e., one that
produces an edit-script with a minimal Levenshtein distance). This algorithm
favors performance over optimality. The exact output is not guaranteed to
be stable and may change over time. | func Difference(nx, ny int, f EqualFunc) (es EditScript) {
// This algorithm is based on traversing what is known as an "edit-graph".
// See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
// by Eugene W. Myers. Since D can be as large as N itself, this is
// effectively O(N^2). Unlike the algorithm from that paper, we are not
// interested in the optimal path, but at least some "decent" path.
//
// For example, let X and Y be lists of symbols:
// X = [A B C A B B A]
// Y = [C B A B A C]
//
// The edit-graph can be drawn as the following:
// A B C A B B A
// ┌─────────────┐
// C │_|_|\|_|_|_|_│ 0
// B │_|\|_|_|\|\|_│ 1
// A │\|_|_|\|_|_|\│ 2
// B │_|\|_|_|\|\|_│ 3
// A │\|_|_|\|_|_|\│ 4
// C │ | |\| | | | │ 5
// └─────────────┘ 6
// 0 1 2 3 4 5 6 7
//
// List X is written along the horizontal axis, while list Y is written
// along the vertical axis. At any point on this grid, if the symbol in
// list X matches the corresponding symbol in list Y, then a '\' is drawn.
// The goal of any minimal edit-script algorithm is to find a path from the
// top-left corner to the bottom-right corner, while traveling through the
// fewest horizontal or vertical edges.
// A horizontal edge is equivalent to inserting a symbol from list X.
// A vertical edge is equivalent to inserting a symbol from list Y.
// A diagonal edge is equivalent to a matching symbol between both X and Y.
// Invariants:
// - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
// - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
//
// In general:
// - fwdFrontier.X < revFrontier.X
// - fwdFrontier.Y < revFrontier.Y
//
// Unless, it is time for the algorithm to terminate.
fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
fwdFrontier := fwdPath.point // Forward search frontier
revFrontier := revPath.point // Reverse search frontier
// Search budget bounds the cost of searching for better paths.
// The longest sequence of non-matching symbols that can be tolerated is
// approximately the square-root of the search budget.
searchBudget := 4 * (nx + ny) // O(n)
// Running the tests with the "cmp_debug" build tag prints a visualization
// of the algorithm running in real-time. This is educational for
// understanding how the algorithm works. See debug_enable.go.
f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
// The algorithm below is a greedy, meet-in-the-middle algorithm for
// computing sub-optimal edit-scripts between two lists.
//
// The algorithm is approximately as follows:
// - Searching for differences switches back-and-forth between
// a search that starts at the beginning (the top-left corner), and
// a search that starts at the end (the bottom-right corner).
// The goal of the search is to connect with the search
// from the opposite corner.
// - As we search, we build a path in a greedy manner,
// where the first match seen is added to the path (this is sub-optimal,
// but provides a decent result in practice). When matches are found,
// we try the next pair of symbols in the lists and follow all matches
// as far as possible.
// - When searching for matches, we search along a diagonal going
// through the "frontier" point. If no matches are found,
// we advance the frontier towards the opposite corner.
// - This algorithm terminates when either the X coordinates or the
// Y coordinates of the forward and reverse frontier points ever intersect.
// This algorithm is correct even if searching only in the forward direction
// or in the reverse direction. We do both because it is commonly observed
// that two lists commonly differ because elements were added to the front
// or end of the other list.
//
// Non-deterministically start with either the forward or reverse direction
// to introduce some deliberate instability so that we have the flexibility
// to change this algorithm in the future.
if flags.Deterministic || randBool {
goto forwardSearch
} else {
goto reverseSearch
}
forwardSearch:
{
// Forward search from the beginning.
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
goto finishSearch
}
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
// Search in a diagonal pattern for a match.
z := zigzag(i)
p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
switch {
case p.X >= revPath.X || p.Y < fwdPath.Y:
stop1 = true // Hit top-right corner
case p.Y >= revPath.Y || p.X < fwdPath.X:
stop2 = true // Hit bottom-left corner
case f(p.X, p.Y).Equal():
// Match found, so connect the path to this point.
fwdPath.connect(p, f)
fwdPath.append(Identity)
// Follow sequence of matches as far as possible.
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
if !f(fwdPath.X, fwdPath.Y).Equal() {
break
}
fwdPath.append(Identity)
}
fwdFrontier = fwdPath.point
stop1, stop2 = true, true
default:
searchBudget-- // Match not found
}
debug.Update()
}
// Advance the frontier towards reverse point.
if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
fwdFrontier.X++
} else {
fwdFrontier.Y++
}
goto reverseSearch
}
reverseSearch:
{
// Reverse search from the end.
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
goto finishSearch
}
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
// Search in a diagonal pattern for a match.
z := zigzag(i)
p := point{revFrontier.X - z, revFrontier.Y + z}
switch {
case fwdPath.X >= p.X || revPath.Y < p.Y:
stop1 = true // Hit bottom-left corner
case fwdPath.Y >= p.Y || revPath.X < p.X:
stop2 = true // Hit top-right corner
case f(p.X-1, p.Y-1).Equal():
// Match found, so connect the path to this point.
revPath.connect(p, f)
revPath.append(Identity)
// Follow sequence of matches as far as possible.
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
if !f(revPath.X-1, revPath.Y-1).Equal() {
break
}
revPath.append(Identity)
}
revFrontier = revPath.point
stop1, stop2 = true, true
default:
searchBudget-- // Match not found
}
debug.Update()
}
// Advance the frontier towards forward point.
if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
revFrontier.X--
} else {
revFrontier.Y--
}
goto forwardSearch
}
finishSearch:
// Join the forward and reverse paths and then append the reverse path.
fwdPath.connect(revPath.point, f)
for i := len(revPath.es) - 1; i >= 0; i-- {
t := revPath.es[i]
revPath.es = revPath.es[:i]
fwdPath.append(t)
}
debug.Finish()
return fwdPath.es
} | func (v *Version) Difference(u *Version) (int, Diff) {
c := v.Compare(u)
if c == 0 && v.build == u.build { // Build differences are ignored in Compare.
return c, Same
}
// Special case, varies too much from Semver 2.0, but often works,
// so let Maven-specific code try.
switch v.sys {
case Maven:
return c, mavenDifference(v, u)
}
switch {
case v.major() != u.major():
return c, DiffMajor
case v.minor() != u.minor():
return c, DiffMinor
case v.patch() != u.patch():
return c, DiffPatch
case len(v.num) != 3 || len(u.num) != 3:
// Too messy, give up.
return c, DiffOther
case comparePrerelease(u, v) != 0:
return c, DiffPrerelease
case u.build != v.build:
return c, DiffBuild
}
return c, DiffOther // We know they're not the same, but not how.
} | 0.525749 | k8snetworkplumbingwg/multus-cni | vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go | google/deps.dev | util/semver/diff.go | Apache-2.0 | go |
SearchDevicesWithContext is the same as SearchDevices with the addition of
the ability to pass a context and additional request options.
See SearchDevices for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *Braket) SearchDevicesWithContext(ctx aws.Context, input *SearchDevicesInput, opts ...request.Option) (*SearchDevicesOutput, error) {
req, out := c.SearchDevicesRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *Braket) SearchDevicesPagesWithContext(ctx aws.Context, input *SearchDevicesInput, fn func(*SearchDevicesOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *SearchDevicesInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.SearchDevicesRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
for p.Next() {
if !fn(p.Page().(*SearchDevicesOutput), !p.HasNextPage()) {
break
}
}
return p.Err()
} | 0.860809 | aws/aws-sdk-go | service/braket/api.go | aws/aws-sdk-go | service/braket/api.go | Apache-2.0 | go |
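A hedged sketch of the context-passing pattern this docstring describes: the caller supplies a non-nil context, and cancelling it (here via a timeout) aborts the in-flight request. The empty SearchDevicesInput is a placeholder — the real operation expects device filters — so treat the input construction and the Devices field on the output as assumptions.

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/braket"
)

func main() {
	svc := braket.New(session.Must(session.NewSession()))

	// The context must be non-nil; cancelling it cancels the request.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Placeholder input: a real search needs filter values.
	out, err := svc.SearchDevicesWithContext(ctx, &braket.SearchDevicesInput{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("devices returned: %d", len(out.Devices)) // Devices field assumed
}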
SearchDevicesWithContext is the same as SearchDevices with the addition of
the ability to pass a context and additional request options.
See SearchDevices for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *Braket) SearchDevicesWithContext(ctx aws.Context, input *SearchDevicesInput, opts ...request.Option) (*SearchDevicesOutput, error) {
req, out := c.SearchDevicesRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *NetworkManager) GetDevicesWithContext(ctx aws.Context, input *GetDevicesInput, opts ...request.Option) (*GetDevicesOutput, error) {
req, out := c.GetDevicesRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.812528 | aws/aws-sdk-go | service/braket/api.go | aws/aws-sdk-go | service/networkmanager/api.go | Apache-2.0 | go |
SearchDevicesWithContext is the same as SearchDevices with the addition of
the ability to pass a context and additional request options.
See SearchDevices for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *Braket) SearchDevicesWithContext(ctx aws.Context, input *SearchDevicesInput, opts ...request.Option) (*SearchDevicesOutput, error) {
req, out := c.SearchDevicesRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *CognitoIdentityProvider) ListDevicesWithContext(ctx aws.Context, input *ListDevicesInput, opts ...request.Option) (*ListDevicesOutput, error) {
req, out := c.ListDevicesRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.796217 | aws/aws-sdk-go | service/braket/api.go | aws/aws-sdk-go | service/cognitoidentityprovider/api.go | Apache-2.0 | go |
SearchDevicesWithContext is the same as SearchDevices with the addition of
the ability to pass a context and additional request options.
See SearchDevices for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *Braket) SearchDevicesWithContext(ctx aws.Context, input *SearchDevicesInput, opts ...request.Option) (*SearchDevicesOutput, error) {
req, out := c.SearchDevicesRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *CloudSearchDomain) SearchWithContext(ctx aws.Context, input *SearchInput, opts ...request.Option) (*SearchOutput, error) {
req, out := c.SearchRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.786308 | aws/aws-sdk-go | service/braket/api.go | aws/aws-sdk-go | service/cloudsearchdomain/api.go | Apache-2.0 | go |
SearchDevicesWithContext is the same as SearchDevices with the addition of
the ability to pass a context and additional request options.
See SearchDevices for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *Braket) SearchDevicesWithContext(ctx aws.Context, input *SearchDevicesInput, opts ...request.Option) (*SearchDevicesOutput, error) {
req, out := c.SearchDevicesRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *WorkLink) ListDevicesWithContext(ctx aws.Context, input *ListDevicesInput, opts ...request.Option) (*ListDevicesOutput, error) {
req, out := c.ListDevicesRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.784035 | aws/aws-sdk-go | service/braket/api.go | aws/aws-sdk-go | service/worklink/api.go | Apache-2.0 | go |
ListPipelines API operation for Amazon Elastic Transcoder.
The ListPipelines operation gets a list of the pipelines associated with
the current AWS account.
Returns awserr.Error for service API and SDK errors. Use runtime type assertions
with awserr.Error's Code and Message methods to get detailed information about
the error.
See the AWS API reference guide for Amazon Elastic Transcoder's
API operation ListPipelines for usage and error information.
Returned Error Types:
- ValidationException
One or more required parameter values were not provided in the request.
- IncompatibleVersionException
- AccessDeniedException
General authentication failure. The request was not signed correctly.
- InternalServiceException
Elastic Transcoder encountered an unexpected exception while trying to fulfill
the request. | func (c *ElasticTranscoder) ListPipelines(input *ListPipelinesInput) (*ListPipelinesOutput, error) {
req, out := c.ListPipelinesRequest(input)
return out, req.Send()
} | func (c *CodePipeline) ListPipelines(input *ListPipelinesInput) (*ListPipelinesOutput, error) {
req, out := c.ListPipelinesRequest(input)
return out, req.Send()
} | 0.927728 | aws/aws-sdk-go | service/elastictranscoder/api.go | aws/aws-sdk-go | service/codepipeline/api.go | Apache-2.0 | go |
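A small sketch of calling the Elastic Transcoder ListPipelines operation and applying the awserr runtime type assertion the docstring recommends for reading the error code and message. The Pipelines and Name fields read from the output are assumptions, flagged in the comments.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elastictranscoder"
)

func main() {
	svc := elastictranscoder.New(session.Must(session.NewSession()))

	out, err := svc.ListPipelines(&elastictranscoder.ListPipelinesInput{})
	if err != nil {
		// Runtime type assertion to get the service error code and message.
		if aerr, ok := err.(awserr.Error); ok {
			log.Fatalf("ListPipelines failed: %s: %s", aerr.Code(), aerr.Message())
		}
		log.Fatal(err)
	}
	for _, p := range out.Pipelines { // Pipelines field assumed on the output
		fmt.Println(aws.StringValue(p.Name)) // Name field assumed
	}
}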
ListPipelines API operation for Amazon Elastic Transcoder.
The ListPipelines operation gets a list of the pipelines associated with
the current AWS account.
Returns awserr.Error for service API and SDK errors. Use runtime type assertions
with awserr.Error's Code and Message methods to get detailed information about
the error.
See the AWS API reference guide for Amazon Elastic Transcoder's
API operation ListPipelines for usage and error information.
Returned Error Types:
- ValidationException
One or more required parameter values were not provided in the request.
- IncompatibleVersionException
- AccessDeniedException
General authentication failure. The request was not signed correctly.
- InternalServiceException
Elastic Transcoder encountered an unexpected exception while trying to fulfill
the request. | func (c *ElasticTranscoder) ListPipelines(input *ListPipelinesInput) (*ListPipelinesOutput, error) {
req, out := c.ListPipelinesRequest(input)
return out, req.Send()
} | func (c *DataPipeline) ListPipelines(input *ListPipelinesInput) (*ListPipelinesOutput, error) {
req, out := c.ListPipelinesRequest(input)
return out, req.Send()
} | 0.925151 | aws/aws-sdk-go | service/elastictranscoder/api.go | aws/aws-sdk-go | service/datapipeline/api.go | Apache-2.0 | go |
ListPipelines API operation for Amazon Elastic Transcoder.
The ListPipelines operation gets a list of the pipelines associated with
the current AWS account.
Returns awserr.Error for service API and SDK errors. Use runtime type assertions
with awserr.Error's Code and Message methods to get detailed information about
the error.
See the AWS API reference guide for Amazon Elastic Transcoder's
API operation ListPipelines for usage and error information.
Returned Error Types:
- ValidationException
One or more required parameter values were not provided in the request.
- IncompatibleVersionException
- AccessDeniedException
General authentication failure. The request was not signed correctly.
- InternalServiceException
Elastic Transcoder encountered an unexpected exception while trying to fulfill
the request. | func (c *ElasticTranscoder) ListPipelines(input *ListPipelinesInput) (*ListPipelinesOutput, error) {
req, out := c.ListPipelinesRequest(input)
return out, req.Send()
} | func (c *SageMaker) ListPipelines(input *ListPipelinesInput) (*ListPipelinesOutput, error) {
req, out := c.ListPipelinesRequest(input)
return out, req.Send()
} | 0.847811 | aws/aws-sdk-go | service/elastictranscoder/api.go | aws/aws-sdk-go | service/sagemaker/api.go | Apache-2.0 | go |
ListPipelines API operation for Amazon Elastic Transcoder.
The ListPipelines operation gets a list of the pipelines associated with
the current AWS account.
Returns awserr.Error for service API and SDK errors. Use runtime type assertions
with awserr.Error's Code and Message methods to get detailed information about
the error.
See the AWS API reference guide for Amazon Elastic Transcoder's
API operation ListPipelines for usage and error information.
Returned Error Types:
- ValidationException
One or more required parameter values were not provided in the request.
- IncompatibleVersionException
- AccessDeniedException
General authentication failure. The request was not signed correctly.
- InternalServiceException
Elastic Transcoder encountered an unexpected exception while trying to fulfill
the request. | func (c *ElasticTranscoder) ListPipelines(input *ListPipelinesInput) (*ListPipelinesOutput, error) {
req, out := c.ListPipelinesRequest(input)
return out, req.Send()
} | func (c *DataPipeline) DescribePipelines(input *DescribePipelinesInput) (*DescribePipelinesOutput, error) {
req, out := c.DescribePipelinesRequest(input)
return out, req.Send()
} | 0.810606 | aws/aws-sdk-go | service/elastictranscoder/api.go | aws/aws-sdk-go | service/datapipeline/api.go | Apache-2.0 | go |
ListPipelines API operation for Amazon Elastic Transcoder.
The ListPipelines operation gets a list of the pipelines associated with
the current AWS account.
Returns awserr.Error for service API and SDK errors. Use runtime type assertions
with awserr.Error's Code and Message methods to get detailed information about
the error.
See the AWS API reference guide for Amazon Elastic Transcoder's
API operation ListPipelines for usage and error information.
Returned Error Types:
- ValidationException
One or more required parameter values were not provided in the request.
- IncompatibleVersionException
- AccessDeniedException
General authentication failure. The request was not signed correctly.
- InternalServiceException
Elastic Transcoder encountered an unexpected exception while trying to fulfill
the request. | func (c *ElasticTranscoder) ListPipelines(input *ListPipelinesInput) (*ListPipelinesOutput, error) {
req, out := c.ListPipelinesRequest(input)
return out, req.Send()
} | func (c *ElasticTranscoder) ListJobsByPipeline(input *ListJobsByPipelineInput) (*ListJobsByPipelineOutput, error) {
req, out := c.ListJobsByPipelineRequest(input)
return out, req.Send()
} | 0.792535 | aws/aws-sdk-go | service/elastictranscoder/api.go | aws/aws-sdk-go | service/elastictranscoder/api.go | Apache-2.0 | go |
Projection sets the optional parameter "projection": Set of
properties to return. Defaults to full.
Possible values:
"full" - Include all properties.
"noAcl" - Omit the owner, acl property. | func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall {
c.urlParams_.Set("projection", projection)
return c
} | func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall {
c.urlParams_.Set("projection", projection)
return c
} | 0.987663 | armory/spinnaker-operator | vendor/google.golang.org/api/storage/v1/storage-gen.go | armory/spinnaker-operator | vendor/google.golang.org/api/storage/v1/storage-gen.go | Apache-2.0 | go |
Projection sets the optional parameter "projection": Set of
properties to return. Defaults to full.
Possible values:
"full" - Include all properties.
"noAcl" - Omit the owner, acl property. | func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall {
c.urlParams_.Set("projection", projection)
return c
} | func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall {
c.urlParams_.Set("projection", projection)
return c
} | 0.983381 | armory/spinnaker-operator | vendor/google.golang.org/api/storage/v1/storage-gen.go | armory/spinnaker-operator | vendor/google.golang.org/api/storage/v1/storage-gen.go | Apache-2.0 | go |
Projection sets the optional parameter "projection": Set of
properties to return. Defaults to full.
Possible values:
"full" - Include all properties.
"noAcl" - Omit the owner, acl property. | func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall {
c.urlParams_.Set("projection", projection)
return c
} | func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall {
c.urlParams_.Set("projection", projection)
return c
} | 0.981553 | armory/spinnaker-operator | vendor/google.golang.org/api/storage/v1/storage-gen.go | tektoncd/cli | vendor/google.golang.org/api/storage/v1/storage-gen.go | Apache-2.0 | go |
Projection sets the optional parameter "projection": Set of
properties to return. Defaults to full.
Possible values:
"full" - Include all properties.
"noAcl" - Omit the owner, acl property. | func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall {
c.urlParams_.Set("projection", projection)
return c
} | func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall {
c.urlParams_.Set("projection", projection)
return c
} | 0.974169 | armory/spinnaker-operator | vendor/google.golang.org/api/storage/v1/storage-gen.go | tektoncd/cli | vendor/google.golang.org/api/storage/v1/storage-gen.go | Apache-2.0 | go |
Projection sets the optional parameter "projection": Set of
properties to return. Defaults to full.
Possible values:
"full" - Include all properties.
"noAcl" - Omit the owner, acl property. | func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall {
c.urlParams_.Set("projection", projection)
return c
} | func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall {
c.urlParams_.Set("projection", projection)
return c
} | 0.973755 | armory/spinnaker-operator | vendor/google.golang.org/api/storage/v1/storage-gen.go | armory/spinnaker-operator | vendor/google.golang.org/api/storage/v1/storage-gen.go | Apache-2.0 | go |
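The five rows above all pair the generated Cloud Storage v1 client's `Projection` setter with near-identical setters on other call types; each setter simply records a URL parameter and returns the receiver. A minimal caller-side sketch of how such a setter is typically chained before `Do` — the bucket name is a placeholder, and the `NewService` constructor assumes a reasonably recent `google.golang.org/api/storage/v1`, not the exact vendored copies named in the rows:

```go
package main

import (
	"context"
	"fmt"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()

	// NewService picks up Application Default Credentials by default.
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Optional parameters such as Projection are chained onto the call
	// and only sent when Do executes the request. "example-bucket" is a
	// placeholder, not a real bucket.
	objs, err := svc.Objects.List("example-bucket").
		Projection("noAcl").
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, o := range objs.Items {
		fmt.Println(o.Name)
	}
}
```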
WithRole sets the Role field in the declarative configuration to the given value
and returns the receiver, so that objects can be built by chaining "With" function invocations.
If called multiple times, the Role field is set to the value of the last call. | func (b *SELinuxOptionsApplyConfiguration) WithRole(value string) *SELinuxOptionsApplyConfiguration {
b.Role = &value
return b
} | func (b *ClusterRoleBindingApplyConfiguration) WithRoleRef(value *RoleRefApplyConfiguration) *ClusterRoleBindingApplyConfiguration {
b.RoleRef = value
return b
} | 0.913242 | devtron-labs/silver-surfer | vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go | devtron-labs/silver-surfer | vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go | Apache-2.0 | go |
WithRole sets the Role field in the declarative configuration to the given value
and returns the receiver, so that objects can be built by chaining "With" function invocations.
If called multiple times, the Role field is set to the value of the last call. | func (b *SELinuxOptionsApplyConfiguration) WithRole(value string) *SELinuxOptionsApplyConfiguration {
b.Role = &value
return b
} | func (b *SubjectApplyConfiguration) WithUser(value *UserSubjectApplyConfiguration) *SubjectApplyConfiguration {
b.User = value
return b
} | 0.711067 | devtron-labs/silver-surfer | vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go | loggie-io/loggie | vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/subject.go | Apache-2.0 | go |
WithRole sets the Role field in the declarative configuration to the given value
and returns the receiver, so that objects can be built by chaining "With" function invocations.
If called multiple times, the Role field is set to the value of the last call. | func (b *SELinuxOptionsApplyConfiguration) WithRole(value string) *SELinuxOptionsApplyConfiguration {
b.Role = &value
return b
} | func (b *AggregationRuleApplyConfiguration) WithClusterRoleSelectors(values ...*v1.LabelSelectorApplyConfiguration) *AggregationRuleApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithClusterRoleSelectors")
}
b.ClusterRoleSelectors = append(b.ClusterRoleSelectors, *values[i])
}
return b
} | 0.708219 | devtron-labs/silver-surfer | vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go | devtron-labs/silver-surfer | vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go | Apache-2.0 | go |
WithRole sets the Role field in the declarative configuration to the given value
and returns the receiver, so that objects can be built by chaining "With" function invocations.
If called multiple times, the Role field is set to the value of the last call. | func (b *SELinuxOptionsApplyConfiguration) WithRole(value string) *SELinuxOptionsApplyConfiguration {
b.Role = &value
return b
} | func (b *ISCSIPersistentVolumeSourceApplyConfiguration) WithIQN(value string) *ISCSIPersistentVolumeSourceApplyConfiguration {
b.IQN = &value
return b
} | 0.650312 | devtron-labs/silver-surfer | vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go | devtron-labs/silver-surfer | vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go | Apache-2.0 | go |
WithRole sets the Role field in the declarative configuration to the given value
and returns the receiver, so that objects can be built by chaining "With" function invocations.
If called multiple times, the Role field is set to the value of the last call. | func (b *SELinuxOptionsApplyConfiguration) WithRole(value string) *SELinuxOptionsApplyConfiguration {
b.Role = &value
return b
} | func (b *ExtGrpcServiceApplyConfiguration) WithAuthority(value string) *ExtGrpcServiceApplyConfiguration {
b.Authority = &value
return b
} | 0.65024 | devtron-labs/silver-surfer | vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go | kgateway-dev/kgateway | api/applyconfiguration/api/v1alpha1/extgrpcservice.go | Apache-2.0 | go |
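The `WithRole` rows come from client-go's generated apply configurations, where each `With*` setter fills one field of a declarative configuration and returns the receiver so calls can be chained. A small illustrative sketch, assuming the generated `SELinuxOptions()` constructor from `k8s.io/client-go/applyconfigurations/core/v1` and made-up SELinux values:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Each With* setter fills one field and returns the receiver, so the
	// whole declarative configuration can be built as a single chain.
	se := corev1.SELinuxOptions().
		WithUser("system_u").
		WithRole("object_r").
		WithType("container_file_t").
		WithLevel("s0:c123,c456")

	// Fields are stored as pointers; Role was set by the last WithRole call.
	fmt.Println(*se.Role) // object_r
}
```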
GetRuleSetWithContext is the same as GetRuleSet with the addition of
the ability to pass a context and additional request options.
See GetRuleSet for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *MailManager) GetRuleSetWithContext(ctx aws.Context, input *GetRuleSetInput, opts ...request.Option) (*GetRuleSetOutput, error) {
req, out := c.GetRuleSetRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *MailManager) CreateRuleSetWithContext(ctx aws.Context, input *CreateRuleSetInput, opts ...request.Option) (*CreateRuleSetOutput, error) {
req, out := c.CreateRuleSetRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.875552 | aws/aws-sdk-go | service/mailmanager/api.go | aws/aws-sdk-go | service/mailmanager/api.go | Apache-2.0 | go |
GetRuleSetWithContext is the same as GetRuleSet with the addition of
the ability to pass a context and additional request options.
See GetRuleSet for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *MailManager) GetRuleSetWithContext(ctx aws.Context, input *GetRuleSetInput, opts ...request.Option) (*GetRuleSetOutput, error) {
req, out := c.GetRuleSetRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *MailManager) UpdateRuleSetWithContext(ctx aws.Context, input *UpdateRuleSetInput, opts ...request.Option) (*UpdateRuleSetOutput, error) {
req, out := c.UpdateRuleSetRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.84911 | aws/aws-sdk-go | service/mailmanager/api.go | aws/aws-sdk-go | service/mailmanager/api.go | Apache-2.0 | go |
GetRuleSetWithContext is the same as GetRuleSet with the addition of
the ability to pass a context and additional request options.
See GetRuleSet for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *MailManager) GetRuleSetWithContext(ctx aws.Context, input *GetRuleSetInput, opts ...request.Option) (*GetRuleSetOutput, error) {
req, out := c.GetRuleSetRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *MailManager) DeleteRuleSetWithContext(ctx aws.Context, input *DeleteRuleSetInput, opts ...request.Option) (*DeleteRuleSetOutput, error) {
req, out := c.DeleteRuleSetRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.821128 | aws/aws-sdk-go | service/mailmanager/api.go | aws/aws-sdk-go | service/mailmanager/api.go | Apache-2.0 | go |
GetRuleSetWithContext is the same as GetRuleSet with the addition of
the ability to pass a context and additional request options.
See GetRuleSet for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *MailManager) GetRuleSetWithContext(ctx aws.Context, input *GetRuleSetInput, opts ...request.Option) (*GetRuleSetOutput, error) {
req, out := c.GetRuleSetRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *SES) DescribeReceiptRuleSetWithContext(ctx aws.Context, input *DescribeReceiptRuleSetInput, opts ...request.Option) (*DescribeReceiptRuleSetOutput, error) {
req, out := c.DescribeReceiptRuleSetRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.795888 | aws/aws-sdk-go | service/mailmanager/api.go | aws/aws-sdk-go | service/ses/api.go | Apache-2.0 | go |
GetRuleSetWithContext is the same as GetRuleSet with the addition of
the ability to pass a context and additional request options.
See GetRuleSet for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *MailManager) GetRuleSetWithContext(ctx aws.Context, input *GetRuleSetInput, opts ...request.Option) (*GetRuleSetOutput, error) {
req, out := c.GetRuleSetRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *RecycleBin) GetRuleWithContext(ctx aws.Context, input *GetRuleInput, opts ...request.Option) (*GetRuleOutput, error) {
req, out := c.GetRuleRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.792353 | aws/aws-sdk-go | service/mailmanager/api.go | aws/aws-sdk-go | service/recyclebin/api.go | Apache-2.0 | go |
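The `*WithContext` rows follow the AWS SDK for Go v1 pattern their docstrings describe: the caller must pass a non-nil context, which the SDK uses for request cancellation. A caller-side sketch of that pattern — the rule-set identifier and the `RuleSetId` field name are illustrative assumptions, not values taken from these rows:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/mailmanager"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := mailmanager.New(sess)

	// The SDK panics on a nil context, so always pass a real one; the
	// deadline here also bounds how long the request may run.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	out, err := svc.GetRuleSetWithContext(ctx, &mailmanager.GetRuleSetInput{
		RuleSetId: aws.String("rs-example"), // field name assumed for illustration
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(out)
}
```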
EndOffset sets the optional parameter "endOffset": Filter results to objects
whose names are lexicographically before endOffset. If startOffset is also
set, the objects listed will have names between startOffset (inclusive) and
endOffset (exclusive). | func (c *ObjectsListCall) EndOffset(endOffset string) *ObjectsListCall {
c.urlParams_.Set("endOffset", endOffset)
return c
} | func (c *ObjectsWatchAllCall) EndOffset(endOffset string) *ObjectsWatchAllCall {
c.urlParams_.Set("endOffset", endOffset)
return c
} | 0.975539 | tektoncd/cli | vendor/google.golang.org/api/storage/v1/storage-gen.go | armory/spinnaker-operator | vendor/google.golang.org/api/storage/v1/storage-gen.go | Apache-2.0 | go |
EndOffset sets the optional parameter "endOffset": Filter results to objects
whose names are lexicographically before endOffset. If startOffset is also
set, the objects listed will have names between startOffset (inclusive) and
endOffset (exclusive). | func (c *ObjectsListCall) EndOffset(endOffset string) *ObjectsListCall {
c.urlParams_.Set("endOffset", endOffset)
return c
} | func (c *FoldersListCall) EndOffset(endOffset string) *FoldersListCall {
c.urlParams_.Set("endOffset", endOffset)
return c
} | 0.924103 | tektoncd/cli | vendor/google.golang.org/api/storage/v1/storage-gen.go | tektoncd/cli | vendor/google.golang.org/api/storage/v1/storage-gen.go | Apache-2.0 | go |
EndOffset sets the optional parameter "endOffset": Filter results to objects
whose names are lexicographically before endOffset. If startOffset is also
set, the objects listed will have names between startOffset (inclusive) and
endOffset (exclusive). | func (c *ObjectsListCall) EndOffset(endOffset string) *ObjectsListCall {
c.urlParams_.Set("endOffset", endOffset)
return c
} | func (c *ObjectsListCall) StartOffset(startOffset string) *ObjectsListCall {
c.urlParams_.Set("startOffset", startOffset)
return c
} | 0.881165 | tektoncd/cli | vendor/google.golang.org/api/storage/v1/storage-gen.go | tektoncd/cli | vendor/google.golang.org/api/storage/v1/storage-gen.go | Apache-2.0 | go |
EndOffset sets the optional parameter "endOffset": Filter results to objects
whose names are lexicographically before endOffset. If startOffset is also
set, the objects listed will have names between startOffset (inclusive) and
endOffset (exclusive). | func (c *ObjectsListCall) EndOffset(endOffset string) *ObjectsListCall {
c.urlParams_.Set("endOffset", endOffset)
return c
} | func (c *ObjectsWatchAllCall) StartOffset(startOffset string) *ObjectsWatchAllCall {
c.urlParams_.Set("startOffset", startOffset)
return c
} | 0.878433 | tektoncd/cli | vendor/google.golang.org/api/storage/v1/storage-gen.go | armory/spinnaker-operator | vendor/google.golang.org/api/storage/v1/storage-gen.go | Apache-2.0 | go |
EndOffset sets the optional parameter "endOffset": Filter results to objects
whose names are lexicographically before endOffset. If startOffset is also
set, the objects listed will have names between startOffset (inclusive) and
endOffset (exclusive). | func (c *ObjectsListCall) EndOffset(endOffset string) *ObjectsListCall {
c.urlParams_.Set("endOffset", endOffset)
return c
} | func (c *FoldersListCall) StartOffset(startOffset string) *FoldersListCall {
c.urlParams_.Set("startOffset", startOffset)
return c
} | 0.82576 | tektoncd/cli | vendor/google.golang.org/api/storage/v1/storage-gen.go | tektoncd/cli | vendor/google.golang.org/api/storage/v1/storage-gen.go | Apache-2.0 | go |
Authenticate a user.
Authenticates a user and returns information about the authenticated user.
Include the user information in a [basic auth
header](https://en.wikipedia.org/wiki/Basic_access_authentication).
A successful call returns a JSON structure that shows user information such
as their username, the roles that are assigned to the user, any assigned
metadata, and information about the realms that authenticated and authorized
the user.
If the user cannot be authenticated, this API returns a 401 status code.
https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-authenticate | func New(tp elastictransport.Interface) *Authenticate {
r := &Authenticate{
transport: tp,
values: make(url.Values),
headers: make(http.Header),
}
if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
r.instrument = instrument
}
}
return r
} | func New(tp elastictransport.Interface) *OidcAuthenticate {
r := &OidcAuthenticate{
transport: tp,
values: make(url.Values),
headers: make(http.Header),
buf: gobytes.NewBuffer(nil),
}
if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
r.instrument = instrument
}
}
return r
} | 0.707776 | elastic/go-elasticsearch | typedapi/security/authenticate/authenticate.go | elastic/go-elasticsearch | typedapi/security/oidcauthenticate/oidc_authenticate.go | Apache-2.0 | go |
Authenticate a user.
Authenticates a user and returns information about the authenticated user.
Include the user information in a [basic auth
header](https://en.wikipedia.org/wiki/Basic_access_authentication).
A successful call returns a JSON structure that shows user information such
as their username, the roles that are assigned to the user, any assigned
metadata, and information about the realms that authenticated and authorized
the user.
If the user cannot be authenticated, this API returns a 401 status code.
https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-authenticate | func New(tp elastictransport.Interface) *Authenticate {
r := &Authenticate{
transport: tp,
values: make(url.Values),
headers: make(http.Header),
}
if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
r.instrument = instrument
}
}
return r
} | func New(tp elastictransport.Interface) *SamlAuthenticate {
r := &SamlAuthenticate{
transport: tp,
values: make(url.Values),
headers: make(http.Header),
buf: gobytes.NewBuffer(nil),
}
if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
r.instrument = instrument
}
}
return r
} | 0.681695 | elastic/go-elasticsearch | typedapi/security/authenticate/authenticate.go | elastic/go-elasticsearch | typedapi/security/samlauthenticate/saml_authenticate.go | Apache-2.0 | go |
Authenticate a user.
Authenticates a user and returns information about the authenticated user.
Include the user information in a [basic auth
header](https://en.wikipedia.org/wiki/Basic_access_authentication).
A successful call returns a JSON structure that shows user information such
as their username, the roles that are assigned to the user, any assigned
metadata, and information about the realms that authenticated and authorized
the user.
If the user cannot be authenticated, this API returns a 401 status code.
https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-authenticate | func New(tp elastictransport.Interface) *Authenticate {
r := &Authenticate{
transport: tp,
values: make(url.Values),
headers: make(http.Header),
}
if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
r.instrument = instrument
}
}
return r
} | func New(tp elastictransport.Interface) *GetUser {
r := &GetUser{
transport: tp,
values: make(url.Values),
headers: make(http.Header),
}
if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
r.instrument = instrument
}
}
return r
} | 0.659491 | elastic/go-elasticsearch | typedapi/security/authenticate/authenticate.go | elastic/go-elasticsearch | typedapi/security/getuser/get_user.go | Apache-2.0 | go |
Authenticate a user.
Authenticates a user and returns information about the authenticated user.
Include the user information in a [basic auth
header](https://en.wikipedia.org/wiki/Basic_access_authentication).
A successful call returns a JSON structure that shows user information such
as their username, the roles that are assigned to the user, any assigned
metadata, and information about the realms that authenticated and authorized
the user.
If the user cannot be authenticated, this API returns a 401 status code.
https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-authenticate | func New(tp elastictransport.Interface) *Authenticate {
r := &Authenticate{
transport: tp,
values: make(url.Values),
headers: make(http.Header),
}
if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
r.instrument = instrument
}
}
return r
} | func New(tp elastictransport.Interface) *GrantApiKey {
r := &GrantApiKey{
transport: tp,
values: make(url.Values),
headers: make(http.Header),
buf: gobytes.NewBuffer(nil),
}
if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
r.instrument = instrument
}
}
return r
} | 0.634694 | elastic/go-elasticsearch | typedapi/security/authenticate/authenticate.go | elastic/go-elasticsearch | typedapi/security/grantapikey/grant_api_key.go | Apache-2.0 | go |
Authenticate a user.
Authenticates a user and returns information about the authenticated user.
Include the user information in a [basic auth
header](https://en.wikipedia.org/wiki/Basic_access_authentication).
A successful call returns a JSON structure that shows user information such
as their username, the roles that are assigned to the user, any assigned
metadata, and information about the realms that authenticated and authorized
the user.
If the user cannot be authenticated, this API returns a 401 status code.
https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-authenticate | func New(tp elastictransport.Interface) *Authenticate {
r := &Authenticate{
transport: tp,
values: make(url.Values),
headers: make(http.Header),
}
if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
r.instrument = instrument
}
}
return r
} | func New(tp elastictransport.Interface) *GetUserPrivileges {
r := &GetUserPrivileges{
transport: tp,
values: make(url.Values),
headers: make(http.Header),
}
if instrumented, ok := r.transport.(elastictransport.Instrumented); ok {
if instrument := instrumented.InstrumentationEnabled(); instrument != nil {
r.instrument = instrument
}
}
return r
} | 0.626083 | elastic/go-elasticsearch | typedapi/security/authenticate/authenticate.go | elastic/go-elasticsearch | typedapi/security/getuserprivileges/get_user_privileges.go | Apache-2.0 | go |
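The `Authenticate` rows show the typed Elasticsearch client's endpoint constructors, which differ only in the struct they build around a shared transport. In practice the endpoint is usually reached through the typed client rather than by calling `New` directly; a sketch under that assumption, with placeholder address and credentials:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/elastic/go-elasticsearch/v8"
)

func main() {
	es, err := elasticsearch.NewTypedClient(elasticsearch.Config{
		Addresses: []string{"https://localhost:9200"}, // placeholder
		Username:  "elastic",                          // placeholder
		Password:  "changeme",                         // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	// The typed client wires its transport into the endpoint constructor;
	// Do sends the request and decodes the authenticated-user response.
	res, err := es.Security.Authenticate().Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", res)
}
```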
@Tags MenuAPI
@Security ApiKeyAuth
@Summary Get menu record by ID
@Param id path string true "unique id"
@Success 200 {object} util.ResponseResult{data=schema.Menu}
@Failure 401 {object} util.ResponseResult
@Failure 500 {object} util.ResponseResult
@Router /api/v1/menus/{id} [get] | func (a *Menu) Get(c *gin.Context) {
ctx := c.Request.Context()
item, err := a.MenuBIZ.Get(ctx, c.Param("id"))
if err != nil {
util.ResError(c, err)
return
}
util.ResSuccess(c, item)
} | func (a *Menu) Update(c *gin.Context) {
ctx := c.Request.Context()
item := new(schema.MenuForm)
if err := util.ParseJSON(c, item); err != nil {
util.ResError(c, err)
return
} else if err := item.Validate(); err != nil {
util.ResError(c, err)
return
}
err := a.MenuBIZ.Update(ctx, c.Param("id"), item)
if err != nil {
util.ResError(c, err)
return
}
util.ResOK(c)
} | 0.849721 | LyricTian/gin-admin | internal/mods/rbac/api/menu.api.go | LyricTian/gin-admin | internal/mods/rbac/api/menu.api.go | Apache-2.0 | go |
@Tags MenuAPI
@Security ApiKeyAuth
@Summary Get menu record by ID
@Param id path string true "unique id"
@Success 200 {object} util.ResponseResult{data=schema.Menu}
@Failure 401 {object} util.ResponseResult
@Failure 500 {object} util.ResponseResult
@Router /api/v1/menus/{id} [get] | func (a *Menu) Get(c *gin.Context) {
ctx := c.Request.Context()
item, err := a.MenuBIZ.Get(ctx, c.Param("id"))
if err != nil {
util.ResError(c, err)
return
}
util.ResSuccess(c, item)
} | func (a *Menu) Delete(c *gin.Context) {
ctx := c.Request.Context()
err := a.MenuBIZ.Delete(ctx, c.Param("id"))
if err != nil {
util.ResError(c, err)
return
}
util.ResOK(c)
} | 0.834062 | LyricTian/gin-admin | internal/mods/rbac/api/menu.api.go | LyricTian/gin-admin | internal/mods/rbac/api/menu.api.go | Apache-2.0 | go |
@Tags MenuAPI
@Security ApiKeyAuth
@Summary Get menu record by ID
@Param id path string true "unique id"
@Success 200 {object} util.ResponseResult{data=schema.Menu}
@Failure 401 {object} util.ResponseResult
@Failure 500 {object} util.ResponseResult
@Router /api/v1/menus/{id} [get] | func (a *Menu) Get(c *gin.Context) {
ctx := c.Request.Context()
item, err := a.MenuBIZ.Get(ctx, c.Param("id"))
if err != nil {
util.ResError(c, err)
return
}
util.ResSuccess(c, item)
} | func (a *AuthorityMenuApi) GetBaseMenuById(c *gin.Context) {
var idInfo request.GetById
err := c.ShouldBindJSON(&idInfo)
if err != nil {
response.FailWithMessage(err.Error(), c)
return
}
err = utils.Verify(idInfo, utils.IdVerify)
if err != nil {
response.FailWithMessage(err.Error(), c)
return
}
menu, err := baseMenuService.GetBaseMenuById(idInfo.ID)
if err != nil {
global.GVA_LOG.Error("获取失败!", zap.Error(err))
response.FailWithMessage("获取失败", c)
return
}
response.OkWithDetailed(systemRes.SysBaseMenuResponse{Menu: menu}, "获取成功", c)
} | 0.830024 | LyricTian/gin-admin | internal/mods/rbac/api/menu.api.go | flipped-aurora/gin-vue-admin | server/api/v1/system/sys_menu.go | Apache-2.0 | go |
@Tags MenuAPI
@Security ApiKeyAuth
@Summary Get menu record by ID
@Param id path string true "unique id"
@Success 200 {object} util.ResponseResult{data=schema.Menu}
@Failure 401 {object} util.ResponseResult
@Failure 500 {object} util.ResponseResult
@Router /api/v1/menus/{id} [get] | func (a *Menu) Get(c *gin.Context) {
ctx := c.Request.Context()
item, err := a.MenuBIZ.Get(ctx, c.Param("id"))
if err != nil {
util.ResError(c, err)
return
}
util.ResSuccess(c, item)
} | func (a *Menu) Create(c *gin.Context) {
ctx := c.Request.Context()
item := new(schema.MenuForm)
if err := util.ParseJSON(c, item); err != nil {
util.ResError(c, err)
return
} else if err := item.Validate(); err != nil {
util.ResError(c, err)
return
}
result, err := a.MenuBIZ.Create(ctx, item)
if err != nil {
util.ResError(c, err)
return
}
util.ResSuccess(c, result)
} | 0.828356 | LyricTian/gin-admin | internal/mods/rbac/api/menu.api.go | LyricTian/gin-admin | internal/mods/rbac/api/menu.api.go | Apache-2.0 | go |
@Tags MenuAPI
@Security ApiKeyAuth
@Summary Get menu record by ID
@Param id path string true "unique id"
@Success 200 {object} util.ResponseResult{data=schema.Menu}
@Failure 401 {object} util.ResponseResult
@Failure 500 {object} util.ResponseResult
@Router /api/v1/menus/{id} [get] | func (a *Menu) Get(c *gin.Context) {
ctx := c.Request.Context()
item, err := a.MenuBIZ.Get(ctx, c.Param("id"))
if err != nil {
util.ResError(c, err)
return
}
util.ResSuccess(c, item)
} | func (h *handler) Detail() core.HandlerFunc {
return func(c core.Context) {
req := new(detailRequest)
res := new(detailResponse)
if err := c.ShouldBindURI(req); err != nil {
c.AbortWithError(core.Error(
http.StatusBadRequest,
code.ParamBindError,
code.Text(code.ParamBindError)).WithError(err),
)
return
}
ids, err := h.hashids.HashidsDecode(req.Id)
if err != nil {
c.AbortWithError(core.Error(
http.StatusBadRequest,
code.HashIdsDecodeError,
code.Text(code.HashIdsDecodeError)).WithError(err),
)
return
}
id := int32(ids[0])
searchOneData := new(menu.SearchOneData)
searchOneData.Id = id
info, err := h.menuService.Detail(c, searchOneData)
if err != nil {
c.AbortWithError(core.Error(
http.StatusBadRequest,
code.MenuDetailError,
code.Text(code.MenuDetailError)).WithError(err),
)
return
}
res.Id = info.Id
res.Pid = info.Pid
res.Name = info.Name
res.Link = info.Link
res.Icon = info.Icon
c.Payload(res)
}
} | 0.825769 | LyricTian/gin-admin | internal/mods/rbac/api/menu.api.go | xinliangnote/go-gin-api | internal/api/menu/func_detail.go | MIT | go |
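The gin-admin rows document their handlers through swag annotations such as `@Router /api/v1/menus/{id} [get]`; the actual route registration lives elsewhere in the project. A hedged sketch of how such a handler is typically wired up in gin — the route group and stand-in handler are illustrative, not taken from the rows:

```go
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

// menuGet stands in for the documented (a *Menu).Get handler.
func menuGet(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{"id": c.Param("id")})
}

func main() {
	r := gin.Default()

	// The swag annotation path /api/v1/menus/{id} corresponds to gin's
	// :id parameter syntax used here.
	v1 := r.Group("/api/v1")
	v1.GET("/menus/:id", menuGet)

	_ = r.Run(":8080")
}
```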
UpdateLabelGroupWithContext is the same as UpdateLabelGroup with the addition of
the ability to pass a context and additional request options.
See UpdateLabelGroup for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *LookoutEquipment) UpdateLabelGroupWithContext(ctx aws.Context, input *UpdateLabelGroupInput, opts ...request.Option) (*UpdateLabelGroupOutput, error) {
req, out := c.UpdateLabelGroupRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *LookoutEquipment) DeleteLabelGroupWithContext(ctx aws.Context, input *DeleteLabelGroupInput, opts ...request.Option) (*DeleteLabelGroupOutput, error) {
req, out := c.DeleteLabelGroupRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.867066 | aws/aws-sdk-go | service/lookoutequipment/api.go | aws/aws-sdk-go | service/lookoutequipment/api.go | Apache-2.0 | go |
UpdateLabelGroupWithContext is the same as UpdateLabelGroup with the addition of
the ability to pass a context and additional request options.
See UpdateLabelGroup for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *LookoutEquipment) UpdateLabelGroupWithContext(ctx aws.Context, input *UpdateLabelGroupInput, opts ...request.Option) (*UpdateLabelGroupOutput, error) {
req, out := c.UpdateLabelGroupRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *LookoutEquipment) DescribeLabelGroupWithContext(ctx aws.Context, input *DescribeLabelGroupInput, opts ...request.Option) (*DescribeLabelGroupOutput, error) {
req, out := c.DescribeLabelGroupRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.847285 | aws/aws-sdk-go | service/lookoutequipment/api.go | aws/aws-sdk-go | service/lookoutequipment/api.go | Apache-2.0 | go |
UpdateLabelGroupWithContext is the same as UpdateLabelGroup with the addition of
the ability to pass a context and additional request options.
See UpdateLabelGroup for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *LookoutEquipment) UpdateLabelGroupWithContext(ctx aws.Context, input *UpdateLabelGroupInput, opts ...request.Option) (*UpdateLabelGroupOutput, error) {
req, out := c.UpdateLabelGroupRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *LookoutEquipment) CreateLabelGroupWithContext(ctx aws.Context, input *CreateLabelGroupInput, opts ...request.Option) (*CreateLabelGroupOutput, error) {
req, out := c.CreateLabelGroupRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.838346 | aws/aws-sdk-go | service/lookoutequipment/api.go | aws/aws-sdk-go | service/lookoutequipment/api.go | Apache-2.0 | go |
UpdateLabelGroupWithContext is the same as UpdateLabelGroup with the addition of
the ability to pass a context and additional request options.
See UpdateLabelGroup for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *LookoutEquipment) UpdateLabelGroupWithContext(ctx aws.Context, input *UpdateLabelGroupInput, opts ...request.Option) (*UpdateLabelGroupOutput, error) {
req, out := c.UpdateLabelGroupRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *LookoutEquipment) ListLabelGroupsWithContext(ctx aws.Context, input *ListLabelGroupsInput, opts ...request.Option) (*ListLabelGroupsOutput, error) {
req, out := c.ListLabelGroupsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.797962 | aws/aws-sdk-go | service/lookoutequipment/api.go | aws/aws-sdk-go | service/lookoutequipment/api.go | Apache-2.0 | go |
UpdateLabelGroupWithContext is the same as UpdateLabelGroup with the addition of
the ability to pass a context and additional request options.
See UpdateLabelGroup for details on how to use this API operation.
The context must be non-nil and will be used for request cancellation. If
the context is nil a panic will occur. In the future the SDK may create
sub-contexts for http.Requests. See https://golang.org/pkg/context/
for more information on using Contexts. | func (c *LookoutEquipment) UpdateLabelGroupWithContext(ctx aws.Context, input *UpdateLabelGroupInput, opts ...request.Option) (*UpdateLabelGroupOutput, error) {
req, out := c.UpdateLabelGroupRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | func (c *FraudDetector) UpdateEventLabelWithContext(ctx aws.Context, input *UpdateEventLabelInput, opts ...request.Option) (*UpdateEventLabelOutput, error) {
req, out := c.UpdateEventLabelRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
} | 0.79568 | aws/aws-sdk-go | service/lookoutequipment/api.go | aws/aws-sdk-go | service/frauddetector/api.go | Apache-2.0 | go |