code
stringlengths
67
15.9k
labels
sequencelengths
1
4
package taglog // A map type specific to tags. The value type must be either string or []string. // Users should avoid modifying the map directly and instead use the provided // functions. type Tags map[string]interface{} // Add one or more values to a key. func (t Tags) Add(key string, value ...string) { for _, v := range value { switch vs := t[key].(type) { case nil: t[key] = v case string: t[key] = []string{vs, v} case []string: t[key] = append(vs, v) } } } // Add one or more values to a key, merging any duplicate values. func (t Tags) Merge(key string, value ...string) { for _, v := range value { current := t.GetAll(key) found := false for _, cv := range current { if v == cv { found = true break } } if !found { t.Add(key, v) } } } // Append one or more values to a key. This the same as Add() and is only // provided to couple with Pop() for code clarity. func (t Tags) Push(key string, value ...string) { t.Add(key, value...) } // Remove the last value for a key func (t Tags) Pop(key string) { switch vs := t[key].(type) { case nil: return case string: delete(t, key) case []string: if len(vs) <= 1 { delete(t, key) } else if len(vs) == 2 { t[key] = vs[0] } else { t[key] = vs[:len(vs)-1] } } } // Set one or more values for a key. Any existing values are discarded. func (t Tags) Set(key string, value ...string) { delete(t, key) t.Add(key, value...) } // Get the first value for a key. If the key does not exist, an empty string is // returned. func (t Tags) Get(key string) string { switch vs := t[key].(type) { case string: return vs case []string: return vs[0] } return "" } // Get all the values for a key. If the key does not exist, a nil slice is // returned. func (t Tags) GetAll(key string) []string { switch vs := t[key].(type) { case string: return []string{vs} case []string: return vs } return nil } // Delete a key. func (t Tags) Del(key string) { delete(t, key) } // Delete all keys. 
func (t Tags) DelAll() { for k, _ := range t { delete(t, k) } } // Export all tags as a map of string slices. func (t Tags) Export() map[string][]string { tags := make(map[string][]string) for k, v := range t { switch vs := v.(type) { case string: tags[k] = []string{vs} case []string: ts := make([]string, len(vs)) copy(ts, vs) tags[k] = ts } } return tags } // Import tags from a map of string slices. func (t Tags) Import(tags map[string][]string) { for k, v := range tags { t.Merge(k, v...) } } // Copy tags. Performs a deep copy of all tag values. func (t Tags) Copy() Tags { out := make(Tags) out.Import(t.Export()) return out }
[ 5 ]
/******************************************************************************
*
* Copyright 2020 SAP SE
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/

package keppelv1_test

import (
	"net/http"
	"testing"

	"github.com/sapcc/go-bits/assert"
	"github.com/sapcc/keppel/internal/keppel"
	"github.com/sapcc/keppel/internal/test"
)

// TestPeersAPI exercises GET /keppel/v1/peers: first against an empty
// database, then after inserting two peer rows directly into the DB
// (the API has no endpoint for creating peers, so the test seeds them
// through s.DB).
func TestPeersAPI(t *testing.T) {
	s := test.NewSetup(t, test.WithKeppelAPI)
	h := s.Handler

	// check empty response when there are no peers in the DB
	assert.HTTPRequest{
		Method:       "GET",
		Path:         "/keppel/v1/peers",
		Header:       map[string]string{"X-Test-Perms": "view:tenant1"},
		ExpectStatus: http.StatusOK,
		ExpectBody:   assert.JSONObject{"peers": []interface{}{}},
	}.Check(t, h)

	// add some peers straight into the database
	expectedPeers := []assert.JSONObject{
		{"hostname": "keppel.example.com"},
		{"hostname": "keppel.example.org"},
	}
	for _, peer := range expectedPeers {
		err := s.DB.Insert(&keppel.Peer{HostName: peer["hostname"].(string)})
		if err != nil {
			t.Fatal(err)
		}
	}

	// check non-empty response; the endpoint is expected to echo the same
	// hostnames back as JSON objects
	assert.HTTPRequest{
		Method:       "GET",
		Path:         "/keppel/v1/peers",
		Header:       map[string]string{"X-Test-Perms": "view:tenant1"},
		ExpectStatus: http.StatusOK,
		ExpectBody:   assert.JSONObject{"peers": expectedPeers},
	}.Check(t, h)
}
[ 3 ]
package router import ( "github.com/lovego/goa" ) type fieldCommentPair struct { Field string Comment string } type ResBodyTpl struct { Code string `json:"code" c:"ok 表示成功,其他表示错误代码"` Message string `json:"message" c:"与code对应的描述信息"` Data interface{} `json:"data"` } const ( TypeReqBody uint8 = iota TypeResBody TypeErrResBody ) // TODO type roundTripBody struct { Type uint8 // 请求体/成功返回体/错误返回体 Desc string Body interface{} } type routerInfo struct { Path string Method string Title string Desc string // 描述 ReqContentType string RegComments []fieldCommentPair QueryComments []fieldCommentPair // 保存请求体/成功返回体/错误返回体,数据的数组。并以此顺序生成文档。 RoundTripBodies []roundTripBody //Req interface{} //SucRes interface{} //ErrRes []ResBodyTpl IsEntry bool // 是否 api 接口 } type R struct { Info routerInfo RouterGroup *goa.RouterGroup Nodes []*R } func NewRoot(r *goa.RouterGroup) *R { return New(r, ``) } func New(r *goa.RouterGroup, path string) *R { return &R{ Info: routerInfo{ Path: path, ReqContentType: `application/json`, RegComments: make([]fieldCommentPair, 0), QueryComments: make([]fieldCommentPair, 0), RoundTripBodies: make([]roundTripBody, 0), }, RouterGroup: r, Nodes: make([]*R, 0), } } func NewEntry(r *goa.RouterGroup, path string) *R { entry := New(r, path) entry.Info.IsEntry = true return entry } func (r *R) Group(path string) *R { group := r.RouterGroup.Group(path) child := New(group, path) r.Nodes = append(r.Nodes, child) return child } func (r *R) GetX(path string, handlerFunc func(*goa.Context)) *R { child := NewEntry(r.RouterGroup.Get(path, handlerFunc), path) child.Info.Method = `GET` r.Nodes = append(r.Nodes, child) return child } func (r *R) Get(path string, handlerFunc func(*goa.Context)) *R { child := NewEntry(r.RouterGroup.Get(path, handlerFunc), path) child.Info.Method = `GET` r.Nodes = append(r.Nodes, child) return child } func (r *R) PostX(path string, handlerFunc func(*goa.Context)) *R { child := NewEntry(r.RouterGroup.Post(path, handlerFunc), path) child.Info.Method = 
`POST` r.Nodes = append(r.Nodes, child) return child } func (r *R) Post(path string, handlerFunc func(*goa.Context)) *R { child := NewEntry(r.RouterGroup.Post(path, handlerFunc), path) child.Info.Method = `POST` r.Nodes = append(r.Nodes, child) return child } func (r *R) PutX(path string, handlerFunc func(*goa.Context)) *R { child := NewEntry(r.RouterGroup.Put(path, handlerFunc), path) child.Info.Method = `PUT` r.Nodes = append(r.Nodes, child) return child } func (r *R) Put(path string, handlerFunc func(*goa.Context)) *R { child := NewEntry(r.RouterGroup.Put(path, handlerFunc), path) child.Info.Method = `PUT` r.Nodes = append(r.Nodes, child) return child } func (r *R) PatchX(path string, handlerFunc func(*goa.Context)) *R { child := NewEntry(r.RouterGroup.Patch(path, handlerFunc), path) child.Info.Method = `PATCH` r.Nodes = append(r.Nodes, child) return child } func (r *R) Patch(path string, handlerFunc func(*goa.Context)) *R { child := NewEntry(r.RouterGroup.Patch(path, handlerFunc), path) child.Info.Method = `PATCH` r.Nodes = append(r.Nodes, child) return child } func (r *R) DeleteX(path string, handlerFunc func(*goa.Context)) *R { child := NewEntry(r.RouterGroup.Delete(path, handlerFunc), path) child.Info.Method = `DELETE` r.Nodes = append(r.Nodes, child) return child } func (r *R) Delete(path string, handlerFunc func(*goa.Context)) *R { child := NewEntry(r.RouterGroup.Delete(path, handlerFunc), path) child.Info.Method = `DELETE` r.Nodes = append(r.Nodes, child) return child }
[ 3 ]
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"sync"
	"time"

	"github.com/gorilla/mux"
)

// URL is the OpenDota endpoint listing professional players.
const URL = "https://api.opendota.com/api/proplayers"

var allPlayers []Player

// playerMap indexes players by name. It is written by the refresh pipeline
// and read by HTTP handlers concurrently, so all access must go through
// playerMapMu (the original accessed it unguarded — a data race).
var playerMap map[string]Player
var playerMapMu sync.RWMutex

func init() {
	playerMap = make(map[string]Player)
}

func main() {
	channel := make(chan []Player)
	playerClient := http.Client{
		Timeout: time.Second * 10,
	}

	// Consumer: fold every refreshed player list into the lookup map.
	go func(channel chan []Player) {
		for players := range channel {
			playerMapMu.Lock()
			for _, item := range players {
				playerMap[item.Name] = item
			}
			playerMapMu.Unlock()
		}
	}(channel)

	// Producer: periodically re-fetch the player list.
	go RefreshPLayersArray(&playerClient, channel)

	// start the server since we have initialized all data
	router := mux.NewRouter()
	router.HandleFunc("/", func(responseWriter http.ResponseWriter, request *http.Request) {
		key := request.Header.Get("name")
		playerMapMu.RLock()
		player, ok := playerMap[key]
		playerMapMu.RUnlock()
		if ok {
			bytes, _ := json.Marshal(player)
			responseWriter.Write(bytes)
		} else {
			// was http.Error(..., 505): 505 is "HTTP Version Not
			// Supported"; a missing player is a 404
			http.Error(responseWriter, "Not found", http.StatusNotFound)
		}
	})
	http.Handle("/", router)
	log.Fatal(http.ListenAndServe(":8080", router))
}

// RefreshPLayersArray fetches the pro-player list every five minutes and
// publishes each result on channel. Fetch errors are logged and retried on
// the next tick: the original called log.Fatal, so any transient API error
// after startup killed the whole server, and it re-spawned itself with `go`
// instead of looping.
func RefreshPLayersArray(client *http.Client, channel chan []Player) {
	for {
		players, err := fetchPlayers(client)
		if err != nil {
			log.Println("refresh failed:", err)
		} else {
			channel <- players
			fmt.Println("Refreshing data")
		}
		time.Sleep(5 * time.Minute)
	}
}

// fetchPlayers performs one GET against the OpenDota API and decodes the
// JSON array of players.
func fetchPlayers(client *http.Client) ([]Player, error) {
	request, err := http.NewRequest(http.MethodGet, URL, nil)
	if err != nil {
		return nil, err
	}
	request.Header.Set("player-name", "name")

	response, err := client.Do(request)
	if err != nil {
		return nil, err
	}
	// the original never closed the body, leaking the connection each cycle
	defer response.Body.Close()

	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return nil, err
	}

	var players []Player
	if err := json.Unmarshal(body, &players); err != nil {
		return nil, err
	}
	return players, nil
}
[ 3 ]
//example with pointer receiver package main import ( "fmt" ) type Person struct { name string age int } func (p *Person) fn(name1 string) { p.name = name1 } func main(){ p1:=&Person{name:"jacob",age:23} p1.fn("ryan") fmt.Println(p1.name); }
[ 3 ]
package netmodule

import (
	"net"
	"sync"
	"sync/atomic"
)

// tcpsocket wraps a net.Conn with double-buffered asynchronous sending:
// Write appends into one buffer while a dedicated goroutine flushes the
// other, and the two swap roles each time the sender is notified.
type tcpsocket struct {
	conn       net.Conn   // underlying TCP connection
	buffers    [2]*buffer // double send buffers (buffer is declared elsewhere in this package)
	sendIndex  uint       // index of the buffer the sender goroutine drains
	notify     chan int   // wakes the sender goroutine; capacity 1 so signals coalesce
	isclose    uint32     // set to 1 once closed; read atomically by IsClose
	m          sync.Mutex // guards writeIndex, bclose, and buffer appends
	bclose     bool       // true once Close has run
	writeIndex uint       // index of the buffer Write appends into
}

// newtcpsocket creates a tcpsocket around c and starts its sender goroutine.
// Panics if c is nil (programmer error).
func newtcpsocket(c net.Conn) *tcpsocket {
	if c == nil {
		// c is nil — fail loudly rather than defer the crash
		panic("c is nil")
	}
	// initialize the struct: write side starts on buffer 1, send side on 0
	var psocket = new(tcpsocket)
	psocket.conn = c
	psocket.buffers[0] = new(buffer)
	psocket.buffers[1] = new(buffer)
	psocket.sendIndex = 0
	psocket.notify = make(chan int, 1)
	psocket.isclose = 0
	psocket.bclose = false
	psocket.writeIndex = 1
	// start the sender goroutine
	go psocket._dosend()
	return psocket
}

// _dosend is the sender loop: on each notification it swaps the roles of
// the two buffers (under the lock) and writes the full contents of the
// buffer it just took over to the connection. After the first write error
// it keeps swapping and clearing buffers without writing, so producers
// calling Write never block. The loop exits when Close closes notify.
func (my *tcpsocket) _dosend() {
	writeErr := false
	for {
		_, ok := <-my.notify
		if !ok {
			return
		}
		my.m.Lock()
		my.writeIndex = my.sendIndex
		my.m.Unlock()
		my.sendIndex = (my.sendIndex + 1) % 2
		if !writeErr {
			// loop until the whole slice is written; conn.Write may
			// accept only part of it per call
			var sendSplice = my.buffers[my.sendIndex].Data()
			for len(sendSplice) > 0 {
				n, err := my.conn.Write(sendSplice)
				if err != nil {
					writeErr = true
					break
				}
				sendSplice = sendSplice[n:]
			}
		}
		my.buffers[my.sendIndex].Clear()
	}
}

// Read reads directly from the underlying connection.
func (my *tcpsocket) Read(b []byte) (n int, err error) {
	return my.conn.Read(b)
}

// Write appends one or more byte slices to the current write buffer and
// notifies the sender when the buffer transitions from empty to non-empty.
// It never blocks on the network; data is flushed asynchronously. Writes
// after Close are silently dropped.
func (my *tcpsocket) Write(b ...[]byte) {
	my.m.Lock()
	if my.bclose {
		my.m.Unlock()
		return
	}
	dataLen := my.buffers[my.writeIndex].Len()
	writeLen := 0
	for i := 0; i < len(b); i++ {
		writeLen += len(b[i])
		my.buffers[my.writeIndex].Append(b[i])
	}
	// only signal on the empty->non-empty transition; later appends are
	// picked up by the already-pending notification
	if dataLen == 0 && writeLen != 0 {
		my.notify <- 0
	}
	my.m.Unlock()
}

// Close shuts down the tcpsocket and releases its resources. Safe to call
// more than once; repeated calls are no-ops.
func (my *tcpsocket) Close() {
	my.m.Lock()
	if my.bclose {
		my.m.Unlock()
		return
	}
	my.bclose = true
	my.conn.Close()
	close(my.notify) // terminates the _dosend loop
	my.m.Unlock()
	atomic.StoreUint32(&(my.isclose), 1)
}

// IsClose reports whether the tcpsocket has been closed.
func (my *tcpsocket) IsClose() bool {
	val := atomic.LoadUint32(&(my.isclose))
	if val > 0 {
		return true
	}
	return false
}
[ 3 ]
package parser import ( "github.com/almostmoore/kadastr/feature" "github.com/almostmoore/kadastr/rapi" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" "log" "strconv" "sync" "time" ) type FeatureParser struct { session *mgo.Session fRepo feature.FeatureRepository rClient *rapi.Client } func NewFeatureParser(session *mgo.Session) FeatureParser { return FeatureParser{ session: session, fRepo: feature.NewFeatureRepository(session), rClient: rapi.NewClient(), } } // Run function starts parsing func (f *FeatureParser) Run(quarter string, streams int64) { var maxUnit int64 = 10000 done := make(chan bool, streams) errors := make(chan bool, streams) items := make(chan int64, maxUnit) defer close(done) defer close(errors) defer close(items) wg := &sync.WaitGroup{} var i int64 for i = 0; i < streams; i++ { wg.Add(1) go f.parse(quarter, items, errors, done, wg) } go f.checkError(errors, done, streams) go func() { for i = 0; i < maxUnit; i++ { items <- i } }() wg.Wait() } func (f *FeatureParser) checkError(errors chan bool, done chan bool, streams int64) { errCount := 0 for has := range errors { if has { errCount += 1 } else { errCount = 0 } if errCount == 200 { var i int64 for i = 0; i < streams; i++ { done <- true } } } } // parse data from rosreestr func (f *FeatureParser) parse(quarter string, items <-chan int64, errors, done chan bool, wg *sync.WaitGroup) { for { select { case i := <-items: result := f.parseItem(quarter, i) errors <- !result case <-done: wg.Done() return default: } } } // parseItem Parse item for quarter func (f *FeatureParser) parseItem(quarter string, item int64) bool { time.Sleep(5 * time.Second) number := quarter + ":" + strconv.FormatInt(item, 10) log.Printf("Парсинг участка %s\n", number) ft, err := f.rClient.GetFeature(number) if err != nil || ft.CadNumber == "" { log.Printf("Участок не найден %s (%s)\n", number, err) return false } _, err = f.fRepo.FindByCadNumber(ft.CadNumber) if err == nil { log.Printf("Участок %s уже присутствует в базе данных. 
Пропускаем\n", ft.CadNumber) return true } ft.ID = bson.NewObjectId() err = f.fRepo.Insert(ft) if err != nil { log.Println(err) } else { log.Printf("Участок сохранен %s\n", number) } return true }
[ 6 ]
package rsa

import (
	"CryptCode/utils"
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"errors"
	"io/ioutil"
	"os"
)

// PEM block types for the generated key files.
const RSA_PRIVATE = "RSA PRIVATE KEY"
const RSA_PUBLIC = "RSA PUBLIC KEY"

// defaultBits is the RSA modulus size used for generated key pairs.
// The original registered a "b" command-line flag inside CreatePairKeys;
// calling flag.IntVar twice with the same name panics ("flag redefined"),
// so any second key generation crashed. A constant avoids that.
const defaultBits = 1024

// CreatePairKeys generates a new RSA private key. The public key is
// available as pri.PublicKey, so only the private key is returned.
// (The original contained unreachable duplicate statements after the
// return; they have been removed.)
func CreatePairKeys() (*rsa.PrivateKey, error) {
	pri, err := rsa.GenerateKey(rand.Reader, defaultBits)
	if err != nil {
		return nil, err
	}
	return pri, nil
}

// GenerateKeys creates a fresh key pair and writes both PEM files
// ("rsa_pri_<name>.pem" and "rsa_pub_<name>.pem") in the working directory.
func GenerateKeys(file_name string) error {
	pri, err := CreatePairKeys()
	if err != nil {
		return err // was `return nil`, silently swallowing the failure
	}
	if err = generatePriFileByPrivateKey(pri, file_name); err != nil {
		return err // was `return nil` here as well
	}
	return generatePubFileByPubKey(pri.PublicKey, file_name)
}

//---------------------PEM certificate file generation and reading------------------------

// ReadPemPriKey reads a PKCS#1 private key from the given PEM file.
func ReadPemPriKey(file_name string) (*rsa.PrivateKey, error) {
	blockBytes, err := ioutil.ReadFile(file_name)
	if err != nil {
		return nil, err
	}
	// pem.Decode returns a nil block when no valid PEM data is found;
	// dereferencing it unchecked (as before) would panic
	block, _ := pem.Decode(blockBytes)
	if block == nil {
		return nil, errors.New("no PEM block found in " + file_name)
	}
	return x509.ParsePKCS1PrivateKey(block.Bytes)
}

// ReadPemPubKey reads a PKCS#1 public key from the given PEM file.
func ReadPemPubKey(file_name string) (*rsa.PublicKey, error) {
	blockBytes, err := ioutil.ReadFile(file_name)
	if err != nil {
		return nil, err
	}
	block, _ := pem.Decode(blockBytes)
	if block == nil {
		return nil, errors.New("no PEM block found in " + file_name)
	}
	pub, err := x509.ParsePKCS1PublicKey(block.Bytes)
	if err != nil {
		return nil, err
	}
	return pub, nil
}

// generatePriFileByPrivateKey writes the private key to
// "rsa_pri_<file_name>.pem" in PKCS#1/PEM format.
func generatePriFileByPrivateKey(pri *rsa.PrivateKey, file_name string) error {
	priStream := x509.MarshalPKCS1PrivateKey(pri)
	privatFile, err := os.Create("rsa_pri_" + file_name + ".pem")
	if err != nil {
		return err
	}
	defer privatFile.Close() // the original leaked this handle

	block := &pem.Block{
		Type:  RSA_PRIVATE,
		Bytes: priStream,
	}
	return pem.Encode(privatFile, block)
}

// generatePubFileByPubKey writes the public key to
// "rsa_pub_<file_name>.pem" in PKCS#1/PEM format.
func generatePubFileByPubKey(pub rsa.PublicKey, file_name string) error {
	pubStream := x509.MarshalPKCS1PublicKey(&pub)
	pubFile, err := os.Create("rsa_pub_" + file_name + ".pem")
	if err != nil {
		return err
	}
	defer pubFile.Close() // the original leaked this handle

	block := pem.Block{
		Type:  RSA_PUBLIC,
		Bytes: pubStream,
	}
	return pem.Encode(pubFile, &block)
}

//=========================Combination 1: encrypt with the public key, decrypt with the private key==============================//

// RSAEncrypt encrypts data with the public key (PKCS#1 v1.5 padding) and
// returns the ciphertext.
func RSAEncrypt(key rsa.PublicKey, data []byte) ([]byte, error) {
	return rsa.EncryptPKCS1v15(rand.Reader, &key, data)
}

// RSADecrypt decrypts cipher with the private key and returns the plaintext.
func RSADecrypt(private *rsa.PrivateKey, cipher []byte) ([]byte, error) {
	return rsa.DecryptPKCS1v15(rand.Reader, private, cipher)
}

//=========================Combination 2: sign with the private key, verify with the public key==============================//

// RSASign hashes data with MD5 (via utils.Md5Hash) and signs the digest.
// The original discarded the SignPKCS1v15 result and had no return
// statement at all, which does not compile.
func RSASign(private *rsa.PrivateKey, data []byte) ([]byte, error) {
	hashed := utils.Md5Hash(data)
	return rsa.SignPKCS1v15(rand.Reader, private, crypto.MD5, hashed)
}

// RSAVerify verifies signText against data using the public key.
// It returns true on success; on failure it returns false and the
// verification error.
func RSAVerify(pub rsa.PublicKey, data []byte, signText []byte) (bool, error) {
	hashed := utils.Md5Hash(data)
	err := rsa.VerifyPKCS1v15(&pub, crypto.MD5, hashed, signText)
	if err != nil {
		return false, err
	}
	return true, nil
}
[ 3, 6 ]
package Routes import ( "freshers-bootcamp/week1/day4/Controllers" "github.com/gin-gonic/gin" ) //SetupRouter ... Configure routes func SetupRouter() *gin.Engine { r := gin.Default() grp1 := r.Group("/user-api") { grp1.GET("products", Controllers.GetUsers) grp1.POST("product", Controllers.CreateProd) grp1.GET("product/:id", Controllers.GetProdByID) grp1.PATCH("product/:id", Controllers.UpdateProd) grp1.DELETE("product/:id", Controllers.DeleteUser) grp1.POST("order", Controllers.CreateOrder) } return r }
[ 3 ]
package main

import (
	"bufio"
	"fmt"
	"io"
	"log"
	"os"
	"strconv"
	"strings"
)

// testCase is one grid of cell heights with its dimensions.
type testCase struct {
	rows int
	cols int
	grid [][]int
}

// testCaseOrErr pairs a parsed test case with any parse error.
type testCaseOrErr struct {
	testCase
	err error
}

// main reads contest-style input from stdin and prints, for each grid, the
// total height that must be added so that no cell is more than 1 higher
// than any orthogonal neighbor.
func main() {
	reader := bufio.NewReader(os.Stdin)

	testCases := loadTestCasesToChannel(reader)

	var testIx int
	for test := range testCases {
		testIx++
		if test.err != nil {
			log.Fatal(test.err)
		}
		numAdditions := makeRabbitHouseSafe(&test.testCase)
		fmt.Printf("Case #%d: %d\n", testIx, numAdditions)
	}
}

// makeRabbitHouseSafe repeatedly secures the currently-highest location
// until no location with positive height remains in the bucket index,
// accumulating the total height added.
func makeRabbitHouseSafe(house *testCase) (totalHeightIncrease int) {
	buckets := newHeightBuckets(&house.grid)
	for {
		if buckets.maxHeight == 0 {
			break
		}
		totalHeightIncrease += secureNextLocation(buckets, house)
	}
	return
}

// secureNextLocation takes one location at the current maximum height and
// raises every neighbor that sits more than 1 below it up to height-1,
// updating both the grid and the bucket index. The processed location is
// removed from the index afterwards (via defer, using its height as read
// before any neighbor updates). Returns the height added in this step.
func secureNextLocation(buckets *heightBuckets, house *testCase) (addedHeight int) {
	loc := buckets.getLocationAtMaxHeight()
	locHeight := getLocationHeight(loc, house)
	defer buckets.removeLocation(locHeight, loc)

	for _, neighbor := range getNeighborLocations(loc, house) {
		neighborHeight := getLocationHeight(neighbor, house)
		heightDiff := locHeight - neighborHeight
		if heightDiff > 1 {
			addedHeight += heightDiff - 1
			buckets.insertLocation(locHeight-1, neighbor)
			setLocationHeight(locHeight-1, neighbor, house)
			buckets.removeLocation(neighborHeight, neighbor)
		}
	}
	return
}

// setLocationHeight writes a cell height into the grid.
func setLocationHeight(height int, loc location, house *testCase) {
	house.grid[loc.row][loc.col] = height
}

// getLocationHeight reads a cell height from the grid.
func getLocationHeight(loc location, house *testCase) (height int) {
	return house.grid[loc.row][loc.col]
}

// getNeighborLocations returns the in-bounds orthogonal neighbors of loc.
func getNeighborLocations(loc location, house *testCase) (neighbors []location) {
	if loc.row > 0 {
		neighbors = append(neighbors, location{loc.row - 1, loc.col})
	}
	if loc.col < house.cols-1 {
		neighbors = append(neighbors, location{loc.row, loc.col + 1})
	}
	if loc.row < house.rows-1 {
		neighbors = append(neighbors, location{loc.row + 1, loc.col})
	}
	if loc.col > 0 {
		neighbors = append(neighbors, location{loc.row, loc.col - 1})
	}
	return
}

// location is a (row, col) grid coordinate.
type location struct {
	row, col int
}

// heightBuckets indexes grid locations by their height so the highest
// remaining location can be found quickly. maxHeight tracks the largest
// non-empty bucket (0 once the index is empty).
type heightBuckets struct {
	buckets   map[int]map[location]struct{}
	maxHeight int
}

// getLocationAtMaxHeight returns an arbitrary location from the top bucket;
// the index must be non-empty or the program aborts.
func (b *heightBuckets) getLocationAtMaxHeight() location {
	loc, err := b.getLocationAtHeight(b.maxHeight)
	if err != nil {
		log.Fatal(err)
	}
	return loc
}

// getLocationAtHeight returns an arbitrary location stored at height, or an
// error when that bucket is empty. (The first range iteration returns.)
func (b *heightBuckets) getLocationAtHeight(height int) (loc location, err error) {
	for loc = range b.buckets[height] {
		return loc, err
	}
	return loc, fmt.Errorf("no location found at height: %d", height)
}

// insertLocation records loc at the given height, creating the bucket on
// demand and raising maxHeight when needed.
func (b *heightBuckets) insertLocation(height int, loc location) {
	if _, ok := b.buckets[height]; !ok {
		b.buckets[height] = map[location]struct{}{}
	}
	b.buckets[height][loc] = struct{}{}
	if height > b.maxHeight {
		b.maxHeight = height
	}
}

// removeLocation deletes loc from its height bucket, dropping the bucket
// once empty and lowering maxHeight when the top bucket vanished.
func (b *heightBuckets) removeLocation(height int, loc location) {
	delete(b.buckets[height], loc)
	if len(b.buckets[height]) == 0 {
		delete(b.buckets, height)
	}
	if height == b.maxHeight {
		b.decreaseMaxHeight()
	}
}

// decreaseMaxHeight walks maxHeight down to the next non-empty bucket, or
// resets it to 0 when no buckets remain.
func (b *heightBuckets) decreaseMaxHeight() {
	if len(b.buckets) == 0 {
		b.maxHeight = 0
		return
	}
	for {
		if _, ok := b.buckets[b.maxHeight]; !ok {
			b.maxHeight--
		} else {
			break
		}
	}
}

// newHeightBuckets builds the height index covering every cell of the grid.
func newHeightBuckets(grid *[][]int) *heightBuckets {
	ret := heightBuckets{
		buckets:   make(map[int]map[location]struct{}),
		maxHeight: 0,
	}
	for rowIx, row := range *grid {
		for colIx, height := range row {
			ret.insertLocation(height, location{rowIx, colIx})
		}
	}
	return &ret
}

// -------- Input reading -------- //

// newTestCase bundles parsed dimensions and grid into a testCase.
func newTestCase(rows, cols int, heights [][]int) testCase {
	return testCase{
		rows,
		cols,
		heights,
	}
}

// newTestCaseOrErr wraps a test case together with its parse error.
func newTestCaseOrErr(rows, cols int, grid [][]int, err error) testCaseOrErr {
	return testCaseOrErr{
		newTestCase(rows, cols, grid),
		err,
	}
}

// parseIntFields converts every whitespace-separated field of line to int.
func parseIntFields(line string) (ints []int, err error) {
	for _, field := range strings.Fields(line) {
		convField, err := strconv.Atoi(field)
		if err != nil {
			return []int{}, err
		}
		ints = append(ints, convField)
	}
	return
}

// parseIntsFromNextLine reads one line and parses its int fields; EOF is
// tolerated so a final unterminated line still parses.
func parseIntsFromNextLine(reader *bufio.Reader) (ints []int, err error) {
	line, err := reader.ReadString('\n')
	if err != nil && err != io.EOF {
		return
	}
	return parseIntFields(line)
}

// parseRowAndColNum reads the "rows cols" header line of one test case.
func parseRowAndColNum(reader *bufio.Reader) (row, col int, err error) {
	intFields, err := parseIntsFromNextLine(reader)
	if err != nil {
		return
	}
	if len(intFields) != 2 {
		err = fmt.Errorf("number of int fields in first line of test case not equal to 2")
		return
	}
	row = intFields[0]
	col = intFields[1]
	return
}

// parseNumTestCases reads the leading line holding the test-case count.
func parseNumTestCases(reader *bufio.Reader) (numTestCases int, err error) {
	firstLineInts, err := parseIntsFromNextLine(reader)
	if err != nil {
		return
	}
	if len(firstLineInts) != 1 {
		err = fmt.Errorf("unexpected number of ints in test case number definition")
		return
	}
	numTestCases = firstLineInts[0]
	return
}

// parseGrid reads rows lines of cell heights. cols is accepted for symmetry
// but the per-row column count is not validated here.
func parseGrid(rows int, cols int, reader *bufio.Reader) ([][]int, error) {
	grid := make([][]int, rows)
	for i := 0; i < rows; i++ {
		row, err := parseIntsFromNextLine(reader)
		if err != nil {
			return grid, err
		}
		grid[i] = row
	}
	return grid, nil
}

// loadTestCasesToChannel streams parsed test cases on an unbuffered
// channel; a parse failure emits a testCaseOrErr with err set and the
// goroutine closes the channel on exit.
func loadTestCasesToChannel(reader *bufio.Reader) <-chan testCaseOrErr {
	out := make(chan testCaseOrErr)
	go func() {
		defer close(out)
		numberOfTestCases, err := parseNumTestCases(reader)
		if err != nil {
			out <- testCaseOrErr{err: err}
			return
		}
		for i := 0; i < numberOfTestCases; i++ {
			rows, cols, err := parseRowAndColNum(reader)
			if err != nil {
				out <- testCaseOrErr{err: err}
				return
			}
			grid, err := parseGrid(rows, cols, reader)
			out <- newTestCaseOrErr(rows, cols, grid, err)
		}
	}()
	return out
}
[ 6 ]
package utils import ( "bytes" "crypto/rand" "encoding/json" "fmt" "log" "math" "math/big" "os" "os/exec" "path/filepath" "strings" "time" "github.com/parnurzeal/gorequest" "golang.org/x/xerrors" pb "gopkg.in/cheggaaa/pb.v1" ) var vulnListDir = filepath.Join(CacheDir(), "vuln-list") func CacheDir() string { cacheDir, err := os.UserCacheDir() if err != nil { cacheDir = os.TempDir() } dir := filepath.Join(cacheDir, "vuln-list-update") return dir } func SetVulnListDir(dir string) { vulnListDir = dir } func VulnListDir() string { return vulnListDir } func SaveCVEPerYear(dirPath string, cveID string, data interface{}) error { s := strings.Split(cveID, "-") if len(s) != 3 { return xerrors.Errorf("invalid CVE-ID format: %s\n", cveID) } yearDir := filepath.Join(dirPath, s[1]) if err := os.MkdirAll(yearDir, os.ModePerm); err != nil { return err } filePath := filepath.Join(yearDir, fmt.Sprintf("%s.json", cveID)) if err := Write(filePath, data); err != nil { return xerrors.Errorf("failed to write file: %w", err) } return nil } func Write(filePath string, data interface{}) error { dir := filepath.Dir(filePath) if err := os.MkdirAll(dir, os.ModePerm); err != nil { return xerrors.Errorf("failed to create %s: %w", dir, err) } f, err := os.Create(filePath) if err != nil { return xerrors.Errorf("file create error: %w", err) } defer f.Close() b, err := json.MarshalIndent(data, "", " ") if err != nil { return xerrors.Errorf("JSON marshal error: %w", err) } _, err = f.Write(b) if err != nil { return xerrors.Errorf("file write error: %w", err) } return nil } // GenWorkers generate workders func GenWorkers(num, wait int) chan<- func() { tasks := make(chan func()) for i := 0; i < num; i++ { go func() { for f := range tasks { f() time.Sleep(time.Duration(wait) * time.Second) } }() } return tasks } // DeleteNil deletes nil in errs func DeleteNil(errs []error) (new []error) { for _, err := range errs { if err != nil { new = append(new, err) } } return new } // TrimSpaceNewline deletes space 
character and newline character(CR/LF) func TrimSpaceNewline(str string) string { str = strings.TrimSpace(str) return strings.Trim(str, "\r\n") } // FetchURL returns HTTP response body with retry func FetchURL(url, apikey string, retry int) (res []byte, err error) { for i := 0; i <= retry; i++ { if i > 0 { wait := math.Pow(float64(i), 2) + float64(RandInt()%10) log.Printf("retry after %f seconds\n", wait) time.Sleep(time.Duration(time.Duration(wait) * time.Second)) } res, err = fetchURL(url, map[string]string{"api-key": apikey}) if err == nil { return res, nil } } return nil, xerrors.Errorf("failed to fetch URL: %w", err) } func RandInt() int { seed, _ := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) return int(seed.Int64()) } func fetchURL(url string, headers map[string]string) ([]byte, error) { req := gorequest.New().Get(url) for key, value := range headers { req.Header.Add(key, value) } resp, body, errs := req.Type("text").EndBytes() if len(errs) > 0 { return nil, xerrors.Errorf("HTTP error. url: %s, err: %w", url, errs[0]) } if resp.StatusCode != 200 { return nil, xerrors.Errorf("HTTP error. 
status code: %d, url: %s", resp.StatusCode, url) } return body, nil } // FetchConcurrently fetches concurrently func FetchConcurrently(urls []string, concurrency, wait, retry int) (responses [][]byte, err error) { reqChan := make(chan string, len(urls)) resChan := make(chan []byte, len(urls)) errChan := make(chan error, len(urls)) defer close(reqChan) defer close(resChan) defer close(errChan) go func() { for _, url := range urls { reqChan <- url } }() bar := pb.StartNew(len(urls)) tasks := GenWorkers(concurrency, wait) for range urls { tasks <- func() { url := <-reqChan res, err := FetchURL(url, "", retry) if err != nil { errChan <- err return } resChan <- res } bar.Increment() } bar.Finish() var errs []error timeout := time.After(10 * 60 * time.Second) for range urls { select { case res := <-resChan: responses = append(responses, res) case err := <-errChan: errs = append(errs, err) case <-timeout: return nil, xerrors.New("Timeout Fetching URL") } } if 0 < len(errs) { return responses, fmt.Errorf("%s", errs) } return responses, nil } // Major returns major version func Major(osVer string) (majorVersion string) { return strings.Split(osVer, ".")[0] } func IsCommandAvailable(name string) bool { cmd := exec.Command(name, "--help") if err := cmd.Run(); err != nil { return false } return true } func Exists(path string) (bool, error) { _, err := os.Stat(path) if err == nil { return true, nil } if os.IsNotExist(err) { return false, nil } return true, err } func Exec(command string, args []string) (string, error) { cmd := exec.Command(command, args...) var stdoutBuf, stderrBuf bytes.Buffer cmd.Stdout = &stdoutBuf cmd.Stderr = &stderrBuf if err := cmd.Run(); err != nil { log.Println(stderrBuf.String()) return "", xerrors.Errorf("failed to exec: %w", err) } return stdoutBuf.String(), nil } func LookupEnv(key, defaultValue string) string { if val, ok := os.LookupEnv(key); ok { return val } return defaultValue }
[ 1, 6 ]
package utils const ( FloatType = CounterType("float64") UintType = CounterType("uint64") ) type CounterType string type Counter interface { Add(interface{}) Clone() Counter Value() interface{} } type IntCounter uint64 func (ic *IntCounter) Add(num interface{}) { switch n := num.(type) { case uint64: *ic = *ic + IntCounter(n) case IntCounter: *ic = *ic + n case int: *ic = *ic + IntCounter(n) case float64: *ic = *ic + IntCounter(n) default: } } func (ic *IntCounter) Clone() Counter { counter := new(IntCounter) counter.Add(*ic) return counter } func (ic *IntCounter) Value() interface{} { return uint64(*ic) } type NopCounter struct{} func (nc *NopCounter) Add(num interface{}) { } func (nc *NopCounter) Clone() Counter { return &NopCounter{} } func (nc *NopCounter) Value() interface{} { return &NopCounter{} }
[ 0 ]
// Package client: Field helpers adding JSON unmarshalling and transparent
// compression of large blob values on top of protocol.Field.
package client

import (
	"encoding/binary"
	"encoding/json"
	"errors"
	protocol "github.com/sniperHW/flyfish/proto"
	"reflect"
	"unsafe"
)

// CompressSize is the threshold in bytes at or above which blob fields are
// compressed before packing. (Translated from the original Chinese comment:
// "compress blob fields exceeding this size".)
const CompressSize = 16 * 1024

// Field aliases protocol.Field so accessor methods can be defined locally.
type Field protocol.Field

// IsNil reports whether the underlying field holds no value.
func (this *Field) IsNil() bool {
	return (*protocol.Field)(this).IsNil()
}

// GetString returns the field value as a string.
func (this *Field) GetString() string {
	return (*protocol.Field)(this).GetString()
}

// GetInt returns the field value as an int64.
func (this *Field) GetInt() int64 {
	return (*protocol.Field)(this).GetInt()
}

// GetFloat returns the field value as a float64.
func (this *Field) GetFloat() float64 {
	return (*protocol.Field)(this).GetFloat()
}

// GetBlob returns the field value as a byte slice.
func (this *Field) GetBlob() []byte {
	return (*protocol.Field)(this).GetBlob()
}

// GetValue returns the field value as an untyped interface{}.
func (this *Field) GetValue() interface{} {
	return (*protocol.Field)(this).GetValue()
}

// UnmarshalJsonField JSON-decodes the string or []byte value held by field
// into obj. A nil field, an empty value, or a value of any other type is a
// no-op returning nil.
func UnmarshalJsonField(field *Field, obj interface{}) error {
	if field == nil {
		return nil
	} else {
		v := field.GetValue()
		switch v.(type) {
		case string, []byte:
			var b []byte
			switch v.(type) {
			case []byte:
				b = v.([]byte)
			case string:
				// Zero-copy view over the string's bytes.
				// NOTE(review): relies on reflect.StringHeader/SliceHeader
				// layout; the resulting slice must never be written to.
				s := v.(string)
				b = *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
					Len:  int(len(s)),
					Cap:  int(len(s)),
					Data: (*reflect.StringHeader)(unsafe.Pointer(&s)).Data,
				}))
			}
			if len(b) == 0 {
				return nil
			} else {
				return json.Unmarshal(b, obj)
			}
		default:
			return nil
		}
	}
}

// PackField wraps protocol.PackField, compressing []byte values whose size
// is >= CompressSize. A compressed payload carries a 4-byte big-endian
// trailer holding len(compressed)+4, which UnpackField later validates.
// (Translated from the original Chinese comment: "compress []byte fields of
// size >= 1k" — NOTE(review): that comment said 1k but CompressSize is
// 16 KiB; confirm which is intended.)
func PackField(key string, v interface{}) *protocol.Field {
	switch v.(type) {
	case []byte:
		b := v.([]byte)
		var bb []byte
		if len(b) >= CompressSize {
			// NOTE(review): the Compress error is silently discarded here.
			bb, _ = getCompressor().Compress(b)
			size := make([]byte, 4)
			binary.BigEndian.PutUint32(size, uint32(len(bb)+4))
			bb = append(bb, size...)
		} else {
			bb = b
		}
		return protocol.PackField(key, bb)
	default:
		return protocol.PackField(key, v)
	}
}

// UnpackField reverses PackField: when the field holds a compressed blob
// (detected via checkHeader plus the size trailer) it is decompressed and a
// fresh Field returned. Uncompressed fields pass through unchanged. On a
// corrupt trailer or failed decompression, an empty-blob Field and an error
// are returned.
func UnpackField(f *protocol.Field) (*Field, error) {
	var err error
	if nil != f {
		switch f.GetValue().(type) {
		case []byte:
			b := f.GetBlob()
			if ok, size := checkHeader(b); ok {
				if len(b) >= size+4 {
					// The trailing 4 bytes must equal the total blob length.
					if size = int(binary.BigEndian.Uint32(b[len(b)-4:])); size == len(b) {
						if b, err = getDecompressor().Decompress(b[:len(b)-4]); nil == err {
							return (*Field)(protocol.PackField(f.Name, b)), err
						}
					} else {
						// NOTE(review): "invaild filed" is a typo for
						// "invalid field"; kept as-is (runtime string).
						err = errors.New("flyfish client unpackField:invaild filed1")
					}
				} else {
					err = errors.New("flyfish client unpackField:invaild filed2")
				}
				if nil != err {
					return (*Field)(protocol.PackField(f.Name, []byte{})), err
				}
			}
		}
	}
	return (*Field)(f), err
}
[ 3, 7 ]
// Package werckerclient is a client for the wercker API.
package werckerclient

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"strings"

	"github.com/jtacoma/uritemplates"
)

// NewClient creates a new Client. It merges the default Config together with
// config.
func NewClient(config *Config) *Client {
	c := &Client{config: defaultConfig.Merge(config)}
	return c
}

// Client is the wercker api client.
type Client struct {
	config *Config
}

// Do makes a request to the wercker api servers and, when a response body is
// present, unmarshals the JSON payload into result.
func (c *Client) Do(method string, urlTemplate *uritemplates.UriTemplate, urlModel interface{}, payload interface{}, result interface{}) error {
	body, err := c.DoRaw(method, urlTemplate, urlModel, payload)
	if err != nil {
		return err
	}

	if len(body) > 0 {
		err = json.Unmarshal(body, result)
		if err != nil {
			return err
		}
	}

	return nil
}

// DoRaw makes a full request but returns the result as a byte array.
// payload, when non-nil, is JSON-encoded and sent as the request body.
func (c *Client) DoRaw(method string, urlTemplate *uritemplates.UriTemplate, urlModel interface{}, payload interface{}) ([]byte, error) {
	path, err := expandURL(urlTemplate, urlModel)
	if err != nil {
		return nil, err
	}

	var payloadReader io.Reader
	if payload != nil {
		b, err := json.Marshal(payload)
		if err != nil {
			return nil, err
		}
		payloadReader = bytes.NewReader(b)
	}

	return c.makeRequest(method, path, payloadReader)
}

// generateURL joins path onto the configured endpoint, trimming a trailing
// slash so the two never produce "//".
func (c *Client) generateURL(path string) string {
	endpoint := strings.TrimRight(c.config.Endpoint, "/")
	return endpoint + path
}

// makeRequest makes a request to the wercker API, and returns the returned
// payload. Success is any status in [200, 400); anything else is converted
// to an error via handleError.
func (c *Client) makeRequest(method string, path string, payload io.Reader) ([]byte, error) {
	url := c.generateURL(path)

	req, err := http.NewRequest(method, url, payload)
	if err != nil {
		return nil, err
	}

	if c.config.Credentials != nil {
		// Add credentials
		creds, err := c.config.Credentials.GetCredentials()
		if err != nil {
			return nil, err
		}

		if creds.Token != "" {
			req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", creds.Token))
		}

		if creds.Username != "" && creds.Password != "" {
			req.SetBasicAuth(creds.Username, creds.Password)
		}
	}

	req.Header.Set("Content-Type", "application/json")

	resp, err := c.config.HTTPClient.Do(req)
	if err != nil {
		return nil, err
	}
	// Close the body on every path. The original only closed it after a
	// successful ReadAll or inside handleError when ContentLength > 0, which
	// leaked the connection for empty (0) or chunked/unknown (-1) bodies.
	defer resp.Body.Close()

	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		if resp.ContentLength != 0 {
			body, err := ioutil.ReadAll(resp.Body)
			if err != nil {
				return nil, err
			}
			return body, nil
		}
		return nil, nil
	}

	return nil, c.handleError(resp)
}

// ErrResponse is a generic error object using wercker api conventions.
type ErrResponse struct {
	StatusCode    int    `json:"statusCode"`
	StatusMessage string `json:"error"`
	Message       string `json:"message"`
}

// Error returns the wercker error message.
func (e *ErrResponse) Error() string {
	return e.Message
}

// handleError converts a non-success HTTP response into an error. The caller
// is responsible for closing resp.Body.
func (c *Client) handleError(resp *http.Response) error {
	if resp.ContentLength > 0 {
		body, err := ioutil.ReadAll(resp.Body)

		// Continue if we were able to read the response
		if err == nil {
			v := &ErrResponse{}
			err := json.Unmarshal(body, v)

			// Continue if we were able to unmarshal the JSON
			if err == nil {
				return v
			}
		}
	}

	return fmt.Errorf("Unable to parse error response (status code: %d)", resp.StatusCode)
}

// expandURL renders urlTemplate using urlModel (a struct converted to a map
// of template variables); with a nil model the raw template string is used.
func expandURL(urlTemplate *uritemplates.UriTemplate, urlModel interface{}) (string, error) {
	var m map[string]interface{}
	var ok bool
	var path string
	var err error

	if urlModel != nil {
		m, ok = struct2map(urlModel)
		if !ok {
			return "", errors.New("Invalid URL model")
		}
		if m != nil {
			path, err = urlTemplate.Expand(m)
			if err != nil {
				return "", err
			}
		}
	} else {
		path = urlTemplate.String()
	}

	return path, nil
}
[ 6 ]
// Package gospelmaria reads facts from event streams stored in MariaDB.
package gospelmaria

import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"strings"
	"time"

	"github.com/VividCortex/ewma"
	"github.com/jmalloc/gospel/src/gospel"
	"github.com/jmalloc/gospel/src/internal/metrics"
	"github.com/jmalloc/gospel/src/internal/options"
	"github.com/jmalloc/twelf/src/twelf"
	"golang.org/x/time/rate"
)

const (
	// averageLatencyAge is average age of samples to keep when computing the
	// average latency. A sample is taken after each poll.
	//
	// Averages are computed using an exponentially-weighted moving average.
	// See https://github.com/VividCortex/ewma for more information.
	averageLatencyAge = 20.0
)

// Reader reads facts from a stream stored in MariaDB.
// (NOTE(review): the original comment called this "an interface"; it is a
// concrete struct.)
type Reader struct {
	// stmt is a prepared statement used to query for facts.
	// It accepts the stream offset as a parameter.
	stmt *sql.Stmt

	// logger is the target for debug logging. Readers do not perform general
	// activity logging.
	logger twelf.Logger

	// facts is a channel on which facts are delivered to the caller of Next().
	// A worker goroutine polls the database and delivers the facts to this
	// channel.
	facts chan gospel.Fact

	// current is the fact returned by Get() until Next() is called again.
	current *gospel.Fact

	// next is the fact that will become "current" when Next() is called.
	// If it is nil, no additional facts were available in the buffer on the
	// previous call to Next().
	next *gospel.Fact

	// end is a signaling channel that receives a (non-blocking) signal each
	// time the database polling goroutine fetches 0 facts.
	// (NOTE(review): the original comment said the channel is "closed"; the
	// polling goroutine actually performs a non-blocking send — see poll().)
	end chan struct{}

	// done is a signaling channel which is closed when the database polling
	// goroutine returns. The error that caused the closure, if any, is sent to
	// the channel before it closed. This means a pending call to Next() will
	// return the error when it first occurs, but subsequent calls will return
	// a more generic "reader is closed" error.
	done chan error

	// ctx is a context that is canceled when Close() is called, or when the
	// database polling goroutine returns. It is used to abort any in-progress
	// database queries or rate-limit pauses when the reader is closed.
	//
	// Context cancellation errors are not sent to the 'done' channel, so any
	// pending Next() call will receive a generic "reader is closed" error.
	ctx    context.Context
	cancel func()

	// addr is the starting address for the next database poll.
	addr gospel.Address

	// globalLimit is a rate-limiter that limits the number of polling queries
	// that can be performed each second. It is shared by all readers, and hence
	// provides a global cap of the number of read queries per second.
	globalLimit *rate.Limiter

	// adaptiveLimit is a rate-limiter that is adjusted on-the-fly in an attempt
	// to balance the number of database polls against the latency of facts.
	// It is not shared by other readers.
	adaptiveLimit *rate.Limiter

	// acceptableLatency is the amount of latency that is generally acceptable
	// for the purposes of this reader. The reader will attempt to maintain this
	// latency by adjusting its polling rate.
	acceptableLatency time.Duration

	// starvationLatency is the amount of latency that is acceptable once the
	// reader has reached the end of the stream and is "starving" for facts.
	// This setting informs the minimum poll rate.
	starvationLatency time.Duration

	// instantaneousLatency is the latency computed from the facts returned by
	// the most recent database poll. If there are no facts the latency is 0.
	instantaneousLatency time.Duration

	// averageLatency tracks the rolling average latency of recent database
	// polls (weighted per averageLatencyAge). The average latency is weighed
	// against the acceptableLatency and starvationLatency values to decide how
	// the poll rate is adjusted.
	averageLatency ewma.MovingAverage

	// debug contains several properties that are only relevant when the reader
	// is using a debug logger.
	debug *readerDebug
}

// readerDebug contains several properties that are only relevant when the
// reader is using a debug logger.
type readerDebug struct {
	// opts is the options specified when opening the reader.
	opts *options.ReaderOptions

	// averagePollRate keeps track of the average polling rate, which can be
	// substantially lower than the adaptive limit for slow readers.
	averagePollRate *metrics.RateCounter

	// averageFactRate keeps track of the average rate of delivery of facts.
	averageFactRate *metrics.RateCounter

	// previousPollRate is compared to the poll rate after each poll to
	// determine whether a log message should be displayed.
	previousPollRate rate.Limit

	// muteEmptyPolls is true if the previous database poll did not return any
	// facts. It is only used to mute repeated debug messages if there is no new
	// information to report.
	muteEmptyPolls bool
}

// errReaderClosed is an error returned by Next() when it is called on a closed
// reader, or when the reader is closed while a call to Next() is pending.
var errReaderClosed = errors.New("reader is closed")

// openReader returns a new reader that begins at addr. It prepares the
// polling statement, optionally wires up debug metrics, and starts the
// background polling goroutine.
func openReader(
	ctx context.Context,
	db *sql.DB,
	storeID uint64,
	addr gospel.Address,
	limit *rate.Limiter,
	logger twelf.Logger,
	opts *options.ReaderOptions,
) (*Reader, error) {
	// Note that runCtx is NOT derived from ctx, which is only used for the
	// opening of the reader itself.
	runCtx, cancel := context.WithCancel(context.Background())

	// NOTE(review): "accetableLatency" is a misspelling of
	// "acceptableLatency" (local variable only; behavior is unaffected).
	accetableLatency := getAcceptableLatency(opts)

	r := &Reader{
		logger:            logger,
		facts:             make(chan gospel.Fact, getReadBufferSize(opts)),
		end:               make(chan struct{}),
		done:              make(chan error, 1),
		ctx:               runCtx,
		cancel:            cancel,
		addr:              addr,
		globalLimit:       limit,
		adaptiveLimit:     rate.NewLimiter(rate.Every(accetableLatency), 1),
		acceptableLatency: accetableLatency,
		starvationLatency: getStarvationLatency(opts),
		averageLatency:    ewma.NewMovingAverage(averageLatencyAge),
	}

	// r.debug is non-nil if and only if the logger has debug enabled; the
	// logging helpers rely on this invariant.
	if logger.IsDebug() {
		r.debug = &readerDebug{
			opts:            opts,
			averagePollRate: metrics.NewRateCounter(),
			averageFactRate: metrics.NewRateCounter(),
		}
	}

	if err := r.prepareStatement(ctx, db, storeID, opts); err != nil {
		return nil, err
	}

	r.logInitialization()

	go r.run()

	return r, nil
}

// Next blocks until a fact is available for reading or ctx is canceled.
//
// If err is nil, the "current" fact is ready to be returned by Get().
//
// nx is the offset within the stream that the reader has reached. It can be
// used to efficiently resume reading in a future call to EventStore.Open().
//
// Note that nx is not always the address immediately following the fact
// returned by Get() - it may be "further ahead" in the stream, thus skipping
// over any facts that the reader is not interested in.
func (r *Reader) Next(ctx context.Context) (nx gospel.Address, err error) {
	nx, _, err = r.tryNext(ctx, nil)
	return nx, err
}

// TryNext blocks until the next fact is available for reading, the end of
// stream is reached, or ctx is canceled.
//
// If ok is true, a new fact is available and is ready to be returned by
// Get(). ok is false if the current fact is the last known fact in the
// stream.
//
// nx is the offset within the stream that the reader has reached. It can be
// used to efficiently resume reading in a future call to EventStore.Open().
// nx is invalid if ok is false.
func (r *Reader) TryNext(ctx context.Context) (nx gospel.Address, ok bool, err error) {
	return r.tryNext(ctx, r.end)
}

// tryNext implements Next() and TryNext(). When end is nil (Next) the
// end-of-stream signal is ignored, so the call blocks until a fact arrives;
// when end is r.end (TryNext) an end-of-stream signal returns ok == false.
func (r *Reader) tryNext(ctx context.Context, end <-chan struct{}) (nx gospel.Address, ok bool, err error) {
	if r.next == nil {
		select {
		case f := <-r.facts:
			r.current = &f
			ok = true
		case <-end:
			// no fact is available, return with ok == false
			return
		case <-ctx.Done():
			err = ctx.Err()
			return
		case err = <-r.done:
			// The polling goroutine has stopped; a nil receive means the
			// channel was closed without an error being sent.
			if err == nil {
				err = errReaderClosed
			}
			return
		}
	} else {
		r.current = r.next
		r.next = nil
		ok = true
	}

	// Perform a non-blocking lookahead to see if we have the next fact already.
	select {
	case f := <-r.facts:
		r.next = &f
		nx = r.next.Addr
	default:
		// assume next is literally the next fact on the stream
		nx = r.current.Addr.Next()
	}

	return
}

// Get returns the "current" fact.
//
// It panics if Next() has not been called.
// Get() returns the same Fact until Next() is called again.
func (r *Reader) Get() gospel.Fact {
	if r.current == nil {
		panic("Next() must be called before calling Get()")
	}

	return *r.current
}

// Close closes the reader. It returns the error (if any) that caused the
// polling goroutine to stop, or cancels it and waits for it to finish.
func (r *Reader) Close() error {
	select {
	case err := <-r.done:
		return err
	default:
		r.cancel()
		return <-r.done
	}
}

// prepareStatement creates r.stmt, an SQL prepared statement used to poll
// for new facts. Only the offset is a bind parameter; the store ID, stream
// name and event-type filter are interpolated via escapeString/escapeStrings.
func (r *Reader) prepareStatement(
	ctx context.Context,
	db *sql.DB,
	storeID uint64,
	opts *options.ReaderOptions,
) error {
	filter := ""
	if opts.FilterByEventType {
		types := strings.Join(escapeStrings(opts.EventTypes), `, `)
		filter = `AND e.event_type IN (` + types + `)`
	}

	// The LIMIT is the capacity of the fact buffer, so one poll can at most
	// fill the read buffer.
	query := fmt.Sprintf(
		`SELECT f.offset, f.time, e.event_type, e.content_type, e.body, CURRENT_TIMESTAMP(6) FROM fact AS f INNER JOIN event AS e ON e.id = f.event_id %s WHERE f.store_id = %d AND f.stream = %s AND f.offset >= ? ORDER BY offset LIMIT %d`,
		filter,
		storeID,
		escapeString(r.addr.Stream),
		cap(r.facts),
	)

	stmt, err := db.PrepareContext(ctx, query)
	if err != nil {
		return err
	}

	r.stmt = stmt

	return nil
}

// run polls the database for facts and sends them to r.facts until r.ctx is
// canceled or an error occurs.
func (r *Reader) run() {
	defer r.cancel()
	defer close(r.done)
	defer r.stmt.Close()

	var err error

	for err == nil {
		err = r.tick()
	}

	// Context cancellation is the normal shutdown path and is not reported
	// to callers; r.done has capacity 1 so this send never blocks.
	if err != context.Canceled {
		r.done <- err
	}
}

// tick executes one pass of the worker goroutine: wait on both rate
// limiters, poll once, then adjust the adaptive rate and log.
func (r *Reader) tick() error {
	if err := r.globalLimit.Wait(r.ctx); err != nil {
		return err
	}

	if err := r.adaptiveLimit.Wait(r.ctx); err != nil {
		return err
	}

	count, err := r.poll()
	if err != nil {
		return err
	}

	r.adjustRate()
	r.logPoll(count)

	return nil
}

// poll queries the database for facts beginning at r.addr and delivers them
// to r.facts, returning the number of facts fetched.
// (NOTE(review): the original comment referred to this function as "fetch".)
func (r *Reader) poll() (int, error) {
	rows, err := r.stmt.QueryContext(
		r.ctx,
		r.addr.Offset,
	)
	if err != nil {
		return 0, err
	}
	defer rows.Close()

	f := gospel.Fact{
		Addr: r.addr,
	}

	count := 0
	var first, now time.Time

	for rows.Next() {
		if err := rows.Scan(
			&f.Addr.Offset,
			&f.Time,
			&f.Event.EventType,
			&f.Event.ContentType,
			&f.Event.Body,
			&now,
		); err != nil {
			return count, err
		}

		select {
		case r.facts <- f:
		case <-r.ctx.Done():
			return count, r.ctx.Err()
		}

		r.addr = f.Addr.Next()

		// keep the time of the first fact in the result to compute the maximum
		// instantaneous latency for this poll.
		if count == 0 {
			first = f.Time
		}

		count++

		if r.debug != nil {
			r.debug.averageFactRate.Tick()
		}
	}

	// TODO: this doesn't account for the time spent waiting to write to r.facts.
	r.instantaneousLatency = now.Sub(first)
	r.averageLatency.Add(r.instantaneousLatency.Seconds())

	if count == 0 {
		// Non-blocking end-of-stream signal so a pending TryNext() can
		// return with ok == false.
		select {
		case r.end <- struct{}{}:
		default:
		}
	}

	return count, nil
}

// setRate sets the adaptive polling rate, capped between the minimum (set by
// r.starvationLatency) and the maximum (set by the global rate limit).
func (r *Reader) setRate(lim rate.Limit) bool {
	// Clamp the requested rate between the starvation floor and the global
	// ceiling.
	min := rate.Every(r.starvationLatency)
	max := r.globalLimit.Limit()

	if lim < min {
		lim = min
	} else if lim > max {
		lim = max
	}

	prev := r.adaptiveLimit.Limit()

	// Only touch the limiter when the clamped rate actually changed; the
	// return value tells the caller whether an adjustment occurred.
	if lim != prev {
		r.adaptiveLimit.SetLimit(lim)
		return true
	}

	return false
}

// adjustRate updates the adaptive poll rate in an attempt to balance database
// poll frequency with latency. It returns true if the rate was changed.
func (r *Reader) adjustRate() bool {
	latency := r.effectiveLatency()

	// headroom is the difference between the acceptable latency and the
	// effective latency. If the headroom is positive, we're doing 'better' than
	// the acceptable latency and can backoff the poll rate.
	headroom := r.acceptableLatency - latency

	// don't back off if our headroom is less than 25%
	// if headroom > 0 && headroom < r.acceptableLatency/25 {
	// 	return false
	// }

	// Get the current rate in terms of an interval.
	currentInterval := metrics.RateToDuration(
		r.adaptiveLimit.Limit(),
	)

	// Widen (or narrow) the polling interval by the headroom.
	return r.setRate(
		rate.Every(currentInterval + headroom),
	)
}

// effectiveLatency returns the latency used to adjust the poll rate.
//
// The rolling average needs to be primed with several samples before the
// average is available, until then it reports zero, in which case the
// instantaneousLatency value is used instead.
func (r *Reader) effectiveLatency() time.Duration {
	latency := r.averageLatency.Value()

	if latency == 0 {
		return r.instantaneousLatency
	}

	// averageLatency stores seconds as a float; convert back to a Duration.
	return time.Duration(
		latency * float64(time.Second),
	)
}

// logInitialization logs a debug message describing the reader settings.
func (r *Reader) logInitialization() {
	if !r.logger.IsDebug() {
		return
	}

	// r.debug is guaranteed non-nil here: openReader populates it whenever
	// the logger has debug enabled.
	filter := "*"
	if r.debug.opts.FilterByEventType {
		filter = strings.Join(r.debug.opts.EventTypes, ", ")
	}

	r.logger.Debug(
		"[reader %p] %s | global poll limit: %s | acceptable latency: %s | starvation latency: %s | read-buffer: %d | filter: %s",
		r,
		r.addr,
		formatRate(r.globalLimit.Limit()),
		formatDuration(r.acceptableLatency),
		formatDuration(r.starvationLatency),
		getReadBufferSize(r.debug.opts),
		filter,
	)
}

// logPoll logs a debug message containing metrics for the previous poll and
// adjustments to the adaptive poll rate.
func (r *Reader) logPoll(count int) {
	if r.debug == nil {
		return
	}

	r.debug.averagePollRate.Tick()

	pollRate := r.adaptiveLimit.Limit()

	// Mute repeated messages for consecutive empty polls at an unchanged
	// rate; there is no new information to report.
	if pollRate == r.debug.previousPollRate && count == 0 && r.debug.muteEmptyPolls {
		return
	}

	r.debug.muteEmptyPolls = count == 0

	r.logger.Debug(
		"[reader %p] %s | fetch: %3d %s | queue: %3d/%3d | adaptive poll: %s | avg poll: %s | latency: %s",
		r,
		r.addr,
		count,
		formatRate(rate.Limit(r.debug.averageFactRate.Rate())),
		len(r.facts),
		cap(r.facts),
		formatRate(r.adaptiveLimit.Limit()),
		formatRate(rate.Limit(r.debug.averagePollRate.Rate())),
		formatDuration(r.effectiveLatency()),
	)

	r.debug.previousPollRate = pollRate
}

// formatRate formats a rate limit for display in reader debug logs. A zero
// rate renders as a fixed-width placeholder.
func formatRate(r rate.Limit) string {
	if r == 0 {
		// placeholder matching the width of e.g. "500.00/s 2.00ms"
		return " ?.??/s ?.??µs"
	}

	d := metrics.RateToDuration(r)

	return fmt.Sprintf(
		"%6.02f/s %s",
		r,
		formatDuration(d),
	)
}

// formatDuration formats a duration for display in reader debug logs.
func formatDuration(d time.Duration) string { if d >= time.Hour { return fmt.Sprintf("%6.02fh ", d.Seconds()/3600) } else if d >= time.Minute { return fmt.Sprintf("%6.02fm ", d.Seconds()/60) } else if d >= time.Second { return fmt.Sprintf("%6.02fs ", d.Seconds()) } else if d >= time.Millisecond { return fmt.Sprintf("%6.02fms", d.Seconds()/time.Millisecond.Seconds()) } return fmt.Sprintf("%6.02fµs", d.Seconds()/time.Microsecond.Seconds()) }
[ 5 ]
// Command server wires up the database, a permissive CORS middleware and the
// API route groups, then serves on port 3000.
package main

import (
	"fmt"

	"github.com/gin-gonic/gin"
	"github.com/jinzhu/gorm"
	"github.com/revand/App_Go_Larave_Angular_TEST/backend/go/awards"
	"github.com/revand/App_Go_Larave_Angular_TEST/backend/go/common"
	"github.com/revand/App_Go_Larave_Angular_TEST/backend/go/redis"
	"github.com/revand/App_Go_Larave_Angular_TEST/backend/go/users"
	// "github.com/go-redis/redis"
)

// Migrate creates/updates the database tables for the models in use.
func Migrate(db *gorm.DB) {
	// users.AutoMigrate()
	db.AutoMigrate(&awards.Awards{}) // generate table Awards
	db.AutoMigrate(&users.Users{})   // generate table Users
	// db.AutoMigrate(&articles.TagModel{})
	// db.AutoMigrate(&articles.FavoriteModel{})
	// db.AutoMigrate(&articles.ArticleUserModel{})
	// db.AutoMigrate(&articles.CommentModel{})
}

// Author describes a JSON author payload.
// NOTE(review): not referenced in this file; confirm it is used elsewhere
// before removing.
type Author struct {
	Name string `json:"name"`
	Age  int    `json:"age"`
}

// main connects to the database, runs migrations, registers all route
// groups (public, token-optional and token-required) and starts the server.
func main() {
	// c, err := redis.Dial("tcp", "redis:6379")

	// Connection to the database.
	db := common.Init()
	Migrate(db)
	defer db.Close()

	r := gin.Default()
	MakeRoutes(r)

	v1 := r.Group("/api")

	// Routes that do not require a token. (Translated from: "NO TOKEN")
	awards.AwardsAuthed(v1.Group("/awards"))
	users.UsersRegister(v1.Group("/users"))
	v1.Use(users.AuthMiddleware(false))

	// redis
	redis.Routers(v1.Group("/redis"))

	// Routes that require a token. (Translated from: "SI TOKEN")
	v1.Use(users.AuthMiddleware(true))
	users.UserRegister(v1.Group("/user"))

	fmt.Printf("0.0.0.0:3000")
	r.Run(":3000")
}

// MakeRoutes installs a permissive CORS middleware on the engine, answering
// OPTIONS preflight requests with 200 without running further handlers.
func MakeRoutes(r *gin.Engine) {
	cors := func(c *gin.Context) {
		c.Writer.Header().Set("Access-Control-Allow-Origin", "*")
		c.Writer.Header().Set("Access-Control-Allow-Credentials", "true")
		c.Writer.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, accept, origin, Cache-Control, X-Requested-With")
		c.Writer.Header().Set("Access-Control-Allow-Methods", "*")
		c.Writer.Header().Set("Content-Type", "application/json")

		if c.Request.Method == "OPTIONS" {
			// Preflight: respond and stop here. The original fell through
			// to c.Next() after aborting, which is at best confusing.
			c.AbortWithStatus(200)
			return
		}

		c.Next()
	}
	r.Use(cors)
}
[ 3 ]
// Command-line tool: for each query N, print the pair of counts produced by
// iterating the Fibonacci-style recurrence from the seeds (1,0) and (0,1).
package main

import (
	"fmt"
)

// fibc returns the two counters for index N. The pairs for N==0 and N==1
// are the seeds (1,0) and (0,1); every later pair is the element-wise sum
// of the previous two.
func fibc(N int) (int, int) {
	if N == 0 {
		return 1, 0
	}
	if N == 1 {
		return 0, 1
	}

	prev0, prev1 := 1, 0 // pair for index i-2
	cur0, cur1 := 0, 1   // pair for index i-1
	for i := 2; i <= N; i++ {
		prev0, cur0 = cur0, prev0+cur0
		prev1, cur1 = cur1, prev1+cur1
	}
	return cur0, cur1
}

// main reads the number of test cases from stdin, then one N per case, and
// prints the two counts for each.
func main() {
	var cases int
	fmt.Scan(&cases)
	for q := 0; q < cases; q++ {
		var n int
		fmt.Scan(&n)
		c0, c1 := fibc(n)
		fmt.Println(c0, c1)
	}
}
[ 2 ]
// Command-line tool that posts the estimated AWS charges of each configured
// profile to a ChatWork room.
package main

import (
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	//"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/service/cloudwatch"
	. "github.com/tnantoka/chatsworth"
)

// main parses the profile/token/room flags, builds the charges message and
// posts it to ChatWork.
func main() {
	var p = flag.String("p", "./profiles", "AWS Profiles")
	var k = flag.String("k", "./.api_token", "Chatwork API Token")
	var r = flag.String("r", "", "ChatWork Room ID")
	flag.Parse()

	cw := Chatsworth{
		RoomID:   *r,
		APIToken: loadToken(*k),
	}
	cw.PostMessage(buildMessage(*p))
}

// loadToken reads the ChatWork API token from file; exits on failure.
func loadToken(file string) string {
	token, err := ioutil.ReadFile(file)
	if err != nil {
		log.Fatal(err)
	}
	return string(token)
}

// buildMessage reads the newline-separated profile list from file and builds
// a ChatWork message containing the estimated charge for each profile.
func buildMessage(file string) string {
	profiles, err := ioutil.ReadFile(file)
	if err != nil {
		log.Fatal(err)
	}

	var validProfiles []string
	for _, profile := range strings.Split(string(profiles), "\n") {
		if len(profile) > 0 {
			validProfiles = append(validProfiles, profile)
		}
	}

	messageChan := fetchCharges(validProfiles)

	message := "[info][title]AWSの課金額[/title]"
	for i := 0; i < len(validProfiles); i++ {
		m := <-messageChan
		fmt.Print(m)
		message += m
	}
	message += "[/info]"

	return message
}

// fetchCharges fetches the charge of every profile concurrently; one line
// per profile is delivered on the returned channel, in completion order.
func fetchCharges(profiles []string) <-chan string {
	messageChan := make(chan string)
	for _, profile := range profiles {
		go func(profile string) {
			// Billing metrics only exist in us-east-1.
			config := aws.Config{Region: "us-east-1"}
			config.Credentials = credentials.NewSharedCredentials("", profile)

			message := profile + ": " + fetchCharge(config) + "ドル\n"
			messageChan <- message
		}(profile)
	}
	return messageChan
}

// fetchCharge queries CloudWatch for the maximum EstimatedCharges (USD)
// datapoint of the last 24 hours and returns it formatted as a string, or
// "N/A" when CloudWatch returns no datapoints.
func fetchCharge(config aws.Config) string {
	dimension := cloudwatch.Dimension{
		Name:  aws.String("Currency"),
		Value: aws.String("USD"),
	}
	svc := cloudwatch.New(&config)
	input := cloudwatch.GetMetricStatisticsInput{
		Dimensions: []*cloudwatch.Dimension{&dimension},
		StartTime:  aws.Time(time.Now().Add(-24 * time.Hour)),
		EndTime:    aws.Time(time.Now()),
		MetricName: aws.String("EstimatedCharges"),
		Namespace:  aws.String("AWS/Billing"),
		Period:     aws.Long(60),
		Statistics: []*string{aws.String("Maximum")},
		//Unit: "",
	}
	output, err := svc.GetMetricStatistics(&input)
	if err != nil {
		log.Fatal(err)
	}
	// Guard against an empty result set; the original indexed Datapoints[0]
	// unconditionally, which panics when no billing datapoints exist.
	if len(output.Datapoints) == 0 {
		return "N/A"
	}
	dp := output.Datapoints[0]
	return fmt.Sprint(*dp.Maximum)
}
[ 3 ]
// Package content holds the user-visible strings for the vss CLI commands.
package content

// CmdEventSetupUse is a command to setup event stream
const CmdEventSetupUse = "setup"

// CmdEventSetupShort is the short version description for vss event setup command
const CmdEventSetupShort = "Setup event stream"

// CmdEventSetupLong is the long version description for vss event setup command
const CmdEventSetupLong = "Run this command to setup event stream. " +
	"It will create a CloudFormation stack with an event rule and SNS topic. " +
	"You will need to run this script for each cloud account. " +
	"Make sure your aws credentials have been configured before run this command."

// CmdEventUse is command for event stream
const CmdEventUse = "event"

// CmdEventShort is the short version description for vss event command
const CmdEventShort = "Manage event stream"

// CmdEventLong is the long version description for vss event command
const CmdEventLong = "Manage event stream"

// CmdEventSetupExample is the use case for command event setup
const CmdEventSetupExample = `  vss event setup
  vss event setup --aws-profile YOUR_AWS_PROFILE --cloud-id YOUR_CLOUD_ID`

// CmdEventRemoveUse is the command name for command event remove
const CmdEventRemoveUse = "remove"

// CmdEventRemoveShort is the short version description for vss event remove command
const CmdEventRemoveShort = "Remove event stream"

// CmdEventRemoveLong is the long version description for vss event remove command.
// The original omitted the sentence separators, producing run-together help
// text ("stream.You will ... account.Make sure").
const CmdEventRemoveLong = "Run this command to remove event stream. " +
	"You will need to run this script for each cloud account. " +
	"Make sure your aws credentials have been configured before run this command."
// CmdEventRemoveExample is the use case for command event remove
const CmdEventRemoveExample = `vss event remove
vss event remove --aws-profile YOUR_AWS_PROFILE --cloud-id YOUR_CLOUD_ID`

// CmdEventAuthFile is the name of the flag used to pass the Azure auth file.
const CmdEventAuthFile = "auth-file"

// CmdEventAuthFileDescription is the help text for the auth-file flag.
const CmdEventAuthFileDescription = "auth file for azure authentication"

// CmdEventRegion is the name of the flag used to pass the Azure region.
const CmdEventRegion = "region"

// CmdEventRegionDescription is the help text for the region flag.
const CmdEventRegionDescription = "The region in which you'd like to create Azure resource group in"
[ 3 ]
// Package metric_parser: metric identifiers and their textual names.
package metric_parser

import (
	//"github.com/Cepave/open-falcon-backend/common/utils"
	//"log"
)

// metricType identifies a supported metric (max, min, avg, ...).
type metricType byte

// The set of supported metric identifiers.
const (
	MetricMax         metricType = 1
	MetricMin         metricType = 2
	MetricAvg         metricType = 3
	MetricMed         metricType = 4
	MetricMdev        metricType = 5
	MetricLoss        metricType = 6
	MetricCount       metricType = 7
	MetricPckSent     metricType = 8
	MetricPckReceived metricType = 9
	MetricNumAgent    metricType = 10
	MetricNumTarget   metricType = 11
)

// mapOfMetric maps the textual name of a metric, as written in metric
// expressions, to its metricType identifier.
var mapOfMetric = map[string]metricType{
	"max":          MetricMax,
	"min":          MetricMin,
	"avg":          MetricAvg,
	"med":          MetricMed,
	"mdev":         MetricMdev,
	"loss":         MetricLoss,
	"count":        MetricCount,
	"pck_sent":     MetricPckSent,
	"pck_received": MetricPckReceived,
	"num_agent":    MetricNumAgent,
	"num_target":   MetricNumTarget,
}
[ 3 ]
// Package proxies: thin wrappers around external monitoring APIs.
package proxies

import (
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"

	datadog "github.com/DataDog/datadog-api-client-go/api/v1/datadog"
	"github.com/vivasaayi/cloudrover/utililties"
)

// DataDogProxy wraps the generated Datadog API client together with the raw
// API/application keys needed for endpoints the client does not cover.
type DataDogProxy struct {
	ctx       context.Context
	apiClient *datadog.APIClient
	apiKey    string
	appKey    string
}

// GetDataDogProxy builds a proxy, reading DD_API_KEY and DD_APP_KEY from the
// environment.
func GetDataDogProxy() *DataDogProxy {
	ddp := DataDogProxy{}
	ddp.ctx = datadog.NewDefaultContext(context.Background())

	configuration := datadog.NewConfiguration()
	ddp.apiClient = datadog.NewAPIClient(configuration)

	ddp.apiKey = utililties.GetStringEnvVar("DD_API_KEY", "", true)
	ddp.appKey = utililties.GetStringEnvVar("DD_APP_KEY", "", true)

	return &ddp
}

// GetEvents lists Datadog events from source between startTime and endTime
// (epoch seconds) with the given priority. Errors are logged to stderr and
// the (possibly zero-valued) response is returned.
func (ddp *DataDogProxy) GetEvents(
	source string,
	startTime int64,
	endTime int64,
	eventPriority string) datadog.EventListResponse {
	priority := datadog.EventPriority(eventPriority)
	sources := source
	// tags := ""
	unaggregated := true
	excludeAggregate := true
	// page := int32(56)

	optionalParams := datadog.ListEventsOptionalParameters{
		Priority: &priority,
		Sources:  &sources,
		// Tags: &tags,
		Unaggregated:     &unaggregated,
		ExcludeAggregate: &excludeAggregate,
		// Page: &page,
	}

	resp, r, err := ddp.apiClient.EventsApi.ListEvents(ddp.ctx, startTime, endTime, optionalParams)

	if err != nil {
		fmt.Fprintf(os.Stderr, "Error when calling `EventsApi.ListEvents`: %v\n", err)
		fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", r)
	}

	return resp
}

// GetMonitors lists all monitors (with group states and downtimes) using the
// generated client. Errors are logged to stderr and the (possibly empty)
// result is returned.
func (ddp *DataDogProxy) GetMonitors() []datadog.Monitor {
	groupStates := "all" // string | When specified, shows additional information about the group states. Choose one or more from `all`, `alert`, `warn`, and `no data`. (optional)
	// name := "name_example" // string | A string to filter monitors by name. (optional)
	// tags := "tags_example" // string | A comma separated list indicating what tags, if any, should be used to filter the list of monitors by scope. For example, `host:host0`. (optional)
	// monitorTags := "monitorTags_example" // string | A comma separated list indicating what service and/or custom tags, if any, should be used to filter the list of monitors. Tags created in the Datadog UI automatically have the service key prepended. For example, `service:my-app`. (optional)
	withDowntimes := true // bool | If this argument is set to true, then the returned data includes all current downtimes for each monitor. (optional)
	idOffset := int64(789) // int64 | Monitor ID offset. (optional)
	page := int64(789) // int64 | The page to start paginating from. If this argument is not specified, the request returns all monitors without pagination. (optional)
	pageSize := int32(56) // int32 | The number of monitors to return per page. If the page argument is not specified, the default behavior returns all monitors without a `page_size` limit. However, if page is specified and `page_size` is not, the argument defaults to 100. (optional)

	optionalParams := datadog.ListMonitorsOptionalParameters{
		GroupStates: &groupStates,
		// Name: &name,
		// Tags: &tags,
		// MonitorTags: &monitorTags,
		WithDowntimes: &withDowntimes,
		IdOffset:      &idOffset,
		Page:          &page,
		PageSize:      &pageSize,
	}

	resp, r, err := ddp.apiClient.MonitorsApi.ListMonitors(ddp.ctx, optionalParams)

	if err != nil {
		fmt.Fprintf(os.Stderr, "Error when calling `MonitorsApi.ListMonitors`: %v\n", err)
		fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", r)
	}

	return resp
}

// SearchMonitors queries the monitor-search endpoint over raw HTTP (it is
// not exposed by the generated client) for monitors in alert status and
// returns the decoded response. On any error the problem is logged and an
// empty response is returned.
func (ddp *DataDogProxy) SearchMonitors() DDMonitorSearchResponse {
	result := DDMonitorSearchResponse{}

	client := &http.Client{}
	req, err := http.NewRequest("GET",
		`https://api.datadoghq.com/api/v1/monitor/search?query=status:alert&per_page=1000`,
		nil,
	)
	if err != nil {
		fmt.Println("Error occured when retrieving the alerts")
		fmt.Println(err)
		return result
	}

	req.Header.Add("Content-Type", `application/json`)
	req.Header.Add("DD-API-KEY", ddp.apiKey)
	req.Header.Add("DD-APPLICATION-KEY", ddp.appKey)

	resp, err := client.Do(req)
	if err != nil {
		// The original fell through here and dereferenced the nil response.
		fmt.Println("Error occured when making http request")
		return result
	}
	// The original never closed the body, leaking the connection.
	defer resp.Body.Close()

	fmt.Println(resp)

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// The original panicked here; degrade gracefully like the other
		// error paths in this method instead.
		fmt.Println("Error occured when reading search response")
		fmt.Println(err)
		return result
	}

	fmt.Println(string(body))

	err = json.Unmarshal(body, &result)
	if err != nil {
		fmt.Println("Error occured when parsing search response")
		fmt.Println(err)
	}

	return result
}
[ 6 ]
package server import ( "net/http" "log" "fmt" ) var mymux *http.ServeMux const ( mailAddress string = "http://121.40.190.238:1280" ) func Run() { mymux = http.NewServeMux() //绑定路由 bind() err := http.ListenAndServe(":1280", mymux) //设置监听的端口 if err != nil { log.Fatal("ListenAndServe: ", err) } } func TestDatabase() { var user User users := user.QueryAll() var le = len(users) for i := 0; i < le; i++ { fmt.Println(users[i].contents["id"]) fmt.Println(users[i].contents["username"]) fmt.Println("xxxxxxxxxxxxxxx") } } func TestUsers() { var user User users := user.QueryAll() var le = len(users) for i := 0; i < le; i++ { fmt.Println(users[i].contents["id"]) fmt.Println(users[i].contents["username"]) fmt.Println("xxxxxxxxxxxxxxx") } }
[ 3 ]
// Command main runs a minimal Envoy xDS (v2 API) control plane: it serves
// ADS/EDS/CDS/RDS/LDS over gRPC on :5678 and pushes a single static
// cluster into the snapshot cache when the operator presses Enter.
package main

import (
	"context"
	"fmt"
	"net"
	"time"

	"github.com/envoyproxy/go-control-plane/pkg/cache/types"
	"github.com/envoyproxy/go-control-plane/pkg/cache/v2"
	"github.com/golang/glog"
	"github.com/golang/protobuf/ptypes"
	"google.golang.org/grpc"

	api "github.com/envoyproxy/go-control-plane/envoy/api/v2"
	core "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
	endpoint "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint"
	discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v2"
	xds "github.com/envoyproxy/go-control-plane/pkg/server/v2"
)

// ADDR is a host/port pair used to build static cluster endpoints.
type ADDR struct {
	Address string
	Port    uint32
}

// NodeConfig holds the complete set of xDS resources for one Envoy node.
// The slices are passed to cache.NewSnapshot as-is.
type NodeConfig struct {
	node      *core.Node
	endpoints []types.Resource
	clusters  []types.Resource
	routes    []types.Resource
	listeners []types.Resource
	runtimes  []types.Resource
}

// ID implements cache.NodeHash: snapshots are keyed by the node's id.
func (n NodeConfig) ID(node *core.Node) string {
	return node.GetId()
}

// ClusterStatic builds a STATIC-discovery cluster named name whose load
// assignment contains one TCP LbEndpoint per entry of address, all placed
// in a single locality.
func ClusterStatic(name string, address []ADDR) *api.Cluster {
	// One LbEndpoint per address, wrapping a plain TCP socket address.
	lbEndpoints := make([]*endpoint.LbEndpoint, len(address))
	for idx, addr := range address {
		lbEndpoint := &endpoint.LbEndpoint{
			HostIdentifier: &endpoint.LbEndpoint_Endpoint{
				Endpoint: &endpoint.Endpoint{
					Address: &core.Address{
						Address: &core.Address_SocketAddress{
							SocketAddress: &core.SocketAddress{
								Protocol: core.SocketAddress_TCP,
								Address:  addr.Address,
								PortSpecifier: &core.SocketAddress_PortValue{
									PortValue: addr.Port,
								},
							},
						},
					},
				},
			},
		}
		lbEndpoints[idx] = lbEndpoint
	}

	// All endpoints go into one (unnamed) locality.
	localityLbEndpoints := &endpoint.LocalityLbEndpoints{
		LbEndpoints: lbEndpoints,
	}
	endpoints := make([]*endpoint.LocalityLbEndpoints, 0)
	endpoints = append(endpoints, localityLbEndpoints)

	clusterLoadAssignment := &api.ClusterLoadAssignment{
		ClusterName: name,
		Endpoints:   endpoints,
	}

	cluster := &api.Cluster{
		Name:        name,
		AltStatName: name,
		ClusterDiscoveryType: &api.Cluster_Type{
			Type: api.Cluster_STATIC,
		},
		EdsClusterConfig:              nil,
		ConnectTimeout:                ptypes.DurationProto(1 * time.Second),
		PerConnectionBufferLimitBytes: nil, // default 1MB
		LbPolicy:                      api.Cluster_ROUND_ROBIN,
		LoadAssignment:                clusterLoadAssignment,
	}
	return cluster
}

// UpdateSnapshotCache writes a new snapshot (built from n's resource
// slices) into s under n's node id, using version as the snapshot version.
// Errors are logged, not returned.
func UpdateSnapshotCache(s cache.SnapshotCache, n *NodeConfig, version string) {
	err := s.SetSnapshot(n.ID(n.node), cache.NewSnapshot(version, n.endpoints, n.clusters, n.routes, n.listeners, n.runtimes))
	if err != nil {
		glog.Error(err)
	}
}

//func Update_SnapshotCache(s cache.SnapshotCache, n *NodeConfig
func main() {
	// ADS mode disabled; snapshots keyed by node id via NodeConfig.ID.
	snapshotCache := cache.NewSnapshotCache(false, cache.IDHash{}, nil)
	server := xds.NewServer(context.Background(), snapshotCache, nil)
	grpcServer := grpc.NewServer()
	// NOTE(review): the Listen error is discarded; a bind failure
	// surfaces later as an opaque Serve error. Consider handling it.
	lis, _ := net.Listen("tcp", ":5678")

	// Register both the aggregated (ADS) and the individual v2 discovery
	// services on the same gRPC server.
	discovery.RegisterAggregatedDiscoveryServiceServer(grpcServer, server)
	api.RegisterEndpointDiscoveryServiceServer(grpcServer, server)
	api.RegisterClusterDiscoveryServiceServer(grpcServer, server)
	api.RegisterRouteDiscoveryServiceServer(grpcServer, server)
	api.RegisterListenerDiscoveryServiceServer(grpcServer, server)

	go func() {
		if err := grpcServer.Serve(lis); err != nil {
			glog.Error(err)
		}
	}()

	node := &core.Node{
		// Must match the id and cluster name configured in the Envoy
		// bootstrap yaml.
		Id:      "envoy-64.58",
		Cluster: "test",
	}
	nodeConf := &NodeConfig{
		node:      node,
		endpoints: []types.Resource{},
		clusters:  []types.Resource{},
		routes:    []types.Resource{},
		listeners: []types.Resource{},
		runtimes:  []types.Resource{},
	}

	input := ""
	{
		clusterName := "Cluster_With_Static_Endpoint"
		fmt.Printf("Enter to update: %s", clusterName)
		// Block until the operator presses Enter, then push the cluster.
		_, _ = fmt.Scanf("\n", &input)
		var addrs []ADDR
		addrs = append(addrs, ADDR{
			Address: "127.0.0.1",
			Port:    8081,
		})
		cluster := ClusterStatic(clusterName, addrs)
		nodeConf.clusters = append(nodeConf.clusters, cluster)
		// Timestamp as version string guarantees a new, monotonic version.
		UpdateSnapshotCache(snapshotCache, nodeConf, time.Now().String())
		glog.Info(clusterName + " updated")
	}
	// Park the main goroutine forever; the gRPC server keeps running.
	select {}
}
[ 3 ]
// Command main tails MongoDB change streams for a configured set of
// collections and republishes each inserted document to a Kafka topic
// named mongo_<db>_<collection>, resuming from the last message already
// present in the topic.
package main

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"time"

	"github.com/juju/loggo"
	"github.com/mongodb/mongo-go-driver/bson"
	"github.com/mongodb/mongo-go-driver/bson/primitive"
	"github.com/mongodb/mongo-go-driver/mongo"
	"github.com/mongodb/mongo-go-driver/mongo/options"
	"github.com/segmentio/kafka-go"
	"gopkg.in/yaml.v2"
)

// Config is the yaml configuration loaded from config.yml.
type Config struct {
	KafkaBroker string `yaml:"kafkaBroker"`
	MongoUri    string `yaml:"mongoUri"`
	Database    string `yaml:"database"`
	// By limiting ourselves to these collections I can assume that a
	// topic already exists and has messages. If that assumption
	// changes, we'll need to check if a topic exists, be able to
	// create if it doesn't and then start the mongo changestream
	// watcher from the start of that collection.
	Collections []string `yaml:"collections"`
	LogLevel    string   `yaml:"logLevel"`
}

var logger = loggo.GetLogger("main")

// main loads config, connects to Mongo, then loops forever: discover
// any expected-but-unwatched collections, spawn a watcher goroutine per
// collection, and reap watchers that have died so they get restarted on
// the next pass.
func main() {
	logger.SetLogLevel(loggo.INFO)
	logger.Infof("Started")
	dat, err := ioutil.ReadFile("config.yml")
	if err != nil {
		logger.Errorf(err.Error())
		os.Exit(1)
	}
	config := Config{}
	err = yaml.Unmarshal(dat, &config)
	if err != nil {
		logger.Errorf(err.Error())
		os.Exit(1)
	}
	// Fall back to INFO (set above) when the configured level is bogus.
	level, ok := loggo.ParseLevel(config.LogLevel)
	if ok {
		logger.SetLogLevel(level)
	} else {
		logger.Warningf("Log level %s is unknown, using INFO", config.LogLevel)
	}
	db, err := openDatabase(config)
	if err != nil {
		logger.Errorf(err.Error())
		os.Exit(1)
	}
	// Set of collection names we are willing to watch.
	expectedCollections := make(map[string]bool)
	for i := 0; i < len(config.Collections); i++ {
		expectedCollections[config.Collections[i]] = true
	}
	// Currently-watched collections; entries are removed when a watcher
	// reports its own death over `channel`.
	collections := make(map[string]*mongo.Collection)
	channel := make(chan string)
	for {
		logger.Debugf("Listing all collections")
		err = startWatchingNewCollections(db, channel, expectedCollections, collections, config.KafkaBroker)
		if err != nil {
			// TODO: probably want to die if we error enough times
			logger.Errorf(err.Error())
		}
		sleepAndCleanup(channel, collections)
	}
}

// openDatabase connects to the Mongo deployment named in config (10s
// connect timeout) and returns a handle on the configured database.
func openDatabase(config Config) (*mongo.Database, error) {
	client, err := mongo.NewClient(config.MongoUri)
	if err != nil {
		return nil, err
	}
	// NOTE(review): the cancel func from WithTimeout is discarded, which
	// `go vet` flags as a context leak; assign and defer it.
	ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
	err = client.Connect(ctx)
	if err != nil {
		return nil, err
	}
	db := client.Database(config.Database)
	return db, nil
}

// startWatchingNewCollections lists the database's collections and, for
// each one that is expected but not yet in `collections`, records it and
// spawns a WatchCollection goroutine that reports its death on c.
func startWatchingNewCollections(db *mongo.Database, c chan string, expectedCollections map[string]bool, collections map[string]*mongo.Collection, broker string) error {
	cursor, err := db.ListCollections(context.Background(), bson.D{})
	if err != nil {
		return err
	}
	for cursor.Next(context.Background()) {
		var result bson.M
		err := cursor.Decode(&result)
		if err != nil {
			logger.Errorf(err.Error())
			continue
		}
		collectionName := result["name"].(string)
		_, ok := expectedCollections[collectionName]
		if !ok {
			logger.Infof("Collection %s is unexpected", collectionName)
			continue
		}
		// Already being watched — nothing to do.
		_, ok = collections[collectionName]
		if ok {
			continue
		}
		collection := db.Collection(collectionName)
		collections[collectionName] = collection
		go WatchCollection(broker, collection, c)
	}
	return nil
}

// sleepAndCleanup blocks for up to 15 seconds, draining death
// notifications from the watcher goroutines as they arrive so their
// collections can be re-watched on the next discovery pass.
func sleepAndCleanup(channel chan string, collections map[string]*mongo.Collection) {
	for {
		shouldBreak := false
		select {
		case doneCollection := <-channel:
			// We've stopped watching this collection so it needs
			// to be removed from the collections map so that
			// on the next pass through we can re-add it
			delete(collections, doneCollection)
		case <-time.After(15 * time.Second):
			shouldBreak = true
		}
		if shouldBreak {
			break
		}
	}
}

// Opens up a changestream cursor on the collection and writes new
// documents to the kafka broker.
// If there is an error, we send a message to the channel indicating
// that we've stopped watching
func WatchCollection(broker string, collection *mongo.Collection, c chan string) {
	defer func() {
		logger.Infof("Stopping watcher for %s.%s", collection.Database().Name(), collection.Name())
	}()
	// The channel is used to indicate that an error has happened
	// watching the collection. Hopefully the main goroutine will be
	// able to restart us.
	defer func() {
		c <- collection.Name()
	}()
	logger.Infof("Watching %s.%s", collection.Database().Name(), collection.Name())
	cs := options.ChangeStream()
	topic := fmt.Sprintf("mongo_%s_%s", collection.Database().Name(), collection.Name())
	// Resume from where the topic left off: prefer the stored resume
	// token, fall back to the stored cluster timestamp.
	lastMessage, err := getLastMessage(broker, topic)
	if err != nil {
		logger.Errorf(err.Error())
		return
	}
	payload := lastMessage["payload"].(map[string]interface{})
	token, ok := payload["resumeToken"]
	if ok {
		logger.Debugf("Using resumeToken")
		cs.SetResumeAfter(bson.M{"_data": token})
	} else {
		logger.Debugf("Using timestamp")
		timestamp := uint32(payload["timestamp"].(float64))
		inc := uint32(payload["order"].(float64))
		// inc is a counter so its safe to just increment one to get the next document.
		// If we don't increment one, we get the same document that was already in kafka.
		// https://docs.mongodb.com/manual/reference/bson-types/#timestamps
		cs.SetStartAtOperationTime(&primitive.Timestamp{timestamp, inc + 1})
	}
	cursor, err := collection.Watch(context.Background(), mongo.Pipeline{}, cs)
	if err != nil {
		logger.Errorf(err.Error())
		return
	}
	w := kafka.NewWriter(kafka.WriterConfig{
		Brokers:  []string{broker},
		Topic:    topic,
		Balancer: &kafka.LeastBytes{},
	})
	defer w.Close()
	logger.Debugf("Waiting for documents on: %s", collection.Name())
	for cursor.Next(context.Background()) {
		logger.Debugf("New document recieved for %s", collection.Name())
		var item bson.M
		cursor.Decode(&item)
		// Only inserts are forwarded; updates/deletes are skipped.
		operationType := item["operationType"].(string)
		if operationType != "insert" {
			logger.Warningf("Document has operationType %s, expected insert", operationType)
			continue
		}
		// Note that this needs to be synchronous. If this was
		// asynchronous and something goes wrong it might be possible
		// for event B to get into kafka and not event A and so event
		// A would be lost forever
		msg, err := getMessage(item)
		if err != nil {
			logger.Errorf(err.Error())
			return
		}
		err = w.WriteMessages(context.Background(), *msg)
		if err != nil {
			logger.Errorf(err.Error())
			return
		}
		logger.Debugf("Sent message %s to %s", string(msg.Value), topic)
	}
}

// Returns the last message on kafka for the given topic in partition 0.
// This probably only works correctly if there is only one partition
func getLastMessage(broker string, topic string) (map[string]interface{}, error) {
	// TODO: this is so much work just to get one message. Maybe there is a better way?
	logger.Debugf("Getting last message for %s", topic)
	// NOTE(review): the DialLeader error is never checked before conn is
	// used, and neither conn nor the reader below are ever closed.
	conn, err := kafka.DialLeader(context.Background(), "tcp", broker, topic, 0)
	first, last, err := conn.ReadOffsets()
	logger.Debugf("For %s: first: %d, last: %d", topic, first, last)
	if last == 0 {
		return nil, errors.New(fmt.Sprintf("Topic %s doesn't have any messages", topic))
	}
	// Would be nice if I could re-use the connection from above
	// but that is not part of the library
	r := kafka.NewReader(kafka.ReaderConfig{
		Brokers:   []string{broker},
		Topic:     topic,
		Partition: 0,
		MinBytes:  0,
		MaxBytes:  10e6, // 10MB
	})
	r.SetOffset(last - 1)
	m, err := r.ReadMessage(context.Background())
	if err != nil {
		logger.Errorf(err.Error())
		return nil, err
	}
	var f interface{}
	err = json.Unmarshal(m.Value, &f)
	return f.(map[string]interface{}), nil
}

// Converts and serializes the input document and then
// sends it along to kafka.
// getMessage converts a raw change-stream document into the legacy
// Kafka Connect envelope and returns it as a ready-to-send kafka.Message.
func getMessage(doc bson.M) (*kafka.Message, error) {
	msgValue, err := ConvertToOldFormat(doc)
	if err != nil {
		return nil, err
	}
	output, err := json.Marshal(msgValue)
	if err != nil {
		return nil, err
	}
	msg := kafka.Message{Value: output}
	return &msg, nil
}

// connectSchema is the Kafka Connect-style envelope: a schema
// description plus the actual payload.
type connectSchema struct {
	Schema  payloadSchema `json:"schema"`
	Payload payloadData   `json:"payload"`
}

// payloadSchema describes the payload's struct type and its fields.
type payloadSchema struct {
	Type     string  `json:"type"`
	Optional bool    `json:"optional"`
	Fields   []field `json:"fields"`
	Name     string  `json:"name"`
}

// field describes one payload field (type name, optionality, field name).
type field struct {
	Type     string `json:"type"`
	Optional bool   `json:"optional"`
	Field    string `json:"field"`
}

// payloadData is the actual event carried in the envelope. Object holds
// the JSON-serialized full document as a string (see ConvertToOldFormat).
type payloadData struct {
	Timestamp   uint32 `json:"timestamp"`
	Order       uint32 `json:"order"`
	Operation   string `json:"operation"`
	Database    string `json:"database"`
	Object      string `json:"object"`
	ResumeToken string `json:"resumeToken"`
}

// ConvertToOldFormat maps a change-stream insert event (with ns,
// clusterTime, fullDocument and _id/_data fields) onto the envelope
// format produced by the previous oplog-based reader. The operation is
// always reported as "i" (insert) — upstream filtering guarantees this.
func ConvertToOldFormat(doc bson.M) (connectSchema, error) {
	namespace := doc["ns"].(bson.M)
	name := fmt.Sprintf("mongodbschema_%s_%s", namespace["db"], namespace["coll"])
	timestamp := doc["clusterTime"].(primitive.Timestamp)
	fullDocument := doc["fullDocument"].(bson.M)
	// This transformation is to remain compatible with the previous
	// oplog reader
	fullDocument["_id"] = bson.M{"$oid": fullDocument["_id"]}
	documentBytes, err := json.Marshal(fullDocument)
	if err != nil {
		logger.Errorf(err.Error())
		return connectSchema{}, err
	}
	resumeToken := doc["_id"].(bson.M)["_data"].(string)
	logger.Debugf(resumeToken)
	// The whole connectSchema will also be json encoded
	// and so we need convert the bytes into a string
	// otherwise the []bytes get encoded using base64
	documentStr := string(documentBytes)
	results := connectSchema{
		Schema: payloadSchema{
			Type:     "struct",
			Optional: false,
			Name:     name,
			Fields: []field{
				field{"int32", true, "timestamp"},
				field{"int32", true, "order"},
				field{"string", true, "operation"},
				field{"string", true, "database"},
				field{"string", true, "object"},
				field{"string", true, "resumeToken"}}},
		Payload: payloadData{
			Timestamp:   timestamp.T,
			Order:       timestamp.I,
			Operation:   "i",
			Database:    fmt.Sprintf("%s.%s", namespace["db"], namespace["coll"]),
			Object:      documentStr,
			ResumeToken: resumeToken}}
	return results, nil
}
[ 6 ]
package deviceactionapi import ( log "github.com/cihub/seelog" ) // ActionRobotCleanerReq 发送MQTT命令的BODY type ActionRobotCleanerReq struct { CleanSpeed CleanSpeedOption `json:"clean_speed,omitempty"` FindMe FindMeOption `json:"find_me,omitempty"` StopClean StopCleanOption `json:"stop_clean,omitempty"` TimerOption TimerOption `json:"timer_option,omitempty"` TurnDirection TurnDirectionOption `json:"turn_direction,omitempty"` WorkMode WorkModeOption `json:"work_mode,omitempty"` } // CleanSpeedOption hh type CleanSpeedOption struct { Speed int `json:"speed"` // 速度选项:0:日常 1:强力 2:地毯 3:静音 } // FindMeOption hh. type FindMeOption struct { OnOff int `json:"on_off"` // 0: 关闭findme功能 扫地机停止发声; 1:开启findme功能,扫地机持续鸣叫 } // StopCleanOption hh. type StopCleanOption struct { Stop int `json:"stop "` // 1:停止; 0:对应工作模式 } // TimerOption hh. type TimerOption struct { ScheduleType string `json:"schedule_type "` //Timer调度类型, 目前可选值为 weekly, 后续版本会增加daily等 WeeklyOption WeeklyTimerOption `json:"weekly_option"` } // WeeklyTimerOption hh. type WeeklyTimerOption struct { StartHour int `json:"start_hour"` //开始执行的小时, 24小时制, 可选值为 0~23 StartMinute int `json:"start_minute"` //开始执行的分钟, 60分钟制, 可选值为 0~59 , Weekday int `json:"weekday"` //分别对应(Sunday=0, Monday=1, ..., Saturday=6) } // TurnDirectionOption hh. type TurnDirectionOption struct { Direction int `json:"direction"` //0:Forward, 1:Backward, 2:Left, 3:Right } // WorkModeOption hh. type WorkModeOption struct { Mode int `json:"mode"` //0:暂停, 1:定点, 2:自动, 3:返回充电, 4:沿边, 5:精扫 } // ActionRobotCleanerResp 解析返回值 type ActionRobotCleanerResp struct { Message string `json:"message"` ResCode int `json:"res_code"` //1成功 0失败, 2001, Device Action Result is pending, need check status later } //--------------------------------------------------------------------------------------------------------------------- // NewActionRobotCleanerReq hh. 
func NewActionRobotCleanerReq(option string, d ...int) interface{} { if option == "findMe" { log.Debugf("do something for %s", option) } if option == "turnDirection" { log.Debugf("do something for %s", option) } req := &ActionRobotCleanerReq{ CleanSpeed: CleanSpeedOption{ Speed: d[0], }, FindMe: FindMeOption{ OnOff: d[1], }, StopClean: StopCleanOption{ Stop: d[2], }, WorkMode: WorkModeOption{ Mode: d[3], }, } return req }
[ 3 ]
// Package service integrates with the Airtable API: it pages through a
// guest-list view and back-fills QR codes for records that lack one.
package service

import (
	"net/http"
	"bytes"
	"time"
	"errors"
	"io/ioutil"
	"encoding/json"

	"X/goappsrv/src/helper"
	"X/goappsrv/src/model"
)

// airTableRecord is one Airtable row: its record id plus field values.
type airTableRecord struct {
	Id     string     `json:"id"`
	Fields guestField `json:"fields"`
}

// AirTableList is a page of records; Offset is the pagination cursor
// returned by Airtable (empty on the final page).
type AirTableList struct {
	Records []airTableRecord `json:"records"`
	Offset  string           `json:"offset,omitempty"`
}

// qrImageStruct is an Airtable attachment describing a QR-code image.
type qrImageStruct struct {
	Id       string `json:"id,omitempty"`
	Url      string `json:"url"`
	FileName string `json:"filename"`
}

// roleOverview is an attachment on the ROLE OVERVIEW field.
type roleOverview struct {
	FileName string `json:"filename"`
	Url      string `json:"url"`
}

// guestField mirrors the Airtable column names of the guest table;
// the json tags must match the Airtable field labels exactly.
type guestField struct {
	Name           string          `json:"Name,omitempty"`
	FirstName      string          `json:"Guest First Name,omitempty"`
	LastName       string          `json:"Guest Last Name,omitempty"`
	PromDay        string          `json:"Prom Day,omitempty"`
	Gender         string          `json:"Gender,omitempty"`
	LOSupervision  string          `json:"Level of Supervision,omitempty"`
	SNDescription  string          `json:"SN Description,omitempty"`
	RespiteRoom    []string        `json:"Respite Room,omitempty"`
	SpecificBuddy  string          `json:"Specific Buddy,omitempty"`
	LOBathroom     string          `json:"Level of Bathroom Assistance,omitempty"`
	Medication     string          `json:"Medication During Prom,omitempty"`
	DRestriction   []string        `json:"Dietary Restrictions,omitempty"`
	Sensory        []string        `json:"Sensory,omitempty"`
	CherryOnTop    string          `json:"Cherry On Top,omitempty"`
	Limo           string          `json:"Limo,omitempty"`
	ContactName    string          `json:"Contact Name,omitempty"`
	ContactNumber  string          `json:"Contact #,omitempty"`
	ContactEmail   string          `json:"Email,omitempty"`
	MailingAddress string          `json:"Mailing Address,omitempty"`
	Notes          string          `json:"NOTES,omitempty"`
	ArrivalTime    string          `json:"Arrival Time,omitempty"`
	PagerNumber    string          `json:"Pager Number,omitempty"`
	TimeOfMed      string          `json:"Time of Medication,omitempty"`
	LastModified   string          `json:"Last Modified,omitempty"`
	QRValue        string          `json:"QR Value,omitempty"`
	QRImage        []qrImageStruct `json:"QR Image,omitempty"`
	Teams          string          `json:"TEAMS,omitempty"`
	Role           string          `json:"ROLE,omitempty"`
	ROverview      []roleOverview  `json:"ROLE OVERVIEW,omitempty"`
	TeamRoster     string          `json:"Team Roster List,omitempty"`
}

// LoadAirTable fetches every record of the QRAppView view, following
// Airtable's offset-based pagination, and triggers QR-code generation
// for any record that has no QR image yet. airTableDetail supplies the
// table path (WebURL) and the API key (ExtID).
func LoadAirTable(c helper.ContextDetail, airTableDetail model.ItemDetail) (*AirTableList, error) {
	var airTableList = new(AirTableList)
	var offset = ""
	var isEnd = false;
	for ok := true; ok; ok = (!isEnd) {
		url := "https://api.airtable.com/v0/" + airTableDetail.WebURL + "?view=QRAppView&offset=" + offset
		//url := "https://api.airtable.com/v0/" + airTableDetail.WebURL + "?view=QRAppView&maxRecords=15&offset=" + offset
		helper.Log(c, "info", "Loading air table", "uid", c.UID, "url", url)
		client := &http.Client{
			CheckRedirect: func(req *http.Request, via []*http.Request) error {
				return http.ErrUseLastResponse
			},
		}
		req, err := http.NewRequest("GET", url, nil)
		if err != nil {
			helper.Log(c, "error", "Error Loading air table", "airtableId", airTableDetail.ID, "error", err.Error())
			err := errors.New("Air Table API Error")
			return nil, err
		}
		req.Header.Set("Authorization", "Bearer " + airTableDetail.ExtID)
		resp, err := client.Do(req)
		if err != nil {
			helper.Log(c, "error", "Error Loading air table", "airtableId", airTableDetail.ID, "error", err.Error())
			err := errors.New("Air Table API Error")
			return nil, err
		}
		// NOTE(review): defer inside a loop — all response bodies stay
		// open until the function returns; close at end of iteration
		// instead.
		defer resp.Body.Close()
		if resp.StatusCode != 200 {
			helper.Log(c, "error", "Http call not successful", "airtableId", airTableDetail.ID, "response code", resp.Status)
			err := errors.New("Air Table API Error")
			return nil, err
		}
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			helper.Log(c, "error", "Error parsing response body", "airtableId", airTableDetail.ID, "error", err.Error())
			err := errors.New("Error Parsing Air Table")
			return nil, err
		}
		var respJson = new(AirTableList)
		err = json.Unmarshal(body, &respJson)
		if err != nil {
			helper.Log(c, "error", "Error parsing response body", "airtableId", airTableDetail.ID, "error", err.Error())
			err := errors.New("Error Parsing Air Table")
			return nil, err
		}
		// Accumulate this page's records into the full result.
		airTableList.Records = append(airTableList.Records, respJson.Records...)
		// Back-fill QR codes for records that lack an image; failures
		// are logged but do not abort the load.
		for i, _ := range respJson.Records {
			if (len(respJson.Records[i].Fields.QRImage) == 0) {
				helper.Log(c, "info", "Generating QR Code", "airtableId", airTableDetail.ID, "extId", respJson.Records[i].Id)
				err := LoadQRCode(c, airTableDetail, respJson.Records[i].Id )
				if err != nil {
					helper.Log(c, "error", "Error Generating QR Code", "extId", respJson.Records[i].Id, "error", err.Error())
				}
			}
		}
		// A non-empty offset means another page exists; throttle a bit
		// to stay under Airtable's rate limit.
		if respJson.Offset != "" {
			offset = respJson.Offset
			isEnd = false
			time.Sleep(200 * time.Millisecond)
		} else {
			offset = ""
			isEnd = true
		}
	}
	return airTableList, nil
}

// LoadQRCode PATCHes the given record with a generated QR-code image
// (rendered by api.qrserver.com pointing at nts.lqd.ch/<itemId>) and
// sets QR Value to the record id.
func LoadQRCode(c helper.ContextDetail, airTableDetail model.ItemDetail, itemId string) error {
	url := "https://api.airtable.com/v0/" + airTableDetail.WebURL
	client := &http.Client{
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}
	qrImage := qrImageStruct {
		Url: "https://api.qrserver.com/v1/create-qr-code/?size=250x250&data=https://nts.lqd.ch/" + itemId,
		FileName: "qrcode",
	}
	qrImageArray := []qrImageStruct{qrImage}
	bodyItem := airTableRecord {
		Id: itemId,
	}
	bodyItem.Fields.QRImage = qrImageArray
	bodyItem.Fields.QRValue = itemId
	var bodyJson = new(AirTableList)
	bodyJson.Records = append(bodyJson.Records, bodyItem)
	bytesField, _ := json.Marshal(bodyJson)
	// NOTE(review): the NewRequest error is ignored, and if client.Do
	// fails the code still runs `defer resp.Body.Close()` on a nil resp,
	// which panics. Return early on the Do error before touching resp.
	req, err := http.NewRequest("PATCH", url, bytes.NewBuffer(bytesField))
	req.Header.Set("Authorization", "Bearer " + airTableDetail.ExtID)
	req.Header.Set("Content-Type", "application/json")
	resp, err := client.Do(req)
	if err != nil {
		helper.Log(c, "warning", "AirTable QR Update Error", "extId", itemId, "error", err.Error())
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		helper.Log(c, "warning", "AirTable QR Update Error", "extId", itemId, "response code", resp.Status)
		err := errors.New("QR Code Generation error")
		return err
	}
	return nil
}
[ 3 ]
package explicit import ( "bitbucket.org/gofd/gofd/core" "testing" ) func alldifferentPrimitives_test(t *testing.T, xinit []int, yinit []int, zinit []int, qinit []int, expx []int, expy []int, expz []int, expq []int, expready bool) { X := core.CreateIntVarExValues("X", store, xinit) Y := core.CreateIntVarExValues("Y", store, yinit) Z := core.CreateIntVarExValues("Z", store, zinit) Q := core.CreateIntVarExValues("Q", store, qinit) store.AddPropagators(CreateAlldifferentPrimitives(X, Y, Z, Q)) ready := store.IsConsistent() ready_test(t, "Alldifferent2", ready, expready) if expready { domainEquals_test(t, "Alldifferent2", X, expx) domainEquals_test(t, "Alldifferent2", Y, expy) domainEquals_test(t, "Alldifferent2", Z, expz) domainEquals_test(t, "Alldifferent2", Q, expq) } } func Test_AlldifferentPrimitivesa(t *testing.T) { setup() defer teardown() log("AlldifferentPrimitivesa: X:0, Y:0..1, Z:1..2, Q:2..3") alldifferentPrimitives_test(t, []int{0}, []int{0, 1}, []int{1, 2}, []int{2, 3}, []int{0}, []int{1}, []int{2}, []int{3}, true) } func Test_AlldifferentPrimitivesb(t *testing.T) { setup() defer teardown() log("AlldifferentPrimitivesb: X:0..1, Y:1, Z:2..3, Q:3") alldifferentPrimitives_test(t, []int{0, 1}, []int{1}, []int{2, 3}, []int{3}, []int{0}, []int{1}, []int{2}, []int{3}, true) } func Test_AlldifferentPrimitivesc(t *testing.T) { setup() defer teardown() log("AlldifferentPrimitivesc: X:0, Y:1, Z:2, Q:3") alldifferentPrimitives_test(t, []int{0}, []int{1}, []int{2}, []int{3}, []int{0}, []int{1}, []int{2}, []int{3}, true) } func Test_AlldifferentPrimitivesd(t *testing.T) { setup() defer teardown() log("AlldifferentPrimitivesd: X:0, Y:0, Z:0, Q:0") alldifferentPrimitives_test(t, []int{0}, []int{0}, []int{0}, []int{0}, []int{}, []int{}, []int{}, []int{}, false) }
[ 6 ]
package controller import ( "github.com/gin-gonic/gin" "net/http" ) // GetIndex show Hello world !! func GetIndex(c *gin.Context) { c.String(http.StatusOK, "Hello world !!") } // GetFullName get request sample func GetFullName(c *gin.Context) { fname := c.DefaultQuery("firstname", "Guest") lname := c.DefaultQuery("lastname", "Last") //lname := c.Query("lastname") // c.Request.URL.Query().Get("lastname") と同じ c.String(http.StatusOK, "Hello %s %s !!", fname, lname) } // PostMessage post request sample func PostMessage(c *gin.Context) { message := c.PostForm("message") name := c.DefaultPostForm("name", "Guest") c.JSON(http.StatusOK, gin.H{ "message": message, "name": name, }) } // SetCookie cookie sample func SetCookie(c *gin.Context) { cookie, err := c.Cookie("sample") if err != nil { cookie = "none" c.SetCookie("sample", "cookieValue", 3600, "/sample/set-cookie", "localhost", false, true) } c.JSON(http.StatusOK, gin.H{ "value": cookie, }) } // BasicAuth Basic Auth sample func BasicAuth(c *gin.Context) { var admins = gin.H{ "admin": gin.H{"email": "[email protected]"}, "hoge": gin.H{"email": "[email protected]"}, } // BasicAuth ミドルウェアによって設定される user := c.MustGet(gin.AuthUserKey).(string) if admin, ok := admins[user]; ok { c.JSON(http.StatusOK, gin.H{"user": user, "admin": admin}) } else { c.JSON(http.StatusOK, gin.H{"user": user, "admin": "No admin data :("}) } } // Html sample func Html(c *gin.Context) { var admins = gin.H{ "admin": gin.H{"email": "[email protected]"}, "hoge": gin.H{"email": "[email protected]"}, } // BasicAuth ミドルウェアによって設定される user := c.MustGet(gin.AuthUserKey).(string) if admin, ok := admins[user]; ok { c.JSON(http.StatusOK, gin.H{"user": user, "admin": admin}) } else { c.JSON(http.StatusOK, gin.H{"user": user, "admin": "No admin data :("}) } }
[ 3 ]
// Copyright (c) 2014, Markover Inc. // Use of this source code is governed by the MIT // license that can be found in the LICENSE file. // Source code and contact info at http://github.com/poptip/ftc package ftc import ( "encoding/json" "expvar" "fmt" "io" "net/http" "strings" "time" "code.google.com/p/go.net/websocket" "github.com/golang/glog" ) var numClients = expvar.NewInt("num_clients") const ( // Protocol error codes and mappings. errorTransportUnknown = 0 errorUnknownSID = 1 errorBadHandshakeMethod = 2 errorBadRequest = 3 // Query parameters used in client requests. paramTransport = "transport" paramSessionID = "sid" // Available transports. transportWebSocket = "websocket" transportPolling = "polling" // The default time before closed connections are cleaned from // the client pool. clientReapTimeout = 5 * time.Second ) var errorMessage = map[int]string{ errorTransportUnknown: "Transport unknown", errorUnknownSID: "Session ID unknown", errorBadHandshakeMethod: "Bad handshake method", errorBadRequest: "Bad request", } var ( validTransports = map[string]bool{ transportWebSocket: true, transportPolling: true, } validUpgrades = map[string]bool{ transportWebSocket: true, } ) // getValidUpgrades returns a slice containing the valid protocols // that a connection can upgrade to. func getValidUpgrades() []string { upgrades := make([]string, len(validUpgrades)) i := 0 for u := range validUpgrades { upgrades[i] = u i++ } return upgrades } // A Handler is called by the server when a connection is // opened successfully. type Handler func(*Conn) type server struct { // Handler handles an FTC connection. Handler basePath string cookieName string clients *clientSet // The set of connections (some may be closed). wsServer *websocket.Server // The underlying WebSocket server. } // The defaults for options passed to the server. const ( defaultBasePath = "/engine.io/" defaultCookieName = "io" ) // Options are the parameters passed to the server. 
type Options struct { // BasePath is the base URL path that the server handles requests for. BasePath string // CookieName is the name of the cookie set upon successful handshake. CookieName string } // NewServer allocates and returns a new server with the given // options and handler. If nil options are passed, the defaults // specified in the constants above are used instead. func NewServer(o *Options, h Handler) *server { opts := Options{} if o != nil { opts = *o } if len(opts.BasePath) == 0 { opts.BasePath = defaultBasePath } if len(opts.CookieName) == 0 { opts.CookieName = defaultCookieName } s := &server{ Handler: h, basePath: opts.BasePath, cookieName: opts.CookieName, clients: &clientSet{clients: map[string]*conn{}}, } go s.startReaper() s.wsServer = &websocket.Server{Handler: s.wsHandler} return s } // startReaper continuously removes closed connections from the // client set via the reap function. func (s *server) startReaper() { for { if s.clients == nil { glog.Fatal("server cannot have a nil client set") } s.clients.reap() numClients.Set(int64(s.clients.len())) time.Sleep(clientReapTimeout) } } // handlePacket takes the given packet and writes the appropriate // response to the given connection. func (s *server) handlePacket(p packet, c *conn) error { glog.Infof("handling packet type: %c, data: %s, upgraded: %t", p.typ, p.data, c.upgraded()) var encode func(packet) error if c.upgraded() { encode = newPacketEncoder(c).encode } else { encode = func(pkt packet) error { return newPayloadEncoder(c).encode([]packet{pkt}) } } switch p.typ { case packetTypePing: return encode(packet{typ: packetTypePong, data: p.data}) case packetTypeMessage: if c.pubConn != nil { c.pubConn.onMessage(p.data) } case packetTypeClose: c.Close() } return nil } // wsHandler continuously receives on the given WebSocket // connection and delegates the packets received to the // appropriate handler functions. 
func (s *server) wsHandler(ws *websocket.Conn) { // If the client initially attempts to connect directly using // WebSocket transport, the session ID parameter will be empty. // Otherwise, the connection with the given session ID will // need to be upgraded. glog.Infoln("Starting websocket handler...") var c *conn wsEncoder, wsDecoder := newPacketEncoder(ws), newPacketDecoder(ws) for { if c != nil { var pkt packet if err := wsDecoder.decode(&pkt); err != nil { glog.Errorf("could not decode packet: %v", err) break } glog.Infof("WS: got packet type: %c, data: %s", pkt.typ, pkt.data) if pkt.typ == packetTypeUpgrade { // Upgrade the connection to use this WebSocket Conn. c.upgrade(ws) continue } if err := s.handlePacket(pkt, c); err != nil { glog.Errorf("could not handle packet: %v", err) break } continue } id := ws.Request().FormValue(paramSessionID) c = s.clients.get(id) if len(id) > 0 && c == nil { serverError(ws, errorUnknownSID) break } else if len(id) > 0 && c != nil { // The initial handshake requires a ping (2) and pong (3) echo. var pkt packet if err := wsDecoder.decode(&pkt); err != nil { glog.Errorf("could not decode packet: %v", err) continue } glog.Infof("WS: got packet type: %c, data: %s", pkt.typ, pkt.data) if pkt.typ == packetTypePing { glog.Infof("got ping packet with data %s", pkt.data) if err := wsEncoder.encode(packet{typ: packetTypePong, data: pkt.data}); err != nil { glog.Errorf("could not encode pong packet: %v", err) continue } // Force a polling cycle to ensure a fast upgrade. glog.Infoln("forcing polling cycle") payload := []packet{packet{typ: packetTypeNoop}} if err := newPayloadEncoder(c).encode(payload); err != nil { glog.Errorf("could not encode packet to force polling cycle: %v", err) continue } } } else if len(id) == 0 && c == nil { // Create a new connection with this WebSocket Conn. 
c = newConn() c.ws = ws s.clients.add(c) b, err := handshakeData(c) if err != nil { glog.Errorf("could not get handshake data: %v", err) } if err := wsEncoder.encode(packet{typ: packetTypeOpen, data: b}); err != nil { glog.Errorf("could not encode open packet: %v", err) break } if s.Handler != nil { go s.Handler(c.pubConn) } } } glog.Infof("closing websocket connection %p", ws) c.Close() } // pollingHandler handles all XHR polling requests to the server, initiating // a handshake if the request’s session ID does not already exist within // the client set. func (s *server) pollingHandler(w http.ResponseWriter, r *http.Request) { setPollingHeaders(w, r) id := r.FormValue(paramSessionID) if len(id) > 0 { c := s.clients.get(id) if c == nil { serverError(w, errorUnknownSID) return } if r.Method == "POST" { var payload []packet if err := newPayloadDecoder(r.Body).decode(&payload); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } defer r.Body.Close() for _, pkt := range payload { s.handlePacket(pkt, c) } fmt.Fprintf(w, "ok") return } else if r.Method == "GET" { glog.Infoln("GET request xhr polling data...") // TODO(andybons): Requests can pile up, here. Drain the conn and // then write the payload. if _, err := io.Copy(w, c); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } return } http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) return } s.pollingHandshake(w, r) } // pollingHandshake creates a new FTC Conn with the given HTTP Request and // ResponseWriter, setting a persistence cookie if necessary and calling // the server’s Handler. 
func (s *server) pollingHandshake(w http.ResponseWriter, r *http.Request) { c := newConn() s.clients.add(c) if len(s.cookieName) > 0 { http.SetCookie(w, &http.Cookie{ Name: s.cookieName, Value: c.id, }) } b, err := handshakeData(c) if err != nil { glog.Errorf("could not get handshake data: %v", err) } payload := []packet{packet{typ: packetTypeOpen, data: b}} if err := newPayloadEncoder(w).encode(payload); err != nil { glog.Errorf("could not encode open payload: %v", err) return } if s.Handler != nil { go s.Handler(c.pubConn) } } // ServeHTTP implements the http.Handler interface for an FTC Server. func (s *server) ServeHTTP(w http.ResponseWriter, r *http.Request) { remoteAddr := r.Header.Get("X-Forwarded-For") if len(remoteAddr) == 0 { remoteAddr = r.RemoteAddr } glog.Infof("%s (%s) %s %s %s", r.Proto, r.Header.Get("X-Forwarded-Proto"), r.Method, remoteAddr, r.URL) transport := r.FormValue(paramTransport) if strings.HasPrefix(r.URL.Path, s.basePath) && !validTransports[transport] { serverError(w, errorTransportUnknown) return } if transport == transportWebSocket { s.wsServer.ServeHTTP(w, r) } else if transport == transportPolling { s.pollingHandler(w, r) } } // handshakeData returns the JSON encoded data needed // for the initial connection handshake. func handshakeData(c *conn) ([]byte, error) { return json.Marshal(map[string]interface{}{ "pingInterval": 25000, "pingTimeout": 60000, "upgrades": getValidUpgrades(), "sid": c.id, }) } // serverError sends a JSON-encoded message to the given io.Writer // with the given error code. 
func serverError(w io.Writer, code int) { if rw, ok := w.(http.ResponseWriter); ok { rw.Header().Set("Content-Type", "application/json") rw.WriteHeader(http.StatusBadRequest) } msg := struct { Code int `json:"code"` Message string `json:"message"` }{ Code: code, Message: errorMessage[code], } if err := json.NewEncoder(w).Encode(msg); err != nil { glog.Errorln("error encoding error msg %+v: %s", msg, err) return } glog.Errorf("wrote server error: %+v", msg) } // setPollingHeaders sets the appropriate headers when responding // to an XHR polling request. func setPollingHeaders(w http.ResponseWriter, r *http.Request) { origin := r.Header.Get("Origin") if len(origin) > 0 { w.Header().Set("Access-Control-Allow-Credentials", "true") } else { origin = "*" } w.Header().Set("Access-Control-Allow-Origin", origin) w.Header().Set("Connection", "keep-alive") w.Header().Set("Content-Type", "text/plain; charset=UTF-8") }
[ 5 ]
package structs type UserLoginParams struct { UserName string `valid:"Required;MaxSize(20)" form:"user_name"` Password string `valid:"Required;MinSize(6);MaxSize(16)" form:"password"` } //用户登录参数
[ 3 ]
package main import ( "fmt" "io/ioutil" "math" "os" "strings" ) const TOTALROWS = 128 const TOTALCOLUMNS = 8 func main() { f, _ := os.Open("day5_input.txt") b, _ := ioutil.ReadAll(f) input_string := string(b) lines := strings.Split(input_string, "\n") lines = lines[0 : len(lines)-1] var seats [][]int = make([][]int, TOTALROWS) for s := range seats { column := make([]int, TOTALCOLUMNS) seats[s] = column } for _, line := range lines { row := binarySearch(line[:7], TOTALROWS-1) // subtract 1 because indexed by 0 column := binarySearch(line[7:], TOTALCOLUMNS-1) // subtract 1 because indexed by 0 seats[row][column] = -1 } for i, columns := range seats { for j := range columns { if seats[i][j] == 0 && i > 5 && i < 124 { // make sure not at the "very front or back" of plane fmt.Println(i*8 + j) } } } } func binarySearch(in string, upperBound int) int { lower := 0 upper := upperBound for _, c := range in { diff := upper - lower // difference between high and low switch string(c) { case "F", "L": upper = upper - int(math.Ceil(float64(diff)/2.0)) case "B", "R": lower = lower + int(math.Ceil(float64(diff)/2.0)) } } if len(in) > 3 { return lower } return upper } func makeRange(min, max int) []int { a := make([]int, max-min+1) for i := range a { a[i] = min + i } return a }
[ 0, 1 ]
package time_series import ( "fmt" common "github.com/lukaszozimek/alpha-vantage-api-client" ) const ( ONE_MINUTE = "1min" FIVE_MINUTE = "5min" FIFITHTEEN_MINUTE = "15min" THIRTY_MINUTE = "30min" SIXTY_MINUTE = "60min" ) func TimeSeriesIntraDayInterval1minute(symbol string, apiKey string, c *common.Client) *AlphaVantageTimeSeriesApiResponse { return timeSeriesIntraDay(symbol, ONE_MINUTE, apiKey, c) } func TimeSeriesIntraDayInterval5minute(symbol string, apiKey string, c *common.Client) *AlphaVantageTimeSeriesApiResponse { return timeSeriesIntraDay(symbol, FIVE_MINUTE, apiKey, c) } func TimeSeriesIntraDayIntervalFifteenMinute(symbol string, apiKey string, c *common.Client) *AlphaVantageTimeSeriesApiResponse { return timeSeriesIntraDay(symbol, FIFITHTEEN_MINUTE, apiKey, c) } func TimeSeriesIntraDayIntervalThirtyMinute(symbol string, apiKey string, c *common.Client) *AlphaVantageTimeSeriesApiResponse { return timeSeriesIntraDay(symbol, THIRTY_MINUTE, apiKey, c) } func TimeSeriesIntraDayIntervalSixtyMinute(symbol string, apiKey string, c *common.Client) *AlphaVantageTimeSeriesApiResponse { return timeSeriesIntraDay(symbol, SIXTY_MINUTE, apiKey, c) } func timeSeriesIntraDay(symbol string, interval string, apiKey string, c *common.Client) *AlphaVantageTimeSeriesApiResponse { return makeApiCallGet(fmt.Sprintf(c.BaseURL.String()+"/query?function=TIME_SERIES_INTRADAY&symbol=%v&interval=%v&apikey=%v", symbol, interval, apiKey), c) }
[ 6 ]
package iplookup import ( "strings" "github.com/garyburd/redigo/redis" "../db" "fmt" ) type IpInfo struct { ID string //id编号 IP string //ip段 StartIP string //开始IP EndIP string //结束IP Country string //国家 Province string //省 City string //市 District string //区 Isp string //运营商 Type string //类型 Desc string //说明 } func FindIpInfo(id string) (ipInfo IpInfo, err error) { v1, e := redis.String(db.Cli().Do("HGET", "ip_info", id)) if e != nil { return ipInfo, fmt.Errorf("find ip info err. redis: id:", id) } str := strings.Trim(v1, "\n") strArr := strings.Split(str, ",") ipInfo.ID = strArr[0] ipInfo.IP = strArr[1] ipInfo.StartIP = strArr[2] ipInfo.EndIP = strArr[3] ipInfo.Country = strArr[4] ipInfo.Province = strArr[5] ipInfo.City = strArr[6] ipInfo.District = strArr[7] ipInfo.Isp = strArr[8] ipInfo.Type = strArr[9] ipInfo.Desc = strArr[10] return ipInfo, nil }
[ 3 ]
package main import ( "fmt" "math/rand" "os" //"text/tabwriter" "strconv" "time" "sort" ) /* func myQuicksort (list []int) []int { if len(list) <= 1 { return list } } func findPivot(list []int) int { listLen = len(list) if listLen < 3 { return list[0] } first := list[0] middle := list[listLen/2] last := list[listLen] if first > middle && first < last { return first } if middle > first && middle < last { return middle } if last > first && last < } */ type ByNumb []int func (a ByNumb) Len() int { return len(a) } func (a ByNumb) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a ByNumb) Less(i, j int) bool { return a[i] < a[j] } func main() { count, _ := strconv.Atoi(os.Args[1]) r := rand.New(rand.NewSource(time.Now().UnixNano())) list := make([]int, count) for i := 0; i < count; i++ { list[i] = r.Intn(100) } sort.Sort(ByNumb(list)) fmt.Println(list) }
[ 3 ]
// Package logrus_pgx provides ability to use Logrus with PGX package logrus_pgx import ( "github.com/sirupsen/logrus" ) // pgxLogger type, used to extend standard logrus logger. type PgxLogger logrus.Logger // pgxEntry type, used to extend standard logrus entry. type PgxEntry logrus.Entry //Print and format debug message using logrus. func (w *PgxLogger) Debug(msg string, vars ...interface{}) { f := logrus.Fields{} for i := 0; i < len(vars)/2; i++ { f[vars[i*2].(string)] = vars[i*2+1] } (*logrus.Logger)(w).WithFields(f).Debug(msg) } //Print and format error message using logrus. func (w *PgxLogger) Error(msg string, vars ...interface{}) { f := logrus.Fields{} for i := 0; i < len(vars)/2; i++ { f[vars[i*2].(string)] = vars[i*2+1] } (*logrus.Logger)(w).WithFields(f).Error(msg) } //Print and format info message using logrus. func (w *PgxLogger) Info(msg string, vars ...interface{}) { f := logrus.Fields{} for i := 0; i < len(vars)/2; i++ { f[vars[i*2].(string)] = vars[i*2+1] } (*logrus.Logger)(w).WithFields(f).Info(msg) } //Print and format warning message using logrus. func (w *PgxLogger) Warn(msg string, vars ...interface{}) { f := logrus.Fields{} for i := 0; i < len(vars)/2; i++ { f[vars[i*2].(string)] = vars[i*2+1] } (*logrus.Logger)(w).WithFields(f).Warn(msg) } //Print and format debug message using logrus. func (w *PgxEntry) Debug(msg string, vars ...interface{}) { f := logrus.Fields{} for i := 0; i < len(vars)/2; i++ { f[vars[i*2].(string)] = vars[i*2+1] } (*logrus.Entry)(w).WithFields(f).Debug(msg) } //Print and format error message using logrus. func (w *PgxEntry) Error(msg string, vars ...interface{}) { f := logrus.Fields{} for i := 0; i < len(vars)/2; i++ { f[vars[i*2].(string)] = vars[i*2+1] } (*logrus.Entry)(w).WithFields(f).Error(msg) } //Print and format info message using logrus. 
func (w *PgxEntry) Info(msg string, vars ...interface{}) { f := logrus.Fields{} for i := 0; i < len(vars)/2; i++ { f[vars[i*2].(string)] = vars[i*2+1] } (*logrus.Entry)(w).WithFields(f).Info(msg) } //Print and format warning message using logrus. func (w *PgxEntry) Warn(msg string, vars ...interface{}) { f := logrus.Fields{} for i := 0; i < len(vars)/2; i++ { f[vars[i*2].(string)] = vars[i*2+1] } (*logrus.Entry)(w).WithFields(f).Warn(msg) }
[ 3 ]
package service import ( "gin-vue-admin/global" "gin-vue-admin/model" ) func CreateMessage(message model.Message) (err error) { //global.GVA_DB.AutoMigrate(&message) err = global.GVA_DB.Create(&message).Error return err }
[ 3 ]
package url_test import ( "fmt" "testing" "github.com/barolab/candidate/url" ) type WithoutQueryTestCase struct { argument string expected string err error } func TestWithoutQuery(T *testing.T) { cases := []WithoutQueryTestCase{ {argument: "", expected: "", err: fmt.Errorf("Cannot exclude URL query from an empty URL")}, {argument: "https://twitter.com/candidate", expected: "https://twitter.com/candidate", err: nil}, {argument: "https://twitter.com/candidate?this=that", expected: "https://twitter.com/candidate", err: nil}, } for _, c := range cases { res, err := url.WithoutQuery(c.argument) if res != c.expected { T.Errorf("WithoutQuery should have return %s for url %s, got %s", c.expected, c.argument, res) } if err == nil && c.err != nil { T.Errorf("WithoutQuery returned no error but we expected to return %s (for url %s)", c.err, c.argument) } if err != nil && c.err == nil { T.Errorf("WithoutQuery returned an error %s that was not expected (for url %s)", err, c.argument) } if err != nil && c.err != nil && err.Error() != c.err.Error() { T.Errorf("WithoutQuery should have returned an error %s, but we got %s (for url %s)", c.err, err, c.argument) } } }
[ 2 ]
package main import "fmt" func main() { var numbers []int printSlice(numbers) //允许追加空切片 numbers = append(numbers,0) printSlice(numbers) //向空切片添加一个元素 numbers = append(numbers,1) printSlice(numbers) //同时添加多个元素 numbers = append(numbers,2,3,4) printSlice(numbers) //创建切片 number1 是之前 切片容量的两倍,容量的值只有1,2,4,6,8 numbers1:= make([]int,len(numbers),(cap(numbers))*2) //拷贝 number 的内容到number1 copy(numbers1,numbers) printSlice(numbers1) } func printSlice (x []int){ fmt.Printf("len=%d cap=%d slice=%v\n",len(x),cap(x),x) }
[ 3 ]
package gragh func dijkstraMatrix(g *matrix, src int) (dist []int, sptSet []int) { sptSet = make([]int, g.n) dist = make([]int, g.n) pred := make([]int, g.n) for i := range dist { dist[i] = INF sptSet[i] = -1 pred[i] = -1 } dist[src] = 0 pred[src] = src for i := 0; i < g.n; i++ { mindist := INF minvert := src // find shortest distance vertex that not in spt set for j := 0; j < g.n; j++ { if sptSet[j] == -1 && dist[j] < mindist { mindist = dist[j] minvert = j } } if minvert == INF { break // the remaining vertex are unreachable from src, thus we can break here } // update shortest distance for j := 0; j < g.n; j++ { curdist := g.get(minvert, j) // this also works from directed graph if curdist > 0 && dist[minvert]+curdist < dist[j] { dist[j] = dist[minvert] + curdist pred[j] = minvert } } sptSet[minvert] = pred[minvert] } return } func dijkstraAdjacent(g *graph, src int) (mindist []int, sptSet []int) { sptSet = make([]int, g.n) indices := make([]int, g.n) mindist = make([]int, g.n) pred := make([]int, g.n) position := make([]int, g.n) for i := 0; i < g.n; i++ { sptSet[i] = -1 indices[i] = i position[i] = i mindist[i] = INF } h := &heap{indices, pred, mindist, position} h.mindist[src] = 0 // minimum distance for source vertex is 0 h.minfrom[src] = src // source vertex's pred is itself h.siftUp(src) // src's value is least, sift up to stack top for num := 0; num < g.n; num++ { i := h.pop() // pop vertex with minimum distance with mst set if mindist[i] == INF { break } for nb := g.adjacency[i]; nb != nil; nb = nb.next { j := nb.id // update shortest distance between src and j via i if h.mindist[i]+nb.weight < h.mindist[j] { h.mindist[j] = h.mindist[i] + nb.weight h.minfrom[j] = i if !h.siftDown(h.position[j]) { // need to sift after modification h.siftUp(h.position[j]) } } } // set mst set sptSet[i] = h.minfrom[i] } return } func floydWarshall(g *matrix) [][]int { // this solution has not provide path information // however it can be achieved by using another 2D array 
to store the predecessor. dist := make([][]int, g.n) for i := range dist { dist[i] = make([]int, g.n) for j := range dist[i] { // initialize the distance matrix if i == j { dist[i][j] = 0 // dist[i][i]=0 } else if g.get(i, j) == 0 { dist[i][j] = INF // dist[i][j]=INF if i, j is not directed linked } else { dist[i][j] = g.get(i, j) // real distance } } } // floyd-warshall algorithm for k := 0; k < g.n; k++ { // k represent the intermediate vertex, outermost loop for i := 0; i < g.n; i++ { for j := 0; j < g.n; j++ { // dist[i][k] and dist[k][j] should not be INF to avoid overflow if dist[i][k] != INF && dist[k][j] != INF && dist[i][k]+dist[k][j] < dist[i][j] { dist[i][j] = dist[i][k] + dist[k][j] // this also works from directed graph } } } } return dist }
[ 5, 6 ]
package install import ( "fmt" "io" "net/http" "os" "os/exec" ) func PathExists(path, fileName string) bool { filePath := path + fileName _, err := os.Stat(filePath) if err == nil { return true } if os.IsNotExist(err) { return false } return false } func Download(FileName string, FilePath string) { var url = FileUrl + FileName res, err := http.Get(url) if err != nil { panic(err) } defer res.Body.Close() f, err := os.Create(FilePath + FileName) if err != nil { panic(err) } defer f.Close() io.Copy(f, res.Body) } func FileTar(Filename, FilePath, FileNewName string) { shellcmd := "tar xf " + Filename +".tar.gz -C " + FilePath cmd := exec.Command("/bin/bash", "-c", shellcmd) _, err := cmd.Output() if err != nil { fmt.Println(err, "") } shellcmdmv := "mv " + Filename + " /usr/local/" + FileNewName cmdmv := exec.Command("/bin/bash", "-c", shellcmdmv) _, err = cmdmv.Output() if err != nil { fmt.Println(err) } }
[ 2, 6 ]
/* SPDX-License-Identifier: MIT * * Copyright (C) 2019 WireGuard LLC. All Rights Reserved. */ package guid import ( "fmt" "syscall" "golang.org/x/sys/windows" ) //sys clsidFromString(lpsz *uint16, pclsid *windows.GUID) (hr int32) = ole32.CLSIDFromString // // FromString parses "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}" string to GUID. // func FromString(str string) (*windows.GUID, error) { strUTF16, err := syscall.UTF16PtrFromString(str) if err != nil { return nil, err } guid := &windows.GUID{} hr := clsidFromString(strUTF16, guid) if hr < 0 { return nil, syscall.Errno(hr) } return guid, nil } // // ToString function converts GUID to string // "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}". // // The resulting string is uppercase. // func ToString(guid *windows.GUID) string { return fmt.Sprintf("{%06X-%04X-%04X-%04X-%012X}", guid.Data1, guid.Data2, guid.Data3, guid.Data4[:2], guid.Data4[2:]) }
[ 3 ]
package stats import ( "github.com/fm2901/bank/v2/pkg/types" ) func Avg(payments []types.Payment) types.Money { var allSum types.Money var allCount types.Money if len(payments) < 1 { return 0 } for _, payment := range payments { if payment.Status == types.StatusFail { continue } allSum += payment.Amount allCount += 1 } return allSum / allCount } func TotalInCategory(payments []types.Payment, category types.Category) types.Money { var sumInCategory types.Money if len(payments) < 1 { return 0 } for _, payment := range payments { if payment.Category != category || payment.Status == types.StatusFail { continue } sumInCategory += payment.Amount } return sumInCategory } func CategoriesAvg(payments []types.Payment) map[types.Category]types.Money { categories := map[types.Category]types.Money{} counter := map[types.Category]int{} for _, payment := range payments { if payment.Amount > 0 { categories[payment.Category] += payment.Amount counter[payment.Category] += 1 } } for cat := range categories { categories[cat] = categories[cat] / types.Money(counter[cat]) } return categories } func PeriodsDynamic( first map[types.Category]types.Money, second map[types.Category]types.Money, ) map[types.Category]types.Money { result := map[types.Category]types.Money{} for key := range second { result[key] += second[key] } for key := range first { result[key] -= first[key] } return result }
[ 0, 6 ]
//Priority Queue in Golang /* In the Push and Pop method we are using interface Learn these : interface{} is the empty interface type []interface{} is a slice of type empty interface interface{}{} is an empty interface type composite literal []interface{}{} is a slice of type empty interface composite literals What does interface{} meaning in Push and Pop operations ?? interface{} means you can put value of any type, including your own custom type. All types in Go satisfy an empty interface (interface{} is an empty interface). In your example, Msg field can have value of any type. Example: package main import ( "fmt" ) type Body struct { Msg interface{} } func main() { b := Body{} b.Msg = "5" fmt.Printf("%#v %T \n", b.Msg, b.Msg) // Output: "5" string b.Msg = 5 fmt.Printf("%#v %T", b.Msg, b.Msg) //Output: 5 int } */ package main import ( "container/heap" "fmt" ) type Item struct { Name string Expiry int Price int Index int } type PriorityQueue []*Item //In order to sort the priority queue , implement the /* type Interface interface { // Len is the number of elements in the collection. Len() int // Less reports whether the element with // index i should sort before the element with index j. Less(i, j int) bool // Swap swaps the elements with indexes i and j. 
Swap(i, j int) */ func (pq PriorityQueue) Len() int { return len(pq) } func (pq PriorityQueue) Less(i, j int) bool { fmt.Println("pq[i].Name pq[j].Name", pq[i].Name, pq[j].Name) fmt.Println("pq[i].Expiry pq[j].Expiry", pq[i].Expiry, pq[j].Expiry) if pq[i].Expiry < pq[j].Expiry { return true } else if pq[i].Expiry == pq[j].Expiry { return pq[i].Price > pq[j].Price } return false } func (pq PriorityQueue) Swap(i, j int) { pq[i], pq[j] = pq[j], pq[i] } func (pq *PriorityQueue) Pop() interface{} { old := *pq n := len(old) item := old[n-1] *pq = old[0 : n-1] return item } func (pq *PriorityQueue) Push(x interface{}) { //n := len(*pq) item := x.(*Item) *pq = append(*pq, item) } func main() { listItems := []*Item{ {Name: "Spinach", Expiry: 5, Price: 20}, } /* {Name: "Carrot", Expiry: 30, Price: 120}, {Name: "Potato", Expiry: 30, Price: 45}, {Name: "Rice", Expiry: 100, Price: 50}, */ priorityQueue := make(PriorityQueue, len(listItems)) for i, item := range listItems { priorityQueue[i] = item } /* * Here couple of things need to be considered : * heap works on pointers , for example: Both heap push and pop works on pointers * See the signature. * We should know how to work on interface. For example heap pop returns an interface * It should be converted to corresponding type of object. * heap.Pop(&priorityQueue).(*Item) , here Item is a pointer type. * Because while inserting into priorityQueue, we are inserting a pointer of Item type. * * The defination of Push and Pop operation should remain same, only logic should be added * in Less() method as per the requirement. * * Unlike python we need to handle equal cases in Less() operation. 
*/ heap.Init(&priorityQueue) heap.Push(&priorityQueue, &Item{Name: "Potato", Expiry: 30, Price: 45}) heap.Push(&priorityQueue, &Item{Name: "Carrot", Expiry: 30, Price: 120}) item := heap.Pop(&priorityQueue).(*Item) fmt.Printf("Name %s Expiry:%d\n", item.Name, item.Expiry) for priorityQueue.Len() > 0 { item = heap.Pop(&priorityQueue).(*Item) fmt.Printf("Name %s Expiry:%d\n", item.Name, item.Expiry) } }
[ 3 ]
package mocks

import (
	"app/models"
	"reflect"
	"testing"
	"time"
)

// TestUserMock checks that a user stored on a bare UserMock value can be
// read back unchanged.
func TestUserMock(t *testing.T) {
	ID := 0
	users := &UserMock{}
	user := &models.User{
		ID:        ID,
		Name:      "test user",
		Email:     "[email protected]",
		Icon:      "testicon",
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
	users.CreateUser(user)
	got, err := users.GetUser(ID)
	if err != nil {
		t.Fatalf("An error occurred: %v", err)
	}
	if !reflect.DeepEqual(*user, *got) {
		t.Fatalf("Not equal user")
	}
}

// TestUserMockRepository repeats the round-trip through the constructor.
func TestUserMockRepository(t *testing.T) {
	users := NewUserRepository()
	ID := 0
	user := &models.User{
		ID:        ID,
		Name:      "test user",
		Email:     "[email protected]",
		Icon:      "testicon",
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
	}
	users.CreateUser(user)
	got, err := users.GetUser(ID)
	if err != nil {
		t.Fatalf("An error occurred: %v", err)
	}
	if !reflect.DeepEqual(*user, *got) {
		t.Fatalf("Not equal user")
	}
}

// TestUser exercises create, update, lookup by email, and delete.
func TestUser(t *testing.T) {
	users := NewUserRepository()
	ID := 0
	Email := "[email protected]"
	now := time.Now()
	user := &models.User{
		ID:        ID,
		Name:      "test user",
		Email:     Email,
		Icon:      "testicon",
		CreatedAt: now,
		UpdatedAt: now,
	}
	users.CreateUser(user)
	Email2 := "[email protected]"
	user2 := &models.User{
		ID:        ID,
		Name:      "test2 user",
		Email:     Email2,
		Icon:      "test2icon",
		CreatedAt: now,
		UpdatedAt: time.Now(),
	}
	users.UpdateUser(user2)
	u, err := users.GetUser(ID)
	if err != nil {
		t.Fatalf("An error occurred: %v\n", err)
	}
	// NOTE(review): UpdateUser is invoked a second time here with the same
	// value — presumably redundant; confirm it is not load-bearing.
	users.UpdateUser(user2)
	if testCompareUsers(t, user, u) {
		t.Fatalf("User did not update")
	}
	if !testCompareUsers(t, user2, u) {
		t.Fatalf("User did not update")
	}
	u, err = users.GetUserByEmail(Email)
	if err == nil {
		t.Fatalf("Invalid email, but got user")
	}
	u, err = users.GetUserByEmail(Email2)
	if err != nil {
		t.Fatalf("Valid email, but got error")
	}
	if !testCompareUsers(t, user2, u) {
		t.Fatalf("Users don't match")
	}
	users.DeleteUser(ID)
	u, err = users.GetUser(ID)
	// NOTE(review): the failure message below is misleading — at this point
	// an error is EXPECTED (the user was deleted); err is nil on failure.
	if err == nil {
		t.Fatalf("An error occurred: %v\n", err)
	}
	if u != nil {
		t.Fatalf("User did not delete")
	}
}

// testCompareUsers reports whether the identifying fields of two users
// match. Timestamps are deliberately not compared.
func testCompareUsers(t *testing.T, user *models.User, user2 *models.User) bool {
	if user.ID != user2.ID {
		return false
	}
	if user.Name != user2.Name {
		return false
	}
	if user.Email != user2.Email {
		return false
	}
	if user.HashedPassword != user2.HashedPassword {
		return false
	}
	if user.Icon != user2.Icon {
		return false
	}
	return true
}

// TestUserError verifies that updating or deleting a user that was never
// created yields errors.
func TestUserError(t *testing.T) {
	user := &models.User{
		ID:    1,
		Name:  "test name",
		Email: "[email protected]",
		Icon:  "test icon",
	}
	users := NewUserRepository()
	err := users.UpdateUser(user)
	if err == nil {
		t.Fatalf("An error should occur")
	}
	err = users.DeleteUser(user.ID)
	if err == nil {
		t.Fatalf("An error should occur")
	}
}
[ 6 ]
package main

import (
	"fmt"
	"math/rand"
	"sort"
	"time"
)

// Simulation of the 1000-prisoners / 1001-hats puzzle: each prisoner sees
// only the hats in front of them and, following the announced strategy,
// guesses their own hat number. Prints survivors and deaths.
func main() {
	rand.Seed(time.Now().UnixNano())
	numPrisoners := 1000
	numHats := 1001
	// there are 1000 prisoners
	prisoners := make([]int, numPrisoners)
	// and 1001 hats
	hats := make([]int, numHats)
	for i := 0; i < numHats; i++ {
		hats[i] = i + 1
	}
	// the hats are randomly sorted and one is removed
	rand.Shuffle(len(hats), func(i, j int) {
		hats[i], hats[j] = hats[j], hats[i]
	})
	hats = hats[:len(hats)-1]
	// the prisoners are all assigned hats
	for i := range prisoners {
		prisoners[i] = hats[i]
	}
	correctlyGuessedBefore := []int{}
	discardedThroughStrategy := []int{}
	priorGuess := 0
	prisonersSurvived := 0
	prisonersDied := 0
	// the prisoners take turns, from left to right, to make a choice
	for i := range prisoners {
		// we can look in front and see what we're missing from a set of 1001
		missing := make(map[int]struct{}, 1000)
		for j := 0; j < numHats; j++ {
			missing[j+1] = struct{}{}
		}
		for j := i + 1; j < len(prisoners); j++ {
			delete(missing, prisoners[j])
		}
		// remove prior guesses we've remembered
		for _, j := range correctlyGuessedBefore {
			delete(missing, j)
		}
		for _, j := range discardedThroughStrategy {
			delete(missing, j)
		}
		// we should be left with some number of options. I need to choose in
		// such a way that informs the person in front of what to choose.
		// Sort so every prisoner derives the same canonical ordering.
		missingSlice := make([]int, 0, len(missing))
		for j := range missing {
			missingSlice = append(missingSlice, j)
		}
		sort.Ints(missingSlice)
		guess := 0
		if i == 0 {
			// first prisoner does something a little different, they tell i + 1
			// their number which eliminates the remaining choice
			guess = prisoners[i+1]
		} else if i == 1 {
			// the second prisoner follows this strategy and can eliminate
			// everything
			guess = priorGuess
			// we can just discard all
			discardedThroughStrategy = append(discardedThroughStrategy, missingSlice...)
		} else {
			// everyone else takes the highest remaining candidate.
			guess = missingSlice[len(missingSlice)-1]
		}
		priorGuess = guess
		// take a guess at the minimum
		if guess == prisoners[i] {
			correctlyGuessedBefore = append(correctlyGuessedBefore, guess)
			prisonersSurvived++
		} else {
			prisonersDied++
		}
	}
	fmt.Println(prisonersSurvived, prisonersDied)
}
[ 5 ]
package db import ( "database/sql" "fmt" "time" "gopkg.in/vmihailenco/msgpack.v2" ) func (db DB) EnsureQueuesExist(queueNames []string) error { for _, queueName := range queueNames { if err := db.FirstOrCreate(&Queue{}, Queue{Name: queueName}).Error; err != nil { return fmt.Errorf("couldn't create queue %s: %v", queueName, err) } } return nil } func (db DB) PopJobFrom(queueNames []string, processID uint) (*Job, error) { var id int var job Job tx := db.Begin() err := tx.Raw(` SELECT id FROM jobs WHERE queue_name IN (?) AND state = ? AND start_at <= ? ORDER BY enqueued_at ASC LIMIT 1 FOR UPDATE`, queueNames, JobEnqueued, time.Now()).Row().Scan(&id) if err != nil && err != sql.ErrNoRows { tx.Rollback() return nil, err } else if err == sql.ErrNoRows { tx.Rollback() return nil, nil } err = tx.Model(&job).Where("id = ?", id).Update(Job{State: JobRunning, ProcessID: &processID}).Error if err != nil { tx.Rollback() return nil, err } if err = tx.Commit().Error; err != nil { tx.Rollback() return nil, err } if err = db.First(&job, id).Error; err != nil { return nil, err } if err = msgpack.Unmarshal(job.ParamBlob, &job.Params); err != nil { // TODO(as3richa) - record this failure return nil, err } return &Job, nil } func (db DB) PushJob(job *Job) error { return db.Create(job).Error } func (db DB) FinishJob(job *Job) error { return db.Model(job).Where("id = ?", job.ID).Update("state", JobFinished).Error } func (db DB) FailJob(job *Job) error { return db.Model(job).Where("id = ?", job.ID).Update("state", JobFailed).Error } func (db DB) BuildJob( queueName string, jobName string, params []interface{}, startAfter time.Time, retryCount uint, ) (*Job, error) { serializedParams, err := msgpack.Marshal(params) if err != nil { return nil, fmt.Errorf("couldn't marshal parameters: %v", err) } return &Job{ QueueName: queueName, Name: jobName, ParamBlob: serializedParams, StartAfter: startAfter, }, nil }
[ 6 ]
package main import ( "fmt" ) func main() { //Program to print number in decimal, binary, hex x := 10 fmt.Printf("%d,%b,%#x", x, x, x) }
[ 3 ]
package pd

import (
	"strings"
	"time"

	"github.com/juju/errors"
	"github.com/zssky/log"

	"github.com/taorenhai/ancestor/client"
	"github.com/taorenhai/ancestor/meta"
)

const (
	maxRetryInterval = time.Minute
	minReplica       = 3
)

// delayRecord implements per-range exponential backoff for split retries.
type delayRecord struct {
	timeout  time.Time
	interval time.Duration
}

// newDelayRecord starts the backoff at one second.
func newDelayRecord() *delayRecord {
	return &delayRecord{interval: time.Second, timeout: time.Now().Add(time.Second)}
}

// valid reports whether the current backoff window is still in force.
func (d *delayRecord) valid() bool {
	if time.Since(d.timeout) > 0 {
		return false
	}
	return true
}

// next doubles the backoff interval, capped at maxRetryInterval, and resets
// the deadline.
func (d *delayRecord) next() {
	d.interval = d.interval * 2
	if d.interval > maxRetryInterval {
		d.interval = maxRetryInterval
	}
	d.timeout = time.Now().Add(d.interval)
}

// checkDelayRecord retries a pending split for rs once its backoff window
// expires. It returns true when the range has a delay record, i.e. the
// caller must not schedule another split itself.
func (s *Server) checkDelayRecord(rs *meta.RangeStatsInfo) bool {
	d, ok := s.delayRecord[rs.RangeID]
	if !ok {
		return false
	}
	if d.valid() {
		return true
	}
	if err := s.checkRangeSplit(rs); err != nil {
		d.next()
		log.Infof("checkRangeSplit error:%s", err.Error())
		return true
	}
	delete(s.delayRecord, rs.RangeID)
	return true
}

// store caches the storage client used to create replicas, opened lazily.
// NOTE(review): mutable package-level state — confirm checkReplica only ever
// runs from the single checkLoop worker.
var (
	store client.Storage
)

// checkReplica tops up under-replicated ranges to minReplica replicas by
// picking idle nodes and creating new replicas on them.
func (s *Server) checkReplica() {
	var err error
	if s.cluster.count() < minReplica {
		log.Infof("current nodes count:%d, minReplica:%d", s.cluster.count(), minReplica)
		return
	}
	if store == nil {
		store, err = client.Open(strings.Join(s.cfg.EtcdHosts, ";"))
		if err != nil {
			log.Errorf(" client.Open(%s) error:%s", strings.Join(s.cfg.EtcdHosts, ";"), err.Error())
			return
		}
	}
	for _, rd := range s.region.unstable() {
		log.Debugf("unstable range:(%+v)", rd)
		var ids []meta.NodeID
		var reps []meta.ReplicaDescriptor
		for _, r := range rd.Replicas {
			ids = append(ids, r.NodeID)
		}
		// Pick idle nodes (excluding those already holding a replica) until
		// the range reaches minReplica replicas.
		for len(ids) < minReplica {
			n, err := s.cluster.getIdleNode(ids...)
			if err != nil {
				log.Errorf("getIdleNode error:%s", err.Error())
				return
			}
			ids = append(ids, n.NodeID)
			rID, err := s.newID()
			if err != nil {
				log.Errorf("newID error:%s", err.Error())
				return
			}
			reps = append(reps, meta.ReplicaDescriptor{NodeID: n.NodeID, ReplicaID: meta.ReplicaID(rID)})
		}
		rd.Replicas = append(rd.Replicas, reps...)
		for _, rep := range reps {
			if err := store.GetAdmin().CreateReplica(rep.NodeID, *rd); err != nil {
				log.Errorf("CreateReplica node:(%d), rd:%+v, error:%s", rep.NodeID, rd, err.Error())
				continue
			}
		}
		if err := s.region.setRangeDescriptors(*rd); err != nil {
			log.Errorf("setRangeDescriptors error:%s, rd:%+v", err.Error(), rd)
		}
		log.Debugf("add replica success, range:(%+v)", rd)
	}
}

// checkSplit scans every node's range statistics and triggers splits,
// applying per-range backoff via checkDelayRecord on failure.
func (s *Server) checkSplit() {
	for _, ns := range s.cluster.getNodeStats() {
		for _, rs := range ns.RangeStatsInfo {
			if s.checkDelayRecord(rs) {
				continue
			}
			if err := s.checkRangeSplit(rs); err != nil {
				log.Errorf("checkRangeSplit error:%s", err.Error())
				s.delayRecord[rs.RangeID] = newDelayRecord()
			}
		}
	}
}

// checkLoop runs the periodic split/replica maintenance worker until the
// server's stopper signals shutdown.
func (s *Server) checkLoop() {
	s.stopper.RunWorker(func() {
		ticker := time.NewTicker(time.Duration(s.cfg.CheckInterval) * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				if s.cfg.RangeSplitType != splitTypeManual {
					s.checkSplit()
				}
				s.checkReplica()
			case <-s.stopper.ShouldStop():
				return
			}
		}
	})
}

// checkRangeSplit splits range r when its size crosses the configured
// threshold: find the split key, then issue the split request.
func (s *Server) checkRangeSplit(r *meta.RangeStatsInfo) error {
	if s.dm.checkSplit(r, s.cfg.RangeCapacity, s.cfg.RangeSplitThreshold) {
		k, b, c, err := s.getSplitKey(r)
		if err != nil {
			log.Errorf(errors.ErrorStack(err))
			return errors.Trace(err)
		}
		if err := s.requestSplit(r, k, b, c); err != nil {
			log.Errorf(errors.ErrorStack(err))
			return errors.Trace(err)
		}
	}
	return nil
}

// getSplitKey asks the range's leader for the key that divides the range
// into two halves by size, returning the key plus the byte and row counts
// of the post-split range.
func (s *Server) getSplitKey(rsi *meta.RangeStatsInfo) (key meta.Key, bytes int64, count int64, err error) {
	var rd *meta.RangeDescriptor
	if rd, err = s.region.getRangeDescriptor(rsi.RangeID); err != nil {
		return
	}
	req := meta.GetSplitKeyRequest{
		RequestHeader: meta.RequestHeader{
			Key:     rd.StartKey,
			RangeID: rsi.RangeID,
			Flag:    meta.IsRead,
		},
		SplitSize: rsi.TotalBytes / 2,
	}
	breq := meta.BatchRequest{RequestHeader: req.RequestHeader}
	breq.Add(&req)
	bresp := meta.BatchResponse{}
	log.Infof("get split key request to node %d, range: %d, split size: %d", rsi.NodeID, req.RangeID, req.SplitSize)
	if err = s.sendBatchRequestToLeader(&breq, &bresp, rd); err != nil {
		return
	}
	key = bresp.Resp[0].GetSplitKey.SplitKey
	bytes = bresp.Resp[0].GetSplitKey.RangeBytes
	count = bresp.Resp[0].GetSplitKey.RangeCount
	log.Infof("get split key success, split key: %v, range bytes: %d, range count: %d", key, bytes, count)
	return
}

// requestSplit asks the range leader to split rsi at key, then publishes the
// updated descriptors for both resulting ranges.
func (s *Server) requestSplit(rsi *meta.RangeStatsInfo, key meta.Key, bytes int64, count int64) error {
	id, err := s.idAllocator.newID()
	if err != nil {
		return errors.Errorf("get new rangeID failed")
	}
	rd, err := s.region.getRangeDescriptor(rsi.RangeID)
	if err != nil {
		return errors.Trace(err)
	}
	req := meta.SplitRequest{
		RequestHeader: meta.RequestHeader{
			Key:     key,
			RangeID: rsi.RangeID,
			Flag:    meta.IsWrite,
		},
		SplitKey:      key,
		NewRangeID:    meta.RangeID(id),
		NewRangeBytes: bytes,
		NewRangeCount: count,
	}
	breq := meta.BatchRequest{RequestHeader: req.RequestHeader}
	breq.Add(&req)
	bresp := meta.BatchResponse{}
	log.Infof("split request to node %d, range %d, split key: %s, new range ID: %d, bytes: %d, count: %d", rsi.NodeID, req.RangeID, key, id, bytes, count)
	if err := s.sendBatchRequestToLeader(&breq, &bresp, rd); err != nil {
		return errors.Trace(err)
	}
	log.Info("split request execute end")
	newPostRD := bresp.Resp[0].Split.RangeDescriptors[1]
	if err := s.updateRangeDescriptor(&newPostRD, rd); err != nil {
		log.Errorf("update range descriptor failed, %s\n", err.Error())
		return errors.Trace(err)
	}
	log.Info("updateRangeDescriptor end")
	newPrevRD := bresp.Resp[0].Split.RangeDescriptors[0]
	if err := s.region.setRangeDescriptors(newPostRD, newPrevRD); err != nil {
		log.Errorf("%s\n", err.Error())
		return errors.Trace(err)
	}
	log.Info("update prev range descriptor on pd success")
	log.Info("split request success")
	return nil
}
[ 0, 6 ]
package Redis

import (
	"context"
	"log"

	"github.com/go-redis/redis/v8"
)

// ctx is the background context shared by every call this package makes to
// the Redis server.
var ctx = context.Background()

// Redis is the main structure for the redis client; connection holds the
// live go-redis client once Connect has been called.
type Redis struct {
	connection *redis.Client
}

// Connect dials the local Redis server (localhost:6379, DB 0, no password)
// and stores the resulting client on the receiver.
func (r *Redis) Connect() {
	client := redis.NewClient(&redis.Options{
		Addr:     "localhost:6379",
		Password: "",
		DB:       0,
	})
	log.Println("Connected to Redis!")
	r.connection = client
}

// StartDataProcessing subscribes to the "test" channel and forwards every
// received payload into messageChan. It blocks forever and panics if a
// receive fails.
func (r Redis) StartDataProcessing(messageChan chan string) {
	pubsub := r.connection.Subscribe(ctx, "test")
	for {
		msg, err := pubsub.ReceiveMessage(ctx)
		if err != nil {
			panic(err)
		}
		messageChan <- msg.Payload
	}
}

// CheckIdDublesExist reports whether StringToCheck already appears in the
// "authed_users" list. The process exits if the list cannot be read.
func (r Redis) CheckIdDublesExist(StringToCheck string) bool {
	values, err := r.connection.LRange(ctx, "authed_users", 0, -1).Result()
	if err != nil {
		log.Fatalln(err)
	}
	for _, item := range values {
		if item == StringToCheck {
			return true
		}
	}
	return false
}

// CreateAuthRecord appends randomString to the "authed_users" list.
func (r Redis) CreateAuthRecord(randomString string) {
	r.connection.RPush(ctx, "authed_users", randomString)
}
[ 2, 3 ]
package guess_number_higher_or_lower // https://leetcode.com/problems/guess-number-higher-or-lower // level: 1 // time: O(log(n)) 0ms 100% // space: O(1) 1.9M 100% var pick int func guess(num int) int { if num == pick { return 0 } else if num > pick { return -1 } else { return 1 } } // leetcode submit region begin(Prohibit modification and deletion) /** * Forward declaration of guess API. * @param num your guess * @return -1 if num is lower than the guess number * 1 if num is higher than the guess number * otherwise return 0 * func guess(num int) int; */ func guessNumber(n int) int { l, r := 1, n for l <= r { mid := l + (r-l)>>1 diff := guess(mid) if diff == 0 { return mid } else if diff > 0 { l = mid + 1 } else { r = mid - 1 } } return 0 } // leetcode submit region end(Prohibit modification and deletion)
[ 5 ]
package main

import "fmt"

// Celsius is a temperature on the Celsius scale.
type Celsius float64

// ToF converts a Celsius temperature to Fahrenheit.
func (c Celsius) ToF() Fahrenheit { return CToF(c) }

// Fahrenheit is a temperature on the Fahrenheit scale.
type Fahrenheit float64

// ToC converts a Fahrenheit temperature to Celsius.
// (The previous comment incorrectly said "Celsius to Fahrenheit".)
func (f Fahrenheit) ToC() Celsius { return FToC(f) }

// Well-known reference points on the Celsius scale.
const (
	AbsoluteZeroC Celsius = -273.15
	FreezingC     Celsius = 0
	BoilingC      Celsius = 100
)

// CToF converts Celsius to Fahrenheit.
func CToF(c Celsius) Fahrenheit { return Fahrenheit(c*9/5 + 32) }

// FToC converts Fahrenheit to Celsius.
func FToC(f Fahrenheit) Celsius { return Celsius((f - 32) * 5 / 9) }

func main() {
	fmt.Printf("%g\n", BoilingC-FreezingC) // "100" °C
	boilingF := BoilingC.ToF()
	fmt.Printf("%g\n", boilingF-FreezingC.ToF()) // "180" °F
	//fmt.Printf("%g\n", boilingF-FreezingC)     // compile error: type mismatch

	var c Celsius
	var f Fahrenheit
	fmt.Println(c == 0)          // "true"
	fmt.Println(f >= 0)          // "true"
	fmt.Println(c == Celsius(f)) // "true"!
	//fmt.Println(c == f)        // compile error: type mismatch
}
[ 3 ]
package main

import (
	"fmt"

	"github.com/veandco/go-sdl2/sdl"
)

// field is the 3x3 game board: its squares plus the index of the currently
// selected one.
type field struct {
	squares  squares
	selected int
}

// square is a single board cell, backed by its on-screen rectangle.
type square struct {
	R sdl.Rect
}

// ID of the square
type ID int32

type squares []square

// createSquares lays out a 3x3 grid of width-by-height rectangles separated
// by spacing pixels, with the grid's top-left corner at (startX, startY).
// Squares are appended row by row, left to right.
func createSquares(startX, startY, width, height, spacing int32) (sq squares) {
	for row := int32(0); row < 3; row++ {
		for col := int32(0); col < 3; col++ {
			r := sdl.Rect{
				X: col*(spacing+width) + startX,
				Y: row*(spacing+height) + startY,
				W: width,
				H: height,
			}
			sq = append(sq, square{R: r})
		}
	}
	return
}

// render fills every square: the selected square, the square under the mouse
// point mp, and all others each get a distinct colour.
func (f *field) render(r *sdl.Renderer, mp sdl.Point) {
	for idx, sq := range f.squares {
		switch {
		case f.selected >= 0 && f.selected < 9 && f.selected == idx:
			r.SetDrawColor(100, 255, 255, 255)
		case mp.InRect(&sq.R):
			r.SetDrawColor(255, 0, 255, 255)
		default:
			r.SetDrawColor(100, 0, 255, 255)
		}
		r.FillRect(&sq.R)
	}
}

// setSelected records which square the mouse point mp falls in and broadcasts
// the selection to every connected peer's readWriter.
func (f *field) setSelected(mp sdl.Point, p player) {
	for idx, sq := range f.squares {
		if !mp.InRect(&sq.R) {
			continue
		}
		f.selected = idx
		for peer, rw := range peers {
			if peers[peer] == nil {
				fmt.Println("readWriter is null, cant write")
				continue
			}
			rw.WriteString(fmt.Sprintf("Selected: %d To: %s From: %s\n", idx, peer, p.name))
			rw.Flush()
		}
		fmt.Println("Selected Square:", idx)
	}
}
[ 5 ]
// Take a number: 56789. Rotate left, you get 67895. // // Keep the first digit in place and rotate left the other digits: 68957. // // Keep the first two digits in place and rotate the other ones: 68579. // // Keep the first three digits and rotate left the rest: 68597. Now it is over since keeping the first four it remains only one digit which rotated is itself. // // You have the following sequence of numbers // // 56789 -> 67895 -> 68957 -> 68579 -> 68597 // // and you must return the greatest: 68957. // // Calling this function max_rot (or maxRot or ... depending on the language // // max_rot(56789) should return 68957 package main import ( "strconv" "fmt" ) func main() { var n int64 = 56789 Lol := MaxRot(n) fmt.Println(Lol) } func MaxRot(n int64) int64 { b := strconv.Itoa(int(n)) x := b[1:] + string(b[0]) y := b[:1] + string(b[2:]) + string(b[1]) z := b[:2] + b[3:] + string(b[2]) a := b[:3] + b[4:] + string(b[3]) my, _ := strconv.Atoi(b) dick, _ := strconv.Atoi(x) so, _ := strconv.Atoi(y) fc, _ := strconv.Atoi(z) big, _ := strconv.Atoi(a) if int(my) > int(dick) && int(my) > int(so) && int(my) > int(fc) && int(my) > int(big){ return int64(my) }else if int(dick) > int(my) && int(dick) > int(so) && int(dick) > int(fc) && int(dick) > int(big){ return int64(dick) }else if int(so) > int(dick) && int(so) > int(my) && int(so) > int(fc) && int(so) > int(big){ return int64(so) }else if int(fc) > int(dick) && int(fc) > int(so) && int(fc) > int(my) && int(fc) > int(big){ return int64(fc) }else if int(big) > int(dick) && int(big) > int(so) && int(big) > int(fc) && int(big) > int(my){ return int64(big) } return 0 }
[ 5 ]
package http_proxy_middleware import ( "fmt" "github.com/didi/gatekeeper/model" "github.com/didi/gatekeeper/public" "github.com/gin-gonic/gin" "github.com/pkg/errors" ) //匹配接入方式 基于请求信息 func HTTPWhiteListMiddleware() gin.HandlerFunc { return func(c *gin.Context) { serviceDetail, err := model.GetServiceDetailFromGinContext(c) if err != nil { public.ResponseError(c, 2001, err) c.Abort() return } whiteListString := serviceDetail.PluginConf.GetPath("http_whiteblacklist", "ip_white_list").MustString() if whiteListString != "" { if !public.InIPSliceStr(c.ClientIP(), whiteListString) { public.ResponseError(c, 3001, errors.New(fmt.Sprintf("%s not in white ip list", c.ClientIP()))) c.Abort() return } } c.Next() } }
[ 3 ]
package streamtcp

import (
	"bufio"
	// "errors"
	"fmt"
	"log"
	"net"
	"time"
)

// CallBackClient is a hook a caller can register when creating a session.
// NOTE(review): it is stored in Session.messageRec but never invoked in this
// file — confirm it is called elsewhere in the package.
type CallBackClient func(*Session, string)

// Session wraps a TCP connection with a buffered reader/writer pair, a read
// goroutine and a write goroutine, and channel-based message queues.
// Message, Unpack and Packet are defined elsewhere in this package;
// Message is presumably a channel of []byte — confirm against its declaration.
// NOTE(review): the closing flag is read and written from multiple goroutines
// with no synchronization — this is a data race under `go test -race`.
type Session struct {
	conn       net.Conn
	incoming   Message       // framed payloads received from the peer
	outgoing   Message       // payloads queued for delivery to the peer
	reader     *bufio.Reader // buffered reads from conn
	writer     *bufio.Writer // buffered writes to conn
	quiting    chan net.Conn // signals the owner that this session is finished
	name       string        // caller-assigned display name
	closing    bool          // best-effort shutdown flag (unsynchronized)
	messageRec CallBackClient
}

// GetName returns the session's display name.
func (self *Session) GetName() string {
	return self.name
}

// SetName sets the session's display name.
func (self *Session) SetName(name string) {
	self.name = name
}

// GetIncoming blocks until the next framed payload arrives and returns it.
func (self *Session) GetIncoming() []byte {
	return <-self.incoming
}

// PutOutgoing queues message for delivery by the Write goroutine.
func (self *Session) PutOutgoing(message []byte) {
	self.outgoing <- message
}

// CreateSession wraps conn in a Session with 1024-slot incoming/outgoing
// queues, records callback, and immediately starts the Read and Write
// goroutines via Listen.
func CreateSession(conn net.Conn, callback CallBackClient) *Session {
	reader := bufio.NewReader(conn)
	writer := bufio.NewWriter(conn)
	session := &Session{
		conn:       conn,
		incoming:   make(Message, 1024),
		outgoing:   make(Message, 1024),
		quiting:    make(chan net.Conn),
		reader:     reader,
		writer:     writer,
		messageRec: callback,
	}
	session.closing = false
	session.Listen()
	return session
}

// Listen starts the background read and write loops.
func (self *Session) Listen() {
	go self.Read()
	go self.Write()
}

// quit hands the connection to whoever is receiving on quiting.
// NOTE(review): quiting is unbuffered, so this blocks until the owner
// receives; if nobody listens, the calling goroutine leaks.
func (self *Session) quit() {
	self.quiting <- self.conn
}

/*
func (self *Session) WritePing() error {
	if _, err := self.writer.Write([]byte("P")); err != nil {
		return errors.New("write ping error")
	}
	if err := self.writer.Flush(); err != nil {
		log.Printf("Write error: %s\n", err)
		return errors.New("write ping error")
	}
	return nil
}
*/

// Read pumps bytes from the connection into the incoming queue via Unpack.
// It returns after any read error, setting closing and signalling quit.
func (self *Session) Read() {
	if self.closing {
		return
	}
	tmpBuffer := make([]byte, 0) // carries partial frames between reads
	buffer := make([]byte, 1024)
	for {
		if self.closing {
			return
		}
		n, err := self.reader.Read(buffer)
		//self.reader.Read()
		if err != nil {
			/*
				if err == io.EOF {
					//fmt.Println("n is =====================", n)
					break
				}
			*/
			self.closing = true
			log.Println(" connection error: ", err)
			//self.conn.RemoteAddr().String(),
			self.quit()
			return
		}
		// A single "P" byte is the peer's keep-alive ping.
		// NOTE(review): the branch body is empty, so the ping byte still
		// falls through to the n > 0 branch below and is fed to Unpack —
		// confirm Unpack tolerates it.
		if n == 1 && string(buffer[:1]) == "P" {
			/*
				if _, err := self.writer.Write([]byte("P")); err != nil {
					self.quit()
					return
				}
				if err := self.writer.Flush(); err != nil {
					log.Printf("Write error: %s\n", err)
					self.quit()
					return
				}
			*/
			//log.Println(self.conn.RemoteAddr().String(), " recv : P ")
		}
		if n > 0 {
			//fmt.Println("n is ========================================", n)
			// Append the new bytes to any leftover partial frame and let
			// Unpack deliver complete frames into incoming, returning the
			// remainder.
			tmpBuffer = Unpack(append(tmpBuffer, buffer[:n]...), self.incoming)
		}
		/*
			if line, _, err := self.reader.ReadLine(); err == nil {
				self.incoming <- string(line)
			} else {
				log.Printf("Read error: %s\n", err)
				self.quit()
				return
			}
		*/
	}
}

// WritePing queues a one-byte "P" keep-alive; the Write loop sends it
// unframed (it skips Packet for this payload).
func (self *Session) WritePing() {
	self.outgoing <- []byte("P")
}

// Write drains the outgoing queue to the connection, framing each payload
// with Packet (except the raw "P" ping), and queues a ping after every 30
// seconds of inactivity. It returns on the first write/flush error, setting
// closing and signalling quit.
func (self *Session) Write() {
	for {
		if self.closing {
			return
		}
		/*
			timeout := make(chan bool)
			defer func() {
				//close(timeout)
				//<-timeout
			}()
			go func() {
				if self.closing {
					close(timeout)
					return
				}
				time.Sleep(30 * time.Second)
				//fmt.Println("sleep 30")
				if self.closing {
					close(timeout)
					return
				}
				timeout <- true
				//fmt.Println("end sleep 30")
			}()
		*/
		select {
		case <-time.After(time.Second * 30):
			if self.closing {
				return
			}
			//close(timeout)
			//fmt.Println("my time out")
			//fmt.Println("recv sleep 30")
			go self.WritePing()
			//fmt.Println("send outgoing P")
			/*
				if err := self.WritePing(); err != nil {
					self.quit()
					return
				}
			*/
			//log.Println(self.conn.RemoteAddr().String(), " send : P ")
		case data := <-self.outgoing:
			if self.closing {
				return
			}
			var out []byte
			// Pings go out as a raw single byte; everything else is framed.
			if len(data) == 1 && string(data[:1]) == "P" {
				out = data
				fmt.Println("my time out")
			} else {
				out = Packet([]byte(data))
			}
			//fmt.Println(self.conn, " send:", string(out))
			if _, err := self.writer.Write(out); err != nil {
				log.Printf("Write error: %s\n", err)
				self.closing = true
				self.quit()
				return
			}
			if err := self.writer.Flush(); err != nil {
				log.Printf("Write error: %s\n", err)
				self.closing = true
				self.quit()
				return
			}
		}
		//case <-timeout:
	}
}

// Close closes the underlying connection. The Read/Write goroutines notice
// via the resulting I/O errors.
func (self *Session) Close() {
	self.conn.Close()
}
[ 3 ]
package db_node import ( "github.com/solympe/Golang_Training/pkg/pattern-proxy/db-functions" ) type dbNode struct { cache db_functions.DBFunctions dataBase db_functions.DBFunctions } // SendData updates data in the main data-base and cache func (n *dbNode) SendData(data string) { db_functions.DBFunctions.SendData(n.cache, data) db_functions.DBFunctions.SendData(n.dataBase, data) } // GetData returns 'fresh' data from cache func (n dbNode) GetData() string { freshData := db_functions.DBFunctions.GetData(n.cache) return freshData } // NewDBNode returns new instance of dataBaseNode(proxy) func NewDBNode(cache db_functions.DBFunctions, db db_functions.DBFunctions) db_functions.DBFunctions { return &dbNode{cache, db} }
[ 6 ]
package companyreg //Licensed under the Apache License, Version 2.0 (the "License"); //you may not use this file except in compliance with the License. //You may obtain a copy of the License at // //http://www.apache.org/licenses/LICENSE-2.0 // //Unless required by applicable law or agreed to in writing, software //distributed under the License is distributed on an "AS IS" BASIS, //WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //See the License for the specific language governing permissions and //limitations under the License. // // Code generated by Alibaba Cloud SDK Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. // BookkeepingCommodity is a nested struct in companyreg response type BookkeepingCommodity struct { TopOrgCode string `json:"TopOrgCode" xml:"TopOrgCode"` CommodityCode string `json:"CommodityCode" xml:"CommodityCode"` CommodityName string `json:"CommodityName" xml:"CommodityName"` SpecCode string `json:"SpecCode" xml:"SpecCode"` PackageVersionCode string `json:"PackageVersionCode" xml:"PackageVersionCode"` PackageVersionValue string `json:"PackageVersionValue" xml:"PackageVersionValue"` PackageVersionName string `json:"PackageVersionName" xml:"PackageVersionName"` CityModuleCode string `json:"CityModuleCode" xml:"CityModuleCode"` CityModuleValue string `json:"CityModuleValue" xml:"CityModuleValue"` CityModuleName string `json:"CityModuleName" xml:"CityModuleName"` OrdTimeCode string `json:"OrdTimeCode" xml:"OrdTimeCode"` OrdTimeValue string `json:"OrdTimeValue" xml:"OrdTimeValue"` OrdTimeName string `json:"OrdTimeName" xml:"OrdTimeName"` TopOrgName string `json:"TopOrgName" xml:"TopOrgName"` ServiceModuleCode string `json:"ServiceModuleCode" xml:"ServiceModuleCode"` ServiceModuleValue string `json:"ServiceModuleValue" xml:"ServiceModuleValue"` ServiceModuleName string `json:"ServiceModuleName" xml:"ServiceModuleName"` AreaType string `json:"AreaType" xml:"AreaType"` }
[ 3 ]
package main import yaml "gopkg.in/yaml.v2" // Convertable parses input as array of bytes into "from" version, then calls // convert function which needs to be implemented in specific version conversion // then it marshals yaml into "out" version and returns the array of bytes of // that yml file // Args: // in interface{} - composev1, composev23 etc type // out interface{} - composev1, composev23 etc type // bytes - input docker-compose.yml as array of bytes // f - func which populates out interface{} // Returns []byte, error // Example usage: // return Convertable(&in, &out, bytes, func() { // out.Version = "2.3" // }) func Convertable(in interface{}, out interface{}, bytes *[]byte, f func()) ([]byte, error) { err := yaml.Unmarshal(*bytes, in) if err != nil { return nil, err } f() bytesout, err := yaml.Marshal(&out) if err != nil { return nil, err } return bytesout, nil } type converter map[string]func(bytes *[]byte) ([]byte, error) func getConverters() map[string]converter { var converters = make(map[string]converter) converters["v1"] = converter{ "v2.3": v1tov23, "v3.2": v1tov32, } return converters }
[ 6 ]
package main

// MyStack is a LIFO stack of ints. The top element lives at the END of the
// backing slice; the previous version prepended on Push, which copied the
// whole slice (O(n)) on every push and sliced off the front on every pop.
// Appending/truncating at the end makes both operations amortized O(1) with
// identical external behavior.
type MyStack struct {
	val []int
}

// Constructor returns an empty stack.
func Constructor() MyStack {
	return MyStack{[]int{}}
}

// Push puts x on top of the stack.
func (s *MyStack) Push(x int) {
	s.val = append(s.val, x)
}

// Pop removes and returns the top element. Like the original, it panics
// (index out of range) when the stack is empty.
func (s *MyStack) Pop() int {
	n := len(s.val) - 1
	top := s.val[n]
	s.val = s.val[:n]
	return top
}

// Top returns the top element without removing it. Panics when empty.
func (s *MyStack) Top() int {
	return s.val[len(s.val)-1]
}

// Empty reports whether the stack has no elements.
// (Replaces the verbose if/return-true/return-false form.)
func (s *MyStack) Empty() bool {
	return len(s.val) == 0
}

/**
 * Your MyStack object will be instantiated and called as such:
 * obj := Constructor();
 * obj.Push(x);
 * param_2 := obj.Pop();
 * param_3 := obj.Top();
 * param_4 := obj.Empty();
 */
[ 3 ]
package lessons func uniquePathsWithObstacles(obstacleGrid [][]int) int { m, n := len(obstacleGrid), len(obstacleGrid[0]) f := make([][]int, m) for i := range f { f[i] = make([]int, n) } for i := 0; i < m; i++ { for j := 0; j < n; j++ { if obstacleGrid[i][j] == 1 { f[i][j] = 0 } else if i == 0 && j == 0 { f[i][j] = 1 } else if i == 0 { f[i][j] = f[i][j-1] } else if j == 0 { f[i][j] = f[i-1][j] } else { f[i][j] = f[i-1][j] + f[i][j-1] } } } return f[m-1][n-1] }
[ 5 ]
package cookie

import (
	"errors"
	"net/http"
	"time"

	"github.com/gorilla/securecookie"
)

// AuthCookie is the payload carried inside the encrypted auth cookie.
type AuthCookie struct {
	Data           string `json:"data"`
	OrgCID         string `json:"org_custom_id"`
	SubscriptionID int32  `json:"subscription_id"` // used to check if internal/admin (sub 9999)
}

// SecureCookie encodes/decodes the "linkai_auth" cookie with a configurable
// lifetime.
type SecureCookie struct {
	s       *securecookie.SecureCookie
	expires int
}

// SetExpires in seconds
func (c *SecureCookie) SetExpires(expiry int) {
	c.expires = expiry
	c.s.MaxAge(expiry)
}

// New builds a SecureCookie around hashKey/blockKey with the default
// 3700-second lifetime.
func New(hashKey, blockKey []byte) *SecureCookie {
	sc := &SecureCookie{
		s:       securecookie.New(hashKey, blockKey),
		expires: 3700,
	}
	sc.s.MaxAge(sc.expires)
	return sc
}

// SetAuthCookie encodes the payload and writes it as the secure, HTTP-only
// "linkai_auth" cookie scoped to /app/.
func (c *SecureCookie) SetAuthCookie(w http.ResponseWriter, data string, orgCID string, subscriptionID int32) error {
	payload := &AuthCookie{Data: data, OrgCID: orgCID, SubscriptionID: subscriptionID}
	encoded, err := c.s.Encode("linkai_auth", payload)
	if err != nil {
		return err
	}
	http.SetCookie(w, &http.Cookie{
		Name:     "linkai_auth",
		Value:    encoded,
		Path:     "/app/",
		Expires:  time.Now().Add(time.Second * time.Duration(c.expires)),
		SameSite: http.SameSiteStrictMode,
		Secure:   true,
		HttpOnly: true,
	})
	return nil
}

// GetAuthCookie decodes and validates the auth cookie, returning the payload
// and true on success.
func (c *SecureCookie) GetAuthCookie(cookie *http.Cookie) (*AuthCookie, bool, error) {
	value := &AuthCookie{}
	if err := c.s.Decode("linkai_auth", cookie.Value, &value); err != nil {
		return nil, false, err
	}
	if value.Data == "" {
		return nil, false, errors.New("invalid cookie")
	}
	return value, true, nil
}
[ 6 ]
package config

import (
	"bytes"
	"errors"
	"fmt"
	// text/template replaces html/template: these templates render log FILE
	// PATHS, and html/template would HTML-escape characters such as & in them.
	"text/template"

	"github.com/spf13/viper"
)

// ErrKey is raised when a key is unknown.
var ErrKey = errors.New("KeyError")

// Default values. The *_file entries are templates rendered against Config,
// so {{.App}} expands to the application name.
var defaults = map[string]interface{}{
	"out_file":    "/tmp/jocasta_{{.App}}_stdout.log",
	"out_maxsize": "0",
	"out_backups": "0",
	"err_file":    "/tmp/jocasta_{{.App}}_stderr.log",
	"err_maxsize": "0",
	"err_backups": "0",
}

// Config implements the config store of jocasta.
type Config struct {
	v   *viper.Viper
	App string
}

// Params type for characteristics of a stream.
type Params struct {
	File    string
	Maxsize int
	Backups int
}

// New initializes the config store, reading [filename].json/.yaml/.toml from
// path and honoring JOCASTA_* environment variables. The Config is returned
// even when reading the file fails, along with that error.
func New(path string, filename string, app string) (*Config, error) {
	v := viper.New()
	for key, value := range defaults {
		v.SetDefault(key, value)
	}
	v.SetConfigName(filename) // The file will be named [filename].json, [filename].yaml or [filename.toml]
	v.AddConfigPath(path)
	v.SetEnvPrefix("jocasta")
	v.AutomaticEnv()
	err := v.ReadInConfig()
	config := &Config{v: v, App: app}
	return config, err
}

// keyName maps a stream name ("out" or "err") and a subkey to the flat viper
// key, or returns ErrKey for an unknown stream.
func keyName(key, subkey string) (string, error) {
	switch key {
	case "out", "err":
		return fmt.Sprintf("%s_%s", key, subkey), nil
	default:
		return "", fmt.Errorf("don't know anything about %s: %w", key, ErrKey)
	}
}

// File return the filename for logs for given stream.
func (c *Config) File(stream string) (string, error) {
	key, err := keyName(stream, "file")
	if err != nil {
		return "", err
	}
	// Render the configured path template against the Config ({{.App}} etc.).
	t, err := template.New("filename").Parse(c.v.GetString(key))
	if err != nil {
		return "", err
	}
	var tpl bytes.Buffer
	if err := t.Execute(&tpl, c); err != nil {
		return "", err
	}
	return tpl.String(), nil
}

// MaxSize return the max size of log file before rotation for given stream.
func (c *Config) MaxSize(stream string) (uint, error) {
	key, err := keyName(stream, "maxsize")
	if err != nil {
		return 0, err
	}
	return c.v.GetSizeInBytes(key), nil
}

// Backups return the number of historical files for logs for given stream.
func (c *Config) Backups(stream string) (int, error) {
	key, err := keyName(stream, "backups")
	if err != nil {
		return 0, err
	}
	return c.v.GetInt(key), nil
}

// GetParams return the whole logs info for given stream in Params type.
func (c *Config) GetParams(stream string) (*Params, error) {
	maxsize, err := c.MaxSize(stream)
	if err != nil {
		return nil, err
	}
	// The errors are already trapped at c.MaxSize
	backups, _ := c.Backups(stream)
	file, err := c.File(stream)
	if err != nil {
		return nil, err
	}
	p := &Params{
		Maxsize: int(maxsize),
		Backups: backups,
		File:    file,
	}
	return p, nil
}
[ 3, 6 ]
package main

/******************** Testing Objective consensu:STATE TRANSFER ********
* Setup: 4 node local docker peer network with security
* 0. Deploy chaincodeexample02 with 100000, 90000 as initial args
* 1. Send Invoke Requests on multiple peers using go routines.
* 2. Verify query results match on PEER0 and PEER1 after invoke
*********************************************************************/

import (
	"fmt"
	//"strconv"
	"time"

	"obcsdk/chaincode"
	"obcsdk/peernetwork"
	"sync"
)

// main drives the whole scenario sequentially: bring up a 4-node network,
// deploy example02 with a=100000/b=90000, fire 2x250 concurrent invokes via
// InvokeLoop, then query a/b and the chain height on all four peers so the
// operator can compare the printed results by eye.
// NOTE(review): the fixed Sleeps are timing assumptions about network/deploy
// readiness, not synchronization — confirm they are long enough for the
// target environment.
func main() {
	fmt.Println("Creating a local docker network")
	peernetwork.SetupLocalNetwork(4, true)

	_ = chaincode.InitNetwork()
	chaincode.InitChainCodes()
	chaincode.RegisterUsers()

	// Give the freshly created network time to settle before deploying.
	time.Sleep(30000 * time.Millisecond)
	fmt.Println("\nPOST/Chaincode: Deploying chaincode at the beginning ....")
	dAPIArgs0 := []string{"example02", "init"}
	depArgs0 := []string{"a", "100000", "b", "90000"}
	chaincode.Deploy(dAPIArgs0, depArgs0)

	//var resa, resb string
	// Expected ledger values; NOTE(review): curra/currb are updated below but
	// never compared against the query results — the verification described in
	// the header comment is manual.
	var inita, initb, curra, currb int
	inita = 100000
	initb = 90000
	curra = inita
	currb = initb

	// Wait for the deploy transaction to commit.
	time.Sleep(60000 * time.Millisecond)
	fmt.Println("\nPOST/Chaincode: Querying a and b after deploy >>>>>>>>>>> ")
	qAPIArgs0 := []string{"example02", "query"}
	qArgsa := []string{"a"}
	qArgsb := []string{"b"}
	A, _ := chaincode.Query(qAPIArgs0, qArgsa)
	B, _ := chaincode.Query(qAPIArgs0, qArgsb)
	myStr := fmt.Sprintf("\nA = %s B= %s", A, B)
	fmt.Println(myStr)

	numReq := 250
	InvokeLoop(numReq)

	// Allow the invoke burst to commit before querying each peer.
	time.Sleep(120000 * time.Millisecond)

	curra = curra - 20
	currb = currb + 20

	fmt.Println("\nPOST/Chaincode: Querying a and b after invoke >>>>>>>>>>> ")
	qAPIArgs00 := []string{"example02", "query", "PEER0"}
	qAPIArgs01 := []string{"example02", "query", "PEER1"}
	qAPIArgs02 := []string{"example02", "query", "PEER2"}
	qAPIArgs03 := []string{"example02", "query", "PEER3"}

	res0A, _ := chaincode.QueryOnHost(qAPIArgs00, qArgsa)
	res0B, _ := chaincode.QueryOnHost(qAPIArgs00, qArgsb)
	res1A, _ := chaincode.QueryOnHost(qAPIArgs01, qArgsa)
	res1B, _ := chaincode.QueryOnHost(qAPIArgs01, qArgsb)
	res2A, _ := chaincode.QueryOnHost(qAPIArgs02, qArgsa)
	res2B, _ := chaincode.QueryOnHost(qAPIArgs02, qArgsb)
	res3A, _ := chaincode.QueryOnHost(qAPIArgs03, qArgsa)
	res3B, _ := chaincode.QueryOnHost(qAPIArgs03, qArgsb)

	fmt.Println("Results in a and b PEER0 : ", res0A, res0B)
	fmt.Println("Results in a and b PEER1 : ", res1A, res1B)
	fmt.Println("Results in a and b PEER2 : ", res2A, res2B)
	fmt.Println("Results in a and b PEER3 : ", res3A, res3B)

	ht0, _ := chaincode.GetChainHeight("PEER0")
	ht1, _ := chaincode.GetChainHeight("PEER1")
	ht2, _ := chaincode.GetChainHeight("PEER2")
	ht3, _ := chaincode.GetChainHeight("PEER3")
	fmt.Printf("ht0: %d, ht1: %d, ht2: %d, ht3: %d ", ht0, ht1, ht2, ht3)
}

// InvokeLoop launches two goroutines that each fire numReq invoke requests,
// one batch aimed at PEER1 and one at PEER3, and waits for both launchers.
// NOTE(review): each request is itself fired with `go chaincode.InvokeOnPeer`,
// so wg.Done runs once the requests are *spawned*, not completed — the
// 120-second sleep in main is what actually covers their completion.
func InvokeLoop(numReq int) {
	var wg sync.WaitGroup
	invArgs0 := []string{"a", "b", "1"}
	iAPIArgsCurrPeer1 := []string{"example02", "invoke", "PEER1"}
	wg.Add(2)
	go func() {
		defer wg.Done()
		k := 1
		for k <= numReq {
			go chaincode.InvokeOnPeer(iAPIArgsCurrPeer1, invArgs0)
			k++
		}
		fmt.Println("# of Req Invoked on PEER1 ", k)
	}()
	go func() {
		defer wg.Done()
		iAPIArgsCurrPeer3 := []string{"example02", "invoke", "PEER3"}
		k := 1
		for k <= numReq {
			go chaincode.InvokeOnPeer(iAPIArgsCurrPeer3, invArgs0)
			k++
		}
		fmt.Println("# of Req Invoked on PEER3", k)
	}()
	wg.Wait()
}
[ 0, 3 ]
package toml

import (
	"time"
	"strconv"
	"runtime"
	"strings"
	"fmt"
)

// Tree is the parse tree produced from TOML input, with a three-token
// lookahead over the lexer (modelled on text/template's parser).
type Tree struct {
	Root *ListNode // top-level root of the tree.

	text      string
	lex       *lexer
	token     [3]token // three-token lookahead for parser.
	peekCount int
}

// Parse lexes and parses text into a Tree. Parse errors raised via errorf
// are converted from panics into the returned error by parseRecover.
func Parse(text string) (tree *Tree, err error) {
	defer parseRecover(&err)
	t := &Tree{}
	t.text = text
	t.lex = lex(text)
	t.parse()
	return t, nil
}

// recover is the handler that turns panics into returns from the top level of Parse.
// Runtime errors (real bugs) are re-panicked rather than swallowed.
func parseRecover(errp *error) {
	e := recover()
	if e != nil {
		if _, ok := e.(runtime.Error); ok {
			panic(e)
		}
		*errp = e.(error)
	}
	return
}

// next returns the next tok.
func (t *Tree) next() token {
	if t.peekCount > 0 {
		t.peekCount--
	} else {
		t.token[0] = t.lex.nextToken()
	}
	return t.token[t.peekCount]
}

// backup backs the input stream up one tok.
func (t *Tree) backup() {
	t.peekCount++
}

// backup2 backs the input stream up two tokens.
// The zeroth token is already there.
func (t *Tree) backup2(t1 token) {
	t.token[1] = t1
	t.peekCount = 2
}

// backup3 backs the input stream up three tokens
// The zeroth token is already there.
func (t *Tree) backup3(t2, t1 token) {
	// Reverse order: we're pushing back.
	t.token[1] = t1
	t.token[2] = t2
	t.peekCount = 3
}

// peek returns but does not consume the next tok.
func (t *Tree) peek() token {
	if t.peekCount > 0 {
		return t.token[t.peekCount-1]
	}
	t.peekCount = 1
	t.token[0] = t.lex.nextToken()
	return t.token[0]
}

// nextNonSpace returns the next non-space tok.
func (t *Tree) nextNonSpace() (tok token) {
	for {
		tok = t.next()
		if tok.typ != tokenSpace {
			break
		}
	}
	//pd("next %d %s", tok.typ, tok.val)
	return tok
}

// peekNonSpace returns but does not consume the next non-space tok.
func (t *Tree) peekNonSpace() (tok token) {
	for {
		tok = t.next()
		if tok.typ != tokenSpace {
			break
		}
	}
	t.backup()
	return tok
}

// Parsing.

// ErrorContext returns a textual representation of the location of the node in the input text.
// The location is reported as "line:byte-offset-within-line".
func (t *Tree) ErrorContext(n Node) (location, context string) {
	pos := int(n.Position())
	text := t.text[:pos]
	byteNum := strings.LastIndex(text, "\n")
	if byteNum == -1 {
		byteNum = pos // On first line.
	} else {
		byteNum++ // After the newline.
		byteNum = pos - byteNum
	}
	lineNum := 1 + strings.Count(text, "\n")
	// TODO
	//context = n.String()
	context = "TODO"
	if len(context) > 20 {
		context = fmt.Sprintf("%.20s...", context)
	}
	return fmt.Sprintf("%d:%d", lineNum, byteNum), context
}

// errorf formats the error and terminates processing.
// The panic is caught by parseRecover at the top of Parse.
func (t *Tree) errorf(format string, args ...interface{}) {
	t.Root = nil
	format = fmt.Sprintf("%d: syntax error: %s", t.lex.lineNumber(), format)
	panic(fmt.Errorf(format, args...))
}

// error terminates processing.
func (t *Tree) error(err error) {
	t.errorf("%s", err)
}

// expect consumes the next token and guarantees it has the required type.
func (t *Tree) expect(expected tokenType, context string) token {
	tok := t.nextNonSpace()
	if tok.typ != expected {
		t.unexpected(tok, context)
	}
	return tok
}

// expectOneOf consumes the next token and guarantees it has one of the required types.
func (t *Tree) expectOneOf(expected1, expected2 tokenType, context string) token {
	tok := t.nextNonSpace()
	if tok.typ != expected1 && tok.typ != expected2 {
		t.unexpected(tok, context)
	}
	return tok
}

// unexpected complains about the token and terminates processing.
func (t *Tree) unexpected(tok token, context string) {
	t.errorf("unexpected %s in %s", tok, context)
}

// parse consumes top-level items until EOF, appending each to t.Root.
// NOTE(review): it always returns nil; callers rely on t.Root, not the
// return value.
func (t *Tree) parse() Node {
	t.Root = newList(t.peek().pos)
	for t.peek().typ != tokenEOF {
		n := t.top()
		t.Root.append(n)
	}
	return nil
}

// top parses one top-level item:
// key = value
// [keygroup]
func (t *Tree) top() Node {
	switch tok := t.peekNonSpace(); tok.typ {
	case tokenError:
		t.nextNonSpace()
		t.errorf("%s", tok.val)
	case tokenKeyGroup:
		return t.entryGroup()
	case tokenKey:
		return t.entry()
	default:
		t.errorf("unexpected %q", tok.val)
		return nil
	}
	return nil
}

// entryGroup parses a key-group header and all entries that follow it:
// [keygroup]
// ...
func (t *Tree) entryGroup() Node {
	token := t.nextNonSpace()
	keyGroup := parseKeyGroup(token)
	entries := newList(t.peek().pos)
Loop:
	for {
		switch tok := t.peekNonSpace(); tok.typ {
		case tokenKey:
			entries.append(t.entry())
		default:
			break Loop
		}
	}
	return newEntryGroup(token.pos, keyGroup, entries)
}

// parseKeyGroup splits a "[foo.bar]" token into its dotted key parts.
// NOTE(review): the per-key position tok.pos+Pos(len(v)) is not the key's
// true offset within the group text — confirm whether positions here are
// ever used for error reporting.
func parseKeyGroup(tok token) *KeyGroupNode {
	text := tok.val
	name := text[1 : len(text)-1]
	keys := newList(tok.pos + Pos(1))
	for _, v := range strings.Split(name, ".") {
		keys.append(newKey(tok.pos+Pos(len(v)), v))
	}
	return newKeyGroup(tok.pos, keys, text)
}

// entry parses a single binding:
// key = value
func (t *Tree) entry() Node {
	tok := t.nextNonSpace()
	key := newKey(tok.pos, tok.val)
	//pd("entry %s", tok.val)
	t.expect(tokenKeySep, "key seperator")
	return newEntry(tok.pos, key, t.value())
}

// value parses one value: string, array, ...
func (t *Tree) value() Node {
	switch tok := t.nextNonSpace(); tok.typ {
	case tokenBool:
		return newBool(tok.pos, tok.val == "true")
	case tokenNumber:
		v, err := newNumber(tok.pos, tok.val)
		if err != nil {
			t.error(err)
		}
		return v
	case tokenString:
		//pd("str %d %s", tok.typ, tok.val)
		v, err := strconv.Unquote(tok.val)
		if err != nil {
			t.error(err)
		}
		return newString(tok.pos, v, tok.val)
	case tokenDatetime:
		v, err := time.Parse(time.RFC3339, tok.val)
		if err != nil {
			t.error(err)
		}
		return newDatetime(tok.pos, v)
	case tokenArrayStart:
		return t.array()
	default:
		t.errorf("unexpected %q in value", tok.val)
		return nil
	}
	return nil
}

// array parses an array literal whose opening bracket is already consumed:
// [1, 2]
// A separator is required after every element except the last.
func (t *Tree) array() Node {
	pos := t.peek().pos
	array := newList(pos)
Loop:
	for {
		switch tok := t.peekNonSpace(); tok.typ {
		case tokenArrayEnd:
			t.nextNonSpace()
			break Loop
		default:
			//pd("array %s", tok.val)
			node := t.value()
			if t.peekNonSpace().typ != tokenArrayEnd {
				t.expect(tokenArraySep, "array")
			}
			array.append(node)
		}
	}
	return newArray(pos, array)
}
[ 3 ]
package cmd

import (
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	corev1 "k8s.io/api/core/v1"

	"github.com/snowdrop/k8s-supervisor/pkg/common/config"
	"github.com/snowdrop/k8s-supervisor/pkg/common/oc"
)

var (
	// ports holds the "local:remote" port pair used for port-forwarding,
	// populated by the --ports/-p flag (default "5005:5005").
	ports string
)

// debugCmd restarts the Spring Boot run command inside the dev pod via
// supervisord and then port-forwards the debug port so a local debugger can
// attach. The final `oc port-forward` call blocks until interrupted.
var debugCmd = &cobra.Command{
	Use:   "debug [flags]",
	Short: "Debug your SpringBoot application",
	Long:  `Debug your SpringBoot application.`,
	Example: ` sb debug -p 5005:5005`,
	Args: cobra.RangeArgs(0, 1),
	Run: func(cmd *cobra.Command, args []string) {
		log.Info("Debug command called")

		_, pod := SetupAndWaitForPod()
		podName := pod.Name

		// Append Debug Env Vars and update POD
		//log.Info("[Step 5] - Add new ENV vars for remote Debugging")
		//pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env,debugEnvVars()...)
		//clientset.CoreV1().Pods(application.Namespace).Update(pod)

		// Stop then start the supervisord-managed run command so the app
		// picks up its (debug) configuration.
		log.Info("Restart the Spring Boot application ...")
		oc.ExecCommand(oc.Command{Args: []string{"rsh", podName, config.SupervisordBin, config.SupervisordCtl, "stop", config.RunCmdName}})
		oc.ExecCommand(oc.Command{Args: []string{"rsh", podName, config.SupervisordBin, config.SupervisordCtl, "start", config.RunCmdName}})

		// Forward local to Remote port
		log.Info("Remote Debug the Spring Boot Application ...")
		oc.ExecCommand(oc.Command{Args: []string{"port-forward", podName, ports}})
	},
}

func init() {
	debugCmd.Flags().StringVarP(&ports, "ports", "p", "5005:5005", "Local and remote ports to be used to forward traffic between the dev pod and your machine.")
	//debugCmd.MarkFlagRequired("ports")

	debugCmd.Annotations = map[string]string{"command": "debug"}
	rootCmd.AddCommand(debugCmd)
}

// debugEnvVars returns the env vars that enable the JVM remote-debug agent.
// NOTE(review): currently unused — the block in Run that would apply them is
// commented out; confirm whether the pod image enables debugging by default.
func debugEnvVars() []corev1.EnvVar {
	return []corev1.EnvVar{
		{
			Name:  "JAVA_DEBUG",
			Value: "true",
		},
		{
			Name:  "JAVA_DEBUG_PORT",
			Value: "5005",
		},
	}
}
[ 3 ]
package articles

import (
	"github.com/PuerkitoBio/goquery"
	"github.com/yevchuk-kostiantyn/WebsiteAggregator/models"
	"log"
	"strings"
)

// Search downloads config.URL, concatenates the text of every <p> element,
// and — when the text mentions config.Interest — saves the article under the
// key "<interest>|<url>" via SaveToDB. It panics if the URL cannot be
// fetched, matching the original behavior.
func Search(config *models.Article) {
	response, err := goquery.NewDocument(config.URL)
	log.Println("New Search", config)
	if err != nil {
		panic("Bad URL!")
	}

	// strings.Builder avoids the quadratic cost of += concatenation that the
	// previous version paid once per paragraph.
	var article strings.Builder
	response.Find("p").Each(func(index int, item *goquery.Selection) {
		article.WriteString(item.Text())
	})

	text := article.String()
	if IsInteresting(text, config.Interest) {
		key := config.Interest + "|" + config.URL
		log.Println("Interesting!")
		SaveToDB(key, text)
	} else {
		log.Println("Not interesting")
	}
}

// IsInteresting reports whether the article text mentions interest.
// (Replaces the if/else that returned the boolean literally.)
func IsInteresting(article string, interest string) bool {
	return strings.Contains(article, interest)
}
[ 6 ]
package detector

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/token"
	"github.com/wata727/tflint/config"
	"github.com/wata727/tflint/evaluator"
	"github.com/wata727/tflint/issue"
	"github.com/wata727/tflint/logger"
)

// Detector runs the registered detection rules over parsed HCL templates.
type Detector struct {
	ListMap    map[string]*ast.ObjectList
	Config     *config.Config
	AwsClient  *config.AwsClient
	EvalConfig *evaluator.Evaluator
	Logger     *logger.Logger
	Error      bool
}

// detectors maps a rule name (matched against the ignore configuration) to
// the name of the Detector method implementing it; the method is looked up
// and invoked by reflection in Detect.
var detectors = map[string]string{
	"aws_instance_invalid_type":                       "DetectAwsInstanceInvalidType",
	"aws_instance_previous_type":                      "DetectAwsInstancePreviousType",
	"aws_instance_not_specified_iam_profile":          "DetectAwsInstanceNotSpecifiedIamProfile",
	"aws_instance_default_standard_volume":            "DetectAwsInstanceDefaultStandardVolume",
	"aws_db_instance_default_parameter_group":         "DetectAwsDbInstanceDefaultParameterGroup",
	"aws_elasticache_cluster_default_parameter_group": "DetectAwsElasticacheClusterDefaultParameterGroup",
	"aws_instance_invalid_iam_profile":                "DetectAwsInstanceInvalidIamProfile",
}

// NewDetector builds a Detector for the given templates, wiring up the
// evaluator, AWS client and logger from the supplied configuration.
func NewDetector(listMap map[string]*ast.ObjectList, c *config.Config) (*Detector, error) {
	evalConfig, err := evaluator.NewEvaluator(listMap, c)
	if err != nil {
		return nil, err
	}

	return &Detector{
		ListMap:    listMap,
		Config:     c,
		AwsClient:  c.NewAwsClient(),
		EvalConfig: evalConfig,
		Logger:     logger.Init(c.Debug),
		Error:      false,
	}, nil
}

// hclLiteralToken returns the literal token stored under key k of item; it
// fails when the key is missing or its value is not a literal.
func hclLiteralToken(item *ast.ObjectItem, k string) (token.Token, error) {
	objItems, err := hclObjectItems(item, k)
	if err != nil {
		return token.Token{}, err
	}

	if v, ok := objItems[0].Val.(*ast.LiteralType); ok {
		return v.Token, nil
	}
	return token.Token{}, fmt.Errorf("ERROR: `%s` value is not literal", k)
}

// hclObjectItems returns the object items stored under key k of item, or an
// error when the key is absent.
func hclObjectItems(item *ast.ObjectItem, k string) ([]*ast.ObjectItem, error) {
	items := item.Val.(*ast.ObjectType).List.Filter(k).Items
	if len(items) == 0 {
		return []*ast.ObjectItem{}, fmt.Errorf("ERROR: key `%s` not found", k)
	}
	return items, nil
}

// IsKeyNotFound reports whether item has no entry under key k.
func IsKeyNotFound(item *ast.ObjectItem, k string) bool {
	items := item.Val.(*ast.ObjectType).List.Filter(k).Items
	return len(items) == 0
}

// Detect runs every registered rule (unless ignored by configuration) over
// the root templates and over each module, and returns all issues found.
func (d *Detector) Detect() []*issue.Issue {
	var issues = []*issue.Issue{}

	for ruleName, detectorMethod := range detectors {
		if d.Config.IgnoreRule[ruleName] {
			d.Logger.Info(fmt.Sprintf("ignore rule `%s`", ruleName))
			continue
		}
		d.Logger.Info(fmt.Sprintf("detect by `%s`", ruleName))
		method := reflect.ValueOf(d).MethodByName(detectorMethod)
		method.Call([]reflect.Value{reflect.ValueOf(&issues)})

		for name, m := range d.EvalConfig.ModuleConfig {
			if d.Config.IgnoreModule[m.Source] {
				d.Logger.Info(fmt.Sprintf("ignore module `%s`", name))
				continue
			}
			d.Logger.Info(fmt.Sprintf("detect module `%s`", name))
			// NOTE(review): the per-module detector carries no AwsClient, so
			// rules that call the AWS API would see a nil client when run on
			// a module — confirm this is intended.
			moduleDetector := &Detector{
				ListMap: m.ListMap,
				Config:  d.Config,
				EvalConfig: &evaluator.Evaluator{
					Config: m.Config,
				},
				Logger: d.Logger,
			}
			method := reflect.ValueOf(moduleDetector).MethodByName(detectorMethod)
			method.Call([]reflect.Value{reflect.ValueOf(&issues)})
		}
	}

	return issues
}

// evalToString evaluates a (possibly interpolated) HCL string value and
// returns its string result. It fails when the value does not evaluate to a
// string or cannot be evaluated at all.
func (d *Detector) evalToString(v string) (string, error) {
	ev, err := d.EvalConfig.Eval(strings.Trim(v, "\""))

	if err != nil {
		return "", err
	} else if reflect.TypeOf(ev).Kind() != reflect.String {
		return "", fmt.Errorf("ERROR: `%s` is not string", v)
	} else if ev.(string) == "[NOT EVALUABLE]" {
		// BUG FIX: the original message started with "ERROR;" (semicolon),
		// inconsistent with every other error string in this file.
		return "", fmt.Errorf("ERROR: `%s` is not evaluable", v)
	}

	return ev.(string), nil
}
[ 5 ]
package chat //User reprenent of User model type User struct { Name string `json:"name"` Username string `json:"username"` } //ResponseJoin represent of joined message type ResponseJoin struct { Success bool `json:"success"` Message string `json:"message"` Username string `json:"username"` } //Message represent of message data type Message struct { From string `json:"from"` To string `json:"to"` Message string `json:"message"` }
[ 3 ]
package blockchain

import (
	"crypto/sha1"
	"fmt"
	"strconv"
)

// Block is a single entry of the chain. id, hash and previousBlockHash are
// assigned by BlockChain.AddBlock; callers only provide Content.
type Block struct {
	id                int
	hash              string
	previousBlockHash string
	Content           []byte
}

// BlockChain is an append-only list of blocks linked by their hashes.
type BlockChain struct {
	currentID int
	blocks    []*Block
}

// hash returns the hex-encoded SHA-1 digest of ip.
func hash(ip []byte) string {
	h := sha1.New() // renamed from `sha1` to avoid shadowing the package
	h.Write(ip)
	return fmt.Sprintf("%x", h.Sum(nil))
}

// calculateHash recomputes and stores the block's hash from its id, the
// previous block's hash and its content.
func (block *Block) calculateHash() {
	// BUG FIX: the original used string(block.id), which converts an int to
	// the rune with that code point (e.g. 1 -> "\x01"), not its decimal
	// representation. Use strconv.Itoa instead.
	block.hash = hash([]byte(strconv.Itoa(block.id) + block.previousBlockHash + string(block.Content)))
}

// NewChain creates an empty chain.
func NewChain() *BlockChain {
	return &BlockChain{
		currentID: 0,
		blocks:    make([]*Block, 0),
	}
}

// NewBlock creates a detached block holding content; id and hash links are
// filled in by AddBlock.
func (blockchain *BlockChain) NewBlock(content []byte) *Block {
	return &Block{
		Content: content,
	}
}

// GetBlocks returns all blocks currently in the chain.
func (blockchain *BlockChain) GetBlocks() []*Block {
	return blockchain.blocks
}

// AddBlock appends block to the chain, assigning its id, linking it to the
// previous block's hash and computing its own hash.
func (blockchain *BlockChain) AddBlock(block *Block) {
	var prevHash string
	if blockchain.currentID == 0 {
		prevHash = "" // genesis block has no predecessor
	} else {
		prevHash = blockchain.blocks[blockchain.currentID-1].hash
	}
	blockchain.currentID = blockchain.currentID + 1
	block.id = blockchain.currentID
	block.previousBlockHash = prevHash
	block.calculateHash()
	blockchain.blocks = append(blockchain.blocks, block)
}

// VerifyChain reports whether every block's stored hash matches a fresh
// recomputation from its fields.
//
// NOTE(review): calculateHash mutates block.hash in place, so verifying a
// tampered chain overwrites the stored hashes — confirm this side effect is
// acceptable to callers.
func (blockchain *BlockChain) VerifyChain() bool {
	var originalHash string
	isValid := true
	for _, block := range blockchain.blocks {
		originalHash = block.hash
		block.calculateHash()
		isValid = isValid && originalHash == block.hash
	}
	return isValid
}
[ 0, 3 ]
package dynamo

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
	"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
	"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
	apperror "github.com/jayvib/app/apperr"
	"github.com/jayvib/app/model"
	"github.com/jayvib/app/user"
	"github.com/sirupsen/logrus"
)

// New returns a user.Repository backed by DynamoDB.
func New(db dynamodbiface.DynamoDBAPI) user.Repository {
	return &UserRepository{db: db}
}

// UserRepository implements user.Repository on top of DynamoDB.
type UserRepository struct {
	db dynamodbiface.DynamoDBAPI
}

// GetByID fetches a user by its primary key. It returns
// apperror.ItemNotFound when no item exists for the id.
func (u *UserRepository) GetByID(ctx context.Context, id string) (*model.User, error) {
	tablename := model.GetUserTableName()
	input := &dynamodb.GetItemInput{
		TableName: aws.String(tablename),
		// TODO: automate the conversion to map[string]*dynamodb.AttributeValue
		// (e.g. via dynamodbattribute.MarshalMap).
		Key: map[string]*dynamodb.AttributeValue{
			"id": {
				S: aws.String(id),
			},
		},
	}

	res, err := u.db.GetItemWithContext(ctx, input)
	if err != nil {
		return nil, err
	}

	// BUG FIX: GetItem returns a non-nil output with an empty Item map when
	// the key does not exist, so the previous `res == nil` check never
	// fired and missing users were not reported as ItemNotFound.
	if len(res.Item) == 0 {
		return nil, apperror.ItemNotFound
	}

	return unmarshalAttributeValueToUser(res.Item)
}

// GetByEmail fetches a user by email address. It returns
// apperror.ItemNotFound when no item matches.
//
// NOTE(review): this uses a full-table Scan with a filter expression, which
// reads the whole table — consider a GSI on email for production use.
func (u *UserRepository) GetByEmail(ctx context.Context, email string) (*model.User, error) {
	tableName := model.GetUserTableName()
	input := &dynamodb.ScanInput{
		TableName:        aws.String(tableName),
		FilterExpression: aws.String("email= :e"),
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":e": {
				S: aws.String(email),
			},
		},
	}

	output, err := u.db.ScanWithContext(ctx, input)
	if err != nil {
		return nil, err
	}
	if len(output.Items) == 0 {
		return nil, apperror.ItemNotFound
	}

	return unmarshalAttributeValueToUser(output.Items[0])
}

// GetByUsername fetches a user by username. It returns
// apperror.ItemNotFound when no item matches. Like GetByEmail, it scans the
// whole table.
func (u *UserRepository) GetByUsername(ctx context.Context, username string) (*model.User, error) {
	tableName := model.GetUserTableName()
	input := &dynamodb.ScanInput{
		TableName:        aws.String(tableName),
		FilterExpression: aws.String("username= :un"),
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":un": {
				S: aws.String(username),
			},
		},
	}

	output, err := u.db.ScanWithContext(ctx, input)
	if err != nil {
		return nil, err
	}
	if len(output.Items) == 0 {
		return nil, apperror.ItemNotFound
	}

	return unmarshalAttributeValueToUser(output.Items[0])
}

// Update overwrites the stored firstname, lastname, email, password and
// updated_at attributes of the user identified by user.ID. It returns
// apperror.EmptyItemID when the ID is unset.
func (u *UserRepository) Update(ctx context.Context, user *model.User) error {
	if user.ID == "" {
		return apperror.EmptyItemID
	}

	tablename := user.TableName()
	input := &dynamodb.UpdateItemInput{
		TableName: aws.String(tablename),
		Key: map[string]*dynamodb.AttributeValue{
			"id": {
				S: aws.String(user.ID),
			},
		},
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":fn": {
				S: aws.String(user.Firstname),
			},
			":ln": {
				S: aws.String(user.Lastname),
			},
			":e": {
				S: aws.String(user.Email),
			},
			":p": {
				S: aws.String(user.Password),
			},
			":ua": {
				S: aws.String(user.UpdatedAt.Format(time.RFC3339)),
			},
		},
		UpdateExpression: aws.String("SET firstname = :fn, lastname = :ln, email = :e, password = :p, updated_at = :ua"),
	}

	_, err := u.db.UpdateItemWithContext(ctx, input)
	return err
}

// Store persists the user as a new item (or replaces an existing one with
// the same id). It returns apperror.EmptyItemID when the ID is unset.
func (u *UserRepository) Store(ctx context.Context, user *model.User) error {
	if user.ID == "" {
		return apperror.EmptyItemID
	}

	av, err := dynamodbattribute.MarshalMap(user)
	if err != nil {
		return err
	}

	logrus.WithFields(logrus.Fields{
		"av": av,
	}).Debug()

	tablename := user.TableName()
	input := &dynamodb.PutItemInput{
		TableName: aws.String(tablename),
		Item:      av,
	}

	if ctx == nil {
		ctx = context.Background()
	}
	_, err = u.db.PutItemWithContext(ctx, input)
	return err
}

// Delete removes the user item with the given id.
func (u *UserRepository) Delete(ctx context.Context, id string) error {
	if ctx == nil {
		ctx = context.Background()
	}
	input := &dynamodb.DeleteItemInput{
		// BUG FIX: TableName is a required DeleteItem parameter; the request
		// was previously built without it and would fail validation.
		TableName: aws.String(model.GetUserTableName()),
		Key: map[string]*dynamodb.AttributeValue{
			"id": {
				S: aws.String(id),
			},
		},
	}

	_, err := u.db.DeleteItemWithContext(ctx, input)
	return err
}
[ 3 ]
package main

import (
	"encoding/json"
	"fmt"
	"log" // BUG FIX: log was used below but never imported
	"os"
)

// Response1 and Response2 demonstrate encoding/decoding of custom types;
// Response2 additionally customizes the JSON key names via struct tags.
type Response1 struct {
	Page   int
	Fruits []string
}
type Response2 struct {
	Page   int      `json:"page"`
	Fruits []string `json:"fruits"`
}

func main() {
	// Decoding into an empty interface yields a map[string]interface{} for a
	// JSON object.
	b := []byte(`{ "Title": "Go语言编程", "Authors": ["XuShiwei", "HughLv", "Pandaman", "GuaguaSong", "HanTuo", "BertYuan", "XuDaoli"], "Publisher": "ituring.com.cn", "IsPublished": true, "Price": 9.99, "Sales": 1000000 }`)
	var r interface{}
	json.Unmarshal(b, &r)
	fmt.Println(r)
	// map[Authors:[XuShiwei HughLv Pandaman GuaguaSong HanTuo BertYuan XuDaoli] IsPublished:true Price:9.99 Publisher:ituring.com.cn Sales:1e+06 Title:Go语言编程]

	// Encoding basic atomic values to JSON strings.
	bolB, _ := json.Marshal(true)
	fmt.Println(string(bolB))       //true
	fmt.Printf("%T,%v", bolB, bolB) //[]uint8,[116 114 117 101]

	intB, _ := json.Marshal(1)
	fmt.Printf("%T,%v", intB, intB) //[]uint8,[49]
	fmt.Println(string(intB))       //1

	fltB, _ := json.Marshal(2.34)
	fmt.Println(string(fltB)) //2.34

	strB, _ := json.Marshal("gopher")
	fmt.Println(string(strB)) //"gopher"

	// Slices and maps encode to JSON arrays and objects.
	slcD := []string{"apple", "peach", "pear"}
	slcB, _ := json.Marshal(slcD)
	fmt.Println(string(slcB))       //["apple","peach","pear"]
	fmt.Printf("%T,%v", slcB, slcB) //[]uint8,[91 34 97 112 112 108 101 34 44 34 112 101 97 99 104 34 44 34 112 101 97 114 34 93]

	mapD := map[string]int{"apple": 5, "lettuce": 7}
	mapB, _ := json.Marshal(mapD)
	fmt.Println(string(mapB)) //{"apple":5,"lettuce":7}
	fmt.Printf("%T,%v", mapB, mapB)

	// The json package automatically encodes custom types. Only exported
	// fields are emitted, using the field name as the JSON key by default.
	res1D := &Response1{
		Page:   1,
		Fruits: []string{"apple", "peach", "pear"}}
	res1B, _ := json.Marshal(res1D)
	fmt.Println(string(res1B)) //{"Page":1,"Fruits":["apple","peach","pear"]}
	fmt.Printf("%T,%v", res1B, res1B)

	// Struct tags (see Response2) customize the encoded key names.
	res2D := Response2{
		Page:   1,
		Fruits: []string{"apple", "peach", "pear"}}
	res2B, _ := json.Marshal(res2D)
	fmt.Println(string(res2B)) //{"page":1,"fruits":["apple","peach","pear"]}

	// Decoding JSON into Go values: a generic data structure example.
	byt := []byte(`{"num":6.13,"strs":["a","b"]}`)

	// We need a variable the json package can decode into; this map holds
	// string keys with values of any type.
	var dat map[string]interface{}

	// The actual decoding, with the associated error check.
	if err := json.Unmarshal(byt, &dat); err != nil {
		panic(err)
	}
	fmt.Println(dat)

	// Values in the decoded map need type assertions before use; JSON
	// numbers decode to float64.
	fmt.Printf("%T,%v", dat["num"], dat["num"]) //float64,6.13
	num := dat["num"].(float64)
	fmt.Printf("%T,%v", num, num) //float64,6.13

	// Nested values need a chain of assertions.
	fmt.Printf("%T,%v", dat["strs"], dat["strs"]) //[]interface {},[a b]
	strs := dat["strs"].([]interface{})
	fmt.Printf("%T,%v", strs, strs)       //[]interface {},[a b]
	fmt.Printf("%T,%v", strs[0], strs[0]) //string,a
	// fmt.Println(dat["strs"][0]) //dat["strs"][0] (type interface {} does not support indexing)
	str1 := strs[0].(string)
	fmt.Println(str1) //a

	// BUG FIX: a leftover `os.Exit(2)` here made everything below
	// unreachable dead code; it has been removed so the whole demo runs.

	// Decoding into a custom type adds type safety and removes the need for
	// type assertions when accessing the data.
	str := `{"page": 1, "fruits": ["apple", "peach"]}`
	res := &Response2{}
	json.Unmarshal([]byte(str), &res)
	fmt.Println(res)           //&{1 [apple peach]}
	fmt.Println(res.Fruits[0]) //apple

	// Instead of going through byte slices and strings, we can stream JSON
	// encoding directly to an os.Writer such as os.Stdout, or to an HTTP
	// response body.
	enc := json.NewEncoder(os.Stdout)
	d := map[string]int{"apple": 5, "lettuce": 7}
	enc.Encode(d)

	// Stream-decode objects from stdin, keep only the "Title" field, and
	// re-encode them to stdout.
	// BUG FIX: the original redeclared `enc` here with := (a compile error
	// in the same scope); the encoder created above is reused instead.
	dec := json.NewDecoder(os.Stdin)
	for {
		var v map[string]interface{}
		if err := dec.Decode(&v); err != nil {
			log.Println(err)
			return
		}
		for k := range v {
			// BUG FIX: the original body was `v[k] = v1`, a no-op
			// self-assignment; the upstream encoding/json example this is
			// based on deletes every key except "Title".
			if k != "Title" {
				delete(v, k)
			}
		}
		if err := enc.Encode(&v); err != nil {
			log.Println(err)
		}
	}
}
[ 3 ]
package web

import (
	"crypto/tls"
	"fmt"
	"log"
	"net"
	"net/mail"
	"net/smtp"
)

// SSL/TLS Email Example, doesn't work ....
//
// SendEmail opens a raw TLS connection to the SMTP server and walks through
// the MAIL/RCPT/DATA dialog by hand. Credentials, addresses and the server
// are hard-coded.
//
// NOTE(review): two likely reasons this "doesn't work":
//   - tls.Dial is made to port 25 (plaintext SMTP); the comment below says
//     this pattern is for servers on 465 that expect TLS from the start.
//   - the header-building loop is commented out, so the transmitted message
//     is only "\r\n" + body — From/To/Subject headers are never sent.
func SendEmail() {
	from := mail.Address{"John Lau", "[email protected]"}
	to := mail.Address{"John Lau", "[email protected]"}
	subj := "This is the email subject"
	body := "This is an example body.\n With two lines."

	// Setup headers
	headers := make(map[string]string)
	headers["From"] = from.String()
	headers["To"] = to.String()
	headers["Subject"] = subj

	// Setup message
	// NOTE(review): the loop that serializes `headers` into the message is
	// commented out, so `headers` is currently built but unused.
	message := ""
	// for k, v := range headers {
	// 	message += fmt.Sprintf("%s: %s\r\n", k, v)
	// }
	message += "\r\n" + body

	// Connect to the SMTP Server
	servername := "smtp.163.com:25"

	host, _, _ := net.SplitHostPort(servername)

	auth := smtp.PlainAuth("", "[email protected]", "passwd", host)

	// TLS config
	// InsecureSkipVerify disables certificate validation — acceptable only
	// for throwaway examples, never in production.
	tlsconfig := &tls.Config{
		InsecureSkipVerify: true,
		ServerName:         host,
	}

	// Here is the key, you need to call tls.Dial instead of smtp.Dial
	// for smtp servers running on 465 that require an ssl connection
	// from the very beginning (no starttls)
	conn, err := tls.Dial("tcp", servername, tlsconfig)
	if err != nil {
		log.Panic(err)
	}

	c, err := smtp.NewClient(conn, host)
	if err != nil {
		log.Panic(err)
	}

	// Auth
	if err = c.Auth(auth); err != nil {
		log.Panic(err)
	}

	// To && From
	if err = c.Mail(from.Address); err != nil {
		log.Panic(err)
	}
	if err = c.Rcpt(to.Address); err != nil {
		log.Panic(err)
	}

	// Data
	w, err := c.Data()
	if err != nil {
		log.Panic(err)
	}

	_, err = w.Write([]byte(message))
	if err != nil {
		log.Panic(err)
	}

	err = w.Close()
	if err != nil {
		log.Panic(err)
	}

	c.Quit()

	fmt.Printf("send [%s]email successfully!", message)
}

//It works.
//
// SendShortEmail sends a plain-text mail through smtp.SendMail (which
// performs connect, auth, MAIL/RCPT/DATA in one call) to the given
// recipient with the given subject and message body.
func SendShortEmail(to string, message string, subject string) {
	//TODO ....(password), Set up authentication information.
	auth := smtp.PlainAuth("", "[email protected]", "password...TODO", "smtp.163.com")

	// Connect to the server, authenticate, set the sender and recipient,
	// and send the email all in one step.
	_to := []string{to}
	// Headers and body are separated by a blank line per RFC 5322.
	msg := []byte("To: " + to + "\r\n" +
		"Subject: " + subject + "\r\n" +
		"\r\n" +
		"" + message + "\r\n")
	err := smtp.SendMail("smtp.163.com:25", auth, "[email protected]", _to, msg)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("----------------------------------------------\n send short email successfully!")
}
[ 3, 6 ]
package main

import "fmt"

// main demonstrates a closure: an anonymous function (a function
// expression assigned to a variable) that captures and mutates a variable
// from its enclosing scope instead of relying on global state.
func main() {
	counter := 0 // narrowly scoped; shared only with the closure below

	// next is an unnamed, nested function stored in a variable. Each call
	// increments the captured counter and returns its new value.
	next := func() int {
		counter++
		return counter
	}

	// Three calls print 1, 2 and 3 — the closure keeps its state between calls.
	for call := 0; call < 3; call++ {
		fmt.Println(next())
	}
}
[ 3 ]
package pipedrive

import (
	"context"
	"fmt"
	"net/http"
)

// DealService handles deals related
// methods of the Pipedrive API.
//
// Pipedrive API docs: https://developers.pipedrive.com/docs/api/v1/#!/Deals
type DealService service

// Deal represents a Pipedrive deal as returned by the API.
// Fields typed interface{} may be null in the API response.
type Deal struct {
	ID int `json:"id"`
	// Embedded summary of the user that created the deal.
	CreatorUserID struct {
		ID         int    `json:"id"`
		Name       string `json:"name"`
		Email      string `json:"email"`
		HasPic     bool   `json:"has_pic"`
		PicHash    string `json:"pic_hash"`
		ActiveFlag bool   `json:"active_flag"`
		Value      int    `json:"value"`
	} `json:"creator_user_id"`
	// Embedded summary of the deal's owner.
	UserID struct {
		ID         int    `json:"id"`
		Name       string `json:"name"`
		Email      string `json:"email"`
		HasPic     bool   `json:"has_pic"`
		PicHash    string `json:"pic_hash"`
		ActiveFlag bool   `json:"active_flag"`
		Value      int    `json:"value"`
	} `json:"user_id"`
	// Embedded summary of the contact person linked to the deal.
	PersonID struct {
		Name  string `json:"name"`
		Email []struct {
			Value   string `json:"value"`
			Primary bool   `json:"primary"`
		} `json:"email"`
		Phone []struct {
			Value   string `json:"value"`
			Primary bool   `json:"primary"`
		} `json:"phone"`
		Value int `json:"value"`
	} `json:"person_id"`
	// Embedded summary of the organization linked to the deal.
	OrgID struct {
		Name        string      `json:"name"`
		PeopleCount int         `json:"people_count"`
		OwnerID     int         `json:"owner_id"`
		Address     interface{} `json:"address"`
		CcEmail     string      `json:"cc_email"`
		Value       int         `json:"value"`
	} `json:"org_id"`
	StageID                  int         `json:"stage_id"`
	Title                    string      `json:"title"`
	Value                    int         `json:"value"`
	Currency                 string      `json:"currency"`
	AddTime                  string      `json:"add_time"`
	UpdateTime               string      `json:"update_time"`
	StageChangeTime          string      `json:"stage_change_time"`
	Active                   bool        `json:"active"`
	Deleted                  bool        `json:"deleted"`
	Status                   string      `json:"status"`
	Probability              interface{} `json:"probability"`
	NextActivityDate         interface{} `json:"next_activity_date"`
	NextActivityTime         interface{} `json:"next_activity_time"`
	NextActivityID           interface{} `json:"next_activity_id"`
	LastActivityID           int         `json:"last_activity_id"`
	LastActivityDate         string      `json:"last_activity_date"`
	LostReason               string      `json:"lost_reason"`
	VisibleTo                string      `json:"visible_to"`
	CloseTime                string      `json:"close_time"`
	PipelineID               int         `json:"pipeline_id"`
	WonTime                  interface{} `json:"won_time"`
	FirstWonTime             interface{} `json:"first_won_time"`
	LostTime                 string      `json:"lost_time"`
	ProductsCount            int         `json:"products_count"`
	FilesCount               int         `json:"files_count"`
	NotesCount               int         `json:"notes_count"`
	FollowersCount           int         `json:"followers_count"`
	EmailMessagesCount       int         `json:"email_messages_count"`
	ActivitiesCount          int         `json:"activities_count"`
	DoneActivitiesCount      int         `json:"done_activities_count"`
	UndoneActivitiesCount    int         `json:"undone_activities_count"`
	ReferenceActivitiesCount int         `json:"reference_activities_count"`
	ParticipantsCount        int         `json:"participants_count"`
	ExpectedCloseDate        interface{} `json:"expected_close_date"`
	LastIncomingMailTime     interface{} `json:"last_incoming_mail_time"`
	LastOutgoingMailTime     interface{} `json:"last_outgoing_mail_time"`
	// Custom field: the JSON key is the Pipedrive-generated hash of the
	// custom field, hence the unusual Go identifier.
	Eight02Aa45Ecc05F31Fcebe8B706510389F56B7A041 interface{} `json:"802aa45ecc05f31fcebe8b706510389f56b7a041"`
	StageOrderNr           int         `json:"stage_order_nr"`
	PersonName             string      `json:"person_name"`
	OrgName                string      `json:"org_name"`
	NextActivitySubject    interface{} `json:"next_activity_subject"`
	NextActivityType       interface{} `json:"next_activity_type"`
	NextActivityDuration   interface{} `json:"next_activity_duration"`
	NextActivityNote       interface{} `json:"next_activity_note"`
	FormattedValue         string      `json:"formatted_value"`
	RottenTime             interface{} `json:"rotten_time"`
	WeightedValue          int         `json:"weighted_value"`
	FormattedWeightedValue string      `json:"formatted_weighted_value"`
	OwnerName              string      `json:"owner_name"`
	CcEmail                string      `json:"cc_email"`
	OrgHidden              bool        `json:"org_hidden"`
	PersonHidden           bool        `json:"person_hidden"`
}

// String returns a human-readable representation of the deal.
func (d Deal) String() string {
	return Stringify(d)
}

// DealsResponse represents multiple deals response.
type DealsResponse struct { Success bool `json:"success,omitempty"` Data []Deal `json:"data,omitempty"` AdditionalData AdditionalData `json:"additional_data,omitempty"` } // DealResponse represents single deal response. type DealResponse struct { Success bool `json:"success,omitempty"` Data Deal `json:"data,omitempty"` AdditionalData AdditionalData `json:"additional_data,omitempty"` } // ListUpdates about a deal. // // Pipedrive API docs: https://developers.pipedrive.com/docs/api/v1/#!/Deals/get_deals_id_flow func (s *DealService) ListUpdates(ctx context.Context, id int) (*DealsResponse, *Response, error) { uri := fmt.Sprintf("/deals/%v/flow", id) req, err := s.client.NewRequest(http.MethodGet, uri, nil, nil) if err != nil { return nil, nil, err } var record *DealsResponse resp, err := s.client.Do(ctx, req, &record) if err != nil { return nil, resp, err } return record, resp, nil } // Find deals by name. // // Pipedrive API docs: https://developers.pipedrive.com/docs/api/v1/#!/Deals/get_deals_find func (s *DealService) Find(ctx context.Context, term string) (*DealsResponse, *Response, error) { req, err := s.client.NewRequest(http.MethodGet, "/deals/find", &SearchOptions{ Term: term, }, nil) if err != nil { return nil, nil, err } var record *DealsResponse resp, err := s.client.Do(ctx, req, &record) if err != nil { return nil, resp, err } return record, resp, nil } // List all deals. // // Pipedrive API docs: https://developers.pipedrive.com/docs/api/v1/#!/Deals/get_deals func (s *DealService) List(ctx context.Context) (*DealsResponse, *Response, error) { req, err := s.client.NewRequest(http.MethodGet, "/deals", nil, nil) if err != nil { return nil, nil, err } var record *DealsResponse resp, err := s.client.Do(ctx, req, &record) if err != nil { return nil, resp, err } return record, resp, nil } // Duplicate a deal. 
// // Pipedrive API docs: https://developers.pipedrive.com/docs/api/v1/#!/Deals/post_deals_id_duplicate func (s *DealService) Duplicate(ctx context.Context, id int) (*DealResponse, *Response, error) { uri := fmt.Sprintf("/deals/%v/duplicate", id) req, err := s.client.NewRequest(http.MethodPost, uri, nil, nil) if err != nil { return nil, nil, err } var record *DealResponse resp, err := s.client.Do(ctx, req, &record) if err != nil { return nil, resp, err } return record, resp, nil } // DealsMergeOptions specifices the optional parameters to the // DealService.Merge method. type DealsMergeOptions struct { MergeWithID uint `url:"merge_with_id,omitempty"` } // Merge two deals. // // Pipedrive API docs: https://developers.pipedrive.com/docs/api/v1/#!/Deals/put_deals_id_merge func (s *DealService) Merge(ctx context.Context, id int, opt *DealsMergeOptions) (*Response, error) { uri := fmt.Sprintf("/deals/%v/merge", id) req, err := s.client.NewRequest(http.MethodPut, uri, nil, opt) if err != nil { return nil, err } return s.client.Do(ctx, req, nil) } // DealsUpdateOptions specifices the optional parameters to the // DealService.Update method. type DealsUpdateOptions struct { Title string `url:"title,omitempty"` Value string `url:"value,omitempty"` Currency string `url:"currency,omitempty"` UserID uint `url:"user_id,omitempty"` PersonID uint `url:"person_id,omitempty"` OrganizationID uint `url:"org_id,omitempty"` StageID uint `url:"stage_id,omitempty"` Status string `url:"status,omitempty"` LostReason string `url:"lost_reason,omitempty"` VisibleTo uint `url:"visible_to,omitempty"` } // Update a deal. 
// Pipedrive API docs: https://developers.pipedrive.com/docs/api/v1/#!/Deals/put_deals_id func (s *DealService) Update(ctx context.Context, id int, opt *DealsUpdateOptions) (*Response, error) { uri := fmt.Sprintf("/deals/%v", id) req, err := s.client.NewRequest(http.MethodPut, uri, nil, opt) if err != nil { return nil, err } return s.client.Do(ctx, req, nil) } // DeleteFollower of a deal. // // Pipedrive API docs: https://developers.pipedrive.com/docs/api/v1/#!/Deals/delete_deals_id_followers_follower_id func (s *DealService) DeleteFollower(ctx context.Context, id int, followerID int) (*Response, error) { uri := fmt.Sprintf("/deals/%v/followers/%v", id, followerID) req, err := s.client.NewRequest(http.MethodDelete, uri, nil, nil) if err != nil { return nil, err } return s.client.Do(ctx, req, nil) } // DeleteMultiple deletes deals in bulk. // // Pipedrive API docs: https://developers.pipedrive.com/docs/api/v1/#!/Deals/delete_deals func (s *DealService) DeleteMultiple(ctx context.Context, ids []int) (*Response, error) { req, err := s.client.NewRequest(http.MethodDelete, "/deals", &DeleteMultipleOptions{ Ids: arrayToString(ids, ","), }, nil) if err != nil { return nil, err } return s.client.Do(ctx, req, nil) } // DeleteParticipant deletes participant in a deal. // // Pipedrive API docs: https://developers.pipedrive.com/docs/api/v1/#!/Deals/delete_deals_id_participants_deal_participant_id func (s *DealService) DeleteParticipant(ctx context.Context, dealID int, participantID int) (*Response, error) { uri := fmt.Sprintf("/deals/%v/participants/%v", dealID, participantID) req, err := s.client.NewRequest(http.MethodDelete, uri, nil, nil) if err != nil { return nil, err } return s.client.Do(ctx, req, nil) } // Delete a deal. 
// // Pipedrive API docs: https://developers.pipedrive.com/docs/api/v1/#!/Deals/delete_deals_id func (s *DealService) Delete(ctx context.Context, id int) (*Response, error) { uri := fmt.Sprintf("/deals/%v", id) req, err := s.client.NewRequest(http.MethodDelete, uri, nil, nil) if err != nil { return nil, err } return s.client.Do(ctx, req, nil) } // DeleteAttachedProduct deletes attached product. // // Pipedrive API docs: https://developers.pipedrive.com/docs/api/v1/#!/Deals/delete_deals_id_products_product_attachment_id func (s *DealService) DeleteAttachedProduct(ctx context.Context, dealID int, productAttachmentID int) (*Response, error) { uri := fmt.Sprintf("/deals/%v/products/%v", dealID, productAttachmentID) req, err := s.client.NewRequest(http.MethodDelete, uri, nil, nil) if err != nil { return nil, err } return s.client.Do(ctx, req, nil) }
[ 6 ]
/* Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package test //image_constants.go defines constants that are shared between test-images and conformance tests //EnvImageServerPort is the port on which the environment test-image server starts. // TODO: Modify this port number after https://github.com/knative/serving/issues/2258 is fixed for a stricter verification. const EnvImageServerPort = 8080 //EnvImageEnvVarsPath path exposed by environment test-image to fetch environment variables. const EnvImageEnvVarsPath = "/envvars" //EnvImageFilePathInfoPath path exposed by environment test-image to fetch information for filepaths const EnvImageFilePathInfoPath = "/filepath" //EnvImageFilePathQueryParam query param to be used with EnvImageFilePathInfoPath to specify filepath const EnvImageFilePathQueryParam = "path"
[ 3 ]
package styx

import (
	"encoding/binary"
)

// Naming conventions used throughout this file:
// i, j, k, l... are int indices
// p, q, r... are string variable labels
// u, v, w... are *Variable pointers
// x, y... are dependency slice indices, where e.g. iter.In[p][x] == i

// next advances the backtracking search, starting at domain index i, until
// the variables are in a new globally consistent state. It returns tail,
// which is set to the index where a new value was found on success, and
// remains iter.Len() when the search space is exhausted. Any storage error
// from push/restore is returned in err.
func (iter *Iterator) next(i int) (tail int, err error) {
	var ok bool
	tail = iter.Len()
	// Okay so we start at the index given to us
	for i >= 0 {
		u := iter.variables[i]
		// Try naively getting another value from u
		u.value = u.Next()
		if u.value == NIL {
			// It didn't work :-/
			// This means we reset u, decrement i, and continue
			u.value = u.Seek(u.root)
			err = iter.push(u, i, iter.Len())
			if err != nil {
				return
			}
			i--
			continue
		}
		// It worked! We have a new value for u.
		// Now we need to propagate it to the rest of the
		// variables in iter.domain[i+1:], if they exist,
		// and make sure they're satisfied.
		// To do that, first push u's value to the rest of the domain
		err = iter.push(u, i, iter.Len())
		if err != nil {
			return
		}
		// Now set the `cursor` variable to our current index.
		// If we fail to satisfy a variable in iter.out[i] while
		// propagating here, then we'll set cursor to that failure index.
		cursor := i
		// Don't recurse on i!
		iter.blacklist[i] = true
		for _, j := range iter.out[i] {
			v := iter.variables[j]
			// We have to give iter.tick(j, ...) a fresh cache here.
			// TODO: there's some memory saving stuff to be done about caches :-/
			d := make([]*vcache, j)
			// Okay - what we want is a new value for v. Since we pushed a new u.value
			// into v, we have to "start all over" with v.Seek(v.root).
			// This might mean that v.Seek(v.root) gives us a non-nil value - that's great!
			// Then we don't even enter the loop. But if v.Seek(v.root) _doesn't_ give us a
			// value, then we have to use iter.tick() on j to tick j's dependencies into their
			// next valid state (passing the fresh cache in). That will either irrecoverably
			// fail or give us a new state to try v.Seek(v.root) again on.
			for v.value = v.Seek(v.root); v.value == NIL; v.value = v.Seek(v.root) {
				ok, err = iter.tick(j, i, d)
				if err != nil {
					return
				} else if !ok {
					break
				}
			}
			// Cool! Either v.value == nil and there are no more solutions to the query...
			if v.value == NIL {
				cursor = j
				break
			}
			// ... or v.value != nil and we can push it to the rest of the domain!
			err = iter.push(v, j, iter.Len())
			if err != nil {
				return
			}
			// One really tricky bit is that when we used iter.tick(j, i, d) to satisfy v,
			// it might have changed some other previous values in the domain.
			// Specifically, it might have changed:
			// - variables between i and j that are in out[i]
			// - variables between i and j that are NOT in out[i] (!!)
			// It will NOT change any variables before i (or i itself)
			// So we can't use out[i] here - we have to use the cache d that we passed
			// into iter.tick(j, i, d) to tell which variables were changed.
			for x, saved := range d[i+1:] {
				if saved != nil {
					k := i + 1 + x
					// If iter.cache[k] hasn't been saved yet, save it here.
					if iter.cache[k] == nil {
						iter.cache[k] = saved
					}
				}
			}
		}
		iter.blacklist[i] = false
		// Cool - either we completed the loop over iter.out[i] naturally,
		// or we broke out early and set cursor = j. Check for that here:
		if cursor == i {
			// Success!! We brought all of the variables before and after i into
			// a valid state. Clear the cache and return.
			clear(iter.cache)
			tail = i
			return
		}
		// Oh well - we broke out early at cursor = j, because the v.Seek / iter.tick(j...)
		// loop didn't give us a result. Now we restore the values that changed...
		err = iter.restore(iter.cache[:cursor+1], iter.Len())
		if err != nil {
			return
		}
		// ... and clear the cache, but don't decrement i.
		// We want to try the same variable i over and over until it gives up!
		// (I'm not actually sure if we need to clear the cache here...)
		clear(iter.cache)
	}
	return
}

// clear nils out every saved entry of delta in place, leaving the slice
// length and capacity untouched. (Not the Go 1.21 built-in of the same
// name - this file predates it.)
func clear(delta []*vcache) {
	for i, saved := range delta {
		if saved != nil {
			delta[i] = nil
		}
	}
}

// tick advances the given index's dependencies into their next valid state, giving
// the variable at the index (at least) one new value at its incoming constraints.
// tick makes two promises. The first is that it will leave your blacklist in the same
// state that it found it. The second is that either a) it will return ok = false, the
// variables will be in their initial states, and delta is in its initial state; or b)
// it will return ok = true, the variables rest in a new consensus state, every changed
// variable's initial state is added to delta if it doesn't already exist, and no
// non-nil element of delta is overwritten.
func (iter *Iterator) tick(i, min int, delta []*vcache) (ok bool, err error) {
	next := make([]*vcache, i)
	// The biggest outer loop is walking backwards over iter.In[i]
	x := len(iter.in[i])
	for x > 0 {
		j := iter.in[i][x-1]
		if j <= min {
			return false, iter.restore(next, i)
		} else if iter.blacklist[j] {
			x--
			continue
		}
		v := iter.variables[j]
		self := v.save()
		if v.value = v.Next(); v.value == NIL {
			// That sucks. Now we need to restore the value
			// that was changed and decrement x.
			v.value = v.Seek(self.ID)
			x--
		} else {
			// We got a non-nil value for v, so now we
			// propagate between j and i, then crawl forward
			// over the indices in iter.Out[q] that are less than i
			// and seek to their new values.

			// Propagate up to but not including i
			if err = iter.push(v, j, i); err != nil {
				return
			}
			// Fantastic. Now that we've propagated the value we found for v,
			// we start "the crawl" from j to i, seeking to the new satisfying root
			// and recursing on tick when necessary.
			cursor := j
			iter.blacklist[j] = true
			for _, k := range iter.out[j] {
				if k >= i {
					break
				}
				w := iter.variables[k]
				if next[k] == nil {
					next[k] = w.save()
				}
				d := make([]*vcache, k)
				// Here we keep seeking and ticking until we have a real value.
				for w.value = w.Seek(w.root); w.value == NIL; w.value = w.Seek(w.root) {
					if ok, err = iter.tick(k, min, d); err != nil {
						return
					} else if ok {
						continue
					} else if err = iter.restore(d, i); err != nil {
						return
					} else {
						break
					}
				}
				if w.value == NIL {
					// We were unable to complete the crawl.
					// We've already reset our state.
					// This is how far we got:
					cursor = k + 1
					break
				}
				// We got a real value for w! Now we propagate the affected values
				// through i and stash them into next if they're not there already,
				// and then continue with the tick-crawl.
				err = iter.push(w, k, i)
				if err != nil {
					return
				}
				for l, saved := range d {
					if saved != nil {
						err = iter.push(iter.variables[l], l, i)
						if err != nil {
							return
						}
						if next[l] == nil {
							next[l] = saved
						}
					}
				}
			}
			// We need to *unset* the blacklist after recursing.
			// Variables are only blacklisted when they appear as
			// a parent in the call stack - they might be visited
			// twice as siblings in the call tree, etc.
			iter.blacklist[j] = false
			if cursor == j {
				// Hooray!
				// Now here we need to push every affected value
				// through to the rest of the domain
				// delta[j] = self
				next[j] = self
				for l, saved := range next {
					if saved != nil {
						if delta[l] == nil {
							delta[l] = saved
						}
						err = iter.push(iter.variables[l], i, iter.Len())
						if err != nil {
							return
						}
					}
				}
				return true, nil
			}
			// This means we reset (all) those affected to their previous state
			err = iter.restore(next, i)
			if err != nil {
				return
			}
		}
	}
	return
}

// restore loads each non-nil saved state in cache back into the variable at
// the same index and re-pushes the restored value through the domain up to
// (but not including) max.
func (iter *Iterator) restore(cache []*vcache, max int) (err error) {
	for i, saved := range cache {
		// If the variable at i has been modified by the
		// (potentially) multiple recursive calls to tick,
		// then reset it to its previous state.
		if saved != nil {
			u := iter.variables[i]
			iter.load(u, saved)
			// u.load(saved)
			// Push the restored state through the max
			if err = iter.push(u, i, max); err != nil {
				return
			}
		}
	}
	return
}

// push propagates u's current value to every neighboring variable whose
// index falls in [min, max): it rewrites each shared constraint's key
// prefix and count, then invalidates the neighbor (value set to NIL,
// constraints re-sorted) so it will be re-sought with the new bounds.
func (iter *Iterator) push(u *variable, min, max int) (err error) {
	for j, cs := range u.edges {
		if j >= min && j < max {
			// Update the incoming D2 constraints by using .dual to find them
			for _, c := range cs {
				// Since u has a value, all of its constraints are in consensus.
				// That means we can freely access their iterators!
				// In this case, all the iterators for the outgoing u.d2s have
				// values that are the counts (uint32) of them *and their dual*.
				i := c.place
				v := iter.variables[j]
				m, n := (i+1)%3, (i+2)%3
				// Locate which term of the quad belongs to the neighbor v.
				place := i
				if v.node.Equal(c.quad[m]) {
					place = m
				} else if v.node.Equal(c.quad[n]) {
					place = n
				}
				neighbor := c.neighbors[place]
				neighbor.terms[i] = u.value
				item := c.iterator.Item()
				meta := item.UserMeta()
				if meta == UnaryPrefix {
					// Only one term is bound so far: use a binary (two-term) prefix.
					var p Permutation = i
					if place == m {
						p = place
					} else if place == n {
						p = place + 3
					}
					neighbor.prefix = assembleKey(BinaryPrefixes[p], true, u.value)
					neighbor.count, err = iter.unary.Get(p, u.value, iter.txn)
				} else {
					// Two terms bound: use the ternary prefix and read the
					// stored big-endian count directly from the item value.
					A, B := (neighbor.place+1)%3, (neighbor.place+2)%3
					neighbor.prefix = assembleKey(TernaryPrefixes[A], true, neighbor.terms[A], neighbor.terms[B])
					err = item.Value(func(val []byte) error {
						neighbor.count = binary.BigEndian.Uint32(val)
						return nil
					})
				}
				if err != nil {
					return
				}
			}
			// Clear the value, like I promised you earlier.
			w := iter.variables[j]
			w.value = NIL
			w.Sort()
		}
	}
	return
}
[ 1 ]
// Package cal_test implements a doubly linked list used as a deque, plus a
// sliding-window-maximum exercise (LeetCode 239) built on top of it.
package cal_test

import (
	"fmt"
	"testing"
)

// ListNode is a node of the doubly linked list.
type ListNode struct {
	prev  *ListNode   // previous node
	next  *ListNode   // next node
	value interface{} // payload
}

// NewListNode creates a node holding value.
func NewListNode(value interface{}) (listNode *ListNode) {
	listNode = &ListNode{
		value: value,
	}
	return
}

// Prev returns the node before n.
func (n *ListNode) Prev() (prev *ListNode) {
	prev = n.prev
	return
}

// Next returns the node after n.
func (n *ListNode) Next() (next *ListNode) {
	next = n.next
	return
}

// GetValue returns the node's payload (nil for a nil node).
func (n *ListNode) GetValue() (value interface{}) {
	if n == nil {
		return
	}
	value = n.value
	return
}

// List is a doubly linked list usable as a deque.
type List struct {
	head *ListNode // first node
	tail *ListNode // last node
	len  int       // number of nodes
}

// NewList creates an empty list.
func NewList() (list *List) {
	list = &List{}
	return
}

// Head returns the first node (nil when empty).
func (l *List) Head() (head *ListNode) {
	head = l.head
	return
}

// Tail returns the last node (nil when empty).
func (l *List) Tail() (tail *ListNode) {
	tail = l.tail
	return
}

// Len returns the number of nodes.
func (l *List) Len() (len int) {
	len = l.len
	return
}

// RPush appends value at the right (tail) end.
func (l *List) RPush(value interface{}) {
	node := NewListNode(value)
	if l.Len() == 0 {
		// Empty list: node becomes both head and tail.
		l.head = node
		l.tail = node
	} else {
		tail := l.tail
		tail.next = node
		node.prev = tail
		l.tail = node
	}
	l.len = l.len + 1
	return
}

// LPop removes and returns the leftmost (head) node, or nil if empty.
func (l *List) LPop() (node *ListNode) {
	if l.len == 0 {
		return
	}
	node = l.head
	if node.next == nil {
		// List becomes empty.
		l.head = nil
		l.tail = nil
	} else {
		l.head = node.next
		// BUGFIX: detach the new head from the removed node.
		l.head.prev = nil
	}
	l.len = l.len - 1
	return
}

// RPop removes and returns the rightmost (tail) node, or nil if empty.
func (l *List) RPop() (node *ListNode) {
	if l.len == 0 {
		return
	}
	node = l.tail
	if node.prev == nil {
		// BUGFIX: when the last node is removed the list must become empty;
		// previously head/tail were left pointing at the removed node.
		l.head = nil
		l.tail = nil
	} else {
		l.tail = node.prev
		// BUGFIX: detach the new tail from the removed node.
		l.tail.next = nil
	}
	l.len = l.len - 1
	return
}

func Test_deque(t *testing.T) {
	nums := []int{1, 3, -1, -3, 5, 3, 6, 7}
	fmt.Println(maxSlidingWindow(nums, 3))
}

// maxSlidingWindow returns, for every window of k consecutive elements of
// nums, the maximum element of that window. It returns nil when nums is
// empty, k <= 0, or k > len(nums).
//
// It maintains a deque of indices whose values decrease from head to tail,
// so the head is always the index of the current window's maximum.
//
// BUGFIX: the previous implementation mixed values and indices in the deque,
// produced wrong results, and panicked for k > len(nums) via a negative
// make() capacity.
func maxSlidingWindow(nums []int, k int) []int {
	if len(nums) == 0 || k <= 0 || k > len(nums) {
		return nil
	}
	d := NewList() // deque of indices; nums values decrease from head to tail
	result := make([]int, 0, len(nums)-k+1)
	for i, v := range nums {
		// Drop the head index if it slid out of the window [i-k+1, i].
		if d.Len() > 0 && d.Head().GetValue().(int) <= i-k {
			d.LPop()
		}
		// Drop tail indices whose values are <= v: they can never be a
		// window maximum while v is in the window.
		for d.Len() > 0 && nums[d.Tail().GetValue().(int)] <= v {
			d.RPop()
		}
		d.RPush(i)
		// Once the first full window ends, record its maximum.
		if i >= k-1 {
			result = append(result, nums[d.Head().GetValue().(int)])
		}
	}
	return result
}
[ 0, 1, 3 ]
package k8scomponents_test

import (
	"errors"
	"testing"

	"github.com/kyma-incubator/github-slack-connectors/scenario/github-issue-sentiment-analysis/internal/k8scomponents"
	"github.com/stretchr/testify/assert"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/kyma-incubator/github-slack-connectors/scenario/github-issue-sentiment-analysis/internal/k8scomponents/mocks"

	v1beta1svc "github.com/poy/service-catalog/pkg/apis/servicecatalog/v1beta1"
)

// TestCreateServiceInstance verifies that Create forwards the instance to
// the underlying client and propagates both its result and its error.
func TestCreateServiceInstance(t *testing.T) {
	t.Run("should create Binding, return new binding and nil", func(t *testing.T) {
		// given
		wanted := &v1beta1svc.ServiceInstance{}
		client := &mocks.ServiceInstanceInterface{}
		client.On("Create", wanted).Return(wanted, nil)

		// when
		got, err := k8scomponents.NewServiceInstance(client, "default").Create(wanted)

		// then
		assert.NoError(t, err)
		assert.Equal(t, wanted, got)
	})

	t.Run("should return nil and error when cannot create Binding", func(t *testing.T) {
		// given
		broken := &v1beta1svc.ServiceInstance{}
		client := &mocks.ServiceInstanceInterface{}
		client.On("Create", broken).Return(nil, errors.New("error text"))

		// when
		got, err := k8scomponents.NewServiceInstance(client, "default").Create(broken)

		// then
		assert.Error(t, err)
		assert.Nil(t, got)
	})
}

// TestDeleteServiceInstance verifies that Delete forwards name and options
// to the underlying client.
func TestDeleteServiceInstance(t *testing.T) {
	t.Run("should return ServiceBinding", func(t *testing.T) {
		// given
		const (
			name      = "name"
			namespace = "namespace"
		)
		options := &v1.DeleteOptions{}
		client := &mocks.ServiceInstanceInterface{}
		client.On("Delete", name, options).Return(nil)

		// when
		err := k8scomponents.NewServiceInstance(client, namespace).Delete(name, options)

		// then
		assert.NoError(t, err)
	})
}

// TestGetEventBodyServiceInstance verifies that Prepare assembles the
// expected ServiceInstance manifest from its arguments.
func TestGetEventBodyServiceInstance(t *testing.T) {
	t.Run("should return ServiceBinding", func(t *testing.T) {
		// given
		const (
			name                     = "name"
			serviceClassExternalName = "serviceClassExternalName"
			plan                     = "plan"
			namespace                = "namespace"
		)
		raw := runtime.RawExtension{}
		_ = raw.UnmarshalJSON([]byte(`{"location": "westeurope","resourceGroup": "github-slack-scenario"}`))
		expected := &v1beta1svc.ServiceInstance{
			ObjectMeta: v1.ObjectMeta{
				Name:      name + "inst",
				Namespace: namespace,
			},
			Spec: v1beta1svc.ServiceInstanceSpec{
				Parameters: &raw,
				PlanReference: v1beta1svc.PlanReference{
					ServiceClassExternalName: serviceClassExternalName,
					ServicePlanExternalName:  plan,
				},
			},
		}
		client := &mocks.ServiceInstanceInterface{}

		// when
		prepared := k8scomponents.NewServiceInstance(client, namespace).Prepare(name, serviceClassExternalName, plan, &raw)

		// then
		assert.Equal(t, expected, prepared)
	})
}
[ 3 ]
// This code is either inspired from or taken directly from go's tls package

package noise

import (
	"errors"
	"io"
	"net"
	"strconv"
	"strings"
	"sync"
	"time"
)

// Conn represents a secured connection.
// It implements the net.Conn interface.
type Conn struct {
	conn     net.Conn // underlying transport
	isClient bool     // true when this side initiated the connection

	// handshake
	config            *Config        // configuration passed to constructor
	hs                *HandshakeState // live handshake state; keeps the remote static key after completion
	handshakeComplete bool            // set once Handshake has finished successfully
	handshakeMutex    sync.Mutex      // serializes Handshake calls

	// Authentication
	isRemoteAuthenticated bool

	// input/output
	in, out         *CipherState // transport ciphers established by the handshake
	inLock, outLock sync.Mutex   // serialize Read and Write respectively
	inputBuffer     []byte       // decrypted bytes not yet delivered to the caller
}

// LocalAddr returns the local network address.
func (c *Conn) LocalAddr() net.Addr {
	return c.conn.LocalAddr()
}

// RemoteAddr returns the remote network address.
func (c *Conn) RemoteAddr() net.Addr {
	return c.conn.RemoteAddr()
}

// SetDeadline sets the read and write deadlines associated with the connection.
// A zero value for t means Read and Write will not time out.
// After a Write has timed out, the Noise state is corrupt and all future writes will return the same error.
func (c *Conn) SetDeadline(t time.Time) error {
	return c.conn.SetDeadline(t)
}

// SetReadDeadline sets the read deadline on the underlying connection.
// A zero value for t means Read will not time out.
func (c *Conn) SetReadDeadline(t time.Time) error {
	return c.conn.SetReadDeadline(t)
}

// SetWriteDeadline sets the write deadline on the underlying connection.
// A zero value for t means Write will not time out.
// After a Write has timed out, the Noise state is corrupt and all future writes will return the same error.
func (c *Conn) SetWriteDeadline(t time.Time) error {
	return c.conn.SetWriteDeadline(t)
}

// Write writes data to the connection.
func (c *Conn) Write(b []byte) (int, error) { // if hp := c.config.Pattern; !c.isClient && len(hp.Messages) < 2 { return 0, errors.New("A server should not write on one-way patterns") } // Make sure to go through the handshake first if err := c.Handshake(); err != nil { return 0, err } // Lock the write socket c.outLock.Lock() defer c.outLock.Unlock() // process the data in a loop var n int data := b for len(data) > 0 { // fragment the data m := len(data) if m > MaxMsgLen { m = MaxMsgLen } // Encrypt ciphertext := c.out.Encrypt(nil, nil, data[:m]) // header (length) length := []byte{byte(len(ciphertext) >> 8), byte(len(ciphertext) % 256)} // Send data _, err := c.conn.Write(append(length, ciphertext...)) if err != nil { return n, err } n += m data = data[m:] } return n, nil } // Read can be made to time out and return a net.Error with Timeout() == true // after a fixed time limit; see SetDeadline and SetReadDeadline. func (c *Conn) Read(b []byte) (int, error) { var err error // Make sure to go through the handshake first if err = c.Handshake(); err != nil { return 0, err } // Put this after Handshake, in case people were calling // Read(nil) for the side effect of the Handshake. 
if len(b) == 0 { return 0, err } // If this is a one-way pattern, do some checks if hp := c.config.Pattern; !c.isClient && len(hp.Messages) < 2 { return 0, errors.New("A client should not read on one-way patterns") } // Lock the read socket c.inLock.Lock() defer c.inLock.Unlock() // read whatever there is to read in the buffer readSoFar := 0 if len(c.inputBuffer) > 0 { copy(b, c.inputBuffer) if len(c.inputBuffer) >= len(b) { c.inputBuffer = c.inputBuffer[len(b):] return len(b), nil } readSoFar += len(c.inputBuffer) c.inputBuffer = c.inputBuffer[:0] } // read header from socket bufHeader, err := readBytes(c.conn, 2) if err != nil { return 0, err } length := (int(bufHeader[0]) << 8) | int(bufHeader[1]) if length > MaxMsgLen { return 2, errors.New("Noise: Noise message received exceeds NoiseMessageLength") } // read noise message from socket noiseMessage, err := readBytes(c.conn, length) if err != nil { return 0, err } // decrypt plaintext, err := c.in.Decrypt(nil, nil, noiseMessage) if err != nil { return 0, err } // append to the input buffer c.inputBuffer = append(c.inputBuffer, plaintext...) // read whatever we can read rest := len(b) - readSoFar copy(b[readSoFar:], c.inputBuffer) if len(c.inputBuffer) >= rest { c.inputBuffer = c.inputBuffer[rest:] return len(b), nil } // we haven't filled the buffer readSoFar += len(c.inputBuffer) c.inputBuffer = c.inputBuffer[:0] return readSoFar, nil } // Close closes the connection. func (c *Conn) Close() error { return c.conn.Close() } // Noise-related functions // Handshake runs the client or server handshake protocol if // it has not yet been run. // Most uses of this package need not call Handshake explicitly: // the first Read or Write will call it automatically. 
func (c *Conn) Handshake() (err error) { c.handshakeMutex.Lock() defer c.handshakeMutex.Unlock() if c.handshakeComplete { return nil } var remoteKeyPair *DHKey if c.config.PeerStatic != nil { if len(c.config.PeerStatic) != 32 { return errors.New("noise: the provided remote key is not 32-byte") } remoteKeyPair = &DHKey{} copy(remoteKeyPair.Public[:], c.config.PeerStatic) } c.hs, err = NewHandshakeState(*c.config) if err != nil { return err } // start handshake var c1, c2 *CipherState var state bool var msg []byte state = c.isClient for _ = range c.config.Pattern.Messages { if state { msg, c1, c2, err = c.hs.WriteMessage(nil, nil) if err != nil { return err } // header (length) length := []byte{byte(len(msg) >> 8), byte(len(msg) % 256)} // write _, err = c.conn.Write(append(length, msg...)) if err != nil { return err } } else { bufHeader, err := readBytes(c.conn, 2) if err != nil { return err } length := (int(bufHeader[0]) << 8) | int(bufHeader[1]) if length > MaxMsgLen { return errors.New("Noise: Noise message received exceeds NoiseMessageLength") } msg, err = readBytes(c.conn, length) if err != nil { return err } _, c1, c2, err = c.hs.ReadMessage(nil, msg) if err != nil { return err } } state = !state } if c.isClient { c.out, c.in = c1, c2 } else { c.out, c.in = c2, c1 } c.handshakeComplete = true return nil } // IsRemoteAuthenticated can be used to check if the remote peer has been // properly authenticated. It serves no real purpose for the moment as the // handshake will not go through if a peer is not properly authenticated in // patterns where the peer needs to be authenticated. func (c *Conn) IsRemoteAuthenticated() bool { return c.isRemoteAuthenticated } // RemoteKey returns the static key of the remote peer. // It is useful in case the static key is only transmitted during the handshake. 
func (c *Conn) RemoteKey() ([]byte, error) { if !c.handshakeComplete { return nil, errors.New("handshake not completed") } return c.hs.rs, nil } // Server returns a new Noise server side connection // using net.Conn as the underlying transport. // The configuration config must be non-nil and must include // at least one certificate or else set GetCertificate. func Server(conn net.Conn, config *Config) *Conn { return &Conn{conn: conn, config: config, isClient: false} } // Client returns a new Noise client side connection // using conn as the underlying transport. // The config cannot be nil: users must set either ServerName or // InsecureSkipVerify in the config. func Client(conn net.Conn, config *Config) *Conn { return &Conn{conn: conn, config: config, isClient: true} } // A Listener implements a network Listener (net.Listener) for Noise connections. type Listener struct { net.Listener config *Config } // Accept waits for and returns the next incoming Noise connection. // The returned connection is of type *Conn. func (l *Listener) Accept() (net.Conn, error) { c, err := l.Listener.Accept() if err != nil { return &Conn{}, err } return Server(c, l.config), nil } // Close closes the listener. // Any blocked Accept operations will be unblocked and return errors. func (l *Listener) Close() error { return l.Listener.Close() } // Addr returns the listener's network address. func (l *Listener) Addr() net.Addr { return l.Listener.Addr() } // Listen creates a Noise Listener accepting connections on the // given network address using net.Listen. // The configuration config must be non-nil. 
func Listen(network, laddr string, config *Config) (net.Listener, error) { if config == nil { return &Listener{}, errors.New("Noise: no Config set") } l, err := net.Listen(network, laddr) if err != nil { return &Listener{}, err } noiseListener := &Listener{} noiseListener.Listener = l noiseListener.config = config return noiseListener, nil } type timeoutError struct{} func (timeoutError) Error() string { return "noise: DialWithDialer timed out" } func (timeoutError) Timeout() bool { return true } func (timeoutError) Temporary() bool { return true } // DialWithDialer connects to the given network address using dialer.Dial and // then initiates a Noise handshake, returning the resulting Noise connection. Any // timeout or deadline given in the dialer apply to connection and Noise // handshake as a whole. func DialWithDialer(dialer *net.Dialer, network, addr, localAddr string, config *Config) (*Conn, error) { // We want the Timeout and Deadline values from dialer to cover the // whole process: TCP connection and Noise handshake. This means that we // also need to start our own timers now. 
timeout := dialer.Timeout if !dialer.Deadline.IsZero() { deadlineTimeout := time.Until(dialer.Deadline) if timeout == 0 || deadlineTimeout < timeout { timeout = deadlineTimeout } } // check Config if config == nil { return nil, errors.New("empty noise.Config") } // Dial the net.Conn first var errChannel chan error if timeout != 0 { errChannel = make(chan error, 2) time.AfterFunc(timeout, func() { errChannel <- timeoutError{} }) } localAddrArray := strings.Split(localAddr, ":") if len(localAddrArray) != 2 { return nil, errors.New("invalid source address") } localPort, err := strconv.Atoi(localAddrArray[1]) if err != nil { return nil, errors.New("invalid source port") } localAddress := net.ParseIP(localAddrArray[0]) dialer.LocalAddr = &net.TCPAddr{ IP: localAddress, Port: localPort, } rawConn, err := dialer.Dial(network, addr) if err != nil { return nil, err } // Create the noise.Conn conn := Client(rawConn, config) // Do the handshake if timeout == 0 { err = conn.Handshake() } else { go func() { errChannel <- conn.Handshake() }() err = <-errChannel } if err != nil { rawConn.Close() return nil, err } return conn, nil } // Dial connects to the given network address using net.Dial // and then initiates a Noise handshake, returning the resulting // Noise connection. func Dial(network, addr string, localAddr string, config *Config) (*Conn, error) { return DialWithDialer(new(net.Dialer), network, addr, localAddr, config) } func readBytes(r io.Reader, n int) ([]byte, error) { result := make([]byte, n) offset := 0 for { m, err := r.Read(result[offset:]) if err != nil { return result, err } offset += m if offset == n { break } } return result, nil }
[ 6 ]
/*
 * Copyright (c) 2018. Abstrium SAS <team (at) pydio.com>
 * This file is part of Pydio Cells.
 *
 * Pydio Cells is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Pydio Cells is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with Pydio Cells.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The latest code can be found at <https://pydio.com>.
 */

package docstore

import (
	"context"
	"encoding/json"
	"os"
	"strings"

	"github.com/blevesearch/bleve"
	"go.uber.org/zap"

	"github.com/pmker/yux/common/log"
	"github.com/pmker/yux/common/proto/docstore"
)

// BleveServer indexes docstore documents in a Bleve database on disk.
type BleveServer struct {
	// Internal Bleve database
	Engine bleve.Index
	// For Testing purpose : delete file after closing
	DeleteOnClose bool
	// Path to the DB file
	IndexPath string
}

// NewBleveEngine opens the Bleve index at bleveIndexPath, creating it with
// the default mapping if it does not exist yet. When the optional
// deleteOnClose flag is true, the index files are removed on Close (used
// for testing).
func NewBleveEngine(bleveIndexPath string, deleteOnClose ...bool) (*BleveServer, error) {
	// Open an existing index, or create a fresh one if the path is absent.
	_, e := os.Stat(bleveIndexPath)
	var index bleve.Index
	var err error
	if e == nil {
		index, err = bleve.Open(bleveIndexPath)
	} else {
		index, err = bleve.New(bleveIndexPath, bleve.NewIndexMapping())
	}
	if err != nil {
		return nil, err
	}
	del := false
	if len(deleteOnClose) > 0 && deleteOnClose[0] {
		del = true
	}
	return &BleveServer{
		Engine:        index,
		IndexPath:     bleveIndexPath,
		DeleteOnClose: del,
	}, nil
}

// Close closes the underlying index and, when DeleteOnClose is set, removes
// the index files from disk.
func (s *BleveServer) Close() error {
	err := s.Engine.Close()
	if s.DeleteOnClose {
		err = os.RemoveAll(s.IndexPath)
	}
	return err
}

// IndexDocument indexes the document's IndexableMeta JSON under the doc ID,
// tagging it with the store ID and, when present, the owner.
func (s *BleveServer) IndexDocument(storeID string, doc *docstore.Document) error {
	// Documents without indexable metadata are simply skipped.
	if doc.IndexableMeta == "" {
		return nil
	}
	toIndex := make(map[string]interface{})
	err := json.Unmarshal([]byte(doc.IndexableMeta), &toIndex)
	if err != nil {
		// NOTE(review): an unmarshalable IndexableMeta is silently skipped
		// (returns nil, not err) - presumably best-effort indexing; confirm
		// this is intended before changing it to surface the error.
		return nil
	}
	// Reserved keys used later by SearchDocuments filters.
	toIndex["DOCSTORE_STORE_ID"] = storeID
	toIndex["DOCSTORE_DOC_ID"] = doc.GetID()
	if doc.GetOwner() != "" {
		toIndex["DOCSTORE_OWNER"] = doc.GetOwner()
	}
	log.Logger(context.Background()).Debug("IndexDocument", zap.Any("data", toIndex))
	err = s.Engine.Index(doc.GetID(), toIndex)
	if err != nil {
		return err
	}
	return nil
}

// DeleteDocument removes the document with the given ID from the index.
// The storeID argument is currently unused.
func (s *BleveServer) DeleteDocument(storeID string, docID string) error {
	return s.Engine.Delete(docID)
}

// ClearIndex removes every document currently in the index.
func (s *BleveServer) ClearIndex(ctx context.Context) error {
	// List all nodes and remove them
	request := bleve.NewSearchRequest(bleve.NewMatchAllQuery())
	// Use the maximum int as the page size so a single search returns
	// every hit.
	MaxUint := ^uint(0)
	MaxInt := int(MaxUint >> 1)
	request.Size = MaxInt
	searchResult, err := s.Engine.Search(request)
	if err != nil {
		return err
	}
	for _, hit := range searchResult.Hits {
		log.Logger(ctx).Info("ClearIndex", zap.String("hit", hit.ID))
		// NOTE(review): Delete errors are ignored here - looks like
		// best-effort cleanup; confirm before surfacing them.
		s.Engine.Delete(hit.ID)
	}
	return nil
}

// SearchDocuments runs query.MetaQuery against the index, restricted to the
// given store (and owner, if set). It returns the matching document IDs and
// the total hit count; when countOnly is true only the count is returned.
func (s *BleveServer) SearchDocuments(storeID string, query *docstore.DocumentQuery, countOnly bool) ([]string, int64, error) {
	// Make every un-signed query term mandatory ("+term").
	parts := strings.Split(query.MetaQuery, " ")
	for i, p := range parts {
		if !strings.HasPrefix(p, "+") && !strings.HasPrefix(p, "-") {
			parts[i] = "+" + p
		}
	}
	// Restrict to the store (and owner) via the reserved fields written in
	// IndexDocument.
	parts = append(parts, " +DOCSTORE_STORE_ID:"+s.escapeMetaValue(storeID))
	if len(query.Owner) > 0 {
		parts = append(parts, " +DOCSTORE_OWNER:"+s.escapeMetaValue(query.Owner))
	}
	qStringQuery := bleve.NewQueryStringQuery(strings.Join(parts, " "))
	log.Logger(context.Background()).Debug("SearchDocuments", zap.Any("query", qStringQuery))
	searchRequest := bleve.NewSearchRequest(qStringQuery)
	// TODO PASS CURSOR INFOS?
	if !countOnly {
		// Fixed first page of 100 results until cursor support is added.
		searchRequest.Size = int(100)
		searchRequest.From = int(0)
	}
	docs := []string{}
	searchResult, err := s.Engine.Search(searchRequest)
	if err != nil {
		return docs, 0, err
	}
	log.Logger(context.Background()).Debug("SearchDocuments", zap.Any("result", searchResult))
	if countOnly {
		return nil, int64(searchResult.Total), nil
	}
	for _, hit := range searchResult.Hits {
		// Skip hits whose stored document can no longer be loaded.
		doc, docErr := s.Engine.Document(hit.ID)
		if docErr != nil || doc == nil || doc.ID == "" {
			log.Logger(context.Background()).Debug("Skipping Document", zap.Any("doc", doc), zap.Error(docErr))
			continue
		}
		log.Logger(context.Background()).Debug("Sending Document", zap.Any("doc", doc))
		docs = append(docs, doc.ID)
	}
	return docs, int64(searchResult.Total), nil
}

// escapeMetaValue escapes characters that are significant to Bleve's
// query-string syntax so the value matches literally.
func (s *BleveServer) escapeMetaValue(value string) string {
	r := strings.NewReplacer("-", "\\-", "~", "\\~", "*", "\\*", ":", "\\:", "/", "\\/", " ", "\\ ")
	return r.Replace(value)
}
[ 6 ]
package main

import (
	"bytes"
	"flag"
	"fmt"
	"image"
	_ "image/jpeg"
	_ "image/png"
	"io"
	"io/ioutil"
	"math"
	"net/http"
	"os"
	"os/exec"
	"sort"
	"strings"
)

// GetRange buckets an 8-bit channel value into one of three thirds of the
// 0-255 range (0: [0,85), 1: [85,170), 2: [170,255]).
func GetRange(c uint8) int {
	switch {
	case c < 85:
		return 0
	case c < 170:
		return 1
	default:
		return 2
	}
}

// GetPalette partitions the image's pixels into 27 RGB buckets (3 ranges per
// channel) and returns the average color of every non-empty bucket.
func GetPalette(img image.Image) [][3]uint8 {
	var Area [27]int
	var AreaAvg [27][3]uint32
	for i := 0; i < img.Bounds().Max.X; i++ {
		for j := 0; j < img.Bounds().Max.Y; j++ {
			// RGBA returns 16-bit channels; /257 maps 0-65535 to 0-255.
			r, g, b, _ := img.At(i, j).RGBA()
			idx := GetRange(uint8(r/257))*9 + GetRange(uint8(g/257))*3 + GetRange(uint8(b/257))
			Area[idx]++
			AreaAvg[idx][0] += r / 257
			AreaAvg[idx][1] += g / 257
			AreaAvg[idx][2] += b / 257
		}
	}
	for i := range Area {
		if Area[i] == 0 {
			continue
		}
		for c := 0; c < 3; c++ {
			AreaAvg[i][c] = AreaAvg[i][c] / uint32(Area[i])
		}
	}
	var ret [][3]uint8
	for i := 0; i < 27; i++ {
		if Area[i] == 0 {
			continue
		}
		ret = append(ret, [3]uint8{uint8(AreaAvg[i][0]), uint8(AreaAvg[i][1]), uint8(AreaAvg[i][2])})
	}
	return ret
}

// LoadImage decodes an image from a local path or, if the path does not
// exist on disk, from an HTTP(S) URL.
func LoadImage(PathOrLink string) (image.Image, error) {
	var data io.Reader
	if _, err := os.Stat(PathOrLink); os.IsNotExist(err) {
		// Not a file: treat the argument as a URL.
		response, err := http.Get(PathOrLink)
		if err != nil {
			return nil, err
		}
		defer response.Body.Close()
		// The read error was previously ignored; a truncated body would
		// have surfaced as a confusing decode failure instead.
		body, err := ioutil.ReadAll(response.Body)
		if err != nil {
			return nil, err
		}
		data = bytes.NewReader(body)
	} else {
		f, err := os.Open(PathOrLink)
		if err != nil {
			return nil, err
		}
		// The original never closed the file handle (resource leak).
		defer f.Close()
		data = f
	}
	img, _, err := image.Decode(data)
	if err != nil {
		return nil, err
	}
	return img, nil
}

// ToHex renders a color as a six-digit uppercase hex string (no '#').
// BUG FIX: the original padded with "0" when c <= 16, producing a
// seven-digit string for exactly c == 16 (e.g. "010"); %02X pads correctly.
func ToHex(color [3]uint8) string {
	var sb strings.Builder
	for _, c := range color {
		fmt.Fprintf(&sb, "%02X", c)
	}
	return sb.String()
}

// GetColor sorts the palette in place by L1 (Manhattan) distance to the
// sample color, closest first, and returns the same slice.
func GetColor(color [][3]uint8, sample [3]uint8) [][3]uint8 {
	sort.Slice(color, func(i, j int) bool {
		var c1, c2 float64
		for c := 0; c < 3; c++ {
			c1 += math.Abs(float64(sample[c]) - float64(color[i][c]))
			c2 += math.Abs(float64(sample[c]) - float64(color[j][c]))
		}
		return c1 < c2
	})
	return color
}

// NormalizeColor shifts a color so its smallest channel becomes zero,
// preserving the relative channel differences.
func NormalizeColor(color [3]uint8) [3]uint8 {
	min := uint8(255)
	for _, c := range color {
		if c < min {
			min = c
		}
	}
	for i := range color {
		color[i] -= min
	}
	return color
}

// GetBG normalizes a color and lifts every channel by brightness, clamping
// at 255. (brightness is unsigned, so underflow cannot occur.)
func GetBG(color [3]uint8, brightness uint8) [3]uint8 {
	norm := NormalizeColor(color)
	for i := range norm {
		if sum := int(norm[i]) + int(brightness); sum > 255 {
			norm[i] = 255
		} else {
			norm[i] = uint8(sum)
		}
	}
	return norm
}

func main() {
	const (
		XresourceCacheDir = "/.cache/colterm"
		XresourceCache    = XresourceCacheDir + "/Xresources"
	)
	os.MkdirAll(os.Getenv("HOME")+XresourceCacheDir, os.ModePerm)

	FileIn := flag.String("f", "", "Input file, use this option or put file behind the options")
	BgBrightness := flag.Int("bg", 20, "Set background brightness(0 - 255)")
	FgBrightness := flag.Int("fg", 150, "Set foreground brightness(0 - 255)")
	FileExport := flag.String("e", "", "Export file to (Path)")
	Tamplate := flag.String("t", "", "Create color scheme with a given tamplate file, created file will saved in your home directory")
	PaletteOnly := flag.Bool("n", false, "Print colors only without applying to Xresources")
	flag.Parse()
	if len(flag.Args()) == 0 && *FileIn == "" {
		flag.PrintDefaults()
		return
	}
	imgPath := *FileIn
	if imgPath == "" {
		imgPath = flag.Args()[0]
	}

	img, err := LoadImage(imgPath)
	if err != nil {
		panic(err)
	}
	Palette := GetPalette(img)
	if len(Palette) < 10 {
		panic("This image is not appropriate for generating color scheme")
	}

	// Target colors: index 0 background, 1 foreground, 2-9 the 8 ANSI colors.
	var ColorScheme [10][3]uint8
	var ColorTamplete = [10][3]uint8{
		{0, 0, 0},
		{150, 150, 150},
		{0, 0, 0},
		{255, 0, 0},
		{0, 255, 0},
		{255, 255, 0},
		{0, 0, 150},
		{255, 0, 255},
		{0, 255, 255},
		{255, 255, 255},
	}
	// For each target, pick the closest palette color not already used.
	for i := 0; i < 10; i++ {
		value := GetColor(Palette, ColorTamplete[i])
		for _, val := range value {
			gotVal := true
			for _, item := range ColorScheme {
				if item == val {
					gotVal = false
				}
			}
			if gotVal {
				ColorScheme[i] = val
				break
			}
		}
	}
	// -1 disables the brightness adjustment of background/foreground.
	if *BgBrightness != -1 {
		ColorScheme[0] = GetBG(GetColor(Palette, ColorTamplete[0])[0], uint8(*BgBrightness))
	}
	if *FgBrightness != -1 {
		ColorScheme[1] = GetBG(GetColor(Palette, ColorTamplete[1])[0], uint8(*FgBrightness))
	}

	fmt.Printf(" ¯\\_(•_•)_/¯ \nHeres your colors\n\n")
	fmt.Printf("Background \033[48;2;%d;%d;%dm%s\033[49m\n", ColorScheme[0][0], ColorScheme[0][1], ColorScheme[0][2], ToHex(ColorScheme[0]))
	fmt.Printf("Foreground \033[48;2;%d;%d;%dm%s\033[49m\n", ColorScheme[1][0], ColorScheme[1][1], ColorScheme[1][2], ToHex(ColorScheme[1]))
	for i := 2; i < len(ColorScheme); i++ {
		c := ColorScheme[i]
		fmt.Printf("Color%d \033[48;2;%d;%d;%dm%s\033[49m\n", i-2, c[0], c[1], c[2], ToHex(c))
	}
	if *PaletteOnly {
		return
	}

	if *Tamplate != "" {
		StTamplate, err := ioutil.ReadFile(*Tamplate)
		if err != nil {
			panic(err)
		}
		StData := string(StTamplate)
		// Create is checked before deferring Close (the original deferred
		// Close on a possibly-nil file).
		StExport, err := os.Create(os.Getenv("HOME") + "/Colterm-" + *Tamplate)
		if err != nil {
			panic(err)
		}
		defer StExport.Close()
		StData = strings.ReplaceAll(StData, "background", "#"+ToHex(ColorScheme[0]))
		StData = strings.ReplaceAll(StData, "foreground", "#"+ToHex(ColorScheme[1]))
		StData = strings.ReplaceAll(StData, "cursor", "#"+ToHex(ColorScheme[1]))
		// color0-7 and their "bright" aliases color8-15 get the same values.
		for i := 0; i < len(ColorScheme)-2; i++ {
			StData = strings.ReplaceAll(StData, fmt.Sprintf("color%d", i), "#"+ToHex(ColorScheme[i+2]))
			StData = strings.ReplaceAll(StData, fmt.Sprintf("color%d", i+8), "#"+ToHex(ColorScheme[i+2]))
		}
		if _, err = StExport.Write([]byte(StData)); err != nil {
			panic(err)
		}
	}

	// Write the Xresources file and merge it with xrdb.
	var XresourcePath string
	if *FileExport == "" {
		XresourcePath = os.Getenv("HOME") + XresourceCache
	} else {
		XresourcePath = *FileExport + "/ColTerm-Xresource"
	}
	NewXre, err := os.Create(XresourcePath)
	if err != nil {
		panic(err)
	}
	defer NewXre.Close()
	var XresourceData string
	XresourceData += fmt.Sprintf("! Created by Colterm with %s\n", imgPath)
	XresourceData += fmt.Sprintf("*.background: #%s\n", ToHex(ColorScheme[0]))
	XresourceData += fmt.Sprintf("*.foreground: #%s\n", ToHex(ColorScheme[1]))
	for i := 0; i < 8; i++ {
		XresourceData += fmt.Sprintf("*.color%d: #%s\n", i, ToHex(ColorScheme[i+2]))
	}
	if _, err = NewXre.Write([]byte(XresourceData)); err != nil {
		panic(err)
	}
	// NOTE(review): xrdb always merges the cache path, even when -e
	// exported elsewhere — original behavior preserved; confirm intent.
	ReloadXresource := exec.Command("xrdb", "-merge", "-quiet", os.Getenv("HOME")+XresourceCache)
	if err = ReloadXresource.Run(); err != nil {
		panic(err)
	}
}
[ 0, 2, 5 ]
package libsignal import ( "bytes" "crypto/sha1" "errors" "fmt" "io/ioutil" "os" "path/filepath" "strconv" "strings" "sync" "golang.org/x/crypto/pbkdf2" log "github.com/Sirupsen/logrus" "math/rand" "github.com/OpenBazaar/libsignal/ratchet" ) // store implements the PreKeyStore, SignedPreKeyStore, // IdentityStore and SessionStore interfaces from the axolotl package // Blobs are encrypted with AES-128 and authenticated with HMAC-SHA1 type store struct { sync.Mutex preKeysDir string signedPreKeysDir string identityDir string sessionsDir string unencrypted bool aesKey []byte macKey []byte AlwaysTrustPeerID bool } func NewStore(password, path string) (*store, error) { ts := &store{ preKeysDir: filepath.Join(path, "prekeys"), signedPreKeysDir: filepath.Join(path, "signed_prekeys"), identityDir: filepath.Join(path, "identity"), sessionsDir: filepath.Join(path, "sessions"), unencrypted: password == "", AlwaysTrustPeerID: true, } // Create dirs in case this is first run if err := os.MkdirAll(ts.preKeysDir, 0700); err != nil { return nil, err } if err := os.MkdirAll(ts.signedPreKeysDir, 0700); err != nil { return nil, err } if err := os.MkdirAll(ts.identityDir, 0700); err != nil { return nil, err } if err := os.MkdirAll(ts.sessionsDir, 0700); err != nil { return nil, err } // If there is a password, generate the keys from it if !ts.unencrypted { salt := make([]byte, 8) saltFile := filepath.Join(path, "salt") var err error // Create salt if this is first run if !exists(saltFile) { //randBytes(salt) err = ioutil.WriteFile(saltFile, salt, 0600) if err != nil { return nil, err } } else { salt, err = ioutil.ReadFile(saltFile) if err != nil { return nil, err } } ts.genKeys(password, salt, 1024) } return ts, nil } // Helpers func idToFilename(id uint32) string { return fmt.Sprintf("%09d", id) } func filenameToID(fname string) (uint32, error) { var id uint32 _, err := fmt.Sscanf(fname, "%d", &id) if err != nil { return 0, err } return uint32(id), nil } func (s *store) 
readNumFromFile(path string) (uint32, error) { b, err := s.readFile(path) if err != nil { return 0, err } num, err := strconv.Atoi(string(b)) if err != nil { return 0, err } return uint32(num), nil } func (s *store) writeNumToFile(path string, num uint32) { b := []byte(strconv.Itoa(int(num))) s.writeFile(path, b) } func (s *store) genKeys(password string, salt []byte, count int) { keys := pbkdf2.Key([]byte(password), salt, count, 16+20, sha1.New) s.aesKey = keys[:16] s.macKey = keys[16:] } func (s *store) encrypt(plaintext []byte) ([]byte, error) { if s.unencrypted { return plaintext, nil } e, err := aesEncrypt(s.aesKey, plaintext) if err != nil { return nil, err } return appendMAC(s.macKey, e), nil } // ErrStoreBadMAC occurs when MAC verification fails on the records stored using password based encryption. // The probable cause is using a wrong password. var ErrStoreBadMAC = errors.New("wrong MAC calculated, possibly due to wrong passphrase") func (s *store) decrypt(ciphertext []byte) ([]byte, error) { if s.unencrypted { return ciphertext, nil } macPos := len(ciphertext) - 32 if !verifyMAC(s.macKey, ciphertext[:macPos], ciphertext[macPos:]) { return nil, ErrStoreBadMAC } return aesDecrypt(s.aesKey, ciphertext[:macPos]) } func (s *store) readFile(path string) ([]byte, error) { b, err := ioutil.ReadFile(path) if err != nil { return nil, err } b, err = s.decrypt(b) return b, err } func (s *store) writeFile(path string, b []byte) error { b, err := s.encrypt(b) if err != nil { return err } return ioutil.WriteFile(path, b, 0600) } // Identity store func (s *store) GetLocalRegistrationID() (uint32, error) { regidfile := filepath.Join(s.identityDir, "regid") return s.readNumFromFile(regidfile) } func (s *store) SetLocalRegistrationID(id uint32) { regidfile := filepath.Join(s.identityDir, "regid") s.writeNumToFile(regidfile, id) } func (s *store) GetIdentityKeyPair() (*ratchet.IdentityKeyPair, error) { idkeyfile := filepath.Join(s.identityDir, "identity_key") b, err := 
s.readFile(idkeyfile) if err != nil { return nil, err } if len(b) != 64 { return nil, fmt.Errorf("identity key is %d not 64 bytes long", len(b)) } return ratchet.NewIdentityKeyPairFromKeys(b[32:], b[:32]), nil } func (s *store) SetIdentityKeyPair(ikp *ratchet.IdentityKeyPair) error { idkeyfile := filepath.Join(s.identityDir, "identity_key") b := make([]byte, 64) copy(b, ikp.PublicKey.Key()[:]) copy(b[32:], ikp.PrivateKey.Key()[:]) return s.writeFile(idkeyfile, b) } func (s *store) SaveIdentity(id string, key *ratchet.IdentityKey) error { idkeyfile := filepath.Join(s.identityDir, "remote_"+id) return s.writeFile(idkeyfile, key.Key()[:]) } func (s *store) IsTrustedIdentity(id string, key *ratchet.IdentityKey) bool { if s.AlwaysTrustPeerID { // Workaround until we handle peer reregistering situations // more securely and with a better UI. return true } idkeyfile := filepath.Join(s.identityDir, "remote_"+id) // Trust on first use (TOFU) if !exists(idkeyfile) { return true } b, err := s.readFile(idkeyfile) if err != nil { return false } return bytes.Equal(b, key.Key()[:]) } // MyIdentityKey returns our serialized public identity key func MyIdentityKey() []byte { return identityKey.PublicKey.Serialize() } // UnknownContactError is returned when an unknown group id is encountered type UnknownContactError struct { id string } func (err UnknownContactError) Error() string { return fmt.Sprintf("unknown contact ID %s", err.id) } // ContactIdentityKey returns the serialized public key of the given contact func ContactIdentityKey(id string) ([]byte, error) { s := textSecureStore idkeyfile := filepath.Join(s.identityDir, "remote_"+id) if !exists(idkeyfile) { return nil, UnknownContactError{id} } b, err := s.readFile(idkeyfile) if err != nil { return nil, err } return append([]byte{5}, b...), nil } // Prekey and signed prekey store func (s *store) preKeysFilePath(id uint32) string { return filepath.Join(s.preKeysDir, idToFilename(id)) } func (s *store) signedPreKeysFilePath(id 
uint32) string { return filepath.Join(s.signedPreKeysDir, idToFilename(id)) } func (s *store) LoadPreKey(id uint32) (*ratchet.PreKeyRecord, error) { b, err := s.readFile(s.preKeysFilePath(id)) if err != nil { return nil, err } record, err := ratchet.LoadPreKeyRecord(b) if err != nil { return nil, err } return record, nil } func (s *store) LoadSignedPreKey(id uint32) (*ratchet.SignedPreKeyRecord, error) { b, err := s.readFile(s.signedPreKeysFilePath(id)) if err != nil { return nil, err } record, err := ratchet.LoadSignedPreKeyRecord(b) if err != nil { return nil, err } return record, nil } func (s *store) LoadSignedPreKeys() []ratchet.SignedPreKeyRecord { keys := []ratchet.SignedPreKeyRecord{} err := filepath.Walk(s.signedPreKeysDir, func(path string, fi os.FileInfo, err error) error { if !fi.IsDir() { _, fname := filepath.Split(path) id, err := filenameToID(fname) if err != nil { return err } key, _ := textSecureStore.LoadSignedPreKey(uint32(id)) keys = append(keys, *key) } return nil }) if err != nil { return nil } return keys } func (s *store) LoadRandomPreKey() (ratchet.PreKeyRecord, error) { keys := []ratchet.PreKeyRecord{} err := filepath.Walk(s.preKeysDir, func(path string, fi os.FileInfo, err error) error { if !fi.IsDir() { _, fname := filepath.Split(path) id, err := filenameToID(fname) if err != nil { return nil } key, _ := textSecureStore.LoadPreKey(uint32(id)) keys = append(keys, *key) } return nil }) if err != nil { return ratchet.PreKeyRecord{}, err } i := rand.Intn(len(keys)) return keys[i], nil } func (s *store) StorePreKey(id uint32, record *ratchet.PreKeyRecord) error { b, err := record.Serialize() if err != nil { return err } return s.writeFile(s.preKeysFilePath(id), b) } func (s *store) StoreSignedPreKey(id uint32, record *ratchet.SignedPreKeyRecord) error { b, err := record.Serialize() if err != nil { return err } return s.writeFile(s.signedPreKeysFilePath(id), b) } func exists(path string) bool { _, err := os.Stat(path) return err == nil } func 
(s *store) valid() bool { return s.ContainsPreKey(lastResortPreKeyID) } func (s *store) ContainsPreKey(id uint32) bool { return exists(s.preKeysFilePath(id)) } func (s *store) ContainsSignedPreKey(id uint32) bool { return exists(s.signedPreKeysFilePath(id)) } func (s *store) RemovePreKey(id uint32) { _ = os.Remove(s.preKeysFilePath(id)) } func (s *store) RemoveSignedPreKey(id uint32) { _ = os.Remove(s.signedPreKeysFilePath(id)) } // HTTP API store func (s *store) storeHTTPPassword(password string) { passFile := filepath.Join(s.identityDir, "http_password") s.writeFile(passFile, []byte(password)) } func (s *store) loadHTTPPassword() (string, error) { passFile := filepath.Join(s.identityDir, "http_password") b, err := s.readFile(passFile) if err != nil { return "", err } return string(b), nil } func (s *store) storeHTTPSignalingKey(key []byte) { keyFile := filepath.Join(s.identityDir, "http_signaling_key") s.writeFile(keyFile, key) } func (s *store) loadHTTPSignalingKey() ([]byte, error) { keyFile := filepath.Join(s.identityDir, "http_signaling_key") b, err := s.readFile(keyFile) if err != nil { return nil, err } return b, nil } // Session store func (s *store) sessionFilePath(recipientID string, deviceID uint32) string { return filepath.Join(s.sessionsDir, fmt.Sprintf("%s_%d", recipientID, deviceID)) } func (s *store) GetSubDeviceSessions(recipientID string) []uint32 { sessions := []uint32{} filepath.Walk(s.sessionsDir, func(path string, fi os.FileInfo, err error) error { if !fi.IsDir() { i := strings.LastIndex(path, "_") id, _ := strconv.Atoi(path[i+1:]) sessions = append(sessions, uint32(id)) } return nil }) return sessions } func (s *store) GetSessions() []uint32 { sessions := []uint32{} filepath.Walk(s.sessionsDir, func(path string, fi os.FileInfo, err error) error { if !fi.IsDir() { b, err := s.readFile(path) if err != nil { return err } record, err := ratchet.LoadSessionRecord(b) if err != nil { return err } sessions = append(sessions, 
record.PreviousStates[0].SS.GetRemoteRegistrationId()) } return nil }) return sessions } func (s *store) GetSessionRecipients() []string { sessions := []string{} filepath.Walk(s.sessionsDir, func(path string, fi os.FileInfo, err error) error { if !fi.IsDir() { id, _ := filepath.Split(path) sessions = append(sessions, id) } return nil }) return sessions } func (s *store) LoadSession(recipientID string, deviceID uint32) (*ratchet.SessionRecord, error) { sfile := s.sessionFilePath(recipientID, deviceID) b, err := s.readFile(sfile) if err != nil { return ratchet.NewSessionRecord(), nil } record, err := ratchet.LoadSessionRecord(b) if err != nil { return nil, err } return record, nil } func (s *store) StoreSession(recipientID string, deviceID uint32, record *ratchet.SessionRecord) error { sfile := s.sessionFilePath(recipientID, deviceID) b, err := record.Serialize() if err != nil { return err } return s.writeFile(sfile, b) } func (s *store) ContainsSession(recipientID string, deviceID uint32) bool { sfile := s.sessionFilePath(recipientID, deviceID) return exists(sfile) } func (s *store) DeleteSession(recipientID string, deviceID uint32) { sfile := s.sessionFilePath(recipientID, deviceID) _ = os.Remove(sfile) } func (s *store) DeleteAllSessions(recipientID string) { log.Debugf("Deleting all sessions for %s\n", recipientID) sessions := s.GetSubDeviceSessions(recipientID) for _, dev := range sessions { _ = os.Remove(s.sessionFilePath(recipientID, dev)) } } var textSecureStore *store
[ 3 ]
package headers import ( "log" "net/http" "path" "strings" ) var contentTypes map[string]string var defaultHeaders map[string]string func init() { // Set content types contentTypes = make(map[string]string) contentTypes[".html"] = "text/html" contentTypes[".css"] = "text/css" contentTypes[".svg"] = "image/svg+xml" contentTypes[".js"] = "application/javascript" // Set default headers defaultHeaders = make(map[string]string) // Set below header for all responses // https://blog.stackpath.com/accept-encoding-vary-important defaultHeaders["Vary"] = "Accept-Encoding" defaultHeaders["X-Frame-Options"] = "deny" defaultHeaders["X-Xss-Protection"] = "1; mode=block" defaultHeaders["X-Content-Type-Options"] = "nosniff" } // SetContentTypeHeader writes content type header to a response according to an extension of a file path func SetContentTypeHeader(w http.ResponseWriter, filePath string) { w.Header().Set( "Content-Type", contentTypes[strings.ToLower(path.Ext(filePath))], ) } // SetDefaultHeaders write headers registered as defaults func SetDefaultHeaders(w http.ResponseWriter) { for k, v := range defaultHeaders { w.Header().Set(k, v) } } // AddDefaultHeader add headers to set as defaults func AddDefaultHeader(headerName string, value string) { defaultHeaders[headerName] = value } // AddContentType add a content type to the contentTypes map func AddContentType(extension string, value string) { if extension[0] != '.' { log.Fatalln("(AddContentType) The first character of an extension have to be '.', but:", extension) } contentTypes[extension] = value }
[ 6 ]
package random import ( "math/rand" ) // //Random generator for float values. // //Examples: // // value1 := RandomFloat.nextFloat(5, 10); // Possible result: 7.3 // value2 := RandomFloat.nextFloat(10); // Possible result: 3.7 // value3 := RandomFloat.updateFloat(10, 3); // Possible result: 9.2 type TRandomFloat struct{} var RandomFloat *TRandomFloat = &TRandomFloat{} //Generates a float in the range ['min', 'max']. If 'max' is omitted, //then the range will be set to [0, 'min']. // //Parameters: // - min: float32 - minimum value of the float that will be generated. // If 'max' is omitted, then 'max' is set to 'min' and 'min' is set to 0. // - max: float32 - maximum value of the float that will be generated. // Defaults to 'min' if omitted. // //Returns generated random float32 value. // func (c *TRandomFloat) NextFloat(min float32, max float32) float32 { if max-min <= 0 { return min } return min + rand.Float32()*(max-min) } //Updates (drifts) a float value within specified range defined //Parameters: // // - value: float32 - value to drift. // - interval: float32 - a range. Default: 10% of the value // //Returns float32 // func (c *TRandomFloat) UpdateFloat(value float32, interval float32) float32 { if interval <= 0 { interval = 0.1 * value } minValue := value - interval maxValue := value + interval return c.NextFloat(minValue, maxValue) }
[ 1, 3, 6 ]
package service import ( "github.com/aidar-darmenov/message-delivery-client/config" "github.com/aidar-darmenov/message-delivery-client/interfaces" "github.com/aidar-darmenov/message-delivery-client/model" "go.uber.org/zap" "log" "net" "strconv" ) type Service struct { Configuration interfaces.Configuration Logger *zap.Logger ChannelMessages chan model.MessageToClients TcpConnection *net.TCPConn } func NewService(cfg *config.Configuration, logger *zap.Logger) *Service { //Here can be any other objects like DB, Cache, any kind of delivery services tcpAddr, err := net.ResolveTCPAddr("tcp4", net.JoinHostPort(cfg.ConnectionHost, strconv.Itoa(cfg.ConnectionPort))) if err != nil { log.Fatal(err) } conn, err := net.DialTCP(cfg.ConnectionType, nil, tcpAddr) if err != nil { log.Fatal(err) } channelMessages := make(chan model.MessageToClients, cfg.ChannelMessagesSize) return &Service{ Configuration: cfg, Logger: logger, ChannelMessages: channelMessages, TcpConnection: conn, } } func (s *Service) GetLogger() *zap.Logger { return s.Logger } func (s *Service) GetConfigParams() *config.Configuration { return s.Configuration.Params() }
[ 3 ]
package books import ( "encoding/json" "errors" "fmt" "github.com/ikalkali/es-golang/elasticsearch" "github.com/ikalkali/es-golang/entity/queries" ) const ( indexItems = "books" typeItem = "_doc" ) func (b *Books) Save() error { result, err := elasticsearch.Client.Index(indexItems, typeItem, b) if err != nil { return err } b.Id = result.Id return nil } func (b *Books) Get() error { bookId := b.Id result, err := elasticsearch.Client.Get(indexItems, typeItem, b.Id) if err != nil { return err } bytes, err := result.Source.MarshalJSON() if err != nil { return err } if err := json.Unmarshal(bytes, &b); err != nil { return err } b.Id = bookId return nil } func (b *Books) Search(query queries.EsQuery) ([]Books, error) { result, err := elasticsearch.Client.Search(indexItems, query.Build()) if err != nil { return nil, err } books := make([]Books, result.TotalHits()) for index, hit := range result.Hits.Hits { bytes, _ := hit.Source.MarshalJSON() var book Books if err := json.Unmarshal(bytes, &book); err != nil { return nil, err } book.Id = hit.Id books[index] = book } if len(books) == 0 { return nil, errors.New("no result found with the matching criteria") } return books, nil } func (b *Books) GetAllBooks(limit int64, offset int64) ([]Books, error) { fmt.Println("Get all books db") result, err := elasticsearch.Client.GetAll(indexItems, limit, offset) if err != nil { return nil, err } books := make([]Books, len(result.Hits.Hits)) fmt.Println("TOTAL HITS", result.Hits) for index, hit := range result.Hits.Hits { bytes, _ := hit.Source.MarshalJSON() var book Books if err := json.Unmarshal(bytes, &book); err != nil { return nil, err } book.Id = hit.Id books[index] = book } if len(books) == 0 { return nil, errors.New("no document found") } return books, nil }
[ 6 ]
package tag import ( "github.com/goEventListingAPI/entity" ) //add event tags //AddEventTag(id []int)(*entity.Tag, []error) //?? how do we add multiple tags //notify(eventID uint, tagsID []int) []error //this should be done separatly in notification section //get the event tags //GetTags() ([]entity.Tag, []error) type TagRepository interface{ Tags()([]entity.Tag,[]error) Tag(id uint)(*entity.Tag,[]error) AddTag(tag *entity.Tag)(*entity.Tag,[]error) UpdateTag(tag *entity.Tag)(*entity.Tag,[]error) RemoveTag(id uint)(*entity.Tag,[]error) }
[ 3 ]
package go_dev import ( "database/sql" "fmt" "github.com/lib/pq" ) /* Initializes a new task for a project and adds it to the database If succesful, returns true Otherwise, returns false */ func CreateTask(project_name, project_owner, task_name string, db *sql.DB) bool { sqlStatement1 := `SELECT id FROM projects WHERE owner = $1 AND name = $2;` var parentID string err = db.QueryRow(sqlStatement1, project_owner, project_name).Scan(&parentID) if err == sql.ErrNoRows { return false } else if err != nil { return false } sqlStatement2 := `INSERT INTO tasks(project,name,status) VALUES ($1, $2, 0) RETURNING id` var taskID int err = db.QueryRow(sqlStatement2, parentID, task_name).Scan(&taskID) if err != nil { return false } sqlStatement3 := `UPDATE projects SET todo_tasks = array_cat(todo_tasks, $1) WHERE name = $2 && owner = $3;` _, err = db.Exec(sqlStatement3, taskID, project_name, project_owner) if err != nil { return false } return true } /* Adds a new member to an existing task If succesful, returns true Otherwise, returns false */ func AddTaskMembers(project_name, project_owner, task_name, newMember string, db *sql.DB) bool { sqlStatement1 := `SELECT id FROM projects WHERE owner = $1 AND name = $2;` var parentID int err = db.QueryRow(sqlStatement1, project_owner, project_name).Scan(&parentID) if err == sql.ErrNoRows { fmt.Println("No rows") return false } else if err != nil { fmt.Println("Other error first statement.") return false } sqlStatement := `UPDATE tasks SET users = users || $1 WHERE project = $2 AND name = $3;` _, err = db.Exec(sqlStatement, pq.Array([]string{newMember}), parentID, task_name) if err != nil { fmt.Println("Other error second statement.") fmt.Println(err) fmt.Printf("%T\n", newMember) return false } return true } /* Updates a tasks status and then populates the corresponding project task lisk If succesful, returns true Otherwise, returns false */ func UpdateStatus(taskID, status int, db *sql.DB) bool { var parentID int sqlStatement2 := `SELECT 
project, status FROM tasks WHERE id = $1;` var oldStatus int err = db.QueryRow(sqlStatement2, taskID).Scan(&parentID, &oldStatus) if err == sql.ErrNoRows { fmt.Println("First1") return false } else if err != nil { fmt.Println("First2") return false } var oldColumn string if oldStatus == 0 { oldColumn = "inprogress_tasks" } else if oldStatus == 1 { oldColumn = "todo_tasks" } else { oldColumn = "completed_tasks" } var newColumn string if status == 0 { newColumn = "inprogress_tasks" } else if status == 1 { newColumn = "todo_tasks" } else { newColumn = "completed_tasks" } sqlStatement3 := `UPDATE tasks SET status = $1 WHERE id = $2;` _, err = db.Exec(sqlStatement3, status, taskID) if err != nil { fmt.Println("Second") return false } sqlStatement4 := `UPDATE projects SET $1 = array_remove($1, $2) WHERE id = $3;` _, err = db.Exec(sqlStatement4, oldColumn, taskID, parentID) if err != nil { fmt.Println("Third") return false } sqlStatement5 := `UPDATE projects SET $1 = array_cat($1, $2) WHERE id = $3;` _, err = db.Exec(sqlStatement5, newColumn, taskID, parentID) if err != nil { fmt.Println("Fourth") return false } return true } /* Adds a description to a task If succesful, returns true Otherwise, returns false */ func AddDescription(project_name, project_owner, task_name, description string, db *sql.DB) bool { sqlStatement1 := `SELECT id FROM projects WHERE owner = $1 AND name = $2;` var parentID string err = db.QueryRow(sqlStatement1, project_owner, project_name).Scan(&parentID) if err == sql.ErrNoRows { return false } else if err != nil { return false } sqlStatement := `UPDATE tasks SET description = $1 WHERE project = $2 AND name = $3;` _, err = db.Exec(sqlStatement, description, parentID, task_name) if err != nil { return false } return true } /* Changes the due date on a task If succesful, returns true Otherwise, returns false */ func DueDate(project_name, project_owner, task_name, dueDate string, db *sql.DB) bool { sqlStatement1 := `SELECT id FROM projects WHERE owner 
= $1 AND name = $2;`
	var parentID string
	err = db.QueryRow(sqlStatement1, project_owner, project_name).Scan(&parentID)
	if err == sql.ErrNoRows {
		// Parent project not found.
		return false
	} else if err != nil {
		return false
	}
	sqlStatement := `UPDATE tasks SET due_date = $1 WHERE project = $2 AND name = $3;`
	_, err = db.Exec(sqlStatement, dueDate, parentID, task_name)
	if err != nil {
		return false
	}
	return true
}

/*
Removes a task from the project database.
If successful, returns true.
Otherwise, returns false.
*/
func DeleteTask(taskID int, db *sql.DB) bool {
	// Look up the owning project and the task's current status column.
	sqlStatement := `SELECT project, status FROM tasks WHERE id = $1;`
	var parentID int
	var status int
	// NOTE(review): `err` here is assigned, not declared — presumably a
	// package-level variable; confirm it is not shared across goroutines.
	err = db.QueryRow(sqlStatement, taskID).Scan(&parentID, &status)
	if err == sql.ErrNoRows {
		return false
	} else if err != nil {
		return false
	}
	sqlStatement = `DELETE FROM tasks WHERE id= $1;`
	_, err = db.Exec(sqlStatement, taskID)
	if err != nil {
		return false
	}
	// Map status code to the projects column holding the task list.
	var oldColumn string
	if status == 0 {
		oldColumn = "inprogress_tasks"
	} else if status == 1 {
		oldColumn = "todo_tasks"
	} else {
		oldColumn = "completed_tasks"
	}
	// NOTE(review): PostgreSQL placeholders cannot stand in for column
	// names — `SET $1 = array_remove($1, $2)` will not substitute
	// oldColumn as an identifier; this statement almost certainly fails.
	// The error from Exec is also ignored, masking the failure.
	sqlStatement = `UPDATE projects SET $1 = array_remove($1, $2) WHERE id = $3;`
	_, err = db.Exec(sqlStatement, oldColumn, taskID, parentID)
	return true
}

/*
Returns all tasks specific to a user.
If successful, returns array of all tasks.
If user has no tasks, then the userTasks array returned will be empty.
*/
func GetUserTasks(username string, db *sql.DB) []Task {
	sqlStatement := `SELECT t.id, p.name, t.name, t.description, EXTRACT(MONTH FROM t.due_date) as month, EXTRACT(DAY FROM t.due_date) as day, t.status FROM tasks t INNER JOIN projects p ON t.project = p.id WHERE $1 = ANY(t.users) ORDER BY t.due_date ASC;`
	sqlStatement2 := `SELECT p.title, p.users, p.content, t.name FROM posts p INNER JOIN tasks t ON p.task = t.id WHERE p.task = $1;`
	rows, err := db.Query(sqlStatement, username)
	if err != nil {
		//Do something
	}
	// NOTE(review): `comments` is declared outside the task loop, so the
	// slice keeps growing across tasks and every Task.Comments aliases the
	// same backing array — each task appears to own ALL previous comments.
	var userTasks = make([]Task, 0)
	var day, month string
	var tsk Task
	var comments = make([]Post, 0)
	var pst Post
	defer rows.Close()
	for rows.Next() {
		err = rows.Scan(&tsk.Key, &tsk.Project_name, &tsk.Name, &tsk.Description, &month, &day, &tsk.Status)
		if err != nil {
			//Do something
		}
		tsk.Due_date = month + "-" + day
		rows2, er := db.Query(sqlStatement2, tsk.Key)
		if er != nil {
			//Do something
		}
		// NOTE(review): defer inside a loop — rows2 handles stay open
		// until the function returns.
		defer rows2.Close()
		for rows2.Next() {
			err = rows2.Scan(&pst.Title, &pst.Username, &pst.Content, &pst.Task)
			if err != nil {
				//Do something
			}
			comments = append(comments, pst)
		}
		tsk.Comments = comments
		userTasks = append(userTasks, tsk)
	}
	return userTasks
}

/*
Gets all tasks specific to a project.
If successful, returns array of all tasks.
If user has no tasks, then the ProjectTasks array returned will be empty.
*/
func GetProjectTasks(id int, status int, db *sql.DB) []Task {
	sqlStatement1 := `SELECT name FROM projects WHERE id = $1;`
	var projectName string
	err = db.QueryRow(sqlStatement1, id).Scan(&projectName)
	if err != nil {
		//Do something
	}
	sqlStatement := `SELECT name, description, due_date FROM tasks WHERE project = $1 AND status = $2 ORDER BY due_date ASC;`
	rows, err := db.Query(sqlStatement, id, status)
	if err != nil {
		//Do something
	}
	var ProjectTasks = make([]Task, 0)
	defer rows.Close()
	for rows.Next() {
		var tsk Task
		err = rows.Scan(&tsk.Name, &tsk.Description, &tsk.Due_date)
		tsk.Status = status
		tsk.Project_name = projectName
		if err != nil {
			//Do something
		}
		ProjectTasks = append(ProjectTasks, tsk)
	}
	return ProjectTasks
}

// GetTask fetches a single task by id, including its project name,
// formatted "month-day" due date, and all comments posted on it.
func GetTask(taskID int, db *sql.DB) Task {
	sqlStatement := `SELECT t.id, p.name, t.name, t.description, EXTRACT(MONTH FROM t.due_date) as month, EXTRACT(DAY FROM t.due_date) as day, t.status FROM tasks t INNER JOIN projects p ON t.project = p.id WHERE $1 = t.id;`
	sqlStatement2 := `SELECT p.title, p.users, p.content, t.name FROM posts p INNER JOIN tasks t ON p.task = t.id WHERE p.task = $1;`
	var tsk Task
	var day, month string
	err := db.QueryRow(sqlStatement, taskID).Scan(&tsk.Key, &tsk.Project_name, &tsk.Name, &tsk.Description, &month, &day, &tsk.Status)
	if err != nil {
		//Do something
	}
	var comments = make([]Post, 0)
	var pst Post
	tsk.Due_date = month + "-" + day
	rows, er := db.Query(sqlStatement2, tsk.Key)
	if er != nil {
		//Do something
	}
	defer rows.Close()
	for rows.Next() {
		err = rows.Scan(&pst.Title, &pst.Username, &pst.Content, &pst.Task)
		if err != nil {
			//Do something
		}
		comments = append(comments, pst)
	}
	tsk.Comments = comments
	return tsk
}
[ 3, 5, 6 ]
package search

import "testing"

// compare reports whether X and Y contain exactly the same elements in the
// same order.
//
// Fix: the original only iterated over X, so it panicked with an index error
// when len(X) > len(Y) and wrongly reported equality when X was a strict
// prefix of Y. An explicit length check handles both cases.
func compare(X, Y []string) bool {
	if len(X) != len(Y) {
		return false
	}
	for i := 0; i < len(X); i++ {
		if X[i] != Y[i] {
			return false
		}
	}
	return true
}

// TestSuffixTree checks that SuffixTree returns, in order, exactly the words
// matching the query "he".
func TestSuffixTree(t *testing.T) {
	words := []string{
		"aardvark",
		"happy",
		"hello",
		"hero",
		"he",
		"hotel",
	}
	answers := SuffixTree(words, "he")
	matchedWords := []string{
		"hello",
		"hero",
		"he",
	}
	if !compare(answers, matchedWords) {
		t.Error("SuffixTree does not work")
	}
}
[ 2 ]
package servo

import (
	"fmt"
	"strings"
	"sync"
	"time"
)

// flag is a bit set of servo configuration options.
type flag uint8

// is check if the given bits are set in the flag.
func (f flag) is(bits flag) bool {
	return f&bits != 0
}

// String implements the Stringer interface.
func (f flag) String() string {
	if f == 0 {
		return "( NONE )"
	}
	s := new(strings.Builder)
	fmt.Fprintf(s, "(")
	if f.is(Centered) {
		fmt.Fprintf(s, " Centered")
	}
	if f.is(Normalized) {
		fmt.Fprintf(s, " Normalized")
	}
	fmt.Fprintf(s, " )")
	return s.String()
}

const (
	// Centered sets the range of the servo from -90 to 90 degrees.
	// Together with Normalized, the range of the servo is set to -1 to 1.
	Centered flag = (1 << iota)
	// Normalized sets the range of the servo from 0 to 2.
	// Together with Centered, the range of the servo is set to -1 to 1.
	Normalized
)

// Servo is a struct that holds all the information necessary to control a
// servo motor. Use the function servo.New(gpio) for correct
// initialization. Servo is designed to be concurrent-safe.
type Servo struct {
	// pin is the GPIO pin number of the Raspberry Pi. Check that the pin is
	// controllable with pi-blaster.
	//
	// CAUTION: Incorrect pin assignment might cause damage to your Raspberry
	// Pi.
	pin gpio
	// Name is an optional value to assign a meaningful name to the servo.
	Name string
	// Flags is a bit flag that sets various configuration parameters.
	//
	// servo.Centered sets the range of the servo from -90 to 90 degrees.
	//
	// servo.Normalized sets the range of the servo from 0 to 2.
	// Together with servo.Centered, the range of the servo is set to -1 to 1.
	Flags flag

	// MinPulse is the minimum pwm pulse of the servo. (default 0.05 s)
	// MaxPulse is the maximum pwm pulse of the servo. (default 0.25 s)
	// These calibration variables should be immutables once the servo is
	// connected.
	MinPulse, MaxPulse float64

	// target/position are in internal degrees [0, 180]; deltaT is the start
	// time of the current movement, lastPWM the last emitted duty value.
	target, position float64
	deltaT           time.Time
	lastPWM          pwm

	// step is the current speed (degrees/s); maxStep is its upper bound.
	step, maxStep float64

	idle     bool
	finished *sync.Cond
	lock     *sync.RWMutex
}

// updateRate is set to 3ms/degree, an approximate on 0.19s/60degrees.

// String implements the Stringer interface.
// It returns a string in the following format:
//
//	servo "NAME" connected to gpio(GPIO_PIN) [flags: ( FLAGS_SET )]
//
// where NAME is the verbose name (default: fmt.Sprintf("Servo%d", GPIO)),
// GPIO_PIN is the connection pin of the servo, and FLAGS_SET is the list of
// flags set (default: NONE).
func (s *Servo) String() string {
	return fmt.Sprintf("servo %q connected to gpio(%d) [flags: %v]", s.Name, s.pin, s.Flags)
}

// New creates a new Servo struct with default values, connected at a GPIO pin
// of the Raspberry Pi. You should check that the pin is controllable with pi-blaster.
//
// CAUTION: Incorrect pin assignment might cause damage to your Raspberry
// Pi.
func New(GPIO int) (s *Servo) {
	// maxS is the maximum degrees/s for a typical servo of speed
	// 0.19s/60degrees.
	const maxS = 315.7

	s = &Servo{
		pin:      gpio(GPIO),
		Name:     fmt.Sprintf("Servo%d", GPIO),
		maxStep:  maxS,
		step:     maxS,
		MinPulse: 0.05,
		MaxPulse: 0.25,
		idle:     true,
		finished: sync.NewCond(&sync.Mutex{}),
		lock:     new(sync.RWMutex),
	}
	return s
}

// Connect connects the servo to the pi-blaster daemon.
func (s *Servo) Connect() error {
	_blaster.subscribe(s)
	return nil
}

// Close cleans up the state of the servo and deactivates the corresponding
// GPIO pin.
func (s *Servo) Close() {
	_blaster.unsubscribe(s)
}

// Position returns the current angle of the servo, adjusted for its Flags.
func (s *Servo) Position() float64 {
	s.lock.RLock()
	defer s.lock.RUnlock()
	p := s.position
	// Convert internal [0, 180] degrees to the user-facing range.
	if s.Flags.is(Centered) {
		p -= 90
	}
	if s.Flags.is(Normalized) {
		p /= 90
	}
	return p
}

// Waiter implements the Wait function.
type Waiter interface {
	// Wait waits for the servo to finish moving.
	Wait()
}

// MoveTo sets a target angle for the servo to move. The magnitude of the target
// depends on the servo's Flags. The target is automatically clamped to the set
// range. If called concurrently, the target position is overridden by the last
// goroutine (usually non-deterministic).
func (s *Servo) MoveTo(target float64) (wait Waiter) {
	s.moveTo(target)
	return s
}

// moveTo converts the user-facing target into internal [0, 180] degrees and
// arms the movement.
func (s *Servo) moveTo(target float64) {
	if s.Flags.is(Normalized) {
		target *= 90
	}
	if s.Flags.is(Centered) {
		target += 90
	}
	s.lock.Lock()
	defer s.lock.Unlock()
	// With speed 0 the servo must stay put: target collapses to position.
	if s.step == 0.0 {
		s.target = s.position
	} else {
		s.target = clamp(target, 0, 180)
	}
	s.deltaT = time.Now()
	s.idle = false
}

// SetSpeed changes the speed of the servo from (still) 0.0 to 1.0 (max speed).
// Setting a speed of 0.0 effectively sets the target position to the current
// position and the servo will not move.
func (s *Servo) SetSpeed(percentage float64) {
	s.lock.Lock()
	defer s.lock.Unlock()
	s.step = s.maxStep * clamp(percentage, 0.0, 1.0)
}

// Stop stops moving the servo. This effectively sets the target position to
// the stopped position of the servo.
func (s *Servo) Stop() {
	s.lock.Lock()
	defer s.lock.Unlock()
	s.target = s.position
	s.idle = true
	// Wake up any goroutines blocked in Wait().
	s.finished.L.Lock()
	s.finished.Broadcast()
	s.finished.L.Unlock()
}

// SetPosition immediately sets the angle the servo.
func (s *Servo) SetPosition(position float64) {
	if s.Flags.is(Normalized) {
		position *= 90
	}
	if s.Flags.is(Centered) {
		position += 90
	}
	s.lock.Lock()
	defer s.lock.Unlock()
	s.position = clamp(position, 0, 180)
	s.target = s.position
	s.idle = false
}

// pwm linearly interpolates an angle based on the start, finish, and
// duration of the movement, and returns the gpio pin and adjusted pwm for the
// current time.
func (s *Servo) pwm() (gpio, pwm) {
	ok := false
	s.lock.RLock()
	p := s.position
	_pwm := s.lastPWM
	// Deferred write-back: runs AFTER RUnlock below (defers are LIFO), so
	// upgrading to the write lock here cannot deadlock. It commits the newly
	// interpolated position/pwm and signals waiters once the target is hit.
	defer func() {
		if !ok {
			s.lock.Lock()
			s.position = p
			s.lastPWM = _pwm
			s.deltaT = time.Now()
			if p == s.target {
				s.idle = true
				s.finished.L.Lock()
				s.finished.Broadcast()
				s.finished.L.Unlock()
			}
			s.lock.Unlock()
		}
	}()
	defer s.lock.RUnlock()

	// Fast path: already at target and idle — re-emit the last pwm.
	if s.position == s.target && s.idle {
		ok = true
		return s.pin, _pwm
	}

	// Advance position by elapsed-time * speed, clamped at the target.
	delta := time.Since(s.deltaT).Seconds() * s.step
	if s.target < s.position {
		p = s.position - delta
		if p <= s.target {
			p = s.target
		}
	} else {
		p = s.position + delta
		if p >= s.target {
			p = s.target
		}
	}
	_pwm = pwm(remap(p, 0, 180, s.MinPulse, s.MaxPulse))
	return s.pin, _pwm
}

// isIdle checks if the servo is not moving.
func (s *Servo) isIdle() bool {
	s.lock.RLock()
	defer s.lock.RUnlock()
	return s.idle
}

// Wait waits for the servo to stop moving. It is concurrent-safe.
func (s *Servo) Wait() {
	s.finished.L.Lock()
	defer s.finished.L.Unlock()
	for !s.isIdle() {
		s.finished.Wait()
	}
}

// clamp limits value to the closed interval [min, max].
func clamp(value, min, max float64) float64 {
	if value < min {
		value = min
	}
	if value > max {
		value = max
	}
	return value
}

// remap linearly maps value from [min, max] onto [toMin, toMax].
func remap(value, min, max, toMin, toMax float64) float64 {
	return (value-min)/(max-min)*(toMax-toMin) + toMin
}
[ 1, 2 ]
package schema

import (
	"context"
	"fmt"
	"reflect"
	"strings"

	"hermes/models"

	"github.com/fatih/structs"
	"github.com/iancoleman/strcase"
	"github.com/jinzhu/gorm"
	graphqlErrors "github.com/neelance/graphql-go/errors"
)

type (
	// entity is a parsed "table.field" reference; Field is nil when only a
	// table name was supplied.
	entity struct {
		Table string
		Field *string
	}

	// field is a GraphQL filter argument: a column reference plus at most
	// one comparison operator value.
	field struct {
		Name  string
		Eq    *Value
		Ne    *Value
		Gt    *Value
		Lt    *Value
		Gte   *Value
		Lte   *Value
		Count *bool
	}

	// arguments bundles the primary filter with optional AND/OR filters.
	arguments struct {
		Field field
		Or    *[]field
		And   *[]field
	}

	countResult struct {
		Count int32
	}

	averageResult struct {
		Average float64
	}

	// StatsError pairs a GraphQL query error with an HTTP-style code.
	StatsError struct {
		*graphqlErrors.QueryError
		Code int
	}

	Resolver struct{}
)

// Count resolves a COUNT aggregate over the table (and optional column)
// named in args.Field, applying the requested comparison plus any AND/OR
// filters.
func (r *Resolver) Count(context context.Context, args arguments) (int32, error) {
	var result countResult
	if db, castOk := context.Value(DB).(*gorm.DB); castOk {
		model := args.Field.getModel(db)
		entity := args.Field.getEntity()
		if model == nil {
			return result.Count, invalidTableError(entity.Table)
		}
		if entity.Field != nil && !fieldExists(*entity.Field, structs.Names(model)) {
			return result.Count, invalidFieldError(*entity.Field)
		}
		count := fmt.Sprintf("COUNT(%s) AS Count", args.Field.Name)
		query := db.Select(count).Table(entity.Table)
		modelStruct := structs.New(model)
		if entity.Field != nil {
			fieldName := toCamelCase(*entity.Field)
			field := modelStruct.Field(fieldName)
			fieldKind := field.Kind()
			if operator := args.Field.resolveOperator(); operator != nil {
				if value := args.Field.getValue(); value != nil {
					valueType := reflect.TypeOf(value)
					if isValidType(fieldKind, valueType) {
						where := fmt.Sprintf("%s %s ?", args.Field.Name, *operator)
						query = query.Where(where, value)
					} else {
						return result.Count, invalidValueError()
					}
				} else {
					return result.Count, noValueError()
				}
			}
		}
		query = args.attachAND(query)
		query = args.attachOR(query)
		query.Scan(&result)
		errorList := query.GetErrors()
		if !(len(errorList) > 0 || query.Error != nil) {
			return result.Count, nil
		} else if query.Error != nil {
			return result.Count, queryError(query.Error)
		}
		return result.Count, databaseError()
	}
	return result.Count, connectionError()
}

// Average resolves an AVG aggregate over the numeric column named in
// args.Field, applying the requested comparison plus any AND/OR filters.
func (r *Resolver) Average(context context.Context, args arguments) (float64, error) {
	var result averageResult
	if db, castOk := context.Value(DB).(*gorm.DB); castOk {
		model := args.Field.getModel(db)
		entity := args.Field.getEntity()
		if model == nil {
			return result.Average, invalidTableError(entity.Table)
		}
		if entity.Field == nil {
			return result.Average, badRequestError("Average requires a field name")
		}
		if !fieldExists(*entity.Field, structs.Names(model)) {
			return result.Average, invalidFieldError(*entity.Field)
		}
		modelStruct := structs.New(model)
		fieldName := toCamelCase(*entity.Field)
		field := modelStruct.Field(fieldName)
		fieldKind := field.Kind()
		if fieldKind == reflect.Ptr {
			// NOTE(review): for a pointer field, field.Value() is itself a
			// pointer, so this Kind() is reflect.Ptr and the numeric check
			// below rejects it; confirm whether pointer columns were ever
			// meant to be averaged.
			fieldValueKind := reflect.TypeOf(field.Value()).Kind()
			if !isNumericKind(fieldValueKind) {
				return result.Average, invalidFieldError(*entity.Field)
			}
		} else if !isNumericKind(fieldKind) {
			return result.Average, invalidFieldError(*entity.Field)
		}
		average := fmt.Sprintf("AVG(%s) AS Average", args.Field.Name)
		query := db.Select(average).Table(entity.Table)
		if operator := args.Field.resolveOperator(); operator != nil {
			if value := args.Field.getValue(); value != nil {
				valueType := reflect.TypeOf(value)
				if isValidNumericType(valueType) {
					where := fmt.Sprintf("%s %s ?", args.Field.Name, *operator)
					query = query.Where(where, value)
				} else {
					return result.Average, invalidValueError()
				}
			} else {
				return result.Average, noValueError()
			}
		}
		query = args.attachAND(query)
		query = args.attachOR(query)
		query.Scan(&result)
		errorList := query.GetErrors()
		if !(len(errorList) > 0 || query.Error != nil) {
			return result.Average, nil
		} else if query.Error != nil {
			return result.Average, queryError(query.Error)
		}
		return result.Average, databaseError()
	}
	return result.Average, connectionError()
}

// attachAND appends each valid And filter to the query as a WHERE clause.
func (a arguments) attachAND(query *gorm.DB) *gorm.DB {
	if a.And != nil {
		for _, item := range *a.And {
			if operator := item.resolveOperator(); operator != nil && len(item.Name) > 0 {
				if value := item.getValue(); value != nil {
					where := fmt.Sprintf("%s %s ?", item.Name, *operator)
					query = query.Where(where, value)
				}
			}
		}
	}
	return query
}

// attachOR appends each valid Or filter to the query as an OR clause.
func (a arguments) attachOR(query *gorm.DB) *gorm.DB {
	if a.Or != nil {
		for _, item := range *a.Or {
			if operator := item.resolveOperator(); operator != nil && len(item.Name) > 0 {
				if value := item.getValue(); value != nil {
					where := fmt.Sprintf("%s %s ?", item.Name, *operator)
					query = query.Or(where, value)
				}
			}
		}
	}
	return query
}

// getModel maps the table name in f to its model struct, or nil for an
// unknown table.
func (f *field) getModel(db *gorm.DB) interface{} {
	entity := f.getEntity()
	switch entity.Table {
	case "apps":
		return &models.App{}
	case "appusers":
		return &models.AppUser{}
	case "brands":
		return &models.Brand{}
	case "browsers":
		return &models.Browser{}
	case "devices":
		return &models.Device{}
	case "messages":
		return &models.Message{}
	case "platforms":
		return &models.Platform{}
	case "ranges":
		return &models.Range{}
	case "ratings":
		return &models.Rating{}
	default:
		return nil
	}
}

// getEntity splits f.Name ("table" or "table.field") into its parts.
func (f *field) getEntity() entity {
	splitField := strings.Split(f.Name, ".")
	if len(splitField) < 2 {
		return entity{Table: splitField[0], Field: nil}
	}
	return entity{Table: splitField[0], Field: &splitField[1]}
}

// getValue returns the first operator value set on f, or nil if none is set.
func (f *field) getValue() interface{} {
	if f.Eq != nil {
		return f.resolveValue(f.Eq)
	} else if f.Ne != nil {
		return f.resolveValue(f.Ne)
	} else if f.Gt != nil {
		return f.resolveValue(f.Gt)
	} else if f.Lt != nil {
		return f.resolveValue(f.Lt)
	} else if f.Gte != nil {
		return f.resolveValue(f.Gte)
	} else if f.Lte != nil {
		return f.resolveValue(f.Lte)
	}
	return nil
}

// resolveValue unwraps whichever variant of the Value union is populated.
func (f *field) resolveValue(value *Value) interface{} {
	if value.String != nil {
		return value.String
	} else if value.Int != nil {
		return value.Int
	} else if value.Float != nil {
		return value.Float
	} else if value.Bool != nil {
		return value.Bool
	}
	return nil
}

// resolveOperator maps the populated comparison on f to its SQL operator.
// String equality uses LIKE, or the case-insensitive ILIKE on Postgres.
func (f *field) resolveOperator() *string {
	var operator string
	value := f.getValue()
	if f.Eq != nil {
		switch value.(type) {
		case string, *string:
			// Fix: the original assigned "LIKE" unconditionally after the
			// isPostgres() branch, clobbering "ILIKE" on Postgres.
			if isPostgres() {
				operator = "ILIKE"
			} else {
				operator = "LIKE"
			}
		default:
			operator = "="
		}
	} else if f.Ne != nil {
		switch value.(type) {
		case string, *string:
			// Fix: same clobbering bug as above for the negated form.
			if isPostgres() {
				operator = "NOT ILIKE"
			} else {
				operator = "NOT LIKE"
			}
		default:
			operator = "<>"
		}
	} else if f.Gt != nil {
		operator = ">"
	} else if f.Lt != nil {
		operator = "<"
	} else if f.Gte != nil {
		operator = ">="
	} else if f.Lte != nil {
		operator = "<="
	}
	if len(operator) > 0 {
		return &operator
	}
	return nil
}

// toCamelCase converts a snake_case column name to the struct field name,
// normalizing a trailing "Id" to Go-style "ID".
func toCamelCase(str string) string {
	camel := strcase.ToCamel(str)
	if strings.HasSuffix(camel, "Id") {
		camel = strings.TrimSuffix(camel, "Id")
		camel = camel + "ID"
	}
	return camel
}

// isValidType reports whether a filter value of valueType may be compared
// against a struct field of the given kind (int and float64 interconvert).
//
// Fix: the original called valueType.Elem() unconditionally, which panics
// for non-pointer, non-container kinds; Elem() is now only consulted when
// the value really is a pointer.
func isValidType(kind reflect.Kind, valueType reflect.Type) bool {
	intKind := reflect.Int
	floatKind := reflect.Float64
	valueKind := valueType.Kind()
	if valueKind == reflect.Ptr {
		valueElemKind := valueType.Elem().Kind()
		return valueElemKind == kind ||
			(valueElemKind == floatKind && kind == intKind) ||
			(valueElemKind == intKind && kind == floatKind)
	}
	return valueKind == kind ||
		(valueKind == floatKind && kind == intKind) ||
		(valueKind == intKind && kind == floatKind)
}

// isValidNumericType reports whether valueType is int or float64, possibly
// behind one level of pointer.
//
// Fix: same unconditional Elem() panic as isValidType.
func isValidNumericType(valueType reflect.Type) bool {
	intKind := reflect.Int
	floatKind := reflect.Float64
	valueKind := valueType.Kind()
	if valueKind == reflect.Ptr {
		valueElemKind := valueType.Elem().Kind()
		return valueElemKind == intKind || valueElemKind == floatKind
	}
	return valueKind == intKind || valueKind == floatKind
}

// isNumericKind reports whether kind is one of the accepted numeric kinds.
// NOTE(review): sized unsigned kinds (Uint8..Uint64) are not accepted —
// confirm whether that omission is deliberate.
func isNumericKind(kind reflect.Kind) bool {
	return (kind == reflect.Int) ||
		(kind == reflect.Int8) ||
		(kind == reflect.Int16) ||
		(kind == reflect.Int32) ||
		(kind == reflect.Int64) ||
		(kind == reflect.Uint) ||
		(kind == reflect.Float32) ||
		(kind == reflect.Float64)
}
[ 0, 5 ]
// Copyright (c) 2017 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
package v6

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"time"

	"github.com/olivere/elastic"

	"github.com/uber/cadence/common/config"
	"github.com/uber/cadence/common/elasticsearch/client"
	"github.com/uber/cadence/common/log"
	"github.com/uber/cadence/common/types"
)

type (
	// ElasticV6 implements Client
	ElasticV6 struct {
		client *elastic.Client
		logger log.Logger
	}
)

// IsNotFoundError reports whether err is an Elasticsearch 404.
func (c *ElasticV6) IsNotFoundError(err error) bool {
	return elastic.IsNotFound(err)
}

// NewV6Client returns a new implementation of GenericClient
func NewV6Client(
	connectConfig *config.ElasticSearchConfig,
	logger log.Logger,
	tlsClient *http.Client,
	awsSigningClient *http.Client,
) (*ElasticV6, error) {
	clientOptFuncs := []elastic.ClientOptionFunc{
		elastic.SetURL(connectConfig.URL.String()),
		elastic.SetRetrier(elastic.NewBackoffRetrier(elastic.NewExponentialBackoff(128*time.Millisecond, 513*time.Millisecond))),
		elastic.SetDecoder(&elastic.NumberDecoder{}), // critical to ensure decode of int64 won't lose precision
	}
	if connectConfig.DisableSniff {
		clientOptFuncs = append(clientOptFuncs, elastic.SetSniff(false))
	}
	if connectConfig.DisableHealthCheck {
		clientOptFuncs = append(clientOptFuncs, elastic.SetHealthcheck(false))
	}
	// NOTE(review): if both custom HTTP clients are supplied, the later
	// SetHttpClient(tlsClient) overrides awsSigningClient — confirm that
	// precedence is intended.
	if awsSigningClient != nil {
		clientOptFuncs = append(clientOptFuncs, elastic.SetHttpClient(awsSigningClient))
	}
	if tlsClient != nil {
		clientOptFuncs = append(clientOptFuncs, elastic.SetHttpClient(tlsClient))
	}

	client, err := elastic.NewClient(clientOptFuncs...)
	if err != nil {
		return nil, err
	}
	return &ElasticV6{
		client: client,
		logger: logger,
	}, nil
}

// PutMapping updates the "_doc" type mapping of the given index.
func (c *ElasticV6) PutMapping(ctx context.Context, index, body string) error {
	_, err := c.client.PutMapping().Index(index).Type("_doc").BodyString(body).Do(ctx)
	return err
}

// CreateIndex creates an index with default settings.
func (c *ElasticV6) CreateIndex(ctx context.Context, index string) error {
	_, err := c.client.CreateIndex(index).Do(ctx)
	return err
}

// Count returns the number of documents in index matching the query body.
func (c *ElasticV6) Count(ctx context.Context, index, query string) (int64, error) {
	return c.client.Count(index).BodyString(query).Do(ctx)
}

// ClearScroll releases the server-side resources held by a scroll cursor.
func (c *ElasticV6) ClearScroll(ctx context.Context, scrollID string) error {
	return elastic.NewScrollService(c.client).ScrollId(scrollID).Clear(ctx)
}

// Scroll starts a new scroll (empty scrollID) or continues an existing one,
// converting the ES result into the generic client.Response.
func (c *ElasticV6) Scroll(ctx context.Context, index, body, scrollID string) (*client.Response, error) {
	scrollService := elastic.NewScrollService(c.client)
	var esResult *elastic.SearchResult
	var err error

	// we are not returning error immediately here, as result + error combination is possible
	if len(scrollID) == 0 {
		esResult, err = scrollService.Index(index).Body(body).Do(ctx)
	} else {
		esResult, err = scrollService.ScrollId(scrollID).Do(ctx)
	}
	if esResult == nil {
		return nil, err
	}
	var hits []*client.SearchHit
	if esResult.Hits != nil {
		for _, h := range esResult.Hits.Hits {
			if h.Source != nil {
				hits = append(hits, &client.SearchHit{Source: *h.Source})
			}
		}
	}

	result := &client.Response{
		TookInMillis: esResult.TookInMillis,
		TotalHits:    esResult.TotalHits(),
		Hits:         &client.SearchHits{Hits: hits},
		ScrollID:     esResult.ScrollId,
	}
	if len(esResult.Aggregations) > 0 {
		result.Aggregations = make(map[string]json.RawMessage, len(esResult.Aggregations))
		for key, agg := range esResult.Aggregations {
			if agg != nil {
				result.Aggregations[key] = *agg
			}
		}
	}
	return result, err
}

// Search runs a search request and converts the ES result into the generic
// client.Response, surfacing ES-level errors and timeouts as
// InternalServiceError.
func (c *ElasticV6) Search(ctx context.Context, index, body string) (*client.Response, error) {
	esResult, err := c.client.Search(index).Source(body).Do(ctx)
	if err != nil {
		return nil, err
	}

	if esResult.Error != nil {
		return nil, types.InternalServiceError{
			Message: fmt.Sprintf("ElasticSearch Error: %#v", esResult.Error),
		}
	} else if esResult.TimedOut {
		return nil, types.InternalServiceError{
			Message: fmt.Sprintf("ElasticSearch Error: Request timed out: %v ms", esResult.TookInMillis),
		}
	}

	var sort []interface{}
	var hits []*client.SearchHit
	if esResult != nil && esResult.Hits != nil {
		for _, h := range esResult.Hits.Hits {
			if h.Source != nil {
				hits = append(hits, &client.SearchHit{Source: *h.Source})
			}
			// sort keeps the sort values of the last hit, used for
			// search_after style pagination by the caller.
			sort = h.Sort
		}
	}

	result := &client.Response{
		TookInMillis: esResult.TookInMillis,
		TotalHits:    esResult.TotalHits(),
		Hits:         &client.SearchHits{Hits: hits},
		Sort:         sort,
	}
	if len(esResult.Aggregations) > 0 {
		result.Aggregations = make(map[string]json.RawMessage, len(esResult.Aggregations))
		for key, agg := range esResult.Aggregations {
			if agg != nil {
				result.Aggregations[key] = *agg
			}
		}
	}
	return result, nil
}
[ 6 ]
package main

// Given an integer n, return the number of trailing zeroes in n!.
//
// Example: 3! = 6 -> 0 zeroes; 5! = 120 -> 1 zero.
// Required complexity: O(log n).
// Related Topics: math

//leetcode submit region begin(Prohibit modification and deletion)

// trailingZeroes counts the factors of 5 in 1..n (factors of 2 are always
// more plentiful, so the fives determine the trailing zeroes).
//
// Fix: the original multiplied a divisor by 5 each round (`delive *= 5`),
// which can overflow int for very large n (delive > MaxInt/5) and corrupt
// the loop condition; dividing n down instead cannot overflow.
func trailingZeroes(n int) int {
	res := 0
	for n > 0 {
		n /= 5
		res += n
	}
	return res
}

//leetcode submit region end(Prohibit modification and deletion)

func main() {
	trailingZeroes(125)
}
[ 3 ]
package main

import (
	"context"
	"fmt"
	"go.etcd.io/etcd/clientv3"
	"log"
	"time"
)

// main connects to a local etcd, deletes the key "/cron/jobs/jobs1" and
// prints the previous key-value pairs when WithPrevKV returns any.
func main() {
	conf := clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	}

	// 建立连接 — establish the connection.
	client, err := clientv3.New(conf)
	if err != nil {
		log.Fatal(err)
	}
	// Fix: the original never closed the client, leaking the gRPC
	// connection; release it on exit.
	defer client.Close()

	kv := clientv3.NewKV(client)

	// Delete the key; WithPrevKV asks etcd to return the deleted pairs.
	delRes, err := kv.Delete(context.TODO(), "/cron/jobs/jobs1", clientv3.WithPrevKV())
	if err != nil {
		log.Fatal(err)
	}
	if len(delRes.PrevKvs) > 0 {
		fmt.Println(delRes.PrevKvs)
	}
}
[ 3 ]
package array

import "container/list"

// ConstrainedSubsetSum returns the maximum sum of a non-empty subsequence of
// nums in which every pair of consecutive chosen indices differs by at most k.
//
// Example: nums = [10,2,-10,5,20], k = 2 -> 37 (subsequence [10, 2, 5, 20]).
//
// dp[i] is the best sum of a valid subsequence ending at i; a monotonically
// decreasing deque of indices keeps the window maximum of dp over the last k
// positions, giving O(n) time overall. (A memoized DFS variant existed here
// previously but was too slow and has been removed.)
func ConstrainedSubsetSum(nums []int, k int) int {
	n := len(nums)
	dp := make([]int, n)
	best := nums[0]
	// window holds indices j in [i-k, i-1] with dp[j] > 0, front = largest dp.
	var window list.List

	for i := 0; i < n; i++ {
		dp[i] = nums[i]
		if window.Len() > 0 {
			// Extend the best reachable predecessor chain.
			dp[i] += dp[window.Front().Value.(int)]
		}
		if dp[i] > best {
			best = dp[i]
		}
		// Drop indices whose dp is dominated by dp[i].
		for window.Len() > 0 && dp[window.Back().Value.(int)] < dp[i] {
			window.Remove(window.Back())
		}
		// Expire the front index once it falls out of the k-window.
		if window.Len() > 0 && i-window.Front().Value.(int) >= k {
			window.Remove(window.Front())
		}
		// Only positive sums are worth chaining onto.
		if dp[i] > 0 {
			window.PushBack(i)
		}
	}
	return best
}
[ 3 ]
package flexible_reflect

import (
	"errors"
	"reflect"
	"testing"
)

type Employee struct {
	EmployeeID string
	Name       string //`format:"normal"` //struct tag
	Age        int
}

type Customer struct {
	CookieID string
	Name     string
	Age      int
}

// fillBySettings copies each settings entry whose key names a struct field
// with exactly matching type into the struct pointed to by st.
//
// Fix: the original guard was inverted — it only inspected Elem() when st
// was NOT a pointer, which panics (reflect: Elem of invalid type) for plain
// struct arguments instead of returning the documented error. The check now
// rejects anything that is not a pointer to a struct up front.
func fillBySettings(st interface{}, settings map[string]interface{}) error {
	t := reflect.TypeOf(st)
	// Elem() dereferences the pointer type; only legal after the Ptr check.
	if t == nil || t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
		return errors.New("the first param should be a pointer to the struct type")
	}
	if settings == nil {
		return errors.New("settings is nil")
	}
	v := reflect.ValueOf(st).Elem()
	for k, val := range settings {
		field, ok := v.Type().FieldByName(k)
		if !ok {
			// Unknown keys are silently skipped.
			continue
		}
		// Only assign when the dynamic type matches the field exactly.
		if field.Type == reflect.TypeOf(val) {
			v.FieldByName(k).Set(reflect.ValueOf(val))
		}
	}
	return nil
}

// TestFillNameAndAge fills the shared Name/Age fields of two different
// struct types from the same settings map.
func TestFillNameAndAge(t *testing.T) {
	settings := map[string]interface{}{"Name": "Mike", "Age": 40}
	e := Employee{}
	t.Log(e)
	if err := fillBySettings(&e, settings); err != nil {
		t.Fatal(err)
	}
	t.Log(e)
	c := new(Customer)
	t.Log(c)
	if err := fillBySettings(c, settings); err != nil {
		t.Fatal(err)
	}
	t.Log(c)
}
[ 3 ]
package file

import (
	"bufio"
	"fmt"
	"github.com/haplone/tidb_test/utils"
	"io"
	"io/ioutil"
	"log"
	"os"
	"strings"
)

// GetDbCfgs builds one DbCfg per database name listed in FoldName/FileName.
func GetDbCfgs(FoldName, FileName string) []DbCfg {
	var dbs []DbCfg
	dbNames := ParseDbNames(FoldName, FileName)
	for _, n := range dbNames {
		db := NewDbCfg(FoldName, n)
		dbs = append(dbs, db)
	}
	return dbs
}

// ParseDbNames reads FoldName/FileName line by line and returns the
// non-empty database names (text before the first comma on each line).
func ParseDbNames(FoldName, FileName string) []string {
	var names []string
	f, err := os.Open(fmt.Sprintf("%s/%s", FoldName, FileName))
	utils.CheckErr(err)
	// Fix: the original leaked the file handle.
	defer f.Close()
	buf := bufio.NewReader(f)
	for {
		line, err := buf.ReadString('\n')
		if err != nil {
			if err == io.EOF {
				break
			}
			utils.CheckErr(err)
			break
		}
		if strings.Contains(line, ",") {
			line = strings.Split(line, ",")[0]
		}
		line = strings.TrimSpace(line)
		if len(line) != 0 {
			names = append(names, line)
		}
	}
	return names
}

// DbCfg describes one dumped database: its folder, name, create statement
// and table configurations.
type DbCfg struct {
	FoldName  string
	DbName    string
	Tbls      []TblCfg
	CreateSql string
}

// NewDbCfg returns a DbCfg pointing at FoldName/DbName; call Parse to load it.
func NewDbCfg(FoldName, DbName string) DbCfg {
	return DbCfg{
		FoldName: FoldName,
		DbName:   DbName,
	}
}

// AddTbl registers a parsed table configuration.
func (d *DbCfg) AddTbl(t TblCfg) {
	log.Printf("%s add tbl %s", t.DbName, t.TblName)
	d.Tbls = append(d.Tbls, t)
}

// Parse loads the database's CREATE statement and its table list.
func (d *DbCfg) Parse() {
	d.parseCreateDbSql()
	d.parseTblList()
}

// GetDbFold returns the folder holding this database's dump files.
func (d *DbCfg) GetDbFold() string {
	return fmt.Sprintf("%s/%s", d.FoldName, d.DbName)
}

// parseCreateDbSql reads the schema-create SQL file into CreateSql.
func (d *DbCfg) parseCreateDbSql() {
	sqlFile := fmt.Sprintf("%s/data/%s-schema-create.sql", d.GetDbFold(), d.DbName)
	f, err := os.Open(sqlFile)
	utils.CheckErr(err)
	// Fix: the original leaked the file handle.
	defer f.Close()
	s, err := ioutil.ReadAll(f)
	utils.CheckErr(err)
	d.CreateSql = string(s)
}

// parseTblList reads the <db>_tables.list file and builds one TblCfg per
// non-empty line.
func (d *DbCfg) parseTblList() {
	dbFold := d.GetDbFold()
	tblListFile := fmt.Sprintf("%s/%s_tables.list", dbFold, d.DbName)
	f, err := os.Open(tblListFile)
	utils.CheckErr(err)
	// Fix: the original leaked the file handle.
	defer f.Close()
	buf := bufio.NewReader(f)
	for {
		line, err := buf.ReadString('\n')
		if err != nil {
			if err == io.EOF {
				return
			}
			utils.CheckErr(err)
			return
		}
		line = strings.TrimSpace(line)
		if len(line) != 0 {
			tbl := NewTblCfg(d.FoldName, d.DbName, line)
			tbl.parseCreateTblSql()
			//go tbl.parseSql()
			d.AddTbl(tbl)
			//log.Printf("--: %s", line)
		}
	}
}

// TblCfg describes one dumped table and streams its INSERT statements
// through SqlCh.
type TblCfg struct {
	FoldName  string
	DbName    string
	TblName   string
	CreateSql string
	SqlCh     chan string
}

// NewTblCfg returns a TblCfg with a buffered statement channel.
func NewTblCfg(FoldName, DbName, TblName string) TblCfg {
	return TblCfg{
		FoldName: FoldName,
		DbName:   DbName,
		TblName:  TblName,
		SqlCh:    make(chan string, 1000),
	}
}

// GetDbFold returns the data folder holding this table's dump files.
func (t *TblCfg) GetDbFold() string {
	return fmt.Sprintf("%s/%s/data", t.FoldName, t.DbName)
}

// parseCreateTblSql reads the table's schema SQL file into CreateSql.
func (t *TblCfg) parseCreateTblSql() {
	sqlFile := fmt.Sprintf("%s/%s.%s-schema.sql", t.GetDbFold(), t.DbName, t.TblName)
	f, err := os.Open(sqlFile)
	utils.CheckErr(err)
	// Fix: the original leaked the file handle.
	defer f.Close()
	s, err := ioutil.ReadAll(f)
	utils.CheckErr(err)
	t.CreateSql = string(s)
}

// ParseSql scans every <db>.<table>.*.sql dump file, groups lines into
// complete INSERT statements and sends them on SqlCh, which is closed when
// all files have been read.
func (t *TblCfg) ParseSql() {
	sqlFile := fmt.Sprintf("%s/", t.GetDbFold())
	dl, err := ioutil.ReadDir(sqlFile)
	utils.CheckErr(err)
	defer func() {
		//log.Printf("read sql done for tbl : %s", t.TblName)
		close(t.SqlCh)
	}()
	for _, d := range dl {
		if strings.HasPrefix(d.Name(), fmt.Sprintf("%s.%s.", t.DbName, t.TblName)) && strings.HasSuffix(d.Name(), ".sql") {
			//log.Printf("-- %s", d.Name())
			sf, err := os.Open(fmt.Sprintf("%s/%s", t.GetDbFold(), d.Name()))
			utils.CheckErr(err)
			buf := bufio.NewReader(sf)
			var sql string
			for {
				line, err := buf.ReadString('\n')
				//log.Printf("--sql: %s", line)
				// A new INSERT starts a new statement; flush the previous one.
				if strings.HasPrefix(strings.ToUpper(line), "INSERT") {
					if len(sql) > 0 {
						t.SqlCh <- sql
					}
					sql = line
				} else {
					sql = sql + line
				}
				if err != nil {
					if err == io.EOF {
						if len(sql) > 0 {
							t.SqlCh <- sql
						}
						log.Printf("`%s`.`%s`[%s] read sql file done(eof)", t.DbName, t.TblName, d.Name())
						break
					}
					utils.CheckErr(err)
					break
				}
			}
			// Fix: close each dump file as soon as it is fully read instead
			// of leaking the handle (defer inside the loop would pile up).
			sf.Close()
		}
	}
}
[ 0, 2, 3 ]
package main

import (
	"fmt"
	"log"
	"net/http"
	"time"
)

// main presents a menu of three modes: (1) hash passwords from stdin,
// (2) serve hashing over HTTP, (3) same as 2 plus /shutdown and /stats.
// It relies on siblings defined elsewhere in this package: checkError,
// passwordCLineEntry, hashPassword, handlerHash, handlerStats and
// gracefulShutdown — presumably hashPassword computes the SHA512/base64
// digest described in the menu text; confirm in their definitions.
func main() {
	fmt.Println("Program started...")

	var programOption int64
	fmt.Println(
		`Select a program option by entering a number:
1: Command line input to return SHA512 Base64 encoded hash
2: Hash and encode passwords over HTTP
3: Same as 2, but with the ability to send a GET request to /shutdown to shutdown the server once work is completed, and a /stats endpoint`)
	_, err := fmt.Scan(&programOption)
	checkError(err)

	// command line mode to take a user inputted
	// string and return a base64 SHA512 encoded string
	if programOption == 1 {
		fmt.Println("Program 1, command line input started")
		// infinite loop to take infinite entries, don't have to restart each time
		for {
			pwd := hashPassword(passwordCLineEntry())
			fmt.Println("Your base64 encoded password:")
			fmt.Println(pwd)
		}
		// ideally would set the port in the .env or similar
	} else if programOption == 2 {
		// does the same as program 1, but over http,
		// and can handle multiple connections
		fmt.Println("Program 2, http mode started")
		// set some timeouts
		s := &http.Server{
			Addr:           ":8080",
			Handler:        nil,
			ReadTimeout:    15 * time.Second,
			WriteTimeout:   15 * time.Second,
			MaxHeaderBytes: 1 << 20,
		}
		http.HandleFunc("/hash", handlerHash)
		log.Fatal(s.ListenAndServe())
	} else if programOption == 3 {
		// does the same as program 2 but provides a
		// /shutdown endpoint to shutdown gracefully
		// and provides a /stats endpoint
		fmt.Println("Program 3, http mode started w/shutdown and stats enabled")
		// set some timeouts
		svr := &http.Server{
			Addr:           ":8080",
			Handler:        nil,
			ReadTimeout:    10 * time.Second,
			WriteTimeout:   10 * time.Second,
			MaxHeaderBytes: 1 << 20,
		}
		// handle the hashing
		http.HandleFunc("/hash", handlerHash)
		http.HandleFunc("/stats", handlerStats)
		// don't want the server to block, as we need to check for shutdown signals
		go func() {
			if err := svr.ListenAndServe(); err != http.ErrServerClosed {
				checkError(err)
			}
		}()
		// channels to send signals that shutdown is ok, and can commence
		idleConnsClosed := make(chan struct{})
		sigStop := make(chan bool, 1)
		// run the server shutdown as a goroutine that blocks until shutdown signal is sent
		go gracefulShutdown(svr, idleConnsClosed, sigStop)
		// handle the shutdown request by sending a signal ok to shutdown
		// NOTE(review): this route is registered AFTER ListenAndServe has
		// started, so a very early /shutdown request could 404 — consider
		// registering all handlers before starting the server.
		http.HandleFunc("/shutdown", func(w http.ResponseWriter, r *http.Request) {
			if r.Method != "GET" {
				http.Error(w, "method not allowed.", 405)
			} else {
				sigStop <- true
			}
		})
		// block program exit until until all idle connections are closed
		<-idleConnsClosed
	}
}
[ 5 ]
package assignment02IBC_master type Transaction struct { Amount int Sender string Receiver string } func (T Transaction) IsEmpty() bool{ if T.Amount == 0 && T.Receiver == "" && T.Sender == "" { return true } return false }
[ 2 ]
package controllers

import (
	"github.com/astaxie/beego"
	"strings"
)

// SiteController serves the login/logout flow and the main landing pages.
// It embeds BaseController (declared elsewhere), which presumably provides
// the session helpers used below (IsLogin, IsPost, Login, Logout, ShowHtml,
// Redirect) — confirm in its definition.
type SiteController struct {
	BaseController
}

//Login page
// On GET it renders the login form; on POST it authenticates the submitted
// phone/password pair and redirects to Index on success, otherwise exposes
// the error message to the template via Data["error"].
func (this *SiteController) Login() {
	this.Layout = "layout/login.html"
	if this.IsLogin() {
		// NOTE(review): execution continues past this Redirect unless
		// BaseController's Redirect aborts the request (beego needs StopRun
		// for that) — confirm the fallthrough is intended.
		this.Redirect(beego.URLFor("SiteController.Index"))
	}
	if this.IsPost() {
		phone := strings.TrimSpace(this.GetString("phone"))
		password := strings.TrimSpace(this.GetString("password"))
		if err := this.BaseController.Login(phone, password); err == nil {
			this.Redirect(beego.URLFor("SiteController.Index"))
		} else {
			this.Data["error"] = err.Error()
		}
	}
	this.ShowHtml()
}

//main page
func (this *SiteController) Index() {
	this.Layout = "layout/main.html"
	this.ShowHtml()
}

// Logout ends the session (if any) and returns to the login page.
func (this *SiteController) Logout() {
	if this.IsLogin() {
		this.BaseController.Logout()
	}
	this.Redirect(beego.URLFor("SiteController.Login"))
}

//main page
// NoPermission renders the 403 page inside the main layout.
func (this *SiteController) NoPermission() {
	this.Layout = "layout/main.html"
	this.ShowHtml("site/403.html")
}

// RABCMethods lists the HTTP methods subject to RBAC checks for this
// controller.
func (this *SiteController) RABCMethods() []string {
	return []string{"Get", "Post"}
}
[ 3 ]