query
stringlengths 8
4.68k
| pos
stringlengths 30
210k
| negs
listlengths 7
7
|
|---|---|---|
/ The function fiat_p224_addcarryx_u32 is a thin wrapper around bits.Add32 that uses fiat_p224_uint1 rather than uint32
|
func fiat_p224_addcarryx_u32(x uint32, y uint32, carry fiat_p224_uint1) (uint32, fiat_p224_uint1) {
var sum uint32
var carryOut uint32
sum, carryOut = bits.Add32(x, y, uint32(carry))
return sum, fiat_p224_uint1(carryOut)
}
|
[
"func softfloat_mul128By32(a64, a0 uint64, b uint32) Uint128 {\n\tvar z Uint128\n\tvar mid uint64\n\tvar carry uint32\n\n\tz.Low = a0 * uint64(b)\n\tmid = (a0 >> 32) * uint64(b)\n\tcarry = uint32(z.Low>>32) - uint32(mid)\n\tz.High = a64*uint64(b) + uint64((mid+uint64(carry))>>32)\n\treturn z\n}",
"func CarryAdd(out1 *TightFieldElement, arg1 *TightFieldElement, arg2 *TightFieldElement) {\n\tx1 := (arg1[0] + arg2[0])\n\tx2 := ((x1 >> 26) + (arg1[1] + arg2[1]))\n\tx3 := ((x2 >> 26) + (arg1[2] + arg2[2]))\n\tx4 := ((x3 >> 26) + (arg1[3] + arg2[3]))\n\tx5 := ((x4 >> 26) + (arg1[4] + arg2[4]))\n\tx6 := ((x1 & 0x3ffffff) + ((x5 >> 26) * 0x5))\n\tx7 := (uint32(uint1((x6 >> 26))) + (x2 & 0x3ffffff))\n\tx8 := (x6 & 0x3ffffff)\n\tx9 := (x7 & 0x3ffffff)\n\tx10 := (uint32(uint1((x7 >> 26))) + (x3 & 0x3ffffff))\n\tx11 := (x4 & 0x3ffffff)\n\tx12 := (x5 & 0x3ffffff)\n\tout1[0] = x8\n\tout1[1] = x9\n\tout1[2] = x10\n\tout1[3] = x11\n\tout1[4] = x12\n}",
"func (d *GF255e) Add(a, b *GF255e) *GF255e {\n\tgf_add((*[4]uint64)(d), (*[4]uint64)(a), (*[4]uint64)(b), mq255e)\n\treturn d\n}",
"func AddConst32(c float32, x, y []float32) {\n\taddConst32(c, x, y)\n}",
"func CarryAdd(out1 *TightFieldElement, arg1 *TightFieldElement, arg2 *TightFieldElement) {\n\tx1 := (arg1[0] + arg2[0])\n\tx2 := ((x1 >> 44) + (arg1[1] + arg2[1]))\n\tx3 := ((x2 >> 43) + (arg1[2] + arg2[2]))\n\tx4 := ((x1 & 0xfffffffffff) + ((x3 >> 43) * 0x5))\n\tx5 := (uint64(uint1((x4 >> 44))) + (x2 & 0x7ffffffffff))\n\tx6 := (x4 & 0xfffffffffff)\n\tx7 := (x5 & 0x7ffffffffff)\n\tx8 := (uint64(uint1((x5 >> 43))) + (x3 & 0x7ffffffffff))\n\tout1[0] = x6\n\tout1[1] = x7\n\tout1[2] = x8\n}",
"func fiat_p448_addcarryx_u64(x uint64, y uint64, carry fiat_p448_uint1) (uint64, fiat_p448_uint1) {\n var sum uint64\n var carryOut uint64\n sum, carryOut = bits.Add64(x, y, uint64(carry))\n return sum, fiat_p448_uint1(carryOut)\n}",
"func Add64(x, y, carry uint64) (sum, carryOut uint64) {\n\tsum = x + y + carry\n\t// The sum will overflow if both top bits are set (x & y) or if one of them\n\t// is (x | y), and a carry from the lower place happened. If such a carry\n\t// happens, the top bit will be 1 + 0 + 1 = 0 (&^ sum).\n\tcarryOut = ((x & y) | ((x | y) &^ sum)) >> 63\n\treturn\n}"
] |
vytvori prazdnu mapu velkosti 6x6
|
func vytvorPrazdnuMapu() [6][6]uint8 {
mapa := [6][6]uint8{
{0,0,0,0,0,0},
{0,0,0,0,0,0},
{0,0,0,0,0,0},
{0,0,0,0,0,0},
{0,0,0,0,0,0},
{0,0,0,0,0,0},
}
return mapa
}
|
[
"func initTerrain(game *gamestate) {\n\trand.New(rand.NewSource(42)) //42 best magical Constant\n\trand.Seed(time.Now().UnixNano())\n\tx := 0\n\n\ty := heightOfMap - rand.Float64()*(heightOfMap/2) // the height to start at\n\tfmt.Println(y)\n\t//standardTerrain := y\n\tvar dy float64 = 0\n\tvar dyGoal float64 = 0\n\tvar curveDensity float64 = 0\n\n\tfor x < int(mapSize) {\n\t\tif curveDensity == 0 {\n\t\t\tdyGoal = 0.5 * (-0.5 + rand.Float64())\n\t\t\tcurveDensity = 30\n\t\t}\n\t\tdy += dyGoal / 30\n\t\ty += dy\n\t\tif x == 0 {\n\t\t\tfmt.Println(int(y))\n\t\t}\n\t\tgame.Terrain[x] = genTerrain(x, int(y))\n\t\tcurveDensity--\n\t\tx++\n\t\tif y > heightOfMap-250 {\n\t\t\tdy -= 0.02\n\t\t}\n\t\tif y > heightOfMap-200 {\n\t\t\tdyGoal = -0.5\n\t\t\tdy -= 0.05\n\t\t}\n\n\t\tif y < reactionHeight+100 {\n\t\t\tdy += 0.01\n\t\t}\n\t\tif y < reactionHeight {\n\t\t\tdyGoal = 0.5\n\t\t\tdy += 0.05\n\t\t}\n\t\tif dy >= 0.33 {\n\t\t\tdy = 0.33\n\t\t}\n\t\tif dy <= -0.33 {\n\t\t\tdy = -0.33\n\t\t}\n\n\t}\n\n}",
"func (p *Player) see(treasureMap TreasureMap) ([2]int, [][2]int) {\n\tvar (\n\t\tstartX, startY = p.Position[0], p.Position[1]\n\t\ttreasurePosition, treasureFound [2]int\n\t\tlistPathPosition, pathFound [][2]int\n\t)\n\n\t// see all entity in x axis with same y axis / right direction ->\n\ttreasurePosition, pathFound = checkMap(treasureMap, startX+1, startY, 1, axis_x)\n\tif treasureMap.OriginalMapping[treasurePosition] == entity_treasure {\n\t\ttreasureFound = treasurePosition\n\t}\n\tlistPathPosition = append(listPathPosition, pathFound...)\n\tp.Range[right] = len(pathFound)\n\n\t// see all entity in -x axis with same y axis / left direction <-\n\ttreasurePosition, pathFound = checkMap(treasureMap, startX-1, startY, -1, axis_x)\n\tif treasureMap.OriginalMapping[treasurePosition] == entity_treasure {\n\t\ttreasureFound = treasurePosition\n\t}\n\tlistPathPosition = append(listPathPosition, pathFound...)\n\tp.Range[left] = len(pathFound)\n\n\t// see all entity in y axis with same x axis / up direction ^\n\ttreasurePosition, pathFound = checkMap(treasureMap, startY+1, startX, 1, axis_y)\n\tif treasureMap.OriginalMapping[treasurePosition] == entity_treasure {\n\t\ttreasureFound = treasurePosition\n\t}\n\tlistPathPosition = append(listPathPosition, pathFound...)\n\tp.Range[up] = len(pathFound)\n\n\t// see all entity in -y axis with same x axis / down direction v\n\ttreasurePosition, pathFound = checkMap(treasureMap, startY-1, startX, -1, axis_y)\n\tif treasureMap.OriginalMapping[treasurePosition] == entity_treasure {\n\t\ttreasureFound = treasurePosition\n\t}\n\tlistPathPosition = append(listPathPosition, pathFound...)\n\tp.Range[down] = len(pathFound)\n\n\tif treasureMap.OriginalMapping[treasureFound] == entity_treasure {\n\t\tp.FoundTreasure = true\n\t}\n\n\t// check possibility of path intersection with best probability to get the most explored map\n\tif p.DirectionTaken == up && p.Range[right] > p.Range[up] {\n\t\tp.DirectionTaken = right\n\t} else if p.DirectionTaken == right && p.Range[down] > p.Range[right] {\n\t\tp.DirectionTaken = down\n\t}\n\n\treturn treasureFound, listPathPosition\n}",
"func updateBoard() {\n\t// zero board\n\tfor i := 0; i < BoardSize; i++ {\n\t\tfor j := 0; j < BoardSize; j++ {\n\t\t\tBoard[i][j] = \"\"\n\t\t}\n\t}\n\tBoard[food[0]][food[1]] = \"fo\"\n\tfor _, s := range Snakes {\n\t\tbody := s.Body\n\t\tfor e := body.Front(); e != nil; e = e.Next() {\n\t\t\tp := e.Value.(Point)\n\t\t\tBoard[p[0]][p[1]] = ColorString(s.Color)\n\t\t}\n\t}\n}",
"func (bm Blendmap) View() (float32, float32, float32, float32) {\n\treturn bm.Map.viewport.Min.X, bm.Map.viewport.Min.Y, bm.Map.viewport.Max.X, bm.Map.viewport.Max.Y\n}",
"func (grid *SquareGrid) Map() map[Loc]interface{} {\n\treturn grid.Data\n}",
"func func4(obstacleGrid [][]int, m int, n int) int {\n\tmemo := make([][]int, 2)\n\tfor i := 0; i < 2; i++ {\n\t\tmemo[i] = make([]int, n)\n\t\tfor j := 0; j < n; j++ {\n\t\t\tmemo[i][j] = 0\n\t\t}\n\t}\n\n\tfor i := m - 1; i >= 0; i-- {\n\t\tfor j := n - 1; j >= 0; j-- {\n\t\t\tif i == m-1 && j == n-1 {\n\t\t\t\tif obstacleGrid[m-1][n-1] == 1 {\n\t\t\t\t\tmemo[(m-1)%2][n-1] = 0\n\t\t\t\t} else {\n\t\t\t\t\tmemo[(m-1)%2][n-1] = 1\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tmemo[i%2][j] = 0\n\t\t\tif obstacleGrid[i][j] == 1 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif j+1 < n {\n\t\t\t\tmemo[i%2][j] += memo[i%2][j+1]\n\t\t\t}\n\t\t\tif i+1 < m {\n\t\t\t\tmemo[i%2][j] += memo[(i+1)%2][j]\n\t\t\t}\n\t\t}\n\t}\n\treturn memo[0][0]\n}",
"func (r *Room) Map(radius int64) string {\n\tstr := \"\\n \"\n\tstartX := r.Data.X - radius\n\tstartY := r.Data.Y + radius\n\tz := r.Data.Z\n\tvar ry int64 = 0\n\tfor y := startY; y > r.Data.Y-radius; y-- {\n\t\tvar rx int64 = 0\n\t\tfor x := startX; x < r.Data.X+radius; x++ {\n\t\t\tmroom := Atlas.GetRoom(x, y, z)\n\t\t\tswitch {\n\t\t\tcase mroom == nil:\n\t\t\t\tstr += \" \"\n\t\t\tcase mroom == r:\n\t\t\t\tstr += \"{R*{x\"\n\t\t\tdefault:\n\t\t\t\tstr += \"{W#{x\"\n\t\t\t}\n\t\t\trx++\n\t\t}\n\t\try++\n\t\tstr += \"\\n \"\n\t}\n\n\treturn str\n}"
] |
=== FUNCTION ============================================================== Name: GeneratePanicMsg Description: =============================================================================
|
func GeneratePanicMsg(limit uint64) string {
overMaxElements := limit - maxUint64
msg :=
"\nThe numerator is too high: " + strconv.FormatUint(limit, 10) +
"\nThe numerator cannot exceed " + strconv.FormatUint(maxUint64, 10) +
"\nYou are over the limit by " + strconv.FormatUint(overMaxElements, 10)
return msg
}
|
[
"func SystemMessg(message byte, length uint16) *network.Writer {\n\tvar packet = network.NewWriter(SYSTEMMESSG)\n\tpacket.WriteByte(message)\n\tpacket.WriteUint16(length)\n\n\treturn packet\n}",
"func Panic(v ...interface{}) {\n s := sout(v...)\n Std.Output(LevelPanic, CallDepth, s)\n panic(s)\n}",
"func TestPanic(t *testing.T) {\n\tdefer func() {\n\t\tif r := recover(); r == nil {\n\t\t\tt.Errorf(\"The code did not panic as expected\")\n\t\t}\n\t}()\n\n\tr, w, _ := os.Pipe()\n\tSetOutputs(Output{\n\t\tName: \"test\",\n\t\tWriter: w,\n\t\tLevels: level.Default,\n\t})\n\n\tPanic(\"Test fatal\")\n\toutC := make(chan string)\n\tgo ioCopy(r, outC)\n\tw.Close()\n\tout := <-outC\n\n\texpected := \"Test fatal\"\n\tn := level.Labels[level.Panic]\n\tif !strings.Contains(out, expected) || !strings.Contains(out, n) {\n\t\tt.Errorf(\"Result `%s` doesn't contains `%s` and `%s`\",\n\t\t\tout, expected, n)\n\t}\n}",
"func PanicTrace(kb int) []byte {\n\ts := []byte(\"/src/runtime/panic.go\")\n\te := []byte(\"\\ngoroutine \")\n\tline := []byte(\"\\n\")\n\tstack := make([]byte, kb<<10) // 4KB\n\tlength := runtime.Stack(stack, true)\n\tstart := bytes.Index(stack, s)\n\tstack = stack[start:length]\n\tstart = bytes.Index(stack, line) + 1\n\tstack = stack[start:]\n\tend := bytes.LastIndex(stack, line)\n\tif end != -1 {\n\t\tstack = stack[:end]\n\t}\n\tend = bytes.Index(stack, e)\n\tif end != -1 {\n\t\tstack = stack[:end]\n\t}\n\tstack = bytes.TrimRight(stack, \"\\n\")\n\treturn stack\n}",
"func (ce *mySpiderError) genFullErrMsg() {\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"Spider Error:\")\n\tif ce.errType != \"\" {\n\t\tbuffer.WriteString(string(ce.errType))\n\t\tbuffer.WriteString(\": \")\n\t}\n\tbuffer.WriteString(ce.errMsg)\n\n\tce.fullErrMsg = fmt.Sprintf(\"%s\\n\", buffer.String())\n}",
"func VMTypeToStringPanic(tag vm_grpc.VMTypeTag) string {\n\tif val, ok := vm_grpc.VMTypeTag_name[int32(tag)]; !ok {\n\t\tpanic(fmt.Errorf(\"can't find string representation of type %d, check correctness of type value\", tag))\n\t} else {\n\t\treturn val\n\t}\n}",
"func FATALMSGF(format string, v ...interface{}) {\n\ts := fmt.Sprintf(format, v...)\n\tFatalLogger.Output(2, s)\n\tos.Exit(1)\n}"
] |
ValidateSlice validates that all items in target is one of valid
|
func ValidateSlice(target []string, valid []string) (bool, error) {
for _, item := range target {
found, _ := ValidateString(item, valid)
if !found {
return false, fmt.Errorf("'%s' is not in the allowed list: %s", item, strings.Join(valid, ", "))
}
}
return true, nil
}
|
[
"func TestMyValids(t *testing.T) {\n\n\ttables := []struct {\n\t\tmyQuery string\n\t\tindexList []int\n\t\tmyIndex int\n\t\tmyValIndex bool\n\t\theader []string\n\t\terr error\n\t}{\n\t\t{\"SELECT UPPER(NULLIF(draft_year,random_name))\", []int{3, 5, 6, 7, 8, 9}, 3, true, []string{\"draft_year\", \"random_name\"}, nil},\n\t\t{\"SELECT UPPER(NULLIF(draft_year,xandom_name))\", []int{3, 5, 6, 7, 8, 9}, 3, true, []string{\"draft_year\", \"random_name\"}, ErrMissingHeaders},\n\t}\n\tfor _, table := range tables {\n\t\toptions := &Options{\n\t\t\tHasHeader: false,\n\t\t\tRecordDelimiter: \"\\n\",\n\t\t\tFieldDelimiter: \",\",\n\t\t\tComments: \"\",\n\t\t\tName: \"S3Object\", // Default table name for all objects\n\t\t\tReadFrom: bytes.NewReader([]byte(\"name1,name2,name3,name4\" + \"\\n\" + \"5,is,a,string\" + \"\\n\" + \"random,random,stuff,stuff\")),\n\t\t\tCompressed: \"\",\n\t\t\tExpression: \"\",\n\t\t\tOutputFieldDelimiter: \",\",\n\t\t\tStreamSize: 20,\n\t\t\tHeaderOpt: true,\n\t\t}\n\t\ts3s, err := NewInput(options)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\ts3s.header = table.header\n\t\t_, _, _, _, _, _, err = s3s.ParseSelect(table.myQuery)\n\t\tif err != table.err {\n\t\t\tt.Fatal()\n\t\t}\n\t\tmyVal := isValidFunc(table.indexList, table.myIndex)\n\t\tif myVal != table.myValIndex {\n\t\t\tt.Error()\n\t\t}\n\t}\n}",
"func (s *mustRunAs) Validate(fldPath *field.Path, _ *api.Pod, groups []int64) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tif len(groups) == 0 && len(s.ranges) > 0 {\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(s.field), groups, \"unable to validate empty groups against required ranges\"))\n\t}\n\n\tfor _, group := range groups {\n\t\tif !s.isGroupValid(group) {\n\t\t\tdetail := fmt.Sprintf(\"%d is not an allowed group\", group)\n\t\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(s.field), groups, detail))\n\t\t}\n\t}\n\n\treturn allErrs\n}",
"func CheckSubset(src, trg *Item) bool {\n\ttype obj struct {\n\t\tsrc Attribute\n\t\ttrg Attribute\n\t}\n\tfor _, v := range []obj{\n\t\t{src.part, trg.part},\n\t\t{src.vendor, trg.vendor},\n\t\t{src.product, trg.product},\n\t\t{src.version, trg.version},\n\t\t{src.update, trg.update},\n\t\t{src.edition, trg.edition},\n\t\t{src.language, trg.language},\n\t\t{src.sw_edition, trg.sw_edition},\n\t\t{src.target_sw, trg.target_sw},\n\t\t{src.target_hw, trg.target_hw},\n\t\t{src.other, trg.other},\n\t} {\n\t\tswitch v.src.Comparison(v.trg) {\n\t\tcase Subset, Equal:\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func CheckValid(data Dstream) bool {\n\n\tdata.Reset()\n\tnames := data.Names()\n\n\tfor c := 0; data.Next(); c++ {\n\t\tn0 := ilen(data.GetPos(0))\n\t\tfor j := 1; j < len(names); j++ {\n\t\t\tn1 := ilen(data.GetPos(j))\n\t\t\tif n1 != n0 {\n\t\t\t\tmsg := fmt.Sprintf(\"Length mismatch in chunk %d: len(%s) = %d, len(%s) = %d\\n\",\n\t\t\t\t\tc, names[0], n0, names[j], n1)\n\t\t\t\t_, _ = io.WriteString(os.Stderr, msg)\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\n\tdata.Reset()\n\n\treturn true\n}",
"func invalidLength(offset, length, sliceLength int) bool {\n\treturn offset+length < offset || offset+length > sliceLength\n}",
"func SliceSubset(slice1, slice2 interface{}) (bool, error) {\n\n\tswitch x := slice1.(type) {\n\tcase []DRAState:\n\t\tstateSlice1, ok1 := slice1.([]DRAState)\n\t\tstateSlice2, ok2 := slice2.([]DRAState)\n\n\t\tif (!ok1) || (!ok2) {\n\t\t\treturn false, fmt.Errorf(\"Error converting slice1 (%v) or slice2 (%v).\", ok1, ok2)\n\t\t}\n\n\t\t//Iterate through all TransitionSystemState in stateSlice1 and make sure that they are in 2.\n\t\tfor _, stateFrom1 := range stateSlice1 {\n\t\t\tif !(stateFrom1.In(stateSlice2)) {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\t// If all elements of slice1 are in slice2 then return true!\n\t\treturn true, nil\n\tcase []TransitionSystemState:\n\t\tstateSlice1, ok1 := slice1.([]TransitionSystemState)\n\t\tstateSlice2, ok2 := slice2.([]TransitionSystemState)\n\n\t\tif (!ok1) || (!ok2) {\n\t\t\treturn false, fmt.Errorf(\"Error converting slice1 (%v) or slice2 (%v).\", ok1, ok2)\n\t\t}\n\n\t\t//Iterate through all TransitionSystemState in stateSlice1 and make sure that they are in 2.\n\t\tfor _, stateFrom1 := range stateSlice1 {\n\t\t\tif !(stateFrom1.In(stateSlice2)) {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\t// If all elements of slice1 are in slice2 then return true!\n\t\treturn true, nil\n\n\tcase []mc.AtomicProposition:\n\t\tapSlice1, ok1 := slice1.([]mc.AtomicProposition)\n\t\tapSlice2, ok2 := slice2.([]mc.AtomicProposition)\n\n\t\tif (!ok1) || (!ok2) {\n\t\t\treturn false, fmt.Errorf(\"Error converting slice1 (%v) or slice2 (%v).\", ok1, ok2)\n\t\t}\n\n\t\t//Iterate through all TransitionSystemState in stateSlice1 and make sure that they are in 2.\n\t\tfor _, apFrom1 := range apSlice1 {\n\t\t\tif !(apFrom1.In(apSlice2)) {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\t// If all elements of slice1 are in slice2 then return true!\n\t\treturn true, nil\n\n\tdefault:\n\t\treturn false, fmt.Errorf(\"Unexpected type given to SliceSubset(): %v\", x)\n\t}\n\n}",
"func validateEntries(t *testing.T, entries, expected []endpoint.Endpoint) {\n\tif len(entries) != len(expected) {\n\t\tt.Fatalf(\"expected %q to match %q\", entries, expected)\n\t}\n\n\tfor i := range entries {\n\t\tif entries[i] != expected[i] {\n\t\t\tt.Fatalf(\"expected %q to match %q\", entries, expected)\n\t\t}\n\t}\n}"
] |
ArrayContainsString checks to see if a value exists in the array
|
func ArrayContainsString(array []string, value string) bool {
hasValue := false
for i := 0; i < len(array); i++ {
if array[i] == value {
hasValue = true
}
}
return hasValue
}
|
[
"func Contains(aa []string, s string) bool {\n\tfor _, v := range aa {\n\t\tif s == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (arr StringArray) Contains(v string) bool {\n\treturn arr.IndexOf(v) > -1\n}",
"func contains(s []string, e string) bool {\n\tfor _, a := range s {\n\t\tif a == e {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func InArray(val string, array []string) (exists bool, index int) {\n\texists = false\n\tindex = -1\n\tfor i, v := range array {\n\t\tif val == v {\n\t\t\tindex = i\n\t\t\texists = true\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}",
"func StringContains(arr []string, val string) (index int) {\n\tindex = -1\n\tfor i := 0; i < len(arr); i++ {\n\t\tif arr[i] == val {\n\t\t\tindex = i\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}",
"func Contains(s []string, str string) bool {\n\tfor _, v := range s {\n\t\tif v == str {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func Contains(a []string, x string) bool {\n\tfor _, n := range a {\n\t\tif strings.Contains(x, n) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}"
] |
Render method writes File into HTTP response.
|
func (f *binaryRender) Render(w io.Writer) error {
if f.Reader != nil {
defer ess.CloseQuietly(f.Reader)
_, err := io.Copy(w, f.Reader)
return err
}
file, err := os.Open(f.Path)
if err != nil {
return err
}
defer ess.CloseQuietly(file)
fi, err := file.Stat()
if err != nil {
return err
}
if fi.IsDir() {
return fmt.Errorf("'%s' is a directory", f.Path)
}
_, err = io.Copy(w, file)
return err
}
|
[
"func (o EndpointsResponseOutput) File() pulumi.StringOutput {\n\treturn o.ApplyT(func(v EndpointsResponse) string { return v.File }).(pulumi.StringOutput)\n}",
"func (e *ErrResponse) Render(w http.ResponseWriter, r *http.Request) error {\n\trender.Status(r, e.StatusCode)\n\treturn nil\n}",
"func (c *Context) File(fs http.FileSystem, fp string) error {\n\tfp = filepath.Clean(fp)\n\t// TODO: add option to disable this check\n\tif hasDotPrefix(fp) {\n\t\treturn c.NotFound()\n\t}\n\n\tf, err := fs.Open(fp)\n\tif err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\tc.Log(\"Error: %s\", err)\n\t\t}\n\t\treturn c.NotFound()\n\t}\n\tdefer f.Close()\n\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\tc.Log(\"Error: %s\", err)\n\t\treturn c.NotFound()\n\t}\n\tif fi.IsDir() {\n\t\t// TODO: add option to try serve an index file instead\n\t\treturn c.NotFound()\n\t}\n\n\thttp.ServeContent(c.W, c.R, fi.Name(), fi.ModTime(), f)\n\treturn nil // ServeContent will handle any errors with a http.Error, so we do nothing else\n}",
"func renderEmbeddedFile(resFile string, w http.ResponseWriter, r *http.Request) {\n\tf, err := storage.InternalHttp.Open(resFile)\n\tdefer f.Close()\n\tif err != nil {\n\t\tfail404(w, r)\n\t\treturn\n\t}\n\tvar name string\n\tif qName, inQuery := r.URL.Query()[\"static\"]; inQuery {\n\t\tname = qName[0]\n\t} else {\n\t\tname = filepath.Base(resFile)\n\t}\n\thttp.ServeContent(w, r, name, BuildTime, f)\n}",
"func writeFileToResponse(file string, w http.ResponseWriter) error {\n\tfout, err := os.Open(file)\n\tif err != nil {\n\t\tnumErrors++\n\t\treturn err\n\t}\n\tdefer fout.Close()\n\tio.Copy(w, fout)\n\n\treturn nil\n}",
"func (r *Reply) File(file string) *Reply {\n\tif !filepath.IsAbs(file) {\n\t\tfile = filepath.Join(r.ctx.a.BaseDir(), file)\n\t}\n\tr.gzip = util.IsGzipWorthForFile(file)\n\tr.Render(&binaryRender{Path: file})\n\treturn r\n}",
"func (rw *RW) WriteHTML(fileName string) (err error) {\n\tfile ,err := os.Open(\"views/\"+fileName)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer file.Close()\n\tfileBytes ,err := ioutil.ReadAll(file)\n\tif err != nil {\n\t\treturn\n\t}\n\n\trw.W.WriteHeader(200)\n\t_,err = rw.W.Write(fileBytes)\n\treturn\n}"
] |
Write operation is not supported. For interface compatibility only.
|
func (reader *embedFileReader) Write(b []byte) (int, error) {
return 0, ErrNotAvail
}
|
[
"func (m *Memory) Write(b []byte) (n int, err error) {\n\tpanic(\"not implemented\")\n}",
"func (r *MockReadWriteCloser) Write(p []byte) (n int, err error) {\n\n\tif err = r.WriteErr; err != nil {\n\t\tr.BytesWritten = p\n\t\tn = len(p)\n\t}\n\treturn\n}",
"func Write(ref name.Reference, img v1.Image, w io.Writer, opts ...WriteOption) error {\n\treturn MultiRefWrite(map[name.Reference]v1.Image{ref: img}, w, opts...)\n}",
"func (p *xmlProvider) IsWriteable(w http.ResponseWriter, r *http.Request, v interface{}) bool {\n\treturn true\n}",
"func TestWrite(t *testing.T) {\n\ttests := []struct {\n\t\tid *ua.NodeID\n\t\tv interface{}\n\t\tstatus ua.StatusCode\n\t}{\n\t\t// happy flows\n\t\t{ua.NewStringNodeID(2, \"rw_bool\"), false, ua.StatusOK},\n\t\t{ua.NewStringNodeID(2, \"rw_int32\"), int32(9), ua.StatusOK},\n\n\t\t// error flows\n\t\t{ua.NewStringNodeID(2, \"ro_bool\"), false, ua.StatusBadUserAccessDenied},\n\t}\n\n\tctx := context.Background()\n\n\tsrv := NewServer(\"rw_server.py\")\n\tdefer srv.Close()\n\n\tc, err := opcua.NewClient(srv.Endpoint, srv.Opts...)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := c.Connect(ctx); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer c.Close(ctx)\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.id.String(), func(t *testing.T) {\n\t\t\ttestWrite(t, ctx, c, tt.status, &ua.WriteRequest{\n\t\t\t\tNodesToWrite: []*ua.WriteValue{\n\t\t\t\t\t&ua.WriteValue{\n\t\t\t\t\t\tNodeID: tt.id,\n\t\t\t\t\t\tAttributeID: ua.AttributeIDValue,\n\t\t\t\t\t\tValue: &ua.DataValue{\n\t\t\t\t\t\t\tEncodingMask: ua.DataValueValue,\n\t\t\t\t\t\t\tValue: ua.MustVariant(tt.v),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\n\t\t\t// skip read tests if the write is expected to fail\n\t\t\tif tt.status != ua.StatusOK {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttestRead(t, ctx, c, tt.v, tt.id)\n\t\t})\n\t}\n}",
"func (w *writer) Write(p []byte) (int, error) {\n\t// Avoid opening the pipe for a zero-length write;\n\t// the concrete can do these for empty blobs.\n\tif len(p) == 0 {\n\t\treturn 0, nil\n\t}\n\tif w.pw == nil {\n\t\t// We'll write into pw and use pr as an io.Reader for the\n\t\t// Upload call to Azure.\n\t\tw.pr, w.pw = io.Pipe()\n\t\tw.open(w.pr, true)\n\t}\n\treturn w.pw.Write(p)\n}",
"func (d *Data) Write (w http.ResponseWriter) {\n\tproto := Protocol{\n\t\tAuthorized: true,\n\t\tSuccess: d.Code == http.StatusOK || d.Code == 0,\n\t\tError: d.Msg,\n\t\tData: d.Data}\n\td.Data = &proto\n\t(*resp.Data)(d).Write(w)\n}"
] |
NewBikePointGetAllParams creates a new BikePointGetAllParams object with the default values initialized.
|
func NewBikePointGetAllParams() *BikePointGetAllParams {
return &BikePointGetAllParams{
timeout: cr.DefaultTimeout,
}
}
|
[
"func NewGetZippedParams() *GetZippedParams {\n\tvar ()\n\treturn &GetZippedParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewGetContactsParams() *GetContactsParams {\n\tvar (\n\t\tlimitDefault = int32(5000)\n\t\toffsetDefault = int32(0)\n\t)\n\treturn &GetContactsParams{\n\t\tLimit: &limitDefault,\n\t\tOffset: &offsetDefault,\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *PcloudNetworksGetallParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (s RealmGateway_export_Params) NewParams() (Persistent_SaveParams, error) {\n\tss, err := NewPersistent_SaveParams(s.Struct.Segment())\n\tif err != nil {\n\t\treturn Persistent_SaveParams{}, err\n\t}\n\terr = s.Struct.SetPtr(1, ss.Struct.ToPtr())\n\treturn ss, err\n}",
"func GetAll(ptp protocol.PointToPoint, tempFields ...interface{}) (ts []container.Tuple, b bool) {\n\tts, b = getAllAndQueryAll(ptp, protocol.GetAllRequest, tempFields...)\n\treturn ts, b\n}",
"func NewGetUsersParams() *GetUsersParams {\n\tvar ()\n\treturn &GetUsersParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewParams() *Params {\n\tp := Params{}\n\tp.names = []string{}\n\tp.values = map[string]interface{}{}\n\n\treturn &p\n}"
] |
NewMockTime creates a new mock instance
|
func NewMockTime(ctrl *gomock.Controller) *MockTime {
mock := &MockTime{ctrl: ctrl}
mock.recorder = &MockTimeMockRecorder{mock}
return mock
}
|
[
"func NewMock(serverHost string) (*MockClient, error) {\n\treturn &MockClient{}, nil\n}",
"func NewMockAuth(t mockConstructorTestingTNewMockAuth) *MockAuth {\n\tmock := &MockAuth{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NewMock() *Mock {\n\treturn &Mock{now: time.Unix(0, 0)}\n}",
"func (c stubClocker) Now() time.Time { return c.t }",
"func (mock *groupTimerFactoryMock) newTimerCalls() []struct {\n\tName string\n\tD time.Duration\n} {\n\tvar calls []struct {\n\t\tName string\n\t\tD time.Duration\n\t}\n\tmock.locknewTimer.RLock()\n\tcalls = mock.calls.newTimer\n\tmock.locknewTimer.RUnlock()\n\treturn calls\n}",
"func NewMock(now time.Time) *Mock {\n\treturn &Mock{\n\t\tnow: now,\n\t\tmockTimers: &timerHeap{},\n\t}\n}",
"func NewMock() *Mock {\n\tc := &Mock{\n\t\tFakeIncoming: func() chan []byte {\n\t\t\treturn make(chan []byte, 2)\n\t\t},\n\t\tFakeName: func() string {\n\t\t\treturn \"TestClient\"\n\t\t},\n\t\tFakeGame: func() string {\n\t\t\treturn \"test\"\n\t\t},\n\t\tFakeClose: func() {\n\t\t\t// Do nothing\n\t\t},\n\t\tFakeStopTimer: func() {\n\t\t\t// Do nothing\n\t\t},\n\t\tFakeRoom: func() interfaces.Room {\n\t\t\treturn nil\n\t\t},\n\t\tFakeSetRoom: func(interfaces.Room) {\n\n\t\t},\n\t}\n\n\tc.FakeWritePump = func() {\n\t\tfor range c.Incoming() {\n\t\t\t// Do nothing\n\t\t}\n\t}\n\n\tc.FakeSetName = func(string) interfaces.Client {\n\t\treturn c\n\t}\n\treturn c\n}"
] |
SharedAccessSignatureAuthorizationRuleListResultPreparer prepares a request to retrieve the next set of results. It returns nil if no more results exist.
|
func (client SharedAccessSignatureAuthorizationRuleListResult) SharedAccessSignatureAuthorizationRuleListResultPreparer() (*http.Request, error) {
if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
return nil, nil
}
return autorest.Prepare(&http.Request{},
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(client.NextLink)))
}
|
[
"func (alr AppListResult) appListResultPreparer() (*http.Request, error) {\n\tif alr.NextLink == nil || len(to.String(alr.NextLink)) < 1 {\n\t\treturn nil, nil\n\t}\n\treturn autorest.Prepare(&http.Request{},\n\t\tautorest.AsJSON(),\n\t\tautorest.AsGet(),\n\t\tautorest.WithBaseURL(to.String(alr.NextLink)))\n}",
"func (client AccountListResult) AccountListResultPreparer() (*http.Request, error) {\n\tif client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {\n\t\treturn nil, nil\n\t}\n\treturn autorest.Prepare(&http.Request{},\n\t\tautorest.AsJSON(),\n\t\tautorest.AsGet(),\n\t\tautorest.WithBaseURL(to.String(client.NextLink)))\n}",
"func (client SyncAgentListResult) SyncAgentListResultPreparer() (*http.Request, error) {\n\tif client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {\n\t\treturn nil, nil\n\t}\n\treturn autorest.Prepare(&http.Request{},\n\t\tautorest.AsJSON(),\n\t\tautorest.AsGet(),\n\t\tautorest.WithBaseURL(to.String(client.NextLink)))\n}",
"func (client WorkflowListResult) WorkflowListResultPreparer() (*http.Request, error) {\n\tif client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {\n\t\treturn nil, nil\n\t}\n\treturn autorest.Prepare(&http.Request{},\n\t\tautorest.AsJSON(),\n\t\tautorest.AsGet(),\n\t\tautorest.WithBaseURL(to.String(client.NextLink)))\n}",
"func (rlr ResourceListResult) resourceListResultPreparer() (*http.Request, error) {\n\tif rlr.NextLink == nil || len(to.String(rlr.NextLink)) < 1 {\n\t\treturn nil, nil\n\t}\n\treturn autorest.Prepare(&http.Request{},\n\t\tautorest.AsJSON(),\n\t\tautorest.AsGet(),\n\t\tautorest.WithBaseURL(to.String(rlr.NextLink)))\n}",
"func (dpmlr DataPolicyManifestListResult) dataPolicyManifestListResultPreparer(ctx context.Context) (*http.Request, error) {\n\tif !dpmlr.hasNextLink() {\n\t\treturn nil, nil\n\t}\n\treturn autorest.Prepare((&http.Request{}).WithContext(ctx),\n\t\tautorest.AsJSON(),\n\t\tautorest.AsGet(),\n\t\tautorest.WithBaseURL(to.String(dpmlr.NextLink)))\n}",
"func (client StorageListResult) StorageListResultPreparer() (*http.Request, error) {\n\tif client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {\n\t\treturn nil, nil\n\t}\n\treturn autorest.Prepare(&http.Request{},\n\t\tautorest.AsJSON(),\n\t\tautorest.AsGet(),\n\t\tautorest.WithBaseURL(to.String(client.NextLink)))\n}"
] |
MetricKeyPrefix returns the metrics key prefix
|
func (p PostgresPlugin) MetricKeyPrefix() string {
if p.Prefix == "" {
p.Prefix = "postgres"
}
return p.Prefix
}
|
[
"func KeyPrefix(p string) []byte {\n\treturn []byte(p)\n}",
"func generateKeyPrefixID(prefix []byte, id uint64) []byte {\n\t// Handle the prefix.\n\tkey := append(prefix, prefixDelimiter)\n\n\t// Handle the item ID.\n\tkey = append(key, idToKey(id)...)\n\n\treturn key\n}",
"func TestConvertToGraphitePrefixKey(t *testing.T) {\n\tconfig := map[string]interface{}{\n\t\t\"interval\": \"10\",\n\t\t\"timeout\": \"10\",\n\t\t\"max_buffer_size\": \"100\",\n\t\t\"server\": \"test_server\",\n\t\t\"prefixKeys\": true,\n\t\t\"port\": 10101,\n\t}\n\n\tg := getTestGraphiteHandler(12, 13, 14)\n\tg.Configure(config)\n\n\twaitForSplitSecond()\n\tnow := time.Now().Unix()\n\tm := metric.New(\"TestMetric\")\n\n\tdims := map[string]string{\n\t\t\"container_id\": \"test-id\",\n\t\t\"container_name\": \"test-container\",\n\t}\n\tm.Dimensions = dims\n\n\tdpString := g.convertToGraphite(m)\n\n\tassert.Equal(t, fmt.Sprintf(\"container_id_container_name.TestMetric.container_id.test-id.container_name.test-container 0.000000 %d\\n\", now), dpString)\n}",
"func stripKvKeyPrefix(prefix string, full_key string) string {\n\tuse_key := full_key\n\t// Strip leading slash first.\n\tif use_key[0:1] == \"/\" {\n\t\tuse_key = use_key[1:]\n\t}\n\t//log.Printf(\"stripKvKeyPrefix(%s, %s) use_key slice: %s\\n\", prefix, full_key, use_key[0:len(prefix)])\n\tif use_key[0:len(prefix)] == prefix {\n\t\tuse_key = use_key[len(prefix):]\n\t}\n\t//log.Printf(\"stripKvKeyPrefix(%s, %s) new use_key 1: %s\\n\", prefix, full_key, use_key)\n\t// Strip leading slash again if required.\n\tif len(use_key) > 0 && use_key[0:1] == \"/\" {\n\t\tuse_key = use_key[1:]\n\t}\n\t//log.Printf(\"stripKvKeyPrefix(%s, %s) new use_key 2: %s\\n\", prefix, full_key, use_key)\n\treturn use_key\n}",
"func (m *Metric) GetKey() string {\n\tif m == nil || m.Key == nil {\n\t\treturn \"\"\n\t}\n\treturn *m.Key\n}",
"func metricKey(s string) string {\n\tin := []rune(s)\n\tvar out []rune\n\n\tfor i, r := range in {\n\t\tif !unicode.In(r, unicode.Letter, unicode.Number) {\n\t\t\tout = append(out, '_')\n\t\t\tcontinue\n\t\t}\n\t\tlr := unicode.ToLower(r)\n\t\t// Add an underscore if the current rune:\n\t\t// - is uppercase\n\t\t// - not the first rune\n\t\t// - is followed or preceded by a lowercase rune\n\t\t// - was not preceded by an underscore in the output\n\t\tif r != lr &&\n\t\t\ti > 0 &&\n\t\t\t(i+1) < len(in) &&\n\t\t\t(unicode.IsLower(in[i+1]) || unicode.IsLower(in[i-1])) &&\n\t\t\tout[len(out)-1] != '_' {\n\t\t\tout = append(out, '_')\n\t\t}\n\t\tout = append(out, lr)\n\t}\n\treturn string(out)\n}",
"func (t *tableCommon) IndexPrefix() kv.Key {\n\ttrace_util_0.Count(_tables_00000, 62)\n\treturn t.indexPrefix\n}"
] |
UnsetFirstName ensures that no value is present for FirstName, not even an explicit nil
|
func (o *RelationshipManager) UnsetFirstName() {
o.FirstName.Unset()
}
|
[
"func (o *GetSearchEmployeesParams) SetFirstName(firstName *string) {\n\to.FirstName = firstName\n}",
"func (o *Permissao) UnsetNome() {\n\to.Nome.Unset()\n}",
"func (uc *UserCreate) SetFirstName(s string) *UserCreate {\n\tuc.mutation.SetFirstName(s)\n\treturn uc\n}",
"func (uu *UserUpdate) ClearLastName() *UserUpdate {\n\tuu.last_name = nil\n\tuu.clearlast_name = true\n\treturn uu\n}",
"func (ps *protectedStruct) FirstName() string {\n\tps.mutex.Lock()\n\tdefer ps.mutex.Unlock()\n\treturn ps.somePerson.first\n}",
"func (o *InlineResponse20027Person) HasFirstName() bool {\n\tif o != nil && o.FirstName != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (puo *PatientrecordUpdateOne) ClearPrename() *PatientrecordUpdateOne {\n\tpuo.mutation.ClearPrename()\n\treturn puo\n}"
] |
CreateParcelVol mocks base method
|
func (m *MockNuvoVM) CreateParcelVol(arg0, arg1, arg2 string) (string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "CreateParcelVol", arg0, arg1, arg2)
ret0, _ := ret[0].(string)
ret1, _ := ret[1].(error)
return ret0, ret1
}
|
[
"func (m *MockNuvoVM) DestroyVol(arg0, arg1, arg2 string, arg3 bool) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DestroyVol\", arg0, arg1, arg2, arg3)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}",
"func newVolume(name, capacity, boundToClaimUID, boundToClaimName string, phase v1.PersistentVolumePhase, reclaimPolicy v1.PersistentVolumeReclaimPolicy, class string, annotations ...string) *v1.PersistentVolume {\n\tvolume := v1.PersistentVolume{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tResourceVersion: \"1\",\n\t\t},\n\t\tSpec: v1.PersistentVolumeSpec{\n\t\t\tCapacity: v1.ResourceList{\n\t\t\t\tv1.ResourceName(v1.ResourceStorage): resource.MustParse(capacity),\n\t\t\t},\n\t\t\tPersistentVolumeSource: v1.PersistentVolumeSource{\n\t\t\t\tGCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{},\n\t\t\t},\n\t\t\tAccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce, v1.ReadOnlyMany},\n\t\t\tPersistentVolumeReclaimPolicy: reclaimPolicy,\n\t\t\tStorageClassName: class,\n\t\t},\n\t\tStatus: v1.PersistentVolumeStatus{\n\t\t\tPhase: phase,\n\t\t},\n\t}\n\n\tif boundToClaimName != \"\" {\n\t\tvolume.Spec.ClaimRef = &v1.ObjectReference{\n\t\t\tKind: \"PersistentVolumeClaim\",\n\t\t\tAPIVersion: \"v1\",\n\t\t\tUID: types.UID(boundToClaimUID),\n\t\t\tNamespace: testNamespace,\n\t\t\tName: boundToClaimName,\n\t\t}\n\t}\n\n\tif len(annotations) > 0 {\n\t\tvolume.Annotations = make(map[string]string)\n\t\tfor _, a := range annotations {\n\t\t\tswitch a {\n\t\t\tcase annDynamicallyProvisioned:\n\t\t\t\tvolume.Annotations[a] = mockPluginName\n\t\t\tdefault:\n\t\t\t\tvolume.Annotations[a] = \"yes\"\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &volume\n}",
"func newSMBVolume(name string, size int64, params map[string]string) (*smbVolume, error) {\n\tvar source, subDir string\n\tsubDirReplaceMap := map[string]string{}\n\n\t// validate parameters (case-insensitive).\n\tfor k, v := range params {\n\t\tswitch strings.ToLower(k) {\n\t\tcase sourceField:\n\t\t\tsource = v\n\t\tcase subDirField:\n\t\t\tsubDir = v\n\t\tcase pvcNamespaceKey:\n\t\t\tsubDirReplaceMap[pvcNamespaceMetadata] = v\n\t\tcase pvcNameKey:\n\t\t\tsubDirReplaceMap[pvcNameMetadata] = v\n\t\tcase pvNameKey:\n\t\t\tsubDirReplaceMap[pvNameMetadata] = v\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"invalid parameter %s in storage class\", k)\n\t\t}\n\t}\n\n\tif source == \"\" {\n\t\treturn nil, fmt.Errorf(\"%v is a required parameter\", sourceField)\n\t}\n\n\tvol := &smbVolume{\n\t\tsource: source,\n\t\tsize: size,\n\t}\n\tif subDir == \"\" {\n\t\t// use pv name by default if not specified\n\t\tvol.subDir = name\n\t} else {\n\t\t// replace pv/pvc name namespace metadata in subDir\n\t\tvol.subDir = replaceWithMap(subDir, subDirReplaceMap)\n\t\t// make volume id unique if subDir is provided\n\t\tvol.uuid = name\n\t}\n\tvol.id = getVolumeIDFromSmbVol(vol)\n\treturn vol, nil\n}",
"func (_m *BundleRepository) Create(ctx context.Context, tenant string, item *model.Bundle) error {\n\tret := _m.Called(ctx, tenant, item)\n\n\tvar r0 error\n\tif rf, ok := ret.Get(0).(func(context.Context, string, *model.Bundle) error); ok {\n\t\tr0 = rf(ctx, tenant, item)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\n\treturn r0\n}",
"func (c *MockBlockStorageClient) CreateVolume(ctx context.Context, details core.CreateVolumeDetails) (*core.Volume, error) {\n\treturn &core.Volume{Id: &VolumeBackupID}, nil\n}",
"func NewMock() *Mock {\n\treturn &Mock{VolumesMock: &VolumesServiceMock{}}\n}",
"func TestCreate(t *testing.T) {\n\ttestCases := []struct {\n\t\tname string\n\t\tcontrolPlanes func() (runtime.Object, runtime.Object)\n\t}{\n\t\t{\n\t\t\tname: \"default-version.v2\",\n\t\t\tcontrolPlanes: func() (runtime.Object, runtime.Object) {\n\t\t\t\tcontrolPlane := newControlPlaneV2(\"istio-system\")\n\t\t\t\tcontrolPlane.Spec.Version = \"\"\n\n\t\t\t\tmutatedControlPlane := controlPlane.DeepCopy()\n\t\t\t\tmutatedControlPlane.Spec.Version = versions.DefaultVersion.String()\n\t\t\t\treturn controlPlane, mutatedControlPlane\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"default-profile.v1\",\n\t\t\tcontrolPlanes: func() (runtime.Object, runtime.Object) {\n\t\t\t\tcontrolPlane := newControlPlaneV1(\"istio-system\")\n\t\t\t\tcontrolPlane.Spec.Template = \"\"\n\n\t\t\t\tmutatedControlPlane := controlPlane.DeepCopy()\n\t\t\t\tmutatedControlPlane.Spec.Profiles = []string{maistrav1.DefaultTemplate}\n\t\t\t\treturn controlPlane, mutatedControlPlane\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"default-profile.v2\",\n\t\t\tcontrolPlanes: func() (runtime.Object, runtime.Object) {\n\t\t\t\tcontrolPlane := newControlPlaneV2(\"istio-system\")\n\t\t\t\tcontrolPlane.Spec.Profiles = nil\n\n\t\t\t\tmutatedControlPlane := controlPlane.DeepCopy()\n\t\t\t\tmutatedControlPlane.Spec.Profiles = []string{maistrav1.DefaultTemplate}\n\t\t\t\treturn controlPlane, mutatedControlPlane\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tcontrolPlane, mutatedControlPlane := tc.controlPlanes()\n\t\t\tmutator := createControlPlaneMutatorTestFixture()\n\t\t\tresponse := mutator.Handle(ctx, newCreateRequest(controlPlane))\n\t\t\texpectedResponse := PatchResponse(toRawExtension(controlPlane), mutatedControlPlane)\n\t\t\tassert.DeepEquals(response, expectedResponse, \"Expected the response to set the version on create\", t)\n\t\t})\n\t}\n}"
] |
NewBuildClient creates a new BuildClient instance that is set to the initial state (i.e., being idle).
|
func NewBuildClient(scheduler remoteworker.OperationQueueClient, buildExecutor BuildExecutor, filePool filesystem.FilePool, clock clock.Clock, workerID map[string]string, instanceNamePrefix digest.InstanceName, platform *remoteexecution.Platform, sizeClass uint32) *BuildClient {
return &BuildClient{
scheduler: scheduler,
buildExecutor: buildExecutor,
filePool: filePool,
clock: clock,
instanceNamePrefix: instanceNamePrefix,
instanceNamePatcher: digest.NewInstanceNamePatcher(digest.EmptyInstanceName, instanceNamePrefix),
request: remoteworker.SynchronizeRequest{
WorkerId: workerID,
InstanceNamePrefix: instanceNamePrefix.String(),
Platform: platform,
SizeClass: sizeClass,
CurrentState: &remoteworker.CurrentState{
WorkerState: &remoteworker.CurrentState_Idle{
Idle: &emptypb.Empty{},
},
},
},
nextSynchronizationAt: clock.Now(),
}
}
|
[
"func NewGitClient(t mockConstructorTestingTNewGitClient) *GitClient {\n\tmock := &GitClient{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func (c *Client) Build(params map[string]interface{}) (api.ClientAPI, error) {\n\t// tenantName, _ := params[\"name\"].(string)\n\n\tidentity, _ := params[\"identity\"].(map[string]interface{})\n\tcompute, _ := params[\"compute\"].(map[string]interface{})\n\t// network, _ := params[\"network\"].(map[string]interface{})\n\n\tusername, _ := identity[\"Username\"].(string)\n\tpassword, _ := identity[\"Password\"].(string)\n\tdomainName, _ := identity[\"UserDomainName\"].(string)\n\n\tregion, _ := compute[\"Region\"].(string)\n\tprojectName, _ := compute[\"ProjectName\"].(string)\n\tprojectID, _ := compute[\"ProjectID\"].(string)\n\tdefaultImage, _ := compute[\"DefaultImage\"].(string)\n\n\treturn AuthenticatedClient(\n\t\tAuthOptions{\n\t\t\tUsername: username,\n\t\t\tPassword: password,\n\t\t\tRegion: region,\n\t\t\tDomainName: domainName,\n\t\t\tProjectName: projectName,\n\t\t\tProjectID: projectID,\n\t\t},\n\t\topenstack.CfgOptions{\n\t\t\tDefaultImage: defaultImage,\n\t\t},\n\t)\n}",
"func NewClient() *Client {\n baseURL, _ := url.Parse(defaultBaseURL)\n return &Client{client: http.DefaultClient, BaseURL: baseURL, UserAgent: userAgent}\n}",
"func New() *Client {\n\treturn &Client{repeat: false}\n}",
"func NewClient(ctx context.Context, auth *repository.Auth) (repository.Client, error) {\n\tif auth == nil {\n\t\treturn nil, fmt.Errorf(\"Must provide authentication\")\n\t}\n\tif auth.Type() != repository.TokenAuthType {\n\t\treturn nil, fmt.Errorf(\"Unsupported auth type: %s\", auth.Type())\n\t}\n\n\tsts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: auth.Token},\n\t)\n\thttpClient := oauth2.NewClient(ctx, sts)\n\trtw := newRoundTripperWrapper(httpClient.Transport)\n\thttpClient.Transport = rtw\n\treturn &client{\n\t\tghClient: githubv4.NewClient(httpClient),\n\t}, nil\n}",
"func (cb *ClientBuilder) Build() *Client {\n\treturn cb.client\n}",
"func NewBuild(jenkins *gojenkins.Jenkins, regex string) *Build {\n\treturn &Build{jenkins, regex}\n}"
] |
WithColumnKeys sets explicit column keys. It's also possible to set a key retriever on this property object. Upon file decryption, availability of explicit keys is checked before invocation of the retreiver callback. If an explicit key is available for a footer or a column, its key metadata will be ignored.
|
func WithColumnKeys(decrypt ColumnPathToDecryptionPropsMap) FileDecryptionOption {
return func(cfg *fileDecryptConfig) {
if len(decrypt) == 0 {
return
}
if len(cfg.colDecrypt) != 0 {
panic("column properties already set")
}
for _, v := range decrypt {
if v.IsUtilized() {
panic("parquet: column properties utilized in another file")
}
v.SetUtilized()
}
cfg.colDecrypt = decrypt
}
}
|
[
"func (b CreateIndexBuilder) Columns(columns ...string) CreateIndexBuilder {\n\treturn builder.Set(b, \"Columns\", columns).(CreateIndexBuilder)\n}",
"func NewColumnDecryptionProperties(column string, opts ...ColumnDecryptOption) *ColumnDecryptionProperties {\n\tvar cfg columnDecryptConfig\n\tfor _, o := range opts {\n\t\to(&cfg)\n\t}\n\n\treturn &ColumnDecryptionProperties{\n\t\tcolumnPath: column,\n\t\tutilized: false,\n\t\tkey: cfg.key,\n\t}\n}",
"func (fe *FileEncryptionProperties) ColumnEncryptionProperties(path string) *ColumnEncryptionProperties {\n\tif len(fe.encryptedCols) == 0 {\n\t\treturn NewColumnEncryptionProperties(path)\n\t}\n\tif c, ok := fe.encryptedCols[path]; ok {\n\t\treturn c\n\t}\n\treturn nil\n}",
"func ColumnKey(maps []map[interface{}]interface{}, value interface{}, key interface{}) map[interface{}]interface{} {\n\tlists := make(map[interface{}]interface{})\n\tfor index, mapValue := range maps {\n\t\tv, VOK := mapValue[value]\n\t\tk, KeyOk := mapValue[key]\n\t\tif VOK && KeyOk {\n\t\t\tlists[k] = v\n\t\t} else if VOK && (!KeyOk) {\n\t\t\tlists[index] = v\n\t\t}\n\t}\n\treturn lists\n}",
"func (ce *ColumnEncryptionProperties) IsEncryptedWithFooterKey() bool {\n\treturn ce.encryptedWithFooterKey\n}",
"func (en *EncryptionConfigTestSuite) TestEncryptTwoColumnsAndFooterWithAadPrefix() {\n\tencryptCols := make(parquet.ColumnPathToEncryptionPropsMap)\n\tencryptCols[en.pathToDoubleField] = parquet.NewColumnEncryptionProperties(en.pathToDoubleField, parquet.WithKey(en.columnEncryptionKey1), parquet.WithKeyID(\"kc1\"))\n\tencryptCols[en.pathToFloatField] = parquet.NewColumnEncryptionProperties(en.pathToFloatField, parquet.WithKey(en.columnEncryptionKey2), parquet.WithKeyID(\"kc2\"))\n\n\tprops := parquet.NewFileEncryptionProperties(en.footerEncryptionKey, parquet.WithFooterKeyMetadata(\"kf\"), parquet.WithEncryptedColumns(encryptCols), parquet.WithAadPrefix(en.fileName))\n\ten.encryptFile(props, \"tmp_encrypt_columns_and_footer_aad.parquet.encrypted\")\n}",
"func WithKeyMetadata(keyMeta string) ColumnEncryptOption {\n\treturn func(c *colEncryptConfig) {\n\t\tc.keyMetadata = keyMeta\n\t}\n}"
] |
NewProjectV1UsingExternalConfig : constructs an instance of ProjectV1 with passed in options and external configuration.
|
func NewProjectV1UsingExternalConfig(options *ProjectV1Options) (project *ProjectV1, err error) {
if options.ServiceName == "" {
options.ServiceName = DefaultServiceName
}
if options.Authenticator == nil {
options.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName)
if err != nil {
return
}
}
project, err = NewProjectV1(options)
if err != nil {
return
}
err = project.Service.ConfigureService(options.ServiceName)
if err != nil {
return
}
if options.URL != "" {
err = project.Service.SetServiceURL(options.URL)
}
return
}
|
[
"func (s *Server) Create(ctx context.Context, q *project.ProjectCreateRequest) (*v1alpha1.AppProject, error) {\n\tif q.Project == nil {\n\t\treturn nil, status.Errorf(codes.InvalidArgument, \"missing payload 'project' in request\")\n\t}\n\tif err := s.enf.EnforceErr(ctx.Value(\"claims\"), rbacpolicy.ResourceProjects, rbacpolicy.ActionCreate, q.Project.Name); err != nil {\n\t\treturn nil, err\n\t}\n\tq.Project.NormalizePolicies()\n\terr := validateProject(q.Project)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error validating project: %w\", err)\n\t}\n\tres, err := s.appclientset.ArgoprojV1alpha1().AppProjects(s.ns).Create(ctx, q.Project, metav1.CreateOptions{})\n\tif apierr.IsAlreadyExists(err) {\n\t\texisting, getErr := s.appclientset.ArgoprojV1alpha1().AppProjects(s.ns).Get(ctx, q.Project.Name, metav1.GetOptions{})\n\t\tif getErr != nil {\n\t\t\treturn nil, status.Errorf(codes.Internal, \"unable to check existing project details: %v\", getErr)\n\t\t}\n\t\tif q.GetUpsert() {\n\t\t\tif err := s.enf.EnforceErr(ctx.Value(\"claims\"), rbacpolicy.ResourceProjects, rbacpolicy.ActionUpdate, q.GetProject().Name); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\texisting.Spec = q.GetProject().Spec\n\t\t\tres, err = s.appclientset.ArgoprojV1alpha1().AppProjects(s.ns).Update(ctx, existing, metav1.UpdateOptions{})\n\t\t} else {\n\t\t\tif !reflect.DeepEqual(existing.Spec, q.GetProject().Spec) {\n\t\t\t\treturn nil, status.Errorf(codes.InvalidArgument, argo.GenerateSpecIsDifferentErrorMessage(\"project\", existing.Spec, q.GetProject().Spec))\n\t\t\t}\n\t\t\treturn existing, nil\n\t\t}\n\t}\n\tif err == nil {\n\t\ts.logEvent(res, ctx, argo.EventReasonResourceCreated, \"created project\")\n\t}\n\treturn res, err\n}",
"func New(name string) *Project {\n\tp := &Project{\n\t\tName: name,\n\t\tImage: fmt.Sprintf(\"chainkit-%s\", name),\n\t\tBinaries: &binaries{\n\t\t\tCLI: name + \"cli\",\n\t\t\tDaemon: name + \"d\",\n\t\t},\n\t}\n\treturn p\n}",
"func NewProjectController(opt *Options) ProjectControl {\n\treturn &project{\n\t\toption: opt,\n\t}\n}",
"func NewProject(owner, repo string) (*api.Project, error) {\n\tghAPI, projectURL := buildProjectURLs(owner, repo)\n\n\tresp, err := http.Get(ghAPI)\n\n\tif !is2xx(resp) {\n\t\treturn nil, fmt.Errorf(\"Github responded with a %d code.\\n\", resp.StatusCode)\n\t}\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tvar releases []api.Release\n\n\tif err := json.Unmarshal(data, &releases); err != nil {\n\t\treturn nil, err\n\t}\n\treturn api.NewProject(owner, repo, projectURL, releases), nil\n}",
"func NewEnterpriseManagementV1UsingExternalConfig(options *EnterpriseManagementV1Options) (enterpriseManagement *EnterpriseManagementV1, err error) {\n\tif options.ServiceName == \"\" {\n\t\toptions.ServiceName = DefaultServiceName\n\t}\n\n\tif options.Authenticator == nil {\n\t\toptions.Authenticator, err = core.GetAuthenticatorFromEnvironment(options.ServiceName)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\tenterpriseManagement, err = NewEnterpriseManagementV1(options)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = enterpriseManagement.Service.ConfigureService(options.ServiceName)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif options.URL != \"\" {\n\t\terr = enterpriseManagement.Service.SetServiceURL(options.URL)\n\t}\n\treturn\n}",
"func ProjectCreate(p project.APIProject, c *cli.Context) error {\n\toptions := options.Create{\n\t\tNoRecreate: c.Bool(\"no-recreate\"),\n\t\tForceRecreate: c.Bool(\"force-recreate\"),\n\t\tNoBuild: c.Bool(\"no-build\"),\n\t}\n\terr := p.Create(context.Background(), options, c.Args()...)\n\tif err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n\t}\n\treturn nil\n}",
"func NewProjectFromPostForm(pf url.Values) (*Project, error) {\n\tp := NewProject()\n\tdecoder := schema.NewDecoder()\n\n\tif err := decoder.Decode(p, pf); err != nil {\n\t\tfmt.Println(err)\n\t\treturn nil, errors.New(\"Invalid project input. Form-data expected\")\n\t}\n\n\tif !p.IsValid() {\n\t\treturn nil, errors.New(\"Incomplete project data\")\n\t}\n\n\treturn p, nil\n}"
] |
FirstPoolWithAvailableQuota returns the first pool ID in the list of pools with available addresses. This function always returns types.PoolNotExists
|
func (n *NoOpAllocator) FirstPoolWithAvailableQuota(preferredPoolIDs []types.PoolID) (types.PoolID, int) {
return types.PoolNotExists, 0
}
|
[
"func (sp *Storagepool) FindStoragePoolByName(ctx context.Context, poolName string) (*types.StoragePool, error) {\n\tif len(poolName) == 0 {\n\t\treturn nil, errors.New(\"poolName shouldn't be empty\")\n\t}\n\tspResponse := &types.StoragePool{}\n\terr := sp.client.executeWithRetryAuthenticate(ctx, http.MethodGet, fmt.Sprintf(api.UnityAPIGetResourceByNameWithFieldsURI, api.PoolAction, poolName, StoragePoolFields), nil, spResponse)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"find storage pool by name failed %s err: %v\", poolName, err)\n\t}\n\n\treturn spResponse, nil\n}",
"func (a *Adaptor) getFirstIPLocation() ([]int, *adaptor.AdaptorError) {\n\tvar locations []int\n\n\tparams := url.Values{\n\t\t\"policy\": []string{FirstIPPolicy},\n\t}\n\n\terr := a.apidClient.DoFunction(\"getAssignmentPolicy\", params, &locations)\n\tif err != nil {\n\t\tln.Err(\"error when getting assignment policy\", ln.Map{\"error\": err.Error()})\n\t\treturn []int{}, adaptor.NewError(\"error when getting first ip location\")\n\t}\n\n\treturn locations, nil\n}",
"func (n *NoOpAllocator) GetPoolQuota() types.PoolQuotaMap {\n\treturn types.PoolQuotaMap{}\n}",
"func FindFirstAvailableServerConfig(cfg config.StorageClusterConfig) (serverCfg config.StorageServerConfig, err error) {\n\tfor _, serverCfg = range cfg.Servers {\n\t\tif serverCfg.State == config.StorageServerStateOnline {\n\t\t\treturn\n\t\t}\n\t}\n\n\terr = ErrNoServersAvailable\n\treturn\n}",
"func (c *Client) MinimumLedgerSlot(ctx context.Context) (uint64, error) {\n\tres, err := c.RpcClient.MinimumLedgerSlot(ctx)\n\terr = checkRpcResult(res.GeneralResponse, err)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn res.Result, nil\n}",
"func PoolLastBlock(request []string) (uint32, error) {\n\tapikey, userid, err := splitApiKey(request[1])\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tmposClient := mpos.NewMposClient(nil, request[0], apikey, userid, userAgent)\n\tmposClient.SetDebug(debug)\n\tstatus, err := mposClient.GetPoolStatus()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn status.LastBlock, nil\n}",
"func (mio *Mio) GetPool() string {\n if mio.obj == nil {\n return \"\"\n }\n p := mio.objPool\n\n return fmt.Sprintf(\"0x%x:0x%x\", p.f_container, p.f_key)\n}"
] |
decodeHTTPSumRequest is a transport/http.DecodeRequestFunc that decodes a JSONencoded request from the HTTP request body. Primarily useful in a server.
|
func decodeHTTPSumRequest(_ context.Context, r *http.Request) (interface{}, error) {
var req endpoints.SumRequest
err := json.NewDecoder(r.Body).Decode(&req)
return req, err
}
|
[
"func decodeHTTPNewJobRequest(_ context.Context, r *http.Request) (interface{}, error) {\n\tvar req endpoint.NewJobRequest\n\terr := json.NewDecoder(r.Body).Decode(&req)\n\treturn req, err\n}",
"func BasicDecodeRequest(ctx context.Context, req *http.Request) (interface{}, error) {\n\treturn DecodeRequestWithHeaders(ctx, req, map[string]string{}, map[string]string{}, nil)\n}",
"func decode(r *http.Request, v ok) error {\n\tif r.Body == nil {\n\t\treturn errors.New(\"Invalid Body\")\n\t}\n\tif err := json.NewDecoder(r.Body).Decode(v); err != nil {\n\t\treturn err\n\t}\n\treturn v.OK()\n}",
"func (mh *MessageHandler) decodeRequest(httpRequest *http.Request) (deviceRequest *Request, err error) {\n\tdeviceRequest, err = DecodeRequest(httpRequest.Body, mh.Decoders)\n\tif err == nil {\n\t\tdeviceRequest = deviceRequest.WithContext(httpRequest.Context())\n\t}\n\n\treturn\n}",
"func decodePostAcceptDealRequest(_ context.Context, r *http1.Request) (interface{}, error) {\n\treq := endpoint.PostAcceptDealRequest{}\n\terr := json.NewDecoder(r.Body).Decode(&req)\n\treturn req, err\n}",
"func (tbr *TransportBaseReqquesst) DecodeAddRequest(data []byte) (dto.BasicRequest, error) {\n\trequest := dto.AddRequest{}\n\terr := json.Unmarshal(data, &request)\n\tif err != nil {\n\t\treturn dto.BasicRequest{}, err\n\t}\n\treturn dto.BasicRequest{Path: \"Add\", RequestId: \"xxx\", Request: request}, nil\n}",
"func DecodeAddRequest(mux goahttp.Muxer, decoder func(*http.Request) goahttp.Decoder) kithttp.DecodeRequestFunc {\n\tdec := server.DecodeAddRequest(mux, decoder)\n\treturn func(ctx context.Context, r *http.Request) (interface{}, error) {\n\t\tr = r.WithContext(ctx)\n\t\treturn dec(r)\n\t}\n}"
] |
SortStable sorts slice of HelpAppUpdate.
|
func (s HelpAppUpdateArray) SortStable(less func(a, b HelpAppUpdate) bool) HelpAppUpdateArray {
sort.SliceStable(s, func(i, j int) bool {
return less(s[i], s[j])
})
return s
}
|
[
"func (s EncryptedChatRequestedArray) SortByDate() EncryptedChatRequestedArray {\n\treturn s.Sort(func(a, b EncryptedChatRequested) bool {\n\t\treturn a.GetDate() < b.GetDate()\n\t})\n}",
"func (p SliceUI) Sort() { sort.Sort(p) }",
"func (s HelpAppUpdateClassArray) Sort(less func(a, b HelpAppUpdateClass) bool) HelpAppUpdateClassArray {\n\tsort.Slice(s, func(i, j int) bool {\n\t\treturn less(s[i], s[j])\n\t})\n\treturn s\n}",
"func (s MessagesFavedStickersArray) Sort(less func(a, b MessagesFavedStickers) bool) MessagesFavedStickersArray {\n\tsort.Slice(s, func(i, j int) bool {\n\t\treturn less(s[i], s[j])\n\t})\n\treturn s\n}",
"func (s InputSecureFileUploadedArray) SortStableByID() InputSecureFileUploadedArray {\n\treturn s.SortStable(func(a, b InputSecureFileUploaded) bool {\n\t\treturn a.GetID() < b.GetID()\n\t})\n}",
"func (s SecurePlainEmailArray) SortStable(less func(a, b SecurePlainEmail) bool) SecurePlainEmailArray {\n\tsort.SliceStable(s, func(i, j int) bool {\n\t\treturn less(s[i], s[j])\n\t})\n\treturn s\n}",
"func (s EncryptedChatRequestedArray) SortStable(less func(a, b EncryptedChatRequested) bool) EncryptedChatRequestedArray {\n\tsort.SliceStable(s, func(i, j int) bool {\n\t\treturn less(s[i], s[j])\n\t})\n\treturn s\n}"
] |
New creates a Server with zerodown.Listener, and use defines parameters in server for running an HTTP server. It will listen on server.Addr
|
func New(server *http.Server) (*Server, error) {
listener, err := zerodown.Listen("tcp", server.Addr)
if err != nil {
return nil, err
}
return &Server{
server: server,
listener: listener,
}, nil
}
|
[
"func NewServer() *Server {\n\n\treturn &Server{\n\t\tConfig: Config{\n\t\t\tPort: \":8080\",\n\t\t},\n\t}\n}",
"func New(auth Authorizer, errorWriter ErrorWriter, clean CleanCredentials) *Server {\n\treturn &Server{\n\t\tpeers: map[string]peer{},\n\t\tauthorizer: auth,\n\t\tcleanCredentials: clean,\n\t\terrorWriter: errorWriter,\n\t\tsessions: newSessionManager(),\n\t}\n}",
"func NewServer(conf *ServerConfig) *Server {\n\treturn &Server{\n\t\tHTTP: &http.Server{\n\t\t\tAddr: conf.ListenAddress,\n\t\t\tHandler: digestRoutes(conf.Routes),\n\t\t},\n\t\tconfig: conf,\n\t}\n}",
"func (s *Server) New() (*http.Server, error) {\n\taddr := s.Address\n\tif addr == \"\" {\n\t\taddr = defaultAddr\n\t}\n\th, err := s.Handler()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsrv := &http.Server{\n\t\tHandler: h,\n\t\tAddr: addr,\n\t}\n\treturn srv, nil\n}",
"func New(e *goastarter.Endpoints, uh goagrpc.UnaryHandler) *Server {\n\treturn &Server{\n\t\tAddH: NewAddHandler(e.Add, uh),\n\t}\n}",
"func NewServer(ln net.Listener, id metainfo.Hash, h Handler, c ...Config) *Server {\n\tif id.IsZero() {\n\t\tpanic(\"the peer node id must not be empty\")\n\t}\n\n\tvar conf Config\n\tconf.set(c...)\n\treturn &Server{Listener: ln, ID: id, Handler: h, Config: conf}\n}",
"func New(cfg config.ServerConfig, db database.Database) *Server {\n\treturn &Server{\n\t\trouter: gin.Default(),\n\t\tport: cfg.Port,\n\t\tdb: db,\n\t}\n}"
] |
AssignProperties_To_Servers_ConnectionPolicy_STATUS populates the provided destination Servers_ConnectionPolicy_STATUS from our Servers_ConnectionPolicy_STATUS
|
func (policy *Servers_ConnectionPolicy_STATUS) AssignProperties_To_Servers_ConnectionPolicy_STATUS(destination *v20211101s.Servers_ConnectionPolicy_STATUS) error {
// Create a new property bag
propertyBag := genruntime.NewPropertyBag()
// Conditions
destination.Conditions = genruntime.CloneSliceOfCondition(policy.Conditions)
// ConnectionType
if policy.ConnectionType != nil {
connectionType := string(*policy.ConnectionType)
destination.ConnectionType = &connectionType
} else {
destination.ConnectionType = nil
}
// Id
destination.Id = genruntime.ClonePointerToString(policy.Id)
// Kind
destination.Kind = genruntime.ClonePointerToString(policy.Kind)
// Location
destination.Location = genruntime.ClonePointerToString(policy.Location)
// Name
destination.Name = genruntime.ClonePointerToString(policy.Name)
// Type
destination.Type = genruntime.ClonePointerToString(policy.Type)
// Update the property bag
if len(propertyBag) > 0 {
destination.PropertyBag = propertyBag
} else {
destination.PropertyBag = nil
}
// No error
return nil
}
|
[
"func (trigger *RequestsBasedTrigger_STATUS) AssignProperties_To_RequestsBasedTrigger_STATUS(destination *v20220301s.RequestsBasedTrigger_STATUS) error {\n\t// Clone the existing property bag\n\tpropertyBag := genruntime.NewPropertyBag(trigger.PropertyBag)\n\n\t// Count\n\tdestination.Count = genruntime.ClonePointerToInt(trigger.Count)\n\n\t// TimeInterval\n\tdestination.TimeInterval = genruntime.ClonePointerToString(trigger.TimeInterval)\n\n\t// Update the property bag\n\tif len(propertyBag) > 0 {\n\t\tdestination.PropertyBag = propertyBag\n\t} else {\n\t\tdestination.PropertyBag = nil\n\t}\n\n\t// Invoke the augmentConversionForRequestsBasedTrigger_STATUS interface (if implemented) to customize the conversion\n\tvar triggerAsAny any = trigger\n\tif augmentedTrigger, ok := triggerAsAny.(augmentConversionForRequestsBasedTrigger_STATUS); ok {\n\t\terr := augmentedTrigger.AssignPropertiesTo(destination)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"calling augmented AssignPropertiesTo() for conversion\")\n\t\t}\n\t}\n\n\t// No error\n\treturn nil\n}",
"func (policy *BackupPolicy_STATUS) AssignProperties_To_BackupPolicy_STATUS(destination *v20210515s.BackupPolicy_STATUS) error {\n\t// Create a new property bag\n\tpropertyBag := genruntime.NewPropertyBag()\n\n\t// Continuous\n\tif policy.Continuous != nil {\n\t\tvar continuous v20210515s.ContinuousModeBackupPolicy_STATUS\n\t\terr := policy.Continuous.AssignProperties_To_ContinuousModeBackupPolicy_STATUS(&continuous)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"calling AssignProperties_To_ContinuousModeBackupPolicy_STATUS() to populate field Continuous\")\n\t\t}\n\t\tdestination.Continuous = &continuous\n\t} else {\n\t\tdestination.Continuous = nil\n\t}\n\n\t// Periodic\n\tif policy.Periodic != nil {\n\t\tvar periodic v20210515s.PeriodicModeBackupPolicy_STATUS\n\t\terr := policy.Periodic.AssignProperties_To_PeriodicModeBackupPolicy_STATUS(&periodic)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"calling AssignProperties_To_PeriodicModeBackupPolicy_STATUS() to populate field Periodic\")\n\t\t}\n\t\tdestination.Periodic = &periodic\n\t} else {\n\t\tdestination.Periodic = nil\n\t}\n\n\t// Update the property bag\n\tif len(propertyBag) > 0 {\n\t\tdestination.PropertyBag = propertyBag\n\t} else {\n\t\tdestination.PropertyBag = nil\n\t}\n\n\t// No error\n\treturn nil\n}",
"func (schema *ManagementPolicySchema_STATUS) AssignProperties_To_ManagementPolicySchema_STATUS(destination *v20220901s.ManagementPolicySchema_STATUS) error {\n\t// Clone the existing property bag\n\tpropertyBag := genruntime.NewPropertyBag(schema.PropertyBag)\n\n\t// Rules\n\tif schema.Rules != nil {\n\t\truleList := make([]v20220901s.ManagementPolicyRule_STATUS, len(schema.Rules))\n\t\tfor ruleIndex, ruleItem := range schema.Rules {\n\t\t\t// Shadow the loop variable to avoid aliasing\n\t\t\truleItem := ruleItem\n\t\t\tvar rule v20220901s.ManagementPolicyRule_STATUS\n\t\t\terr := ruleItem.AssignProperties_To_ManagementPolicyRule_STATUS(&rule)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"calling AssignProperties_To_ManagementPolicyRule_STATUS() to populate field Rules\")\n\t\t\t}\n\t\t\truleList[ruleIndex] = rule\n\t\t}\n\t\tdestination.Rules = ruleList\n\t} else {\n\t\tdestination.Rules = nil\n\t}\n\n\t// Update the property bag\n\tif len(propertyBag) > 0 {\n\t\tdestination.PropertyBag = propertyBag\n\t} else {\n\t\tdestination.PropertyBag = nil\n\t}\n\n\t// Invoke the augmentConversionForManagementPolicySchema_STATUS interface (if implemented) to customize the conversion\n\tvar schemaAsAny any = schema\n\tif augmentedSchema, ok := schemaAsAny.(augmentConversionForManagementPolicySchema_STATUS); ok {\n\t\terr := augmentedSchema.AssignPropertiesTo(destination)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"calling augmented AssignPropertiesTo() for conversion\")\n\t\t}\n\t}\n\n\t// No error\n\treturn nil\n}",
"func (setting *DatabaseAccounts_MongodbDatabases_Collections_ThroughputSetting_STATUS) AssignProperties_To_DatabaseAccounts_MongodbDatabases_Collections_ThroughputSetting_STATUS(destination *v20210515s.DatabaseAccounts_MongodbDatabases_Collections_ThroughputSetting_STATUS) error {\n\t// Create a new property bag\n\tpropertyBag := genruntime.NewPropertyBag()\n\n\t// Conditions\n\tdestination.Conditions = genruntime.CloneSliceOfCondition(setting.Conditions)\n\n\t// Id\n\tdestination.Id = genruntime.ClonePointerToString(setting.Id)\n\n\t// Location\n\tdestination.Location = genruntime.ClonePointerToString(setting.Location)\n\n\t// Name\n\tdestination.Name = genruntime.ClonePointerToString(setting.Name)\n\n\t// Resource\n\tif setting.Resource != nil {\n\t\tvar resource v20210515s.ThroughputSettingsGetProperties_Resource_STATUS\n\t\terr := setting.Resource.AssignProperties_To_ThroughputSettingsGetProperties_Resource_STATUS(&resource)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"calling AssignProperties_To_ThroughputSettingsGetProperties_Resource_STATUS() to populate field Resource\")\n\t\t}\n\t\tdestination.Resource = &resource\n\t} else {\n\t\tdestination.Resource = nil\n\t}\n\n\t// Tags\n\tdestination.Tags = genruntime.CloneMapOfStringToString(setting.Tags)\n\n\t// Type\n\tdestination.Type = genruntime.ClonePointerToString(setting.Type)\n\n\t// Update the property bag\n\tif len(propertyBag) > 0 {\n\t\tdestination.PropertyBag = propertyBag\n\t} else {\n\t\tdestination.PropertyBag = nil\n\t}\n\n\t// No error\n\treturn nil\n}",
"func (info *ConnStringInfo_STATUS) AssignProperties_To_ConnStringInfo_STATUS(destination *v20220301s.ConnStringInfo_STATUS) error {\n\t// Clone the existing property bag\n\tpropertyBag := genruntime.NewPropertyBag(info.PropertyBag)\n\n\t// ConnectionString\n\tdestination.ConnectionString = genruntime.ClonePointerToString(info.ConnectionString)\n\n\t// Name\n\tdestination.Name = genruntime.ClonePointerToString(info.Name)\n\n\t// Type\n\tdestination.Type = genruntime.ClonePointerToString(info.Type)\n\n\t// Update the property bag\n\tif len(propertyBag) > 0 {\n\t\tdestination.PropertyBag = propertyBag\n\t} else {\n\t\tdestination.PropertyBag = nil\n\t}\n\n\t// Invoke the augmentConversionForConnStringInfo_STATUS interface (if implemented) to customize the conversion\n\tvar infoAsAny any = info\n\tif augmentedInfo, ok := infoAsAny.(augmentConversionForConnStringInfo_STATUS); ok {\n\t\terr := augmentedInfo.AssignPropertiesTo(destination)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"calling augmented AssignPropertiesTo() for conversion\")\n\t\t}\n\t}\n\n\t// No error\n\treturn nil\n}",
"func (topic *Topic_STATUS) AssignProperties_To_Topic_STATUS(destination *v20200601s.Topic_STATUS) error {\n\t// Clone the existing property bag\n\tpropertyBag := genruntime.NewPropertyBag(topic.PropertyBag)\n\n\t// Conditions\n\tdestination.Conditions = genruntime.CloneSliceOfCondition(topic.Conditions)\n\n\t// Endpoint\n\tdestination.Endpoint = genruntime.ClonePointerToString(topic.Endpoint)\n\n\t// Id\n\tdestination.Id = genruntime.ClonePointerToString(topic.Id)\n\n\t// InboundIpRules\n\tif topic.InboundIpRules != nil {\n\t\tinboundIpRuleList := make([]v20200601s.InboundIpRule_STATUS, len(topic.InboundIpRules))\n\t\tfor inboundIpRuleIndex, inboundIpRuleItem := range topic.InboundIpRules {\n\t\t\t// Shadow the loop variable to avoid aliasing\n\t\t\tinboundIpRuleItem := inboundIpRuleItem\n\t\t\tvar inboundIpRule v20200601s.InboundIpRule_STATUS\n\t\t\terr := inboundIpRuleItem.AssignProperties_To_InboundIpRule_STATUS(&inboundIpRule)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"calling AssignProperties_To_InboundIpRule_STATUS() to populate field InboundIpRules\")\n\t\t\t}\n\t\t\tinboundIpRuleList[inboundIpRuleIndex] = inboundIpRule\n\t\t}\n\t\tdestination.InboundIpRules = inboundIpRuleList\n\t} else {\n\t\tdestination.InboundIpRules = nil\n\t}\n\n\t// InputSchema\n\tdestination.InputSchema = genruntime.ClonePointerToString(topic.InputSchema)\n\n\t// InputSchemaMapping\n\tif topic.InputSchemaMapping != nil {\n\t\tvar inputSchemaMapping v20200601s.InputSchemaMapping_STATUS\n\t\terr := topic.InputSchemaMapping.AssignProperties_To_InputSchemaMapping_STATUS(&inputSchemaMapping)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"calling AssignProperties_To_InputSchemaMapping_STATUS() to populate field InputSchemaMapping\")\n\t\t}\n\t\tdestination.InputSchemaMapping = &inputSchemaMapping\n\t} else {\n\t\tdestination.InputSchemaMapping = nil\n\t}\n\n\t// Location\n\tdestination.Location = genruntime.ClonePointerToString(topic.Location)\n\n\t// MetricResourceId\n\tdestination.MetricResourceId = genruntime.ClonePointerToString(topic.MetricResourceId)\n\n\t// Name\n\tdestination.Name = genruntime.ClonePointerToString(topic.Name)\n\n\t// PrivateEndpointConnections\n\tif topic.PrivateEndpointConnections != nil {\n\t\tprivateEndpointConnectionList := make([]v20200601s.PrivateEndpointConnection_STATUS_Topic_SubResourceEmbedded, len(topic.PrivateEndpointConnections))\n\t\tfor privateEndpointConnectionIndex, privateEndpointConnectionItem := range topic.PrivateEndpointConnections {\n\t\t\t// Shadow the loop variable to avoid aliasing\n\t\t\tprivateEndpointConnectionItem := privateEndpointConnectionItem\n\t\t\tvar privateEndpointConnection v20200601s.PrivateEndpointConnection_STATUS_Topic_SubResourceEmbedded\n\t\t\terr := privateEndpointConnectionItem.AssignProperties_To_PrivateEndpointConnection_STATUS_Topic_SubResourceEmbedded(&privateEndpointConnection)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"calling AssignProperties_To_PrivateEndpointConnection_STATUS_Topic_SubResourceEmbedded() to populate field PrivateEndpointConnections\")\n\t\t\t}\n\t\t\tprivateEndpointConnectionList[privateEndpointConnectionIndex] = privateEndpointConnection\n\t\t}\n\t\tdestination.PrivateEndpointConnections = privateEndpointConnectionList\n\t} else {\n\t\tdestination.PrivateEndpointConnections = nil\n\t}\n\n\t// ProvisioningState\n\tdestination.ProvisioningState = genruntime.ClonePointerToString(topic.ProvisioningState)\n\n\t// PublicNetworkAccess\n\tdestination.PublicNetworkAccess = genruntime.ClonePointerToString(topic.PublicNetworkAccess)\n\n\t// SystemData\n\tif topic.SystemData != nil {\n\t\tvar systemDatum v20200601s.SystemData_STATUS\n\t\terr := topic.SystemData.AssignProperties_To_SystemData_STATUS(&systemDatum)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"calling AssignProperties_To_SystemData_STATUS() to populate field SystemData\")\n\t\t}\n\t\tdestination.SystemData = &systemDatum\n\t} else {\n\t\tdestination.SystemData = nil\n\t}\n\n\t// Tags\n\tdestination.Tags = genruntime.CloneMapOfStringToString(topic.Tags)\n\n\t// Type\n\tdestination.Type = genruntime.ClonePointerToString(topic.Type)\n\n\t// Update the property bag\n\tif len(propertyBag) > 0 {\n\t\tdestination.PropertyBag = propertyBag\n\t} else {\n\t\tdestination.PropertyBag = nil\n\t}\n\n\t// Invoke the augmentConversionForTopic_STATUS interface (if implemented) to customize the conversion\n\tvar topicAsAny any = topic\n\tif augmentedTopic, ok := topicAsAny.(augmentConversionForTopic_STATUS); ok {\n\t\terr := augmentedTopic.AssignPropertiesTo(destination)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"calling augmented AssignPropertiesTo() for conversion\")\n\t\t}\n\t}\n\n\t// No error\n\treturn nil\n}",
"func (policy *ServersConnectionPolicy) AssignProperties_To_ServersConnectionPolicy(destination *v20211101s.ServersConnectionPolicy) error {\n\n\t// ObjectMeta\n\tdestination.ObjectMeta = *policy.ObjectMeta.DeepCopy()\n\n\t// Spec\n\tvar spec v20211101s.Servers_ConnectionPolicy_Spec\n\terr := policy.Spec.AssignProperties_To_Servers_ConnectionPolicy_Spec(&spec)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"calling AssignProperties_To_Servers_ConnectionPolicy_Spec() to populate field Spec\")\n\t}\n\tdestination.Spec = spec\n\n\t// Status\n\tvar status v20211101s.Servers_ConnectionPolicy_STATUS\n\terr = policy.Status.AssignProperties_To_Servers_ConnectionPolicy_STATUS(&status)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"calling AssignProperties_To_Servers_ConnectionPolicy_STATUS() to populate field Status\")\n\t}\n\tdestination.Status = status\n\n\t// No error\n\treturn nil\n}"
] |
SetUUID adds the uuid to the application component snapshot collection get params
|
func (o *ApplicationComponentSnapshotCollectionGetParams) SetUUID(uuid *string) {
o.UUID = uuid
}
|
[
"func (o *QtreeCollectionGetParams) SetSvmUUID(svmUUID *string) {\n\to.SvmUUID = svmUUID\n}",
"func (ec *ExperienceCreate) SetUUID(u uuid.UUID) *ExperienceCreate {\n\tec.mutation.SetUUID(u)\n\treturn ec\n}",
"func (pc *PetCreate) SetUUID(u uuid.UUID) *PetCreate {\n\tpc.mutation.SetUUID(u)\n\treturn pc\n}",
"func (o *ApplicationComponentSnapshotCollectionGetParams) WithApplicationUUID(applicationUUID string) *ApplicationComponentSnapshotCollectionGetParams {\n\to.SetApplicationUUID(applicationUUID)\n\treturn o\n}",
"func (dev VMVolumeDevice) UUID() string {\n\treturn utils.NewUUID5(blockVolumeNsUUID, dev.HostPath)\n}",
"func (o *GetVersioningPolicyParams) SetUUID(uuid string) {\n\to.UUID = uuid\n}",
"func (o *AssetConcentrationRisk) SetUuid(v string) {\n\to.Uuid = v\n}"
] |
Decode transform the geohash string to a latitude && longitude location
|
func Decode(geohashStr string) (float64, float64, error) {
latitudeRange, longtitudeRange, err := decodeToRange(geohashStr)
if err != nil {
return 0, 0, err
}
return latitudeRange.GetMidVal(), longtitudeRange.GetMidVal(), nil
}
|
[
"func Neigbors(hashstring string) []string {\n\thashLen := len(hashstring)\n\n\tposition, errs := Decode(hashstring)\n\tlat := position[0]\n\tlon := position[1]\n\tlatErr := errs[0] * 2\n\tlonErr := errs[1] * 2\n\n\thashList := []string{}\n\tfor i := -1; i < 2; i++ {\n\t\tfor j := -1; j < 2; j++ {\n\t\t\tif i == 0 && j == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnLat := validateLat(lat + float64(i)*latErr)\n\t\t\tnLon := validateLon(lon + float64(j)*lonErr)\n\t\t\tnHash := Encode(nLat, nLon, hashLen)\n\t\t\thashList = append(hashList, nHash)\n\n\t\t}\n\t}\n\n\treturn hashList\n}",
"func parseRawPlace(raw string) (p rawPlace) {\n\tsplit := len(raw) - 2\n\tp.city = strings.Title(strings.TrimSpace(raw[:split]))\n\tp.state = strings.ToUpper(strings.TrimSpace(raw[split:]))\n\treturn\n}",
"func Decode_Bbox(hashstring string) []float64 {\n\tlat_bnd := []float64{MIN_LAT, MAX_LAT}\n\tlng_bnd := []float64{MIN_LONG, MAX_LONG}\n\tvar hash_val byte\n\tbits_total := 0\n\tvar mid float64 = 0\n\n\tfor i := 0; i < len(hashstring); i++ {\n\t\tcurr := []byte{hashstring[i]}\n\t\thash_val = byte(bytes.Index(BASE32_VALS, curr))\n\t\tfor bits := 4; bits >= 0; bits-- {\n\t\t\tbit := (int(hash_val) >> bits) & 1\n\n\t\t\tif bits_total%2 == 0 {\n\t\t\t\tmid = (lng_bnd[0] + lng_bnd[1]) / 2\n\t\t\t\tif bit == 1 {\n\t\t\t\t\tlng_bnd[0] = mid\n\t\t\t\t} else {\n\t\t\t\t\tlng_bnd[1] = mid\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tmid = (lat_bnd[0] + lat_bnd[1]) / 2\n\t\t\t\tif bit == 1 {\n\t\t\t\t\tlat_bnd[0] = mid\n\t\t\t\t} else {\n\t\t\t\t\tlat_bnd[1] = mid\n\t\t\t\t}\n\t\t\t}\n\t\t\tbits_total++\n\t\t}\n\t}\n\n\treturn []float64{lat_bnd[0], lng_bnd[0], lat_bnd[1], lng_bnd[1]}\n}",
"func decodeHash(k uint64, p, pPrime uint) (idx uint64, rhoW uint8) {\n\tvar r uint8\n\tif k&1 == 1 {\n\t\tr = uint8(extractShift(k, 1, 6) + uint64(pPrime-p))\n\t} else {\n\t\tr = rho(extractShift(k, 1, pPrime-p-1))\n\t}\n\treturn getIndex(k, p, pPrime), r\n}",
"func decodePeerAddress(chunk string) string {\n\tip := net.IPv4(chunk[0], chunk[1], chunk[2], chunk[3])\n\tremotePort := 256*int(chunk[4]) + int(chunk[5]) // Port is given in network encoding.\n\treturn fmt.Sprintf(\"%s:%d\", ip.String(), remotePort)\n}",
"func DecodeInfoHash(x string) (b InfoHash, err error) {\n\tvar h []byte\n\th, err = hex.DecodeString(x)\n\tif len(h) != 20 {\n\t\treturn \"\", fmt.Errorf(\"DecodeInfoHash: expected InfoHash len=20, got %d\", len(h))\n\t}\n\treturn InfoHash(h), err\n}",
"func decodecity(name string, json map[string]interface{}, city *City){\n\tif (json[\"status\"] != \"OK\"){\n\t\tfmt.Println(json[\"status\"])\n\t\treturn\n\t}\n\tresults := json[\"results\"].([]interface{})\n\tfirstresult := results[0].(map[string]interface{})\n\tgeo := firstresult[\"geometry\"].(map[string]interface{})\n\tlocation := geo[\"location\"].(map[string]interface{})\n\tlat := location[\"lat\"].(float64)\n\tlng := location[\"lng\"].(float64)\n\t\n\t*city = City{Name: name, Lat: lat, Lng: lng}\n}"
] |
New read config file
|
func New(configFile string) *Config {
c, err := ioutil.ReadFile(configFile)
if err != nil {
log.Panicf("Read config file %s failed: %s", configFile, err.Error())
}
cfg := &Config{}
if err := yaml.Unmarshal(c, cfg); err != nil {
log.Panicf("yaml.Unmarshal config file %s failed: %s", configFile, err.Error())
}
return cfg
}
|
[
"func readConfig() (*koanf.Koanf, error) {\n\tvar k = koanf.New(\".\")\n\tf := flag.NewFlagSet(\"terrakube\", flag.ExitOnError)\n\tf.String(\"input\", \"STDIN\", \"Input directory/file(s) containing the Kubernetes YAML manifests.\")\n\tf.String(\"output\", \"STDOUT\", \"Output file for the generated terraform configuration.\")\n\tf.Bool(\"overwrite\", false, \"Overwrite existing terraform configuration file.\")\n\n\tf.Parse(os.Args[1:])\n\n\tif err := k.Load(basicflag.Provider(f, \".\"), nil); err != nil {\n\t\treturn nil, fmt.Errorf(\"Error loading configuration: %v\", err)\n\t}\n\n\tk.Load(env.Provider(\"TK_\", \".\", func(s string) string {\n\t\treturn strings.Replace(strings.ToLower(strings.TrimPrefix(s, \"TK_\")), \"_\", \"-\", -1)\n\t}), nil)\n\n\treturn k, nil\n}",
"func readConfFile() string {\n\tpath := getConfFile()\n\tdata, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn string(data)\n}",
"func loadConfiguration() {\n configFilePath := \"src/config/main.conf\"\n\n log.Printf(\"starting %s load\\n\", configFilePath)\n configFile, err := os.Open(configFilePath)\n if err != nil {\n log.Println(\"[ERROR] \", err)\n log.Println(\"For your happiness an example config file is provided in the 'conf' directory in the repository.\")\n os.Exit(1)\n }\n\n configDecoder := json.NewDecoder(configFile)\n err = configDecoder.Decode(&globalConfiguration)\n if err != nil {\n log.Println(\"[ERROR] \", err)\n log.Println(\"Please ensure that your config file is in valid JSON format.\")\n os.Exit(1)\n }\n\n log.Printf(\"finished %s load\\n\", configFilePath)\n}",
"func (m *Manager) readConfig() (readConfigCh chan *configEntry, doneCh chan bool) {\n\treadConfigCh, doneCh = make(chan *configEntry), make(chan bool)\n\tgo func() {\n\t\tresponse, err := m.etcdClient.Get(m.configPath, true, true)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Initial config not present. Monitoring changes on it from now on..\")\n\t\t} else {\n\t\t\taction := \"readingConfig\"\n\t\t\tm.processNode(response.Node, action, readConfigCh)\n\t\t\tdoneCh <- true\n\t\t}\n\t}()\n\treturn\n}",
"func (a *App) readConfig() {\n\tvar configLocation string\n\tif _, err := os.Stat(\"./configs\"); err == nil {\n\t\tconfigLocation = \"./configs\"\n\t}\n\n\ta.Config = config.NewEnvFile(configLocation)\n}",
"func ReadConfig(configFile string) Config {\n\t_, err := os.Stat(configFile)\n\tif err != nil {\n\t\tlog.Fatal(\"Config file is missing: \", configFile)\n\t}\n\n\tvar config Config\n\tif _, err := toml.DecodeFile(configFile, &config); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn config\n}",
"func readConfig() config {\n\tcontent, err := ioutil.ReadFile(\"config.yml\")\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\n\tvar tmpCnf config\n\terr = yaml.Unmarshal(content, &tmpCnf)\n\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\n\treturn tmpCnf\n}"
] |
NOTE put it to helper
|
func query_param(query_data map[string][]string) *pagination.QueryParam {
qp := new(pagination.QueryParam)
if len(query_data["page"]) > 0 {
page, err := strconv.Atoi(query_data["page"][0])
if err == nil {
qp.Page = page
}
}
if len(query_data["per_page"]) > 0 {
page, err := strconv.Atoi(query_data["per_page"][0])
if err == nil {
qp.Per_page = page
}
}
if len(query_data["value"]) > 0 {
qp.Value = query_data["value"][0]
}
if len(query_data["filter"]) > 0 {
qp.Filter, _ = strconv.ParseBool(query_data["filter"][0])
}
return qp
}
|
[
"func (ls *ListStack) containsHelper(item adts.ContainerElement) bool {\n\tfor tmp := ls.backer.Front(); tmp != nil; tmp = tmp.Next() {\n\t\tif v, ok := tmp.Value.(adts.ContainerElement); ok {\n\t\t\tif v.Equals(item) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}",
"func getHelper(nodeRPC string, input string, fname string, cmd string) {\n\n\tcolorprint.Info(\">>>> Please enter the name of the file that you would like to obtain\")\n\tfmt.Scan(&fname)\n\tcolorprint.Debug(\"<<<< \" + fname)\n\tcolorprint.Info(\">>>> Please enter the address of the node you want to connect to\")\n\tfmt.Scan(&input)\n\tcolorprint.Debug(\"<<<< \" + input)\n\tnodeAddr := input\n\t// Connect to utility.Service via RPC // returns *Client, err\n\tavail, _, _ := CheckFileAvailability(fname, nodeAddr)\n\tif avail && (cmd == \"get\") {\n\t\tcolorprint.Info(\">>>> Would you like to get the file from the node[\" + nodeRPC + \"]?(y/n)\")\n\t\tfmt.Scan(&input)\n\t\tcolorprint.Debug(\"<<<< \" + input)\n\t\tif input == \"y\" {\n\t\t\t// TODO\n\t\t}\n\t}\n}",
"func setup(){}",
"func (l *line) isHelperMethod() bool {\n\treturn len(l.tokens) > 1 && l.tokens[0] == equal\n}",
"func (is *infosec) html(page *Page, els element) {\n\tis.Map.html(page, nil)\n\tels.setMeta(\"isInfosec\", true)\n\n\t// not in an infobox{}\n\t// FIXME: do not produce this warning if infosec{} is in a variable\n\tif is.parentBlock().blockType() != \"infobox\" {\n\t\tis.warn(is.openPosition(), \"infosec{} outside of infobox{} does nothing\")\n\t\treturn\n\t}\n\n\t// inject the title\n\tif is.blockName() != \"\" {\n\t\tis.mapList = append([]*mapListEntry{{\n\t\t\tkey: \"_infosec_title_\",\n\t\t\tmetas: map[string]bool{\"isTitle\": true},\n\t\t\tvalue: page.Fmt(is.blockName(), is.openPosition()),\n\t\t}}, is.mapList...)\n\t}\n\n\tinfoTableAddRows(is, els, page, is.mapList)\n}",
"func (_m *TestingT) Helper() {\n\t_m.Called()\n}",
"func (m *DefaultMounter) mountHelper(cfg *dokan.Config) (*dokan.MountHandle, error) {\n\t// m.dir is constant and safe to access outside the lock.\n\thandle, err := dokan.Mount(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tm.lock.Lock()\n\tdefer m.lock.Unlock()\n\tm.mnt = handle\n\treturn handle, nil\n}"
] |
Query returns a query builder for Veterinarian.
|
func (c *VeterinarianClient) Query() *VeterinarianQuery {
return &VeterinarianQuery{config: c.config}
}
|
[
"func (r *Resolver) Query() generated.QueryResolver { return &queryResolver{r} }",
"func (c *BeerClient) Query() *BeerQuery {\n\treturn &BeerQuery{\n\t\tconfig: c.config,\n\t}\n}",
"func (dao *VillageDAO) Query(rs app.RequestScope, offset, limit int, districtID int) ([]models.Village, error) {\n\tvillages := []models.Village{}\n\terr := rs.Tx().Select().Where(dbx.HashExp{\"district_id\": districtID}).OrderBy(\"id\").Offset(int64(offset)).Limit(int64(limit)).All(&villages)\n\treturn villages, err\n}",
"func Query() error {\n\tvar user TbUser\n\terr := orm.Where(\"name=$1\", \"viney\").Limit(1).Find(&user)\n\tif err == nil {\n\t\tfmt.Println(user)\n\t\treturn nil\n\t}\n\n\treturn err\n}",
"func (c *ClubapplicationClient) Query() *ClubapplicationQuery {\n\treturn &ClubapplicationQuery{config: c.config}\n}",
"func (c *LevelOfDangerousClient) Query() *LevelOfDangerousQuery {\n\treturn &LevelOfDangerousQuery{config: c.config}\n}",
"func (c *MedicineTypeClient) Query() *MedicineTypeQuery {\n\treturn &MedicineTypeQuery{config: c.config}\n}"
] |
BuildAuthRequestCode builds the string representation of the auth code
|
func BuildAuthRequestCode(authReq AuthRequest, crypto Crypto) (string, error) {
return crypto.Encrypt(authReq.GetID())
}
|
[
"func BuildRPCToken(client, service, method string) ([]byte, error) {\n\ttok := RPCToken{\n\t\tClient: client,\n\t\tKind: TokenKindRPC,\n\t\tService: service,\n\t\tMethod: method,\n\t}\n\n\treturn json.Marshal(tok)\n}",
"func (a *AuthorizationsService) AuthorizationCode(redirectURL string) (string, error) {\n\tu := fmt.Sprintf(\"%s/authenticate?response_type=code&client_id=%v&redirect_uri=%v\", fakeURL, a.client.ClientID, redirectURL)\n\tif isTEST {\n\t\tu = fmt.Sprintf(\"%s?response_type=code&client_id=%v&redirect_uri=%v\", \"/v1/oauth/oauth-business-users-for-applications/authenticate\", a.client.ClientID, redirectURL)\n\t}\n\t//https://authenticate.trustpilot.com?client_id=APIKey&redirect_uri=https://www.clientsSite.com&response_type=code\n\treq, err := a.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\tlog.Printf(\"Err %v\", err)\n\t\treturn \"\", err\n\t}\n\tresp, err := a.client.Do(a.client.CTX, req)\n\tif err != nil {\n\t\tlog.Printf(\"Err1 %v\", err)\n\t\treturn \"\", err\n\t}\n\t//Redirects back to: https://www.clientsSite.com/?code=Code as the response\n\treturn string(resp), nil\n}",
"func (e ConstraintError) Code() (string, []any) {\n\treturn strconv.Quote(e.prefix() + e.msg), nil\n}",
"func (c *Client) GetAuthCodeURL() string {\n\treturn \"Not Implemented\"\n}",
"func (is *Signer) BuildStringToSign(request *http.Request) (string, error) {\n\tif request.Method == \"GET\" {\n\t\treturn is.BuildStringToSignByValues(request.Header.Get(\"Date\"), request.Method, request.URL.Path, request.URL.Query())\n\t} else if request.Method == \"POST\" {\n\t\treturn is.BuildStringToSignByValues(request.Header.Get(\"Date\"), request.Method, request.URL.Path, request.Form)\n\t}\n\treturn \"\", fmt.Errorf(\"Requset Type Not Support For Sign \")\n}",
"func formatCode(code []byte) []byte {\n\tformatted, err := format.Source(code)\n\n\tif err != nil {\n\t\tlog.WithError(err).Warn(\"Code formatting error, generated service will not build, outputting unformatted code\")\n\t\t// return code so at least we get something to examine\n\t\treturn code\n\t}\n\n\treturn formatted\n}",
"func (c *Client) BuildSendSmsCodeRequest(ctx context.Context, v interface{}) (*http.Request, error) {\n\tu := &url.URL{Scheme: c.scheme, Host: c.host, Path: SendSmsCodeUserPath()}\n\treq, err := http.NewRequest(\"POST\", u.String(), nil)\n\tif err != nil {\n\t\treturn nil, goahttp.ErrInvalidURL(\"User\", \"SendSmsCode\", u.String(), err)\n\t}\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\treturn req, nil\n}"
] |
eat the next Rune from input
|
func (l *Lexer) next() rune {
r, width := utf8.DecodeRuneInString(l.input[l.pos:])
l.pos += width
// log.Printf("[next] %s", l.debugString())
return r
}
|
[
"func (s *scanner) peek() rune {\n\tr := s.next()\n\ts.backup()\n\treturn r\n}",
"func (s *Scanner) nextRune() rune {\n\tr, _, err := s.r.ReadRune()\n\tif err != nil {\n\t\tif err != io.EOF {\n\t\t\tfmt.Fprintln(os.Stderr)\n\t\t}\n\t\tr = -1 // EOF rune\n\t}\n\treturn r\n}",
"func (l *Lexer) AcceptRun(valid string) (n int) {\n\tfor l.Accept(valid) {\n\t\tn++\n\t}\n\treturn\n}",
"func phase_one(){\nphase= 1\nsection_count= 0\nreset_input()\nskip_limbo()\nfor!input_has_ended{\nscan_section()\n}\ncheck_complete()\nphase= 2\n}",
"func (t *Throne) Rune() rune {\n\tif t.Visibility {\n\t\treturn throneRune\n\t} else {\n\t\treturn invisibleRune\n\t}\n}",
"func Run1(scanner *bufio.Scanner) string {\n\tvar input string\n\tfor scanner.Scan() {\n\t\tinput = scanner.Text()\n\t}\n\tfloor := 0\n\tfor _, c := range input {\n\t\tswitch char := string(c); char {\n\t\tcase \"(\":\n\t\t\tfloor++\n\t\tcase \")\":\n\t\t\tfloor--\n\t\t}\n\t}\n\treturn fmt.Sprintf(\"Santa ends up on floor: %d,\\nand basement entrance position is: %d\", santasFloor(input), basementPosition(input))\n}",
"func (l *reader) nextItemFromInput() item {\n\tfor {\n\t\tselect {\n\t\tcase item := <-l.items:\n\t\t\treturn item\n\t\tdefault:\n\t\t\tl.state = l.state(l)\n\t\t}\n\t}\n\tpanic(\"not reached\")\n}"
] |
Changed returns neighbors list update event chan.
|
func (a *PlaylistSongsHandler) Changed() <-chan struct{} {
return a.changed
}
|
[
"func (ctrler CtrlDefReactor) OnNodeUpdate(oldObj *Node, newObj *cluster.Node) error {\n\tlog.Info(\"OnNodeUpdate is not implemented\")\n\treturn nil\n}",
"func (_UpkeepRegistrationRequests *UpkeepRegistrationRequestsFilterer) WatchConfigChanged(opts *bind.WatchOpts, sink chan<- *UpkeepRegistrationRequestsConfigChanged) (event.Subscription, error) {\n\n\tlogs, sub, err := _UpkeepRegistrationRequests.contract.WatchLogs(opts, \"ConfigChanged\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(UpkeepRegistrationRequestsConfigChanged)\n\t\t\t\tif err := _UpkeepRegistrationRequests.contract.UnpackLog(event, \"ConfigChanged\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func (p *ServerGatedServiceClient) FriendChanged(project_id int32, key string, otype int8, uid_pair []*rtmcommon.FriendPair) (err error) {\n\tif err = p.sendFriendChanged(project_id, key, otype, uid_pair); err != nil {\n\t\treturn\n\t}\n\treturn p.recvFriendChanged()\n}",
"func (l *List) NotifyStatusChanged(pid peer.Identifier) {\n\tl.list.NotifyStatusChanged(pid)\n}",
"func (c *CaptureList) Updated() <-chan struct{} {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\treturn c.updated\n}",
"func (h *Hub) SendOnlineChangedEvent() {\n\tdata := map[string]interface{}{}\n\n\tdata[\"event\"] = \"onlineChanged\"\n\n\tjson, _ := json2.Marshal(data)\n\th.Broadcast <- json\n}",
"func (d *delegate) NotifyUpdate(n *memberlist.Node) {\n\tlevel.Debug(d.logger).Log(\"received\", \"NotifyUpdate\", \"node\", n.Name, \"addr\", n.Address())\n}"
] |
Swap atomically swaps the wrapped chainhash.ListTransactionsResult and returns the old value.
|
func (at *ListTransactionsResult) Swap(
n []btcjson.ListTransactionsResult,
) []btcjson.ListTransactionsResult {
o := at.v.Load().([]btcjson.ListTransactionsResult)
at.v.Store(n)
return o
}
|
[
"func (list VulnerabilityList) Swap(i, j int) {\n\ttemp := list[i]\n\tlist[i] = list[j]\n\tlist[j] = temp\n}",
"func (_IUniswapV2Router02 *IUniswapV2Router02Transactor) SwapExactTokensForETHSupportingFeeOnTransferTokens(opts *bind.TransactOpts, amountIn *big.Int, amountOutMin *big.Int, path []common.Address, to common.Address, deadline *big.Int) (*types.Transaction, error) {\r\n\treturn _IUniswapV2Router02.contract.Transact(opts, \"swapExactTokensForETHSupportingFeeOnTransferTokens\", amountIn, amountOutMin, path, to, deadline)\r\n}",
"func (_IUniswapV2Router02 *IUniswapV2Router02Transactor) SwapTokensForExactTokens(opts *bind.TransactOpts, amountOut *big.Int, amountInMax *big.Int, path []common.Address, to common.Address, deadline *big.Int) (*types.Transaction, error) {\r\n\treturn _IUniswapV2Router02.contract.Transact(opts, \"swapTokensForExactTokens\", amountOut, amountInMax, path, to, deadline)\r\n}",
"func querySwapTokens(ctx sdk.Context, req abci.RequestQuery, keeper Keeper) ([]byte, sdk.Error) {\n\tvar queryParams types.QuerySwapTokensParams\n\terr := keeper.cdc.UnmarshalJSON(req.Data, &queryParams)\n\tif err != nil {\n\t\treturn nil, sdk.ErrUnknownRequest(sdk.AppendMsgToErr(\"incorrectly formatted request data\", err.Error()))\n\t}\n\n\tif queryParams.BusinessType == \"\" {\n\t\treturn nil, sdk.ErrUnknownRequest(\"invalid params:business_type is required\")\n\t}\n\n\t// coins in account\n\tvar accountCoins sdk.SysCoins\n\tif queryParams.Address != \"\" {\n\t\taddr, err := sdk.AccAddressFromBech32(queryParams.Address)\n\t\tif err != nil {\n\t\t\treturn nil, sdk.ErrInvalidAddress(fmt.Sprintf(\"invalid address:%s\", queryParams.Address))\n\t\t}\n\t\taccountCoins = keeper.tokenKeeper.GetCoins(ctx, addr)\n\t}\n\n\tvar tokens []string\n\tswitch queryParams.BusinessType {\n\tcase types.SwapBusinessTypeCreate:\n\t\ttokens = getSwapCreateLiquidityTokens(ctx, keeper)\n\tcase types.SwapBusinessTypeAdd:\n\t\ttokens = getSwapAddLiquidityTokens(ctx, keeper, queryParams.BaseTokenName)\n\tcase types.SwapBusinessTypeSwap:\n\t\ttokens = getSwapTokens(ctx, keeper, queryParams.BaseTokenName)\n\t}\n\n\tswapTokensMap := make(map[string]sdk.Dec, len(tokens))\n\tfor _, token := range tokens {\n\t\tswapTokensMap[token] = sdk.ZeroDec()\n\t}\n\n\t// update amount by coins in account\n\tfor _, coin := range accountCoins {\n\t\tif _, ok := swapTokensMap[coin.Denom]; ok {\n\t\t\tswapTokensMap[coin.Denom] = coin.Amount\n\t\t}\n\t}\n\n\t// sort token list by account balance\n\tvar swapTokens types.SwapTokens\n\tfor symbol, available := range swapTokensMap {\n\t\tswapTokens = append(swapTokens, types.NewSwapToken(symbol, available))\n\t}\n\tsort.Sort(swapTokens)\n\n\tswapTokensResp := types.SwapTokensResponse{\n\t\tNativeToken: common.NativeToken,\n\t\tTokens: swapTokens,\n\t}\n\n\tresponse := common.GetBaseResponse(swapTokensResp)\n\tbz, err := json.Marshal(response)\n\tif err != nil {\n\t\treturn nil, sdk.ErrInternal(sdk.AppendMsgToErr(\"failed to marshal response to json\", err.Error()))\n\t}\n\treturn bz, nil\n}",
"func (_Token *TokenTransactor) TokenToTokenSwapOutput(opts *bind.TransactOpts, tokens_bought *big.Int, max_tokens_sold *big.Int, max_eth_sold *big.Int, deadline *big.Int, token_addr common.Address) (*types.Transaction, error) {\n\treturn _Token.contract.Transact(opts, \"tokenToTokenSwapOutput\", tokens_bought, max_tokens_sold, max_eth_sold, deadline, token_addr)\n}",
"func (_Token *TokenTransactor) TokenToEthSwapOutput(opts *bind.TransactOpts, eth_bought *big.Int, max_tokens *big.Int, deadline *big.Int) (*types.Transaction, error) {\n\treturn _Token.contract.Transact(opts, \"tokenToEthSwapOutput\", eth_bought, max_tokens, deadline)\n}",
"func (_IUniswapV2Router01 *IUniswapV2Router01Session) SwapTokensForExactETH(amountOut *big.Int, amountInMax *big.Int, path []common.Address, to common.Address, deadline *big.Int) (*types.Transaction, error) {\r\n\treturn _IUniswapV2Router01.Contract.SwapTokensForExactETH(&_IUniswapV2Router01.TransactOpts, amountOut, amountInMax, path, to, deadline)\r\n}"
] |
checkIfOldestFuturesCanReplace checks when there ara different oldest future blocks in different 2f+1 nodes, if there is a oldest future block can replace others
|
func (n *Node) checkIfOldestFuturesCanReplace(oldestFutureMsgs []*hbft.OldestFutureMsg, currentSeqID, currentViewID uint64) (bool, common.Hash, bool) {
differentFutureCntMap := make(map[common.Hash]int)
differentFutureMsgMap := make(map[common.Hash]*hbft.OldestFutureMsg)
differentFuturePrimMap := make(map[string]int)
// Check view and signature
cnt := 0
for _, oldestFutureMsg := range oldestFutureMsgs {
if oldestFutureMsg.SequenceID == currentSeqID &&
oldestFutureMsg.ViewID == currentViewID {
if err := n.checkMsgSignature(oldestFutureMsg); err != nil {
n.HBFTDebugInfo(fmt.Sprintf("checkIfOldestFuturesCanReplace failed, check signature failed, %s", err.Error()))
return false, common.Hash{}, false
}
if _, ok := differentFuturePrimMap[oldestFutureMsg.ViewPrimary]; ok {
n.HBFTDebugInfo("checkIfOldestFuturesCanReplace failed, duplicated prim")
return false, common.Hash{}, false
} else {
differentFuturePrimMap[oldestFutureMsg.ViewPrimary] = 1
}
if oldestFutureMsg.OldestFuture != nil {
differentFutureMsgMap[oldestFutureMsg.OldestFuture.Hash()] = oldestFutureMsg
} else {
differentFutureMsgMap[common.Hash{}] = oldestFutureMsg
}
cnt++
}
}
if cnt <= n.EleBackend.Get2fRealSealersCnt() {
n.HBFTDebugInfo("checkIfOldestFuturesCanReplace failed, check view failed")
return false, common.Hash{}, false
}
for _, oldestFutureMsg := range oldestFutureMsgs {
if !oldestFutureMsg.NoAnyFutures {
if _, ok := differentFutureCntMap[oldestFutureMsg.OldestFuture.Hash()]; !ok {
differentFutureCntMap[oldestFutureMsg.OldestFuture.Hash()] = 1
} else {
differentFutureCntMap[oldestFutureMsg.OldestFuture.Hash()] += 1
}
} else {
continue
}
}
if len(differentFutureCntMap) == 1 {
for hash, cnt := range differentFutureCntMap {
if cnt > n.EleBackend.Get2fRealSealersCnt()/2 {
return true, hash, true
} else {
return true, hash, false
}
}
}
oldestFutureBlockToReturn := common.Hash{}
maxValue := uint64(0)
for hash, cnt := range differentFutureCntMap {
if cnt > n.EleBackend.Get2fRealSealersCnt()/2 {
return true, hash, true
} else {
if differentFutureMsgMap[hash].Completed.ReqTimeStamp > maxValue && !common.EmptyHash(hash) {
maxValue = differentFutureMsgMap[hash].Completed.ReqTimeStamp
oldestFutureBlockToReturn = hash
}
//if differentFutureMsgMap[hash].Completed.BlockNum == 1 {
// if differentFutureMsgMap[hash].Completed.ReqTimeStamp > maxValue {
// maxValue = differentFutureMsgMap[hash].Completed.ReqTimeStamp
// oldestFutureBlockToReturn = hash
// }
//} else {
// if differentFutureMsgMap[hash].Completed.ReqTimeStamp > maxValue {
// maxValue = differentFutureMsgMap[hash].Completed.ReqTimeStamp
// oldestFutureBlockToReturn = hash
// }
//}
}
}
if !common.EmptyHash(oldestFutureBlockToReturn) {
return true, oldestFutureBlockToReturn, true
}
n.HBFTDebugInfo("checkIfOldestFuturesCanReplace failed")
return false, common.Hash{}, false
}
|
[
"func isOVNKubernetesChangeSafe(prev, next *operv1.NetworkSpec) []error {\n\tpn := prev.DefaultNetwork.OVNKubernetesConfig\n\tnn := next.DefaultNetwork.OVNKubernetesConfig\n\terrs := []error{}\n\n\tif next.Migration != nil && next.Migration.MTU != nil {\n\t\tmtuNet := next.Migration.MTU.Network\n\t\tmtuMach := next.Migration.MTU.Machine\n\n\t\t// For MTU values provided for migration, verify that:\n\t\t// - The current and target MTUs for the CNI are provided\n\t\t// - The machine target MTU is provided\n\t\t// - The current MTU actually matches the MTU known as current\n\t\t// - The machine target MTU has a valid overhead with the CNI target MTU\n\t\tif mtuNet == nil || mtuMach == nil || mtuNet.From == nil || mtuNet.To == nil || mtuMach.To == nil {\n\t\t\terrs = append(errs, errors.Errorf(\"invalid Migration.MTU, at least one of the required fields is missing\"))\n\t\t} else {\n\t\t\t// Only check next.Migration.MTU.Network.From when it changes\n\t\t\tcheckPrevMTU := prev.Migration == nil || prev.Migration.MTU == nil || prev.Migration.MTU.Network == nil || !reflect.DeepEqual(prev.Migration.MTU.Network.From, next.Migration.MTU.Network.From)\n\t\t\tif checkPrevMTU && !reflect.DeepEqual(next.Migration.MTU.Network.From, pn.MTU) {\n\t\t\t\terrs = append(errs, errors.Errorf(\"invalid Migration.MTU.Network.From(%d) not equal to the currently applied MTU(%d)\", *next.Migration.MTU.Network.From, *pn.MTU))\n\t\t\t}\n\n\t\t\tminMTU := MinMTUIPv4\n\t\t\tfor _, cn := range next.ClusterNetwork {\n\t\t\t\tif utilnet.IsIPv6CIDRString(cn.CIDR) {\n\t\t\t\t\tminMTU = MinMTUIPv6\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif *next.Migration.MTU.Network.To < minMTU || *next.Migration.MTU.Network.To > MaxMTU {\n\t\t\t\terrs = append(errs, errors.Errorf(\"invalid Migration.MTU.Network.To(%d), has to be in range: %d-%d\", *next.Migration.MTU.Network.To, minMTU, MaxMTU))\n\t\t\t}\n\t\t\tif *next.Migration.MTU.Machine.To < minMTU || *next.Migration.MTU.Machine.To > MaxMTU {\n\t\t\t\terrs = append(errs, errors.Errorf(\"invalid Migration.MTU.Machine.To(%d), has to be in range: %d-%d\", *next.Migration.MTU.Machine.To, minMTU, MaxMTU))\n\t\t\t}\n\t\t\tif (*next.Migration.MTU.Network.To + getOVNEncapOverhead(next)) > *next.Migration.MTU.Machine.To {\n\t\t\t\terrs = append(errs, errors.Errorf(\"invalid Migration.MTU.Machine.To(%d), has to be at least %d\", *next.Migration.MTU.Machine.To, *next.Migration.MTU.Network.To+getOVNEncapOverhead(next)))\n\t\t\t}\n\t\t}\n\t} else if !reflect.DeepEqual(pn.MTU, nn.MTU) {\n\t\terrs = append(errs, errors.Errorf(\"cannot change ovn-kubernetes MTU without migration\"))\n\t}\n\n\tif !reflect.DeepEqual(pn.GenevePort, nn.GenevePort) {\n\t\terrs = append(errs, errors.Errorf(\"cannot change ovn-kubernetes genevePort\"))\n\t}\n\tif pn.HybridOverlayConfig == nil && nn.HybridOverlayConfig != nil {\n\t\terrs = append(errs, errors.Errorf(\"cannot start a hybrid overlay network after install time\"))\n\t}\n\tif pn.HybridOverlayConfig != nil {\n\t\tif !reflect.DeepEqual(pn.HybridOverlayConfig, nn.HybridOverlayConfig) {\n\t\t\terrs = append(errs, errors.Errorf(\"cannot edit a running hybrid overlay network\"))\n\t\t}\n\t}\n\tif pn.IPsecConfig != nil && nn.IPsecConfig != nil {\n\t\tif !reflect.DeepEqual(pn.IPsecConfig, nn.IPsecConfig) {\n\t\t\terrs = append(errs, errors.Errorf(\"cannot edit IPsec configuration at runtime\"))\n\t\t}\n\t}\n\n\treturn errs\n}",
"func checkNodesReady(c *client.Client, nt time.Duration, expect int) ([]string, error) {\n\t// First, keep getting all of the nodes until we get the number we expect.\n\tvar nodeList *api.NodeList\n\tvar errLast error\n\tstart := time.Now()\n\tfound := wait.Poll(poll, nt, func() (bool, error) {\n\t\t// Even though listNodes(...) has its own retries, a rolling-update\n\t\t// (GCE/GKE implementation of restart) can complete before the apiserver\n\t\t// knows about all of the nodes. Thus, we retry the list nodes call\n\t\t// until we get the expected number of nodes.\n\t\tnodeList, errLast = listNodes(c, labels.Everything(), fields.Everything())\n\t\tif errLast != nil {\n\t\t\treturn false, nil\n\t\t}\n\t\tif len(nodeList.Items) != expect {\n\t\t\terrLast = fmt.Errorf(\"expected to find %d nodes but found only %d (%v elapsed)\",\n\t\t\t\texpect, len(nodeList.Items), time.Since(start))\n\t\t\tLogf(\"%v\", errLast)\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\t}) == nil\n\tnodeNames := make([]string, len(nodeList.Items))\n\tfor i, n := range nodeList.Items {\n\t\tnodeNames[i] = n.ObjectMeta.Name\n\t}\n\tif !found {\n\t\treturn nodeNames, fmt.Errorf(\"couldn't find %d nodes within %v; last error: %v\",\n\t\t\texpect, nt, errLast)\n\t}\n\tLogf(\"Successfully found %d nodes\", expect)\n\n\t// Next, ensure in parallel that all the nodes are ready. We subtract the\n\t// time we spent waiting above.\n\ttimeout := nt - time.Since(start)\n\tresult := make(chan bool, len(nodeList.Items))\n\tfor _, n := range nodeNames {\n\t\tn := n\n\t\tgo func() { result <- waitForNodeToBeReady(c, n, timeout) }()\n\t}\n\tfailed := false\n\t// TODO(mbforbes): Change to `for range` syntax once we support only Go\n\t// >= 1.4.\n\tfor i := range nodeList.Items {\n\t\t_ = i\n\t\tif !<-result {\n\t\t\tfailed = true\n\t\t}\n\t}\n\tif failed {\n\t\treturn nodeNames, fmt.Errorf(\"at least one node failed to be ready\")\n\t}\n\treturn nodeNames, nil\n}",
"func (t *Tangle) CheckSolidityAndComputeWhiteFlagMutations(ctx context.Context, index iotago.MilestoneIndex, timestamp uint32, parents iotago.BlockIDs, previousMilestoneID iotago.MilestoneID) (*whiteflag.WhiteFlagMutations, error) {\n\n\tsnapshotInfo := t.storage.SnapshotInfo()\n\tif snapshotInfo == nil {\n\t\treturn nil, errors.Wrap(common.ErrCritical, common.ErrSnapshotInfoNotFound.Error())\n\t}\n\n\t// check if the requested milestone index would be the next one\n\tif index != t.syncManager.ConfirmedMilestoneIndex()+1 {\n\t\treturn nil, common.ErrNodeNotSynced\n\t}\n\n\tif len(parents) < 1 {\n\t\treturn nil, ErrParentsNotGiven\n\t}\n\n\t// register all parents for block solid events\n\t// this has to be done, even if the parents may be solid already, to prevent race conditions\n\tblockSolidEventListeners := make(map[iotago.BlockID]*valuenotifier.Listener, len(parents))\n\tfor _, parent := range parents {\n\t\tblockSolidEventListeners[parent] = t.BlockSolidListener(parent)\n\t}\n\n\t// check all parents for solidity\n\tfor _, parent := range parents {\n\t\tcachedBlockMeta := t.storage.CachedBlockMetadataOrNil(parent)\n\t\tif cachedBlockMeta == nil {\n\t\t\tcontains, err := t.storage.SolidEntryPointsContain(parent)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif contains {\n\t\t\t\t// notify the listener manually, because the parent is already solid.\n\t\t\t\tt.blockSolidNotifier.Notify(parent)\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\tcachedBlockMeta.ConsumeMetadata(func(metadata *storage.BlockMetadata) { // meta -1\n\t\t\tif !metadata.IsSolid() {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// notify the listener manually, because the parent is already solid.\n\t\t\tt.blockSolidNotifier.Notify(parent)\n\t\t})\n\t}\n\n\tblocksMemcache := storage.NewBlocksMemcache(t.storage.CachedBlock)\n\tmetadataMemcache := storage.NewMetadataMemcache(t.storage.CachedBlockMetadata)\n\tmemcachedTraverserStorage := dag.NewMemcachedTraverserStorage(t.storage, metadataMemcache)\n\n\tdefer func() {\n\t\t// deregister the events to free the memory\n\t\tfor _, listener := range blockSolidEventListeners {\n\t\t\tlistener.Deregister()\n\t\t}\n\n\t\t// all releases are forced since the cone is referenced and not needed anymore\n\t\tmemcachedTraverserStorage.Cleanup(true)\n\n\t\t// release all blocks at the end\n\t\tblocksMemcache.Cleanup(true)\n\n\t\t// Release all block metadata at the end\n\t\tmetadataMemcache.Cleanup(true)\n\t}()\n\n\t// check if all requested parents are solid\n\tsolid, aborted := t.SolidQueueCheck(ctx,\n\t\tmemcachedTraverserStorage,\n\t\tindex,\n\t\tparents)\n\tif aborted {\n\t\treturn nil, common.ErrOperationAborted\n\t}\n\n\tif !solid {\n\t\t// wait for at most \"ComputeWhiteFlagTimeout\" for the parents to become solid\n\t\tctx, cancel := context.WithTimeout(ctx, t.whiteFlagParentsSolidTimeout)\n\t\tdefer cancel()\n\n\t\tfor _, blockSolidEventListener := range blockSolidEventListeners {\n\t\t\t// wait until the block is solid\n\t\t\tif err := blockSolidEventListener.Wait(ctx); err != nil {\n\t\t\t\treturn nil, ErrParentsNotSolid\n\t\t\t}\n\t\t}\n\t}\n\n\tparentsTraverser := dag.NewParentsTraverser(memcachedTraverserStorage)\n\n\t// at this point all parents are solid\n\t// compute merkle tree root\n\treturn whiteflag.ComputeWhiteFlagMutations(\n\t\tctx,\n\t\tt.storage.UTXOManager(),\n\t\tparentsTraverser,\n\t\tblocksMemcache.CachedBlock,\n\t\tindex,\n\t\ttimestamp,\n\t\tparents,\n\t\tpreviousMilestoneID,\n\t\tsnapshotInfo.GenesisMilestoneIndex(),\n\t\twhiteflag.DefaultWhiteFlagTraversalCondition,\n\t)\n}",
"func IsNodeRecentlyCordoned(\n\tnode *v1.Node,\n\tcluster *corev1.StorageCluster,\n) bool {\n\tcordoned, startTime := IsNodeCordoned(node)\n\tif !cordoned || startTime.IsZero() {\n\t\treturn false\n\t}\n\n\tvar waitDuration time.Duration\n\tif duration, err := strconv.Atoi(cluster.Annotations[constants.AnnotationCordonedRestartDelay]); err == nil {\n\t\twaitDuration = time.Duration(duration) * time.Second\n\t} else {\n\t\twaitDuration = constants.DefaultCordonedRestartDelay\n\t}\n\treturn time.Now().Add(-waitDuration).Before(startTime)\n}",
"func (fc *familyChannel) isExpire(ahead, _ int64) bool {\n\tnow := timeutil.Now()\n\tfc.logger.Info(\"family channel expire check\",\n\t\tlogger.String(\"database\", fc.database),\n\t\tlogger.Any(\"shard\", fc.shardID),\n\t\tlogger.Int64(\"head\", ahead),\n\t\tlogger.String(\"family\", timeutil.FormatTimestamp(fc.lastFlushTime.Load(), timeutil.DataTimeFormat2)))\n\t// add 15 minute buffer\n\treturn fc.lastFlushTime.Load()+ahead+15*time.Minute.Milliseconds() < now\n}",
"func shouldUpdateOVNKonPrepull(ovn bootstrap.OVNBootstrapResult, releaseVersion string) (updateNode, renderPrepull bool) {\n\t// Fresh cluster - full steam ahead! No need to wait for pre-puller.\n\tif ovn.NodeUpdateStatus == nil {\n\t\tklog.V(3).Infof(\"Fresh cluster, no need for prepuller\")\n\t\treturn true, false\n\t}\n\n\t// if node is already upgraded, then no need to pre-pull\n\t// Return true so that we reconcile any changes that somehow could have happened.\n\texistingNodeVersion := ovn.NodeUpdateStatus.Version\n\tif existingNodeVersion == releaseVersion {\n\t\tklog.V(3).Infof(\"OVN-Kubernetes node is already in the expected release.\")\n\t\treturn true, false\n\t}\n\n\t// at this point, we've determined we need an upgrade\n\tif ovn.PrePullerUpdateStatus == nil {\n\t\tklog.Infof(\"Rolling out the no-op prepuller daemonset...\")\n\t\treturn false, true\n\t}\n\n\t// If pre-puller just pulled a new upgrade image and then we\n\t// downgrade immediately, we might wanna make prepuller pull the downgrade image.\n\texistingPrePullerVersion := ovn.PrePullerUpdateStatus.Version\n\tif existingPrePullerVersion != releaseVersion {\n\t\tklog.Infof(\"Rendering prepuller daemonset to update its image...\")\n\t\treturn false, true\n\t}\n\n\tif ovn.PrePullerUpdateStatus.Progressing {\n\t\tklog.Infof(\"Waiting for ovnkube-upgrades-prepuller daemonset to finish pulling the image before updating node\")\n\t\treturn false, true\n\t}\n\n\tklog.Infof(\"OVN-Kube upgrades-prepuller daemonset rollout complete, now starting node rollouts\")\n\treturn true, false\n}",
"func isBlockValid(newBlock, oldBlock Block) bool {\n\tif oldBlock.Index+1 != newBlock.Index {\n\t\treturn false\n\t}\n\n\tif oldBlock.Hash != newBlock.PrevHash {\n\t\treturn false\n\t}\n\n\tif calculateBlockHash(newBlock) != newBlock.Hash {\n\t\treturn false\n\t}\n\n\treturn true\n}"
] |
ABI returns the ABI associated with a name
|
func (r *Resolver) ABI(name string) (string, error) {
contentTypes := big.NewInt(3)
nameHash, err := NameHash(name)
if err != nil {
return "", err
}
contentType, data, err := r.Contract.ABI(nil, nameHash, contentTypes)
var abi string
if err == nil {
if contentType.Cmp(big.NewInt(1)) == 0 {
// Uncompressed JSON
abi = string(data)
} else if contentType.Cmp(big.NewInt(2)) == 0 {
// Zlib-compressed JSON
b := bytes.NewReader(data)
var z io.ReadCloser
z, err = zlib.NewReader(b)
if err != nil {
return "", err
}
defer z.Close()
var uncompressed []byte
uncompressed, err = ioutil.ReadAll(z)
if err != nil {
return "", err
}
abi = string(uncompressed)
}
}
return abi, nil
}
|
[
"func (*bzlLibraryLang) Name() string { return languageName }",
"func BiosName() (string, error) {\n\t/*\n\t\tSample output of 'wmic bios get manufacturer'\n\n\t\tManufacturer\n\t\tLENOVO\n\t*/\n\tresult, err := readAndParseFromCommandLine(biosNameCmd)\n\tif err != nil {\n\t\treturn \"-1\", err\n\t}\n\n\tbiosName := \"\"\n\tif len(result) > 1 {\n\t\tbiosName = result[1]\n\t}\n\treturn biosName, err\n}",
"func (ba *PackageArchive) Name() string {\n\treturn ba.archiveType\n}",
"func (a *ABI) GetMethod(name string) *Method {\n\tfor i := range a.Methods {\n\t\tif a.Methods[i].Name == name {\n\t\t\treturn &a.Methods[i]\n\t\t}\n\t}\n\treturn nil\n}",
"func (c BitfinexCrawler) GetName() string {\r\n\treturn BITFINEX_MODULE_NAME\r\n}",
"func mkobjabi(file string) {\n\tvar buf bytes.Buffer\n\tfmt.Fprintf(&buf, \"// Code generated by go tool dist; DO NOT EDIT.\\n\")\n\tfmt.Fprintln(&buf)\n\tfmt.Fprintf(&buf, \"package objabi\\n\")\n\tfmt.Fprintln(&buf)\n\tfmt.Fprintf(&buf, \"const stackGuardMultiplierDefault = %d\\n\", stackGuardMultiplierDefault())\n\n\twritefile(buf.String(), file, writeSkipSame)\n}",
"func (r *SnpDerivedKeyRespABI) ABI() BinaryConversion { return r }"
] |
dfs_traverse looks for the end point and returns a path or an error if we cannot get there
|
func (m *Maze) dfs_traverse(cp *Point, tr *traverser, path []*Point) ([]*Point, error) {
// we made it to the destination! return the path and get out!
if cp.IsDestination {
return path, nil
}
// nothing more to visit and there was no destination
if tr.isVisitComplete() {
return []*Point{}, errors.New("destination unreachable")
}
// change the current point - DFS pops the last node, as a stack
cp = tr.popLastNode()
// next point has already been visited
if tr.isNodeVisited(cp) {
return m.dfs_traverse(cp, tr, path)
}
tr.enqueueNodes(m.getLegalNextMoves(cp))
tr.visitNode(cp)
newPath := append(path, cp)
return m.dfs_traverse(cp, tr, newPath)
}
|
[
"func (d *Dijkstra) PathToTarget() ([]graphEdge, error) {\n\tif d.err != nil {\n\t\treturn []graphEdge{}, d.err\n\t}\n\n\tvar path []graphEdge\n\tidx := d.target\n\tfor {\n\t\tif idx == d.source {\n\t\t\tbreak\n\t\t}\n\t\te, ok := d.spt[idx]\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tpath = append(path, e)\n\t\tidx = e.From\n\t}\n\n\treturn reversePath(path), nil\n}",
"func (branch *Branch) traverse(lookup string) <-chan *Branch {\n\tch := make(chan *Branch)\n\tok := false\n\troute := strings.Split(lookup, \".\")\n\tgo func() {\n\t\tdefer close(ch)\n\t\tfor _, name := range route {\n\t\t\tif branch == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tbranch, ok = branch.Get(name)\n\t\t\tif !ok {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tch <- branch\n\t\t}\n\t}()\n\treturn ch\n}",
"func (stack *Stack) FindHandlePath(handle []lr.Symbol, skip int) NodePath {\n\tpath, ok := collectHandleBranch(stack.tos, handle, len(handle), &skip)\n\tif ok {\n\t\tT().Debugf(\"found a handle %v\", path)\n\t}\n\treturn path\n}",
"func (bfs *BFS) HasPathTo(v int) bool {\n\treturn bfs.marked[v]\n}",
"func (w *Walker) Seek(visitor Visitor) error {\n\n\tif visitor == nil {\n\t\treturn ErrNilVisitor\n\t\t// Although valid, there is no point in calling `Seek` without\n\t\t// any extra logic, it would just go down to the leftmost leaf,\n\t\t// so this would probably be a user error.\n\t}\n\n\t// Go down until it the desired node is found (that will be signaled\n\t// pausing the seek with `errPauseWalkOperation`) or a leaf node is\n\t// reached (end of the DAG).\n\tfor {\n\t\terr := w.down(visitor)\n\n\t\tif err == errPauseWalkOperation {\n\t\t\treturn nil\n\t\t\t// Found the node, `errPauseWalkOperation` is just an internal\n\t\t\t// error to signal to pause, don't pass it along.\n\t\t}\n\n\t\tif err == ErrDownNoChild {\n\t\t\treturn nil\n\t\t\t// Can't keep going down from this node, either at a leaf node\n\t\t\t// or the `Visitor` has moved the child index past the\n\t\t\t// available index (probably because none indicated that the\n\t\t\t// target node could be down from there).\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t\t// `down()` is the only movement that can return *any* error.\n\t\t}\n\t}\n\t// TODO: Copied from the first part of `Iterate()` (although conceptually\n\t// different from it). Could this be encapsulated in a function to avoid\n\t// repeating code? The way the pause signal is handled it wouldn't seem\n\t// very useful: the `errPauseWalkOperation` needs to be processed at this\n\t// depth to return from the function (and pause the seek, returning\n\t// from another function here wouldn't cause it to stop).\n}",
"func (g *Graph) HasPath (startS, endS string) (bool) {\n var q Queue\n\n // add start node\n // the biggie is that you can't create this.\n // you have to take it from the graph...\n //fmt.Println(startV)\n q.Add(g.FindVertex(startS))\n //fmt.Println(\">>>\",g.FindVertex(startS))\n\n curV := q.Remove()\n //fmt.Println(curV)\n for ; curV.name != \"\" ; curV = q.Remove() {\n // has this as val before.\n // this was wrong. should be the graph node.\n // and here too...\n if curV.name == endS {\n return true\n } \n for i :=0 ; i<len(curV.children) ; i++ {\n v := g.FindVertex(curV.children[i].name)\n //fmt.Println(\">\", v)\n q.Add(v)\n }\n }\n\n // nothing found...\n return false\n}",
"func (g *Graph) Bfs(root string, goal string, maxDepth int) (bool, *Vertex) {\n\n\t// Preconditions\n\tif len(root) == 0 {\n\t\tlog.Fatal(\"Root vertex is empty\")\n\t}\n\n\tif len(goal) == 0 {\n\t\tlog.Fatal(\"Goal vertex is empty\")\n\t}\n\n\tif maxDepth < 0 {\n\t\tlog.Fatalf(\"Maximum depth is invalid: %v\\n\", maxDepth)\n\t}\n\n\t// Set of the identifiers of discovered vertices\n\tdiscovered := set.New()\n\tdiscovered.Insert(root)\n\n\t// Queue to hold the vertices to visit\n\tq := queue.New()\n\tq.Enqueue(NewVertex(root, 0))\n\n\t// While there are vertices in the queue to check\n\tfor q.Len() > 0 {\n\n\t\t// Take a vertex from the queue\n\t\tv := q.Dequeue().(Vertex)\n\n\t\t// If the vertex is the goal, then return\n\t\tif v.Identifier == goal {\n\t\t\treturn true, &v\n\t\t}\n\n\t\t// Depth of any vertices adjacent to v\n\t\tnewDepth := v.Depth + 1\n\n\t\t// If the adjacent vertices are within the range\n\t\tif newDepth <= maxDepth {\n\n\t\t\t// Get a list of the adjacent vertices\n\t\t\tw := g.AdjacentTo(v.Identifier)\n\n\t\t\t// Walk through each of the adjacent vertices\n\t\t\tfor _, adjIdentifier := range w {\n\n\t\t\t\t// If the vertex hasn't been seen before\n\t\t\t\tif !discovered.Has(adjIdentifier) {\n\n\t\t\t\t\t// Add the identifier to the set of discovered identifiers\n\t\t\t\t\tdiscovered.Insert(adjIdentifier)\n\n\t\t\t\t\t// Put the vertex on the queue\n\t\t\t\t\tnewVertex := NewVertex(adjIdentifier, newDepth)\n\t\t\t\t\tnewVertex.Parent = &v\n\t\t\t\t\tq.Enqueue(newVertex)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// The goal was not found\n\treturn false, nil\n}"
] |
SetStat set all stat cache(redis)
|
func (r *RPC) SetStat(c context.Context, arg *model.ArgStats, res *struct{}) (err error) {
err = r.s.SetStat(c, arg.Aid, arg.Stats)
return
}
|
[
"func (fc *fileCache) Set(key, value string, ttl int) {\n\tfc.cache.Set(key, &cacheObject{\n\t\tValue: value,\n\t\tTimestamp: time.Now().Unix(),\n\t\tTTL: ttl,\n\t})\n\tfc.dirty = true\n}",
"func (c *FakeZkConn) Set(path string, data []byte, version int32) (*zk.Stat, error) {\n\tc.history.addToHistory(\"Set\", path, data, version)\n\treturn nil, nil\n}",
"func Set(ctx context.Context, key string, value string) error {\n\treturn redisClient().Set(ctx, key, value, 1*time.Hour).Err()\n}",
"func (c *cache) set(key string, val interface{}, d time.Duration) {\n\tvar e int64\n\tif d == DefaultExpiration {\n\t\td = c.defaultExpiration\n\t}\n\tif d > 0 {\n\t\te = time.Now().Add(d).UnixNano()\n\t}\n\n\tc.items[key] = Item{\n\t\tObject: val,\n\t\tExpiration: e,\n\t}\n}",
"func (c *Context) Set(stat string, value float64) {\n\tfor _, sink := range c.sinks {\n\t\tsink.Set(c, stat, value)\n\t}\n}",
"func Set(c echo.Context) error {\n\tkey, value := c.Param(\"key\"), c.Param(\"value\")\n\tcc := c.(*Context)\n\tcache := dis.NewCache(cc.Redis)\n\terr := cache.StoreString(key, value)\n\tif err != nil {\n\t\treturn cc.JSONBlob(http.StatusInternalServerError, Err(err))\n\t}\n\treturn cc.JSONBlob(http.StatusOK, []byte(OKMessage))\n}",
"func (mem *Memcache) Set(key string, val interface{}, timeout time.Duration) (err error) {\n\tvar data []byte\n\tif data, err = json.Marshal(val); err != nil {\n\t\treturn err\n\t}\n\n\titem := &memcache.Item{Key: key, Value: data, Expiration: int32(timeout / time.Second)}\n\treturn mem.conn.Set(item)\n}"
] |
Value implementation of driver.Valuer
|
func (number Number) Value() (value driver.Value, err error) {
if number == "" {
return "", nil
}
if err = number.Validate(); err != nil {
return nil, err
}
return number.String(), nil
}
|
[
"func (i Item) Value() interface{} {\n\treturn i.v\n}",
"func (v Value2) Get() any { return int(v) }",
"func (u UUID) Value() (driver.Value, error) {\n\treturn []byte(u), nil\n}",
"func (v Vars) Value() (driver.Value, error) {\n\tm := make(map[string]sql.NullString)\n\n\tfor k, v := range v {\n\t\tm[string(k)] = sql.NullString{\n\t\t\tValid: true,\n\t\t\tString: *v,\n\t\t}\n\t}\n\n\th := hstore.Hstore{\n\t\tMap: m,\n\t}\n\n\treturn h.Value()\n}",
"func (v *vl) Val() interface{} {\n\treturn v.v\n}",
"func (entry persistedEntry) Value(index int64) (*value, error) {\n\tif entry.DataLen > modules.RegistryDataSize {\n\t\terr := errors.New(\"Value: entry has a too big data len\")\n\t\tbuild.Critical(err)\n\t\treturn nil, err\n\t}\n\tspk, err := newSiaPublicKey(entry.Key)\n\tif err != nil {\n\t\treturn nil, errors.AddContext(err, \"Value: failed to convert compressed key to SiaPublicKey\")\n\t}\n\tswitch entry.Type {\n\tcase modules.RegistryTypeInvalid:\n\t\treturn nil, modules.ErrInvalidRegistryEntryType\n\tcase modules.RegistryTypeWithPubkey:\n\tcase modules.RegistryTypeWithoutPubkey:\n\tdefault:\n\t\treturn nil, modules.ErrInvalidRegistryEntryType\n\t}\n\treturn &value{\n\t\tentryType: entry.Type,\n\t\tkey: spk,\n\t\ttweak: entry.Tweak,\n\t\texpiry: types.BlockHeight(entry.Expiry),\n\t\tdata: entry.Data[:entry.DataLen],\n\t\trevision: entry.Revision,\n\t\tsignature: entry.Signature,\n\t\tstaticIndex: index,\n\t}, nil\n}",
"func tryConcreteValue(v interface{}) Value {\n\tswitch val := v.(type) {\n\tcase Value:\n\t\treturn val\n\tcase int:\n\t\treturn IntegerValue(val)\n\tcase int64:\n\t\treturn LongValue(val)\n\tcase string:\n\t\treturn StringValue(val)\n\tcase []interface{}:\n\t\treturn ListValue(val)\n\tcase map[string]interface{}:\n\t\treturn JsonValue(val)\n\tcase map[interface{}]interface{}:\n\t\treturn NewMapValue(val)\n\tcase nil:\n\t\treturn nullValue\n\tcase []Value:\n\t\treturn NewValueArray(val)\n\tcase []byte:\n\t\treturn BytesValue(val)\n\tcase int8:\n\t\treturn IntegerValue(int(val))\n\tcase int16:\n\t\treturn IntegerValue(int(val))\n\tcase int32:\n\t\treturn IntegerValue(int(val))\n\tcase uint8: // byte supported here\n\t\treturn IntegerValue(int(val))\n\tcase uint16:\n\t\treturn IntegerValue(int(val))\n\tcase uint32:\n\t\treturn IntegerValue(int(val))\n\tcase float32:\n\t\treturn FloatValue(float64(val))\n\tcase float64:\n\t\treturn FloatValue(val)\n\tcase uint:\n\t\t// if it doesn't overflow int64, it is OK\n\t\tif int64(val) >= 0 {\n\t\t\treturn LongValue(int64(val))\n\t\t}\n\tcase MapIter:\n\t\treturn NewMapperValue(val)\n\tcase ListIter:\n\t\treturn NewListerValue(val)\n\tcase AerospikeBlob:\n\t\treturn NewBlobValue(val)\n\n\t/*\n\t\tThe following cases will try to avoid using reflection by matching against the\n\t\tinternal generic types.\n\t\tIf you have custom type aliases in your code, you can use the same aerospike types to cast your type into,\n\t\tto avoid hitting the reflection.\n\t*/\n\tcase []string:\n\t\treturn NewListerValue(stringSlice(val))\n\tcase []int:\n\t\treturn NewListerValue(intSlice(val))\n\tcase []int8:\n\t\treturn NewListerValue(int8Slice(val))\n\tcase []int16:\n\t\treturn NewListerValue(int16Slice(val))\n\tcase []int32:\n\t\treturn NewListerValue(int32Slice(val))\n\tcase []int64:\n\t\treturn NewListerValue(int64Slice(val))\n\tcase []uint16:\n\t\treturn NewListerValue(uint16Slice(val))\n\tcase []uint32:\n\t\treturn NewListerValue(uint32Slice(val))\n\tcase []uint64:\n\t\treturn NewListerValue(uint64Slice(val))\n\tcase []float32:\n\t\treturn NewListerValue(float32Slice(val))\n\tcase []float64:\n\t\treturn NewListerValue(float64Slice(val))\n\tcase map[string]string:\n\t\treturn NewMapperValue(stringStringMap(val))\n\tcase map[string]int:\n\t\treturn NewMapperValue(stringIntMap(val))\n\tcase map[string]int8:\n\t\treturn NewMapperValue(stringInt8Map(val))\n\tcase map[string]int16:\n\t\treturn NewMapperValue(stringInt16Map(val))\n\tcase map[string]int32:\n\t\treturn NewMapperValue(stringInt32Map(val))\n\tcase map[string]int64:\n\t\treturn NewMapperValue(stringInt64Map(val))\n\tcase map[string]uint16:\n\t\treturn NewMapperValue(stringUint16Map(val))\n\tcase map[string]uint32:\n\t\treturn NewMapperValue(stringUint32Map(val))\n\tcase map[string]float32:\n\t\treturn NewMapperValue(stringFloat32Map(val))\n\tcase map[string]float64:\n\t\treturn NewMapperValue(stringFloat64Map(val))\n\tcase map[int]string:\n\t\treturn NewMapperValue(intStringMap(val))\n\tcase map[int]int:\n\t\treturn NewMapperValue(intIntMap(val))\n\tcase map[int]int8:\n\t\treturn NewMapperValue(intInt8Map(val))\n\tcase map[int]int16:\n\t\treturn NewMapperValue(intInt16Map(val))\n\tcase map[int]int32:\n\t\treturn NewMapperValue(intInt32Map(val))\n\tcase map[int]int64:\n\t\treturn NewMapperValue(intInt64Map(val))\n\tcase map[int]uint16:\n\t\treturn NewMapperValue(intUint16Map(val))\n\tcase map[int]uint32:\n\t\treturn NewMapperValue(intUint32Map(val))\n\tcase map[int]float32:\n\t\treturn NewMapperValue(intFloat32Map(val))\n\tcase map[int]float64:\n\t\treturn NewMapperValue(intFloat64Map(val))\n\tcase map[int]interface{}:\n\t\treturn NewMapperValue(intInterfaceMap(val))\n\tcase map[int8]string:\n\t\treturn NewMapperValue(int8StringMap(val))\n\tcase map[int8]int:\n\t\treturn NewMapperValue(int8IntMap(val))\n\tcase map[int8]int8:\n\t\treturn NewMapperValue(int8Int8Map(val))\n\tcase map[int8]int16:\n\t\treturn NewMapperValue(int8Int16Map(val))\n\tcase map[int8]int32:\n\t\treturn NewMapperValue(int8Int32Map(val))\n\tcase map[int8]int64:\n\t\treturn NewMapperValue(int8Int64Map(val))\n\tcase map[int8]uint16:\n\t\treturn NewMapperValue(int8Uint16Map(val))\n\tcase map[int8]uint32:\n\t\treturn NewMapperValue(int8Uint32Map(val))\n\tcase map[int8]float32:\n\t\treturn NewMapperValue(int8Float32Map(val))\n\tcase map[int8]float64:\n\t\treturn NewMapperValue(int8Float64Map(val))\n\tcase map[int8]interface{}:\n\t\treturn NewMapperValue(int8InterfaceMap(val))\n\tcase map[int16]string:\n\t\treturn NewMapperValue(int16StringMap(val))\n\tcase map[int16]int:\n\t\treturn NewMapperValue(int16IntMap(val))\n\tcase map[int16]int8:\n\t\treturn NewMapperValue(int16Int8Map(val))\n\tcase map[int16]int16:\n\t\treturn NewMapperValue(int16Int16Map(val))\n\tcase map[int16]int32:\n\t\treturn NewMapperValue(int16Int32Map(val))\n\tcase map[int16]int64:\n\t\treturn NewMapperValue(int16Int64Map(val))\n\tcase map[int16]uint16:\n\t\treturn NewMapperValue(int16Uint16Map(val))\n\tcase map[int16]uint32:\n\t\treturn NewMapperValue(int16Uint32Map(val))\n\tcase map[int16]float32:\n\t\treturn NewMapperValue(int16Float32Map(val))\n\tcase map[int16]float64:\n\t\treturn NewMapperValue(int16Float64Map(val))\n\tcase map[int16]interface{}:\n\t\treturn NewMapperValue(int16InterfaceMap(val))\n\tcase map[int32]string:\n\t\treturn NewMapperValue(int32StringMap(val))\n\tcase map[int32]int:\n\t\treturn NewMapperValue(int32IntMap(val))\n\tcase map[int32]int8:\n\t\treturn NewMapperValue(int32Int8Map(val))\n\tcase map[int32]int16:\n\t\treturn NewMapperValue(int32Int16Map(val))\n\tcase map[int32]int32:\n\t\treturn NewMapperValue(int32Int32Map(val))\n\tcase map[int32]int64:\n\t\treturn NewMapperValue(int32Int64Map(val))\n\tcase map[int32]uint16:\n\t\treturn NewMapperValue(int32Uint16Map(val))\n\tcase map[int32]uint32:\n\t\treturn NewMapperValue(int32Uint32Map(val))\n\tcase map[int32]float32:\n\t\treturn NewMapperValue(int32Float32Map(val))\n\tcase map[int32]float64:\n\t\treturn NewMapperValue(int32Float64Map(val))\n\tcase map[int32]interface{}:\n\t\treturn NewMapperValue(int32InterfaceMap(val))\n\tcase map[int64]string:\n\t\treturn NewMapperValue(int64StringMap(val))\n\tcase map[int64]int:\n\t\treturn NewMapperValue(int64IntMap(val))\n\tcase map[int64]int8:\n\t\treturn NewMapperValue(int64Int8Map(val))\n\tcase map[int64]int16:\n\t\treturn NewMapperValue(int64Int16Map(val))\n\tcase map[int64]int32:\n\t\treturn NewMapperValue(int64Int32Map(val))\n\tcase map[int64]int64:\n\t\treturn NewMapperValue(int64Int64Map(val))\n\tcase map[int64]uint16:\n\t\treturn NewMapperValue(int64Uint16Map(val))\n\tcase map[int64]uint32:\n\t\treturn NewMapperValue(int64Uint32Map(val))\n\tcase map[int64]float32:\n\t\treturn NewMapperValue(int64Float32Map(val))\n\tcase map[int64]float64:\n\t\treturn NewMapperValue(int64Float64Map(val))\n\tcase map[int64]interface{}:\n\t\treturn NewMapperValue(int64InterfaceMap(val))\n\tcase map[uint16]string:\n\t\treturn NewMapperValue(uint16StringMap(val))\n\tcase map[uint16]int:\n\t\treturn NewMapperValue(uint16IntMap(val))\n\tcase map[uint16]int8:\n\t\treturn NewMapperValue(uint16Int8Map(val))\n\tcase map[uint16]int16:\n\t\treturn NewMapperValue(uint16Int16Map(val))\n\tcase map[uint16]int32:\n\t\treturn NewMapperValue(uint16Int32Map(val))\n\tcase map[uint16]int64:\n\t\treturn NewMapperValue(uint16Int64Map(val))\n\tcase map[uint16]uint16:\n\t\treturn NewMapperValue(uint16Uint16Map(val))\n\tcase map[uint16]uint32:\n\t\treturn NewMapperValue(uint16Uint32Map(val))\n\tcase map[uint16]float32:\n\t\treturn NewMapperValue(uint16Float32Map(val))\n\tcase map[uint16]float64:\n\t\treturn NewMapperValue(uint16Float64Map(val))\n\tcase map[uint16]interface{}:\n\t\treturn NewMapperValue(uint16InterfaceMap(val))\n\tcase map[uint32]string:\n\t\treturn NewMapperValue(uint32StringMap(val))\n\tcase map[uint32]int:\n\t\treturn NewMapperValue(uint32IntMap(val))\n\tcase map[uint32]int8:\n\t\treturn NewMapperValue(uint32Int8Map(val))\n\tcase map[uint32]int16:\n\t\treturn NewMapperValue(uint32Int16Map(val))\n\tcase map[uint32]int32:\n\t\treturn NewMapperValue(uint32Int32Map(val))\n\tcase map[uint32]int64:\n\t\treturn NewMapperValue(uint32Int64Map(val))\n\tcase map[uint32]uint16:\n\t\treturn NewMapperValue(uint32Uint16Map(val))\n\tcase map[uint32]uint32:\n\t\treturn NewMapperValue(uint32Uint32Map(val))\n\tcase map[uint32]float32:\n\t\treturn NewMapperValue(uint32Float32Map(val))\n\tcase map[uint32]float64:\n\t\treturn NewMapperValue(uint32Float64Map(val))\n\tcase map[uint32]interface{}:\n\t\treturn NewMapperValue(uint32InterfaceMap(val))\n\tcase map[float32]string:\n\t\treturn NewMapperValue(float32StringMap(val))\n\tcase map[float32]int:\n\t\treturn NewMapperValue(float32IntMap(val))\n\tcase map[float32]int8:\n\t\treturn NewMapperValue(float32Int8Map(val))\n\tcase map[float32]int16:\n\t\treturn NewMapperValue(float32Int16Map(val))\n\tcase map[float32]int32:\n\t\treturn NewMapperValue(float32Int32Map(val))\n\tcase map[float32]int64:\n\t\treturn NewMapperValue(float32Int64Map(val))\n\tcase map[float32]uint16:\n\t\treturn NewMapperValue(float32Uint16Map(val))\n\tcase map[float32]uint32:\n\t\treturn NewMapperValue(float32Uint32Map(val))\n\tcase map[float32]float32:\n\t\treturn NewMapperValue(float32Float32Map(val))\n\tcase map[float32]float64:\n\t\treturn NewMapperValue(float32Float64Map(val))\n\tcase map[float32]interface{}:\n\t\treturn NewMapperValue(float32InterfaceMap(val))\n\tcase map[float64]string:\n\t\treturn NewMapperValue(float64StringMap(val))\n\tcase map[float64]int:\n\t\treturn NewMapperValue(float64IntMap(val))\n\tcase map[float64]int8:\n\t\treturn NewMapperValue(float64Int8Map(val))\n\tcase map[float64]int16:\n\t\treturn NewMapperValue(float64Int16Map(val))\n\tcase map[float64]int32:\n\t\treturn NewMapperValue(float64Int32Map(val))\n\tcase map[float64]int64:\n\t\treturn NewMapperValue(float64Int64Map(val))\n\tcase map[float64]uint16:\n\t\treturn NewMapperValue(float64Uint16Map(val))\n\tcase map[float64]uint32:\n\t\treturn NewMapperValue(float64Uint32Map(val))\n\tcase map[float64]float32:\n\t\treturn NewMapperValue(float64Float32Map(val))\n\tcase map[float64]float64:\n\t\treturn NewMapperValue(float64Float64Map(val))\n\tcase map[float64]interface{}:\n\t\treturn NewMapperValue(float64InterfaceMap(val))\n\tcase map[string]uint64:\n\t\treturn NewMapperValue(stringUint64Map(val))\n\tcase map[int]uint64:\n\t\treturn NewMapperValue(intUint64Map(val))\n\tcase map[int8]uint64:\n\t\treturn NewMapperValue(int8Uint64Map(val))\n\tcase map[int16]uint64:\n\t\treturn NewMapperValue(int16Uint64Map(val))\n\tcase map[int32]uint64:\n\t\treturn NewMapperValue(int32Uint64Map(val))\n\tcase map[int64]uint64:\n\t\treturn NewMapperValue(int64Uint64Map(val))\n\tcase map[uint16]uint64:\n\t\treturn NewMapperValue(uint16Uint64Map(val))\n\tcase map[uint32]uint64:\n\t\treturn NewMapperValue(uint32Uint64Map(val))\n\tcase map[float32]uint64:\n\t\treturn NewMapperValue(float32Uint64Map(val))\n\tcase map[float64]uint64:\n\t\treturn NewMapperValue(float64Uint64Map(val))\n\tcase map[uint64]string:\n\t\treturn NewMapperValue(uint64StringMap(val))\n\tcase map[uint64]int:\n\t\treturn NewMapperValue(uint64IntMap(val))\n\tcase map[uint64]int8:\n\t\treturn NewMapperValue(uint64Int8Map(val))\n\tcase map[uint64]int16:\n\t\treturn NewMapperValue(uint64Int16Map(val))\n\tcase map[uint64]int32:\n\t\treturn NewMapperValue(uint64Int32Map(val))\n\tcase map[uint64]int64:\n\t\treturn NewMapperValue(uint64Int64Map(val))\n\tcase map[uint64]uint16:\n\t\treturn NewMapperValue(uint64Uint16Map(val))\n\tcase map[uint64]uint32:\n\t\treturn NewMapperValue(uint64Uint32Map(val))\n\tcase map[uint64]uint64:\n\t\treturn NewMapperValue(uint64Uint64Map(val))\n\tcase map[uint64]float32:\n\t\treturn NewMapperValue(uint64Float32Map(val))\n\tcase map[uint64]float64:\n\t\treturn NewMapperValue(uint64Float64Map(val))\n\tcase map[uint64]interface{}:\n\t\treturn NewMapperValue(uint64InterfaceMap(val))\n\t}\n\n\treturn nil\n}"
] |
RefreshOAuth2Token will fetch a new API Key / Access Token from a refresh token
|
func RefreshOAuth2Token(client *http.Client, channel string, provider string, refreshToken string) (string, error) {
theurl := sdk.JoinURL(
api.BackendURL(api.AuthService, channel),
fmt.Sprintf("oauth2/%s/refresh/%s", provider, url.PathEscape(refreshToken)),
)
req, err := http.NewRequest(http.MethodGet, theurl, nil)
if err != nil {
return "", err
}
api.SetUserAgent(req)
req.Header.Set("Accept", "application/json")
resp, err := client.Do(req)
if err != nil {
return "", err
}
var res struct {
AccessToken string `json:"access_token"`
}
defer resp.Body.Close()
if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
return "", err
}
if res.AccessToken == "" {
return "", errors.New("new token not returned, refresh_token might be bad")
}
return res.AccessToken, nil
}
|
[
"func (c *Client) Refresh(refreshToken string) (*gocloak.JWT, error) {\n\t// c.l.Started(\"Refresh\")\n\tt, err := c.kc.RefreshToken(c.ctx, refreshToken, c.client.clientID, c.cfg.ClientSecret, c.realm)\n\tif err != nil {\n\t\t// c.l.Errorf(\"Refresh\", err, \"failed refresh\")\n\t\treturn nil, err\n\t}\n\t// c.l.Completed(\"Refresh\")\n\treturn t, nil\n}",
"func (p *Provider) RefreshOAuth1Token(session *Session) error {\n\tnewAccessToken, err := p.consumer.RefreshToken(session.AccessToken)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsession.AccessToken = newAccessToken\n\tsession.AccessTokenExpires = time.Now().UTC().Add(30 * time.Minute)\n\treturn nil\n}",
"func RefreshOAuthV2TokenContext(ctx context.Context, client httpClient, clientID, clientSecret, refreshToken string) (resp *OAuthV2Response, err error) {\n\tvalues := url.Values{\n\t\t\"client_id\": {clientID},\n\t\t\"client_secret\": {clientSecret},\n\t\t\"refresh_token\": {refreshToken},\n\t\t\"grant_type\": {\"refresh_token\"},\n\t}\n\tresponse := &OAuthV2Response{}\n\tif err = postForm(ctx, client, APIURL+\"oauth.v2.access\", values, response, discard{}); err != nil {\n\t\treturn nil, err\n\t}\n\treturn response, response.Err()\n}",
"func (mp *MP) RefreshToken(retry int) (err error) {\n\tretry--\n\n\tfor {\n\t\tselect {\n\t\tcase <-time.Tick(mp.token.expire):\n\t\t\tif err = mp.requestToken(); err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tmp.token.expire = 3 * time.Second\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif err == nil || retry == 0 {\n\t\t\treturn\n\t\t}\n\t\tif retry > 0 {\n\t\t\tretry--\n\t\t}\n\t}\n\n\treturn\n}",
"func (a *Auth) RefreshToken(refreshToken string) (TokenInfo, error) {\n\tuserID, err := a.ParseUserID(refreshToken, true)\n\tif err != nil {\n\t\tif err == errors.ErrTokenExpired {\n\t\t\treturn a.GenerateToken(userID)\n\t\t}\n\t\treturn nil, err\n\t}\n\n\taccessToken, err := a.generateAccess(userID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttokenInfo := &tokenInfo{\n\t\tTokenType: a.opts.tokenType,\n\t\tAccessToken: accessToken,\n\t\tRefreshToken: refreshToken,\n\t}\n\treturn tokenInfo, nil\n}",
"func (repo *TokenRepository) GenerateRefreshToken(claims *BaseClaims) (string, error) {\n\ttoken := jwtGo.NewWithClaims(jwtGo.SigningMethodHS256, claims)\n\ttokenString, err := token.SignedString(repo.refreshKey)\n\n\treturn tokenString, err\n}",
"func (a *AuthController) RefreshAccessToken(c *gin.Context) {\n\t// Retrieve the body\n\trefreshingTokenForm := validator2.RefreshingTokenForm{}\n\tif err := c.ShouldBindJSON(&refreshingTokenForm); err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n\t\treturn\n\t}\n\n\t// Validate the form\n\tvalidate := validator.New()\n\terr := validate.Struct(refreshingTokenForm)\n\n\t// Check if the form is valid\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n\t\treturn\n\t}\n\n\t// Get the service linked to the token\n\tuser, err := a.userService.FetchUserFromRefreshToken(refreshingTokenForm.RefreshToken)\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\n\t\t\t\"error\": \"Impossible to retrieve the user\",\n\t\t\t\"code\": codeErrorServer,\n\t\t})\n\t\treturn\n\t}\n\n\t// Create the access token\n\taccessToken, err := createAccessToken(user)\n\n\t// Send an error if the tokens didn't sign well\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\n\t\t\t\"error\": \"Impossible to generate the access token\",\n\t\t\t\"code\": codeErrorServer,\n\t\t})\n\t\treturn\n\t}\n\n\t// Send the tokens\n\tc.JSONP(http.StatusOK, gin.H{\n\t\t\"accessToken\": accessToken,\n\t\t\"expiresIn\": config.Conf.JwtTokenExpiration,\n\t})\n}"
] |
BazelMetricsFilename returns the bazel profile filename based on the action name. This is to help to store a set of bazel profiles since bazel may execute multiple times during a single build.
|
func BazelMetricsFilename(s SharedPaths, actionName bazel.RunName) string {
return filepath.Join(s.BazelMetricsDir(), actionName.String()+"_bazel_profile.gz")
}
|
[
"func (o MrScalarCoreScalingDownPolicyOutput) MetricName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v MrScalarCoreScalingDownPolicy) string { return v.MetricName }).(pulumi.StringOutput)\n}",
"func (o ElastigroupScalingUpPolicyOutput) MetricName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ElastigroupScalingUpPolicy) string { return v.MetricName }).(pulumi.StringOutput)\n}",
"func TestGenerateMetricName(t *testing.T) {\n\tpacket := collectd.Packet{\n\t\tPlugin: \"irq\",\n\t\tType: \"irq\",\n\t\tTypeInstance: \"7\",\n\t}\n\tname := coco.MetricName(packet)\n\texpected := 2\n\tactual := strings.Count(name, \"/\")\n\tif actual != expected {\n\t\tt.Errorf(\"Expected %d / separators, got %d\", expected, actual)\n\t}\n\n\tpacket = collectd.Packet{\n\t\tPlugin: \"load\",\n\t\tType: \"load\",\n\t}\n\tname = coco.MetricName(packet)\n\texpected = 1\n\tactual = strings.Count(name, \"/\")\n\tif actual != expected {\n\t\tt.Errorf(\"Expected %d / separators, got %d\", expected, actual)\n\t}\n}",
"func GetMetricsFromFile(metricType string) (Metrics, error) {\n\tabsPath, err := os.Executable()\n\tif err != nil {\n\t\tlog.Error(\"An unknown error occurred: \", err)\n\t\treturn Metrics{}, err\n\t}\n\n\tfilename := filepath.Dir(absPath) + string(os.PathSeparator) + \"metrics\" + string(os.PathSeparator) + metricType + \".json\"\n\tif _, err := os.Stat(filename); err != nil {\n\t\tlog.Error(\"Could not find metrics file \", filename)\n\t\treturn Metrics{}, err\n\t}\n\n\trawMetrics, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tlog.Error(\"Could not read file \", filename)\n\t\treturn Metrics{}, err\n\t}\n\n\tvar metrics Metrics\n\terr = json.Unmarshal(rawMetrics, &metrics)\n\tif err != nil {\n\t\tlog.Error(\"Could not unmarshal file \", filename)\n\t\treturn Metrics{}, err\n\t}\n\n\tlog.Debug(filename, \" loaded\")\n\n\treturn metrics, nil\n}",
"func metricString(ns []string) string {\n\treturn strings.Join(ns, \"/\")\n}",
"func GenerateFileName(dir, name, issueNumber, format string) string {\n\treturn fmt.Sprintf(\"%s/%s-%s.%s\", dir, name, issueNumber, format)\n}",
"func (config *Configuration) PIDFileName() string {\n name := \"~/.run/\" + config.ServiceName + \".pid\"\n name = Util.AbsolutePath(name)\n return name\n}"
] |
IDsX is like IDs, but panics if an error occurs.
|
func (fdq *FurnitureDetailQuery) IDsX(ctx context.Context) []int {
ids, err := fdq.IDs(ctx)
if err != nil {
panic(err)
}
return ids
}
|
[
"func (liq *LineItemQuery) IDsX(ctx context.Context) []uuid.UUID {\n\tids, err := liq.IDs(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn ids\n}",
"func (ecpq *EntityContactPointQuery) IDsX(ctx context.Context) []int {\n\tids, err := ecpq.IDs(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn ids\n}",
"func (bs *BrowserSelect) IntsX(ctx context.Context) []int {\n\tv, err := bs.Ints(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn v\n}",
"func (ttrq *TradeTimeRangeQuery) IDsX(ctx context.Context) []int {\n\tids, err := ttrq.IDs(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn ids\n}",
"func (wq *WordQuery) IDsX(ctx context.Context) []int {\n\tids, err := wq.IDs(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn ids\n}",
"func (ls *LocationSelect) IntsX(ctx context.Context) []int {\n\tv, err := ls.Ints(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn v\n}",
"func (pq *PrizeQuery) IDsX(ctx context.Context) []int {\n\tids, err := pq.IDs(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn ids\n}"
] |
String returns the string representation
|
func (s Expression) String() string {
return awsutil.Prettify(s)
}
|
[
"func (i NotMachine) String() string { return toString(i) }",
"func (s ReEncryptOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s CreateActivationOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (o QtreeCreateResponseResult) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}",
"func (g GetObjectOutput) String() string {\n\treturn helper.Prettify(g)\n}",
"func (i Info) String() string {\n\ts, _ := i.toJSON()\n\treturn s\n}",
"func String() string {\n\toutput := output{\n\t\tRerun: Rerun,\n\t\tVariables: Variables,\n\t\tItems: Items,\n\t}\n\tvar err error\n\tvar b []byte\n\tif Indent == \"\" {\n\t\tb, err = json.Marshal(output)\n\t} else {\n\t\tb, err = json.MarshalIndent(output, \"\", Indent)\n\t}\n\tif err != nil {\n\t\tmessageErr := Errorf(\"Error in parser. Please report this output to https://github.com/drgrib/alfred/issues: %v\", err)\n\t\tpanic(messageErr)\n\t}\n\ts := string(b)\n\treturn s\n}"
] |
Concat returns a new operator node as a result of the fn.Concat function.
|
func (g *Graph) Concat(xs ...Node) Node {
return g.NewOperator(fn.NewConcat(Operands(xs)), xs...)
}
|
[
"func MOVSHDUP(mx, x operand.Op) { ctx.MOVSHDUP(mx, x) }",
"func Add(a, b NumberArray) (resultingMatrix NumberArray, err error) {\n\treturn binaryOperation(\"Add\", a, b)\n}",
"func AddToOperator(m manager.Manager) error {\n\tfor _, f := range AddToOperatorFuncs {\n\t\tif err := f(m); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (t *Tree) operator() *OperatorNode {\n\ttoken := t.expectOneOf(operators, \"operator\")\n\treturn t.newOperator(token.val, token.pos, t.list())\n}",
"func MOVSLDUP(mx, x operand.Op) { ctx.MOVSLDUP(mx, x) }",
"func (t Text) Concat(v interface{}) (interface{}, error) {\n\tswitch rhs := v.(type) {\n\tcase string:\n\t\treturn Text(append(t, Segment{Text: rhs})), nil\n\tcase *Segment:\n\t\treturn Text(append(t, *rhs)), nil\n\tcase *Text:\n\t\treturn Text(append(t, *rhs...)), nil\n\t}\n\n\treturn nil, vals.ErrConcatNotImplemented\n}",
"func (p Print) Compile() Node {\n\treturn Print{&UnaryOperator{Node: p.Node.Compile()}}\n}"
] |
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be nonnil.
|
func (in *ScheduledSparkApplicationSpec) DeepCopyInto(out *ScheduledSparkApplicationSpec) {
*out = *in
in.Template.DeepCopyInto(&out.Template)
if in.Suspend != nil {
in, out := &in.Suspend, &out.Suspend
*out = new(bool)
**out = **in
}
if in.SuccessfulRunHistoryLimit != nil {
in, out := &in.SuccessfulRunHistoryLimit, &out.SuccessfulRunHistoryLimit
*out = new(int32)
**out = **in
}
if in.FailedRunHistoryLimit != nil {
in, out := &in.FailedRunHistoryLimit, &out.FailedRunHistoryLimit
*out = new(int32)
**out = **in
}
return
}
|
[
"func (in *MaintenanceInfo) DeepCopyInto(out *MaintenanceInfo) {\n\t*out = *in\n\treturn\n}",
"func (in *Node) DeepCopyInto(out *Node) {\n\t*out = *in\n\tif in.FailStatus != nil {\n\t\tin, out := &in.FailStatus, &out.FailStatus\n\t\t*out = make([]string, len(*in))\n\t\tcopy(*out, *in)\n\t}\n\tif in.MigratingSlots != nil {\n\t\tin, out := &in.MigratingSlots, &out.MigratingSlots\n\t\t*out = make(map[string]string, len(*in))\n\t\tfor key, val := range *in {\n\t\t\t(*out)[key] = val\n\t\t}\n\t}\n\tif in.ImportingSlots != nil {\n\t\tin, out := &in.ImportingSlots, &out.ImportingSlots\n\t\t*out = make(map[string]string, len(*in))\n\t\tfor key, val := range *in {\n\t\t\t(*out)[key] = val\n\t\t}\n\t}\n}",
"func (in *DataDisk) DeepCopyInto(out *DataDisk) {\n\t*out = *in\n}",
"func (in *Target) DeepCopyInto(out *Target) {\n\t*out = *in\n\treturn\n}",
"func (in *Variable) DeepCopyInto(out *Variable) {\n\t*out = *in\n}",
"func (in *Git) DeepCopyInto(out *Git) {\n\t*out = *in\n\treturn\n}",
"func (in *NetflowType) DeepCopyInto(out *NetflowType) {\n\t*out = *in\n\treturn\n}"
] |
CheckDescriptionLength checks if the given PR's description contains enough number of arguments
|
func CheckDescriptionLength(pr *gogh.PullRequest, config PluginConfiguration, logger log.Logger) string {
actualLength := len(strings.TrimSpace(issueLinkRegexp.ReplaceAllString(pr.GetBody(), "")))
if actualLength < config.DescriptionContentLength {
return fmt.Sprintf(DescriptionLengthShortMessage, config.DescriptionContentLength, actualLength)
}
return ""
}
|
[
"func invalidLength(offset, length, sliceLength int) bool {\n\treturn offset+length < offset || offset+length > sliceLength\n}",
"func TestLengths(t *testing.T) {\n // strings\n for k, v := range testStr {\n val := LenString(k)\n if val != v {\n t.Fatalf(\"%v returned %v (expected %v)\", k, val, v)\n }\n }\n\n // bytes\n bVal := LenByte()\n if bVal != BYTE_SIZE {\n t.Fatalf(\"Byte returned %v (expected %v)\", bVal, 4)\n }\n\n // uints\n uval32 := LenUint32()\n if uval32 != UINT32_SIZE {\n t.Fatalf(\"Uint32 returned %v (expected %v)\", uval32, 4)\n }\n uval64 := LenUint64()\n if uval64 != UINT64_SIZE {\n t.Fatalf(\"Uint64 returned %v (expected %v)\", uval64, 8)\n }\n\n log.Println(\"TestLengths: passed\")\n}",
"func CheckLongDesc(cmd *cobra.Command) []error {\n\tfmt.Fprint(os.Stdout, \" ↳ checking long description\\n\")\n\tcmdPath := cmd.CommandPath()\n\tlong := cmd.Long\n\tif len(long) > 0 {\n\t\tif strings.Trim(long, \" \\t\\n\") != long {\n\t\t\treturn []error{fmt.Errorf(`command %q: long description is not normalized, make sure you are calling templates.LongDesc (from pkg/cmd/templates) before assigning cmd.Long`, cmdPath)}\n\t\t}\n\t}\n\treturn nil\n}",
"func CheckArguments(arguments []Argument, min int, max int, fname string, usage string) (int, ErrorValue) {\n\targLen := len(arguments)\n\tif argLen < min || argLen > max {\n\t\treturn argLen, NewErrorValue(fmt.Sprintf(\"Invalid call to %s. Usage: %s %s\", fname, fname, usage))\n\t}\n\treturn argLen, nil\n}",
"func IsCorrectLength(reads []string, k int) bool {\n for _, read := range reads {\n if (len(read) != k) {\n return false\n }\n }\n return true\n}",
"func TestShouldGetListLength(t *testing.T) {\n\tlst := []int{1, 2, 3, 4, 5}\n\n\tl := Length(lst)\n\n\tassert.Equal(t, l, 5, \"List size should be 5\")\n}",
"func Length(i interface{}) (l int, ok bool) {\n\tswitch i := i.(type) {\n\tcase string:\n\t\treturn len(i), true\n\tcase []interface{}:\n\t\treturn len(i), true\n\tcase map[string]interface{}:\n\t\treturn len(i), true\n\tcase []int64:\n\t\treturn len(i), true\n\tcase []float64:\n\t\treturn len(i), true\n\tcase []bool:\n\t\treturn len(i), true\n\tcase map[string]float64:\n\t\treturn len(i), true\n\tcase map[string]string:\n\t\treturn len(i), true\n\tcase map[string]bool:\n\t\treturn len(i), true\n\tdefault:\n\t\treturn 0, false\n\t}\n}"
] |
Float64 returns a single float64 from selector. It is only allowed when selecting one field.
|
func (fds *FurnitureDetailSelect) Float64(ctx context.Context) (_ float64, err error) {
var v []float64
if v, err = fds.Float64s(ctx); err != nil {
return
}
switch len(v) {
case 1:
return v[0], nil
case 0:
err = &NotFoundError{furnituredetail.Label}
default:
err = fmt.Errorf("ent: FurnitureDetailSelect.Float64s returned %d results when one was expected", len(v))
}
return
}
|
[
"func (e *Element) GetFloat64(key string) float64 {\n\tval, _ := e.Get(key)\n\tassertedVal, ok := val.(float64)\n\tif !ok {\n\t\tassertedVal = 0\n\t}\n\treturn assertedVal\n}",
"func (ups *UnsavedPostSelect) Float64(ctx context.Context) (_ float64, err error) {\n\tvar v []float64\n\tif v, err = ups.Float64s(ctx); err != nil {\n\t\treturn\n\t}\n\tswitch len(v) {\n\tcase 1:\n\t\treturn v[0], nil\n\tcase 0:\n\t\terr = &NotFoundError{unsavedpost.Label}\n\tdefault:\n\t\terr = fmt.Errorf(\"ent: UnsavedPostSelect.Float64s returned %d results when one was expected\", len(v))\n\t}\n\treturn\n}",
"func (rs *RemedySelect) Float64(ctx context.Context) (_ float64, err error) {\n\tvar v []float64\n\tif v, err = rs.Float64s(ctx); err != nil {\n\t\treturn\n\t}\n\tswitch len(v) {\n\tcase 1:\n\t\treturn v[0], nil\n\tcase 0:\n\t\terr = &NotFoundError{remedy.Label}\n\tdefault:\n\t\terr = fmt.Errorf(\"ent: RemedySelect.Float64s returned %d results when one was expected\", len(v))\n\t}\n\treturn\n}",
"func (irs *InstanceRuntimeSelect) Float64(ctx context.Context) (_ float64, err error) {\n\tvar v []float64\n\tif v, err = irs.Float64s(ctx); err != nil {\n\t\treturn\n\t}\n\tswitch len(v) {\n\tcase 1:\n\t\treturn v[0], nil\n\tcase 0:\n\t\terr = &NotFoundError{instanceruntime.Label}\n\tdefault:\n\t\terr = fmt.Errorf(\"ent: InstanceRuntimeSelect.Float64s returned %d results when one was expected\", len(v))\n\t}\n\treturn\n}",
"func (gs *GoodsSelect) Float64(ctx context.Context) (_ float64, err error) {\n\tvar v []float64\n\tif v, err = gs.Float64s(ctx); err != nil {\n\t\treturn\n\t}\n\tswitch len(v) {\n\tcase 1:\n\t\treturn v[0], nil\n\tcase 0:\n\t\terr = &NotFoundError{goods.Label}\n\tdefault:\n\t\terr = fmt.Errorf(\"ent: GoodsSelect.Float64s returned %d results when one was expected\", len(v))\n\t}\n\treturn\n}",
"func (urgb *UserRoleGroupBy) Float64(ctx context.Context) (_ float64, err error) {\n\tvar v []float64\n\tif v, err = urgb.Float64s(ctx); err != nil {\n\t\treturn\n\t}\n\tswitch len(v) {\n\tcase 1:\n\t\treturn v[0], nil\n\tcase 0:\n\t\terr = &NotFoundError{userrole.Label}\n\tdefault:\n\t\terr = fmt.Errorf(\"ent: UserRoleGroupBy.Float64s returned %d results when one was expected\", len(v))\n\t}\n\treturn\n}",
"func (ss *ServerSelect) Float64X(ctx context.Context) float64 {\n\tv, err := ss.Float64(ctx)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn v\n}"
] |
respecify a portion of a color table
|
func CopyColorSubTable(target uint32, start int32, x int32, y int32, width int32) {
C.glowCopyColorSubTable(gpCopyColorSubTable, (C.GLenum)(target), (C.GLsizei)(start), (C.GLint)(x), (C.GLint)(y), (C.GLsizei)(width))
}
|
[
"func (c *Colorscheme) tcellColor(name string) tcell.Color {\n\tv, ok := c.colors[name].(string)\n\tif !ok {\n\t\treturn tcell.ColorDefault\n\t}\n\n\tif color, found := TcellColorschemeColorsMap[v]; found {\n\t\treturn color\n\t}\n\n\tcolor := tcell.GetColor(v)\n\tif color != tcell.ColorDefault {\n\t\treturn color\n\t}\n\n\t// find closest X11 color to RGB\n\t// if code, ok := HexToAnsi(v); ok {\n\t// \treturn tcell.PaletteColor(int(code) & 0xff)\n\t// }\n\treturn color\n}",
"func calcTableCap(c int) int {\n\tif c <= neighbour {\n\t\treturn c\n\t}\n\treturn c + neighbour - 1\n}",
"func StandardTileColorer(frame []byte, stride int) TileColorFunction {\n\treturn func(hTile int, vTile int, lookupArray []byte, mask uint64, indexBitSize uint64) {\n\t\tstart := vTile*TileSideLength*stride + hTile*TileSideLength\n\t\tsingleMask := ^(^uint64(0) << indexBitSize)\n\n\t\tfor i := 0; i < PixelPerTile; i++ {\n\t\t\tpixelValue := lookupArray[(mask>>(indexBitSize*uint64(i)))&singleMask]\n\n\t\t\tif pixelValue != 0x00 {\n\t\t\t\toffset := start + (i % TileSideLength) + stride*(i/TileSideLength)\n\n\t\t\t\tframe[offset] = pixelValue\n\t\t\t}\n\t\t}\n\t}\n}",
"func TableSetBgColorV(target TableBgTarget, color Vec4, columnN int) {\n\tcolorArg, _ := color.wrapped()\n\tC.iggTableSetBgColor(C.int(target), colorArg, C.int(columnN))\n}",
"func (table *Table) Draw(buf *Buffer) {\n\ttable.Block.Draw(buf)\n\n\ttable.drawLocation(buf)\n\ttable.drawUpdated(buf)\n\ttable.ColResizer()\n\n\tcolXPos := []int{}\n\tcur := 1 + table.PadLeft\n\tfor _, w := range table.ColWidths {\n\t\tcolXPos = append(colXPos, cur)\n\t\tcur += w\n\t\tcur += table.ColGap\n\t}\n\n\tfor i, h := range table.Header {\n\t\twidth := table.ColWidths[i]\n\t\tif width == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tif width > (table.Inner.Dx()-colXPos[i])+1 {\n\t\t\tcontinue\n\t\t}\n\t\tbuf.SetString(\n\t\t\th,\n\t\t\tNewStyle(Theme.Default.Fg, ColorClear, ModifierBold),\n\t\t\timage.Pt(table.Inner.Min.X+colXPos[i]-1, table.Inner.Min.Y),\n\t\t)\n\t}\n\n\tif table.TopRow < 0 {\n\t\treturn\n\t}\n\n\tfor rowNum := table.TopRow; rowNum < table.TopRow+table.Inner.Dy()-1 && rowNum < len(table.Rows); rowNum++ {\n\t\trow := table.Rows[rowNum]\n\t\ty := (rowNum + 2) - table.TopRow\n\n\t\tstyle := NewStyle(Theme.Default.Fg)\n\t\tfor i, width := range table.ColWidths {\n\t\t\tif width == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif width > (table.Inner.Dx()-colXPos[i])+1 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tr := TrimString(row[i], width)\n\t\t\tif table.Styles[rowNum][i] != nil {\n\t\t\t\tbuf.SetString(\n\t\t\t\t\tr,\n\t\t\t\t\t*table.Styles[rowNum][i],\n\t\t\t\t\timage.Pt(table.Inner.Min.X+colXPos[i]-1, table.Inner.Min.Y+y-1),\n\t\t\t\t)\n\t\t\t} else {\n\t\t\t\tbuf.SetString(\n\t\t\t\t\tr,\n\t\t\t\t\tstyle,\n\t\t\t\t\timage.Pt(table.Inner.Min.X+colXPos[i]-1, table.Inner.Min.Y+y-1),\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t}\n}",
"func nextcolor(c color.RGBA) color.RGBA {\n\tswitch {\n\tcase c.R == 255 && c.G == 0 && c.B == 0:\n\t\tc.G += 5\n\tcase c.R == 255 && c.G != 255 && c.B == 0:\n\t\tc.G += 5\n\tcase c.G == 255 && c.R != 0:\n\t\tc.R -= 5\n\tcase c.R == 0 && c.B != 255:\n\t\tc.B += 5\n\tcase c.B == 255 && c.G != 0:\n\t\tc.G -= 5\n\tcase c.G == 0 && c.R != 255:\n\t\tc.R += 5\n\tdefault:\n\t\tc.B -= 5\n\t}\n\treturn c\n}",
"func CopyColorSubTable(target uint32, start int32, x int32, y int32, width int32) {\n C.glowCopyColorSubTable(gpCopyColorSubTable, (C.GLenum)(target), (C.GLsizei)(start), (C.GLint)(x), (C.GLint)(y), (C.GLsizei)(width))\n}"
] |
resolveSystemRegistry find all image field in yaml content and replace with new image value which system registry prefixed
|
func resolveSystemRegistry(content string) string {
if settings.SystemDefaultRegistry.Get() == "" {
return content
}
exp := `image:.*`
return regexp.MustCompile(exp).ReplaceAllStringFunc(content, replaceImage)
}
|
[
"func (rt *ReplicateTasks) processKustomizeDir(absPath string, registry string, include string, exclude string) error {\n\tlog.Infof(\"Processing %v\", absPath)\n\tkustomizationFilePath := filepath.Join(absPath, \"kustomization.yaml\")\n\tif _, err := os.Stat(kustomizationFilePath); err != nil {\n\t\tlog.Infof(\"Skipping %v; no kustomization.yaml found\", absPath)\n\t\treturn nil\n\t}\n\tkustomization := kustomize.GetKustomization(absPath)\n\tfor _, image := range kustomization.Images {\n\t\tcurName := image.Name\n\t\tif image.NewName != \"\" {\n\t\t\tcurName = image.NewName\n\t\t}\n\t\tif strings.Contains(curName, \"$\") {\n\t\t\tlog.Infof(\"Image name %v contains kutomize parameter, skipping\\n\", curName)\n\t\t\tcontinue\n\t\t}\n\t\t// check exclude first\n\t\tif exclude != \"\" && strings.HasPrefix(curName, exclude) {\n\t\t\tlog.Infof(\"Image %v matches exclude prefix %v, skipping\\n\", curName, exclude)\n\t\t\tcontinue\n\t\t}\n\t\t// then check include\n\t\tif include != \"\" && (!strings.HasPrefix(curName, include)) {\n\t\t\tlog.Infof(\"Image %v doesn't match include prefix %v, skipping\\n\", curName, include)\n\t\t\tcontinue\n\t\t}\n\t\tnewName := strings.Join([]string{registry, image.Name}, \"/\")\n\n\t\tif (image.NewTag == \"\") == (image.Digest == \"\") {\n\t\t\tlog.Warnf(\"One and only one of NewTag or Digest can exist for image %s, skipping\\n\",\n\t\t\t\timage.Name)\n\t\t\tcontinue\n\t\t}\n\n\t\tif image.NewTag != \"\" {\n\t\t\t(*rt)[strings.Join([]string{newName, image.NewTag}, \":\")] =\n\t\t\t\tstrings.Join([]string{curName, image.NewTag}, \":\")\n\t\t}\n\t\tif image.Digest != \"\" {\n\t\t\t(*rt)[strings.Join([]string{newName, image.Digest}, \"@\")] =\n\t\t\t\tstrings.Join([]string{curName, image.Digest}, \"@\")\n\t\t}\n\t\tlog.Infof(\"Replacing image name from %s to %s\", image.Name, newName)\n\t\t//kustomization.Images[i].NewName = newName\n\t}\n\n\t// Process any kustomize packages we depend on.\n\tfor _, r := range kustomization.Resources {\n\t\tif ext := strings.ToLower(filepath.Ext(r)); ext == \".yaml\" || ext == \".yml\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tp := path.Join(absPath, r)\n\n\t\tif b, err := utils.IsRemoteFile(p); b || err != nil {\n\t\t\tif err != nil {\n\t\t\t\tlog.Infof(\"Skipping path %v; there was an error determining if it was a local file; error: %v\", p, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Infof(\"Skipping remote file %v\", p)\n\t\t\tcontinue\n\t\t}\n\t\tif err := rt.processKustomizeDir(p, registry, include, exclude); err != nil {\n\t\t\tlog.Errorf(\"Error occurred while processing %v; error %v\", p, err)\n\t\t}\n\t}\n\n\t// Bases is deprecated but our manifests still use it.\n\tfor _, r := range kustomization.Bases {\n\t\tp := path.Join(absPath, r)\n\n\t\tif b, err := utils.IsRemoteFile(p); b || err != nil {\n\t\t\tif err != nil {\n\t\t\t\tlog.Infof(\"Skipping path %v; there was an error determining if it was a local file; error: %v\", p, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Infof(\"Skipping remote file %v\", p)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := rt.processKustomizeDir(p, registry, include, exclude); err != nil {\n\t\t\tlog.Errorf(\"Error occurred while processing %v; error %v\", p, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func GetImage(runtime connector.ModuleRuntime, kubeConf *common.KubeConf, name string) Image {\n\tvar image Image\n\tpauseTag, corednsTag := \"3.2\", \"1.6.9\"\n\n\tif versionutil.MustParseSemantic(kubeConf.Cluster.Kubernetes.Version).LessThan(versionutil.MustParseSemantic(\"v1.21.0\")) {\n\t\tpauseTag = \"3.2\"\n\t\tcorednsTag = \"1.6.9\"\n\t}\n\tif versionutil.MustParseSemantic(kubeConf.Cluster.Kubernetes.Version).AtLeast(versionutil.MustParseSemantic(\"v1.21.0\")) ||\n\t\t(kubeConf.Cluster.Kubernetes.ContainerManager != \"\" && kubeConf.Cluster.Kubernetes.ContainerManager != \"docker\") {\n\t\tpauseTag = \"3.4.1\"\n\t\tcorednsTag = \"1.8.0\"\n\t}\n\tif versionutil.MustParseSemantic(kubeConf.Cluster.Kubernetes.Version).AtLeast(versionutil.MustParseSemantic(\"v1.22.0\")) {\n\t\tpauseTag = \"3.5\"\n\t\tcorednsTag = \"1.8.0\"\n\t}\n\tif versionutil.MustParseSemantic(kubeConf.Cluster.Kubernetes.Version).AtLeast(versionutil.MustParseSemantic(\"v1.23.0\")) {\n\t\tpauseTag = \"3.6\"\n\t\tcorednsTag = \"1.8.6\"\n\t}\n\tif versionutil.MustParseSemantic(kubeConf.Cluster.Kubernetes.Version).AtLeast(versionutil.MustParseSemantic(\"v1.24.0\")) {\n\t\tpauseTag = \"3.7\"\n\t\tcorednsTag = \"1.8.6\"\n\t}\n\tif versionutil.MustParseSemantic(kubeConf.Cluster.Kubernetes.Version).AtLeast(versionutil.MustParseSemantic(\"v1.25.0\")) {\n\t\tpauseTag = \"3.8\"\n\t\tcorednsTag = \"1.9.3\"\n\t}\n\n\tlogger.Log.Debugf(\"pauseTag: %s, corednsTag: %s\", pauseTag, corednsTag)\n\n\tImageList := map[string]Image{\n\t\t\"pause\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: kubekeyv1alpha2.DefaultKubeImageNamespace, Repo: \"pause\", Tag: pauseTag, Group: kubekeyv1alpha2.K8s, Enable: true},\n\t\t\"etcd\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: kubekeyv1alpha2.DefaultKubeImageNamespace, Repo: \"etcd\", Tag: kubekeyv1alpha2.DefaultEtcdVersion, Group: kubekeyv1alpha2.Master, Enable: strings.EqualFold(kubeConf.Cluster.Etcd.Type, kubekeyv1alpha2.Kubeadm)},\n\t\t\"kube-apiserver\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: kubekeyv1alpha2.DefaultKubeImageNamespace, Repo: \"kube-apiserver\", Tag: kubeConf.Cluster.Kubernetes.Version, Group: kubekeyv1alpha2.Master, Enable: true},\n\t\t\"kube-controller-manager\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: kubekeyv1alpha2.DefaultKubeImageNamespace, Repo: \"kube-controller-manager\", Tag: kubeConf.Cluster.Kubernetes.Version, Group: kubekeyv1alpha2.Master, Enable: true},\n\t\t\"kube-scheduler\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: kubekeyv1alpha2.DefaultKubeImageNamespace, Repo: \"kube-scheduler\", Tag: kubeConf.Cluster.Kubernetes.Version, Group: kubekeyv1alpha2.Master, Enable: true},\n\t\t\"kube-proxy\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: kubekeyv1alpha2.DefaultKubeImageNamespace, Repo: \"kube-proxy\", Tag: kubeConf.Cluster.Kubernetes.Version, Group: kubekeyv1alpha2.K8s, Enable: !kubeConf.Cluster.Kubernetes.DisableKubeProxy},\n\n\t\t// network\n\t\t\"coredns\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: \"coredns\", Repo: \"coredns\", Tag: corednsTag, Group: kubekeyv1alpha2.K8s, Enable: true},\n\t\t\"k8s-dns-node-cache\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: kubekeyv1alpha2.DefaultKubeImageNamespace, Repo: \"k8s-dns-node-cache\", Tag: \"1.15.12\", Group: kubekeyv1alpha2.K8s, Enable: kubeConf.Cluster.Kubernetes.EnableNodelocaldns()},\n\t\t\"calico-kube-controllers\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: \"calico\", Repo: \"kube-controllers\", Tag: kubekeyv1alpha2.DefaultCalicoVersion, Group: kubekeyv1alpha2.K8s, Enable: strings.EqualFold(kubeConf.Cluster.Network.Plugin, \"calico\")},\n\t\t\"calico-cni\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: \"calico\", Repo: \"cni\", Tag: kubekeyv1alpha2.DefaultCalicoVersion, Group: kubekeyv1alpha2.K8s, Enable: strings.EqualFold(kubeConf.Cluster.Network.Plugin, \"calico\")},\n\t\t\"calico-node\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: \"calico\", Repo: \"node\", Tag: kubekeyv1alpha2.DefaultCalicoVersion, Group: kubekeyv1alpha2.K8s, Enable: strings.EqualFold(kubeConf.Cluster.Network.Plugin, \"calico\")},\n\t\t\"calico-flexvol\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: \"calico\", Repo: \"pod2daemon-flexvol\", Tag: kubekeyv1alpha2.DefaultCalicoVersion, Group: kubekeyv1alpha2.K8s, Enable: strings.EqualFold(kubeConf.Cluster.Network.Plugin, \"calico\")},\n\t\t\"calico-typha\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: \"calico\", Repo: \"typha\", Tag: kubekeyv1alpha2.DefaultCalicoVersion, Group: kubekeyv1alpha2.K8s, Enable: strings.EqualFold(kubeConf.Cluster.Network.Plugin, \"calico\") && len(runtime.GetHostsByRole(common.K8s)) > 50},\n\t\t\"flannel\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: \"flannel\", Repo: \"flannel\", Tag: kubekeyv1alpha2.DefaultFlannelVersion, Group: kubekeyv1alpha2.K8s, Enable: strings.EqualFold(kubeConf.Cluster.Network.Plugin, \"flannel\")},\n\t\t\"flannel-cni-plugin\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: \"flannel\", Repo: \"flannel-cni-plugin\", Tag: kubekeyv1alpha2.DefaultFlannelCniPluginVersion, Group: kubekeyv1alpha2.K8s, Enable: strings.EqualFold(kubeConf.Cluster.Network.Plugin, \"flannel\")},\n\t\t\"cilium\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: \"cilium\", Repo: \"cilium\", Tag: kubekeyv1alpha2.DefaultCiliumVersion, Group: kubekeyv1alpha2.K8s, Enable: strings.EqualFold(kubeConf.Cluster.Network.Plugin, \"cilium\")},\n\t\t\"cilium-operator-generic\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: \"cilium\", Repo: \"operator-generic\", Tag: kubekeyv1alpha2.DefaultCiliumVersion, Group: kubekeyv1alpha2.K8s, Enable: strings.EqualFold(kubeConf.Cluster.Network.Plugin, \"cilium\")},\n\t\t\"kubeovn\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: \"kubeovn\", Repo: \"kube-ovn\", Tag: kubekeyv1alpha2.DefaultKubeovnVersion, Group: kubekeyv1alpha2.K8s, Enable: strings.EqualFold(kubeConf.Cluster.Network.Plugin, \"kubeovn\")},\n\t\t\"multus\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: kubekeyv1alpha2.DefaultKubeImageNamespace, Repo: \"multus-cni\", Tag: kubekeyv1alpha2.DefalutMultusVersion, Group: kubekeyv1alpha2.K8s, Enable: strings.Contains(kubeConf.Cluster.Network.Plugin, \"multus\")},\n\t\t// storage\n\t\t\"provisioner-localpv\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: \"openebs\", Repo: \"provisioner-localpv\", Tag: \"3.3.0\", Group: kubekeyv1alpha2.Worker, Enable: false},\n\t\t\"linux-utils\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: \"openebs\", Repo: \"linux-utils\", Tag: \"3.3.0\", Group: kubekeyv1alpha2.Worker, Enable: false},\n\t\t// load balancer\n\t\t\"haproxy\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: \"library\", Repo: \"haproxy\", Tag: \"2.3\", Group: kubekeyv1alpha2.Worker, Enable: kubeConf.Cluster.ControlPlaneEndpoint.IsInternalLBEnabled()},\n\t\t\"kubevip\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: \"plndr\", Repo: \"kube-vip\", Tag: \"v0.5.0\", Group: kubekeyv1alpha2.Master, Enable: kubeConf.Cluster.ControlPlaneEndpoint.IsInternalLBEnabledVip()},\n\t\t// kata-deploy\n\t\t\"kata-deploy\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: kubekeyv1alpha2.DefaultKubeImageNamespace, Repo: \"kata-deploy\", Tag: \"stable\", Group: kubekeyv1alpha2.Worker, Enable: kubeConf.Cluster.Kubernetes.EnableKataDeploy()},\n\t\t// node-feature-discovery\n\t\t\"node-feature-discovery\": {RepoAddr: kubeConf.Cluster.Registry.PrivateRegistry, Namespace: kubekeyv1alpha2.DefaultKubeImageNamespace, Repo: \"node-feature-discovery\", Tag: \"v0.10.0\", Group: kubekeyv1alpha2.K8s, Enable: kubeConf.Cluster.Kubernetes.EnableNodeFeatureDiscovery()},\n\t}\n\n\timage = ImageList[name]\n\tif kubeConf.Cluster.Registry.NamespaceOverride != \"\" {\n\t\timage.NamespaceOverride = kubeConf.Cluster.Registry.NamespaceOverride\n\t}\n\treturn image\n}",
"func TestIsImageInLocalRegistry(t *testing.T) {\n\ttype testDef struct {\n\t\timageName string\n\t\tdocker test.FakeDockerClient\n\t\texpectedResult bool\n\t\texpectedError string\n\t}\n\ttests := map[string]testDef{\n\t\t\"ImageFound\": {\"a_test_image\", test.FakeDockerClient{}, true, \"\"},\n\t\t\"ImageNotFound\": {\"a_test_image:sometag\", test.FakeDockerClient{}, false, \"unable to get metadata for a_test_image:sometag\"},\n\t}\n\n\tfor test, def := range tests {\n\t\tdh := getDocker(&def.docker)\n\t\tfake := dh.kubeDockerClient.(*dockertools.FakeDockerClient)\n\t\tif def.expectedResult {\n\t\t\tfake.Image = &dockertypes.ImageInspect{ID: def.imageName}\n\t\t}\n\n\t\tresult, err := dh.IsImageInLocalRegistry(def.imageName)\n\n\t\tif e := fake.AssertCalls([]string{\"inspect_image\"}); e != nil {\n\t\t\tt.Errorf(\"%+v\", e)\n\t\t}\n\n\t\tif result != def.expectedResult {\n\t\t\tt.Errorf(\"Test - %s: Expected result: %v. Got: %v\", test, def.expectedResult, result)\n\t\t}\n\t\tif err != nil && len(def.expectedError) > 0 && !strings.Contains(err.Error(), def.expectedError) {\n\t\t\tt.Errorf(\"Test - %s: Expected error: Got: %+v\", test, err)\n\t\t}\n\t}\n}",
"func setImage(dep *appsv1.Deployment, ctn string, image string) {\n\tfor index, value := range dep.Spec.Template.Spec.Containers {\n\t\tif value.Name == ctn {\n\t\t\tnewImage := \"\"\n\t\t\toriImage := dep.Spec.Template.Spec.Containers[index].Image\n\t\t\timageStrutct := strings.Split(oriImage, \":\")\n\t\t\tif len(imageStrutct) != 0 {\n\t\t\t\tnewImage = fmt.Sprintf(\"%s:%s\", image, imageStrutct[len(imageStrutct)-1])\n\t\t\t\tdep.Spec.Template.Spec.Containers[index].Image = newImage\n\t\t\t}\n\t\t}\n\t}\n}",
"func resolveAuthConfigFromImage(cfg *configfile.ConfigFile, image string) (registrytypes.AuthConfig, error) {\n\tregistryRef, err := reference.ParseNormalizedNamed(image)\n\tif err != nil {\n\t\treturn registrytypes.AuthConfig{}, err\n\t}\n\trepoInfo, err := registry.ParseRepositoryInfo(registryRef)\n\tif err != nil {\n\t\treturn registrytypes.AuthConfig{}, err\n\t}\n\treturn ResolveAuthConfig(cfg, repoInfo.Index), nil\n}",
"func BuildImageOverrideMapFromEnviron(environ []string, prefix string) map[string]string {\n\toverrideMap := map[string]string{}\n\n\tfor _, e := range environ {\n\t\tpair := strings.SplitN(e, \"=\", 2)\n\t\tif strings.HasPrefix(pair[0], prefix) {\n\t\t\t// convert\n\t\t\t// \"IMAGE_container=quay.io/foo\"\n\t\t\t// \"IMAGE_deployment__container=quay.io/foo2\"\n\t\t\t// \"IMAGE_env_var=quay.io/foo3\"\n\t\t\t// \"IMAGE_deployment__env_var=quay.io/foo4\"\n\t\t\t// to\n\t\t\t// container: quay.io/foo\n\t\t\t// deployment/container: quay.io/foo2\n\t\t\t// env_var: quay.io/foo3\n\t\t\t// deployment/env_var: quay.io/foo4\n\t\t\tname := strings.TrimPrefix(pair[0], prefix)\n\t\t\tname = strings.Replace(name, \"__\", \"/\", 1)\n\t\t\tif pair[1] != \"\" {\n\t\t\t\toverrideMap[name] = pair[1]\n\t\t\t}\n\t\t}\n\t}\n\treturn overrideMap\n}",
"func getImagesMappingRE() map[*regexp.Regexp][]byte {\n\timageNamesMapping := imageNamesMapping()\n\timageMappingRE := make(map[*regexp.Regexp][]byte, len(imageNamesMapping))\n\n\tfor existingImage, archSpecificImage := range imageNamesMapping {\n\t\timageMappingRE[regexp.MustCompile(\"(?im)image: \"+existingImage+\"$\")] = []byte(\"image: \" + archSpecificImage)\n\t\timageMappingRE[regexp.MustCompile(\"(?im)default: \"+existingImage+\"$\")] = []byte(\"default: \" + archSpecificImage)\n\t}\n\n\treturn imageMappingRE\n}"
] |
Deprecated: Use VmxcTxHistory.ProtoReflect.Descriptor instead.
|
func (*VmxcTxHistory) Descriptor() ([]byte, []int) {
return file_wallet_proto_rawDescGZIP(), []int{20}
}
|
[
"func (*StateMachineLogEntryProto) Descriptor() ([]byte, []int) {\n\treturn file_raft_proto_rawDescGZIP(), []int{6}\n}",
"func (*EpochChange) Descriptor() ([]byte, []int) {\n\treturn file_msgs_msgs_proto_rawDescGZIP(), []int{21}\n}",
"func (*IntegrationChangeHistoryListResp) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{31}\n}",
"func (*CCLCMsg_HltvReplay) Descriptor() ([]byte, []int) {\n\treturn file_netmessages_proto_rawDescGZIP(), []int{52}\n}",
"func (*PlanChange_Removed) Descriptor() ([]byte, []int) {\n\treturn edgelq_limits_proto_v1alpha2_plan_change_proto_rawDescGZIP(), []int{0, 3}\n}",
"func (*SendTransactionStatus) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{38}\n}",
"func (*Checkpoint) Descriptor() ([]byte, []int) {\n\treturn file_msgs_msgs_proto_rawDescGZIP(), []int{19}\n}"
] |
/ Creates a file if it does not already exist.
|
func CreateFileIfNotExists(path string) error {
exists, er := FilePathExists(path)
if er != nil {
return er
} else {
if !exists {
var file, err = os.Create(path)
if err != nil {
return err
}
defer file.Close()
}
}
return nil
}
|
[
"func CreateFile(fileName string, canAppend bool) *os.File {\n\tfileMode := os.O_TRUNC\n\n\tif canAppend {\n\t\tfileMode = os.O_APPEND\n\t}\n\n\tfile, err := os.OpenFile(fileName, fileMode|os.O_CREATE|os.O_WRONLY, 0644)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn file\n\n}",
"func createDefaultFileIfNotExists(v *viper.Viper, filename string) error {\n\tif v.ConfigFileUsed() != \"\" {\n\t\treturn nil\n\t}\n\t// get config home\n\tcfgHome, err := defaultCfgHome()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// create config directory\n\tif err := os.MkdirAll(cfgHome, os.ModePerm); err != nil {\n\t\treturn fmt.Errorf(\"couldn't create config directory: %v\", err)\n\t}\n\t// create config file\n\tf, err := os.Create(filepath.Join(cfgHome, filename))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not create config file: %v\", err)\n\t}\n\t// close config file\n\tif err := f.Close(); err != nil {\n\t\treturn fmt.Errorf(\"could not close config file: %v\", err)\n\t}\n\treturn nil\n}",
"func createFile(filePath string) (*os.File, error) {\n\tfilePath = *destDir + filePath\n\tcreateFileMutex.Lock()\n\tdefer createFileMutex.Unlock()\n\n\tex := path.Ext(filePath)\n\tbs := filePath[:len(filePath)-len(ex)]\n\n\tfor {\n\t\tfilePath = bs + ex\n\t\tif _, err := os.Stat(filePath); err == nil {\n\t\t\tif bs[len(bs)-1] != ')' {\n\t\t\t\tbs = bs + \"(1)\"\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tl := strings.LastIndex(bs, \"(\")\n\t\t\tif l == -1 {\n\t\t\t\tbs = bs + \"(1)\"\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ti, err := strconv.Atoi(bs[l+1 : len(bs)-1])\n\t\t\tif err != nil {\n\t\t\t\tbs = bs + \"(1)\"\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ti++\n\t\t\tbs = bs[:l] + \"(\" + strconv.Itoa(i) + \")\"\n\t\t} else {\n\t\t\tout, err := os.Create(filePath)\n\t\t\treturn out, err\n\t\t}\n\t}\n\n}",
"func (d *Directory) CreateFile(p, name, hash, keyHash string, size int64, fragments []*Fragment) error {\n\tdir, err := d.checkPathExists(p)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := 0; i < len(dir.INodes); i++ {\n\t\tif dir.INodes[i].GetName() == name {\n\t\t\treturn errors.New(\"The same Name file or directory exists: \" + p + name)\n\t\t}\n\t}\n\td.lock()\n\tdefer d.unlock()\n\tdir.INodes = append(dir.INodes, NewFile(name, size, hash, keyHash, fragments))\n\treturn nil\n}",
"func createFile(bytes []byte, filepath string) error {\n\terr := ioutil.WriteFile(filepath, bytes, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (s *awsStorage) CreateFile(key, filename, ctype string, size int64, hash []byte) error {\n\treturn s.upsertInfo(key,\n\t\t&info{Name: filename, ContentType: ctype, Length: size, Hash: toHex(hash[:])}, true)\n}",
"func createWriteThroughFile(path string) (*os.File, error) {\n\tif len(path) == 0 {\n\t\treturn nil, syscall.ERROR_FILE_NOT_FOUND\n\t}\n\tpathp, err := syscall.UTF16PtrFromString(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\th, err := syscall.CreateFile(\n\t\tpathp, // Path\n\t\tsyscall.GENERIC_READ|syscall.GENERIC_WRITE, // Access Mode\n\t\tuint32(syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE), // Share Mode\n\t\tnil, // Security Attributes\n\t\tsyscall.CREATE_ALWAYS, // Create Mode\n\t\tuint32(syscall.FILE_ATTRIBUTE_NORMAL|_FILE_FLAG_WRITE_THROUGH), // Flags and Attributes\n\t\t0) // Template File\n\n\treturn os.NewFile(uintptr(h), path), err\n}"
] |
DeleteRecord indicates an expected call of DeleteRecord
|
func (mr *MockDBStorageMockRecorder) DeleteRecord(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRecord", reflect.TypeOf((*MockDBStorage)(nil).DeleteRecord), arg0, arg1, arg2)
}
|
[
"func (mr *MockFlagMockRecorder) Delete(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Delete\", reflect.TypeOf((*MockFlag)(nil).Delete), arg0, arg1)\n}",
"func (mr *ClientMockRecorder) Delete(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Delete\", reflect.TypeOf((*Client)(nil).Delete), arg0, arg1)\n}",
"func (c Client) DeleteRecord(ctx context.Context, domainID, recordID int64) error {\n\tendpoint := c.baseURL.JoinPath(\"dns\", strconv.FormatInt(domainID, 10), \"record\", strconv.FormatInt(recordID, 10))\n\n\tapiResp := APIException{}\n\terr := c.doRetry(ctx, http.MethodDelete, endpoint.String(), nil, &apiResp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif apiResp.StatusCode/100 != 2 {\n\t\treturn fmt.Errorf(\"API error: %w\", apiResp)\n\t}\n\n\treturn nil\n}",
"func (ds *DeleteSuite) TestDelete_Failure_SensorType_Not_exists() {\n\n\t// Arrange.\n\n\te := echo.New()\n\treq := httptest.NewRequest(http.MethodDelete, \"/:id\", nil)\n\tres := httptest.NewRecorder()\n\tc := e.NewContext(req, res)\n\n\tc.SetPath(\"/:id\")\n\tc.SetParamNames(\"id\")\n\tc.SetParamValues(\"99\")\n\n\t// Act.\n\n\t_ = HandleDelete(c)\n\n\t// Assert.\n\n\tassert.Equal(ds.T(), http.StatusBadRequest, res.Code)\n\tvar httpError echo.HTTPError\n\t_ = json.Unmarshal(res.Body.Bytes(), &httpError)\n\tassert.Equal(ds.T(), \"sensortype not found\", httpError.Message)\n}",
"func (z *Zone) DeleteRecord(name string, recordType string) error {\n\trrset := new(RRset)\n\trrset.Name = name\n\trrset.Type = recordType\n\trrset.ChangeType = \"DELETE\"\n\n\treturn z.patchRRset(*rrset)\n}",
"func (_obj *DataService) DeleteActivityRecord(activity_id string, wx_id string, affectRows *int32, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_string(activity_id, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_string(wx_id, 2)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_int32((*affectRows), 3)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\ttarsCtx := context.Background()\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"deleteActivityRecord\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = _is.Read_int32(&ret, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _is.Read_int32(&(*affectRows), 3, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}",
"func (sr *StoredRecording) Delete(key *ari.Key) error {\n\treturn sr.client.del(\"/recordings/stored/\"+key.ID, nil, \"\")\n}"
] |
Remove jwt token from db. Remove user access.
|
func (gs *GateService) Logout(ctx context.Context, opaque string) error {
return gs.repo.RemoveToken(ctx, opaque)
}
|
[
"func revokeMT(tx *sqlx.Tx, id mtid.MTID) error {\n\treturn db.RunWithinTransaction(tx, func(tx *sqlx.Tx) error {\n\t\tif err := encryptionkeyrepo.DeleteEncryptionKey(tx, id); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err := tx.Exec(`DELETE FROM MTokens WHERE id=?`, id)\n\t\treturn errors.WithStack(err)\n\t})\n}",
"func logout(w http.ResponseWriter, r *http.Request) {\n\tsession, err := store.Get(r, \"auth\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tsession.Values[\"user\"] = User{}\n\tsession.Options.MaxAge = -1\n\n\terr = session.Save(r, w)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tlogRequest(r)\n}",
"func (d *DecodeService) Remove(w http.ResponseWriter, r *http.Request) {\n\tvar req request.UserNameAudioToken\n\terr := json.NewDecoder(r.Body).Decode(&req)\n\tif err != nil {\n\t\tservice.ProcessBadFormat(w, service.ErrWrongFormat)\n\t\treturn\n\t}\n\n\tdb, err := d.adb.Get(req.Username)\n\tif err != nil {\n\t\tservice.ProcessServerError(w, service.ErrFindUser)\n\t\treturn\n\t}\n\n\tas, err := storage.NewAudioPostgres(db)\n\tif err != nil {\n\t\tservice.ProcessServerError(w, service.ErrFindUser)\n\t\treturn\n\t}\n\n\taudio, err := as.GetByToken(req.Token)\n\tif err != nil {\n\t\tservice.ProcessServerError(w, service.ErrFindUserAudio)\n\t\treturn\n\t}\n\n\terr = os.Remove(path.Join(getBaseDir(req.Username), audio.Name))\n\tlog.Println(err)\n\tas.Remove(audio)\n}",
"func (s *OAuthTokensStore) RemoveByAccess(ctx context.Context, access string) error {\n\treturn s.Find(db.Cond{\"access\": access}).Delete()\n}",
"func DeleteOauth2ById(db *mgo.Database,id bson.ObjectId) error {\n\tc:=db.C(OAUTH_2)\n\treturn c.RemoveId(id)\n}",
"func (c *Client) RemoveDeviceToken(address, token, platform string) error {\n\tt := &DeviceToken{}\n\t_, err := c.Model(t).Where(\"address = ? \", address).\n\t\tWhere(\"token = ?\", token).\n\t\tWhere(\"platform = ?\", platform).Delete()\n\treturn err\n}",
"func (o *AuthToken) RemoveUser(exec boil.Executor, related *User) error {\n\tvar err error\n\n\to.UserID.Valid = false\n\tif err = o.Update(exec, \"user_id\"); err != nil {\n\t\to.UserID.Valid = true\n\t\treturn errors.Wrap(err, \"failed to update local table\")\n\t}\n\n\to.R.User = nil\n\tif related == nil || related.R == nil {\n\t\treturn nil\n\t}\n\n\tfor i, ri := range related.R.AuthTokens {\n\t\tif o.UserID.Int != ri.UserID.Int {\n\t\t\tcontinue\n\t\t}\n\n\t\tln := len(related.R.AuthTokens)\n\t\tif ln > 1 && i < ln-1 {\n\t\t\trelated.R.AuthTokens[i] = related.R.AuthTokens[ln-1]\n\t\t}\n\t\trelated.R.AuthTokens = related.R.AuthTokens[:ln-1]\n\t\tbreak\n\t}\n\treturn nil\n}"
] |
NewDockerMemoryStatsUpdate returns the fields that have been updated since the last measurement. It returns nil if nothing has changed.
|
func NewDockerMemoryStatsUpdate(prev, next docker.ContainerMemoryStats) *DockerMemoryStatsUpdate {
if prev == next {
return nil
}
var delta DockerMemoryStatsUpdate
if prev.Usage != next.Usage {
delta.Usage = &next.Usage
}
if prev.MaxUsage != next.MaxUsage {
delta.MaxUsage = &next.MaxUsage
}
if prev.Limit != next.Limit {
delta.Limit = &next.Limit
}
if prev.Stats == next.Stats {
return &delta
}
if prev.Stats.ActiveAnon != next.Stats.ActiveAnon {
delta.ActiveAnon = &next.Stats.ActiveAnon
}
if prev.Stats.ActiveFile != next.Stats.ActiveFile {
delta.ActiveFile = &next.Stats.ActiveFile
}
if prev.Stats.InactiveAnon != next.Stats.InactiveAnon {
delta.InactiveAnon = &next.Stats.InactiveAnon
}
if prev.Stats.InactiveFile != next.Stats.InactiveFile {
delta.InactiveFile = &next.Stats.InactiveFile
}
if prev.Stats.TotalCache != next.Stats.TotalCache {
delta.TotalCache = &next.Stats.TotalCache
}
if prev.Stats.TotalRss != next.Stats.TotalRss {
delta.TotalRss = &next.Stats.TotalRss
}
return &delta
}
|
[
"func getMmStat(ctx context.Context) ([]float64, error) {\n\tout, err := testexec.CommandContext(ctx,\n\t\t\"cat\", zramMmStatPath).Output(testexec.DumpLogOnError)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to dump zram mm_stat\")\n\t}\n\n\tstatsRaw := strings.Fields(string(out))\n\tnumFields := len(statsRaw)\n\tstats := make([]float64, numFields)\n\tfor i := 0; i < numFields; i++ {\n\t\tstats[i], err = strconv.ParseFloat(statsRaw[i], 64)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to parse %s\", zramFieldNames[i])\n\t\t}\n\t}\n\treturn stats, nil\n}",
"func (pt *ProcessTemplate) ExtractInstanceUpdateData(input *Process, host map[string]interface{}) (\n\tmap[string]interface{}, error) {\n\n\tdata := make(map[string]interface{})\n\tproperty := pt.Property\n\tif IsAsDefaultValue(property.FuncName.AsDefaultValue) == false {\n\t\tif input.FuncName != nil {\n\t\t\tdata[\"bk_func_name\"] = *input.FuncName\n\t\t}\n\t}\n\tif IsAsDefaultValue(property.ProcessName.AsDefaultValue) == false {\n\t\tif input.ProcessName != nil {\n\t\t\tdata[\"bk_process_name\"] = *input.ProcessName\n\t\t}\n\t}\n\tif IsAsDefaultValue(property.StartParamRegex.AsDefaultValue) == false {\n\t\tif input.StartParamRegex != nil {\n\t\t\tdata[\"bk_start_param_regex\"] = *input.StartParamRegex\n\t\t}\n\t}\n\tif IsAsDefaultValue(property.StartCheckSecs.AsDefaultValue) == false {\n\t\tif input.StartCheckSecs != nil {\n\t\t\tdata[\"bk_start_check_secs\"] = *input.StartCheckSecs\n\t\t}\n\t}\n\tif IsAsDefaultValue(property.User.AsDefaultValue) == false {\n\t\tif input.User != nil {\n\t\t\tdata[\"user\"] = *input.User\n\t\t}\n\t}\n\tif IsAsDefaultValue(property.StopCmd.AsDefaultValue) == false {\n\t\tif input.StopCmd != nil {\n\t\t\tdata[\"stop_cmd\"] = *input.StopCmd\n\t\t}\n\t}\n\tif IsAsDefaultValue(property.ProcNum.AsDefaultValue) == false {\n\t\tif input.ProcNum != nil {\n\t\t\tdata[\"proc_num\"] = *input.ProcNum\n\t\t}\n\t}\n\n\tif IsAsDefaultValue(property.Description.AsDefaultValue) == false {\n\t\tif input.Description != nil {\n\t\t\tdata[\"description\"] = *input.Description\n\t\t}\n\t}\n\n\tif IsAsDefaultValue(property.TimeoutSeconds.AsDefaultValue) == false {\n\t\tif input.TimeoutSeconds != nil {\n\t\t\tdata[\"timeout\"] = *input.TimeoutSeconds\n\t\t}\n\t}\n\tif IsAsDefaultValue(property.AutoStart.AsDefaultValue) == false {\n\t\tif input.AutoStart != nil {\n\t\t\tdata[\"auto_start\"] = *input.AutoStart\n\t\t}\n\t}\n\tif IsAsDefaultValue(property.PidFile.AsDefaultValue) == false {\n\t\tif input.PidFile != nil {\n\t\t\tdata[\"pid_file\"] = *input.PidFile\n\t\t}\n\t}\n\tif IsAsDefaultValue(property.ReloadCmd.AsDefaultValue) == false {\n\t\tif input.ReloadCmd != nil {\n\t\t\tdata[\"reload_cmd\"] = *input.ReloadCmd\n\t\t}\n\t}\n\tif IsAsDefaultValue(property.RestartCmd.AsDefaultValue) == false {\n\t\tif input.RestartCmd != nil {\n\t\t\tdata[\"restart_cmd\"] = *input.RestartCmd\n\t\t}\n\t}\n\tif IsAsDefaultValue(property.ForceStopCmd.AsDefaultValue) == false {\n\t\tif input.ForceStopCmd != nil {\n\t\t\tdata[\"face_stop_cmd\"] = *input.ForceStopCmd\n\t\t}\n\t}\n\tif IsAsDefaultValue(property.WorkPath.AsDefaultValue) == false {\n\t\tif input.WorkPath != nil {\n\t\t\tdata[\"work_path\"] = *input.WorkPath\n\t\t}\n\t}\n\n\tif IsAsDefaultValue(property.Priority.AsDefaultValue) == false {\n\t\tif input.Priority != nil {\n\t\t\tdata[\"priority\"] = *input.Priority\n\t\t}\n\t}\n\tif IsAsDefaultValue(property.StartCmd.AsDefaultValue) == false {\n\t\tif input.StartCmd != nil {\n\t\t\tdata[\"start_cmd\"] = *input.StartCmd\n\t\t}\n\t}\n\n\t// bind info 每次都是全量更新\n\tvar err error\n\tdata[common.BKProcBindInfo], err = pt.Property.BindInfo.ExtractInstanceUpdateData(input, host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn data, nil\n}",
"func NewUpdateStats(\n\trootDir string,\n\tfilesToImport *[]string,\n\timportFilesCount int,\n\tupdatedFilesCount int,\n\tfailedFiles *[]string,\n) *UpdateStats {\n\n\ttotalCountOfFiles := len(*filesToImport)\n\tcountOfFailedFiles := len(*failedFiles)\n\n\treturn &UpdateStats{\n\t\tImportStats: ImportStats{\n\t\t\tRootDirectory: rootDir,\n\t\t\tScannedFilesCount: totalCountOfFiles,\n\t\t\tImportedFilesCount: importFilesCount,\n\t\t\tFailedFilesCount: countOfFailedFiles,\n\t\t\tFailedFiles: *failedFiles,\n\t\t},\n\t\tUpdatedFilesCount: updatedFilesCount,\n\t}\n}",
"func (p *Process) MemoryInfo() (*MemoryInfoStat, error) {\n\treturn p.MemoryInfoWithContext(context.Background())\n}",
"func (hdlr *prochdlr) memInfo() (*MemoryInfo, error) {\r\n\tcounters := winapi.ProcessMemoryCountersEx{}\r\n\tsize := uint32(unsafe.Sizeof(counters))\r\n\terr := winapi.GetProcessMemoryInfo(hdlr.handler, &counters, size)\r\n\tif err != nil {\r\n\t\treturn nil, errors.Wrap(err, \"get process memory info\")\r\n\t}\r\n\r\n\tminfo := MemoryInfo{\r\n\t\tWorkingSetSize: counters.WorkingSetSize,\r\n\t\tQuotaPagedPoolUsage: counters.QuotaPagedPoolUsage,\r\n\t\tQuotaNonPagedPoolUsage: counters.QuotaNonPagedPoolUsage,\r\n\t\tPrivateUsage: counters.PrivateUsage,\r\n\t}\r\n\r\n\treturn &minfo, nil\r\n}",
"func (m *KubeletMonitor) parsePodStats(podStats []stats.PodStats) {\n\tfor _, podStat := range podStats {\n\t\tvar cpuUsageNanoCoreSum uint64\n\t\tvar memoryUsageBytesSum uint64\n\t\tfor _, containerStat := range podStat.Containers {\n\t\t\tif containerStat.CPU != nil && containerStat.CPU.UsageNanoCores != nil {\n\t\t\t\tcpuUsageNanoCoreSum += *containerStat.CPU.UsageNanoCores\n\t\t\t}\n\t\t\tif containerStat.Memory != nil && containerStat.Memory.UsageBytes != nil {\n\t\t\t\tmemoryUsageBytesSum += *containerStat.Memory.UsageBytes\n\t\t\t}\n\t\t}\n\t\tglog.V(4).Infof(\"Cpu usage of pod %s is %f core\", util.PodStatsKeyFunc(podStat),\n\t\t\tfloat64(cpuUsageNanoCoreSum)/util.NanoToUnit)\n\t\tpodCpuUsageCoreMetrics := metrics.NewEntityResourceMetric(task.PodType, util.PodStatsKeyFunc(podStat),\n\t\t\tmetrics.CPU, metrics.Used, float64(cpuUsageNanoCoreSum)/util.NanoToUnit)\n\n\t\tglog.V(4).Infof(\"Memory usage of pod %s is %f Kb\", util.PodStatsKeyFunc(podStat),\n\t\t\tfloat64(memoryUsageBytesSum)/util.KilobytesToBytes)\n\t\tpodMemoryUsageCoreMetrics := metrics.NewEntityResourceMetric(task.PodType, util.PodStatsKeyFunc(podStat),\n\t\t\tmetrics.Memory, metrics.Used, float64(memoryUsageBytesSum)/util.KilobytesToBytes)\n\n\t\t// application cpu and mem used are the same as pod's.\n\t\tapplicationCpuUsageCoreMetrics := metrics.NewEntityResourceMetric(task.ApplicationType,\n\t\t\tutil.PodStatsKeyFunc(podStat), metrics.CPU, metrics.Used,\n\t\t\tfloat64(cpuUsageNanoCoreSum)/util.NanoToUnit)\n\t\tapplicationMemoryUsageCoreMetrics := metrics.NewEntityResourceMetric(task.ApplicationType,\n\t\t\tutil.PodStatsKeyFunc(podStat), metrics.Memory, metrics.Used,\n\t\t\tfloat64(memoryUsageBytesSum)/util.KilobytesToBytes)\n\n\t\tm.metricSink.AddNewMetricEntries(podCpuUsageCoreMetrics,\n\t\t\tpodMemoryUsageCoreMetrics,\n\t\t\tapplicationCpuUsageCoreMetrics,\n\t\t\tapplicationMemoryUsageCoreMetrics)\n\t}\n}",
"func New() *MemStats {\n\treturn &MemStats{\n\t\tvalues: map[string]int64{},\n\t}\n}"
] |
cornstack-samples
🚧 This dataset is under active development and may change.
Filtered CoRNStack sample subsets for code retrieval training.
Source dataset and paper:
- CoRNStack collection: https://huggingface.co/collections/nomic-ai/cornstack
- CoRNStack paper: https://huggingface.co/papers/2412.01007
Note: the original CoRNStack collection is a much larger dataset family for code search training. If you need large-scale data (not samples), please refer to the original CoRNStack collection above.
What This Release Contains
This release keeps the original subset layout (6 languages x pair + hard-negatives) and applies deterministic rule-based filtering.
In this revision, query-level deduplication and positive-document (pos) deduplication are applied per subset.
For both rules, if duplicates exist, only the first row is kept.
Config Layout And Schema
Each language is published as two configs with split train:
{lang}-v1-pair-2M{lang}-v1-hard-negatives-100k
Schema:
- Pair configs:
query,pos - Hard-negative configs:
query,pos,negs(list[string])
Subsets And Row Counts (After Filter + Query Dedup + Pos Dedup)
Note: subset names keep the original 2M / 100k naming convention for compatibility, but
actual row counts vary by language and can be substantially lower after quality filtering and
deduplication.
| Subset (config name) | split | num_examples |
|---|---|---|
go-v1-pair-2M |
train |
1,532,319 |
go-v1-hard-negatives-100k |
train |
87,536 |
java-v1-pair-2M |
train |
1,456,651 |
java-v1-hard-negatives-100k |
train |
80,906 |
javascript-v1-pair-2M |
train |
1,298,745 |
javascript-v1-hard-negatives-100k |
train |
79,484 |
php-v1-pair-2M |
train |
1,311,302 |
php-v1-hard-negatives-100k |
train |
74,945 |
python-v1-pair-2M |
train |
1,779,891 |
python-v1-hard-negatives-100k |
train |
96,670 |
ruby-v1-pair-2M |
train |
1,145,414 |
ruby-v1-hard-negatives-100k |
train |
67,937 |
Total rows:
- Pair: 8,524,322
- Hard-negatives: 487,478
- Overall: 9,011,800
Dedup Impact (Pos Dedup Stage)
The table below shows the pos-dedup impact on top of the previous filtered + query-deduped data.
| Subset | before | after | removed | removed_ratio |
|---|---|---|---|---|
go-v1-pair-2M |
1,541,111 | 1,532,319 | 8,792 | 0.57% |
go-v1-hard-negatives-100k |
87,647 | 87,536 | 111 | 0.13% |
java-v1-pair-2M |
1,491,655 | 1,456,651 | 35,004 | 2.35% |
java-v1-hard-negatives-100k |
81,657 | 80,906 | 751 | 0.92% |
javascript-v1-pair-2M |
1,310,965 | 1,298,745 | 12,220 | 0.93% |
javascript-v1-hard-negatives-100k |
79,684 | 79,484 | 200 | 0.25% |
php-v1-pair-2M |
1,343,442 | 1,311,302 | 32,140 | 2.39% |
php-v1-hard-negatives-100k |
75,632 | 74,945 | 687 | 0.91% |
python-v1-pair-2M |
1,807,480 | 1,779,891 | 27,589 | 1.53% |
python-v1-hard-negatives-100k |
97,147 | 96,670 | 477 | 0.49% |
ruby-v1-pair-2M |
1,175,219 | 1,145,414 | 29,805 | 2.54% |
ruby-v1-hard-negatives-100k |
68,382 | 67,937 | 445 | 0.65% |
Stage totals:
- Pair removed by pos dedup: 145,550 (1.68%)
- Hard-negatives removed by pos dedup: 2,671 (0.54%)
- Overall removed by pos dedup: 148,221 (1.62%)
Quick Usage
from datasets import load_dataset
pair_ds = load_dataset("hotchpotch/cornstack-samples", "python-v1-pair-2M", split="train")
hard_ds = load_dataset("hotchpotch/cornstack-samples", "python-v1-hard-negatives-100k", split="train")
print(pair_ds.column_names, len(pair_ds))
print(hard_ds.column_names, len(hard_ds))
License
This dataset follows CoRNStack and is released under Apache-2.0.
Citation And Attribution
If you use this dataset, please cite and attribute CoRNStack:
- Paper: https://huggingface.co/papers/2412.01007
- Collection: https://huggingface.co/collections/nomic-ai/cornstack
Noise Filtering Algorithm (Rule-based)
The following deterministic rules are applied before publishing this release.
- Prefix-based noisy query removal
A row is dropped if
querystarts with any of the following prefixes:
TODOGET /POST /PUT /DELETE /Display a listing of the resource.Store a newly created resource in storage.Show the form for editing the specified resource.Update the specified resource in storage.Show the form for creating a new resource.Remove the specified resource from storage.Display the specified resource.Transform the resource into an array.Autogenerated method stubAuto generatedthis down() migration is autogeneratedthis up() migration is autogenerated"/ renamed from:""/ access modifiers changed from:"
- Minimum positive-document length A row is dropped if the positive side text is shorter than 30 characters.
- Pair configs:
poslength >= 30 required - Hard-negative configs:
poslength >= 30 required
Hard-negative size constraint For hard-negative configs in this release, each row keeps exactly seven negatives after normalization (
len(negs) = 7, equivalent tomin_negs = 7).Query-level deduplication Within each subset split, rows are grouped by exact
querystring.
- Keep the first occurrence
- Drop all later duplicates
- Positive-document (
pos) deduplication Within each subset split, rows are grouped by exactposstring.
- Keep the first occurrence
- Drop all later duplicates
This filtering is purely rule-based (no model scoring), targeting high-noise templates and low-information positives while preserving broad retrieval coverage.
- Downloads last month
- 28