From 6e2cb00008cbf09e556b00f87603797fcaa47e09 Mon Sep 17 00:00:00 2001
From: Christopher Speller
Date: Mon, 16 Apr 2018 05:37:14 -0700
Subject: Dependency upgrades and moving to dep. (#8630)

---
 vendor/github.com/prometheus/procfs/bcache/get.go | 330 ----------------------
 1 file changed, 330 deletions(-)
 delete mode 100644 vendor/github.com/prometheus/procfs/bcache/get.go

(limited to 'vendor/github.com/prometheus/procfs/bcache/get.go')

diff --git a/vendor/github.com/prometheus/procfs/bcache/get.go b/vendor/github.com/prometheus/procfs/bcache/get.go
deleted file mode 100644
index b6d97de15..000000000
--- a/vendor/github.com/prometheus/procfs/bcache/get.go
+++ /dev/null
@@ -1,330 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bcache
-
-import (
-	"bufio"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path"
-	"path/filepath"
-	"strconv"
-	"strings"
-)
-
-// ParsePseudoFloat parses the peculiar format produced by bcache's bch_hprint.
-func parsePseudoFloat(str string) (float64, error) {
-	ss := strings.Split(str, ".")
-
-	intPart, err := strconv.ParseFloat(ss[0], 64)
-	if err != nil {
-		return 0, err
-	}
-
-	if len(ss) == 1 {
-		// Pure integers are fine.
-		return intPart, nil
-	}
-	fracPart, err := strconv.ParseFloat(ss[1], 64)
-	if err != nil {
-		return 0, err
-	}
-	// fracPart is a number between 0 and 1023 divided by 100; it is off
-	// by a small amount. Unexpected bumps in time lines may occur because
-	// for bch_hprint .1 != .10 and .10 > .9 (at least up to Linux
-	// v4.12-rc3).
-
-	// Restore the proper order:
-	fracPart = fracPart / 10.24
-	return intPart + fracPart, nil
-}
-
-// Dehumanize converts a human-readable byte slice into a uint64.
-func dehumanize(hbytes []byte) (uint64, error) {
-	ll := len(hbytes)
-	if ll == 0 {
-		return 0, fmt.Errorf("zero-length reply")
-	}
-	lastByte := hbytes[ll-1]
-	mul := float64(1)
-	var (
-		mant float64
-		err  error
-	)
-	// If lastByte is beyond the range of ASCII digits, it must be a
-	// multiplier.
-	if lastByte > 57 {
-		// Remove multiplier from slice.
-		hbytes = hbytes[:len(hbytes)-1]
-
-		const (
-			_ = 1 << (10 * iota)
-			KiB
-			MiB
-			GiB
-			TiB
-			PiB
-			EiB
-			ZiB
-			YiB
-		)
-
-		multipliers := map[rune]float64{
-			// Source for conversion rules:
-			// linux-kernel/drivers/md/bcache/util.c:bch_hprint()
-			'k': KiB,
-			'M': MiB,
-			'G': GiB,
-			'T': TiB,
-			'P': PiB,
-			'E': EiB,
-			'Z': ZiB,
-			'Y': YiB,
-		}
-		mul = multipliers[rune(lastByte)]
-		mant, err = parsePseudoFloat(string(hbytes))
-		if err != nil {
-			return 0, err
-		}
-	} else {
-		// Not humanized by bch_hprint
-		mant, err = strconv.ParseFloat(string(hbytes), 64)
-		if err != nil {
-			return 0, err
-		}
-	}
-	res := uint64(mant * mul)
-	return res, nil
-}
-
-type parser struct {
-	uuidPath   string
-	subDir     string
-	currentDir string
-	err        error
-}
-
-func (p *parser) setSubDir(pathElements ...string) {
-	p.subDir = path.Join(pathElements...)
-	p.currentDir = path.Join(p.uuidPath, p.subDir)
-}
-
-func (p *parser) readValue(fileName string) uint64 {
-	if p.err != nil {
-		return 0
-	}
-	path := path.Join(p.currentDir, fileName)
-	byt, err := ioutil.ReadFile(path)
-	if err != nil {
-		p.err = fmt.Errorf("failed to read: %s", path)
-		return 0
-	}
-	// Remove trailing newline.
-	byt = byt[:len(byt)-1]
-	res, err := dehumanize(byt)
-	p.err = err
-	return res
-}
-
-// ParsePriorityStats parses lines from the priority_stats file.
-func parsePriorityStats(line string, ps *PriorityStats) error {
-	var (
-		value uint64
-		err   error
-	)
-	switch {
-	case strings.HasPrefix(line, "Unused:"):
-		fields := strings.Fields(line)
-		rawValue := fields[len(fields)-1]
-		valueStr := strings.TrimSuffix(rawValue, "%")
-		value, err = strconv.ParseUint(valueStr, 10, 64)
-		if err != nil {
-			return err
-		}
-		ps.UnusedPercent = value
-	case strings.HasPrefix(line, "Metadata:"):
-		fields := strings.Fields(line)
-		rawValue := fields[len(fields)-1]
-		valueStr := strings.TrimSuffix(rawValue, "%")
-		value, err = strconv.ParseUint(valueStr, 10, 64)
-		if err != nil {
-			return err
-		}
-		ps.MetadataPercent = value
-	}
-	return nil
-}
-
-func (p *parser) getPriorityStats() PriorityStats {
-	var res PriorityStats
-
-	if p.err != nil {
-		return res
-	}
-
-	path := path.Join(p.currentDir, "priority_stats")
-
-	file, err := os.Open(path)
-	if err != nil {
-		p.err = fmt.Errorf("failed to read: %s", path)
-		return res
-	}
-	defer file.Close()
-
-	scanner := bufio.NewScanner(file)
-	for scanner.Scan() {
-		err = parsePriorityStats(scanner.Text(), &res)
-		if err != nil {
-			p.err = fmt.Errorf("failed to parse: %s (%s)", path, err)
-			return res
-		}
-	}
-	if err := scanner.Err(); err != nil {
-		p.err = fmt.Errorf("failed to parse: %s (%s)", path, err)
-		return res
-	}
-	return res
-}
-
-// GetStats collects from sysfs files data tied to one bcache ID.
-func GetStats(uuidPath string) (*Stats, error) {
-	var bs Stats
-
-	par := parser{uuidPath: uuidPath}
-
-	// bcache stats
-
-	// dir <uuidPath>
-	par.setSubDir("")
-	bs.Bcache.AverageKeySize = par.readValue("average_key_size")
-	bs.Bcache.BtreeCacheSize = par.readValue("btree_cache_size")
-	bs.Bcache.CacheAvailablePercent = par.readValue("cache_available_percent")
-	bs.Bcache.Congested = par.readValue("congested")
-	bs.Bcache.RootUsagePercent = par.readValue("root_usage_percent")
-	bs.Bcache.TreeDepth = par.readValue("tree_depth")
-
-	// bcache stats (internal)
-
-	// dir <uuidPath>/internal
-	par.setSubDir("internal")
-	bs.Bcache.Internal.ActiveJournalEntries = par.readValue("active_journal_entries")
-	bs.Bcache.Internal.BtreeNodes = par.readValue("btree_nodes")
-	bs.Bcache.Internal.BtreeReadAverageDurationNanoSeconds = par.readValue("btree_read_average_duration_us")
-	bs.Bcache.Internal.CacheReadRaces = par.readValue("cache_read_races")
-
-	// bcache stats (period)
-
-	// dir <uuidPath>/stats_five_minute
-	par.setSubDir("stats_five_minute")
-	bs.Bcache.FiveMin.Bypassed = par.readValue("bypassed")
-	bs.Bcache.FiveMin.CacheHits = par.readValue("cache_hits")
-
-	bs.Bcache.FiveMin.Bypassed = par.readValue("bypassed")
-	bs.Bcache.FiveMin.CacheBypassHits = par.readValue("cache_bypass_hits")
-	bs.Bcache.FiveMin.CacheBypassMisses = par.readValue("cache_bypass_misses")
-	bs.Bcache.FiveMin.CacheHits = par.readValue("cache_hits")
-	bs.Bcache.FiveMin.CacheMissCollisions = par.readValue("cache_miss_collisions")
-	bs.Bcache.FiveMin.CacheMisses = par.readValue("cache_misses")
-	bs.Bcache.FiveMin.CacheReadaheads = par.readValue("cache_readaheads")
-
-	// dir <uuidPath>/stats_total
-	par.setSubDir("stats_total")
-	bs.Bcache.Total.Bypassed = par.readValue("bypassed")
-	bs.Bcache.Total.CacheHits = par.readValue("cache_hits")
-
-	bs.Bcache.Total.Bypassed = par.readValue("bypassed")
-	bs.Bcache.Total.CacheBypassHits = par.readValue("cache_bypass_hits")
-	bs.Bcache.Total.CacheBypassMisses = par.readValue("cache_bypass_misses")
-	bs.Bcache.Total.CacheHits = par.readValue("cache_hits")
-	bs.Bcache.Total.CacheMissCollisions = par.readValue("cache_miss_collisions")
-	bs.Bcache.Total.CacheMisses = par.readValue("cache_misses")
-	bs.Bcache.Total.CacheReadaheads = par.readValue("cache_readaheads")
-
-	if par.err != nil {
-		return nil, par.err
-	}
-
-	// bdev stats
-
-	reg := path.Join(uuidPath, "bdev[0-9]*")
-	bdevDirs, err := filepath.Glob(reg)
-	if err != nil {
-		return nil, err
-	}
-
-	bs.Bdevs = make([]BdevStats, len(bdevDirs))
-
-	for ii, bdevDir := range bdevDirs {
-		var bds = &bs.Bdevs[ii]
-
-		bds.Name = filepath.Base(bdevDir)
-
-		par.setSubDir(bds.Name)
-		bds.DirtyData = par.readValue("dirty_data")
-
-		// dir <uuidPath>/<bds.Name>/stats_five_minute
-		par.setSubDir(bds.Name, "stats_five_minute")
-		bds.FiveMin.Bypassed = par.readValue("bypassed")
-		bds.FiveMin.CacheBypassHits = par.readValue("cache_bypass_hits")
-		bds.FiveMin.CacheBypassMisses = par.readValue("cache_bypass_misses")
-		bds.FiveMin.CacheHits = par.readValue("cache_hits")
-		bds.FiveMin.CacheMissCollisions = par.readValue("cache_miss_collisions")
-		bds.FiveMin.CacheMisses = par.readValue("cache_misses")
-		bds.FiveMin.CacheReadaheads = par.readValue("cache_readaheads")
-
-		// dir <uuidPath>/<bds.Name>/stats_total
-		par.setSubDir("stats_total")
-		bds.Total.Bypassed = par.readValue("bypassed")
-		bds.Total.CacheBypassHits = par.readValue("cache_bypass_hits")
-		bds.Total.CacheBypassMisses = par.readValue("cache_bypass_misses")
-		bds.Total.CacheHits = par.readValue("cache_hits")
-		bds.Total.CacheMissCollisions = par.readValue("cache_miss_collisions")
-		bds.Total.CacheMisses = par.readValue("cache_misses")
-		bds.Total.CacheReadaheads = par.readValue("cache_readaheads")
-	}
-
-	if par.err != nil {
-		return nil, par.err
-	}
-
-	// cache stats
-
-	reg = path.Join(uuidPath, "cache[0-9]*")
-	cacheDirs, err := filepath.Glob(reg)
-	if err != nil {
-		return nil, err
-	}
-	bs.Caches = make([]CacheStats, len(cacheDirs))
-
-	for ii, cacheDir := range cacheDirs {
-		var cs = &bs.Caches[ii]
-		cs.Name = filepath.Base(cacheDir)
-
-		// dir is <uuidPath>/<cs.Name>
-		par.setSubDir(cs.Name)
-		cs.IOErrors = par.readValue("io_errors")
-		cs.MetadataWritten = par.readValue("metadata_written")
-		cs.Written = par.readValue("written")
-
-		ps := par.getPriorityStats()
-		cs.Priority = ps
-	}
-
-	if par.err != nil {
-		return nil, par.err
-	}
-
-	return &bs, nil
-}
-- 
cgit v1.2.3-1-g7c22