PageRenderTime 149ms CodeModel.GetById 15ms RepoModel.GetById 0ms app.codeStats 0ms

/vendor/github.com/google/cadvisor/fs/fs.go

https://gitlab.com/unofficial-mirrors/kubernetes
Go | 766 lines | 618 code | 90 blank | 58 comment | 170 complexity | f4972d6fc0d28964adfa9574094e6c0f MD5 | raw file
  1. // Copyright 2014 Google Inc. All Rights Reserved.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. // +build linux
  15. // Provides Filesystem Stats
  16. package fs
  17. import (
  18. "bufio"
  19. "bytes"
  20. "fmt"
  21. "io/ioutil"
  22. "os"
  23. "os/exec"
  24. "path"
  25. "path/filepath"
  26. "regexp"
  27. "strconv"
  28. "strings"
  29. "syscall"
  30. "time"
  31. "github.com/docker/docker/pkg/mount"
  32. "github.com/golang/glog"
  33. "github.com/google/cadvisor/devicemapper"
  34. "github.com/google/cadvisor/utils"
  35. dockerutil "github.com/google/cadvisor/utils/docker"
  36. zfs "github.com/mistifyio/go-zfs"
  37. )
const (
	// LabelSystemRoot identifies the partition backing the "/" mount.
	LabelSystemRoot = "root"
	// LabelDockerImages identifies the partition holding Docker's image storage.
	LabelDockerImages = "docker-images"
	// LabelRktImages identifies the partition holding rkt's image storage.
	LabelRktImages = "rkt-images"
	// LabelCrioImages identifies the partition holding CRI-O's image storage.
	LabelCrioImages = "crio-images"
)
// The maximum number of `du` and `find` tasks that can be running at once.
const maxConcurrentOps = 20

// A pool for restricting the number of consecutive `du` and `find` tasks running.
// Tokens are added by init() below; claimToken/releaseToken acquire and return them.
var pool = make(chan struct{}, maxConcurrentOps)
// init pre-fills the pool with maxConcurrentOps tokens so that up to that
// many `du`/`find` operations can run before callers block in claimToken.
func init() {
	for i := 0; i < maxConcurrentOps; i++ {
		releaseToken()
	}
}

// claimToken blocks until a concurrency token is available.
func claimToken() {
	<-pool
}

// releaseToken returns a concurrency token to the pool.
func releaseToken() {
	pool <- struct{}{}
}
// partition describes a filesystem partition backing one or more mounts.
type partition struct {
	// mountpoint is where the partition is mounted.
	mountpoint string
	// major and minor are the device numbers of the backing block device.
	major uint
	minor uint
	// fsType is the filesystem type (e.g. "ext4", "btrfs", "devicemapper").
	fsType string
	// blockSize is the devicemapper data block size (in 512-byte sectors);
	// only populated for devicemapper "partitions".
	blockSize uint
}
// RealFsInfo implements FsInfo using the host's actual mount table,
// block devices and devicemapper state.
type RealFsInfo struct {
	// Map from block device path to partition information.
	partitions map[string]partition
	// Map from label to block device path.
	// Labels are intent-specific tags that are auto-detected.
	labels map[string]string
	// Map from mountpoint to mount information.
	mounts map[string]*mount.Info
	// devicemapper client
	dmsetup devicemapper.DmsetupClient
	// fsUUIDToDeviceName is a map from the filesystem UUID to its device name.
	fsUUIDToDeviceName map[string]string
}
// Context carries the container-runtime paths used to label image storage.
type Context struct {
	// docker root directory.
	Docker  DockerContext
	RktPath string
	Crio    CrioContext
}

// DockerContext describes docker's storage configuration.
type DockerContext struct {
	Root         string
	Driver       string
	DriverStatus map[string]string
}

// CrioContext describes CRI-O's storage configuration.
type CrioContext struct {
	Root string
}
  93. func NewFsInfo(context Context) (FsInfo, error) {
  94. mounts, err := mount.GetMounts()
  95. if err != nil {
  96. return nil, err
  97. }
  98. fsUUIDToDeviceName, err := getFsUUIDToDeviceNameMap()
  99. if err != nil {
  100. // UUID is not always avaiable across different OS distributions.
  101. // Do not fail if there is an error.
  102. glog.Warningf("Failed to get disk UUID mapping, getting disk info by uuid will not work: %v", err)
  103. }
  104. // Avoid devicemapper container mounts - these are tracked by the ThinPoolWatcher
  105. excluded := []string{fmt.Sprintf("%s/devicemapper/mnt", context.Docker.Root)}
  106. fsInfo := &RealFsInfo{
  107. partitions: processMounts(mounts, excluded),
  108. labels: make(map[string]string, 0),
  109. mounts: make(map[string]*mount.Info, 0),
  110. dmsetup: devicemapper.NewDmsetupClient(),
  111. fsUUIDToDeviceName: fsUUIDToDeviceName,
  112. }
  113. for _, mount := range mounts {
  114. fsInfo.mounts[mount.Mountpoint] = mount
  115. }
  116. fsInfo.addRktImagesLabel(context, mounts)
  117. // need to call this before the log line below printing out the partitions, as this function may
  118. // add a "partition" for devicemapper to fsInfo.partitions
  119. fsInfo.addDockerImagesLabel(context, mounts)
  120. fsInfo.addCrioImagesLabel(context, mounts)
  121. glog.V(1).Infof("Filesystem UUIDs: %+v", fsInfo.fsUUIDToDeviceName)
  122. glog.V(1).Infof("Filesystem partitions: %+v", fsInfo.partitions)
  123. fsInfo.addSystemRootLabel(mounts)
  124. return fsInfo, nil
  125. }
  126. // getFsUUIDToDeviceNameMap creates the filesystem uuid to device name map
  127. // using the information in /dev/disk/by-uuid. If the directory does not exist,
  128. // this function will return an empty map.
  129. func getFsUUIDToDeviceNameMap() (map[string]string, error) {
  130. const dir = "/dev/disk/by-uuid"
  131. if _, err := os.Stat(dir); os.IsNotExist(err) {
  132. return make(map[string]string), nil
  133. }
  134. files, err := ioutil.ReadDir(dir)
  135. if err != nil {
  136. return nil, err
  137. }
  138. fsUUIDToDeviceName := make(map[string]string)
  139. for _, file := range files {
  140. path := filepath.Join(dir, file.Name())
  141. target, err := os.Readlink(path)
  142. if err != nil {
  143. glog.Warningf("Failed to resolve symlink for %q", path)
  144. continue
  145. }
  146. device, err := filepath.Abs(filepath.Join(dir, target))
  147. if err != nil {
  148. return nil, fmt.Errorf("failed to resolve the absolute path of %q", filepath.Join(dir, target))
  149. }
  150. fsUUIDToDeviceName[file.Name()] = device
  151. }
  152. return fsUUIDToDeviceName, nil
  153. }
  154. func processMounts(mounts []*mount.Info, excludedMountpointPrefixes []string) map[string]partition {
  155. partitions := make(map[string]partition, 0)
  156. supportedFsType := map[string]bool{
  157. // all ext systems are checked through prefix.
  158. "btrfs": true,
  159. "tmpfs": true,
  160. "xfs": true,
  161. "zfs": true,
  162. }
  163. for _, mount := range mounts {
  164. if !strings.HasPrefix(mount.Fstype, "ext") && !supportedFsType[mount.Fstype] {
  165. continue
  166. }
  167. // Avoid bind mounts.
  168. if _, ok := partitions[mount.Source]; ok {
  169. continue
  170. }
  171. hasPrefix := false
  172. for _, prefix := range excludedMountpointPrefixes {
  173. if strings.HasPrefix(mount.Mountpoint, prefix) {
  174. hasPrefix = true
  175. break
  176. }
  177. }
  178. if hasPrefix {
  179. continue
  180. }
  181. // btrfs fix: following workaround fixes wrong btrfs Major and Minor Ids reported in /proc/self/mountinfo.
  182. // instead of using values from /proc/self/mountinfo we use stat to get Ids from btrfs mount point
  183. if mount.Fstype == "btrfs" && mount.Major == 0 && strings.HasPrefix(mount.Source, "/dev/") {
  184. major, minor, err := getBtrfsMajorMinorIds(mount)
  185. if err != nil {
  186. glog.Warningf("%s", err)
  187. } else {
  188. mount.Major = major
  189. mount.Minor = minor
  190. }
  191. }
  192. partitions[mount.Source] = partition{
  193. fsType: mount.Fstype,
  194. mountpoint: mount.Mountpoint,
  195. major: uint(mount.Major),
  196. minor: uint(mount.Minor),
  197. }
  198. }
  199. return partitions
  200. }
  201. // getDockerDeviceMapperInfo returns information about the devicemapper device and "partition" if
  202. // docker is using devicemapper for its storage driver. If a loopback device is being used, don't
  203. // return any information or error, as we want to report based on the actual partition where the
  204. // loopback file resides, inside of the loopback file itself.
  205. func (self *RealFsInfo) getDockerDeviceMapperInfo(context DockerContext) (string, *partition, error) {
  206. if context.Driver != DeviceMapper.String() {
  207. return "", nil, nil
  208. }
  209. dataLoopFile := context.DriverStatus[dockerutil.DriverStatusDataLoopFile]
  210. if len(dataLoopFile) > 0 {
  211. return "", nil, nil
  212. }
  213. dev, major, minor, blockSize, err := dockerDMDevice(context.DriverStatus, self.dmsetup)
  214. if err != nil {
  215. return "", nil, err
  216. }
  217. return dev, &partition{
  218. fsType: DeviceMapper.String(),
  219. major: major,
  220. minor: minor,
  221. blockSize: blockSize,
  222. }, nil
  223. }
  224. // addSystemRootLabel attempts to determine which device contains the mount for /.
  225. func (self *RealFsInfo) addSystemRootLabel(mounts []*mount.Info) {
  226. for _, m := range mounts {
  227. if m.Mountpoint == "/" {
  228. self.partitions[m.Source] = partition{
  229. fsType: m.Fstype,
  230. mountpoint: m.Mountpoint,
  231. major: uint(m.Major),
  232. minor: uint(m.Minor),
  233. }
  234. self.labels[LabelSystemRoot] = m.Source
  235. return
  236. }
  237. }
  238. }
  239. // addDockerImagesLabel attempts to determine which device contains the mount for docker images.
  240. func (self *RealFsInfo) addDockerImagesLabel(context Context, mounts []*mount.Info) {
  241. dockerDev, dockerPartition, err := self.getDockerDeviceMapperInfo(context.Docker)
  242. if err != nil {
  243. glog.Warningf("Could not get Docker devicemapper device: %v", err)
  244. }
  245. if len(dockerDev) > 0 && dockerPartition != nil {
  246. self.partitions[dockerDev] = *dockerPartition
  247. self.labels[LabelDockerImages] = dockerDev
  248. } else {
  249. self.updateContainerImagesPath(LabelDockerImages, mounts, getDockerImagePaths(context))
  250. }
  251. }
  252. func (self *RealFsInfo) addCrioImagesLabel(context Context, mounts []*mount.Info) {
  253. if context.Crio.Root != "" {
  254. crioPath := context.Crio.Root
  255. crioImagePaths := map[string]struct{}{
  256. "/": {},
  257. }
  258. for _, dir := range []string{"overlay", "overlay2"} {
  259. crioImagePaths[path.Join(crioPath, dir+"-images")] = struct{}{}
  260. }
  261. for crioPath != "/" && crioPath != "." {
  262. crioImagePaths[crioPath] = struct{}{}
  263. crioPath = filepath.Dir(crioPath)
  264. }
  265. self.updateContainerImagesPath(LabelCrioImages, mounts, crioImagePaths)
  266. }
  267. }
  268. func (self *RealFsInfo) addRktImagesLabel(context Context, mounts []*mount.Info) {
  269. if context.RktPath != "" {
  270. rktPath := context.RktPath
  271. rktImagesPaths := map[string]struct{}{
  272. "/": {},
  273. }
  274. for rktPath != "/" && rktPath != "." {
  275. rktImagesPaths[rktPath] = struct{}{}
  276. rktPath = filepath.Dir(rktPath)
  277. }
  278. self.updateContainerImagesPath(LabelRktImages, mounts, rktImagesPaths)
  279. }
  280. }
  281. // Generate a list of possible mount points for docker image management from the docker root directory.
  282. // Right now, we look for each type of supported graph driver directories, but we can do better by parsing
  283. // some of the context from `docker info`.
  284. func getDockerImagePaths(context Context) map[string]struct{} {
  285. dockerImagePaths := map[string]struct{}{
  286. "/": {},
  287. }
  288. // TODO(rjnagal): Detect docker root and graphdriver directories from docker info.
  289. dockerRoot := context.Docker.Root
  290. for _, dir := range []string{"devicemapper", "btrfs", "aufs", "overlay", "overlay2", "zfs"} {
  291. dockerImagePaths[path.Join(dockerRoot, dir)] = struct{}{}
  292. }
  293. for dockerRoot != "/" && dockerRoot != "." {
  294. dockerImagePaths[dockerRoot] = struct{}{}
  295. dockerRoot = filepath.Dir(dockerRoot)
  296. }
  297. return dockerImagePaths
  298. }
  299. // This method compares the mountpoints with possible container image mount points. If a match is found,
  300. // the label is added to the partition.
  301. func (self *RealFsInfo) updateContainerImagesPath(label string, mounts []*mount.Info, containerImagePaths map[string]struct{}) {
  302. var useMount *mount.Info
  303. for _, m := range mounts {
  304. if _, ok := containerImagePaths[m.Mountpoint]; ok {
  305. if useMount == nil || (len(useMount.Mountpoint) < len(m.Mountpoint)) {
  306. useMount = m
  307. }
  308. }
  309. }
  310. if useMount != nil {
  311. self.partitions[useMount.Source] = partition{
  312. fsType: useMount.Fstype,
  313. mountpoint: useMount.Mountpoint,
  314. major: uint(useMount.Major),
  315. minor: uint(useMount.Minor),
  316. }
  317. self.labels[label] = useMount.Source
  318. }
  319. }
  320. func (self *RealFsInfo) GetDeviceForLabel(label string) (string, error) {
  321. dev, ok := self.labels[label]
  322. if !ok {
  323. return "", fmt.Errorf("non-existent label %q", label)
  324. }
  325. return dev, nil
  326. }
  327. func (self *RealFsInfo) GetLabelsForDevice(device string) ([]string, error) {
  328. labels := []string{}
  329. for label, dev := range self.labels {
  330. if dev == device {
  331. labels = append(labels, label)
  332. }
  333. }
  334. return labels, nil
  335. }
  336. func (self *RealFsInfo) GetMountpointForDevice(dev string) (string, error) {
  337. p, ok := self.partitions[dev]
  338. if !ok {
  339. return "", fmt.Errorf("no partition info for device %q", dev)
  340. }
  341. return p.mountpoint, nil
  342. }
// GetFsInfoForPath returns capacity/usage stats for every cached partition
// whose mountpoint appears in mountSet. A nil mountSet means all partitions.
func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, error) {
	filesystems := make([]Fs, 0)
	deviceSet := make(map[string]struct{})
	diskStatsMap, err := getDiskStatsMap("/proc/diskstats")
	if err != nil {
		return nil, err
	}
	for device, partition := range self.partitions {
		_, hasMount := mountSet[partition.mountpoint]
		_, hasDevice := deviceSet[device]
		// Include the partition when unfiltered, or when it is requested and
		// its device has not already been reported.
		if mountSet == nil || (hasMount && !hasDevice) {
			var (
				err error
				fs  Fs
			)
			// The stat strategy depends on the filesystem type.
			switch partition.fsType {
			case DeviceMapper.String():
				fs.Capacity, fs.Free, fs.Available, err = getDMStats(device, partition.blockSize)
				glog.V(5).Infof("got devicemapper fs capacity stats: capacity: %v free: %v available: %v:", fs.Capacity, fs.Free, fs.Available)
				fs.Type = DeviceMapper
			case ZFS.String():
				fs.Capacity, fs.Free, fs.Available, err = getZfstats(device)
				fs.Type = ZFS
			default:
				var inodes, inodesFree uint64
				if utils.FileExists(partition.mountpoint) {
					fs.Capacity, fs.Free, fs.Available, inodes, inodesFree, err = getVfsStats(partition.mountpoint)
					fs.Inodes = &inodes
					fs.InodesFree = &inodesFree
					fs.Type = VFS
				} else {
					glog.V(4).Infof("unable to determine file system type, partition mountpoint does not exist: %v", partition.mountpoint)
				}
			}
			if err != nil {
				// Best-effort: a single failing partition is logged, not fatal.
				glog.Errorf("Stat fs failed. Error: %v", err)
			} else {
				// Mark the device as reported so duplicate mounts of the same
				// device are skipped on later iterations.
				deviceSet[device] = struct{}{}
				fs.DeviceInfo = DeviceInfo{
					Device: device,
					Major:  uint(partition.major),
					Minor:  uint(partition.minor),
				}
				fs.DiskStats = diskStatsMap[device]
				filesystems = append(filesystems, fs)
			}
		}
	}
	return filesystems, nil
}
  393. var partitionRegex = regexp.MustCompile(`^(?:(?:s|v|xv)d[a-z]+\d*|dm-\d+)$`)
  394. func getDiskStatsMap(diskStatsFile string) (map[string]DiskStats, error) {
  395. diskStatsMap := make(map[string]DiskStats)
  396. file, err := os.Open(diskStatsFile)
  397. if err != nil {
  398. if os.IsNotExist(err) {
  399. glog.Warningf("Not collecting filesystem statistics because file %q was not found", diskStatsFile)
  400. return diskStatsMap, nil
  401. }
  402. return nil, err
  403. }
  404. defer file.Close()
  405. scanner := bufio.NewScanner(file)
  406. for scanner.Scan() {
  407. line := scanner.Text()
  408. words := strings.Fields(line)
  409. if !partitionRegex.MatchString(words[2]) {
  410. continue
  411. }
  412. // 8 50 sdd2 40 0 280 223 7 0 22 108 0 330 330
  413. deviceName := path.Join("/dev", words[2])
  414. wordLength := len(words)
  415. offset := 3
  416. var stats = make([]uint64, wordLength-offset)
  417. if len(stats) < 11 {
  418. return nil, fmt.Errorf("could not parse all 11 columns of /proc/diskstats")
  419. }
  420. var error error
  421. for i := offset; i < wordLength; i++ {
  422. stats[i-offset], error = strconv.ParseUint(words[i], 10, 64)
  423. if error != nil {
  424. return nil, error
  425. }
  426. }
  427. diskStats := DiskStats{
  428. ReadsCompleted: stats[0],
  429. ReadsMerged: stats[1],
  430. SectorsRead: stats[2],
  431. ReadTime: stats[3],
  432. WritesCompleted: stats[4],
  433. WritesMerged: stats[5],
  434. SectorsWritten: stats[6],
  435. WriteTime: stats[7],
  436. IoInProgress: stats[8],
  437. IoTime: stats[9],
  438. WeightedIoTime: stats[10],
  439. }
  440. diskStatsMap[deviceName] = diskStats
  441. }
  442. return diskStatsMap, nil
  443. }
// GetGlobalFsInfo returns stats for all known partitions (no mount filter).
func (self *RealFsInfo) GetGlobalFsInfo() ([]Fs, error) {
	return self.GetFsInfoForPath(nil)
}
  447. func major(devNumber uint64) uint {
  448. return uint((devNumber >> 8) & 0xfff)
  449. }
  450. func minor(devNumber uint64) uint {
  451. return uint((devNumber & 0xff) | ((devNumber >> 12) & 0xfff00))
  452. }
  453. func (self *RealFsInfo) GetDeviceInfoByFsUUID(uuid string) (*DeviceInfo, error) {
  454. deviceName, found := self.fsUUIDToDeviceName[uuid]
  455. if !found {
  456. return nil, ErrNoSuchDevice
  457. }
  458. p, found := self.partitions[deviceName]
  459. if !found {
  460. return nil, fmt.Errorf("cannot find device %q in partitions", deviceName)
  461. }
  462. return &DeviceInfo{deviceName, p.major, p.minor}, nil
  463. }
// GetDirFsDevice returns the device backing the filesystem containing dir.
// It matches the directory's major:minor numbers against cached partitions,
// then falls back to a btrfs-specific stat lookup (btrfs mounts report
// major 0 in /proc/self/mountinfo).
func (self *RealFsInfo) GetDirFsDevice(dir string) (*DeviceInfo, error) {
	buf := new(syscall.Stat_t)
	err := syscall.Stat(dir, buf)
	if err != nil {
		return nil, fmt.Errorf("stat failed on %s with error: %s", dir, err)
	}
	// NOTE: these locals intentionally shadow the package-level major/minor helpers.
	major := major(buf.Dev)
	minor := minor(buf.Dev)
	for device, partition := range self.partitions {
		if partition.major == major && partition.minor == minor {
			return &DeviceInfo{device, major, minor}, nil
		}
	}
	// btrfs fallback: only applies when dir is itself a known mountpoint.
	mount, found := self.mounts[dir]
	if found && mount.Fstype == "btrfs" && mount.Major == 0 && strings.HasPrefix(mount.Source, "/dev/") {
		major, minor, err := getBtrfsMajorMinorIds(mount)
		if err != nil {
			glog.Warningf("%s", err)
		} else {
			return &DeviceInfo{mount.Source, uint(major), uint(minor)}, nil
		}
	}
	return nil, fmt.Errorf("could not find device with major: %d, minor: %d in cached partitions map", major, minor)
}
// GetDirDiskUsage returns the disk usage of dir in bytes, throttled by the
// shared token pool so only a bounded number of `du`/`find` tasks run at once.
func (self *RealFsInfo) GetDirDiskUsage(dir string, timeout time.Duration) (uint64, error) {
	claimToken()
	defer releaseToken()
	return GetDirDiskUsage(dir, timeout)
}
  493. func GetDirDiskUsage(dir string, timeout time.Duration) (uint64, error) {
  494. if dir == "" {
  495. return 0, fmt.Errorf("invalid directory")
  496. }
  497. cmd := exec.Command("nice", "-n", "19", "du", "-s", dir)
  498. stdoutp, err := cmd.StdoutPipe()
  499. if err != nil {
  500. return 0, fmt.Errorf("failed to setup stdout for cmd %v - %v", cmd.Args, err)
  501. }
  502. stderrp, err := cmd.StderrPipe()
  503. if err != nil {
  504. return 0, fmt.Errorf("failed to setup stderr for cmd %v - %v", cmd.Args, err)
  505. }
  506. if err := cmd.Start(); err != nil {
  507. return 0, fmt.Errorf("failed to exec du - %v", err)
  508. }
  509. timer := time.AfterFunc(timeout, func() {
  510. glog.Warningf("Killing cmd %v due to timeout(%s)", cmd.Args, timeout.String())
  511. cmd.Process.Kill()
  512. })
  513. stdoutb, souterr := ioutil.ReadAll(stdoutp)
  514. if souterr != nil {
  515. glog.Errorf("Failed to read from stdout for cmd %v - %v", cmd.Args, souterr)
  516. }
  517. stderrb, _ := ioutil.ReadAll(stderrp)
  518. err = cmd.Wait()
  519. timer.Stop()
  520. if err != nil {
  521. return 0, fmt.Errorf("du command failed on %s with output stdout: %s, stderr: %s - %v", dir, string(stdoutb), string(stderrb), err)
  522. }
  523. stdout := string(stdoutb)
  524. usageInKb, err := strconv.ParseUint(strings.Fields(stdout)[0], 10, 64)
  525. if err != nil {
  526. return 0, fmt.Errorf("cannot parse 'du' output %s - %s", stdout, err)
  527. }
  528. return usageInKb * 1024, nil
  529. }
// GetDirInodeUsage returns the inode count under dir, throttled by the shared
// token pool so only a bounded number of `du`/`find` tasks run at once.
func (self *RealFsInfo) GetDirInodeUsage(dir string, timeout time.Duration) (uint64, error) {
	claimToken()
	defer releaseToken()
	return GetDirInodeUsage(dir, timeout)
}
  535. func GetDirInodeUsage(dir string, timeout time.Duration) (uint64, error) {
  536. if dir == "" {
  537. return 0, fmt.Errorf("invalid directory")
  538. }
  539. var counter byteCounter
  540. var stderr bytes.Buffer
  541. findCmd := exec.Command("find", dir, "-xdev", "-printf", ".")
  542. findCmd.Stdout, findCmd.Stderr = &counter, &stderr
  543. if err := findCmd.Start(); err != nil {
  544. return 0, fmt.Errorf("failed to exec cmd %v - %v; stderr: %v", findCmd.Args, err, stderr.String())
  545. }
  546. timer := time.AfterFunc(timeout, func() {
  547. glog.Warningf("Killing cmd %v due to timeout(%s)", findCmd.Args, timeout.String())
  548. findCmd.Process.Kill()
  549. })
  550. err := findCmd.Wait()
  551. timer.Stop()
  552. if err != nil {
  553. return 0, fmt.Errorf("cmd %v failed. stderr: %s; err: %v", findCmd.Args, stderr.String(), err)
  554. }
  555. return counter.bytesWritten, nil
  556. }
  557. func getVfsStats(path string) (total uint64, free uint64, avail uint64, inodes uint64, inodesFree uint64, err error) {
  558. var s syscall.Statfs_t
  559. if err = syscall.Statfs(path, &s); err != nil {
  560. return 0, 0, 0, 0, 0, err
  561. }
  562. total = uint64(s.Frsize) * s.Blocks
  563. free = uint64(s.Frsize) * s.Bfree
  564. avail = uint64(s.Frsize) * s.Bavail
  565. inodes = uint64(s.Files)
  566. inodesFree = uint64(s.Ffree)
  567. return total, free, avail, inodes, inodesFree, nil
  568. }
  569. // Devicemapper thin provisioning is detailed at
  570. // https://www.kernel.org/doc/Documentation/device-mapper/thin-provisioning.txt
  571. func dockerDMDevice(driverStatus map[string]string, dmsetup devicemapper.DmsetupClient) (string, uint, uint, uint, error) {
  572. poolName, ok := driverStatus[dockerutil.DriverStatusPoolName]
  573. if !ok || len(poolName) == 0 {
  574. return "", 0, 0, 0, fmt.Errorf("Could not get dm pool name")
  575. }
  576. out, err := dmsetup.Table(poolName)
  577. if err != nil {
  578. return "", 0, 0, 0, err
  579. }
  580. major, minor, dataBlkSize, err := parseDMTable(string(out))
  581. if err != nil {
  582. return "", 0, 0, 0, err
  583. }
  584. return poolName, major, minor, dataBlkSize, nil
  585. }
  586. // parseDMTable parses a single line of `dmsetup table` output and returns the
  587. // major device, minor device, block size, and an error.
  588. func parseDMTable(dmTable string) (uint, uint, uint, error) {
  589. dmTable = strings.Replace(dmTable, ":", " ", -1)
  590. dmFields := strings.Fields(dmTable)
  591. if len(dmFields) < 8 {
  592. return 0, 0, 0, fmt.Errorf("Invalid dmsetup status output: %s", dmTable)
  593. }
  594. major, err := strconv.ParseUint(dmFields[5], 10, 32)
  595. if err != nil {
  596. return 0, 0, 0, err
  597. }
  598. minor, err := strconv.ParseUint(dmFields[6], 10, 32)
  599. if err != nil {
  600. return 0, 0, 0, err
  601. }
  602. dataBlkSize, err := strconv.ParseUint(dmFields[7], 10, 32)
  603. if err != nil {
  604. return 0, 0, 0, err
  605. }
  606. return uint(major), uint(minor), uint(dataBlkSize), nil
  607. }
  608. func getDMStats(poolName string, dataBlkSize uint) (uint64, uint64, uint64, error) {
  609. out, err := exec.Command("dmsetup", "status", poolName).Output()
  610. if err != nil {
  611. return 0, 0, 0, err
  612. }
  613. used, total, err := parseDMStatus(string(out))
  614. if err != nil {
  615. return 0, 0, 0, err
  616. }
  617. used *= 512 * uint64(dataBlkSize)
  618. total *= 512 * uint64(dataBlkSize)
  619. free := total - used
  620. return total, free, free, nil
  621. }
  622. func parseDMStatus(dmStatus string) (uint64, uint64, error) {
  623. dmStatus = strings.Replace(dmStatus, "/", " ", -1)
  624. dmFields := strings.Fields(dmStatus)
  625. if len(dmFields) < 8 {
  626. return 0, 0, fmt.Errorf("Invalid dmsetup status output: %s", dmStatus)
  627. }
  628. used, err := strconv.ParseUint(dmFields[6], 10, 64)
  629. if err != nil {
  630. return 0, 0, err
  631. }
  632. total, err := strconv.ParseUint(dmFields[7], 10, 64)
  633. if err != nil {
  634. return 0, 0, err
  635. }
  636. return used, total, nil
  637. }
  638. // getZfstats returns ZFS mount stats using zfsutils
  639. func getZfstats(poolName string) (uint64, uint64, uint64, error) {
  640. dataset, err := zfs.GetDataset(poolName)
  641. if err != nil {
  642. return 0, 0, 0, err
  643. }
  644. total := dataset.Used + dataset.Avail + dataset.Usedbydataset
  645. return total, dataset.Avail, dataset.Avail, nil
  646. }
  647. // Simple io.Writer implementation that counts how many bytes were written.
  648. type byteCounter struct{ bytesWritten uint64 }
  649. func (b *byteCounter) Write(p []byte) (int, error) {
  650. b.bytesWritten += uint64(len(p))
  651. return len(p), nil
  652. }
  653. // Get major and minor Ids for a mount point using btrfs as filesystem.
  654. func getBtrfsMajorMinorIds(mount *mount.Info) (int, int, error) {
  655. // btrfs fix: following workaround fixes wrong btrfs Major and Minor Ids reported in /proc/self/mountinfo.
  656. // instead of using values from /proc/self/mountinfo we use stat to get Ids from btrfs mount point
  657. buf := new(syscall.Stat_t)
  658. err := syscall.Stat(mount.Source, buf)
  659. if err != nil {
  660. err = fmt.Errorf("stat failed on %s with error: %s", mount.Source, err)
  661. return 0, 0, err
  662. }
  663. glog.V(4).Infof("btrfs mount %#v", mount)
  664. if buf.Mode&syscall.S_IFMT == syscall.S_IFBLK {
  665. err := syscall.Stat(mount.Mountpoint, buf)
  666. if err != nil {
  667. err = fmt.Errorf("stat failed on %s with error: %s", mount.Mountpoint, err)
  668. return 0, 0, err
  669. }
  670. glog.V(4).Infof("btrfs dev major:minor %d:%d\n", int(major(buf.Dev)), int(minor(buf.Dev)))
  671. glog.V(4).Infof("btrfs rdev major:minor %d:%d\n", int(major(buf.Rdev)), int(minor(buf.Rdev)))
  672. return int(major(buf.Dev)), int(minor(buf.Dev)), nil
  673. } else {
  674. return 0, 0, fmt.Errorf("%s is not a block device", mount.Source)
  675. }
  676. }