1
0
mirror of https://github.com/gluster/glusterd2.git synced 2026-02-05 12:45:38 +01:00

glusterd2: Refactoring of functions for snapshot

Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
This commit is contained in:
Mohammed Rafi KC
2018-01-17 11:21:16 +05:30
parent 82eaf05360
commit acf41b0a82
15 changed files with 251 additions and 157 deletions

View File

@@ -8,10 +8,14 @@ import (
"path"
"strconv"
"strings"
"syscall"
"time"
"github.com/cespare/xxhash"
"github.com/gluster/glusterd2/glusterd2/daemon"
"github.com/gluster/glusterd2/glusterd2/gdctx"
"github.com/gluster/glusterd2/glusterd2/pmap"
"github.com/gluster/glusterd2/pkg/api"
"github.com/gluster/glusterd2/pkg/utils"
config "github.com/spf13/viper"
@@ -131,3 +135,100 @@ func NewGlusterfsd(binfo Brickinfo) (*Glusterfsd, error) {
// ID returns the unique identifier of this brick daemon, which is the
// brick's filesystem path.
func (b *Glusterfsd) ID() string {
	return b.brickinfo.Path
}
// BrickStartMaxRetries represents maximum no. of attempts that will be made
// to start brick processes in case of port clashes.
const BrickStartMaxRetries = 3

// anotherEADDRINUSE is a non-standard errno generated for an RPC connection
// when the brick port is already in use. Needed until
// https://review.gluster.org/#/c/16200/ gets into a release; safe because no
// other well-known errno exists after 132.
const anotherEADDRINUSE = syscall.Errno(0x9E) // 158
// errorContainsErrno reports whether err is an *exec.ExitError whose process
// exit status equals the numeric value of errno. Brick processes propagate
// errnos (e.g. EADDRINUSE) through their exit status.
func errorContainsErrno(err error, errno syscall.Errno) bool {
	exiterr, ok := err.(*exec.ExitError)
	if !ok {
		return false
	}
	status, ok := exiterr.Sys().(syscall.WaitStatus)
	if !ok {
		return false
	}
	// Directly compare instead of the redundant if/return-false/return-true.
	return status.ExitStatus() == int(errno)
}
// These functions are used in vol-create, vol-expand and vol-shrink (TBD).

// StartBrick starts the glusterfsd process for brick b. If the start fails
// because the brick port is in use, it retries up to BrickStartMaxRetries
// times, sleeping between attempts to let the previous instance clean up and
// exit. Returns the last start error, if any.
func (b Brickinfo) StartBrick() error {
	brickDaemon, err := NewGlusterfsd(b)
	if err != nil {
		return err
	}
	for i := 0; i < BrickStartMaxRetries; i++ {
		err = daemon.Start(brickDaemon, true)
		if err == nil {
			return nil
		}
		if !errorContainsErrno(err, syscall.EADDRINUSE) && !errorContainsErrno(err, anotherEADDRINUSE) {
			return err
		}
		// Retry iff brick failed to start because of port being in use.
		// Allow the previous instance to cleanup and exit.
		time.Sleep(1 * time.Second)
	}
	// BUG FIX: the original returned nil here even when every retry failed
	// with EADDRINUSE, silently swallowing the failure. Surface the last error.
	return err
}
// StopBrick terminates the glusterfsd process serving brick b.
func (b Brickinfo) StopBrick() error {
	d, err := NewGlusterfsd(b)
	if err != nil {
		return err
	}
	return daemon.Stop(d, true)
}
// CreateBrickSizeInfo converts internal brick size data into its API
// response representation.
func CreateBrickSizeInfo(size *SizeInfo) api.SizeInfo {
	var out api.SizeInfo
	out.Used = size.Used
	out.Free = size.Free
	out.Capacity = size.Capacity
	return out
}
// CreateBrickInfo converts an internal Brickinfo into its API response
// representation.
func CreateBrickInfo(b *Brickinfo) api.BrickInfo {
	info := api.BrickInfo{
		ID:         b.ID,
		Path:       b.Path,
		VolumeID:   b.VolumeID,
		VolumeName: b.VolumeName,
		NodeID:     b.NodeID,
		Hostname:   b.Hostname,
	}
	info.Type = api.BrickType(b.Type)
	return info
}
// CreateSizeInfo builds a SizeInfo (in bytes) from filesystem statistics.
// A nil fstat yields a zero-valued SizeInfo.
func CreateSizeInfo(fstat *syscall.Statfs_t) *SizeInfo {
	s := &SizeInfo{}
	if fstat == nil {
		return s
	}
	blockSize := uint64(fstat.Bsize)
	s.Capacity = fstat.Blocks * blockSize
	s.Free = fstat.Bfree * blockSize
	s.Used = s.Capacity - s.Free
	return s
}

View File

@@ -3,7 +3,6 @@ package brick
import (
"fmt"
"github.com/gluster/glusterd2/pkg/utils"
"github.com/pborman/uuid"
"golang.org/x/sys/unix"
)
@@ -31,6 +30,13 @@ type Brickinfo struct {
Decommissioned bool
}
// SizeInfo represents sizing information for a brick, in bytes
// (populated from Statfs block counts — see CreateSizeInfo).
type SizeInfo struct {
	Capacity uint64 // total filesystem capacity
	Used     uint64 // bytes in use (Capacity - Free)
	Free     uint64 // bytes available
}
//Brickstatus gives status of brick
type Brickstatus struct {
Info Brickinfo
@@ -40,7 +46,7 @@ type Brickstatus struct {
FS string
MountOpts string
Device string
Size utils.SizeInfo
Size SizeInfo
}
func (b *Brickinfo) String() string {

View File

@@ -20,18 +20,19 @@ const (
func registerBricksStatusStepFuncs() {
transaction.RegisterStepFunc(bricksStatus, "bricks-status.Check")
}
func createBrickStatusRspAPI(brickStatuses []brick.Brickstatus) []*api.BrickStatus {
func createBrickStatusRsp(brickStatuses []brick.Brickstatus) []*api.BrickStatus {
var brickStatusesRsp []*api.BrickStatus
for _, status := range brickStatuses {
s := &api.BrickStatus{
Info: createBrickInfo(&status.Info),
Info: brick.CreateBrickInfo(&status.Info),
Online: status.Online,
Pid: status.Pid,
Port: status.Port,
FS: status.FS,
MountOpts: status.MountOpts,
Device: status.Device,
Size: createSizeInfo(&status.Size),
Size: brick.CreateBrickSizeInfo(&status.Size),
}
brickStatusesRsp = append(brickStatusesRsp, s)
}
@@ -52,10 +53,10 @@ func bricksStatus(ctx transaction.TxnCtx) error {
}
brickStatuses, err := volume.CheckBricksStatus(vol)
if err != nil {
ctx.Logger().WithError(err).Error("Failed to get brick status information from store.")
ctx.Logger().WithError(err).Error("Failed to get brick status information.")
return err
}
brickStatusesRsp := createBrickStatusRspAPI(brickStatuses)
brickStatusesRsp := createBrickStatusRsp(brickStatuses)
// Store the results in transaction context. This will be consumed by
// the node that initiated the transaction.
ctx.SetNodeResult(gdctx.MyUUID, brickStatusTxnKey, brickStatusesRsp)
@@ -112,7 +113,7 @@ func createBricksStatusResp(ctx transaction.TxnCtx, vol *volume.Volinfo) (*api.B
bmap := make(map[string]*api.BrickStatus)
for _, b := range vol.GetBricks() {
bmap[b.ID.String()] = &api.BrickStatus{
Info: createBrickInfo(&b),
Info: brick.CreateBrickInfo(&b),
}
}

View File

@@ -2,82 +2,12 @@ package volumecommands
import (
"errors"
"os/exec"
"syscall"
"time"
"github.com/gluster/glusterd2/glusterd2/brick"
"github.com/gluster/glusterd2/glusterd2/daemon"
"github.com/gluster/glusterd2/pkg/api"
"github.com/pborman/uuid"
)
// BrickStartMaxRetries represents maximum no. of attempts that will be made
// to start brick processes in case of port clashes.
const BrickStartMaxRetries = 3

// anotherEADDRINUSE is a non-standard errno seen when a brick's port is
// already in use. Needed until https://review.gluster.org/#/c/16200/ gets
// into a release; safe because no other well-known errno exists after 132.
const anotherEADDRINUSE = syscall.Errno(0x9E) // 158
// errorContainsErrno reports whether err is an *exec.ExitError whose process
// exit status equals the numeric value of errno.
func errorContainsErrno(err error, errno syscall.Errno) bool {
	exiterr, ok := err.(*exec.ExitError)
	if !ok {
		return false
	}
	status, ok := exiterr.Sys().(syscall.WaitStatus)
	if !ok {
		return false
	}
	// Directly compare instead of the redundant if/return-false/return-true.
	return status.ExitStatus() == int(errno)
}
// These functions are used in vol-create, vol-expand and vol-shrink (TBD).

// startBrick starts the glusterfsd process for brick b, retrying up to
// BrickStartMaxRetries times when the brick port is already in use.
// Returns the last start error, if any.
func startBrick(b brick.Brickinfo) error {
	brickDaemon, err := brick.NewGlusterfsd(b)
	if err != nil {
		return err
	}
	for i := 0; i < BrickStartMaxRetries; i++ {
		err = daemon.Start(brickDaemon, true)
		if err == nil {
			return nil
		}
		if !errorContainsErrno(err, syscall.EADDRINUSE) && !errorContainsErrno(err, anotherEADDRINUSE) {
			return err
		}
		// Retry iff brick failed to start because of port being in use.
		// Allow the previous instance to cleanup and exit.
		time.Sleep(1 * time.Second)
	}
	// BUG FIX: the original fell through to "return nil" even when every
	// retry failed with EADDRINUSE; surface the last error instead.
	return err
}
// stopBrick stops the glusterfsd process for brick b.
func stopBrick(b brick.Brickinfo) error {
	brickDaemon, err := brick.NewGlusterfsd(b)
	if err != nil {
		return err
	}
	// Return daemon.Stop's result directly instead of the redundant
	// err-check-then-return-nil sequence.
	return daemon.Stop(brickDaemon, true)
}
func nodesFromVolumeCreateReq(req *api.VolCreateReq) ([]uuid.UUID, error) {
var nodesMap = make(map[string]int)
var nodes []uuid.UUID

View File

@@ -1,62 +1,14 @@
package volumecommands
import (
"github.com/gluster/glusterd2/glusterd2/brick"
"github.com/gluster/glusterd2/glusterd2/volume"
"github.com/gluster/glusterd2/pkg/api"
"github.com/gluster/glusterd2/pkg/utils"
)
func createSizeInfo(size *utils.SizeInfo) api.SizeInfo {
func createSizeInfo(size *volume.SizeInfo) api.SizeInfo {
return api.SizeInfo{
Used: size.Used,
Free: size.Free,
Capacity: size.Capacity,
}
}
// createBrickInfo converts an internal Brickinfo into its API response form.
func createBrickInfo(b *brick.Brickinfo) api.BrickInfo {
	info := api.BrickInfo{
		ID:         b.ID,
		Path:       b.Path,
		VolumeID:   b.VolumeID,
		VolumeName: b.VolumeName,
		NodeID:     b.NodeID,
		Hostname:   b.Hostname,
	}
	info.Type = api.BrickType(b.Type)
	return info
}
// createSubvolInfo converts the given subvolumes into their API response form.
func createSubvolInfo(sv *[]volume.Subvol) []api.Subvol {
	var subvols []api.Subvol
	for i := range *sv {
		subvol := &(*sv)[i]
		var blist []api.BrickInfo
		for j := range subvol.Bricks {
			blist = append(blist, createBrickInfo(&subvol.Bricks[j]))
		}
		entry := api.Subvol{
			Name:         subvol.Name,
			Type:         api.SubvolType(subvol.Type),
			Bricks:       blist,
			ReplicaCount: subvol.ReplicaCount,
			ArbiterCount: subvol.ArbiterCount,
		}
		subvols = append(subvols, entry)
	}
	return subvols
}
// createVolumeInfoResp assembles the API volume-info response for v.
func createVolumeInfoResp(v *volume.Volinfo) *api.VolumeInfo {
	resp := api.VolumeInfo{
		ID:        v.ID,
		Name:      v.Name,
		Transport: v.Transport,
		DistCount: v.DistCount,
		Options:   v.Options,
		Subvols:   createSubvolInfo(&v.Subvols),
	}
	resp.Type = api.VolType(v.Type)
	resp.State = api.VolState(v.State)
	return &resp
}

View File

@@ -319,7 +319,7 @@ func volumeCreateHandler(w http.ResponseWriter, r *http.Request) {
}
func createVolumeCreateResp(v *volume.Volinfo) *api.VolumeCreateResp {
return (*api.VolumeCreateResp)(createVolumeInfoResp(v))
return (*api.VolumeCreateResp)(volume.CreateVolumeInfoResp(v))
}
func checkDisperseParams(req *api.SubvolReq, s *volume.Subvol) error {

View File

@@ -46,7 +46,7 @@ func startBricksOnExpand(c transaction.TxnCtx) error {
"brick": b.String(),
}).Info("Starting brick")
if err := startBrick(b); err != nil {
if err := b.StartBrick(); err != nil {
return err
}
}
@@ -73,7 +73,7 @@ func undoStartBricksOnExpand(c transaction.TxnCtx) error {
"brick": b.String(),
}).Info("volume expand failed, stopping brick")
if err := stopBrick(b); err != nil {
if err := b.StopBrick(); err != nil {
c.Logger().WithFields(log.Fields{
"error": err,
"volume": b.VolumeName,
@@ -334,5 +334,5 @@ func volumeExpandHandler(w http.ResponseWriter, r *http.Request) {
}
func createVolumeExpandResp(v *volume.Volinfo) *api.VolumeExpandResp {
return (*api.VolumeExpandResp)(createVolumeInfoResp(v))
return (*api.VolumeExpandResp)(volume.CreateVolumeInfoResp(v))
}

View File

@@ -27,5 +27,5 @@ func volumeInfoHandler(w http.ResponseWriter, r *http.Request) {
}
func createVolumeGetResp(v *volume.Volinfo) *api.VolumeGetResp {
return (*api.VolumeGetResp)(createVolumeInfoResp(v))
return (*api.VolumeGetResp)(volume.CreateVolumeInfoResp(v))
}

View File

@@ -33,7 +33,7 @@ func startAllBricks(c transaction.TxnCtx) error {
"brick": b.String(),
}).Info("Starting brick")
if err := startBrick(b); err != nil {
if err := b.StartBrick(); err != nil {
return err
}
}
@@ -68,7 +68,7 @@ func stopAllBricks(c transaction.TxnCtx) error {
"brick": b.String(),
}).Info("volume start failed, stopping brick")
if err := stopBrick(b); err != nil {
if err := b.StopBrick(); err != nil {
return err
}
}

View File

@@ -35,7 +35,7 @@ func volumeStatusHandler(w http.ResponseWriter, r *http.Request) {
func createVolumeStatusResp(v *volume.Volinfo, s *api.SizeInfo) *api.VolumeStatusResp {
resp := &api.VolumeStatusResp{
Info: *(createVolumeInfoResp(v)),
Info: *(volume.CreateVolumeInfoResp(v)),
}
if s != nil {

View File

@@ -12,10 +12,28 @@ import (
"strings"
"syscall"
"github.com/gluster/glusterd2/pkg/utils"
config "github.com/spf13/viper"
)
// For now duplicating SizeInfo till we have a common package for both brick
// and volume.

// SizeInfo represents sizing information for a volume, in bytes
// (populated from Statfs block counts — see createSizeInfo below).
type SizeInfo struct {
	Capacity uint64 // total filesystem capacity
	Used     uint64 // bytes in use (Capacity - Free)
	Free     uint64 // bytes available
}
// createSizeInfo builds a SizeInfo (in bytes) from filesystem statistics;
// a nil fstat yields zero values.
func createSizeInfo(fstat *syscall.Statfs_t) *SizeInfo {
	s := &SizeInfo{}
	if fstat == nil {
		return s
	}
	bsize := uint64(fstat.Bsize)
	s.Capacity = fstat.Blocks * bsize
	s.Free = fstat.Bfree * bsize
	s.Used = s.Capacity - s.Free
	return s
}
const fuseSuperMagic = 1702057286
func mountVolume(name string, mountpoint string) error {
@@ -49,18 +67,8 @@ func mountVolume(name string, mountpoint string) error {
return cmd.Wait() // glusterfs daemonizes itself
}
// createSizeInfo builds a utils.SizeInfo (in bytes) from filesystem
// statistics; a nil fstat yields zero values.
func createSizeInfo(fstat *syscall.Statfs_t) *utils.SizeInfo {
	s := &utils.SizeInfo{}
	if fstat == nil {
		return s
	}
	bsize := uint64(fstat.Bsize)
	s.Capacity = fstat.Blocks * bsize
	s.Free = fstat.Bfree * bsize
	s.Used = s.Capacity - s.Free
	return s
}
//UsageInfo gives the size information of a gluster volume
func UsageInfo(volname string) (*utils.SizeInfo, error) {
func UsageInfo(volname string) (*SizeInfo, error) {
tempDir, err := ioutil.TempDir(config.GetString("rundir"), "gd2mount")
if err != nil {

View File

@@ -262,3 +262,16 @@ func (v *Volinfo) Peers() []*peer.Peer {
return peers
}
// SubvolTypeToString converts a SubvolType to its corresponding string
// representation. Unknown types map to "distribute".
func SubvolTypeToString(subvolType SubvolType) string {
	switch subvolType {
	case SubvolReplicate:
		return "replicate"
	case SubvolDisperse:
		return "disperse"
	default:
		return "distribute"
	}
	// Removed the unreachable `return ""` that followed the switch — every
	// branch (including default) already returns, so it was dead code that
	// `go vet` flags.
}

View File

@@ -1,13 +1,17 @@
package volume
import (
"errors"
"os"
"path"
"strings"
"syscall"
"github.com/gluster/glusterd2/glusterd2/brick"
"github.com/gluster/glusterd2/glusterd2/daemon"
"github.com/gluster/glusterd2/glusterd2/pmap"
"github.com/gluster/glusterd2/pkg/errors"
"github.com/gluster/glusterd2/pkg/api"
gderrors "github.com/gluster/glusterd2/pkg/errors"
"github.com/pborman/uuid"
log "github.com/sirupsen/logrus"
@@ -27,7 +31,7 @@ func isBrickPathAvailable(nodeID uuid.UUID, brickPath string) error {
for _, b := range v.GetBricks() {
if uuid.Equal(b.NodeID, nodeID) && b.Path == brickPath {
log.Error("Brick is already used by ", v.Name)
return errors.ErrBrickPathAlreadyInUse
return gderrors.ErrBrickPathAlreadyInUse
}
}
}
@@ -85,7 +89,7 @@ func CheckBricksStatus(volinfo *Volinfo) ([]brick.Brickstatus, error) {
log.WithError(err).WithField("path",
binfo.Path).Error("syscall.Statfs() failed")
} else {
s.Size = *(createSizeInfo(&fstat))
s.Size = *(brick.CreateSizeInfo(&fstat))
}
for _, m := range mtabEntries {
@@ -101,3 +105,84 @@ func CheckBricksStatus(volinfo *Volinfo) ([]brick.Brickstatus, error) {
return brickStatuses, nil
}
// GetBrickMountRoot walks up from brickPath and returns the mount root of
// the filesystem containing the brick: the highest ancestor directory that
// still resides on the same device as brickPath.
func GetBrickMountRoot(brickPath string) (string, error) {
	st, err := os.Stat(brickPath)
	if err != nil {
		return "", err
	}
	brickDev := st.Sys().(*syscall.Stat_t).Dev

	// Climb toward "/" until the parent lives on a different device; the
	// current directory is then the mount root.
	cur := brickPath
	for cur != "/" {
		parent := path.Dir(cur)
		pst, err := os.Stat(parent)
		if err != nil {
			return "", err
		}
		if pst.Sys().(*syscall.Stat_t).Dev != brickDev {
			return cur, nil
		}
		cur = parent
	}

	// Reached "/": the brick is on the root filesystem iff the devices match.
	rst, err := os.Stat("/")
	if err != nil {
		return "", err
	}
	if rst.Sys().(*syscall.Stat_t).Dev == brickDev {
		return "/", nil
	}
	return "", errors.New("Failed To Get Mount Root")
}
// GetBrickMountDevice returns the device (fsName) of the mtab entry whose
// mount directory equals mountRoot. brickPath is currently unused but kept
// for interface compatibility.
func GetBrickMountDevice(brickPath, mountRoot string) (string, error) {
	entries, err := getMounts()
	if err != nil {
		return "", err
	}
	for i := range entries {
		if entries[i].mntDir == mountRoot {
			return entries[i].fsName, nil
		}
	}
	return "", errors.New("Mount Point Not Found")
}
// CreateSubvolInfo converts the given subvolumes into their API response form.
func CreateSubvolInfo(sv *[]Subvol) []api.Subvol {
	var subvols []api.Subvol
	for i := range *sv {
		subvol := &(*sv)[i]
		var blist []api.BrickInfo
		for j := range subvol.Bricks {
			blist = append(blist, brick.CreateBrickInfo(&subvol.Bricks[j]))
		}
		entry := api.Subvol{
			Name:         subvol.Name,
			Type:         api.SubvolType(subvol.Type),
			Bricks:       blist,
			ReplicaCount: subvol.ReplicaCount,
			ArbiterCount: subvol.ArbiterCount,
		}
		subvols = append(subvols, entry)
	}
	return subvols
}
// CreateVolumeInfoResp assembles the API volume-info response for v.
func CreateVolumeInfoResp(v *Volinfo) *api.VolumeInfo {
	resp := api.VolumeInfo{
		ID:        v.ID,
		Name:      v.Name,
		Transport: v.Transport,
		DistCount: v.DistCount,
		Options:   v.Options,
		Subvols:   CreateSubvolInfo(&v.Subvols),
	}
	resp.Type = api.VolType(v.Type)
	resp.State = api.VolState(v.State)
	return &resp
}

View File

@@ -8,10 +8,3 @@ import "reflect"
// GetTypeString returns the string name of the type that the given
// pointer (or other Elem-able value) refers to.
func GetTypeString(i interface{}) string {
	t := reflect.TypeOf(i)
	return t.Elem().String()
}
// SizeInfo represents sizing information (capacity/usage figures, presumably
// in bytes — populated by callers from Statfs data; verify against callers).
type SizeInfo struct {
	Capacity uint64 // total capacity
	Used     uint64 // amount in use
	Free     uint64 // amount available
}

View File

@@ -10,3 +10,8 @@ import (
// GetVolumeDir returns the path to the volume directory under the configured
// localstatedir.
func GetVolumeDir(volumeName string) string {
	base := config.GetString("localstatedir")
	return path.Join(base, "vols", volumeName)
}
// GetSnapshotDir returns the path to the snapshot directory under the
// configured localstatedir.
func GetSnapshotDir(snapName string) string {
	base := config.GetString("localstatedir")
	return path.Join(base, "snaps", snapName)
}