Volume profile info API + CLI
Signed-off-by: Vishal Pandey <vpandey@redhat.com>
Committed by: Madhu Rajanna
Parent: 35f7857537
Commit: e6ce7cfd90
1 doc/endpoints.md (generated)
@@ -34,6 +34,7 @@ Statedump | POST | /volumes/{volname}/statedump | [VolStatedumpReq](https://godo
VolfilesGet | GET | /volfiles | [](https://godoc.org/github.com/gluster/glusterd2/pkg/api#) | [](https://godoc.org/github.com/gluster/glusterd2/pkg/api#)
VolfilesGet | GET | /volfiles/{volfileid:.*} | [](https://godoc.org/github.com/gluster/glusterd2/pkg/api#) | [](https://godoc.org/github.com/gluster/glusterd2/pkg/api#)
EditVolume | POST | /volumes/{volname}/edit | [VolEditReq](https://godoc.org/github.com/gluster/glusterd2/pkg/api#VolEditReq) | [VolumeEditResp](https://godoc.org/github.com/gluster/glusterd2/pkg/api#VolumeEditResp)
ProfileVolume | GET | /volumes/{volname}/profile/{option} | [](https://godoc.org/github.com/gluster/glusterd2/pkg/api#) | [BrickProfileInfo](https://godoc.org/github.com/gluster/glusterd2/pkg/api#BrickProfileInfo)
SnapshotCreate | POST | /snapshots | [SnapCreateReq](https://godoc.org/github.com/gluster/glusterd2/pkg/api#SnapCreateReq) | [SnapCreateResp](https://godoc.org/github.com/gluster/glusterd2/pkg/api#SnapCreateResp)
SnapshotActivate | POST | /snapshots/{snapname}/activate | [SnapActivateReq](https://godoc.org/github.com/gluster/glusterd2/pkg/api#SnapActivateReq) | [SnapshotActivateResp](https://godoc.org/github.com/gluster/glusterd2/pkg/api#SnapshotActivateResp)
SnapshotDeactivate | POST | /snapshots/{snapname}/deactivate | [](https://godoc.org/github.com/gluster/glusterd2/pkg/api#) | [SnapshotDeactivateResp](https://godoc.org/github.com/gluster/glusterd2/pkg/api#SnapshotDeactivateResp)
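The new ProfileVolume route can be exercised directly over REST. A hypothetical request against a local glusterd2 (the /v1 prefix comes from the route version; the default REST port is assumed to be 24007, and gv0 is an illustrative volume name):

	curl -X GET http://localhost:24007/v1/volumes/gv0/profile/info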
@@ -77,6 +77,9 @@ func TestVolume(t *testing.T) {
	t.Run("SelfHeal", tc.wrap(testSelfHeal))
	t.Run("GranularEntryHeal", tc.wrap(testGranularEntryHeal))

	// Volume profile test
	t.Run("VolumeProfile", tc.wrap(testVolumeProfileInfo))

}

func testVolumeCreate(t *testing.T, tc *testCluster) {

@@ -938,3 +941,87 @@ func testArbiterVolumeCreate(t *testing.T, tc *testCluster) {

	r.Nil(client.VolumeDelete(volumeName))
}

func testVolumeProfileInfo(t *testing.T, tc *testCluster) {
	r := require.New(t)
	var brickPaths []string
	for i := 1; i <= 3; i++ {
		brickPath := testTempDir(t, "brick")
		brickPaths = append(brickPaths, brickPath)
	}
	volname := formatVolName(t.Name())
	createReq := api.VolCreateReq{
		Name: volname,
		Subvols: []api.SubvolReq{
			{
				ReplicaCount: 3,
				Type:         "replicate",
				Bricks: []api.BrickReq{
					{PeerID: tc.gds[0].PeerID(), Path: brickPaths[0]},
					{PeerID: tc.gds[1].PeerID(), Path: brickPaths[1]},
					{PeerID: tc.gds[1].PeerID(), Path: brickPaths[2]},
				},
			},
		},
		Force: true,
	}
	_, err := client.VolumeCreate(createReq)
	r.Nil(err)

	r.Nil(client.VolumeStart(volname, false))

	profileOpKeys := []string{"io-stats.count-fop-hits", "io-stats.latency-measurement"}
	var optionReq api.VolOptionReq
	for _, profileOpKey := range profileOpKeys {
		optionReq.Options = map[string]string{profileOpKey: "on"}
		optionReq.AllowAdvanced = true
		r.Nil(client.VolumeSet(volname, optionReq))
	}

	_, err = client.VolumeProfileInfo(volname, "info")
	r.Nil(err)

	_, err = client.VolumeProfileInfo(volname, "info-peek")
	r.Nil(err)

	_, err = client.VolumeProfileInfo(volname, "info-incremental")
	r.Nil(err)

	_, err = client.VolumeProfileInfo(volname, "info-incremental-peek")
	r.Nil(err)

	_, err = client.VolumeProfileInfo(volname, "info-cumulative")
	r.Nil(err)

	_, err = client.VolumeProfileInfo(volname, "info-clear")
	r.Nil(err)

	for _, profileOpKey := range profileOpKeys {
		optionReq.Options = map[string]string{profileOpKey: "off"}
		optionReq.AllowAdvanced = true
		r.Nil(client.VolumeSet(volname, optionReq))
	}

	_, err = client.VolumeProfileInfo(volname, "info")
	r.NotNil(err)

	_, err = client.VolumeProfileInfo(volname, "info-peek")
	r.NotNil(err)

	_, err = client.VolumeProfileInfo(volname, "info-incremental")
	r.NotNil(err)

	_, err = client.VolumeProfileInfo(volname, "info-incremental-peek")
	r.NotNil(err)

	_, err = client.VolumeProfileInfo(volname, "info-cumulative")
	r.NotNil(err)

	_, err = client.VolumeProfileInfo(volname, "info-clear")
	r.NotNil(err)

	r.Nil(client.VolumeStop(volname))

	r.Nil(client.VolumeDelete(volname))
}
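The r.NotNil assertions in the second half of the test are deliberate: once both io-stats options are switched off, the server side (getActiveProfileSession in volume-profile-utils.go below) sees no active profile session and rejects the request with a 400, so every profile info call is expected to fail.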
125 glustercli/cmd/volume-profile.go (new file)
@@ -0,0 +1,125 @@
package cmd

import (
	"fmt"
	"os"

	"github.com/gluster/glusterd2/pkg/api"

	"github.com/olekukonko/tablewriter"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
)

var (
	// Profile Info Flags
	flagProfileInfoPeek            bool
	flagProfileInfoIncremental     bool
	flagProfileInfoIncrementalPeek bool
	flagProfileInfoCumulative      bool
	flagProfileInfoClear           bool
)

var volumeProfileCmd = &cobra.Command{
	Use:   "profile",
	Short: "Gluster volume profile",
	Long:  "Gluster Volume Profile retrieves info on stats like latency, number of FOPs performed on the volume, etc.",
	Args:  cobra.ExactArgs(2),
}

func init() {
	// Volume Profile Info
	volumeProfileInfoCmd.Flags().BoolVar(&flagProfileInfoPeek, "peek", false, "Volume Profile Info Peek")
	volumeProfileInfoCmd.Flags().BoolVar(&flagProfileInfoIncremental, "incremental", false, "Volume Profile Info Incremental")
	volumeProfileInfoCmd.Flags().BoolVar(&flagProfileInfoIncrementalPeek, "incremental-peek", false, "Volume Profile Info Incremental Peek")
	volumeProfileInfoCmd.Flags().BoolVar(&flagProfileInfoCumulative, "cumulative", false, "Volume Profile Info Cumulative")
	volumeProfileInfoCmd.Flags().BoolVar(&flagProfileInfoClear, "clear", false, "Volume Profile Info Clear")

	volumeProfileCmd.AddCommand(volumeProfileInfoCmd)

	volumeCmd.AddCommand(volumeProfileCmd)
}

var volumeProfileInfoCmd = &cobra.Command{
	Use:   "info <volname> [--peek|--incremental|--incremental-peek|--cumulative|--clear]",
	Short: "Volume Profile Info",
	Long:  "Volume Profile Info retrieves stats like latency, read/write bytes, number of FOPs performed on the volume, etc.",
	Args:  cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		var err error
		var volumeProfileInfo []api.BrickProfileInfo
		volname := args[0]
		option := "info"
		if flagProfileInfoPeek {
			option = "info-peek"
		} else if flagProfileInfoIncremental {
			option = "info-incremental"
		} else if flagProfileInfoIncrementalPeek {
			option = "info-incremental-peek"
		} else if flagProfileInfoCumulative {
			option = "info-cumulative"
		} else if flagProfileInfoClear {
			option = "info-clear"
		}

		volumeProfileInfo, err = client.VolumeProfileInfo(volname, option)
		if err != nil {
			log.WithError(err).WithField("volname", volname).Error("failed to get volume profile info")
			failure(fmt.Sprintf("Failed to get volume profile info for volume %s\n", volname), err, 1)
		}
		// Iterate over all bricks
		for index := range volumeProfileInfo {

			// Display Cumulative Stats
			table := tablewriter.NewWriter(os.Stdout)
			fmt.Printf("Brick: %s\n\n", volumeProfileInfo[index].BrickName)
			if volumeProfileInfo[index].CumulativeStats.Interval != "" {
				fmt.Printf("Cumulative Stats: \n")
			}
			table.SetHeader([]string{"%-Latency", "AvgLatency", "MinLatency", "MaxLatency", "No. Of Calls", "FOP"})
			if len(volumeProfileInfo[index].CumulativeStats.StatsInfo) != 0 {
				// Iterate over the stats of each FOP in the cumulative stats, key being the FOP name
				for key := range volumeProfileInfo[index].CumulativeStats.StatsInfo {
					table.Append([]string{volumeProfileInfo[index].CumulativeStats.StatsInfo[key]["%-latency"],
						volumeProfileInfo[index].CumulativeStats.StatsInfo[key]["avglatency"],
						volumeProfileInfo[index].CumulativeStats.StatsInfo[key]["minlatency"],
						volumeProfileInfo[index].CumulativeStats.StatsInfo[key]["maxlatency"],
						volumeProfileInfo[index].CumulativeStats.StatsInfo[key]["hits"],
						key})
				}
				table.Render()
			}
			if volumeProfileInfo[index].CumulativeStats.Duration != "" {
				fmt.Printf("Duration: %s seconds\n", volumeProfileInfo[index].CumulativeStats.Duration)
				fmt.Printf("Data Read: %s bytes\n", volumeProfileInfo[index].CumulativeStats.DataRead)
				fmt.Printf("Data Write: %s bytes\n\n\n", volumeProfileInfo[index].CumulativeStats.DataWrite)
			}
			fmt.Printf("\n\n")

			// Display Interval Stats
			table = tablewriter.NewWriter(os.Stdout)
			table.SetHeader([]string{"%-Latency", "AvgLatency", "MinLatency", "MaxLatency", "No. Of Calls", "FOP"})
			if volumeProfileInfo[index].IntervalStats.Interval != "" {
				fmt.Printf("Interval %s Stats: \n\n", volumeProfileInfo[index].IntervalStats.Interval)
			}
			if len(volumeProfileInfo[index].IntervalStats.StatsInfo) != 0 {
				// Iterate over the stats of each FOP in the interval stats, key being the FOP name
				for key := range volumeProfileInfo[index].IntervalStats.StatsInfo {
					table.Append([]string{volumeProfileInfo[index].IntervalStats.StatsInfo[key]["%-latency"],
						volumeProfileInfo[index].IntervalStats.StatsInfo[key]["avglatency"],
						volumeProfileInfo[index].IntervalStats.StatsInfo[key]["minlatency"],
						volumeProfileInfo[index].IntervalStats.StatsInfo[key]["maxlatency"],
						volumeProfileInfo[index].IntervalStats.StatsInfo[key]["hits"],
						key})
				}
				table.Render()
			}
			if volumeProfileInfo[index].IntervalStats.Duration != "" {
				fmt.Printf("Duration: %s seconds\n", volumeProfileInfo[index].IntervalStats.Duration)
				fmt.Printf("Data Read: %s bytes\n", volumeProfileInfo[index].IntervalStats.DataRead)
				fmt.Printf("Data Write: %s bytes\n", volumeProfileInfo[index].IntervalStats.DataWrite)
			}
			fmt.Printf("\n\n")
		}
	},
}
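A hypothetical invocation of the new subcommand, as wired up in init() above (the volume name gv0 is illustrative):

	glustercli volume profile info gv0
	glustercli volume profile info gv0 --incremental-peek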
@@ -157,11 +157,12 @@ func (c *Command) Routes() route.Routes {
			ResponseType: utils.GetTypeString((*api.VolumeEditResp)(nil)),
			HandlerFunc:  volumeEditHandler},
		route.Route{
			Name:        "ProfileVolume",
			Method:      "GET",
			Pattern:     "/volumes/{volname}/profile/{option}",
			Version:     1,
			HandlerFunc: volumeProfileHandler},
			Name:         "ProfileVolume",
			Method:       "GET",
			Pattern:      "/volumes/{volname}/profile/{option}",
			Version:      1,
			ResponseType: utils.GetTypeString((*api.BrickProfileInfo)(nil)),
			HandlerFunc:  volumeProfileHandler},
	}
}
99 glusterd2/commands/volumes/volume-profile-utils.go (new file)
@@ -0,0 +1,99 @@
package volumecommands

import (
	"github.com/gluster/glusterd2/glusterd2/volume"
)

// fops maps GlusterFS FOP indices (as encoded in io-stats keys) to FOP names
var fops = []string{
	"NULL",
	"STAT",
	"READLINK",
	"MKNOD",
	"MKDIR",
	"UNLINK",
	"RMDIR",
	"SYMLINK",
	"RENAME",
	"LINK",
	"TRUNCATE",
	"OPEN",
	"READ",
	"WRITE",
	"STATFS",
	"FLUSH",
	"FSYNC", /* 16 */
	"SETXATTR",
	"GETXATTR",
	"REMOVEXATTR",
	"OPENDIR",
	"FSYNCDIR",
	"ACCESS",
	"CREATE",
	"FTRUNCATE",
	"FSTAT", /* 25 */
	"LK",
	"LOOKUP",
	"READDIR",
	"INODELK",
	"FINODELK",
	"ENTRYLK",
	"FENTRYLK",
	"XATTROP",
	"FXATTROP",
	"FGETXATTR",
	"FSETXATTR",
	"RCHECKSUM",
	"SETATTR",
	"FSETATTR",
	"READDIRP",
	"FORGET",
	"RELEASE",
	"RELEASEDIR",
	"GETSPEC",
	"FREMOVEXATTR",
	"FALLOCATE",
	"DISCARD",
	"ZEROFILL",
	"IPC",
	"SEEK",
	"LEASE",
	"COMPOUND",
	"GETACTIVELK",
	"SETACTIVELK",
	"PUT",
	"ICREATE",
	"NAMELINK",
	"MAXVALUE",
}

var profileSessionKeys = [...]string{"io-stats.count-fop-hits", "io-stats.latency-measurement"}

// BrickProfileInfo holds profile info of each brick
type BrickProfileInfo struct {
	BrickName       string   `json:"brick-name"`
	CumulativeStats StatType `json:"cumulative-stats,omitempty"`
	IntervalStats   StatType `json:"interval-stats,omitempty"`
}

// StatType contains profile info of cumulative/interval stats of a brick
type StatType struct {
	Duration             string                       `json:"duration"`
	DataRead             string                       `json:"data-read"`
	DataWrite            string                       `json:"data-write"`
	Interval             string                       `json:"interval"`
	PercentageAvgLatency float64                      `json:"percentage-avg-latency"`
	StatsInfo            map[string]map[string]string `json:"stat-info,omitempty"`
}

// getActiveProfileSession returns true only if all profile session options are enabled on the volume, otherwise it returns false.
func getActiveProfileSession(v *volume.Volinfo) bool {
	for _, key := range profileSessionKeys {
		value, ok := v.Options[key]
		if ok && value == "on" {
			continue
		}
		return false
	}
	return true
}
@@ -1,8 +1,11 @@
package volumecommands

import (
	"errors"
	"fmt"
	"net/http"
	"strconv"
	"strings"

	"github.com/gluster/glusterd2/glusterd2/brick"
	"github.com/gluster/glusterd2/glusterd2/daemon"
@@ -15,6 +18,13 @@ import (
	"github.com/gorilla/mux"
)

type keyType int8

const (
	cumulativeType keyType = iota
	intervalType
)

func registerVolProfileStepFuncs() {
	transaction.RegisterStepFunc(txnVolumeProfile, "volume.Profile")
}
@@ -35,15 +45,22 @@ func volumeProfileHandler(w http.ResponseWriter, r *http.Request) {

	volinfo, err := volume.GetVolume(volname)
	if err != nil {
		restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, err)
		status, err := restutils.ErrToStatusCode(err)
		restutils.SendHTTPError(ctx, w, status, err)
		return
	}

	if volinfo.State != volume.VolStarted {
		restutils.SendHTTPError(ctx, w, http.StatusBadRequest, "Volume must be in stopped state before deleting.")
		restutils.SendHTTPError(ctx, w, http.StatusBadRequest, "volume must be in started state")
		return
	}

	if !getActiveProfileSession(volinfo) {
		restutils.SendHTTPError(ctx, w, http.StatusBadRequest, "there are no active profile sessions running")
		return
	}

	txn.Steps = []*transaction.Step{
		{
			DoFunc: "volume.Profile",
@@ -62,17 +79,147 @@ func volumeProfileHandler(w http.ResponseWriter, r *http.Request) {
	}
	if err := txn.Do(); err != nil {
		logger.WithError(err).WithField(
			"volume", volname).Error("transaction to profile volume failed")
			"volname", volname).Error("transaction to profile volume failed")
		status, err := restutils.ErrToStatusCode(err)
		restutils.SendHTTPError(ctx, w, status, err)
		return
	}

	restutils.SendHTTPResponse(ctx, w, http.StatusCreated, nil)
	var volumeProfileInfo []BrickProfileInfo
	for _, node := range volinfo.Nodes() {
		var nodeResult []map[string]string
		err := txn.Ctx.GetNodeResult(node, "node-result", &nodeResult)
		if err != nil {
			restutils.SendHTTPError(ctx, w, http.StatusInternalServerError, err)
			return
		}
		// Get the profile info array for each node of the volume
		for brickResult := range nodeResult {
			var brickProfileInfo BrickProfileInfo
			brickProfileInfo.BrickName = fmt.Sprintf("%s:%s", node, nodeResult[brickResult]["brick"])
			brickProfileInfo.CumulativeStats.Interval = nodeResult[brickResult]["cumulative"]
			brickProfileInfo.IntervalStats.Interval = nodeResult[brickResult]["interval"]
			// Store stats for each FOP in cumulative stats
			brickProfileInfo.CumulativeStats.StatsInfo = make(map[string]map[string]string)
			// Store stats for each FOP in interval stats
			brickProfileInfo.IntervalStats.StatsInfo = make(map[string]map[string]string)
			// Iterate over each brick info entry of a node
			for key, value := range nodeResult[brickResult] {
				// Check whether the key belongs to the cumulative or the interval stats, and whether those stats are present in the profile info at all
				if strings.HasPrefix(key, nodeResult[brickResult]["cumulative"]) && nodeResult[brickResult]["cumulative"] != "" {
					brickProfileInfo.CumulativeStats = populateStatsWithFop(brickProfileInfo.CumulativeStats, key, value, cumulativeType)
				} else if strings.HasPrefix(key, nodeResult[brickResult]["interval"]) && nodeResult[brickResult]["interval"] != "" {
					brickProfileInfo.IntervalStats = populateStatsWithFop(brickProfileInfo.IntervalStats, key, value, intervalType)
				}
			}

			brickProfileInfo.CumulativeStats = calculatePercentageLatencyForEachFop(brickProfileInfo.CumulativeStats)

			brickProfileInfo.IntervalStats = calculatePercentageLatencyForEachFop(brickProfileInfo.IntervalStats)

			// Append each brick's profile info to an array and return the array
			volumeProfileInfo = append(volumeProfileInfo, brickProfileInfo)
		}
	}

	restutils.SendHTTPResponse(ctx, w, http.StatusOK, &volumeProfileInfo)
}

// Calculate percentage latency for each FOP in the cumulative/interval stats
func calculatePercentageLatencyForEachFop(stats StatType) StatType {
	// Calculate the sum of (hits * avgLatency) over all FOPs. Used later to calculate
	// the %-latency for each FOP.
	var tmpPercentageAvgLatency float64
	for key := range stats.StatsInfo {
		tmpAvgLatency, _ := strconv.ParseFloat(stats.StatsInfo[key]["avglatency"], 64)
		tmpHits, _ := strconv.ParseFloat(stats.StatsInfo[key]["hits"], 64)
		tmpPercentageAvgLatency += tmpAvgLatency * tmpHits
	}
	// Calculate the %-latency for each FOP:
	// %-latency for one FOP = 100 * (hits for that FOP * avgLatency for that FOP) / (sum of hits * avgLatency over all FOPs)
	for key := range stats.StatsInfo {
		tmpAvgLatency, _ := strconv.ParseFloat(stats.StatsInfo[key]["avglatency"], 64)
		tmpHits, _ := strconv.ParseFloat(stats.StatsInfo[key]["hits"], 64)
		tmpPercentageAvgLatencyForEachFop := 100 * ((tmpAvgLatency * tmpHits) / tmpPercentageAvgLatency)
		stats.StatsInfo[key]["%-latency"] = fmt.Sprintf("%f", tmpPercentageAvgLatencyForEachFop)
	}
	stats.PercentageAvgLatency = tmpPercentageAvgLatency

	return stats
}
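To make the %-latency formula concrete with invented figures: if the stats contain only READ with avglatency 250.0 over 40 hits (weight 10000) and WRITE with avglatency 500.0 over 20 hits (weight 10000), the summed weight is 20000 and each FOP reports 100 * 10000 / 20000 = 50%. Note also that an empty or zero-weight StatsInfo map would make the division produce NaN, which fmt.Sprintf renders as the string "NaN" in the %-latency field.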
// populateStatsWithFop populates interval/cumulative stats with FOPs and their profile info
func populateStatsWithFop(stats StatType, key string, value string, ktype keyType) StatType {
	var fop string
	var k string
	// Decode the FOP and stat for the given key
	switch ktype {
	case cumulativeType:
		fop, k = decodeCumulativeKey(key)
	case intervalType:
		fop, k = decodeIntervalKey(key)
	}
	if fop != "" && fop != "NULL" {
		if _, ok := stats.StatsInfo[fop]; ok {
			stats.StatsInfo[fop][k] = value
		} else {
			// Create a new map with the FOP as key if this particular FOP is encountered for the first time
			stats.StatsInfo[fop] = make(map[string]string)
			stats.StatsInfo[fop][k] = value
		}
	} else {
		if k == "read" {
			stats.DataRead = value
		} else if k == "write" {
			stats.DataWrite = value
		} else if k == "duration" {
			stats.Duration = value
		}
	}
	return stats
}

// Decode key into FOP and stat. Keys starting with -1 belong to cumulative stats.
// Eg: -1-12-maxlatency  fop: 12  stat: maxlatency
//     -1-duration       fop: ""  stat: duration
func decodeCumulativeKey(key string) (string, string) {
	var fop string
	var k string
	s := strings.Split(key, "-")
	if len(s) == 4 {
		k = s[len(s)-1]
		index, _ := strconv.Atoi(s[2])
		fop = fops[index]
	} else if len(s) == 3 {
		k = s[len(s)-1]
		fop = ""
	}
	return fop, k
}

// Decode key into FOP and stat for keys belonging to interval stats
// Eg: 12-12-maxlatency  IntervalNo.: 12  fop: 12  stat: maxlatency
//     12-duration       IntervalNo.: 12  fop: ""  stat: duration
func decodeIntervalKey(key string) (string, string) {
	var fop string
	var k string
	s := strings.Split(key, "-")
	if len(s) == 3 {
		k = s[len(s)-1]
		index, _ := strconv.Atoi(s[1])
		fop = fops[index]
	} else if len(s) == 2 {
		k = s[len(s)-1]
		fop = ""
	}
	return fop, k
}
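Tracing decodeCumulativeKey with the examples from its comment: the key "-1-12-maxlatency" splits on "-" into ["", "1", "12", "maxlatency"] (length 4), so the stat is "maxlatency" and fops[12] resolves to "READ"; the key "-1-duration" splits into three parts, so fop stays "" and populateStatsWithFop routes the value into stats.Duration instead of the per-FOP map.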
func txnVolumeProfile(c transaction.TxnCtx) error {
	var volinfo volume.Volinfo
	var nodeProfileInfo []map[string]string
	if err := c.Get("volinfo", &volinfo); err != nil {
		return err
	}
@@ -85,6 +232,8 @@ func txnVolumeProfile(c transaction.TxnCtx) error {
	for _, b := range volinfo.GetLocalBricks() {
		brickDaemon, err := brick.NewGlusterfsd(b)
		if err != nil {
			c.Logger().WithError(err).WithField(
				"brick", b.String()).Error("failed to initiate brick daemon")
			return err
		}

@@ -94,31 +243,52 @@ func txnVolumeProfile(c transaction.TxnCtx) error {
		client, err := daemon.GetRPCClient(brickDaemon)
		if err != nil {
			c.Logger().WithError(err).WithField(
				"brick", b.String()).Error("failed to connect to brick, sending SIGTERM")
				"brick", b.String()).Error("failed to connect to brick, aborting volume profile operation")
			return err
		}
		reqDict := make(map[string]string)
		if option == "start" {
			reqDict["peek"] = "0"
			reqDict["op"] = "1"
			reqDict["info-op"] = "0"
		} else if option == "info" {
		switch option {
		case "info":
			reqDict["peek"] = "0"
			reqDict["op"] = "3"
			reqDict["info-op"] = "1"
		} else if option == "stop" {
			reqDict["originator_uuid"] = gdctx.MyUUID.String()
		case "info-peek":
			reqDict["peek"] = "1"
			reqDict["op"] = "3"
			reqDict["info-op"] = "1"
		case "info-incremental":
			reqDict["peek"] = "0"
			reqDict["op"] = "2"
			reqDict["info-op"] = "0"
			reqDict["op"] = "3"
			reqDict["info-op"] = "2"
		case "info-incremental-peek":
			reqDict["peek"] = "1"
			reqDict["op"] = "3"
			reqDict["info-op"] = "2"
		case "info-cumulative":
			reqDict["peek"] = "0"
			reqDict["op"] = "3"
			reqDict["info-op"] = "3"
		case "info-clear":
			reqDict["peek"] = "0"
			reqDict["op"] = "3"
			reqDict["info-op"] = "4"
		default:
			return fmt.Errorf("%s is not a valid operation", option)
		}

		reqDict["volname"] = volinfo.Name
		reqDict["vol-id"] = volinfo.ID.String()
		req := &brick.GfBrickOpReq{
			Name: b.Path,
			Op:   int(brick.OpBrickXlatorInfo),
		}
		req.Input, err = dict.Serialize(reqDict)
		if err != nil {
			return err
		}
		var rsp brick.GfBrickOpRsp
		err = client.Call("Brick.OpBrickXlatorInfo", req, &rsp)
@@ -127,7 +297,15 @@ func txnVolumeProfile(c transaction.TxnCtx) error {
			"brick", b.String()).Error("failed to send volume profile RPC")
			return err
		}

		output, err := dict.Unserialize(rsp.Output)
		if err != nil {
			return errors.New("error unserializing the output")
		}
		output["brick"] = b.Path
		nodeProfileInfo = append(nodeProfileInfo, output)
	}
	c.SetNodeResult(gdctx.MyUUID, "node-result", &nodeProfileInfo)

	return nil
}
18 pkg/api/volume-profile-resp.go (new file)
@@ -0,0 +1,18 @@
package api

// BrickProfileInfo holds profile info of each brick
type BrickProfileInfo struct {
	BrickName       string   `json:"brick-name"`
	CumulativeStats StatType `json:"cumulative-stats"`
	IntervalStats   StatType `json:"interval-stats"`
}

// StatType contains profile info of cumulative/interval stats of a brick
type StatType struct {
	Duration             string                       `json:"duration"`
	DataRead             string                       `json:"data-read"`
	DataWrite            string                       `json:"data-write"`
	Interval             string                       `json:"interval"`
	PercentageAvgLatency float64                      `json:"percentage-avg-latency"`
	StatsInfo            map[string]map[string]string `json:"stat-info,omitempty"`
}
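To visualize the JSON these struct tags produce on the wire, a minimal sketch; the brick name and all figures are invented, and a real response is assembled by volumeProfileHandler:

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/gluster/glusterd2/pkg/api"
	)

	func main() {
		// Illustrative values only; not taken from a real cluster.
		info := api.BrickProfileInfo{
			BrickName: "node1:/bricks/b1",
			CumulativeStats: api.StatType{
				Duration:  "120",
				DataRead:  "4096",
				DataWrite: "8192",
				Interval:  "-1", // keys starting with -1 mark cumulative stats
				StatsInfo: map[string]map[string]string{
					"READ": {"avglatency": "250.0", "hits": "40", "%-latency": "50.0"},
				},
			},
		}
		out, _ := json.MarshalIndent([]api.BrickProfileInfo{info}, "", "  ")
		fmt.Println(string(out))
	}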
@@ -183,3 +183,11 @@ func (c *Client) VolumeReset(volname string, req api.VolOptionResetReq) error {
	url := fmt.Sprintf("/v1/volumes/%s/options", volname)
	return c.del(url, req, http.StatusOK, nil)
}

// VolumeProfileInfo retrieves the stats about different file operations performed on a volume
func (c *Client) VolumeProfileInfo(volname string, option string) ([]api.BrickProfileInfo, error) {
	var volumeProfileInfo []api.BrickProfileInfo
	url := fmt.Sprintf("/v1/volumes/%s/profile/%s", volname, option)
	err := c.get(url, nil, http.StatusOK, &volumeProfileInfo)
	return volumeProfileInfo, err
}
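A minimal usage sketch of the new method, assuming an initialized glusterd2 REST client (the *Client receiver above) and a started volume named gv0 with an active profile session:

	profile, err := client.VolumeProfileInfo("gv0", "info-cumulative")
	if err != nil {
		// Handle error: volume not started, no active profile session, etc.
		return err
	}
	for _, b := range profile {
		fmt.Printf("%s: read %s bytes, wrote %s bytes\n",
			b.BrickName, b.CumulativeStats.DataRead, b.CumulativeStats.DataWrite)
	}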