
Normalize the xlator name when stored in Volinfo

The category prefix is optional when setting volume options. For example,
to set eager-lock on the replicate xlator, either `replicate.eager-lock` or
`cluster/replicate.eager-lock` can be passed. With this PR, the option key is
always stored in Volinfo with the category prefix.
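
Roughly, the normalization splits a key into an optional graph prefix, an xlator name and an option name, swaps the xlator name for its category-qualified form, and rejoins the pieces. A minimal standalone sketch of that idea (the helper and the hard-coded category table are illustrative only, not glusterd2's actual `options.SplitKey`/`xlator.Find` API):

```go
package main

import (
	"fmt"
	"strings"
)

// Stand-in for the xlator metadata that glusterd2 looks up via xlator.Find;
// the real categories come from the parent directory of each xlator's .so file.
var category = map[string]string{
	"replicate": "cluster",
	"io-stats":  "debug",
}

// normalizeKey rewrites "replicate.eager-lock" (or "gfproxy.replicate.eager-lock")
// into its category-qualified form, e.g. "cluster/replicate.eager-lock".
// The real SplitKey is more careful about dots in option names; this is a sketch.
func normalizeKey(k string) string {
	parts := strings.Split(k, ".")
	graph := ""
	if len(parts) == 3 { // optional graph prefix, e.g. "gfproxy"
		graph, parts = parts[0], parts[1:]
	}
	xl, opt := parts[0], parts[1]
	if strings.Contains(xl, "/") { // already category-qualified
		xl = strings.SplitN(xl, "/", 2)[1]
	}
	full := category[xl] + "/" + xl + "." + opt
	if graph != "" {
		full = graph + "." + full
	}
	return full
}

func main() {
	fmt.Println(normalizeKey("replicate.eager-lock"))         // cluster/replicate.eager-lock
	fmt.Println(normalizeKey("cluster/replicate.eager-lock")) // cluster/replicate.eager-lock
	fmt.Println(normalizeKey("gfproxy.replicate.eager-lock")) // gfproxy.cluster/replicate.eager-lock
}
```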

Also fixes an issue where template variables were lost when xlator default
options and volinfo.Options are loaded (Fixes: #1397).

Signed-off-by: Aravinda VK <avishwan@redhat.com>
Authored by Aravinda VK on 2018-12-13 12:34:17 +05:30; committed by Madhu Rajanna
parent 74924d58db
commit ee2fb7f89a
11 changed files with 82 additions and 47 deletions

doc/endpoints.md (generated)
View File

@@ -16,7 +16,7 @@ Name | Methods | Path | Request | Response
GetVersion | GET | /version | [](https://godoc.org/github.com/gluster/glusterd2/pkg/api#) | [VersionResp](https://godoc.org/github.com/gluster/glusterd2/pkg/api#VersionResp)
VolumeCreate | POST | /volumes | [VolCreateReq](https://godoc.org/github.com/gluster/glusterd2/pkg/api#VolCreateReq) | [VolumeCreateResp](https://godoc.org/github.com/gluster/glusterd2/pkg/api#VolumeCreateResp)
VolumeExpand | POST | /volumes/{volname}/expand | [VolExpandReq](https://godoc.org/github.com/gluster/glusterd2/pkg/api#VolExpandReq) | [VolumeExpandResp](https://godoc.org/github.com/gluster/glusterd2/pkg/api#VolumeExpandResp)
-VolumeOptionGet | GET | /volumes/{volname}/options/{optname} | [](https://godoc.org/github.com/gluster/glusterd2/pkg/api#) | [VolumeOptionGetResp](https://godoc.org/github.com/gluster/glusterd2/pkg/api#VolumeOptionGetResp)
+VolumeOptionGet | GET | /volumes/{volname}/options/{optname:.*} | [](https://godoc.org/github.com/gluster/glusterd2/pkg/api#) | [VolumeOptionGetResp](https://godoc.org/github.com/gluster/glusterd2/pkg/api#VolumeOptionGetResp)
VolumeOptionsGet | GET | /volumes/{volname}/options | [](https://godoc.org/github.com/gluster/glusterd2/pkg/api#) | [VolumeOptionsGetResp](https://godoc.org/github.com/gluster/glusterd2/pkg/api#VolumeOptionsGetResp)
VolumeOptions | POST | /volumes/{volname}/options | [VolOptionReq](https://godoc.org/github.com/gluster/glusterd2/pkg/api#VolOptionReq) | [VolumeOptionResp](https://godoc.org/github.com/gluster/glusterd2/pkg/api#VolumeOptionResp)
VolumeReset | DELETE | /volumes/{volname}/options | [VolOptionResetReq](https://godoc.org/github.com/gluster/glusterd2/pkg/api#VolOptionResetReq) | [VolumeOptionResp](https://godoc.org/github.com/gluster/glusterd2/pkg/api#VolumeOptionResp)

View File

@@ -124,7 +124,7 @@ func testSelfHeal(t *testing.T, tc *testCluster) {
var optionReq api.VolOptionReq
-optionReq.Options = map[string]string{"replicate.self-heal-daemon": "on"}
+optionReq.Options = map[string]string{"cluster/replicate.self-heal-daemon": "on"}
optionReq.AllowAdvanced = true
r.Nil(client.VolumeSet(vol1.Name, optionReq))
@@ -155,7 +155,7 @@ func testSelfHeal(t *testing.T, tc *testCluster) {
// Stop Volume
r.Nil(client.VolumeStop(vol1.Name), "Volume stop failed")
-optionReq.Options = map[string]string{"replicate.self-heal-daemon": "off"}
+optionReq.Options = map[string]string{"cluster/replicate.self-heal-daemon": "off"}
optionReq.AllowAdvanced = true
r.Nil(client.VolumeSet(vol1.Name, optionReq))
@@ -205,11 +205,11 @@ func testGranularEntryHeal(t *testing.T, tc *testCluster) {
}
var optionReq api.VolOptionReq
-optionReq.Options = map[string]string{"replicate.granular-entry-heal": "enable"}
+optionReq.Options = map[string]string{"cluster/replicate.granular-entry-heal": "enable"}
optionReq.AllowAdvanced = true
r.Nil(client.VolumeSet(volname, optionReq))
-optionReq.Options = map[string]string{"replicate.self-heal-daemon": "off"}
+optionReq.Options = map[string]string{"cluster/replicate.self-heal-daemon": "off"}
optionReq.AllowAdvanced = true
r.Nil(client.VolumeSet(volname, optionReq))
r.False(isProcessRunning(pidpath), "glustershd is still running")
@@ -249,7 +249,7 @@ func testGranularEntryHeal(t *testing.T, tc *testCluster) {
}
}
-optionReq.Options = map[string]string{"replicate.granular-entry-heal": "disable"}
+optionReq.Options = map[string]string{"cluster/replicate.granular-entry-heal": "disable"}
optionReq.AllowAdvanced = true
r.Nil(client.VolumeSet(volname, optionReq))
@@ -257,7 +257,7 @@ func testGranularEntryHeal(t *testing.T, tc *testCluster) {
r.Nil(client.VolumeStop(volname), "Volume stop failed")
r.Nil(client.VolumeStart(volname, false), "volume start failed")
-optionReq.Options = map[string]string{"replicate.granular-entry-heal": "enable"}
+optionReq.Options = map[string]string{"cluster/replicate.granular-entry-heal": "enable"}
optionReq.AllowAdvanced = true
r.NotNil(client.VolumeSet(volname, optionReq))
@@ -303,7 +303,7 @@ func testSplitBrainOperation(t *testing.T, tc *testCluster) {
var optionReq api.VolOptionReq
pidpath := path.Join(tc.gds[0].Rundir, "glustershd.pid")
-optionReq.Options = map[string]string{"replicate.self-heal-daemon": "off"}
+optionReq.Options = map[string]string{"cluster/replicate.self-heal-daemon": "off"}
optionReq.AllowAdvanced = true
r.Nil(client.VolumeSet(volname, optionReq))
r.False(isProcessRunning(pidpath), "glustershd is still running")

View File

@@ -599,7 +599,7 @@ func TestVolumeOptions(t *testing.T) {
// TODO: Remove this later if the default changes
createReq.AllowAdvanced = true
-validOpKeys := []string{"gfproxy.replicate.eager-lock", "replicate.eager-lock"}
+validOpKeys := []string{"gfproxy.cluster/replicate.eager-lock", "cluster/replicate.eager-lock"}
invalidOpKeys := []string{"..eager-lock", "a.b.afr.eager-lock", "afr.non-existent", "eager-lock"}
// valid option test cases
@@ -622,6 +622,33 @@ func TestVolumeOptions(t *testing.T) {
r.Nil(err)
}
+// Test the option key normalization, set short name and get full name
+createReq.Options = map[string]string{"replicate.eager-lock": "on"}
+_, err = client.VolumeCreate(createReq)
+r.Nil(err)
+// test volume get on full name
+_, err = client.VolumeGet(volname, "cluster/replicate.eager-lock")
+r.Nil(err)
+var resetOptReq api.VolOptionResetReq
+resetOptReq.Options = []string{"cluster/replicate.eager-lock"}
+resetOptReq.Force = true
+r.Nil(client.VolumeReset(volname, resetOptReq))
+var reqSetTest api.VolOptionReq
+reqSetTest.Options = map[string]string{"replicate.eager-lock": "on"}
+reqSetTest.AllowAdvanced = true
+r.Nil(client.VolumeSet(volname, reqSetTest))
+// test volume get on full name
+_, err = client.VolumeGet(volname, "cluster/replicate.eager-lock")
+r.Nil(err)
+err = client.VolumeDelete(volname)
+r.Nil(err)
// invalid option test cases
for _, invalidKey := range invalidOpKeys {
createReq.Options = map[string]string{}
@@ -654,7 +681,7 @@ func TestVolumeOptions(t *testing.T) {
r.Nil(client.VolumeSet(volname, optionReq))
var resetOptionReq api.VolOptionResetReq
-resetOptionReq.Options = []string{"replicate.use-compound-fops"}
+resetOptionReq.Options = []string{"cluster/replicate.use-compound-fops"}
resetOptionReq.Force = true
r.Nil(client.VolumeReset(volname, resetOptionReq))

View File

@@ -33,7 +33,7 @@ func (c *Command) Routes() route.Routes {
route.Route{
Name: "VolumeOptionGet",
Method: "GET",
-Pattern: "/volumes/{volname}/options/{optname}",
+Pattern: "/volumes/{volname}/options/{optname:.*}",
Version: 1,
ResponseType: utils.GetTypeString((*api.VolumeOptionGetResp)(nil)),
HandlerFunc: volumeOptionsGetHandler},
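
The pattern change matters because the router's default variable match, `{optname}`, stops at a path separator, so a normalized name such as `cluster/replicate.eager-lock` could never appear in the URL. `{optname:.*}` supplies a custom regexp that also consumes slashes. A small self-contained sketch of the behaviour, assuming gorilla/mux (which glusterd2's REST routes are registered with):

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	// {optname:.*} is a mux regexp variable: unlike the default match it also
	// consumes "/", so "cluster/replicate.eager-lock" fits in one variable.
	r.HandleFunc("/volumes/{volname}/options/{optname:.*}",
		func(w http.ResponseWriter, req *http.Request) {
			vars := mux.Vars(req)
			fmt.Fprintf(w, "volume=%s option=%s", vars["volname"], vars["optname"])
		})

	req := httptest.NewRequest("GET",
		"/volumes/vol1/options/cluster/replicate.eager-lock", nil)
	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, req)

	fmt.Println(rec.Body.String()) // volume=vol1 option=cluster/replicate.eager-lock
}
```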

View File

@@ -66,8 +66,9 @@ func validateOptions(opts map[string]string, flags api.VolOptionFlags) error {
}
func validateXlatorOptions(opts map[string]string, volinfo *volume.Volinfo) error {
+var toreplace [][]string
for k, v := range opts {
-_, xl, key := options.SplitKey(k)
+graphName, xl, key := options.SplitKey(k)
xltr, err := xlator.Find(xl)
if err != nil {
return err
@@ -77,6 +78,22 @@ func validateXlatorOptions(opts map[string]string, volinfo *volume.Volinfo) erro
return err
}
}
+normalizedKeyName := xltr.FullName() + "." + key
+if graphName != "" {
+normalizedKeyName = graphName + "." + normalizedKeyName
+}
+if k != normalizedKeyName {
+// Do not remove and add the key in place, do it later
+// Format: Old Key, New Key, Value
+toreplace = append(toreplace, []string{k, normalizedKeyName, v})
+}
}
+for _, v := range toreplace {
+delete(opts, v[0])
+opts[v[1]] = v[2]
+}
return nil
}
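
The `toreplace` slice defers the key rewrite until after the loop: in Go, deleting a map entry while ranging over the map is safe, but an entry added during the range may or may not be visited later, so normalizing in place could re-process or skip keys nondeterministically. A standalone sketch of the same two-phase rename (the normalization check is stubbed out here):

```go
package main

import "fmt"

func main() {
	opts := map[string]string{
		"replicate.eager-lock":          "on",
		"cluster/replicate.quorum-type": "auto",
	}

	// Phase 1: collect the renames without mutating opts; keys added while
	// ranging over a map may or may not be produced by the same range.
	var toreplace [][]string // {old key, new key, value}
	for k, v := range opts {
		if k == "replicate.eager-lock" { // stand-in for the real normalization check
			toreplace = append(toreplace, []string{k, "cluster/replicate.eager-lock", v})
		}
	}

	// Phase 2: apply the renames after iteration has finished.
	for _, r := range toreplace {
		delete(opts, r[0])
		opts[r[1]] = r[2]
	}

	fmt.Println(opts) // map[cluster/replicate.eager-lock:on cluster/replicate.quorum-type:auto]
}
```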

View File

@@ -67,7 +67,7 @@ var fops = []string{
"MAXVALUE",
}
-var profileSessionKeys = [...]string{"io-stats.count-fop-hits", "io-stats.latency-measurement"}
+var profileSessionKeys = [...]string{"debug/io-stats.count-fop-hits", "debug/io-stats.latency-measurement"}
// BrickProfileInfo holds profile info of each brick
type BrickProfileInfo struct {

View File

@@ -89,7 +89,7 @@ func (xl *Xlator) getOptions(tmplName string, volinfo *volume.Volinfo) (map[stri
// If option set in template
v, ok := xl.Options[optKey]
if ok {
-opts[optKey] = v
+optVal = v
}
// Volinfo can be nil in case of cluster level
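
The surrounding function is not visible in this hunk, but the one-line change reads as a precedence fix: the template's value should only update the local `optVal`, so that the later volinfo.Options override and template-variable expansion still run before anything lands in `opts`; writing the template value straight into `opts` is what lost the template variables. A rough, illustrative sketch of that ordering (the names, the placeholder syntax and the exact precedence are assumptions, not glusterd2 code):

```go
package main

import (
	"fmt"
	"strings"
)

// resolveOption merges an xlator default, a template override and a per-volume
// override into one value, and expands placeholders only on the final result.
// Assigning the template value into the output map early would skip both the
// volinfo override and the expansion step.
func resolveOption(def, tmplVal, volVal, volname string) string {
	optVal := def // default shipped with the xlator
	if tmplVal != "" {
		optVal = tmplVal // template override (may contain a placeholder)
	}
	if volVal != "" {
		optVal = volVal // per-volume override from volinfo.Options
	}
	// Illustrative placeholder expansion; the real template handling differs.
	return strings.ReplaceAll(optVal, "{{ volume.name }}", volname)
}

func main() {
	fmt.Println(resolveOption("off", "{{ volume.name }}-suffix", "", "vol1"))   // vol1-suffix
	fmt.Println(resolveOption("off", "{{ volume.name }}-suffix", "on", "vol1")) // on
}
```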

View File

@@ -120,6 +120,9 @@ func loadXlator(xlPath string) (*Xlator, error) {
filepath.Ext(xlPath))
}
+// Parent directory name where xlator .so file exists
+xl.Category = filepath.Base(filepath.Dir(xlPath))
soOptions := (*[maxOptions]C.volume_option_t)(p)
for _, option := range soOptions {

View File

@@ -17,4 +17,12 @@ type Xlator struct {
// This is pretty much useless now.
rawID uint32
+// Category is parent directory name
+Category string
}
+// FullName returns xlator name including the category name
+func (xl *Xlator) FullName() string {
+return xl.Category + "/" + xl.ID
+}
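
FullName is what the normalized option keys are built from: Category is taken from the parent directory of the xlator's shared object (cluster/, debug/, performance/, ...) when it is loaded, and ID is the xlator name. A tiny standalone illustration of the added method:

```go
package main

import "fmt"

// Mirrors the fields added in this commit; Category is derived from the
// directory the xlator's .so file lives in.
type Xlator struct {
	ID       string
	Category string
}

// FullName returns the category-qualified xlator name.
func (xl *Xlator) FullName() string {
	return xl.Category + "/" + xl.ID
}

func main() {
	xl := Xlator{ID: "replicate", Category: "cluster"}
	fmt.Println(xl.FullName()) // cluster/replicate
}
```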

View File

@@ -1,7 +1,6 @@
package glustershd
import (
"fmt"
"os"
"path"
@@ -15,23 +14,8 @@ import (
config "github.com/spf13/viper"
)
-var names = [...]string{"replicate", "afr"}
-const (
-selfHealKey = "self-heal-daemon"
-granularEntryHealKey = "granular-entry-heal"
-)
type shdActor struct{}
-func getSelfHealKeys() []string {
-var selfhealKeys = make([]string, len(names))
-for i, n := range names {
-selfhealKeys[i] = fmt.Sprintf("%s.%s", n, selfHealKey)
-}
-return selfhealKeys
-}
func (actor *shdActor) Do(v *volume.Volinfo, key string, value string, volOp xlator.VolumeOpType, logger log.FieldLogger) error {
if v.Type != volume.Replicate && v.Type != volume.Disperse {
@@ -44,10 +28,8 @@ func (actor *shdActor) Do(v *volume.Volinfo, key string, value string, volOp xla
}
switch volOp {
case xlator.VolumeStart:
-for _, key := range getSelfHealKeys() {
-if val, ok := v.Options[key]; ok && val == "off" {
-return nil
-}
-}
+if val, ok := v.Options[shdKey]; ok && val == "off" {
+return nil
+}
err = volgen.ClusterVolfileToFile(v, glustershDaemon.VolfileID, "glustershd")
@@ -140,10 +122,8 @@ func (actor *shdActor) Undo(v *volume.Volinfo, key string, value string, volOp x
}
switch volOp {
case xlator.VolumeStart:
-for _, key := range getSelfHealKeys() {
-if val, ok := v.Options[key]; ok && val == "off" {
-return nil
-}
-}
+if val, ok := v.Options[shdKey]; ok && val == "off" {
+return nil
+}
isVolRunning, err := volume.AreReplicateVolumesRunning(v.ID)
@@ -221,7 +201,5 @@ func (actor *shdActor) Undo(v *volume.Volinfo, key string, value string, volOp x
}
func init() {
-for _, name := range names {
-xlator.RegisterOptionActor(name, &shdActor{})
-}
+xlator.RegisterOptionActor("replicate", &shdActor{})
}

View File

@@ -4,7 +4,11 @@ import (
"github.com/gluster/glusterd2/glusterd2/volume"
)
-var shdKeys = [...]string{"afr.self-heal-daemon", "replicate.self-heal-daemon"}
+const (
+selfHealKey = "self-heal-daemon"
+shdKey = "cluster/replicate." + selfHealKey
+granularEntryHealKey = "granular-entry-heal"
+)
// isVolReplicate returns true if volume is of type replicate, disperse, distreplicate or distdisperse
// otherwise it returns false
@@ -18,11 +22,9 @@ func isVolReplicate(vType volume.VolType) bool {
// isHealEnabled returns true if heal is enabled for the volume otherwise returns false.
func isHealEnabled(v *volume.Volinfo) bool {
-for _, key := range shdKeys {
-value, ok := v.Options[key]
-if ok && value == "on" {
-return true
-}
-}
+value, ok := v.Options[shdKey]
+if ok && value == "on" {
+return true
+}
return false
}