package e2e

import (
	"errors"
	"fmt"
	"net"
	"os"
	"path"
	"syscall"
	"testing"

	"github.com/gluster/glusterd2/pkg/api"
	shdapi "github.com/gluster/glusterd2/plugins/glustershd/api"

	"github.com/stretchr/testify/require"
)
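
// checkForPendingHeals returns an error if the brick reports heal entries
// still pending; a count of 0 or -1 is treated as no pending heals.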
func checkForPendingHeals(healInfo *shdapi.BrickHealInfo) error {
	if *healInfo.EntriesInHealPending != 0 && *healInfo.EntriesInHealPending != -1 {
		return errors.New("expecting no pending heals, found pending heals")
	}
	return nil
}
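
// testSelfHeal creates a 1x2 replicate volume, kills one of its bricks while
// writing from a FUSE mount, and checks that heal info reports pending heals
// until the self-heal daemon is re-enabled and an index heal is run.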
func testSelfHeal(t *testing.T, tc *testCluster) {
	r := require.New(t)

	var brickPaths []string

	// glustershd pid file path
	pidpath := path.Join(tc.gds[0].Rundir, "glustershd.pid")

	for i := 1; i <= 2; i++ {
		brickPath := testTempDir(t, "brick")
		brickPaths = append(brickPaths, brickPath)
	}
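
	// Create a 1x2 replicate volume with both bricks on the same peer.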
	reqVol := api.VolCreateReq{
		Name: volname,
		Subvols: []api.SubvolReq{
			{
				ReplicaCount: 2,
				Type:         "replicate",
				Bricks: []api.BrickReq{
					{PeerID: tc.gds[0].PeerID(), Path: brickPaths[0]},
					{PeerID: tc.gds[0].PeerID(), Path: brickPaths[1]},
				},
			},
		},
		Force: true,
	}
	vol1, err := client.VolumeCreate(reqVol)
	r.Nil(err)
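
	// Turn the self-heal daemon off so that heals stay pending once a brick
	// is brought down later in the test.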
	var optionReq api.VolOptionReq

	optionReq.Options = map[string]string{"cluster/replicate.self-heal-daemon": "off"}
	optionReq.AllowAdvanced = true

	r.Nil(client.VolumeSet(vol1.Name, optionReq))
	r.False(isProcessRunning(pidpath), "glustershd is still running")
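
	// Start the volume, mount it over FUSE and create a file that will later
	// need healing.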
	r.Nil(client.VolumeStart(vol1.Name, false), "volume start failed")

	checkFuseAvailable(t)

	mntPath := testTempDir(t, "mnt")
	defer os.RemoveAll(mntPath)

	host, _, _ := net.SplitHostPort(tc.gds[0].ClientAddress)
	err = mountVolume(host, volname, mntPath)
	r.Nil(err, fmt.Sprintf("mount failed: %s", err))

	defer syscall.Unmount(mntPath, syscall.MNT_FORCE)

	f, err := os.Create(mntPath + "/file1.txt")
	r.Nil(err, fmt.Sprintf("file creation failed: %s", err))
	f.Close()

	getBricksStatus, err := client.BricksStatus(volname)
	r.Nil(err, fmt.Sprintf("brick status operation failed: %s", err))
	count := 0
	for brick := range getBricksStatus {
		if getBricksStatus[brick].Info.PeerID.String() == tc.gds[0].PeerID() {
			count++
		}
	}

	r.Equal(count, 2)
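
	// Kill one of the two bricks, then write to the file through the mount so
	// that the surviving brick accumulates pending heals.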
	for brick := range getBricksStatus {
		if getBricksStatus[brick].Info.PeerID.String() == tc.gds[0].PeerID() {
			process, err := os.FindProcess(getBricksStatus[brick].Pid)
			r.Nil(err, fmt.Sprintf("failed to find bricks pid: %s", err))
			err = process.Signal(syscall.Signal(15))
			r.Nil(err, fmt.Sprintf("failed to kill bricks: %s", err))
			break
		}
	}

	f1, err := os.OpenFile(mntPath+"/file1.txt", os.O_WRONLY, 0222)
	r.Nil(err, fmt.Sprintf("failed to open file: %s", err))
	_, err = f1.WriteString("hello")
	r.Nil(err, fmt.Sprintf("failed to write to file: %s", err))
	f1.Sync()
	defer f1.Close()
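
	// With one brick down and the self-heal daemon disabled, heal info for the
	// connected brick should report pending heals.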
	_, err = client.SelfHealInfo(vol1.Name)
	r.Nil(err)
	_, err = client.SelfHealInfo(vol1.Name, "split-brain-info")
	r.Nil(err)
	healInfo, err := client.SelfHealInfo(vol1.Name, "info-summary")
	r.Nil(err)

	count = 0
	for node := range healInfo {
		if healInfo[node].Status == "Connected" {
			count++
		}
	}

	r.Equal(count, 1)

	for node := range healInfo {
		if healInfo[node].Status == "Connected" {
			r.NotNil(checkForPendingHeals(&healInfo[node]))
		}
	}

	r.Nil(client.VolumeStop(vol1.Name), "Volume stop failed")
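
	// Re-enable the client-side self-heal options, restart the volume and
	// turn the self-heal daemon back on.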
	optionReq.Options = map[string]string{"cluster/replicate.entry-self-heal": "on",
		"cluster/replicate.metadata-self-heal": "on",
		"cluster/replicate.data-self-heal":     "on"}
	optionReq.AllowAdvanced = true

	r.Nil(client.VolumeSet(vol1.Name, optionReq))

	r.Nil(client.VolumeStart(vol1.Name, false), "volume start failed")

	optionReq.Options = map[string]string{"cluster/replicate.self-heal-daemon": "on"}
	optionReq.AllowAdvanced = true

	r.Nil(client.VolumeSet(vol1.Name, optionReq))
	r.True(isProcessRunning(pidpath), "glustershd is not running")
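
	// Trigger an index heal and verify that no heals remain pending on either
	// brick, then run a full heal as well.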
	r.Nil(client.SelfHeal(vol1.Name, "index"))

	healInfo, err = client.SelfHealInfo(vol1.Name, "info-summary")
	r.Nil(err)

	count = 0
	for node := range healInfo {
		if healInfo[node].Status == "Connected" {
			count++
		}
	}

	r.Equal(count, 2)

	for node := range healInfo {
		if healInfo[node].Status == "Connected" {
			r.Nil(checkForPendingHeals(&healInfo[node]))
		}
	}

	r.Nil(client.SelfHeal(vol1.Name, "full"))

	// Stop Volume
	r.Nil(client.VolumeStop(vol1.Name), "Volume stop failed")

	optionReq.Options = map[string]string{"cluster/replicate.self-heal-daemon": "off"}
	optionReq.AllowAdvanced = true

	r.Nil(client.VolumeSet(vol1.Name, optionReq))
	r.False(isProcessRunning(pidpath), "glustershd is still running")

	// delete volume
	r.Nil(client.VolumeDelete(vol1.Name))
}
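
// testGranularEntryHeal checks that cluster/replicate.granular-entry-heal can
// be toggled only while no heals are pending: enabling it succeeds on a clean
// volume but fails once a brick has been killed and entries need healing.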
func testGranularEntryHeal(t *testing.T, tc *testCluster) {
	r := require.New(t)

	var brickPaths []string
	pidpath := path.Join(tc.gds[0].Rundir, "glustershd.pid")

	for i := 1; i <= 2; i++ {
		brickPath := testTempDir(t, "brick")
		brickPaths = append(brickPaths, brickPath)
	}

	// create a 1x2 replicate volume
	createReq := api.VolCreateReq{
		Name: volname,
		Subvols: []api.SubvolReq{
			{
				ReplicaCount: 2,
				Type:         "replicate",
				Bricks: []api.BrickReq{
					{PeerID: tc.gds[0].PeerID(), Path: brickPaths[0]},
					{PeerID: tc.gds[0].PeerID(), Path: brickPaths[1]},
				},
			},
		},
		Force: true,
	}
	_, err := client.VolumeCreate(createReq)
	r.Nil(err)
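
	// Start with the self-heal daemon on and verify that the freshly started
	// volume has no pending heals.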
	var optionReq api.VolOptionReq
	optionReq.Options = map[string]string{"cluster/replicate.self-heal-daemon": "on"}
	optionReq.AllowAdvanced = true
	r.Nil(client.VolumeSet(volname, optionReq))

	r.Nil(client.VolumeStart(volname, false), "volume start failed")

	healInfo, err := client.SelfHealInfo(volname, "info-summary")
	r.Nil(err)
	for node := range healInfo {
		if healInfo[node].Status == "Connected" {
			r.Nil(checkForPendingHeals(&healInfo[node]))
		}
	}
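
	// With no pending heals, enabling granular-entry-heal should succeed.
	// Turn the self-heal daemon off afterwards so later heals stay pending.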
	optionReq.Options = map[string]string{"cluster/replicate.granular-entry-heal": "enable"}
	optionReq.AllowAdvanced = true
	r.Nil(client.VolumeSet(volname, optionReq))

	optionReq.Options = map[string]string{"cluster/replicate.self-heal-daemon": "off"}
	optionReq.AllowAdvanced = true
	r.Nil(client.VolumeSet(volname, optionReq))
	r.False(isProcessRunning(pidpath), "glustershd is still running")
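
	// Mount the volume over FUSE, kill one brick and create a file so that
	// entry heals become pending.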
	checkFuseAvailable(t)

	mntPath := testTempDir(t, "mnt")
	defer os.RemoveAll(mntPath)

	host, _, _ := net.SplitHostPort(tc.gds[0].ClientAddress)
	err = mountVolume(host, volname, mntPath)
	r.Nil(err, fmt.Sprintf("mount failed: %s", err))

	defer syscall.Unmount(mntPath, syscall.MNT_FORCE)

	getBricksStatus, err := client.BricksStatus(volname)
	r.Nil(err, fmt.Sprintf("brick status operation failed: %s", err))
	for brick := range getBricksStatus {
		if getBricksStatus[brick].Info.PeerID.String() == tc.gds[0].PeerID() {
			process, err := os.FindProcess(getBricksStatus[brick].Pid)
			r.Nil(err, fmt.Sprintf("failed to find bricks pid: %s", err))
			err = process.Signal(syscall.Signal(15))
			r.Nil(err, fmt.Sprintf("failed to kill bricks: %s", err))
			break
		}
	}

	f, err := os.Create(mntPath + "/file1.txt")
	r.Nil(err, fmt.Sprintf("file creation failed: %s", err))
	f.Close()

	healInfo, err = client.SelfHealInfo(volname, "info-summary")
	r.Nil(err)
	for node := range healInfo {
		if healInfo[node].Status == "Connected" {
			r.NotNil(checkForPendingHeals(&healInfo[node]))
		}
	}
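
	// Disabling granular-entry-heal is allowed even with pending heals, but
	// re-enabling it after a volume restart is expected to fail while entries
	// still need healing.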
	optionReq.Options = map[string]string{"cluster/replicate.granular-entry-heal": "disable"}
	optionReq.AllowAdvanced = true
	r.Nil(client.VolumeSet(volname, optionReq))

	// Stop Volume
	r.Nil(client.VolumeStop(volname), "Volume stop failed")
	r.Nil(client.VolumeStart(volname, false), "volume start failed")

	optionReq.Options = map[string]string{"cluster/replicate.granular-entry-heal": "enable"}
	optionReq.AllowAdvanced = true
	r.NotNil(client.VolumeSet(volname, optionReq))

	err = syscall.Unmount(mntPath, 0)
	r.Nil(err)

	// Stop Volume
	r.Nil(client.VolumeStop(volname), "Volume stop failed")
	// delete volume
	r.Nil(client.VolumeDelete(volname))
}
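
// testSplitBrainOperation creates a data split-brain on a replica 2 volume by
// writing to the same file while each brick is down in turn, then resolves it
// with the "latest-mtime" policy and verifies that heal info reports no
// split-brain entries afterwards.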
func testSplitBrainOperation(t *testing.T, tc *testCluster) {
	r := require.New(t)

	var brickPaths []string
	for i := 1; i <= 2; i++ {
		brickPath := testTempDir(t, "brick")
		brickPaths = append(brickPaths, brickPath)
	}

	volname := formatVolName(t.Name())

	// create a 1x2 replicate volume
	createReq := api.VolCreateReq{
		Name: volname,
		Subvols: []api.SubvolReq{
			{
				ReplicaCount: 2,
				Type:         "replicate",
				Bricks: []api.BrickReq{
					{PeerID: tc.gds[0].PeerID(), Path: brickPaths[0]},
					{PeerID: tc.gds[0].PeerID(), Path: brickPaths[1]},
				},
			},
		},
		Force: true,
	}
	_, err := client.VolumeCreate(createReq)
	r.Nil(err)
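
	// Start the volume with the self-heal daemon on, then turn the daemon off
	// so the split-brain created below is not healed automatically.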
	var optionReq api.VolOptionReq
	optionReq.Options = map[string]string{"cluster/replicate.self-heal-daemon": "on"}
	optionReq.AllowAdvanced = true
	r.Nil(client.VolumeSet(volname, optionReq))

	r.Nil(client.VolumeStart(volname, false), "volume start failed")

	pidpath := path.Join(tc.gds[0].Rundir, "glustershd.pid")
	optionReq.Options = map[string]string{"cluster/replicate.self-heal-daemon": "off"}
	optionReq.AllowAdvanced = true
	r.Nil(client.VolumeSet(volname, optionReq))
	r.False(isProcessRunning(pidpath), "glustershd is still running")
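
	// Mount the volume over FUSE and create the file that will be driven into
	// split-brain.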
if _, err := os.Lstat("/dev/fuse"); os.IsNotExist(err) {
|
|
|
|
|
t.Skip("skipping mount /dev/fuse unavailable")
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
mntPath := testTempDir(t, "mnt")
|
|
|
|
|
defer os.RemoveAll(mntPath)
|
|
|
|
|
|
|
|
|
|
host, _, _ := net.SplitHostPort(tc.gds[0].ClientAddress)
|
|
|
|
|
err = mountVolume(host, volname, mntPath)
|
|
|
|
|
r.Nil(err, fmt.Sprintf("mount failed: %s", err))
|
|
|
|
|
defer syscall.Unmount(mntPath, syscall.MNT_FORCE|syscall.MNT_DETACH)
|
|
|
|
|
|
|
|
|
|
f, err := os.Create(mntPath + "/file1.txt")
|
|
|
|
|
r.Nil(err, fmt.Sprintf("file creation failed: %s", err))
|
|
|
|
|
f.Close()
|
|
|
|
|
|
|
|
|
|
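
	// First round: kill one brick (remembering which one) and write to the
	// file so that only the surviving brick sees this data.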
	var prevKilledBrick string
	getBricksStatus, err := client.BricksStatus(volname)
	r.Nil(err, fmt.Sprintf("brick status operation failed: %s", err))

	for brick := range getBricksStatus {
		if getBricksStatus[brick].Info.PeerID.String() == tc.gds[0].PeerID() {
			prevKilledBrick = getBricksStatus[brick].Info.Path
			process, err := os.FindProcess(getBricksStatus[brick].Pid)
			r.Nil(err, fmt.Sprintf("failed to find bricks pid: %s", err))
			err = process.Signal(syscall.Signal(15))
			r.Nil(err, fmt.Sprintf("failed to kill bricks: %s", err))
			break
		}
	}

	f1, err := os.OpenFile(mntPath+"/file1.txt", os.O_RDWR, 0777)
	r.Nil(err, fmt.Sprintf("failed to open file: %s", err))
	_, err = f1.WriteString("hello")
	r.Nil(err, fmt.Sprintf("failed to write to file: %s", err))
	f1.Sync()
	defer f1.Close()

	err = syscall.Unmount(mntPath, syscall.MNT_FORCE|syscall.MNT_DETACH)
	r.Nil(err)
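
	// Second round: restart the volume, remount it, kill the brick that
	// survived the first round and write different data, producing a
	// split-brain on file1.txt.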
	// Stop Volume
	r.Nil(client.VolumeStop(volname), "Volume stop failed")
	// Start Volume
	r.Nil(client.VolumeStart(volname, false), "Volume start failed")

	err = mountVolume(host, volname, mntPath)
	r.Nil(err, fmt.Sprintf("mount failed: %s", err))
	defer syscall.Unmount(mntPath, syscall.MNT_FORCE|syscall.MNT_DETACH)

	getBricksStatus, err = client.BricksStatus(volname)
	r.Nil(err, fmt.Sprintf("brick status operation failed: %s", err))

	for brick := range getBricksStatus {
		if getBricksStatus[brick].Info.PeerID.String() == tc.gds[0].PeerID() && getBricksStatus[brick].Info.Path != prevKilledBrick {
			process, err := os.FindProcess(getBricksStatus[brick].Pid)
			r.Nil(err, fmt.Sprintf("failed to find bricks pid: %s", err))
			err = process.Signal(syscall.Signal(15))
			r.Nil(err, fmt.Sprintf("failed to kill bricks: %s", err))
			break
		}
	}

	f2, err := os.OpenFile(mntPath+"/file1.txt", os.O_RDWR, 0777)
	r.Nil(err, fmt.Sprintf("failed to open file: %s", err))
	_, err = f2.WriteString("hey")
	r.Nil(err, fmt.Sprintf("failed to write to file: %s", err))
	f2.Sync()
	defer f2.Close()

	// Stop Volume
	r.Nil(client.VolumeStop(volname), "Volume stop failed")
	// Start Volume
	r.Nil(client.VolumeStart(volname, false), "Volume start failed")

	healInfo, err := client.SelfHealInfo(volname, "info-summary")
	r.Nil(err)

	r.Equal(*healInfo[0].EntriesInSplitBrain, int64(1))
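
	// Resolve the split-brain with the "latest-mtime" policy. The file is
	// addressed by its path from the volume root ("/file1.txt"); requests
	// with an empty or relative file name are expected to fail.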
	var req shdapi.SplitBrainReq

	req.FileName = ""
	err = client.SelfHealSplitBrain(volname, "latest-mtime", req)
	r.NotNil(err)

	req.FileName = "file1.txt"
	err = client.SelfHealSplitBrain(volname, "latest-mtime", req)
	r.NotNil(err)

	req.FileName = "/file1.txt"
	err = client.SelfHealSplitBrain(volname, "latest-mtime", req)
	r.Nil(err)

	healInfo, err = client.SelfHealInfo(volname, "info-summary")
	r.Nil(err)

	r.Equal(*healInfo[0].EntriesInSplitBrain, int64(0))

	err = syscall.Unmount(mntPath, syscall.MNT_FORCE|syscall.MNT_DETACH)
	r.Nil(err)

	// Stop Volume
	r.Nil(client.VolumeStop(volname), "Volume stop failed")
	// delete volume
	r.Nil(client.VolumeDelete(volname))
}