/* glusterfs/cli/src/cli.h */
/*
Copyright (c) 2006-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#ifndef __CLI_H__
#define __CLI_H__
#include "rpc-clnt.h"
#include <glusterfs/quota-common-utils.h>
#include "cli1-xdr.h"
#if (HAVE_LIB_XML)
#include <libxml/encoding.h>
#include <libxml/xmlwriter.h>
#endif
#define DEFAULT_EVENT_POOL_SIZE 16384
#define CLI_GLUSTERD_PORT 24007
#define DEFAULT_CLI_LOG_FILE_DIRECTORY DATADIR "/log/glusterfs"
#define CLI_VOL_STATUS_BRICK_LEN 43
#define CLI_TAB_LENGTH 8
#define CLI_BRICK_STATUS_LINE_LEN 78
// Quotad pid path.
#define QUOTAD_PID_PATH "/var/run/gluster/quotad/quotad.pid"
/* Geo-rep command positional arguments' index */
#define GEO_REP_CMD_INDEX 1
#define GEO_REP_CMD_CONFIG_INDEX 4
/* Default RPC call timeout, in seconds. */
#define CLI_DEFAULT_CONN_TIMEOUT 120
/* Special timeout for volume profile, volume
heal and ganesha RPC calls, in seconds. */
#define CLI_TEN_MINUTES_TIMEOUT 600
enum argp_option_keys {
    ARGP_DEBUG_KEY = 133,
    ARGP_PORT_KEY = 'p',
};
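/* Usage sketch (illustrative, not part of this header): the key values
 * follow the GNU argp convention, where a printable key doubles as the
 * short option ('p') and values above 127 are long-option-only. A
 * hypothetical option table consuming them could look like:
 *
 *     #include <argp.h>
 *
 *     static struct argp_option gf_options[] = {
 *         {"debug", ARGP_DEBUG_KEY, NULL, 0, "run in debug mode"},
 *         {"port", ARGP_PORT_KEY, "PORT", 0, "port to connect to"},
 *         {0}
 *     };
 */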
typedef enum {
    COLD_BRICK_COUNT,
    COLD_TYPE,
    COLD_DIST_COUNT,
    COLD_REPLICA_COUNT,
    COLD_ARBITER_COUNT,
    COLD_DISPERSE_COUNT,
    COLD_REDUNDANCY_COUNT,
    HOT_BRICK_COUNT,
    HOT_TYPE,
    HOT_REPLICA_COUNT,
    MAX
} values;
#define GLUSTER_MODE_SCRIPT (1 << 0)
#define GLUSTER_MODE_ERR_FATAL (1 << 1)
#define GLUSTER_MODE_XML (1 << 2)
#define GLUSTER_MODE_WIGNORE (1 << 3)
#define GLUSTER_MODE_WIGNORE_PARTITION (1 << 4)
#define GLUSTER_MODE_GLFSHEAL_NOLOG (1 << 5)
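/* Sketch (illustrative): the GLUSTER_MODE_* values are independent bit
 * flags, combined in the `mode` field of struct cli_state below:
 *
 *     state->mode |= GLUSTER_MODE_XML;
 *     ...
 *     if (state->mode & GLUSTER_MODE_SCRIPT)
 *         skip_prompt = _gf_true;   // e.g. don't ask for confirmation
 */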
#define GLUSTERD_GET_QUOTA_LIST_MOUNT_PATH(abspath, volname, path)             \
    do {                                                                       \
        snprintf(abspath, sizeof(abspath) - 1,                                 \
                 DEFAULT_VAR_RUN_DIRECTORY "/%s_quota_list%s", volname, path); \
    } while (0)
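/* Usage sketch (illustrative): `abspath` must be a real char array, since
 * the macro takes the buffer capacity with sizeof(); a pointer would give
 * the wrong size. DEFAULT_VAR_RUN_DIRECTORY is defined elsewhere in the
 * tree.
 *
 *     char abspath[PATH_MAX] = {0,};
 *     GLUSTERD_GET_QUOTA_LIST_MOUNT_PATH(abspath, "vol0", "/dir");
 *     // expands to:
 *     //   snprintf(abspath, sizeof(abspath) - 1,
 *     //            DEFAULT_VAR_RUN_DIRECTORY "/%s_quota_list%s",
 *     //            "vol0", "/dir");
 */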
struct cli_state;
struct cli_cmd_word;
struct cli_cmd_tree;
struct cli_cmd;
extern char *cli_vol_status_str[];
extern char *cli_vol_task_status_str[];
typedef int(cli_cmd_cbk_t)(struct cli_state *state, struct cli_cmd_word *word,
                           const char **words, int wordcount);
typedef void(cli_cmd_reg_cbk_t)(struct cli_cmd *this);
typedef int(cli_cmd_match_t)(struct cli_cmd_word *word);
typedef int(cli_cmd_filler_t)(struct cli_cmd_word *word);
struct cli_cmd_word {
    struct cli_cmd_tree *tree;
    const char *word;
    cli_cmd_filler_t *filler;
    cli_cmd_match_t *match;
    cli_cmd_cbk_t *cbkfn;
    const char *desc;
    const char *pattern;
    int nextwords_cnt;
    struct cli_cmd_word **nextwords;
};
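/* Sketch (illustrative, hypothetical helper): a command is resolved by
 * walking `nextwords` from the tree root one token at a time and invoking
 * the callback of the deepest matching word:
 *
 *     static cli_cmd_cbk_t *
 *     resolve_cbk(struct cli_cmd_word *w, const char **words, int n)
 *     {
 *         int i, j;
 *
 *         for (i = 0; i < n; i++) {
 *             struct cli_cmd_word *next = NULL;
 *             for (j = 0; j < w->nextwords_cnt; j++) {
 *                 if (strcmp(w->nextwords[j]->word, words[i]) == 0) {
 *                     next = w->nextwords[j];
 *                     break;
 *                 }
 *             }
 *             if (!next)
 *                 break;
 *             w = next;
 *         }
 *         return w->cbkfn;
 *     }
 */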
struct cli_cmd_tree {
    struct cli_state *state;
    struct cli_cmd_word root;
};
struct cli_state {
    int argc;
    char **argv;
    char debug;
    /* for events dispatching */
    glusterfs_ctx_t *ctx;
    /* Client to perform RPC calls to glusterd. */
    struct rpc_clnt *rpc;
    /* Client to perform RPC calls to quotad.
       May be NULL if quotad is not running. */
    struct rpc_clnt *quotad_rpc;
    /* registry of known commands */
    struct cli_cmd_tree tree;
    /* the thread which "executes" the command in non-interactive mode */
    /* also the thread which reads from stdin in non-readline mode */
    pthread_t input;
    /* terminal I/O */
    const char *prompt;
    int rl_enabled;
    int rl_async;
    int rl_processing;
    /* autocompletion state */
    char **matches;
    char **matchesp;
    char *remote_host;
    int remote_port;
    int mode;
    time_t await_connected;
    time_t default_conn_timeout;
    char *log_file;
    gf_loglevel_t log_level;
    char *glusterd_sock;
    char *address_family;
};
struct cli_local {
    struct {
        char *volname;
        int flags;
    } get_vol;
    dict_t *dict;
    const char **words;
    /* Marker for volume status all */
    gf_boolean_t all;
#if (HAVE_LIB_XML)
    xmlTextWriterPtr writer;
    xmlDocPtr doc;
    int vol_count;
#endif
    gf_lock_t lock;
    struct list_head dict_list;
};
struct cli_volume_status {
    int port;
    int rdma_port;
    int online;
    uint64_t block_size;
    uint64_t total_inodes;
    uint64_t free_inodes;
    char *brick;
    char *pid_str;
    char *free;
    char *total;
    char *fs_name;
    char *mount_options;
    char *device;
    char *inode_size;
};
struct snap_config_opt_vals_ {
    char *op_name;
    char *question;
};
typedef struct cli_volume_status cli_volume_status_t;
typedef struct cli_local cli_local_t;
typedef struct cli_state cli_state_t;
typedef ssize_t (*cli_serialize_t)(struct iovec outmsg, void *args);
extern rpc_clnt_prog_t *cli_rpc_prog;
typedef const char *(*cli_selector_t)(void *wcon);
void *
cli_getunamb(const char *tok, void **choices, cli_selector_t sel);
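/* Sketch (assumed semantics, from the name "get unambiguous"): given a
 * NULL-terminated `choices` array and a selector that maps each choice to
 * its display string, a token resolves only if exactly one choice matches
 * it as a prefix:
 *
 *     static const char *
 *     word_sel(void *choice)
 *     {
 *         return ((struct cli_cmd_word *)choice)->word;
 *     }
 *
 *     // "stat" -> the "status" word; NULL if "start" also matches
 *     struct cli_cmd_word *w = cli_getunamb(tok, (void **)words, word_sel);
 */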
int
cli_cmd_register(struct cli_cmd_tree *tree, struct cli_cmd *cmd);
int
cli_cmds_register(struct cli_state *state);
int
cli_input_init(struct cli_state *state);
int
cli_cmd_process(struct cli_state *state, int argc, char *argv[]);
int
cli_cmd_process_line(struct cli_state *state, const char *line);
int
cli_rl_enable(struct cli_state *state);
int
cli_rl_out(struct cli_state *state, const char *fmt, va_list ap);
int
cli_rl_err(struct cli_state *state, const char *fmt, va_list ap);
int
cli_usage_out(const char *usage);
int
_cli_out(const char *fmt, ...);
int
_cli_err(const char *fmt, ...);
#define cli_out(fmt...)                                                        \
    do {                                                                       \
        FMT_WARN(fmt);                                                         \
        _cli_out(fmt);                                                         \
    } while (0)
#define cli_err(fmt...)                                                        \
    do {                                                                       \
        FMT_WARN(fmt);                                                         \
        _cli_err(fmt);                                                         \
    } while (0)
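/* Usage sketch: both macros are printf-style; FMT_WARN() (from
 * libglusterfs) is presumably there so the compiler can type-check the
 * format arguments even though _cli_out()/_cli_err() take varargs.
 *
 *     cli_out("Volume %s: %d bricks", volname, brick_count);
 *     cli_err("volume %s does not exist", volname);
 */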
#define usage()                                                                \
    do {                                                                       \
        cli_out(                                                               \
            " Usage: gluster [options] <help> <peer>"                          \
            " <pool> <volume>\n"                                               \
            " Options:\n"                                                      \
            " --help                Shows the help information\n"              \
            " --version             Shows the version\n"                       \
            " --print-logdir        Shows the log directory\n"                 \
            " --print-statedumpdir  Shows the state dump directory\n");        \
    } while (0)
int
cli_submit_request(struct rpc_clnt *rpc, void *req, call_frame_t *frame,
                   rpc_clnt_prog_t *prog, int procnum, xlator_t *this,
                   fop_cbk_fn_t cbkfn, xdrproc_t xdrproc);
int32_t
cli_cmd_volume_create_parse(struct cli_state *state, const char **words,
                            int wordcount, dict_t **options, char **bricks);
int32_t
cli_cmd_volume_reset_parse(const char **words, int wordcount, dict_t **opt);
int32_t
cli_cmd_gsync_set_parse(struct cli_state *state, const char **words,
                        int wordcount, dict_t **opt, char **errstr);
int32_t
cli_cmd_quota_parse(const char **words, int wordcount, dict_t **opt);
int32_t
cli_cmd_inode_quota_parse(const char **words, int wordcount, dict_t **opt);
int32_t
cli_cmd_bitrot_parse(const char **words, int wordcount, dict_t **opt);
int32_t
cli_cmd_volume_set_parse(struct cli_state *state, const char **words,
                         int wordcount, dict_t **options, char **op_errstr);
int32_t
cli_cmd_ganesha_parse(struct cli_state *state, const char **words,
                      int wordcount, dict_t **options, char **op_errstr);
int32_t
cli_cmd_get_state_parse(struct cli_state *state, const char **words,
                        int wordcount, dict_t **options, char **op_errstr);
int32_t
cli_cmd_volume_add_brick_parse(struct cli_state *state, const char **words,
                               int wordcount, dict_t **options, int *type);
int32_t
cli_cmd_volume_remove_brick_parse(struct cli_state *state, const char **words,
                                  int wordcount, dict_t **options,
                                  int *question, int *brick_count,
                                  int32_t *command);
int32_t
cli_cmd_volume_replace_brick_parse(const char **words, int wordcount,
                                   dict_t **options);
int32_t
cli_cmd_volume_reset_brick_parse(const char **words, int wordcount,
                                 dict_t **options);
int32_t
cli_cmd_log_rotate_parse(const char **words, int wordcount, dict_t **options);
int32_t
cli_cmd_log_locate_parse(const char **words, int wordcount, dict_t **options);
int32_t
cli_cmd_log_filename_parse(const char **words, int wordcount, dict_t **options);
int32_t
cli_cmd_volume_statedump_options_parse(const char **words, int wordcount,
                                       dict_t **options);
int32_t
cli_cmd_volume_clrlks_opts_parse(const char **words, int wordcount,
                                 dict_t **options);
cli_local_t *
cli_local_get(void);
void
cli_local_wipe(cli_local_t *local);
gf_boolean_t
cli_cmd_connected(void);
int32_t
cli_cmd_await_connected(time_t timeout);
int32_t
cli_cmd_broadcast_connected(gf_boolean_t status);
int
cli_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
               void *data);
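/* Sketch (assumed flow): cli_rpc_notify() runs on the RPC event thread and
 * calls cli_cmd_broadcast_connected() on (dis)connect, while the command
 * thread blocks in cli_cmd_await_connected() before submitting a request:
 *
 *     if (cli_cmd_await_connected(state->default_conn_timeout) != 0) {
 *         cli_err("failed to connect to glusterd");  // hypothetical message
 *         return -1;
 *     }
 */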
int32_t
cli_cmd_volume_profile_parse(const char **words, int wordcount,
                             dict_t **options);
int32_t
cli_cmd_volume_top_parse(const char **words, int wordcount, dict_t **options);
int32_t
cli_cmd_log_level_parse(const char **words, int wordcount, dict_t **options);
int32_t
cli_cmd_volume_status_parse(const char **words, int wordcount,
                            dict_t **options);
int
cli_cmd_volume_heal_options_parse(const char **words, int wordcount,
                                  dict_t **options);
int
cli_cmd_volume_defrag_parse(const char **words, int wordcount,
                            dict_t **options);
int
cli_print_brick_status(cli_volume_status_t *status);
void
cli_print_detailed_status(cli_volume_status_t *status);
int
cli_get_detail_status(dict_t *dict, int i, cli_volume_status_t *status);
void
cli_print_line(int len);
int
cli_xml_output_str(char *op, char *str, int op_ret, int op_errno,
                   char *op_errstr);
int
cli_xml_output_dict(char *op, dict_t *dict, int op_ret, int op_errno,
                    char *op_errstr);
int
cli_xml_output_vol_top(dict_t *dict, int op_ret, int op_errno, char *op_errstr);
int
cli_xml_output_vol_profile(dict_t *dict, int op_ret, int op_errno,
                           char *op_errstr);
int
cli_xml_output_vol_status_begin(cli_local_t *local, int op_ret, int op_errno,
                                char *op_errstr);
int
cli_xml_output_vol_status_end(cli_local_t *local);
int
cli_xml_output_vol_status(cli_local_t *local, dict_t *dict);
int
cli_xml_output_vol_list(dict_t *dict, int op_ret, int op_errno,
                        char *op_errstr);
int
cli_xml_output_vol_info_begin(cli_local_t *local, int op_ret, int op_errno,
                              char *op_errstr);
int
cli_xml_output_vol_info_end(cli_local_t *local);
int
cli_xml_output_vol_info(cli_local_t *local, dict_t *dict);
int
cli_xml_output_vol_quota_limit_list_begin(cli_local_t *local, int op_ret,
                                          int op_errno, char *op_errstr);
int
cli_xml_output_vol_quota_limit_list_end(cli_local_t *local);
int
cli_quota_list_xml_error(cli_local_t *local, char *path, char *errstr);
int
cli_quota_xml_output(cli_local_t *local, char *path, int64_t hl_str,
                     char *sl_final, int64_t sl_num, int64_t used,
                     int64_t avail, char *sl, char *hl, gf_boolean_t limit_set);
int
cli_quota_object_xml_output(cli_local_t *local, char *path, char *sl_str,
                            int64_t sl_val, quota_limits_t *limits,
                            quota_meta_t *used_space, int64_t avail, char *sl,
                            char *hl, gf_boolean_t limit_set);
int
cli_xml_output_peer_status(dict_t *dict, int op_ret, int op_errno,
                           char *op_errstr);
int
cli_xml_output_vol_rebalance(gf_defrag_type_t op, dict_t *dict, int op_ret,
                             int op_errno, char *op_errstr);
int
cli_xml_output_vol_remove_brick(gf_boolean_t status_op, dict_t *dict,
                                int op_ret, int op_errno, char *op_errstr,
                                const char *op);
int
cli_xml_output_vol_replace_brick(dict_t *dict, int op_ret, int op_errno,
                                 char *op_errstr);
int
cli_xml_output_vol_create(dict_t *dict, int op_ret, int op_errno,
                          char *op_errstr);
int
cli_xml_output_generic_volume(char *op, dict_t *dict, int op_ret, int op_errno,
                              char *op_errstr);
int
cli_xml_output_vol_gsync(dict_t *dict, int op_ret, int op_errno,
                         char *op_errstr);
int
cli_xml_output_vol_status_tasks_detail(cli_local_t *local, dict_t *dict);
int
cli_xml_snapshot_delete(cli_local_t *local, dict_t *dict, gf_cli_rsp *rsp);
int
cli_xml_snapshot_begin_composite_op(cli_local_t *local);
int
cli_xml_snapshot_end_composite_op(cli_local_t *local);
int
cli_xml_output_snap_delete_begin(cli_local_t *local, int op_ret, int op_errno,
                                 char *op_errstr);
int
cli_xml_output_snap_delete_end(cli_local_t *local);
int
cli_xml_output_snap_status_begin(cli_local_t *local, int op_ret, int op_errno,
                                 char *op_errstr);
int
cli_xml_output_snap_status_end(cli_local_t *local);
int
cli_xml_output_snapshot(int cmd_type, dict_t *dict, int op_ret, int op_errno,
                        char *op_errstr);
int
cli_xml_snapshot_status_single_snap(cli_local_t *local, dict_t *dict,
                                    char *key);
int32_t
cli_cmd_snapshot_parse(const char **words, int wordcount, dict_t **options,
                       struct cli_state *state);
int
cli_xml_output_vol_getopts(dict_t *dict, int op_ret, int op_errno,
                           char *op_errstr);
void
print_quota_list_header(int type);
void
print_quota_list_empty(char *path, int type);
int
gf_gsync_status_t_comparator(const void *p, const void *q);
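/* Usage sketch (illustrative): the comparator has the qsort() signature,
 * so a gathered array of geo-rep status rows can be sorted before display:
 *
 *     qsort(rows, nrows, sizeof(*rows), gf_gsync_status_t_comparator);
 */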
#endif /* __CLI_H__ */