在不同的機器啓動一個cluster
# Start one member of a 3-node etcd cluster.
# Usage: run on each machine with its index as $1 (1, 2 or 3).
TOKEN=token-01
CLUSTER_STATE=new
NAME_1=machine-1
NAME_2=machine-2
NAME_3=machine-3
HOST_1=10.240.0.17
HOST_2=10.240.0.18
HOST_3=10.240.0.19
# Peer-URL map of all three members; identical on every machine.
CLUSTER="${NAME_1}=http://${HOST_1}:2380,${NAME_2}=http://${HOST_2}:2380,${NAME_3}=http://${HOST_3}:2380"

# ${NAME_$1} is not valid bash — build the variable name first, then
# dereference it with indirect expansion ${!var}.
name_var="NAME_${1:-}"
host_var="HOST_${1:-}"
THIS_NAME=${!name_var}
THIS_IP=${!host_var}

# Only launch etcd when a valid machine index was supplied; otherwise the
# member would start with empty --name and broken URLs.
if [ -n "${THIS_NAME}" ] && [ -n "${THIS_IP}" ]; then
  etcd --data-dir=data.etcd --name "${THIS_NAME}" \
    --initial-advertise-peer-urls "http://${THIS_IP}:2380" --listen-peer-urls "http://${THIS_IP}:2380" \
    --advertise-client-urls "http://${THIS_IP}:2379" --listen-client-urls "http://${THIS_IP}:2379" \
    --initial-cluster "${CLUSTER}" \
    --initial-cluster-state "${CLUSTER_STATE}" --initial-cluster-token "${TOKEN}"
fi
在三臺機器的腳本如下圖所示
在本地機器啓動一個cluster
First install goreman, which manages Procfile-based applications.
Our Procfile script will set up a local example cluster. Start it with:
goreman start
This will bring up 3 etcd members infra1, infra2 and infra3 and etcd grpc-proxy, which runs locally and composes a cluster.
Every cluster member and proxy accepts key value reads and key value writes.
ETCDCTL_API=3
CMDS
$ etcdctl --endpoints http://10.16.85.110:2379 put /message hello
$ etcdctl --endpoints http://10.16.85.110:2379 get /message
$ etcdctl --endpoints http://10.16.85.110:2379 rm /message
$ curl http://10.16.85.110:2379/v2/keys/message
$ curl http://10.16.85.110:2379/v2/keys
$ curl -X DELETE http://10.16.85.110:2379/v2/keys/message
$ etcdctl --endpoints http://10.16.85.110:2379 mkdir /foo-service
$ etcdctl --endpoints http://10.16.85.110:2379 put /foo-service/container1 localhost:1111
$ etcdctl --endpoints http://10.16.85.110:2379 get /foo-service/container1
$ etcdctl --endpoints http://10.16.85.110:2379 ls /foo-service
$ curl http://10.16.85.110:2379/v2/keys/foo-service/
$ etcdctl --endpoints http://10.16.85.110:2379 watch --recursive --forever /foo-service
$ etcdctl --endpoints http://10.16.85.110:2379 exec-watch --recursive /foo-service -- sh -c 'echo "\"$ETCD_WATCH_KEY\" key was updated to \"$ETCD_WATCH_VALUE\" value by \"$ETCD_WATCH_ACTION\" action"'
$ etcdctl --endpoints http://10.16.85.110:2379 put /message "hi"
$ etcdctl --endpoints http://10.16.85.110:2379 put /message "hello" --swap-with-value "hi"
$ etcdctl --endpoints http://10.16.85.110:2379 put /foo-ttl "expired soon" --ttl 10
$ etcdctl --endpoints=$ENDPOINTS lease grant 300
$ etcdctl --endpoints=$ENDPOINTS put sample value --lease=2be7547fbc6a5afa
$ etcdctl --endpoints=$ENDPOINTS lock mutex1
$ etcdctl --endpoints=$ENDPOINTS elect one p1
$ etcdctl --write-out=table --endpoints=$ENDPOINTS endpoint status
$ etcdctl --endpoints=$ENDPOINTS snapshot save my.db
$ etcdctl --write-out=table --endpoints=$ENDPOINTS snapshot status my.db
AUTH
# Enable etcd v3 authentication, then show authorized vs. unauthorized access.
export ETCDCTL_API=3
ENDPOINTS=localhost:2379

# Create a root role with read/write permission on key "foo".
etcdctl --endpoints="${ENDPOINTS}" role add root
etcdctl --endpoints="${ENDPOINTS}" role grant-permission root readwrite foo
etcdctl --endpoints="${ENDPOINTS}" role get root

# Create the root user (prompts for a password) and attach the root role.
etcdctl --endpoints="${ENDPOINTS}" user add root
etcdctl --endpoints="${ENDPOINTS}" user grant-role root root
etcdctl --endpoints="${ENDPOINTS}" user get root

# Turn authentication on.
etcdctl --endpoints="${ENDPOINTS}" auth enable

# now all client requests go through auth
etcdctl --endpoints="${ENDPOINTS}" --user=root:123 put foo bar
etcdctl --endpoints="${ENDPOINTS}" get foo                      # rejected: no credentials supplied
etcdctl --endpoints="${ENDPOINTS}" --user=root:123 get foo
etcdctl --endpoints="${ENDPOINTS}" --user=root:123 get foo1     # rejected: role only grants "foo"
Heartbeat Interval : 100ms
Election Timeout : 1000ms (如果slave持續1000ms內沒有收到master的心跳,則自薦爲master)
Election Timeout應該爲不低於Heartbeat Interval的十倍。
# Command line arguments:
$ etcd --heartbeat-interval=100 --election-timeout=500
# Environment variables:
$ ETCD_HEARTBEAT_INTERVAL=100 ETCD_ELECTION_TIMEOUT=500 etcd
$ sudo ionice -c2 -n0 -p `pgrep etcd`  # 增加IO磁盤的優先級
GO使用ETCD分佈式鎖
// Acquire a cluster-wide mutex via the etcd v3 concurrency package so that
// only one instance proceeds past this point at a time.
// NOTE(review): this looks like the interior of a larger function — the
// deferred Close/Unlock calls release everything when that function returns.
endpoints := strings.Split(config.Conf.EtcdClusterNodes, ",")
if endpoints != nil && len(endpoints) != 0{ // distributed locking enabled when cluster nodes are configured
cli, err := clientv3.New(clientv3.Config{Endpoints: endpoints})
if err != nil {
lib.Log.Panicf("ETCD NewEtcd cli failed , error is %v " , err)
}
defer cli.Close()
// NOTE(review): the session is presumably lease-backed, so the lock would be
// lost if the lease expires — confirm keep-alive behavior for long jobs.
s, err := concurrency.NewSession(cli)
if err != nil {
lib.Log.Panicf("ETCD NewSession failed , error is %v " , err)
}
defer s.Close()
// All competing instances must use this exact key prefix to contend for the same lock.
m := concurrency.NewMutex(s, "/my-lock-mbcp-bc-als/")
if err := m.Lock(context.TODO()); err != nil {
lib.Log.Panicf("ETCD NewMutex failed , error is %v " , err)
}
defer m.Unlock(context.TODO())
// Whoever grabs this distributed lock first gets to run; the rest block here.
}