Quick start
The code below sketches the overall logic of leader election:
package main

import (
    "context"
    "flag"
    "fmt"
    _ "net/http/pprof"
    "os"
    "path/filepath"
    "time"

    "golang.org/x/exp/rand"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/uuid"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/scheme"
    corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/client-go/tools/leaderelection"
    "k8s.io/client-go/tools/leaderelection/resourcelock"
    "k8s.io/client-go/tools/record"
    "k8s.io/client-go/util/homedir"
)

func main() {
    ctx := context.Background()

    // Locate the kubeconfig (default: ~/.kube/config).
    var kubeconfig *string
    if home := homedir.HomeDir(); home != "" {
        kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "")
    } else {
        kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
    }
    flag.Parse()

    config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
    if err != nil {
        panic(err)
    }
    clientset, err := kubernetes.NewForConfig(config)
    if err != nil {
        panic(err)
    }

    // Event recorder so that election events land in the "default" namespace.
    broadcaster := record.NewBroadcaster()
    broadcaster.StartRecordingToSink(&corev1.EventSinkImpl{
        Interface: clientset.CoreV1().Events("default"),
    })
    eventRecorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "hello-world"})

    // Each instance needs a unique identity: hostname plus a random UUID.
    createIdentity := func() string {
        hostname, err := os.Hostname()
        if err != nil {
            hostname = fmt.Sprintf("rand%d", rand.Intn(10000))
        }
        return fmt.Sprintf("%s_%s", hostname, string(uuid.NewUUID()))
    }

    // The election lock is a Lease named "hello-world" in the "default" namespace.
    lock := &resourcelock.LeaseLock{
        LeaseMeta: metav1.ObjectMeta{
            Namespace: "default",
            Name:      "hello-world",
        },
        Client: clientset.CoordinationV1(),
        LockConfig: resourcelock.ResourceLockConfig{
            Identity:      createIdentity(),
            EventRecorder: eventRecorder,
        },
    }

    leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
        Lock:          lock,
        LeaseDuration: 5 * time.Second,
        RenewDeadline: 4 * time.Second,
        RetryPeriod:   2 * time.Second,
        Callbacks: leaderelection.LeaderCallbacks{
            OnStartedLeading: func(ctx context.Context) {
                fmt.Println("start leading")
            },
            OnStoppedLeading: func() {
                fmt.Println("stop leading")
            },
            OnNewLeader: func(identity string) {
                fmt.Printf("new leader: %s\n", identity)
            },
        },
        Coordinated: false,
    })
}
Start this program in several terminals at the same time, then kill the leader process to simulate a node going down, and watch whether a new master gets elected.
As the figure shows, after the first process is elected master, the second and third processes automatically become slaves; once we kill the first process, the second one grabs the lock and becomes the new master.
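With the fmt callbacks in the example above, the output of one run looks roughly like this (the identities below are purely illustrative; each one is built from the hostname plus a random UUID at startup):

# terminal 1 (wins the first election)
new leader: hostA_2f6c...
start leading

# terminal 2 (follower; takes over after terminal 1 is killed)
new leader: hostA_2f6c...
new leader: hostB_91d4...
start leading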
Lease
Kubernetes has many built-in resource types, and Lease is one of them; as the name suggests, it records which holder currently occupies a particular resource.
? ~ k get lease -A
NAMESPACE         NAME                      HOLDER                                                       AGE
default           hello-world               VM-221-245-tencentos_bada2219-3a27-4b19-8b80-23fc05604391   6d7h
kube-node-lease   vm-221-245-tencentos      vm-221-245-tencentos                                         36d
kube-system       kube-controller-manager   VM-221-245-tencentos_8f5a4f85-ca0c-4b5f-ac81-8b0ff5ff2e49   36d
kube-system       kube-scheduler            VM-221-245-tencentos_4fab96c4-156b-4a77-b862-87224be44cb2   36d
For example, here is the Lease owned by a Kubernetes node:
? ~ k get lease vm-221-245-tencentos -n kube-node-lease -oyaml
apiVersion: coordination.k8s.io/v1
kind: Lease                                    # kind of the resource
metadata:
  creationTimestamp: "2024-09-27T12:24:07Z"    # when the resource was created
  name: vm-221-245-tencentos                   # name
  namespace: kube-node-lease                   # namespace
  ownerReferences:
  - apiVersion: v1
    kind: Node
    name: vm-221-245-tencentos                 # the owner of this resource
    uid: 5df61ad6-cc1a-4669-8b0e-d48a5b0ffb91
  resourceVersion: "3811080"
  uid: 411c6d4e-5afb-4eba-a1a0-8a56d00b75db
spec:
  holderIdentity: vm-221-245-tencentos
  leaseDurationSeconds: 40                     # lease duration: 40s
  renewTime: "2024-11-03T01:35:05.171458Z"     # when the lease was last renewed
In fact, the resource lock used in leader election is just such a Lease: it records that the master node exclusively holds a particular resource.
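To confirm that the election lock really is just this Lease object, here is a small read-only sketch (it assumes the default kubeconfig path and the default/hello-world Lease created by the quick-start example) that prints the current holder:

package main

import (
    "context"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    // Assumes a reachable cluster and the default kubeconfig location (~/.kube/config).
    config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    clientset, err := kubernetes.NewForConfig(config)
    if err != nil {
        panic(err)
    }

    // Read the Lease that the quick-start example uses as its election lock.
    lease, err := clientset.CoordinationV1().Leases("default").Get(context.Background(), "hello-world", metav1.GetOptions{})
    if err != nil {
        panic(err)
    }
    fmt.Println("holder:", *lease.Spec.HolderIdentity)
    fmt.Println("renewTime:", lease.Spec.RenewTime.Format("15:04:05"))
}

Killing the current leader and running this again should show the holder switching to another instance's identity.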
Looking at the source at https://github.com/kubernetes/client-go/tree/master/tools/leaderelection, the package splits into the resourcelock sub-package and the main leaderelection files, and there is not much code in total:
? leaderelection git:(master) tree
.
├── healthzadaptor.go
├── healthzadaptor_test.go
├── leaderelection.go
├── leaderelection_test.go
├── leasecandidate.go
├── leasecandidate_test.go
├── metrics.go
├── OWNERS
└── resourcelock
    ├── interface.go
    ├── leaselock.go
    └── multilock.go
Data structures
LeaderElectionRecord stores the Lease-related information:
// LeaderElectionRecord is the record that is stored in the leader election annotation.
// This information should be used for observational purposes only and could be replaced
// with a random string (e.g. UUID) with only slight modification of this code.
// TODO(mikedanese): this should potentially be versioned
type LeaderElectionRecord struct {
    // identity of the current leader
    HolderIdentity string `json:"holderIdentity"`
    // validity duration of the lease
    LeaseDurationSeconds int `json:"leaseDurationSeconds"`
    // when leadership was acquired
    AcquireTime metav1.Time `json:"acquireTime"`
    // when the lease was last renewed
    RenewTime metav1.Time `json:"renewTime"`
    // number of times leadership has changed hands
    LeaderTransitions int `json:"leaderTransitions"`
    // election strategy (used by coordinated leader election)
    Strategy v1.CoordinatedLeaseStrategy `json:"strategy"`
    PreferredHolder string `json:"preferredHolder"`
}
LeaderElectionConfig holds the elector's configuration:
type LeaderElectionConfig struct {
    // Lock is the resource lock that guards against racing candidates.
    Lock rl.Interface

    // LeaseDuration is how long a non-leader candidate waits before it may
    // take over an expired lease (i.e. the validity period of the lease).
    // Core clients default this value to 15 seconds.
    LeaseDuration time.Duration

    // RenewDeadline is how long the acting leader keeps retrying a renewal
    // before it gives up its leadership.
    // Core clients default this value to 10 seconds.
    RenewDeadline time.Duration

    // RetryPeriod is the interval between the candidates' retry attempts.
    // Core clients default this value to 2 seconds.
    RetryPeriod time.Duration

    // Callbacks are invoked on election events: when this instance starts
    // leading, when it stops leading, and when a new leader is observed.
    Callbacks LeaderCallbacks

    // WatchDog is the associated health checker
    // WatchDog may be null if it's not needed/configured.
    WatchDog *HealthzAdaptor

    // ReleaseOnCancel should be set true if the lock should be released
    // when the run context is cancelled. If you set this to true, you must
    // ensure all code guarded by this lease has successfully completed
    // prior to cancelling the context, or you may have two processes
    // simultaneously acting on the critical path.
    ReleaseOnCancel bool

    // Name is the name of the resource lock for debugging
    Name string

    // Coordinated will use the Coordinated Leader Election feature
    // WARNING: Coordinated leader election is ALPHA.
    Coordinated bool
}
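The three durations must be ordered sensibly: the lease has to outlive the renew deadline, and the renew deadline has to leave room for retries. The helper below is a hypothetical illustration of that relationship, not part of client-go (client-go performs its own validation when the elector is constructed):

package main

import (
    "fmt"
    "time"
)

// validateDurations is a hypothetical helper (not part of client-go) that
// illustrates how the three durations relate to each other: the lease must
// outlive the renew deadline, and the renew deadline must exceed the retry period.
func validateDurations(leaseDuration, renewDeadline, retryPeriod time.Duration) error {
    if leaseDuration <= renewDeadline {
        return fmt.Errorf("leaseDuration (%v) must be greater than renewDeadline (%v)", leaseDuration, renewDeadline)
    }
    if renewDeadline <= retryPeriod {
        return fmt.Errorf("renewDeadline (%v) must be greater than retryPeriod (%v)", renewDeadline, retryPeriod)
    }
    return nil
}

func main() {
    // The values used by the quick-start example: 5s / 4s / 2s.
    if err := validateDurations(5*time.Second, 4*time.Second, 2*time.Second); err != nil {
        panic(err)
    }
    fmt.Println("durations look sane")
}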
Main logic
The election works roughly as follows:
- When the instances start, each one is a LeaderElector; the first one to win the election becomes the leader, and from then on it maintains a LeaseLock that every LeaderElector can query
- The other LeaderElectors stay in the candidate state: they keep watching the leader's status and rejoin the election if the leader fails
- After acquiring leadership, the leader continuously refreshes its own leader state
func (le *LeaderElector) Run(ctx context.Context) {
    defer runtime.HandleCrash()
    // OnStoppedLeading is deferred, so it runs on every node when Run returns.
    defer le.config.Callbacks.OnStoppedLeading()

    // acquire is where the candidates compete for leadership;
    // a node that fails to obtain it simply returns here.
    if !le.acquire(ctx) {
        return // ctx signalled done
    }
    ctx, cancel := context.WithCancel(ctx)
    defer cancel()
    // Only the node that obtained leadership runs OnStartedLeading.
    go le.config.Callbacks.OnStartedLeading(ctx)
    // After obtaining leadership, keep refreshing the leader state.
    le.renew(ctx)
}

func (le *LeaderElector) acquire(ctx context.Context) bool {
    ...
    klog.Infof("attempting to acquire leader lease %v...", desc)
    wait.JitterUntil(func() {
        if !le.config.Coordinated {
            succeeded = le.tryAcquireOrRenew(ctx) // try to grab or renew the lock
        } else {
            succeeded = le.tryCoordinatedRenew(ctx)
        }
        ...
        klog.Infof("successfully acquired lease %v", desc)
        cancel()
        ...
    }, le.config.RetryPeriod, JitterFactor, true, ctx.Done())
    return succeeded
}
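le.renew itself is not shown above. The sketch below is my own simplified illustration, not the verbatim client-go code, but it captures the shape of it: the leader keeps calling tryAcquireOrRenew every RetryPeriod, each attempt bounded by RenewDeadline, and steps down as soon as a renewal fails.

package main

import (
    "context"
    "fmt"
    "time"
)

// renewLoop is a simplified, illustrative sketch of what le.renew does; it is
// NOT the actual client-go implementation. The leader keeps calling
// tryAcquireOrRenew every retryPeriod, bounding each attempt by renewDeadline,
// and gives up leadership as soon as a renewal fails.
func renewLoop(ctx context.Context, tryAcquireOrRenew func(context.Context) bool,
    retryPeriod, renewDeadline time.Duration) {
    ticker := time.NewTicker(retryPeriod)
    defer ticker.Stop()
    for {
        select {
        case <-ctx.Done():
            return
        case <-ticker.C:
            // Bound each renewal attempt by the renew deadline.
            timeoutCtx, cancel := context.WithTimeout(ctx, renewDeadline)
            ok := tryAcquireOrRenew(timeoutCtx)
            cancel()
            if !ok {
                // Lost the lease: stop leading (Run then fires OnStoppedLeading).
                return
            }
        }
    }
}

func main() {
    // Exercise the loop with a stub that always renews, for three seconds.
    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
    defer cancel()
    renewLoop(ctx, func(context.Context) bool { return true }, 1*time.Second, 2*time.Second)
    fmt.Println("context cancelled, leadership released")
}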
The core logic: tryAcquireOrRenew
// tryAcquireOrRenew tries to acquire a leader lease if it is not already acquired,
// else it tries to renew the lease if it has already been acquired. Returns true
// on success else returns false.
func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool {
    now := metav1.NewTime(le.clock.Now())
    leaderElectionRecord := rl.LeaderElectionRecord{
        // Identity is the identifier of the current candidate.
        HolderIdentity:       le.config.Lock.Identity(),
        LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second),
        RenewTime:            now,
        AcquireTime:          now,
    }

    // 1. If we are the leader and the lease is still valid, just update the lock.
    if le.IsLeader() && le.isLeaseValid(now.Time) {
        oldObservedRecord := le.getObservedRecord()
        leaderElectionRecord.AcquireTime = oldObservedRecord.AcquireTime
        leaderElectionRecord.LeaderTransitions = oldObservedRecord.LeaderTransitions
        err := le.config.Lock.Update(ctx, leaderElectionRecord)
        ...
    }

    // 2. Not the leader: fetch the existing lock.
    oldLeaderElectionRecord, oldLeaderElectionRawRecord, err := le.config.Lock.Get(ctx)
    if err != nil {
        ...
    }

    // 3. Compare the fetched election record with the locally observed one;
    //    refresh the local cache if it changed.
    if !bytes.Equal(le.observedRawRecord, oldLeaderElectionRawRecord) {
        le.setObservedRecord(oldLeaderElectionRecord)
        le.observedRawRecord = oldLeaderElectionRawRecord
    }
    if len(oldLeaderElectionRecord.HolderIdentity) > 0 && le.isLeaseValid(now.Time) && !le.IsLeader() {
        klog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity)
        return false
    }

    // 4. Fill in the record depending on whether we are already the leader.
    if le.IsLeader() {
        leaderElectionRecord.AcquireTime = oldLeaderElectionRecord.AcquireTime
        leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions
        le.metrics.slowpathExercised(le.config.Name)
    } else {
        leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions + 1
    }

    // 5. Update the lock itself.
    if err = le.config.Lock.Update(ctx, leaderElectionRecord); err != nil {
        klog.Errorf("Failed to update lock: %v", err)
        return false
    }

    // A successful update means this node now holds the lock: acquire/renew succeeded.
    le.setObservedRecord(&leaderElectionRecord)
    return true
}
The Lock used above is in fact a LeaseLock, one implementation of the resourcelock Interface:
// Interface offers a common interface for locking on arbitrary
// resources used in leader election. The Interface is used
// to hide the details on specific implementations in order to allow
// them to change over time. This interface is strictly for use
// by the leaderelection code.
type Interface interface {
    // Get returns the LeaderElectionRecord
    Get(ctx context.Context) (*LeaderElectionRecord, []byte, error)

    // Create attempts to create a LeaderElectionRecord
    Create(ctx context.Context, ler LeaderElectionRecord) error

    // Update will update an existing LeaderElectionRecord
    Update(ctx context.Context, ler LeaderElectionRecord) error
    ...
}

// Get returns the election record from a Lease spec
func (ll *LeaseLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
    lease, err := ll.Client.Leases(ll.LeaseMeta.Namespace).Get(ctx, ll.LeaseMeta.Name, metav1.GetOptions{})
    if err != nil {
        return nil, nil, err
    }
    ll.lease = lease
    record := LeaseSpecToLeaderElectionRecord(&ll.lease.Spec)
    recordByte, err := json.Marshal(*record)
    if err != nil {
        return nil, nil, err
    }
    return record, recordByte, nil
}

// Create attempts to create a Lease
func (ll *LeaseLock) Create(ctx context.Context, ler LeaderElectionRecord) error {
    var err error
    ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Create(ctx, &coordinationv1.Lease{
        ObjectMeta: metav1.ObjectMeta{
            Name:      ll.LeaseMeta.Name,
            Namespace: ll.LeaseMeta.Namespace,
        },
        Spec: LeaderElectionRecordToLeaseSpec(&ler),
    }, metav1.CreateOptions{})
    return err
}
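The LeaseSpecToLeaderElectionRecord / LeaderElectionRecordToLeaseSpec helpers referenced above are plain field-by-field mappings. The sketch below is a simplified, hypothetical version (the real helpers live in resourcelock/leaselock.go and also carry the coordinated-election fields); it shows how a record maps onto a coordination.k8s.io/v1 LeaseSpec:

package main

import (
    "fmt"
    "time"

    coordinationv1 "k8s.io/api/coordination/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// recordToLeaseSpec is a simplified, hypothetical sketch (not the client-go
// helper) of how the election record's fields map onto a Lease spec.
func recordToLeaseSpec(holder string, leaseDurationSeconds int, acquire, renew time.Time, transitions int) coordinationv1.LeaseSpec {
    d := int32(leaseDurationSeconds)
    t := int32(transitions)
    acquireTime := metav1.NewMicroTime(acquire)
    renewTime := metav1.NewMicroTime(renew)
    return coordinationv1.LeaseSpec{
        HolderIdentity:       &holder,
        LeaseDurationSeconds: &d,
        AcquireTime:          &acquireTime,
        RenewTime:            &renewTime,
        LeaseTransitions:     &t,
    }
}

func main() {
    now := time.Now()
    spec := recordToLeaseSpec("hostA_2f6c", 5, now, now, 0)
    fmt.Println(*spec.HolderIdentity, *spec.LeaseDurationSeconds)
}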
How does it decide whether the current leader still holds a valid lease?
func (le *LeaderElector) isLeaseValid(now time.Time) bool {
    return le.observedTime.Add(time.Second * time.Duration(le.getObservedRecord().LeaseDurationSeconds)).After(now)
}
It simply checks whether the time elapsed since the record was last observed is still within LeaseDurationSeconds; if it is, the lease is considered valid.
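A concrete illustration with assumed numbers: if the record was last observed at 10:00:00 and LeaseDurationSeconds is 5, any check before 10:00:05 sees a valid lease, and anything later sees it as expired:

package main

import (
    "fmt"
    "time"
)

func main() {
    // Assumed values, for illustration only.
    observed := time.Date(2024, 11, 3, 10, 0, 0, 0, time.UTC)
    leaseDuration := 5 * time.Second
    expiry := observed.Add(leaseDuration) // 10:00:05

    fmt.Println(expiry.After(time.Date(2024, 11, 3, 10, 0, 3, 0, time.UTC))) // true  -> lease still valid
    fmt.Println(expiry.After(time.Date(2024, 11, 3, 10, 0, 6, 0, time.UTC))) // false -> lease expired
}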
And how does an instance determine whether it is the leader at all? The check is as follows:
// IsLeader returns true if the last observed leader was this client else returns false.
func (le *LeaderElector) IsLeader() bool {
    return le.getObservedRecord().HolderIdentity == le.config.Lock.Identity()
}
It simply compares the HolderIdentity in the currently observed ElectionRecord with the Identity this instance was configured with at startup; if they match, this instance is the leader.
Because the observed record is fetched from Kubernetes itself, its uniqueness is guaranteed.
Identity is the unique identifier of each running instance. This field must never be duplicated across instances, otherwise the election is guaranteed to fail with errors like the following:
E1103 15:44:21.019391 3024650 leaderelection.go:429] Failed to update lock optimistically: Operation cannot be fulfilled on leases.coordination.k8s.io "hello-world": the object has been modified; please apply your changes to the latest version and try again, falling back to slow path