import (
"fmt"
"os"
"time"
"k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
)
// main runs a simple Kubernetes leader-election loop: it acquires a
// ConfigMap-based resource lock in the "default" namespace and, while
// holding leadership, prints the current time every 10 seconds. When
// leadership is lost, Run returns and the outer loop re-contends.
func main() {
	config, err := rest.InClusterConfig()
	if err != nil {
		fmt.Println("Failed to get kubernetes config", err)
		os.Exit(1)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		fmt.Println("Failed to create kubernetes client", err)
		os.Exit(1)
	}

	namespace := "default"
	name := "leader-election-example"

	// The lock identity must be unique per participant. The pod hostname
	// is the conventional choice: inside a cluster each pod gets a
	// distinct hostname. (The original rand.UUIDEncoder call was not a
	// real API and did not compile.)
	lockID, err := os.Hostname()
	if err != nil {
		fmt.Println("Failed to get hostname", err)
		os.Exit(1)
	}

	rl, err := resourcelock.New(resourcelock.ConfigMapsResourceLock,
		namespace,
		name,
		clientset.CoreV1(),
		resourcelock.ResourceLockConfig{
			Identity: lockID,
		})
	// Check this error before it is overwritten by the NewLeaderElector
	// assignment below (previously it was silently dropped).
	if err != nil {
		fmt.Println("Failed to create resource lock", err)
		os.Exit(1)
	}

	le, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
		Lock: rl,
		// client-go validation requires:
		// LeaseDuration > RenewDeadline > RetryPeriod * 1.2
		LeaseDuration: 6 * time.Second,
		RenewDeadline: 5 * time.Second,
		RetryPeriod:   3 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(stop <-chan struct{}) {
				// Leader work: print immediately, then every 10s until
				// the stop channel closes. A ticker (stopped on exit)
				// avoids allocating a fresh timer each iteration as
				// time.After did.
				fmt.Println(time.Now())
				ticker := time.NewTicker(10 * time.Second)
				defer ticker.Stop()
				for {
					select {
					case <-stop:
						return
					case now := <-ticker.C:
						fmt.Println(now)
					}
				}
			},
			OnStoppedLeading: func() {
				fmt.Println("Stop leading")
			},
		},
	})
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}

	// Run blocks until leadership is lost; loop forever to re-contend.
	for {
		le.Run()
	}
}