main.go

package main

import (
    "context"
    "flag"
    "io/ioutil"
    "os"
    "os/signal"
    "syscall"
    "time"

    "github.com/opsgenie/kubernetes-event-exporter/pkg/exporter"
    "github.com/opsgenie/kubernetes-event-exporter/pkg/kube"
    "github.com/rs/zerolog"
    "github.com/rs/zerolog/log"
    "gopkg.in/yaml.v2"
)
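
// conf is the path to the YAML configuration file; override it with -conf.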
var (
    conf = flag.String("conf", "config.yaml", "The path to the config file")
)

func main() {
    flag.Parse()
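
    // Read the config file and expand any $VAR / ${VAR} references,
    // so values such as credentials can be supplied via the environment.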
    b, err := ioutil.ReadFile(*conf)
    if err != nil {
        log.Fatal().Err(err).Msg("cannot read config file")
    }
    b = []byte(os.ExpandEnv(string(b)))

    var cfg exporter.Config
    err = yaml.Unmarshal(b, &cfg)
    if err != nil {
        log.Fatal().Err(err).Msg("cannot parse YAML config")
    }
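
    // Log at debug level with caller information by default;
    // an explicit logLevel in the config overrides this.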
    log.Logger = log.With().Caller().Logger().Level(zerolog.DebugLevel)
    if cfg.LogLevel != "" {
        level, err := zerolog.ParseLevel(cfg.LogLevel)
        if err != nil {
            log.Fatal().Err(err).Str("level", cfg.LogLevel).Msg("Invalid log level")
        }
        log.Logger = log.Logger.Level(level)
    }
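
    // Pick the log output format: zerolog's default JSON, or a
    // human-readable console writer for "pretty" (also the default).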
    switch cfg.LogFormat {
    case "json":
        // zerolog emits JSON by default; nothing to do.
    case "", "pretty":
        log.Logger = log.Logger.Output(zerolog.ConsoleWriter{
            Out:        os.Stdout,
            NoColor:    false,
            TimeFormat: time.RFC3339,
        })
    default:
        log.Fatal().Str("log_format", cfg.LogFormat).Msg("Unknown log format")
    }
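
    // Default the event throttle period when it is not configured.
    // The unit is defined by kube.NewEventWatcher; 5 appears to mean seconds.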
    if cfg.ThrottlePeriod == 0 {
        cfg.ThrottlePeriod = 5
    }
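
    // Resolve the Kubernetes client configuration via the kube helper package.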
    kubeconfig, err := kube.GetKubernetesConfig()
    if err != nil {
        log.Fatal().Err(err).Msg("cannot get kubeconfig")
    }
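
    // The engine routes events to the configured receivers; the watcher
    // feeds it every Kubernetes event through engine.OnEvent.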
    engine := exporter.NewEngine(&cfg, &exporter.ChannelBasedReceiverRegistry{})
    w := kube.NewEventWatcher(kubeconfig, cfg.Namespace, cfg.ThrottlePeriod, engine.OnEvent)

    ctx, cancel := context.WithCancel(context.Background())
    leaderLost := make(chan bool)
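
    // With leader election enabled, only the elected replica starts the
    // watcher; losing leadership triggers a shutdown via leaderLost.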
    if cfg.LeaderElection.Enabled {
        l, err := kube.NewLeaderElector(cfg.LeaderElection.LeaderElectionID, kubeconfig,
            func(_ context.Context) {
                log.Info().Msg("leader election won")
                w.Start()
            },
            func() {
                log.Error().Msg("leader election lost")
                leaderLost <- true
            },
        )
        if err != nil {
            log.Fatal().Err(err).Msg("cannot create leader elector")
        }
        go l.Run(ctx)
    } else {
        w.Start()
    }
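
    // Shut down cleanly on SIGINT/SIGTERM or when leadership is lost.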
    c := make(chan os.Signal, 1)
    signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)

    gracefulExit := func() {
        defer close(c)
        defer close(leaderLost)
        cancel()
        w.Stop()
        engine.Stop()
        log.Info().Msg("Exiting")
    }

    select {
    case sig := <-c:
        log.Info().Str("signal", sig.String()).Msg("Received signal to exit")
        gracefulExit()
    case <-leaderLost:
        log.Warn().Msg("Leader election lost")
        gracefulExit()
    }
}
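
For reference, a minimal config.yaml that the loader above could parse might look like the sketch below. This is an illustration only: the exact YAML keys depend on the struct tags in exporter.Config, so the field names here are assumptions inferred from the cfg fields main() reads, and the route/receiver sections that give the exporter something to do are omitted.

# config.yaml (hypothetical sketch; key names assume conventional
# lowercase struct tags in exporter.Config)
logLevel: info
logFormat: pretty          # "json", or ""/"pretty" for console output
throttlePeriod: 5          # falls back to 5 in main() when unset
namespace: ""              # empty typically means all namespaces
leaderElection:
  enabled: true
  leaderElectionID: kubernetes-event-exporter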