Adapted logging which defaults to JSON encoding
This commit is contained in:
parent
feedfad99f
commit
0194429c80
5 changed files with 145 additions and 79 deletions
|
@ -4,6 +4,7 @@ Changes adhere to [semantic versioning](https://semver.org).
|
|||
|
||||
## [1.1.0] - UNRELEASED
|
||||
|
||||
* Adapted logging which defaults to JSON encoding
|
||||
* ...
|
||||
|
||||
## [1.0.3] - 2024/01/21
|
||||
|
|
98
README.md
98
README.md
|
@ -127,50 +127,51 @@ via web interface or API.
|
|||
|
||||
The following environment variables can be used to modify application behavior.
|
||||
|
||||
| Variable | Purpose | Default/Description |
|
||||
|:-----------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `TZ` | The time zone (**recommended** to set it properly, background tasks depend on it) | Defaults to `Europe/Berlin`, can be any time zone according to _tz database_ |
|
||||
| `ADMIN_USER` | Admin user name for login | Not set by default, you need to explicitly set it to user name |
|
||||
| `ADMIN_PASSWORD` | Admin password for login | Not set by default, you need to explicitly set it to a secure random |
|
||||
| | | |
|
||||
| `DB_TYPE` | The database type (Postgres is **recommended**) | Defaults to `sqlite`, possible values are `sqlite` or `postgres` |
|
||||
| `DB_SQLITE_FILE` | Path to the SQLITE file | Defaults to `<XDG_DATA_DIR>/upda/upda.db`, e.g. `~/.local/share/upda/upda.db` |
|
||||
| `DB_POSTGRES_HOST` | The postgres host | Postgres host address, defaults to `localhost` |
|
||||
| `DB_POSTGRES_PORT` | The postgres port | Postgres port, defaults to `5432` |
|
||||
| `DB_POSTGRES_NAME` | The postgres database name | Postgres database name, needs to be set |
|
||||
| `DB_POSTGRES_TZ` | The postgres time zone | Postgres time zone settings, defaults to `Europe/Berlin` |
|
||||
| `DB_POSTGRES_USER` | The postgres user | Postgres user name, needs to be set |
|
||||
| `DB_POSTGRES_PASSWORD` | The postgres password | Postgres user password, needs to be set |
|
||||
| | | |
|
||||
| `SERVER_PORT` | Port | Defaults to `8080` |
|
||||
| `SERVER_LISTEN` | Server's listen address | Defaults to empty which equals `0.0.0.0` |
|
||||
| `SERVER_TLS_ENABLED`               | If server uses TLS                                                                                                                     | Defaults to `false`                                                                                                                    |
|
||||
| `SERVER_TLS_CERT_PATH` | When TLS enabled, provide the certificate path | |
|
||||
| `SERVER_TLS_KEY_PATH` | When TLS enabled, provide the key path | |
|
||||
| `SERVER_TIMEOUT` | Timeout the server waits before shutting down to end any pending tasks | Defaults to `1s` (1 second), qualifier can be `s = second`, `m = minute`, `h = hour` prefixed with a positive number |
|
||||
| `CORS_ALLOW_ORIGIN` | CORS configuration | Defaults to `*` |
|
||||
| `CORS_ALLOW_METHODS` | CORS configuration | Defaults to `GET, POST, PUT, PATCH, DELETE, OPTIONS` |
|
||||
| `CORS_ALLOW_HEADERS` | CORS configuration | Defaults to `Authorization, Content-Type` |
|
||||
| | | |
|
||||
| `LOGGING_LEVEL` | Logging level. Possible are `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal` | Defaults to `info` |
|
||||
| | | |
|
||||
| `WEBHOOKS_TOKEN_LENGTH` | The length of the token | Defaults to `16`, positive number |
|
||||
| | | |
|
||||
| `TASK_UPDATE_CLEAN_STALE_ENABLED` | If background task should run to do housekeeping of stale (ignored/approved) updates from the database | Defaults to `false` |
|
||||
| `TASK_UPDATE_CLEAN_STALE_INTERVAL` | Interval at which a background task does housekeeping by deleting stale (ignored/approved) updates from the database | Defaults to `1h` (1 hour), qualifier can be `s = second`, `m = minute`, `h = hour` prefixed with a positive number |
|
||||
| `TASK_UPDATE_CLEAN_STALE_MAX_AGE` | Number defining at which age stale (ignored/approved) updates are deleted by the background task (_updatedAt_ attribute decides) | Defaults to `168h` (168 hours = 1 week), qualifier can be `s = second`, `m = minute`, `h = hour` prefixed with a positive number |
|
||||
| `TASK_EVENT_CLEAN_STALE_ENABLED` | If background task should run to do housekeeping of stale (old) events from the database | Defaults to `false` |
|
||||
| `TASK_EVENT_CLEAN_STALE_INTERVAL` | Interval at which a background task does housekeeping by deleting stale (old) events from the database | Defaults to `8h` (8 hours), qualifier can be `s = second`, `m = minute`, `h = hour` prefixed with a positive number |
|
||||
| `TASK_EVENT_CLEAN_STALE_MAX_AGE` | Number defining at which age stale (old) events are deleted by the background task (_updatedAt_ attribute decides) | Defaults to `2190h` (2190 hours = 3 months), qualifier can be `s = second`, `m = minute`, `h = hour` prefixed with a positive number |
|
||||
| `TASK_PROMETHEUS_REFRESH_INTERVAL` | Interval at which a background task updates custom metrics | Defaults to `60s` (60 seconds), qualifier can be `s = second`, `m = minute`, `h = hour` prefixed with a positive number |
|
||||
| | | |
|
||||
| `LOCK_REDIS_ENABLED` | If locking via REDIS (multiple instances) is enabled. Requires REDIS. Otherwise uses in-memory locks. | Defaults to `false` |
|
||||
| `LOCK_REDIS_URL` | If locking via REDIS is enabled, this should point to a resolvable REDIS instance, e.g. `redis://<user>:<pass>@localhost:6379/<db>`. | |
|
||||
| | | |
|
||||
| `PROMETHEUS_ENABLED` | If Prometheus metrics are exposed | Defaults to `false` |
|
||||
| `PROMETHEUS_METRICS_PATH` | Defines the metrics endpoint path | Defaults to `/metrics` |
|
||||
| `PROMETHEUS_SECURE_TOKEN_ENABLED` | If Prometheus metrics endpoint is protected by a token when enabled (**recommended**) | Defaults to `true` |
|
||||
| `PROMETHEUS_SECURE_TOKEN` | The token securing the metrics endpoint when enabled (**recommended**) | Not set by default, you need to explicitly set it to a secure random |
|
||||
| Variable | Purpose | Default/Description |
|
||||
|:-----------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `TZ` | The time zone (**recommended** to set it properly, background tasks depend on it) | Defaults to `Europe/Berlin`, can be any time zone according to _tz database_ |
|
||||
| `ADMIN_USER` | Admin user name for login | Not set by default, you need to explicitly set it to user name |
|
||||
| `ADMIN_PASSWORD` | Admin password for login | Not set by default, you need to explicitly set it to a secure random |
|
||||
| | | |
|
||||
| `DB_TYPE` | The database type (Postgres is **recommended**) | Defaults to `sqlite`, possible values are `sqlite` or `postgres` |
|
||||
| `DB_SQLITE_FILE` | Path to the SQLITE file | Defaults to `<XDG_DATA_DIR>/upda/upda.db`, e.g. `~/.local/share/upda/upda.db` |
|
||||
| `DB_POSTGRES_HOST` | The postgres host | Postgres host address, defaults to `localhost` |
|
||||
| `DB_POSTGRES_PORT` | The postgres port | Postgres port, defaults to `5432` |
|
||||
| `DB_POSTGRES_NAME` | The postgres database name | Postgres database name, needs to be set |
|
||||
| `DB_POSTGRES_TZ` | The postgres time zone | Postgres time zone settings, defaults to `Europe/Berlin` |
|
||||
| `DB_POSTGRES_USER` | The postgres user | Postgres user name, needs to be set |
|
||||
| `DB_POSTGRES_PASSWORD` | The postgres password | Postgres user password, needs to be set |
|
||||
| | | |
|
||||
| `SERVER_PORT` | Port | Defaults to `8080` |
|
||||
| `SERVER_LISTEN` | Server's listen address | Defaults to empty which equals `0.0.0.0` |
|
||||
| `SERVER_TLS_ENABLED`               | If server uses TLS                                                                                                                          | Defaults to `false`                                                                                                                    |
|
||||
| `SERVER_TLS_CERT_PATH` | When TLS enabled, provide the certificate path | |
|
||||
| `SERVER_TLS_KEY_PATH` | When TLS enabled, provide the key path | |
|
||||
| `SERVER_TIMEOUT` | Timeout the server waits before shutting down to end any pending tasks | Defaults to `1s` (1 second), qualifier can be `s = second`, `m = minute`, `h = hour` prefixed with a positive number |
|
||||
| `CORS_ALLOW_ORIGIN` | CORS configuration | Defaults to `*` |
|
||||
| `CORS_ALLOW_METHODS` | CORS configuration | Defaults to `GET, POST, PUT, PATCH, DELETE, OPTIONS` |
|
||||
| `CORS_ALLOW_HEADERS` | CORS configuration | Defaults to `Authorization, Content-Type` |
|
||||
| | | |
|
||||
| `LOGGING_LEVEL` | Logging level. Possible are `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. Setting to `debug` enables high verbosity output. | Defaults to `info` |
|
||||
| `LOGGING_ENCODING` | Logging encoding. Possible are `console` and `json` | Defaults to `json` |
|
||||
| | | |
|
||||
| `WEBHOOKS_TOKEN_LENGTH` | The length of the token | Defaults to `16`, positive number |
|
||||
| | | |
|
||||
| `TASK_UPDATE_CLEAN_STALE_ENABLED` | If background task should run to do housekeeping of stale (ignored/approved) updates from the database | Defaults to `false` |
|
||||
| `TASK_UPDATE_CLEAN_STALE_INTERVAL` | Interval at which a background task does housekeeping by deleting stale (ignored/approved) updates from the database | Defaults to `1h` (1 hour), qualifier can be `s = second`, `m = minute`, `h = hour` prefixed with a positive number |
|
||||
| `TASK_UPDATE_CLEAN_STALE_MAX_AGE` | Number defining at which age stale (ignored/approved) updates are deleted by the background task (_updatedAt_ attribute decides) | Defaults to `168h` (168 hours = 1 week), qualifier can be `s = second`, `m = minute`, `h = hour` prefixed with a positive number |
|
||||
| `TASK_EVENT_CLEAN_STALE_ENABLED` | If background task should run to do housekeeping of stale (old) events from the database | Defaults to `false` |
|
||||
| `TASK_EVENT_CLEAN_STALE_INTERVAL` | Interval at which a background task does housekeeping by deleting stale (old) events from the database | Defaults to `8h` (8 hours), qualifier can be `s = second`, `m = minute`, `h = hour` prefixed with a positive number |
|
||||
| `TASK_EVENT_CLEAN_STALE_MAX_AGE` | Number defining at which age stale (old) events are deleted by the background task (_updatedAt_ attribute decides) | Defaults to `2190h` (2190 hours = 3 months), qualifier can be `s = second`, `m = minute`, `h = hour` prefixed with a positive number |
|
||||
| `TASK_PROMETHEUS_REFRESH_INTERVAL` | Interval at which a background task updates custom metrics | Defaults to `60s` (60 seconds), qualifier can be `s = second`, `m = minute`, `h = hour` prefixed with a positive number |
|
||||
| | | |
|
||||
| `LOCK_REDIS_ENABLED` | If locking via REDIS (multiple instances) is enabled. Requires REDIS. Otherwise uses in-memory locks. | Defaults to `false` |
|
||||
| `LOCK_REDIS_URL` | If locking via REDIS is enabled, this should point to a resolvable REDIS instance, e.g. `redis://<user>:<pass>@localhost:6379/<db>`. | |
|
||||
| | | |
|
||||
| `PROMETHEUS_ENABLED` | If Prometheus metrics are exposed | Defaults to `false` |
|
||||
| `PROMETHEUS_METRICS_PATH` | Defines the metrics endpoint path | Defaults to `/metrics` |
|
||||
| `PROMETHEUS_SECURE_TOKEN_ENABLED` | If Prometheus metrics endpoint is protected by a token when enabled (**recommended**) | Defaults to `true` |
|
||||
| `PROMETHEUS_SECURE_TOKEN` | The token securing the metrics endpoint when enabled (**recommended**) | Not set by default, you need to explicitly set it to a secure random |
|
||||
|
||||
## 3rd party integrations
|
||||
|
||||
|
@ -281,7 +282,14 @@ docker build --rm --no-cache -t upda:latest .
|
|||
|
||||
## Development & contribution
|
||||
|
||||
* Ensure to set `LOGGING_LEVEL=debug` for proper debug logs during development.
|
||||
* Ensure to set the following environment variables for proper debug logs during development
|
||||
|
||||
```shell
|
||||
DEVELOPMENT=true
|
||||
LOGGING_ENCODING=console
|
||||
LOGGING_LEVEL=debug
|
||||
```
|
||||
|
||||
* Code guidelines
|
||||
* Each entity has its own repository
|
||||
* Each entity is only used in repository and service (otherwise, mapping happens)
|
||||
|
|
|
@ -23,8 +23,8 @@ func Start() {
|
|||
// secure init
|
||||
util.AssertAvailablePRNG()
|
||||
|
||||
// set gin mode derived from logging level
|
||||
if zap.L().Level() == zap.DebugLevel {
|
||||
// set gin mode derived
|
||||
if env.appConfig.isDevelopment {
|
||||
gin.SetMode(gin.DebugMode)
|
||||
} else {
|
||||
gin.SetMode(gin.ReleaseMode)
|
||||
|
|
|
@ -1,14 +1,19 @@
|
|||
package server
|
||||
|
||||
const (
|
||||
envDevelopment = "DEVELOPMENT"
|
||||
|
||||
envLoggingLevel = "LOGGING_LEVEL"
|
||||
loggingLevelDefault = "info"
|
||||
envLoggingEncoding = "LOGGING_ENCODING"
|
||||
loggingEncodingDefault = "json"
|
||||
|
||||
envTZ = "TZ"
|
||||
tzDefault = "Europe/Berlin"
|
||||
|
||||
envAdminUser = "ADMIN_USER"
|
||||
envAdminPassword = "ADMIN_PASSWORD"
|
||||
|
||||
envLoggingLevel = "LOGGING_LEVEL"
|
||||
|
||||
envServerPort = "SERVER_PORT"
|
||||
envServerListen = "SERVER_LISTEN"
|
||||
envServerTlsEnabled = "SERVER_TLS_ENABLED"
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/adrg/xdg"
|
||||
"go.uber.org/zap"
|
||||
|
@ -8,6 +9,7 @@ import (
|
|||
"gorm.io/driver/postgres"
|
||||
"gorm.io/driver/sqlite"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/logger"
|
||||
"log"
|
||||
"moul.io/zapgorm2"
|
||||
"os"
|
||||
|
@ -16,7 +18,9 @@ import (
|
|||
)
|
||||
|
||||
type appConfig struct {
|
||||
timeZone string
|
||||
timeZone string
|
||||
isDevelopment bool
|
||||
isDebug bool
|
||||
}
|
||||
|
||||
type serverConfig struct {
|
||||
|
@ -74,34 +78,77 @@ type Environment struct {
|
|||
}
|
||||
|
||||
func bootstrapEnvironment() *Environment {
|
||||
// logging (configured independently)
|
||||
var logger *zap.Logger
|
||||
var err error
|
||||
level := zap.NewAtomicLevelAt(zapcore.InfoLevel)
|
||||
|
||||
envLoggingLevel := os.Getenv(envLoggingLevel)
|
||||
if envLoggingLevel != "" {
|
||||
if level, err = zap.ParseAtomicLevel(envLoggingLevel); err != nil {
|
||||
log.Fatalf("Cannot parse logging level: %v", err)
|
||||
// bootstrap logging (configured independently and required before any other action)
|
||||
loggingLevel := os.Getenv(envLoggingLevel)
|
||||
if loggingLevel == "" {
|
||||
if err = os.Setenv(envLoggingLevel, loggingLevelDefault); err != nil {
|
||||
log.Fatalf("Cannot set logging level: %v", err)
|
||||
}
|
||||
loggingLevel = os.Getenv(envLoggingLevel)
|
||||
}
|
||||
var level zap.AtomicLevel
|
||||
if level, err = zap.ParseAtomicLevel(loggingLevel); err != nil {
|
||||
log.Fatalf("Cannot parse logging level: %v", err)
|
||||
}
|
||||
loggingEncoding := os.Getenv(envLoggingEncoding)
|
||||
if loggingEncoding == "" {
|
||||
if err = os.Setenv(envLoggingEncoding, loggingEncodingDefault); err != nil {
|
||||
log.Fatalf("Cannot set logging encoding: %v", err)
|
||||
}
|
||||
loggingEncoding = os.Getenv(envLoggingEncoding)
|
||||
}
|
||||
if loggingEncoding != "json" && loggingEncoding != "console" {
|
||||
log.Fatalf("Cannot parse logging level: %v", errors.New("only 'json' and 'console' are allowed logging encodings"))
|
||||
}
|
||||
isDebug := level.Level() == zap.DebugLevel
|
||||
isDevelopment := os.Getenv(envDevelopment) == "true"
|
||||
var loggingEncoderConfig zapcore.EncoderConfig
|
||||
if loggingEncoding == "json" {
|
||||
loggingEncoderConfig = zap.NewProductionEncoderConfig()
|
||||
} else {
|
||||
loggingEncoderConfig = zap.NewDevelopmentEncoderConfig()
|
||||
}
|
||||
|
||||
var zapConfig *zap.Config
|
||||
if isDebug {
|
||||
zapConfig = &zap.Config{
|
||||
Level: level,
|
||||
Development: isDevelopment,
|
||||
Encoding: loggingEncoding,
|
||||
EncoderConfig: loggingEncoderConfig,
|
||||
OutputPaths: []string{"stderr"},
|
||||
ErrorOutputPaths: []string{"stderr"},
|
||||
}
|
||||
} else {
|
||||
zapConfig = &zap.Config{
|
||||
Level: level,
|
||||
Development: isDevelopment,
|
||||
Sampling: &zap.SamplingConfig{
|
||||
Initial: 100,
|
||||
Thereafter: 100,
|
||||
},
|
||||
Encoding: loggingEncoding,
|
||||
EncoderConfig: loggingEncoderConfig,
|
||||
OutputPaths: []string{"stderr"},
|
||||
ErrorOutputPaths: []string{"stderr"},
|
||||
}
|
||||
}
|
||||
|
||||
logger, err = zap.NewDevelopment(zap.IncreaseLevel(level))
|
||||
if err != nil {
|
||||
log.Fatalf("Can't initialize logger: %v", err)
|
||||
}
|
||||
// flushes buffer, if any
|
||||
defer logger.Sync()
|
||||
|
||||
zap.ReplaceGlobals(logger)
|
||||
zapLogger := zap.Must(zapConfig.Build())
|
||||
defer zapLogger.Sync()
|
||||
zap.ReplaceGlobals(zapLogger)
|
||||
|
||||
// assign defaults from given environment variables and validate
|
||||
bootstrapFromEnvironmentAndValidate()
|
||||
|
||||
// parse environment variables in actual configuration structs
|
||||
// app config
|
||||
appConfig := &appConfig{
|
||||
timeZone: os.Getenv(envTZ),
|
||||
ac := &appConfig{
|
||||
timeZone: os.Getenv(envTZ),
|
||||
isDebug: isDebug,
|
||||
isDevelopment: isDevelopment,
|
||||
}
|
||||
|
||||
// server config
|
||||
|
@ -137,7 +184,7 @@ func bootstrapEnvironment() *Environment {
|
|||
corsAllowHeaders: []string{os.Getenv(envCorsAllowHeaders)},
|
||||
}
|
||||
|
||||
authConfig := &authConfig{
|
||||
authC := &authConfig{
|
||||
adminUser: os.Getenv(envAdminUser),
|
||||
adminPassword: os.Getenv(envAdminPassword),
|
||||
}
|
||||
|
@ -179,24 +226,29 @@ func bootstrapEnvironment() *Environment {
|
|||
zap.L().Sugar().Fatalln("Invalid webhook token length. Reason: must be a positive number")
|
||||
}
|
||||
|
||||
webhookConfig := &webhookConfig{
|
||||
wc := &webhookConfig{
|
||||
tokenLength: webhookTokenLength,
|
||||
}
|
||||
|
||||
prometheusConfig := &prometheusConfig{
|
||||
pc := &prometheusConfig{
|
||||
enabled: os.Getenv(envPrometheusEnabled) == "true",
|
||||
path: os.Getenv(envPrometheusMetricsPath),
|
||||
secureTokenEnabled: os.Getenv(envPrometheusSecureTokenEnabled) == "true",
|
||||
secureToken: os.Getenv(envPrometheusSecureToken),
|
||||
}
|
||||
|
||||
if prometheusConfig.enabled && prometheusConfig.secureTokenEnabled {
|
||||
if pc.enabled && pc.secureTokenEnabled {
|
||||
failIfEnvKeyNotPresent(envPrometheusSecureToken)
|
||||
}
|
||||
|
||||
// database setup
|
||||
gormLogger := zapgorm2.New(logger)
|
||||
gormLogger.SetAsDefault()
|
||||
gormConfig := &gorm.Config{Logger: logger.Default.LogMode(logger.Silent)}
|
||||
if isDebug && isDevelopment {
|
||||
gormZapLogger := zap.Must(zapConfig.Build())
|
||||
defer gormZapLogger.Sync()
|
||||
gormLogger := zapgorm2.New(gormZapLogger)
|
||||
gormConfig = &gorm.Config{Logger: gormLogger}
|
||||
}
|
||||
|
||||
var db *gorm.DB
|
||||
zap.L().Sugar().Infof("Using database type '%s'", os.Getenv(envDbType))
|
||||
|
@ -213,7 +265,7 @@ func bootstrapEnvironment() *Environment {
|
|||
dbFile := os.Getenv(envDbSqliteFile)
|
||||
zap.L().Sugar().Infof("Using database file '%s'", dbFile)
|
||||
|
||||
if db, err = gorm.Open(sqlite.Open(dbFile), &gorm.Config{Logger: gormLogger}); err != nil {
|
||||
if db, err = gorm.Open(sqlite.Open(dbFile), gormConfig); err != nil {
|
||||
zap.L().Sugar().Fatalf("Could not setup database: %v", err)
|
||||
}
|
||||
|
||||
|
@ -237,7 +289,7 @@ func bootstrapEnvironment() *Environment {
|
|||
}
|
||||
|
||||
dsn := fmt.Sprintf("host=%v user=%v password=%v dbname=%v port=%v sslmode=disable TimeZone=%v", host, dbUser, dbPass, dbName, port, dbTZ)
|
||||
if db, err = gorm.Open(postgres.Open(dsn), &gorm.Config{Logger: gormLogger}); err != nil {
|
||||
if db, err = gorm.Open(postgres.Open(dsn), gormConfig); err != nil {
|
||||
zap.L().Sugar().Fatalf("Could not setup database: %v", err)
|
||||
}
|
||||
} else {
|
||||
|
@ -248,13 +300,13 @@ func bootstrapEnvironment() *Environment {
|
|||
zap.L().Sugar().Fatalf("Could not setup database")
|
||||
}
|
||||
|
||||
env := &Environment{appConfig: appConfig,
|
||||
authConfig: authConfig,
|
||||
env := &Environment{appConfig: ac,
|
||||
authConfig: authC,
|
||||
serverConfig: sc,
|
||||
taskConfig: tc,
|
||||
lockConfig: lc,
|
||||
webhookConfig: webhookConfig,
|
||||
prometheusConfig: prometheusConfig,
|
||||
webhookConfig: wc,
|
||||
prometheusConfig: pc,
|
||||
db: db}
|
||||
|
||||
if err = env.db.AutoMigrate(&Update{}, &Webhook{}, &Event{}); err != nil {
|
||||
|
|
Loading…
Reference in a new issue