Add file logging via LOGGING_DIRECTORY and add an example systemd service for native deployments

Varakh 2024-01-25 19:57:41 +01:00
parent ceee8ec6b4
commit d71e56db7c
6 changed files with 145 additions and 54 deletions

@@ -127,51 +127,52 @@ via web interface or API.

The following environment variables can be used to modify application behavior.

| Variable | Purpose | Default/Description |
|:-----------------------------------|:---------|:--------------------|
| `TZ` | The time zone (**recommended** to set it properly, background tasks depend on it) | Defaults to `Europe/Berlin`, can be any time zone according to the _tz database_ |
| `ADMIN_USER` | Admin user name for login | Not set by default, you need to explicitly set it to a user name |
| `ADMIN_PASSWORD` | Admin password for login | Not set by default, you need to explicitly set it to a secure random value |
| | | |
| `DB_TYPE` | The database type (Postgres is **recommended**) | Defaults to `sqlite`, possible values are `sqlite` or `postgres` |
| `DB_SQLITE_FILE` | Path to the SQLite file | Defaults to `<XDG_DATA_DIR>/upda/upda.db`, e.g. `~/.local/share/upda/upda.db` |
| `DB_POSTGRES_HOST` | The Postgres host | Postgres host address, defaults to `localhost` |
| `DB_POSTGRES_PORT` | The Postgres port | Postgres port, defaults to `5432` |
| `DB_POSTGRES_NAME` | The Postgres database name | Postgres database name, needs to be set |
| `DB_POSTGRES_TZ` | The Postgres time zone | Postgres time zone setting, defaults to `Europe/Berlin` |
| `DB_POSTGRES_USER` | The Postgres user | Postgres user name, needs to be set |
| `DB_POSTGRES_PASSWORD` | The Postgres password | Postgres user password, needs to be set |
| | | |
| `SERVER_PORT` | Port | Defaults to `8080` |
| `SERVER_LISTEN` | Server's listen address | Defaults to empty, which equals `0.0.0.0` |
| `SERVER_TLS_ENABLED` | If the server uses TLS | Defaults to `false` |
| `SERVER_TLS_CERT_PATH` | When TLS is enabled, provide the certificate path | |
| `SERVER_TLS_KEY_PATH` | When TLS is enabled, provide the key path | |
| `SERVER_TIMEOUT` | Timeout the server waits before shutting down to end any pending tasks | Defaults to `1s` (1 second), qualifier can be `s = second`, `m = minute`, `h = hour` prefixed with a positive number |
| `CORS_ALLOW_ORIGIN` | CORS configuration | Defaults to `*` |
| `CORS_ALLOW_METHODS` | CORS configuration | Defaults to `GET, POST, PUT, PATCH, DELETE, OPTIONS` |
| `CORS_ALLOW_HEADERS` | CORS configuration | Defaults to `Authorization, Content-Type` |
| | | |
| `LOGGING_LEVEL` | Logging level. Possible values are `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. Setting it to `debug` enables high verbosity output. | Defaults to `info` |
| `LOGGING_ENCODING` | Logging encoding. Possible values are `console` and `json` | Defaults to `json` |
| `LOGGING_DIRECTORY` | Logging directory. When set, logs are written to a file called `upda.log` in this directory in addition to the standard output. Ensure that upda has access permissions. Use an external program for log rotation if desired. | |
| | | |
| `WEBHOOKS_TOKEN_LENGTH` | The length of the webhook token | Defaults to `16`, positive number |
| | | |
| `TASK_UPDATE_CLEAN_STALE_ENABLED` | If a background task should run to do housekeeping of stale (ignored/approved) updates in the database | Defaults to `false` |
| `TASK_UPDATE_CLEAN_STALE_INTERVAL` | Interval at which a background task does housekeeping by deleting stale (ignored/approved) updates from the database | Defaults to `1h` (1 hour), qualifier can be `s = second`, `m = minute`, `h = hour` prefixed with a positive number |
| `TASK_UPDATE_CLEAN_STALE_MAX_AGE` | Age at which stale (ignored/approved) updates are deleted by the background task (the _updatedAt_ attribute decides) | Defaults to `168h` (168 hours = 1 week), qualifier can be `s = second`, `m = minute`, `h = hour` prefixed with a positive number |
| `TASK_EVENT_CLEAN_STALE_ENABLED` | If a background task should run to do housekeeping of stale (old) events in the database | Defaults to `false` |
| `TASK_EVENT_CLEAN_STALE_INTERVAL` | Interval at which a background task does housekeeping by deleting stale (old) events from the database | Defaults to `8h` (8 hours), qualifier can be `s = second`, `m = minute`, `h = hour` prefixed with a positive number |
| `TASK_EVENT_CLEAN_STALE_MAX_AGE` | Age at which stale (old) events are deleted by the background task (the _updatedAt_ attribute decides) | Defaults to `2190h` (2190 hours = 3 months), qualifier can be `s = second`, `m = minute`, `h = hour` prefixed with a positive number |
| `TASK_PROMETHEUS_REFRESH_INTERVAL` | Interval at which a background task updates custom metrics | Defaults to `60s` (60 seconds), qualifier can be `s = second`, `m = minute`, `h = hour` prefixed with a positive number |
| | | |
| `LOCK_REDIS_ENABLED` | If locking via Redis (multiple instances) is enabled. Requires Redis; otherwise in-memory locks are used. | Defaults to `false` |
| `LOCK_REDIS_URL` | If locking via Redis is enabled, this should point to a resolvable Redis instance, e.g. `redis://<user>:<pass>@localhost:6379/<db>` | |
| | | |
| `PROMETHEUS_ENABLED` | If Prometheus metrics are exposed | Defaults to `false` |
| `PROMETHEUS_METRICS_PATH` | Defines the metrics endpoint path | Defaults to `/metrics` |
| `PROMETHEUS_SECURE_TOKEN_ENABLED` | If the Prometheus metrics endpoint is protected by a token when enabled (**recommended**) | Defaults to `true` |
| `PROMETHEUS_SECURE_TOKEN` | The token securing the metrics endpoint when enabled (**recommended**) | Not set by default, you need to explicitly set it to a secure random value |
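For example, file logging could be enabled for a native run as in the following sketch. The log directory is illustrative and the binary location matches the systemd example further below; adjust both to your setup:

```sh
# Illustrative only: write logs to /var/log/upda/upda.log in addition to the standard output
export TZ=Europe/Berlin
export LOGGING_LEVEL=info
export LOGGING_DIRECTORY=/var/log/upda
/usr/local/bin/upda-server
```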
## 3rd party integrations

@@ -1,5 +1,13 @@

# Deployment

## Native

Download the binary for your operating system, then execute it locally.

See the provided systemd service example [upda.service](./contrib/upda.service) to deploy on a UNIX/Linux machine.
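A native deployment could look like the following sketch. The binary name and install paths are assumptions and should be adjusted to your environment; the unit file itself is shown further below:

```sh
# Illustrative sketch of a native installation on a systemd-based Linux host
install -m 0755 upda /usr/local/bin/upda-server
install -m 0644 _doc/contrib/upda.service /etc/systemd/system/upda.service

# all environment variables from the configuration table go into this file
touch /etc/upda.conf

systemctl daemon-reload
systemctl enable --now upda.service
```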
## Container

Use one of the provided `docker-compose` examples and edit it to your needs. Then issue the `docker compose up` command.

All applications should be up and running.
@@ -10,7 +18,7 @@ Default image user is `appuser` (`uid=2033`) and group is `appgroup` (`gid=2033`)

The following examples are available

### Postgres

```yaml
version: '3.9'

@@ -78,7 +86,7 @@ volumes:
    external: false
```

### SQLite

```yaml
version: '3.9'

@@ -111,14 +119,13 @@ services:
    image: git.myservermanager.com/varakh/upda:latest
    environment:
      - TZ=Europe/Berlin
      - ADMIN_USER=admin
      - ADMIN_PASSWORD=changeit
    restart: unless-stopped
    networks:
      - internal
    volumes:
      - upda-app-vol:/home/appuser
    ports:
      - "127.0.0.1:8080:8080"

@@ -127,7 +134,7 @@ volumes:
    external: false
```

## Reverse proxy

You may want to use a reverse proxy in front of it on your host, e.g., nginx. Here's a configuration snippet which should do the work.

`_doc/contrib/upda.service` (new file)

@@ -0,0 +1,14 @@
[Unit]
Description=upda
After=network.target

[Service]
Type=simple
# Using a dynamic user drops privileges and sets some security defaults
# See https://www.freedesktop.org/software/systemd/man/latest/systemd.exec.html
DynamicUser=yes
# All environment variables for upda can be put into this file
# upda picks them up (on each restart)
EnvironmentFile=/etc/upda.conf
# Requires the upda binary to be installed at this location, e.g., via package manager or by copying it over manually
ExecStart=/usr/local/bin/upda-server
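The unit reads its configuration from `/etc/upda.conf`. A minimal sketch of such a file, reusing variables from the configuration table above (all values are placeholders, pick your own):

```ini
TZ=Europe/Berlin
ADMIN_USER=admin
ADMIN_PASSWORD=changeit
# optional file logging; the directory must be writable for the service user
LOGGING_DIRECTORY=/var/log/upda
```

Note that with `DynamicUser=yes` the service cannot write to arbitrary paths; adding `LogsDirectory=upda` to the `[Service]` section makes `/var/log/upda` writable for the dynamic user, or you can point `LOGGING_DIRECTORY` to another writable location.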

@@ -8,6 +8,9 @@ const (
    envLoggingEncoding     = "LOGGING_ENCODING"
    loggingEncodingDefault = "json"

    envLoggingDirectory    = "LOGGING_DIRECTORY"
    loggingFileNameDefault = "upda.log"

    envTZ     = "TZ"
    tzDefault = "Europe/Berlin"

@@ -3,6 +3,7 @@ package server

import (
    "errors"
    "fmt"
    "git.myservermanager.com/varakh/upda/util"
    "github.com/adrg/xdg"
    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
@@ -13,6 +14,7 @@ import (
    "log"
    "moul.io/zapgorm2"
    "os"
    "path/filepath"
    "strconv"
    "time"
)

@@ -111,6 +113,19 @@ func bootstrapEnvironment() *Environment {
        loggingEncoderConfig = zap.NewDevelopmentEncoderConfig()
    }

    // always log to stderr; optionally also log to <LOGGING_DIRECTORY>/upda.log
    logPaths := []string{"stderr"}

    loggingDirectory := os.Getenv(envLoggingDirectory)
    if loggingDirectory != "" {
        logFile := filepath.Join(loggingDirectory, loggingFileNameDefault)
        if err = util.CreateFileWithParent(logFile); err != nil {
            log.Fatalf("Log file '%s' cannot be created: %v", logFile, err)
        }
        logPaths = append(logPaths, logFile)
    }

    var zapConfig *zap.Config
    if isDebug {
        zapConfig = &zap.Config{
@@ -118,8 +133,8 @@ func bootstrapEnvironment() *Environment {
            Development:      isDevelopment,
            Encoding:         loggingEncoding,
            EncoderConfig:    loggingEncoderConfig,
            OutputPaths:      logPaths,
            ErrorOutputPaths: logPaths,
        }
    } else {
        zapConfig = &zap.Config{
@@ -131,8 +146,8 @@ func bootstrapEnvironment() *Environment {
            },
            Encoding:         loggingEncoding,
            EncoderConfig:    loggingEncoderConfig,
            OutputPaths:      logPaths,
            ErrorOutputPaths: logPaths,
        }
    }

@@ -265,6 +280,10 @@ func bootstrapEnvironment() *Environment {
    dbFile := os.Getenv(envDbSqliteFile)
    zap.L().Sugar().Infof("Using database file '%s'", dbFile)

    if err = util.CreateFileWithParent(dbFile); err != nil {
        zap.L().Sugar().Fatalf("Database file '%s' cannot be created: %v", dbFile, err)
    }

    if db, err = gorm.Open(sqlite.Open(dbFile), gormConfig); err != nil {
        zap.L().Sugar().Fatalf("Could not setup database: %v", err)
    }

`util/file.go` (new file)

@@ -0,0 +1,47 @@
package util

import (
    "errors"
    "fmt"
    "os"
    "path/filepath"
)

// CreateFileWithParent creates a file, creating all parent directories first
func CreateFileWithParent(file string) error {
    if file == "" {
        return errors.New("assert: blank values are not allowed for 'file'")
    }

    var err error
    parentDir := filepath.Dir(file)
    if err = os.MkdirAll(parentDir, os.ModePerm); err != nil {
        return fmt.Errorf("cannot create parent directory '%v': %w", parentDir, err)
    }

    // only create the file if it does not exist yet
    if _, err = os.Stat(file); errors.Is(err, os.ErrNotExist) {
        var f *os.File
        if f, err = os.Create(file); err != nil {
            return fmt.Errorf("cannot create file '%v': %w", file, err)
        }
        defer f.Close()
    }

    return err
}

// CreateDirectoryRecursively creates a directory recursively
func CreateDirectoryRecursively(dir string) error {
    if dir == "" {
        return errors.New("assert: blank values are not allowed for 'dir'")
    }

    var err error
    if err = os.MkdirAll(dir, os.ModePerm); err != nil {
        return fmt.Errorf("cannot create directory '%v': %w", dir, err)
    }

    return err
}
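For illustration, a minimal test sketch of how `CreateFileWithParent` is expected to behave; the nested path is arbitrary and the test is only an assumption, not part of this commit:

```go
package util

import (
    "path/filepath"
    "testing"
)

func TestCreateFileWithParent(t *testing.T) {
    // nested directories below a temporary directory do not exist yet
    target := filepath.Join(t.TempDir(), "nested", "dir", "upda.log")

    if err := CreateFileWithParent(target); err != nil {
        t.Fatalf("expected file and parent directories to be created, got: %v", err)
    }

    // calling the helper again for an existing file must not fail
    if err := CreateFileWithParent(target); err != nil {
        t.Fatalf("expected no error for an existing file, got: %v", err)
    }
}
```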