9 changes: 0 additions & 9 deletions .buildkite/pipeline.yml

This file was deleted.

12 changes: 0 additions & 12 deletions .gitignore

This file was deleted.

18 changes: 6 additions & 12 deletions Dockerfile
@@ -1,13 +1,7 @@
-FROM golang:1.10.0-alpine3.7
+FROM golang:1.14-alpine AS builder
+ADD . /build
+RUN cd /build && go install -mod=mod

-RUN apk add --update --no-cache git
-
-ADD . /go/src/github.com/adragoset/nomad_follower
-
-RUN set -ex \
-    && go get github.com/kardianos/govendor \
-    && cd /go/src/github.com/adragoset/nomad_follower \
-    && govendor sync \
-    && go install
-
-CMD nomad_follower
+FROM alpine:latest
+COPY --from=builder /go/bin/nomad_follower .
+CMD ["./nomad_follower"]
25 changes: 12 additions & 13 deletions README.md
@@ -2,18 +2,17 @@
Log forwarder for aggregating allocation logs from nomad worker agents.

## Running the application
Run the application on each worker in a nomad cluster. nomad_follower will follow all allocations on the worker and tail the allocation logs to the aggregate log file.

```docker pull devopsintralox/nomad_follower:latest```

```docker run -v log_folder:/log -e LOG_FILE="/logs/nomad-forwarder.log" devopsintralox/nomad_follower:latest```

nomad_follower will stop following completed allocations and will start following new allocations as they become available.

nomad_follower can be deployed with nomad in a system task group along with a log collector. The aggregate log file can then be shared with the log collector by writing the aggregate log file into the shared allocation folder.

nomad_follower formats log entries as json formatted logs. It will convert string formatted logs to json formatted logs by passing the log entry in the ```message``` key.

nomad_follower adds a ```service_name``` key that contains the listed service names for a task.
Run the application on each worker in a nomad cluster.
```
docker pull sas1024/nomad_follower:latest
docker run -v log_folder:/log -e LOG_TAG="logging" -e LOG_FILE="/log/nomad-forwarder.log" sas1024/nomad_follower:latest
```

nomad_follower:
- will follow all allocations on the worker whose services carry the ```nomad_follower``` tag (configurable via the `LOG_TAG` environment variable) and tail their logs to the aggregate log file.
- will stop following completed allocations and will start following new allocations as they become available.
- can be deployed with nomad in a system task group along with a log collector. The aggregate log file can then be shared with the log collector by writing the aggregate log file into the shared allocation folder.
- formats log entries as JSON, converting plain string log lines to JSON by placing the original entry in the ```message``` key.
- adds a ```service_name``` key that contains the listed service names for a task.

Using nomad_follower prevents the cluster operator from having to run a log collector in every task group for every task on a worker while still allowing nomad to handle the logs for each allocation.
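To illustrate the JSON formatting described above, here is a minimal, hypothetical Go sketch (not code from this repository) of how a plain-text log line could be wrapped into an entry with ```message``` and ```service_name``` keys:

```go
// Hypothetical sketch only: shows the shape of a forwarded log entry,
// assuming a plain-text line from a task whose services are listed in tags.
package main

import (
	"encoding/json"
	"fmt"
)

func wrapLine(line string, serviceNames string) (string, error) {
	entry := map[string]string{
		"message":      line,         // original plain-text log line
		"service_name": serviceNames, // service names registered for the task
	}
	b, err := json.Marshal(entry)
	if err != nil {
		return "", err
	}
	return string(b), nil
}

func main() {
	out, _ := wrapLine("2021-01-01T12:00:00Z INFO something happened", "random")
	fmt.Println(out)
	// {"message":"2021-01-01T12:00:00Z INFO something happened","service_name":"random"}
}
```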
26 changes: 14 additions & 12 deletions allocationFollower.go
@@ -13,18 +13,18 @@ import (
var SaveFormatVersion = 1

type SavePoint struct {
NodeID string `json:"node_id"`
SaveFormatVersion int `json:"save_format_version"`
SavedAllocs map[string]SavedAlloc `json:"saved_allocs"`
NodeID string `json:"node_id"`
SaveFormatVersion int `json:"save_format_version"`
SavedAllocs map[string]SavedAlloc `json:"saved_allocs"`
}

type SavedAlloc struct {
ID string `json:"alloc_id"`
ID string `json:"alloc_id"`
SavedTasks map[string]SavedTask `json:"saved_tasks"`
}

type SavedTask struct {
Key string `json:"key"`
Key string `json:"key"`
StdOutOffsets map[string]int64 `json:"stdout_offsets"`
StdErrOffsets map[string]int64 `json:"stderr_offsets"`
}
@@ -38,16 +38,18 @@ type AllocationFollower struct {
Quit chan bool
Ticker *time.Ticker
log Logger
logTag string
}

//NewAllocationFollower Creates a new allocation follower
func NewAllocationFollower(nomad NomadConfig, logger Logger) (a *AllocationFollower, e error) {
func NewAllocationFollower(nomad NomadConfig, logger Logger, logTag string) (a *AllocationFollower, e error) {
return &AllocationFollower{
Allocations: make(map[string]*FollowedAllocation),
Nomad: nomad,
NodeID: "",
Quit: make(chan bool),
log: logger,
Nomad: nomad,
NodeID: "",
Quit: make(chan bool),
log: logger,
logTag: logTag,
}, nil
}

@@ -73,7 +75,7 @@ func (a *AllocationFollower) SetNodeID() error {
}

//Start registers and de registers allocation followers
func (a *AllocationFollower) Start(duration time.Duration, savePath string) (<-chan string) {
func (a *AllocationFollower) Start(duration time.Duration, savePath string) <-chan string {
logContext := "AllocationFollower.Start"
a.Ticker = time.NewTicker(duration)
a.OutChan = make(chan string)
@@ -238,7 +240,7 @@ func (a *AllocationFollower) collectAllocations(save *SavePoint) error {
runState := alloc.DesiredStatus == "run" || alloc.ClientStatus == "running"
if record == nil && runState {
// handle new alloc records w/ potentially saved state
falloc := NewFollowedAllocation(alloc, a.Nomad, a.OutChan, a.log)
falloc := NewFollowedAllocation(alloc, a.Nomad, a.OutChan, a.log, a.logTag)
if save != nil {
a.log.Debug("AllocationFollower.collectAllocations", "Restoring saved allocations")
savedAlloc := save.SavedAllocs[alloc.ID]
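For reference, the save-file layout implied by the `SavePoint`, `SavedAlloc`, and `SavedTask` structs above can be sketched as a small, self-contained program; the types are copied from this diff, while the values are purely illustrative:

```go
// Sketch of the persisted save file. Types mirror the structs in this diff;
// all values (node ID, alloc ID, offsets) are hypothetical.
package main

import (
	"encoding/json"
	"fmt"
)

type SavedTask struct {
	Key           string           `json:"key"`
	StdOutOffsets map[string]int64 `json:"stdout_offsets"`
	StdErrOffsets map[string]int64 `json:"stderr_offsets"`
}

type SavedAlloc struct {
	ID         string               `json:"alloc_id"`
	SavedTasks map[string]SavedTask `json:"saved_tasks"`
}

type SavePoint struct {
	NodeID            string                `json:"node_id"`
	SaveFormatVersion int                   `json:"save_format_version"`
	SavedAllocs       map[string]SavedAlloc `json:"saved_allocs"`
}

func main() {
	sp := SavePoint{
		NodeID:            "node-1234",
		SaveFormatVersion: 1,
		SavedAllocs: map[string]SavedAlloc{
			"alloc-abcd": {
				ID: "alloc-abcd",
				SavedTasks: map[string]SavedTask{
					"random:random": {
						Key:           "random:random",
						StdOutOffsets: map[string]int64{"0": 2048},
						StdErrOffsets: map[string]int64{"0": 0},
					},
				},
			},
		},
	}
	b, _ := json.MarshalIndent(sp, "", "  ")
	fmt.Println(string(b))
}
```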
25 changes: 0 additions & 25 deletions build.sh

This file was deleted.

82 changes: 0 additions & 82 deletions deploy/allocation_follower.json.tpl

This file was deleted.

9 changes: 0 additions & 9 deletions deploy/config.hcl.tpl

This file was deleted.

31 changes: 31 additions & 0 deletions example_logging.nomad
@@ -0,0 +1,31 @@
job "random-logger-example" {
datacenters = ["dc1"]
type = "service"

group "random" {
count = 1

task "random" {
driver = "docker"
config {
image = "chentex/random-logger"
port_map {
http = 8088
}
}
resources {
network {
port "http" {}
}
}
service {
tags = [
"nomad_follower",
"test"
]
name = "random"
port = "http"
}
}
}
}
39 changes: 25 additions & 14 deletions followedAllocation.go
@@ -2,6 +2,7 @@ package main

import (
"fmt"

nomadApi "github.com/hashicorp/nomad/api"
)

@@ -13,17 +14,19 @@ type FollowedAllocation struct {
Quit chan struct{}
Tasks []*FollowedTask
log Logger
logTag string
}

//NewFollowedAllocation creates a new followed allocation
func NewFollowedAllocation(alloc *nomadApi.Allocation, nomad NomadConfig, outChan chan string, logger Logger) *FollowedAllocation {
func NewFollowedAllocation(alloc *nomadApi.Allocation, nomad NomadConfig, outChan chan string, logger Logger, logTag string) *FollowedAllocation {
return &FollowedAllocation{
Alloc: alloc,
Nomad: nomad,
Alloc: alloc,
Nomad: nomad,
OutputChan: outChan,
Quit: make(chan struct{}),
Tasks: make([]*FollowedTask, 0),
log: logger,
Quit: make(chan struct{}),
Tasks: make([]*FollowedTask, 0),
log: logger,
logTag: logTag,
}
}

@@ -38,15 +41,23 @@ func (f *FollowedAllocation) Start(save *SavedAlloc) {
for _, tg := range f.Alloc.Job.TaskGroups {
for _, task := range tg.Tasks {
ft := NewFollowedTask(f.Alloc, *tg.Name, task, f.Nomad, f.Quit, f.OutputChan, f.log)
if save != nil {
f.log.Debug("FollowedAllocation.Start", "Restoring saved allocation data")
key := fmt.Sprintf("%s:%s", *tg.Name, task.Name)
savedTask := save.SavedTasks[key]
ft.Start(&savedTask)
} else {
ft.Start(nil)
skip := true
for _, s := range ft.logTemplate.ServiceTags {
if s == f.logTag {
skip = false
}
}
if !skip {
if save != nil {
f.log.Debug("FollowedAllocation.Start", "Restoring saved allocation data")
key := fmt.Sprintf("%s:%s", *tg.Name, task.Name)
savedTask := save.SavedTasks[key]
ft.Start(&savedTask)
} else {
ft.Start(nil)
}
f.Tasks = append(f.Tasks, ft)
}
f.Tasks = append(f.Tasks, ft)
}
}
}
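The new filtering in `FollowedAllocation.Start` only follows a task when one of its service tags matches the configured log tag (the `ft.logTemplate.ServiceTags` field referenced above is assumed to hold those tags). A minimal standalone sketch of that check:

```go
// Standalone sketch of the tag check added in Start: a task is followed
// only if one of its service tags equals the configured log tag.
package main

import "fmt"

func shouldFollow(serviceTags []string, logTag string) bool {
	for _, tag := range serviceTags {
		if tag == logTag {
			return true
		}
	}
	return false
}

func main() {
	// Tags as in example_logging.nomad; "nomad_follower" is the default log tag.
	fmt.Println(shouldFollow([]string{"nomad_follower", "test"}, "nomad_follower")) // true
	fmt.Println(shouldFollow([]string{"http"}, "nomad_follower"))                   // false
}
```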