slight improvements to agent memory usage

Henry Dollman
2024-08-18 17:45:39 -04:00
parent 683dc74cbf
commit b5607025f7
2 changed files with 113 additions and 75 deletions
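
The memory-usage changes below boil down to two patterns: reuse allocations across requests with sync.Pool (one pool for the per-container Stats structs, one for the bytes.Buffer used to read Docker API responses), and stream JSON straight to the SSH session with json.NewEncoder instead of building an intermediate byte slice via json.Marshal. A minimal, self-contained sketch of the pooling pattern, using a hypothetical Stats type rather than the project's own types:

package main

import (
    "bytes"
    "fmt"
    "sync"
)

// Stats stands in for the per-container stats struct that gets reused.
type Stats struct {
    Name string
    Cpu  float64
}

var (
    // statsPool hands back previously used *Stats values instead of allocating new ones.
    statsPool = sync.Pool{New: func() interface{} { return new(Stats) }}
    // bufPool does the same for response-body read buffers.
    bufPool = sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}
)

func collect(name string, cpu float64) *Stats {
    s := statsPool.Get().(*Stats) // may be a recycled value with stale fields
    s.Name = name                 // so overwrite every field before use
    s.Cpu = cpu
    return s
}

func main() {
    buf := bufPool.Get().(*bytes.Buffer)
    defer bufPool.Put(buf)
    buf.Reset() // pooled buffers keep their old contents until Reset

    s := collect("web", 12.5)
    fmt.Println(s.Name, s.Cpu, buf.Len())
    statsPool.Put(s) // return it only after the consumer is done with it
}

The diff follows the same caveats: Reset the pooled buffer before each read, overwrite every field of the pooled Stats, and only Put the stats back (returnStatsToPool) after the JSON response has been written to the session.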

View File

@@ -29,14 +29,14 @@ func main() {
         log.Fatal("KEY environment variable is not set")
     }
-    port := ":45876"
-    if p, exists := os.LookupEnv("PORT"); exists {
+    addr := ":45876"
+    if portEnvVar, exists := os.LookupEnv("PORT"); exists {
         // allow passing an address in the form of "127.0.0.1:45876"
-        if !strings.Contains(p, ":") {
-            p = ":" + p
+        if !strings.Contains(portEnvVar, ":") {
+            portEnvVar = ":" + portEnvVar
         }
-        port = p
+        addr = portEnvVar
     }
-    agent.NewAgent(pubKey, port).Run()
+    agent.NewAgent(pubKey, addr).Run()
 }

View File

@@ -3,6 +3,7 @@ package agent
 import (
     "beszel/internal/entities/container"
     "beszel/internal/entities/system"
+    "bytes"
     "context"
     "encoding/json"
     "fmt"
@@ -26,27 +27,39 @@ import (
     psutilNet "github.com/shirou/gopsutil/v4/net"
 )
 
-var containerStatsMap = make(map[string]*container.PrevContainerStats)
-
 type Agent struct {
-    port                string
+    addr                string
     pubKey              []byte
     sem                 chan struct{}
+    containerStatsMap   map[string]*container.PrevContainerStats
     containerStatsMutex *sync.Mutex
-    diskIoStats         system.DiskIoStats
-    netIoStats          system.NetIoStats
+    diskIoStats         *system.DiskIoStats
+    netIoStats          *system.NetIoStats
     dockerClient        *http.Client
+    containerStatsPool  *sync.Pool
+    bufferPool          *sync.Pool
 }
 
-func NewAgent(pubKey []byte, port string) *Agent {
+func NewAgent(pubKey []byte, addr string) *Agent {
     return &Agent{
+        addr:                addr,
         pubKey:              pubKey,
         sem:                 make(chan struct{}, 15),
-        port:                port,
+        containerStatsMap:   make(map[string]*container.PrevContainerStats),
         containerStatsMutex: &sync.Mutex{},
-        diskIoStats:         system.DiskIoStats{},
-        netIoStats:          system.NetIoStats{},
+        diskIoStats:         &system.DiskIoStats{},
+        netIoStats:          &system.NetIoStats{},
         dockerClient:        newDockerClient(),
+        containerStatsPool: &sync.Pool{
+            New: func() interface{} {
+                return new(container.Stats)
+            },
+        },
+        bufferPool: &sync.Pool{
+            New: func() interface{} {
+                return new(bytes.Buffer)
+            },
+        },
     }
 }
@@ -156,14 +169,14 @@ func (a *Agent) getDockerStats() ([]*container.Stats, error) {
     resp, err := a.dockerClient.Get("http://localhost/containers/json")
     if err != nil {
         a.closeIdleConnections(err)
-        return []*container.Stats{}, err
+        return nil, err
     }
     defer resp.Body.Close()
 
     var containers []*container.ApiInfo
     if err := json.NewDecoder(resp.Body).Decode(&containers); err != nil {
         log.Printf("Error decoding containers: %+v\n", err)
-        return []*container.Stats{}, err
+        return nil, err
     }
 
     containerStats := make([]*container.Stats, 0, len(containers))
@@ -206,10 +219,10 @@ func (a *Agent) getDockerStats() ([]*container.Stats, error) {
     wg.Wait()
 
-    for id := range containerStatsMap {
+    for id := range a.containerStatsMap {
         if _, exists := validIds[id]; !exists {
             // log.Printf("Removing container cpu map entry: %+v\n", id)
-            delete(containerStatsMap, id)
+            delete(a.containerStatsMap, id)
         }
     }
@@ -220,15 +233,27 @@ func (a *Agent) getContainerStats(ctr *container.ApiInfo) (*container.Stats, err
     // use semaphore to limit concurrency
     a.acquireSemaphore()
     defer a.releaseSemaphore()
 
     resp, err := a.dockerClient.Get("http://localhost/containers/" + ctr.IdShort + "/stats?stream=0&one-shot=1")
     if err != nil {
-        return &container.Stats{}, err
+        return nil, err
     }
     defer resp.Body.Close()
 
+    // get a buffer from the pool
+    buf := a.bufferPool.Get().(*bytes.Buffer)
+    defer a.bufferPool.Put(buf)
+    buf.Reset()
+
+    // read the response body into the buffer
+    _, err = io.Copy(buf, resp.Body)
+    if err != nil {
+        return nil, err
+    }
+
+    // unmarshal the json data from the buffer
     var statsJson container.ApiStats
-    if err := json.NewDecoder(resp.Body).Decode(&statsJson); err != nil {
-        log.Fatal(err)
+    if err := json.Unmarshal(buf.Bytes(), &statsJson); err != nil {
+        return nil, err
     }
 
     name := ctr.Names[0][1:]
@@ -244,10 +269,10 @@ func (a *Agent) getContainerStats(ctr *container.ApiInfo) (*container.Stats, err
     defer a.containerStatsMutex.Unlock()
 
     // add empty values if they doesn't exist in map
-    stats, initialized := containerStatsMap[ctr.IdShort]
+    stats, initialized := a.containerStatsMap[ctr.IdShort]
     if !initialized {
         stats = &container.PrevContainerStats{}
-        containerStatsMap[ctr.IdShort] = stats
+        a.containerStatsMap[ctr.IdShort] = stats
     }
 
     // cpu
@@ -255,7 +280,7 @@ func (a *Agent) getContainerStats(ctr *container.ApiInfo) (*container.Stats, err
     systemDelta := statsJson.CPUStats.SystemUsage - stats.Cpu[1]
     cpuPct := float64(cpuDelta) / float64(systemDelta) * 100
     if cpuPct > 100 {
-        return &container.Stats{}, fmt.Errorf("%s cpu pct greater than 100: %+v", name, cpuPct)
+        return nil, fmt.Errorf("%s cpu pct greater than 100: %+v", name, cpuPct)
     }
     stats.Cpu = [2]uint64{statsJson.CPUStats.CPUUsage.TotalUsage, statsJson.CPUStats.SystemUsage}
@@ -277,13 +302,13 @@ func (a *Agent) getContainerStats(ctr *container.ApiInfo) (*container.Stats, err
     stats.Net.Recv = total_recv
     stats.Net.Time = time.Now()
 
-    cStats := &container.Stats{
-        Name:        name,
-        Cpu:         twoDecimals(cpuPct),
-        Mem:         bytesToMegabytes(float64(usedMemory)),
-        NetworkSent: bytesToMegabytes(sent_delta),
-        NetworkRecv: bytesToMegabytes(recv_delta),
-    }
+    cStats := a.containerStatsPool.Get().(*container.Stats)
+    cStats.Name = name
+    cStats.Cpu = twoDecimals(cpuPct)
+    cStats.Mem = bytesToMegabytes(float64(usedMemory))
+    cStats.NetworkSent = bytesToMegabytes(sent_delta)
+    cStats.NetworkRecv = bytesToMegabytes(recv_delta)
 
     return cStats, nil
 }
@@ -291,7 +316,7 @@ func (a *Agent) getContainerStats(ctr *container.ApiInfo) (*container.Stats, err
 func (a *Agent) deleteContainerStatsSync(id string) {
     a.containerStatsMutex.Lock()
     defer a.containerStatsMutex.Unlock()
-    delete(containerStatsMap, id)
+    delete(a.containerStatsMap, id)
 }
 
 func (a *Agent) gatherStats() *system.CombinedData {
@@ -299,28 +324,28 @@ func (a *Agent) gatherStats() *system.CombinedData {
     systemData := &system.CombinedData{
         Stats: systemStats,
         Info:  systemInfo,
-        // Containers: []*container.Stats{},
     }
     if containerStats, err := a.getDockerStats(); err == nil {
         systemData.Containers = containerStats
     }
-    // fmt.Printf("%+v\n", stats)
+    // fmt.Printf("%+v\n", systemData)
     return systemData
 }
 
-func (a *Agent) startServer(addr string, pubKey []byte) {
-    sshServer.Handle(func(s sshServer.Session) {
-        stats := a.gatherStats()
-        var jsonStats []byte
-        jsonStats, _ = json.Marshal(stats)
-        io.WriteString(s, string(jsonStats))
-        s.Exit(0)
-    })
-    log.Printf("Starting SSH server on %s", addr)
-    if err := sshServer.ListenAndServe(addr, nil, sshServer.NoPty(),
+// return container stats to pool
+func (a *Agent) returnStatsToPool(containerStats []*container.Stats) {
+    for _, stats := range containerStats {
+        a.containerStatsPool.Put(stats)
+    }
+}
+
+func (a *Agent) startServer() {
+    sshServer.Handle(a.handleSession)
+    log.Printf("Starting SSH server on %s", a.addr)
+    if err := sshServer.ListenAndServe(a.addr, nil, sshServer.NoPty(),
         sshServer.PublicKeyAuth(func(ctx sshServer.Context, key sshServer.PublicKey) bool {
-            allowed, _, _, _, _ := sshServer.ParseAuthorizedKey(pubKey)
+            allowed, _, _, _, _ := sshServer.ParseAuthorizedKey(a.pubKey)
             return sshServer.KeysEqual(key, allowed)
         }),
     ); err != nil {
@@ -328,6 +353,18 @@ func (a *Agent) startServer(addr string, pubKey []byte) {
     }
 }
 
+func (a *Agent) handleSession(s sshServer.Session) {
+    stats := a.gatherStats()
+    defer a.returnStatsToPool(stats.Containers)
+    encoder := json.NewEncoder(s)
+    if err := encoder.Encode(stats); err != nil {
+        log.Println("Error encoding stats:", err.Error())
+        s.Exit(1)
+        return
+    }
+    s.Exit(0)
+}
+
 func (a *Agent) Run() {
     if filesystem, exists := os.LookupEnv("FILESYSTEM"); exists {
         a.diskIoStats.Filesystem = filesystem
@@ -338,7 +375,35 @@ func (a *Agent) Run() {
     a.initializeDiskIoStats()
     a.initializeNetIoStats()
-    a.startServer(a.port, a.pubKey)
+    a.startServer()
+}
+
+func (a *Agent) initializeDiskIoStats() {
+    if io, err := disk.IOCounters(a.diskIoStats.Filesystem); err == nil {
+        for _, d := range io {
+            a.diskIoStats.Time = time.Now()
+            a.diskIoStats.Read = d.ReadBytes
+            a.diskIoStats.Write = d.WriteBytes
+        }
+    }
+}
+
+func (a *Agent) initializeNetIoStats() {
+    if netIO, err := psutilNet.IOCounters(true); err == nil {
+        bytesSent := uint64(0)
+        bytesRecv := uint64(0)
+        for _, v := range netIO {
+            if skipNetworkInterface(&v) {
+                continue
+            }
+            log.Printf("Found network interface: %+v (%+v recv, %+v sent)\n", v.Name, v.BytesRecv, v.BytesSent)
+            bytesSent += v.BytesSent
+            bytesRecv += v.BytesRecv
+        }
+        a.netIoStats.BytesSent = bytesSent
+        a.netIoStats.BytesRecv = bytesRecv
+        a.netIoStats.Time = time.Now()
+    }
 }
 
 func bytesToMegabytes(b float64) float64 {
@@ -379,34 +444,6 @@ func skipNetworkInterface(v *psutilNet.IOCountersStat) bool {
     }
 }
 
-func (a *Agent) initializeDiskIoStats() {
-    if io, err := disk.IOCounters(a.diskIoStats.Filesystem); err == nil {
-        for _, d := range io {
-            a.diskIoStats.Time = time.Now()
-            a.diskIoStats.Read = d.ReadBytes
-            a.diskIoStats.Write = d.WriteBytes
-        }
-    }
-}
-
-func (a *Agent) initializeNetIoStats() {
-    if netIO, err := psutilNet.IOCounters(true); err == nil {
-        bytesSent := uint64(0)
-        bytesRecv := uint64(0)
-        for _, v := range netIO {
-            if skipNetworkInterface(&v) {
-                continue
-            }
-            log.Printf("Found network interface: %+v (%+v recv, %+v sent)\n", v.Name, v.BytesRecv, v.BytesSent)
-            bytesSent += v.BytesSent
-            bytesRecv += v.BytesRecv
-        }
-        a.netIoStats.BytesSent = bytesSent
-        a.netIoStats.BytesRecv = bytesRecv
-        a.netIoStats.Time = time.Now()
-    }
-}
-
 func newDockerClient() *http.Client {
     dockerHost := "unix:///var/run/docker.sock"
     if dockerHostEnv, exists := os.LookupEnv("DOCKER_HOST"); exists {
@@ -422,6 +459,7 @@ func newDockerClient() *http.Client {
         ForceAttemptHTTP2:   false,
         IdleConnTimeout:     90 * time.Second,
         DisableCompression:  true,
+        MaxConnsPerHost:     20,
         MaxIdleConnsPerHost: 20,
         DisableKeepAlives:   false,
     }
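
On the final hunk: MaxConnsPerHost caps how many concurrent connections the agent will open to the Docker daemon, complementing the existing MaxIdleConnsPerHost and the semaphore in getContainerStats. For context, a sketch of a Docker-socket client with those limits; the socket path, timeout, and limit values here are illustrative assumptions, not the project's exact configuration:

package main

import (
    "context"
    "fmt"
    "io"
    "net"
    "net/http"
    "time"
)

func newDockerClient() *http.Client {
    // Dial the Docker unix socket regardless of the request URL's host.
    transport := &http.Transport{
        DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
            return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
        },
        ForceAttemptHTTP2:   false,
        IdleConnTimeout:     90 * time.Second,
        DisableCompression:  true,
        MaxConnsPerHost:     20, // hard cap on concurrent connections to the daemon
        MaxIdleConnsPerHost: 20, // keep that many connections warm between polls
    }
    return &http.Client{Timeout: 10 * time.Second, Transport: transport}
}

func main() {
    client := newDockerClient()
    resp, err := client.Get("http://localhost/containers/json")
    if err != nil {
        fmt.Println("docker not reachable:", err)
        return
    }
    defer resp.Body.Close()
    body, _ := io.ReadAll(resp.Body)
    fmt.Println(resp.Status, len(body), "bytes")
}

With the custom DialContext every request goes over the unix socket, so the "localhost" host in the URL is only a placeholder for net/http.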