Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
61 commits
Select commit Hold shift + click to select a range
8f927c3
add unknown container
notshivansh Jul 25, 2025
000e003
Merge pull request #112 from akto-api-security/hotfix/fix_no_containers
notshivansh Jul 25, 2025
a6816ff
update version
notshivansh Aug 13, 2025
e9fa4a7
Merge pull request #114 from akto-api-security/hotfix/fix_vuln
notshivansh Aug 13, 2025
a2b0968
add chunk encoding and debug logs
notshivansh Aug 18, 2025
99ae18a
attempt
notshivansh Aug 18, 2025
5c5892b
remove debug logs
notshivansh Aug 20, 2025
d853d48
make limit configurable
notshivansh Aug 20, 2025
99bd902
increase chunk limit
notshivansh Aug 20, 2025
8535a4f
reduce default limit
notshivansh Aug 20, 2025
73f3b8d
Merge pull request #116 from akto-api-security/feature/chunk_encoding
notshivansh Aug 21, 2025
b6629c4
Adding sasl auth in kafka for agent
Ark2307 Sep 1, 2025
176250e
Fixing compilation errors
Ark2307 Sep 2, 2025
2383fe0
adding missing import
Ark2307 Sep 2, 2025
7be7762
Merge pull request #118 from akto-api-security/feature/sasl_auth_kafk…
Ark2307 Sep 2, 2025
c53c2e3
enable threat events push by default
ayushaga14 Sep 30, 2025
b0eb594
remove file logging enabled check on debug urls
ayushaga14 Sep 30, 2025
b4a365b
Merge pull request #124 from akto-api-security/enable-threat-ebpf
ayushaga14 Sep 30, 2025
66d67b3
update script
notshivansh Oct 3, 2025
bdee800
configurable
notshivansh Oct 3, 2025
40c453d
add override
notshivansh Oct 3, 2025
9ab2ed5
take in mb
notshivansh Oct 3, 2025
cdce6c0
Merge pull request #125 from akto-api-security/feature/mem_check
notshivansh Oct 3, 2025
8d4d02a
add go mem limit.
notshivansh Oct 3, 2025
9c7b15c
set go mem limit as 50% of mem threshold
ayushaga14 Oct 5, 2025
e63a15c
set go mem limit as 50% of mem threshold
ayushaga14 Oct 5, 2025
d4eae05
Merge pull request #127 from akto-api-security/feature/mem_check
ayushaga14 Oct 5, 2025
eed4401
debug direction
gauravakto Nov 12, 2025
2a49774
label resolving add
gauravakto Nov 12, 2025
f746861
push source in threat payload
ayushaga14 Nov 19, 2025
c4e2f74
Merge pull request #131 from akto-api-security/fix/push-source-threat
ayushaga14 Nov 19, 2025
6e84665
remove the payload printing
gauravakto Nov 27, 2025
ac26129
Merge pull request #133 from akto-api-security/fix/remove-sample
gauravakto Nov 27, 2025
10bc1fc
Merge branch 'feature/k8s_ebpf' into tagging-fixes-v15
gauravakto Dec 4, 2025
751dcf3
fix the ignore urls bug
gauravakto Dec 4, 2025
19df05c
envoy dont tag
gauravakto Dec 10, 2025
7fa33f7
remove the ignore urls change
gauravakto Dec 10, 2025
b2e23ea
add back info
gauravakto Dec 10, 2025
87ac815
change parser to master
gauravakto Dec 10, 2025
b5b6f07
don't tag envoy process calls
gauravakto Dec 10, 2025
c4a70ce
exit on kafka error threshold
notshivansh Dec 16, 2025
9895ea7
make optional config changes
notshivansh Dec 16, 2025
5f93245
increase default threshold
notshivansh Dec 16, 2025
9a03ba8
increase threshold
notshivansh Dec 16, 2025
45e40a4
Merge pull request #137 from akto-api-security/feature/kafka_err_restart
notshivansh Dec 16, 2025
d73dfb5
kafka reconnect
notshivansh Dec 24, 2025
e4ada9c
refactor parseAndProduce
gauravakto Dec 25, 2025
20a705c
Merge pull request #140 from akto-api-security/feat/partial-requests
gauravakto Dec 25, 2025
d784ad1
use empty bodies on failures
gauravakto Dec 25, 2025
04e6abe
Merge pull request #141 from akto-api-security/feat/best-effort-disco…
gauravakto Dec 25, 2025
cf4a711
increase and reset timer
gauravakto Dec 25, 2025
c5a88e3
default don't reconnect
notshivansh Dec 26, 2025
bb536cf
add max 10MB limit
gauravakto Dec 26, 2025
b110a01
Merge pull request #142 from akto-api-security/feat/handle-longer-res…
gauravakto Dec 27, 2025
e4feeae
Merge pull request #139 from akto-api-security/feature/kafka_reconnect
notshivansh Dec 29, 2025
1f9ee08
Feature/communicate kafka (#144)
kural-akto Jan 12, 2026
dd96fba
feat: add kafka header
abhijeet-akto Jan 14, 2026
1484955
chore: add check
abhijeet-akto Jan 14, 2026
98933fb
Merge pull request #147 from akto-api-security/abhi/feat/kafka-header
abhijeet-akto Jan 14, 2026
54fd543
Merge branch 'feature/k8s_ebpf' into tagging-fixes-v15
gauravakto Jan 16, 2026
edde56c
add better logs
gauravakto Jan 19, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
59 changes: 50 additions & 9 deletions .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,16 @@ on:
options:
- legacy
- ebpf
default: legacy
default: legacy
Architecture:
description: "The target architecture(s) for the Docker image."
required: true
type: choice
options:
- both
- arm64
- amd64
default: both

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
Expand Down Expand Up @@ -82,11 +91,19 @@ jobs:
ECR_REPOSITORY: akto-api-security
REGISTRY_ALIAS: p7q3h0z2
IMAGE_TAG: ${{ github.event.inputs.Tag }}
ARCH_INPUT: ${{ github.event.inputs.Architecture }}
run: |
# Build a docker container and push it to DockerHub
docker buildx create --use
echo "Building and Pushing image to ECR..."
docker buildx build --platform linux/arm64/v8,linux/amd64 -t $ECR_REGISTRY/$REGISTRY_ALIAS/mirror-api-logging:$IMAGE_TAG . --push
if [ "$ARCH_INPUT" == "arm64" ]; then
PLATFORM="linux/arm64/v8"
elif [ "$ARCH_INPUT" == "amd64" ]; then
PLATFORM="linux/amd64"
else
PLATFORM="linux/arm64/v8,linux/amd64"
fi
echo "Building and Pushing image to ECR with platform: $PLATFORM"
docker buildx build --platform $PLATFORM -t $ECR_REGISTRY/$REGISTRY_ALIAS/mirror-api-logging:$IMAGE_TAG . --push
echo "::set-output name=image::$ECR_REGISTRY/$REGISTRY_ALIAS/mirror-api-logging:$IMAGE_TAG"

- name: Build, tag, and push the image to Amazon ECR -ebpf
Expand All @@ -97,11 +114,19 @@ jobs:
ECR_REPOSITORY: akto-api-security
REGISTRY_ALIAS: p7q3h0z2
IMAGE_TAG: ${{ github.event.inputs.EbpfTag }}
ARCH_INPUT: ${{ github.event.inputs.Architecture }}
run: |
# Build a docker container and push it to DockerHub
docker buildx create --use
echo "Building and Pushing image to ECR..."
docker buildx build --platform linux/arm64/v8,linux/amd64 -t $ECR_REGISTRY/$REGISTRY_ALIAS/mirror-api-logging:$IMAGE_TAG -f Dockerfile.eBPF . --push
if [ "$ARCH_INPUT" == "arm64" ]; then
PLATFORM="linux/arm64/v8"
elif [ "$ARCH_INPUT" == "amd64" ]; then
PLATFORM="linux/amd64"
else
PLATFORM="linux/arm64/v8,linux/amd64"
fi
echo "Building and Pushing image to ECR with platform: $PLATFORM"
docker buildx build --platform $PLATFORM -t $ECR_REGISTRY/$REGISTRY_ALIAS/mirror-api-logging:$IMAGE_TAG -f Dockerfile.eBPF . --push
echo "::set-output name=image::$ECR_REGISTRY/$REGISTRY_ALIAS/mirror-api-logging:$IMAGE_TAG"

build-docker:
Expand Down Expand Up @@ -136,11 +161,19 @@ jobs:
env:
ECR_REGISTRY: aktosecurity
IMAGE_TAG: ${{ github.event.inputs.Tag }}
ARCH_INPUT: ${{ github.event.inputs.Architecture }}
run: |
# Build a docker container and push it to DockerHub
docker buildx create --use
echo "Building and Pushing image to DockerHub..."
docker buildx build --platform linux/arm64/v8,linux/amd64 -t $ECR_REGISTRY/mirror-api-logging:$IMAGE_TAG . --push
if [ "$ARCH_INPUT" == "arm64" ]; then
PLATFORM="linux/arm64/v8"
elif [ "$ARCH_INPUT" == "amd64" ]; then
PLATFORM="linux/amd64"
else
PLATFORM="linux/arm64/v8,linux/amd64"
fi
echo "Building and Pushing image to DockerHub with platform: $PLATFORM"
docker buildx build --platform $PLATFORM -t $ECR_REGISTRY/mirror-api-logging:$IMAGE_TAG . --push
echo "::set-output name=image::$ECR_REGISTRY/mirror-api-logging:$IMAGE_TAG"

- name: Build, tag, and push the image to DockerHub - ebpf
Expand All @@ -149,9 +182,17 @@ jobs:
env:
ECR_REGISTRY: aktosecurity
IMAGE_TAG: ${{ github.event.inputs.EbpfTag }}
ARCH_INPUT: ${{ github.event.inputs.Architecture }}
run: |
# Build a docker container and push it to DockerHub
docker buildx create --use
echo "Building and Pushing image to DockerHub..."
docker buildx build --platform linux/arm64/v8,linux/amd64 -t $ECR_REGISTRY/mirror-api-logging:$IMAGE_TAG -f Dockerfile.eBPF . --push
if [ "$ARCH_INPUT" == "arm64" ]; then
PLATFORM="linux/arm64/v8"
elif [ "$ARCH_INPUT" == "amd64" ]; then
PLATFORM="linux/amd64"
else
PLATFORM="linux/arm64/v8,linux/amd64"
fi
echo "Building and Pushing image to DockerHub with platform: $PLATFORM"
docker buildx build --platform $PLATFORM -t $ECR_REGISTRY/mirror-api-logging:$IMAGE_TAG -f Dockerfile.eBPF . --push
echo "::set-output name=image::$ECR_REGISTRY/mirror-api-logging:$IMAGE_TAG"
3 changes: 2 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -2,4 +2,5 @@ mirroring-api-logging
.idea/
**/.vscode/
temp
**temp
**temp
data-*
2 changes: 1 addition & 1 deletion Dockerfile.eBPF
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
FROM alpine:3.21 AS base
FROM alpine:3.22 AS base

USER root
RUN apk add bcc-tools bcc-dev bcc-doc linux-headers build-base
Expand Down
73 changes: 71 additions & 2 deletions ebpf-run.sh
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,10 @@

LOG_FILE="/tmp/dump.log"
MAX_LOG_SIZE=${MAX_LOG_SIZE:-10485760} # Default to 10 MB if not set (10 MB = 10 * 1024 * 1024 bytes)
CHECK_INTERVAL=60 # Check interval in seconds
CHECK_INTERVAL=${CHECK_INTERVAL:-60}
CHECK_INTERVAL_MEM=${CHECK_INTERVAL_MEM:-10} # Check interval in seconds (configurable via env)
MEMORY_THRESHOLD=${MEMORY_THRESHOLD:-80} # Kill process at this % memory usage (configurable via env)
GOMEMLIMIT_PERCENT=${GOMEMLIMIT_PERCENT:-60} # GOMEMLIMIT as % of container memory limit (configurable via env)

# Function to rotate the log file
rotate_log() {
Expand All @@ -14,6 +17,30 @@ rotate_log() {
fi
}

# Check current cgroup memory usage against MEMORY_THRESHOLD (percent) and
# kill the ebpf-logging process when the threshold is exceeded.
# Relies on MEM_LIMIT_BYTES, which the script computes before the monitor
# loop starts; silently skips the check when usage or limit is unavailable.
check_memory_and_kill() {
    # Get current memory usage in bytes
    if [ -f /sys/fs/cgroup/memory.current ]; then
        # cgroup v2
        CURRENT_MEM=$(cat /sys/fs/cgroup/memory.current)
    elif [ -f /sys/fs/cgroup/memory/memory.usage_in_bytes ]; then
        # cgroup v1
        CURRENT_MEM=$(cat /sys/fs/cgroup/memory/memory.usage_in_bytes)
    else
        # No cgroup memory accounting available; nothing to check.
        return
    fi

    # Guard: if MEM_LIMIT_BYTES is unset, zero, or non-numeric the
    # percentage computation below would abort with a division-by-zero /
    # arithmetic error, so skip the check instead.
    if ! [ "$MEM_LIMIT_BYTES" -gt 0 ] 2>/dev/null; then
        return
    fi

    # Calculate percentage used
    PERCENT_USED=$((CURRENT_MEM * 100 / MEM_LIMIT_BYTES))

    echo "Memory usage: ${PERCENT_USED}% (${CURRENT_MEM} / ${MEM_LIMIT_BYTES} bytes)"

    if [ "$PERCENT_USED" -ge "$MEMORY_THRESHOLD" ]; then
        echo "Memory threshold ${MEMORY_THRESHOLD}% exceeded (${PERCENT_USED}%), killing ebpf-logging process"
        pkill -9 ebpf-logging
    fi
}

# Start monitoring in the background
if [[ "${ENABLE_LOGS}" == "false" ]]; then
while true; do
Expand All @@ -22,10 +49,52 @@ if [[ "${ENABLE_LOGS}" == "false" ]]; then
done &
fi

# 1. Check if MEM_LIMIT is provided as env variable
if [ -z "$MEM_LIMIT" ]; then
# Not provided, detect and read cgroup memory limits
if [ -f /sys/fs/cgroup/memory.max ]; then
# cgroup v2
MEM_LIMIT_BYTES=$(cat /sys/fs/cgroup/memory.max)
elif [ -f /sys/fs/cgroup/memory/memory.limit_in_bytes ]; then
# cgroup v1
MEM_LIMIT_BYTES=$(cat /sys/fs/cgroup/memory/memory.limit_in_bytes)
else
# Fallback to free -b (bytes) if cgroup file not found
echo "Neither cgroup v2 nor v1 memory file found, defaulting to free -m"
# free -b already reports memory in bytes; no unit conversion needed
MEM_LIMIT_BYTES=$(free -b | awk '/Mem:/ {print $2}')
fi

# 2. Handle edge cases: "max" means no strict limit or a very large limit
if [ "$MEM_LIMIT_BYTES" = "max" ]; then
# Fall back to the total system memory reported by free -b
echo "Cgroup memory limit set to 'max', defaulting to free memory"
MEM_LIMIT_BYTES=$(free -b | awk '/Mem:/ {print $2}')
fi

# 3. Convert the memory limit from bytes to MB (integer division)
MEM_LIMIT_MB=$((MEM_LIMIT_BYTES / 1024 / 1024))
else
# MEM_LIMIT provided as env variable, treat as MB
echo "Using MEM_LIMIT from environment variable: ${MEM_LIMIT} MB"
MEM_LIMIT_MB=$MEM_LIMIT
# Convert MB to bytes for calculations
MEM_LIMIT_BYTES=$((MEM_LIMIT * 1024 * 1024))
fi

echo "Using container memory limit: ${MEM_LIMIT_MB} MB"

# Set GOMEMLIMIT for the Go process
GOMEMLIMIT_MB=$((MEM_LIMIT_MB * GOMEMLIMIT_PERCENT / 100))
export GOMEMLIMIT="${GOMEMLIMIT_MB}MiB"
echo "Setting GOMEMLIMIT to: ${GOMEMLIMIT} (${GOMEMLIMIT_PERCENT}% of ${MEM_LIMIT_MB} MB)"

# Start memory monitoring in the background

while :
do
if [[ "${ENABLE_LOGS}" == "false" ]]; then
./ebpf-logging >> "$LOG_FILE" 2>&1
./ebpf-logging >> "$LOG_FILE" 2>&1
else
./ebpf-logging
fi
Expand Down
3 changes: 2 additions & 1 deletion ebpf/bpfwrapper/eventCallbacks.go
Original file line number Diff line number Diff line change
Expand Up @@ -178,6 +178,7 @@ func SocketDataEventCallback(inputChan chan []byte, connectionFactory *connectio
"data", dataStr,
"rc", event.Attr.ReadEventsCount,
"wc", event.Attr.WriteEventsCount,
"ssl", event.Attr.Ssl)
"ssl", event.Attr.Ssl,
"bytesSent", bytesSent)
}
}
43 changes: 38 additions & 5 deletions ebpf/connections/factory.go
Original file line number Diff line number Diff line change
Expand Up @@ -73,13 +73,15 @@ func convertToSingleByteArr(bufMap map[int][]byte) []byte {
var (
disableEgress = false
maxActiveConnections = 4096
inactivityThreshold = 3 * time.Second
inactivityThreshold = 7 * time.Second
// Value in MB
bufferMemThreshold = 400

// unique id of daemonset
uniqueDaemonsetId = uuid.New().String()
trackerDataProcessInterval = 100

socketDataEventBytesThreshold = 10 * 1024 * 1024
)

func init() {
Expand All @@ -89,6 +91,7 @@ func init() {
utils.InitVar("TRAFFIC_BUFFER_THRESHOLD", &bufferMemThreshold)
utils.InitVar("AKTO_MEM_SOFT_LIMIT", &bufferMemThreshold)
utils.InitVar("TRACKER_DATA_PROCESS_INTERVAL", &trackerDataProcessInterval)
utils.InitVar("SOCKET_DATA_EVENT_BYTES_THRESHOLD", &socketDataEventBytesThreshold)
}

func ProcessTrackerData(connID structs.ConnID, tracker *Tracker, isComplete bool) {
Expand Down Expand Up @@ -201,6 +204,25 @@ func (factory *Factory) CreateIfNotExists(connectionID structs.ConnID) {
}
}

// resetTimer stops, drains, and resets the timer to the given duration.
func resetTimer(t *time.Timer, d time.Duration) {
if !t.Stop() {
select {
case <-t.C:
default:
}
}
t.Reset(d)
}

// Worker lifecycle:
// ACTIVE:
// - socket data/open -> reset inactivity timer on each event
// - socket close -> schedule delayed termination
// - inactivity timer -> terminate immediately
//
// TERMINATION is final and happens exactly once.
// either due to inactivityThreshold or due to socket close event
func (factory *Factory) StartWorker(connectionID structs.ConnID, tracker *Tracker, ch chan interface{}) {
go func(connID structs.ConnID, tracker *Tracker, ch chan interface{}) {

Expand All @@ -216,9 +238,17 @@ func (factory *Factory) StartWorker(connectionID structs.ConnID, tracker *Tracke
case *structs.SocketDataEvent:
utils.LogProcessing("Received data event", "fd", connID.Fd, "id", connID.Id, "timestamp", connID.Conn_start_ns, "ip", connID.Ip, "port", connID.Port)
tracker.AddDataEvent(*e)
if tracker.GetSentBytes() + tracker.GetRecvBytes() > uint64(socketDataEventBytesThreshold) {
utils.LogProcessing("Socket Data threshold data breached, processing current data", "fd", connID.Fd, "id", connID.Id, "timestamp", connID.Conn_start_ns, "ip", connID.Ip, "port", connID.Port)
factory.StopProcessing(connID)
return
}else{
resetTimer(inactivityTimer, inactivityThreshold)
}
case *structs.SocketOpenEvent:
utils.LogProcessing("Received open event", "fd", connID.Fd, "id", connID.Id, "timestamp", connID.Conn_start_ns, "ip", connID.Ip, "port", connID.Port)
tracker.AddOpenEvent(*e)
resetTimer(inactivityTimer, inactivityThreshold)
case *structs.SocketCloseEvent:
utils.LogProcessing("Received close event", "fd", connID.Fd, "id", connID.Id, "timestamp", connID.Conn_start_ns, "ip", connID.Ip, "port", connID.Port)
tracker.AddCloseEvent(*e)
Expand All @@ -230,22 +260,25 @@ func (factory *Factory) StartWorker(connectionID structs.ConnID, tracker *Tracke

case <-delayedDeleteChan:
utils.LogProcessing("Stopping go routine (delayed close)", "fd", connID.Fd, "id", connID.Id, "timestamp", connID.Conn_start_ns, "ip", connID.Ip, "port", connID.Port)
factory.ProcessAndStopWorker(connID)
factory.DeleteWorker(connID)
factory.StopProcessing(connID)
return

case <-inactivityTimer.C:
// Eat the go routine after inactive threshold, process the tracker and stop the worker
utils.LogProcessing("Inactivity threshold reached, marking connection as inactive and processing", "fd", connID.Fd, "id", connID.Id, "timestamp", connID.Conn_start_ns, "ip", connID.Ip, "port", connID.Port)
factory.ProcessAndStopWorker(connID)
factory.DeleteWorker(connID)
factory.StopProcessing(connID)
utils.LogProcessing("Stopping go routine", "fd", connID.Fd, "id", connID.Id, "timestamp", connID.Conn_start_ns, "ip", connID.Ip, "port", connID.Port)
return
}
}
}(connectionID, tracker, ch)
}

// StopProcessing finalizes the connection identified by connID: it first
// flushes/processes whatever the tracker has accumulated via
// ProcessAndStopWorker, then removes the worker bookkeeping via DeleteWorker.
// Callers invoke this exactly once per connection (termination is final).
func (factory *Factory) StopProcessing(connID structs.ConnID) {
	factory.ProcessAndStopWorker(connID)
	factory.DeleteWorker(connID)
}

func (factory *Factory) ProcessAndStopWorker(connectionID structs.ConnID) {
tracker, connExists := factory.getTracker(connectionID)
if connExists {
Expand Down
15 changes: 14 additions & 1 deletion ebpf/connections/parser.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,5 +5,18 @@ import (
)

// tryReadFromBD hands a captured connection's request/response buffers to the
// Kafka pipeline. It packages the connection metadata into a TrafficContext —
// the process id is unpacked from the upper 32 bits of the event id — and
// delegates all parsing and production to kafkaUtil.ParseAndProduce.
func tryReadFromBD(ip string, destIp string, receiveBuffer []byte, sentBuffer []byte, isComplete bool, direction int, id uint64, fd uint32, daemonsetIdentifier, hostName string) {
	var trafficCtx kafkaUtil.TrafficContext
	trafficCtx.SourceIP = ip
	trafficCtx.DestIP = destIp
	// Mirrored eBPF traffic carries no VXLAN id and is never pending.
	trafficCtx.VxlanID = 0
	trafficCtx.IsPending = false
	trafficCtx.TrafficSource = "MIRRORING"
	trafficCtx.IsComplete = isComplete
	trafficCtx.Direction = direction
	trafficCtx.ProcessID = uint32(id >> 32)
	trafficCtx.SocketFD = fd
	trafficCtx.DaemonsetIdentifier = daemonsetIdentifier
	trafficCtx.HostName = hostName
	kafkaUtil.ParseAndProduce(receiveBuffer, sentBuffer, trafficCtx)
}
8 changes: 8 additions & 0 deletions ebpf/connections/tracker.go
Original file line number Diff line number Diff line change
Expand Up @@ -108,3 +108,11 @@ func (conn *Tracker) AddCloseEvent(event structs.SocketCloseEvent) {
conn.closeTimestamp = uint64(time.Now().UnixNano())
conn.lastAccessTimestamp = uint64(time.Now().UnixNano())
}

// GetSentBytes returns the running total of bytes recorded as sent on this
// connection.
// NOTE(review): reads conn.sentBytes without synchronization — safe only if
// each Tracker is owned by a single worker goroutine; confirm against callers.
func (conn *Tracker) GetSentBytes() uint64 {
	return conn.sentBytes
}

// GetRecvBytes returns the running total of bytes recorded as received on
// this connection.
// NOTE(review): reads conn.recvBytes without synchronization — safe only if
// each Tracker is owned by a single worker goroutine; confirm against callers.
func (conn *Tracker) GetRecvBytes() uint64 {
	return conn.recvBytes
}
Loading
Loading