Skip to content

Commit 74c80df

Browse files
committed
fio job poc
1 parent 4050233 commit 74c80df

File tree

4 files changed

+119
-70
lines changed

4 files changed

+119
-70
lines changed

cmd/report/report.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -171,7 +171,7 @@ var categories = []common.Category{
171171
{FlagName: flagElcName, FlagVar: &flagElc, Help: "Efficiency Latency Control Settings", TableNames: []string{report.ElcTableName}},
172172
{FlagName: flagMemoryName, FlagVar: &flagMemory, Help: "Memory Configuration", TableNames: []string{report.MemoryTableName}},
173173
{FlagName: flagDimmName, FlagVar: &flagDimm, Help: "DIMM Population", TableNames: []string{report.DIMMTableName}},
174-
{FlagName: flagNicName, FlagVar: &flagNic, Help: "Network Cards", TableNames: []string{report.NICTableName}},
174+
{FlagName: flagNicName, FlagVar: &flagNic, Help: "Network Cards", TableNames: []string{report.NICTableName, report.NICPacketSteeringTableName}},
175175
{FlagName: flagNetConfigName, FlagVar: &flagNetConfig, Help: "Network Configuration", TableNames: []string{report.NetworkConfigTableName}},
176176
{FlagName: flagNetIrqName, FlagVar: &flagNetIrq, Help: "Network IRQ to CPU Mapping", TableNames: []string{report.NetworkIRQMappingTableName}},
177177
{FlagName: flagDiskName, FlagVar: &flagDisk, Help: "Storage Devices", TableNames: []string{report.DiskTableName}},

internal/report/table_defs.go

Lines changed: 45 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -667,7 +667,7 @@ var tableDefinitions = map[string]TableDefinition{
667667
StorageBenchmarkTableName: {
668668
Name: StorageBenchmarkTableName,
669669
MenuLabel: StorageBenchmarkTableName,
670-
HasRows: false,
670+
HasRows: true,
671671
ScriptNames: []string{
672672
script.StorageBenchmarkScriptName,
673673
},
@@ -1690,6 +1690,7 @@ func nicPacketSteeringTableValues(outputs map[string]script.ScriptOutput) []Fiel
16901690
}
16911691

16921692
// Find the maximum number of queues across all NICs for both TX and RX
1693+
slog.Debug("allNicsInfo", slog.Any("allNicsInfo", allNicsInfo))
16931694
maxNumQueues := 0
16941695
for _, nicInfo := range allNicsInfo {
16951696
txq, err := strconv.Atoi(nicInfo.TXQueues)
@@ -1701,10 +1702,11 @@ func nicPacketSteeringTableValues(outputs map[string]script.ScriptOutput) []Fiel
17011702
maxNumQueues = rxq
17021703
}
17031704
}
1705+
slog.Debug("maxNumQueues", slog.Int("maxNumQueues", maxNumQueues))
17041706

17051707
fields := []Field{
17061708
{Name: "Interface"},
1707-
{Name: "Type", Description: "XPS (Transmit Packet Steering) and RPS (Receive Packet Steering) are software-based mechanisms that allow the selection of a specific CPU core to handle the transmission or processing of network packets for a given queue."},
1709+
{Name: "Queue Type", Description: "XPS (Transmit Packet Steering) and RPS (Receive Packet Steering) are software-based mechanisms that allow the selection of a specific CPU core to handle the transmission or processing of network packets for a given queue."},
17081710
}
17091711
for i := 0; i < maxNumQueues; i++ {
17101712
fields = append(fields, Field{Name: strconv.Itoa(i)})
@@ -1720,6 +1722,7 @@ func nicPacketSteeringTableValues(outputs map[string]script.ScriptOutput) []Fiel
17201722
xpsValues[queueNum] = hexBitmapToCPUList(val)
17211723
}
17221724
}
1725+
slog.Debug("xpsrow", slog.String("interface", nicInfo.Name), slog.String("type", "xps_cpus"), slog.Any("values", xpsValues))
17231726
fields[0].Values = append(fields[0].Values, nicInfo.Name)
17241727
fields[1].Values = append(fields[1].Values, "xps_cpus")
17251728
for i := 0; i < maxNumQueues; i++ {
@@ -2443,17 +2446,50 @@ func numaBenchmarkTableValues(outputs map[string]script.ScriptOutput) []Field {
24432446
return fields
24442447
}
24452448

2449+
// formatOrEmpty renders value using the given fmt verb and substitutes an
// empty string whenever the rendered result is exactly "0", so zero-valued
// metrics appear as blank table cells rather than literal zeros.
func formatOrEmpty(format string, value any) string {
	if formatted := fmt.Sprintf(format, value); formatted != "0" {
		return formatted
	}
	return ""
}
2457+
24462458
func storageBenchmarkTableValues(outputs map[string]script.ScriptOutput) []Field {
2447-
readLat, readBw, writeLat, writeBw := storagePerfFromOutput(outputs)
2448-
if readLat == "" && readBw == "" && writeLat == "" && writeBw == "" {
2459+
fioData, err := storagePerfFromOutput(outputs)
2460+
if err != nil {
2461+
slog.Error("failed to get storage benchmark data", slog.String("error", err.Error()))
24492462
return []Field{}
24502463
}
2451-
return []Field{
2452-
{Name: "Single-Thread Read Latency (ns)", Values: []string{readLat}},
2453-
{Name: "Single-Thread Read Bandwidth (MiB/s)", Values: []string{readBw}},
2454-
{Name: "Single-Thread Write Latency (ns)", Values: []string{writeLat}},
2455-
{Name: "Single-Thread Write Bandwidth (MiB/s)", Values: []string{writeBw}},
2464+
2465+
if len(fioData.Jobs) == 0 {
2466+
return []Field{}
24562467
}
2468+
2469+
// Initialize the fields for metrics (column headers)
2470+
fields := []Field{
2471+
{Name: "Job"},
2472+
{Name: "Read Latency (us)"},
2473+
{Name: "Read IOPs"},
2474+
{Name: "Read Bandwidth (MiB/s)"},
2475+
{Name: "Write Latency (us)"},
2476+
{Name: "Write IOPs"},
2477+
{Name: "Write Bandwidth (MiB/s)"},
2478+
}
2479+
2480+
// For each FIO job, create a new row and populate its values
2481+
slog.Debug("fioData", slog.Any("jobs", fioData.Jobs))
2482+
for _, job := range fioData.Jobs {
2483+
fields[0].Values = append(fields[0].Values, job.Jobname)
2484+
fields[1].Values = append(fields[1].Values, formatOrEmpty("%.0f", job.Read.LatNs.Mean/1000))
2485+
fields[2].Values = append(fields[2].Values, formatOrEmpty("%.0f", job.Read.IopsMean))
2486+
fields[3].Values = append(fields[3].Values, formatOrEmpty("%d", job.Read.Bw/1024))
2487+
fields[4].Values = append(fields[4].Values, formatOrEmpty("%.0f", job.Write.LatNs.Mean/1000))
2488+
fields[5].Values = append(fields[5].Values, formatOrEmpty("%.0f", job.Write.IopsMean))
2489+
fields[6].Values = append(fields[6].Values, formatOrEmpty("%d", job.Write.Bw/1024))
2490+
}
2491+
2492+
return fields
24572493
}
24582494

24592495
// telemetry

internal/report/table_helpers_benchmarking.go

Lines changed: 7 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -120,41 +120,30 @@ func cpuSpeedFromOutput(outputs map[string]script.ScriptOutput) string {
120120
return fmt.Sprintf("%.0f", util.GeoMean(vals))
121121
}
122122

123-
func storagePerfFromOutput(outputs map[string]script.ScriptOutput) (readLat, readBw, writeLat, writeBw string) {
123+
func storagePerfFromOutput(outputs map[string]script.ScriptOutput) (fioOutput, error) {
124124
output := outputs[script.StorageBenchmarkScriptName].Stdout
125125
slog.Debug("storage benchmark output", slog.String("output", output))
126126

127127
i := strings.Index(output, "{\n \"fio version\"")
128128
if i >= 0 {
129129
output = output[i:]
130130
} else {
131-
slog.Error("Unable to find fio output", slog.String("output", output))
132-
return
131+
return fioOutput{}, fmt.Errorf("unable to find fio output")
133132
}
134133
if strings.Contains(output, "ERROR:") {
135-
slog.Error("failed to run storage benchmark", slog.String("output", output))
136-
return
134+
return fioOutput{}, fmt.Errorf("failed to run storage benchmark: %s", output)
137135
}
138136

139137
slog.Debug("parsing storage benchmark output")
140138
var fioData fioOutput
141139
if err := json.Unmarshal([]byte(output), &fioData); err != nil {
142-
slog.Error("Error unmarshalling JSON", slog.String("error", err.Error()))
143-
return
140+
return fioOutput{}, fmt.Errorf("error unmarshalling JSON: %w", err)
144141
}
145-
if len(fioData.Jobs) > 0 {
146-
slog.Debug("jobs found in storage benchmark output")
147-
job := fioData.Jobs[0]
148-
readBw = fmt.Sprintf("%d", job.Read.Bw/1024)
149-
readLat = fmt.Sprintf("%.0f", job.Read.LatNs.Mean)
150-
writeBw = fmt.Sprintf("%d", job.Write.Bw/1024)
151-
writeLat = fmt.Sprintf("%.0f", job.Write.LatNs.Mean)
152-
} else {
153-
slog.Error("No jobs found in storage benchmark output", slog.String("output", output))
142+
if len(fioData.Jobs) == 0 {
143+
return fioOutput{}, fmt.Errorf("no jobs found in storage benchmark output")
154144
}
155145

156-
slog.Debug("storage benchmark output", slog.String("readLat", readLat), slog.String("readBw", readBw), slog.String("writeLat", writeLat), slog.String("writeBw", writeBw))
157-
return
146+
return fioData, nil
158147
}
159148

160149
// avxTurboFrequenciesFromOutput parses the output of avx-turbo and returns the turbo frequencies as a map of instruction type to frequencies

internal/script/script_defs.go

Lines changed: 66 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -101,13 +101,14 @@ const (
101101
GaudiNumaScriptName = "gaudi numa"
102102
GaudiArchitectureScriptName = "gaudi architecture"
103103
// benchmark scripts
104-
MemoryBenchmarkScriptName = "memory benchmark"
105-
NumaBenchmarkScriptName = "numa benchmark"
106-
SpeedBenchmarkScriptName = "speed benchmark"
107-
FrequencyBenchmarkScriptName = "frequency benchmark"
108-
PowerBenchmarkScriptName = "power benchmark"
109-
IdlePowerBenchmarkScriptName = "idle power benchmark"
110-
StorageBenchmarkScriptName = "storage benchmark"
104+
MemoryBenchmarkScriptName = "memory benchmark"
105+
NumaBenchmarkScriptName = "numa benchmark"
106+
SpeedBenchmarkScriptName = "speed benchmark"
107+
FrequencyBenchmarkScriptName = "frequency benchmark"
108+
PowerBenchmarkScriptName = "power benchmark"
109+
IdlePowerBenchmarkScriptName = "idle power benchmark"
110+
StorageBenchmarkScriptName = "storage benchmark"
111+
StorageBenchmarkLibaioScriptName = "storage benchmark libaio"
111112
// telemetry scripts
112113
MpstatTelemetryScriptName = "mpstat telemetry"
113114
IostatTelemetryScriptName = "iostat telemetry"
@@ -1155,42 +1156,65 @@ avx-turbo --min-threads=1 --max-threads=$num_cores_per_socket --test scalar_iadd
11551156
StorageBenchmarkScriptName: {
11561157
Name: StorageBenchmarkScriptName,
11571158
ScriptTemplate: `
1158-
numjobs=1
1159-
file_size_g=5
1160-
space_needed_k=$(( (file_size_g + 1) * 1024 * 1024 * numjobs )) # space needed in kilobytes: (file_size_g + 1) GB per job
1161-
ramp_time=5s
1162-
runtime=120s
1163-
ioengine=sync
1164-
# check if .StorageDir is a directory
1165-
if [[ ! -d "{{.StorageDir}}" ]]; then
1166-
echo "ERROR: {{.StorageDir}} does not exist"
1167-
exit 1
1168-
fi
1169-
# check if .StorageDir is writeable
1170-
if [[ ! -w "{{.StorageDir}}" ]]; then
1171-
echo "ERROR: {{.StorageDir}} is not writeable"
1172-
exit 1
1173-
fi
1174-
# check if .StorageDir has enough space
1175-
# example output for df -P /tmp:
1176-
# Filesystem 1024-blocks Used Available Capacity Mounted on
1177-
# /dev/sdd 1055762868 196668944 805390452 20% /
1178-
available_space=$(df -P "{{.StorageDir}}" | awk 'NR==2 {print $4}')
1179-
if [[ $available_space -lt $space_needed_k ]]; then
1180-
echo "ERROR: {{.StorageDir}} has ${available_space}K available space. A minimum of ${space_needed_k}K is required to run this benchmark."
1181-
exit 1
1182-
fi
1183-
# create temporary directory for fio test
11841159
test_dir=$(mktemp -d --tmpdir="{{.StorageDir}}")
1185-
sync
1186-
/sbin/sysctl -w vm.drop_caches=3 || true
1187-
# single-threaded read & write bandwidth test
1188-
fio --name=bandwidth --directory=$test_dir --numjobs=$numjobs \
1189-
--size="$file_size_g"G --time_based --runtime=$runtime --ramp_time=$ramp_time --ioengine=$ioengine \
1190-
--direct=1 --verify=0 --bs=1M --iodepth=64 --rw=rw \
1191-
--group_reporting=1 --iodepth_batch_submit=64 \
1192-
--iodepth_batch_complete_max=64 \
1193-
--output-format=json
1160+
FIO_JOBFILE=$(mktemp $test_dir/fio-job-XXXXXX.fio)
1161+
1162+
cat > $FIO_JOBFILE <<EOF
1163+
[global]
1164+
ioengine=libaio
1165+
direct=1
1166+
size=5G
1167+
ramp_time=5s
1168+
time_based
1169+
create_on_open=1
1170+
unlink=1
1171+
directory=$test_dir
1172+
1173+
[iodepth_1_bs_4k_rand]
1174+
wait_for_previous
1175+
runtime=30s
1176+
rw=randrw
1177+
iodepth=1
1178+
blocksize=4k
1179+
iodepth_batch_submit=1
1180+
iodepth_batch_complete_max=1
1181+
1182+
[iodepth_256_bs_4k_rand]
1183+
wait_for_previous
1184+
runtime=30s
1185+
rw=randrw
1186+
iodepth=256
1187+
blocksize=4k
1188+
iodepth_batch_submit=256
1189+
iodepth_batch_complete_max=256
1190+
1191+
[iodepth_1_bs_1M_numjobs_16]
1192+
wait_for_previous
1193+
size=1G
1194+
runtime=30s
1195+
rw=readwrite
1196+
iodepth=1
1197+
iodepth_batch_submit=1
1198+
iodepth_batch_complete_max=1
1199+
blocksize=1M
1200+
numjobs=16
1201+
group_reporting=1
1202+
1203+
[iodepth_64_bs_1M_numjobs_16]
1204+
wait_for_previous
1205+
size=1G
1206+
runtime=30s
1207+
rw=readwrite
1208+
iodepth=64
1209+
iodepth_batch_submit=64
1210+
iodepth_batch_complete_max=64
1211+
blocksize=1M
1212+
numjobs=16
1213+
group_reporting=1
1214+
EOF
1215+
1216+
fio --output-format=json $FIO_JOBFILE
1217+
11941218
rm -rf $test_dir
11951219
`,
11961220
Superuser: true,

0 commit comments

Comments
 (0)