diff --git a/smoke-test/README.md b/smoke-test/README.md index 3550bd62097f..e261b947da28 100644 --- a/smoke-test/README.md +++ b/smoke-test/README.md @@ -73,6 +73,97 @@ Smoke tests will run against this image in your local Docker image repo. When you want to use the latest **published** Horizon docker image, make sure to remove the local image with `docker image rm opennms/horizon:latest`. You can do the same for the Minion and Sentinel. +## Time Series Strategy: INTEGRATION + +The smoke test infrastructure supports pluggable time series storage (TSS) backends via `TimeSeriesStrategy.INTEGRATION`. This enables end-to-end testing of TSS plugins (e.g., the Cortex TSS plugin) with real Prometheus-compatible backends. + +### Architecture + +```mermaid +graph LR + subgraph TestContainers Stack + PG[PostgreSQL] + TR[Thanos Receive
<br/>:19291 write<br/>:10901 gRPC<br/>:10902 HTTP]
+        TQ[Thanos Query<br/>:9090 HTTP<br/>:10903 gRPC]
+        ONMS[OpenNMS Horizon<br/>:8980 Web/REST<br/>:8101 Karaf SSH]
+    end
+
+    subgraph OpenNMS Internals
+        COL[Collectd] -->|samples| TSS[TimeSeries<br/>
Storage Manager] + TSS -->|Prom remote write| CORTEX[Cortex TSS Plugin] + MEAS[Measurements API] -->|Prom query API| CORTEX + end + + CORTEX -->|write| TR + CORTEX -->|read| TQ + TQ -->|gRPC store| TR + ONMS --- PG + + TEST[CortexTssPluginIT] -->|REST API| ONMS + TEST -->|Prom query API| TQ +``` + +### Data Flow + +1. **Write path**: OpenNMS Collectd collects JVM/DB/SNMP metrics every 30s (self-monitor node) -> `TimeseriesStorageManager` -> Cortex TSS plugin -> Prometheus remote write to Thanos Receive +2. **Read path**: Test queries OpenNMS Measurements API -> Cortex TSS plugin -> Prometheus query API on Thanos Query -> data returned through OpenNMS REST response + +### Prerequisites + +The Cortex TSS plugin is built separately from OpenNMS. You need the plugin KAR file before running these tests. + +```bash +# 1. Clone and build the plugin +git clone https://github.com/OpenNMS/opennms-cortex-tss-plugin.git +cd opennms-cortex-tss-plugin +mvn clean install -DskipTests + +# 2. Run the smoke tests with the KAR path +cd /path/to/opennms +./compile.pl -t -Dsmoke -Dsmoke.flaky \ + -Dcortex.kar=/path/to/opennms-cortex-tss-plugin/assembly/kar/target/opennms-cortex-tss-plugin.kar \ + --projects :smoke-test install + +# Alternative: bind-mount your entire Maven repo into the container +./compile.pl -t -Dsmoke -Dsmoke.flaky \ + -Dorg.opennms.dev.m2=$HOME/.m2/repository \ + --projects :smoke-test install +``` + +### Container Setup + +```java +@ClassRule +public static OpenNMSStack stack = OpenNMSStack.withModel(StackModel.newBuilder() + .withTimeSeriesStrategy(TimeSeriesStrategy.INTEGRATION) + .withOpenNMS(OpenNMSProfile.newBuilder() + .withInstallFeature("opennms-plugins-cortex-tss", "opennms-cortex-tss-plugin") + .build()) + .build()); +``` + +This wires up: +- **ThanosReceiveContainer** (write endpoint) and **ThanosQueryContainer** (read endpoint) in the RuleChain +- Cortex plugin config (`org.opennms.plugins.tss.cortex.cfg`) pointing at the Thanos containers +- 
`org.opennms.timeseries.strategy=integration` in system properties +- `opennms-timeseries-api` feature in Karaf boot + +### Validation Utilities + +`TimeSeriesValidationUtils` provides strategy-agnostic validation methods reusable with any TSS backend: + +- `validateResourceTree(restClient, nodeCriteria)` — verifies the resource tree is populated with child resources +- `validateMeasurements(restClient, resourceId, attribute, aggregation)` — verifies data round-trips through the measurements API +- `validateMeasurementsMetadata(restClient, resourceId, attribute)` — verifies measurement metadata (node labels, resource info) + +### Available Containers + +| Container | Class | Default Ports | Purpose | +|-----------|-------|---------------|---------| +| Thanos Receive | `ThanosReceiveContainer` | 19291 (write), 10901 (gRPC), 10902 (HTTP) | Accepts Prometheus remote write | +| Thanos Query | `ThanosQueryContainer` | 9090 (HTTP), 10903 (gRPC) | Prometheus-compatible query API | +| Prometheus | `PrometheusContainer` | 9090 (HTTP) | Standalone building block (not used by default INTEGRATION stack) | + ## Writing system tests When writing a new test, use the stack rule to setup the environment: diff --git a/smoke-test/src/main/java/org/opennms/smoketest/containers/OpenNMSContainer.java b/smoke-test/src/main/java/org/opennms/smoketest/containers/OpenNMSContainer.java index c0a82401de97..a68e93b3664d 100644 --- a/smoke-test/src/main/java/org/opennms/smoketest/containers/OpenNMSContainer.java +++ b/smoke-test/src/main/java/org/opennms/smoketest/containers/OpenNMSContainer.java @@ -92,6 +92,8 @@ public class OpenNMSContainer extends GenericContainer impleme public static final String KAFKA_ALIAS = "kafka"; public static final String ELASTIC_ALIAS = "elastic"; public static final String CASSANDRA_ALIAS = "cassandra"; + public static final String THANOS_RECEIVE_ALIAS = ThanosReceiveContainer.ALIAS; + public static final String THANOS_QUERY_ALIAS = ThanosQueryContainer.ALIAS; 
public static final String ADMIN_USER = "admin"; public static final String ADMIN_PASSWORD = "admin"; @@ -152,7 +154,8 @@ public OpenNMSContainer(StackModel model, OpenNMSProfile profile) { this.overlay = writeOverlay(); String containerCommand = "-s"; - if (TimeSeriesStrategy.NEWTS.equals(model.getTimeSeriesStrategy())) { + if (TimeSeriesStrategy.NEWTS.equals(model.getTimeSeriesStrategy()) + || TimeSeriesStrategy.INTEGRATION.equals(model.getTimeSeriesStrategy())) { this.withEnv("OPENNMS_TIMESERIES_STRATEGY", model.getTimeSeriesStrategy().name().toLowerCase()); } @@ -313,6 +316,24 @@ private void writeOverlay(Path home) throws IOException { .put("compression.type", model.getKafkaCompressionStrategy().getCodec()) .build()); } + + // Currently assumes Cortex TSS plugin; generalize when additional TSS plugins need smoke testing + if (TimeSeriesStrategy.INTEGRATION.equals(model.getTimeSeriesStrategy())) { + writeProps(etc.resolve("org.opennms.plugins.tss.cortex.cfg"), + ImmutableMap.builder() + .put("writeUrl", "http://" + THANOS_RECEIVE_ALIAS + ":" + ThanosReceiveContainer.REMOTE_WRITE_PORT + "/api/v1/receive") + .put("readUrl", "http://" + THANOS_QUERY_ALIAS + ":" + ThanosQueryContainer.HTTP_PORT + "/api/v1") + .put("maxConcurrentHttpConnections", "100") // generous for smoke test parallelism + .put("writeTimeoutInMs", "5000") + .put("readTimeoutInMs", "30000") // Thanos queries can be slow on first compaction + .put("metricCacheSize", "1000") + .put("externalTagsCacheSize", "1000") + .put("bulkheadMaxWaitDuration", String.valueOf(Long.MAX_VALUE)) // disable timeout — smoke tests should not shed load + .put("maxSeriesLookback", "31536000") // 365 days in seconds — look back far enough to find all test data + .put("useLabelValuesForDiscovery", "true") + .put("discoveryBatchSize", "50") // batch size for label-values two-phase discovery + .build()); + } } /** @@ -400,6 +421,11 @@ public Properties getSystemProperties() { props.put("org.opennms.newts.config.hostname", 
CASSANDRA_ALIAS); props.put("org.opennms.newts.config.port", Integer.toString(CassandraContainer.CQL_PORT)); props.put("org.opennms.rrd.storeByForeignSource", Boolean.TRUE.toString()); + } else if (TimeSeriesStrategy.INTEGRATION.equals(model.getTimeSeriesStrategy())) { + // Use the Integration API with a TSS plugin (e.g. Cortex/Thanos) + props.put("org.opennms.timeseries.strategy", "integration"); + props.put("org.opennms.timeseries.tin.metatags.tag.node", "${node:label}"); + props.put("org.opennms.timeseries.tin.metatags.tag.location", "${node:location}"); } if (model.isJaegerEnabled()) { @@ -436,6 +462,9 @@ public List getFeaturesOnBoot() { if (model.isJaegerEnabled()) { featuresOnBoot.add("opennms-core-tracing-jaeger"); } + if (TimeSeriesStrategy.INTEGRATION.equals(model.getTimeSeriesStrategy())) { + featuresOnBoot.add("opennms-timeseries-api"); + } return featuresOnBoot; } diff --git a/smoke-test/src/main/java/org/opennms/smoketest/containers/PrometheusContainer.java b/smoke-test/src/main/java/org/opennms/smoketest/containers/PrometheusContainer.java new file mode 100644 index 000000000000..5f12955f88d1 --- /dev/null +++ b/smoke-test/src/main/java/org/opennms/smoketest/containers/PrometheusContainer.java @@ -0,0 +1,87 @@ +/* + * Licensed to The OpenNMS Group, Inc (TOG) under one or more + * contributor license agreements. See the LICENSE.md file + * distributed with this work for additional information + * regarding copyright ownership. + * + * TOG licenses this file to You under the GNU Affero General + * Public License Version 3 (the "License") or (at your option) + * any later version. You may not use this file except in + * compliance with the License. 
You may obtain a copy of the + * License at: + * + * https://www.gnu.org/licenses/agpl-3.0.txt + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + */ +package org.opennms.smoketest.containers; + +import java.net.MalformedURLException; +import java.net.URL; + +import org.opennms.smoketest.utils.TestContainerUtils; +import org.testcontainers.containers.BindMode; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.Network; +import org.testcontainers.utility.DockerImageName; + +/** + * Vanilla Prometheus container with remote write receiver enabled. + * Provides the gold-standard Prometheus API for integration tests. + * + *

<p>This container is not started by the default INTEGRATION stack (which uses + * Thanos Receive + Query instead). It is available as a standalone building block + * for custom test stacks that want a simpler single-node Prometheus backend.</p>
+ */ +public class PrometheusContainer extends GenericContainer { + + public static final String ALIAS = "prometheus"; + public static final int HTTP_PORT = 9090; + + public PrometheusContainer() { + super(DockerImageName.parse("docker.io/prom/prometheus:v2.53.4")); + withCommand( + "--config.file=/etc/prometheus/prometheus.yml", + "--storage.tsdb.path=/prometheus", + "--storage.tsdb.retention.time=30d", + "--web.enable-remote-write-receiver", + "--web.listen-address=0.0.0.0:" + HTTP_PORT + ); + withClasspathResourceMapping("prometheus.yml", "/etc/prometheus/prometheus.yml", + BindMode.READ_ONLY); + withExposedPorts(HTTP_PORT); + withNetwork(Network.SHARED); + withNetworkAliases(ALIAS); + withCreateContainerCmdModifier(TestContainerUtils::setGlobalMemAndCpuLimits); + } + + /** + * @return the internal write URL for Prometheus remote write (for use by containers on the shared network) + */ + public String getInternalWriteUrl() { + return String.format("http://%s:%d/api/v1/write", ALIAS, HTTP_PORT); + } + + /** + * @return the internal read URL for the Prometheus query API (for use by containers on the shared network) + */ + public String getInternalReadUrl() { + return String.format("http://%s:%d/api/v1", ALIAS, HTTP_PORT); + } + + /** + * @return the external URL for the Prometheus query API (for use by the test host) + */ + public URL getExternalQueryUrl() { + try { + return new URL(String.format("http://%s:%d", getContainerIpAddress(), getMappedPort(HTTP_PORT))); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } +} diff --git a/smoke-test/src/main/java/org/opennms/smoketest/containers/ThanosQueryContainer.java b/smoke-test/src/main/java/org/opennms/smoketest/containers/ThanosQueryContainer.java new file mode 100644 index 000000000000..fb4301a51794 --- /dev/null +++ b/smoke-test/src/main/java/org/opennms/smoketest/containers/ThanosQueryContainer.java @@ -0,0 +1,73 @@ +/* + * Licensed to The OpenNMS Group, Inc (TOG) under one or more + 
* contributor license agreements. See the LICENSE.md file + * distributed with this work for additional information + * regarding copyright ownership. + * + * TOG licenses this file to You under the GNU Affero General + * Public License Version 3 (the "License") or (at your option) + * any later version. You may not use this file except in + * compliance with the License. You may obtain a copy of the + * License at: + * + * https://www.gnu.org/licenses/agpl-3.0.txt + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + */ +package org.opennms.smoketest.containers; + +import java.net.MalformedURLException; +import java.net.URL; + +import org.opennms.smoketest.utils.TestContainerUtils; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.Network; +import org.testcontainers.utility.DockerImageName; + +/** + * Thanos Query container that provides a Prometheus-compatible query API. + * Used as the read endpoint for the Cortex TSS plugin in integration tests. 
+ */ +public class ThanosQueryContainer extends GenericContainer { + + public static final String ALIAS = "thanos-query"; + public static final int HTTP_PORT = 9090; + public static final int GRPC_PORT = 10903; + + public ThanosQueryContainer() { + super(DockerImageName.parse("docker.io/thanosio/thanos:v0.35.1")); + withCommand( + "query", + "--http-address=0.0.0.0:" + HTTP_PORT, + "--grpc-address=0.0.0.0:" + GRPC_PORT, + "--store=" + ThanosReceiveContainer.ALIAS + ":" + ThanosReceiveContainer.GRPC_PORT + ); + withExposedPorts(HTTP_PORT, GRPC_PORT); + withNetwork(Network.SHARED); + withNetworkAliases(ALIAS); + withCreateContainerCmdModifier(TestContainerUtils::setGlobalMemAndCpuLimits); + } + + /** + * @return the internal URL for the Prometheus query API (for use by containers on the shared network) + */ + public String getInternalReadUrl() { + return String.format("http://%s:%d/api/v1", ALIAS, HTTP_PORT); + } + + /** + * @return the external URL for the Prometheus query API (for use by the test host) + */ + public URL getExternalQueryUrl() { + try { + return new URL(String.format("http://%s:%d", getContainerIpAddress(), getMappedPort(HTTP_PORT))); + } catch (MalformedURLException e) { + throw new RuntimeException(e); + } + } +} diff --git a/smoke-test/src/main/java/org/opennms/smoketest/containers/ThanosReceiveContainer.java b/smoke-test/src/main/java/org/opennms/smoketest/containers/ThanosReceiveContainer.java new file mode 100644 index 000000000000..53823f4c389d --- /dev/null +++ b/smoke-test/src/main/java/org/opennms/smoketest/containers/ThanosReceiveContainer.java @@ -0,0 +1,63 @@ +/* + * Licensed to The OpenNMS Group, Inc (TOG) under one or more + * contributor license agreements. See the LICENSE.md file + * distributed with this work for additional information + * regarding copyright ownership. + * + * TOG licenses this file to You under the GNU Affero General + * Public License Version 3 (the "License") or (at your option) + * any later version. 
You may not use this file except in + * compliance with the License. You may obtain a copy of the + * License at: + * + * https://www.gnu.org/licenses/agpl-3.0.txt + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + */ +package org.opennms.smoketest.containers; + +import org.opennms.smoketest.utils.TestContainerUtils; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.Network; +import org.testcontainers.utility.DockerImageName; + +/** + * Thanos Receive container that accepts Prometheus remote write requests. + * Used as the write endpoint for the Cortex TSS plugin in integration tests. + */ +public class ThanosReceiveContainer extends GenericContainer { + + public static final String ALIAS = "thanos-receive"; + public static final int REMOTE_WRITE_PORT = 19291; + public static final int GRPC_PORT = 10901; + public static final int HTTP_PORT = 10902; + + public ThanosReceiveContainer() { + super(DockerImageName.parse("docker.io/thanosio/thanos:v0.35.1")); + withCommand( + "receive", + "--tsdb.path=/data", + "--grpc-address=0.0.0.0:" + GRPC_PORT, + "--http-address=0.0.0.0:" + HTTP_PORT, + "--remote-write.address=0.0.0.0:" + REMOTE_WRITE_PORT, + "--tsdb.retention=30d", + "--label=receive_replica=\"0\"" + ); + withExposedPorts(REMOTE_WRITE_PORT, GRPC_PORT, HTTP_PORT); + withNetwork(Network.SHARED); + withNetworkAliases(ALIAS); + withCreateContainerCmdModifier(TestContainerUtils::setGlobalMemAndCpuLimits); + } + + /** + * @return the internal URL for Prometheus remote write (for use by containers on the shared network) + */ + public String getInternalRemoteWriteUrl() { + return String.format("http://%s:%d/api/v1/receive", ALIAS, REMOTE_WRITE_PORT); 
+ } +} diff --git a/smoke-test/src/main/java/org/opennms/smoketest/stacks/OpenNMSStack.java b/smoke-test/src/main/java/org/opennms/smoketest/stacks/OpenNMSStack.java index 9a0d728a2aec..1eefc6a7fb39 100644 --- a/smoke-test/src/main/java/org/opennms/smoketest/stacks/OpenNMSStack.java +++ b/smoke-test/src/main/java/org/opennms/smoketest/stacks/OpenNMSStack.java @@ -38,6 +38,8 @@ import org.opennms.smoketest.containers.OpenNMSContainer; import org.opennms.smoketest.containers.PostgreSQLContainer; import org.opennms.smoketest.containers.SentinelContainer; +import org.opennms.smoketest.containers.ThanosQueryContainer; +import org.opennms.smoketest.containers.ThanosReceiveContainer; import org.testcontainers.containers.KafkaContainer; import org.testcontainers.containers.Network; import org.testcontainers.utility.DockerImageName; @@ -101,6 +103,10 @@ public static OpenNMSStack withModel(StackModel model) { private final OpenNMSCassandraContainer cassandraContainer; + private final ThanosReceiveContainer thanosReceiveContainer; + + private final ThanosQueryContainer thanosQueryContainer; + private final List minionContainers; private final List sentinelContainers; @@ -128,6 +134,8 @@ private OpenNMSStack() { elasticsearchContainer = null; kafkaContainer = null; cassandraContainer = null; + thanosReceiveContainer = null; + thanosQueryContainer = null; opennmsContainer = new LocalOpenNMS(); minionContainers = Collections.EMPTY_LIST; sentinelContainers = Collections.EMPTY_LIST; @@ -175,6 +183,16 @@ private OpenNMSStack(StackModel model) { cassandraContainer = null; } + if (TimeSeriesStrategy.INTEGRATION.equals(model.getTimeSeriesStrategy())) { + thanosReceiveContainer = new ThanosReceiveContainer(); + chain = chain.around(thanosReceiveContainer); + thanosQueryContainer = new ThanosQueryContainer(); + chain = chain.around(thanosQueryContainer); + } else { + thanosReceiveContainer = null; + thanosQueryContainer = null; + } + opennmsContainer = new OpenNMSContainer(model, 
model.getOpenNMS()); chain = chain.around(opennmsContainer); @@ -247,6 +265,20 @@ public KafkaContainer kafka() { return kafkaContainer; } + public ThanosReceiveContainer thanosReceive() { + if (thanosReceiveContainer == null) { + throw new IllegalStateException("Thanos Receive container is not enabled in this stack. Use TimeSeriesStrategy.INTEGRATION."); + } + return thanosReceiveContainer; + } + + public ThanosQueryContainer thanosQuery() { + if (thanosQueryContainer == null) { + throw new IllegalStateException("Thanos Query container is not enabled in this stack. Use TimeSeriesStrategy.INTEGRATION."); + } + return thanosQueryContainer; + } + @Override public Statement apply(Statement base, Description description) { // Delegate to the test rule we built during initialization diff --git a/smoke-test/src/main/java/org/opennms/smoketest/stacks/TimeSeriesStrategy.java b/smoke-test/src/main/java/org/opennms/smoketest/stacks/TimeSeriesStrategy.java index f384453ea9bb..39c60f2d2a48 100644 --- a/smoke-test/src/main/java/org/opennms/smoketest/stacks/TimeSeriesStrategy.java +++ b/smoke-test/src/main/java/org/opennms/smoketest/stacks/TimeSeriesStrategy.java @@ -22,9 +22,16 @@ package org.opennms.smoketest.stacks; /** - * RRD vs Newts + * Time series persistence strategy. + * + *
<ul>
+ *   <li>{@link #RRD} - JRobin/RRDtool (default)</li>
+ *   <li>{@link #NEWTS} - Apache Cassandra via Newts</li>
+ *   <li>{@link #INTEGRATION} - Plugin-based via the OpenNMS Integration API TimeSeriesStorage SPI</li>
+ * </ul>
+ *
*/ public enum TimeSeriesStrategy { RRD, - NEWTS + NEWTS, + INTEGRATION } diff --git a/smoke-test/src/main/java/org/opennms/smoketest/utils/CortexTestUtils.java b/smoke-test/src/main/java/org/opennms/smoketest/utils/CortexTestUtils.java new file mode 100644 index 000000000000..6f58ec399519 --- /dev/null +++ b/smoke-test/src/main/java/org/opennms/smoketest/utils/CortexTestUtils.java @@ -0,0 +1,54 @@ +/* + * Licensed to The OpenNMS Group, Inc (TOG) under one or more + * contributor license agreements. See the LICENSE.md file + * distributed with this work for additional information + * regarding copyright ownership. + * + * TOG licenses this file to You under the GNU Affero General + * Public License Version 3 (the "License") or (at your option) + * any later version. You may not use this file except in + * compliance with the License. You may obtain a copy of the + * License at: + * + * https://www.gnu.org/licenses/agpl-3.0.txt + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + */ +package org.opennms.smoketest.utils; + +import java.nio.file.Path; +import java.nio.file.Paths; + +/** + * Shared utilities for Cortex TSS plugin smoke tests. + */ +public final class CortexTestUtils { + + private CortexTestUtils() {} + + /** + * Resolve the Cortex TSS plugin KAR file path. + * + *

<p>Set {@code -Dcortex.kar=/path/to/opennms-cortex-tss-plugin.kar} to provide the KAR explicitly. + * If not set, returns {@code null} (the KAR must then be available via {@code -Dorg.opennms.dev.m2}).</p>
+ * + * @return the KAR file path, or {@code null} if not set + * @throws IllegalStateException if the property is set but the file does not exist + */ + public static Path resolveKarFile() { + String karPath = System.getProperty("cortex.kar"); + if (karPath != null) { + Path p = Paths.get(karPath); + if (p.toFile().exists()) { + return p; + } + throw new IllegalStateException("cortex.kar path does not exist: " + karPath); + } + return null; + } +} diff --git a/smoke-test/src/main/java/org/opennms/smoketest/utils/TimeSeriesValidationUtils.java b/smoke-test/src/main/java/org/opennms/smoketest/utils/TimeSeriesValidationUtils.java new file mode 100644 index 000000000000..3e5aa034d0f4 --- /dev/null +++ b/smoke-test/src/main/java/org/opennms/smoketest/utils/TimeSeriesValidationUtils.java @@ -0,0 +1,165 @@ +/* + * Licensed to The OpenNMS Group, Inc (TOG) under one or more + * contributor license agreements. See the LICENSE.md file + * distributed with this work for additional information + * regarding copyright ownership. + * + * TOG licenses this file to You under the GNU Affero General + * Public License Version 3 (the "License") or (at your option) + * any later version. You may not use this file except in + * compliance with the License. You may obtain a copy of the + * License at: + * + * https://www.gnu.org/licenses/agpl-3.0.txt + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ */ +package org.opennms.smoketest.utils; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.notNullValue; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.util.Arrays; +import java.util.List; + +import org.opennms.netmgt.measurements.model.QueryRequest; +import org.opennms.netmgt.measurements.model.QueryResponse; +import org.opennms.netmgt.measurements.model.Source; +import org.opennms.netmgt.model.resource.ResourceDTO; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Strategy-agnostic validation methods for the OpenNMS time series storage pipeline. + * These assertions work regardless of the underlying TSS backend (RRD, Newts, Integration/Cortex). + * + *

<p>Usage: call these methods from any smoke test that validates the data pipeline.</p>
+ */ +public final class TimeSeriesValidationUtils { + + private static final Logger LOG = LoggerFactory.getLogger(TimeSeriesValidationUtils.class); + + private TimeSeriesValidationUtils() {} + + /** + * Validates that the resource tree for a node is populated with child resources + * that have graph-ready attributes. + * + * @param client the REST client + * @param nodeCriteria the node criteria (e.g. "selfmonitor:1") + */ + public static void validateResourceTree(RestClient client, String nodeCriteria) { + ResourceDTO resources = client.getResourcesForNode(nodeCriteria); + assertThat("Resource tree should not be null", resources, notNullValue()); + + List children = resources.getChildren().getObjects(); + assertThat("Node should have child resources", children.size(), greaterThan(0)); + + long attrCount = children.stream() + .mapToLong(c -> c.getRrdGraphAttributes().size()) + .sum(); + assertThat("Child resources should have graph attributes", attrCount, greaterThan(0L)); + + LOG.info("Resource tree validated: {} children, {} total graph attributes", children.size(), attrCount); + } + + /** + * Validates that the measurements API returns data for a given resource and attribute, + * using the specified aggregation. + * + * @param client the REST client + * @param resourceId the resource ID (e.g. "node[selfmonitor:1].nodeSnmp[]") + * @param attribute the attribute name (e.g. 
"OnmsEventCount") + * @param aggregation the aggregation function ("AVERAGE", "MAX", "MIN") + * @return the query response for further inspection + */ + public static QueryResponse validateMeasurements(RestClient client, String resourceId, + String attribute, String aggregation) { + long now = System.currentTimeMillis(); + long tenMinAgo = now - (10 * 60 * 1000); + + Source source = new Source(); + source.setLabel("test"); + source.setResourceId(resourceId); + source.setAttribute(attribute); + source.setAggregation(aggregation); + + QueryRequest request = new QueryRequest(); + request.setStart(tenMinAgo); + request.setEnd(now); + request.setStep(300000L); // 5 minutes + request.setRelaxed(true); + request.setSources(Arrays.asList(source)); + + QueryResponse response = client.getMeasurements(request); + assertThat("Measurements response should not be null", response, notNullValue()); + assertThat("Response should have timestamps", response.getTimestamps().length, greaterThan(0)); + assertThat("Response should have columns", response.getColumns().length, greaterThan(0)); + + double[] values = response.getColumns()[0].getList(); + long nonNullCount = Arrays.stream(values).filter(v -> !Double.isNaN(v)).count(); + assertThat("Should have non-NaN values with " + aggregation + " aggregation", + nonNullCount, greaterThan(0L)); + + LOG.info("Measurements validated: resourceId={}, attribute={}, aggregation={}, " + + "timestamps={}, nonNullValues={}", + resourceId, attribute, aggregation, response.getTimestamps().length, nonNullCount); + + return response; + } + + /** + * Validates that measurements metadata includes resource and node information. 
+ * + * @param client the REST client + * @param resourceId the resource ID + * @param attribute the attribute name + */ + public static void validateMeasurementsMetadata(RestClient client, String resourceId, String attribute) { + long now = System.currentTimeMillis(); + + Source source = new Source(); + source.setLabel("test"); + source.setResourceId(resourceId); + source.setAttribute(attribute); + source.setAggregation("AVERAGE"); + + QueryRequest request = new QueryRequest(); + request.setStart(now - 300000L); + request.setEnd(now); + request.setStep(60000L); + request.setRelaxed(true); + request.setSources(Arrays.asList(source)); + + QueryResponse response = client.getMeasurements(request); + assertThat("Response should not be null", response, notNullValue()); + assertThat("Response should have metadata", response.getMetadata(), notNullValue()); + assertFalse("Metadata should have resources", + response.getMetadata().getResources().isEmpty()); + assertFalse("Metadata should have nodes", + response.getMetadata().getNodes().isEmpty()); + + LOG.info("Measurements metadata validated for resourceId={}", resourceId); + } + + /** + * Validates all three supported aggregation functions (AVERAGE, MAX, MIN). + * + * @param client the REST client + * @param resourceId the resource ID + * @param attribute the attribute name + */ + public static void validateAllAggregations(RestClient client, String resourceId, String attribute) { + for (String agg : new String[]{"AVERAGE", "MAX", "MIN"}) { + validateMeasurements(client, resourceId, attribute, agg); + } + } +} diff --git a/smoke-test/src/main/resources/prometheus.yml b/smoke-test/src/main/resources/prometheus.yml new file mode 100644 index 000000000000..a3dc76c106e6 --- /dev/null +++ b/smoke-test/src/main/resources/prometheus.yml @@ -0,0 +1,5 @@ +# Minimal Prometheus config for E2E testing. +# No scrape targets needed - OpenNMS pushes via remote write. 
+global: + scrape_interval: 15s + evaluation_interval: 15s diff --git a/smoke-test/src/test/java/org/opennms/smoketest/CortexTssPluginIT.java b/smoke-test/src/test/java/org/opennms/smoketest/CortexTssPluginIT.java index 95d1fcded21c..a1a1370d95f2 100644 --- a/smoke-test/src/test/java/org/opennms/smoketest/CortexTssPluginIT.java +++ b/smoke-test/src/test/java/org/opennms/smoketest/CortexTssPluginIT.java @@ -21,33 +21,424 @@ */ package org.opennms.smoketest; +import static java.util.concurrent.TimeUnit.MINUTES; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.awaitility.Awaitility.await; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasItem; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.io.IOException; +import java.io.InputStream; +import java.net.HttpURLConnection; +import java.net.URL; import java.time.Duration; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.regex.Pattern; import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; import org.opennms.smoketest.stacks.OpenNMSStack; +import org.opennms.smoketest.stacks.StackModel; +import org.opennms.smoketest.stacks.TimeSeriesStrategy; +import org.opennms.smoketest.utils.CortexTestUtils; import org.opennms.smoketest.utils.KarafShell; import org.opennms.smoketest.utils.KarafShellUtils; +import org.opennms.smoketest.utils.RestClient; +import org.opennms.smoketest.utils.TimeSeriesValidationUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +/** + * Comprehensive smoke test for the Cortex TSS plugin with a Thanos backend. + * + *

<p>Validates the full data pipeline: write path (OpenNMS → Thanos), read path + * (Thanos → OpenNMS measurements API), resource discovery, meta tags, metric + * sanitization, label ordering, label values discovery, and plugin health.</p>
+ * + *

<p>Strategy-agnostic tests (resource tree, measurements API, aggregation) use + * {@link TimeSeriesValidationUtils} and can be reused with other TSS backends.

+ */ @org.junit.experimental.categories.Category(org.opennms.smoketest.junit.FlakyTests.class) public class CortexTssPluginIT { + + private static final Logger LOG = LoggerFactory.getLogger(CortexTssPluginIT.class); + private static final ObjectMapper MAPPER = new ObjectMapper(); + private static final Pattern PROMETHEUS_METRIC_NAME = Pattern.compile("^[a-zA-Z_:][a-zA-Z0-9_:]*$"); + private static final Pattern PROMETHEUS_LABEL_NAME = Pattern.compile("^[a-zA-Z_][a-zA-Z0-9_]*$"); + @ClassRule - public static OpenNMSStack stack = OpenNMSStack.minimal( - b -> b.withInstallFeature("opennms-plugins-cortex-tss", "opennms-cortex-tss-plugin") - ); + public static OpenNMSStack stack = OpenNMSStack.withModel(StackModel.newBuilder() + .withTimeSeriesStrategy(TimeSeriesStrategy.INTEGRATION) + .withOpenNMS(org.opennms.smoketest.stacks.OpenNMSProfile.newBuilder() + .withFile("empty-discovery-configuration.xml", "etc/discovery-configuration.xml") + .withInstallFeature("opennms-plugins-cortex-tss", "opennms-cortex-tss-plugin", CortexTestUtils.resolveKarFile()) + .build()) + .build()); + + private static volatile boolean dataReady = false; protected KarafShell karafShell = new KarafShell(stack.opennms().getSshAddress()); + protected RestClient restClient; + protected URL thanosQueryUrl; @Before - public void setUp() throws IOException, InterruptedException { - // Make sure the Karaf shell is healthy before we start + public void setUp() throws Exception { KarafShellUtils.awaitHealthCheckSucceeded(stack.opennms()); + restClient = stack.opennms().getRestClient(); + thanosQueryUrl = stack.thanosQuery().getExternalQueryUrl(); + + if (!dataReady) { + // Wait for data to appear in Thanos (self-monitoring metrics) — runs once across all tests + LOG.info("Waiting for data to appear in Thanos..."); + await("data in Thanos") + .atMost(5, MINUTES) + .pollInterval(15, SECONDS) + .until(() -> { + try { + JsonNode resp = queryThanos("/api/v1/label/resourceId/values"); + return 
resp.get("data").size() > 0; + } catch (Exception e) { + return false; + } + }); + dataReady = true; + LOG.info("Data is flowing into Thanos."); + } } @Test - public void everythingHappy() throws Exception { + public void testPluginFeatureStarted() throws Exception { karafShell.checkFeature("opennms-plugins-cortex-tss", "Started", Duration.ofSeconds(30)); } + + @Test + public void testTimeseriesApiFeature() throws Exception { + karafShell.checkFeature("opennms-timeseries-api", "Started|Uninstalled", Duration.ofSeconds(30)); + } + + @Test + public void testResourceTreePopulated() { + TimeSeriesValidationUtils.validateResourceTree(restClient, "selfmonitor:1"); + } + + @Test + public void testMeasurementsAverage() { + TimeSeriesValidationUtils.validateMeasurements(restClient, + "node[selfmonitor:1].interfaceSnmp[opennms-jvm]", + "HeapUsageUsed", "AVERAGE"); + } + + @Test + public void testMeasurementsMax() { + TimeSeriesValidationUtils.validateMeasurements(restClient, + "node[selfmonitor:1].interfaceSnmp[opennms-jvm]", + "HeapUsageUsed", "MAX"); + } + + @Test + public void testMeasurementsMin() { + TimeSeriesValidationUtils.validateMeasurements(restClient, + "node[selfmonitor:1].interfaceSnmp[opennms-jvm]", + "HeapUsageUsed", "MIN"); + } + + @Test + public void testMeasurementsMetadata() { + TimeSeriesValidationUtils.validateMeasurementsMetadata(restClient, + "node[selfmonitor:1].nodeSnmp[]", "OnmsEventCount"); + } + + @Test + public void testMetricsFlowingIntoThanos() throws Exception { + JsonNode resp = queryThanos("/api/v1/label/__name__/values"); + int metricCount = resp.get("data").size(); + assertThat("Should have metrics in Thanos", metricCount, greaterThan(0)); + LOG.info("Thanos has {} unique metric names", metricCount); + } + + @Test + public void testResourceIdsWritten() throws Exception { + JsonNode resp = queryThanos("/api/v1/label/resourceId/values"); + int count = resp.get("data").size(); + assertThat("Should have resourceIds in Thanos", count, 
greaterThan(0)); + LOG.info("Thanos has {} unique resourceIds", count); + } + + @Test + public void testMultipleResourceTypesPresent() throws Exception { + JsonNode resp = queryThanos("/api/v1/label/resourceId/values"); + Set<String> prefixes = new HashSet<>(); + for (JsonNode rid : resp.get("data")) { + String val = rid.asText(); + if (val.contains("/")) { + prefixes.add(val.substring(0, val.indexOf('/'))); + } + } + assertThat("Should have snmp resources", prefixes, hasItem("snmp")); + assertThat("Should have multiple resource types", prefixes.size(), greaterThan(1)); + LOG.info("Resource types found: {}", prefixes); + } + + @Test + public void testMetaTagsWrittenAsLabels() throws Exception { + JsonNode resp = queryThanos("/api/v1/labels"); + List<String> labels = new ArrayList<>(); + for (JsonNode label : resp.get("data")) { + labels.add(label.asText()); + } + assertTrue("Should have 'node' label", labels.contains("node")); + assertTrue("Should have 'location' label", labels.contains("location")); + assertTrue("Should have 'mtype' label", labels.contains("mtype")); + } + + @Test + public void testMetaTagValuesPopulated() throws Exception { + JsonNode nodeVals = queryThanos("/api/v1/label/node/values").get("data"); + JsonNode locVals = queryThanos("/api/v1/label/location/values").get("data"); + assertThat("Should have node label values", nodeVals.size(), greaterThan(0)); + assertThat("Should have location label values", locVals.size(), greaterThan(0)); + } + + @Test + public void testMetaTagsOnSeries() throws Exception { + JsonNode resp = queryThanos("/api/v1/series?match[]={resourceId=~\"snmp/.*opennms-jvm.*\"}"); + JsonNode series = resp.get("data"); + assertThat("Should have series", series.size(), greaterThan(0)); + for (int i = 0; i < Math.min(10, series.size()); i++) { + JsonNode s = series.get(i); + assertTrue("Series should have 'node' tag: " + s, s.has("node")); + assertTrue("Series should have 'location' tag: " + s, s.has("location")); + assertTrue("Series should have
'mtype' tag: " + s, s.has("mtype")); + } + } + + @Test + public void testAllMetricNamesPrometheusValid() throws Exception { + JsonNode resp = queryThanos("/api/v1/label/__name__/values"); + for (JsonNode name : resp.get("data")) { + String metricName = name.asText(); + assertTrue("Metric name should match Prometheus pattern: " + metricName, + PROMETHEUS_METRIC_NAME.matcher(metricName).matches()); + } + } + + @Test + public void testAllLabelNamesPrometheusValid() throws Exception { + JsonNode resp = queryThanos("/api/v1/series?match[]={resourceId=~\"snmp/.*\"}"); + Set<String> allLabels = new HashSet<>(); + JsonNode series = resp.get("data"); + for (int i = 0; i < Math.min(50, series.size()); i++) { + series.get(i).fieldNames().forEachRemaining(allLabels::add); + } + for (String label : allLabels) { + assertTrue("Label name should match Prometheus pattern: " + label, + PROMETHEUS_LABEL_NAME.matcher(label).matches()); + } + } + + @Test + public void testLabelsLexicographicallyOrdered() throws Exception { + // Prometheus remote write requires labels to be sorted. We verify this by querying + // stored series and checking that every series' label names are in sorted order. + // Note: Thanos/Prometheus typically return labels sorted regardless of write order, + // so this is a sanity check rather than a strict write-path validation. 
+ JsonNode resp = queryThanos("/api/v1/series?match[]={resourceId=~\"snmp/.*\"}"); + JsonNode series = resp.get("data"); + assertThat("Should have series to check", series.size(), greaterThan(0)); + int checked = 0; + for (int i = 0; i < Math.min(20, series.size()); i++) { + List<String> keys = new ArrayList<>(); + series.get(i).fieldNames().forEachRemaining(keys::add); + List<String> sorted = new ArrayList<>(keys); + sorted.sort(String::compareTo); + assertEquals("Labels should be lexicographically ordered for series " + i, sorted, keys); + checked++; + } + LOG.info("Checked label ordering on {} series", checked); + } + + @Test + public void testSeriesApiWildcard() throws Exception { + JsonNode resp = queryThanos("/api/v1/series?match[]={resourceId=~\"^snmp/.*$\"}"); + assertThat("Should return series", resp.get("data").size(), greaterThan(0)); + } + + @Test + public void testSeriesContainExpectedFields() throws Exception { + JsonNode resp = queryThanos("/api/v1/series?match[]={resourceId=~\"^snmp/.*$\"}"); + JsonNode first = resp.get("data").get(0); + assertTrue("Series should have __name__", first.has("__name__")); + assertTrue("Series should have resourceId", first.has("resourceId")); + assertTrue("Series should have mtype", first.has("mtype")); + } + + @Test + public void testLabelValuesApi() throws Exception { + JsonNode resp = queryThanos("/api/v1/label/resourceId/values"); + assertEquals("success", resp.get("status").asText()); + assertThat("Should return resourceIds", resp.get("data").size(), greaterThan(0)); + } + + @Test + public void testLabelValuesFilteredByMatch() throws Exception { + JsonNode resp = queryThanos("/api/v1/label/resourceId/values?match[]={resourceId=~\"^snmp/.*$\"}"); + JsonNode data = resp.get("data"); + assertThat("Should return filtered resourceIds", data.size(), greaterThan(0)); + for (JsonNode rid : data) { + assertTrue("Filtered resourceId should start with 'snmp/': " + rid.asText(), + rid.asText().startsWith("snmp/")); + } + } + + @Test + public 
void testTwoPhaseDiscoveryMatchesWildcard() throws Exception { + // Old approach: wildcard /series + JsonNode oldResp = queryThanos("/api/v1/series?match[]={resourceId=~\"^snmp/.*$\"}"); + Set<String> oldRids = new HashSet<>(); + for (JsonNode s : oldResp.get("data")) { + oldRids.add(s.get("resourceId").asText()); + } + + // New approach: label values + batched /series + JsonNode lvResp = queryThanos("/api/v1/label/resourceId/values?match[]={resourceId=~\"^snmp/.*$\"}"); + Set<String> newRids = new HashSet<>(); + for (JsonNode rid : lvResp.get("data")) { + newRids.add(rid.asText()); + } + + // Use containsAll in both directions rather than assertEquals to tolerate + // metrics written between the two sequential queries (eventual consistency) + assertTrue("Label values should contain all /series resourceIds", + newRids.containsAll(oldRids)); + assertTrue("/series resourceIds should contain all label values", + oldRids.containsAll(newRids)); + LOG.info("Two-phase discovery matches: {} resourceIds", oldRids.size()); + } + + @Test + public void testMtypeValuesValid() throws Exception { + JsonNode resp = queryThanos("/api/v1/label/mtype/values"); + JsonNode data = resp.get("data"); + assertThat("Should have at least one mtype value", data.size(), greaterThan(0)); + Set<String> knownTypes = Set.of("gauge", "counter", "count"); + Set<String> actualTypes = new HashSet<>(); + for (JsonNode mtype : data) { + assertTrue("mtype should not be empty", !mtype.asText().isEmpty()); + actualTypes.add(mtype.asText()); + } + // Verify at least one known type is present (don't fail on new types) + assertTrue("Should contain at least one known mtype (gauge, counter, count)", + actualTypes.stream().anyMatch(knownTypes::contains)); + } + + @Test + public void testSeriesIntegrity() throws Exception { + JsonNode resp = queryThanos("/api/v1/series?match[]={resourceId=~\"snmp/.*\"}"); + JsonNode series = resp.get("data"); + int checked = 0; + for (int i = 0; i < Math.min(100, series.size()); i++) { + JsonNode s = 
series.get(i); + assertTrue("Series should have __name__", s.has("__name__") && !s.get("__name__").asText().isEmpty()); + assertTrue("Series should have resourceId", s.has("resourceId") && !s.get("resourceId").asText().isEmpty()); + checked++; + } + LOG.info("Series integrity check passed for {} series", checked); + } + + @Test + public void testResourceIdConsistency() throws Exception { + // Compare resourceIds from label values API vs /series + JsonNode lvResp = queryThanos("/api/v1/label/resourceId/values"); + Set<String> lvRids = new HashSet<>(); + for (JsonNode rid : lvResp.get("data")) { + lvRids.add(rid.asText()); + } + + JsonNode seriesResp = queryThanos("/api/v1/series?match[]={resourceId=~\".+\"}"); + Set<String> seriesRids = new HashSet<>(); + for (JsonNode s : seriesResp.get("data")) { + seriesRids.add(s.get("resourceId").asText()); + } + + // Use containsAll in both directions rather than assertEquals to tolerate + // metrics written between the two sequential queries (eventual consistency) + assertTrue("Label values should contain all /series resourceIds", + lvRids.containsAll(seriesRids)); + assertTrue("/series resourceIds should contain all label values", + seriesRids.containsAll(lvRids)); + } + + @Test + public void testRangeQueryReturnsData() throws Exception { + long now = System.currentTimeMillis() / 1000; + long fiveMinAgo = now - 300; + String url = String.format("/api/v1/query_range?query={__name__=~\".+\",resourceId=~\"snmp/.*opennms-jvm.*\"}&start=%d&end=%d&step=60", + fiveMinAgo, now); + JsonNode resp = queryThanos(url); + JsonNode results = resp.get("data").get("result"); + assertThat("Range query should return series", results.size(), greaterThan(0)); + } + + @Test + public void testExactResourceIdQuery() throws Exception { + // Get a specific resourceId + JsonNode lvResp = queryThanos("/api/v1/label/resourceId/values"); + String targetRid = null; + for (JsonNode rid : lvResp.get("data")) { + if (rid.asText().startsWith("snmp/") && 
rid.asText().contains("opennms-jvm")) { + targetRid = rid.asText(); + break; + } + } + if (targetRid == null) { + targetRid = lvResp.get("data").get(0).asText(); + } + + long now = System.currentTimeMillis() / 1000; + String url = String.format("/api/v1/query_range?query={resourceId=\"%s\"}&start=%d&end=%d&step=60", + targetRid, now - 300, now); + JsonNode resp = queryThanos(url); + JsonNode results = resp.get("data").get("result"); + assertThat("Exact match query should return series", results.size(), greaterThan(0)); + + // Verify all returned series have the exact resourceId + for (JsonNode r : results) { + assertEquals("All series should match exact resourceId", + targetRid, r.get("metric").get("resourceId").asText()); + } + } + + private JsonNode queryThanos(String path) throws Exception { + String urlStr = thanosQueryUrl.toString() + path; + HttpURLConnection conn = (HttpURLConnection) new URL(urlStr).openConnection(); + conn.setConnectTimeout(30_000); + conn.setReadTimeout(30_000); + try { + int code = conn.getResponseCode(); + if (code != 200) { + String error = ""; + try (InputStream es = conn.getErrorStream()) { + if (es != null) { + error = new String(es.readAllBytes()); + } + } + throw new IOException("Thanos query failed (HTTP " + code + "): " + error); + } + try (InputStream is = conn.getInputStream()) { + return MAPPER.readTree(is); + } + } finally { + conn.disconnect(); + } + } } diff --git a/smoke-test/src/test/java/org/opennms/smoketest/CortexTssTimeseriesPluginIT.java b/smoke-test/src/test/java/org/opennms/smoketest/CortexTssTimeseriesPluginIT.java index a474c110b1ac..d13afc13c862 100644 --- a/smoke-test/src/test/java/org/opennms/smoketest/CortexTssTimeseriesPluginIT.java +++ b/smoke-test/src/test/java/org/opennms/smoketest/CortexTssTimeseriesPluginIT.java @@ -28,15 +28,24 @@ import org.junit.ClassRule; import org.junit.Test; import org.opennms.smoketest.stacks.OpenNMSStack; +import org.opennms.smoketest.utils.CortexTestUtils; import 
org.opennms.smoketest.utils.KarafShell; import org.opennms.smoketest.utils.KarafShellUtils; +/** + * Minimal smoke test: verifies the Cortex TSS plugin feature installs and starts. + * Does not validate data flow (see {@link CortexTssPluginIT} for comprehensive tests). + * + *

<p>Requires the Cortex TSS plugin KAR. Set {@code -Dcortex.kar=/path/to/opennms-cortex-tss-plugin.kar} + * or use {@code -Dorg.opennms.dev.m2=$HOME/.m2/repository} to make it available.

+ */ @org.junit.experimental.categories.Category(org.opennms.smoketest.junit.FlakyTests.class) public class CortexTssTimeseriesPluginIT { + @ClassRule public static OpenNMSStack stack = OpenNMSStack.minimal( b -> b.withInstallFeature("opennms-timeseries-api"), - b -> b.withInstallFeature("opennms-plugins-cortex-tss", "opennms-cortex-tss-plugin") + b -> b.withInstallFeature("opennms-plugins-cortex-tss", "opennms-cortex-tss-plugin", CortexTestUtils.resolveKarFile()) ); protected KarafShell karafShell = new KarafShell(stack.opennms().getSshAddress());