From 9c517321a1c5cd973143dc05841653cf67b9983a Mon Sep 17 00:00:00 2001 From: Michael Wolf Date: Tue, 1 Jul 2025 10:31:50 -0700 Subject: [PATCH 1/4] Update new package README template Update the README templates and generator used in new package creation to match the new standardized README structure. The updated template should be better at helping users write the readme, by explaining what is needed in each section of the template. This also makes a few other small changes which will make it easier for new users to create integration packages. It moves the generated readme to `_dev/build/docs`, adds a "Do not modify" message to the README generated during `build`, and adds a default sample-event (which is needed to build the update readme template). --- internal/docs/readme.go | 8 ++ .../_static/package-docs-readme.md.tmpl | 112 ++++++++++-------- .../_static/package-sample-event.json.tmpl | 3 + internal/packages/archetype/package.go | 8 +- internal/packages/archetype/resources.go | 3 + 5 files changed, 81 insertions(+), 53 deletions(-) create mode 100644 internal/packages/archetype/_static/package-sample-event.json.tmpl diff --git a/internal/docs/readme.go b/internal/docs/readme.go index 256b7d2d9e..bf3036d6ef 100644 --- a/internal/docs/readme.go +++ b/internal/docs/readme.go @@ -27,6 +27,11 @@ type ReadmeFile struct { Error error } +const ( + doNotModifyStr string = ` +` +) + // AreReadmesUpToDate function checks if all the .md readme files are up-to-date. 
func AreReadmesUpToDate() ([]ReadmeFile, error) { packageRoot, err := packages.MustFindPackageRoot() @@ -204,6 +209,9 @@ func renderReadme(fileName, packageRoot, templatePath string, linksMap linkMap) } return linksMap.RenderLink(args[0], options) }, + "header": func(args ...string) (string, error) { + return doNotModifyStr, nil + }, }).ParseFiles(templatePath) if err != nil { return nil, fmt.Errorf("parsing README template failed (path: %s): %w", templatePath, err) diff --git a/internal/packages/archetype/_static/package-docs-readme.md.tmpl b/internal/packages/archetype/_static/package-docs-readme.md.tmpl index 003c89f703..25970dde12 100644 --- a/internal/packages/archetype/_static/package-docs-readme.md.tmpl +++ b/internal/packages/archetype/_static/package-docs-readme.md.tmpl @@ -1,84 +1,92 @@ - - + - +{{ `{{header}}` }} +# {{.Manifest.Title}} for Elastic -Use the {{.Manifest.Title}} integration to {purpose}. Then visualize that data in Kibana, create alerts to notify you if something goes wrong, and reference {data stream type} when troubleshooting an issue. +## Overview -For example, if you wanted to {sample use case} you could {action}. Then you can {visualize|alert|troubleshoot} by {action}. --> + +The {{.Manifest.Title}} for Elastic integration enables collection of ... +This integration facilitates ... -## Data streams +### Compatibility - + +This integration is compatible with ... - - +### How it works - - + - +## What data does this integration collect? -## Requirements + +The {{.Manifest.Title}} integration collects log messages of the following types: +* ... -You need Elasticsearch for storing and searching your data and Kibana for visualizing and managing it. -You can use our hosted Elasticsearch Service on Elastic Cloud, which is recommended, or self-manage the Elastic Stack on your own hardware. +### Supported use cases - + + +## What do I need to use this integration? -## Setup +Elastic Agent must be installed. 
For more details, check the Elastic Agent [installation instructions](docs-content://reference/fleet/install-elastic-agents.md). You can install only one Elastic Agent per host. - +Elastic Agent is required to stream data from the syslog or log file receiver and ship the data to Elastic, where the events will then be processed via the integration's ingest pipelines. -For step-by-step instructions on how to set up an integration, see the -[Getting started](https://www.elastic.co/guide/en/welcome-to-elastic/current/getting-started-observability.html) guide. + - +## How do I deploy this integration? - - +### Onboard / configure + + - - + - - -An example event for `{data stream name}` looks as following: +## Scaling -{code block with example} --> +For more information on architectures that can be used for scaling this integration, check the [Ingest Architectures](https://www.elastic.co/docs/manage-data/ingest/ingest-reference-architectures) documentation. - -{insert table} --> +## Reference - - +### ECS field Reference - - +### Sample Event - - + +These inputs can be used with this integration: +* ... - + +These APIs are used with this integration: +* ... diff --git a/internal/packages/archetype/_static/package-sample-event.json.tmpl b/internal/packages/archetype/_static/package-sample-event.json.tmpl new file mode 100644 index 0000000000..71b53c2c32 --- /dev/null +++ b/internal/packages/archetype/_static/package-sample-event.json.tmpl @@ -0,0 +1,3 @@ +{ + "description": "This is an example sample-event for {{.Manifest.Title}}. Replace it with a real sample event. Hint: If system tests exist, running `elastic-package test system --generate` will generate this file." 
+} diff --git a/internal/packages/archetype/package.go b/internal/packages/archetype/package.go index 19da014fd0..700478848c 100644 --- a/internal/packages/archetype/package.go +++ b/internal/packages/archetype/package.go @@ -49,7 +49,7 @@ func createPackageInDir(packageDescriptor PackageDescriptor, cwd string) error { } logger.Debugf("Write docs readme") - err = renderResourceFile(packageDocsReadme, &packageDescriptor, filepath.Join(baseDir, "docs", "README.md")) + err = renderResourceFile(packageDocsReadme, &packageDescriptor, filepath.Join(baseDir, "_dev", "build", "docs", "README.md")) if err != nil { return fmt.Errorf("can't render package README: %w", err) } @@ -78,6 +78,12 @@ func createPackageInDir(packageDescriptor PackageDescriptor, cwd string) error { return fmt.Errorf("can't render sample screenshot: %w", err) } + logger.Debugf("Write sample sample_event") + err = renderResourceFile(packageSampleEvent, &packageDescriptor, filepath.Join(baseDir, "sample_event.json")) + if err != nil { + return fmt.Errorf("can't render sample sample_event: %w", err) + } + if packageDescriptor.Manifest.Type == "input" { logger.Debugf("Write base fields") err = renderResourceFile(fieldsBaseTemplate, &packageDescriptor, filepath.Join(baseDir, "fields", "base-fields.yml")) diff --git a/internal/packages/archetype/resources.go b/internal/packages/archetype/resources.go index 661d994f83..de3ef2b3cf 100644 --- a/internal/packages/archetype/resources.go +++ b/internal/packages/archetype/resources.go @@ -33,6 +33,9 @@ var packageImgSampleIcon []byte //go:embed _static/sampleScreenshot.png.b64 var packageImgSampleScreenshot string +//go:embed _static/package-sample-event.json.tmpl +var packageSampleEvent string + // Input Package templates //go:embed _static/input-package-agent-config.yml.tmpl From c2323d263fd07d6d6bfebfdec2c5e47c3afb8218 Mon Sep 17 00:00:00 2001 From: Michael Wolf Date: Tue, 1 Jul 2025 16:09:15 -0700 Subject: [PATCH 2/4] Only create new files in integration 
packages --- internal/packages/archetype/package.go | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/internal/packages/archetype/package.go b/internal/packages/archetype/package.go index 700478848c..920ea42f3c 100644 --- a/internal/packages/archetype/package.go +++ b/internal/packages/archetype/package.go @@ -47,13 +47,21 @@ func createPackageInDir(packageDescriptor PackageDescriptor, cwd string) error { if err != nil { return fmt.Errorf("can't render package changelog: %w", err) } - + logger.Debugf("Write docs readme") - err = renderResourceFile(packageDocsReadme, &packageDescriptor, filepath.Join(baseDir, "_dev", "build", "docs", "README.md")) + err = renderResourceFile(packageDocsReadme, &packageDescriptor, filepath.Join(baseDir, "docs", "README.md")) if err != nil { return fmt.Errorf("can't render package README: %w", err) } + if packageDescriptor.Manifest.Type == "integration" { + logger.Debugf("Write docs readme to _dev") + err = renderResourceFile(packageDocsReadme, &packageDescriptor, filepath.Join(baseDir, "_dev", "build", "docs", "README.md")) + if err != nil { + return fmt.Errorf("can't render package README in _dev: %w", err) + } + } + if license := packageDescriptor.Manifest.Source.License; license != "" { logger.Debugf("Write license file") err = licenses.WriteTextToFile(license, filepath.Join(baseDir, "LICENSE.txt")) @@ -78,10 +86,12 @@ func createPackageInDir(packageDescriptor PackageDescriptor, cwd string) error { return fmt.Errorf("can't render sample screenshot: %w", err) } - logger.Debugf("Write sample sample_event") - err = renderResourceFile(packageSampleEvent, &packageDescriptor, filepath.Join(baseDir, "sample_event.json")) - if err != nil { - return fmt.Errorf("can't render sample sample_event: %w", err) + if packageDescriptor.Manifest.Type == "integration" { + logger.Debugf("Write sample sample_event") + err = renderResourceFile(packageSampleEvent, &packageDescriptor, filepath.Join(baseDir, 
"sample_event.json")) + if err != nil { + return fmt.Errorf("can't render sample sample_event: %w", err) + } } if packageDescriptor.Manifest.Type == "input" { From 65c93b4b179729d5c68ce1811b9bbd05ecfdb08e Mon Sep 17 00:00:00 2001 From: Michael Wolf Date: Tue, 1 Jul 2025 16:13:46 -0700 Subject: [PATCH 3/4] Fix whitespace --- internal/packages/archetype/package.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/packages/archetype/package.go b/internal/packages/archetype/package.go index 920ea42f3c..856b955d05 100644 --- a/internal/packages/archetype/package.go +++ b/internal/packages/archetype/package.go @@ -47,7 +47,7 @@ func createPackageInDir(packageDescriptor PackageDescriptor, cwd string) error { if err != nil { return fmt.Errorf("can't render package changelog: %w", err) } - + logger.Debugf("Write docs readme") err = renderResourceFile(packageDocsReadme, &packageDescriptor, filepath.Join(baseDir, "docs", "README.md")) if err != nil { From 70e4bfddee4d89236a0c604d2e164748a43c072e Mon Sep 17 00:00:00 2001 From: Michael Wolf Date: Wed, 2 Jul 2025 08:57:53 -0700 Subject: [PATCH 4/4] Use new template for the test apache package readme Rewrite the readme for the test apache package to use the new docs format. NOTE: This format isn't used in the published apache integration. 
--- .../_static/package-docs-readme.md.tmpl | 4 +- .../parallel/apache/_dev/build/docs/README.md | 133 ++++- test/packages/parallel/apache/docs/README.md | 541 +++++------------- 3 files changed, 244 insertions(+), 434 deletions(-) diff --git a/internal/packages/archetype/_static/package-docs-readme.md.tmpl b/internal/packages/archetype/_static/package-docs-readme.md.tmpl index 25970dde12..5aadc761d5 100644 --- a/internal/packages/archetype/_static/package-docs-readme.md.tmpl +++ b/internal/packages/archetype/_static/package-docs-readme.md.tmpl @@ -6,12 +6,12 @@ Find more detailed documentation guidelines in https://www.elastic.co/docs/exten {{ `{{header}}` }} -# {{.Manifest.Title}} for Elastic +# {{.Manifest.Title}} Integration for Elastic ## Overview -The {{.Manifest.Title}} for Elastic integration enables collection of ... +The {{.Manifest.Title}} integration for Elastic enables collection of ... This integration facilitates ... ### Compatibility diff --git a/test/packages/parallel/apache/_dev/build/docs/README.md b/test/packages/parallel/apache/_dev/build/docs/README.md index f57e928c13..31f3242ed5 100644 --- a/test/packages/parallel/apache/_dev/build/docs/README.md +++ b/test/packages/parallel/apache/_dev/build/docs/README.md @@ -1,34 +1,129 @@ -# Apache Integration +{{ `{{header}}` }} +# Apache Integration for Elastic -This integration periodically fetches metrics from [Apache](https://httpd.apache.org/) servers. It can parse access and error -logs created by the Apache server. +## Overview -## Compatibility +The Apache integration for Elastic enables collection of access logs, error logs and metrics from [Apache](https://httpd.apache.org/) servers. +This integration facilitates performance monitoring, understanding traffic patterns and gaining security insights from +your Apache servers. -The Apache datasets were tested with Apache 2.4.12 and 2.4.46 and are expected to work with -all versions >= 2.2.31 and >= 2.4.16 (independent from operating system). 
+### Compatibility -## Logs +This integration is compatible with all Apache server versions >= 2.4.16 and >= 2.2.31. -### Access Logs +### How it works -Access logs collects the Apache access logs. +Access and error logs data stream read from a logfile. You will configure Apache server to write log files to a location that is readable by elastic-agent, and +elastic-agent will monitor and ingest events written to these files. -{{fields "access"}} +## What data does this integration collect? -### Error Logs +The {{.Manifest.Title}} integration collects log messages of the following types: -Error logs collects the Apache error logs. +The Apache HTTP Server integration collects log messages of the following types: -{{fields "error"}} +* [Error logs](https://httpd.apache.org/docs/current/logs.html#errorlog) +* [Access logs](https://httpd.apache.org/docs/current/logs.html#accesslog) +* Status metrics -## Metrics +### Supported use cases -### Status Metrics +The Apache integration for Elastic allows you to collect, parse, and analyze Apache web server logs and metrics within the Elastic Stack. +It centralizes data like access logs, error logs, and performance metrics, transforming unstructured information into a structured, searchable format. +This enables users to monitor performance, troubleshoot issues, gain security insights, and understand website traffic patterns through powerful visualizations in Kibana. +Ultimately, it simplifies the management and analysis of critical data for maintaining a healthy and secure web infrastructure. -The server status stream collects data from the Apache Status module. It scrapes the status data from the web page -generated by the `mod_status` module. +## What do I need to use this integration? -{{event "status"}} +Elastic Agent must be installed. For more details, check the Elastic Agent [installation instructions](docs-content://reference/fleet/install-elastic-agents.md). You can install only one Elastic Agent per host. 
-{{fields "status"}} +Elastic Agent is required to stream data from the syslog or log file receiver and ship the data to Elastic, where the events will then be processed via the integration's ingest pipelines. + + +## How do I deploy this integration? + +### Onboard / configure + +#### Collect access and error logs + +Follow [Apache server instructions](https://httpd.apache.org/docs/2.4/logs.html) to write error or access logs to a location readable by elastic-agent. +Elastic-agent can run on the same system as your Apache server, or the logs can be forwarded to a different system running elastic-agent. + +#### Collect metrics + +Follow the [Apache server instructions](https://httpd.apache.org/docs/2.4/mod/mod_status.html) to enable the Status module. + +#### Enable the integration in Elastic + +1. In Kibana navigate to **Management** > **Integrations**. +2. In the search bar, type **Apache HTTP Server**. +3. Select the **Apache HTTP Server** integration and add it. +4. If needed, install Elastic Agent on the systems which will receive error or access log files. +5. Enable and configure only the collection methods which you will use. + + * **To collect logs from Apache instances**, you'll need to add log file path patterns elastic-agent will monitor. + + * **To collect metrics**, you'll need to configure the Apache hosts which will be monitored. + +6. Press **Save Integration** to begin collecting logs. + +#### Anomaly Detection Configurations + +The Apache HTTP server integration also has support of anomaly detection jobs. + +These anomaly detection jobs are available in the Machine Learning app in Kibana +when you have data that matches the query specified in the +[manifest](https://github.com/elastic/integrations/blob/main/packages/apache/kibana/ml_module/apache-Logs-ml.json#L11). + +##### Apache Access Logs + +Find unusual activity in HTTP access logs. 
+ +| Job | Description | +|---|---| +| visitor_rate_apache | HTTP Access Logs: Detect unusual visitor rates | +| status_code_rate_apache | HTTP Access Logs: Detect unusual status code rates | +| source_ip_url_count_apache | HTTP Access Logs: Detect unusual source IPs - high distinct count of URLs | +| source_ip_request_rate_apache | HTTP Access Logs: Detect unusual source IPs - high request rates | +| low_request_rate_apache | HTTP Access Logs: Detect low request rates | + +### Validation + + +The "[Logs Apache] Access and error logs" and "[Metrics Apache] Overview" dashboards will show activity for your Apache servers. +After the integration is installed, view these dashboards in Kibana and verify that information for servers is shown. + +## Troubleshooting + +For help with Elastic ingest tools, check [Common problems](https://www.elastic.co/docs/troubleshoot/ingest/fleet/common-problems). + + + +## Scaling + +For more information on architectures that can be used for scaling this integration, check the [Ingest Architectures](https://www.elastic.co/docs/manage-data/ingest/ingest-reference-architectures) documentation. + +## Reference + +### ECS field Reference + +{{ `{{fields}}` }} + +### Sample Event + +{{ `{{event}}` }} + +### Inputs used + +These inputs can be used with this integration: +* [logfile](https://www.elastic.co/docs/reference/integrations/filestream) +* [apache/metrics](https://www.elastic.co/docs/reference/beats/metricbeat/metricbeat-metricset-apache-status) + +### API usage + +These APIs are used with this integration: +* Metrics are collected using the [mod_status](https://httpd.apache.org/docs/current/mod/mod_status.html) Apache module. 
diff --git a/test/packages/parallel/apache/docs/README.md b/test/packages/parallel/apache/docs/README.md index 411b02c509..3594c3e428 100644 --- a/test/packages/parallel/apache/docs/README.md +++ b/test/packages/parallel/apache/docs/README.md @@ -1,414 +1,129 @@ -# Apache Integration - -This integration periodically fetches metrics from [Apache](https://httpd.apache.org/) servers. It can parse access and error -logs created by the Apache server. - -## Compatibility - -The Apache datasets were tested with Apache 2.4.12 and 2.4.46 and are expected to work with -all versions >= 2.2.31 and >= 2.4.16 (independent from operating system). - -## Logs - -### Access Logs - -Access logs collects the Apache access logs. - -**Exported fields** - -| Field | Description | Type | -|---|---|---| -| @timestamp | Event timestamp. | date | -| apache.access.ssl.cipher | SSL cipher name. | keyword | -| apache.access.ssl.protocol | SSL protocol version. | keyword | -| cloud.account.id | The cloud account or organization id used to identify different entities in a multi-tenant environment. Examples: AWS account id, Google Cloud ORG Id, or other unique identifier. | keyword | -| cloud.availability_zone | Availability zone in which this host is running. | keyword | -| cloud.image.id | Image ID for the cloud instance. | keyword | -| cloud.instance.id | Instance ID of the host machine. | keyword | -| cloud.instance.name | Instance name of the host machine. | keyword | -| cloud.machine.type | Machine type of the host machine. | keyword | -| cloud.project.id | Name of the project in Google Cloud. | keyword | -| cloud.provider | Name of the cloud provider. Example values are aws, azure, gcp, or digitalocean. | keyword | -| cloud.region | Region in which this host is running. | keyword | -| container.id | Unique container id. | keyword | -| container.image.name | Name of the image the container was built on. | keyword | -| container.labels | Image labels. 
| object | -| container.name | Container name. | keyword | -| data_stream.dataset | Data stream dataset. | constant_keyword | -| data_stream.namespace | Data stream namespace. | constant_keyword | -| data_stream.type | Data stream type. | constant_keyword | -| destination.domain | The domain name of the destination system. This value may be a host name, a fully qualified domain name, or another host naming format. The value may derive from the original event or be added from enrichment. | keyword | -| ecs.version | ECS version this event conforms to. `ecs.version` is a required field and must exist in all events. When querying across multiple indices -- which may conform to slightly different ECS versions -- this field lets integrations adjust to the schema version of the events. | keyword | -| error.message | Error message. | match_only_text | -| event.category | This is one of four ECS Categorization Fields, and indicates the second level in the ECS category hierarchy. `event.category` represents the "big buckets" of ECS categories. For example, filtering on `event.category:process` yields all events relating to process activity. This field is closely related to `event.type`, which is used as a subcategory. This field is an array. This will allow proper categorization of some events that fall in multiple categories. | keyword | -| event.created | event.created contains the date/time when the event was first read by an agent, or by your pipeline. This field is distinct from @timestamp in that @timestamp typically contain the time extracted from the original event. In most situations, these two timestamps will be slightly different. The difference can be used to calculate the delay between your source generating an event, and the time when your agent first processed it. This can be used to monitor your agent's or pipeline's ability to keep up with your event source. In case the two timestamps are identical, @timestamp should be used. 
| date | -| event.dataset | Event dataset | constant_keyword | -| event.kind | This is one of four ECS Categorization Fields, and indicates the highest level in the ECS category hierarchy. `event.kind` gives high-level information about what type of information the event contains, without being specific to the contents of the event. For example, values of this field distinguish alert events from metric events. The value of this field can be used to inform how these kinds of events should be handled. They may warrant different retention, different access control, it may also help understand whether the data coming in at a regular interval or not. | keyword | -| event.module | Event module | constant_keyword | -| event.original | Raw text message of entire event. Used to demonstrate log integrity or where the full log message (before splitting it up in multiple parts) may be required, e.g. for reindex. This field is not indexed and doc_values are disabled. It cannot be searched, but it can be retrieved from `_source`. If users wish to override this and index this field, please see `Field data types` in the `Elasticsearch Reference`. | keyword | -| event.outcome | This is one of four ECS Categorization Fields, and indicates the lowest level in the ECS category hierarchy. `event.outcome` simply denotes whether the event represents a success or a failure from the perspective of the entity that produced the event. Note that when a single transaction is described in multiple events, each event may populate different values of `event.outcome`, according to their perspective. Also note that in the case of a compound event (a single event that contains multiple logical events), this field should be populated with the value that best captures the overall success or failure from the perspective of the event producer. Further note that not all events will have an associated outcome. 
For example, this field is generally not populated for metric events, events with `event.type:info`, or any events for which an outcome does not make logical sense. | keyword | -| file.path | Full path to the file, including the file name. It should include the drive letter, when appropriate. | keyword | -| file.path.text | Multi-field of `file.path`. | match_only_text | -| host.architecture | Operating system architecture. | keyword | -| host.containerized | If the host is a container. | boolean | -| host.domain | Name of the domain of which the host is a member. For example, on Windows this could be the host's Active Directory domain or NetBIOS domain name. For Linux this could be the domain of the host's LDAP provider. | keyword | -| host.hostname | Hostname of the host. It normally contains what the `hostname` command returns on the host machine. | keyword | -| host.id | Unique host id. As hostname is not always unique, use values that are meaningful in your environment. Example: The current usage of `beat.name`. | keyword | -| host.ip | Host ip addresses. | ip | -| host.mac | Host mac addresses. | keyword | -| host.name | Name of the host. It can contain what `hostname` returns on Unix systems, the fully qualified domain name, or a name specified by the user. The sender decides which value to use. | keyword | -| host.os.build | OS build information. | keyword | -| host.os.codename | OS codename, if any. | keyword | -| host.os.family | OS family (such as redhat, debian, freebsd, windows). | keyword | -| host.os.kernel | Operating system kernel version as a raw string. | keyword | -| host.os.name | Operating system name, without the version. | keyword | -| host.os.name.text | Multi-field of `host.os.name`. | text | -| host.os.platform | Operating system platform (such centos, ubuntu, windows). | keyword | -| host.os.version | Operating system version as a raw string. | keyword | -| host.type | Type of host. 
For Cloud providers this can be the machine type like `t2.medium`. If vm, this could be the container, for example, or other information meaningful in your environment. | keyword | -| http.request.method | HTTP request method. The value should retain its casing from the original event. For example, `GET`, `get`, and `GeT` are all considered valid values for this field. | keyword | -| http.request.referrer | Referrer for this HTTP request. | keyword | -| http.response.body.bytes | Size in bytes of the response body. | long | -| http.response.status_code | HTTP response status code. | long | -| http.version | HTTP version. | keyword | -| input.type | Input type | keyword | -| log.file.path | Full path to the log file this event came from, including the file name. It should include the drive letter, when appropriate. If the event wasn't read from a log file, do not populate this field. | keyword | -| log.level | Original log level of the log event. If the source of the event provides a log level or textual severity, this is the one that goes in `log.level`. If your source doesn't specify one, you may put your event transport's severity here (e.g. Syslog severity). Some examples are `warn`, `err`, `i`, `informational`. | keyword | -| log.offset | Log offset | long | -| message | For log events the message field contains the log message, optimized for viewing in a log viewer. For structured logs without an original message field, other fields can be concatenated to form a human-readable summary of the event. If multiple messages exist, they can be combined into one message. | match_only_text | -| process.command_line | Full command line that started the process, including the absolute path to the executable, and all arguments. Some arguments may be filtered to protect sensitive information. | wildcard | -| process.command_line.text | Multi-field of `process.command_line`. | match_only_text | -| process.pid | Process id. | long | -| process.thread.id | Thread ID. 
| long | -| source.address | Some event source addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. You should always store the raw address in the `.address` field. Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. | keyword | -| source.as.number | Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet. | long | -| source.as.organization.name | Organization name. | keyword | -| source.as.organization.name.text | Multi-field of `source.as.organization.name`. | match_only_text | -| source.domain | The domain name of the source system. This value may be a host name, a fully qualified domain name, or another host naming format. The value may derive from the original event or be added from enrichment. | keyword | -| source.geo.city_name | City name. | keyword | -| source.geo.continent_name | Name of the continent. | keyword | -| source.geo.country_iso_code | Country ISO code. | keyword | -| source.geo.country_name | Country name. | keyword | -| source.geo.location | Longitude and latitude. | geo_point | -| source.geo.region_iso_code | Region ISO code. | keyword | -| source.geo.region_name | Region name. | keyword | -| source.ip | IP address of the source (IPv4 or IPv6). | ip | -| tags | List of keywords used to tag each event. | keyword | -| tls.cipher | String indicating the cipher used during the current connection. | keyword | -| tls.version | Numeric part of the version parsed from the original string. | keyword | -| tls.version_protocol | Normalized lowercase protocol name parsed from original string. | keyword | -| url.domain | Domain of the url, such as "www.elastic.co". In some cases a URL may refer to an IP and/or port directly, without a domain name. In this case, the IP address would go to the `domain` field. 
If the URL contains a literal IPv6 address enclosed by `[` and `]` (IETF RFC 2732), the `[` and `]` characters should also be captured in the `domain` field. | keyword | -| url.extension | The field contains the file extension from the original request url, excluding the leading dot. The file extension is only set if it exists, as not every url has a file extension. The leading period must not be included. For example, the value must be "png", not ".png". Note that when the file name has multiple extensions (example.tar.gz), only the last one should be captured ("gz", not "tar.gz"). | keyword | -| url.original | Unmodified original url as seen in the event source. Note that in network monitoring, the observed URL may be a full URL, whereas in access logs, the URL is often just represented as a path. This field is meant to represent the URL as it was observed, complete or not. | wildcard | -| url.original.text | Multi-field of `url.original`. | match_only_text | -| url.path | Path of the request, such as "/search". | wildcard | -| url.query | The query field describes the query string of the request, such as "q=elasticsearch". The `?` is excluded from the query string. If a URL contains no `?`, there is no query field. If there is a `?` but no query, the query field exists with an empty string. The `exists` query can be used to differentiate between the two cases. | keyword | -| user.name | Short name or login of the user. | keyword | -| user.name.text | Multi-field of `user.name`. | match_only_text | -| user_agent.device.name | Name of the device. | keyword | -| user_agent.name | Name of the user agent. | keyword | -| user_agent.original | Unparsed user_agent string. | keyword | -| user_agent.original.text | Multi-field of `user_agent.original`. | match_only_text | -| user_agent.os.full | Operating system name, including the version or code name. | keyword | -| user_agent.os.full.text | Multi-field of `user_agent.os.full`. 
| match_only_text | -| user_agent.os.name | Operating system name, without the version. | keyword | -| user_agent.os.name.text | Multi-field of `user_agent.os.name`. | match_only_text | -| user_agent.os.version | Operating system version as a raw string. | keyword | -| user_agent.version | Version of the user agent. | keyword | - - -### Error Logs - -Error logs collects the Apache error logs. - -**Exported fields** - -| Field | Description | Type | -|---|---|---| -| @timestamp | Event timestamp. | date | -| apache.error.module | The module producing the logged message. | keyword | -| cloud.account.id | The cloud account or organization id used to identify different entities in a multi-tenant environment. Examples: AWS account id, Google Cloud ORG Id, or other unique identifier. | keyword | -| cloud.availability_zone | Availability zone in which this host is running. | keyword | -| cloud.image.id | Image ID for the cloud instance. | keyword | -| cloud.instance.id | Instance ID of the host machine. | keyword | -| cloud.instance.name | Instance name of the host machine. | keyword | -| cloud.machine.type | Machine type of the host machine. | keyword | -| cloud.project.id | Name of the project in Google Cloud. | keyword | -| cloud.provider | Name of the cloud provider. Example values are aws, azure, gcp, or digitalocean. | keyword | -| cloud.region | Region in which this host is running. | keyword | -| container.id | Unique container id. | keyword | -| container.image.name | Name of the image the container was built on. | keyword | -| container.labels | Image labels. | object | -| container.name | Container name. | keyword | -| data_stream.dataset | Data stream dataset. | constant_keyword | -| data_stream.namespace | Data stream namespace. | constant_keyword | -| data_stream.type | Data stream type. | constant_keyword | -| ecs.version | ECS version this event conforms to. `ecs.version` is a required field and must exist in all events. 
When querying across multiple indices -- which may conform to slightly different ECS versions -- this field lets integrations adjust to the schema version of the events. | keyword | -| error.message | Error message. | match_only_text | -| event.category | This is one of four ECS Categorization Fields, and indicates the second level in the ECS category hierarchy. `event.category` represents the "big buckets" of ECS categories. For example, filtering on `event.category:process` yields all events relating to process activity. This field is closely related to `event.type`, which is used as a subcategory. This field is an array. This will allow proper categorization of some events that fall in multiple categories. | keyword | -| event.dataset | Event dataset | constant_keyword | -| event.kind | This is one of four ECS Categorization Fields, and indicates the highest level in the ECS category hierarchy. `event.kind` gives high-level information about what type of information the event contains, without being specific to the contents of the event. For example, values of this field distinguish alert events from metric events. The value of this field can be used to inform how these kinds of events should be handled. They may warrant different retention, different access control, it may also help understand whether the data coming in at a regular interval or not. | keyword | -| event.module | Event module | constant_keyword | -| event.timezone | This field should be populated when the event's timestamp does not include timezone information already (e.g. default Syslog timestamps). It's optional otherwise. Acceptable timezone formats are: a canonical ID (e.g. "Europe/Amsterdam"), abbreviated (e.g. "EST") or an HH:mm differential (e.g. "-05:00"). | keyword | -| event.type | This is one of four ECS Categorization Fields, and indicates the third level in the ECS category hierarchy. 
`event.type` represents a categorization "sub-bucket" that, when used along with the `event.category` field values, enables filtering events down to a level appropriate for single visualization. This field is an array. This will allow proper categorization of some events that fall in multiple event types. | keyword | -| file.path | Full path to the file, including the file name. It should include the drive letter, when appropriate. | keyword | -| file.path.text | Multi-field of `file.path`. | match_only_text | -| host.architecture | Operating system architecture. | keyword | -| host.containerized | If the host is a container. | boolean | -| host.domain | Name of the domain of which the host is a member. For example, on Windows this could be the host's Active Directory domain or NetBIOS domain name. For Linux this could be the domain of the host's LDAP provider. | keyword | -| host.hostname | Hostname of the host. It normally contains what the `hostname` command returns on the host machine. | keyword | -| host.id | Unique host id. As hostname is not always unique, use values that are meaningful in your environment. Example: The current usage of `beat.name`. | keyword | -| host.ip | Host ip addresses. | ip | -| host.mac | Host mac addresses. | keyword | -| host.name | Name of the host. It can contain what `hostname` returns on Unix systems, the fully qualified domain name, or a name specified by the user. The sender decides which value to use. | keyword | -| host.os.build | OS build information. | keyword | -| host.os.codename | OS codename, if any. | keyword | -| host.os.family | OS family (such as redhat, debian, freebsd, windows). | keyword | -| host.os.kernel | Operating system kernel version as a raw string. | keyword | -| host.os.name | Operating system name, without the version. | keyword | -| host.os.name.text | Multi-field of `host.os.name`. | text | -| host.os.platform | Operating system platform (such centos, ubuntu, windows). 
| keyword | -| host.os.version | Operating system version as a raw string. | keyword | -| host.type | Type of host. For Cloud providers this can be the machine type like `t2.medium`. If vm, this could be the container, for example, or other information meaningful in your environment. | keyword | -| http.request.method | HTTP request method. The value should retain its casing from the original event. For example, `GET`, `get`, and `GeT` are all considered valid values for this field. | keyword | -| http.request.referrer | Referrer for this HTTP request. | keyword | -| http.response.body.bytes | Size in bytes of the response body. | long | -| http.response.status_code | HTTP response status code. | long | -| http.version | HTTP version. | keyword | -| input.type | Input type | keyword | -| log.file.path | Full path to the log file this event came from, including the file name. It should include the drive letter, when appropriate. If the event wasn't read from a log file, do not populate this field. | keyword | -| log.level | Original log level of the log event. If the source of the event provides a log level or textual severity, this is the one that goes in `log.level`. If your source doesn't specify one, you may put your event transport's severity here (e.g. Syslog severity). Some examples are `warn`, `err`, `i`, `informational`. | keyword | -| log.offset | Log offset | long | -| message | For log events the message field contains the log message, optimized for viewing in a log viewer. For structured logs without an original message field, other fields can be concatenated to form a human-readable summary of the event. If multiple messages exist, they can be combined into one message. | match_only_text | -| process.pid | Process id. | long | -| process.thread.id | Thread ID. | long | -| source.address | Some event source addresses are defined ambiguously. The event will sometimes list an IP, a domain or a unix socket. 
You should always store the raw address in the `.address` field. Then it should be duplicated to `.ip` or `.domain`, depending on which one it is. | keyword | -| source.as.number | Unique number allocated to the autonomous system. The autonomous system number (ASN) uniquely identifies each network on the Internet. | long | -| source.as.organization.name | Organization name. | keyword | -| source.as.organization.name.text | Multi-field of `source.as.organization.name`. | match_only_text | -| source.geo.city_name | City name. | keyword | -| source.geo.continent_name | Name of the continent. | keyword | -| source.geo.country_iso_code | Country ISO code. | keyword | -| source.geo.country_name | Country name. | keyword | -| source.geo.location | Longitude and latitude. | geo_point | -| source.geo.region_iso_code | Region ISO code. | keyword | -| source.geo.region_name | Region name. | keyword | -| source.ip | IP address of the source (IPv4 or IPv6). | ip | -| source.port | Port of the source. | long | -| tags | List of keywords used to tag each event. | keyword | -| url.domain | Domain of the url, such as "www.elastic.co". In some cases a URL may refer to an IP and/or port directly, without a domain name. In this case, the IP address would go to the `domain` field. If the URL contains a literal IPv6 address enclosed by `[` and `]` (IETF RFC 2732), the `[` and `]` characters should also be captured in the `domain` field. | keyword | -| url.extension | The field contains the file extension from the original request url, excluding the leading dot. The file extension is only set if it exists, as not every url has a file extension. The leading period must not be included. For example, the value must be "png", not ".png". Note that when the file name has multiple extensions (example.tar.gz), only the last one should be captured ("gz", not "tar.gz"). | keyword | -| url.original | Unmodified original url as seen in the event source. 
Note that in network monitoring, the observed URL may be a full URL, whereas in access logs, the URL is often just represented as a path. This field is meant to represent the URL as it was observed, complete or not. | wildcard | -| url.original.text | Multi-field of `url.original`. | match_only_text | -| url.path | Path of the request, such as "/search". | wildcard | -| url.query | The query field describes the query string of the request, such as "q=elasticsearch". The `?` is excluded from the query string. If a URL contains no `?`, there is no query field. If there is a `?` but no query, the query field exists with an empty string. The `exists` query can be used to differentiate between the two cases. | keyword | -| user.name | Short name or login of the user. | keyword | -| user.name.text | Multi-field of `user.name`. | match_only_text | -| user_agent.device.name | Name of the device. | keyword | -| user_agent.name | Name of the user agent. | keyword | -| user_agent.original | Unparsed user_agent string. | keyword | -| user_agent.original.text | Multi-field of `user_agent.original`. | match_only_text | -| user_agent.os.name | Operating system name, without the version. | keyword | -| user_agent.os.name.text | Multi-field of `user_agent.os.name`. | match_only_text | - - -## Metrics - -### Status Metrics - -The server status stream collects data from the Apache Status module. It scrapes the status data from the web page -generated by the `mod_status` module. 
- -An example event for `status` looks as following: - -```json -{ - "@timestamp": "2022-09-06T07:49:38.359Z", - "agent": { - "ephemeral_id": "8d98054a-3077-4bb5-81b8-89c4da73f566", - "id": "9a83e2ce-8ade-4cc3-ba6a-6305c90b3022", - "name": "docker-fleet-agent", - "type": "metricbeat", - "version": "8.3.3" - }, - "apache": { - "status": { - "bytes_per_request": 0, - "bytes_per_sec": 0, - "connections": { - "async": { - "closing": 0, - "keep_alive": 0, - "writing": 0 - }, - "total": 0 - }, - "cpu": { - "children_system": 0, - "children_user": 0, - "load": 0.230769, - "system": 0.02, - "user": 0.01 - }, - "load": { - "1": 2.68, - "15": 2.79, - "5": 3.48 - }, - "requests_per_sec": 0.923077, - "scoreboard": { - "closing_connection": 0, - "dns_lookup": 0, - "gracefully_finishing": 0, - "idle_cleanup": 0, - "keepalive": 0, - "logging": 0, - "open_slot": 325, - "reading_request": 0, - "sending_reply": 1, - "starting_up": 0, - "total": 400, - "waiting_for_connection": 74 - }, - "total_accesses": 12, - "total_bytes": 0, - "uptime": { - "server_uptime": 13, - "uptime": 13 - }, - "workers": { - "busy": 1, - "idle": 74 - } - } - }, - "data_stream": { - "dataset": "apache.status", - "namespace": "ep", - "type": "metrics" - }, - "ecs": { - "version": "8.0.0" - }, - "elastic_agent": { - "id": "9a83e2ce-8ade-4cc3-ba6a-6305c90b3022", - "snapshot": false, - "version": "8.3.3" - }, - "event": { - "agent_id_status": "verified", - "dataset": "apache.status", - "duration": 1049700, - "ingested": "2022-09-06T07:49:38Z", - "module": "apache" - }, - "host": { - "architecture": "x86_64", - "containerized": false, - "hostname": "docker-fleet-agent", - "ip": [ - "172.21.0.7" - ], - "mac": [ - "02:42:ac:15:00:07" - ], - "name": "docker-fleet-agent", - "os": { - "codename": "focal", - "family": "debian", - "kernel": "5.15.0-43-generic", - "name": "Ubuntu", - "platform": "ubuntu", - "type": "linux", - "version": "20.04.4 LTS (Focal Fossa)" - } - }, - "metricset": { - "name": "status", - "period": 
30000 - }, - "service": { - "address": "http://elastic-package-service-apache-1:80/server-status?auto=", - "type": "apache" - } -} -``` - -**Exported fields** - -| Field | Description | Type | Unit | Metric Type | -|---|---|---|---|---| -| @timestamp | Event timestamp. | date | | | -| apache.status.bytes_per_request | Bytes per request. | scaled_float | | gauge | -| apache.status.bytes_per_sec | Bytes per second. | scaled_float | | gauge | -| apache.status.connections.async.closing | Async closed connections. | long | | gauge | -| apache.status.connections.async.keep_alive | Async keeped alive connections. | long | | gauge | -| apache.status.connections.async.writing | Async connection writing. | long | | gauge | -| apache.status.connections.total | Total connections. | long | | counter | -| apache.status.cpu.children_system | CPU of children system. | scaled_float | | gauge | -| apache.status.cpu.children_user | CPU of children user. | scaled_float | | gauge | -| apache.status.cpu.load | CPU Load. | scaled_float | | gauge | -| apache.status.cpu.system | System cpu. | scaled_float | | gauge | -| apache.status.cpu.user | CPU user load. | scaled_float | | gauge | -| apache.status.load.1 | Load average for the last minute. | scaled_float | | gauge | -| apache.status.load.15 | Load average for the last 15 minutes. | scaled_float | | gauge | -| apache.status.load.5 | Load average for the last 5 minutes. | scaled_float | | gauge | -| apache.status.requests_per_sec | Requests per second. | scaled_float | | gauge | -| apache.status.scoreboard.closing_connection | Closing connections. | long | | gauge | -| apache.status.scoreboard.dns_lookup | Dns Lookups. | long | | gauge | -| apache.status.scoreboard.gracefully_finishing | Gracefully finishing. | long | | gauge | -| apache.status.scoreboard.idle_cleanup | Idle cleanups. | long | | gauge | -| apache.status.scoreboard.keepalive | Keep alive. 
| long | | gauge | -| apache.status.scoreboard.logging | Logging | long | | gauge | -| apache.status.scoreboard.open_slot | Open slots. | long | | gauge | -| apache.status.scoreboard.reading_request | Reading requests. | long | | gauge | -| apache.status.scoreboard.sending_reply | Sending Reply. | long | | gauge | -| apache.status.scoreboard.starting_up | Starting up. | long | | gauge | -| apache.status.scoreboard.total | Total. | long | | gauge | -| apache.status.scoreboard.waiting_for_connection | Waiting for connections. | long | | gauge | -| apache.status.total_accesses | Total number of access requests. | long | | counter | -| apache.status.total_bytes | Total number of bytes served. | long | byte | counter | -| apache.status.uptime.server_uptime | Server uptime in seconds. | long | | counter | -| apache.status.uptime.uptime | Server uptime. | long | | counter | -| apache.status.workers.busy | Number of busy workers. | long | | gauge | -| apache.status.workers.idle | Number of idle workers. | long | | gauge | -| cloud.account.id | The cloud account or organization id used to identify different entities in a multi-tenant environment. Examples: AWS account id, Google Cloud ORG Id, or other unique identifier. | keyword | | | -| cloud.availability_zone | Availability zone in which this host is running. | keyword | | | -| cloud.image.id | Image ID for the cloud instance. | keyword | | | -| cloud.instance.id | Instance ID of the host machine. | keyword | | | -| cloud.instance.name | Instance name of the host machine. | keyword | | | -| cloud.machine.type | Machine type of the host machine. | keyword | | | -| cloud.project.id | Name of the project in Google Cloud. | keyword | | | -| cloud.provider | Name of the cloud provider. Example values are aws, azure, gcp, or digitalocean. | keyword | | | -| cloud.region | Region in which this host is running. | keyword | | | -| container.id | Unique container id. 
| keyword | | | -| container.image.name | Name of the image the container was built on. | keyword | | | -| container.labels | Image labels. | object | | | -| container.name | Container name. | keyword | | | -| data_stream.dataset | Data stream dataset. | constant_keyword | | | -| data_stream.namespace | Data stream namespace. | constant_keyword | | | -| data_stream.type | Data stream type. | constant_keyword | | | -| ecs.version | ECS version this event conforms to. `ecs.version` is a required field and must exist in all events. When querying across multiple indices -- which may conform to slightly different ECS versions -- this field lets integrations adjust to the schema version of the events. | keyword | | | -| error.message | Error message. | match_only_text | | | -| event.dataset | Event dataset | constant_keyword | | | -| event.module | Event module | constant_keyword | | | -| host.architecture | Operating system architecture. | keyword | | | -| host.containerized | If the host is a container. | boolean | | | -| host.domain | Name of the domain of which the host is a member. For example, on Windows this could be the host's Active Directory domain or NetBIOS domain name. For Linux this could be the domain of the host's LDAP provider. | keyword | | | -| host.hostname | Hostname of the host. It normally contains what the `hostname` command returns on the host machine. | keyword | | | -| host.id | Unique host id. As hostname is not always unique, use values that are meaningful in your environment. Example: The current usage of `beat.name`. | keyword | | | -| host.ip | Host ip addresses. | ip | | | -| host.mac | Host mac addresses. | keyword | | | -| host.name | Name of the host. It can contain what `hostname` returns on Unix systems, the fully qualified domain name, or a name specified by the user. The sender decides which value to use. | keyword | | | -| host.os.build | OS build information. | keyword | | | -| host.os.codename | OS codename, if any. 
| keyword | | |
-| host.os.family | OS family (such as redhat, debian, freebsd, windows). | keyword | | |
-| host.os.kernel | Operating system kernel version as a raw string. | keyword | | |
-| host.os.name | Operating system name, without the version. | keyword | | |
-| host.os.name.text | Multi-field of `host.os.name`. | text | | |
-| host.os.platform | Operating system platform (such centos, ubuntu, windows). | keyword | | |
-| host.os.version | Operating system version as a raw string. | keyword | | |
-| host.type | Type of host. For Cloud providers this can be the machine type like `t2.medium`. If vm, this could be the container, for example, or other information meaningful in your environment. | keyword | | |
-| service.address | Address where data about this service was collected from. This should be a URI, network address (ipv4:port or [ipv6]:port) or a resource path (sockets). | keyword | | |
-| service.type | The type of the service data is collected from. The type can be used to group and correlate logs and metrics from one service type. Example: If logs or metrics are collected from Elasticsearch, `service.type` would be `elasticsearch`. | keyword | | |
+{{header}}
+# Apache Integration for Elastic
+## Overview
+
+The Apache integration for Elastic enables collection of access logs, error logs, and metrics from [Apache](https://httpd.apache.org/) servers.
+This integration facilitates performance monitoring, understanding traffic patterns, and gaining security insights from
+your Apache servers.
+
+### Compatibility
+
+This integration is compatible with Apache server versions 2.4.16 and later, and 2.2.31 and later.
+
+### How it works
+
+The access and error logs data streams read events from log files. You will configure the Apache server to write log files to a location that is readable by Elastic Agent, and
+Elastic Agent will monitor and ingest events written to these files.
+
+## What data does this integration collect? 
+
+
+
+The Apache HTTP Server integration collects log messages of the following types:
+
+* [Error logs](https://httpd.apache.org/docs/current/logs.html#errorlog)
+* [Access logs](https://httpd.apache.org/docs/current/logs.html#accesslog)
+* Status metrics
+
+### Supported use cases
+
+The Apache integration for Elastic allows you to collect, parse, and analyze Apache web server logs and metrics within the Elastic Stack.
+It centralizes data like access logs, error logs, and performance metrics, transforming unstructured information into a structured, searchable format.
+This enables users to monitor performance, troubleshoot issues, gain security insights, and understand website traffic patterns through powerful visualizations in Kibana.
+Ultimately, it simplifies the management and analysis of critical data for maintaining a healthy and secure web infrastructure.
+
+## What do I need to use this integration?
+
+Elastic Agent must be installed. For more details, check the Elastic Agent [installation instructions](docs-content://reference/fleet/install-elastic-agents.md). You can install only one Elastic Agent per host.
+
+Elastic Agent is required to stream data from the log file receiver and ship the data to Elastic, where the events will then be processed via the integration's ingest pipelines.
+
+
+## How do I deploy this integration?
+
+### Onboard / configure
+
+#### Collect access and error logs
+
+Follow the [Apache server instructions](https://httpd.apache.org/docs/2.4/logs.html) to write error or access logs to a location readable by Elastic Agent.
+Elastic Agent can run on the same system as your Apache server, or the logs can be forwarded to a different system running Elastic Agent.
+
+#### Collect metrics
+
+Follow the [Apache server instructions](https://httpd.apache.org/docs/2.4/mod/mod_status.html) to enable the Status module.
+
+#### Enable the integration in Elastic
+
+1. 
In Kibana, navigate to **Management** > **Integrations**.
+2. In the search bar, type **Apache HTTP Server**.
+3. Select the **Apache HTTP Server** integration and add it.
+4. If needed, install Elastic Agent on the systems which will receive error or access log files.
+5. Enable and configure only the collection methods which you will use.
+
+    * **To collect logs from Apache instances**, you'll need to add the log file path patterns that Elastic Agent will monitor.
+
+    * **To collect metrics**, you'll need to configure the Apache hosts which will be monitored.
+
+6. Press **Save Integration** to begin collecting data.
+
+#### Anomaly Detection Configurations
+
+The Apache HTTP Server integration also supports anomaly detection jobs.
+
+These anomaly detection jobs are available in the Machine Learning app in Kibana
+when you have data that matches the query specified in the
+[manifest](https://github.com/elastic/integrations/blob/main/packages/apache/kibana/ml_module/apache-Logs-ml.json#L11).
+
+##### Apache Access Logs
+
+Find unusual activity in HTTP access logs.
+
+| Job | Description |
+|---|---|
+| visitor_rate_apache | HTTP Access Logs: Detect unusual visitor rates |
+| status_code_rate_apache | HTTP Access Logs: Detect unusual status code rates |
+| source_ip_url_count_apache | HTTP Access Logs: Detect unusual source IPs - high distinct count of URLs |
+| source_ip_request_rate_apache | HTTP Access Logs: Detect unusual source IPs - high request rates |
+| low_request_rate_apache | HTTP Access Logs: Detect low request rates |
+
+### Validation
+
+
+The "[Logs Apache] Access and error logs" and "[Metrics Apache] Overview" dashboards will show activity for your Apache servers.
+After the integration is installed, view these dashboards in Kibana and verify that information for servers is shown.
+
+## Troubleshooting
+
+For help with Elastic ingest tools, check [Common problems](https://www.elastic.co/docs/troubleshoot/ingest/fleet/common-problems). 
+ + + +## Scaling + +For more information on architectures that can be used for scaling this integration, check the [Ingest Architectures](https://www.elastic.co/docs/manage-data/ingest/ingest-reference-architectures) documentation. + +## Reference + +### ECS field Reference + +{{fields}} + +### Sample Event + +{{event}} + +### Inputs used + +These inputs can be used with this integration: +* [logfile](https://www.elastic.co/docs/reference/integrations/filestream) +* [apache/metrics](https://www.elastic.co/docs/reference/beats/metricbeat/metricbeat-metricset-apache-status) + +### API usage + +These APIs are used with this integration: +* Metrics are collected using the [mod_status](https://httpd.apache.org/docs/current/mod/mod_status.html) Apache module.