Skip to content

feat(playstation): Emit outcome for skipped fields #4862

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@

**Internal**:

- Emit outcomes for skipped large attachments on playstation crashes. ([#4862](https://github.com/getsentry/relay/pull/4862))
- Disable span metrics. ([#4931](https://github.com/getsentry/relay/pull/4931))

## 25.7.0
Expand Down
138 changes: 117 additions & 21 deletions relay-server/src/utils/multipart.rs
Original file line number Diff line number Diff line change
@@ -1,17 +1,26 @@
use std::convert::Infallible;
use std::io;
use std::task::Poll;

use axum::extract::FromRequest;
use axum::extract::Request;
use axum::RequestExt;
use axum::extract::{FromRequest, FromRequestParts, Request};
use axum::http::StatusCode;
use axum::response::{IntoResponse, Response};
use bytes::{Bytes, BytesMut};
use futures::{StreamExt, TryStreamExt};
use multer::{Field, Multipart};
use relay_config::Config;
use relay_quotas::DataCategory;
use relay_system::Addr;
use serde::{Deserialize, Serialize};

use crate::envelope::{AttachmentType, ContentType, Item, ItemType, Items};
use crate::extractors::Remote;
use crate::extractors::{BadEventMeta, PartialDsn, Remote, RequestMeta};
use crate::service::ServiceState;
use crate::services::outcome::{
DiscardAttachmentType, DiscardItemType, DiscardReason, Outcome, TrackOutcome,
};
use crate::utils::ApiErrorResponse;

/// Type used for encoding string lengths.
type Len = u32;
Expand Down Expand Up @@ -155,14 +164,43 @@ pub fn get_multipart_boundary(data: &[u8]) -> Option<&str> {
.and_then(|slice| std::str::from_utf8(&slice[2..]).ok())
}

async fn multipart_items<F>(
/// Error emitted when a multipart request body cannot be processed.
///
/// Combines failures from extracting the request's event metadata with
/// low-level multipart parsing errors, so either can be propagated with `?`
/// from the `FromRequest` implementation.
#[derive(Debug, thiserror::Error)]
pub enum BadMultipart {
    /// The request's event metadata could not be extracted.
    #[error("event metadata error: {0}")]
    EventMeta(#[from] BadEventMeta),
    /// The multipart body itself failed to parse or exceeded a size limit.
    #[error("multipart error: {0}")]
    Multipart(#[from] multer::Error),
}

impl From<Infallible> for BadMultipart {
    /// `Infallible` has no values, so this conversion can never actually run;
    /// the empty match proves that to the compiler. It exists only so `?` can
    /// be used on extractors whose error type is `Infallible`.
    fn from(infallible: Infallible) -> Self {
        match infallible {}
    }
}

impl IntoResponse for BadMultipart {
fn into_response(self) -> Response {
let status_code = match self {
BadMultipart::Multipart(
multer::Error::FieldSizeExceeded { .. } | multer::Error::StreamSizeExceeded { .. },
) => StatusCode::PAYLOAD_TOO_LARGE,
_ => StatusCode::BAD_REQUEST,
};

(status_code, ApiErrorResponse::from_error(&self)).into_response()
}
}
Comment on lines +181 to +192
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This copies the behaviour found here:

fn into_response(self) -> Response {
let Self(ref error) = self;
let status_code = match error {
multer::Error::FieldSizeExceeded { .. } => StatusCode::PAYLOAD_TOO_LARGE,
multer::Error::StreamSizeExceeded { .. } => StatusCode::PAYLOAD_TOO_LARGE,
_ => StatusCode::BAD_REQUEST,
};
(status_code, ApiErrorResponse::from_error(error)).into_response()


async fn multipart_items<F, G>(
mut multipart: Multipart<'_>,
mut infer_type: F,
mut emit_outcome: G,
config: &Config,
ignore_large_fields: bool,
) -> Result<Items, multer::Error>
where
F: FnMut(Option<&str>, &str) -> AttachmentType,
G: FnMut(Outcome, u32),
{
let mut items = Items::new();
let mut form_data = FormDataWriter::new();
Expand All @@ -177,7 +215,15 @@ where
let content_type = field.content_type().cloned();
let field = LimitedField::new(field, config.max_attachment_size());
match field.bytes().await {
Err(multer::Error::FieldSizeExceeded { .. }) if ignore_large_fields => continue,
Err(multer::Error::FieldSizeExceeded { limit, .. }) if ignore_large_fields => {
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We cheat a little here: the error's `limit` field carries the number of bytes actually consumed, rather than the limit that was breached, so that the outcome can report the real size of the skipped field.

emit_outcome(
Outcome::Invalid(DiscardReason::TooLarge(DiscardItemType::Attachment(
DiscardAttachmentType::Attachment,
))),
u32::try_from(limit).unwrap_or(u32::MAX),
);
continue;
}
Err(err) => return Err(err),
Ok(bytes) => {
attachments_size += bytes.len();
Expand Down Expand Up @@ -276,7 +322,7 @@ impl futures::Stream for LimitedField<'_> {
Poll::Ready(None) if self.consumed_size > self.size_limit => {
self.inner_finished = true;
Poll::Ready(Some(Err(multer::Error::FieldSizeExceeded {
limit: self.size_limit as u64,
limit: self.consumed_size as u64,
field_name: self.field.name().map(Into::into),
})))
}
Expand Down Expand Up @@ -313,26 +359,38 @@ impl ConstrainedMultipart {
where
F: FnMut(Option<&str>, &str) -> AttachmentType,
{
multipart_items(self.0, infer_type, config, false).await
// The emit-outcome closure is a no-op in this code path: we already return
// an error to the request, so no outcomes should be emitted here.
multipart_items(self.0, infer_type, |_, _| (), config, false).await
}
}

/// Wrapper around [`multer::Multipart`] that skips over fields which are larger than
/// `max_attachment_size`. These fields are also not taken into account when checking that the
/// combined size of all fields is smaller than `max_attachments_size`.
#[allow(dead_code)]
pub struct UnconstrainedMultipart(pub Multipart<'static>);
pub struct UnconstrainedMultipart {
multipart: Multipart<'static>,
outcome_aggregator: Addr<TrackOutcome>,
request_meta: RequestMeta,
}

impl FromRequest<ServiceState> for UnconstrainedMultipart {
type Rejection = Remote<multer::Error>;
type Rejection = BadMultipart;

async fn from_request(
request: Request,
_state: &ServiceState,
mut request: Request,
state: &ServiceState,
) -> Result<Self, Self::Rejection> {
multipart_from_request(request, multer::Constraints::new())
.map(Self)
.map_err(Remote)
let mut parts = request.extract_parts().await?;
let request_meta = RequestMeta::<PartialDsn>::from_request_parts(&mut parts, state).await?;

let multipart = multipart_from_request(request, multer::Constraints::new())?;
Ok(UnconstrainedMultipart {
multipart,
outcome_aggregator: state.outcome_aggregator().clone(),
request_meta,
})
}
}

Expand All @@ -342,7 +400,30 @@ impl UnconstrainedMultipart {
where
F: FnMut(Option<&str>, &str) -> AttachmentType,
{
multipart_items(self.0, infer_type, config, true).await
let UnconstrainedMultipart {
multipart,
outcome_aggregator,
request_meta,
} = self;

multipart_items(
multipart,
infer_type,
|outcome, quantity| {
outcome_aggregator.send(TrackOutcome {
timestamp: request_meta.received_at(),
scoping: request_meta.get_partial_scoping(),
outcome,
event_id: None,
remote_addr: request_meta.remote_addr(),
category: DataCategory::Attachment,
quantity,
})
},
config,
true,
)
.await
}
}

Expand Down Expand Up @@ -460,15 +541,22 @@ mod tests {
}
}))?;

let items = UnconstrainedMultipart(multipart)
.items(|_, _| AttachmentType::Attachment, &config)
.await?;
let mut mock_outcomes = vec![];
let items = multipart_items(
multipart,
|_, _| AttachmentType::Attachment,
|_, x| (mock_outcomes.push(x)),
&config,
true,
)
.await?;

// The large field is skipped so only the small one should make it through.
assert_eq!(items.len(), 1);
let item = &items[0];
assert_eq!(item.filename(), Some("small.txt"));
assert_eq!(item.payload(), Bytes::from("ok"));
assert_eq!(mock_outcomes, vec![27]);

Ok(())
}
Expand Down Expand Up @@ -497,9 +585,17 @@ mod tests {

let multipart = Multipart::new(stream, "X-BOUNDARY");

let result = UnconstrainedMultipart(multipart)
.items(|_, _| AttachmentType::Attachment, &config)
.await;
let result = UnconstrainedMultipart {
multipart,
outcome_aggregator: Addr::dummy(),
request_meta: RequestMeta::new(
"https://a94ae32be2584e0bbd7a4cbb95971fee:@sentry.io/42"
.parse()
.unwrap(),
),
}
.items(|_, _| AttachmentType::Attachment, &config)
.await;

// Should be warned if the overall stream limit is being breached.
assert!(result.is_err_and(|x| matches!(x, multer::Error::StreamSizeExceeded { limit: _ })));
Expand Down
13 changes: 12 additions & 1 deletion tests/integration/test_playstation.py
Original file line number Diff line number Diff line change
Expand Up @@ -268,14 +268,25 @@ def test_playstation_ignore_large_fields(
{
"limits": {
"max_attachment_size": len(video_content) - 1,
}
},
"outcomes": {"emit_outcomes": True, "batch_size": 1, "batch_interval": 1},
},
)

response = relay.send_playstation_request(
PROJECT_ID, playstation_dump, video_content
)
assert response.ok
assert (mini_sentry.captured_outcomes.get()["outcomes"]) == [
{
"timestamp": mock.ANY,
"project_id": 42,
"outcome": 3,
"reason": "too_large:attachment:attachment",
"category": 4,
"quantity": len(video_content),
}
]
assert [
item.headers["filename"] for item in mini_sentry.captured_events.get().items
] == ["playstation.prosperodmp"]
Expand Down
Loading