diff --git a/api/swagger.yml b/api/swagger.yml index 56f17ab4cb6..291e049361d 100644 --- a/api/swagger.yml +++ b/api/swagger.yml @@ -1581,6 +1581,50 @@ components: - completed - update_time + MergeStatus: + type: object + properties: + task_id: + type: string + description: the id of the async merge task + completed: + type: boolean + description: true if the task has completed (either successfully or with an error) + update_time: + type: string + format: date-time + description: last time the task status was updated + result: + $ref: "#/components/schemas/MergeResult" + error: + $ref: "#/components/schemas/Error" + required: + - task_id + - completed + - update_time + + CommitStatus: + type: object + properties: + task_id: + type: string + description: the id of the async commit task + completed: + type: boolean + description: true if the task has completed (either successfully or with an error) + update_time: + type: string + format: date-time + description: last time the task status was updated + result: + $ref: "#/components/schemas/Commit" + error: + $ref: "#/components/schemas/Error" + required: + - task_id + - completed + - update_time + PrepareGCUncommittedRequest: type: object properties: @@ -4247,6 +4291,144 @@ paths: default: $ref: "#/components/responses/ServerError" + /repositories/{repository}/branches/{branch}/commits/async: + parameters: + - in: path + name: repository + required: true + schema: + type: string + - in: path + name: branch + required: true + schema: + type: string + post: + parameters: + - in: query + name: source_metarange + required: false + description: The source metarange to commit. Branch must not have uncommitted changes. + schema: + type: string + tags: + - commits + operationId: commitAsync + summary: create commit asynchronously + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CommitCreation" + responses: + 202: + description: commit task started + content: + application/json: + schema: + $ref: "#/components/schemas/TaskCreation" + 400: + $ref: "#/components/responses/ValidationError" + 401: + $ref: "#/components/responses/Unauthorized" + 403: + $ref: "#/components/responses/Forbidden" + 404: + $ref: "#/components/responses/NotFound" + 429: + description: too many requests + 501: + $ref: "#/components/responses/NotImplemented" + default: + $ref: "#/components/responses/ServerError" + + /repositories/{repository}/branches/{branch}/commits/status: + parameters: + - in: path + name: repository + required: true + schema: + type: string + - in: path + name: branch + required: true + schema: + type: string + get: + tags: + - commits + operationId: commitStatus + summary: get status of async commit operation + parameters: + - in: query + name: id + description: Unique identifier of the commit task + schema: + type: string + required: true + responses: + 200: + description: commit task status + content: + application/json: + schema: + $ref: "#/components/schemas/CommitStatus" + 400: + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/CommitStatus" + 401: + description: Unauthorized + content: + application/json: + schema: + $ref: "#/components/schemas/CommitStatus" + 403: + description: Forbidden + content: + application/json: + schema: + $ref: "#/components/schemas/CommitStatus" + 404: + description: Not Found + content: + application/json: + schema: + $ref: "#/components/schemas/CommitStatus" + 409: + description: Conflict + content: + 
application/json: + schema: + $ref: "#/components/schemas/CommitStatus" + 412: + description: Precondition Failed (e.g. a pre-commit hook returned a failure) + content: + application/json: + schema: + $ref: "#/components/schemas/CommitStatus" + 429: + description: too many requests + content: + application/json: + schema: + $ref: "#/components/schemas/CommitStatus" + 501: + description: Not Implemented + content: + application/json: + schema: + $ref: "#/components/schemas/CommitStatus" + default: + description: Internal Server Error + content: + application/json: + schema: + $ref: "#/components/schemas/CommitStatus" + /repositories/{repository}/commits: parameters: - in: path @@ -4596,6 +4778,150 @@ paths: default: $ref: "#/components/responses/ServerError" + /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch}/async: + parameters: + - in: path + name: repository + required: true + schema: + type: string + - in: path + name: sourceRef + required: true + schema: + type: string + description: source ref + - in: path + name: destinationBranch + required: true + schema: + type: string + description: destination branch name + post: + tags: + - refs + operationId: mergeIntoBranchAsync + summary: merge references asynchronously + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/Merge" + responses: + 202: + description: merge task started + content: + application/json: + schema: + $ref: "#/components/schemas/TaskCreation" + 400: + $ref: "#/components/responses/ValidationError" + 401: + $ref: "#/components/responses/Unauthorized" + 403: + $ref: "#/components/responses/Forbidden" + 404: + $ref: "#/components/responses/NotFound" + 429: + description: too many requests + 501: + $ref: "#/components/responses/NotImplemented" + default: + $ref: "#/components/responses/ServerError" + + /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch}/status: + parameters: + - in: path + name: repository + required: true + schema: + type: string + - in: path + name: sourceRef + required: true + schema: + type: string + description: source ref + - in: path + name: destinationBranch + required: true + schema: + type: string + description: destination branch name + get: + tags: + - refs + operationId: mergeIntoBranchStatus + summary: get status of async merge operation + parameters: + - in: query + name: id + description: Unique identifier of the merge task + schema: + type: string + required: true + responses: + 200: + description: merge task status + content: + application/json: + schema: + $ref: "#/components/schemas/MergeStatus" + 400: + description: Validation Error + content: + application/json: + schema: + $ref: "#/components/schemas/MergeStatus" + 401: + description: Unauthorized + content: + application/json: + schema: + $ref: "#/components/schemas/MergeStatus" + 403: + description: Forbidden + content: + application/json: + schema: + $ref: "#/components/schemas/MergeStatus" + 404: + description: Not Found + content: + application/json: + schema: + $ref: "#/components/schemas/MergeStatus" + 409: + description: Conflict + content: + application/json: + schema: + $ref: "#/components/schemas/MergeStatus" + 412: + description: precondition failed (e.g. 
a pre-merge hook returned a failure) + content: + application/json: + schema: + $ref: "#/components/schemas/MergeStatus" + 429: + description: too many requests + content: + application/json: + schema: + $ref: "#/components/schemas/MergeStatus" + 501: + description: Not Implemented + content: + application/json: + schema: + $ref: "#/components/schemas/MergeStatus" + default: + description: Internal Server Error + content: + application/json: + schema: + $ref: "#/components/schemas/MergeStatus" + /repositories/{repository}/branches/{branch}/diff: parameters: - $ref: "#/components/parameters/PaginationAfter" diff --git a/clients/java/README.md b/clients/java/README.md index ce4572f595b..9b49e2d2bbf 100644 --- a/clients/java/README.md +++ b/clients/java/README.md @@ -196,6 +196,8 @@ Class | Method | HTTP request | Description *BranchesApi* | [**resetBranch**](docs/BranchesApi.md#resetBranch) | **PUT** /repositories/{repository}/branches/{branch} | reset branch *BranchesApi* | [**revertBranch**](docs/BranchesApi.md#revertBranch) | **POST** /repositories/{repository}/branches/{branch}/revert | revert *CommitsApi* | [**commit**](docs/CommitsApi.md#commit) | **POST** /repositories/{repository}/branches/{branch}/commits | create commit +*CommitsApi* | [**commitAsync**](docs/CommitsApi.md#commitAsync) | **POST** /repositories/{repository}/branches/{branch}/commits/async | create commit asynchronously +*CommitsApi* | [**commitStatus**](docs/CommitsApi.md#commitStatus) | **GET** /repositories/{repository}/branches/{branch}/commits/status | get status of async commit operation *CommitsApi* | [**getCommit**](docs/CommitsApi.md#getCommit) | **GET** /repositories/{repository}/commits/{commitId} | get commit *ConfigApi* | [**getConfig**](docs/ConfigApi.md#getConfig) | **GET** /config | *ExperimentalApi* | [**abortPresignMultipartUpload**](docs/ExperimentalApi.md#abortPresignMultipartUpload) | **DELETE** /repositories/{repository}/branches/{branch}/staging/pmpu/{uploadId} | Abort a presign multipart upload @@ -281,6 +283,8 @@ Class | Method | HTTP request | Description *RefsApi* | [**findMergeBase**](docs/RefsApi.md#findMergeBase) | **GET** /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch} | find the merge base for 2 references *RefsApi* | [**logCommits**](docs/RefsApi.md#logCommits) | **GET** /repositories/{repository}/refs/{ref}/commits | get commit log from ref. If both objects and prefixes are empty, return all commits. 
*RefsApi* | [**mergeIntoBranch**](docs/RefsApi.md#mergeIntoBranch) | **POST** /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch} | merge references +*RefsApi* | [**mergeIntoBranchAsync**](docs/RefsApi.md#mergeIntoBranchAsync) | **POST** /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch}/async | merge references asynchronously +*RefsApi* | [**mergeIntoBranchStatus**](docs/RefsApi.md#mergeIntoBranchStatus) | **GET** /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch}/status | get status of async merge operation *RemotesApi* | [**pullIcebergTable**](docs/RemotesApi.md#pullIcebergTable) | **POST** /iceberg/remotes/{catalog}/pull | take a table previously pushed from lakeFS into a remote catalog, and pull its state back into the originating lakeFS repository *RemotesApi* | [**pushIcebergTable**](docs/RemotesApi.md#pushIcebergTable) | **POST** /iceberg/remotes/{catalog}/push | register existing lakeFS table in remote catalog *RepositoriesApi* | [**createRepository**](docs/RepositoriesApi.md#createRepository) | **POST** /repositories | create repository @@ -323,6 +327,7 @@ Class | Method | HTTP request | Description - [CommitList](docs/CommitList.md) - [CommitOverrides](docs/CommitOverrides.md) - [CommitRecordCreation](docs/CommitRecordCreation.md) + - [CommitStatus](docs/CommitStatus.md) - [CompletePresignMultipartUpload](docs/CompletePresignMultipartUpload.md) - [Config](docs/Config.md) - [CopyPartSource](docs/CopyPartSource.md) @@ -365,6 +370,7 @@ Class | Method | HTTP request | Description - [LoginInformation](docs/LoginInformation.md) - [Merge](docs/Merge.md) - [MergeResult](docs/MergeResult.md) + - [MergeStatus](docs/MergeStatus.md) - [MetaRangeCreation](docs/MetaRangeCreation.md) - [MetaRangeCreationResponse](docs/MetaRangeCreationResponse.md) - [ObjectCopyCreation](docs/ObjectCopyCreation.md) diff --git a/clients/java/api/openapi.yaml b/clients/java/api/openapi.yaml index 1a1429db0de..06cc0afbaf3 100644 --- a/clients/java/api/openapi.yaml +++ b/clients/java/api/openapi.yaml @@ -3905,6 +3905,180 @@ paths: - commits x-content-type: application/json x-accepts: application/json + /repositories/{repository}/branches/{branch}/commits/async: + post: + operationId: commitAsync + parameters: + - explode: false + in: path + name: repository + required: true + schema: + type: string + style: simple + - explode: false + in: path + name: branch + required: true + schema: + type: string + style: simple + - description: The source metarange to commit. Branch must not have uncommitted + changes. 
+ explode: true + in: query + name: source_metarange + required: false + schema: + type: string + style: form + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CommitCreation' + required: true + responses: + "202": + content: + application/json: + schema: + $ref: '#/components/schemas/TaskCreation' + description: commit task started + "400": + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + description: Validation Error + "401": + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + description: Unauthorized + "403": + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + description: Forbidden + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + description: Resource Not Found + "429": + description: too many requests + "501": + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + description: Not Implemented + default: + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + description: Internal Server Error + summary: create commit asynchronously + tags: + - commits + x-content-type: application/json + x-accepts: application/json + /repositories/{repository}/branches/{branch}/commits/status: + get: + operationId: commitStatus + parameters: + - explode: false + in: path + name: repository + required: true + schema: + type: string + style: simple + - explode: false + in: path + name: branch + required: true + schema: + type: string + style: simple + - description: Unique identifier of the commit task + explode: true + in: query + name: id + required: true + schema: + type: string + style: form + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/CommitStatus' + description: commit task status + "400": + content: + application/json: + schema: + $ref: '#/components/schemas/CommitStatus' + description: Validation Error + "401": + content: + application/json: + schema: + $ref: '#/components/schemas/CommitStatus' + description: Unauthorized + "403": + content: + application/json: + schema: + $ref: '#/components/schemas/CommitStatus' + description: Forbidden + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/CommitStatus' + description: Not Found + "409": + content: + application/json: + schema: + $ref: '#/components/schemas/CommitStatus' + description: Conflict + "412": + content: + application/json: + schema: + $ref: '#/components/schemas/CommitStatus' + description: Precondition Failed (e.g. 
a pre-commit hook returned a failure) + "429": + content: + application/json: + schema: + $ref: '#/components/schemas/CommitStatus' + description: too many requests + "501": + content: + application/json: + schema: + $ref: '#/components/schemas/CommitStatus' + description: Not Implemented + default: + content: + application/json: + schema: + $ref: '#/components/schemas/CommitStatus' + description: Internal Server Error + summary: get status of async commit operation + tags: + - commits + x-accepts: application/json /repositories/{repository}/commits: post: operationId: CreateCommitRecord @@ -4516,6 +4690,188 @@ paths: - refs x-content-type: application/json x-accepts: application/json + /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch}/async: + post: + operationId: mergeIntoBranchAsync + parameters: + - explode: false + in: path + name: repository + required: true + schema: + type: string + style: simple + - description: source ref + explode: false + in: path + name: sourceRef + required: true + schema: + type: string + style: simple + - description: destination branch name + explode: false + in: path + name: destinationBranch + required: true + schema: + type: string + style: simple + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Merge' + responses: + "202": + content: + application/json: + schema: + $ref: '#/components/schemas/TaskCreation' + description: merge task started + "400": + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + description: Validation Error + "401": + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + description: Unauthorized + "403": + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + description: Forbidden + "404": + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + description: Resource Not Found + "429": + description: too many requests + "501": + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + description: Not Implemented + default: + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + description: Internal Server Error + summary: merge references asynchronously + tags: + - refs + x-content-type: application/json + x-accepts: application/json + /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch}/status: + get: + operationId: mergeIntoBranchStatus + parameters: + - explode: false + in: path + name: repository + required: true + schema: + type: string + style: simple + - description: source ref + explode: false + in: path + name: sourceRef + required: true + schema: + type: string + style: simple + - description: destination branch name + explode: false + in: path + name: destinationBranch + required: true + schema: + type: string + style: simple + - description: Unique identifier of the merge task + explode: true + in: query + name: id + required: true + schema: + type: string + style: form + responses: + "200": + content: + application/json: + schema: + $ref: '#/components/schemas/MergeStatus' + description: merge task status + "400": + content: + application/json: + schema: + $ref: '#/components/schemas/MergeStatus' + description: Validation Error + "401": + content: + application/json: + schema: + $ref: '#/components/schemas/MergeStatus' + description: Unauthorized + "403": + content: + application/json: + schema: + $ref: '#/components/schemas/MergeStatus' + description: Forbidden + "404": + content: + 
application/json: + schema: + $ref: '#/components/schemas/MergeStatus' + description: Not Found + "409": + content: + application/json: + schema: + $ref: '#/components/schemas/MergeStatus' + description: Conflict + "412": + content: + application/json: + schema: + $ref: '#/components/schemas/MergeStatus' + description: precondition failed (e.g. a pre-merge hook returned a failure) + "429": + content: + application/json: + schema: + $ref: '#/components/schemas/MergeStatus' + description: too many requests + "501": + content: + application/json: + schema: + $ref: '#/components/schemas/MergeStatus' + description: Not Implemented + default: + content: + application/json: + schema: + $ref: '#/components/schemas/MergeStatus' + description: Internal Server Error + summary: get status of async merge operation + tags: + - refs + x-accepts: application/json /repositories/{repository}/branches/{branch}/diff: get: operationId: diffBranch @@ -10772,6 +11128,77 @@ components: - task_id - update_time type: object + MergeStatus: + example: + result: + reference: reference + update_time: 2000-01-23T04:56:07.000+00:00 + task_id: task_id + completed: true + error: + message: message + properties: + task_id: + description: the id of the async merge task + type: string + completed: + description: true if the task has completed (either successfully or with + an error) + type: boolean + update_time: + description: last time the task status was updated + format: date-time + type: string + result: + $ref: '#/components/schemas/MergeResult' + error: + $ref: '#/components/schemas/Error' + required: + - completed + - task_id + - update_time + type: object + CommitStatus: + example: + result: + generation: 6 + metadata: + key: metadata + committer: committer + id: id + creation_date: 0 + meta_range_id: meta_range_id + message: message + version: 0 + parents: + - parents + - parents + update_time: 2000-01-23T04:56:07.000+00:00 + task_id: task_id + completed: true + error: + message: message + properties: + task_id: + description: the id of the async commit task + type: string + completed: + description: true if the task has completed (either successfully or with + an error) + type: boolean + update_time: + description: last time the task status was updated + format: date-time + type: string + result: + $ref: '#/components/schemas/Commit' + error: + $ref: '#/components/schemas/Error' + required: + - completed + - task_id + - update_time + type: object PrepareGCUncommittedRequest: example: continuation_token: continuation_token diff --git a/clients/java/docs/CommitStatus.md b/clients/java/docs/CommitStatus.md new file mode 100644 index 00000000000..a92c2c4a407 --- /dev/null +++ b/clients/java/docs/CommitStatus.md @@ -0,0 +1,17 @@ + + +# CommitStatus + + +## Properties + +| Name | Type | Description | Notes | +|------------ | ------------- | ------------- | -------------| +|**taskId** | **String** | the id of the async commit task | | +|**completed** | **Boolean** | true if the task has completed (either successfully or with an error) | | +|**updateTime** | **OffsetDateTime** | last time the task status was updated | | +|**result** | [**Commit**](Commit.md) | | [optional] | +|**error** | [**Error**](Error.md) | | [optional] | + + + diff --git a/clients/java/docs/CommitsApi.md b/clients/java/docs/CommitsApi.md index cd73fec72df..16e3fc24246 100644 --- a/clients/java/docs/CommitsApi.md +++ b/clients/java/docs/CommitsApi.md @@ -5,6 +5,8 @@ All URIs are relative to */api/v1* | Method | HTTP request | Description | 
|------------- | ------------- | -------------| | [**commit**](CommitsApi.md#commit) | **POST** /repositories/{repository}/branches/{branch}/commits | create commit | +| [**commitAsync**](CommitsApi.md#commitAsync) | **POST** /repositories/{repository}/branches/{branch}/commits/async | create commit asynchronously | +| [**commitStatus**](CommitsApi.md#commitStatus) | **GET** /repositories/{repository}/branches/{branch}/commits/status | get status of async commit operation | | [**getCommit**](CommitsApi.md#getCommit) | **GET** /repositories/{repository}/commits/{commitId} | get commit | @@ -112,6 +114,211 @@ public class Example { | **429** | too many requests | - | | **0** | Internal Server Error | - | + +# **commitAsync** +> TaskCreation commitAsync(repository, branch, commitCreation).sourceMetarange(sourceMetarange).execute(); + +create commit asynchronously + +### Example +```java +// Import classes: +import io.lakefs.clients.sdk.ApiClient; +import io.lakefs.clients.sdk.ApiException; +import io.lakefs.clients.sdk.Configuration; +import io.lakefs.clients.sdk.auth.*; +import io.lakefs.clients.sdk.models.*; +import io.lakefs.clients.sdk.CommitsApi; + +public class Example { + public static void main(String[] args) { + ApiClient defaultClient = Configuration.getDefaultApiClient(); + defaultClient.setBasePath("/api/v1"); + + // Configure HTTP basic authorization: basic_auth + HttpBasicAuth basic_auth = (HttpBasicAuth) defaultClient.getAuthentication("basic_auth"); + basic_auth.setUsername("YOUR USERNAME"); + basic_auth.setPassword("YOUR PASSWORD"); + + // Configure API key authorization: cookie_auth + ApiKeyAuth cookie_auth = (ApiKeyAuth) defaultClient.getAuthentication("cookie_auth"); + cookie_auth.setApiKey("YOUR API KEY"); + // Uncomment the following line to set a prefix for the API key, e.g. "Token" (defaults to null) + //cookie_auth.setApiKeyPrefix("Token"); + + // Configure API key authorization: oidc_auth + ApiKeyAuth oidc_auth = (ApiKeyAuth) defaultClient.getAuthentication("oidc_auth"); + oidc_auth.setApiKey("YOUR API KEY"); + // Uncomment the following line to set a prefix for the API key, e.g. "Token" (defaults to null) + //oidc_auth.setApiKeyPrefix("Token"); + + // Configure API key authorization: saml_auth + ApiKeyAuth saml_auth = (ApiKeyAuth) defaultClient.getAuthentication("saml_auth"); + saml_auth.setApiKey("YOUR API KEY"); + // Uncomment the following line to set a prefix for the API key, e.g. "Token" (defaults to null) + //saml_auth.setApiKeyPrefix("Token"); + + // Configure HTTP bearer authorization: jwt_token + HttpBearerAuth jwt_token = (HttpBearerAuth) defaultClient.getAuthentication("jwt_token"); + jwt_token.setBearerToken("BEARER TOKEN"); + + CommitsApi apiInstance = new CommitsApi(defaultClient); + String repository = "repository_example"; // String | + String branch = "branch_example"; // String | + CommitCreation commitCreation = new CommitCreation(); // CommitCreation | + String sourceMetarange = "sourceMetarange_example"; // String | The source metarange to commit. Branch must not have uncommitted changes. 
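+    // Note: the TaskCreation returned below carries the task id; pass it as the
+    // `id` query parameter of commitStatus to poll for the commit's outcome.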
+ try { + TaskCreation result = apiInstance.commitAsync(repository, branch, commitCreation) + .sourceMetarange(sourceMetarange) + .execute(); + System.out.println(result); + } catch (ApiException e) { + System.err.println("Exception when calling CommitsApi#commitAsync"); + System.err.println("Status code: " + e.getCode()); + System.err.println("Reason: " + e.getResponseBody()); + System.err.println("Response headers: " + e.getResponseHeaders()); + e.printStackTrace(); + } + } +} +``` + +### Parameters + +| Name | Type | Description | Notes | +|------------- | ------------- | ------------- | -------------| +| **repository** | **String**| | | +| **branch** | **String**| | | +| **commitCreation** | [**CommitCreation**](CommitCreation.md)| | | +| **sourceMetarange** | **String**| The source metarange to commit. Branch must not have uncommitted changes. | [optional] | + +### Return type + +[**TaskCreation**](TaskCreation.md) + +### Authorization + +[basic_auth](../README.md#basic_auth), [cookie_auth](../README.md#cookie_auth), [oidc_auth](../README.md#oidc_auth), [saml_auth](../README.md#saml_auth), [jwt_token](../README.md#jwt_token) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details +| Status code | Description | Response headers | +|-------------|-------------|------------------| +| **202** | commit task started | - | +| **400** | Validation Error | - | +| **401** | Unauthorized | - | +| **403** | Forbidden | - | +| **404** | Resource Not Found | - | +| **429** | too many requests | - | +| **501** | Not Implemented | - | +| **0** | Internal Server Error | - | + + +# **commitStatus** +> CommitStatus commitStatus(repository, branch, id).execute(); + +get status of async commit operation + +### Example +```java +// Import classes: +import io.lakefs.clients.sdk.ApiClient; +import io.lakefs.clients.sdk.ApiException; +import io.lakefs.clients.sdk.Configuration; +import io.lakefs.clients.sdk.auth.*; +import io.lakefs.clients.sdk.models.*; +import io.lakefs.clients.sdk.CommitsApi; + +public class Example { + public static void main(String[] args) { + ApiClient defaultClient = Configuration.getDefaultApiClient(); + defaultClient.setBasePath("/api/v1"); + + // Configure HTTP basic authorization: basic_auth + HttpBasicAuth basic_auth = (HttpBasicAuth) defaultClient.getAuthentication("basic_auth"); + basic_auth.setUsername("YOUR USERNAME"); + basic_auth.setPassword("YOUR PASSWORD"); + + // Configure API key authorization: cookie_auth + ApiKeyAuth cookie_auth = (ApiKeyAuth) defaultClient.getAuthentication("cookie_auth"); + cookie_auth.setApiKey("YOUR API KEY"); + // Uncomment the following line to set a prefix for the API key, e.g. "Token" (defaults to null) + //cookie_auth.setApiKeyPrefix("Token"); + + // Configure API key authorization: oidc_auth + ApiKeyAuth oidc_auth = (ApiKeyAuth) defaultClient.getAuthentication("oidc_auth"); + oidc_auth.setApiKey("YOUR API KEY"); + // Uncomment the following line to set a prefix for the API key, e.g. "Token" (defaults to null) + //oidc_auth.setApiKeyPrefix("Token"); + + // Configure API key authorization: saml_auth + ApiKeyAuth saml_auth = (ApiKeyAuth) defaultClient.getAuthentication("saml_auth"); + saml_auth.setApiKey("YOUR API KEY"); + // Uncomment the following line to set a prefix for the API key, e.g. 
"Token" (defaults to null) + //saml_auth.setApiKeyPrefix("Token"); + + // Configure HTTP bearer authorization: jwt_token + HttpBearerAuth jwt_token = (HttpBearerAuth) defaultClient.getAuthentication("jwt_token"); + jwt_token.setBearerToken("BEARER TOKEN"); + + CommitsApi apiInstance = new CommitsApi(defaultClient); + String repository = "repository_example"; // String | + String branch = "branch_example"; // String | + String id = "id_example"; // String | Unique identifier of the commit task + try { + CommitStatus result = apiInstance.commitStatus(repository, branch, id) + .execute(); + System.out.println(result); + } catch (ApiException e) { + System.err.println("Exception when calling CommitsApi#commitStatus"); + System.err.println("Status code: " + e.getCode()); + System.err.println("Reason: " + e.getResponseBody()); + System.err.println("Response headers: " + e.getResponseHeaders()); + e.printStackTrace(); + } + } +} +``` + +### Parameters + +| Name | Type | Description | Notes | +|------------- | ------------- | ------------- | -------------| +| **repository** | **String**| | | +| **branch** | **String**| | | +| **id** | **String**| Unique identifier of the commit task | | + +### Return type + +[**CommitStatus**](CommitStatus.md) + +### Authorization + +[basic_auth](../README.md#basic_auth), [cookie_auth](../README.md#cookie_auth), [oidc_auth](../README.md#oidc_auth), [saml_auth](../README.md#saml_auth), [jwt_token](../README.md#jwt_token) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details +| Status code | Description | Response headers | +|-------------|-------------|------------------| +| **200** | commit task status | - | +| **400** | Validation Error | - | +| **401** | Unauthorized | - | +| **403** | Forbidden | - | +| **404** | Not Found | - | +| **409** | Conflict | - | +| **412** | Precondition Failed (e.g. a pre-commit hook returned a failure) | - | +| **429** | too many requests | - | +| **501** | Not Implemented | - | +| **0** | Internal Server Error | - | + # **getCommit** > Commit getCommit(repository, commitId).execute(); diff --git a/clients/java/docs/MergeStatus.md b/clients/java/docs/MergeStatus.md new file mode 100644 index 00000000000..e9ca928ef92 --- /dev/null +++ b/clients/java/docs/MergeStatus.md @@ -0,0 +1,17 @@ + + +# MergeStatus + + +## Properties + +| Name | Type | Description | Notes | +|------------ | ------------- | ------------- | -------------| +|**taskId** | **String** | the id of the async merge task | | +|**completed** | **Boolean** | true if the task has completed (either successfully or with an error) | | +|**updateTime** | **OffsetDateTime** | last time the task status was updated | | +|**result** | [**MergeResult**](MergeResult.md) | | [optional] | +|**error** | [**Error**](Error.md) | | [optional] | + + + diff --git a/clients/java/docs/RefsApi.md b/clients/java/docs/RefsApi.md index 8e8e290b1fa..a0f965a464b 100644 --- a/clients/java/docs/RefsApi.md +++ b/clients/java/docs/RefsApi.md @@ -8,6 +8,8 @@ All URIs are relative to */api/v1* | [**findMergeBase**](RefsApi.md#findMergeBase) | **GET** /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch} | find the merge base for 2 references | | [**logCommits**](RefsApi.md#logCommits) | **GET** /repositories/{repository}/refs/{ref}/commits | get commit log from ref. If both objects and prefixes are empty, return all commits. 
| | [**mergeIntoBranch**](RefsApi.md#mergeIntoBranch) | **POST** /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch} | merge references | +| [**mergeIntoBranchAsync**](RefsApi.md#mergeIntoBranchAsync) | **POST** /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch}/async | merge references asynchronously | +| [**mergeIntoBranchStatus**](RefsApi.md#mergeIntoBranchStatus) | **GET** /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch}/status | get status of async merge operation | @@ -448,3 +450,210 @@ public class Example { | **429** | too many requests | - | | **0** | Internal Server Error | - | + +# **mergeIntoBranchAsync** +> TaskCreation mergeIntoBranchAsync(repository, sourceRef, destinationBranch).merge(merge).execute(); + +merge references asynchronously + +### Example +```java +// Import classes: +import io.lakefs.clients.sdk.ApiClient; +import io.lakefs.clients.sdk.ApiException; +import io.lakefs.clients.sdk.Configuration; +import io.lakefs.clients.sdk.auth.*; +import io.lakefs.clients.sdk.models.*; +import io.lakefs.clients.sdk.RefsApi; + +public class Example { + public static void main(String[] args) { + ApiClient defaultClient = Configuration.getDefaultApiClient(); + defaultClient.setBasePath("/api/v1"); + + // Configure HTTP basic authorization: basic_auth + HttpBasicAuth basic_auth = (HttpBasicAuth) defaultClient.getAuthentication("basic_auth"); + basic_auth.setUsername("YOUR USERNAME"); + basic_auth.setPassword("YOUR PASSWORD"); + + // Configure API key authorization: cookie_auth + ApiKeyAuth cookie_auth = (ApiKeyAuth) defaultClient.getAuthentication("cookie_auth"); + cookie_auth.setApiKey("YOUR API KEY"); + // Uncomment the following line to set a prefix for the API key, e.g. "Token" (defaults to null) + //cookie_auth.setApiKeyPrefix("Token"); + + // Configure API key authorization: oidc_auth + ApiKeyAuth oidc_auth = (ApiKeyAuth) defaultClient.getAuthentication("oidc_auth"); + oidc_auth.setApiKey("YOUR API KEY"); + // Uncomment the following line to set a prefix for the API key, e.g. "Token" (defaults to null) + //oidc_auth.setApiKeyPrefix("Token"); + + // Configure API key authorization: saml_auth + ApiKeyAuth saml_auth = (ApiKeyAuth) defaultClient.getAuthentication("saml_auth"); + saml_auth.setApiKey("YOUR API KEY"); + // Uncomment the following line to set a prefix for the API key, e.g. 
"Token" (defaults to null) + //saml_auth.setApiKeyPrefix("Token"); + + // Configure HTTP bearer authorization: jwt_token + HttpBearerAuth jwt_token = (HttpBearerAuth) defaultClient.getAuthentication("jwt_token"); + jwt_token.setBearerToken("BEARER TOKEN"); + + RefsApi apiInstance = new RefsApi(defaultClient); + String repository = "repository_example"; // String | + String sourceRef = "sourceRef_example"; // String | source ref + String destinationBranch = "destinationBranch_example"; // String | destination branch name + Merge merge = new Merge(); // Merge | + try { + TaskCreation result = apiInstance.mergeIntoBranchAsync(repository, sourceRef, destinationBranch) + .merge(merge) + .execute(); + System.out.println(result); + } catch (ApiException e) { + System.err.println("Exception when calling RefsApi#mergeIntoBranchAsync"); + System.err.println("Status code: " + e.getCode()); + System.err.println("Reason: " + e.getResponseBody()); + System.err.println("Response headers: " + e.getResponseHeaders()); + e.printStackTrace(); + } + } +} +``` + +### Parameters + +| Name | Type | Description | Notes | +|------------- | ------------- | ------------- | -------------| +| **repository** | **String**| | | +| **sourceRef** | **String**| source ref | | +| **destinationBranch** | **String**| destination branch name | | +| **merge** | [**Merge**](Merge.md)| | [optional] | + +### Return type + +[**TaskCreation**](TaskCreation.md) + +### Authorization + +[basic_auth](../README.md#basic_auth), [cookie_auth](../README.md#cookie_auth), [oidc_auth](../README.md#oidc_auth), [saml_auth](../README.md#saml_auth), [jwt_token](../README.md#jwt_token) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details +| Status code | Description | Response headers | +|-------------|-------------|------------------| +| **202** | merge task started | - | +| **400** | Validation Error | - | +| **401** | Unauthorized | - | +| **403** | Forbidden | - | +| **404** | Resource Not Found | - | +| **429** | too many requests | - | +| **501** | Not Implemented | - | +| **0** | Internal Server Error | - | + + +# **mergeIntoBranchStatus** +> MergeStatus mergeIntoBranchStatus(repository, sourceRef, destinationBranch, id).execute(); + +get status of async merge operation + +### Example +```java +// Import classes: +import io.lakefs.clients.sdk.ApiClient; +import io.lakefs.clients.sdk.ApiException; +import io.lakefs.clients.sdk.Configuration; +import io.lakefs.clients.sdk.auth.*; +import io.lakefs.clients.sdk.models.*; +import io.lakefs.clients.sdk.RefsApi; + +public class Example { + public static void main(String[] args) { + ApiClient defaultClient = Configuration.getDefaultApiClient(); + defaultClient.setBasePath("/api/v1"); + + // Configure HTTP basic authorization: basic_auth + HttpBasicAuth basic_auth = (HttpBasicAuth) defaultClient.getAuthentication("basic_auth"); + basic_auth.setUsername("YOUR USERNAME"); + basic_auth.setPassword("YOUR PASSWORD"); + + // Configure API key authorization: cookie_auth + ApiKeyAuth cookie_auth = (ApiKeyAuth) defaultClient.getAuthentication("cookie_auth"); + cookie_auth.setApiKey("YOUR API KEY"); + // Uncomment the following line to set a prefix for the API key, e.g. 
"Token" (defaults to null) + //cookie_auth.setApiKeyPrefix("Token"); + + // Configure API key authorization: oidc_auth + ApiKeyAuth oidc_auth = (ApiKeyAuth) defaultClient.getAuthentication("oidc_auth"); + oidc_auth.setApiKey("YOUR API KEY"); + // Uncomment the following line to set a prefix for the API key, e.g. "Token" (defaults to null) + //oidc_auth.setApiKeyPrefix("Token"); + + // Configure API key authorization: saml_auth + ApiKeyAuth saml_auth = (ApiKeyAuth) defaultClient.getAuthentication("saml_auth"); + saml_auth.setApiKey("YOUR API KEY"); + // Uncomment the following line to set a prefix for the API key, e.g. "Token" (defaults to null) + //saml_auth.setApiKeyPrefix("Token"); + + // Configure HTTP bearer authorization: jwt_token + HttpBearerAuth jwt_token = (HttpBearerAuth) defaultClient.getAuthentication("jwt_token"); + jwt_token.setBearerToken("BEARER TOKEN"); + + RefsApi apiInstance = new RefsApi(defaultClient); + String repository = "repository_example"; // String | + String sourceRef = "sourceRef_example"; // String | source ref + String destinationBranch = "destinationBranch_example"; // String | destination branch name + String id = "id_example"; // String | Unique identifier of the merge task + try { + MergeStatus result = apiInstance.mergeIntoBranchStatus(repository, sourceRef, destinationBranch, id) + .execute(); + System.out.println(result); + } catch (ApiException e) { + System.err.println("Exception when calling RefsApi#mergeIntoBranchStatus"); + System.err.println("Status code: " + e.getCode()); + System.err.println("Reason: " + e.getResponseBody()); + System.err.println("Response headers: " + e.getResponseHeaders()); + e.printStackTrace(); + } + } +} +``` + +### Parameters + +| Name | Type | Description | Notes | +|------------- | ------------- | ------------- | -------------| +| **repository** | **String**| | | +| **sourceRef** | **String**| source ref | | +| **destinationBranch** | **String**| destination branch name | | +| **id** | **String**| Unique identifier of the merge task | | + +### Return type + +[**MergeStatus**](MergeStatus.md) + +### Authorization + +[basic_auth](../README.md#basic_auth), [cookie_auth](../README.md#cookie_auth), [oidc_auth](../README.md#oidc_auth), [saml_auth](../README.md#saml_auth), [jwt_token](../README.md#jwt_token) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details +| Status code | Description | Response headers | +|-------------|-------------|------------------| +| **200** | merge task status | - | +| **400** | Validation Error | - | +| **401** | Unauthorized | - | +| **403** | Forbidden | - | +| **404** | Not Found | - | +| **409** | Conflict | - | +| **412** | precondition failed (e.g. 
a pre-merge hook returned a failure) | - | +| **429** | too many requests | - | +| **501** | Not Implemented | - | +| **0** | Internal Server Error | - | + diff --git a/clients/java/src/main/java/io/lakefs/clients/sdk/CommitsApi.java b/clients/java/src/main/java/io/lakefs/clients/sdk/CommitsApi.java index 9ba183e8f09..4c87dbdf482 100644 --- a/clients/java/src/main/java/io/lakefs/clients/sdk/CommitsApi.java +++ b/clients/java/src/main/java/io/lakefs/clients/sdk/CommitsApi.java @@ -29,7 +29,9 @@ import io.lakefs.clients.sdk.model.Commit; import io.lakefs.clients.sdk.model.CommitCreation; +import io.lakefs.clients.sdk.model.CommitStatus; import io.lakefs.clients.sdk.model.Error; +import io.lakefs.clients.sdk.model.TaskCreation; import java.lang.reflect.Type; import java.util.ArrayList; @@ -299,6 +301,444 @@ public okhttp3.Call executeAsync(final ApiCallback _callback) throws Api public APIcommitRequest commit(String repository, String branch, CommitCreation commitCreation) { return new APIcommitRequest(repository, branch, commitCreation); } + private okhttp3.Call commitAsyncCall(String repository, String branch, CommitCreation commitCreation, String sourceMetarange, final ApiCallback _callback) throws ApiException { + String basePath = null; + // Operation Servers + String[] localBasePaths = new String[] { }; + + // Determine Base Path to Use + if (localCustomBaseUrl != null){ + basePath = localCustomBaseUrl; + } else if ( localBasePaths.length > 0 ) { + basePath = localBasePaths[localHostIndex]; + } else { + basePath = null; + } + + Object localVarPostBody = commitCreation; + + // create path and map variables + String localVarPath = "/repositories/{repository}/branches/{branch}/commits/async" + .replace("{" + "repository" + "}", localVarApiClient.escapeString(repository.toString())) + .replace("{" + "branch" + "}", localVarApiClient.escapeString(branch.toString())); + + List localVarQueryParams = new ArrayList(); + List localVarCollectionQueryParams = new ArrayList(); + Map localVarHeaderParams = new HashMap(); + Map localVarCookieParams = new HashMap(); + Map localVarFormParams = new HashMap(); + + if (sourceMetarange != null) { + localVarQueryParams.addAll(localVarApiClient.parameterToPair("source_metarange", sourceMetarange)); + } + + final String[] localVarAccepts = { + "application/json" + }; + final String localVarAccept = localVarApiClient.selectHeaderAccept(localVarAccepts); + if (localVarAccept != null) { + localVarHeaderParams.put("Accept", localVarAccept); + } + + final String[] localVarContentTypes = { + "application/json" + }; + final String localVarContentType = localVarApiClient.selectHeaderContentType(localVarContentTypes); + if (localVarContentType != null) { + localVarHeaderParams.put("Content-Type", localVarContentType); + } + + String[] localVarAuthNames = new String[] { "basic_auth", "cookie_auth", "oidc_auth", "saml_auth", "jwt_token" }; + return localVarApiClient.buildCall(basePath, localVarPath, "POST", localVarQueryParams, localVarCollectionQueryParams, localVarPostBody, localVarHeaderParams, localVarCookieParams, localVarFormParams, localVarAuthNames, _callback); + } + + @SuppressWarnings("rawtypes") + private okhttp3.Call commitAsyncValidateBeforeCall(String repository, String branch, CommitCreation commitCreation, String sourceMetarange, final ApiCallback _callback) throws ApiException { + // verify the required parameter 'repository' is set + if (repository == null) { + throw new ApiException("Missing the required parameter 'repository' when calling 
commitAsync(Async)"); + } + + // verify the required parameter 'branch' is set + if (branch == null) { + throw new ApiException("Missing the required parameter 'branch' when calling commitAsync(Async)"); + } + + // verify the required parameter 'commitCreation' is set + if (commitCreation == null) { + throw new ApiException("Missing the required parameter 'commitCreation' when calling commitAsync(Async)"); + } + + return commitAsyncCall(repository, branch, commitCreation, sourceMetarange, _callback); + + } + + + private ApiResponse commitAsyncWithHttpInfo(String repository, String branch, CommitCreation commitCreation, String sourceMetarange) throws ApiException { + okhttp3.Call localVarCall = commitAsyncValidateBeforeCall(repository, branch, commitCreation, sourceMetarange, null); + Type localVarReturnType = new TypeToken(){}.getType(); + return localVarApiClient.execute(localVarCall, localVarReturnType); + } + + private okhttp3.Call commitAsyncAsync(String repository, String branch, CommitCreation commitCreation, String sourceMetarange, final ApiCallback _callback) throws ApiException { + + okhttp3.Call localVarCall = commitAsyncValidateBeforeCall(repository, branch, commitCreation, sourceMetarange, _callback); + Type localVarReturnType = new TypeToken(){}.getType(); + localVarApiClient.executeAsync(localVarCall, localVarReturnType, _callback); + return localVarCall; + } + + public class APIcommitAsyncRequest { + private final String repository; + private final String branch; + private final CommitCreation commitCreation; + private String sourceMetarange; + + private APIcommitAsyncRequest(String repository, String branch, CommitCreation commitCreation) { + this.repository = repository; + this.branch = branch; + this.commitCreation = commitCreation; + } + + /** + * Set sourceMetarange + * @param sourceMetarange The source metarange to commit. Branch must not have uncommitted changes. (optional) + * @return APIcommitAsyncRequest + */ + public APIcommitAsyncRequest sourceMetarange(String sourceMetarange) { + this.sourceMetarange = sourceMetarange; + return this; + } + + /** + * Build call for commitAsync + * @param _callback ApiCallback API callback + * @return Call to execute + * @throws ApiException If fail to serialize the request body object + * @http.response.details + + + + + + + + + + +
+     <table border="1">
+       <caption>Response Details</caption>
+        <tr><td> Status Code </td><td> Description </td><td> Response Headers </td></tr>
+        <tr><td> 202 </td><td> commit task started </td><td>  -  </td></tr>
+        <tr><td> 400 </td><td> Validation Error </td><td>  -  </td></tr>
+        <tr><td> 401 </td><td> Unauthorized </td><td>  -  </td></tr>
+        <tr><td> 403 </td><td> Forbidden </td><td>  -  </td></tr>
+        <tr><td> 404 </td><td> Resource Not Found </td><td>  -  </td></tr>
+        <tr><td> 429 </td><td> too many requests </td><td>  -  </td></tr>
+        <tr><td> 501 </td><td> Not Implemented </td><td>  -  </td></tr>
+        <tr><td> 0 </td><td> Internal Server Error </td><td>  -  </td></tr>
+     </table>
+ */ + public okhttp3.Call buildCall(final ApiCallback _callback) throws ApiException { + return commitAsyncCall(repository, branch, commitCreation, sourceMetarange, _callback); + } + + /** + * Execute commitAsync request + * @return TaskCreation + * @throws ApiException If fail to call the API, e.g. server error or cannot deserialize the response body + * @http.response.details + + + + + + + + + + +
+     <table border="1">
+       <caption>Response Details</caption>
+        <tr><td> Status Code </td><td> Description </td><td> Response Headers </td></tr>
+        <tr><td> 202 </td><td> commit task started </td><td>  -  </td></tr>
+        <tr><td> 400 </td><td> Validation Error </td><td>  -  </td></tr>
+        <tr><td> 401 </td><td> Unauthorized </td><td>  -  </td></tr>
+        <tr><td> 403 </td><td> Forbidden </td><td>  -  </td></tr>
+        <tr><td> 404 </td><td> Resource Not Found </td><td>  -  </td></tr>
+        <tr><td> 429 </td><td> too many requests </td><td>  -  </td></tr>
+        <tr><td> 501 </td><td> Not Implemented </td><td>  -  </td></tr>
+        <tr><td> 0 </td><td> Internal Server Error </td><td>  -  </td></tr>
+     </table>
+ */ + public TaskCreation execute() throws ApiException { + ApiResponse localVarResp = commitAsyncWithHttpInfo(repository, branch, commitCreation, sourceMetarange); + return localVarResp.getData(); + } + + /** + * Execute commitAsync request with HTTP info returned + * @return ApiResponse<TaskCreation> + * @throws ApiException If fail to call the API, e.g. server error or cannot deserialize the response body + * @http.response.details + + + + + + + + + + +
+     <table border="1">
+       <caption>Response Details</caption>
+        <tr><td> Status Code </td><td> Description </td><td> Response Headers </td></tr>
+        <tr><td> 202 </td><td> commit task started </td><td>  -  </td></tr>
+        <tr><td> 400 </td><td> Validation Error </td><td>  -  </td></tr>
+        <tr><td> 401 </td><td> Unauthorized </td><td>  -  </td></tr>
+        <tr><td> 403 </td><td> Forbidden </td><td>  -  </td></tr>
+        <tr><td> 404 </td><td> Resource Not Found </td><td>  -  </td></tr>
+        <tr><td> 429 </td><td> too many requests </td><td>  -  </td></tr>
+        <tr><td> 501 </td><td> Not Implemented </td><td>  -  </td></tr>
+        <tr><td> 0 </td><td> Internal Server Error </td><td>  -  </td></tr>
+     </table>
+ */ + public ApiResponse executeWithHttpInfo() throws ApiException { + return commitAsyncWithHttpInfo(repository, branch, commitCreation, sourceMetarange); + } + + /** + * Execute commitAsync request (asynchronously) + * @param _callback The callback to be executed when the API call finishes + * @return The request call + * @throws ApiException If fail to process the API call, e.g. serializing the request body object + * @http.response.details + + + + + + + + + + +
+     <table border="1">
+       <caption>Response Details</caption>
+        <tr><td> Status Code </td><td> Description </td><td> Response Headers </td></tr>
+        <tr><td> 202 </td><td> commit task started </td><td>  -  </td></tr>
+        <tr><td> 400 </td><td> Validation Error </td><td>  -  </td></tr>
+        <tr><td> 401 </td><td> Unauthorized </td><td>  -  </td></tr>
+        <tr><td> 403 </td><td> Forbidden </td><td>  -  </td></tr>
+        <tr><td> 404 </td><td> Resource Not Found </td><td>  -  </td></tr>
+        <tr><td> 429 </td><td> too many requests </td><td>  -  </td></tr>
+        <tr><td> 501 </td><td> Not Implemented </td><td>  -  </td></tr>
+        <tr><td> 0 </td><td> Internal Server Error </td><td>  -  </td></tr>
+     </table>
+ */ + public okhttp3.Call executeAsync(final ApiCallback _callback) throws ApiException { + return commitAsyncAsync(repository, branch, commitCreation, sourceMetarange, _callback); + } + } + + /** + * create commit asynchronously + * + * @param repository (required) + * @param branch (required) + * @param commitCreation (required) + * @return APIcommitAsyncRequest + * @http.response.details + + + + + + + + + + +
+     <table border="1">
+       <caption>Response Details</caption>
+        <tr><td> Status Code </td><td> Description </td><td> Response Headers </td></tr>
+        <tr><td> 202 </td><td> commit task started </td><td>  -  </td></tr>
+        <tr><td> 400 </td><td> Validation Error </td><td>  -  </td></tr>
+        <tr><td> 401 </td><td> Unauthorized </td><td>  -  </td></tr>
+        <tr><td> 403 </td><td> Forbidden </td><td>  -  </td></tr>
+        <tr><td> 404 </td><td> Resource Not Found </td><td>  -  </td></tr>
+        <tr><td> 429 </td><td> too many requests </td><td>  -  </td></tr>
+        <tr><td> 501 </td><td> Not Implemented </td><td>  -  </td></tr>
+        <tr><td> 0 </td><td> Internal Server Error </td><td>  -  </td></tr>
+     </table>
+ */ + public APIcommitAsyncRequest commitAsync(String repository, String branch, CommitCreation commitCreation) { + return new APIcommitAsyncRequest(repository, branch, commitCreation); + } + private okhttp3.Call commitStatusCall(String repository, String branch, String id, final ApiCallback _callback) throws ApiException { + String basePath = null; + // Operation Servers + String[] localBasePaths = new String[] { }; + + // Determine Base Path to Use + if (localCustomBaseUrl != null){ + basePath = localCustomBaseUrl; + } else if ( localBasePaths.length > 0 ) { + basePath = localBasePaths[localHostIndex]; + } else { + basePath = null; + } + + Object localVarPostBody = null; + + // create path and map variables + String localVarPath = "/repositories/{repository}/branches/{branch}/commits/status" + .replace("{" + "repository" + "}", localVarApiClient.escapeString(repository.toString())) + .replace("{" + "branch" + "}", localVarApiClient.escapeString(branch.toString())); + + List localVarQueryParams = new ArrayList(); + List localVarCollectionQueryParams = new ArrayList(); + Map localVarHeaderParams = new HashMap(); + Map localVarCookieParams = new HashMap(); + Map localVarFormParams = new HashMap(); + + if (id != null) { + localVarQueryParams.addAll(localVarApiClient.parameterToPair("id", id)); + } + + final String[] localVarAccepts = { + "application/json" + }; + final String localVarAccept = localVarApiClient.selectHeaderAccept(localVarAccepts); + if (localVarAccept != null) { + localVarHeaderParams.put("Accept", localVarAccept); + } + + final String[] localVarContentTypes = { + }; + final String localVarContentType = localVarApiClient.selectHeaderContentType(localVarContentTypes); + if (localVarContentType != null) { + localVarHeaderParams.put("Content-Type", localVarContentType); + } + + String[] localVarAuthNames = new String[] { "basic_auth", "cookie_auth", "oidc_auth", "saml_auth", "jwt_token" }; + return localVarApiClient.buildCall(basePath, localVarPath, "GET", localVarQueryParams, localVarCollectionQueryParams, localVarPostBody, localVarHeaderParams, localVarCookieParams, localVarFormParams, localVarAuthNames, _callback); + } + + @SuppressWarnings("rawtypes") + private okhttp3.Call commitStatusValidateBeforeCall(String repository, String branch, String id, final ApiCallback _callback) throws ApiException { + // verify the required parameter 'repository' is set + if (repository == null) { + throw new ApiException("Missing the required parameter 'repository' when calling commitStatus(Async)"); + } + + // verify the required parameter 'branch' is set + if (branch == null) { + throw new ApiException("Missing the required parameter 'branch' when calling commitStatus(Async)"); + } + + // verify the required parameter 'id' is set + if (id == null) { + throw new ApiException("Missing the required parameter 'id' when calling commitStatus(Async)"); + } + + return commitStatusCall(repository, branch, id, _callback); + + } + + + private ApiResponse commitStatusWithHttpInfo(String repository, String branch, String id) throws ApiException { + okhttp3.Call localVarCall = commitStatusValidateBeforeCall(repository, branch, id, null); + Type localVarReturnType = new TypeToken(){}.getType(); + return localVarApiClient.execute(localVarCall, localVarReturnType); + } + + private okhttp3.Call commitStatusAsync(String repository, String branch, String id, final ApiCallback _callback) throws ApiException { + + okhttp3.Call localVarCall = commitStatusValidateBeforeCall(repository, branch, id, _callback); + 
Type localVarReturnType = new TypeToken<CommitStatus>(){}.getType();
+        localVarApiClient.executeAsync(localVarCall, localVarReturnType, _callback);
+        return localVarCall;
+    }
+
+    public class APIcommitStatusRequest {
+        private final String repository;
+        private final String branch;
+        private final String id;
+
+        private APIcommitStatusRequest(String repository, String branch, String id) {
+            this.repository = repository;
+            this.branch = branch;
+            this.id = id;
+        }
+
+        /**
+         * Build call for commitStatus
+         * @param _callback ApiCallback API callback
+         * @return Call to execute
+         * @throws ApiException If fail to serialize the request body object
+         * @http.response.details
+     <table border="1">
+       <caption>Response Details</caption>
+        <tr><td> Status Code </td><td> Description </td><td> Response Headers </td></tr>
+        <tr><td> 200 </td><td> commit task status </td><td>  -  </td></tr>
+        <tr><td> 400 </td><td> Validation Error </td><td>  -  </td></tr>
+        <tr><td> 401 </td><td> Unauthorized </td><td>  -  </td></tr>
+        <tr><td> 403 </td><td> Forbidden </td><td>  -  </td></tr>
+        <tr><td> 404 </td><td> Not Found </td><td>  -  </td></tr>
+        <tr><td> 409 </td><td> Conflict </td><td>  -  </td></tr>
+        <tr><td> 412 </td><td> Precondition Failed (e.g. a pre-commit hook returned a failure) </td><td>  -  </td></tr>
+        <tr><td> 429 </td><td> too many requests </td><td>  -  </td></tr>
+        <tr><td> 501 </td><td> Not Implemented </td><td>  -  </td></tr>
+        <tr><td> 0 </td><td> Internal Server Error </td><td>  -  </td></tr>
+     </table>
+ */ + public okhttp3.Call buildCall(final ApiCallback _callback) throws ApiException { + return commitStatusCall(repository, branch, id, _callback); + } + + /** + * Execute commitStatus request + * @return CommitStatus + * @throws ApiException If fail to call the API, e.g. server error or cannot deserialize the response body + * @http.response.details + + + + + + + + + + + + +
+     <table border="1">
+       <caption>Response Details</caption>
+        <tr><td> Status Code </td><td> Description </td><td> Response Headers </td></tr>
+        <tr><td> 200 </td><td> commit task status </td><td>  -  </td></tr>
+        <tr><td> 400 </td><td> Validation Error </td><td>  -  </td></tr>
+        <tr><td> 401 </td><td> Unauthorized </td><td>  -  </td></tr>
+        <tr><td> 403 </td><td> Forbidden </td><td>  -  </td></tr>
+        <tr><td> 404 </td><td> Not Found </td><td>  -  </td></tr>
+        <tr><td> 409 </td><td> Conflict </td><td>  -  </td></tr>
+        <tr><td> 412 </td><td> Precondition Failed (e.g. a pre-commit hook returned a failure) </td><td>  -  </td></tr>
+        <tr><td> 429 </td><td> too many requests </td><td>  -  </td></tr>
+        <tr><td> 501 </td><td> Not Implemented </td><td>  -  </td></tr>
+        <tr><td> 0 </td><td> Internal Server Error </td><td>  -  </td></tr>
+     </table>
+ */ + public CommitStatus execute() throws ApiException { + ApiResponse localVarResp = commitStatusWithHttpInfo(repository, branch, id); + return localVarResp.getData(); + } + + /** + * Execute commitStatus request with HTTP info returned + * @return ApiResponse<CommitStatus> + * @throws ApiException If fail to call the API, e.g. server error or cannot deserialize the response body + * @http.response.details + + + + + + + + + + + + +
+     <table border="1">
+       <caption>Response Details</caption>
+        <tr><td> Status Code </td><td> Description </td><td> Response Headers </td></tr>
+        <tr><td> 200 </td><td> commit task status </td><td>  -  </td></tr>
+        <tr><td> 400 </td><td> Validation Error </td><td>  -  </td></tr>
+        <tr><td> 401 </td><td> Unauthorized </td><td>  -  </td></tr>
+        <tr><td> 403 </td><td> Forbidden </td><td>  -  </td></tr>
+        <tr><td> 404 </td><td> Not Found </td><td>  -  </td></tr>
+        <tr><td> 409 </td><td> Conflict </td><td>  -  </td></tr>
+        <tr><td> 412 </td><td> Precondition Failed (e.g. a pre-commit hook returned a failure) </td><td>  -  </td></tr>
+        <tr><td> 429 </td><td> too many requests </td><td>  -  </td></tr>
+        <tr><td> 501 </td><td> Not Implemented </td><td>  -  </td></tr>
+        <tr><td> 0 </td><td> Internal Server Error </td><td>  -  </td></tr>
+     </table>
+ */ + public ApiResponse executeWithHttpInfo() throws ApiException { + return commitStatusWithHttpInfo(repository, branch, id); + } + + /** + * Execute commitStatus request (asynchronously) + * @param _callback The callback to be executed when the API call finishes + * @return The request call + * @throws ApiException If fail to process the API call, e.g. serializing the request body object + * @http.response.details + + + + + + + + + + + + +
+     <table border="1">
+       <caption>Response Details</caption>
+        <tr><td> Status Code </td><td> Description </td><td> Response Headers </td></tr>
+        <tr><td> 200 </td><td> commit task status </td><td>  -  </td></tr>
+        <tr><td> 400 </td><td> Validation Error </td><td>  -  </td></tr>
+        <tr><td> 401 </td><td> Unauthorized </td><td>  -  </td></tr>
+        <tr><td> 403 </td><td> Forbidden </td><td>  -  </td></tr>
+        <tr><td> 404 </td><td> Not Found </td><td>  -  </td></tr>
+        <tr><td> 409 </td><td> Conflict </td><td>  -  </td></tr>
+        <tr><td> 412 </td><td> Precondition Failed (e.g. a pre-commit hook returned a failure) </td><td>  -  </td></tr>
+        <tr><td> 429 </td><td> too many requests </td><td>  -  </td></tr>
+        <tr><td> 501 </td><td> Not Implemented </td><td>  -  </td></tr>
+        <tr><td> 0 </td><td> Internal Server Error </td><td>  -  </td></tr>
+     </table>
+ */ + public okhttp3.Call executeAsync(final ApiCallback _callback) throws ApiException { + return commitStatusAsync(repository, branch, id, _callback); + } + } + + /** + * get status of async commit operation + * + * @param repository (required) + * @param branch (required) + * @param id Unique identifier of the commit task (required) + * @return APIcommitStatusRequest + * @http.response.details + + + + + + + + + + + + +
+     <table border="1">
+       <caption>Response Details</caption>
+        <tr><td> Status Code </td><td> Description </td><td> Response Headers </td></tr>
+        <tr><td> 200 </td><td> commit task status </td><td>  -  </td></tr>
+        <tr><td> 400 </td><td> Validation Error </td><td>  -  </td></tr>
+        <tr><td> 401 </td><td> Unauthorized </td><td>  -  </td></tr>
+        <tr><td> 403 </td><td> Forbidden </td><td>  -  </td></tr>
+        <tr><td> 404 </td><td> Not Found </td><td>  -  </td></tr>
+        <tr><td> 409 </td><td> Conflict </td><td>  -  </td></tr>
+        <tr><td> 412 </td><td> Precondition Failed (e.g. a pre-commit hook returned a failure) </td><td>  -  </td></tr>
+        <tr><td> 429 </td><td> too many requests </td><td>  -  </td></tr>
+        <tr><td> 501 </td><td> Not Implemented </td><td>  -  </td></tr>
+        <tr><td> 0 </td><td> Internal Server Error </td><td>  -  </td></tr>
+     </table>
+ */ + public APIcommitStatusRequest commitStatus(String repository, String branch, String id) { + return new APIcommitStatusRequest(repository, branch, id); + } private okhttp3.Call getCommitCall(String repository, String commitId, final ApiCallback _callback) throws ApiException { String basePath = null; // Operation Servers diff --git a/clients/java/src/main/java/io/lakefs/clients/sdk/JSON.java b/clients/java/src/main/java/io/lakefs/clients/sdk/JSON.java index 80f3d4468cb..2e6bbb9d1e6 100644 --- a/clients/java/src/main/java/io/lakefs/clients/sdk/JSON.java +++ b/clients/java/src/main/java/io/lakefs/clients/sdk/JSON.java @@ -109,6 +109,7 @@ private static Class getClassByDiscriminator(Map classByDiscriminatorValue, Stri gsonBuilder.registerTypeAdapterFactory(new io.lakefs.clients.sdk.model.CommitList.CustomTypeAdapterFactory()); gsonBuilder.registerTypeAdapterFactory(new io.lakefs.clients.sdk.model.CommitOverrides.CustomTypeAdapterFactory()); gsonBuilder.registerTypeAdapterFactory(new io.lakefs.clients.sdk.model.CommitRecordCreation.CustomTypeAdapterFactory()); + gsonBuilder.registerTypeAdapterFactory(new io.lakefs.clients.sdk.model.CommitStatus.CustomTypeAdapterFactory()); gsonBuilder.registerTypeAdapterFactory(new io.lakefs.clients.sdk.model.CompletePresignMultipartUpload.CustomTypeAdapterFactory()); gsonBuilder.registerTypeAdapterFactory(new io.lakefs.clients.sdk.model.Config.CustomTypeAdapterFactory()); gsonBuilder.registerTypeAdapterFactory(new io.lakefs.clients.sdk.model.CopyPartSource.CustomTypeAdapterFactory()); @@ -151,6 +152,7 @@ private static Class getClassByDiscriminator(Map classByDiscriminatorValue, Stri gsonBuilder.registerTypeAdapterFactory(new io.lakefs.clients.sdk.model.LoginInformation.CustomTypeAdapterFactory()); gsonBuilder.registerTypeAdapterFactory(new io.lakefs.clients.sdk.model.Merge.CustomTypeAdapterFactory()); gsonBuilder.registerTypeAdapterFactory(new io.lakefs.clients.sdk.model.MergeResult.CustomTypeAdapterFactory()); + gsonBuilder.registerTypeAdapterFactory(new io.lakefs.clients.sdk.model.MergeStatus.CustomTypeAdapterFactory()); gsonBuilder.registerTypeAdapterFactory(new io.lakefs.clients.sdk.model.MetaRangeCreation.CustomTypeAdapterFactory()); gsonBuilder.registerTypeAdapterFactory(new io.lakefs.clients.sdk.model.MetaRangeCreationResponse.CustomTypeAdapterFactory()); gsonBuilder.registerTypeAdapterFactory(new io.lakefs.clients.sdk.model.ObjectCopyCreation.CustomTypeAdapterFactory()); diff --git a/clients/java/src/main/java/io/lakefs/clients/sdk/RefsApi.java b/clients/java/src/main/java/io/lakefs/clients/sdk/RefsApi.java index 6ca647f0823..8fecf6bfec2 100644 --- a/clients/java/src/main/java/io/lakefs/clients/sdk/RefsApi.java +++ b/clients/java/src/main/java/io/lakefs/clients/sdk/RefsApi.java @@ -33,7 +33,9 @@ import io.lakefs.clients.sdk.model.FindMergeBaseResult; import io.lakefs.clients.sdk.model.Merge; import io.lakefs.clients.sdk.model.MergeResult; +import io.lakefs.clients.sdk.model.MergeStatus; import java.time.OffsetDateTime; +import io.lakefs.clients.sdk.model.TaskCreation; import java.lang.reflect.Type; import java.util.ArrayList; @@ -1086,4 +1088,448 @@ public okhttp3.Call executeAsync(final ApiCallback _callback) throw public APImergeIntoBranchRequest mergeIntoBranch(String repository, String sourceRef, String destinationBranch) { return new APImergeIntoBranchRequest(repository, sourceRef, destinationBranch); } + private okhttp3.Call mergeIntoBranchAsyncCall(String repository, String sourceRef, String destinationBranch, Merge merge, final 
ApiCallback _callback) throws ApiException { + String basePath = null; + // Operation Servers + String[] localBasePaths = new String[] { }; + + // Determine Base Path to Use + if (localCustomBaseUrl != null){ + basePath = localCustomBaseUrl; + } else if ( localBasePaths.length > 0 ) { + basePath = localBasePaths[localHostIndex]; + } else { + basePath = null; + } + + Object localVarPostBody = merge; + + // create path and map variables + String localVarPath = "/repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch}/async" + .replace("{" + "repository" + "}", localVarApiClient.escapeString(repository.toString())) + .replace("{" + "sourceRef" + "}", localVarApiClient.escapeString(sourceRef.toString())) + .replace("{" + "destinationBranch" + "}", localVarApiClient.escapeString(destinationBranch.toString())); + + List localVarQueryParams = new ArrayList(); + List localVarCollectionQueryParams = new ArrayList(); + Map localVarHeaderParams = new HashMap(); + Map localVarCookieParams = new HashMap(); + Map localVarFormParams = new HashMap(); + + final String[] localVarAccepts = { + "application/json" + }; + final String localVarAccept = localVarApiClient.selectHeaderAccept(localVarAccepts); + if (localVarAccept != null) { + localVarHeaderParams.put("Accept", localVarAccept); + } + + final String[] localVarContentTypes = { + "application/json" + }; + final String localVarContentType = localVarApiClient.selectHeaderContentType(localVarContentTypes); + if (localVarContentType != null) { + localVarHeaderParams.put("Content-Type", localVarContentType); + } + + String[] localVarAuthNames = new String[] { "basic_auth", "cookie_auth", "oidc_auth", "saml_auth", "jwt_token" }; + return localVarApiClient.buildCall(basePath, localVarPath, "POST", localVarQueryParams, localVarCollectionQueryParams, localVarPostBody, localVarHeaderParams, localVarCookieParams, localVarFormParams, localVarAuthNames, _callback); + } + + @SuppressWarnings("rawtypes") + private okhttp3.Call mergeIntoBranchAsyncValidateBeforeCall(String repository, String sourceRef, String destinationBranch, Merge merge, final ApiCallback _callback) throws ApiException { + // verify the required parameter 'repository' is set + if (repository == null) { + throw new ApiException("Missing the required parameter 'repository' when calling mergeIntoBranchAsync(Async)"); + } + + // verify the required parameter 'sourceRef' is set + if (sourceRef == null) { + throw new ApiException("Missing the required parameter 'sourceRef' when calling mergeIntoBranchAsync(Async)"); + } + + // verify the required parameter 'destinationBranch' is set + if (destinationBranch == null) { + throw new ApiException("Missing the required parameter 'destinationBranch' when calling mergeIntoBranchAsync(Async)"); + } + + return mergeIntoBranchAsyncCall(repository, sourceRef, destinationBranch, merge, _callback); + + } + + + private ApiResponse mergeIntoBranchAsyncWithHttpInfo(String repository, String sourceRef, String destinationBranch, Merge merge) throws ApiException { + okhttp3.Call localVarCall = mergeIntoBranchAsyncValidateBeforeCall(repository, sourceRef, destinationBranch, merge, null); + Type localVarReturnType = new TypeToken(){}.getType(); + return localVarApiClient.execute(localVarCall, localVarReturnType); + } + + private okhttp3.Call mergeIntoBranchAsyncAsync(String repository, String sourceRef, String destinationBranch, Merge merge, final ApiCallback _callback) throws ApiException { + + okhttp3.Call localVarCall = 
mergeIntoBranchAsyncValidateBeforeCall(repository, sourceRef, destinationBranch, merge, _callback); + Type localVarReturnType = new TypeToken(){}.getType(); + localVarApiClient.executeAsync(localVarCall, localVarReturnType, _callback); + return localVarCall; + } + + public class APImergeIntoBranchAsyncRequest { + private final String repository; + private final String sourceRef; + private final String destinationBranch; + private Merge merge; + + private APImergeIntoBranchAsyncRequest(String repository, String sourceRef, String destinationBranch) { + this.repository = repository; + this.sourceRef = sourceRef; + this.destinationBranch = destinationBranch; + } + + /** + * Set merge + * @param merge (optional) + * @return APImergeIntoBranchAsyncRequest + */ + public APImergeIntoBranchAsyncRequest merge(Merge merge) { + this.merge = merge; + return this; + } + + /** + * Build call for mergeIntoBranchAsync + * @param _callback ApiCallback API callback + * @return Call to execute + * @throws ApiException If fail to serialize the request body object + * @http.response.details + + + + + + + + + + +
<table>
<tr><td> Status Code </td><td> Description </td><td> Response Headers </td></tr>
<tr><td> 202 </td><td> merge task started </td><td> - </td></tr>
<tr><td> 400 </td><td> Validation Error </td><td> - </td></tr>
<tr><td> 401 </td><td> Unauthorized </td><td> - </td></tr>
<tr><td> 403 </td><td> Forbidden </td><td> - </td></tr>
<tr><td> 404 </td><td> Resource Not Found </td><td> - </td></tr>
<tr><td> 429 </td><td> too many requests </td><td> - </td></tr>
<tr><td> 501 </td><td> Not Implemented </td><td> - </td></tr>
<tr><td> 0 </td><td> Internal Server Error </td><td> - </td></tr>
</table>
+ */ + public okhttp3.Call buildCall(final ApiCallback _callback) throws ApiException { + return mergeIntoBranchAsyncCall(repository, sourceRef, destinationBranch, merge, _callback); + } + + /** + * Execute mergeIntoBranchAsync request + * @return TaskCreation + * @throws ApiException If fail to call the API, e.g. server error or cannot deserialize the response body + * @http.response.details + + + + + + + + + + +
<table>
<tr><td> Status Code </td><td> Description </td><td> Response Headers </td></tr>
<tr><td> 202 </td><td> merge task started </td><td> - </td></tr>
<tr><td> 400 </td><td> Validation Error </td><td> - </td></tr>
<tr><td> 401 </td><td> Unauthorized </td><td> - </td></tr>
<tr><td> 403 </td><td> Forbidden </td><td> - </td></tr>
<tr><td> 404 </td><td> Resource Not Found </td><td> - </td></tr>
<tr><td> 429 </td><td> too many requests </td><td> - </td></tr>
<tr><td> 501 </td><td> Not Implemented </td><td> - </td></tr>
<tr><td> 0 </td><td> Internal Server Error </td><td> - </td></tr>
</table>
+ */ + public TaskCreation execute() throws ApiException { + ApiResponse localVarResp = mergeIntoBranchAsyncWithHttpInfo(repository, sourceRef, destinationBranch, merge); + return localVarResp.getData(); + } + + /** + * Execute mergeIntoBranchAsync request with HTTP info returned + * @return ApiResponse<TaskCreation> + * @throws ApiException If fail to call the API, e.g. server error or cannot deserialize the response body + * @http.response.details + + + + + + + + + + +
<table>
<tr><td> Status Code </td><td> Description </td><td> Response Headers </td></tr>
<tr><td> 202 </td><td> merge task started </td><td> - </td></tr>
<tr><td> 400 </td><td> Validation Error </td><td> - </td></tr>
<tr><td> 401 </td><td> Unauthorized </td><td> - </td></tr>
<tr><td> 403 </td><td> Forbidden </td><td> - </td></tr>
<tr><td> 404 </td><td> Resource Not Found </td><td> - </td></tr>
<tr><td> 429 </td><td> too many requests </td><td> - </td></tr>
<tr><td> 501 </td><td> Not Implemented </td><td> - </td></tr>
<tr><td> 0 </td><td> Internal Server Error </td><td> - </td></tr>
</table>
+ */ + public ApiResponse executeWithHttpInfo() throws ApiException { + return mergeIntoBranchAsyncWithHttpInfo(repository, sourceRef, destinationBranch, merge); + } + + /** + * Execute mergeIntoBranchAsync request (asynchronously) + * @param _callback The callback to be executed when the API call finishes + * @return The request call + * @throws ApiException If fail to process the API call, e.g. serializing the request body object + * @http.response.details + + + + + + + + + + +
<table>
<tr><td> Status Code </td><td> Description </td><td> Response Headers </td></tr>
<tr><td> 202 </td><td> merge task started </td><td> - </td></tr>
<tr><td> 400 </td><td> Validation Error </td><td> - </td></tr>
<tr><td> 401 </td><td> Unauthorized </td><td> - </td></tr>
<tr><td> 403 </td><td> Forbidden </td><td> - </td></tr>
<tr><td> 404 </td><td> Resource Not Found </td><td> - </td></tr>
<tr><td> 429 </td><td> too many requests </td><td> - </td></tr>
<tr><td> 501 </td><td> Not Implemented </td><td> - </td></tr>
<tr><td> 0 </td><td> Internal Server Error </td><td> - </td></tr>
</table>
+ */ + public okhttp3.Call executeAsync(final ApiCallback _callback) throws ApiException { + return mergeIntoBranchAsyncAsync(repository, sourceRef, destinationBranch, merge, _callback); + } + } + + /** + * merge references asynchronously + * + * @param repository (required) + * @param sourceRef source ref (required) + * @param destinationBranch destination branch name (required) + * @return APImergeIntoBranchAsyncRequest + * @http.response.details + + + + + + + + + + +
<table>
<tr><td> Status Code </td><td> Description </td><td> Response Headers </td></tr>
<tr><td> 202 </td><td> merge task started </td><td> - </td></tr>
<tr><td> 400 </td><td> Validation Error </td><td> - </td></tr>
<tr><td> 401 </td><td> Unauthorized </td><td> - </td></tr>
<tr><td> 403 </td><td> Forbidden </td><td> - </td></tr>
<tr><td> 404 </td><td> Resource Not Found </td><td> - </td></tr>
<tr><td> 429 </td><td> too many requests </td><td> - </td></tr>
<tr><td> 501 </td><td> Not Implemented </td><td> - </td></tr>
<tr><td> 0 </td><td> Internal Server Error </td><td> - </td></tr>
</table>
+ */ + public APImergeIntoBranchAsyncRequest mergeIntoBranchAsync(String repository, String sourceRef, String destinationBranch) { + return new APImergeIntoBranchAsyncRequest(repository, sourceRef, destinationBranch); + } + private okhttp3.Call mergeIntoBranchStatusCall(String repository, String sourceRef, String destinationBranch, String id, final ApiCallback _callback) throws ApiException { + String basePath = null; + // Operation Servers + String[] localBasePaths = new String[] { }; + + // Determine Base Path to Use + if (localCustomBaseUrl != null){ + basePath = localCustomBaseUrl; + } else if ( localBasePaths.length > 0 ) { + basePath = localBasePaths[localHostIndex]; + } else { + basePath = null; + } + + Object localVarPostBody = null; + + // create path and map variables + String localVarPath = "/repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch}/status" + .replace("{" + "repository" + "}", localVarApiClient.escapeString(repository.toString())) + .replace("{" + "sourceRef" + "}", localVarApiClient.escapeString(sourceRef.toString())) + .replace("{" + "destinationBranch" + "}", localVarApiClient.escapeString(destinationBranch.toString())); + + List localVarQueryParams = new ArrayList(); + List localVarCollectionQueryParams = new ArrayList(); + Map localVarHeaderParams = new HashMap(); + Map localVarCookieParams = new HashMap(); + Map localVarFormParams = new HashMap(); + + if (id != null) { + localVarQueryParams.addAll(localVarApiClient.parameterToPair("id", id)); + } + + final String[] localVarAccepts = { + "application/json" + }; + final String localVarAccept = localVarApiClient.selectHeaderAccept(localVarAccepts); + if (localVarAccept != null) { + localVarHeaderParams.put("Accept", localVarAccept); + } + + final String[] localVarContentTypes = { + }; + final String localVarContentType = localVarApiClient.selectHeaderContentType(localVarContentTypes); + if (localVarContentType != null) { + localVarHeaderParams.put("Content-Type", localVarContentType); + } + + String[] localVarAuthNames = new String[] { "basic_auth", "cookie_auth", "oidc_auth", "saml_auth", "jwt_token" }; + return localVarApiClient.buildCall(basePath, localVarPath, "GET", localVarQueryParams, localVarCollectionQueryParams, localVarPostBody, localVarHeaderParams, localVarCookieParams, localVarFormParams, localVarAuthNames, _callback); + } + + @SuppressWarnings("rawtypes") + private okhttp3.Call mergeIntoBranchStatusValidateBeforeCall(String repository, String sourceRef, String destinationBranch, String id, final ApiCallback _callback) throws ApiException { + // verify the required parameter 'repository' is set + if (repository == null) { + throw new ApiException("Missing the required parameter 'repository' when calling mergeIntoBranchStatus(Async)"); + } + + // verify the required parameter 'sourceRef' is set + if (sourceRef == null) { + throw new ApiException("Missing the required parameter 'sourceRef' when calling mergeIntoBranchStatus(Async)"); + } + + // verify the required parameter 'destinationBranch' is set + if (destinationBranch == null) { + throw new ApiException("Missing the required parameter 'destinationBranch' when calling mergeIntoBranchStatus(Async)"); + } + + // verify the required parameter 'id' is set + if (id == null) { + throw new ApiException("Missing the required parameter 'id' when calling mergeIntoBranchStatus(Async)"); + } + + return mergeIntoBranchStatusCall(repository, sourceRef, destinationBranch, id, _callback); + + } + + + private ApiResponse 
mergeIntoBranchStatusWithHttpInfo(String repository, String sourceRef, String destinationBranch, String id) throws ApiException { + okhttp3.Call localVarCall = mergeIntoBranchStatusValidateBeforeCall(repository, sourceRef, destinationBranch, id, null); + Type localVarReturnType = new TypeToken(){}.getType(); + return localVarApiClient.execute(localVarCall, localVarReturnType); + } + + private okhttp3.Call mergeIntoBranchStatusAsync(String repository, String sourceRef, String destinationBranch, String id, final ApiCallback _callback) throws ApiException { + + okhttp3.Call localVarCall = mergeIntoBranchStatusValidateBeforeCall(repository, sourceRef, destinationBranch, id, _callback); + Type localVarReturnType = new TypeToken(){}.getType(); + localVarApiClient.executeAsync(localVarCall, localVarReturnType, _callback); + return localVarCall; + } + + public class APImergeIntoBranchStatusRequest { + private final String repository; + private final String sourceRef; + private final String destinationBranch; + private final String id; + + private APImergeIntoBranchStatusRequest(String repository, String sourceRef, String destinationBranch, String id) { + this.repository = repository; + this.sourceRef = sourceRef; + this.destinationBranch = destinationBranch; + this.id = id; + } + + /** + * Build call for mergeIntoBranchStatus + * @param _callback ApiCallback API callback + * @return Call to execute + * @throws ApiException If fail to serialize the request body object + * @http.response.details + + + + + + + + + + + + +
<table>
<tr><td> Status Code </td><td> Description </td><td> Response Headers </td></tr>
<tr><td> 200 </td><td> merge task status </td><td> - </td></tr>
<tr><td> 400 </td><td> Validation Error </td><td> - </td></tr>
<tr><td> 401 </td><td> Unauthorized </td><td> - </td></tr>
<tr><td> 403 </td><td> Forbidden </td><td> - </td></tr>
<tr><td> 404 </td><td> Not Found </td><td> - </td></tr>
<tr><td> 409 </td><td> Conflict </td><td> - </td></tr>
<tr><td> 412 </td><td> precondition failed (e.g. a pre-merge hook returned a failure) </td><td> - </td></tr>
<tr><td> 429 </td><td> too many requests </td><td> - </td></tr>
<tr><td> 501 </td><td> Not Implemented </td><td> - </td></tr>
<tr><td> 0 </td><td> Internal Server Error </td><td> - </td></tr>
</table>
+ */ + public okhttp3.Call buildCall(final ApiCallback _callback) throws ApiException { + return mergeIntoBranchStatusCall(repository, sourceRef, destinationBranch, id, _callback); + } + + /** + * Execute mergeIntoBranchStatus request + * @return MergeStatus + * @throws ApiException If fail to call the API, e.g. server error or cannot deserialize the response body + * @http.response.details + + + + + + + + + + + + +
<table>
<tr><td> Status Code </td><td> Description </td><td> Response Headers </td></tr>
<tr><td> 200 </td><td> merge task status </td><td> - </td></tr>
<tr><td> 400 </td><td> Validation Error </td><td> - </td></tr>
<tr><td> 401 </td><td> Unauthorized </td><td> - </td></tr>
<tr><td> 403 </td><td> Forbidden </td><td> - </td></tr>
<tr><td> 404 </td><td> Not Found </td><td> - </td></tr>
<tr><td> 409 </td><td> Conflict </td><td> - </td></tr>
<tr><td> 412 </td><td> precondition failed (e.g. a pre-merge hook returned a failure) </td><td> - </td></tr>
<tr><td> 429 </td><td> too many requests </td><td> - </td></tr>
<tr><td> 501 </td><td> Not Implemented </td><td> - </td></tr>
<tr><td> 0 </td><td> Internal Server Error </td><td> - </td></tr>
</table>
+ */ + public MergeStatus execute() throws ApiException { + ApiResponse localVarResp = mergeIntoBranchStatusWithHttpInfo(repository, sourceRef, destinationBranch, id); + return localVarResp.getData(); + } + + /** + * Execute mergeIntoBranchStatus request with HTTP info returned + * @return ApiResponse<MergeStatus> + * @throws ApiException If fail to call the API, e.g. server error or cannot deserialize the response body + * @http.response.details + + + + + + + + + + + + +
<table>
<tr><td> Status Code </td><td> Description </td><td> Response Headers </td></tr>
<tr><td> 200 </td><td> merge task status </td><td> - </td></tr>
<tr><td> 400 </td><td> Validation Error </td><td> - </td></tr>
<tr><td> 401 </td><td> Unauthorized </td><td> - </td></tr>
<tr><td> 403 </td><td> Forbidden </td><td> - </td></tr>
<tr><td> 404 </td><td> Not Found </td><td> - </td></tr>
<tr><td> 409 </td><td> Conflict </td><td> - </td></tr>
<tr><td> 412 </td><td> precondition failed (e.g. a pre-merge hook returned a failure) </td><td> - </td></tr>
<tr><td> 429 </td><td> too many requests </td><td> - </td></tr>
<tr><td> 501 </td><td> Not Implemented </td><td> - </td></tr>
<tr><td> 0 </td><td> Internal Server Error </td><td> - </td></tr>
</table>
+ */ + public ApiResponse executeWithHttpInfo() throws ApiException { + return mergeIntoBranchStatusWithHttpInfo(repository, sourceRef, destinationBranch, id); + } + + /** + * Execute mergeIntoBranchStatus request (asynchronously) + * @param _callback The callback to be executed when the API call finishes + * @return The request call + * @throws ApiException If fail to process the API call, e.g. serializing the request body object + * @http.response.details + + + + + + + + + + + + +
<table>
<tr><td> Status Code </td><td> Description </td><td> Response Headers </td></tr>
<tr><td> 200 </td><td> merge task status </td><td> - </td></tr>
<tr><td> 400 </td><td> Validation Error </td><td> - </td></tr>
<tr><td> 401 </td><td> Unauthorized </td><td> - </td></tr>
<tr><td> 403 </td><td> Forbidden </td><td> - </td></tr>
<tr><td> 404 </td><td> Not Found </td><td> - </td></tr>
<tr><td> 409 </td><td> Conflict </td><td> - </td></tr>
<tr><td> 412 </td><td> precondition failed (e.g. a pre-merge hook returned a failure) </td><td> - </td></tr>
<tr><td> 429 </td><td> too many requests </td><td> - </td></tr>
<tr><td> 501 </td><td> Not Implemented </td><td> - </td></tr>
<tr><td> 0 </td><td> Internal Server Error </td><td> - </td></tr>
</table>
+ */ + public okhttp3.Call executeAsync(final ApiCallback _callback) throws ApiException { + return mergeIntoBranchStatusAsync(repository, sourceRef, destinationBranch, id, _callback); + } + } + + /** + * get status of async merge operation + * + * @param repository (required) + * @param sourceRef source ref (required) + * @param destinationBranch destination branch name (required) + * @param id Unique identifier of the merge task (required) + * @return APImergeIntoBranchStatusRequest + * @http.response.details + + + + + + + + + + + + +
<table>
<tr><td> Status Code </td><td> Description </td><td> Response Headers </td></tr>
<tr><td> 200 </td><td> merge task status </td><td> - </td></tr>
<tr><td> 400 </td><td> Validation Error </td><td> - </td></tr>
<tr><td> 401 </td><td> Unauthorized </td><td> - </td></tr>
<tr><td> 403 </td><td> Forbidden </td><td> - </td></tr>
<tr><td> 404 </td><td> Not Found </td><td> - </td></tr>
<tr><td> 409 </td><td> Conflict </td><td> - </td></tr>
<tr><td> 412 </td><td> precondition failed (e.g. a pre-merge hook returned a failure) </td><td> - </td></tr>
<tr><td> 429 </td><td> too many requests </td><td> - </td></tr>
<tr><td> 501 </td><td> Not Implemented </td><td> - </td></tr>
<tr><td> 0 </td><td> Internal Server Error </td><td> - </td></tr>
</table>
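+ *
+ * Editorial usage sketch (not generator output), mirroring the commit flow:
+ * start the merge with mergeIntoBranchAsync and poll this endpoint with the
+ * returned task id (getId() on TaskCreation is an assumption here).
+ * <pre>{@code
+ * TaskCreation task = api.mergeIntoBranchAsync(repository, sourceRef, destinationBranch).execute();
+ * MergeStatus status;
+ * do {
+ *     Thread.sleep(1000); // simple fixed-interval polling
+ *     status = api.mergeIntoBranchStatus(repository, sourceRef, destinationBranch, task.getId()).execute();
+ * } while (!status.getCompleted());
+ * }</pre>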
+ */ + public APImergeIntoBranchStatusRequest mergeIntoBranchStatus(String repository, String sourceRef, String destinationBranch, String id) { + return new APImergeIntoBranchStatusRequest(repository, sourceRef, destinationBranch, id); + } } diff --git a/clients/java/src/main/java/io/lakefs/clients/sdk/model/CommitStatus.java b/clients/java/src/main/java/io/lakefs/clients/sdk/model/CommitStatus.java new file mode 100644 index 00000000000..1513569ce57 --- /dev/null +++ b/clients/java/src/main/java/io/lakefs/clients/sdk/model/CommitStatus.java @@ -0,0 +1,417 @@ +/* + * lakeFS API + * lakeFS HTTP API + * + * The version of the OpenAPI document: 1.0.0 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +package io.lakefs.clients.sdk.model; + +import java.util.Objects; +import com.google.gson.TypeAdapter; +import com.google.gson.annotations.JsonAdapter; +import com.google.gson.annotations.SerializedName; +import com.google.gson.stream.JsonReader; +import com.google.gson.stream.JsonWriter; +import io.lakefs.clients.sdk.model.Commit; +import io.lakefs.clients.sdk.model.Error; +import java.io.IOException; +import java.time.OffsetDateTime; +import java.util.Arrays; + +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import com.google.gson.JsonArray; +import com.google.gson.JsonDeserializationContext; +import com.google.gson.JsonDeserializer; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParseException; +import com.google.gson.TypeAdapterFactory; +import com.google.gson.reflect.TypeToken; +import com.google.gson.TypeAdapter; +import com.google.gson.stream.JsonReader; +import com.google.gson.stream.JsonWriter; +import java.io.IOException; + +import java.lang.reflect.Type; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import io.lakefs.clients.sdk.JSON; + +/** + * CommitStatus + */ +@javax.annotation.Generated(value = "org.openapitools.codegen.languages.JavaClientCodegen") +public class CommitStatus { + public static final String SERIALIZED_NAME_TASK_ID = "task_id"; + @SerializedName(SERIALIZED_NAME_TASK_ID) + private String taskId; + + public static final String SERIALIZED_NAME_COMPLETED = "completed"; + @SerializedName(SERIALIZED_NAME_COMPLETED) + private Boolean completed; + + public static final String SERIALIZED_NAME_UPDATE_TIME = "update_time"; + @SerializedName(SERIALIZED_NAME_UPDATE_TIME) + private OffsetDateTime updateTime; + + public static final String SERIALIZED_NAME_RESULT = "result"; + @SerializedName(SERIALIZED_NAME_RESULT) + private Commit result; + + public static final String SERIALIZED_NAME_ERROR = "error"; + @SerializedName(SERIALIZED_NAME_ERROR) + private Error error; + + public CommitStatus() { + } + + public CommitStatus taskId(String taskId) { + + this.taskId = taskId; + return this; + } + + /** + * the id of the async commit task + * @return taskId + **/ + @javax.annotation.Nonnull + public String getTaskId() { + return taskId; + } + + + public void setTaskId(String taskId) { + this.taskId = taskId; + } + + + public CommitStatus completed(Boolean completed) { + + this.completed = completed; + return this; + } + + /** + * true if the task has completed (either successfully or with an error) + * @return completed + **/ + @javax.annotation.Nonnull + public Boolean getCompleted() { + return completed; + 
} + + + public void setCompleted(Boolean completed) { + this.completed = completed; + } + + + public CommitStatus updateTime(OffsetDateTime updateTime) { + + this.updateTime = updateTime; + return this; + } + + /** + * last time the task status was updated + * @return updateTime + **/ + @javax.annotation.Nonnull + public OffsetDateTime getUpdateTime() { + return updateTime; + } + + + public void setUpdateTime(OffsetDateTime updateTime) { + this.updateTime = updateTime; + } + + + public CommitStatus result(Commit result) { + + this.result = result; + return this; + } + + /** + * Get result + * @return result + **/ + @javax.annotation.Nullable + public Commit getResult() { + return result; + } + + + public void setResult(Commit result) { + this.result = result; + } + + + public CommitStatus error(Error error) { + + this.error = error; + return this; + } + + /** + * Get error + * @return error + **/ + @javax.annotation.Nullable + public Error getError() { + return error; + } + + + public void setError(Error error) { + this.error = error; + } + + /** + * A container for additional, undeclared properties. + * This is a holder for any undeclared properties as specified with + * the 'additionalProperties' keyword in the OAS document. + */ + private Map additionalProperties; + + /** + * Set the additional (undeclared) property with the specified name and value. + * If the property does not already exist, create it otherwise replace it. + * + * @param key name of the property + * @param value value of the property + * @return the CommitStatus instance itself + */ + public CommitStatus putAdditionalProperty(String key, Object value) { + if (this.additionalProperties == null) { + this.additionalProperties = new HashMap(); + } + this.additionalProperties.put(key, value); + return this; + } + + /** + * Return the additional (undeclared) property. + * + * @return a map of objects + */ + public Map getAdditionalProperties() { + return additionalProperties; + } + + /** + * Return the additional (undeclared) property with the specified name. 
+ * + * @param key name of the property + * @return an object + */ + public Object getAdditionalProperty(String key) { + if (this.additionalProperties == null) { + return null; + } + return this.additionalProperties.get(key); + } + + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + CommitStatus commitStatus = (CommitStatus) o; + return Objects.equals(this.taskId, commitStatus.taskId) && + Objects.equals(this.completed, commitStatus.completed) && + Objects.equals(this.updateTime, commitStatus.updateTime) && + Objects.equals(this.result, commitStatus.result) && + Objects.equals(this.error, commitStatus.error)&& + Objects.equals(this.additionalProperties, commitStatus.additionalProperties); + } + + @Override + public int hashCode() { + return Objects.hash(taskId, completed, updateTime, result, error, additionalProperties); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("class CommitStatus {\n"); + sb.append(" taskId: ").append(toIndentedString(taskId)).append("\n"); + sb.append(" completed: ").append(toIndentedString(completed)).append("\n"); + sb.append(" updateTime: ").append(toIndentedString(updateTime)).append("\n"); + sb.append(" result: ").append(toIndentedString(result)).append("\n"); + sb.append(" error: ").append(toIndentedString(error)).append("\n"); + sb.append(" additionalProperties: ").append(toIndentedString(additionalProperties)).append("\n"); + sb.append("}"); + return sb.toString(); + } + + /** + * Convert the given object to string with each line indented by 4 spaces + * (except the first line). + */ + private String toIndentedString(Object o) { + if (o == null) { + return "null"; + } + return o.toString().replace("\n", "\n "); + } + + + public static HashSet openapiFields; + public static HashSet openapiRequiredFields; + + static { + // a set of all properties/fields (JSON key names) + openapiFields = new HashSet(); + openapiFields.add("task_id"); + openapiFields.add("completed"); + openapiFields.add("update_time"); + openapiFields.add("result"); + openapiFields.add("error"); + + // a set of required properties/fields (JSON key names) + openapiRequiredFields = new HashSet(); + openapiRequiredFields.add("task_id"); + openapiRequiredFields.add("completed"); + openapiRequiredFields.add("update_time"); + } + + /** + * Validates the JSON Element and throws an exception if issues found + * + * @param jsonElement JSON Element + * @throws IOException if the JSON Element is invalid with respect to CommitStatus + */ + public static void validateJsonElement(JsonElement jsonElement) throws IOException { + if (jsonElement == null) { + if (!CommitStatus.openapiRequiredFields.isEmpty()) { // has required fields but JSON element is null + throw new IllegalArgumentException(String.format("The required field(s) %s in CommitStatus is not found in the empty JSON string", CommitStatus.openapiRequiredFields.toString())); + } + } + + // check to make sure all required properties/fields are present in the JSON string + for (String requiredField : CommitStatus.openapiRequiredFields) { + if (jsonElement.getAsJsonObject().get(requiredField) == null) { + throw new IllegalArgumentException(String.format("The required field `%s` is not found in the JSON string: %s", requiredField, jsonElement.toString())); + } + } + JsonObject jsonObj = jsonElement.getAsJsonObject(); + if (!jsonObj.get("task_id").isJsonPrimitive()) { + throw new 
IllegalArgumentException(String.format("Expected the field `task_id` to be a primitive type in the JSON string but got `%s`", jsonObj.get("task_id").toString())); + } + // validate the optional field `result` + if (jsonObj.get("result") != null && !jsonObj.get("result").isJsonNull()) { + Commit.validateJsonElement(jsonObj.get("result")); + } + // validate the optional field `error` + if (jsonObj.get("error") != null && !jsonObj.get("error").isJsonNull()) { + Error.validateJsonElement(jsonObj.get("error")); + } + } + + public static class CustomTypeAdapterFactory implements TypeAdapterFactory { + @SuppressWarnings("unchecked") + @Override + public TypeAdapter create(Gson gson, TypeToken type) { + if (!CommitStatus.class.isAssignableFrom(type.getRawType())) { + return null; // this class only serializes 'CommitStatus' and its subtypes + } + final TypeAdapter elementAdapter = gson.getAdapter(JsonElement.class); + final TypeAdapter thisAdapter + = gson.getDelegateAdapter(this, TypeToken.get(CommitStatus.class)); + + return (TypeAdapter) new TypeAdapter() { + @Override + public void write(JsonWriter out, CommitStatus value) throws IOException { + JsonObject obj = thisAdapter.toJsonTree(value).getAsJsonObject(); + obj.remove("additionalProperties"); + // serialize additional properties + if (value.getAdditionalProperties() != null) { + for (Map.Entry entry : value.getAdditionalProperties().entrySet()) { + if (entry.getValue() instanceof String) + obj.addProperty(entry.getKey(), (String) entry.getValue()); + else if (entry.getValue() instanceof Number) + obj.addProperty(entry.getKey(), (Number) entry.getValue()); + else if (entry.getValue() instanceof Boolean) + obj.addProperty(entry.getKey(), (Boolean) entry.getValue()); + else if (entry.getValue() instanceof Character) + obj.addProperty(entry.getKey(), (Character) entry.getValue()); + else { + obj.add(entry.getKey(), gson.toJsonTree(entry.getValue()).getAsJsonObject()); + } + } + } + elementAdapter.write(out, obj); + } + + @Override + public CommitStatus read(JsonReader in) throws IOException { + JsonElement jsonElement = elementAdapter.read(in); + validateJsonElement(jsonElement); + JsonObject jsonObj = jsonElement.getAsJsonObject(); + // store additional fields in the deserialized instance + CommitStatus instance = thisAdapter.fromJsonTree(jsonObj); + for (Map.Entry entry : jsonObj.entrySet()) { + if (!openapiFields.contains(entry.getKey())) { + if (entry.getValue().isJsonPrimitive()) { // primitive type + if (entry.getValue().getAsJsonPrimitive().isString()) + instance.putAdditionalProperty(entry.getKey(), entry.getValue().getAsString()); + else if (entry.getValue().getAsJsonPrimitive().isNumber()) + instance.putAdditionalProperty(entry.getKey(), entry.getValue().getAsNumber()); + else if (entry.getValue().getAsJsonPrimitive().isBoolean()) + instance.putAdditionalProperty(entry.getKey(), entry.getValue().getAsBoolean()); + else + throw new IllegalArgumentException(String.format("The field `%s` has unknown primitive type. 
Value: %s", entry.getKey(), entry.getValue().toString())); + } else if (entry.getValue().isJsonArray()) { + instance.putAdditionalProperty(entry.getKey(), gson.fromJson(entry.getValue(), List.class)); + } else { // JSON object + instance.putAdditionalProperty(entry.getKey(), gson.fromJson(entry.getValue(), HashMap.class)); + } + } + } + return instance; + } + + }.nullSafe(); + } + } + + /** + * Create an instance of CommitStatus given an JSON string + * + * @param jsonString JSON string + * @return An instance of CommitStatus + * @throws IOException if the JSON string is invalid with respect to CommitStatus + */ + public static CommitStatus fromJson(String jsonString) throws IOException { + return JSON.getGson().fromJson(jsonString, CommitStatus.class); + } + + /** + * Convert an instance of CommitStatus to an JSON string + * + * @return JSON string + */ + public String toJson() { + return JSON.getGson().toJson(this); + } +} + diff --git a/clients/java/src/main/java/io/lakefs/clients/sdk/model/MergeStatus.java b/clients/java/src/main/java/io/lakefs/clients/sdk/model/MergeStatus.java new file mode 100644 index 00000000000..be9816340e4 --- /dev/null +++ b/clients/java/src/main/java/io/lakefs/clients/sdk/model/MergeStatus.java @@ -0,0 +1,417 @@ +/* + * lakeFS API + * lakeFS HTTP API + * + * The version of the OpenAPI document: 1.0.0 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +package io.lakefs.clients.sdk.model; + +import java.util.Objects; +import com.google.gson.TypeAdapter; +import com.google.gson.annotations.JsonAdapter; +import com.google.gson.annotations.SerializedName; +import com.google.gson.stream.JsonReader; +import com.google.gson.stream.JsonWriter; +import io.lakefs.clients.sdk.model.Error; +import io.lakefs.clients.sdk.model.MergeResult; +import java.io.IOException; +import java.time.OffsetDateTime; +import java.util.Arrays; + +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import com.google.gson.JsonArray; +import com.google.gson.JsonDeserializationContext; +import com.google.gson.JsonDeserializer; +import com.google.gson.JsonElement; +import com.google.gson.JsonObject; +import com.google.gson.JsonParseException; +import com.google.gson.TypeAdapterFactory; +import com.google.gson.reflect.TypeToken; +import com.google.gson.TypeAdapter; +import com.google.gson.stream.JsonReader; +import com.google.gson.stream.JsonWriter; +import java.io.IOException; + +import java.lang.reflect.Type; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import io.lakefs.clients.sdk.JSON; + +/** + * MergeStatus + */ +@javax.annotation.Generated(value = "org.openapitools.codegen.languages.JavaClientCodegen") +public class MergeStatus { + public static final String SERIALIZED_NAME_TASK_ID = "task_id"; + @SerializedName(SERIALIZED_NAME_TASK_ID) + private String taskId; + + public static final String SERIALIZED_NAME_COMPLETED = "completed"; + @SerializedName(SERIALIZED_NAME_COMPLETED) + private Boolean completed; + + public static final String SERIALIZED_NAME_UPDATE_TIME = "update_time"; + @SerializedName(SERIALIZED_NAME_UPDATE_TIME) + private OffsetDateTime updateTime; + + public static final String SERIALIZED_NAME_RESULT = "result"; + @SerializedName(SERIALIZED_NAME_RESULT) + private MergeResult result; + + public static final String SERIALIZED_NAME_ERROR = "error"; + 
@SerializedName(SERIALIZED_NAME_ERROR) + private Error error; + + public MergeStatus() { + } + + public MergeStatus taskId(String taskId) { + + this.taskId = taskId; + return this; + } + + /** + * the id of the async merge task + * @return taskId + **/ + @javax.annotation.Nonnull + public String getTaskId() { + return taskId; + } + + + public void setTaskId(String taskId) { + this.taskId = taskId; + } + + + public MergeStatus completed(Boolean completed) { + + this.completed = completed; + return this; + } + + /** + * true if the task has completed (either successfully or with an error) + * @return completed + **/ + @javax.annotation.Nonnull + public Boolean getCompleted() { + return completed; + } + + + public void setCompleted(Boolean completed) { + this.completed = completed; + } + + + public MergeStatus updateTime(OffsetDateTime updateTime) { + + this.updateTime = updateTime; + return this; + } + + /** + * last time the task status was updated + * @return updateTime + **/ + @javax.annotation.Nonnull + public OffsetDateTime getUpdateTime() { + return updateTime; + } + + + public void setUpdateTime(OffsetDateTime updateTime) { + this.updateTime = updateTime; + } + + + public MergeStatus result(MergeResult result) { + + this.result = result; + return this; + } + + /** + * Get result + * @return result + **/ + @javax.annotation.Nullable + public MergeResult getResult() { + return result; + } + + + public void setResult(MergeResult result) { + this.result = result; + } + + + public MergeStatus error(Error error) { + + this.error = error; + return this; + } + + /** + * Get error + * @return error + **/ + @javax.annotation.Nullable + public Error getError() { + return error; + } + + + public void setError(Error error) { + this.error = error; + } + + /** + * A container for additional, undeclared properties. + * This is a holder for any undeclared properties as specified with + * the 'additionalProperties' keyword in the OAS document. + */ + private Map additionalProperties; + + /** + * Set the additional (undeclared) property with the specified name and value. + * If the property does not already exist, create it otherwise replace it. + * + * @param key name of the property + * @param value value of the property + * @return the MergeStatus instance itself + */ + public MergeStatus putAdditionalProperty(String key, Object value) { + if (this.additionalProperties == null) { + this.additionalProperties = new HashMap(); + } + this.additionalProperties.put(key, value); + return this; + } + + /** + * Return the additional (undeclared) property. + * + * @return a map of objects + */ + public Map getAdditionalProperties() { + return additionalProperties; + } + + /** + * Return the additional (undeclared) property with the specified name. 
+ * + * @param key name of the property + * @return an object + */ + public Object getAdditionalProperty(String key) { + if (this.additionalProperties == null) { + return null; + } + return this.additionalProperties.get(key); + } + + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + MergeStatus mergeStatus = (MergeStatus) o; + return Objects.equals(this.taskId, mergeStatus.taskId) && + Objects.equals(this.completed, mergeStatus.completed) && + Objects.equals(this.updateTime, mergeStatus.updateTime) && + Objects.equals(this.result, mergeStatus.result) && + Objects.equals(this.error, mergeStatus.error)&& + Objects.equals(this.additionalProperties, mergeStatus.additionalProperties); + } + + @Override + public int hashCode() { + return Objects.hash(taskId, completed, updateTime, result, error, additionalProperties); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("class MergeStatus {\n"); + sb.append(" taskId: ").append(toIndentedString(taskId)).append("\n"); + sb.append(" completed: ").append(toIndentedString(completed)).append("\n"); + sb.append(" updateTime: ").append(toIndentedString(updateTime)).append("\n"); + sb.append(" result: ").append(toIndentedString(result)).append("\n"); + sb.append(" error: ").append(toIndentedString(error)).append("\n"); + sb.append(" additionalProperties: ").append(toIndentedString(additionalProperties)).append("\n"); + sb.append("}"); + return sb.toString(); + } + + /** + * Convert the given object to string with each line indented by 4 spaces + * (except the first line). + */ + private String toIndentedString(Object o) { + if (o == null) { + return "null"; + } + return o.toString().replace("\n", "\n "); + } + + + public static HashSet openapiFields; + public static HashSet openapiRequiredFields; + + static { + // a set of all properties/fields (JSON key names) + openapiFields = new HashSet(); + openapiFields.add("task_id"); + openapiFields.add("completed"); + openapiFields.add("update_time"); + openapiFields.add("result"); + openapiFields.add("error"); + + // a set of required properties/fields (JSON key names) + openapiRequiredFields = new HashSet(); + openapiRequiredFields.add("task_id"); + openapiRequiredFields.add("completed"); + openapiRequiredFields.add("update_time"); + } + + /** + * Validates the JSON Element and throws an exception if issues found + * + * @param jsonElement JSON Element + * @throws IOException if the JSON Element is invalid with respect to MergeStatus + */ + public static void validateJsonElement(JsonElement jsonElement) throws IOException { + if (jsonElement == null) { + if (!MergeStatus.openapiRequiredFields.isEmpty()) { // has required fields but JSON element is null + throw new IllegalArgumentException(String.format("The required field(s) %s in MergeStatus is not found in the empty JSON string", MergeStatus.openapiRequiredFields.toString())); + } + } + + // check to make sure all required properties/fields are present in the JSON string + for (String requiredField : MergeStatus.openapiRequiredFields) { + if (jsonElement.getAsJsonObject().get(requiredField) == null) { + throw new IllegalArgumentException(String.format("The required field `%s` is not found in the JSON string: %s", requiredField, jsonElement.toString())); + } + } + JsonObject jsonObj = jsonElement.getAsJsonObject(); + if (!jsonObj.get("task_id").isJsonPrimitive()) { + throw new 
IllegalArgumentException(String.format("Expected the field `task_id` to be a primitive type in the JSON string but got `%s`", jsonObj.get("task_id").toString())); + } + // validate the optional field `result` + if (jsonObj.get("result") != null && !jsonObj.get("result").isJsonNull()) { + MergeResult.validateJsonElement(jsonObj.get("result")); + } + // validate the optional field `error` + if (jsonObj.get("error") != null && !jsonObj.get("error").isJsonNull()) { + Error.validateJsonElement(jsonObj.get("error")); + } + } + + public static class CustomTypeAdapterFactory implements TypeAdapterFactory { + @SuppressWarnings("unchecked") + @Override + public TypeAdapter create(Gson gson, TypeToken type) { + if (!MergeStatus.class.isAssignableFrom(type.getRawType())) { + return null; // this class only serializes 'MergeStatus' and its subtypes + } + final TypeAdapter elementAdapter = gson.getAdapter(JsonElement.class); + final TypeAdapter thisAdapter + = gson.getDelegateAdapter(this, TypeToken.get(MergeStatus.class)); + + return (TypeAdapter) new TypeAdapter() { + @Override + public void write(JsonWriter out, MergeStatus value) throws IOException { + JsonObject obj = thisAdapter.toJsonTree(value).getAsJsonObject(); + obj.remove("additionalProperties"); + // serialize additional properties + if (value.getAdditionalProperties() != null) { + for (Map.Entry entry : value.getAdditionalProperties().entrySet()) { + if (entry.getValue() instanceof String) + obj.addProperty(entry.getKey(), (String) entry.getValue()); + else if (entry.getValue() instanceof Number) + obj.addProperty(entry.getKey(), (Number) entry.getValue()); + else if (entry.getValue() instanceof Boolean) + obj.addProperty(entry.getKey(), (Boolean) entry.getValue()); + else if (entry.getValue() instanceof Character) + obj.addProperty(entry.getKey(), (Character) entry.getValue()); + else { + obj.add(entry.getKey(), gson.toJsonTree(entry.getValue()).getAsJsonObject()); + } + } + } + elementAdapter.write(out, obj); + } + + @Override + public MergeStatus read(JsonReader in) throws IOException { + JsonElement jsonElement = elementAdapter.read(in); + validateJsonElement(jsonElement); + JsonObject jsonObj = jsonElement.getAsJsonObject(); + // store additional fields in the deserialized instance + MergeStatus instance = thisAdapter.fromJsonTree(jsonObj); + for (Map.Entry entry : jsonObj.entrySet()) { + if (!openapiFields.contains(entry.getKey())) { + if (entry.getValue().isJsonPrimitive()) { // primitive type + if (entry.getValue().getAsJsonPrimitive().isString()) + instance.putAdditionalProperty(entry.getKey(), entry.getValue().getAsString()); + else if (entry.getValue().getAsJsonPrimitive().isNumber()) + instance.putAdditionalProperty(entry.getKey(), entry.getValue().getAsNumber()); + else if (entry.getValue().getAsJsonPrimitive().isBoolean()) + instance.putAdditionalProperty(entry.getKey(), entry.getValue().getAsBoolean()); + else + throw new IllegalArgumentException(String.format("The field `%s` has unknown primitive type. 
Value: %s", entry.getKey(), entry.getValue().toString())); + } else if (entry.getValue().isJsonArray()) { + instance.putAdditionalProperty(entry.getKey(), gson.fromJson(entry.getValue(), List.class)); + } else { // JSON object + instance.putAdditionalProperty(entry.getKey(), gson.fromJson(entry.getValue(), HashMap.class)); + } + } + } + return instance; + } + + }.nullSafe(); + } + } + + /** + * Create an instance of MergeStatus given an JSON string + * + * @param jsonString JSON string + * @return An instance of MergeStatus + * @throws IOException if the JSON string is invalid with respect to MergeStatus + */ + public static MergeStatus fromJson(String jsonString) throws IOException { + return JSON.getGson().fromJson(jsonString, MergeStatus.class); + } + + /** + * Convert an instance of MergeStatus to an JSON string + * + * @return JSON string + */ + public String toJson() { + return JSON.getGson().toJson(this); + } +} + diff --git a/clients/java/src/test/java/io/lakefs/clients/sdk/CommitsApiTest.java b/clients/java/src/test/java/io/lakefs/clients/sdk/CommitsApiTest.java index f6f6275e88e..b2a5c82c05b 100644 --- a/clients/java/src/test/java/io/lakefs/clients/sdk/CommitsApiTest.java +++ b/clients/java/src/test/java/io/lakefs/clients/sdk/CommitsApiTest.java @@ -16,7 +16,9 @@ import io.lakefs.clients.sdk.ApiException; import io.lakefs.clients.sdk.model.Commit; import io.lakefs.clients.sdk.model.CommitCreation; +import io.lakefs.clients.sdk.model.CommitStatus; import io.lakefs.clients.sdk.model.Error; +import io.lakefs.clients.sdk.model.TaskCreation; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; @@ -50,6 +52,38 @@ public void commitTest() throws ApiException { // TODO: test validations } + /** + * create commit asynchronously + * + * @throws ApiException if the Api call fails + */ + @Test + public void commitAsyncTest() throws ApiException { + String repository = null; + String branch = null; + CommitCreation commitCreation = null; + String sourceMetarange = null; + TaskCreation response = api.commitAsync(repository, branch, commitCreation) + .sourceMetarange(sourceMetarange) + .execute(); + // TODO: test validations + } + + /** + * get status of async commit operation + * + * @throws ApiException if the Api call fails + */ + @Test + public void commitStatusTest() throws ApiException { + String repository = null; + String branch = null; + String id = null; + CommitStatus response = api.commitStatus(repository, branch, id) + .execute(); + // TODO: test validations + } + /** * get commit * diff --git a/clients/java/src/test/java/io/lakefs/clients/sdk/RefsApiTest.java b/clients/java/src/test/java/io/lakefs/clients/sdk/RefsApiTest.java index bda80dc09e3..929f6c969ea 100644 --- a/clients/java/src/test/java/io/lakefs/clients/sdk/RefsApiTest.java +++ b/clients/java/src/test/java/io/lakefs/clients/sdk/RefsApiTest.java @@ -20,7 +20,9 @@ import io.lakefs.clients.sdk.model.FindMergeBaseResult; import io.lakefs.clients.sdk.model.Merge; import io.lakefs.clients.sdk.model.MergeResult; +import io.lakefs.clients.sdk.model.MergeStatus; import java.time.OffsetDateTime; +import io.lakefs.clients.sdk.model.TaskCreation; import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; @@ -126,4 +128,37 @@ public void mergeIntoBranchTest() throws ApiException { // TODO: test validations } + /** + * merge references asynchronously + * + * @throws ApiException if the Api call fails + */ + @Test + public void mergeIntoBranchAsyncTest() throws ApiException { + String repository = 
null; + String sourceRef = null; + String destinationBranch = null; + Merge merge = null; + TaskCreation response = api.mergeIntoBranchAsync(repository, sourceRef, destinationBranch) + .merge(merge) + .execute(); + // TODO: test validations + } + + /** + * get status of async merge operation + * + * @throws ApiException if the Api call fails + */ + @Test + public void mergeIntoBranchStatusTest() throws ApiException { + String repository = null; + String sourceRef = null; + String destinationBranch = null; + String id = null; + MergeStatus response = api.mergeIntoBranchStatus(repository, sourceRef, destinationBranch, id) + .execute(); + // TODO: test validations + } + } diff --git a/clients/java/src/test/java/io/lakefs/clients/sdk/model/CommitStatusTest.java b/clients/java/src/test/java/io/lakefs/clients/sdk/model/CommitStatusTest.java new file mode 100644 index 00000000000..85fa12b1485 --- /dev/null +++ b/clients/java/src/test/java/io/lakefs/clients/sdk/model/CommitStatusTest.java @@ -0,0 +1,83 @@ +/* + * lakeFS API + * lakeFS HTTP API + * + * The version of the OpenAPI document: 1.0.0 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +package io.lakefs.clients.sdk.model; + +import com.google.gson.TypeAdapter; +import com.google.gson.annotations.JsonAdapter; +import com.google.gson.annotations.SerializedName; +import com.google.gson.stream.JsonReader; +import com.google.gson.stream.JsonWriter; +import io.lakefs.clients.sdk.model.Commit; +import io.lakefs.clients.sdk.model.Error; +import java.io.IOException; +import java.time.OffsetDateTime; +import java.util.Arrays; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +/** + * Model tests for CommitStatus + */ +public class CommitStatusTest { + private final CommitStatus model = new CommitStatus(); + + /** + * Model tests for CommitStatus + */ + @Test + public void testCommitStatus() { + // TODO: test CommitStatus + } + + /** + * Test the property 'taskId' + */ + @Test + public void taskIdTest() { + // TODO: test taskId + } + + /** + * Test the property 'completed' + */ + @Test + public void completedTest() { + // TODO: test completed + } + + /** + * Test the property 'updateTime' + */ + @Test + public void updateTimeTest() { + // TODO: test updateTime + } + + /** + * Test the property 'result' + */ + @Test + public void resultTest() { + // TODO: test result + } + + /** + * Test the property 'error' + */ + @Test + public void errorTest() { + // TODO: test error + } + +} diff --git a/clients/java/src/test/java/io/lakefs/clients/sdk/model/MergeStatusTest.java b/clients/java/src/test/java/io/lakefs/clients/sdk/model/MergeStatusTest.java new file mode 100644 index 00000000000..272534e20f6 --- /dev/null +++ b/clients/java/src/test/java/io/lakefs/clients/sdk/model/MergeStatusTest.java @@ -0,0 +1,83 @@ +/* + * lakeFS API + * lakeFS HTTP API + * + * The version of the OpenAPI document: 1.0.0 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. 
+ */ + + +package io.lakefs.clients.sdk.model; + +import com.google.gson.TypeAdapter; +import com.google.gson.annotations.JsonAdapter; +import com.google.gson.annotations.SerializedName; +import com.google.gson.stream.JsonReader; +import com.google.gson.stream.JsonWriter; +import io.lakefs.clients.sdk.model.Error; +import io.lakefs.clients.sdk.model.MergeResult; +import java.io.IOException; +import java.time.OffsetDateTime; +import java.util.Arrays; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +/** + * Model tests for MergeStatus + */ +public class MergeStatusTest { + private final MergeStatus model = new MergeStatus(); + + /** + * Model tests for MergeStatus + */ + @Test + public void testMergeStatus() { + // TODO: test MergeStatus + } + + /** + * Test the property 'taskId' + */ + @Test + public void taskIdTest() { + // TODO: test taskId + } + + /** + * Test the property 'completed' + */ + @Test + public void completedTest() { + // TODO: test completed + } + + /** + * Test the property 'updateTime' + */ + @Test + public void updateTimeTest() { + // TODO: test updateTime + } + + /** + * Test the property 'result' + */ + @Test + public void resultTest() { + // TODO: test result + } + + /** + * Test the property 'error' + */ + @Test + public void errorTest() { + // TODO: test error + } + +} diff --git a/clients/python/.openapi-generator/FILES b/clients/python/.openapi-generator/FILES index 82b716b41e2..b19f6e887a8 100644 --- a/clients/python/.openapi-generator/FILES +++ b/clients/python/.openapi-generator/FILES @@ -23,6 +23,7 @@ docs/CommitCreation.md docs/CommitList.md docs/CommitOverrides.md docs/CommitRecordCreation.md +docs/CommitStatus.md docs/CommitsApi.md docs/CompletePresignMultipartUpload.md docs/Config.md @@ -73,6 +74,7 @@ docs/LoginConfig.md docs/LoginInformation.md docs/Merge.md docs/MergeResult.md +docs/MergeStatus.md docs/MetaRangeCreation.md docs/MetaRangeCreationResponse.md docs/MetadataApi.md @@ -185,6 +187,7 @@ lakefs_sdk/models/commit_creation.py lakefs_sdk/models/commit_list.py lakefs_sdk/models/commit_overrides.py lakefs_sdk/models/commit_record_creation.py +lakefs_sdk/models/commit_status.py lakefs_sdk/models/complete_presign_multipart_upload.py lakefs_sdk/models/config.py lakefs_sdk/models/copy_part_source.py @@ -227,6 +230,7 @@ lakefs_sdk/models/login_config.py lakefs_sdk/models/login_information.py lakefs_sdk/models/merge.py lakefs_sdk/models/merge_result.py +lakefs_sdk/models/merge_status.py lakefs_sdk/models/meta_range_creation.py lakefs_sdk/models/meta_range_creation_response.py lakefs_sdk/models/object_copy_creation.py @@ -315,6 +319,7 @@ test/test_commit_creation.py test/test_commit_list.py test/test_commit_overrides.py test/test_commit_record_creation.py +test/test_commit_status.py test/test_commits_api.py test/test_complete_presign_multipart_upload.py test/test_config.py @@ -365,6 +370,7 @@ test/test_login_config.py test/test_login_information.py test/test_merge.py test/test_merge_result.py +test/test_merge_status.py test/test_meta_range_creation.py test/test_meta_range_creation_response.py test/test_metadata_api.py diff --git a/clients/python/README.md b/clients/python/README.md index d6ca0af0bcb..43757121753 100644 --- a/clients/python/README.md +++ b/clients/python/README.md @@ -172,6 +172,8 @@ Class | Method | HTTP request | Description *BranchesApi* | [**reset_branch**](docs/BranchesApi.md#reset_branch) | **PUT** /repositories/{repository}/branches/{branch} | reset branch *BranchesApi* | 
[**revert_branch**](docs/BranchesApi.md#revert_branch) | **POST** /repositories/{repository}/branches/{branch}/revert | revert *CommitsApi* | [**commit**](docs/CommitsApi.md#commit) | **POST** /repositories/{repository}/branches/{branch}/commits | create commit +*CommitsApi* | [**commit_async**](docs/CommitsApi.md#commit_async) | **POST** /repositories/{repository}/branches/{branch}/commits/async | create commit asynchronously +*CommitsApi* | [**commit_status**](docs/CommitsApi.md#commit_status) | **GET** /repositories/{repository}/branches/{branch}/commits/status | get status of async commit operation *CommitsApi* | [**get_commit**](docs/CommitsApi.md#get_commit) | **GET** /repositories/{repository}/commits/{commitId} | get commit *ConfigApi* | [**get_config**](docs/ConfigApi.md#get_config) | **GET** /config | *ExperimentalApi* | [**abort_presign_multipart_upload**](docs/ExperimentalApi.md#abort_presign_multipart_upload) | **DELETE** /repositories/{repository}/branches/{branch}/staging/pmpu/{uploadId} | Abort a presign multipart upload @@ -257,6 +259,8 @@ Class | Method | HTTP request | Description *RefsApi* | [**find_merge_base**](docs/RefsApi.md#find_merge_base) | **GET** /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch} | find the merge base for 2 references *RefsApi* | [**log_commits**](docs/RefsApi.md#log_commits) | **GET** /repositories/{repository}/refs/{ref}/commits | get commit log from ref. If both objects and prefixes are empty, return all commits. *RefsApi* | [**merge_into_branch**](docs/RefsApi.md#merge_into_branch) | **POST** /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch} | merge references +*RefsApi* | [**merge_into_branch_async**](docs/RefsApi.md#merge_into_branch_async) | **POST** /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch}/async | merge references asynchronously +*RefsApi* | [**merge_into_branch_status**](docs/RefsApi.md#merge_into_branch_status) | **GET** /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch}/status | get status of async merge operation *RemotesApi* | [**pull_iceberg_table**](docs/RemotesApi.md#pull_iceberg_table) | **POST** /iceberg/remotes/{catalog}/pull | take a table previously pushed from lakeFS into a remote catalog, and pull its state back into the originating lakeFS repository *RemotesApi* | [**push_iceberg_table**](docs/RemotesApi.md#push_iceberg_table) | **POST** /iceberg/remotes/{catalog}/push | register existing lakeFS table in remote catalog *RepositoriesApi* | [**create_repository**](docs/RepositoriesApi.md#create_repository) | **POST** /repositories | create repository @@ -299,6 +303,7 @@ Class | Method | HTTP request | Description - [CommitList](docs/CommitList.md) - [CommitOverrides](docs/CommitOverrides.md) - [CommitRecordCreation](docs/CommitRecordCreation.md) + - [CommitStatus](docs/CommitStatus.md) - [CompletePresignMultipartUpload](docs/CompletePresignMultipartUpload.md) - [Config](docs/Config.md) - [CopyPartSource](docs/CopyPartSource.md) @@ -341,6 +346,7 @@ Class | Method | HTTP request | Description - [LoginInformation](docs/LoginInformation.md) - [Merge](docs/Merge.md) - [MergeResult](docs/MergeResult.md) + - [MergeStatus](docs/MergeStatus.md) - [MetaRangeCreation](docs/MetaRangeCreation.md) - [MetaRangeCreationResponse](docs/MetaRangeCreationResponse.md) - [ObjectCopyCreation](docs/ObjectCopyCreation.md) diff --git a/clients/python/docs/CommitStatus.md b/clients/python/docs/CommitStatus.md new file mode 100644 index 00000000000..0eeb3f61c63 --- 
/dev/null +++ b/clients/python/docs/CommitStatus.md @@ -0,0 +1,33 @@ +# CommitStatus + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**task_id** | **str** | the id of the async commit task | +**completed** | **bool** | true if the task has completed (either successfully or with an error) | +**update_time** | **datetime** | last time the task status was updated | +**result** | [**Commit**](Commit.md) | | [optional] +**error** | [**Error**](Error.md) | | [optional] + +## Example + +```python +from lakefs_sdk.models.commit_status import CommitStatus + +# TODO update the JSON string below +json = "{}" +# create an instance of CommitStatus from a JSON string +commit_status_instance = CommitStatus.from_json(json) +# print the JSON string representation of the object +print CommitStatus.to_json() + +# convert the object into a dict +commit_status_dict = commit_status_instance.to_dict() +# create an instance of CommitStatus from a dict +commit_status_form_dict = commit_status.from_dict(commit_status_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/clients/python/docs/CommitsApi.md b/clients/python/docs/CommitsApi.md index 96479c4003e..1818fadeffd 100644 --- a/clients/python/docs/CommitsApi.md +++ b/clients/python/docs/CommitsApi.md @@ -5,6 +5,8 @@ All URIs are relative to */api/v1* Method | HTTP request | Description ------------- | ------------- | ------------- [**commit**](CommitsApi.md#commit) | **POST** /repositories/{repository}/branches/{branch}/commits | create commit +[**commit_async**](CommitsApi.md#commit_async) | **POST** /repositories/{repository}/branches/{branch}/commits/async | create commit asynchronously +[**commit_status**](CommitsApi.md#commit_status) | **GET** /repositories/{repository}/branches/{branch}/commits/status | get status of async commit operation [**get_commit**](CommitsApi.md#get_commit) | **GET** /repositories/{repository}/commits/{commitId} | get commit @@ -129,6 +131,245 @@ Name | Type | Description | Notes [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) +# **commit_async** +> TaskCreation commit_async(repository, branch, commit_creation, source_metarange=source_metarange) + +create commit asynchronously + +### Example + +* Basic Authentication (basic_auth): +* Api Key Authentication (cookie_auth): +* Api Key Authentication (oidc_auth): +* Api Key Authentication (saml_auth): +* Bearer (JWT) Authentication (jwt_token): + +```python +import time +import os +import lakefs_sdk +from lakefs_sdk.models.commit_creation import CommitCreation +from lakefs_sdk.models.task_creation import TaskCreation +from lakefs_sdk.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to /api/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = lakefs_sdk.Configuration( + host = "/api/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. 
+ +# Configure HTTP basic authorization: basic_auth +configuration = lakefs_sdk.Configuration( + username = os.environ["USERNAME"], + password = os.environ["PASSWORD"] +) + +# Configure API key authorization: cookie_auth +configuration.api_key['cookie_auth'] = os.environ["API_KEY"] + +# Uncomment below to setup prefix (e.g. Bearer) for API key, if needed +# configuration.api_key_prefix['cookie_auth'] = 'Bearer' + +# Configure API key authorization: oidc_auth +configuration.api_key['oidc_auth'] = os.environ["API_KEY"] + +# Uncomment below to setup prefix (e.g. Bearer) for API key, if needed +# configuration.api_key_prefix['oidc_auth'] = 'Bearer' + +# Configure API key authorization: saml_auth +configuration.api_key['saml_auth'] = os.environ["API_KEY"] + +# Uncomment below to setup prefix (e.g. Bearer) for API key, if needed +# configuration.api_key_prefix['saml_auth'] = 'Bearer' + +# Configure Bearer authorization (JWT): jwt_token +configuration = lakefs_sdk.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +with lakefs_sdk.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = lakefs_sdk.CommitsApi(api_client) + repository = 'repository_example' # str | + branch = 'branch_example' # str | + commit_creation = lakefs_sdk.CommitCreation() # CommitCreation | + source_metarange = 'source_metarange_example' # str | The source metarange to commit. Branch must not have uncommitted changes. (optional) + + try: + # create commit asynchronously + api_response = api_instance.commit_async(repository, branch, commit_creation, source_metarange=source_metarange) + print("The response of CommitsApi->commit_async:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling CommitsApi->commit_async: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **repository** | **str**| | + **branch** | **str**| | + **commit_creation** | [**CommitCreation**](CommitCreation.md)| | + **source_metarange** | **str**| The source metarange to commit. Branch must not have uncommitted changes. 
| [optional] + +### Return type + +[**TaskCreation**](TaskCreation.md) + +### Authorization + +[basic_auth](../README.md#basic_auth), [cookie_auth](../README.md#cookie_auth), [oidc_auth](../README.md#oidc_auth), [saml_auth](../README.md#saml_auth), [jwt_token](../README.md#jwt_token) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**202** | commit task started | - | +**400** | Validation Error | - | +**401** | Unauthorized | - | +**403** | Forbidden | - | +**404** | Resource Not Found | - | +**429** | too many requests | - | +**501** | Not Implemented | - | +**0** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **commit_status** +> CommitStatus commit_status(repository, branch, id) + +get status of async commit operation + +### Example + +* Basic Authentication (basic_auth): +* Api Key Authentication (cookie_auth): +* Api Key Authentication (oidc_auth): +* Api Key Authentication (saml_auth): +* Bearer (JWT) Authentication (jwt_token): + +```python +import time +import os +import lakefs_sdk +from lakefs_sdk.models.commit_status import CommitStatus +from lakefs_sdk.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to /api/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = lakefs_sdk.Configuration( + host = "/api/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure HTTP basic authorization: basic_auth +configuration = lakefs_sdk.Configuration( + username = os.environ["USERNAME"], + password = os.environ["PASSWORD"] +) + +# Configure API key authorization: cookie_auth +configuration.api_key['cookie_auth'] = os.environ["API_KEY"] + +# Uncomment below to setup prefix (e.g. Bearer) for API key, if needed +# configuration.api_key_prefix['cookie_auth'] = 'Bearer' + +# Configure API key authorization: oidc_auth +configuration.api_key['oidc_auth'] = os.environ["API_KEY"] + +# Uncomment below to setup prefix (e.g. Bearer) for API key, if needed +# configuration.api_key_prefix['oidc_auth'] = 'Bearer' + +# Configure API key authorization: saml_auth +configuration.api_key['saml_auth'] = os.environ["API_KEY"] + +# Uncomment below to setup prefix (e.g. 
Bearer) for API key, if needed +# configuration.api_key_prefix['saml_auth'] = 'Bearer' + +# Configure Bearer authorization (JWT): jwt_token +configuration = lakefs_sdk.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +with lakefs_sdk.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = lakefs_sdk.CommitsApi(api_client) + repository = 'repository_example' # str | + branch = 'branch_example' # str | + id = 'id_example' # str | Unique identifier of the commit task + + try: + # get status of async commit operation + api_response = api_instance.commit_status(repository, branch, id) + print("The response of CommitsApi->commit_status:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling CommitsApi->commit_status: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **repository** | **str**| | + **branch** | **str**| | + **id** | **str**| Unique identifier of the commit task | + +### Return type + +[**CommitStatus**](CommitStatus.md) + +### Authorization + +[basic_auth](../README.md#basic_auth), [cookie_auth](../README.md#cookie_auth), [oidc_auth](../README.md#oidc_auth), [saml_auth](../README.md#saml_auth), [jwt_token](../README.md#jwt_token) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | commit task status | - | +**400** | Validation Error | - | +**401** | Unauthorized | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**409** | Conflict | - | +**412** | Precondition Failed (e.g. 
a pre-commit hook returned a failure) | - | +**429** | too many requests | - | +**501** | Not Implemented | - | +**0** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) +
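The generated reference above shows each call in isolation. As a point of reference, a minimal end-to-end sketch of the asynchronous commit flow might look like the following; the repository and branch names and the 1-second poll interval are illustrative, `configuration` is assumed to be set up as in the examples above, and the sketch assumes `TaskCreation` exposes the task identifier as `id`:

```python
import time

import lakefs_sdk

# Hypothetical submit-then-poll loop: start the commit as a background task,
# then poll its status until the server reports completion.
# CommitStatus.completed is true for both success and failure;
# the optional `error` field distinguishes the two.
with lakefs_sdk.ApiClient(configuration) as api_client:
    commits = lakefs_sdk.CommitsApi(api_client)
    task = commits.commit_async(
        "example-repo",
        "main",
        lakefs_sdk.CommitCreation(message="commit submitted asynchronously"),
    )
    while True:
        status = commits.commit_status("example-repo", "main", task.id)
        if status.completed:
            break
        time.sleep(1)  # arbitrary poll interval
    if status.error is not None:
        raise RuntimeError(f"async commit failed: {status.error.message}")
    print(status.result.id)  # id of the newly created commit
```

A production caller would likely add a timeout and backoff around the loop; the sketch keeps only the happy path for brevity.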
# **get_commit** > Commit get_commit(repository, commit_id) diff --git a/clients/python/docs/MergeStatus.md b/clients/python/docs/MergeStatus.md new file mode 100644 index 00000000000..dc17da8f99e --- /dev/null +++ b/clients/python/docs/MergeStatus.md @@ -0,0 +1,33 @@ +# MergeStatus + + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**task_id** | **str** | the id of the async merge task | +**completed** | **bool** | true if the task has completed (either successfully or with an error) | +**update_time** | **datetime** | last time the task status was updated | +**result** | [**MergeResult**](MergeResult.md) | | [optional] +**error** | [**Error**](Error.md) | | [optional] + +## Example + +```python +from lakefs_sdk.models.merge_status import MergeStatus + +# TODO update the JSON string below +json = "{}" +# create an instance of MergeStatus from a JSON string +merge_status_instance = MergeStatus.from_json(json) +# print the JSON string representation of the object +print(merge_status_instance.to_json()) + +# convert the object into a dict +merge_status_dict = merge_status_instance.to_dict() +# create an instance of MergeStatus from a dict +merge_status_from_dict = MergeStatus.from_dict(merge_status_dict) +``` +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/clients/python/docs/RefsApi.md b/clients/python/docs/RefsApi.md index 21272712cd3..346aa712161 100644 --- a/clients/python/docs/RefsApi.md +++ b/clients/python/docs/RefsApi.md @@ -8,6 +8,8 @@ Method | HTTP request | Description [**find_merge_base**](RefsApi.md#find_merge_base) | **GET** /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch} | find the merge base for 2 references [**log_commits**](RefsApi.md#log_commits) | **GET** /repositories/{repository}/refs/{ref}/commits | get commit log from ref. If both objects and prefixes are empty, return all commits. [**merge_into_branch**](RefsApi.md#merge_into_branch) | **POST** /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch} | merge references +[**merge_into_branch_async**](RefsApi.md#merge_into_branch_async) | **POST** /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch}/async | merge references asynchronously +[**merge_into_branch_status**](RefsApi.md#merge_into_branch_status) | **GET** /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch}/status | get status of async merge operation + + # **diff_refs** @@ -502,3 +504,244 @@ Name | Type | Description | Notes [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) +# **merge_into_branch_async** +> TaskCreation merge_into_branch_async(repository, source_ref, destination_branch, merge=merge) + +merge references asynchronously + +### Example + +* Basic Authentication (basic_auth): +* Api Key Authentication (cookie_auth): +* Api Key Authentication (oidc_auth): +* Api Key Authentication (saml_auth): +* Bearer (JWT) Authentication (jwt_token): + +```python +import time +import os +import lakefs_sdk +from lakefs_sdk.models.merge import Merge +from lakefs_sdk.models.task_creation import TaskCreation +from lakefs_sdk.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to /api/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = lakefs_sdk.Configuration( + host = "/api/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure HTTP basic authorization: basic_auth +configuration = lakefs_sdk.Configuration( + username = os.environ["USERNAME"], + password = os.environ["PASSWORD"] +) + +# Configure API key authorization: cookie_auth +configuration.api_key['cookie_auth'] = os.environ["API_KEY"] + +# Uncomment below to setup prefix (e.g. Bearer) for API key, if needed +# configuration.api_key_prefix['cookie_auth'] = 'Bearer' + +# Configure API key authorization: oidc_auth +configuration.api_key['oidc_auth'] = os.environ["API_KEY"] + +# Uncomment below to setup prefix (e.g. Bearer) for API key, if needed +# configuration.api_key_prefix['oidc_auth'] = 'Bearer' + +# Configure API key authorization: saml_auth +configuration.api_key['saml_auth'] = os.environ["API_KEY"] + +# Uncomment below to setup prefix (e.g.
Bearer) for API key, if needed +# configuration.api_key_prefix['saml_auth'] = 'Bearer' + +# Configure Bearer authorization (JWT): jwt_token +configuration = lakefs_sdk.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +with lakefs_sdk.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = lakefs_sdk.RefsApi(api_client) + repository = 'repository_example' # str | + source_ref = 'source_ref_example' # str | source ref + destination_branch = 'destination_branch_example' # str | destination branch name + merge = lakefs_sdk.Merge() # Merge | (optional) + + try: + # merge references asynchronously + api_response = api_instance.merge_into_branch_async(repository, source_ref, destination_branch, merge=merge) + print("The response of RefsApi->merge_into_branch_async:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling RefsApi->merge_into_branch_async: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **repository** | **str**| | + **source_ref** | **str**| source ref | + **destination_branch** | **str**| destination branch name | + **merge** | [**Merge**](Merge.md)| | [optional] + +### Return type + +[**TaskCreation**](TaskCreation.md) + +### Authorization + +[basic_auth](../README.md#basic_auth), [cookie_auth](../README.md#cookie_auth), [oidc_auth](../README.md#oidc_auth), [saml_auth](../README.md#saml_auth), [jwt_token](../README.md#jwt_token) + +### HTTP request headers + + - **Content-Type**: application/json + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**202** | merge task started | - | +**400** | Validation Error | - | +**401** | Unauthorized | - | +**403** | Forbidden | - | +**404** | Resource Not Found | - | +**429** | too many requests | - | +**501** | Not Implemented | - | +**0** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **merge_into_branch_status** +> MergeStatus merge_into_branch_status(repository, source_ref, destination_branch, id) + +get status of async merge operation + +### Example + +* Basic Authentication (basic_auth): +* Api Key Authentication (cookie_auth): +* Api Key Authentication (oidc_auth): +* Api Key Authentication (saml_auth): +* Bearer (JWT) Authentication (jwt_token): + +```python +import time +import os +import lakefs_sdk +from lakefs_sdk.models.merge_status import MergeStatus +from lakefs_sdk.rest import ApiException +from pprint import pprint + +# Defining the host is optional and defaults to /api/v1 +# See configuration.py for a list of all supported configuration parameters. +configuration = lakefs_sdk.Configuration( + host = "/api/v1" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. 
+ +# Configure HTTP basic authorization: basic_auth +configuration = lakefs_sdk.Configuration( + username = os.environ["USERNAME"], + password = os.environ["PASSWORD"] +) + +# Configure API key authorization: cookie_auth +configuration.api_key['cookie_auth'] = os.environ["API_KEY"] + +# Uncomment below to setup prefix (e.g. Bearer) for API key, if needed +# configuration.api_key_prefix['cookie_auth'] = 'Bearer' + +# Configure API key authorization: oidc_auth +configuration.api_key['oidc_auth'] = os.environ["API_KEY"] + +# Uncomment below to setup prefix (e.g. Bearer) for API key, if needed +# configuration.api_key_prefix['oidc_auth'] = 'Bearer' + +# Configure API key authorization: saml_auth +configuration.api_key['saml_auth'] = os.environ["API_KEY"] + +# Uncomment below to setup prefix (e.g. Bearer) for API key, if needed +# configuration.api_key_prefix['saml_auth'] = 'Bearer' + +# Configure Bearer authorization (JWT): jwt_token +configuration = lakefs_sdk.Configuration( + access_token = os.environ["BEARER_TOKEN"] +) + +# Enter a context with an instance of the API client +with lakefs_sdk.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = lakefs_sdk.RefsApi(api_client) + repository = 'repository_example' # str | + source_ref = 'source_ref_example' # str | source ref + destination_branch = 'destination_branch_example' # str | destination branch name + id = 'id_example' # str | Unique identifier of the merge task + + try: + # get status of async merge operation + api_response = api_instance.merge_into_branch_status(repository, source_ref, destination_branch, id) + print("The response of RefsApi->merge_into_branch_status:\n") + pprint(api_response) + except Exception as e: + print("Exception when calling RefsApi->merge_into_branch_status: %s\n" % e) +``` + + + +### Parameters + + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **repository** | **str**| | + **source_ref** | **str**| source ref | + **destination_branch** | **str**| destination branch name | + **id** | **str**| Unique identifier of the merge task | + +### Return type + +[**MergeStatus**](MergeStatus.md) + +### Authorization + +[basic_auth](../README.md#basic_auth), [cookie_auth](../README.md#cookie_auth), [oidc_auth](../README.md#oidc_auth), [saml_auth](../README.md#saml_auth), [jwt_token](../README.md#jwt_token) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | merge task status | - | +**400** | Validation Error | - | +**401** | Unauthorized | - | +**403** | Forbidden | - | +**404** | Not Found | - | +**409** | Conflict | - | +**412** | precondition failed (e.g. 
a pre-merge hook returned a failure) | - | +**429** | too many requests | - | +**501** | Not Implemented | - | +**0** | Internal Server Error | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) +
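Analogously to the commit sketch earlier, the two merge endpoints combine into a submit-then-poll loop. The sketch below is illustrative only: the repository and ref names and the poll interval are placeholders, `configuration` is assumed to be set up as in the generated examples, and `TaskCreation` is again assumed to expose the task identifier as `id`:

```python
import time

import lakefs_sdk

# Hypothetical polling loop: submit the merge as a background task and poll
# until it finishes. On success, MergeStatus.result carries the resulting
# commit reference; on failure (e.g. a conflict), MergeStatus.error is
# populated instead.
with lakefs_sdk.ApiClient(configuration) as api_client:
    refs = lakefs_sdk.RefsApi(api_client)
    task = refs.merge_into_branch_async("example-repo", "feature-branch", "main")
    while True:
        status = refs.merge_into_branch_status(
            "example-repo", "feature-branch", "main", task.id
        )
        if status.completed:
            break
        time.sleep(1)  # arbitrary poll interval
    if status.error is not None:
        raise RuntimeError(f"async merge failed: {status.error.message}")
    print(status.result.reference)  # reference of the merge commit
```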
diff --git a/clients/python/lakefs_sdk/__init__.py b/clients/python/lakefs_sdk/__init__.py index 3a19c718531..9a644ba6eb4 100644 --- a/clients/python/lakefs_sdk/__init__.py +++ b/clients/python/lakefs_sdk/__init__.py @@ -66,6 +66,7 @@ from lakefs_sdk.models.commit_list import CommitList from lakefs_sdk.models.commit_overrides import CommitOverrides from lakefs_sdk.models.commit_record_creation import CommitRecordCreation +from lakefs_sdk.models.commit_status import CommitStatus from lakefs_sdk.models.complete_presign_multipart_upload import CompletePresignMultipartUpload from lakefs_sdk.models.config import Config from lakefs_sdk.models.copy_part_source import CopyPartSource @@ -108,6 +109,7 @@ from lakefs_sdk.models.login_information import LoginInformation from lakefs_sdk.models.merge import Merge from lakefs_sdk.models.merge_result import MergeResult +from lakefs_sdk.models.merge_status import MergeStatus from lakefs_sdk.models.meta_range_creation import MetaRangeCreation from lakefs_sdk.models.meta_range_creation_response import MetaRangeCreationResponse from lakefs_sdk.models.object_copy_creation import ObjectCopyCreation diff --git a/clients/python/lakefs_sdk/api/commits_api.py b/clients/python/lakefs_sdk/api/commits_api.py index 4fe270ab412..843755b8d90 100644 --- a/clients/python/lakefs_sdk/api/commits_api.py +++ b/clients/python/lakefs_sdk/api/commits_api.py @@ -32,6 +32,8 @@ from lakefs_sdk.models.commit import Commit from lakefs_sdk.models.commit_creation import CommitCreation +from lakefs_sdk.models.commit_status import CommitStatus +from lakefs_sdk.models.task_creation import TaskCreation from lakefs_sdk.api_client import ApiClient from lakefs_sdk.api_response import ApiResponse @@ -229,6 +231,343 @@ def commit_with_http_info(self, repository : StrictStr, branch : StrictStr, comm collection_formats=_collection_formats, _request_auth=_params.get('_request_auth')) + @validate_arguments + def commit_async(self, repository : StrictStr, branch : StrictStr, commit_creation : CommitCreation, source_metarange : Annotated[Optional[StrictStr], Field(description="The source metarange to commit. Branch must not have uncommitted changes.")] = None, **kwargs) -> TaskCreation: # noqa: E501 + """create commit asynchronously # noqa: E501 + + This method makes a synchronous HTTP request by default. To make an + asynchronous HTTP request, please pass async_req=True + + >>> thread = api.commit_async(repository, branch, commit_creation, source_metarange, async_req=True) + >>> result = thread.get() + + :param repository: (required) + :type repository: str + :param branch: (required) + :type branch: str + :param commit_creation: (required) + :type commit_creation: CommitCreation + :param source_metarange: The source metarange to commit. Branch must not have uncommitted changes. + :type source_metarange: str + :param async_req: Whether to execute the request asynchronously. + :type async_req: bool, optional + :param _request_timeout: timeout setting for this request. + If one number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :return: Returns the result object. + If the method is called asynchronously, + returns the request thread. + :rtype: TaskCreation + """ + kwargs['_return_http_data_only'] = True + if '_preload_content' in kwargs: + message = "Error! Please call the commit_async_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data" # noqa: E501 + raise ValueError(message) + return self.commit_async_with_http_info(repository, branch, commit_creation, source_metarange, **kwargs) # noqa: E501 + + @validate_arguments + def commit_async_with_http_info(self, repository : StrictStr, branch : StrictStr, commit_creation : CommitCreation, source_metarange : Annotated[Optional[StrictStr], Field(description="The source metarange to commit. Branch must not have uncommitted changes.")] = None, **kwargs) -> ApiResponse: # noqa: E501 + """create commit asynchronously # noqa: E501 + + This method makes a synchronous HTTP request by default. To make an + asynchronous HTTP request, please pass async_req=True + + >>> thread = api.commit_async_with_http_info(repository, branch, commit_creation, source_metarange, async_req=True) + >>> result = thread.get() + + :param repository: (required) + :type repository: str + :param branch: (required) + :type branch: str + :param commit_creation: (required) + :type commit_creation: CommitCreation + :param source_metarange: The source metarange to commit. Branch must not have uncommitted changes. + :type source_metarange: str + :param async_req: Whether to execute the request asynchronously. + :type async_req: bool, optional + :param _preload_content: if False, the ApiResponse.data will + be set to none and raw_data will store the + HTTP response body without reading/decoding. + Default is True. + :type _preload_content: bool, optional + :param _return_http_data_only: response data instead of ApiResponse + object with status code, headers, etc + :type _return_http_data_only: bool, optional + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :param _request_auth: set to override the auth_settings for a single + request; this effectively ignores the authentication + in the spec for a single request. + :type _request_auth: dict, optional + :type _content_type: string, optional: force content-type for the request + :return: Returns the result object. + If the method is called asynchronously, + returns the request thread.
+ :rtype: tuple(TaskCreation, status_code(int), headers(HTTPHeaderDict)) + """ + + _params = locals() + + _all_params = [ + 'repository', + 'branch', + 'commit_creation', + 'source_metarange' + ] + _all_params.extend( + [ + 'async_req', + '_return_http_data_only', + '_preload_content', + '_request_timeout', + '_request_auth', + '_content_type', + '_headers' + ] + ) + + # validate the arguments + for _key, _val in _params['kwargs'].items(): + if _key not in _all_params: + raise ApiTypeError( + "Got an unexpected keyword argument '%s'" + " to method commit_async" % _key + ) + _params[_key] = _val + del _params['kwargs'] + + _collection_formats = {} + + # process the path parameters + _path_params = {} + if _params['repository']: + _path_params['repository'] = _params['repository'] + + if _params['branch']: + _path_params['branch'] = _params['branch'] + + + # process the query parameters + _query_params = [] + if _params.get('source_metarange') is not None: # noqa: E501 + _query_params.append(('source_metarange', _params['source_metarange'])) + + # process the header parameters + _header_params = dict(_params.get('_headers', {})) + # process the form parameters + _form_params = [] + _files = {} + # process the body parameter + _body_params = None + if _params['commit_creation'] is not None: + _body_params = _params['commit_creation'] + + # set the HTTP header `Accept` + _header_params['Accept'] = self.api_client.select_header_accept( + ['application/json']) # noqa: E501 + + # set the HTTP header `Content-Type` + _content_types_list = _params.get('_content_type', + self.api_client.select_header_content_type( + ['application/json'])) + if _content_types_list: + _header_params['Content-Type'] = _content_types_list + + # authentication setting + _auth_settings = ['basic_auth', 'cookie_auth', 'oidc_auth', 'saml_auth', 'jwt_token'] # noqa: E501 + + _response_types_map = { + '202': "TaskCreation", + '400': "Error", + '401': "Error", + '403': "Error", + '404': "Error", + '429': None, + '501': "Error", + } + + return self.api_client.call_api( + '/repositories/{repository}/branches/{branch}/commits/async', 'POST', + _path_params, + _query_params, + _header_params, + body=_body_params, + post_params=_form_params, + files=_files, + response_types_map=_response_types_map, + auth_settings=_auth_settings, + async_req=_params.get('async_req'), + _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501 + _preload_content=_params.get('_preload_content', True), + _request_timeout=_params.get('_request_timeout'), + collection_formats=_collection_formats, + _request_auth=_params.get('_request_auth')) + + @validate_arguments + def commit_status(self, repository : StrictStr, branch : StrictStr, id : Annotated[StrictStr, Field(..., description="Unique identifier of the commit task")], **kwargs) -> CommitStatus: # noqa: E501 + """get status of async commit operation # noqa: E501 + + This method makes a synchronous HTTP request by default. To make an + asynchronous HTTP request, please pass async_req=True + + >>> thread = api.commit_status(repository, branch, id, async_req=True) + >>> result = thread.get() + + :param repository: (required) + :type repository: str + :param branch: (required) + :type branch: str + :param id: Unique identifier of the commit task (required) + :type id: str + :param async_req: Whether to execute the request asynchronously. + :type async_req: bool, optional + :param _request_timeout: timeout setting for this request. 
+ If one number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :return: Returns the result object. + If the method is called asynchronously, + returns the request thread. + :rtype: CommitStatus + """ + kwargs['_return_http_data_only'] = True + if '_preload_content' in kwargs: + message = "Error! Please call the commit_status_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data" # noqa: E501 + raise ValueError(message) + return self.commit_status_with_http_info(repository, branch, id, **kwargs) # noqa: E501 + + @validate_arguments + def commit_status_with_http_info(self, repository : StrictStr, branch : StrictStr, id : Annotated[StrictStr, Field(..., description="Unique identifier of the commit task")], **kwargs) -> ApiResponse: # noqa: E501 + """get status of async commit operation # noqa: E501 + + This method makes a synchronous HTTP request by default. To make an + asynchronous HTTP request, please pass async_req=True + + >>> thread = api.commit_status_with_http_info(repository, branch, id, async_req=True) + >>> result = thread.get() + + :param repository: (required) + :type repository: str + :param branch: (required) + :type branch: str + :param id: Unique identifier of the commit task (required) + :type id: str + :param async_req: Whether to execute the request asynchronously. + :type async_req: bool, optional + :param _preload_content: if False, the ApiResponse.data will + be set to none and raw_data will store the + HTTP response body without reading/decoding. + Default is True. + :type _preload_content: bool, optional + :param _return_http_data_only: response data instead of ApiResponse + object with status code, headers, etc + :type _return_http_data_only: bool, optional + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :param _request_auth: set to override the auth_settings for a single + request; this effectively ignores the authentication + in the spec for a single request. + :type _request_auth: dict, optional + :type _content_type: string, optional: force content-type for the request + :return: Returns the result object. + If the method is called asynchronously, + returns the request thread.
+ :rtype: tuple(CommitStatus, status_code(int), headers(HTTPHeaderDict)) + """ + + _params = locals() + + _all_params = [ + 'repository', + 'branch', + 'id' + ] + _all_params.extend( + [ + 'async_req', + '_return_http_data_only', + '_preload_content', + '_request_timeout', + '_request_auth', + '_content_type', + '_headers' + ] + ) + + # validate the arguments + for _key, _val in _params['kwargs'].items(): + if _key not in _all_params: + raise ApiTypeError( + "Got an unexpected keyword argument '%s'" + " to method commit_status" % _key + ) + _params[_key] = _val + del _params['kwargs'] + + _collection_formats = {} + + # process the path parameters + _path_params = {} + if _params['repository']: + _path_params['repository'] = _params['repository'] + + if _params['branch']: + _path_params['branch'] = _params['branch'] + + + # process the query parameters + _query_params = [] + if _params.get('id') is not None: # noqa: E501 + _query_params.append(('id', _params['id'])) + + # process the header parameters + _header_params = dict(_params.get('_headers', {})) + # process the form parameters + _form_params = [] + _files = {} + # process the body parameter + _body_params = None + # set the HTTP header `Accept` + _header_params['Accept'] = self.api_client.select_header_accept( + ['application/json']) # noqa: E501 + + # authentication setting + _auth_settings = ['basic_auth', 'cookie_auth', 'oidc_auth', 'saml_auth', 'jwt_token'] # noqa: E501 + + _response_types_map = { + '200': "CommitStatus", + '400': "CommitStatus", + '401': "CommitStatus", + '403': "CommitStatus", + '404': "CommitStatus", + '409': "CommitStatus", + '412': "CommitStatus", + '429': "CommitStatus", + '501': "CommitStatus", + } + + return self.api_client.call_api( + '/repositories/{repository}/branches/{branch}/commits/status', 'GET', + _path_params, + _query_params, + _header_params, + body=_body_params, + post_params=_form_params, + files=_files, + response_types_map=_response_types_map, + auth_settings=_auth_settings, + async_req=_params.get('async_req'), + _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501 + _preload_content=_params.get('_preload_content', True), + _request_timeout=_params.get('_request_timeout'), + collection_formats=_collection_formats, + _request_auth=_params.get('_request_auth')) + @validate_arguments def get_commit(self, repository : StrictStr, commit_id : StrictStr, **kwargs) -> Commit: # noqa: E501 """get commit # noqa: E501 diff --git a/clients/python/lakefs_sdk/api/refs_api.py b/clients/python/lakefs_sdk/api/refs_api.py index 83ef25373f0..0118337c71c 100644 --- a/clients/python/lakefs_sdk/api/refs_api.py +++ b/clients/python/lakefs_sdk/api/refs_api.py @@ -37,6 +37,8 @@ from lakefs_sdk.models.find_merge_base_result import FindMergeBaseResult from lakefs_sdk.models.merge import Merge from lakefs_sdk.models.merge_result import MergeResult +from lakefs_sdk.models.merge_status import MergeStatus +from lakefs_sdk.models.task_creation import TaskCreation from lakefs_sdk.api_client import ApiClient from lakefs_sdk.api_response import ApiResponse @@ -816,3 +818,348 @@ def merge_into_branch_with_http_info(self, repository : StrictStr, source_ref : _request_timeout=_params.get('_request_timeout'), collection_formats=_collection_formats, _request_auth=_params.get('_request_auth')) + + @validate_arguments + def merge_into_branch_async(self, repository : StrictStr, source_ref : Annotated[StrictStr, Field(..., description="source ref")], destination_branch : Annotated[StrictStr, Field(..., 
description="destination branch name")], merge : Optional[Merge] = None, **kwargs) -> TaskCreation: # noqa: E501 + """merge references asynchronously # noqa: E501 + + This method makes a synchronous HTTP request by default. To make an + asynchronous HTTP request, please pass async_req=True + + >>> thread = api.merge_into_branch_async(repository, source_ref, destination_branch, merge, async_req=True) + >>> result = thread.get() + + :param repository: (required) + :type repository: str + :param source_ref: source ref (required) + :type source_ref: str + :param destination_branch: destination branch name (required) + :type destination_branch: str + :param merge: + :type merge: Merge + :param async_req: Whether to execute the request asynchronously. + :type async_req: bool, optional + :param _request_timeout: timeout setting for this request. + If one number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :return: Returns the result object. + If the method is called asynchronously, + returns the request thread. + :rtype: TaskCreation + """ + kwargs['_return_http_data_only'] = True + if '_preload_content' in kwargs: + message = "Error! Please call the merge_into_branch_async_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data" # noqa: E501 + raise ValueError(message) + return self.merge_into_branch_async_with_http_info(repository, source_ref, destination_branch, merge, **kwargs) # noqa: E501 + + @validate_arguments + def merge_into_branch_async_with_http_info(self, repository : StrictStr, source_ref : Annotated[StrictStr, Field(..., description="source ref")], destination_branch : Annotated[StrictStr, Field(..., description="destination branch name")], merge : Optional[Merge] = None, **kwargs) -> ApiResponse: # noqa: E501 + """merge references asynchronously # noqa: E501 + + This method makes a synchronous HTTP request by default. To make an + asynchronous HTTP request, please pass async_req=True + + >>> thread = api.merge_into_branch_async_with_http_info(repository, source_ref, destination_branch, merge, async_req=True) + >>> result = thread.get() + + :param repository: (required) + :type repository: str + :param source_ref: source ref (required) + :type source_ref: str + :param destination_branch: destination branch name (required) + :type destination_branch: str + :param merge: + :type merge: Merge + :param async_req: Whether to execute the request asynchronously. + :type async_req: bool, optional + :param _preload_content: if False, the ApiResponse.data will + be set to none and raw_data will store the + HTTP response body without reading/decoding. + Default is True. + :type _preload_content: bool, optional + :param _return_http_data_only: response data instead of ApiResponse + object with status code, headers, etc + :type _return_http_data_only: bool, optional + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :param _request_auth: set to override the auth_settings for a single + request; this effectively ignores the authentication + in the spec for a single request. + :type _request_auth: dict, optional + :type _content_type: string, optional: force content-type for the request + :return: Returns the result object. + If the method is called asynchronously, + returns the request thread.
+ :rtype: tuple(TaskCreation, status_code(int), headers(HTTPHeaderDict)) + """ + + _params = locals() + + _all_params = [ + 'repository', + 'source_ref', + 'destination_branch', + 'merge' + ] + _all_params.extend( + [ + 'async_req', + '_return_http_data_only', + '_preload_content', + '_request_timeout', + '_request_auth', + '_content_type', + '_headers' + ] + ) + + # validate the arguments + for _key, _val in _params['kwargs'].items(): + if _key not in _all_params: + raise ApiTypeError( + "Got an unexpected keyword argument '%s'" + " to method merge_into_branch_async" % _key + ) + _params[_key] = _val + del _params['kwargs'] + + _collection_formats = {} + + # process the path parameters + _path_params = {} + if _params['repository']: + _path_params['repository'] = _params['repository'] + + if _params['source_ref']: + _path_params['sourceRef'] = _params['source_ref'] + + if _params['destination_branch']: + _path_params['destinationBranch'] = _params['destination_branch'] + + + # process the query parameters + _query_params = [] + # process the header parameters + _header_params = dict(_params.get('_headers', {})) + # process the form parameters + _form_params = [] + _files = {} + # process the body parameter + _body_params = None + if _params['merge'] is not None: + _body_params = _params['merge'] + + # set the HTTP header `Accept` + _header_params['Accept'] = self.api_client.select_header_accept( + ['application/json']) # noqa: E501 + + # set the HTTP header `Content-Type` + _content_types_list = _params.get('_content_type', + self.api_client.select_header_content_type( + ['application/json'])) + if _content_types_list: + _header_params['Content-Type'] = _content_types_list + + # authentication setting + _auth_settings = ['basic_auth', 'cookie_auth', 'oidc_auth', 'saml_auth', 'jwt_token'] # noqa: E501 + + _response_types_map = { + '202': "TaskCreation", + '400': "Error", + '401': "Error", + '403': "Error", + '404': "Error", + '429': None, + '501': "Error", + } + + return self.api_client.call_api( + '/repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch}/async', 'POST', + _path_params, + _query_params, + _header_params, + body=_body_params, + post_params=_form_params, + files=_files, + response_types_map=_response_types_map, + auth_settings=_auth_settings, + async_req=_params.get('async_req'), + _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501 + _preload_content=_params.get('_preload_content', True), + _request_timeout=_params.get('_request_timeout'), + collection_formats=_collection_formats, + _request_auth=_params.get('_request_auth')) + + @validate_arguments + def merge_into_branch_status(self, repository : StrictStr, source_ref : Annotated[StrictStr, Field(..., description="source ref")], destination_branch : Annotated[StrictStr, Field(..., description="destination branch name")], id : Annotated[StrictStr, Field(..., description="Unique identifier of the merge task")], **kwargs) -> MergeStatus: # noqa: E501 + """get status of async merge operation # noqa: E501 + + This method makes a synchronous HTTP request by default. 
To make an + asynchronous HTTP request, please pass async_req=True + + >>> thread = api.merge_into_branch_status(repository, source_ref, destination_branch, id, async_req=True) + >>> result = thread.get() + + :param repository: (required) + :type repository: str + :param source_ref: source ref (required) + :type source_ref: str + :param destination_branch: destination branch name (required) + :type destination_branch: str + :param id: Unique identifier of the merge task (required) + :type id: str + :param async_req: Whether to execute the request asynchronously. + :type async_req: bool, optional + :param _request_timeout: timeout setting for this request. + If one number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :return: Returns the result object. + If the method is called asynchronously, + returns the request thread. + :rtype: MergeStatus + """ + kwargs['_return_http_data_only'] = True + if '_preload_content' in kwargs: + message = "Error! Please call the merge_into_branch_status_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data" # noqa: E501 + raise ValueError(message) + return self.merge_into_branch_status_with_http_info(repository, source_ref, destination_branch, id, **kwargs) # noqa: E501 + + @validate_arguments + def merge_into_branch_status_with_http_info(self, repository : StrictStr, source_ref : Annotated[StrictStr, Field(..., description="source ref")], destination_branch : Annotated[StrictStr, Field(..., description="destination branch name")], id : Annotated[StrictStr, Field(..., description="Unique identifier of the merge task")], **kwargs) -> ApiResponse: # noqa: E501 + """get status of async merge operation # noqa: E501 + + This method makes a synchronous HTTP request by default. To make an + asynchronous HTTP request, please pass async_req=True + + >>> thread = api.merge_into_branch_status_with_http_info(repository, source_ref, destination_branch, id, async_req=True) + >>> result = thread.get() + + :param repository: (required) + :type repository: str + :param source_ref: source ref (required) + :type source_ref: str + :param destination_branch: destination branch name (required) + :type destination_branch: str + :param id: Unique identifier of the merge task (required) + :type id: str + :param async_req: Whether to execute the request asynchronously. + :type async_req: bool, optional + :param _preload_content: if False, the ApiResponse.data will + be set to none and raw_data will store the + HTTP response body without reading/decoding. + Default is True. + :type _preload_content: bool, optional + :param _return_http_data_only: response data instead of ApiResponse + object with status code, headers, etc + :type _return_http_data_only: bool, optional + :param _request_timeout: timeout setting for this request. If one + number provided, it will be total request + timeout. It can also be a pair (tuple) of + (connection, read) timeouts. + :param _request_auth: set to override the auth_settings for a single + request; this effectively ignores the authentication + in the spec for a single request. + :type _request_auth: dict, optional + :type _content_type: string, optional: force content-type for the request + :return: Returns the result object. + If the method is called asynchronously, + returns the request thread.
+ :rtype: tuple(MergeStatus, status_code(int), headers(HTTPHeaderDict)) + """ + + _params = locals() + + _all_params = [ + 'repository', + 'source_ref', + 'destination_branch', + 'id' + ] + _all_params.extend( + [ + 'async_req', + '_return_http_data_only', + '_preload_content', + '_request_timeout', + '_request_auth', + '_content_type', + '_headers' + ] + ) + + # validate the arguments + for _key, _val in _params['kwargs'].items(): + if _key not in _all_params: + raise ApiTypeError( + "Got an unexpected keyword argument '%s'" + " to method merge_into_branch_status" % _key + ) + _params[_key] = _val + del _params['kwargs'] + + _collection_formats = {} + + # process the path parameters + _path_params = {} + if _params['repository']: + _path_params['repository'] = _params['repository'] + + if _params['source_ref']: + _path_params['sourceRef'] = _params['source_ref'] + + if _params['destination_branch']: + _path_params['destinationBranch'] = _params['destination_branch'] + + + # process the query parameters + _query_params = [] + if _params.get('id') is not None: # noqa: E501 + _query_params.append(('id', _params['id'])) + + # process the header parameters + _header_params = dict(_params.get('_headers', {})) + # process the form parameters + _form_params = [] + _files = {} + # process the body parameter + _body_params = None + # set the HTTP header `Accept` + _header_params['Accept'] = self.api_client.select_header_accept( + ['application/json']) # noqa: E501 + + # authentication setting + _auth_settings = ['basic_auth', 'cookie_auth', 'oidc_auth', 'saml_auth', 'jwt_token'] # noqa: E501 + + _response_types_map = { + '200': "MergeStatus", + '400': "MergeStatus", + '401': "MergeStatus", + '403': "MergeStatus", + '404': "MergeStatus", + '409': "MergeStatus", + '412': "MergeStatus", + '429': "MergeStatus", + '501': "MergeStatus", + } + + return self.api_client.call_api( + '/repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch}/status', 'GET', + _path_params, + _query_params, + _header_params, + body=_body_params, + post_params=_form_params, + files=_files, + response_types_map=_response_types_map, + auth_settings=_auth_settings, + async_req=_params.get('async_req'), + _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501 + _preload_content=_params.get('_preload_content', True), + _request_timeout=_params.get('_request_timeout'), + collection_formats=_collection_formats, + _request_auth=_params.get('_request_auth')) diff --git a/clients/python/lakefs_sdk/models/__init__.py b/clients/python/lakefs_sdk/models/__init__.py index b1cd9f77e3b..3352cb65b04 100644 --- a/clients/python/lakefs_sdk/models/__init__.py +++ b/clients/python/lakefs_sdk/models/__init__.py @@ -31,6 +31,7 @@ from lakefs_sdk.models.commit_list import CommitList from lakefs_sdk.models.commit_overrides import CommitOverrides from lakefs_sdk.models.commit_record_creation import CommitRecordCreation +from lakefs_sdk.models.commit_status import CommitStatus from lakefs_sdk.models.complete_presign_multipart_upload import CompletePresignMultipartUpload from lakefs_sdk.models.config import Config from lakefs_sdk.models.copy_part_source import CopyPartSource @@ -73,6 +74,7 @@ from lakefs_sdk.models.login_information import LoginInformation from lakefs_sdk.models.merge import Merge from lakefs_sdk.models.merge_result import MergeResult +from lakefs_sdk.models.merge_status import MergeStatus from lakefs_sdk.models.meta_range_creation import MetaRangeCreation from lakefs_sdk.models.meta_range_creation_response 
import MetaRangeCreationResponse from lakefs_sdk.models.object_copy_creation import ObjectCopyCreation diff --git a/clients/python/lakefs_sdk/models/commit_status.py b/clients/python/lakefs_sdk/models/commit_status.py new file mode 100644 index 00000000000..50db85317b9 --- /dev/null +++ b/clients/python/lakefs_sdk/models/commit_status.py @@ -0,0 +1,91 @@ +# coding: utf-8 + +""" + lakeFS API + + lakeFS HTTP API + + The version of the OpenAPI document: 1.0.0 + Contact: services@treeverse.io + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from datetime import datetime +from typing import Optional +try: + from pydantic.v1 import BaseModel, Field, StrictBool, StrictStr +except ImportError: + from pydantic import BaseModel, Field, StrictBool, StrictStr +from lakefs_sdk.models.commit import Commit +from lakefs_sdk.models.error import Error + +class CommitStatus(BaseModel): + """ + CommitStatus + """ + task_id: StrictStr = Field(..., description="the id of the async commit task") + completed: StrictBool = Field(..., description="true if the task has completed (either successfully or with an error)") + update_time: datetime = Field(..., description="last time the task status was updated") + result: Optional[Commit] = None + error: Optional[Error] = None + __properties = ["task_id", "completed", "update_time", "result", "error"] + + class Config: + """Pydantic configuration""" + allow_population_by_field_name = True + validate_assignment = True + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.dict(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> CommitStatus: + """Create an instance of CommitStatus from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self): + """Returns the dictionary representation of the model using alias""" + _dict = self.dict(by_alias=True, + exclude={ + }, + exclude_none=True) + # override the default output from pydantic by calling `to_dict()` of result + if self.result: + _dict['result'] = self.result.to_dict() + # override the default output from pydantic by calling `to_dict()` of error + if self.error: + _dict['error'] = self.error.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: dict) -> CommitStatus: + """Create an instance of CommitStatus from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return CommitStatus.parse_obj(obj) + + _obj = CommitStatus.parse_obj({ + "task_id": obj.get("task_id"), + "completed": obj.get("completed"), + "update_time": obj.get("update_time"), + "result": Commit.from_dict(obj.get("result")) if obj.get("result") is not None else None, + "error": Error.from_dict(obj.get("error")) if obj.get("error") is not None else None + }) + return _obj + + diff --git a/clients/python/lakefs_sdk/models/merge_status.py b/clients/python/lakefs_sdk/models/merge_status.py new file mode 100644 index 00000000000..c932c782b1b --- /dev/null +++ b/clients/python/lakefs_sdk/models/merge_status.py @@ -0,0 +1,91 @@ +# coding: utf-8 + +""" + lakeFS API + + lakeFS HTTP API + + The version of the OpenAPI document: 1.0.0 + Contact: services@treeverse.io + Generated by OpenAPI Generator 
(https://openapi-generator.tech) + + Do not edit the class manually. +""" # noqa: E501 + + +from __future__ import annotations +import pprint +import re # noqa: F401 +import json + +from datetime import datetime +from typing import Optional +try: + from pydantic.v1 import BaseModel, Field, StrictBool, StrictStr +except ImportError: + from pydantic import BaseModel, Field, StrictBool, StrictStr +from lakefs_sdk.models.error import Error +from lakefs_sdk.models.merge_result import MergeResult + +class MergeStatus(BaseModel): + """ + MergeStatus + """ + task_id: StrictStr = Field(..., description="the id of the async merge task") + completed: StrictBool = Field(..., description="true if the task has completed (either successfully or with an error)") + update_time: datetime = Field(..., description="last time the task status was updated") + result: Optional[MergeResult] = None + error: Optional[Error] = None + __properties = ["task_id", "completed", "update_time", "result", "error"] + + class Config: + """Pydantic configuration""" + allow_population_by_field_name = True + validate_assignment = True + + def to_str(self) -> str: + """Returns the string representation of the model using alias""" + return pprint.pformat(self.dict(by_alias=True)) + + def to_json(self) -> str: + """Returns the JSON representation of the model using alias""" + return json.dumps(self.to_dict()) + + @classmethod + def from_json(cls, json_str: str) -> MergeStatus: + """Create an instance of MergeStatus from a JSON string""" + return cls.from_dict(json.loads(json_str)) + + def to_dict(self): + """Returns the dictionary representation of the model using alias""" + _dict = self.dict(by_alias=True, + exclude={ + }, + exclude_none=True) + # override the default output from pydantic by calling `to_dict()` of result + if self.result: + _dict['result'] = self.result.to_dict() + # override the default output from pydantic by calling `to_dict()` of error + if self.error: + _dict['error'] = self.error.to_dict() + return _dict + + @classmethod + def from_dict(cls, obj: dict) -> MergeStatus: + """Create an instance of MergeStatus from a dict""" + if obj is None: + return None + + if not isinstance(obj, dict): + return MergeStatus.parse_obj(obj) + + _obj = MergeStatus.parse_obj({ + "task_id": obj.get("task_id"), + "completed": obj.get("completed"), + "update_time": obj.get("update_time"), + "result": MergeResult.from_dict(obj.get("result")) if obj.get("result") is not None else None, + "error": Error.from_dict(obj.get("error")) if obj.get("error") is not None else None + }) + return _obj + + diff --git a/clients/python/test/test_commit_status.py b/clients/python/test/test_commit_status.py new file mode 100644 index 00000000000..fbbdd848975 --- /dev/null +++ b/clients/python/test/test_commit_status.py @@ -0,0 +1,74 @@ +# coding: utf-8 + +""" + lakeFS API + + lakeFS HTTP API + + The version of the OpenAPI document: 1.0.0 + Contact: services@treeverse.io + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest +import datetime + +from lakefs_sdk.models.commit_status import CommitStatus # noqa: E501 + +class TestCommitStatus(unittest.TestCase): + """CommitStatus unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> CommitStatus: + """Test CommitStatus + include_option is a boolean, when False only required + params are included, when True both required and + optional params are included """ + # uncomment below to create an instance of `CommitStatus` + """ + model = CommitStatus() # noqa: E501 + if include_optional: + return CommitStatus( + task_id = '', + completed = True, + update_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), + result = lakefs_sdk.models.commit.Commit( + id = '', + parents = [ + '' + ], + committer = '', + message = '', + creation_date = 56, + meta_range_id = '', + metadata = { + 'key' : '' + }, + generation = 56, + version = 0, ), + error = lakefs_sdk.models.error.Error( + message = '', ) + ) + else: + return CommitStatus( + task_id = '', + completed = True, + update_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), + ) + """ + + def testCommitStatus(self): + """Test CommitStatus""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + +if __name__ == '__main__': + unittest.main() diff --git a/clients/python/test/test_commits_api.py b/clients/python/test/test_commits_api.py index 928d2035bbe..b453a016a9a 100644 --- a/clients/python/test/test_commits_api.py +++ b/clients/python/test/test_commits_api.py @@ -34,6 +34,20 @@ def test_commit(self) -> None: """ pass + def test_commit_async(self) -> None: + """Test case for commit_async + + create commit asynchronously # noqa: E501 + """ + pass + + def test_commit_status(self) -> None: + """Test case for commit_status + + get status of async commit operation # noqa: E501 + """ + pass + def test_get_commit(self) -> None: """Test case for get_commit diff --git a/clients/python/test/test_merge_status.py b/clients/python/test/test_merge_status.py new file mode 100644 index 00000000000..4517050d04d --- /dev/null +++ b/clients/python/test/test_merge_status.py @@ -0,0 +1,62 @@ +# coding: utf-8 + +""" + lakeFS API + + lakeFS HTTP API + + The version of the OpenAPI document: 1.0.0 + Contact: services@treeverse.io + Generated by OpenAPI Generator (https://openapi-generator.tech) + + Do not edit the class manually. 
+""" # noqa: E501 + + +import unittest +import datetime + +from lakefs_sdk.models.merge_status import MergeStatus # noqa: E501 + +class TestMergeStatus(unittest.TestCase): + """MergeStatus unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def make_instance(self, include_optional) -> MergeStatus: + """Test MergeStatus + include_option is a boolean, when False only required + params are included, when True both required and + optional params are included """ + # uncomment below to create an instance of `MergeStatus` + """ + model = MergeStatus() # noqa: E501 + if include_optional: + return MergeStatus( + task_id = '', + completed = True, + update_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), + result = lakefs_sdk.models.merge_result.MergeResult( + reference = '', ), + error = lakefs_sdk.models.error.Error( + message = '', ) + ) + else: + return MergeStatus( + task_id = '', + completed = True, + update_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), + ) + """ + + def testMergeStatus(self): + """Test MergeStatus""" + # inst_req_only = self.make_instance(include_optional=False) + # inst_req_and_optional = self.make_instance(include_optional=True) + +if __name__ == '__main__': + unittest.main() diff --git a/clients/python/test/test_refs_api.py b/clients/python/test/test_refs_api.py index 3b1e92b371e..9f6dd3b495d 100644 --- a/clients/python/test/test_refs_api.py +++ b/clients/python/test/test_refs_api.py @@ -55,6 +55,20 @@ def test_merge_into_branch(self) -> None: """ pass + def test_merge_into_branch_async(self) -> None: + """Test case for merge_into_branch_async + + merge references asynchronously # noqa: E501 + """ + pass + + def test_merge_into_branch_status(self) -> None: + """Test case for merge_into_branch_status + + get status of async merge operation # noqa: E501 + """ + pass + if __name__ == '__main__': unittest.main() diff --git a/clients/rust/.openapi-generator/FILES b/clients/rust/.openapi-generator/FILES index 1f0d16ec585..c288e4c7575 100644 --- a/clients/rust/.openapi-generator/FILES +++ b/clients/rust/.openapi-generator/FILES @@ -22,6 +22,7 @@ docs/CommitCreation.md docs/CommitList.md docs/CommitOverrides.md docs/CommitRecordCreation.md +docs/CommitStatus.md docs/CommitsApi.md docs/CompletePresignMultipartUpload.md docs/Config.md @@ -72,6 +73,7 @@ docs/LoginConfig.md docs/LoginInformation.md docs/Merge.md docs/MergeResult.md +docs/MergeStatus.md docs/MetaRangeCreation.md docs/MetaRangeCreationResponse.md docs/MetadataApi.md @@ -180,6 +182,7 @@ src/models/commit_creation.rs src/models/commit_list.rs src/models/commit_overrides.rs src/models/commit_record_creation.rs +src/models/commit_status.rs src/models/complete_presign_multipart_upload.rs src/models/config.rs src/models/copy_part_source.rs @@ -222,6 +225,7 @@ src/models/login_config.rs src/models/login_information.rs src/models/merge.rs src/models/merge_result.rs +src/models/merge_status.rs src/models/meta_range_creation.rs src/models/meta_range_creation_response.rs src/models/mod.rs diff --git a/clients/rust/README.md b/clients/rust/README.md index 33a419ac1df..a714b9c6a4e 100644 --- a/clients/rust/README.md +++ b/clients/rust/README.md @@ -79,6 +79,8 @@ Class | Method | HTTP request | Description *BranchesApi* | [**reset_branch**](docs/BranchesApi.md#reset_branch) | **PUT** /repositories/{repository}/branches/{branch} | reset branch *BranchesApi* | [**revert_branch**](docs/BranchesApi.md#revert_branch) | **POST** 
/repositories/{repository}/branches/{branch}/revert | revert *CommitsApi* | [**commit**](docs/CommitsApi.md#commit) | **POST** /repositories/{repository}/branches/{branch}/commits | create commit +*CommitsApi* | [**commit_async**](docs/CommitsApi.md#commit_async) | **POST** /repositories/{repository}/branches/{branch}/commits/async | create commit asynchronously +*CommitsApi* | [**commit_status**](docs/CommitsApi.md#commit_status) | **GET** /repositories/{repository}/branches/{branch}/commits/status | get status of async commit operation *CommitsApi* | [**get_commit**](docs/CommitsApi.md#get_commit) | **GET** /repositories/{repository}/commits/{commitId} | get commit *ConfigApi* | [**get_config**](docs/ConfigApi.md#get_config) | **GET** /config | *ExperimentalApi* | [**abort_presign_multipart_upload**](docs/ExperimentalApi.md#abort_presign_multipart_upload) | **DELETE** /repositories/{repository}/branches/{branch}/staging/pmpu/{uploadId} | Abort a presign multipart upload @@ -164,6 +166,8 @@ Class | Method | HTTP request | Description *RefsApi* | [**find_merge_base**](docs/RefsApi.md#find_merge_base) | **GET** /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch} | find the merge base for 2 references *RefsApi* | [**log_commits**](docs/RefsApi.md#log_commits) | **GET** /repositories/{repository}/refs/{ref}/commits | get commit log from ref. If both objects and prefixes are empty, return all commits. *RefsApi* | [**merge_into_branch**](docs/RefsApi.md#merge_into_branch) | **POST** /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch} | merge references +*RefsApi* | [**merge_into_branch_async**](docs/RefsApi.md#merge_into_branch_async) | **POST** /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch}/async | merge references asynchronously +*RefsApi* | [**merge_into_branch_status**](docs/RefsApi.md#merge_into_branch_status) | **GET** /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch}/status | get status of async merge operation *RemotesApi* | [**pull_iceberg_table**](docs/RemotesApi.md#pull_iceberg_table) | **POST** /iceberg/remotes/{catalog}/pull | take a table previously pushed from lakeFS into a remote catalog, and pull its state back into the originating lakeFS repository *RemotesApi* | [**push_iceberg_table**](docs/RemotesApi.md#push_iceberg_table) | **POST** /iceberg/remotes/{catalog}/push | register existing lakeFS table in remote catalog *RepositoriesApi* | [**create_repository**](docs/RepositoriesApi.md#create_repository) | **POST** /repositories | create repository @@ -206,6 +210,7 @@ Class | Method | HTTP request | Description - [CommitList](docs/CommitList.md) - [CommitOverrides](docs/CommitOverrides.md) - [CommitRecordCreation](docs/CommitRecordCreation.md) + - [CommitStatus](docs/CommitStatus.md) - [CompletePresignMultipartUpload](docs/CompletePresignMultipartUpload.md) - [Config](docs/Config.md) - [CopyPartSource](docs/CopyPartSource.md) @@ -248,6 +253,7 @@ Class | Method | HTTP request | Description - [LoginInformation](docs/LoginInformation.md) - [Merge](docs/Merge.md) - [MergeResult](docs/MergeResult.md) + - [MergeStatus](docs/MergeStatus.md) - [MetaRangeCreation](docs/MetaRangeCreation.md) - [MetaRangeCreationResponse](docs/MetaRangeCreationResponse.md) - [ObjectCopyCreation](docs/ObjectCopyCreation.md) diff --git a/clients/rust/docs/CommitStatus.md b/clients/rust/docs/CommitStatus.md new file mode 100644 index 00000000000..4e4410bfe30 --- /dev/null +++ b/clients/rust/docs/CommitStatus.md @@ -0,0 +1,15 @@ +# 
CommitStatus + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**task_id** | **String** | the id of the async commit task | +**completed** | **bool** | true if the task has completed (either successfully or with an error) | +**update_time** | **String** | last time the task status was updated | +**result** | Option<[**models::Commit**](Commit.md)> | | [optional] +**error** | Option<[**models::Error**](Error.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/clients/rust/docs/CommitsApi.md b/clients/rust/docs/CommitsApi.md index 7abad52e53c..c534b4607f2 100644 --- a/clients/rust/docs/CommitsApi.md +++ b/clients/rust/docs/CommitsApi.md @@ -5,6 +5,8 @@ All URIs are relative to */api/v1* Method | HTTP request | Description ------------- | ------------- | ------------- [**commit**](CommitsApi.md#commit) | **POST** /repositories/{repository}/branches/{branch}/commits | create commit +[**commit_async**](CommitsApi.md#commit_async) | **POST** /repositories/{repository}/branches/{branch}/commits/async | create commit asynchronously +[**commit_status**](CommitsApi.md#commit_status) | **GET** /repositories/{repository}/branches/{branch}/commits/status | get status of async commit operation [**get_commit**](CommitsApi.md#get_commit) | **GET** /repositories/{repository}/commits/{commitId} | get commit @@ -40,6 +42,67 @@ Name | Type | Description | Required | Notes [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) +## commit_async + +> models::TaskCreation commit_async(repository, branch, commit_creation, source_metarange) +create commit asynchronously + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**repository** | **String** | | [required] | +**branch** | **String** | | [required] | +**commit_creation** | [**CommitCreation**](CommitCreation.md) | | [required] | +**source_metarange** | Option<**String**> | The source metarange to commit. Branch must not have uncommitted changes. 
| | + +### Return type + +[**models::TaskCreation**](TaskCreation.md) + +### Authorization + +[basic_auth](../README.md#basic_auth), [cookie_auth](../README.md#cookie_auth), [oidc_auth](../README.md#oidc_auth), [saml_auth](../README.md#saml_auth), [jwt_token](../README.md#jwt_token) + +### HTTP request headers + +- **Content-Type**: application/json +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## commit_status + +> models::CommitStatus commit_status(repository, branch, id) +get status of async commit operation + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**repository** | **String** | | [required] | +**branch** | **String** | | [required] | +**id** | **String** | Unique identifier of the commit task | [required] | + +### Return type + +[**models::CommitStatus**](CommitStatus.md) + +### Authorization + +[basic_auth](../README.md#basic_auth), [cookie_auth](../README.md#cookie_auth), [oidc_auth](../README.md#oidc_auth), [saml_auth](../README.md#saml_auth), [jwt_token](../README.md#jwt_token) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + ## get_commit > models::Commit get_commit(repository, commit_id) diff --git a/clients/rust/docs/MergeStatus.md b/clients/rust/docs/MergeStatus.md new file mode 100644 index 00000000000..41b94f9334b --- /dev/null +++ b/clients/rust/docs/MergeStatus.md @@ -0,0 +1,15 @@ +# MergeStatus + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**task_id** | **String** | the id of the async merge task | +**completed** | **bool** | true if the task has completed (either successfully or with an error) | +**update_time** | **String** | last time the task status was updated | +**result** | Option<[**models::MergeResult**](MergeResult.md)> | | [optional] +**error** | Option<[**models::Error**](Error.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/clients/rust/docs/RefsApi.md b/clients/rust/docs/RefsApi.md index a2354eb4b41..3bc302c1101 100644 --- a/clients/rust/docs/RefsApi.md +++ b/clients/rust/docs/RefsApi.md @@ -8,6 +8,8 @@ Method | HTTP request | Description [**find_merge_base**](RefsApi.md#find_merge_base) | **GET** /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch} | find the merge base for 2 references [**log_commits**](RefsApi.md#log_commits) | **GET** /repositories/{repository}/refs/{ref}/commits | get commit log from ref. If both objects and prefixes are empty, return all commits. 
[**merge_into_branch**](RefsApi.md#merge_into_branch) | **POST** /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch} | merge references +[**merge_into_branch_async**](RefsApi.md#merge_into_branch_async) | **POST** /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch}/async | merge references asynchronously +[**merge_into_branch_status**](RefsApi.md#merge_into_branch_status) | **GET** /repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch}/status | get status of async merge operation @@ -144,3 +146,65 @@ Name | Type | Description | Required | Notes [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +## merge_into_branch_async + +> models::TaskCreation merge_into_branch_async(repository, source_ref, destination_branch, merge) +merge references asynchronously + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**repository** | **String** | | [required] | +**source_ref** | **String** | source ref | [required] | +**destination_branch** | **String** | destination branch name | [required] | +**merge** | Option<[**Merge**](Merge.md)> | | | + +### Return type + +[**models::TaskCreation**](TaskCreation.md) + +### Authorization + +[basic_auth](../README.md#basic_auth), [cookie_auth](../README.md#cookie_auth), [oidc_auth](../README.md#oidc_auth), [saml_auth](../README.md#saml_auth), [jwt_token](../README.md#jwt_token) + +### HTTP request headers + +- **Content-Type**: application/json +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + + +## merge_into_branch_status + +> models::MergeStatus merge_into_branch_status(repository, source_ref, destination_branch, id) +get status of async merge operation + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**repository** | **String** | | [required] | +**source_ref** | **String** | source ref | [required] | +**destination_branch** | **String** | destination branch name | [required] | +**id** | **String** | Unique identifier of the merge task | [required] | + +### Return type + +[**models::MergeStatus**](MergeStatus.md) + +### Authorization + +[basic_auth](../README.md#basic_auth), [cookie_auth](../README.md#cookie_auth), [oidc_auth](../README.md#oidc_auth), [saml_auth](../README.md#saml_auth), [jwt_token](../README.md#jwt_token) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/clients/rust/src/apis/commits_api.rs b/clients/rust/src/apis/commits_api.rs index 4e5a75d6a70..7a08a054983 100644 --- a/clients/rust/src/apis/commits_api.rs +++ b/clients/rust/src/apis/commits_api.rs @@ -30,6 +30,36 @@ pub enum CommitError { UnknownValue(serde_json::Value), } +/// struct for typed errors of method [`commit_async`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum CommitAsyncError { + Status400(models::Error), + Status401(models::Error), + Status403(models::Error), + 
Status404(models::Error),
+    Status429(),
+    Status501(models::Error),
+    DefaultResponse(models::Error),
+    UnknownValue(serde_json::Value),
+}
+
+/// struct for typed errors of method [`commit_status`]
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum CommitStatusError {
+    Status400(models::CommitStatus),
+    Status401(models::CommitStatus),
+    Status403(models::CommitStatus),
+    Status404(models::CommitStatus),
+    Status409(models::CommitStatus),
+    Status412(models::CommitStatus),
+    Status429(models::CommitStatus),
+    Status501(models::CommitStatus),
+    DefaultResponse(models::CommitStatus),
+    UnknownValue(serde_json::Value),
+}
+
 /// struct for typed errors of method [`get_commit`]
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(untagged)]
@@ -80,6 +110,77 @@ pub async fn commit(configuration: &configuration::Configuration, repository: &s
     }
 }
 
+pub async fn commit_async(configuration: &configuration::Configuration, repository: &str, branch: &str, commit_creation: models::CommitCreation, source_metarange: Option<&str>) -> Result<models::TaskCreation, Error<CommitAsyncError>> {
+    let local_var_configuration = configuration;
+
+    let local_var_client = &local_var_configuration.client;
+
+    let local_var_uri_str = format!("{}/repositories/{repository}/branches/{branch}/commits/async", local_var_configuration.base_path, repository=crate::apis::urlencode(repository), branch=crate::apis::urlencode(branch));
+    let mut local_var_req_builder = local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str());
+
+    if let Some(ref local_var_str) = source_metarange {
+        local_var_req_builder = local_var_req_builder.query(&[("source_metarange", &local_var_str.to_string())]);
+    }
+    if let Some(ref local_var_user_agent) = local_var_configuration.user_agent {
+        local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone());
+    }
+    if let Some(ref local_var_auth_conf) = local_var_configuration.basic_auth {
+        local_var_req_builder = local_var_req_builder.basic_auth(local_var_auth_conf.0.to_owned(), local_var_auth_conf.1.to_owned());
+    };
+    if let Some(ref local_var_token) = local_var_configuration.bearer_access_token {
+        local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned());
+    };
+    local_var_req_builder = local_var_req_builder.json(&commit_creation);
+
+    let local_var_req = local_var_req_builder.build()?;
+    let local_var_resp = local_var_client.execute(local_var_req).await?;
+
+    let local_var_status = local_var_resp.status();
+    let local_var_content = local_var_resp.text().await?;
+
+    if !local_var_status.is_client_error() && !local_var_status.is_server_error() {
+        serde_json::from_str(&local_var_content).map_err(Error::from)
+    } else {
+        let local_var_entity: Option<CommitAsyncError> = serde_json::from_str(&local_var_content).ok();
+        let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity };
+        Err(Error::ResponseError(local_var_error))
+    }
+}
+
+pub async fn commit_status(configuration: &configuration::Configuration, repository: &str, branch: &str, id: &str) -> Result<models::CommitStatus, Error<CommitStatusError>> {
+    let local_var_configuration = configuration;
+
+    let local_var_client = &local_var_configuration.client;
+
+    let local_var_uri_str = format!("{}/repositories/{repository}/branches/{branch}/commits/status", local_var_configuration.base_path, repository=crate::apis::urlencode(repository), branch=crate::apis::urlencode(branch));
+    let mut local_var_req_builder = local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str());
+
+    local_var_req_builder = local_var_req_builder.query(&[("id", &id.to_string())]);
+    if let Some(ref local_var_user_agent) = local_var_configuration.user_agent {
+        local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone());
+    }
+    if let Some(ref local_var_auth_conf) = local_var_configuration.basic_auth {
+        local_var_req_builder = local_var_req_builder.basic_auth(local_var_auth_conf.0.to_owned(), local_var_auth_conf.1.to_owned());
+    };
+    if let Some(ref local_var_token) = local_var_configuration.bearer_access_token {
+        local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned());
+    };
+
+    let local_var_req = local_var_req_builder.build()?;
+    let local_var_resp = local_var_client.execute(local_var_req).await?;
+
+    let local_var_status = local_var_resp.status();
+    let local_var_content = local_var_resp.text().await?;
+
+    if !local_var_status.is_client_error() && !local_var_status.is_server_error() {
+        serde_json::from_str(&local_var_content).map_err(Error::from)
+    } else {
+        let local_var_entity: Option<CommitStatusError> = serde_json::from_str(&local_var_content).ok();
+        let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity };
+        Err(Error::ResponseError(local_var_error))
+    }
+}
+
 pub async fn get_commit(configuration: &configuration::Configuration, repository: &str, commit_id: &str) -> Result<models::Commit, Error<GetCommitError>> {
     let local_var_configuration = configuration;
 
diff --git a/clients/rust/src/apis/refs_api.rs b/clients/rust/src/apis/refs_api.rs
index 4950945827d..feb5f65fc7c 100644
--- a/clients/rust/src/apis/refs_api.rs
+++ b/clients/rust/src/apis/refs_api.rs
@@ -66,6 +66,36 @@ pub enum MergeIntoBranchError {
     UnknownValue(serde_json::Value),
 }
 
+/// struct for typed errors of method [`merge_into_branch_async`]
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum MergeIntoBranchAsyncError {
+    Status400(models::Error),
+    Status401(models::Error),
+    Status403(models::Error),
+    Status404(models::Error),
+    Status429(),
+    Status501(models::Error),
+    DefaultResponse(models::Error),
+    UnknownValue(serde_json::Value),
+}
+
+/// struct for typed errors of method [`merge_into_branch_status`]
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum MergeIntoBranchStatusError {
+    Status400(models::MergeStatus),
+    Status401(models::MergeStatus),
+    Status403(models::MergeStatus),
+    Status404(models::MergeStatus),
+    Status409(models::MergeStatus),
+    Status412(models::MergeStatus),
+    Status429(models::MergeStatus),
+    Status501(models::MergeStatus),
+    DefaultResponse(models::MergeStatus),
+    UnknownValue(serde_json::Value),
+}
+
 pub async fn diff_refs(configuration: &configuration::Configuration, repository: &str, left_ref: &str, right_ref: &str, after: Option<&str>, amount: Option<i32>, prefix: Option<&str>, delimiter: Option<&str>, r#type: Option<&str>, include_right_stats: Option<bool>) -> Result<models::DiffList, Error<DiffRefsError>> {
     let local_var_configuration = configuration;
 
@@ -248,3 +278,71 @@ pub async fn merge_into_branch(configuration: &configuration::Configuration, rep
     }
 }
 
+pub async fn merge_into_branch_async(configuration: &configuration::Configuration, repository: &str, source_ref: &str, destination_branch: &str, merge: Option<models::Merge>) -> Result<models::TaskCreation, Error<MergeIntoBranchAsyncError>> {
+    let local_var_configuration = configuration;
+
+    let local_var_client = &local_var_configuration.client;
+
+    let local_var_uri_str = format!("{}/repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch}/async", local_var_configuration.base_path, repository=crate::apis::urlencode(repository), sourceRef=crate::apis::urlencode(source_ref), destinationBranch=crate::apis::urlencode(destination_branch));
+    let mut local_var_req_builder = local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str());
+
+    if let Some(ref local_var_user_agent) = local_var_configuration.user_agent {
+        local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone());
+    }
+    if let Some(ref local_var_auth_conf) = local_var_configuration.basic_auth {
+        local_var_req_builder = local_var_req_builder.basic_auth(local_var_auth_conf.0.to_owned(), local_var_auth_conf.1.to_owned());
+    };
+    if let Some(ref local_var_token) = local_var_configuration.bearer_access_token {
+        local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned());
+    };
+    local_var_req_builder = local_var_req_builder.json(&merge);
+
+    let local_var_req = local_var_req_builder.build()?;
+    let local_var_resp = local_var_client.execute(local_var_req).await?;
+
+    let local_var_status = local_var_resp.status();
+    let local_var_content = local_var_resp.text().await?;
+
+    if !local_var_status.is_client_error() && !local_var_status.is_server_error() {
+        serde_json::from_str(&local_var_content).map_err(Error::from)
+    } else {
+        let local_var_entity: Option<MergeIntoBranchAsyncError> = serde_json::from_str(&local_var_content).ok();
+        let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity };
+        Err(Error::ResponseError(local_var_error))
+    }
+}
+
+pub async fn merge_into_branch_status(configuration: &configuration::Configuration, repository: &str, source_ref: &str, destination_branch: &str, id: &str) -> Result<models::MergeStatus, Error<MergeIntoBranchStatusError>> {
+    let local_var_configuration = configuration;
+
+    let local_var_client = &local_var_configuration.client;
+
+    let local_var_uri_str = format!("{}/repositories/{repository}/refs/{sourceRef}/merge/{destinationBranch}/status", local_var_configuration.base_path, repository=crate::apis::urlencode(repository), sourceRef=crate::apis::urlencode(source_ref), destinationBranch=crate::apis::urlencode(destination_branch));
+    let mut local_var_req_builder = local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str());
+
+    local_var_req_builder = local_var_req_builder.query(&[("id", &id.to_string())]);
+    if let Some(ref local_var_user_agent) = local_var_configuration.user_agent {
+        local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone());
+    }
+    if let Some(ref local_var_auth_conf) = local_var_configuration.basic_auth {
+        local_var_req_builder = local_var_req_builder.basic_auth(local_var_auth_conf.0.to_owned(), local_var_auth_conf.1.to_owned());
+    };
+    if let Some(ref local_var_token) = local_var_configuration.bearer_access_token {
+        local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned());
+    };
+
+    let local_var_req = local_var_req_builder.build()?;
+    let local_var_resp = local_var_client.execute(local_var_req).await?;
+
+    let local_var_status = local_var_resp.status();
+    let local_var_content = local_var_resp.text().await?;
+
+    if !local_var_status.is_client_error() && !local_var_status.is_server_error() {
+        serde_json::from_str(&local_var_content).map_err(Error::from)
+    } else {
+        let local_var_entity: Option<MergeIntoBranchStatusError> = serde_json::from_str(&local_var_content).ok();
+        let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity };
+        Err(Error::ResponseError(local_var_error))
+    }
+}
+
diff --git a/clients/rust/src/models/commit_status.rs b/clients/rust/src/models/commit_status.rs
new file mode 100644
index 00000000000..e715dda6df7
--- /dev/null
+++ b/clients/rust/src/models/commit_status.rs
@@ -0,0 +1,41 @@
+/*
+ * lakeFS API
+ *
+ * lakeFS HTTP API
+ *
+ * The version of the OpenAPI document: 1.0.0
+ * Contact: services@treeverse.io
+ * Generated by: https://openapi-generator.tech
+ */
+
+use crate::models;
+
+#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
+pub struct CommitStatus {
+    /// the id of the async commit task
+    #[serde(rename = "task_id")]
+    pub task_id: String,
+    /// true if the task has completed (either successfully or with an error)
+    #[serde(rename = "completed")]
+    pub completed: bool,
+    /// last time the task status was updated
+    #[serde(rename = "update_time")]
+    pub update_time: String,
+    #[serde(rename = "result", skip_serializing_if = "Option::is_none")]
+    pub result: Option<Box<models::Commit>>,
+    #[serde(rename = "error", skip_serializing_if = "Option::is_none")]
+    pub error: Option<Box<models::Error>>,
+}
+
+impl CommitStatus {
+    pub fn new(task_id: String, completed: bool, update_time: String) -> CommitStatus {
+        CommitStatus {
+            task_id,
+            completed,
+            update_time,
+            result: None,
+            error: None,
+        }
+    }
+}
+
diff --git a/clients/rust/src/models/merge_status.rs b/clients/rust/src/models/merge_status.rs
new file mode 100644
index 00000000000..7b5ee7d0d28
--- /dev/null
+++ b/clients/rust/src/models/merge_status.rs
@@ -0,0 +1,41 @@
+/*
+ * lakeFS API
+ *
+ * lakeFS HTTP API
+ *
+ * The version of the OpenAPI document: 1.0.0
+ * Contact: services@treeverse.io
+ * Generated by: https://openapi-generator.tech
+ */
+
+use crate::models;
+
+#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)]
+pub struct MergeStatus {
+    /// the id of the async merge task
+    #[serde(rename = "task_id")]
+    pub task_id: String,
+    /// true if the task has completed (either successfully or with an error)
+    #[serde(rename = "completed")]
+    pub completed: bool,
+    /// last time the task status was updated
+    #[serde(rename = "update_time")]
+    pub update_time: String,
+    #[serde(rename = "result", skip_serializing_if = "Option::is_none")]
+    pub result: Option<Box<models::MergeResult>>,
+    #[serde(rename = "error", skip_serializing_if = "Option::is_none")]
+    pub error: Option<Box<models::Error>>,
+}
+
+impl MergeStatus {
+    pub fn new(task_id: String, completed: bool, update_time: String) -> MergeStatus {
+        MergeStatus {
+            task_id,
+            completed,
+            update_time,
+            result: None,
+            error: None,
+        }
+    }
+}
+
diff --git a/clients/rust/src/models/mod.rs b/clients/rust/src/models/mod.rs
index a8f68bc2065..6c78a506d5d 100644
--- a/clients/rust/src/models/mod.rs
+++ b/clients/rust/src/models/mod.rs
@@ -30,6 +30,8 @@ pub mod commit_overrides;
 pub use self::commit_overrides::CommitOverrides;
 pub mod commit_record_creation;
 pub use self::commit_record_creation::CommitRecordCreation;
+pub mod commit_status;
+pub use self::commit_status::CommitStatus;
 pub mod complete_presign_multipart_upload;
 pub use self::complete_presign_multipart_upload::CompletePresignMultipartUpload;
 pub mod config;
@@ -114,6 +116,8 @@ pub mod merge;
 pub use self::merge::Merge;
 pub mod merge_result;
 pub use self::merge_result::MergeResult;
+pub mod merge_status;
+pub use self::merge_status::MergeStatus;
 pub mod meta_range_creation;
 pub use self::meta_range_creation::MetaRangeCreation;
 pub mod meta_range_creation_response;
diff --git a/cmd/lakefs/cmd/run.go b/cmd/lakefs/cmd/run.go
index 
0592efec19f..ca928cd2b05 100644 --- a/cmd/lakefs/cmd/run.go +++ b/cmd/lakefs/cmd/run.go @@ -172,6 +172,8 @@ var runCmd = &cobra.Command{ } defer func() { _ = c.Close() }() + asyncOperationsHandler := catalogfactory.BuildAsyncOperationsHandler(c) + // Setup usage reporter - it is no longer possible to disable it usageReporter := stats.NewUsageReporter(metadata.InstallationID, kvStore) usageReporter.Start(ctx, baseCfg.UsageReport.FlushInterval, logger.WithField("service", "usage_report")) @@ -249,6 +251,7 @@ var runCmd = &cobra.Command{ migrator, bufferedCollector, actionsService, + asyncOperationsHandler, auditChecker, logger.WithField("service", "api_gateway"), baseCfg.Gateways.S3.DomainNames, diff --git a/modules/catalog/factory/build.go b/modules/catalog/factory/build.go index f421062a154..cc6cd56b780 100644 --- a/modules/catalog/factory/build.go +++ b/modules/catalog/factory/build.go @@ -2,6 +2,7 @@ package factory import ( "github.com/treeverse/lakefs/pkg/block" + "github.com/treeverse/lakefs/pkg/catalog" "github.com/treeverse/lakefs/pkg/config" "github.com/treeverse/lakefs/pkg/graveler" ) @@ -9,3 +10,7 @@ import ( func BuildConflictResolvers(cfg config.Config, block block.Adapter) []graveler.ConflictResolver { return nil } + +func BuildAsyncOperationsHandler(_ *catalog.Catalog) catalog.AsyncOperationsHandler { + return catalog.NewNoopAsyncOperationsHandler() +} diff --git a/pkg/api/controller.go b/pkg/api/controller.go index ee44bcb4415..627c4e3767f 100644 --- a/pkg/api/controller.go +++ b/pkg/api/controller.go @@ -102,6 +102,7 @@ type Controller struct { Migrator Migrator Collector stats.Collector Actions actionsHandler + asyncOpsHandler catalog.AsyncOperationsHandler AuditChecker AuditChecker Logger logging.Logger sessionStore sessions.Store @@ -125,6 +126,7 @@ func NewController( migrator Migrator, collector stats.Collector, actions actionsHandler, + asyncOpsHandler catalog.AsyncOperationsHandler, auditChecker AuditChecker, logger logging.Logger, sessionStore sessions.Store, @@ -134,7 +136,7 @@ func NewController( icebergSyncer icebergsync.Controller, loginTokenProvider authentication.LoginTokenProvider, ) *Controller { - return &Controller{ + controller := &Controller{ Config: cfg, Catalog: catalog, Authenticator: authenticator, @@ -145,6 +147,7 @@ func NewController( Migrator: migrator, Collector: collector, Actions: actions, + asyncOpsHandler: asyncOpsHandler, AuditChecker: auditChecker, Logger: logger, sessionStore: sessionStore, @@ -154,6 +157,8 @@ func NewController( icebergSyncer: icebergSyncer, loginTokenProvider: loginTokenProvider, } + catalog.APIErrorCB = controller.HandleAPIErrorCallback + return controller } func (c *Controller) DeleteUser(w http.ResponseWriter, r *http.Request, userID string) { @@ -3101,7 +3106,7 @@ func (c *Controller) GetBranch(w http.ResponseWriter, r *http.Request, repositor writeResponse(w, r, http.StatusOK, response) } -func (c *Controller) handleAPIErrorCallback(ctx context.Context, w http.ResponseWriter, r *http.Request, err error, cb func(w http.ResponseWriter, r *http.Request, code int, v interface{})) bool { +func (c *Controller) HandleAPIErrorCallback(ctx context.Context, w http.ResponseWriter, r *http.Request, err error, cb func(w http.ResponseWriter, r *http.Request, code int, v interface{})) bool { // verify if request canceled even if there is no error, early exit point if httputil.IsRequestCanceled(r) { cb(w, r, httputil.HttpStatusClientClosedRequest, httputil.HttpStatusClientClosedRequestText) @@ -3189,23 +3194,29 @@ func (c 
*Controller) handleAPIErrorCallback(ctx context.Context, w http.Response case errors.Is(err, graveler.ErrTooManyTries): log.Debug("Retried too many times") cb(w, r, http.StatusTooManyRequests, "Too many attempts, try again later") + case errors.Is(err, kv.ErrSlowDown): log.Debug("KV Throttling") cb(w, r, http.StatusServiceUnavailable, "Throughput exceeded. Slow down and retry") + case errors.Is(err, graveler.ErrPreconditionFailed): log.Debug("Precondition failed") cb(w, r, http.StatusPreconditionFailed, "Precondition failed") + case errors.Is(err, authentication.ErrNotImplemented), errors.Is(err, auth.ErrNotImplemented), errors.Is(err, license.ErrNotImplemented), errors.Is(err, catalog.ErrNotImplemented): cb(w, r, http.StatusNotImplemented, "Not implemented") + case errors.Is(err, authentication.ErrInsufficientPermissions): c.Logger.WithContext(ctx).WithError(err).Info("User verification failed - insufficient permissions") cb(w, r, http.StatusUnauthorized, http.StatusText(http.StatusUnauthorized)) + case errors.Is(err, actions.ErrActionFailed): log.WithError(err).Debug("Precondition failed, aborted by action failure") cb(w, r, http.StatusPreconditionFailed, err) + default: c.Logger.WithContext(ctx).WithError(err).Error("API call returned status internal server error") cb(w, r, http.StatusInternalServerError, err) @@ -3215,7 +3226,7 @@ func (c *Controller) handleAPIErrorCallback(ctx context.Context, w http.Response } func (c *Controller) handleAPIError(ctx context.Context, w http.ResponseWriter, r *http.Request, err error) bool { - return c.handleAPIErrorCallback(ctx, w, r, err, writeError) + return c.HandleAPIErrorCallback(ctx, w, r, err, writeError) } func (c *Controller) ResetBranch(w http.ResponseWriter, r *http.Request, body apigen.ResetBranchJSONRequestBody, repository, branch string) { @@ -3460,6 +3471,96 @@ func (c *Controller) Commit(w http.ResponseWriter, r *http.Request, body apigen. 
commitResponse(w, r, newCommit) } +func (c *Controller) CommitAsync(w http.ResponseWriter, r *http.Request, body apigen.CommitAsyncJSONRequestBody, repository, branch string, params apigen.CommitAsyncParams) { + if !c.authorize(w, r, permissions.Node{ + Permission: permissions.Permission{ + Action: permissions.CreateCommitAction, + Resource: permissions.BranchArn(repository, branch), + }, + }) { + return + } + ctx := r.Context() + c.LogAction(ctx, "create_commit_async", r, repository, branch, "") + user, err := auth.GetUser(ctx) + if err != nil { + writeError(w, r, http.StatusUnauthorized, "user not found") + return + } + var metadata map[string]string + if body.Metadata != nil { + metadata = body.Metadata.AdditionalProperties + } + + taskID, err := c.asyncOpsHandler.SubmitCommit(ctx, repository, branch, body.Message, user.Committer(), metadata, body.Date, params.SourceMetarange, swag.BoolValue(body.AllowEmpty), graveler.WithForce(swag.BoolValue(body.Force))) + if c.handleAPIError(ctx, w, r, err) { + return + } + writeResponse(w, r, http.StatusAccepted, apigen.TaskCreation{ + Id: taskID, + }) +} + +func (c *Controller) CommitStatus(w http.ResponseWriter, r *http.Request, repository, branch string, params apigen.CommitStatusParams) { + if !c.authorize(w, r, permissions.Node{ + Permission: permissions.Permission{ + Action: permissions.CreateCommitAction, + Resource: permissions.BranchArn(repository, branch), + }, + }) { + return + } + ctx := r.Context() + c.LogAction(ctx, "create_commit_async_status", r, repository, branch, "") + taskID := params.Id + status, err := c.asyncOpsHandler.GetCommitStatus(ctx, repository, taskID) + if status == nil { + writeResponse(w, r, http.StatusInternalServerError, apigen.CommitStatus{ + TaskId: taskID, + Completed: true, + UpdateTime: time.Now(), + Error: &apigen.Error{ + Message: "failed to get commit status", + }, + }) + return + } + c.HandleAPIErrorCallback(ctx, nil, nil, err, catalog.SetTaskStatusCodeAndError(status.Task)) + + resp := apigen.CommitStatus{ + TaskId: status.Task.Id, + Completed: status.Task.Done, + } + if status.Task.UpdatedAt != nil { + resp.UpdateTime = status.Task.UpdatedAt.AsTime() + } + + if status.Task.ErrorMsg != "" { + resp.Error = &apigen.Error{ + Message: status.Task.ErrorMsg, + } + } + + if status.Info != nil { + resp.Result = &apigen.Commit{ + Id: status.Info.Id, + Parents: status.Info.Parents, + Committer: status.Info.Committer, + Message: status.Info.Message, + CreationDate: status.Info.CreationDate.AsTime().Unix(), + MetaRangeId: status.Info.MetaRangeId, + Metadata: &apigen.Commit_Metadata{ + AdditionalProperties: status.Info.Metadata, + }, + Generation: apiutil.Ptr(status.Info.Generation), + Version: apiutil.Ptr(int(status.Info.Version)), + } + } + + statusCode := getStatusCodeFromTaskStatusCode(status.Task.StatusCode) + writeResponse(w, r, statusCode, resp) +} + func (c *Controller) CreateCommitRecord(w http.ResponseWriter, r *http.Request, body apigen.CreateCommitRecordJSONRequestBody, repository string) { if !c.authorize(w, r, permissions.Node{ Permission: permissions.Permission{ @@ -4143,9 +4244,9 @@ func (c *Controller) PrepareGarbageCollectionCommitsStatus(w http.ResponseWriter resp.UpdateTime = status.Task.UpdatedAt.AsTime() } - if status.Task.Error != "" { + if status.Task.ErrorMsg != "" { resp.Error = &apigen.Error{ - Message: status.Task.Error, + Message: status.Task.ErrorMsg, } } @@ -4547,8 +4648,8 @@ func (c *Controller) DumpStatus(w http.ResponseWriter, r *http.Request, reposito Done: status.Task.Done, 
UpdateTime: status.Task.UpdatedAt.AsTime(), } - if status.Task.Error != "" { - response.Error = apiutil.Ptr(status.Task.Error) + if status.Task.ErrorMsg != "" { + response.Error = apiutil.Ptr(status.Task.ErrorMsg) } if status.Task.Done && status.Info != nil { response.Refs = &apigen.RefsDump{ @@ -4648,8 +4749,8 @@ func (c *Controller) RestoreStatus(w http.ResponseWriter, r *http.Request, repos Done: status.Task.Done, UpdateTime: status.Task.UpdatedAt.AsTime(), } - if status.Task.Error != "" { - response.Error = apiutil.Ptr(status.Task.Error) + if status.Task.ErrorMsg != "" { + response.Error = apiutil.Ptr(status.Task.ErrorMsg) } writeResponse(w, r, http.StatusOK, response) } @@ -4870,7 +4971,7 @@ func (c *Controller) HeadObject(w http.ResponseWriter, r *http.Request, reposito // read the FS entry entry, err := c.Catalog.GetEntry(ctx, repository, ref, params.Path, catalog.GetEntryParams{}) if err != nil { - c.handleAPIErrorCallback(ctx, w, r, err, func(w http.ResponseWriter, r *http.Request, code int, v interface{}) { + c.HandleAPIErrorCallback(ctx, w, r, err, func(w http.ResponseWriter, r *http.Request, code int, v interface{}) { writeResponse(w, r, code, nil) }) return @@ -5396,6 +5497,96 @@ func (c *Controller) MergeIntoBranch(w http.ResponseWriter, r *http.Request, bod }) } +func (c *Controller) MergeIntoBranchAsync(w http.ResponseWriter, r *http.Request, body apigen.MergeIntoBranchAsyncJSONRequestBody, repository, sourceRef, destinationBranch string) { + if !c.authorize(w, r, permissions.Node{ + Permission: permissions.Permission{ + Action: permissions.CreateCommitAction, + Resource: permissions.BranchArn(repository, destinationBranch), + }, + }) { + return + } + ctx := r.Context() + c.LogAction(ctx, "merge_branches_async", r, repository, destinationBranch, sourceRef) + user, err := auth.GetUser(ctx) + if err != nil { + writeError(w, r, http.StatusUnauthorized, "user not found") + return + } + metadata := map[string]string{} + if body.Metadata != nil { + metadata = body.Metadata.AdditionalProperties + } + + taskID, err := c.asyncOpsHandler.SubmitMergeIntoBranch(ctx, + repository, destinationBranch, sourceRef, + user.Committer(), + swag.StringValue(body.Message), + metadata, + swag.StringValue(body.Strategy), + graveler.WithForce(swag.BoolValue(body.Force)), + graveler.WithAllowEmpty(swag.BoolValue(body.AllowEmpty)), + graveler.WithSquashMerge(swag.BoolValue(body.SquashMerge)), + ) + + if c.handleAPIError(ctx, w, r, err) { + return + } + writeResponse(w, r, http.StatusAccepted, apigen.TaskCreation{ + Id: taskID, + }) +} + +func (c *Controller) MergeIntoBranchStatus(w http.ResponseWriter, r *http.Request, repository, sourceRef, destinationBranch string, params apigen.MergeIntoBranchStatusParams) { + if !c.authorize(w, r, permissions.Node{ + Permission: permissions.Permission{ + Action: permissions.CreateCommitAction, + Resource: permissions.BranchArn(repository, destinationBranch), + }, + }) { + return + } + ctx := r.Context() + c.LogAction(ctx, "merge_branches_async_status", r, repository, destinationBranch, sourceRef) + taskID := params.Id + status, err := c.asyncOpsHandler.GetMergeIntoBranchStatus(ctx, repository, taskID) + if status == nil { + writeResponse(w, r, http.StatusInternalServerError, apigen.MergeStatus{ + TaskId: taskID, + Completed: true, + UpdateTime: time.Now(), + Error: &apigen.Error{ + Message: "failed to get merge status", + }, + }) + return + } + c.HandleAPIErrorCallback(ctx, nil, nil, err, catalog.SetTaskStatusCodeAndError(status.Task)) + + resp := 
apigen.MergeStatus{ + TaskId: status.Task.Id, + Completed: status.Task.Done, + } + if status.Task.UpdatedAt != nil { + resp.UpdateTime = status.Task.UpdatedAt.AsTime() + } + + if status.Task.ErrorMsg != "" { + resp.Error = &apigen.Error{ + Message: status.Task.ErrorMsg, + } + } + + if status.Info != nil { + resp.Result = &apigen.MergeResult{ + Reference: status.Info.Reference, + } + } + + statusCode := getStatusCodeFromTaskStatusCode(status.Task.StatusCode) + writeResponse(w, r, statusCode, resp) +} + func (c *Controller) FindMergeBase(w http.ResponseWriter, r *http.Request, repository string, sourceRef string, destinationRef string) { if !c.authorize(w, r, permissions.Node{ Permission: permissions.Permission{ @@ -6058,6 +6249,13 @@ func writeResponse(w http.ResponseWriter, r *http.Request, code int, response in httputil.WriteAPIResponse(w, r, code, response) } +func getStatusCodeFromTaskStatusCode(statusCode int64) int { + if statusCode == 0 { + return http.StatusOK + } + return int(statusCode) +} + func paginationAfter(v *apigen.PaginationAfter) string { if v == nil { return "" diff --git a/pkg/api/controller_test.go b/pkg/api/controller_test.go index 248ff2e3610..c73482b429d 100644 --- a/pkg/api/controller_test.go +++ b/pkg/api/controller_test.go @@ -5654,8 +5654,9 @@ func TestController_DumpRestoreRepository(t *testing.T) { if restoreStatus.Error == nil { t.Fatal("Expected restore to fail, got nil Error") } - if !strings.Contains(*restoreStatus.Error, graveler.ErrNotFound.Error()) { - t.Fatal("Expected restore to fail with not found error") + const expectedErr = "No data" + if *restoreStatus.Error != expectedErr { + t.Fatalf("Expected restore to fail with '%s', got '%s'", expectedErr, *restoreStatus.Error) } }) diff --git a/pkg/api/serve.go b/pkg/api/serve.go index b91b584d033..bd01856fcf6 100644 --- a/pkg/api/serve.go +++ b/pkg/api/serve.go @@ -45,6 +45,7 @@ func Serve( migrator Migrator, collector stats.Collector, actions actionsHandler, + asyncOpsHandler catalog.AsyncOperationsHandler, auditChecker AuditChecker, logger logging.Logger, gatewayDomains []string, @@ -88,6 +89,7 @@ func Serve( migrator, collector, actions, + asyncOpsHandler, auditChecker, logger, sessionStore, diff --git a/pkg/api/serve_test.go b/pkg/api/serve_test.go index 7f6296a3739..a5522654b13 100644 --- a/pkg/api/serve_test.go +++ b/pkg/api/serve_test.go @@ -186,6 +186,7 @@ func setupHandler(t testing.TB) (http.Handler, *dependencies) { migrator, collector, actionsService, + catalog.NewNoopAsyncOperationsHandler(), auditChecker, logger, nil, diff --git a/pkg/catalog/async_operations.go b/pkg/catalog/async_operations.go new file mode 100644 index 00000000000..665861f2445 --- /dev/null +++ b/pkg/catalog/async_operations.go @@ -0,0 +1,97 @@ +package catalog + +import ( + "context" + + "github.com/treeverse/lakefs/pkg/graveler" +) + +type AsyncOperationsHandler interface { + SubmitCommit( + ctx context.Context, + repositoryID string, + branch string, + message string, + committer string, + metadata Metadata, + date *int64, + sourceMetarange *string, + allowEmpty bool, + opts ...graveler.SetOptionsFunc, + ) (taskID string, err error) + + GetCommitStatus( + ctx context.Context, + repositoryID string, + taskID string, + ) (*CommitAsyncStatus, error) + + SubmitMergeIntoBranch( + ctx context.Context, + repositoryID string, + destinationBranch string, + sourceRef string, + committer string, + message string, + metadata Metadata, + strategy string, + opts ...graveler.SetOptionsFunc, + ) (taskID string, err error) + + 
GetMergeIntoBranchStatus( + ctx context.Context, + repositoryID string, + taskID string, + ) (*MergeAsyncStatus, error) +} + +type NoopAsyncOperationsHandler struct{} + +func NewNoopAsyncOperationsHandler() *NoopAsyncOperationsHandler { + return &NoopAsyncOperationsHandler{} +} + +func (h *NoopAsyncOperationsHandler) SubmitCommit( + ctx context.Context, + repositoryID string, + branch string, + message string, + committer string, + metadata Metadata, + date *int64, + sourceMetarange *string, + allowEmpty bool, + opts ...graveler.SetOptionsFunc, +) (string, error) { + return "", ErrNotImplemented +} + +func (h *NoopAsyncOperationsHandler) GetCommitStatus( + ctx context.Context, + repositoryID string, + taskID string, +) (*CommitAsyncStatus, error) { + return nil, ErrNotImplemented +} + +func (h *NoopAsyncOperationsHandler) SubmitMergeIntoBranch( + ctx context.Context, + repositoryID string, + destinationBranch string, + sourceRef string, + committer string, + message string, + metadata Metadata, + strategy string, + opts ...graveler.SetOptionsFunc, +) (string, error) { + return "", ErrNotImplemented +} + +func (h *NoopAsyncOperationsHandler) GetMergeIntoBranchStatus( + ctx context.Context, + repositoryID string, + taskID string, +) (*MergeAsyncStatus, error) { + return nil, ErrNotImplemented +} diff --git a/pkg/catalog/catalog.go b/pkg/catalog/catalog.go index 2d83a101d60..d9c8fbfd307 100644 --- a/pkg/catalog/catalog.go +++ b/pkg/catalog/catalog.go @@ -12,6 +12,7 @@ import ( "errors" "fmt" "io" + "net/http" "net/url" "os" "reflect" @@ -71,6 +72,8 @@ const ( DumpRefsTaskIDPrefix = "DR" RestoreRefsTaskIDPrefix = "RR" GarbageCollectionPrepareCommitsPrefix = "GCPC" + CommitAsyncPrefix = "CA" + MergeAsyncPrefix = "MA" TaskExpiryTime = 24 * time.Hour @@ -237,6 +240,10 @@ type Config struct { ConflictResolvers []graveler.ConflictResolver } +// APIErrorHandler is a callback function used to classify and handle errors in background tasks. +// It returns true if the error was handled. +type APIErrorHandler func(ctx context.Context, w http.ResponseWriter, r *http.Request, err error, cb func(w http.ResponseWriter, r *http.Request, code int, v any)) bool + type Catalog struct { BlockAdapter block.Adapter Store Store @@ -251,6 +258,7 @@ type Catalog struct { UGCPrepareMaxFileSize int64 UGCPrepareInterval time.Duration signingKey config.SecureString + APIErrorCB APIErrorHandler } const ( @@ -431,6 +439,14 @@ func New(ctx context.Context, cfg Config) (*Catalog, error) { addressProvider: addressProvider, deleteSensor: deleteSensor, signingKey: cfg.Config.StorageConfig().SigningKey(), + // Initiate the API callback function + APIErrorCB: func(ctx context.Context, w http.ResponseWriter, r *http.Request, err error, cb func(w http.ResponseWriter, r *http.Request, code int, v interface{})) bool { + if err == nil { + return false + } + cb(w, r, http.StatusInternalServerError, err) + return true + }, }, nil } @@ -2104,7 +2120,7 @@ func (c *Catalog) DumpRepositorySubmit(ctx context.Context, repositoryID string) } taskStatus := &RepositoryDumpStatus{} - taskSteps := []taskStep{ + taskSteps := []TaskStep{ { Name: "dump commits", Func: func(ctx context.Context) error { @@ -2144,7 +2160,7 @@ func (c *Catalog) DumpRepositorySubmit(ctx context.Context, repositoryID string) // create refs dump task and update initial status. 
taskID := NewTaskID(DumpRefsTaskIDPrefix) - err = c.runBackgroundTaskSteps(repository, taskID, taskSteps, taskStatus) + err = c.RunBackgroundTaskSteps(repository, taskID, taskSteps, taskStatus) if err != nil { return "", err } @@ -2185,7 +2201,7 @@ func (c *Catalog) RestoreRepositorySubmit(ctx context.Context, repositoryID stri // create refs restore task and update initial status taskStatus := &RepositoryRestoreStatus{} - taskSteps := []taskStep{ + taskSteps := []TaskStep{ { Name: "load commits", Func: func(ctx context.Context) error { @@ -2206,7 +2222,7 @@ func (c *Catalog) RestoreRepositorySubmit(ctx context.Context, repositoryID stri }, } taskID := NewTaskID(RestoreRefsTaskIDPrefix) - if err := c.runBackgroundTaskSteps(repository, taskID, taskSteps, taskStatus); err != nil { + if err := c.RunBackgroundTaskSteps(repository, taskID, taskSteps, taskStatus); err != nil { return "", err } return taskID, nil @@ -2229,10 +2245,10 @@ func (c *Catalog) RestoreRepositoryStatus(ctx context.Context, repositoryID stri return &status, nil } -// runBackgroundTaskSteps update task status provided after filling the 'Task' field and update for each step provided. +// RunBackgroundTaskSteps update task status provided after filling the 'Task' field and update for each step provided. // the task status is updated after each step, and the task is marked as completed if the step is the last one. // initial update if the task is done before running the steps. -func (c *Catalog) runBackgroundTaskSteps(repository *graveler.RepositoryRecord, taskID string, steps []taskStep, taskStatus protoreflect.ProtoMessage) error { +func (c *Catalog) RunBackgroundTaskSteps(repository *graveler.RepositoryRecord, taskID string, steps []TaskStep, taskStatus protoreflect.ProtoMessage) error { // Allocate Task and set if on the taskStatus's 'Task' field. // We continue to update this field while running each step. // If the task field in the common Protobuf message is changed, we need to update the field name here as well. @@ -2260,7 +2276,9 @@ func (c *Catalog) runBackgroundTaskSteps(repository *graveler.RepositoryRecord, if err != nil { log.WithError(err).WithField("step", step.Name).Errorf("Catalog background task step failed") task.Done = true - task.Error = err.Error() + // Classify the error using the API callback (handleAPIErrorCallback from the controller) + // before the original error is lost when stored in protobuf, and populate the task's error details. 
+ c.APIErrorCB(ctx, nil, nil, err, SetTaskStatusCodeAndError(task)) } else if stepIdx == len(steps)-1 { task.Done = true } @@ -2725,7 +2743,7 @@ func (c *Catalog) PrepareExpiredCommitsAsync(ctx context.Context, repositoryID s } taskStatus := &GarbageCollectionPrepareStatus{} - taskSteps := []taskStep{ + taskSteps := []TaskStep{ { Name: "prepare expired commits on " + repository.RepositoryID.String(), Func: func(ctx context.Context) error { @@ -2744,24 +2762,32 @@ func (c *Catalog) PrepareExpiredCommitsAsync(ctx context.Context, repositoryID s } taskID := NewTaskID(GarbageCollectionPrepareCommitsPrefix) - err = c.runBackgroundTaskSteps(repository, taskID, taskSteps, taskStatus) + err = c.RunBackgroundTaskSteps(repository, taskID, taskSteps, taskStatus) if err != nil { return "", err } return taskID, nil } -func (c *Catalog) GetGarbageCollectionPrepareStatus(ctx context.Context, repositoryID string, id string) (*GarbageCollectionPrepareStatus, error) { +func (c *Catalog) GetTaskStatus(ctx context.Context, repositoryID string, taskID string, prefix string, statusMsg protoreflect.ProtoMessage) error { repository, err := c.getRepository(ctx, repositoryID) if err != nil { - return nil, err + return err } - if !IsTaskID(GarbageCollectionPrepareCommitsPrefix, id) { - return nil, graveler.ErrNotFound + if !IsTaskID(prefix, taskID) { + return graveler.ErrNotFound } + err = GetTaskStatus(ctx, c.KVStore, repository, taskID, statusMsg) + if err != nil { + return err + } + return nil +} + +func (c *Catalog) GetGarbageCollectionPrepareStatus(ctx context.Context, repositoryID string, id string) (*GarbageCollectionPrepareStatus, error) { var taskStatus GarbageCollectionPrepareStatus - err = GetTaskStatus(ctx, c.KVStore, repository, id, &taskStatus) + err := c.GetTaskStatus(ctx, repositoryID, id, GarbageCollectionPrepareCommitsPrefix, &taskStatus) if err != nil { return nil, err } diff --git a/pkg/catalog/catalog.go.bak b/pkg/catalog/catalog.go.bak new file mode 100644 index 00000000000..8a40bd7d850 --- /dev/null +++ b/pkg/catalog/catalog.go.bak @@ -0,0 +1,3508 @@ +package catalog + +import ( + "bytes" + "container/heap" + "context" + "crypto" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "reflect" + "strings" + "time" + + "github.com/alitto/pond/v2" + "github.com/cockroachdb/pebble" + "github.com/hashicorp/go-multierror" + lru "github.com/hnlq715/golang-lru" + "github.com/rs/xid" + blockfactory "github.com/treeverse/lakefs/modules/block/factory" + "github.com/treeverse/lakefs/pkg/batch" + "github.com/treeverse/lakefs/pkg/block" + "github.com/treeverse/lakefs/pkg/config" + "github.com/treeverse/lakefs/pkg/graveler" + "github.com/treeverse/lakefs/pkg/graveler/branch" + "github.com/treeverse/lakefs/pkg/graveler/committed" + "github.com/treeverse/lakefs/pkg/graveler/ref" + "github.com/treeverse/lakefs/pkg/graveler/retention" + "github.com/treeverse/lakefs/pkg/graveler/settings" + "github.com/treeverse/lakefs/pkg/graveler/sstable" + "github.com/treeverse/lakefs/pkg/graveler/staging" + "github.com/treeverse/lakefs/pkg/ident" + "github.com/treeverse/lakefs/pkg/kv" + "github.com/treeverse/lakefs/pkg/logging" + "github.com/treeverse/lakefs/pkg/pyramid" + pyramidparams "github.com/treeverse/lakefs/pkg/pyramid/params" + "github.com/treeverse/lakefs/pkg/upload" + "github.com/treeverse/lakefs/pkg/validator" + "go.uber.org/atomic" + "go.uber.org/ratelimit" + "google.golang.org/protobuf/reflect/protoreflect" + 
"google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // hashAlg is the hashing algorithm to use to generate graveler identifiers. Changing it + // causes all old identifiers to change, so while existing installations will continue to + // function they will be unable to re-use any existing objects. + hashAlg = crypto.SHA256 + + NumberOfParentsOfNonMergeCommit = 1 + + gcParquetParallelNum = 1 // Number of goroutines to handle marshaling of data + + // Calculation of size deviation by gcPeriodicCheckSize value + // "data" prefix = 4 bytes + // partition id = 20 bytes (xid) + // object name = 20 bytes (xid) + // timestamp (int64) = 8 bytes + // + // Total per entry ~52 bytes + // Deviation with gcPeriodicCheckSize = 100000 will be around 5 MB + gcPeriodicCheckSize = 100000 + + DumpRefsTaskIDPrefix = "DR" + RestoreRefsTaskIDPrefix = "RR" + GarbageCollectionPrepareCommitsPrefix = "GCPC" + CommitAsyncPrefix = "CA" + MergeAsyncPrefix = "MA" + + TaskExpiryTime = 24 * time.Hour + + // LinkAddressTime the time address is valid from get to link + LinkAddressTime = 6 * time.Hour + LinkAddressSigningDelimiter = "," + + // CloneGracePeriod period during which we allow clone metadata as part of the object copy operation. + // Prevent the GC from collecting physical addresses that were not found while scanning uncommitted data. + CloneGracePeriod = 5 * time.Hour +) + +type Path string + +type EntryRecord struct { + Path Path + *Entry +} + +type EntryListing struct { + CommonPrefix bool + Path + *Entry +} + +type EntryDiff struct { + Type graveler.DiffType + Path Path + Entry *Entry +} + +type EntryIterator interface { + Next() bool + SeekGE(id Path) + Value() *EntryRecord + Err() error + Close() +} + +type EntryListingIterator interface { + Next() bool + SeekGE(id Path) + Value() *EntryListing + Err() error + Close() +} + +type EntryDiffIterator interface { + Next() bool + SeekGE(id Path) + Value() *EntryDiff + Err() error + Close() +} + +func (id Path) String() string { + return string(id) +} + +type Store interface { + graveler.KeyValueStore + graveler.VersionController + graveler.Dumper + graveler.Loader + graveler.Plumbing + graveler.Collaborator +} + +type EntryConflictResolver interface { + // FilterByPath returns true if the path should be considered for conflict resolution. + // This is an optimization to avoid decoding irrelevant entries. + FilterByPath(path string) bool + + // ResolveConflict resolves conflicts between two DBEntry values. + // It returns the resolved value, or nil if the conflict cannot be resolved automatically. + // Assuming the source and dest have the same key (path). + ResolveConflict(ctx context.Context, sCtx graveler.StorageContext, srcValue, destValue *DBEntry) (*DBEntry, error) +} + +const ( + RangeFSName = "range" + MetaRangeFSName = "meta-range" +) + +const ( + DefaultPathDelimiter = "/" +) + +type DiffParams struct { + Limit int + After string + Prefix string + Delimiter string + AdditionalFields []string // db fields names that will be load in additional to Path on Difference's Entry +} + +type RevertParams struct { + Reference string // the commit to revert + ParentNumber int // if reverting a merge commit, the change will be reversed relative to this parent number (1-based). 
+ Committer string + AllowEmpty bool // allow empty commit (revert without changes) + *graveler.CommitOverrides +} + +type CherryPickParams struct { + Reference string // the commit to pick + ParentNumber *int // if a merge commit was picked, the change will be applied relative to this parent number (1-based). + Committer string + *graveler.CommitOverrides +} + +type PathRecord struct { + Path Path + IsPrefix bool +} + +type LogParams struct { + PathList []PathRecord + FromReference string + Amount int + Limit bool + FirstParent bool + Since *time.Time + StopAt string +} + +type ExpireResult struct { + Repository string + Branch string + PhysicalAddress string + InternalReference string +} + +// ExpiryRows is a database iterator over ExpiryResults. Use Next to advance from row to row. +type ExpiryRows interface { + Close() + Next() bool + Err() error + // Read returns the current from ExpiryRows, or an error on failure. Call it only after + // successfully calling Next. + Read() (*ExpireResult, error) +} + +// GetEntryParams configures what entries GetEntry returns. +type GetEntryParams struct { + // StageOnly when true will return entry found on stage without checking committed data + StageOnly bool +} + +type WriteRangeRequest struct { + SourceURI string + Prepend string + After string + StagingToken string + ContinuationToken string +} + +type Config struct { + Config config.Config + KVStore kv.Store + SettingsManagerOption settings.ManagerOption + PathProvider *upload.PathPartitionProvider + // ConflictResolvers alternative conflict resolvers (if nil, the default behavior is kept) + ConflictResolvers []graveler.ConflictResolver +} + +// APIErrorHandlerInterface is an interface for handling API errors in background tasks. +type APIErrorHandlerInterface interface { + // HandleAPIError classifies and handles errors in background tasks. + // It returns true if the error was handled. + HandleAPIError(ctx context.Context, w http.ResponseWriter, r *http.Request, err error, cb func(w http.ResponseWriter, r *http.Request, code int, v any)) bool + // GetHandlerType returns a string identifier for the handler type, used for validation. 
+ GetHandlerType() string +} + +type defaultAPIErrorHandler struct{} + +func (h *defaultAPIErrorHandler) HandleAPIError(ctx context.Context, w http.ResponseWriter, r *http.Request, err error, cb func(w http.ResponseWriter, r *http.Request, code int, v any)) bool { + if err == nil { + return false + } + cb(w, r, http.StatusInternalServerError, err) + return true +} + +func (h *defaultAPIErrorHandler) GetHandlerType() string { + return "catalog.defaultAPIErrorHandler" +} + +type Catalog struct { + BlockAdapter block.Adapter + Store Store + managers []io.Closer + workPool pond.Pool + PathProvider *upload.PathPartitionProvider + BackgroundLimiter ratelimit.Limiter + KVStore kv.Store + KVStoreLimited kv.Store + addressProvider *ident.HexAddressProvider + deleteSensor *graveler.DeleteSensor + UGCPrepareMaxFileSize int64 + UGCPrepareInterval time.Duration + signingKey config.SecureString + APIErrorCB APIErrorHandlerInterface +} + +const ( + ListRepositoriesLimitMax = 1000 + ListBranchesLimitMax = 1000 + ListTagsLimitMax = 1000 + DiffLimitMax = 1000 + ListPullsLimitMax = 1000 + ListEntriesLimitMax = 10000 + sharedWorkers = 30 +) + +type ImportPathType string + +const ( + ImportPathTypePrefix = "common_prefix" + ImportPathTypeObject = "object" +) + +type ImportPath struct { + Path string + Destination string + Type ImportPathType +} + +func GetImportPathType(t string) (ImportPathType, error) { + switch t { + case ImportPathTypePrefix, + ImportPathTypeObject: + return ImportPathType(t), nil + default: + return "", fmt.Errorf("invalid import type: %w", graveler.ErrInvalidValue) + } +} + +type ImportCommit struct { + CommitMessage string + Committer string + Metadata Metadata +} + +type ImportRequest struct { + Paths []ImportPath + Commit ImportCommit + Force bool +} + +type ctxCloser struct { + close context.CancelFunc +} + +func (c *ctxCloser) Close() error { + go c.close() + return nil +} + +func makeBranchApproximateOwnershipParams(cfg config.ApproximatelyCorrectOwnership) ref.BranchApproximateOwnershipParams { + if !cfg.Enabled { + // zero Durations => no branch ownership + return ref.BranchApproximateOwnershipParams{} + } + return ref.BranchApproximateOwnershipParams{ + AcquireInterval: cfg.Acquire, + RefreshInterval: cfg.Refresh, + } +} + +func New(ctx context.Context, cfg Config) (*Catalog, error) { + ctx, cancelFn := context.WithCancel(ctx) + adapter, err := blockfactory.BuildBlockAdapter(ctx, nil, cfg.Config) + if err != nil { + cancelFn() + return nil, fmt.Errorf("build block adapter: %w", err) + } + + baseCfg := cfg.Config.GetBaseConfig() + tierFSParams, err := pyramidparams.NewCommittedTierFSParams(baseCfg, adapter) + if err != nil { + cancelFn() + return nil, fmt.Errorf("configure tiered FS for committed: %w", err) + } + metaRangeFS, err := pyramid.NewFS(&pyramidparams.InstanceParams{ + SharedParams: tierFSParams.SharedParams, + FSName: MetaRangeFSName, + DiskAllocProportion: tierFSParams.MetaRangeAllocationProportion, + }) + if err != nil { + cancelFn() + return nil, fmt.Errorf("create tiered FS for committed metaranges: %w", err) + } + + rangeFS, err := pyramid.NewFS(&pyramidparams.InstanceParams{ + SharedParams: tierFSParams.SharedParams, + FSName: RangeFSName, + DiskAllocProportion: tierFSParams.RangeAllocationProportion, + }) + if err != nil { + cancelFn() + return nil, fmt.Errorf("create tiered FS for committed ranges: %w", err) + } + + pebbleSSTableCache := pebble.NewCache(tierFSParams.PebbleSSTableCacheSizeBytes) + defer pebbleSSTableCache.Unref() + + committedManager, closers, 
err := buildCommittedManager(cfg, pebbleSSTableCache, rangeFS, metaRangeFS) + if err != nil { + cancelFn() + return nil, err + } + + executor := batch.NewConditionalExecutor(logging.ContextUnavailable()) + go executor.Run(ctx) + + // Setup rate limiter used for background operations + limiter := newLimiter(baseCfg.Graveler.Background.RateLimit) + + storeLimiter := kv.NewStoreLimiter(cfg.KVStore, limiter) + addressProvider := ident.NewHexAddressProvider() + + refManager := ref.NewRefManager( + ref.ManagerConfig{ + Executor: executor, + KVStore: cfg.KVStore, + KVStoreLimited: storeLimiter, + AddressProvider: addressProvider, + RepositoryCacheConfig: ref.CacheConfig(baseCfg.Graveler.RepositoryCache), + CommitCacheConfig: ref.CacheConfig(baseCfg.Graveler.CommitCache), + MaxBatchDelay: baseCfg.Graveler.MaxBatchDelay, + BranchApproximateOwnershipParams: makeBranchApproximateOwnershipParams(baseCfg.Graveler.BranchOwnership), + }, + cfg.Config.StorageConfig(), + ) + gcManager := retention.NewGarbageCollectionManager(tierFSParams.Adapter, refManager, baseCfg.Committed.BlockStoragePrefix) + settingManager := settings.NewManager(refManager, cfg.KVStore) + if cfg.SettingsManagerOption != nil { + cfg.SettingsManagerOption(settingManager) + } + + protectedBranchesManager := branch.NewProtectionManager(settingManager) + stagingManager := staging.NewManager(ctx, cfg.KVStore, storeLimiter, baseCfg.Graveler.BatchDBIOTransactionMarkers, executor) + var deleteSensor *graveler.DeleteSensor + if baseCfg.Graveler.CompactionSensorThreshold > 0 { + cb := func(repositoryID graveler.RepositoryID, branchID graveler.BranchID, stagingTokenID graveler.StagingToken, inGrace bool) { + logging.FromContext(ctx).WithFields(logging.Fields{ + "repositoryID": repositoryID, + "branchID": branchID, + "stagingTokenID": stagingTokenID, + "inGrace": inGrace, + }).Info("Delete sensor callback") + } + deleteSensor = graveler.NewDeleteSensor(baseCfg.Graveler.CompactionSensorThreshold, cb) + } + + // The size of the workPool is determined by the number of workers and the number of desired pending tasks for each worker. 
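+	// A hedged sketch of how this shared pool is consumed elsewhere in this
+	// file (see listCommitsWithPaths): a context-bound group fans work out to
+	// the shared workers, and Wait returns the first non-nil worker error:
+	//
+	//	group := workPool.NewGroupContext(ctx)
+	//	group.SubmitErr(func() error {
+	//		return nil // unit of work
+	//	})
+	//	if err := group.Wait(); err != nil {
+	//		// handle the first worker error
+	//	}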
+ workPool := pond.NewPool(sharedWorkers, pond.WithContext(ctx)) + + gStore := graveler.NewGraveler(graveler.GravelerConfig{ + CommittedManager: committedManager, + StagingManager: stagingManager, + RefManager: refManager, + GarbageCollectionManager: gcManager, + ProtectedBranchesManager: protectedBranchesManager, + DeleteSensor: deleteSensor, + WorkPool: workPool, + }) + closers = append(closers, &ctxCloser{cancelFn}) + return &Catalog{ + BlockAdapter: tierFSParams.Adapter, + Store: gStore, + UGCPrepareMaxFileSize: baseCfg.UGC.PrepareMaxFileSize, + UGCPrepareInterval: baseCfg.UGC.PrepareInterval, + PathProvider: cfg.PathProvider, + BackgroundLimiter: limiter, + workPool: workPool, + KVStore: cfg.KVStore, + managers: closers, + KVStoreLimited: storeLimiter, + addressProvider: addressProvider, + deleteSensor: deleteSensor, + signingKey: cfg.Config.StorageConfig().SigningKey(), + APIErrorCB: &defaultAPIErrorHandler{}, + }, nil +} + +func buildCommittedManager(cfg Config, pebbleSSTableCache *pebble.Cache, rangeFS pyramid.FS, metaRangeFS pyramid.FS) (graveler.CommittedManager, []io.Closer, error) { + baseCfg := cfg.Config.GetBaseConfig() + committedParams := committed.Params{ + MinRangeSizeBytes: baseCfg.Committed.Permanent.MinRangeSizeBytes, + MaxRangeSizeBytes: baseCfg.Committed.Permanent.MaxRangeSizeBytes, + RangeSizeEntriesRaggedness: baseCfg.Committed.Permanent.RangeRaggednessEntries, + MaxUploaders: baseCfg.Committed.LocalCache.MaxUploadersPerWriter, + } + var closers []io.Closer + sstableManagers := make(map[graveler.StorageID]committed.RangeManager) + sstableMetaRangeManagers := make(map[graveler.StorageID]committed.MetaRangeManager) + storageIDs := cfg.Config.StorageConfig().GetStorageIDs() + for _, sID := range storageIDs { + sstableRangeManager := sstable.NewPebbleSSTableRangeManager(pebbleSSTableCache, rangeFS, hashAlg, committed.StorageID(sID)) + sstableManagers[graveler.StorageID(sID)] = sstableRangeManager + closers = append(closers, sstableRangeManager) + + storage := cfg.Config.StorageConfig().GetStorageByID(sID) + if storage.IsBackwardsCompatible() { + sstableManagers[config.SingleBlockstoreID] = sstableRangeManager + } + + sstableMetaManager := sstable.NewPebbleSSTableRangeManager(pebbleSSTableCache, metaRangeFS, hashAlg, committed.StorageID(sID)) + closers = append(closers, sstableMetaManager) + + sstableMetaRangeManager, err := committed.NewMetaRangeManager( + committedParams, + sstableMetaManager, + sstableRangeManager, + graveler.StorageID(sID), + ) + if err != nil { + return nil, nil, fmt.Errorf("create SSTable-based metarange manager: %w", err) + } + sstableMetaRangeManagers[graveler.StorageID(sID)] = sstableMetaRangeManager + if storage.IsBackwardsCompatible() { + sstableMetaRangeManagers[config.SingleBlockstoreID] = sstableMetaRangeManager + } + } + + crs := cfg.ConflictResolvers + if len(crs) == 0 { + // set a default conflict resolver if none was provided + crs = []graveler.ConflictResolver{&committed.StrategyConflictResolver{}} + } + + committedManager := committed.NewCommittedManager( + sstableMetaRangeManagers, + sstableManagers, + crs, + committedParams, + ) + return committedManager, closers, nil +} + +func newLimiter(rateLimit int) ratelimit.Limiter { + var limiter ratelimit.Limiter + if rateLimit == 0 { + limiter = ratelimit.NewUnlimited() + } else { + limiter = ratelimit.New(rateLimit) + } + return limiter +} + +func (c *Catalog) SetHooksHandler(hooks graveler.HooksHandler) { + c.Store.SetHooksHandler(hooks) +} + +func (c *Catalog) log(ctx context.Context) 
+	logging.Logger {
+	return logging.FromContext(ctx).WithField("service_name", "entry_catalog")
+}
+
+// CreateRepository creates a new repository pointing to 'storageNamespace' (ex: s3://bucket1/repo) with default branch name 'branch'
+func (c *Catalog) CreateRepository(ctx context.Context, repository string, storageID string, storageNamespace string, branch string, readOnly bool) (*Repository, error) {
+	repositoryID := graveler.RepositoryID(repository)
+	storageIdentifier := graveler.StorageID(storageID)
+	storageNS := graveler.StorageNamespace(storageNamespace)
+	branchID := graveler.BranchID(branch)
+	if err := validator.Validate([]validator.ValidateArg{
+		{Name: "name", Value: repositoryID, Fn: graveler.ValidateRepositoryID},
+		{Name: "storageNamespace", Value: storageNS, Fn: graveler.ValidateStorageNamespace},
+	}); err != nil {
+		return nil, err
+	}
+
+	repo, err := c.Store.CreateRepository(ctx, repositoryID, storageIdentifier, storageNS, branchID, readOnly)
+	if err != nil {
+		return nil, err
+	}
+	catalogRepo := &Repository{
+		Name:             repositoryID.String(),
+		StorageID:        storageIdentifier.String(),
+		StorageNamespace: storageNS.String(),
+		DefaultBranch:    branchID.String(),
+		CreationDate:     repo.CreationDate,
+		ReadOnly:         repo.ReadOnly,
+	}
+	return catalogRepo, nil
+}
+
+// CreateBareRepository creates a new repository pointing to 'storageNamespace' (ex: s3://bucket1/repo) with no initial branch or commit.
+// defaultBranchID will point to a non-existent branch on creation; it is up to the caller to eventually create it.
+func (c *Catalog) CreateBareRepository(ctx context.Context, repository string, storageID string, storageNamespace string, defaultBranchID string, readOnly bool) (*Repository, error) {
+	repositoryID := graveler.RepositoryID(repository)
+	storageIdentifier := graveler.StorageID(storageID)
+	storageNS := graveler.StorageNamespace(storageNamespace)
+	branchID := graveler.BranchID(defaultBranchID)
+	if err := validator.Validate([]validator.ValidateArg{
+		{Name: "name", Value: repositoryID, Fn: graveler.ValidateRepositoryID},
+		{Name: "storageNamespace", Value: storageNS, Fn: graveler.ValidateStorageNamespace},
+	}); err != nil {
+		return nil, err
+	}
+
+	repo, err := c.Store.CreateBareRepository(ctx, repositoryID, storageIdentifier, storageNS, branchID, readOnly)
+	if err != nil {
+		return nil, err
+	}
+	catalogRepo := &Repository{
+		Name:             repositoryID.String(),
+		StorageID:        storageIdentifier.String(),
+		StorageNamespace: storageNS.String(),
+		DefaultBranch:    branchID.String(),
+		CreationDate:     repo.CreationDate,
+		ReadOnly:         readOnly,
+	}
+	return catalogRepo, nil
+}
+
+func (c *Catalog) getRepository(ctx context.Context, repository string) (*graveler.RepositoryRecord, error) {
+	repositoryID := graveler.RepositoryID(repository)
+	return c.Store.GetRepository(ctx, repositoryID)
+}
+
+// GetRepository returns repository information
+func (c *Catalog) GetRepository(ctx context.Context, repository string) (*Repository, error) {
+	repositoryID := graveler.RepositoryID(repository)
+	if err := validator.Validate([]validator.ValidateArg{
+		{Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID},
+	}); err != nil {
+		return nil, err
+	}
+	repo, err := c.getRepository(ctx, repository)
+	if err != nil {
+		return nil, err
+	}
+
+	catalogRepository := &Repository{
+		Name:             repositoryID.String(),
+		StorageID:        repo.StorageID.String(),
+		StorageNamespace: repo.StorageNamespace.String(),
+		DefaultBranch:    repo.DefaultBranchID.String(),
+		CreationDate:     repo.CreationDate,
+		ReadOnly:         repo.ReadOnly,
+	}
+	return catalogRepository, nil
+}
+
+// DeleteRepository deletes a repository
+func (c *Catalog) DeleteRepository(ctx context.Context, repository string, opts ...graveler.SetOptionsFunc) error {
+	repositoryID := graveler.RepositoryID(repository)
+	if err := validator.Validate([]validator.ValidateArg{
+		{Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID},
+	}); err != nil {
+		return err
+	}
+	return c.Store.DeleteRepository(ctx, repositoryID, opts...)
+}
+
+// GetRepositoryMetadata returns repository metadata
+func (c *Catalog) GetRepositoryMetadata(ctx context.Context, repository string) (graveler.RepositoryMetadata, error) {
+	repositoryID := graveler.RepositoryID(repository)
+	if err := validator.Validate([]validator.ValidateArg{
+		{Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID},
+	}); err != nil {
+		return nil, err
+	}
+	return c.Store.GetRepositoryMetadata(ctx, repositoryID)
+}
+
+// UpdateRepositoryMetadata sets repository metadata
+func (c *Catalog) UpdateRepositoryMetadata(ctx context.Context, repository string, metadata graveler.RepositoryMetadata) error {
+	if len(metadata) == 0 {
+		return nil
+	}
+	r, err := c.getRepository(ctx, repository)
+	if err != nil {
+		return err
+	}
+	return c.Store.SetRepositoryMetadata(ctx, r, func(md graveler.RepositoryMetadata) (graveler.RepositoryMetadata, error) {
+		if md == nil {
+			return metadata, nil
+		}
+		for k, v := range metadata {
+			md[k] = v
+		}
+		return md, nil
+	})
+}
+
+// DeleteRepositoryMetadata deletes repository metadata
+func (c *Catalog) DeleteRepositoryMetadata(ctx context.Context, repository string, keys []string) error {
+	if len(keys) == 0 {
+		return nil
+	}
+	r, err := c.getRepository(ctx, repository)
+	if err != nil {
+		return err
+	}
+	return c.Store.SetRepositoryMetadata(ctx, r, func(md graveler.RepositoryMetadata) (graveler.RepositoryMetadata, error) {
+		for _, k := range keys {
+			delete(md, k)
+		}
+		return md, nil
+	})
+}
+
+// ListRepositories lists repository information; the bool returned is true when more repositories can be listed.
+// In this case, pass the last repository name as 'after' on the next call to ListRepositories. Results can be
+// filtered by specifying a prefix or, more generally, a searchString.
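+//
+// A hedged pagination sketch based on this contract (values illustrative):
+//
+//	var after string
+//	for {
+//		repos, hasMore, err := c.ListRepositories(ctx, 100, "", "", after)
+//		if err != nil {
+//			break // handle the error in real code
+//		}
+//		// ... process repos ...
+//		if !hasMore || len(repos) == 0 {
+//			break
+//		}
+//		after = repos[len(repos)-1].Name
+//	}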
+func (c *Catalog) ListRepositories(ctx context.Context, limit int, prefix, searchString, after string) ([]*Repository, bool, error) { + // normalize limit + if limit < 0 || limit > ListRepositoriesLimitMax { + limit = ListRepositoriesLimitMax + } + // get list repositories iterator + it, err := c.Store.ListRepositories(ctx) + if err != nil { + return nil, false, fmt.Errorf("get iterator: %w", err) + } + defer it.Close() + // seek for first item + afterRepositoryID := graveler.RepositoryID(after) + prefixRepositoryID := graveler.RepositoryID(prefix) + startPos := prefixRepositoryID + if afterRepositoryID > startPos { + startPos = afterRepositoryID + } + if startPos != "" { + it.SeekGE(startPos) + } + + var repos []*Repository + for it.Next() { + record := it.Value() + + if !strings.HasPrefix(string(record.RepositoryID), prefix) { + break + } + if !strings.Contains(string(record.RepositoryID), searchString) { + continue + } + if record.RepositoryID == afterRepositoryID { + continue + } + repos = append(repos, &Repository{ + Name: record.RepositoryID.String(), + StorageID: record.StorageID.String(), + StorageNamespace: record.StorageNamespace.String(), + DefaultBranch: record.DefaultBranchID.String(), + CreationDate: record.CreationDate, + ReadOnly: record.ReadOnly, + }) + // collect limit +1 to return limit and has more + if len(repos) >= limit+1 { + break + } + } + if err := it.Err(); err != nil { + return nil, false, err + } + // trim result if needed and return has more + hasMore := false + if len(repos) > limit { + hasMore = true + repos = repos[:limit] + } + return repos, hasMore, nil +} + +func (c *Catalog) GetStagingToken(ctx context.Context, repositoryID string, branch string) (*string, error) { + branchID := graveler.BranchID(branch) + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + {Name: "branch", Value: branchID, Fn: graveler.ValidateBranchID}, + }); err != nil { + return nil, err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return nil, err + } + token, err := c.Store.GetStagingToken(ctx, repository, branchID) + if err != nil { + return nil, err + } + tokenString := "" + if token != nil { + tokenString = string(*token) + } + return &tokenString, nil +} + +func (c *Catalog) CreateBranch(ctx context.Context, repositoryID string, branch string, sourceBranch string, opts ...graveler.SetOptionsFunc) (*CommitLog, error) { + branchID := graveler.BranchID(branch) + sourceRef := graveler.Ref(sourceBranch) + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + {Name: "branch", Value: branchID, Fn: graveler.ValidateBranchID}, + {Name: "ref", Value: sourceRef, Fn: graveler.ValidateRef}, + }); err != nil { + if errors.Is(err, graveler.ErrInvalidBranchID) { + return nil, fmt.Errorf("%w: branch id must consist of letters, digits, underscores and dashes, and cannot start with a dash", err) + } + return nil, err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return nil, err + } + if err := c.checkCommitIDDuplication(ctx, repository, graveler.CommitID(branchID)); err != nil { + return nil, err + } + + // look for a tag with the same name to avoid reference conflict + if _, err := c.Store.GetTag(ctx, repository, graveler.TagID(branchID)); err == nil { + return nil, fmt.Errorf("tag ID %s: %w", branchID, graveler.ErrConflictFound) + } else if !errors.Is(err, 
graveler.ErrNotFound) { + return nil, err + } + newBranch, err := c.Store.CreateBranch(ctx, repository, branchID, sourceRef, opts...) + if err != nil { + return nil, err + } + commit, err := c.Store.GetCommit(ctx, repository, newBranch.CommitID) + if err != nil { + return nil, err + } + catalogCommitLog := &CommitLog{ + Reference: newBranch.CommitID.String(), + Committer: commit.Committer, + Message: commit.Message, + Metadata: Metadata(commit.Metadata), + Version: CommitVersion(commit.Version), + Generation: CommitGeneration(commit.Generation), + } + for _, parent := range commit.Parents { + catalogCommitLog.Parents = append(catalogCommitLog.Parents, string(parent)) + } + return catalogCommitLog, nil +} + +func (c *Catalog) DeleteBranch(ctx context.Context, repositoryID string, branch string, opts ...graveler.SetOptionsFunc) error { + branchID := graveler.BranchID(branch) + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + {Name: "name", Value: branchID, Fn: graveler.ValidateBranchID}, + }); err != nil { + return err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return err + } + return c.Store.DeleteBranch(ctx, repository, branchID, opts...) +} + +func (c *Catalog) ListBranches(ctx context.Context, repositoryID string, prefix string, limit int, after string, opts ...graveler.ListOptionsFunc) ([]*Branch, bool, error) { + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + }); err != nil { + return nil, false, err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return nil, false, err + } + + // normalize limit + if limit < 0 || limit > ListBranchesLimitMax { + limit = ListBranchesLimitMax + } + it, err := c.Store.ListBranches(ctx, repository, opts...) 
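+	// Hedged note on the seek below: we effectively start from
+	// max(prefix, after), so e.g. prefix "feat/" with after "feat/a" starts at
+	// "feat/a", while an empty 'after' starts at the prefix itself.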
+ if err != nil { + return nil, false, err + } + defer it.Close() + + afterBranch := graveler.BranchID(after) + prefixBranch := graveler.BranchID(prefix) + if afterBranch < prefixBranch { + it.SeekGE(prefixBranch) + } else { + it.SeekGE(afterBranch) + } + var branches []*Branch + for it.Next() { + v := it.Value() + if v.BranchID == afterBranch { + continue + } + branchID := v.BranchID.String() + // break in case we got to a branch outside our prefix + if !strings.HasPrefix(branchID, prefix) { + break + } + b := &Branch{ + Name: v.BranchID.String(), + Reference: v.CommitID.String(), + } + branches = append(branches, b) + if len(branches) >= limit+1 { + break + } + } + if err := it.Err(); err != nil { + return nil, false, err + } + // return results (optionally trimmed) and hasMore + hasMore := false + if len(branches) > limit { + hasMore = true + branches = branches[:limit] + } + return branches, hasMore, nil +} + +func (c *Catalog) BranchExists(ctx context.Context, repositoryID string, branch string) (bool, error) { + branchID := graveler.BranchID(branch) + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + {Name: "name", Value: branchID, Fn: graveler.ValidateBranchID}, + }); err != nil { + return false, err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return false, err + } + _, err = c.Store.GetBranch(ctx, repository, branchID) + if errors.Is(err, graveler.ErrNotFound) { + return false, nil + } + if err != nil { + return false, err + } + return true, nil +} + +func (c *Catalog) GetBranchReference(ctx context.Context, repositoryID string, branch string) (string, error) { + branchID := graveler.BranchID(branch) + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + {Name: "branch", Value: branchID, Fn: graveler.ValidateBranchID}, + }); err != nil { + return "", err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return "", err + } + b, err := c.Store.GetBranch(ctx, repository, branchID) + if err != nil { + return "", err + } + return string(b.CommitID), nil +} + +func (c *Catalog) HardResetBranch(ctx context.Context, repositoryID, branch, refExpr string, opts ...graveler.SetOptionsFunc) error { + branchID := graveler.BranchID(branch) + reference := graveler.Ref(refExpr) + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + {Name: "branch", Value: branchID, Fn: graveler.ValidateBranchID}, + {Name: "ref", Value: reference, Fn: graveler.ValidateRef}, + }); err != nil { + return err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return err + } + return c.Store.ResetHard(ctx, repository, branchID, reference, opts...) +} + +func (c *Catalog) ResetBranch(ctx context.Context, repositoryID string, branch string, opts ...graveler.SetOptionsFunc) error { + branchID := graveler.BranchID(branch) + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + {Name: "branch", Value: branchID, Fn: graveler.ValidateBranchID}, + }); err != nil { + return err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return err + } + return c.Store.Reset(ctx, repository, branchID, opts...) 
+} + +func (c *Catalog) CreateTag(ctx context.Context, repositoryID string, tagID string, ref string, opts ...graveler.SetOptionsFunc) (string, error) { + tag := graveler.TagID(tagID) + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + {Name: "tagID", Value: tag, Fn: graveler.ValidateTagID}, + }); err != nil { + return "", err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return "", err + } + + // look for a branch with the same name to avoid reference conflict + if _, err := c.Store.GetBranch(ctx, repository, graveler.BranchID(tagID)); err == nil { + return "", fmt.Errorf("branch name %s: %w", tagID, graveler.ErrConflictFound) + } else if !errors.Is(err, graveler.ErrNotFound) { + return "", err + } + if err := c.checkCommitIDDuplication(ctx, repository, graveler.CommitID(tagID)); err != nil { + return "", err + } + + commitID, err := c.dereferenceCommitID(ctx, repository, graveler.Ref(ref)) + if err != nil { + return "", err + } + err = c.Store.CreateTag(ctx, repository, tag, commitID, opts...) + if err != nil { + return "", err + } + return commitID.String(), nil +} + +func (c *Catalog) DeleteTag(ctx context.Context, repositoryID string, tagID string, opts ...graveler.SetOptionsFunc) error { + tag := graveler.TagID(tagID) + if err := validator.Validate([]validator.ValidateArg{ + {Name: "name", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + {Name: "tagID", Value: tag, Fn: graveler.ValidateTagID}, + }); err != nil { + return err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return err + } + return c.Store.DeleteTag(ctx, repository, tag, opts...) +} + +func (c *Catalog) ListTags(ctx context.Context, repositoryID string, prefix string, limit int, after string) ([]*Tag, bool, error) { + if limit < 0 || limit > ListTagsLimitMax { + limit = ListTagsLimitMax + } + if err := validator.Validate([]validator.ValidateArg{ + {Name: "name", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + }); err != nil { + return nil, false, err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return nil, false, err + } + it, err := c.Store.ListTags(ctx, repository) + if err != nil { + return nil, false, err + } + defer it.Close() + afterTagID := graveler.TagID(after) + prefixTagID := graveler.TagID(prefix) + if afterTagID < prefixTagID { + it.SeekGE(prefixTagID) + } else { + it.SeekGE(afterTagID) + } + var tags []*Tag + for it.Next() { + v := it.Value() + if v.TagID == afterTagID { + continue + } + if !strings.HasPrefix(v.TagID.String(), prefix) { + break + } + tag := &Tag{ + ID: string(v.TagID), + CommitID: v.CommitID.String(), + } + tags = append(tags, tag) + if len(tags) >= limit+1 { + break + } + } + if err := it.Err(); err != nil { + return nil, false, err + } + // return results (optionally trimmed) and hasMore + hasMore := false + if len(tags) > limit { + hasMore = true + tags = tags[:limit] + } + return tags, hasMore, nil +} + +func (c *Catalog) GetTag(ctx context.Context, repositoryID string, tagID string) (string, error) { + tag := graveler.TagID(tagID) + if err := validator.Validate([]validator.ValidateArg{ + {Name: "name", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + {Name: "tagID", Value: tag, Fn: graveler.ValidateTagID}, + }); err != nil { + return "", err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return "", err + } + commit, err := 
+		c.Store.GetTag(ctx, repository, tag)
+	if err != nil {
+		return "", err
+	}
+	return commit.String(), nil
+}
+
+// GetEntry returns the current entry for a path in repository branch reference. Returns
+// the entry with ExpiredError if it has expired from underlying storage.
+func (c *Catalog) GetEntry(ctx context.Context, repositoryID string, reference string, path string, params GetEntryParams) (*DBEntry, error) {
+	refToGet := graveler.Ref(reference)
+	if err := validator.Validate([]validator.ValidateArg{
+		{Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID},
+		{Name: "ref", Value: refToGet, Fn: graveler.ValidateRef},
+		{Name: "path", Value: Path(path), Fn: ValidatePath},
+	}); err != nil {
+		return nil, err
+	}
+	repository, err := c.getRepository(ctx, repositoryID)
+	if err != nil {
+		return nil, err
+	}
+	val, err := c.Store.Get(ctx, repository, refToGet, graveler.Key(path), graveler.WithStageOnly(params.StageOnly))
+	if err != nil {
+		return nil, err
+	}
+	ent, err := ValueToEntry(val)
+	if err != nil {
+		return nil, err
+	}
+	catalogEntry := newCatalogEntryFromEntry(false, path, ent)
+	return &catalogEntry, nil
+}
+
+// UpdateEntryUserMetadata updates user metadata for the current entry for a
+// path in repository branch reference.
+func (c *Catalog) UpdateEntryUserMetadata(ctx context.Context, repositoryID, branch, path string, newUserMetadata map[string]string) error {
+	branchID := graveler.BranchID(branch)
+	if err := validator.Validate([]validator.ValidateArg{
+		{Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID},
+		{Name: "branch", Value: branchID, Fn: graveler.ValidateBranchID},
+		{Name: "path", Value: Path(path), Fn: ValidatePath},
+	}); err != nil {
+		return err
+	}
+
+	repository, err := c.getRepository(ctx, repositoryID)
+	if err != nil {
+		return err
+	}
+
+	key := graveler.Key(path)
+	updater := graveler.ValueUpdateFunc(func(value *graveler.Value) (*graveler.Value, error) {
+		if value == nil {
+			return nil, fmt.Errorf("update user metadata on %s/%s/%s: %w",
+				repositoryID, branchID, path, graveler.ErrNotFound)
+		}
+		entry, err := ValueToEntry(value)
+		if err != nil {
+			return nil, err
+		}
+		entry.Metadata = newUserMetadata
+		return EntryToValue(entry)
+	})
+	return c.Store.Update(ctx, repository, branchID, key, updater)
+}
+
+func newEntryFromCatalogEntry(entry DBEntry) *Entry {
+	ent := &Entry{
+		Address:      entry.PhysicalAddress,
+		AddressType:  addressTypeToProto(entry.AddressType),
+		Metadata:     entry.Metadata,
+		LastModified: timestamppb.New(entry.CreationDate),
+		ETag:         entry.Checksum,
+		Size:         entry.Size,
+		ContentType:  ContentTypeOrDefault(entry.ContentType),
+	}
+	return ent
+}
+
+func addressTypeToProto(t AddressType) Entry_AddressType {
+	switch t {
+	case AddressTypeByPrefixDeprecated:
+		return Entry_BY_PREFIX_DEPRECATED
+	case AddressTypeRelative:
+		return Entry_RELATIVE
+	case AddressTypeFull:
+		return Entry_FULL
+	default:
+		panic(fmt.Sprintf("unknown address type: %d", t))
+	}
+}
+
+func addressTypeToCatalog(t Entry_AddressType) AddressType {
+	switch t {
+	case Entry_BY_PREFIX_DEPRECATED:
+		return AddressTypeByPrefixDeprecated
+	case Entry_RELATIVE:
+		return AddressTypeRelative
+	case Entry_FULL:
+		return AddressTypeFull
+	default:
+		panic(fmt.Sprintf("unknown address type: %d", t))
+	}
+}
+
+// EntryCondition adapts an Entry-level condition function to a graveler.ConditionFunc.
+// It converts graveler Values to Entries before applying the condition, enabling Entry-based +// validation logic (e.g., Object metadata checks) to work with graveler's conditional operations. +func EntryCondition(condition func(*Entry) error) graveler.ConditionFunc { + return func(currentValue *graveler.Value) error { + if currentValue == nil { + return condition(nil) + } + + currentEntry, err := ValueToEntry(currentValue) + if err != nil { + return err + } + + return condition(currentEntry) + } +} + +func (c *Catalog) CreateEntry(ctx context.Context, repositoryID string, branch string, entry DBEntry, opts ...graveler.SetOptionsFunc) error { + branchID := graveler.BranchID(branch) + ent := newEntryFromCatalogEntry(entry) + path := Path(entry.Path) + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + {Name: "branch", Value: branchID, Fn: graveler.ValidateBranchID}, + {Name: "path", Value: path, Fn: ValidatePath}, + }); err != nil { + return err + } + key := graveler.Key(path) + value, err := EntryToValue(ent) + if err != nil { + return err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return err + } + return c.Store.Set(ctx, repository, branchID, key, *value, opts...) +} + +func (c *Catalog) DeleteEntry(ctx context.Context, repositoryID string, branch string, path string, opts ...graveler.SetOptionsFunc) error { + branchID := graveler.BranchID(branch) + p := Path(path) + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + {Name: "branch", Value: branchID, Fn: graveler.ValidateBranchID}, + {Name: "path", Value: p, Fn: ValidatePath}, + }); err != nil { + return err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return err + } + key := graveler.Key(p) + return c.Store.Delete(ctx, repository, branchID, key, opts...) +} + +func (c *Catalog) DeleteEntries(ctx context.Context, repositoryID string, branch string, paths []string, opts ...graveler.SetOptionsFunc) error { + branchID := graveler.BranchID(branch) + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + {Name: "branch", Value: branchID, Fn: graveler.ValidateBranchID}, + }); err != nil { + return err + } + // validate path + for i, path := range paths { + p := Path(path) + if err := ValidatePath(p); err != nil { + return fmt.Errorf("argument path[%d]: %w", i, err) + } + } + + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return err + } + + keys := make([]graveler.Key, len(paths)) + for i := range paths { + keys[i] = graveler.Key(paths[i]) + } + return c.Store.DeleteBatch(ctx, repository, branchID, keys, opts...) 
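+	// A hedged usage sketch for EntryCondition (defined above): wrap an
+	// Entry-level check so it can run wherever a graveler.ConditionFunc is
+	// accepted; the error value below is illustrative only:
+	//
+	//	cond := EntryCondition(func(e *Entry) error {
+	//		if e != nil {
+	//			return fmt.Errorf("entry already exists")
+	//		}
+	//		return nil
+	//	})
+	//	_ = cond // pass wherever a graveler.ConditionFunc is expected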
+} + +func (c *Catalog) ListEntries(ctx context.Context, repositoryID string, reference string, prefix string, after string, delimiter string, limit int) ([]*DBEntry, bool, error) { + // normalize limit + if limit < 0 || limit > ListEntriesLimitMax { + limit = ListEntriesLimitMax + } + prefixPath := Path(prefix) + afterPath := Path(after) + delimiterPath := Path(delimiter) + refToList := graveler.Ref(reference) + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + {Name: "ref", Value: refToList, Fn: graveler.ValidateRef}, + {Name: "prefix", Value: prefixPath, Fn: ValidatePathOptional}, + {Name: "delimiter", Value: delimiterPath, Fn: ValidatePathOptional}, + }); err != nil { + return nil, false, err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return nil, false, err + } + iter, err := c.Store.List(ctx, repository, refToList, limit+1) + if err != nil { + return nil, false, err + } + it := NewEntryListingIterator(NewValueToEntryIterator(iter), prefixPath, delimiterPath) + defer it.Close() + + if afterPath != "" { + it.SeekGE(afterPath) + } + + var entries []*DBEntry + for it.Next() { + v := it.Value() + if v.Path == afterPath { + continue + } + entry := newCatalogEntryFromEntry(v.CommonPrefix, v.Path.String(), v.Entry) + entries = append(entries, &entry) + if len(entries) >= limit+1 { + break + } + } + if err := it.Err(); err != nil { + return nil, false, err + } + // trim result if needed and return has more + hasMore := false + if len(entries) > limit { + hasMore = true + entries = entries[:limit] + } + return entries, hasMore, nil +} + +func (c *Catalog) ResetEntry(ctx context.Context, repositoryID string, branch string, path string, opts ...graveler.SetOptionsFunc) error { + branchID := graveler.BranchID(branch) + entryPath := Path(path) + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + {Name: "branch", Value: branchID, Fn: graveler.ValidateBranchID}, + {Name: "path", Value: entryPath, Fn: ValidatePath}, + }); err != nil { + return err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return err + } + key := graveler.Key(entryPath) + return c.Store.ResetKey(ctx, repository, branchID, key, opts...) +} + +func (c *Catalog) ResetEntries(ctx context.Context, repositoryID string, branch string, prefix string, opts ...graveler.SetOptionsFunc) error { + branchID := graveler.BranchID(branch) + prefixPath := Path(prefix) + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + {Name: "branch", Value: branchID, Fn: graveler.ValidateBranchID}, + }); err != nil { + return err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return err + } + keyPrefix := graveler.Key(prefixPath) + return c.Store.ResetPrefix(ctx, repository, branchID, keyPrefix, opts...) 
+}
+
+func (c *Catalog) Commit(ctx context.Context, repositoryID, branch, message, committer string, metadata Metadata, date *int64, sourceMetarange *string, allowEmpty bool, opts ...graveler.SetOptionsFunc) (*CommitLog, error) {
+	branchID := graveler.BranchID(branch)
+	if err := validator.Validate([]validator.ValidateArg{
+		{Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID},
+		{Name: "branch", Value: branchID, Fn: graveler.ValidateBranchID},
+	}); err != nil {
+		return nil, err
+	}
+
+	repository, err := c.getRepository(ctx, repositoryID)
+	if err != nil {
+		return nil, err
+	}
+
+	p := graveler.CommitParams{
+		Committer:  committer,
+		Message:    message,
+		Date:       date,
+		Metadata:   map[string]string(metadata),
+		AllowEmpty: allowEmpty,
+	}
+	if sourceMetarange != nil {
+		x := graveler.MetaRangeID(*sourceMetarange)
+		p.SourceMetaRange = &x
+	}
+	commitID, err := c.Store.Commit(ctx, repository, branchID, p, opts...)
+	if err != nil {
+		return nil, err
+	}
+	catalogCommitLog := &CommitLog{
+		Reference: commitID.String(),
+		Committer: committer,
+		Message:   message,
+		Metadata:  metadata,
+	}
+	// in order to return the commit log we need the commit creation time and parents
+	commit, err := c.Store.GetCommit(ctx, repository, commitID)
+	if err != nil {
+		return catalogCommitLog, graveler.ErrCommitNotFound
+	}
+	for _, parent := range commit.Parents {
+		catalogCommitLog.Parents = append(catalogCommitLog.Parents, parent.String())
+	}
+	catalogCommitLog.CreationDate = commit.CreationDate.UTC()
+	catalogCommitLog.MetaRangeID = string(commit.MetaRangeID)
+	catalogCommitLog.Version = CommitVersion(commit.Version)
+	catalogCommitLog.Generation = CommitGeneration(commit.Generation)
+	return catalogCommitLog, nil
+}
+
+func (c *Catalog) CreateCommitRecord(ctx context.Context, repositoryID string, commitID string, version int, committer string, message string, metaRangeID string, creationDate int64, parents []string, metadata map[string]string, generation int32, opts ...graveler.SetOptionsFunc) error {
+	repository, err := c.getRepository(ctx, repositoryID)
+	if err != nil {
+		return err
+	}
+	commitParents := make([]graveler.CommitID, len(parents))
+	for i, parent := range parents {
+		commitParents[i] = graveler.CommitID(parent)
+	}
+	commit := graveler.Commit{
+		// cast from int to graveler.CommitVersion - no information loss danger
+		Version:      graveler.CommitVersion(version), //nolint:gosec
+		Committer:    committer,
+		Message:      message,
+		MetaRangeID:  graveler.MetaRangeID(metaRangeID),
+		CreationDate: time.Unix(creationDate, 0).UTC(),
+		Parents:      commitParents,
+		Metadata:     metadata,
+		// generation is already an int32; the conversion to graveler.CommitGeneration is lossless
+		Generation: graveler.CommitGeneration(generation),
+	}
+	return c.Store.CreateCommitRecord(ctx, repository, graveler.CommitID(commitID), commit, opts...)
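+	// A hedged example of calling the higher-level Commit API above
+	// (repository, branch, and metadata values are illustrative):
+	//
+	//	log, err := c.Commit(ctx, "my-repo", "main", "add data", "user@example.com",
+	//		Metadata{"source": "import"}, nil, nil, false)
+	//	if err != nil {
+	//		// handle the error
+	//	}
+	//	_ = log.Reference // the new commit ID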
+} + +func (c *Catalog) GetCommit(ctx context.Context, repositoryID string, reference string) (*CommitLog, error) { + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + }); err != nil { + return nil, err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return nil, err + } + commitID, err := c.dereferenceCommitID(ctx, repository, graveler.Ref(reference)) + if err != nil { + return nil, err + } + commit, err := c.Store.GetCommit(ctx, repository, commitID) + if err != nil { + return nil, err + } + catalogCommitLog := &CommitLog{ + Reference: commitID.String(), + Committer: commit.Committer, + Message: commit.Message, + CreationDate: commit.CreationDate, + MetaRangeID: string(commit.MetaRangeID), + Metadata: Metadata(commit.Metadata), + Generation: CommitGeneration(commit.Generation), + Version: CommitVersion(commit.Version), + Parents: []string{}, + } + for _, parent := range commit.Parents { + catalogCommitLog.Parents = append(catalogCommitLog.Parents, string(parent)) + } + return catalogCommitLog, nil +} + +func (c *Catalog) ListCommits(ctx context.Context, repositoryID string, ref string, params LogParams) ([]*CommitLog, bool, error) { + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + {Name: "ref", Value: graveler.Ref(ref), Fn: graveler.ValidateRef}, + }); err != nil { + return nil, false, err + } + + // disabling batching for this flow. See #3935 for more details + ctx = context.WithValue(ctx, batch.SkipBatchContextKey, struct{}{}) + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return nil, false, err + } + + commitID, err := c.dereferenceCommitID(ctx, repository, graveler.Ref(ref)) + if err != nil { + return nil, false, fmt.Errorf("ref: %w", err) + } + if params.StopAt != "" { + stopAtCommitID, err := c.dereferenceCommitID(ctx, repository, graveler.Ref(params.StopAt)) + if err != nil { + return nil, false, fmt.Errorf("stop_at: %w", err) + } + params.StopAt = stopAtCommitID.String() + } + it, err := c.Store.Log(ctx, repository, commitID, params.FirstParent, params.Since) + if err != nil { + return nil, false, err + } + defer it.Close() + // skip until 'fromReference' if needed + if params.FromReference != "" { + fromCommitID, err := c.dereferenceCommitID(ctx, repository, graveler.Ref(params.FromReference)) + if err != nil { + return nil, false, fmt.Errorf("from ref: %w", err) + } + for it.Next() { + if it.Value().CommitID == fromCommitID { + break + } + } + if err := it.Err(); err != nil { + return nil, false, err + } + } + + paths := params.PathList + if len(paths) == 0 { + return listCommitsWithoutPaths(it, params) + } + + return c.listCommitsWithPaths(ctx, repository, it, params) +} + +func (c *Catalog) listCommitsWithPaths(ctx context.Context, repository *graveler.RepositoryRecord, it graveler.CommitIterator, params LogParams) ([]*CommitLog, bool, error) { + // verify we are not listing commits without any paths + if len(params.PathList) == 0 { + return nil, false, fmt.Errorf("%w: list commits without paths", graveler.ErrInvalid) + } + + // commit/key to value cache - helps when fetching the same commit/key while processing parent commits + const commitLogCacheSize = 1024 * 5 + commitCache, err := lru.New(commitLogCacheSize) + if err != nil { + return nil, false, err + } + + const numReadResults = 3 + done := atomic.NewBool(false) + + // Shared workPool for 
+	// the workers; two management goroutines create the work and collect the results
+	paths := params.PathList
+
+	// iterate over the commit log and push work into the work channel
+	outCh := make(chan *commitLogJob, numReadResults)
+	var mgmtGroup multierror.Group
+	mgmtGroup.Go(func() error {
+		defer close(outCh)
+
+		// workers check whether each commit record touches any of the paths
+		workerGroup := c.workPool.NewGroupContext(ctx)
+		ctx := workerGroup.Context()
+
+		current := 0
+	readLoop:
+		for it.Next() && !done.Load() {
+			// if the context was canceled we stop processing
+			select {
+			case <-ctx.Done():
+				break readLoop
+			default:
+			}
+
+			commitRecord := it.Value()
+			// skip merge commits
+			if len(commitRecord.Parents) != NumberOfParentsOfNonMergeCommit {
+				continue
+			}
+
+			// submit work to the pool
+			commitOrder := current
+			current++
+			workerGroup.SubmitErr(func() error {
+				pathInCommit, err := c.checkPathListInCommit(ctx, repository, commitRecord, paths, commitCache)
+				if err != nil {
+					return err
+				}
+				job := &commitLogJob{order: commitOrder}
+				if pathInCommit {
+					job.log = CommitRecordToLog(commitRecord)
+				}
+				outCh <- job
+				return nil
+			})
+		}
+		// wait until the workers are done or the first non-nil error is returned from a worker
+		if err := workerGroup.Wait(); err != nil {
+			return err
+		}
+		return it.Err()
+	})
+
+	// drain the output channel, using a heap to forward results to the results channel in order
+	resultCh := make(chan *CommitLog, numReadResults)
+	var jobsHeap commitLogJobHeap
+	mgmtGroup.Go(func() error {
+		defer close(resultCh)
+		// read results and emit them in order using the heap
+		current := 0
+		for result := range outCh {
+			heap.Push(&jobsHeap, result)
+			for len(jobsHeap) > 0 && jobsHeap[0].order == current {
+				job := heap.Pop(&jobsHeap).(*commitLogJob)
+				if job.log != nil {
+					resultCh <- job.log
+				}
+				current++
+			}
+		}
+		// flush the heap content when no more results arrive on the output channel
+		for len(jobsHeap) > 0 {
+			job := heap.Pop(&jobsHeap).(*commitLogJob)
+			if job.log != nil {
+				resultCh <- job.log
+			}
+		}
+		return nil
+	})
+
+	// collect enough results; in case of an error the results channel will be closed
+	commits := make([]*CommitLog, 0)
+	for res := range resultCh {
+		commits = append(commits, res)
+		if foundAllCommits(params, commits) {
+			// we either collected the requested number of commits or reached
+			// the StopAt commit - we have what we need
+			break
+		}
+	}
+	// mark that we stopped processing results and drain whatever remains
+	done.Store(true)
+	for range resultCh {
+		// drain the results channel
+	}
+
+	// wait until the background work is completed
+	if err := mgmtGroup.Wait().ErrorOrNil(); err != nil {
+		return nil, false, err
+	}
+	return logCommitsResult(commits, params)
+}
+
+func listCommitsWithoutPaths(it graveler.CommitIterator, params LogParams) ([]*CommitLog, bool, error) {
+	// no need to parallelize here - just read the commits
+	var commits []*CommitLog
+	for it.Next() {
+		val := it.Value()
+
+		commits = append(commits, CommitRecordToLog(val))
+		if foundAllCommits(params, commits) {
+			// we either collected the requested number of commits or reached
+			// the StopAt commit - we have what we need
+			break
+		}
+	}
+	if it.Err() != nil {
+		return nil, false, it.Err()
+	}
+
+	return logCommitsResult(commits, params)
+}
+
+func foundAllCommits(params LogParams, commits []*CommitLog) bool {
+	return (params.Limit && len(commits) >= params.Amount) ||
+		len(commits) >= params.Amount+1 || (len(commits) > 0 && commits[len(commits)-1].Reference == params.StopAt)
+}
+
+func CommitRecordToLog(val *graveler.CommitRecord) *CommitLog {
+	if val == nil {
+		return nil
+	}
+	commit := &CommitLog{
+		Reference:    val.CommitID.String(),
+		Committer:    val.Committer,
+		Message:      val.Message,
+		CreationDate: val.CreationDate,
+		Metadata:     map[string]string(val.Metadata),
+		MetaRangeID:  string(val.MetaRangeID),
+		Parents:      make([]string, 0, len(val.Parents)),
+		Version:      CommitVersion(val.Version),
+		Generation:   CommitGeneration(val.Generation),
+	}
+	for _, parent := range val.Parents {
+		commit.Parents = append(commit.Parents, parent.String())
+	}
+	return commit
+}
+
+func logCommitsResult(commits []*CommitLog, params LogParams) ([]*CommitLog, bool, error) {
+	hasMore := false
+	if len(commits) > params.Amount {
+		hasMore = true
+		commits = commits[:params.Amount]
+	}
+	return commits, hasMore, nil
+}
+
+type commitLogJob struct {
+	order int
+	log   *CommitLog
+}
+
+// commitLogJobHeap is a min-heap of commit log jobs keyed by order. The minimum element is the root, at index 0.
+type commitLogJobHeap []*commitLogJob
+
+//goland:noinspection GoMixedReceiverTypes
+func (h commitLogJobHeap) Len() int { return len(h) }
+
+//goland:noinspection GoMixedReceiverTypes
+func (h commitLogJobHeap) Less(i, j int) bool { return h[i].order < h[j].order }
+
+//goland:noinspection GoMixedReceiverTypes
+func (h commitLogJobHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
+
+//goland:noinspection GoMixedReceiverTypes
+func (h *commitLogJobHeap) Push(x interface{}) {
+	*h = append(*h, x.(*commitLogJob))
+}
+
+//goland:noinspection GoMixedReceiverTypes
+func (h *commitLogJobHeap) Pop() interface{} {
+	old := *h
+	n := len(old)
+	x := old[n-1]
+	*h = old[0 : n-1]
+	return x
+}
+
+// checkPathListInCommit checks whether the given commit contains changes to a list of paths.
+// It searches for the paths in the diff between the commit and its parent, and does so only
+// for commits that have a single parent (not merge commits).
+func (c *Catalog) checkPathListInCommit(ctx context.Context, repository *graveler.RepositoryRecord, commit *graveler.CommitRecord, pathList []PathRecord, commitCache *lru.Cache) (bool, error) {
+	left := commit.Parents[0]
+	right := commit.CommitID
+
+	// diff iterator - opened lazily, only if we have a prefix to match
+	var diffIter graveler.DiffIterator
+	defer func() {
+		if diffIter != nil {
+			diffIter.Close()
+		}
+	}()
+
+	// check each path
+	for _, path := range pathList {
+		key := graveler.Key(path.Path)
+		if path.IsPrefix {
+			// get a diff iterator if needed for the prefix lookup
+			if diffIter == nil {
+				var err error
+				diffIter, err = c.Store.Diff(ctx, repository, graveler.Ref(left), graveler.Ref(right))
+				if err != nil {
+					return false, err
+				}
+			}
+			diffIter.SeekGE(key)
+			if diffIter.Next() {
+				diffKey := diffIter.Value().Key
+				if bytes.HasPrefix(diffKey, key) {
+					return true, nil
+				}
+			}
+			if err := diffIter.Err(); err != nil {
+				return false, err
+			}
+		} else {
+			// check if the key exists in both commits.
+			// First, check if we can compare the ranges.
+			// If the ranges match, or neither commit has a range for the key, we can skip the key lookup.
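+			// For illustration: if both commits resolve key "a/b" to the same
+			// range R1, the stored value must be identical and the lookup is
+			// skipped; only when the range IDs differ do we fetch both values
+			// and compare identities below.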
+			lRangeID, err := c.Store.GetRangeIDByKey(ctx, repository, left, key)
+			lFound := !errors.Is(err, graveler.ErrNotFound)
+			if err != nil && lFound {
+				return false, err
+			}
+			rRangeID, err := c.Store.GetRangeIDByKey(ctx, repository, right, key)
+			rFound := !errors.Is(err, graveler.ErrNotFound)
+			if err != nil && rFound {
+				return false, err
+			}
+
+			if !lFound && !rFound {
+				// no range matching the key exists in either commit
+				continue
+			}
+			if lRangeID == rRangeID {
+				// it's the same range - the value of the key is identical in both
+				continue
+			}
+
+			// the key possibly exists in both commits, but the range IDs differ - the value needs to be looked at
+			leftObject, err := storeGetCache(ctx, c.Store, repository, left, key, commitCache)
+			if err != nil {
+				return false, err
+			}
+			rightObject, err := storeGetCache(ctx, c.Store, repository, right, key, commitCache)
+			if err != nil {
+				return false, err
+			}
+
+			// if left or right is missing, or they don't hold the same identity,
+			// we want the commit log
+			if leftObject == nil && rightObject != nil ||
+				leftObject != nil && rightObject == nil ||
+				(leftObject != nil && rightObject != nil && !bytes.Equal(leftObject.Identity, rightObject.Identity)) {
+				return true, nil
+			}
+		}
+	}
+	return false, nil
+}
+
+// storeGetCache is a helper that calls Get and caches the returned value in 'commitCache'. It is useful when calling Get
+// on a large set of commits for the same object, since repeated lookups can be served from the cache
+func storeGetCache(ctx context.Context, store graveler.KeyValueStore, repository *graveler.RepositoryRecord, commitID graveler.CommitID, key graveler.Key, commitCache *lru.Cache) (*graveler.Value, error) {
+	cacheKey := fmt.Sprintf("%s/%s", commitID, key)
+	if o, found := commitCache.Get(cacheKey); found {
+		return o.(*graveler.Value), nil
+	}
+	o, err := store.GetByCommitID(ctx, repository, commitID, key)
+	if err != nil && !errors.Is(err, graveler.ErrNotFound) {
+		return nil, err
+	}
+	_ = commitCache.Add(cacheKey, o)
+	return o, nil
+}
+
+func (c *Catalog) Revert(ctx context.Context, repositoryID string, branch string, params RevertParams, opts ...graveler.SetOptionsFunc) error {
+	branchID := graveler.BranchID(branch)
+	reference := graveler.Ref(params.Reference)
+	commitParams := graveler.CommitParams{
+		Committer:  params.Committer,
+		Message:    fmt.Sprintf("Revert %s", params.Reference),
+		AllowEmpty: params.AllowEmpty,
+	}
+
+	parentNumber := params.ParentNumber
+	if err := validator.Validate([]validator.ValidateArg{
+		{Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID},
+		{Name: "branch", Value: branchID, Fn: graveler.ValidateBranchID},
+		{Name: "ref", Value: reference, Fn: graveler.ValidateRef},
+		{Name: "committer", Value: commitParams.Committer, Fn: validator.ValidateRequiredString},
+		{Name: "message", Value: commitParams.Message, Fn: validator.ValidateRequiredString},
+		{Name: "parentNumber", Value: parentNumber, Fn: validator.ValidateNonNegativeInt},
+	}); err != nil {
+		return err
+	}
+	repository, err := c.getRepository(ctx, repositoryID)
+	if err != nil {
+		return err
+	}
+	_, err = c.Store.Revert(ctx, repository, branchID, reference, parentNumber, commitParams, params.CommitOverrides, opts...)
+ return err +} + +func (c *Catalog) CherryPick(ctx context.Context, repositoryID string, branch string, params CherryPickParams, opts ...graveler.SetOptionsFunc) (*CommitLog, error) { + branchID := graveler.BranchID(branch) + reference := graveler.Ref(params.Reference) + parentNumber := params.ParentNumber + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + {Name: "branch", Value: branchID, Fn: graveler.ValidateBranchID}, + {Name: "ref", Value: reference, Fn: graveler.ValidateRef}, + {Name: "committer", Value: params.Committer, Fn: validator.ValidateRequiredString}, + {Name: "parentNumber", Value: parentNumber, Fn: validator.ValidateNilOrPositiveInt}, + }); err != nil { + return nil, err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return nil, err + } + + commitID, err := c.Store.CherryPick(ctx, repository, branchID, reference, parentNumber, params.Committer, params.CommitOverrides, opts...) + if err != nil { + return nil, err + } + + // in order to return commit log we need the commit creation time and parents + commit, err := c.Store.GetCommit(ctx, repository, commitID) + if err != nil { + return nil, graveler.ErrCommitNotFound + } + + catalogCommitLog := &CommitLog{ + Reference: commitID.String(), + Committer: params.Committer, + Message: commit.Message, + CreationDate: commit.CreationDate.UTC(), + MetaRangeID: string(commit.MetaRangeID), + Metadata: Metadata(commit.Metadata), + Version: CommitVersion(commit.Version), + Generation: CommitGeneration(commit.Generation), + } + for _, parent := range commit.Parents { + catalogCommitLog.Parents = append(catalogCommitLog.Parents, parent.String()) + } + return catalogCommitLog, nil +} + +func (c *Catalog) Diff(ctx context.Context, repositoryID string, leftReference string, rightReference string, params DiffParams) (Differences, bool, error) { + left := graveler.Ref(leftReference) + right := graveler.Ref(rightReference) + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + {Name: "left", Value: left, Fn: graveler.ValidateRef}, + {Name: "right", Value: right, Fn: graveler.ValidateRef}, + }); err != nil { + return nil, false, err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return nil, false, err + } + + iter, err := c.Store.Diff(ctx, repository, left, right) + if err != nil { + return nil, false, err + } + it := NewEntryDiffIterator(iter) + defer it.Close() + return listDiffHelper(it, params.Prefix, params.Delimiter, params.Limit, params.After) +} + +func (c *Catalog) Compare(ctx context.Context, repositoryID, leftReference string, rightReference string, params DiffParams) (Differences, bool, error) { + left := graveler.Ref(leftReference) + right := graveler.Ref(rightReference) + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repositoryName", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + {Name: "left", Value: left, Fn: graveler.ValidateRef}, + {Name: "right", Value: right, Fn: graveler.ValidateRef}, + }); err != nil { + return nil, false, err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return nil, false, err + } + + iter, err := c.Store.Compare(ctx, repository, left, right) + if err != nil { + return nil, false, err + } + it := NewEntryDiffIterator(iter) + defer it.Close() + return listDiffHelper(it, params.Prefix, params.Delimiter, 
+		params.Limit, params.After)
+}
+
+func (c *Catalog) DiffUncommitted(ctx context.Context, repositoryID, branch, prefix, delimiter string, limit int, after string) (Differences, bool, error) {
+	branchID := graveler.BranchID(branch)
+	if err := validator.Validate([]validator.ValidateArg{
+		{Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID},
+		{Name: "branch", Value: branchID, Fn: graveler.ValidateBranchID},
+	}); err != nil {
+		return nil, false, err
+	}
+	repository, err := c.getRepository(ctx, repositoryID)
+	if err != nil {
+		return nil, false, err
+	}
+
+	iter, err := c.Store.DiffUncommitted(ctx, repository, branchID)
+	if err != nil {
+		return nil, false, err
+	}
+	it := NewEntryDiffIterator(iter)
+	defer it.Close()
+	return listDiffHelper(it, prefix, delimiter, limit, after)
+}
+
+// GetStartPos returns the key from which SeekGE should start iterating in order to cover all
+// keys that start with 'prefix' and come after 'after', taking 'delimiter' into account
+func GetStartPos(prefix, after, delimiter string) string {
+	if after == "" {
+		// whether we have a delimiter or not, if after is not set, start at prefix
+		return prefix
+	}
+	if after < prefix {
+		// if after is before prefix, no point in starting there, start with prefix instead
+		return prefix
+	}
+	if delimiter == "" {
+		// no delimiter, continue from after
+		return after
+	}
+	// there is a delimiter and after is not empty, start at the next common prefix after "after"
+	return string(graveler.UpperBoundForPrefix([]byte(after)))
+}
+
+const commonPrefixSplitParts = 2
+
+func listDiffHelper(it EntryDiffIterator, prefix, delimiter string, limit int, after string) (Differences, bool, error) {
+	if limit < 0 || limit > DiffLimitMax {
+		limit = DiffLimitMax
+	}
+	seekStart := GetStartPos(prefix, after, delimiter)
+	it.SeekGE(Path(seekStart))
+
+	diffs := make(Differences, 0)
+	for it.Next() {
+		v := it.Value()
+		path := string(v.Path)
+
+		if path == after {
+			continue // emulate SeekGT using SeekGE
+		}
+		if !strings.HasPrefix(path, prefix) {
+			break // we only want things that start with prefix, apparently there are none left
+		}
+
+		if delimiter != "" {
+			// common prefix logic:
+			// for every path, after trimming "prefix", take the string up to and including the delimiter.
+			// if the trimmed path contains no delimiter, add that object as-is;
+			// if it is only part of the name, add it as a "common prefix" entry
+			// and skip to the next record following all those starting with this prefix
+			pathRelativeToPrefix := strings.TrimPrefix(path, prefix)
+			// we want the common prefix and the remainder
+			parts := strings.SplitN(pathRelativeToPrefix, delimiter, commonPrefixSplitParts)
+			if len(parts) == commonPrefixSplitParts {
+				// a common prefix exists!
+				commonPrefix := prefix + parts[0] + delimiter
+				diffs = append(diffs, Difference{
+					DBEntry: NewDBEntryBuilder().CommonLevel(true).Path(commonPrefix).Build(),
+					// We always return "changed" for common prefixes. Seeing if a common prefix is e.g. deleted is O(N)
+					Type: DifferenceTypePrefixChanged,
+				})
+				if len(diffs) >= limit+1 {
+					break // collected enough results
+				}
+
+				// let's keep collecting records.
+
+const commonPrefixSplitParts = 2
+
+func listDiffHelper(it EntryDiffIterator, prefix, delimiter string, limit int, after string) (Differences, bool, error) {
+    if limit < 0 || limit > DiffLimitMax {
+        limit = DiffLimitMax
+    }
+    seekStart := GetStartPos(prefix, after, delimiter)
+    it.SeekGE(Path(seekStart))
+
+    diffs := make(Differences, 0)
+    for it.Next() {
+        v := it.Value()
+        path := string(v.Path)
+
+        if path == after {
+            continue // emulate SeekGT using SeekGE
+        }
+        if !strings.HasPrefix(path, prefix) {
+            break // we only want things that start with prefix, apparently there are none left
+        }
+
+        if delimiter != "" {
+            // common prefix logic:
+            // for every path, after trimming "prefix", take the string up to and including the delimiter.
+            // if the remainder contains no delimiter, add that object as is;
+            // if only part of the name precedes the delimiter, add it as a "common prefix" entry
+            // and skip to the next record following all those starting with this prefix
+            pathRelativeToPrefix := strings.TrimPrefix(path, prefix)
+            // we want the common prefix and the remainder
+            parts := strings.SplitN(pathRelativeToPrefix, delimiter, commonPrefixSplitParts)
+            if len(parts) == commonPrefixSplitParts {
+                // a common prefix exists!
+                commonPrefix := prefix + parts[0] + delimiter
+                diffs = append(diffs, Difference{
+                    DBEntry: NewDBEntryBuilder().CommonLevel(true).Path(commonPrefix).Build(),
+                    // We always return "changed" for common prefixes. Seeing if a common prefix is e.g. deleted is O(N)
+                    Type: DifferenceTypePrefixChanged,
+                })
+                if len(diffs) >= limit+1 {
+                    break // collected enough results
+                }
+
+                // keep collecting records: seek to the next record that doesn't
+                // start with this common prefix
+                it.SeekGE(Path(graveler.UpperBoundForPrefix([]byte(commonPrefix))))
+                continue
+            }
+        }
+
+        // got a regular entry
+        diff, err := newDifferenceFromEntryDiff(v)
+        if err != nil {
+            return nil, false, fmt.Errorf("entry diff: %w", err)
+        }
+        diffs = append(diffs, diff)
+        if len(diffs) >= limit+1 {
+            break
+        }
+    }
+    if err := it.Err(); err != nil {
+        return nil, false, err
+    }
+    hasMore := false
+    if len(diffs) > limit {
+        hasMore = true
+        diffs = diffs[:limit]
+    }
+    return diffs, hasMore, nil
+}
+
+func (c *Catalog) Merge(ctx context.Context, repositoryID string, destinationBranch string, sourceRef string, committer string, message string, metadata Metadata, strategy string, opts ...graveler.SetOptionsFunc) (string, error) {
+    destination := graveler.BranchID(destinationBranch)
+    source := graveler.Ref(sourceRef)
+    meta := graveler.Metadata(metadata)
+    commitParams := graveler.CommitParams{
+        Committer: committer,
+        Message:   message,
+        Metadata:  meta,
+    }
+    if commitParams.Message == "" {
+        commitParams.Message = fmt.Sprintf("Merge '%s' into '%s'", source, destination)
+    }
+    if err := validator.Validate([]validator.ValidateArg{
+        {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID},
+        {Name: "destination", Value: destination, Fn: graveler.ValidateBranchID},
+        {Name: "source", Value: source, Fn: graveler.ValidateRef},
+        {Name: "committer", Value: commitParams.Committer, Fn: validator.ValidateRequiredString},
+        {Name: "message", Value: commitParams.Message, Fn: validator.ValidateRequiredString},
+        {Name: "strategy", Value: strategy, Fn: graveler.ValidateRequiredStrategy},
+    }); err != nil {
+        return "", err
+    }
+
+    // disabling batching for this flow. See #3935 for more details
+    ctx = context.WithValue(ctx, batch.SkipBatchContextKey, struct{}{})
+
+    repository, err := c.getRepository(ctx, repositoryID)
+    if err != nil {
+        return "", err
+    }
+
+    commitID, err := c.Store.Merge(ctx, repository, destination, source, commitParams, strategy, opts...)
+ if err != nil { + return "", err + } + return commitID.String(), nil +} + +func (c *Catalog) FindMergeBase(ctx context.Context, repositoryID string, destinationRef string, sourceRef string) (string, string, string, error) { + destination := graveler.Ref(destinationRef) + source := graveler.Ref(sourceRef) + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + {Name: "destination", Value: destination, Fn: graveler.ValidateRef}, + {Name: "source", Value: source, Fn: graveler.ValidateRef}, + }); err != nil { + return "", "", "", err + } + + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return "", "", "", err + } + + fromCommit, toCommit, baseCommit, err := c.Store.FindMergeBase(ctx, repository, destination, source) + if err != nil { + return "", "", "", err + } + return fromCommit.CommitID.String(), toCommit.CommitID.String(), c.addressProvider.ContentAddress(baseCommit), nil +} + +func (c *Catalog) DumpRepositorySubmit(ctx context.Context, repositoryID string) (string, error) { + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return "", err + } + + taskStatus := &RepositoryDumpStatus{} + taskSteps := []TaskStep{ + { + Name: "dump commits", + Func: func(ctx context.Context) error { + commitsMetaRangeID, err := c.Store.DumpCommits(ctx, repository) + if err != nil { + return err + } + taskStatus.Info = &RepositoryDumpInfo{ + CommitsMetarangeId: string(*commitsMetaRangeID), + } + return nil + }, + }, + { + Name: "dump branches", + Func: func(ctx context.Context) error { + branchesMetaRangeID, err := c.Store.DumpBranches(ctx, repository) + if err != nil { + return err + } + taskStatus.Info.BranchesMetarangeId = string(*branchesMetaRangeID) + return nil + }, + }, + { + Name: "dump tags", + Func: func(ctx context.Context) error { + tagsMetaRangeID, err := c.Store.DumpTags(ctx, repository) + if err != nil { + return err + } + taskStatus.Info.TagsMetarangeId = string(*tagsMetaRangeID) + return nil + }, + }, + } + + // create refs dump task and update initial status. 
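+    // A sketch of the intended caller flow (names from this file; polling cadence is up to the caller):
+    //   taskID, _ := c.DumpRepositorySubmit(ctx, repositoryID)          // returns immediately
+    //   status, _ := c.DumpRepositoryStatus(ctx, repositoryID, taskID)  // poll until status.Task.Done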
+    taskID := NewTaskID(DumpRefsTaskIDPrefix)
+    err = c.RunBackgroundTaskSteps(repository, taskID, taskSteps, taskStatus)
+    if err != nil {
+        return "", err
+    }
+    return taskID, nil
+}
+
+func (c *Catalog) DumpRepositoryStatus(ctx context.Context, repositoryID string, id string) (*RepositoryDumpStatus, error) {
+    repository, err := c.getRepository(ctx, repositoryID)
+    if err != nil {
+        return nil, err
+    }
+    if !IsTaskID(DumpRefsTaskIDPrefix, id) {
+        return nil, graveler.ErrNotFound
+    }
+
+    var taskStatus RepositoryDumpStatus
+    err = GetTaskStatus(ctx, c.KVStore, repository, id, &taskStatus)
+    if err != nil {
+        return nil, err
+    }
+    return &taskStatus, nil
+}
+
+func (c *Catalog) RestoreRepositorySubmit(ctx context.Context, repositoryID string, info *RepositoryDumpInfo, opts ...graveler.SetOptionsFunc) (string, error) {
+    repository, err := c.getRepository(ctx, repositoryID)
+    if err != nil {
+        return "", err
+    }
+
+    // verify bare repository - no commits
+    _, _, err = c.ListCommits(ctx, repository.RepositoryID.String(), repository.DefaultBranchID.String(), LogParams{
+        Amount: 1,
+        Limit:  true,
+    })
+    if !errors.Is(err, graveler.ErrNotFound) {
+        return "", ErrNonEmptyRepository
+    }
+
+    // create refs restore task and update initial status
+    taskStatus := &RepositoryRestoreStatus{}
+    taskSteps := []TaskStep{
+        {
+            Name: "load commits",
+            Func: func(ctx context.Context) error {
+                return c.Store.LoadCommits(ctx, repository, graveler.MetaRangeID(info.CommitsMetarangeId), opts...)
+            },
+        },
+        {
+            Name: "load branches",
+            Func: func(ctx context.Context) error {
+                return c.Store.LoadBranches(ctx, repository, graveler.MetaRangeID(info.BranchesMetarangeId), opts...)
+            },
+        },
+        {
+            Name: "load tags",
+            Func: func(ctx context.Context) error {
+                return c.Store.LoadTags(ctx, repository, graveler.MetaRangeID(info.TagsMetarangeId), opts...)
+            },
+        },
+    }
+    taskID := NewTaskID(RestoreRefsTaskIDPrefix)
+    if err := c.RunBackgroundTaskSteps(repository, taskID, taskSteps, taskStatus); err != nil {
+        return "", err
+    }
+    return taskID, nil
+}
+
+func (c *Catalog) RestoreRepositoryStatus(ctx context.Context, repositoryID string, id string) (*RepositoryRestoreStatus, error) {
+    repository, err := c.getRepository(ctx, repositoryID)
+    if err != nil {
+        return nil, err
+    }
+    if !IsTaskID(RestoreRefsTaskIDPrefix, id) {
+        return nil, graveler.ErrNotFound
+    }
+
+    var status RepositoryRestoreStatus
+    err = GetTaskStatus(ctx, c.KVStore, repository, id, &status)
+    if err != nil {
+        return nil, err
+    }
+    return &status, nil
+}
+
+// RunBackgroundTaskSteps fills the 'Task' field of the provided status message and runs the given
+// steps in the background. The task status is persisted before the first step and again after each
+// step; the task is marked as done after the last step, or earlier if a step fails.
+func (c *Catalog) RunBackgroundTaskSteps(repository *graveler.RepositoryRecord, taskID string, steps []TaskStep, taskStatus protoreflect.ProtoMessage) error {
+    // Validate that the API error callback is configured; it is required to classify step errors
+    if c.APIErrorCB == nil {
+        return ErrAPIErrorCBNotSet
+    }
+
+    // Allocate Task and set it on the taskStatus's 'Task' field.
+    // We continue to update this field while running each step.
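+    // (Reflection is used below because each concrete status message, e.g. RepositoryDumpStatus or
+    // RepositoryRestoreStatus, declares its own typed 'Task' field rather than a shared interface.)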
+    // If the task field in the common Protobuf message is changed, we need to update the field name here as well.
+    task := &Task{
+        Id:        taskID,
+        UpdatedAt: timestamppb.Now(),
+    }
+    reflect.ValueOf(taskStatus).Elem().FieldByName("Task").Set(reflect.ValueOf(task))
+
+    // use a background context: the request context may already be canceled by the time the submitted task runs
+    ctx := context.Background()
+
+    // persist the initial task status before running the steps in the background
+    if err := UpdateTaskStatus(ctx, c.KVStore, repository, taskID, taskStatus); err != nil {
+        return err
+    }
+
+    log := c.log(ctx).WithFields(logging.Fields{"task_id": taskID, "repository": repository.RepositoryID})
+    c.workPool.Submit(func() {
+        for stepIdx, step := range steps {
+            // call the step function
+            err := step.Func(ctx)
+            // update the task part of the status
+            task.UpdatedAt = timestamppb.Now()
+            if err != nil {
+                log.WithError(err).WithField("step", step.Name).Error("Catalog background task step failed")
+                task.Done = true
+                // Classify the error using the API error callback and record the resulting status
+                // code and message on the task, before the original error is lost when the status
+                // is stored in protobuf.
+                c.APIErrorCB.HandleAPIError(ctx, nil, nil, err, SetTaskStatusCodeAndError(task))
+            } else if stepIdx == len(steps)-1 {
+                task.Done = true
+            }
+
+            // update task status
+            if err := UpdateTaskStatus(ctx, c.KVStore, repository, taskID, taskStatus); err != nil {
+                log.WithError(err).WithField("step", step.Name).Error("Catalog failed to update task status")
+            }
+
+            // stop once the task is marked done, either after the last step or on a failure
+            if task.Done {
+                break
+            }
+        }
+    })
+    return nil
+}
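+
+// A minimal sketch of how the submit functions above drive RunBackgroundTaskSteps
+// (names are from this file; the single no-op step is illustrative):
+//
+//	steps := []TaskStep{{Name: "noop", Func: func(ctx context.Context) error { return nil }}}
+//	taskID := NewTaskID(DumpRefsTaskIDPrefix)
+//	err := c.RunBackgroundTaskSteps(repository, taskID, steps, &RepositoryDumpStatus{})
+//	// callers then poll, e.g. via DumpRepositoryStatus, until the stored Task has Done set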
+
+// deleteRepositoryExpiredTasks deletes all expired tasks for the given repository
+func (c *Catalog) deleteRepositoryExpiredTasks(ctx context.Context, repo *graveler.RepositoryRecord) error {
+    // new scan iterator to iterate over all tasks
+    repoPartition := graveler.RepoPartition(repo)
+    it, err := kv.NewPrimaryIterator(ctx, c.KVStoreLimited, (&TaskMsg{}).ProtoReflect().Type(),
+        repoPartition, []byte(TaskPath("")), kv.IteratorOptionsFrom([]byte("")))
+    if err != nil {
+        return err
+    }
+    defer it.Close()
+
+    // iterate over all tasks and delete expired ones
+    for it.Next() {
+        ent := it.Entry()
+        msg := ent.Value.(*TaskMsg)
+        if msg.Task == nil {
+            continue
+        }
+        if time.Since(msg.Task.UpdatedAt.AsTime()) < TaskExpiryTime {
+            continue
+        }
+        err := c.KVStoreLimited.Delete(ctx, []byte(repoPartition), ent.Key)
+        if err != nil {
+            return err
+        }
+    }
+    return it.Err()
+}
+
+func (c *Catalog) DumpCommits(ctx context.Context, repositoryID string) (string, error) {
+    repository, err := c.getRepository(ctx, repositoryID)
+    if err != nil {
+        return "", err
+    }
+
+    metaRangeID, err := c.Store.DumpCommits(ctx, repository)
+    if err != nil {
+        return "", err
+    }
+    return string(*metaRangeID), nil
+}
+
+func (c *Catalog) DumpBranches(ctx context.Context, repositoryID string) (string, error) {
+    repository, err := c.getRepository(ctx, repositoryID)
+    if err != nil {
+        return "", err
+    }
+
+    metaRangeID, err := c.Store.DumpBranches(ctx, repository)
+    if err != nil {
+        return "", err
+    }
+    return string(*metaRangeID), nil
+}
+
+func (c *Catalog) DumpTags(ctx context.Context, repositoryID string) (string, error) {
+    repository, err := c.getRepository(ctx, repositoryID)
+    if err != nil {
+        return "", err
+    }
+
+    metaRangeID, err := c.Store.DumpTags(ctx, repository)
+    if err != nil {
+        return "", err
+    }
+    return string(*metaRangeID), nil
+}
+
+func (c *Catalog) LoadCommits(ctx context.Context, repositoryID, commitsMetaRangeID string, opts ...graveler.SetOptionsFunc) error {
+    repository, err := c.getRepository(ctx, repositoryID)
+    if err != nil {
+        return err
+    }
+    return c.Store.LoadCommits(ctx, repository, graveler.MetaRangeID(commitsMetaRangeID), opts...)
+}
+
+func (c *Catalog) LoadBranches(ctx context.Context, repositoryID, branchesMetaRangeID string, opts ...graveler.SetOptionsFunc) error {
+    repository, err := c.getRepository(ctx, repositoryID)
+    if err != nil {
+        return err
+    }
+    return c.Store.LoadBranches(ctx, repository, graveler.MetaRangeID(branchesMetaRangeID), opts...)
+}
+
+func (c *Catalog) LoadTags(ctx context.Context, repositoryID, tagsMetaRangeID string, opts ...graveler.SetOptionsFunc) error {
+    repository, err := c.getRepository(ctx, repositoryID)
+    if err != nil {
+        return err
+    }
+    return c.Store.LoadTags(ctx, repository, graveler.MetaRangeID(tagsMetaRangeID), opts...)
+}
+
+func (c *Catalog) GetMetaRange(ctx context.Context, repositoryID, metaRangeID string) (graveler.MetaRangeAddress, error) {
+    repository, err := c.getRepository(ctx, repositoryID)
+    if err != nil {
+        return "", err
+    }
+    return c.Store.GetMetaRange(ctx, repository, graveler.MetaRangeID(metaRangeID))
+}
+
+func (c *Catalog) GetRange(ctx context.Context, repositoryID, rangeID string) (graveler.RangeAddress, error) {
+    repository, err := c.getRepository(ctx, repositoryID)
+    if err != nil {
+        return "", err
+    }
+    return c.Store.GetRange(ctx, repository, graveler.RangeID(rangeID))
+}
+
+func (c *Catalog) importAsync(ctx context.Context, repository *graveler.RepositoryRecord, branchID, importID string, params ImportRequest, logger logging.Logger) error {
+    ctx, cancel := context.WithCancel(ctx)
+    defer cancel()
+
+    importManager, err := NewImport(ctx, cancel, logger, c.KVStore, repository, importID)
+    if err != nil {
+        return fmt.Errorf("creating import manager: %w", err)
+    }
+    defer importManager.Close()
+
+    wg := c.workPool.NewGroupContext(ctx)
+    wgCtx := wg.Context()
+    for _, source := range params.Paths {
+        wg.SubmitErr(func() error {
+            uri, err := url.Parse(source.Path)
+            if err != nil {
+                return fmt.Errorf("could not parse storage URI %s: %w", source.Path, err)
+            }
+
+            walker, err := c.BlockAdapter.GetWalker(repository.StorageID.String(), block.WalkerOptions{StorageURI: uri})
+            if err != nil {
+                return fmt.Errorf("creating object-store walker on path %s: %w", source.Path, err)
+            }
+
+            it, err := NewWalkEntryIterator(wgCtx, block.NewWalkerWrapper(walker, uri), source.Type, source.Destination, "", "")
+            if err != nil {
+                return fmt.Errorf("creating walk iterator on path %s: %w", source.Path, err)
+            }
+
+            logger.WithFields(logging.Fields{"source": source.Path, "itr": it}).Debug("Ingest source")
+            defer it.Close()
+            return importManager.Ingest(it)
+        })
+    }
+
+    err = wg.Wait()
+    if err != nil {
+        importError := fmt.Errorf("error on ingest: %w", err)
+        importManager.SetError(importError)
+        return importError
+    }
+
+    importItr, err := importManager.NewItr()
+    if err != nil {
+        importError := fmt.Errorf("error on import iterator: %w", err)
+        importManager.SetError(importError)
+        return importError
+    }
+    defer importItr.Close()
+
+    var ranges []*graveler.RangeInfo
+    for importItr.hasMore {
+        rangeInfo, err := c.Store.WriteRange(ctx, repository, importItr, graveler.WithForce(params.Force))
+        if err != nil {
+            
importError := fmt.Errorf("write range: %w", err) + importManager.SetError(importError) + return importError + } + + ranges = append(ranges, rangeInfo) + // Check if operation was canceled + if ctx.Err() != nil { + return nil + } + } + + // Create metarange + metarange, err := c.Store.WriteMetaRange(ctx, repository, ranges, graveler.WithForce(params.Force)) + if err != nil { + importError := fmt.Errorf("create metarange: %w", err) + importManager.SetError(importError) + return importError + } + + prefixes := make([]graveler.Prefix, 0, len(params.Paths)) + for _, ip := range params.Paths { + prefixes = append(prefixes, graveler.Prefix(ip.Destination)) + } + + if params.Commit.CommitMessage == "" { + params.Commit.CommitMessage = "Import objects" + } + commitID, err := c.Store.Import(ctx, repository, graveler.BranchID(branchID), metarange.ID, graveler.CommitParams{ + Committer: params.Commit.Committer, + Message: params.Commit.CommitMessage, + Metadata: map[string]string(params.Commit.Metadata), + }, prefixes, graveler.WithForce(params.Force)) + if err != nil { + importError := fmt.Errorf("merge import: %w", err) + importManager.SetError(importError) + return importError + } + + commit, err := c.Store.GetCommit(ctx, repository, commitID) + if err != nil { + importError := fmt.Errorf("get commit: %w", err) + importManager.SetError(importError) + return importError + } + + // Update import status + status := importManager.Status() + status.MetaRangeID = commit.MetaRangeID + status.Commit = &graveler.CommitRecord{ + CommitID: commitID, + Commit: commit, + } + + status.Completed = true + importManager.SetStatus(status) + return nil +} + +// verifyImportPaths - Verify that import paths will not cause an import of objects from the repository namespace itself +func verifyImportPaths(storageNamespace string, params ImportRequest) error { + for _, p := range params.Paths { + if strings.HasPrefix(p.Path, storageNamespace) { + return fmt.Errorf("import path (%s) in repository namespace (%s) is prohibited: %w", p.Path, storageNamespace, ErrInvalidImportSource) + } + if p.Type == ImportPathTypePrefix && strings.HasPrefix(storageNamespace, p.Path) { + return fmt.Errorf("prefix (%s) contains repository namespace: (%s), %w", p.Path, storageNamespace, ErrInvalidImportSource) + } + } + return nil +} + +func (c *Catalog) Import(ctx context.Context, repositoryID, branchID string, params ImportRequest) (string, error) { + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return "", err + } + + _, err = c.Store.GetBranch(ctx, repository, graveler.BranchID(branchID)) + if err != nil { + return "", err + } + + if err = verifyImportPaths(repository.StorageNamespace.String(), params); err != nil { + return "", err + } + + id := xid.New().String() + // Run import + go func() { + logger := c.log(ctx).WithField("import_id", id) + // Passing context.WithoutCancel to avoid canceling the import operation when the wrapping Import function returns, + // and keep the context's fields intact for next operations (for example, PreCommitHook runs). 
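+        // (context.WithoutCancel, added in Go 1.21, derives a context that keeps the parent's
+        // values but is never canceled and has no deadline.)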
+        err = c.importAsync(context.WithoutCancel(ctx), repository, branchID, id, params, logger)
+        if err != nil {
+            logger.WithError(err).Error("import failure")
+        }
+    }()
+    return id, nil
+}
+
+func (c *Catalog) CancelImport(ctx context.Context, repositoryID, importID string) error {
+    repository, err := c.getRepository(ctx, repositoryID)
+    if err != nil {
+        return err
+    }
+
+    importStatus, err := c.getImportStatus(ctx, repository, importID)
+    if err != nil {
+        return err
+    }
+    if importStatus.Completed {
+        c.log(ctx).WithFields(logging.Fields{
+            "import_id": importID,
+            "completed": importStatus.Completed,
+            "error":     importStatus.Error,
+        }).Warning("Not canceling import - already completed")
+        return graveler.ErrConflictFound
+    }
+    importStatus.Error = ImportCanceled
+    importStatus.UpdatedAt = timestamppb.Now()
+    return kv.SetMsg(ctx, c.KVStore, graveler.RepoPartition(repository), []byte(graveler.ImportsPath(importID)), importStatus)
+}
+
+func (c *Catalog) getImportStatus(ctx context.Context, repository *graveler.RepositoryRecord, importID string) (*graveler.ImportStatusData, error) {
+    repoPartition := graveler.RepoPartition(repository)
+    data := &graveler.ImportStatusData{}
+    _, err := kv.GetMsg(ctx, c.KVStore, repoPartition, []byte(graveler.ImportsPath(importID)), data)
+    if err != nil {
+        if errors.Is(err, kv.ErrNotFound) {
+            return nil, graveler.ErrNotFound
+        }
+        return nil, err
+    }
+    return data, nil
+}
+
+func (c *Catalog) GetImportStatus(ctx context.Context, repositoryID, importID string) (*graveler.ImportStatus, error) {
+    repository, err := c.getRepository(ctx, repositoryID)
+    if err != nil {
+        return nil, err
+    }
+
+    data, err := c.getImportStatus(ctx, repository, importID)
+    if err != nil {
+        return nil, err
+    }
+    return graveler.ImportStatusFromProto(data), nil
+}
+
+func (c *Catalog) WriteRange(ctx context.Context, repositoryID string, params WriteRangeRequest, opts ...graveler.SetOptionsFunc) (*graveler.RangeInfo, *Mark, error) {
+    repository, err := c.getRepository(ctx, repositoryID)
+    if err != nil {
+        return nil, nil, err
+    }
+
+    uri, err := url.Parse(params.SourceURI)
+    if err != nil {
+        return nil, nil, fmt.Errorf("could not parse storage URI %s: %w", params.SourceURI, err)
+    }
+
+    walker, err := c.BlockAdapter.GetWalker(repository.StorageID.String(), block.WalkerOptions{StorageURI: uri})
+    if err != nil {
+        return nil, nil, fmt.Errorf("creating object-store walker on path %s: %w", params.SourceURI, err)
+    }
+
+    it, err := NewWalkEntryIterator(ctx, block.NewWalkerWrapper(walker, uri), ImportPathTypePrefix, params.Prepend, params.After, params.ContinuationToken)
+    if err != nil {
+        return nil, nil, fmt.Errorf("creating walk iterator: %w", err)
+    }
+    defer it.Close()
+
+    rangeInfo, err := c.Store.WriteRange(ctx, repository, NewEntryToValueIterator(it), opts...)
+ if err != nil { + return nil, nil, fmt.Errorf("writing range from entry iterator: %w", err) + } + + stagingToken := params.StagingToken + skipped := it.GetSkippedEntries() + if len(skipped) > 0 { + c.log(ctx).Warning("Skipped count:", len(skipped)) + if stagingToken == "" { + stagingToken = graveler.GenerateStagingToken("import", "ingest_range").String() + } + + for _, obj := range skipped { + p := params.Prepend + obj.RelativeKey + entryRecord := objectStoreEntryToEntryRecord(obj, p) + entry, err := EntryToValue(entryRecord.Entry) + if err != nil { + return nil, nil, fmt.Errorf("parsing entry: %w", err) + } + if err := c.Store.StageObject(ctx, stagingToken, graveler.ValueRecord{ + Key: graveler.Key(entryRecord.Path), + Value: entry, + }); err != nil { + return nil, nil, fmt.Errorf("staging skipped keys: %w", err) + } + } + } + mark := it.Marker() + mark.StagingToken = stagingToken + + return rangeInfo, &mark, nil +} + +func (c *Catalog) WriteMetaRange(ctx context.Context, repositoryID string, ranges []*graveler.RangeInfo, opts ...graveler.SetOptionsFunc) (*graveler.MetaRangeInfo, error) { + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return nil, err + } + return c.Store.WriteMetaRange(ctx, repository, ranges, opts...) +} + +func (c *Catalog) GetGarbageCollectionRules(ctx context.Context, repositoryID string) (*graveler.GarbageCollectionRules, error) { + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return nil, err + } + return c.Store.GetGarbageCollectionRules(ctx, repository) +} + +func (c *Catalog) SetGarbageCollectionRules(ctx context.Context, repositoryID string, rules *graveler.GarbageCollectionRules) error { + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return err + } + if repository.ReadOnly { + return graveler.ErrReadOnlyRepository + } + return c.Store.SetGarbageCollectionRules(ctx, repository, rules) +} + +func (c *Catalog) GetBranchProtectionRules(ctx context.Context, repositoryID string) (*graveler.BranchProtectionRules, *string, error) { + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return nil, nil, err + } + + return c.Store.GetBranchProtectionRules(ctx, repository) +} + +func (c *Catalog) SetBranchProtectionRules(ctx context.Context, repositoryID string, rules *graveler.BranchProtectionRules, lastKnownChecksum *string) error { + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return err + } + if repository.ReadOnly { + return graveler.ErrReadOnlyRepository + } + return c.Store.SetBranchProtectionRules(ctx, repository, rules, lastKnownChecksum) +} + +func (c *Catalog) PrepareExpiredCommits(ctx context.Context, repositoryID string) (*graveler.GarbageCollectionRunMetadata, error) { + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + }); err != nil { + return nil, err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return nil, err + } + if repository.ReadOnly { + return nil, graveler.ErrReadOnlyRepository + } + return c.Store.SaveGarbageCollectionCommits(ctx, repository) +} + +func (c *Catalog) PrepareExpiredCommitsAsync(ctx context.Context, repositoryID string) (string, error) { + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + }); err != nil { + return "", err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != 
nil { + return "", err + } + if repository.ReadOnly { + return "", graveler.ErrReadOnlyRepository + } + + taskStatus := &GarbageCollectionPrepareStatus{} + taskSteps := []TaskStep{ + { + Name: "prepare expired commits on " + repository.RepositoryID.String(), + Func: func(ctx context.Context) error { + gcRunMetadata, err := c.Store.SaveGarbageCollectionCommits(ctx, repository) + if err != nil { + return err + } + taskStatus.Info = &GarbageCollectionPrepareCommitsInfo{ + RunId: gcRunMetadata.RunID, + GcCommitsLocation: gcRunMetadata.CommitsCSVLocation, + GcAddressesLocation: gcRunMetadata.AddressLocation, + } + return nil + }, + }, + } + + taskID := NewTaskID(GarbageCollectionPrepareCommitsPrefix) + err = c.RunBackgroundTaskSteps(repository, taskID, taskSteps, taskStatus) + if err != nil { + return "", err + } + return taskID, nil +} + +func (c *Catalog) GetTaskStatus(ctx context.Context, repositoryID string, taskID string, prefix string, statusMsg protoreflect.ProtoMessage) error { + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return err + } + if !IsTaskID(prefix, taskID) { + return graveler.ErrNotFound + } + + err = GetTaskStatus(ctx, c.KVStore, repository, taskID, statusMsg) + if err != nil { + return err + } + return nil +} + +func (c *Catalog) GetGarbageCollectionPrepareStatus(ctx context.Context, repositoryID string, id string) (*GarbageCollectionPrepareStatus, error) { + var taskStatus GarbageCollectionPrepareStatus + err := c.GetTaskStatus(ctx, repositoryID, id, GarbageCollectionPrepareCommitsPrefix, &taskStatus) + if err != nil { + return nil, err + } + return &taskStatus, nil +} + +// GCUncommittedMark Marks the *next* item to be scanned by the paginated call to PrepareGCUncommitted +type GCUncommittedMark struct { + BranchID graveler.BranchID `json:"branch"` + Path Path `json:"path"` + RunID string `json:"run_id"` + Key string `json:"key"` +} + +type PrepareGCUncommittedInfo struct { + RunID string `json:"run_id"` + Location string `json:"location"` + Filename string `json:"filename"` + Mark *GCUncommittedMark +} + +type UncommittedParquetObject struct { + PhysicalAddress string `parquet:"name=physical_address, type=BYTE_ARRAY, convertedtype=UTF8, encoding=PLAIN_DICTIONARY"` + CreationDate int64 `parquet:"name=creation_date, type=INT64, convertedtype=INT_64"` +} + +func (c *Catalog) uploadFile(ctx context.Context, repo *graveler.RepositoryRecord, location string, fd *os.File, size int64) (string, error) { + _, err := fd.Seek(0, 0) + if err != nil { + return "", err + } + // location is full path to underlying storage - join a unique filename and upload data + name := xid.New().String() + identifier, err := url.JoinPath(location, name) + if err != nil { + return "", err + } + obj := block.ObjectPointer{ + StorageID: repo.StorageID.String(), + StorageNamespace: repo.StorageNamespace.String(), + Identifier: identifier, + IdentifierType: block.IdentifierTypeFull, + } + _, err = c.BlockAdapter.Put(ctx, obj, size, fd, block.PutOpts{}) + if err != nil { + return "", err + } + return name, nil +} + +func (c *Catalog) PrepareGCUncommitted(ctx context.Context, repositoryID string, mark *GCUncommittedMark) (*PrepareGCUncommittedInfo, error) { + var err error + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + }); err != nil { + return nil, err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return nil, err + } + if repository.ReadOnly { + return 
nil, graveler.ErrReadOnlyRepository
+    }
+
+    var runID string
+    if mark == nil {
+        runID = c.Store.GCNewRunID()
+    } else {
+        runID = mark.RunID
+    }
+
+    fd, err := os.CreateTemp("", "")
+    if err != nil {
+        return nil, err
+    }
+    defer func() {
+        _ = fd.Close()
+        if err := os.Remove(fd.Name()); err != nil {
+            c.log(ctx).WithField("filename", fd.Name()).Warn("Failed to delete temporary gc uncommitted data file")
+        }
+    }()
+
+    uw := NewUncommittedWriter(fd)
+
+    // Write parquet to local storage
+    newMark, hasData, err := gcWriteUncommitted(ctx, c.Store, repository, uw, mark, runID, c.UGCPrepareMaxFileSize, c.UGCPrepareInterval)
+    if err != nil {
+        return nil, err
+    }
+
+    // Upload parquet file to object store
+    var (
+        uncommittedLocation string
+        name                string
+    )
+    if hasData {
+        uncommittedLocation, err = c.Store.GCGetUncommittedLocation(repository, runID)
+        if err != nil {
+            return nil, err
+        }
+
+        name, err = c.uploadFile(ctx, repository, uncommittedLocation, fd, uw.Size())
+        if err != nil {
+            return nil, err
+        }
+    }
+
+    return &PrepareGCUncommittedInfo{
+        Mark:     newMark,
+        RunID:    runID,
+        Location: uncommittedLocation,
+        Filename: name,
+    }, nil
+}
+
+// cloneEntry clones entry information without copying the underlying object in the object store, so the physical address remains the same.
+// Cloning is only supported within the same repository.
+// It is limited to a grace period from the object's creation, in order to prevent GC from deleting the object under the clone.
+// ErrCannotClone is returned if the clone conditions are not met.
+func (c *Catalog) cloneEntry(ctx context.Context, srcRepo *Repository, srcEntry *DBEntry, destRepository, destBranch, destPath string,
+    replaceSrcMetadata bool, metadata Metadata, opts ...graveler.SetOptionsFunc,
+) (*DBEntry, error) {
+    // validate clone conditions: clone is only possible within the same repository
+    if srcRepo.Name != destRepository {
+        return nil, fmt.Errorf("not on the same repository: %w", graveler.ErrCannotClone)
+    }
+
+    // we verify the metadata creation date is within the grace period
+    if time.Since(srcEntry.CreationDate) > CloneGracePeriod {
+        return nil, fmt.Errorf("object creation beyond grace period: %w", graveler.ErrCannotClone)
+    }
+
+    // entry information can be cloned over and over,
+    // so we also need to verify the grace period against the actual object last-modified time
+    srcObject := block.ObjectPointer{
+        StorageID:        srcRepo.StorageID,
+        StorageNamespace: srcRepo.StorageNamespace,
+        IdentifierType:   srcEntry.AddressType.ToIdentifierType(),
+        Identifier:       srcEntry.PhysicalAddress,
+    }
+    props, err := c.BlockAdapter.GetProperties(ctx, srcObject)
+    if err != nil {
+        return nil, err
+    }
+    if !props.LastModified.IsZero() && time.Since(props.LastModified) > CloneGracePeriod {
+        return nil, fmt.Errorf("object last-modified beyond grace period: %w", graveler.ErrCannotClone)
+    }
+
+    // copy the metadata into a new entry
+    dstEntry := *srcEntry
+    dstEntry.Path = destPath
+    dstEntry.CreationDate = time.Now()
+    if replaceSrcMetadata {
+        dstEntry.Metadata = metadata
+    }
+    if err := c.CreateEntry(ctx, destRepository, destBranch, dstEntry, opts...); err != nil {
+        return nil, err
+    }
+    return &dstEntry, nil
+}
+
+// CopyEntry copies an entry, either by cloning it (keeping the same physical address and metadata) when possible,
+// or by using the block adapter to copy the data to a new physical address otherwise.
+// if replaceSrcMetadata is true, the metadata will be replaced with the provided metadata;
+// if replaceSrcMetadata is false, the metadata will be copied from the source entry.
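+//
+// Hypothetical usage (repository, branch, and paths are illustrative; error handling elided):
+//
+//	entry, _ := c.CopyEntry(ctx, "repo", "main", "data/a.parquet",
+//		"repo", "main", "backup/a.parquet", false, nil)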
+func (c *Catalog) CopyEntry(ctx context.Context, srcRepository, srcRef, srcPath, destRepository, destBranch, destPath string, replaceSrcMetadata bool, metadata Metadata, opts ...graveler.SetOptionsFunc) (*DBEntry, error) {
+    // fetch the source entry; its physical address and metadata drive both the clone and the copy paths
+    srcEntry, err := c.GetEntry(ctx, srcRepository, srcRef, srcPath, GetEntryParams{})
+    if err != nil {
+        return nil, err
+    }
+
+    // load repositories information for storage namespace
+    srcRepo, err := c.GetRepository(ctx, srcRepository)
+    if err != nil {
+        return nil, err
+    }
+
+    // load destination repository information, if needed
+    destRepo := srcRepo
+    if srcRepository != destRepository {
+        destRepo, err = c.GetRepository(ctx, destRepository)
+        if err != nil {
+            return nil, err
+        }
+        // in case of different repositories, we verify the storage ID is the same
+        if srcRepo.StorageID != destRepo.StorageID {
+            return nil, fmt.Errorf("cannot copy between repos with different StorageIDs: %w", graveler.ErrInvalidStorageID)
+        }
+    }
+
+    // Clone the entry if possible; fall back to a full copy otherwise
+    clonedEntry, err := c.cloneEntry(ctx, srcRepo, srcEntry, destRepository, destBranch, destPath, replaceSrcMetadata, metadata, opts...)
+    if err == nil {
+        return clonedEntry, nil
+    }
+    if !errors.Is(err, graveler.ErrCannotClone) {
+        return nil, err
+    }
+
+    // copy data to a new physical address
+    dstEntry := *srcEntry
+    dstEntry.Path = destPath
+    dstEntry.AddressType = AddressTypeRelative
+    dstEntry.PhysicalAddress = c.PathProvider.NewPath()
+
+    if replaceSrcMetadata {
+        dstEntry.Metadata = metadata
+    }
+
+    srcObject := block.ObjectPointer{
+        StorageID:        srcRepo.StorageID,
+        StorageNamespace: srcRepo.StorageNamespace,
+        IdentifierType:   srcEntry.AddressType.ToIdentifierType(),
+        Identifier:       srcEntry.PhysicalAddress,
+    }
+    destObj := block.ObjectPointer{
+        StorageID:        destRepo.StorageID,
+        StorageNamespace: destRepo.StorageNamespace,
+        IdentifierType:   dstEntry.AddressType.ToIdentifierType(),
+        Identifier:       dstEntry.PhysicalAddress,
+    }
+    err = c.BlockAdapter.Copy(ctx, srcObject, destObj)
+    if err != nil {
+        return nil, err
+    }
+
+    // Update the creation date only after the actual copy: the upload can take a while, and we want
+    // the mtime (creationDate) in lakeFS to be as close as possible to the mtime in the underlying storage
+    dstEntry.CreationDate = time.Now()
+
+    // create entry for the final copy
+    err = c.CreateEntry(ctx, destRepository, destBranch, dstEntry, opts...)
+ if err != nil { + return nil, err + } + return &dstEntry, nil +} + +func (c *Catalog) DeleteExpiredImports(ctx context.Context) { + repos, err := c.listRepositoriesHelper(ctx) + if err != nil { + c.log(ctx).WithError(err).Warn("Delete expired imports: failed to list repositories") + return + } + + for _, repo := range repos { + err = c.Store.DeleteExpiredImports(ctx, repo) + if err != nil { + c.log(ctx).WithError(err).WithField("repository", repo.RepositoryID).Warn("Delete expired imports failed") + } + } +} + +func (c *Catalog) DeleteExpiredTasks(ctx context.Context) { + repos, err := c.listRepositoriesHelper(ctx) + if err != nil { + c.log(ctx).WithError(err).Warn("Delete expired tasks, failed to list repositories") + return + } + + for _, repo := range repos { + err := c.deleteRepositoryExpiredTasks(ctx, repo) + if err != nil { + c.log(ctx).WithError(err).WithField("repository", repo.RepositoryID).Warn("Delete expired tasks failed") + } + } +} + +func (c *Catalog) listRepositoriesHelper(ctx context.Context) ([]*graveler.RepositoryRecord, error) { + it, err := c.Store.ListRepositories(ctx) + if err != nil { + return nil, err + } + defer it.Close() + + var repos []*graveler.RepositoryRecord + for it.Next() { + repos = append(repos, it.Value()) + } + if err := it.Err(); err != nil { + return nil, err + } + return repos, nil +} + +func getHashSum(value, signingKey []byte) []byte { + // create a new HMAC by defining the hash type and the key + h := hmac.New(sha256.New, signingKey) + // compute the HMAC + h.Write(value) + return h.Sum(nil) +} + +func (c *Catalog) VerifyLinkAddress(repository, branch, path, physicalAddress string) error { + idx := strings.LastIndex(physicalAddress, LinkAddressSigningDelimiter) + if idx < 0 { + return fmt.Errorf("address is not signed: %w", graveler.ErrLinkAddressInvalid) + } + address := physicalAddress[:idx] + signature := physicalAddress[idx+1:] + + stringToVerify, err := getAddressJSON(repository, branch, path, address) + if err != nil { + return fmt.Errorf("failed json encoding: %w", graveler.ErrLinkAddressInvalid) + } + decodedSig, err := base64.RawURLEncoding.DecodeString(signature) + if err != nil { + return fmt.Errorf("malformed address signature: %s: %w", stringToVerify, graveler.ErrLinkAddressInvalid) + } + + calculated := getHashSum(stringToVerify, []byte(c.signingKey)) + if !hmac.Equal(calculated, decodedSig) { + return fmt.Errorf("invalid address signature: %w", block.ErrInvalidAddress) + } + creationTime, err := c.PathProvider.ResolvePathTime(address) + if err != nil { + return err + } + + if time.Since(creationTime) > LinkAddressTime { + return graveler.ErrLinkAddressExpired + } + return nil +} + +func (c *Catalog) signAddress(logicalAddress []byte) string { + dataHmac := getHashSum(logicalAddress, []byte(c.signingKey)) + return base64.RawURLEncoding.EncodeToString(dataHmac) // Using url encoding to avoid "/" +} + +func getAddressJSON(repository, branch, path, physicalAddress string) ([]byte, error) { + return json.Marshal(struct { + Repository string + Branch string + Path string + PhysicalAddress string + }{ + Repository: repository, + Branch: branch, + Path: path, + PhysicalAddress: physicalAddress, + }) +} + +func (c *Catalog) GetAddressWithSignature(repository, branch, path string) (string, error) { + physicalPath := c.PathProvider.NewPath() + data, err := getAddressJSON(repository, branch, path, physicalPath) + if err != nil { + return "", err + } + return physicalPath + LinkAddressSigningDelimiter + c.signAddress(data), nil +} + +func (c 
*Catalog) Close() error {
+    var errs error
+    for _, manager := range c.managers {
+        err := manager.Close()
+        if err != nil {
+            errs = multierror.Append(errs, err)
+        }
+    }
+    c.workPool.StopAndWait()
+    if c.deleteSensor != nil {
+        c.deleteSensor.Close()
+    }
+    return errs
+}
+
+// dereferenceCommitID dereferences 'ref' to a commit ID; this helper makes sure we do not point to explicit branch staging
+func (c *Catalog) dereferenceCommitID(ctx context.Context, repository *graveler.RepositoryRecord, ref graveler.Ref) (graveler.CommitID, error) {
+    resolvedRef, err := c.Store.Dereference(ctx, repository, ref)
+    if err != nil {
+        return "", err
+    }
+    if resolvedRef.CommitID == "" {
+        return "", fmt.Errorf("%w: no commit", graveler.ErrInvalidRef)
+    }
+    if resolvedRef.ResolvedBranchModifier == graveler.ResolvedBranchModifierStaging {
+        return "", fmt.Errorf("%w: should point to a commit", graveler.ErrInvalidRef)
+    }
+    return resolvedRef.CommitID, nil
+}
+
+func (c *Catalog) checkCommitIDDuplication(ctx context.Context, repository *graveler.RepositoryRecord, id graveler.CommitID) error {
+    _, err := c.Store.GetCommit(ctx, repository, id)
+    if err == nil {
+        return fmt.Errorf("commit ID %s: %w", id, graveler.ErrConflictFound)
+    }
+    if errors.Is(err, graveler.ErrNotFound) {
+        return nil
+    }
+
+    return err
+}
+
+func (c *Catalog) GetPullRequest(ctx context.Context, repositoryID string, pullRequestID string) (*graveler.PullRequest, error) {
+    pid := graveler.PullRequestID(pullRequestID)
+    if err := validator.Validate([]validator.ValidateArg{
+        {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID},
+        {Name: "pullRequestID", Value: pid, Fn: graveler.ValidatePullRequestID},
+    }); err != nil {
+        return nil, err
+    }
+
+    repository, err := c.getRepository(ctx, repositoryID)
+    if err != nil {
+        return nil, err
+    }
+
+    pr, err := c.Store.GetPullRequest(ctx, repository, pid)
+    if err != nil {
+        return nil, err
+    }
+
+    return pr, nil
+}
+
+func (c *Catalog) CreatePullRequest(ctx context.Context, repositoryID string, request *PullRequest) (string, error) {
+    srcBranchID := graveler.BranchID(request.SourceBranch)
+    destBranchID := graveler.BranchID(request.DestinationBranch)
+    if err := validator.Validate([]validator.ValidateArg{
+        {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID},
+        {Name: "dest", Value: destBranchID, Fn: graveler.ValidateBranchID},
+        {Name: "src", Value: srcBranchID, Fn: graveler.ValidateBranchID},
+    }); err != nil {
+        return "", err
+    }
+
+    // Verify src and dst are different
+    if srcBranchID == destBranchID {
+        return "", fmt.Errorf("source and destination branches are the same: %w", graveler.ErrSameBranch)
+    }
+
+    // Check all entities exist
+    repository, err := c.getRepository(ctx, repositoryID)
+    if err != nil {
+        return "", err
+    }
+    if _, err = c.Store.GetBranch(ctx, repository, srcBranchID); err != nil {
+        return "", err
+    }
+    if _, err = c.Store.GetBranch(ctx, repository, destBranchID); err != nil {
+        return "", err
+    }
+
+    pullID := graveler.NewRunID()
+    pull := &graveler.PullRequestRecord{
+        ID: graveler.PullRequestID(pullID),
+        PullRequest: graveler.PullRequest{
+            CreationDate: time.Now(),
+            Title:        request.Title,
+            Author:       request.Author,
+            Description:  request.Description,
+            Source:       request.SourceBranch,
+            Destination:  request.DestinationBranch,
+        },
+    }
+    if err = c.Store.CreatePullRequest(ctx, repository, pull); err != nil {
+        return "", err
+    }
+
+    return pullID, nil
+}
+
+func shouldSkipByStatus(requested string, status 
graveler.PullRequestStatus) bool { + if status.String() == requested { + return false + } + + switch requested { + case graveler.PullRequestStatus_CLOSED.String(): // CLOSED can be either CLOSED OR MERGED + return status != graveler.PullRequestStatus_CLOSED && status != graveler.PullRequestStatus_MERGED + case graveler.PullRequestStatus_OPEN.String(): // OPEN must be equal to OPEN + return status != graveler.PullRequestStatus_OPEN + default: // Anything else should return all + return false + } +} + +func (c *Catalog) ListPullRequest(ctx context.Context, repositoryID, prefix string, limit int, after, status string) ([]*PullRequest, bool, error) { + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + }); err != nil { + return nil, false, err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return nil, false, err + } + + // normalize limit + if limit < 0 || limit > ListPullsLimitMax { + limit = ListPullsLimitMax + } + it, err := c.Store.ListPullRequests(ctx, repository) + if err != nil { + return nil, false, err + } + defer it.Close() + + afterPR := graveler.PullRequestID(after) + prefixPR := graveler.PullRequestID(prefix) + if afterPR < prefixPR { + it.SeekGE(prefixPR) + } else { + it.SeekGE(afterPR) + } + var pulls []*PullRequest + for it.Next() { + v := it.Value() + if v.ID == afterPR || shouldSkipByStatus(status, v.Status) { + continue + } + pullID := v.ID.String() + // break in case we got to a pull outside our prefix + if !strings.HasPrefix(pullID, prefix) { + break + } + p := &PullRequest{ + ID: pullID, + Title: v.Title, + Status: strings.ToLower(v.Status.String()), + Description: v.Description, + Author: v.Author, + SourceBranch: v.Source, + DestinationBranch: v.Destination, + CreationDate: v.CreationDate, + ClosedDate: v.ClosedDate, + } + pulls = append(pulls, p) + if len(pulls) >= limit+1 { + break + } + } + if err := it.Err(); err != nil { + return nil, false, err + } + // return results (optionally trimmed) and hasMore + hasMore := false + if len(pulls) > limit { + hasMore = true + pulls = pulls[:limit] + } + return pulls, hasMore, nil +} + +func (c *Catalog) UpdatePullRequest(ctx context.Context, repositoryID string, pullRequestID string, request *graveler.UpdatePullRequest) error { + pullID := graveler.PullRequestID(pullRequestID) + if err := validator.Validate([]validator.ValidateArg{ + {Name: "repository", Value: repositoryID, Fn: graveler.ValidateRepositoryID}, + {Name: "pullRequestID", Value: pullID, Fn: graveler.ValidatePullRequestID}, + }); err != nil { + return err + } + repository, err := c.getRepository(ctx, repositoryID) + if err != nil { + return err + } + return c.Store.UpdatePullRequest(ctx, repository, pullID, request) +} + +func newCatalogEntryFromEntry(commonPrefix bool, path string, ent *Entry) DBEntry { + b := NewDBEntryBuilder(). + CommonLevel(commonPrefix). 
+        Path(path)
+    if ent != nil {
+        b.PhysicalAddress(ent.Address)
+        b.AddressType(addressTypeToCatalog(ent.AddressType))
+        b.CreationDate(ent.LastModified.AsTime())
+        b.Size(ent.Size)
+        b.Checksum(ent.ETag)
+        b.Metadata(ent.Metadata)
+        b.Expired(false)
+        b.ContentType(ContentTypeOrDefault(ent.ContentType))
+    }
+    return b.Build()
+}
+
+func catalogDiffType(typ graveler.DiffType) (DifferenceType, error) {
+    switch typ {
+    case graveler.DiffTypeAdded:
+        return DifferenceTypeAdded, nil
+    case graveler.DiffTypeRemoved:
+        return DifferenceTypeRemoved, nil
+    case graveler.DiffTypeChanged:
+        return DifferenceTypeChanged, nil
+    case graveler.DiffTypeConflict:
+        return DifferenceTypeConflict, nil
+    default:
+        return DifferenceTypeNone, fmt.Errorf("%d: %w", typ, ErrUnknownDiffType)
+    }
+}
+
+func newDifferenceFromEntryDiff(v *EntryDiff) (Difference, error) {
+    var (
+        diff Difference
+        err  error
+    )
+    diff.DBEntry = newCatalogEntryFromEntry(false, v.Path.String(), v.Entry)
+    diff.Type, err = catalogDiffType(v.Type)
+    return diff, err
+}
+
+func NewUncommittedWriter(writer io.Writer) *UncommittedWriter {
+    return &UncommittedWriter{
+        writer: writer,
+    }
+}
+
+// UncommittedWriter wraps io.Writer and tracks the total size of writes done on this writer.
+// Used to get the current file size written without expensive calls to Flush and Stat.
+type UncommittedWriter struct {
+    size   int64
+    writer io.Writer
+}
+
+func (w *UncommittedWriter) Write(p []byte) (n int, err error) {
+    n, err = w.writer.Write(p)
+    w.size += int64(n)
+    return n, err
+}
+
+func (w *UncommittedWriter) Size() int64 {
+    return w.size
+}
+
+func newCatalogEntryFromValueRecord(valueRecord *graveler.ValueRecord) (*DBEntry, error) {
+    entry, err := ValueToEntry(valueRecord.Value)
+    if err != nil {
+        return nil, fmt.Errorf("decode entry: %w", err)
+    }
+    dbEntry := newCatalogEntryFromEntry(false, string(valueRecord.Key), entry)
+    return &dbEntry, nil
+}
+
+type ConflictResolverWrapper struct {
+    ConflictResolver EntryConflictResolver
+}
+
+func (cr *ConflictResolverWrapper) ResolveConflict(ctx context.Context, sCtx graveler.StorageContext, _ graveler.MergeStrategy, srcValue, destValue *graveler.ValueRecord) (*graveler.ValueRecord, error) {
+    if !cr.ConflictResolver.FilterByPath(string(srcValue.Key)) {
+        // Not a conflict the catalog should resolve
+        return nil, nil
+    }
+
+    // Decode values to entries
+    srcDBEntry, err := newCatalogEntryFromValueRecord(srcValue)
+    if err != nil {
+        return nil, err
+    }
+    destDBEntry, err := newCatalogEntryFromValueRecord(destValue)
+    if err != nil {
+        return nil, err
+    }
+
+    // Resolve conflict
+    resolvedDBEntry, err := cr.ConflictResolver.ResolveConflict(ctx, sCtx, srcDBEntry, destDBEntry)
+    if err != nil {
+        return nil, err
+    }
+
+    var returnValue *graveler.ValueRecord
+    switch {
+    case resolvedDBEntry == nil:
+        // Conflict wasn't resolved - return nil
+        returnValue = nil
+    case resolvedDBEntry.Path == srcDBEntry.Path:
+        // Resolved to src entry - return src value
+        returnValue = srcValue
+    case resolvedDBEntry.Path == destDBEntry.Path:
+        // Resolved to dest entry - return dest value
+        returnValue = destValue
+    default:
+        // Resolved to a new entry - encode resolved entry to value
+        resolvedEntry := newEntryFromCatalogEntry(*resolvedDBEntry)
+        value, err := EntryToValue(resolvedEntry)
+        if err != nil {
+            return nil, fmt.Errorf("encode resolved entry: %w", err)
+        }
+        returnValue = &graveler.ValueRecord{
+            Key: srcValue.Key, // srcValue and destValue keys 
are the same + Value: value, + } + } + return returnValue, nil +} diff --git a/pkg/catalog/catalog.pb.go b/pkg/catalog/catalog.pb.go index be329777e2e..cebcbe8bece 100644 --- a/pkg/catalog/catalog.pb.go +++ b/pkg/catalog/catalog.pb.go @@ -173,7 +173,8 @@ type Task struct { Done bool `protobuf:"varint,2,opt,name=done,proto3" json:"done,omitempty"` UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` Progress int64 `protobuf:"varint,4,opt,name=progress,proto3" json:"progress,omitempty"` - Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"` + ErrorMsg string `protobuf:"bytes,5,opt,name=error_msg,json=errorMsg,proto3" json:"error_msg,omitempty"` + StatusCode int64 `protobuf:"varint,6,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -236,13 +237,20 @@ func (x *Task) GetProgress() int64 { return 0 } -func (x *Task) GetError() string { +func (x *Task) GetErrorMsg() string { if x != nil { - return x.Error + return x.ErrorMsg } return "" } +func (x *Task) GetStatusCode() int64 { + if x != nil { + return x.StatusCode + } + return 0 +} + // RepositoryDumpInfo holds the metarange IDs for a repository dump type RepositoryDumpInfo struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -517,6 +525,262 @@ func (x *GarbageCollectionPrepareStatus) GetInfo() *GarbageCollectionPrepareComm return nil } +type CommitAsyncInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Parents []string `protobuf:"bytes,2,rep,name=parents,proto3" json:"parents,omitempty"` + Committer string `protobuf:"bytes,3,opt,name=committer,proto3" json:"committer,omitempty"` + Message string `protobuf:"bytes,4,opt,name=message,proto3" json:"message,omitempty"` + CreationDate *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=creation_date,json=creationDate,proto3" json:"creation_date,omitempty"` + MetaRangeId string `protobuf:"bytes,6,opt,name=meta_range_id,json=metaRangeId,proto3" json:"meta_range_id,omitempty"` + Metadata map[string]string `protobuf:"bytes,7,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Generation int64 `protobuf:"varint,8,opt,name=generation,proto3" json:"generation,omitempty"` + Version int64 `protobuf:"varint,9,opt,name=version,proto3" json:"version,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CommitAsyncInfo) Reset() { + *x = CommitAsyncInfo{} + mi := &file_catalog_catalog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CommitAsyncInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommitAsyncInfo) ProtoMessage() {} + +func (x *CommitAsyncInfo) ProtoReflect() protoreflect.Message { + mi := &file_catalog_catalog_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommitAsyncInfo.ProtoReflect.Descriptor instead. 
+func (*CommitAsyncInfo) Descriptor() ([]byte, []int) { + return file_catalog_catalog_proto_rawDescGZIP(), []int{7} +} + +func (x *CommitAsyncInfo) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *CommitAsyncInfo) GetParents() []string { + if x != nil { + return x.Parents + } + return nil +} + +func (x *CommitAsyncInfo) GetCommitter() string { + if x != nil { + return x.Committer + } + return "" +} + +func (x *CommitAsyncInfo) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *CommitAsyncInfo) GetCreationDate() *timestamppb.Timestamp { + if x != nil { + return x.CreationDate + } + return nil +} + +func (x *CommitAsyncInfo) GetMetaRangeId() string { + if x != nil { + return x.MetaRangeId + } + return "" +} + +func (x *CommitAsyncInfo) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *CommitAsyncInfo) GetGeneration() int64 { + if x != nil { + return x.Generation + } + return 0 +} + +func (x *CommitAsyncInfo) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +type CommitAsyncStatus struct { + state protoimpl.MessageState `protogen:"open.v1"` + Task *Task `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"` + Info *CommitAsyncInfo `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CommitAsyncStatus) Reset() { + *x = CommitAsyncStatus{} + mi := &file_catalog_catalog_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CommitAsyncStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommitAsyncStatus) ProtoMessage() {} + +func (x *CommitAsyncStatus) ProtoReflect() protoreflect.Message { + mi := &file_catalog_catalog_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommitAsyncStatus.ProtoReflect.Descriptor instead. +func (*CommitAsyncStatus) Descriptor() ([]byte, []int) { + return file_catalog_catalog_proto_rawDescGZIP(), []int{8} +} + +func (x *CommitAsyncStatus) GetTask() *Task { + if x != nil { + return x.Task + } + return nil +} + +func (x *CommitAsyncStatus) GetInfo() *CommitAsyncInfo { + if x != nil { + return x.Info + } + return nil +} + +type MergeAsyncInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + Reference string `protobuf:"bytes,1,opt,name=reference,proto3" json:"reference,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MergeAsyncInfo) Reset() { + *x = MergeAsyncInfo{} + mi := &file_catalog_catalog_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MergeAsyncInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MergeAsyncInfo) ProtoMessage() {} + +func (x *MergeAsyncInfo) ProtoReflect() protoreflect.Message { + mi := &file_catalog_catalog_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MergeAsyncInfo.ProtoReflect.Descriptor instead. 
+func (*MergeAsyncInfo) Descriptor() ([]byte, []int) { + return file_catalog_catalog_proto_rawDescGZIP(), []int{9} +} + +func (x *MergeAsyncInfo) GetReference() string { + if x != nil { + return x.Reference + } + return "" +} + +type MergeAsyncStatus struct { + state protoimpl.MessageState `protogen:"open.v1"` + Task *Task `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty"` + Info *MergeAsyncInfo `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MergeAsyncStatus) Reset() { + *x = MergeAsyncStatus{} + mi := &file_catalog_catalog_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MergeAsyncStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MergeAsyncStatus) ProtoMessage() {} + +func (x *MergeAsyncStatus) ProtoReflect() protoreflect.Message { + mi := &file_catalog_catalog_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MergeAsyncStatus.ProtoReflect.Descriptor instead. +func (*MergeAsyncStatus) Descriptor() ([]byte, []int) { + return file_catalog_catalog_proto_rawDescGZIP(), []int{10} +} + +func (x *MergeAsyncStatus) GetTask() *Task { + if x != nil { + return x.Task + } + return nil +} + +func (x *MergeAsyncStatus) GetInfo() *MergeAsyncInfo { + if x != nil { + return x.Info + } + return nil +} + // TaskMsg described generic message with Task field // used for all status messages and for cleanup messages type TaskMsg struct { @@ -528,7 +792,7 @@ type TaskMsg struct { func (x *TaskMsg) Reset() { *x = TaskMsg{} - mi := &file_catalog_catalog_proto_msgTypes[7] + mi := &file_catalog_catalog_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -540,7 +804,7 @@ func (x *TaskMsg) String() string { func (*TaskMsg) ProtoMessage() {} func (x *TaskMsg) ProtoReflect() protoreflect.Message { - mi := &file_catalog_catalog_proto_msgTypes[7] + mi := &file_catalog_catalog_proto_msgTypes[11] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -553,7 +817,7 @@ func (x *TaskMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use TaskMsg.ProtoReflect.Descriptor instead. 
func (*TaskMsg) Descriptor() ([]byte, []int) { - return file_catalog_catalog_proto_rawDescGZIP(), []int{7} + return file_catalog_catalog_proto_rawDescGZIP(), []int{11} } func (x *TaskMsg) GetTask() *Task { @@ -582,14 +846,16 @@ const file_catalog_catalog_proto_rawDesc = "" + "\vAddressType\x12\x18\n" + "\x14BY_PREFIX_DEPRECATED\x10\x00\x12\f\n" + "\bRELATIVE\x10\x01\x12\b\n" + - "\x04FULL\x10\x02\"\x97\x01\n" + + "\x04FULL\x10\x02\"\xbf\x01\n" + "\x04Task\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" + "\x04done\x18\x02 \x01(\bR\x04done\x129\n" + "\n" + "updated_at\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\tupdatedAt\x12\x1a\n" + - "\bprogress\x18\x04 \x01(\x03R\bprogress\x12\x14\n" + - "\x05error\x18\x05 \x01(\tR\x05error\"\xa6\x01\n" + + "\bprogress\x18\x04 \x01(\x03R\bprogress\x12\x1b\n" + + "\terror_msg\x18\x05 \x01(\tR\berrorMsg\x12\x1f\n" + + "\vstatus_code\x18\x06 \x01(\x03R\n" + + "statusCode\"\xa6\x01\n" + "\x12RepositoryDumpInfo\x120\n" + "\x14commits_metarange_id\x18\x01 \x01(\tR\x12commitsMetarangeId\x12*\n" + "\x11tags_metarange_id\x18\x02 \x01(\tR\x0ftagsMetarangeId\x122\n" + @@ -605,7 +871,30 @@ const file_catalog_catalog_proto_rawDesc = "" + "\x15gc_addresses_location\x18\x03 \x01(\tR\x13gcAddressesLocation\"\x85\x01\n" + "\x1eGarbageCollectionPrepareStatus\x12!\n" + "\x04task\x18\x01 \x01(\v2\r.catalog.TaskR\x04task\x12@\n" + - "\x04info\x18\x02 \x01(\v2,.catalog.GarbageCollectionPrepareCommitsInfoR\x04info\",\n" + + "\x04info\x18\x02 \x01(\v2,.catalog.GarbageCollectionPrepareCommitsInfoR\x04info\"\x93\x03\n" + + "\x0fCommitAsyncInfo\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12\x18\n" + + "\aparents\x18\x02 \x03(\tR\aparents\x12\x1c\n" + + "\tcommitter\x18\x03 \x01(\tR\tcommitter\x12\x18\n" + + "\amessage\x18\x04 \x01(\tR\amessage\x12?\n" + + "\rcreation_date\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\fcreationDate\x12\"\n" + + "\rmeta_range_id\x18\x06 \x01(\tR\vmetaRangeId\x12B\n" + + "\bmetadata\x18\a \x03(\v2&.catalog.CommitAsyncInfo.MetadataEntryR\bmetadata\x12\x1e\n" + + "\n" + + "generation\x18\b \x01(\x03R\n" + + "generation\x12\x18\n" + + "\aversion\x18\t \x01(\x03R\aversion\x1a;\n" + + "\rMetadataEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"d\n" + + "\x11CommitAsyncStatus\x12!\n" + + "\x04task\x18\x01 \x01(\v2\r.catalog.TaskR\x04task\x12,\n" + + "\x04info\x18\x02 \x01(\v2\x18.catalog.CommitAsyncInfoR\x04info\".\n" + + "\x0eMergeAsyncInfo\x12\x1c\n" + + "\treference\x18\x01 \x01(\tR\treference\"b\n" + + "\x10MergeAsyncStatus\x12!\n" + + "\x04task\x18\x01 \x01(\v2\r.catalog.TaskR\x04task\x12+\n" + + "\x04info\x18\x02 \x01(\v2\x17.catalog.MergeAsyncInfoR\x04info\",\n" + "\aTaskMsg\x12!\n" + "\x04task\x18\x01 \x01(\v2\r.catalog.TaskR\x04taskB$Z\"github.com/treevese/lakefs/catalogb\x06proto3" @@ -622,7 +911,7 @@ func file_catalog_catalog_proto_rawDescGZIP() []byte { } var file_catalog_catalog_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_catalog_catalog_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_catalog_catalog_proto_msgTypes = make([]protoimpl.MessageInfo, 14) var file_catalog_catalog_proto_goTypes = []any{ (Entry_AddressType)(0), // 0: catalog.Entry.AddressType (*Entry)(nil), // 1: catalog.Entry @@ -632,26 +921,37 @@ var file_catalog_catalog_proto_goTypes = []any{ (*RepositoryRestoreStatus)(nil), // 5: catalog.RepositoryRestoreStatus (*GarbageCollectionPrepareCommitsInfo)(nil), // 6: catalog.GarbageCollectionPrepareCommitsInfo 
 	(*GarbageCollectionPrepareStatus)(nil),      // 7: catalog.GarbageCollectionPrepareStatus
-	(*TaskMsg)(nil),               // 8: catalog.TaskMsg
-	nil,                           // 9: catalog.Entry.MetadataEntry
-	(*timestamppb.Timestamp)(nil), // 10: google.protobuf.Timestamp
+	(*CommitAsyncInfo)(nil),       // 8: catalog.CommitAsyncInfo
+	(*CommitAsyncStatus)(nil),     // 9: catalog.CommitAsyncStatus
+	(*MergeAsyncInfo)(nil),        // 10: catalog.MergeAsyncInfo
+	(*MergeAsyncStatus)(nil),      // 11: catalog.MergeAsyncStatus
+	(*TaskMsg)(nil),               // 12: catalog.TaskMsg
+	nil,                           // 13: catalog.Entry.MetadataEntry
+	nil,                           // 14: catalog.CommitAsyncInfo.MetadataEntry
+	(*timestamppb.Timestamp)(nil), // 15: google.protobuf.Timestamp
 }
 var file_catalog_catalog_proto_depIdxs = []int32{
-	10, // 0: catalog.Entry.last_modified:type_name -> google.protobuf.Timestamp
-	9,  // 1: catalog.Entry.metadata:type_name -> catalog.Entry.MetadataEntry
+	15, // 0: catalog.Entry.last_modified:type_name -> google.protobuf.Timestamp
+	13, // 1: catalog.Entry.metadata:type_name -> catalog.Entry.MetadataEntry
 	0,  // 2: catalog.Entry.address_type:type_name -> catalog.Entry.AddressType
-	10, // 3: catalog.Task.updated_at:type_name -> google.protobuf.Timestamp
+	15, // 3: catalog.Task.updated_at:type_name -> google.protobuf.Timestamp
 	2,  // 4: catalog.RepositoryDumpStatus.task:type_name -> catalog.Task
 	3,  // 5: catalog.RepositoryDumpStatus.info:type_name -> catalog.RepositoryDumpInfo
 	2,  // 6: catalog.RepositoryRestoreStatus.task:type_name -> catalog.Task
 	2,  // 7: catalog.GarbageCollectionPrepareStatus.task:type_name -> catalog.Task
 	6,  // 8: catalog.GarbageCollectionPrepareStatus.info:type_name -> catalog.GarbageCollectionPrepareCommitsInfo
-	2,  // 9: catalog.TaskMsg.task:type_name -> catalog.Task
-	10, // [10:10] is the sub-list for method output_type
-	10, // [10:10] is the sub-list for method input_type
-	10, // [10:10] is the sub-list for extension type_name
-	10, // [10:10] is the sub-list for extension extendee
-	0,  // [0:10] is the sub-list for field type_name
+	15, // 9: catalog.CommitAsyncInfo.creation_date:type_name -> google.protobuf.Timestamp
+	14, // 10: catalog.CommitAsyncInfo.metadata:type_name -> catalog.CommitAsyncInfo.MetadataEntry
+	2,  // 11: catalog.CommitAsyncStatus.task:type_name -> catalog.Task
+	8,  // 12: catalog.CommitAsyncStatus.info:type_name -> catalog.CommitAsyncInfo
+	2,  // 13: catalog.MergeAsyncStatus.task:type_name -> catalog.Task
+	10, // 14: catalog.MergeAsyncStatus.info:type_name -> catalog.MergeAsyncInfo
+	2,  // 15: catalog.TaskMsg.task:type_name -> catalog.Task
+	16, // [16:16] is the sub-list for method output_type
+	16, // [16:16] is the sub-list for method input_type
+	16, // [16:16] is the sub-list for extension type_name
+	16, // [16:16] is the sub-list for extension extendee
+	0,  // [0:16] is the sub-list for field type_name
 }
 
 func init() { file_catalog_catalog_proto_init() }
@@ -665,7 +965,7 @@ func file_catalog_catalog_proto_init() {
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 			RawDescriptor: unsafe.Slice(unsafe.StringData(file_catalog_catalog_proto_rawDesc), len(file_catalog_catalog_proto_rawDesc)),
 			NumEnums:      1,
-			NumMessages:   9,
+			NumMessages:   14,
 			NumExtensions: 0,
 			NumServices:   0,
 		},
diff --git a/pkg/catalog/catalog.proto b/pkg/catalog/catalog.proto
index dffab36873a..1ced479b9b7 100644
--- a/pkg/catalog/catalog.proto
+++ b/pkg/catalog/catalog.proto
@@ -30,7 +30,8 @@ message Task {
   bool done = 2;
   google.protobuf.Timestamp updated_at = 3;
   int64 progress = 4;
-  string error = 5;
+  string error_msg = 5;
+  int64 status_code = 6;
 }
 
 // RepositoryDumpInfo holds the metarange IDs for a repository dump
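(Aside, not part of the diff: renaming error to error_msg keeps the same field number and wire type, so previously stored Task records still decode; only the generated names change. With status_code alongside it, a status endpoint can replay the original HTTP code instead of guessing it from the text. A sketch under that assumption, with hypothetical names throughout:)

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/treeverse/lakefs/pkg/catalog" // assumed import path
)

// writeTaskStatus is hypothetical: it replays a finished task's stored
// status_code/error_msg as the HTTP response of a .../status endpoint.
func writeTaskStatus(w http.ResponseWriter, task *catalog.Task) {
	code := int(task.StatusCode)
	if code == 0 {
		code = http.StatusOK // a task that never failed carries no code
	}
	w.WriteHeader(code)
	fmt.Fprintf(w, "task %s: done=%v error=%q\n", task.Id, task.Done, task.ErrorMsg)
}

func main() {
	rec := httptest.NewRecorder()
	writeTaskStatus(rec, &catalog.Task{
		Id:         "demo", // hypothetical task id
		Done:       true,
		StatusCode: int64(http.StatusConflict),
		ErrorMsg:   "merge conflict",
	})
	fmt.Println(rec.Code) // prints: 409
}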
@@ -65,6 +66,33 @@ message GarbageCollectionPrepareStatus {
   GarbageCollectionPrepareCommitsInfo info = 2;
 }
 
+message CommitAsyncInfo {
+  string id = 1;
+  repeated string parents = 2;
+  string committer = 3;
+  string message = 4;
+  google.protobuf.Timestamp creation_date = 5;
+  string meta_range_id = 6;
+  map<string, string> metadata = 7;
+  int64 generation = 8;
+  int64 version = 9;
+
+}
+
+message CommitAsyncStatus {
+  Task task = 1;
+  CommitAsyncInfo info = 2;
+}
+
+message MergeAsyncInfo {
+  string reference = 1;
+}
+
+message MergeAsyncStatus {
+  Task task = 1;
+  MergeAsyncInfo info = 2;
+}
+
 // TaskMsg described generic message with Task field
 // used for all status messages and for cleanup messages
 message TaskMsg {
diff --git a/pkg/catalog/task.go b/pkg/catalog/task.go
index 78efec99c48..541838f2dbf 100644
--- a/pkg/catalog/task.go
+++ b/pkg/catalog/task.go
@@ -3,6 +3,8 @@ package catalog
 import (
 	"context"
 	"errors"
+	"fmt"
+	"net/http"
 	"strings"
 
 	nanoid "github.com/matoous/go-nanoid/v2"
@@ -16,7 +18,7 @@ const (
 	tasksPrefix = "tasks"
 )
 
-type taskStep struct {
+type TaskStep struct {
 	Name string
 	Func func(ctx context.Context) error
 }
@@ -51,3 +53,14 @@ func GetTaskStatus(ctx context.Context, kvStore kv.Store, repository *graveler.R
 	}
 	return nil
 }
+
+func SetTaskStatusCodeAndError(task *Task) func(w http.ResponseWriter, r *http.Request, code int, v interface{}) {
+	return func(w http.ResponseWriter, r *http.Request, code int, v interface{}) {
+		task.StatusCode = int64(code)
+		if v == nil {
+			task.ErrorMsg = ""
+		} else {
+			task.ErrorMsg = fmt.Sprintf("%v", v)
+		}
+	}
+}
diff --git a/pkg/httputil/request.go b/pkg/httputil/request.go
index 1f1cf70f566..86dd1e95a3d 100644
--- a/pkg/httputil/request.go
+++ b/pkg/httputil/request.go
@@ -7,5 +7,8 @@ import (
 )
 
 func IsRequestCanceled(r *http.Request) bool {
+	if r == nil {
+		return false
+	}
 	return errors.Is(r.Context().Err(), context.Canceled)
 }
diff --git a/pkg/loadtest/local_load_test.go b/pkg/loadtest/local_load_test.go
index d086dd4618c..cd9a93c0efb 100644
--- a/pkg/loadtest/local_load_test.go
+++ b/pkg/loadtest/local_load_test.go
@@ -108,6 +108,7 @@ func TestLocalLoad(t *testing.T) {
 		migrator,
 		&stats.NullCollector{},
 		actionsService,
+		catalog.NewNoopAsyncOperationsHandler(),
 		auditChecker,
 		logging.ContextUnavailable(),
 		nil,
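(Aside, not part of the diff: a sketch of SetTaskStatusCodeAndError in use. It returns a callback with the (w, r, code, v) shape of an error-reporting hook and records the outcome on the task rather than writing a response; the nil guard added to IsRequestCanceled above suggests such hooks may run without a live request, which the sketch exercises by passing nil for both w and r. The wiring shown is an assumption, not code from this change.)

package main

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/treeverse/lakefs/pkg/catalog" // assumed import path
)

func main() {
	task := &catalog.Task{Id: "demo-task"} // hypothetical task id
	report := catalog.SetTaskStatusCodeAndError(task)

	// Simulate an async commit failing a pre-commit hook: the callback only
	// touches the task, so nil w and r are fine.
	report(nil, nil, http.StatusPreconditionFailed, errors.New("pre-commit hook failed"))
	fmt.Println(task.StatusCode, task.ErrorMsg) // prints: 412 pre-commit hook failed

	// A nil value clears the message but still records the code.
	report(nil, nil, http.StatusOK, nil)
	fmt.Println(task.StatusCode, task.ErrorMsg) // prints: 200 (empty message)
}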