diff --git a/docs/assets/db-1-component.png b/docs/assets/db-1-component.png new file mode 100644 index 0000000..0ddbd94 Binary files /dev/null and b/docs/assets/db-1-component.png differ diff --git a/docs/assets/db-2-envs.png b/docs/assets/db-2-envs.png new file mode 100644 index 0000000..9f42289 Binary files /dev/null and b/docs/assets/db-2-envs.png differ diff --git a/docs/assets/db-3-network-ports.png b/docs/assets/db-3-network-ports.png new file mode 100644 index 0000000..cf7f51c Binary files /dev/null and b/docs/assets/db-3-network-ports.png differ diff --git a/docs/assets/db-4-disk.png b/docs/assets/db-4-disk.png new file mode 100644 index 0000000..ff034f7 Binary files /dev/null and b/docs/assets/db-4-disk.png differ diff --git a/docs/assets/hasura-1-image.png b/docs/assets/hasura-1-image.png new file mode 100644 index 0000000..cb0ae89 Binary files /dev/null and b/docs/assets/hasura-1-image.png differ diff --git a/docs/assets/hasura-2-envs.png b/docs/assets/hasura-2-envs.png new file mode 100644 index 0000000..36fb3d3 Binary files /dev/null and b/docs/assets/hasura-2-envs.png differ diff --git a/docs/assets/hasura-3-ports.png b/docs/assets/hasura-3-ports.png new file mode 100644 index 0000000..eabba2c Binary files /dev/null and b/docs/assets/hasura-3-ports.png differ diff --git a/docs/assets/hasura-4-access.png b/docs/assets/hasura-4-access.png new file mode 100644 index 0000000..7fd6e6f Binary files /dev/null and b/docs/assets/hasura-4-access.png differ diff --git a/docs/assets/hasura-dashboard.png b/docs/assets/hasura-dashboard.png new file mode 100644 index 0000000..70d80eb Binary files /dev/null and b/docs/assets/hasura-dashboard.png differ diff --git a/docs/assets/hasura-pod-green.png b/docs/assets/hasura-pod-green.png new file mode 100644 index 0000000..20c82dc Binary files /dev/null and b/docs/assets/hasura-pod-green.png differ diff --git a/docs/assets/hasuraroute-1-domain.png b/docs/assets/hasuraroute-1-domain.png new file mode 100644 index 0000000..adcfdb9 Binary files /dev/null and b/docs/assets/hasuraroute-1-domain.png differ diff --git a/docs/assets/hasuraroute-2-https.png b/docs/assets/hasuraroute-2-https.png new file mode 100644 index 0000000..0d8a93f Binary files /dev/null and b/docs/assets/hasuraroute-2-https.png differ diff --git a/docs/assets/hasuraroute-3-target.png b/docs/assets/hasuraroute-3-target.png new file mode 100644 index 0000000..815dfec Binary files /dev/null and b/docs/assets/hasuraroute-3-target.png differ diff --git a/docs/assets/install-saas-0-new-cluster.png b/docs/assets/install-saas-0-new-cluster.png new file mode 100644 index 0000000..658ab60 Binary files /dev/null and b/docs/assets/install-saas-0-new-cluster.png differ diff --git a/docs/assets/install-saas-1-cluster-name.png b/docs/assets/install-saas-1-cluster-name.png new file mode 100644 index 0000000..5ccde81 Binary files /dev/null and b/docs/assets/install-saas-1-cluster-name.png differ diff --git a/docs/assets/install-saas-2-cmd.png b/docs/assets/install-saas-2-cmd.png new file mode 100644 index 0000000..c1a164f Binary files /dev/null and b/docs/assets/install-saas-2-cmd.png differ diff --git a/docs/assets/install-saas-3-install-progress.png b/docs/assets/install-saas-3-install-progress.png new file mode 100644 index 0000000..abb5899 Binary files /dev/null and b/docs/assets/install-saas-3-install-progress.png differ diff --git a/docs/assets/install-saas-4-done.png b/docs/assets/install-saas-4-done.png new file mode 100644 index 0000000..b833ada Binary files /dev/null and 
b/docs/assets/install-saas-4-done.png differ
diff --git a/docs/assets/kalm-cloud-1-new-cluster.png b/docs/assets/kalm-cloud-1-new-cluster.png
new file mode 100644
index 0000000..658ab60
Binary files /dev/null and b/docs/assets/kalm-cloud-1-new-cluster.png differ
diff --git a/docs/assets/kalm-cloud-2-cluster-name.png b/docs/assets/kalm-cloud-2-cluster-name.png
new file mode 100644
index 0000000..5ccde81
Binary files /dev/null and b/docs/assets/kalm-cloud-2-cluster-name.png differ
diff --git a/docs/assets/kalm-cloud-3-install-script.png b/docs/assets/kalm-cloud-3-install-script.png
new file mode 100644
index 0000000..c1a164f
Binary files /dev/null and b/docs/assets/kalm-cloud-3-install-script.png differ
diff --git a/docs/assets/kalm-cloud-4-installing.png b/docs/assets/kalm-cloud-4-installing.png
new file mode 100644
index 0000000..abb5899
Binary files /dev/null and b/docs/assets/kalm-cloud-4-installing.png differ
diff --git a/docs/assets/kalm-cloud-5-install-succeed.png b/docs/assets/kalm-cloud-5-install-succeed.png
new file mode 100644
index 0000000..b833ada
Binary files /dev/null and b/docs/assets/kalm-cloud-5-install-succeed.png differ
diff --git a/docs/assets/kalm.png b/docs/assets/kalm.png
index 0e333f8..f0adf60 100644
Binary files a/docs/assets/kalm.png and b/docs/assets/kalm.png differ
diff --git a/docs/assets/strapi-admin.jpg b/docs/assets/strapi-admin.jpg
new file mode 100644
index 0000000..6ec3d98
Binary files /dev/null and b/docs/assets/strapi-admin.jpg differ
diff --git a/docs/assets/strapi-pod-green.jpg b/docs/assets/strapi-pod-green.jpg
new file mode 100644
index 0000000..609f6f2
Binary files /dev/null and b/docs/assets/strapi-pod-green.jpg differ
diff --git a/docs/auth/overview.md b/docs/auth/overview.md
index 3e94b29..5f1405c 100644
--- a/docs/auth/overview.md
+++ b/docs/auth/overview.md
@@ -1,6 +1,6 @@
 ---
 title: Authentication & Authorization
-sidebar_label: Overview
+sidebar_label: Auth Overview
 ---
 
 Kalm has built-in systems for managing user **Authentication** and **Authorization**. The following documentation provides a high-level overview of how Kalm's Auth systems work.
@@ -47,4 +47,3 @@ For specific details on Kalm's Role Permission definitions, see our [detailed Ro
 The Kubernetes RBAC is powerful and configurable for teams which require precise control at a granular level. However, it can be quite complicated for simple scenarios involving standard permissions and roles. This complexity grows proportionately to the number of CRDs involved. Kalm's RBAC system is designed to be initially simple and intuitive, while still allowing for complex customization if needed.
-
diff --git a/docs/cert-challenge.md b/docs/cert-challenge.md
index 855c892..16ca1bd 100644
--- a/docs/cert-challenge.md
+++ b/docs/cert-challenge.md
@@ -1,16 +1,16 @@
 ---
 title: Certificate Issuing
 ---
 
 ## Overview
 
 Kalm can help you create certificates via Let's Encrypt. This article provides an overview of how certificates are obtained, including extra details on complexities regarding issuance and renewal of **wildcard** certificates.
 
 ## Obtaining a (non-wildcard) certificate
 
 When requesting a certificate from Let's Encrypt, you must complete a "challenge" to prove that you are in control of the domain(s) to be certified. There are multiple types of challenges. Typically we can just use **HTTP-01**, which is the most common and simplest challenge type.
-
 ### HTTP-01
 
 Let's Encrypt generates a random token, which you must serve at a specific URL:
@@ -25,9 +25,9 @@ This proves that you have permission to serve files(and are therefore in control
 Kalm automates most of this process; all you have to do is point to the IP of the Kalm cluster. For example, if your domain is **myapp.com** and the cluster IP is **34.84.45.1**, you would add the following DNS record.
 
-| Type | Host      | Answer     |
-| ---- | --------- | ---------- |
-| A    | myapp.com | 34.84.45.1 |
+| Type | Host | Answer |
+|----|----|----|
+| A | myapp.com | 34.84.45.1 |
 
 Then you can initiate the certificate obtaining process in the Kalm UI by following [this guide](https-certs). Behind the scenes, Kalm does the necessary work to ensure that the token is accessible via the specified URL, then tells Let's Encrypt to initiate the challenge.
@@ -41,9 +41,9 @@ Instead of serving a token on your webserver, the DNS-01 challenge asks you to p
 To complete the challenge, you could manually add an entry to your DNS provider:
 
-| Type | Host                       | Answer |
-| ---- | -------------------------- | ------ |
-| TXT  | \_acme-challenge.myapp.com |        |
+| Type | Host | Answer |
+|----|----|----|
+| TXT | _acme-challenge.myapp.com | |
 
 However, depending on your DNS provider's API you may not be able to **automatically renew** this certificate. Instead, a common solution is to delegate the DNS lookup to a **Validation-specific DNS Server**. Kalm provides a validation-specific DNS server out of the box for this exact use case.
@@ -57,10 +57,10 @@ acme-d985e9.mycluster.com
 The Validation-specific DNS Server contains 2 entries created by default.
 
-| Type | Host                         | Answer                       |
-| ---- | ---------------------------- | ---------------------------- |
-| A    | ns.acme-d985e9.mycluster.com | 34.84.45.105                 |
-| NS   | acme-d985e9.mycluster.com    | ns.acme-d985e9.mycluster.com |
+| Type | Host | Answer |
+|----|----|----|
+| A | ns.acme-d985e9.mycluster.com | 34.84.45.105 |
+| NS | acme-d985e9.mycluster.com | ns.acme-d985e9.mycluster.com |
 
 The A record indicates that there is a DNS server ns.acme-d985e9.mycluster.com located at 34.84.45.105.
@@ -78,21 +78,21 @@ Let's say we want to obtain a wildcard certificate for:
 We can create a new certificate in Kalm. At this point, Kalm will generate a unique challenge URL that is capable of passing the DNS-01 challenge. The challenge URL is shown in the Certificate details page:
 
-![pic with domain for wildcard cert](./assets/wildcard-cname-cert.png)
+ ![pic with domain for wildcard cert](./assets/wildcard-cname-cert.png)
 
 This table indicates that the challenge for **\*.myapp.com** can be answered by **b6e4682c-5109-4a34-ac99-d5097d5b2b68.acme.mycluster.com**. Thus, in order to create a wildcard certificate for myapp.com, all we need to do is add a CNAME record at the DNS provider of myapp.com
 
-| Type  | Host                           | Answer                                                  |
-| ----- | ------------------------------ | ------------------------------------------------------- |
-| CNAME | **\_acme-challenge.myapp.com** | b6e4682c-5109-4a34-ac99-d5097d5b2b68.acme.mycluster.com |
+| Type | Host | Answer |
+|----|----|----|
+| CNAME | **_acme-challenge.myapp.com** | b6e4682c-5109-4a34-ac99-d5097d5b2b68.acme.mycluster.com |
 
-_*Note - some DNS management interfaces automatically include your domain (".myapp.com" in the above example) at the end of the Host. In this case, only include the first portion of the Host and omit the rest of the domain (Host = "\_acme-challenge")_
+*Note: some DNS management interfaces automatically include your domain (".myapp.com" in the above example) at the end of the Host. In this case, only include the first portion of the Host and omit the rest of the domain (Host = "_acme-challenge").*
 
 From this point on, Kalm tells Let's Encrypt to initiate the challenge. The following steps occur:
 
-1. Let's encrypt will make a request to **\_acme-challenge.myapp.com**
+1. Let's Encrypt will make a request to **_acme-challenge.myapp.com**
 2. The request gets forwarded to **b6e4682c-5109-4a34-ac99-d5097d5b2b68.acme.mycluster.com** due to the CNAME record
 3. The TXT record for **b6e4682c-5109-4a34-ac99-d5097d5b2b68.acme.mycluster.com** is the secret token (served by the Validation-specific DNS server)
 4. The challenge passes and the certification process proceeds normally.
@@ -103,6 +103,6 @@ As long as the CNAME record at your DNS provider is kept intact, the path will w
 #### Wildcard Cert Issuing Flow
 
-![](./assets/acme-dns-flow.svg)
+ ![](./assets/acme-dns-flow.svg)
 
-_Note: This flowchart is hard to follow, should redraw a simpler version with bigger text._
+*Note: This flowchart is hard to follow; we should redraw a simpler version with bigger text.*
diff --git a/docs/crd/access-token.mdx b/docs/crd/access-token.mdx
index 18a733f..ef6bff9 100644
--- a/docs/crd/access-token.mdx
+++ b/docs/crd/access-token.mdx
@@ -2,30 +2,50 @@
 title: Access Token
 ---
 
+`AccessToken` defines a token with permissions.
+
+For example, the following configuration sets up a token with edit permission for the component named `wordpress` in the default namespace:
+
+```yaml
+apiVersion: core.kalm.dev/v1alpha1
+kind: AccessToken
+metadata:
+  name: c153f45fd4344...95d29ec2a3bad2d8
+spec:
+  creator: admin@kalm.dev
+  memo: token for update webhook
+  rules:
+    - kind: components
+      name: wordpress
+      namespace: default
+      verb: edit
+  token: 4ddb864cfx56pkxw
+```
+
 ## AccessToken
 
-A model to describe general access token permissions, It's designed to be easy to translate to [casbin](https://casbin.org/) policies.
+A model describing general access-token permissions. It's designed to be easy to translate to [casbin](https://casbin.org/) policies. Records of this model should NOT be generated manually through the Kubernetes API directly. Instead, use the Kalm APIs to manage them.
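+
+For instance, a single rule can grant edit access to every component in every namespace by using the `*` wildcard. This is a hypothetical variant of the example above; the `*` semantics are defined in the [AccessTokenRule](#accesstokenrule) table below:
+
+```yaml
+rules:
+  # "*" matches all resource names and all namespaces, per the rule table below;
+  # the value is quoted so YAML does not parse a bare * as an alias
+  - kind: components
+    name: "*"
+    namespace: "*"
+    verb: edit
+```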
-| Name      | Type                                  | Description                        | Required |
-| --------- | ------------------------------------- | ---------------------------------- | -------- |
-| memo      | string                                | memo for this token                | False    |
-| token     | string                                | token value, minimum length is 64  | True     |
-| rules     | [AccessTokenRule](#accesstokenrule)[] | rules of this token                | True     |
-| creator   | string                                | creator of this token              | True    |
-| expiredAt | *[metav1.Time](https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Time) | when will this access token expire | False |
+| Name      | Type                                  | Description                        | Required |
+| --------- | ------------------------------------- | ---------------------------------- | -------- |
+| memo      | string                                | memo for this token                | False    |
+| token     | string                                | token value, minimum length is 64  | True     |
+| rules     | [AccessTokenRule](#accesstokenrule)[] | rules of this token                | True     |
+| creator   | string                                | creator of this token              | True     |
+| expiredAt | \*[metav1.Time](https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Time) | when will this access token expire | False |
 
 ## AccessTokenRule
 
 Describes the permissions this token has.
 
-| Name | Type | Description | Required |
-| ---- | ---- | ---- | ---- |
-| verb | [AccessTokenVerb](#accessTokenVerb) | what this token can do | True |
-| namespace | string | namespace this rule has effect on, value `*` means all namespaces. | True |
-| kind | string | kind of resource this rule has effect on, e.g. Kalm's Component, value `*` means all kinds of resources. | True |
-| name | string | name of resource this rule has effect on, value `*` means all resources of the given `kind`. | True |
+| Name      | Type                                | Description                                                                                               | Required |
+| --------- | ----------------------------------- | --------------------------------------------------------------------------------------------------------- | -------- |
+| verb      | [AccessTokenVerb](#accessTokenVerb) | what this token can do                                                                                    | True     |
+| namespace | string                              | namespace this rule has effect on, value `*` means all namespaces.                                        | True     |
+| kind      | string                              | kind of resource this rule has effect on, e.g. Kalm's Component, value `*` means all kinds of resources.  | True     |
+| name      | string                              | name of resource this rule has effect on, value `*` means all resources of the given `kind`.              | True     |
 
 ## AccessTokenVerb
 
@@ -40,4 +60,4 @@ describe the permission this token has.
 | Name       | Type | Description                                        |
 | ---------- | ---- | -------------------------------------------------- |
 | lastUsedAt | int  | timestamp at which this token was last used.       |
-| usedCount  | int  | count of how many times this token has been used. |
\ No newline at end of file
+| usedCount  | int  | count of how many times this token has been used. |
diff --git a/docs/crd/acme-server.mdx b/docs/crd/acme-server.mdx
index 585e715..db3dfc5 100644
--- a/docs/crd/acme-server.mdx
+++ b/docs/crd/acme-server.mdx
@@ -2,17 +2,30 @@
 title: ACME Server
 ---
 
+`ACMEServer` defines the ACME DNS server.
+
+For example, a typical definition of `ACMEServer` for a Kalm Cloud cluster would look as follows:
+
+```yaml
+apiVersion: core.kalm.dev/v1alpha1
+kind: ACMEServer
+metadata:
+  name: acme-server
+spec:
+  acmeDomain: acme.example-cluster.clusters.kalm-dns.com
+  nsDomain: ns-acme.example-cluster.clusters.kalm-dns.com
+```
+
 ## ACMEServer
 
-| Field | Type | Description | Required |
-| ---- | ---- | ---- | ---- |
-| acmeDomain | string | sub-domains of this will server TXT records for DNS01 challenge | True |
-| nsDomain | string | the NameServer domain | True |
+| Field      | Type   | Description                                                         | Required |
+| ---------- | ------ | ------------------------------------------------------------------- | -------- |
+| acmeDomain | string | sub-domains of this will serve TXT records for the DNS01 challenge  | True     |
+| nsDomain   | string | the NameServer domain                                               | True     |
 
 ## ACMEServerStatus
 
-| Field | Type | Description |
-| ---- | ---- | ---- |
-| ready | bool | whether this ACME-Server is up running. |
-| ipForNameServer | string | ip for this name server |
-
+| Field           | Type   | Description                                   |
+| --------------- | ------ | ---------------------------------------------- |
+| ready           | bool   | whether this ACME-Server is up and running.   |
+| ipForNameServer | string | IP for this name server                       |
diff --git a/docs/crd/component.mdx b/docs/crd/component.mdx
index 6256a5b..c0f7cef 100644
--- a/docs/crd/component.mdx
+++ b/docs/crd/component.mdx
@@ -2,69 +2,81 @@
 title: Component
 ---
 
-## Component
-
 `Component` describes your workload.
 
-| Field | Type | Description | Required |
-| ---- | ---- | ---- | ---- |
-| labels | map[string]string | labels will add to pods | False |
-| annotations | map[string]string | annotations will add to pods | False |
-| env | [EnvVar](#envvar)[] | List of environment variables to set in the container | False |
-| image | string | Docker image name | True |
-| replicas | *int32 | the number of desired pods, default to 1 | False |
-| nodeSelectorLabels | map[string]string | if set, pod will only be scheduled onto the node with given labels | False |
-| preferNotCoLocated | bool | if set, will avoid putting pods of this component in the same node | False |
-| startAfterComponents | string[] | will start this component after the components specified | False |
-| command | string | run command for pod | False |
-| enableHeadlessService | bool | if set, will start a headless service for the component | False |
-| ports | [Port](#port)[] | information on the component's service ports | False |
-| workloadType | [WorkloadType](#workloadtype) | type of the component workload, default to: server | False |
-| schedule | string | REQUIRED if WorkloadType is `cronjob`, the schedule of cronjob, e.g. `* * * * *` | False |
-| livenessProbe | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#probe-v1-core) | describes a health check to be performed against a container to determine whether it is alive to receive traffic. | False |
-| readinessProbe | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#probe-v1-core) | similar as LivenessProbe, but used to determine if the container is ready to receive traffic | False |
-| ~~BeforeStart~~ | []string | scripts running before the container starts | False |
-| ~~AfterStart~~ | []string | scripts running after the container starts | False |
-| ~~BeforeDestroy~~ | []string | scripts running before the container destroy | False |
-| resourceRequirements | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#resourcerequirements-v1-core) | describes the compute resource requirements | False |
-| terminationGracePeriodSeconds | *int64 | duration in seconds the pod needs to terminate gracefully. | False |
-| dnsPolicy | [v1.DNSPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#podspec-v1-core) | how a pod's DNS will be configured. | False |
-| restartPolicy | [v1.RestartPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#podspec-v1-core) | how the container should be restarted. | False |
-| restartStrategy | [appsv1.DeploymentStrategyType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#deploymentstrategy-v1-apps) | The deployment strategy to use to replace existing pods with new ones. | False |
-| volumes | [Volume](#volume)[] | temporary or persistent volumes for the pods | False |
-| runnerPermission | *[RunnerPermission](#runnerpermission) | setup RBAC permission for the running pods | False |
-| preInjectedFiles | [PreInjectFile](#preinjectfile)[] | convenient way to mount files into containers | False |
+For example, the following component configuration sets up a hello-world workload running the `kalmhq/echoserver` image; the workload has one replica and exposes port 8001 for receiving requests.
+
+```yaml
+apiVersion: core.kalm.dev/v1alpha1
+kind: Component
+metadata:
+  name: hello-world
+  namespace: default
+spec:
+  replicas: 1
+  workloadType: server
+  image: kalmhq/echoserver
+  ports:
+    - containerPort: 8001
+      protocol: http
+      servicePort: 8001
+```
+
+## Component
+
+| Field | Type | Description | Required |
+| ----------------------------- | ---- | ----------- | -------- |
+| labels | map[string]string | labels to add to the pods | False |
+| annotations | map[string]string | annotations to add to the pods | False |
+| env | [EnvVar](#envvar)[] | List of environment variables to set in the container | False |
+| image | string | Docker image name | True |
+| replicas | \*int32 | the number of desired pods, defaults to 1 | False |
+| nodeSelectorLabels | map[string]string | if set, pod will only be scheduled onto nodes with the given labels | False |
+| preferNotCoLocated | bool | if set, will avoid putting pods of this component on the same node | False |
+| startAfterComponents | string[] | will start this component after the components specified | False |
+| command | string | run command for the pod | False |
+| enableHeadlessService | bool | if set, will start a headless service for the component | False |
+| ports | [Port](#port)[] | information on the component's service ports | False |
+| workloadType | [WorkloadType](#workloadtype) | type of the component workload, defaults to: server | False |
+| schedule | string | REQUIRED if WorkloadType is `cronjob`; the schedule of the cronjob, e.g. `* * * * *` | False |
+| livenessProbe | \*[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#probe-v1-core) | describes a health check to be performed against a container to determine whether it is alive to receive traffic. | False |
+| readinessProbe | \*[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#probe-v1-core) | similar to LivenessProbe, but used to determine if the container is ready to receive traffic | False |
+| resourceRequirements | \*[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#resourcerequirements-v1-core) | describes the compute resource requirements | False |
+| terminationGracePeriodSeconds | \*int64 | duration in seconds the pod needs to terminate gracefully. | False |
+| dnsPolicy | [v1.DNSPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#podspec-v1-core) | how a pod's DNS will be configured. | False |
+| restartPolicy | [v1.RestartPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#podspec-v1-core) | how the container should be restarted. | False |
+| restartStrategy | [appsv1.DeploymentStrategyType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#deploymentstrategy-v1-apps) | The deployment strategy to use to replace existing pods with new ones. | False |
+| volumes | [Volume](#volume)[] | temporary or persistent volumes for the pods | False |
+| runnerPermission | \*[RunnerPermission](#runnerpermission) | setup RBAC permission for the running pods | False |
+| preInjectedFiles | [PreInjectFile](#preinjectfile)[] | convenient way to mount files into containers | False |
 
 ## EnvVar
 
-| Field | Type | Description | Required |
-| ---- | ---- | ---- | ---- |
-| name | string | Name of the environment variable. | True |
-| value | string | Source for the environment variable's value. | False |
-| type | EnvVarType | different types of EnvVar have different ways of parsing EnvVar.Value | False |
-| prefix | string | prefix of env name | False |
-| suffix | string | suffix of env name | False |
+| Field  | Type       | Description                                                            | Required |
+| ------ | ---------- | ----------------------------------------------------------------------- | -------- |
+| name   | string     | Name of the environment variable.                                      | True     |
+| value  | string     | Source for the environment variable's value.                           | False    |
+| type   | EnvVarType | different types of EnvVar have different ways of parsing EnvVar.Value  | False    |
+| prefix | string     | prefix of env name                                                     | False    |
+| suffix | string     | suffix of env name                                                     | False    |
 
 ## EnvVarType
 
-| Name | Description |
-| ---------------------- | ------------------------------------------------------------ |
-| static | environment variable's value is simply EnvVar.Value |
-| ~~EnvVarTypeExternal~~ | |
-| linked | EnvVar.Value in format of: _serviceName_/_servicePortName_, environment variable's value will resove to: _serviceName_.currentNamespace_:_servicePort_, useful if you wanna reference other component's service in same application. |
-| fieldref | EnvVar.Value is pod FieldSelector, environment variable's value will resove to the value of pod field. |
+| Name                   | Description |
+| ---------------------- | ----------- |
+| static                 | environment variable's value is simply EnvVar.Value |
+| ~~EnvVarTypeExternal~~ | |
+| linked                 | EnvVar.Value in the format _serviceName_/_servicePortName_; the environment variable's value will resolve to _serviceName_._currentNamespace_:_servicePort_, useful if you want to reference another component's service in the same application. |
+| fieldref               | EnvVar.Value is a pod FieldSelector; the environment variable's value will resolve to the value of the pod field. |
| builtin | possible EnvVar values are: `EnvVarBuiltinHost`, `EnvVarBuiltinPodName` and `EnvVarBuiltinNamespace`.

- `EnvVarBuiltinHost`: environment variable's value will resolve to `spec.nodeName` of the pod
- `EnvVarBuiltinPodName`: `metadata.name` of the pod
- `EnvVarBuiltinNamespace`: `metadata.namespace` of the pod |
-
-
 ## Port
 
-| Field | Type | Description | Required |
-| ---- | ---- | ---- | ---- |
-| containerPort | uint32 | container port | True |
-| servicePort | uint32 | service port, if absent, will set as same as ContainerPort | False |
-| protocol | PortProtocol | protocol of the service | False |
+| Field         | Type         | Description                                                     | Required |
+| ------------- | ------------ | ---------------------------------------------------------------- | -------- |
+| containerPort | uint32       | container port                                                  | True     |
+| servicePort   | uint32       | service port; if absent, will be set the same as ContainerPort  | False    |
+| protocol      | PortProtocol | protocol of the service                                         | False    |
@@ -81,24 +93,24 @@ title: Component
 ## WorkloadType
 
-| Name | Description |
-| ---- | ---- |
-| server | component run as deployment |
-| cronjob | component run as CronJob |
-| daemonset | component run as DaemonSet |
+| Name        | Description                  |
+| ----------- | ---------------------------- |
+| server      | component run as deployment  |
+| cronjob     | component run as CronJob     |
+| daemonset   | component run as DaemonSet   |
 | statefulset | component run as StatefulSet |
 
 ## Volume
 
-| Field | Type | Description | Required |
-| ---- | ---- | ---- | ---- |
-| path | string | Path within the container at which the volume should be mounted. | True |
-| hostPath | string | Required when VolumeType is: VolumeTypeHostPath,Path of the directory on the host. | False |
-| size | [resource.Quantity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#quantity-resource-core) | size of the volume | False |
-| type | [VolumeType](#volumetype) | type of storage used as volume | False |
-| storageClassName | *string | Name of the StorageClass required by the claim, used for VolumeTypePersistentVolumeClaim and VolumeTypePersistentVolumeClaimTemplate | False |
-| pvToMatch | string | for VolumeTypePersistentVolumeClaim, re-use volume which has the same same as set by PVToMatch | False |
-| pvc | string | for VolumeTypePersistentVolumeClaim and VolumeTypePersistentVolumeClaimTemplate, re-use exist PVC. | Flase |
+| Field            | Type | Description | Required |
+| ---------------- | ---- | ----------- | -------- |
+| path             | string | Path within the container at which the volume should be mounted. | True |
+| hostPath         | string | Required when VolumeType is VolumeTypeHostPath. Path of the directory on the host. | False |
+| size             | [resource.Quantity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#quantity-resource-core) | size of the volume | False |
+| type             | [VolumeType](#volumetype) | type of storage used as volume | False |
+| storageClassName | \*string | Name of the StorageClass required by the claim, used for VolumeTypePersistentVolumeClaim and VolumeTypePersistentVolumeClaimTemplate | False |
+| pvToMatch        | string | for VolumeTypePersistentVolumeClaim, re-use the volume which has the same name as set by PVToMatch | False |
+| pvc              | string | for VolumeTypePersistentVolumeClaim and VolumeTypePersistentVolumeClaimTemplate, re-use an existing PVC. | False |
 
 ## VolumeType
@@ -112,18 +124,17 @@ title: Component
 ## RunnerPermission
 
-| Field | Type | Description | Required |
-| ---- | ---- | ---- | ---- |
-| roleType | string | if is `clusterRole`, will bind `ClusterRole` to the serviceAccount of the pod, otherwise, will bind `Role` | False |
-| rules | [rbacV1.PolicyRule](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#policyrule-v1-rbac-authorization-k8s-io)[] | the PolicyRules for the binding `ClusterRole` or `Role` | False |
+| Field    | Type | Description | Required |
+| -------- | ---- | ----------- | -------- |
+| roleType | string | if set to `clusterRole`, will bind `ClusterRole` to the serviceAccount of the pod; otherwise, will bind `Role` | False |
+| rules    | [rbacV1.PolicyRule](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#policyrule-v1-rbac-authorization-k8s-io)[] | the PolicyRules for the binding `ClusterRole` or `Role` | False |
 
 ## PreInjectFile
 
-| Field | Type | Description | Required |
-| ---- | ---- | ---- | ---- |
-| content | string | content of the file | True |
-| base64 | bool | To support binary content, it allows set base64 encoded data into `Content` field and set this flag to `true`. Binary data will be restored instead of plain string in `Content`. | False |
-| mountPath | string | the mount path of the file | True |
-| readonly | bool | if `true`, the file will be readonly, default to `false` | False |
-| runnable | bool | if `true`, the file can be executed, default to `false` | False |
-
+| Field     | Type   | Description | Required |
+| --------- | ------ | ----------- | -------- |
+| content   | string | content of the file | True |
+| base64    | bool   | To support binary content, you can set base64-encoded data in the `Content` field and set this flag to `true`. The binary data will be restored instead of a plain string in `Content`. | False |
+| mountPath | string | the mount path of the file | True |
+| readonly  | bool   | if `true`, the file will be readonly, defaults to `false` | False |
+| runnable  | bool   | if `true`, the file can be executed, defaults to `false` | False |
diff --git a/docs/crd/http-route.mdx b/docs/crd/http-route.mdx
index 1617fac..87523e9 100644
--- a/docs/crd/http-route.mdx
+++ b/docs/crd/http-route.mdx
@@ -2,26 +2,59 @@
 title: HttpRoute
 ---
 
-
+`HttpRoute` defines a route to services in the cluster.
+
+For example, the default configuration for the Kalm dashboard would look as follows:
+
+```yaml
+apiVersion: core.kalm.dev/v1alpha1
+kind: HttpRoute
+metadata:
+  name: kalm-route
+spec:
+  hosts:
+    - hello-world.kalm.dev
+  destinations:
+    - host: kalm.kalm-system.svc.cluster.local:80
+      weight: 1
+  paths:
+    - /
+  httpRedirectToHttps: true
+  schemes:
+    - https
+    - http
+  methods:
+    - GET
+    - HEAD
+    - POST
+    - PUT
+    - PATCH
+    - DELETE
+    - OPTIONS
+    - TRACE
+    - CONNECT
+```
+
+This configuration directs traffic for the domain `hello-world.kalm.dev` to the in-cluster service `kalm.kalm-system.svc.cluster.local:80`. It accepts both HTTP and HTTPS requests, but automatically redirects HTTP to HTTPS.
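+
+Multiple destinations can be combined to split traffic by weight. Below is a hypothetical sketch (the two service hosts are assumed for illustration and are not part of the default route) that sends roughly 90% of matching traffic to one version of a service and 10% to another:
+
+```yaml
+destinations:
+  # example hosts, not real services; weights are proportions,
+  # so 9 to 1 means roughly 90% / 10% of matching traffic
+  - host: myapp-v1.default.svc.cluster.local:80
+    weight: 9
+  - host: myapp-v2.default.svc.cluster.local:80
+    weight: 1
+```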
 ## HttpRoute
 
-| Field | Type | Description | Required |
-| ---- | ---- | ---- | ---- |
-| hosts | []string | The destination hosts to which traffic is being sent. Could be a DNS name with wildcard prefix or an IP address. | True |
-| paths | []string | URI paths for prefix-based match | True |
-| methods | [HttpRouteMethod](#httproutemethod)[] | HTTP Method | T |
-| schemes | [HttpRouteScheme](#httproutescheme)[] | URI Scheme values | T |
-| stripPath | bool | strip path for URL match | F |
-| conditions | [HttpRouteCondition](#httproutecondition)[] | conditions for route match | F |
-| destinations | [HttpRouteDestination](#httproutedestination)[] | targets of upstream services | T |
-| httpRedirectToHttps | bool | redirect HTTP to HTTPS | F |
-| timeout | *int | Timeout for HTTP requests, default is disabled. | F |
-| retries | *[HttpRouteRetries](#httprouteretries) | Retry policy for HTTP requests. | F |
-| mirror | *[HttpRouteMirror](#httproutemirror) | Mirror HTTP traffic to a another destination in addition to forwarding the requests to the intended destination. | F |
-| fault | *[HttpRouteFault](#httproutefault) | Fault injection policy to apply on HTTP traffic at the client side. | F |
-| delay | *[HttpRouteDelay](#httproutedelay) | Delay requests before forwarding, emulating various failures such as network issues, overloaded upstream service, etc. | F |
-| cors | *[HttpRouteCORS](#httproutecors) | Cross-Origin Resource Sharing policy (CORS). | F |
+| Field               | Type                                            | Description | Required |
+| ------------------- | ------------------------------------------------ | ----------- | -------- |
+| hosts               | []string                                        | The destination hosts to which traffic is being sent. Could be a DNS name with wildcard prefix or an IP address. | True |
+| paths               | []string                                        | URI paths for prefix-based match | True |
+| methods             | [HttpRouteMethod](#httproutemethod)[]           | HTTP Method | T |
+| schemes             | [HttpRouteScheme](#httproutescheme)[]           | URI Scheme values | T |
+| stripPath           | bool                                            | strip path for URL match | F |
+| conditions          | [HttpRouteCondition](#httproutecondition)[]     | conditions for route match | F |
+| destinations        | [HttpRouteDestination](#httproutedestination)[] | targets of upstream services | T |
+| httpRedirectToHttps | bool                                            | redirect HTTP to HTTPS | F |
+| timeout             | \*int                                           | Timeout for HTTP requests, default is disabled. | F |
+| retries             | \*[HttpRouteRetries](#httprouteretries)         | Retry policy for HTTP requests. | F |
+| mirror              | \*[HttpRouteMirror](#httproutemirror)           | Mirror HTTP traffic to another destination in addition to forwarding the requests to the intended destination. | F |
+| fault               | \*[HttpRouteFault](#httproutefault)             | Fault injection policy to apply on HTTP traffic at the client side. | F |
+| delay               | \*[HttpRouteDelay](#httproutedelay)             | Delay requests before forwarding, emulating various failures such as network issues, overloaded upstream service, etc. | F |
+| cors                | \*[HttpRouteCORS](#httproutecors)               | Cross-Origin Resource Sharing policy (CORS). | F |
 
 ## HttpRouteMethod
@@ -46,72 +79,73 @@ title: HttpRoute
 ## HttpRouteCondition
 
-| Field | Type | Description | Required |
-| ---- | ---- | ---- | ---- |
-|Type|[HttpRouteConditionType](#httprouteconditiontype)|can be `query` or `header`|T|
-|Name|string||T|
-|Value|string||F|
-|Operator|[HttpRouteConditionOperator](#httprouteconditionoperator)|can be `equal`, `withPrefix` or `matchRegexp`|T|
+| Field    | Type                                                      | Description                                    | Required |
+| -------- | ---------------------------------------------------------- | ----------------------------------------------- | -------- |
+| Type     | [HttpRouteConditionType](#httprouteconditiontype)         | can be `query` or `header`                     | T        |
+| Name     | string                                                    |                                                | T        |
+| Value    | string                                                    |                                                | F        |
+| Operator | [HttpRouteConditionOperator](#httprouteconditionoperator) | can be `equal`, `withPrefix` or `matchRegexp`  | T        |
 
 ## HttpRouteConditionType
 
-| Name | Description |
-| ----- | ----------- |
+| Name   | Description                            |
+| ------ | --------------------------------------- |
 | query  | Query parameters in URL for matching. |
-| header | header in HTTP request for matching. |
+| header | header in HTTP request for matching.  |
 
 ## HttpRouteConditionOperator
 
-| Name | Description |
-| ----- | ----------- |
-| equal | same value |
-| withPrefix | condition value is prefix |
+| Name        | Description                                                    |
+| ----------- | --------------------------------------------------------------- |
+| equal       | same value                                                     |
+| withPrefix  | condition value is prefix                                      |
 | matchRegexp | condition value should be interpreted as a regular expression |
 
 ## HttpRouteDestination
 
-| Field | Type | Description | Required |
-| ---- | ---- | ---- | ---- |
-|host|string|destination host|T|
-| weight | int | The proportion of traffic to be forwarded to the destination | F |
+| Field  | Type   | Description                                                   | Required |
+| ------ | ------ | --------------------------------------------------------------- | -------- |
+| host   | string | destination host                                              | T        |
+| weight | int    | The proportion of traffic to be forwarded to the destination  | F        |
+
 ## HttpRouteRetries
 
-| Field | Type | Description | Required |
-| ---- | ---- | ---- | ---- |
-| Attempts | int | Number of retries for a given request. | F |
+| Field                | Type     | Description                                                               | Required |
+| -------------------- | -------- | --------------------------------------------------------------------------- | -------- |
+| Attempts             | int      | Number of retries for a given request.                                    | F        |
 | PerTryTimeoutSeconds | int      | Timeout per retry attempt for a given request, in seconds. MUST BE >= 1.  | T        |
-| RetryOn | []string | the conditions under which retry takes place. | F |
+| RetryOn              | []string | the conditions under which retry takes place.                             | F        |
 
 ## HttpRouteMirror
 
-| Field | Type | Description | Required |
-| ---- | ---- | ---- | ---- |
-| percentage | int | Percentage of the traffic to be mirrored. | F |
-| destination | [HttpRouteDestination](#httproutedestination) | Mirror HTTP traffic to this destination | T |
+| Field       | Type                                          | Description                                | Required |
+| ----------- | ----------------------------------------------- | -------------------------------------------- | -------- |
+| percentage  | int                                           | Percentage of the traffic to be mirrored.  | F        |
+| destination | [HttpRouteDestination](#httproutedestination) | Mirror HTTP traffic to this destination    | T        |
 
 ## HttpRouteFault
 
-| Field | Type | Description | Required |
-| ---- | ---- | ---- | ---- |
-| Percentage | int | Percentage of requests to be aborted with the error code provided. | F |
-| ErrorStatus | int | HTTP status code to use to abort the Http request. | F |
+| Field       | Type | Description                                                          | Required |
+| ----------- | ---- | ---------------------------------------------------------------------- | -------- |
+| Percentage  | int  | Percentage of requests to be aborted with the error code provided.   | F        |
+| ErrorStatus | int  | HTTP status code to use to abort the Http request.                   | F        |
 
 ## HttpRouteDelay
 
-| Field | Type | Description | Required |
-| ---- | ---- | ---- | ---- |
-| Percentage | int | Percentage of requests on which the delay will be injected. | F |
-| DelaySeconds | int | Add a fixed delay before forwarding the request, in seconds, MUST be >= 1 | T |
+| Field        | Type | Description                                                                 | Required |
+| ------------ | ---- | ------------------------------------------------------------------------------ | -------- |
+| Percentage   | int  | Percentage of requests on which the delay will be injected.                 | F        |
+| DelaySeconds | int  | Add a fixed delay before forwarding the request, in seconds, MUST be >= 1   | T        |
 
 ## HttpRouteCORS
 
-| Field | Type | Description | Required |
-| ---- | ---- | ---- | ---- |
-| AllowOrigins | [HttpRouteCondition](#httproutecondition)[] | String patterns that match allowed origins. An origin is allowed if any of the string matchers match. If a match is found, then the outgoing Access-Control-Allow-Origin would be set to the origin as provided by the client. | F |
-| AllowMethods | AllowMethod[] | List of HTTP methods allowed to access the resource. | F |
-| AllowCredentials | bool | Indicates whether the caller is allowed to send the actual request (not the preflight) using credentials. | F |
-| AllowHeaders | string[] | List of HTTP headers that can be used when requesting the resource. Serialized to Access-Control-Allow-Headers header. | F |
-| MaxAgeSeconds | int | Specifies how long the results of a preflight request can be cached. Translates to the `Access-Control-Max-Age` header. | F |
+| Field            | Type                                        | Description | Required |
+| ---------------- | --------------------------------------------- | ----------- | -------- |
+| AllowOrigins     | [HttpRouteCondition](#httproutecondition)[] | String patterns that match allowed origins. An origin is allowed if any of the string matchers match. If a match is found, then the outgoing Access-Control-Allow-Origin would be set to the origin as provided by the client. | F |
+| AllowMethods     | AllowMethod[]                               | List of HTTP methods allowed to access the resource. | F |
+| AllowCredentials | bool                                        | Indicates whether the caller is allowed to send the actual request (not the preflight) using credentials. | F |
+| AllowHeaders     | string[]                                    | List of HTTP headers that can be used when requesting the resource. Serialized to Access-Control-Allow-Headers header. | F |
+| MaxAgeSeconds    | int                                         | Specifies how long the results of a preflight request can be cached. Translates to the `Access-Control-Max-Age` header. | F |
 
 ## AllowMethod
diff --git a/docs/crd/https-cert-issuer.mdx b/docs/crd/https-cert-issuer.mdx
index d79c57e..5c49420 100644
--- a/docs/crd/https-cert-issuer.mdx
+++ b/docs/crd/https-cert-issuer.mdx
@@ -2,45 +2,65 @@
 title: Https Cert Issuer
 ---
 
-## HttpsCertIssuer
+`HttpsCertIssuer` contains the configuration for an HTTPS certificate issuer.
-| Field | Type | Description | Required | -| ------------------ | ------------------------------------ | ----------------------------------------------------- | -------- | -| caForTest | *[CAForTestIssuer](#cafortestissuer) | Self Signed CA, mainly for test | False | -| ~~ACMECloudFlare~~ | *ACMECloudFlareIssuer | | False | -| http01 | *[HTTP01Issuer](#http01issuer) | Certificate Issuer using HTTP01 challenge validations | False | -| dns01 | *[DNS01Issuer](#dns01issuer) | Certificate Issuer using DNS01 challenge validations | True | +The default HTTP01 issuer would look as follows: -## CAForTestIssuer +```yaml +apiVersion: core.kalm.dev/v1alpha1 +kind: HttpsCertIssuer +metadata: + name: default-http01-issuer +spec: + http01: {} +``` -a simple empty struct: +The default DNS01 issuer would look as follows: -```go -type CAForTestIssuer struct{} +```yaml +apiVersion: core.kalm.dev/v1alpha1 +kind: HttpsCertIssuer +metadata: + name: default-dns01-issuer +spec: + dns01: + baseACMEDomain: acme.foobar-cluster.clusters.kalm-dns.com + configs: + foobar-cluster.clusters.kalm-apps.com: + fulldomain: 4dcf92.acme.foobar-cluster.clusters.kalm-dns.com + password: m8-PS6CzMlP_VW_Mp7qWr7fwqeXly09 + subdomain: 4dcfea60-4074-9d44-dbdc5bfe5192 + username: bb453100-422e-8839-0013c700bc12 ``` +## HttpsCertIssuer + +| Field | Type | Description | Required | +| ------ | ------------------------------- | ----------------------------------------------------- | -------- | +| http01 | \*[HTTP01Issuer](#http01issuer) | Certificate Issuer using HTTP01 challenge validations | False | +| dns01 | \*[DNS01Issuer](#dns01issuer) | Certificate Issuer using DNS01 challenge validations | False | + ## HTTP01Issuer -| Field | Type | Description | Required | -| ---- | ---- | ---- | ---- | -| email | string | email for admin |False| +| Field | Type | Description | Required | +| ----- | ------ | --------------- | -------- | +| email | string | email for admin | False | ## DNS01Issuer -| Field | Type | Description | Required | -| ---- | ---- | ---- | ---- | -| BaseACMEDomain | string | base domain for our ACME-DNS server | True | -| Configs | map(string, [DNS01IssuerConfig](#dns01issuerconfig)) | configs to setup different wildcard certificate domains in our ACME-DNS server, key is domain, value is config. | False | +| Field | Type | Description | Required | +| -------------- | ---------------------------------------------------- | --------------------------------------------------------------------------------------------------------------- | -------- | +| BaseACMEDomain | string | base domain for our ACME-DNS server | True | +| Configs | map(string, [DNS01IssuerConfig](#dns01issuerconfig)) | configs to setup different wildcard certificate domains in our ACME-DNS server, key is domain, value is config. | False | ## DNS01IssuerConfig config used by cert-manager to configure our ACME-DNS server to pass DNS01 challenges. 
-| Field | Type | Description | Required | -| ---- | ---- | ---- | ---- | -| UserName | string | | True | -| Password | string | | True | -| FullDomain | string | | True | -| SubDomain | string | | True | -| AllowFrom | []string | | False | - +| Field | Type | Description | Required | +| ---------- | -------- | ----------- | -------- | +| UserName | string | | True | +| Password | string | | True | +| FullDomain | string | | True | +| SubDomain | string | | True | +| AllowFrom | []string | | False | diff --git a/docs/crd/https-cert.mdx b/docs/crd/https-cert.mdx index 1856c3b..cf02058 100644 --- a/docs/crd/https-cert.mdx +++ b/docs/crd/https-cert.mdx @@ -2,31 +2,46 @@ title: Https Cert --- -## HttpsCert +`Httpscert` defines a x.509 certificate. -| Field | Type | Description | Required | -| ------------------------- | -------- | ------------------------------------------------------------ | -------- | -| isSelfManaged | bool | whether certificate is uploaded by user or managed by Kalm | False | -| selfManagedCertSecretName | string | if `IsSelfManaged` is true, the name of secret storing the certificate info | False | -| httpsCertIssuer | string | issuer name of the cert | False | -| domains | string[] | domains in certificate | True | +For example, the default configuration for Kalm dashboard would look as follows: + +```yaml +apiVersion: core.kalm.dev/v1alpha1 +kind: HttpsCert +metadata: + name: dashboard +spec: + domains: + - foobar-cluster.kalm.dev + httpsCertIssuer: default-http01-issuer +``` + +It defines a certificate for the domain: `foobar-cluster.kalm.dev` and reference the certificate issuer: `default-http01-issuer`. +## HttpsCert +| Field | Type | Description | Required | +| ------------------------- | -------- | --------------------------------------------------------------------------- | -------- | +| isSelfManaged | bool | whether certificate is uploaded by user or managed by Kalm | False | +| selfManagedCertSecretName | string | if `IsSelfManaged` is true, the name of secret storing the certificate info | False | +| httpsCertIssuer | string | issuer name of the cert | False | +| domains | string[] | domains in certificate | True | ## HttpsCertStatus -| Field | Type | Description | -| --------------------------------- | ------------------------------------------- | ------------------------------------------------------------ | -| Conditions | [HttpsCertCondition](#httpscertcondition)[] | details of httpsCert condition | -| ExpireTimestamp | int64 | expire timestamp of the certificate. | -| IsSignedByPublicTrustedCA | bool | is this certificate signed by publicly trusted Certificate Authority. | + +| Field | Type | Description | +| --------------------------------- | ------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Conditions | [HttpsCertCondition](#httpscertcondition)[] | details of httpsCert condition | +| ExpireTimestamp | int64 | expire timestamp of the certificate. | +| IsSignedByPublicTrustedCA | bool | is this certificate signed by publicly trusted Certificate Authority. | | WildcardCertDNSChallengeDomainMap | map[string]string | for wildcard certificate, CNAME info of domains, key is the certificate domain, value if where this certificate domain should add CNAME DNS record to pass the DNS-01 challenge. 
| ## HttpsCertCondition -| Field | Type | Description | -| ---- | ---- | ---- | -| Type | HttpsCertConditionType | type of the HttpsCert condition, possible value: `Ready`. | -| Status | corev1.ConditionStatus | status of the condition, possible values: `True`, `False` and `Unknown`. | -| Reason | string | a brief machine readable explanation for the condition's last transition. | -| Message | string | a human readable description of the details of the last transition, complementing reason. | - +| Field | Type | Description | +| ------- | ---------------------- | ----------------------------------------------------------------------------------------- | +| Type | HttpsCertConditionType | type of the HttpsCert condition, possible value: `Ready`. | +| Status | corev1.ConditionStatus | status of the condition, possible values: `True`, `False` and `Unknown`. | +| Reason | string | a brief machine readable explanation for the condition's last transition. | +| Message | string | a human readable description of the details of the last transition, complementing reason. | diff --git a/docs/crd/protected-endpoint.mdx b/docs/crd/protected-endpoint.mdx index d0b7a60..cdfc451 100644 --- a/docs/crd/protected-endpoint.mdx +++ b/docs/crd/protected-endpoint.mdx @@ -2,12 +2,33 @@ title: Protected Endpoint --- -| Field | Type | Description | Required | -| --------------------------- | ----------------------------------------------- | ------------------------------------------------------------ | -------- | -| name | string | name of the endpoint | True | -| type | [ProtectedEndpointType](#protectedendpointtype) | type of protected endpoint | True | -| ports | uint32[] | the protected service ports | False | -| groups | string[] | groups the user should be in to access protected endpoint. | False | +`ProtectedEndpoint` defines the endpoint that needs authorization to access. + +For example, the default configuration for Kalm dashboard would look as follows: + +``` +apiVersion: core.kalm.dev/v1alpha1 +kind: ProtectedEndpoint +metadata: + name: kalm + namespace: kalm-system +spec: + name: kalm + allowToPassIfHasBearerToken: true + ports: + - 3001 +``` + +It protects the 3001 port of the kalm component. + +## Protected Endpoint + +| Field | Type | Description | Required | +| --------------------------- | ----------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | +| name | string | name of the endpoint | True | +| type | [ProtectedEndpointType](#protectedendpointtype) | type of protected endpoint | True | +| ports | uint32[] | the protected service ports | False | +| groups | string[] | groups the user should be in to access protected endpoint. | False | | allowToPassIfHasBearerToken | bool | Allow auth proxy to let the request pass if it has bearer token. This flag should be set carefully. Please make sure that the upstream can handle the token correctly. Otherwise, client can bypass Kalm SSO by sending a not empty bearer token. 
| False | ## ProtectedEndpointType @@ -17,4 +38,3 @@ title: Protected Endpoint | Port | Protect a single port of a component | | Component | Protect all ports of a component | | HttpRoute | Protect all ports that receive traffic from this route | - diff --git a/docs/crd/role-binding.mdx b/docs/crd/role-binding.mdx index db7a25c..c64d2fa 100644 --- a/docs/crd/role-binding.mdx +++ b/docs/crd/role-binding.mdx @@ -2,15 +2,29 @@ title: Role Binding --- -# RoleBinding +`RoleBinding` describes the roles that the subject have in Kalm system. -RoleBinding describes the roles that the subject have in Kalm system. +For example, the following `RoleBinding` configuration sets the user: `foo@kalm.dev` as clusterOwner: -| Name | Type | Description | Required | -| --------- | ------------------------------------------------------------ | ---------------------------------- | -------- | -| subject | string | subject that is binding to role | True | -| subjectType | string | type of subject, can be: `user` and `group`. | True | -| role | string | the role that this subject is binding to, can be: `viewer`, `editor`, `owner`, `clusterViewer`, `clusterEditor` and `clusterOwner`. | True | -| creator | string | Creator of this binding | True | -| expiredAt | *[metav1.Time](https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Time) | Expire time of this key. Infinity if blank | False | +```yaml +apiVersion: core.kalm.dev/v1alpha1 +kind: RoleBinding +metadata: + name: cluster-rolebinding-e06ee4322400e9905e8aa917bc384ef9 + namespace: kalm-system +spec: + creator: admin@kalm.dev + role: clusterOwner + subject: foo@kalm.dev + subjectType: user +``` +## RoleBinding + +| Name | Type | Description | Required | +| ----------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | -------- | +| subject | string | subject that is binding to role | True | +| subjectType | string | type of subject, can be: `user` and `group`. | True | +| role | string | the role that this subject is binding to, can be: `viewer`, `editor`, `owner`, `clusterViewer`, `clusterEditor` and `clusterOwner`. | True | +| creator | string | Creator of this binding | True | +| expiredAt | \*[metav1.Time](https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Time) | Expire time of this key. Infinity if blank | False | diff --git a/docs/crd/sso.mdx b/docs/crd/sso.mdx index e2e37fc..a483318 100644 --- a/docs/crd/sso.mdx +++ b/docs/crd/sso.mdx @@ -2,53 +2,64 @@ title: SingleSignOn Config --- -:::note --Working in progress -::: +`SingleSignOnConfig` defines the configurations of SSO. -## SingleSignOnConfig +A typical `SingleSignOnConfig` for a Kalm-Cloud cluster would look as follows: -| Name | Type | Description | Required | -|-----------------------|----------------------------------------|----------------------------------------------------------------------------|---------------------------------------------------------------| -| issuer | string | The base path of dex and the external name of the OpenID Connect service. | Domain or issuer can't be blank at the same time. | -| jwksUri | string | JWKS endpoint used to verify JWT tokens | | -| domain | string | kalm dex oidc provider domain | Domain or issuer can't be blank at the same time. 
| -| useHttp | bool | Default scheme is https, this flag is to change it to http | False | -| port | *int | port of kalm dex oidc provider | False | -| showApproveScreen | bool | | | -| alwaysShowLoginScreen | bool | | | -| connectors | [DexConnector](#dexconnector) [] | Dex connectors config | Connectors and TemporaryUser can't be blank at the same time. | -| temporaryUser | *[TemporaryDexUser](#temporarydexuser) | Temporary Dex user, mainly used for bootstrapping setup of Kalm. | Connectors and TemporaryUser can't be blank at the same time. | -| externalEnvoyExtAuthz | *[ExtAuthzEndpoint](#extauthzendpoint) | Create service entry if the ext_authz service is running out of istio mesh | False | -| idTokenExpirySeconds | *uint32 | expiry of idToken in seconds | False | +```yaml +apiVersion: core.kalm.dev/v1alpha1 +kind: SingleSignOnConfig +metadata: + name: sso + namespace: kalm-system +spec: + domain: foobar.kalm.dev + idTokenExpirySeconds: 300 + issuer: https://kalm.dev/oidc + issuerClientId: W67pe2..LQ01wf1p + issuerClientSecret: -oR5lZmbgERGz9F2..YQYE0J561mRYQ +``` -## DexConnector +It configures its OIDC provider by assigning `issuer` as `https://kalm.dev/oidc`, it also set the client id and client secret in the spec. + +## SingleSignOnConfig -| Name | Type | Description | Required | -|--------|-----------------------|-------------------------------------------------------------|----------| -| type | string | type of Connector, currently support: `github` and `gitlab` | True | -| id | string | id of dex connector | True | -| name | string | name of dex connector | True | -| config | *runtime.RawExtension | config for dex connector | True | +| Name | Type | Description | Required | +| --------------------- | --------------------------------------- | -------------------------------------------------------------------------- | ------------------------------------------------------------- | +| issuer | string | The base path of dex and the external name of the OpenID Connect service. | Domain or issuer can't be blank at the same time. | +| jwksUri | string | JWKS endpoint used to verify JWT tokens | | +| domain | string | kalm dex oidc provider domain | Domain or issuer can't be blank at the same time. | +| useHttp | bool | Default scheme is https, this flag is to change it to http | False | +| port | \*int | port of kalm dex oidc provider | False | +| showApproveScreen | bool | | | +| alwaysShowLoginScreen | bool | | | +| connectors | [DexConnector](#dexconnector) [] | Dex connectors config | Connectors and TemporaryUser can't be blank at the same time. | +| temporaryUser | \*[TemporaryDexUser](#temporarydexuser) | Temporary Dex user, mainly used for bootstrapping setup of Kalm. | Connectors and TemporaryUser can't be blank at the same time. 
|
+| externalEnvoyExtAuthz | \*[ExtAuthzEndpoint](#extauthzendpoint) | Create service entry if the ext_authz service is running outside of the istio mesh | False |
+| idTokenExpirySeconds | \*uint32 | expiry of idToken in seconds | False |
+## DexConnector
+| Name | Type | Description | Required |
+| ------ | ---------------------- | -------------------------------------------------------------- | -------- |
+| type | string | type of Connector, currently supported: `github` and `gitlab` | True |
+| id | string | id of dex connector | True |
+| name | string | name of dex connector | True |
+| config | \*runtime.RawExtension | config for dex connector | True |
## TemporaryDexUser
| Name | Type | Description | Required |
-|--------------|--------|-----------------------------|----------|
+| ------------ | ------ | --------------------------- | -------- |
| username | string | | True |
| passowrdHash | string | bcrypt hash of the password | True |
| userId | string | | True |
| email | string | | True |
-
-
## ExtAuthzEndpoint
| Name | Type | Description | Required |
-|--------|--------|------------------|----------|
+| ------ | ------ | ---------------- | -------- |
| host | string | host of endpoint | True |
| port | int | port of endpoint | True |
| scheme | string | http or https | True |
-
diff --git a/docs/tut-hello.md b/docs/get-started.md
similarity index 97%
rename from docs/tut-hello.md
rename to docs/get-started.md
index 5c28288..7a9090a 100644
--- a/docs/tut-hello.md
+++ b/docs/get-started.md
@@ -1,5 +1,5 @@
---
-title: Hello Kalm
+title: Get Started
---
Let's go through a simple example of deploying a single pod holding an nginx container.
@@ -10,8 +10,8 @@ This tutorial will teach you:
- How to create application configurations
- How to open a port and direct traffic to an application
-
- +
+
## Step 1: Create Application diff --git a/docs/guide-config.md b/docs/guide-config.md index ba85e7a..aa81e9c 100644 --- a/docs/guide-config.md +++ b/docs/guide-config.md @@ -4,11 +4,10 @@ title: Add Env Variables & Configs A well designed application is likely to have configurations which vary between deployments. Kubernetes makes it easy to override the configurations specified in images. -
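+As a quick illustration of that idea at the raw Kubernetes level, here is a minimal sketch (assuming a hypothetical deployment named `my-app`) of overriding an environment variable without rebuilding the image; Kalm's web UI drives the equivalent for you:
+
+```bash
+# Override (or add) an environment variable on a hypothetical deployment named my-app;
+# the pods are restarted with the new value, no image rebuild needed.
+kubectl set env deployment/my-app LOG_LEVEL=debug
+```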
- +
+
- ## Example Container Setup Let's go through an example of configuring Redis. Our goal is to change the `maxmemory` parameter. (Imagine that you need a larger amount of memory for production vs dev) diff --git a/docs/guide-logs.md b/docs/guide-logs.md index 769b0c8..a82aef5 100644 --- a/docs/guide-logs.md +++ b/docs/guide-logs.md @@ -2,8 +2,8 @@ title: View Container Logs --- -
- +
+
Sometimes its useful to view the log output of a particular container. Kalm provides a view to quickly view logs in the web: diff --git a/docs/install.md b/docs/install.md deleted file mode 100644 index afdfb02..0000000 --- a/docs/install.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: Installation ---- - -## Compatibility - -Kalm is optimized to work with the latest version of Kubernetes (currently 1.18.x), and is backward compatible down to 1.14.x - -| Kalm version | k8s 1.15.x | k8s 1.16.x | k8s 1.17.x | k8s 1.18.x | -| ------------ | ---------- | ---------- | ---------- | ---------- | -| v0.1.0 | ✔ | ✔ | ✔ | ✔ | - -For smooth performance, we recommend a Kubernetes cluster with at least 4 vCPUs and 8G of memory. - -## Step 1: Prerequisites - -### Cluster Setup - -Kalm can be used to manage any Kubernetes cluster. -For the purpose of this tutorial, we recommend that you try kalm on [Minikube](./platform-setup/minikube.md) localhost cluster first. - -Alternatively, see the References sections for provisioning clusters on [AWS](./platform-setup/aws-eks.md), [Google Cloud](./platform-setup/gcp-gke.md) and [Azure](./platform-setup/azure-aks.md). - -### Install `kubectl` - -Installation of Kalm requires `kubectl`, which can be installed according to the official Install and Set Up `kubectl` docs. - -:::note -Please make sure that the version of the `kubectl` is sufficient. It is strongly recommended that you use the version corresponding to the cluster. Using an earlier version of `kubectl` may cause errors in the installation process. -::: - -## Step 2: Install Kalm - -:::caution -Before proceeding, please make sure that the current context of your `kubectl` is set to the correct cluster. -::: - -Kalm can be installed as a [Kubernetes Operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) directly onto your cluster via: - -```bash -curl -sL https://get.kalm.dev | bash - -# You can install a specific version like this -curl -sL https://get.kalm.dev | bash -s v0.1.0-rc.7 -``` - -This command installs Kalm plus a few dependencies, and typically takes 3-5 minutes to complete. Relax in the mean time, or watch this short video on how Kalm works: - -
- -
- -
-
-The installation script will give you real-time feedback on services spinning up. Once you see **Installation Complete**, move on to the next step.
-
-## Step 3: Access Webserver
-
-To enable browser access, open a port via:
-
-```bash
-kubectl port-forward -n kalm-system \
-  $(kubectl get pod -n kalm-system \
-  -l app=kalm \
-  -ojsonpath="{.items[0].metadata.name}") \
-  3010:3010
-```
-
-Now open http://localhost:3010/
-
-![login screen](assets/kalm-empty-state.png)
-
-## Step 4: (Optional) Configure Access
-
-Although we can visit Kalm through localhost port forwarding, it is a good idea to setup a domain and basic login, so you can access Kalm on any computer, and share it with colleagues.
-
-To setup permanent access, click **FINISH THE SETUP STEPS** button in the top navigation bar and follow the on-screen directions.
-
-![setup domain](assets/setup-domain.png)
-
-Point a domain to the cluster ip. If you don't have a domain, you can use the wildcard DNS nip.io:
-
-_.nip.io_
-
-:::note
-For minikube, if no ip is shown, run `minikube tunnel` and refresh the page.
-:::
-
-Click **CHECK AND CONTINUE**. After the configuration is complete, record the generated **Email** and **Password** login information. From this point on, port-forwarding is no longer required, and you should be able to login via the domain you specified.
-
-## Next Step
-
-Congratulations! Kalm is now properly setup and running. Next, let's create our first application to see how Kalm works.
diff --git a/docs/install/eks.md b/docs/install/eks.md
new file mode 100644
index 0000000..c8cc741
--- /dev/null
+++ b/docs/install/eks.md
@@ -0,0 +1,89 @@
+---
+title: AWS Elastic Kubernetes Service
+---
+
+This guide demonstrates how to create an Amazon EKS cluster using either the AWS Command Line Interface (CLI) or Terraform. The resulting cluster generated from this guide will be ready to install Kalm.
+## Prerequisites
+
+Before following either guide below:
+
+- Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
+- Install [the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html).
+- Configure the AWS CLI with:
+  ```shell
+  $ aws configure
+  AWS Access Key ID [None]: AKIAxxxxxxxxxEXAMPLE
+  AWS Secret Access Key [None]: wJalrXUtxxxxxxxxxxxxxxXAMPLEKEY
+  Default region name [None]: us-west-2
+  Default output format [None]: json
+  ```
+  Enter your access key ID and secret at the prompt. You can find your access key information on [your AWS security credentials page](https://console.aws.amazon.com/iam/home?#/security_credentials); create an access key if none exists yet.

+## Creating an EKS Cluster using the AWS CLI
+
+In addition to the AWS CLI, this guide uses [eksctl](https://eksctl.io/) - a simple CLI tool for creating clusters on EKS.
+
+To install eksctl, follow Amazon's detailed [eksctl installation instructions](https://docs.aws.amazon.com/eks/latest/userguide/eksctl.html#installing-eksctl) for Linux, macOS, and Windows.
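+For convenience, a typical eksctl install on Linux or macOS looks something like the sketch below (adapted from those instructions; check Amazon's page for the current commands):
+
+```bash
+# Download the latest eksctl release and move the binary onto the PATH
+curl --silent --location "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_$(uname -s)_amd64.tar.gz" | tar xz -C /tmp
+sudo mv /tmp/eksctl /usr/local/bin
+
+# Verify the installation
+eksctl version
+```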
+
+```bash
+# create key (the key name can be updated according to your needs)
+aws ec2 create-key-pair --region us-east-2 --key-name keypair-for-kalm
+
+# create the eks cluster; the cluster name, region, and key can be updated
+# according to your needs (make sure the key exists); for details, see:
+# https://docs.aws.amazon.com/eks/latest/userguide/getting-started-eksctl.html
+eksctl create cluster \
+--name kalm-on-eks \
+--region us-east-2 \
+--with-oidc \
+--ssh-access \
+--ssh-public-key keypair-for-kalm \
+--managed
+```
+
+The creation process typically takes between 15 and 30 minutes. Once it completes, the `kubectl` config file will automatically update to use the newly created cluster as the current cluster. To double-check this, run `kubectl config get-contexts`. The current cluster will be marked with a `*` in the output.
+
+## Creating an EKS Cluster with Terraform
+
+Alternatively, the EKS cluster can be created using Terraform. To use Terraform, first:
+
+- Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
+- Install [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli)
+
+Next, clone the git repository below and `cd` into the eks directory (script included below for convenience):
+
+```
+git clone https://github.com/kalmhq/terraform
+cd terraform/eks
+```
+
+Create the cluster by running the following commands:
+
+```
+terraform init
+terraform apply
+```
+
+Type `yes` to confirm.
+
+This process should take around 15-30 minutes.
+
+Once it finishes, view the newly created cluster with:
+
+```
+aws eks list-clusters
+```
+
+The following command configures kubectl to use the new cluster.
+
+```
+aws eks --region us-west-2 update-kubeconfig --name NAME_OF_YOUR_CLUSTER
+```
+
+Verify the cluster is properly set up and accessible.
+
+```
+kubectl get nodes
+```
+
+## Next Step
+
+To install Kalm onto the cluster, see [Install Kalm Cloud](install-kalm-cloud).
diff --git a/docs/install/gke.md b/docs/install/gke.md
new file mode 100644
index 0000000..2abeffb
--- /dev/null
+++ b/docs/install/gke.md
@@ -0,0 +1,152 @@
+---
+title: Google Kubernetes Engine
+---
+
+This guide demonstrates how to create a GKE cluster using any of the following options:
+
+- The Google Cloud Console (web interface at https://console.cloud.google.com/)
+- The gcloud command-line tool
+- Terraform
+
+## Create a GKE Cluster using the Google Cloud Console
+
+The simplest way to create a GKE cluster is by using the Google Cloud Console, which acts as a web interface for GCP. Use the following steps to create a GKE cluster:
+
+- Go to [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list)
+- Click **CREATE CLUSTER**
+  - Select a cluster name, and customize any other fields as needed (default options will work)
+- Click **CREATE**
+  - The cluster creation process typically takes 5-10 minutes
+
+Once the cluster is created:
+- Go to [https://console.cloud.google.com/kubernetes/list](https://console.cloud.google.com/kubernetes/list), click the 3 dots to the right of the newly created cluster, and select Connect
+  - Enable the Kubernetes API if not already enabled
+- Copy the command-line access command and run it on a local terminal. This command will configure `kubectl` to access the newly created cluster.
+
+## Create a GKE Cluster using the gcloud Command-Line Tool
+
+GKE clusters can also be created using Google's gcloud CLI.
The gcloud CLI is included in the Google Cloud SDK, which can be installed by following the instructions in [Google's SDK installation docs](https://cloud.google.com/sdk/docs).
+
+GKE clusters are organized within [Google Cloud Projects](https://cloud.google.com/resource-manager/docs/creating-managing-projects). The project name is used in several spots, so for the sake of simplicity this demo sets a PROJECT_ID variable to the name of the project used.
+
+```
+export PROJECT_ID=hello-kalm
+```
+
+This can be either an existing project or a new one. To create a new project, run the command below (the project name is set to "hello-kalm" in this example):
+
+```
+gcloud projects create $PROJECT_ID
+```
+
+In order to create a Kubernetes cluster on GCP, [billing must be enabled](https://cloud.google.com/billing/docs/how-to/modify-project#confirm_billing_is_enabled_on_a_project) for that project. By default, new projects will **not** have billing enabled.
+
+Additionally, projects need to specifically enable the Kubernetes Engine API, which can be done using the command below:
+
+```
+gcloud services enable container.googleapis.com
+```
+
+With billing and the Kubernetes Engine API enabled, the following commands will provision a cluster with 4 nodes (modify the zone as needed):
+
+```
+export M_TYPE=n1-standard-2 && \
+export ZONE=us-west2-a && \
+export CLUSTER_NAME=${PROJECT_ID}-${RANDOM} && \
+gcloud container clusters create $CLUSTER_NAME \
+  --cluster-version latest \
+  --machine-type=$M_TYPE \
+  --num-nodes 4 \
+  --zone $ZONE \
+  --project $PROJECT_ID
+```
+
+It will take a few minutes to create the cluster. Once complete, configure kubectl to use the new cluster:
+
+```
+gcloud container clusters get-credentials $CLUSTER_NAME \
+  --zone $ZONE \
+  --project $PROJECT_ID
+```
+
+Verify the cluster is properly set up and accessible.
+
+```
+kubectl cluster-info
+```
+
+## Create a GKE Cluster using Terraform
+
+Alternatively, the GKE cluster can be created using Terraform. To use Terraform, first:
+
+- Install [the gcloud CLI](https://cloud.google.com/sdk/docs).
+- Install [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli?in=terraform/gcp-get-started).
+- Install [kubectl](https://kubernetes.io/docs/tasks/tools/included/install-kubectl-gcloud/)
+
+GKE clusters are organized within [Google Cloud Projects](https://cloud.google.com/resource-manager/docs/creating-managing-projects). A new project can be created with:
+
+```
+gcloud projects create PROJECT-NAME-HERE
+```
+
+Use the `gcloud init` command and select the new (or existing) project ([usage details here](https://cloud.google.com/sdk/gcloud/reference/init)).
+
+Terraform also requires access to the Application Default Credentials (ADC), which can be granted using the following command:
+
+```
+gcloud auth application-default login
+```
+
+The Kubernetes Engine API will need to be enabled for the project as well, which can be done using the following command:
+
+```
+gcloud services enable container.googleapis.com
+```
+
+Clone the repository below to download Kalm's installation scripts for Terraform.
+
+```
+git clone https://github.com/kalmhq/terraform
+cd terraform/gke
+```
+
+Open 'terraform.tfvars' in a text editor and specify the ID of the current Google Cloud project. The region can be specified as well.
+
+```
+# terraform.tfvars
+project_id = "REPLACE_ME"
+region     = "us-west2"
+```
+
+With these settings configured, create the cluster by using the following commands:
+
+```
+terraform init
+terraform apply
+```
+
+Type `yes` to confirm the installation.
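+If you'd like to review exactly what will be created before confirming, a common (optional) Terraform pattern is to save and then apply a plan; a minimal sketch:
+
+```bash
+# Preview and save the set of resources Terraform will create
+terraform plan -out=tfplan
+
+# Apply exactly the plan that was reviewed (no extra confirmation prompt)
+terraform apply tfplan
+```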
+
+The process should take around 5-10 minutes. Once complete, retrieve the name of the newly created cluster.
+
+```
+terraform output
+```
+
+Configure kubectl to use the new cluster.
+
+```
+gcloud container clusters get-credentials NAME_OF_YOUR_CLUSTER --zone ZONE_OF_CLUSTER
+```
+
+- *Note - identify the project's zone either on the [gcloud console](https://console.cloud.google.com/) or using the CLI by entering: `gcloud info | grep "zone"`*
+
+Verify the cluster is properly set up and accessible.
+
+```
+kubectl cluster-info
+```
+
+## Next Step
+
+To install Kalm onto the cluster, see [Install Kalm Cloud](install-kalm-cloud).
diff --git a/docs/install/install-kalm-cloud.md b/docs/install/install-kalm-cloud.md
new file mode 100644
index 0000000..ae7b5d8
--- /dev/null
+++ b/docs/install/install-kalm-cloud.md
@@ -0,0 +1,126 @@
+---
+title: How To Install Kalm Cloud
+---
+
+Kalm installs smoothly into almost any existing Kubernetes cluster.
+
+1. Go to [http://kalm.dev/signin](http://kalm.dev/signin)
+
+Signing in can be done with either GitHub or Google OAuth, which effectively creates a Kalm account.
+
+2. After signing in, click the **INSTALL NEW CLUSTER** button in the upper right corner:
+
+   ![install new cluster](../assets/kalm-cloud-1-new-cluster.png)
+
+3. Choose a name for your cluster
+
+![cluster name](../assets/kalm-cloud-2-cluster-name.png)
+
+4. Before running the installation script, double-check to make sure `kubectl` is pointing at the desired cluster. Running `kubectl config get-contexts` will show the current cluster marked with a `*` in the output.
+
+5. Run the install script:
+
+![install script](../assets/kalm-cloud-3-install-script.png)
+
+The installation process will continuously update while it runs:
+
+![installing](../assets/kalm-cloud-4-installing.png)
+
+Typically, the installation takes around 15-30 minutes to complete.
+
+6. Once done, click the **GO TO DASHBOARD** button to start using Kalm.
+
+![install succeeded](../assets/kalm-cloud-5-install-succeed.png)
+
+# Troubleshooting (WORK IN PROGRESS)
+
+While installing Kalm is designed to be as smooth as possible, we have provided some additional information here to help with troubleshooting just in case something goes wrong during the process. The troubleshooting guides below are categorized by which step of the installation path fails.
+
+Possible issues for each install step:
+
+- INSTALL_CERT_MANAGER: Kalm is installing Cert-Manager. If this step is stuck, the cluster is likely having problems installing the pods. Run `kubectl get pods -n cert-manager` to see if anything is going wrong. Most normal clusters should return something similar to:
+
+  ```bash
+  ➜ kubectl get pods -n cert-manager
+  NAME                                       READY   STATUS    RESTARTS   AGE
+  cert-manager-7cb75cf6b4-48jvk              1/1     Running   0          29d
+  cert-manager-cainjector-759496659c-7smnl   1/1     Running   0          29d
+  cert-manager-webhook-7c75b89bf6-p9lwx      1/1     Running   0          29d
+  ```
+
+- INSTALL_ISTIO: Kalm is installing Istio.
If this step is stuck, the cluster is likely having problems installing Istio. Run `kubectl get pods -n istio-operator` and `kubectl get pods -n istio-system` to see if anything is going wrong. A normally running cluster should return something similar to:
+
+  ```bash
+  ➜ kubectl get pods -n istio-operator
+  NAME                              READY   STATUS    RESTARTS   AGE
+  istio-operator-7654b568b7-nqclz   1/1     Running   0          16d
+
+  ➜ kubectl get pods -n istio-system
+  NAME                                   READY   STATUS    RESTARTS   AGE
+  istio-ingressgateway-59cf75bf7-hqpkd   1/1     Running   0          16d
+  istiod-59cc49f5d9-cmzwb                1/1     Running   0          16d
+  prometheus-6c77954d87-wmrpn            1/1     Running   0          16d
+  ```
+
+- INSTALL_KALM_CONTROLLER: Kalm is installing kalm-controller. If this step is stuck, the cluster is very likely having problems installing the pods. Run `kubectl get pods -n kalm-system -l control-plane=controller` to see if anything is going wrong. A normally running cluster should return something similar to:
+
+  ```bash
+  ➜ kubectl get pods -n kalm-system -l control-plane=controller
+  NAME                               READY   STATUS    RESTARTS   AGE
+  kalm-controller-758c5498c9-6vtp7   3/3     Running   0          110m
+  ```
+
+- INSTALL_KALM_DASHBOARD: Kalm is installing the Kalm dashboard. If this step is stuck, the cluster is very likely having problems installing the pods. Run `kubectl get pods -n kalm-system -l kalm-component=kalm` to see if anything is going wrong. A normally running cluster should return something similar to:
+
+  ```bash
+  ➜ kubectl get pods -n kalm-system -l kalm-component=kalm
+  NAME                    READY   STATUS    RESTARTS   AGE
+  kalm-6654df4b5c-w88cv   2/2     Running   0          121m
+  ```
+
+- INSTALL_ACME_SERVER: Kalm is installing the ACME DNS server. If this step is stuck, the cluster is very likely having problems installing the pods. Run `kubectl get pods -n kalm-system -l kalm-component=acme-server` to see if anything is going wrong. A normally running cluster should return something similar to:
+
+  ```bash
+  ➜ kubectl get pods -n kalm-system -l kalm-component=acme-server
+  NAME                           READY   STATUS    RESTARTS   AGE
+  acme-server-5f9f786f58-89tgl   2/2     Running   0          125m
+  ```
+
+- CONFIGURE_KALM_DASHBOARD_ACCESS: Kalm is waiting for the cloud provider to assign a public load balancer for the Kalm dashboard. Use `kubectl describe services -n istio-system istio-ingressgateway` to check the details of the dashboard service. Normally the cloud provider will assign a public IP or domain for the service; run `kubectl get services -n istio-system istio-ingressgateway -ojsonpath='{.status}'` to check the info.
+
+  ```bash
+  ➜ kalm-cloud git:(main) kubectl get services -n istio-system istio-ingressgateway -ojsonpath='{.status}'
+  {"loadBalancer":{"ingress":[{"hostname":"a8298a1a1a8ee473cbb923fa5de3576c-60911533.ap-northeast-1.elb.amazonaws.com"}]}}
+  ```
+
+  If the result is empty, run `kubectl describe services -n istio-system istio-ingressgateway` to get the related events of the service.
+
+  ```bash
+  ➜ kubectl describe services -n istio-system istio-ingressgateway
+  Name:                     istio-ingressgateway
+  Namespace:                istio-system
+  ...
+  Session Affinity:         None
+  External Traffic Policy:  Cluster
+  Events:                   ...
+  ```
+
+  The last section of the output, **Events**, may show why the assignment is not working. There are several possible reasons for the failure: wrongly tagged AWS VPC subnets, running out of load balancer quota, etc. (see [https://aws.amazon.com/premiumsupport/knowledge-center/eks-load-balancers-troubleshooting/](https://aws.amazon.com/premiumsupport/knowledge-center/eks-load-balancers-troubleshooting/) for more details). The assignment can also fail silently without any warning events; this case is really hard to debug, which is why we strongly suggest setting up the EKS cluster using our recommended guide: [How to setup EKS](eks).
+
+- CONFIGURE_ACME_SERVER_ACCESS: Similar to CONFIGURE_KALM_DASHBOARD_ACCESS, Kalm is waiting for the cloud provider to assign a public load balancer for the ACME DNS server. Use `kubectl get service -n kalm-system lb-svc-acme-server -ojsonpath='{.status}'` and `kubectl describe service -n kalm-system lb-svc-acme-server` to see the details of the service.
+
+  ```bash
+  ➜ kubectl get service -n kalm-system lb-svc-acme-server -ojsonpath='{.status}'
+  {"loadBalancer":{"ingress":[{"hostname":"xxx.elb.ap-northeast-1.amazonaws.com"}]}}
+
+  ➜ kubectl describe service -n kalm-system lb-svc-acme-server
+  Name:                     lb-svc-acme-server
+  Namespace:                kalm-system
+  ...
+  Session Affinity:         None
+  External Traffic Policy:  Cluster
+  Events:
+  ```
+
+- REPORT_CLUSTER_INFO: Kalm is reporting cluster info, such as the public IPs and domains the cloud provider just assigned, back to Kalm Cloud.
+- CLUSTER_FULLY_SETUP: Kalm is waiting for the final setup of the cluster. The most time-consuming step is the issuance of the HTTPS certificate for the Kalm dashboard, which can take 10-15 minutes.
diff --git a/docs/install/install-local-k3s.md b/docs/install/install-local-k3s.md
new file mode 100644
index 0000000..46c32e5
--- /dev/null
+++ b/docs/install/install-local-k3s.md
@@ -0,0 +1,174 @@
+---
+title: "Install Kalm On K3s"
+---
+
+## Install K3s on Mac
+
+K3s is a lightweight Kubernetes distribution which is natively available for Linux. To install it on a Mac, we need to install [multipass](https://multipass.run/) first.
+
+```
+❯ brew install --cask multipass
+❯ multipass version
+multipass 1.6.2+mac
+multipassd 1.6.2+mac
+```
+
+Now create a VM with multipass, specifying 2GB of memory and a 5GB disk.
+
+```
+❯ multipass launch --name k3sVM --mem 2G --disk 5G
+Creating k3sVM -
+Launched: k3sVM
+```
+
+Wait for the VM to be created, then open a shell into the VM:
+
+```
+❯ multipass shell k3sVM
+Welcome to Ubuntu 20.04.2 LTS (GNU/Linux 5.4.0-66-generic x86_64)
+
+ * Documentation:  https://help.ubuntu.com
+ * Management:     https://landscape.canonical.com
+ * Support:        https://ubuntu.com/advantage
+
+  System information as of Fri Feb 26 03:49:55 CST 2021
+
+  System load:  0.0               Processes:               107
+  Usage of /:   26.7% of 4.67GB   Users logged in:         0
+  Memory usage: 9%                IPv4 address for enp0s2: 192.168.64.9
+  Swap usage:   0%
+
+1 update can be installed immediately.
+0 of these updates are security updates.
+To see these additional updates run: apt list --upgradable
+
+To run a command as administrator (user "root"), use "sudo ".
+See "man sudo_root" for details.
+
+ubuntu@k3sVM:~$
+```
+
+## Install K3s and Create a Cluster
+
+To install k3s, we recommend using the flag `--write-kubeconfig-mode`. It will make your first Kubernetes experience easier.
For more detail [check here](https://github.com/k3s-io/k3s/issues/389#issuecomment-503616742). + +``` +ubuntu@k3sVM:~$ curl -sfL https://get.k3s.io | sh -s - --write-kubeconfig-mode 644 +[INFO] Finding release for channel stable +[INFO] Using v1.20.4+k3s1 as release +[INFO] Downloading hash https://github.com/rancher/k3s/releases/download/v1.20.4+k3s1/sha256sum-amd64.txt +[INFO] Skipping binary downloaded, installed k3s matches hash +[INFO] Skipping /usr/local/bin/kubectl symlink to k3s, already exists +[INFO] Skipping /usr/local/bin/crictl symlink to k3s, already exists +[INFO] Skipping /usr/local/bin/ctr symlink to k3s, already exists +[INFO] Creating killall script /usr/local/bin/k3s-killall.sh +[INFO] Creating uninstall script /usr/local/bin/k3s-uninstall.sh +[INFO] env: Creating environment file /etc/systemd/system/k3s.service.env +[INFO] systemd: Creating service file /etc/systemd/system/k3s.service +[INFO] systemd: Enabling k3s unit +Created symlink /etc/systemd/system/multi-user.target.wants/k3s.service → /etc/systemd/system/k3s.service. +[INFO] systemd: Starting k3s +ubuntu@k3sVM:~$ kubectl get nodes +NAME STATUS ROLES AGE VERSION +k3svm Ready control-plane,master 14m v1.20.4+k3s1 +``` + +Now that k3s is installed, you can install Kalm! + +``` +git clone https://github.com/kalmhq/kalm.git +cd kalm + +# run the install script +./scripts/install-local-mode.sh + +Initializing Kalm - 4/4 modules ready: + +✔ kalm-operator +✔ cert-manager +✔ istio-system +✔ kalm-system +Kalm Installation Complete! 🎉 + +To start using Kalm, open a port via: + +kubectl port-forward -n kalm-system $(kubectl get pod -n kalm-system -l app=kalm -ojsonpath="{.items[0].metadata.name}") 3010:3010 + +Then visit http://localhost:3010 in your browser +``` + +Kalm has installed successfully! Now let's access kalm via your web browser. 
+ +Open another terminal and run + +``` +❯ multipass info k3sVM +Name: k3sVM +State: Running +IPv4: 192.168.64.9 + 10.42.0.0 + 10.42.0.1 +Release: Ubuntu 20.04.2 LTS +Image hash: c5f2f08c6a1a (Ubuntu 20.04 LTS) +Load: 0.61 1.56 2.47 +Disk usage: 3.5G out of 4.7G +❯ K3S_IP=$(multipass info k3sVM | grep IPv4 | awk '{print $2}') +❯ echo $K3S_IP +192.168.64.9 +``` + +Update your mac's `/etc/hosts` + +``` +❯ grep kalm /etc/hosts +192.168.64.9 kalm.local +``` + +``` +# export `kubeconfig` file +❯ multipass exec k3sVM sudo cat /etc/rancher/k3s/k3s.yaml > k3s.yaml +❯ cat k3s.yaml +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkakNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUyTVRReU9ESTJOVEl3SGhjTk1qRXdNakkxTVRrMU1EVXlXaGNOTXpFd01qSXpNVGsxTURVeQpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUyTVRReU9ESTJOVEl3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFSK2hmMDJXYWdYMVNERUk1QjRsS0l5bDluNndiNXlhSFdaR20yTndFaXIKREJsemsrVFluT1NSdzlkL2twc1oycXZTY3FkSXNoRExFZVA2V21iR0RsUkFvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVXNaZjNUd2tHSU5vVWx2VERnRjA3CjVUcDh6RU13Q2dZSUtvWkl6ajBFQXdJRFJ3QXdSQUlnR0d0MUV1Uk1TSmVGWjhvRkE4SEV1ZUNmYVZEaS90b2cKMmY0NzlrTWd1Y01DSUZhUmxzWHZ1T09MWUVjUFI1N0treHdDV0RXTWhZRzY1MWZoWW1mYUVyNGYKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + server: https://127.0.0.1:6443 + name: default +contexts: +- context: + cluster: default + user: default + name: default +current-context: default +kind: Config +preferences: {} +users: +- name: default + user: + client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrakNDQVRlZ0F3SUJBZ0lJT0ZZTWxPNmVPN3d3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOakUwTWpneU5qVXlNQjRYRFRJeE1ESXlOVEU1TlRBMU1sb1hEVEl5TURJeQpOVEU1TlRBMU1sb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJQbXpZMkYwYzg5OHg3U3cKU3FTa2xCREJNcHJPb1JCM214eHo5TGRLRlZDRDlscWMwR1dtOXRnSVFRM1docmZtWkFUSGxjYjlTWUhqdEQwQwoyQnY2VlN1alNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCUXVsanZKQkNvYjcvRnRZRTlWTDJrY2Z0ZDluakFLQmdncWhrak9QUVFEQWdOSkFEQkcKQWlFQW5yWWZHQmFlaEhZNll3d3NpOG8wVlNCT1NMVGRLeXYvNUVwa21QcStZVjhDSVFEVUpnKzl4d1lMSXVnaQpKblh5ZGdzQzJqWlFQanJpNlNJTnl1NlRlUE44VUE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlCZGpDQ0FSMmdBd0lCQWdJQkFEQUtCZ2dxaGtqT1BRUURBakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwClpXNTBMV05oUURFMk1UUXlPREkyTlRJd0hoY05NakV3TWpJMU1UazFNRFV5V2hjTk16RXdNakl6TVRrMU1EVXkKV2pBak1TRXdId1lEVlFRRERCaHJNM010WTJ4cFpXNTBMV05oUURFMk1UUXlPREkyTlRJd1dUQVRCZ2NxaGtqTwpQUUlCQmdncWhrak9QUU1CQndOQ0FBVGpLZFBUTXhDYWxtV0VrYnQrNjlCNldQZENORXVHWENiS2p0MFhPU0RFCnZRRFd6S1dENWRpSmRyMjNIOXZneFVyMExQeWxiVEY1c2tGYUVLREJBK3RLbzBJd1FEQU9CZ05WSFE4QkFmOEUKQkFNQ0FxUXdEd1lEVlIwVEFRSC9CQVV3QXdFQi96QWRCZ05WSFE0RUZnUVVMcFk3eVFRcUcrL3hiV0JQVlM5cApISDdYZlo0d0NnWUlLb1pJemowRUF3SURSd0F3UkFJZ1RqL0prY2tEMWczMUJFblYydXNFZHVyc2swcGVjMVFBCmhPV3dYRUp1clFnQ0lIall5SWk3ZDVLeCtoUDJISTJqVzBDa0ZFU2prRXp4T2RMQ1AvMVh0bUNTCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUNaMmpaU3JEYVo3VUVCN2xlajdncUdRSDZtVFpMdW5GanVFdlYrQ05UV2hvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFK2JOallYUnp6M3pIdExCS3BLU1VFTUV5bXM2aEVIZWJISFAwdDBvVlVJUDJXcHpRWmFiMgoyQWhCRGRhR3QrWmtCTWVWeHYxSmdlTzBQUUxZRy9wVkt3PT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= + +``` + +Replace 
`https://127.0.0.1:6443` in `k3s.yaml` with the VM's IP:
+
+```
+❯ sed -i '' "s/127.0.0.1/${K3S_IP}/" k3s.yaml
+```
+
+Now, test that you can access the k3sVM cluster:
+
+```
+❯ export KUBECONFIG=${PWD}/k3s.yaml
+❯ kubectl get nodes
+NAME    STATUS   ROLES                  AGE    VERSION
+k3svm   Ready    control-plane,master   105m   v1.20.4+k3s1
+```
+
+You should now be able to open Kalm in a web browser:
+
+```
+kubectl port-forward -n kalm-system $(kubectl get pod -n kalm-system -l app=kalm -ojsonpath="{.items[0].metadata.name}") 3010:3010
+```
+
+Open [http://localhost:3010/applications](http://localhost:3010/applications) in your browser to use your freshly installed Kalm on k3s!
diff --git a/docs/install/install-local-kind.md b/docs/install/install-local-kind.md
new file mode 100644
index 0000000..3533545
--- /dev/null
+++ b/docs/install/install-local-kind.md
@@ -0,0 +1,71 @@
+---
+title: "Install Kalm on Kind"
+---
+
+## Install Kind
+
+Kind is a tool for running local Kubernetes clusters using Docker container “nodes”. If you are using Kind for the first time, please check out its [official website](https://kind.sigs.k8s.io/docs/user/quick-start/) to get basic information and go through its installation process.
+
+```
+brew install kind
+```
+
+## Create a new cluster
+
+```
+❯ kind create cluster
+Creating cluster "kind" ...
+ ✓ Ensuring node image (kindest/node:v1.20.2) 🖼
+ ✓ Preparing nodes 📦
+ ✓ Writing configuration 📜
+ ✓ Starting control-plane 🕹️
+ ✓ Installing CNI 🔌
+ ✓ Installing StorageClass 💾
+Set kubectl context to "kind-kind"
+You can now use your cluster with:
+
+kubectl cluster-info --context kind-kind
+
+Have a nice day! 👋
+
+```
+
+```
+❯ kubectl get pods -A
+NAMESPACE            NAME                                         READY   STATUS    RESTARTS   AGE
+kube-system          coredns-74ff55c5b-8x47r                      1/1     Running   0          64s
+kube-system          coredns-74ff55c5b-v8qsl                      1/1     Running   0          64s
+kube-system          etcd-kind-control-plane                      0/1     Running   0          66s
+kube-system          kindnet-bcdxs                                1/1     Running   0          64s
+kube-system          kube-apiserver-kind-control-plane            1/1     Running   0          66s
+kube-system          kube-controller-manager-kind-control-plane   0/1     Running   0          66s
+kube-system          kube-proxy-m8bff                             1/1     Running   0          64s
+kube-system          kube-scheduler-kind-control-plane            0/1     Running   0          66s
+local-path-storage   local-path-provisioner-78776bfc44-b8tq2      1/1     Running   0          64s
+```
+
+## Install Kalm
+
+```
+# clone the repo
+git clone https://github.com/kalmhq/kalm.git
+cd kalm
+```
+
+```
+./scripts/install.sh $(git rev-parse HEAD)
+
+Initializing Kalm - 4/4 modules ready:
+
+✔ kalm-operator
+✔ cert-manager
+✔ istio-system
+✔ kalm-system
+Kalm Installation Complete! 🎉
+
+To start using Kalm, open a port via:
+
+kubectl port-forward -n kalm-system $(kubectl get pod -n kalm-system -l app=kalm -ojsonpath="{.items[0].metadata.name}") 3010:3010
+
+Then visit http://localhost:3010 in your browser
+```
diff --git a/docs/install/install-local-minikube.md b/docs/install/install-local-minikube.md
new file mode 100644
index 0000000..74e4e7b
--- /dev/null
+++ b/docs/install/install-local-minikube.md
@@ -0,0 +1,110 @@
+---
+title: "Install Kalm on Minikube"
+---
+
+## Install Minikube
+
+If you are using minikube for the first time, please open its [official website](https://minikube.sigs.k8s.io/docs/start/) to get basic information about minikube and go through the official documentation to install it.
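+On macOS, for instance, the install is typically a single Homebrew command (a sketch; see the official docs for Linux and Windows):
+
+```bash
+# Install minikube via Homebrew (macOS); other platforms are covered in the official docs
+brew install minikube
+```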
+ +``` +❯ minikube version +minikube version: v1.17.1 +commit: 043bdca07e54ab6e4fc0457e3064048f34133d7e +``` + +## Create a New Cluster + +``` +❯ minikube start --memory 4096 --cpus 4 --kubernetes-version v1.18.0 +😄 minikube v1.17.1 on Darwin 11.2.1 +🆕 Kubernetes 1.20.2 is now available. If you would like to upgrade, specify: --kubernetes-version=v1.20.2 +✨ Using the hyperkit driver based on existing profile +👍 Starting control plane node minikube in cluster minikube +🔄 Restarting existing hyperkit VM for "minikube" .... +💡 To pull new external images, you may need to configure a proxy: https://minikube.sigs.k8s.io/docs/reference/networking/proxy/ +🐳 Preparing Kubernetes v1.18.0 on Docker 20.10.2 ... + ▪ Generating certificates and keys ... + ▪ Booting up control plane ... + ▪ Configuring RBAC rules ... +🔎 Verifying Kubernetes components... +🌟 Enabled addons: storage-provisioner, default-storageclass + +❗ /usr/local/bin/kubectl is version 1.20.4, which may have incompatibilites with Kubernetes 1.18.0. + ▪ Want kubectl v1.18.0? Try 'minikube kubectl -- get pods -A' +🏄 Done! kubectl is now configured to use "minikube" cluster and "default" namespace by default + +``` + +Note - if you're experiencing networking issues, consider configuring a proxy: [Minikube Proxy](https://minikube.sigs.k8s.io/docs/reference/networking/proxy/) +Your install log will be like this: + +``` +❯ minikube start --memory 4096 --cpus 4 --kubernetes-version v1.18.0 +😄 minikube v1.17.1 on Darwin 11.2.1 +🆕 Kubernetes 1.20.2 is now available. If you would like to upgrade, specify: --kubernetes-version=v1.20.2 +✨ Using the hyperkit driver based on existing profile +❗ Local proxy ignored: not passing HTTP_PROXY=http://127.0.0.1:1087 to docker env. +❗ Local proxy ignored: not passing HTTPS_PROXY=http://127.0.0.1:1087 to docker env. +👍 Starting control plane node minikube in cluster minikube +🔄 Restarting existing hyperkit VM for "minikube" ... +❗ Local proxy ignored: not passing HTTP_PROXY=http://127.0.0.1:1087 to docker env. +❗ Local proxy ignored: not passing HTTPS_PROXY=http://127.0.0.1:1087 to docker env. +🌐 Found network options: + ▪ http_proxy=http://127.0.0.1:1087 +❗ You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP (192.168.64.12). +📘 Please see https://minikube.sigs.k8s.io/docs/handbook/vpn_and_proxy/ for more details + ▪ https_proxy=http://127.0.0.1:1087 +❗ This VM is having trouble accessing https://k8s.gcr.io +💡 To pull new external images, you may need to configure a proxy: https://minikube.sigs.k8s.io/docs/reference/networking/proxy/ +🐳 Preparing Kubernetes v1.18.0 on Docker 20.10.2 ... + ▪ Generating certificates and keys ... + ▪ Booting up control plane ... + ▪ Configuring RBAC rules ... +🔎 Verifying Kubernetes components... +🌟 Enabled addons: storage-provisioner, default-storageclass + +❗ /usr/local/bin/kubectl is version 1.20.4, which may have incompatibilites with Kubernetes 1.18.0. + ▪ Want kubectl v1.18.0? Try 'minikube kubectl -- get pods -A' +🏄 Done! 
kubectl is now configured to use "minikube" cluster and "default" namespace by default
+
+```
+
+After your minikube cluster is running, you can check its status by using the following command:
+
+```
+❯ minikube status
+minikube
+type: Control Plane
+host: Running
+kubelet: Running
+apiserver: Running
+kubeconfig: Configured
+timeToStop: Nonexistent
+
+```
+
+## Install Kalm
+
+```
+# clone the repo
+git clone https://github.com/kalmhq/kalm.git
+cd kalm
+```
+
+```
+./scripts/install.sh $(git rev-parse HEAD)
+
+Initializing Kalm - 4/4 modules ready:
+
+✔ kalm-operator
+✔ cert-manager
+✔ istio-system
+✔ kalm-system
+Kalm Installation Complete! 🎉
+
+To start using Kalm, open a port via:
+
+kubectl port-forward -n kalm-system $(kubectl get pod -n kalm-system -l app=kalm -ojsonpath="{.items[0].metadata.name}") 3010:3010
+
+Then visit http://localhost:3010 in your browser
+```
diff --git a/docs/install/install-open-source.md b/docs/install/install-open-source.md
new file mode 100644
index 0000000..ce2c007
--- /dev/null
+++ b/docs/install/install-open-source.md
@@ -0,0 +1,31 @@
+---
+title: How to install the open-source version
+---
+
+# Prerequisites
+
+- a Kubernetes cluster; check out [Kubernetes Setup](https://www.notion.so/Kubernetes-Setup-90ccfb21bd8344deb410be143f3864a2) for details
+- make sure your `kubectl` is pointing at the right Kubernetes cluster
+
+# Install Kalm using the default config
+
+Clone the Kalm repo and run the script to install:
+
+```bash
+# clone the repo
+git clone https://github.com/kalmhq/kalm.git
+cd kalm
+
+# run the install script
+./scripts/install-local-mode.sh
+```
+
+The whole process typically takes 15-30 minutes.
+
+Once the installation is complete, open a port to the web server.
+
+```bash
+kubectl port-forward -n kalm-system $(kubectl get pod -n kalm-system -l app=kalm -ojsonpath="{.items[0].metadata.name}") 3010:3010
+```
+
+Kalm should now be accessible at [http://localhost:3010](http://localhost:3010/).
\ No newline at end of file
diff --git a/docs/install/install.md b/docs/install/install.md
new file mode 100644
index 0000000..8e3f015
--- /dev/null
+++ b/docs/install/install.md
@@ -0,0 +1,37 @@
+---
+title: Installation
+---
+
+To install Kalm, you first need a Kubernetes cluster. You can either test Kalm on a local cluster or you can run it in the cloud using managed Kubernetes services such as EKS and GKE. Often, simple testing is done on local clusters while production environments are run in the cloud.
+
+## Testing Kalm Locally
+
+To quickly test out Kalm, you may run Kalm on "minified" versions of Kubernetes like minikube, k3s, and kind. Check out our minified how-to manuals:
+
+[Install Kalm on Minikube](minikube)
+
+[Install Kalm on k3s](install-local-k3s)
+
+[Install Kalm on kind](install-local-kind)
+
+## Kalm On The Cloud
+
+For a more robust setup, you can install Kalm directly on EKS (Elastic Kubernetes Service) and GKE (Google Kubernetes Engine). To do so, you'll need to configure your cluster so that Kalm can install smoothly. We strongly recommend using the following guides when setting up Kalm on an EKS or GKE cluster:
+
+[AWS Elastic Kubernetes Service](eks)
+
+[Google Kubernetes Engine](gke)
+
+## Install Kalm
+
+Once you have a cluster ready, you can choose between installing Kalm-Cloud or the open source version. The Cloud version is recommended for most serious development, while the open source version is sufficient for hobbyists.
The following guides walk you through each of these processes:
+
+[How To Install Kalm Cloud](install-kalm-cloud)
+
+[How to install the open-source version](install-open-source)
+
+## Uninstalling Kalm
+
+To uninstall Kalm from your cluster cleanly, use the following guide:
+
+[Uninstall](uninstall)
\ No newline at end of file
diff --git a/docs/install/minikube.md b/docs/install/minikube.md
new file mode 100644
index 0000000..6b23f3e
--- /dev/null
+++ b/docs/install/minikube.md
@@ -0,0 +1,88 @@
+---
+title: Install Kalm on Minikube
+---
+
+## Overview
+
+Minikube is a popular tool that quickly sets up a local Kubernetes cluster on macOS, Linux, and Windows. While not frequently recommended for production environments, minikube is commonly used by developers to quickly test Kubernetes ideas locally. This guide will walk you through how to install and run Kalm on a local cluster created through minikube.
+
+## Install Minikube
+
+First, you'll need to have minikube installed. If you don't already have it installed, follow [the official minikube documentation](https://minikube.sigs.k8s.io/docs/start/) to install it on your OS.
+
+You can try running the `minikube version` command to confirm installation.
+
+```
+❯ minikube version
+
+minikube version: v1.17.1
+commit: 043bdca07e54ab6e4fc0457e3064048f34133d7e
+```
+
+## Create a New Cluster
+
+Use minikube to create a new local cluster using the `minikube start` command. We recommend initializing a cluster with the settings below.
+
+```
+❯ minikube start --memory 4096 --cpus 4 --kubernetes-version v1.18.0
+```
+
+*Note - if you're experiencing networking issues, consider configuring a proxy: [Minikube Proxy](https://minikube.sigs.k8s.io/docs/reference/networking/proxy/)*
+
+After your minikube cluster is up and running, you can use the `minikube status` command to see some basic details:
+
+```
+❯ minikube status
+
+minikube
+type: Control Plane
+host: Running
+kubelet: Running
+apiserver: Running
+kubeconfig: Configured
+timeToStop: Nonexistent
+```
+
+## Install Kalm on Your Minikube Cluster
+
+With a minikube cluster up and running, you can install Kalm by running our install script.
+
+First, clone the Kalm git repository:
+
+```
+# clone the repo
+git clone https://github.com/kalmhq/kalm.git
+cd kalm
+```
+
+Next, run the install script to install Kalm:
+
+```bash
+./scripts/install-local-mode.sh
+```
+
+The installation log should be similar to this:
+
+```bash
+Initializing Kalm - 4/4 modules ready:
+
+✔ kalm-operator
+✔ cert-manager
+✔ istio-system
+✔ kalm-system
+Kalm Installation Complete! 🎉
+
+To start using Kalm, open a port via:
+
+kubectl port-forward -n kalm-system $(kubectl get pod -n kalm-system -l app=kalm -ojsonpath="{.items[0].metadata.name}") 3010:3010
+
+Then visit http://localhost:3010 in your browser
+```
+
+Once the installation is complete, you can access the dashboard using port-forward:
+
+```bash
+kubectl port-forward -n kalm-system $(kubectl get pod -n kalm-system -l app=kalm -ojsonpath="{.items[0].metadata.name}") 3010:3010
+```
+
+Then visit [http://localhost:3010](http://localhost:3010) to check out the dashboard.
\ No newline at end of file
diff --git a/docs/install/open-source-version.md b/docs/install/open-source-version.md
new file mode 100644
index 0000000..47f3dac
--- /dev/null
+++ b/docs/install/open-source-version.md
@@ -0,0 +1,26 @@
+---
+title: Install the Open Source Version
+---
+
+For getting started on localhost, make sure `kubectl` is installed and a minikube cluster is created beforehand.
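+A quick sanity check of those prerequisites (assuming minikube) might look like this:
+
+```bash
+# Confirm kubectl is installed
+kubectl version --client
+
+# Confirm the minikube cluster is up and is the current context
+minikube status
+kubectl config current-context   # should print "minikube"
+```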
+
+If you already have access to an existing cluster via `kubectl`, deploy Kalm via:
+
+```shell
+# clone the repo
+git clone https://github.com/kalmhq/kalm.git
+cd kalm
+
+# run the install script
+./scripts/install-local-mode.sh
+```
+
+The whole process typically takes 5-10 minutes. Relax or check out our docs in the meantime.
+
+Once the installation is complete, open a port to the web server.
+
+```shell
+kubectl port-forward -n kalm-system $(kubectl get pod -n kalm-system -l app=kalm -ojsonpath="{.items[0].metadata.name}") 3010:3010
+```
+
+Kalm should now be accessible at [http://localhost:3010](http://localhost:3010).
\ No newline at end of file
diff --git a/docs/install/saas-version.md b/docs/install/saas-version.md
new file mode 100644
index 0000000..56630ef
--- /dev/null
+++ b/docs/install/saas-version.md
@@ -0,0 +1,30 @@
+---
+title: Install the SaaS Version
+---
+
+1. Go to [http://kalm.dev/signin](http://kalm.dev/signin)
+2. After signing in, click the **INSTALL NEW CLUSTER** button in the upper right corner:
+
+   ![new cluster](../assets/install-saas-0-new-cluster.png)
+
+3. Choose a name for your cluster
+
+![cluster name](../assets/install-saas-1-cluster-name.png)
+
+4. Run the command to install Kalm on your cluster:
+
+:::note
+To make sure your `kubectl` is pointing at the right cluster for Kalm, run `kubectl config get-contexts`; the current cluster should be marked with a `*` in the output.
+:::
+
+![cmd](../assets/install-saas-2-cmd.png)
+
+The install progress will be updated during the execution:
+
+![install-progress](../assets/install-saas-3-install-progress.png)
+
+Usually the whole process takes 5-15 minutes.
+
+5. Once done, click the **GO TO DASHBOARD** button to start using Kalm.
+
+![done](../assets/install-saas-4-done.png)
\ No newline at end of file
diff --git a/docs/install/uninstall.md b/docs/install/uninstall.md
new file mode 100644
index 0000000..b67fb56
--- /dev/null
+++ b/docs/install/uninstall.md
@@ -0,0 +1,33 @@
+---
+title: Uninstall
+---
+
+## Uninstall Kalm
+
+To remove the core components of Kalm, run the uninstall script:
+
+```bash
+# rm kalm-operator
+kubectl delete --ignore-not-found=true -f https://raw.githubusercontent.com/kalmhq/kalm/main/kalm-install-operator.yaml
+
+# rm kalm core
+kubectl delete --ignore-not-found=true -f https://raw.githubusercontent.com/kalmhq/kalm/main/kalm.yaml
+```
+
+It is safe to ignore errors for non-existent resources because they may have been deleted hierarchically.
+
+## Uninstall Istio and Cert-Manager
+
+Istio and Cert-Manager are not removed by default.
+
+To remove Istio:
+
+```bash
+kubectl delete --ignore-not-found=true -f https://raw.githubusercontent.com/kalmhq/kalm/main/operator/resources/istio-in-one.yaml
+```
+
+To remove Cert-Manager:
+
+```bash
+kubectl delete --ignore-not-found=true -f operator/resources/cert-manager/cert-manager.yaml
+```
\ No newline at end of file
diff --git a/docs/intro.md b/docs/intro.md
index 62199f9..bf742ff 100644
--- a/docs/intro.md
+++ b/docs/intro.md
@@ -9,27 +9,22 @@ description: What is Kalm
keywords:
  - docs
  - docusaurus
-image: https://i.imgur.com/mErPwqL.png
+image: https://docs.kalm.dev/img/kalm-logo-blue.svg
+slug: /
---
-Kalm (Kubernetes Application Manager) is an open-source tool that makes it easier to manage applications on Kubernetes without struggling with yamls. Kalm comes with a web interface for the most common operations including:
+Kubernetes is powerful but hard to use.
It is not uncommon for teams to have to first spend weeks writing internal tools and YAML configurations before getting a satisfactory Kubernetes setup. Kalm is a suite of open-source tools that makes it easier for you to quickly set up a working Kubernetes production environment. Key features include:
-- Creation of new application deployments
-- Deploying, updating, and scaling existing deployments
-- Volume, config, and secret management
+- **Web UI for common operations:** Create and update deployments, networks, environment variables, configs and secrets, and more, all from a web interface.
+- **SSO & Membership:** Invite team members to a cluster and allow them to authenticate via SSO. Set up permission rules (view/edit/owner) for users and groups.
+- **HTTPS Certificates:** Set up HTTPS certificates and auto-renewal with a few clicks, including wildcard certificates.
+- **Traffic Management:** Kalm allows you to route traffic from multiple domains/sub-domains to one or more target deployments. Easily start serving external traffic, or set up more advanced schemes such as blue-green and canary deployments.
+- **Webhooks:** Kalm lets you update or roll back deployments via webhooks. This is useful for quickly integrating with CI/CD tools such as GitHub Actions or CircleCI.
-Kalm is installed as a Kubernetes controller directly on your cluster, and automatically sets up istio and cert-manager, which makes it easy to configure HTTPS certificates, routes, SSO, and logging system out of the box.
+Kalm is a standard [Kubernetes controller](https://kubernetes.io/docs/concepts/architecture/controller/) that can be installed onto any Kubernetes cluster (v1.15+), including Amazon EKS and Google GKE. Think of it as a special app that helps you manage the rest of your apps and workflows.
-![Web Interface](assets/kalm.png)
-
-## Why Kalm
-
-Kubernetes is a powerful and flexible tool for managing microservices. However first-time to setup and configuration can be daunting. The high upfront cost makes it prohibitive for smaller teams to adopt Kubernetes. We made kalm in an attempt to decrease the cognitive load for developers to interact with Kubernetes in both development and production environments.
+Although Kalm extends the [Kubernetes API](https://kubernetes.io/docs/concepts/overview/kubernetes-api/) and is accessible via `kubectl`, a core goal of the project is to provide an intuitive UI to make common tasks easier for teams of developers.
-Kalm tries to reduce complexity in three different ways:
-
-1. Provide an intuitive graphical interface for the most common operations.
-2. Introduce higher level Custom Resource Definitions such as `Application`. These CRDs help to reduce the amount of boilerplate configuration and copy-pasting.
-3. Designed with popular extensions in mind - Kalm is designed to work with istio, cert-manager, and Prometheus, and more, which make setting up a productive stack quick and easy.
+![Web Interface](assets/kalm.png)
-Next, let‘s install Kalm and go through an example to illustrate how it works.
+If you have a running Kubernetes cluster, you can install Kalm and try out the [Getting Started](get-started) tutorial.
diff --git a/docs/platform-setup/aws-eks.md b/docs/platform-setup/aws-eks.md
deleted file mode 100644
index 3808ee8..0000000
--- a/docs/platform-setup/aws-eks.md
+++ /dev/null
@@ -1,91 +0,0 @@
----
-title: Install on Amazon EKS
----
-
-There are a many different ways to create a Kubernetes Cluster on Amazon. We will cover kops and terraform.
- -## Step 1: Install Prerequisits - -- Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) -- Install [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) -- Install [Amazon CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) - -## Step 2: AWS Setup - -You need an AWS service account. Configure [service credentials](https://console.aws.amazon.com/iam/home?#/security_credentials), then configure the aws CLI with: - -```bash -aws configure -``` - -Enter your Access key ID and secret. - -## Step 3: Terraform Apply - -Clone the repository below and `cd` into the aks directory - -```bash -git clone https://github.com/kalmhq/tf-scripts -cd tf-scripts/eks -``` - -Install the cluster with the following commands. - -```bash -terraform init -terraform apply -``` - -Type `yes` to confirm the installation. - -After 5-10 minutes, the cluster should be created. - -View the newly cluster with: - -```bash -aws eks list-clusters -``` - -Configure kubectl to use the new cluster. - -```bash -aws eks --region us-west-2 update-kubeconfig --name NAME_OF_YOUR_CLUSTER -``` - -Verify the cluster is properly setup and accessible. - -```sh -kubectl get nodes -``` - -## Step 4: Install Kalm - -Once the cluster is setup, install Kalm with the following command. - -```bash -curl -sL https://get.kalm.dev | bash -``` - -To enable localhost access, open a port via: - -```bash -kubectl port-forward -n kalm-system \ - $(kubectl get pod -n kalm-system \ - -l app=kalm \ - -ojsonpath="{.items[0].metadata.name}") \ - 3010:3010 -``` - -Now open http://localhost:3010/ - -## Clean Up - -Delete the cluster to avoid resource charges. - -```bash -terraform destroy -``` - -## Next Step - -You've now setup Kalm on an Amazon EKS cluster. To get a sense of how Kalm works, see the [Hello Kalm](/docs/tut-hello) tutorial. diff --git a/docs/platform-setup/azure-aks.md b/docs/platform-setup/azure-aks.md deleted file mode 100644 index 809ed56..0000000 --- a/docs/platform-setup/azure-aks.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -title: Install on Azure AKS ---- - -There are a few different ways to create a Kubernetes Cluster on Azure. The following guide utilizes Terraform to provision an Azure AKS cluster. - -## Step 1: Install Prerequisits - -- Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) -- Install [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli?in=terraform/azure-get-started) -- Install [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) - -## Step 2: Azure Setup - -You need an Azure account which you can log into the azure CLI with: - -```sh -az login -``` - -Next, create a service principal account so Terraform can authenticate to Azure: - -```sh -az ad sp create-for-rbac --skip-assignment -``` - -Note: the resulting output only appears once. Save the appId and password immediately. Otherwise it takes non-trivial effort to retrieve the information. - -## Step 3: Terraform Apply - -Clone the repository below and `cd` into the aks directory - -```sh -git clone https://github.com/kalmhq/tf-scripts -cd tf-scripts/aks -``` - -Open 'terraform.tfvars', and paste in the appId and password from Step 2. - -``` -# terraform.tfvars -appId = "REPLACE_WITH_YOUR_APP_ID" -password = "REPLACE_WITH_YOUR_PASSWORD" -``` - -Install the cluster with the following commands. - -```bash -terraform init -terraform apply -``` - -Type `yes` to confirm the installation. - -After 5-10 minutes, the cluster should be created. 
Once complete, record the **kubernetes_cluster_name** and **resource_group_name** from the terraform output. - -```sh -terraform output -``` - -Configure kubectl to use the new cluster. - -```bash -az aks get-credentials --resource-group NAME_OF_YOUR_RESOURCE_GROUP --name NAME_OF_YOUR_CLUSTER -``` - -Verify the cluster is properly setup and accessible. - -```sh -kubectl cluster-info -``` - -## Step 4: Install Kalm - -Once the cluster is setup, install Kalm with the following command. - -```bash -curl -sL https://get.kalm.dev | bash -``` - -To enable localhost access, open a port via: - -```bash -kubectl port-forward -n kalm-system \ - $(kubectl get pod -n kalm-system \ - -l app=kalm \ - -ojsonpath="{.items[0].metadata.name}") \ - 3010:3010 -``` - -Now open http://localhost:3010/ - -## Clean Up - -Delete the cluster to avoid resource charges. - -```bash -terraform destroy -``` - -## Next Step - -You've now setup Kalm on an Azure AKS cluster. To get a sense of how Kalm works, see the [Hello Kalm](/docs/tut-hello) tutorial. diff --git a/docs/platform-setup/gcp-gke.md b/docs/platform-setup/gcp-gke.md deleted file mode 100644 index 769a546..0000000 --- a/docs/platform-setup/gcp-gke.md +++ /dev/null @@ -1,160 +0,0 @@ ---- -title: Install on Google Kubernetes Engine ---- - -The simplest way to provision a cluster on Google Cloud Platform is via Google Kubernetes Engine. - -As a prerequisit, please install and authenticate the `gcloud` command line tool. Instructions can be found here. - -## Step 1: Create a GKE Cluster - -There are a few different ways to create a GKE Cluster. You can either create one through only gcloud, use the web interface, or with Terraform. - -### Option A - gcloud command line - -To begin, choose a Google Cloud project - -```bash -export PROJECT_ID=hello-kalm -``` - -Note: If you don't have an existing Google Cloud project, you can create one with: - -```bash -export PROJECT_ID=hello-kalm -gcloud projects create $PROJECT_ID -``` - -Make sure billing is enabled. - -You need to enable Kubernetes Engine API as well: - -```bash -gcloud services enable container.googleapis.com -``` - -
- -Next, provision a cluster with 4 nodes - -```bash -export M_TYPE=n1-standard-2 && \ -export ZONE=us-west2-a && \ -export CLUSTER_NAME=${PROJECT_ID}-${RANDOM} && \ -gcloud container clusters create $CLUSTER_NAME \ - --cluster-version latest \ - --machine-type=$M_TYPE \ - --num-nodes 4 \ - --zone $ZONE \ - --project $PROJECT_ID -``` - -The creation of the cluster will take a few minutes. Once complete, configure kubectl to use the new cluster: - -```bash -gcloud container clusters get-credentials $CLUSTER_NAME \ - --zone $ZONE \ - --project $PROJECT_ID -``` - -Verify the cluster is properly setup and accessible. - -```sh -kubectl cluster-info -``` - -### Option B - Terraform - -If you are more familiar with Terraform, you can provision a demo cluster with the following steps. - -First, install Terraform. - -Give Terraform access to the Application Default Credentials (ADC). - -```bash -gcloud auth application-default login -``` - -You will need to have the Kubernetes Engine API enabled for your project as well: - -```bash -gcloud services enable container.googleapis.com -``` - -Clone the repository below. - -```bash -git clone https://github.com/kalmhq/tf-scripts -cd tf-scripts/gke -``` - -Open 'terraform.tfvars', and specify the ID of the Google Cloud project you would like to install to. - -``` -# terraform.tfvars -project_id = "REPLACE_ME" -region = "us-west2" -``` - -Install the cluster with the following commands. - -```bash -terraform init -terraform apply -``` - -Type `yes` to confirm the installation. - -After 5-10 minutes, the cluster should be created. Once complete, retrieve the name of the newly created cluster. - -```bash -terraform output -``` - -Configure kubectl to use the new cluster. - -```bash -gcloud container clusters get-credentials NAME_OF_YOUR_CLUSTER --zone ZONE_OF_CLUSTER -``` - -_*Note - You can see your project's zone on your [gcloud console](https://console.cloud.google.com/) or in your CLI enter: `gcloud info | grep "zone"`_ - -Verify the cluster is properly setup and accessible. - -```sh -kubectl cluster-info -``` - -## Step 2: Install Kalm - -Once the cluster is setup, install Kalm with the following command. - -```bash -curl -sL https://get.kalm.dev | bash -``` - -To enable localhost access, open a port via: - -```bash -kubectl port-forward -n kalm-system \ - $(kubectl get pod -n kalm-system \ - -l app=kalm \ - -ojsonpath="{.items[0].metadata.name}") \ - 3010:3010 -``` - -Now open http://localhost:3010/ - -## Clean Up - -Delete the cluster to avoid resource charges. - -If you created the cluster with Terraform: - -```bash -terraform destroy -``` - -## Next Step - -You've now setup Kalm on a GKE cluster. To get a sense of how Kalm works, see the [Hello Kalm](/docs/tut-hello) tutorial. diff --git a/docs/platform-setup/minikube.md b/docs/platform-setup/minikube.md deleted file mode 100644 index 7f11e43..0000000 --- a/docs/platform-setup/minikube.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Install on Minikube ---- - -The simplest way to provision a localhost cluster is minikube. - -## Step 1: Install minikube - -Please follow [minikube official document](https://kubernetes.io/docs/tasks/tools/install-minikube/) to install minikube. - -## Step 2: Start a minikube cluster - -It recommended to use 8G memory and 4 core cpu to test kalm with. Adjust resources base on your environment. - -```bash -minikube start --memory 8192 --cpus 4 -``` - -After the cluster is up and running. Open a new terminal and type the following command. 
You may be prompted to enter your password. The command creates a route to services deployed with type LoadBalancer and sets their Ingress to their ClusterIP. - -```bash -minikube tunnel -``` - -## Step 3: Install Kalm - -Once the cluster is set up, install Kalm with the following command. - -```bash -curl -sL https://get.kalm.dev | bash -``` - -To enable localhost access, open a port via: - -```bash -kubectl port-forward -n kalm-system \ - $(kubectl get pod -n kalm-system \ - -l app=kalm \ - -ojsonpath="{.items[0].metadata.name}") \ - 3010:3010 -``` - -Now open http://localhost:3010/ - -## Clean Up - -Delete the cluster after testing. - -``` -minikube delete -``` - -## Next Step - -You've now set up Kalm on a minikube cluster. To get a sense of how Kalm works, see the [Hello Kalm](./tut-hello.md) tutorial. diff --git a/docs/sidebars.json b/docs/sidebars.json new file mode 100644 index 0000000..bf6b81b --- /dev/null +++ b/docs/sidebars.json @@ -0,0 +1 @@ +{"/":{"id":"","path":"/","title":"","type":"folder","children":["assets","auth","cert-challenge.md","cert-issuing.md","crd","cronjob.md","faq","faq-auth.md","faq-permissions.md","get-started.md","guide-config.md","guide-export-kalm-resources.md","guide-logs.md","https-certs.md","intro.md","logging","probes.md","ref-crd.md","registry.md","service-account.md","traffic","tut-bookinfo.md","tut-create.md","tut-deploy.md","tut-overview.md","tut-rollback.md","tut-wordpress.md","volumes.md"]},"assets":{"id":"folder-assets-0","title":"assets","children":[],"type":"folder","path":"assets"},"auth":{"id":"folder-auth-0","title":"auth","children":[],"type":"folder","path":"auth"},"auth/impersonation.md":{"id":"impersonation","title":"impersonation","type":"document","path":"auth/impersonation.md"},"auth/overview.md":{"id":"overview","title":"overview","type":"document","path":"auth/overview.md"},"auth/roles.md":{"id":"roles","title":"roles","type":"document","path":"auth/roles.md"},"crd":{"id":"folder-crd-0","title":"crd","children":[],"type":"folder","path":"crd"},"faq":{"id":"folder-faq-0","title":"faq","children":[],"type":"folder","path":"faq"},"faq/old-resources-visibility.md":{"id":"old-resources-visibility","title":"old-resources-visibility","type":"document","path":"faq/old-resources-visibility.md"},"faq/overview.md":{"id":"overview-0","title":"overview","type":"document","path":"faq/overview.md"},"logging":{"id":"folder-logging-0","title":"logging","children":[],"type":"folder","path":"logging"},"logging/efk.md":{"id":"efk","title":"efk","type":"document","path":"logging/efk.md"},"logging/index.md":{"id":"index","title":"index","type":"document","path":"logging/index.md"},"logging/plg.md":{"id":"plg","title":"plg","type":"document","path":"logging/plg.md"},"traffic":{"id":"folder-traffic-0","title":"traffic","children":[],"type":"folder","path":"traffic"},"cert-challenge.md":{"id":"cert-challenge","title":"cert-challenge","type":"document","path":"cert-challenge.md"},"cert-issuing.md":{"id":"cert-issuing","title":"cert-issuing","type":"document","path":"cert-issuing.md"},"cronjob.md":{"id":"cronjob","title":"cronjob","type":"document","path":"cronjob.md"},"faq-auth.md":{"id":"faq-auth","title":"faq-auth","type":"document","path":"faq-auth.md"},"faq-permissions.md":{"id":"faq-permissions","title":"faq-permissions","type":"document","path":"faq-permissions.md"},"get-started.md":{"id":"get-started","title":"get-started","type":"document","path":"get-started.md"},"guide-config.md":{"id":"guide-config","title":"guide-config","type":"document","path":"guide-c
onfig.md"},"guide-export-kalm-resources.md":{"id":"guide-export-kalm-resources","title":"guide-export-kalm-resources","type":"document","path":"guide-export-kalm-resources.md"},"guide-logs.md":{"id":"guide-logs","title":"guide-logs","type":"document","path":"guide-logs.md"},"https-certs.md":{"id":"https-certs","title":"https-certs","type":"document","path":"https-certs.md"},"intro.md":{"id":"intro","title":"intro","type":"document","path":"intro.md"},"probes.md":{"id":"probes","title":"probes","type":"document","path":"probes.md"},"ref-crd.md":{"id":"ref-crd","title":"ref-crd","type":"document","path":"ref-crd.md"},"registry.md":{"id":"registry","title":"registry","type":"document","path":"registry.md"},"service-account.md":{"id":"service-account","title":"service-account","type":"document","path":"service-account.md"},"tut-bookinfo.md":{"id":"tut-bookinfo","title":"tut-bookinfo","type":"document","path":"tut-bookinfo.md"},"tut-create.md":{"id":"tut-create","title":"tut-create","type":"document","path":"tut-create.md"},"tut-deploy.md":{"id":"tut-deploy","title":"tut-deploy","type":"document","path":"tut-deploy.md"},"tut-overview.md":{"id":"tut-overview","title":"tut-overview","type":"document","path":"tut-overview.md"},"tut-rollback.md":{"id":"tut-rollback","title":"tut-rollback","type":"document","path":"tut-rollback.md"},"tut-wordpress.md":{"id":"tut-wordpress","title":"tut-wordpress","type":"document","path":"tut-wordpress.md"},"volumes.md":{"id":"volumes","title":"volumes","type":"document","path":"volumes.md"}} \ No newline at end of file diff --git a/docs/tut-hasura.md b/docs/tut-hasura.md new file mode 100644 index 0000000..26e7dc0 --- /dev/null +++ b/docs/tut-hasura.md @@ -0,0 +1,229 @@ +--- +title: "Hasura on Kalm" +--- + +This tutorial shows you how to install [Hasura](https://hasura.io/) on Kalm, and expose the dashboard only to users in your organization. + +## Objectives + +- Deploy Hasura on Kalm as an Component +- Protect the Hasura dashboard: only authenticated users can visit it + +## Before you begin + +- You'll need a Kubernetes cluster with Kalm installed (You'll need SSO configured through Kalm as well) + +## (Optional)Deploy Postgres DB + +Skip this section if you already have a Postgres DB available in your cluster. + +This demo will require a Postgres Database - this step will help you set one up through Kalm. + +We'll use image: `postgres:10-alpine`: + +![image-1](assets/db-1-component.png) + +Add the following environment variables: + +![image-2](assets/db-2-envs.png) + +Then expose the DB at port 5432: + +![db image-3](assets/db-3-network-ports.png) + +Use a 1Gi disk for the database: + +![db image-4](assets/db-4-disk.png) + +The YAML below is the equivalent result of our configurations above. + +_Note that your storageClass can be different than the yaml file below depending on what Kubernetes service you're using. 
If you want to try installing by applying a YAML file instead of following the steps above, you may need to update the storageClass field._ + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + labels: + istio-injection: enabled + kalm-enabled: "true" + name: hasura +--- +apiVersion: core.kalm.dev/v1alpha1 +kind: Component +metadata: + name: pg + namespace: hasura +spec: + image: postgres:10-alpine + replicas: 1 + workloadType: server + env: + - name: POSTGRES_USER + type: static + value: demo-user + - name: POSTGRES_PASSWORD + type: static + value: demo-password + - name: POSTGRES_DB + type: static + value: demo-db + - name: PGDATA + type: static + value: /data/db + ports: + - containerPort: 5432 + protocol: tcp + servicePort: 5432 + volumes: + - path: /data + size: 1Gi + storageClassName: gp2 + type: pvc + resourceRequirements: + limits: + cpu: 200m + memory: 256Mi + requests: + cpu: 200m + memory: 256Mi +``` + +## Deploy Hasura + +Create an application and component to use for Hasura. Then configure the component as follows: + +Use this image: `hasura/graphql-engine:v2.0.0-alpha.2`: + +![image](assets/hasura-1-image.png) + +Add the following environment variables: + +![image](assets/hasura-2-envs.png) + +Next, expose the service at port 8080: + +![image](assets/hasura-3-ports.png) + +Next, you'll want to protect this dashboard so that only authenticated users (via the Kalm SSO) can view it. You can do this by checking the "Only users authenticated by Single Sign-on can access" box under the **ACCESS** tab: + +![image](assets/hasura-4-access.png) + +The corresponding YAML file to do this is shown below: + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + labels: + istio-injection: enabled + kalm-enabled: "true" + name: hasura +--- +apiVersion: core.kalm.dev/v1alpha1 +kind: Component +metadata: + name: hasura + namespace: hasura +spec: + image: hasura/graphql-engine:v2.0.0-alpha.2 + workloadType: server + replicas: 1 + env: + - name: HASURA_GRAPHQL_ENABLE_CONSOLE + type: static + value: "true" + - name: HASURA_GRAPHQL_DEV_MODE + type: static + value: "true" + - name: HASURA_GRAPHQL_DATABASE_URL + type: static + value: 'postgres://demo-user:demo-password@pg:5432/demo-db' + ports: + - containerPort: 8080 + protocol: http + servicePort: 8080 + resourceRequirements: + limits: + cpu: 200m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi +--- +apiVersion: core.kalm.dev/v1alpha1 +kind: ProtectedEndpoint +metadata: + name: component-hasura + namespace: hasura +spec: + groups: + - kalmhq + name: hasura +``` + +## Setup HttpRoute + +Next, you'll want to set up a "Route" for Hasura, so that it can be viewed in your browser. + +Head over to the Routes tab and create a New Route. + +You can choose a subdomain for your Hasura service here: + +![domain](assets/hasuraroute-1-domain.png) + +_Note - you can easily add new domains in the Domains & Certs tab, or use a default domain created by Kalm._ + +HTTPS is also ready, so check the box: + +![https](assets/hasuraroute-2-https.png) + +Our target is what we want this domain to route to. In this case, select the Hasura component you exposed in the previous step. + +![target](assets/hasuraroute-3-target.png) + +The corresponding YAML is shown below. Note that your domain will be different. 
+ +```yaml +apiVersion: core.kalm.dev/v1alpha1 +kind: HttpRoute +metadata: + name: http-route-for-hasura +spec: + destinations: + - host: hasura.hasura.svc.cluster.local:8080 + weight: 1 + hosts: + - hasura.UPDATE-THIS.clusters.kalm-apps.com + httpRedirectToHttps: true + methods: + - GET + - POST + - PUT + - PATCH + - DELETE + - HEAD + - OPTIONS + - CONNECT + - TRACE + paths: + - / + schemes: + - http + - https +``` + +## Try it out + +Go back to the application that contains the Hasura Component to verify that your Hasura service is up now. If everything works as expected, you should see the green light on the page: + +![pod-green](assets/hasura-pod-green.png) + +Now if you visit the domain you have configured (you can just click the domain directly from the Routes tab), you should see the Hasura dashboard up and running! + +![hasura-dashboard](assets/hasura-dashboard.png) + +To make sure access protection is working, open a private browser window and visit the dashboard page again; you should be redirected to a page that asks you to log in first. + +## Clean Up + -If you want to delete your work here, simply delete the Hasura app within the Kalm dashboard. To delete the DB disk, go to the Disks tab, and delete the disk there. diff --git a/docs/tut-strapi.md b/docs/tut-strapi.md new file mode 100644 index 0000000..64685bc --- /dev/null +++ b/docs/tut-strapi.md @@ -0,0 +1,243 @@ +--- +title: "Strapi on Kalm" +--- + +Kalm provides a quick dashboard to simplify your daily work, but sometimes you may still want to set things up with YAML files. Fortunately, Kalm is designed to work hand in hand with other ways of applying YAML files. + +In this tutorial, we will show you how to install the headless CMS [Strapi](https://strapi.io/) by applying YAML files through kubectl. As you go through this process, you can check the Kalm Dashboard to see how it automatically pulls in your work. + +## Objectives + +- Deploy MongoDB on Kalm as a Component +- Deploy Strapi on Kalm as a Component + +## Before you begin + +- You'll need a Kubernetes cluster with Kalm installed + +## Deploy MongoDB + +Define the DB using a Component, and use kubectl to apply the YAML file below. (The Components in this tutorial live in a `strapi` namespace, which must exist first - you can create it with a Namespace object like the one in the Hasura tutorial.) + +```yaml +apiVersion: core.kalm.dev/v1alpha1 +kind: Component +metadata: + name: strapi-mongodb + namespace: strapi +spec: + image: mongo:4.4.4-bionic + workloadType: server + replicas: 1 + env: + - name: MONGO_INITDB_ROOT_USERNAME + type: static + value: admin + - name: MONGO_INITDB_ROOT_PASSWORD + type: static + value: admin + - name: MONGO_INITDB_DATABASE + type: static + value: strapi + ports: + - containerPort: 27017 + protocol: tcp + servicePort: 27017 + resourceRequirements: + limits: + cpu: 200m + memory: 128Mi + requests: + cpu: 200m + memory: 128Mi + volumes: + - path: /data/db + size: 2Gi + storageClassName: gp2 + type: pvc +``` + +Let's walk through some key points here. + +We use the image `mongo:4.4.4-bionic` to run our DB instance: + +```yaml +spec: + image: mongo:4.4.4-bionic +``` + +and we initialize the database using these environment variables: + +```yaml +spec: + ... 
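+  # The MONGO_INITDB_* variables below are documented settings of the official
+  # mongo image: on first startup it creates this root user and initial database.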
+ env: + - name: MONGO_INITDB_ROOT_USERNAME + type: static + value: admin + - name: MONGO_INITDB_ROOT_PASSWORD + type: static + value: admin + - name: MONGO_INITDB_DATABASE + type: static + value: strapi +``` + +We expose the DB service at port `27017`: + +```yaml + ports: + - containerPort: 27017 + protocol: tcp + servicePort: 27017 +``` + +We also ask for a 2Gi disk for our database: + +```yaml + volumes: + - path: /data/db + size: 2Gi + storageClassName: gp2 + type: pvc +``` + +:::note +The storageClassName (gp2) used in this demo is for EKS; the name is provided by AWS. On a different platform the name will differ, so please update the field accordingly. (For GCP it is pd-ssd.) +::: + +## Deploy Strapi + +The YAML for Strapi is quite similar: + +```yaml +apiVersion: core.kalm.dev/v1alpha1 +kind: Component +metadata: + name: strapi + namespace: strapi +spec: + image: strapi/strapi:3.5.2-node12-alpine + workloadType: server + replicas: 1 + env: + - name: DATABASE_CLIENT + type: static + value: mongo + - name: DATABASE_HOST + type: static + value: strapi-mongodb + - name: DATABASE_NAME + type: static + value: strapi + - name: DATABASE_USERNAME + type: static + value: admin + - name: DATABASE_PASSWORD + type: static + value: admin + - name: DATABASE_PORT + type: static + value: "27017" + ports: + - containerPort: 1337 + protocol: http + servicePort: 1337 + volumes: + - path: /srv/app + size: 2Gi + storageClassName: gp2 + type: pvc + resourceRequirements: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 500m + memory: 512Mi +``` + +:::note +- update the environment variables if your DB configuration is different +- update the storageClassName again if you are on a different cloud platform +::: + +## Setup HTTPRoute + +Finally, let's set up the HTTP route for our Strapi service: + +```yaml +apiVersion: core.kalm.dev/v1alpha1 +kind: HttpRoute +metadata: + name: http-route-strapi +spec: + destinations: + - host: strapi.strapi.svc.cluster.local:1337 + weight: 1 + hosts: + - strapi.UPDATE-THIS.clusters.kalm-apps.com + httpRedirectToHttps: true + methods: + - GET + - POST + - PUT + - PATCH + - DELETE + - HEAD + - OPTIONS + - CONNECT + - TRACE + paths: + - / + schemes: + - http + - https +``` + +:::note +You will need to set your own domain in the `spec.hosts` field. +::: + +Some key points: + +We route the traffic to the service at `strapi.strapi.svc.cluster.local:1337`: + +```yaml +spec: + ... + destinations: + - host: strapi.strapi.svc.cluster.local:1337 + weight: 1 +``` + +The destination is the Strapi component we defined above. + +We set our domain in the `spec.hosts` field: + +```yaml +spec: + ... + hosts: + - strapi.UPDATE-THIS.clusters.kalm-apps.com +``` + +HTTPS is ready out of the box for clusters initialized by Kalm, so we enable the HTTPS redirect option: + +```yaml + httpRedirectToHttps: true +``` + +## Try it out + +Go into Kalm and find your application. Check that your Strapi service is up. If everything works as expected, you should see a green light on the page: + +![pod-green](assets/strapi-pod-green.jpg) + +Now visit the domain you just configured (you can also find this on the Routes tab) and you should see the admin page up and running: + +![strapi-admin](assets/strapi-admin.jpg) + +## Clean Up + -To delete your work here, simply delete the app within the Kalm dashboard. To delete the DB disk, go to the Disks page, and delete the disk there. 
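Since everything in this tutorial was applied through kubectl, you can alternatively tear it down from the command line. A minimal sketch, assuming the resource names used above and that the `strapi` namespace holds only this tutorial's work:

```bash
# Delete the HttpRoute (defined without a namespace in the YAML above);
# the fully qualified resource name avoids ambiguity with other CRDs.
kubectl delete httproutes.core.kalm.dev http-route-strapi

# Deleting the namespace removes both Components; the persistent disks
# (PVCs) can then be cleaned up from Kalm's Disks page if they remain.
kubectl delete namespace strapi
```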
diff --git a/docusaurus.config.js b/docusaurus.config.js index 891382a..a22d270 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -7,6 +7,7 @@ // See https://docusaurus.io/docs/site-config for all the possible // site configuration options. +const path = require("path"); const siteConfig = { title: "Kalm", // Title for your website. @@ -15,10 +16,9 @@ const siteConfig = { // Used for publishing and more projectName: "kalm", organizationName: "kalmhq", - /* path to images for header/footer */ favicon: "img/kalm-logo-blue.svg", - + plugins: [path.resolve(__dirname, "heap-plugin")], themeConfig: { algolia: { apiKey: "d589acaac8cdb8ae96fca3f78c600ae4", @@ -34,15 +34,15 @@ const siteConfig = { src: "img/kalm-logo-blue.svg", }, items: [ - { - type: "docsVersionDropdown", - position: "left", - // to: "/path // by default, link to active/latest version - // label: "label" // by default, show active/latest version label - }, - { to: "/docs", label: "Docs" }, - { href: "https://github.com/kalmhq/kalm", label: "Github" }, - { to: "/versions", label: "All Versions", position: "right" }, + // { + // type: "docsVersionDropdown", + // position: "left", + // // to: "/path // by default, link to active/latest version + // // label: "label" // by default, show active/latest version label + // }, + { to: "/", label: "Docs" }, + // { href: "https://github.com/kalmhq/kalm", label: "Github" }, + // { to: "/versions", label: "All Versions", position: "right" }, ], }, @@ -52,12 +52,8 @@ const siteConfig = { title: "Docs", items: [ { - label: "Installation", - to: "docs/install", - }, - { - label: "Basic Tutorial", - to: "docs/tut-hello", + label: "Getting Started", + to: "get-started", }, ], }, @@ -103,10 +99,10 @@ const siteConfig = { "@docusaurus/preset-classic", { docs: { - homePageId: "intro", sidebarPath: require.resolve("./sidebars.json"), showLastUpdateAuthor: true, showLastUpdateTime: true, + routeBasePath: "/", // For no header links in the top nav bar -> headerLinks: [], editUrl: "https://github.com/kalmhq/docs/edit/master/", }, diff --git a/heap-plugin/index.js b/heap-plugin/index.js new file mode 100644 index 0000000..a7297ce --- /dev/null +++ b/heap-plugin/index.js @@ -0,0 +1,29 @@ +module.exports = function (context, options) { + // ... + return { + name: "heap-plugin", + async loadContent() { + /* ... */ + console.log("HEAP plugin load content"); + }, + async contentLoaded({ content, actions }) { + console.log("HEAP plugin content loaded"); + }, + /* other lifecycle API */ + injectHtmlTags() { + let heapId = process.env.HEAP_ID; + + return { + headTags: [ + { + tagName: "script", + innerHTML: ` + window.heap=window.heap||[],heap.load=function(e,t){window.heap.appid=e,window.heap.config=t=t||{};var r=document.createElement("script");r.type="text/javascript",r.async=!0,r.src="https://cdn.heapanalytics.com/js/heap-"+e+".js";var a=document.getElementsByTagName("script")[0];a.parentNode.insertBefore(r,a);for(var n=function(e){return function(){heap.push([e].concat(Array.prototype.slice.call(arguments,0)))}},p=["addEventProperties","addUserProperties","clearEventProperties","identify","resetIdentity","removeEventProperty","setEventProperties","track","unsetEventProperty"],o=0;o - {/* {imgUrl && ( -
- {title} -
- )} */} - -

{title}

-

{description}

- - ); -} - -const FeatureSection = (props) => { - return ( -
-
-

{props.title}

-

{props.desc}

-
-
- {props.title} -
-
- ); -}; - -const features = [ - { - title: <>Open Source, - icon: "img/github.svg", - description: <>Kalm is free, open source, and actively maintained. , - }, - { - title: "Get Running Quickly", - icon: "img/build-24px.svg", - description: ( - <> - Kalm simplifies the common workflows related to Kubernetes, including - deploying applications, routing, and integrating with your existing - pipeline. - - ), - }, - { - title: <>Works With Any Kubernetes Cluster, - icon: "img/install.svg", - description: ( - <> - Kalm works on Google GKE, Amazon EKS, Azure AKS, and most Kubernetes - configurations. Take it with you if you decide to migrate someday. - - ), - }, -]; - -var divStyle = { - padding: "12px 0px 0px 0px" -} - -function Home() { - const context = useDocusaurusContext(); - const { siteConfig = {} } = context; - return ( - -
-
-

- Easily deploy and manage applications on Kubernetes -

-

- Get what you want out of Kubernetes without having to write and - maintain a ton of custom tooling. Deploy apps, handle requests, and - hook up CI/CD, all through an intuitive web interface. -

- -
- - Try it now - - - -
-
- -
- -
-
- -
- {features && features.length > 0 && ( -
-
-
- {features.map((props, idx) => ( - - ))} -
-
-
- )} - -
-
-

Create and Manage Applications

-

Kalm provides an intuitive web interface for core Kubernetes - functionalities: -

-
    -
  • Configuring and deploying applications
  • -
  • Managing ports and container networking
  • -
  • Probes and Auto-Healing
  • -
  • Scaling
  • -
  • Mounting Volumes
  • -
  • Scheduling according to Resources
  • -
-
-
- -
-
- - - Kalm supports the Service Mesh{" "} - Istio out of the box. This - gives you full control over traffic entering the cluster. You can - setup Request Routing, Error Injection, Mirroring, Traffic - Shifting, and more. - - } - image="img/feature-routes.png" - /> - - Want the Heroku-like experience of "git push, update app"? Kalm - provides webhooks which you can use to invoke deployment updates. - In addition, you can generate snippets for popular build tools. - - } - title="CI/CD Integration" - image="img/feature-cicd.png" - /> - - Easily obtain and renew HTTPS Certificates via{" "} - Let's Encrypt. Kalm - currently supports specific domain name certificates using http-01 - challenge. (Wildcard certificates coming soon) - - } - image="img/feature-cert.png" - /> - - New to Kubernetes and struggling with log collection? Kalm can - help you setup a logging solution within minutes. Choose either{" "} - - Loki(PLG stack){" "} - - or{" "} - - ELK - - . -

- } - image="img/feature-logs.png" - /> - - Kalm abides by Kubernetes standards and tries to avoid platform - specific dependencies. Kalm has been tested on: -

-
    -
  • Amazon EKS
  • -
  • Google GKE
  • -
  • Azure AKS
  • -
  • Digital Ocean Kubernetes
  • -
  • Linode Kubernetes Engine
  • -
  • k3s raspberry pi
  • -
  • Minikube
  • -
{" "} - - } - image="img/feature-clusters.png" - /> - - - Kalm utilizes dex, - which lets you use your existing team authentication system(i.e - Github, Gitlab, Auth0) to control access to applications running - on your Kubernetes cluster. Kalm supports RBAC mode and - application-level access control. - - } - title="Built-in Single Sign-On" - image="img/feature-sso.png" - /> -
-
- ); -} - -export default Home; diff --git a/src/pages/versions.js b/src/pages/versions.js deleted file mode 100644 index 20135bf..0000000 --- a/src/pages/versions.js +++ /dev/null @@ -1,97 +0,0 @@ -import React from "react"; - -import Layout from "@theme/Layout"; - -import useDocusaurusContext from "@docusaurus/useDocusaurusContext"; -import Link from "@docusaurus/Link"; -import useBaseUrl from "@docusaurus/useBaseUrl"; - -import versions from "../../versions.json"; - -function Version() { - const context = useDocusaurusContext(); - const { siteConfig = {} } = context; - const latestVersion = versions[0]; - const pastVersions = versions.filter((version) => version !== latestVersion); - const repoUrl = `https://github.com/${siteConfig.organizationName}/${siteConfig.projectName}`; - return ( - -
-

Kalm documentation versions

-
-

Latest version (Stable)

-

Here you can find the latest documentation.

- - - - - - - - -
{latestVersion} - Documentation - - - Release Notes - -
-
-
-

Next version (Unreleased)

-

Here you can find the documentation for unreleased version.

- - - - - - - - -
master - Documentation - - Source Code -
-
- {pastVersions.length > 0 && ( -
-

Past Versions

-

- Here you can find documentation for previous versions of Kalm. -

- - - {pastVersions.map((version) => ( - - - - - - ))} - -
{version} - - Documentation - - - - Release Notes - -
-
- )} -
-
- ); -} - -export default Version; diff --git a/static/js/code-block-buttons.js b/static/js/code-block-buttons.js deleted file mode 100644 index 0279288..0000000 --- a/static/js/code-block-buttons.js +++ /dev/null @@ -1,47 +0,0 @@ -// Turn off ESLint for this file because it's sent down to users as-is. -/* eslint-disable */ -window.addEventListener("load", function () { - function button(label, ariaLabel, icon, className) { - const btn = document.createElement("button"); - btn.classList.add("btnIcon", className); - btn.setAttribute("type", "button"); - btn.setAttribute("aria-label", ariaLabel); - btn.innerHTML = - '
' + - icon + - '' + - label + - "" + - "
"; - return btn; - } - - function addButtons(codeBlockSelector, btn) { - document.querySelectorAll(codeBlockSelector).forEach(function (code) { - code.parentNode.appendChild(btn.cloneNode(true)); - }); - } - - const copyIcon = - ''; - - addButtons( - ".hljs", - button("Copy", "Copy code to clipboard", copyIcon, "btnClipboard") - ); - - const clipboard = new ClipboardJS(".btnClipboard", { - target: function (trigger) { - return trigger.parentNode.querySelector("code"); - }, - }); - - clipboard.on("success", function (event) { - event.clearSelection(); - const textEl = event.trigger.querySelector(".btnIcon__label"); - textEl.textContent = "Copied"; - setTimeout(function () { - textEl.textContent = "Copy"; - }, 2000); - }); -}); diff --git a/versioned_docs/version-v0.1.0-alpha.4/amazon-eks.md b/versioned_docs/version-v0.1.0-alpha.4/amazon-eks.md deleted file mode 100644 index cf9fd56..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/amazon-eks.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: Install on Amazon EKS ---- - -There are a many different ways to create a Kubernetes Cluster on Amazon. We will cover kops and terraform. - -## Step 1: Install Prerequisits - -- Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) -- Install [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli?in=terraform/azure-get-started) -- Install [Amazon CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) - -## Step 2: Azure Setup - -You need an AWS service account. Configure [service credentials](https://console.aws.amazon.com/iam/home?#/security_credentials), then configure the aws CLI with: - -```sh -aws configure -``` - -Enter your Access key ID and secret. - -## Step 3: Terraform Apply - -Clone the repository below and `cd` into the aks directory - -```sh -git clone https://github.com/kalmhq/tf-scripts -cd tf-scripts/eks -``` - -Install the cluster with the following commands. - -```bash -terraform init -terraform apply -``` - -Type `yes` to confirm the installation. - -After 5-10 minutes, the cluster should be created. - -View the newly cluster with: - -```sh -aws eks list-clusters -``` - -Configure kubectl to use the new cluster. - -```bash -aws eks --region us-west-2 update-kubeconfig --name NAME_OF_YOUR_CLUSTER -``` - -Verify the cluster is properly setup and accessible. - -```sh -kubectl get nodes -``` - -## Step 4: Install Kalm - -Once the cluster is setup, install Kalm with the following command. - -```bash -curl -sL https://get.kalm.dev | bash -``` - -To enable localhost access, open a port via: - -```bash -kubectl port-forward -n kalm-system \ - $(kubectl get pod -n kalm-system \ - -l app=kalm \ - -ojsonpath="{.items[0].metadata.name}") \ - 3010:3010 -``` - -Now open http://localhost:3010/ - -## Clean Up - -Delete the cluster to avoid resource charges. - -```bash -terraform destroy -``` - -## Next Step - -You've now setup Kalm on an Amazon EKS cluster. To get a sense of how Kalm works, see the [Hello Kalm](/docs/tut-hello) tutorial. 
diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/add-app.png b/versioned_docs/version-v0.1.0-alpha.4/assets/add-app.png deleted file mode 100644 index a9da60d..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/add-app.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/add-registry-form.png b/versioned_docs/version-v0.1.0-alpha.4/assets/add-registry-form.png deleted file mode 100644 index 9785114..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/add-registry-form.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/add-route.png b/versioned_docs/version-v0.1.0-alpha.4/assets/add-route.png deleted file mode 100644 index ccc6c74..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/add-route.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/add-target.png b/versioned_docs/version-v0.1.0-alpha.4/assets/add-target.png deleted file mode 100644 index da1d903..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/add-target.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/comp2-data.png b/versioned_docs/version-v0.1.0-alpha.4/assets/comp2-data.png deleted file mode 100644 index 395018c..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/comp2-data.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/completed-jobs.png b/versioned_docs/version-v0.1.0-alpha.4/assets/completed-jobs.png deleted file mode 100644 index 62985dc..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/completed-jobs.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/create-and-mount-disk.png b/versioned_docs/version-v0.1.0-alpha.4/assets/create-and-mount-disk.png deleted file mode 100644 index 33dd278..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/create-and-mount-disk.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/create-comp.png b/versioned_docs/version-v0.1.0-alpha.4/assets/create-comp.png deleted file mode 100644 index aa9d4d5..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/create-comp.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/create-comp1.png b/versioned_docs/version-v0.1.0-alpha.4/assets/create-comp1.png deleted file mode 100644 index 0ab63b3..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/create-comp1.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/delete-comp1.png b/versioned_docs/version-v0.1.0-alpha.4/assets/delete-comp1.png deleted file mode 100644 index c898db5..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/delete-comp1.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/edit-comp.png b/versioned_docs/version-v0.1.0-alpha.4/assets/edit-comp.png deleted file mode 100644 index 41e127e..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/edit-comp.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/edit-config-file.png b/versioned_docs/version-v0.1.0-alpha.4/assets/edit-config-file.png deleted file mode 100644 index afe0dc8..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/edit-config-file.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/env-variables.png 
b/versioned_docs/version-v0.1.0-alpha.4/assets/env-variables.png deleted file mode 100644 index e84843d..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/env-variables.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/example-token.png b/versioned_docs/version-v0.1.0-alpha.4/assets/example-token.png deleted file mode 100644 index 835db9a..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/example-token.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/first-pod.png b/versioned_docs/version-v0.1.0-alpha.4/assets/first-pod.png deleted file mode 100644 index 2406268..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/first-pod.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/guide-logging-for-kalm1.png b/versioned_docs/version-v0.1.0-alpha.4/assets/guide-logging-for-kalm1.png deleted file mode 100644 index babf08c..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/guide-logging-for-kalm1.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/guide-logging-for-kalm2.png b/versioned_docs/version-v0.1.0-alpha.4/assets/guide-logging-for-kalm2.png deleted file mode 100644 index 03cbec0..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/guide-logging-for-kalm2.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/guide-logging-for-kalm3.png b/versioned_docs/version-v0.1.0-alpha.4/assets/guide-logging-for-kalm3.png deleted file mode 100644 index deb83cf..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/guide-logging-for-kalm3.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/guide-logging-for-kalm4.png b/versioned_docs/version-v0.1.0-alpha.4/assets/guide-logging-for-kalm4.png deleted file mode 100644 index 51e17fb..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/guide-logging-for-kalm4.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/guide-logging-for-kalm5.png b/versioned_docs/version-v0.1.0-alpha.4/assets/guide-logging-for-kalm5.png deleted file mode 100644 index e785f6e..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/guide-logging-for-kalm5.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/increase-replicas.png b/versioned_docs/version-v0.1.0-alpha.4/assets/increase-replicas.png deleted file mode 100644 index 2985900..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/increase-replicas.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/job-component.png b/versioned_docs/version-v0.1.0-alpha.4/assets/job-component.png deleted file mode 100644 index 4a4773c..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/job-component.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/kalm.png b/versioned_docs/version-v0.1.0-alpha.4/assets/kalm.png deleted file mode 100644 index 1b4a3ad..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/kalm.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/log-button-example.png b/versioned_docs/version-v0.1.0-alpha.4/assets/log-button-example.png deleted file mode 100644 index b5dcf05..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/log-button-example.png and /dev/null differ diff --git 
a/versioned_docs/version-v0.1.0-alpha.4/assets/log-button.png b/versioned_docs/version-v0.1.0-alpha.4/assets/log-button.png deleted file mode 100644 index 1d0b289..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/log-button.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/log-component.png b/versioned_docs/version-v0.1.0-alpha.4/assets/log-component.png deleted file mode 100644 index 733fd21..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/log-component.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/log-view.png b/versioned_docs/version-v0.1.0-alpha.4/assets/log-view.png deleted file mode 100644 index 89b8dbe..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/log-view.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/login-screen.png b/versioned_docs/version-v0.1.0-alpha.4/assets/login-screen.png deleted file mode 100644 index a7006dc..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/login-screen.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/main-page.png b/versioned_docs/version-v0.1.0-alpha.4/assets/main-page.png deleted file mode 100644 index 96d6e6a..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/main-page.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/mount-existing-disk.png b/versioned_docs/version-v0.1.0-alpha.4/assets/mount-existing-disk.png deleted file mode 100644 index 2dc4f0b..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/mount-existing-disk.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/name-app.png b/versioned_docs/version-v0.1.0-alpha.4/assets/name-app.png deleted file mode 100644 index c899459..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/name-app.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/networking-tab.png b/versioned_docs/version-v0.1.0-alpha.4/assets/networking-tab.png deleted file mode 100644 index 5eebd6c..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/networking-tab.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/nginx-success.png b/versioned_docs/version-v0.1.0-alpha.4/assets/nginx-success.png deleted file mode 100644 index 4911c10..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/nginx-success.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/open-in-browser.png b/versioned_docs/version-v0.1.0-alpha.4/assets/open-in-browser.png deleted file mode 100644 index 234f584..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/open-in-browser.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/placeholder.png b/versioned_docs/version-v0.1.0-alpha.4/assets/placeholder.png deleted file mode 100644 index 48e510c..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/placeholder.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/ports.png b/versioned_docs/version-v0.1.0-alpha.4/assets/ports.png deleted file mode 100644 index 12c61bc..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/ports.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/pull-error.png b/versioned_docs/version-v0.1.0-alpha.4/assets/pull-error.png deleted file mode 100644 index 
023e44b..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/pull-error.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/redis-command.png b/versioned_docs/version-v0.1.0-alpha.4/assets/redis-command.png deleted file mode 100644 index f3b3b0e..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/redis-command.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/registry-validated.png b/versioned_docs/version-v0.1.0-alpha.4/assets/registry-validated.png deleted file mode 100644 index 0986305..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/registry-validated.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/rolling-update.png b/versioned_docs/version-v0.1.0-alpha.4/assets/rolling-update.png deleted file mode 100644 index a9e62a3..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/rolling-update.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/shell-button.png b/versioned_docs/version-v0.1.0-alpha.4/assets/shell-button.png deleted file mode 100644 index 0d89261..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/shell-button.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/shell-cmd.png b/versioned_docs/version-v0.1.0-alpha.4/assets/shell-cmd.png deleted file mode 100644 index 90e1f8d..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/shell-cmd.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/specify-host.png b/versioned_docs/version-v0.1.0-alpha.4/assets/specify-host.png deleted file mode 100644 index 9880f6e..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/specify-host.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/three-pods.png b/versioned_docs/version-v0.1.0-alpha.4/assets/three-pods.png deleted file mode 100644 index a8fb381..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/three-pods.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/token-input.png b/versioned_docs/version-v0.1.0-alpha.4/assets/token-input.png deleted file mode 100644 index 3368aab..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/token-input.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-0.jpeg b/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-0.jpeg deleted file mode 100644 index 2eca3f4..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-0.jpeg and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-1.png b/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-1.png deleted file mode 100644 index 8921876..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-1.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-10.png b/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-10.png deleted file mode 100644 index 2e32027..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-10.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-2.png b/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-2.png deleted file mode 100644 index 0c4e508..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-2.png and /dev/null differ diff --git 
a/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-3.png b/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-3.png deleted file mode 100644 index d478e4a..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-3.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-4.png b/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-4.png deleted file mode 100644 index 9f6fbbc..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-4.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-5.png b/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-5.png deleted file mode 100644 index 992339e..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-5.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-6.png b/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-6.png deleted file mode 100644 index e13680e..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-6.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-7.png b/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-7.png deleted file mode 100644 index 5dec571..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-7.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-8.png b/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-8.png deleted file mode 100644 index 9875c61..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-8.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-9.png b/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-9.png deleted file mode 100644 index 42e8ddd..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.4/assets/wp-tut-9.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.4/azure-aks.md b/versioned_docs/version-v0.1.0-alpha.4/azure-aks.md deleted file mode 100644 index 0d7fde4..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/azure-aks.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -title: Install on Azure ---- - -There are a few different ways to create a Kubernetes Cluster on Azure. The following guide utilizes Terraform to provision an Azure AKS cluster. - -## Step 1: Install Prerequisites - -- Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) -- Install [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli?in=terraform/azure-get-started) -- Install [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) - -## Step 2: Azure Setup - -You need an Azure account, which you can use to log into the Azure CLI: - -```sh -az login -``` - -Next, create a service principal account so Terraform can authenticate to Azure: - -```sh -az ad sp create-for-rbac --skip-assignment -``` - -Note: the resulting output only appears once. Save the appId and password immediately. Otherwise, it takes non-trivial effort to retrieve the information. - -## Step 3: Terraform Apply - -Clone the repository below and `cd` into the aks directory. - -```sh -git clone https://github.com/kalmhq/tf-scripts -cd tf-scripts/aks -``` - -Open 'terraform.tfvars', and paste in the appId and password from Step 2. - -``` -# terraform.tfvars -appId = "REPLACE_WITH_YOUR_APP_ID" -password = "REPLACE_WITH_YOUR_PASSWORD" -``` - -Install the cluster with the following commands. 
- -```bash -terraform init -terraform apply -``` - -Type `yes` to confirm the installation. - -After 5-10 minutes, the cluster should be created. Once complete, record the **kubernetes_cluster_name** and **resource_group_name** from the terraform output. - -```sh -terraform output -``` - -Configure kubectl to use the new cluster. - -```bash -az aks get-credentials --resource-group NAME_OF_YOUR_RESOURCE_GROUP --name NAME_OF_YOUR_CLUSTER -``` - -Verify the cluster is properly set up and accessible. - -```sh -kubectl cluster-info -``` - -## Step 4: Install Kalm - -Once the cluster is set up, install Kalm with the following command. - -```bash -curl -sL https://get.kalm.dev | bash -``` - -To enable localhost access, open a port via: - -```bash -kubectl port-forward -n kalm-system \ - $(kubectl get pod -n kalm-system \ - -l app=kalm \ - -ojsonpath="{.items[0].metadata.name}") \ - 3010:3010 -``` - -Now open http://localhost:3010/ - -## Clean Up - -Delete the cluster to avoid resource charges. - -```bash -terraform destroy -``` - -## Next Step - -You've now set up Kalm on an Azure AKS cluster. To get a sense of how Kalm works, see the [Hello Kalm](/docs/tut-hello) tutorial. diff --git a/versioned_docs/version-v0.1.0-alpha.4/cronjob.md b/versioned_docs/version-v0.1.0-alpha.4/cronjob.md deleted file mode 100644 index 95f22e8..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/cronjob.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Schedule CronJobs ---- - -- create new application -- create new component -- select `busybox` as the image -- select `Cronjob` as Workload Type -- enter `* * * * *` (every minute) for Cronjob Schedule -- enter `/bin/sh -c 'echo performing a job'` - -![job component](assets/job-component.png) - -- click `Deploy Component` - -You should see a list of jobs appearing 1 minute apart, with `Terminated: Completed` status - -![job complete](assets/completed-jobs.png) diff --git a/versioned_docs/version-v0.1.0-alpha.4/faq-auth.md b/versioned_docs/version-v0.1.0-alpha.4/faq-auth.md deleted file mode 100644 index 09e5750..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/faq-auth.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Authorization ---- - -Kapp uses Kubernetes [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) for authorization. Generally, authorization info is passed through the `Authorization` header. A user should provide enough info for kapp to construct this header; kapp will then pass it to the Kubernetes API server. If you are running kapp behind a proxy which is in charge of authentication and provides the `Authorization` header, then kapp will use the header directly. The Kubernetes API server needs to be configured properly to accept these tokens. - -_IMAGE_PLACE_HOLDER_ - -### Username and password: -You need to configure a static user for Kubernetes first. It's easy to understand but rarely used, as extra configuration and a restart are required. Learn more from https://kubernetes.io/docs/reference/access-authn-authz/authentication/#static-password-file - -### Token -Kubernetes has various ways to configure tokens. -- [Static Token](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#static-password-file). This method requires extra configuration on your API server, and a restart is required. -- [Bootstrap Tokens](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#bootstrap-tokens) -- [Service Account Tokens](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#bootstrap-tokens). 
The following sections show how to log in using a service account token. -- [OIDC Tokens](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#bootstrap-tokens). This is an advanced way to authorize users. There is another article about using OIDC with kapp; you can find more details there. TODO - -## Create a test user - -IMPORTANT: This is for testing only! Do not create tokens this way on a production cluster. Make sure that you know what you are doing before proceeding. Granting admin privileges to the Dashboard's Service Account can be a security risk. - -To bypass the extra configuration and restart, this guide shows how to create a new user using the Service Account mechanism of Kubernetes, grant this user admin permissions, and log into the Kapp Dashboard using the bearer token tied to this user. - -The commands should be executed in the same shell session. - -1. Create a service account - -```bash -kubectl create sa kapp-sample-user -``` - -2. Grant admin permission to the service account - -```bash -kubectl create clusterrolebinding kapp-sample-user-admin --user=system:serviceaccount:default:kapp-sample-user --clusterrole=cluster-admin -``` - -3. Get the service account secret name - -``` -secret=$(kubectl get sa kapp-sample-user -o json | jq -r .secrets\[\].name) -echo $secret -``` - -You will see a token name like `kapp-sample-user-token-vbhwr` - -4. Get the secret token - -``` -secret_token=$(kubectl get secret $secret -o json | jq -r '.data["token"]' | base64 -D) -echo $secret_token -``` - -5. Use the token you got to log in - -_IMAGE_PLACEHOLDER_ - -You will be logged in successfully. - -_IMAGE_PLACEHOLDER_ diff --git a/versioned_docs/version-v0.1.0-alpha.4/faq-permissions.md b/versioned_docs/version-v0.1.0-alpha.4/faq-permissions.md deleted file mode 100644 index adcc788..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/faq-permissions.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Permissions ---- - -## How Permission Works - -Permissions are a big topic. This part will be split into two parts: one is "How to configure user permissions when using kapp?"; the second is "How to build/integrate kapp and Kubernetes with an IDP (Identity Provider)?" - -The following topics are all related to permissions and will be reorganized later. - -- Aladdin's research on RBAC: https://quip.com/FycNArbIZh7v -- Wanglei's note about using Keycloak: #11 -- David's thoughts about kapp architecture: #13 - -Reference: -- https://kubernetes.io/docs/reference/access-authn-authz/authentication/#openid-connect-tokens -- https://github.com/kubernetes/dashboard/blob/master/docs/user/access-control/README.md#authorization-header \ No newline at end of file diff --git a/versioned_docs/version-v0.1.0-alpha.4/first-deploy.md deleted file mode 100644 index e69de29..0000000 diff --git a/versioned_docs/version-v0.1.0-alpha.4/google-gke.md b/versioned_docs/version-v0.1.0-alpha.4/google-gke.md deleted file mode 100644 index a07f4c0..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/google-gke.md +++ /dev/null @@ -1,152 +0,0 @@ ---- -title: Install on Google Kubernetes Engine ---- - -The simplest way to provision a cluster on Google Cloud Platform is via Google Kubernetes Engine. - -As a prerequisite, please install and authenticate the `gcloud` command line tool. Instructions can be found in the official gcloud documentation. - -## Step 1: Create a GKE Cluster - -There are a few different ways to create a GKE Cluster. 
You can create one using the gcloud command line alone, through the web interface, or with Terraform. - -### Option A - gcloud command line - -To begin, choose a Google Cloud project: - -```bash -export PROJECT_ID=hello-kalm -``` - -Note: If you don't have an existing Google Cloud project, you can create one with: - -```bash -export PROJECT_ID=hello-kalm -gcloud projects create $PROJECT_ID -``` - -Make sure billing is enabled. - -You need to enable the Kubernetes Engine API as well: - -```bash -gcloud services enable container.googleapis.com -``` - -
- -Next, provision a cluster with 4 nodes: - -```bash -export M_TYPE=n1-standard-2 && \ -export ZONE=us-west2-a && \ -export CLUSTER_NAME=${PROJECT_ID}-${RANDOM} && \ -gcloud container clusters create $CLUSTER_NAME \ - --cluster-version latest \ - --machine-type=$M_TYPE \ - --num-nodes 4 \ - --zone $ZONE \ - --project $PROJECT_ID -``` - -The creation of the cluster will take a few minutes. Once complete, configure kubectl to use the new cluster: - -```bash -gcloud container clusters get-credentials $CLUSTER_NAME \ - --zone $ZONE \ - --project $PROJECT_ID -``` - -Verify the cluster is properly set up and accessible. - -```sh -kubectl cluster-info -``` - -### Option B - Terraform - -If you are more familiar with Terraform, you can provision a demo cluster with the following steps. - -First, install Terraform. - -Give Terraform access to the Application Default Credentials (ADC). - -```bash -gcloud auth application-default login -``` - -Clone the repository below. - -```bash -git clone https://github.com/kalmhq/tf-scripts -cd tf-scripts/gke -``` - -Open 'terraform.tfvars', and specify the ID of the Google Cloud project you would like to install to. - -``` -# terraform.tfvars -project_id = "REPLACE_ME" -region = "us-west2" -``` - -Install the cluster with the following commands. - -```bash -terraform init -terraform apply -``` - -Type `yes` to confirm the installation. - -After 5-10 minutes, the cluster should be created. Once complete, retrieve the name of the newly created cluster. - -```bash -terraform output -``` - -Configure kubectl to use the new cluster. - -```bash -gcloud container clusters get-credentials NAME_OF_YOUR_CLUSTER --zone ZONE_OF_CLUSTER -``` - -Verify the cluster is properly set up and accessible. - -```sh -kubectl cluster-info -``` - -## Step 2: Install Kalm - -Once the cluster is set up, install Kalm with the following command. - -```bash -curl -sL https://get.kalm.dev | bash -``` - -To enable localhost access, open a port via: - -```bash -kubectl port-forward -n kalm-system \ - $(kubectl get pod -n kalm-system \ - -l app=kalm \ - -ojsonpath="{.items[0].metadata.name}") \ - 3010:3010 -``` - -Now open http://localhost:3010/ - -## Clean Up - -Delete the cluster to avoid resource charges. - -If you created the cluster with Terraform: - -```bash -terraform destroy -``` - -## Next Step - -You've now set up Kalm on a GKE cluster. To get a sense of how Kalm works, see the [Hello Kalm](/docs/tut-hello) tutorial. diff --git a/versioned_docs/version-v0.1.0-alpha.4/guide-aws.md b/versioned_docs/version-v0.1.0-alpha.4/guide-aws.md deleted file mode 100644 index f4d3da0..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/guide-aws.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Install on AWS ---- - -Work In Progress - -## Via KOPS - -## Via diff --git a/versioned_docs/version-v0.1.0-alpha.4/guide-config.md b/versioned_docs/version-v0.1.0-alpha.4/guide-config.md deleted file mode 100644 index ba85e7a..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/guide-config.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: Add Env Variables & Configs ---- - -A well-designed application is likely to have configurations which vary between deployments. Kubernetes makes it easy to override the configurations specified in images. - -
- - -## Example Container Setup - -Let's go through an example of configuring Redis. Our goal is to change the `maxmemory` parameter. (Imagine that you need a larger amount of memory for production vs dev.) - -Create a new Application - -Create a new Component - -Set the image to _"redis"_ - -## Container Lifecycle - -To understand why configuration should be set up outside the container itself, let's first try something that doesn't work: configuring the application right in the container. - -Click the _Shell_ icon to open a shell attached to our redis container. In the shell, enter: - -``` -redis-cli CONFIG SET maxmemory 2mb -``` - -Verify that the config has been correctly set - -``` -redis-cli CONFIG GET maxmemory -``` - -Everything seems to be working. However, let's try deleting the pod. - -Go to **Containers** and click on the **Delete** icon - -As soon as the pod is deleted, Kubernetes will schedule a new one to be deployed, automatically maintaining the desired state of 1 pod running. - -After the new pod is created, click on the _Shell_ icon next to it. - -Check the `maxmemory` setting via - -``` -redis-cli CONFIG GET maxmemory -``` - -So what happened to our setting? It was lost when the pod was destroyed and recreated. Since pods and containers are ephemeral, we can't store configuration state within the pod. - -## Adding a Config File - -The correct way to configure containers is via environment variables and configuration files. Redis happens to use config files. - -Go to **Components**, click on **Edit** - -In the Configuration Files section click **Add** - -For Mount Path, type `/redis-config/redis.conf` - -Click on the **Edit** Icon - -A modal editor will pop up; inside the editor, enter our desired config -of `maxmemory 5mb` - -![edit config file](assets/edit-config-file.png) - -Click **Save** - -We've just created a config file. Now we need to override the startup command to tell Redis to use it. - -In the **Command** input box, type `redis-server "/redis-config/redis.conf"` - -![redis command](assets/redis-command.png) - -Click **Update Component** - -After the update is complete, open the shell again and type - -``` -redis-cli CONFIG GET maxmemory -``` - -Our configuration is picked up, this time via the config file. - -Now if we delete the pod again (go ahead and try it), the configuration will still be there. - -## Environment Variables - -Other applications may use environment variables instead of config files. To add an environment variable, click _Add_ in the Environment Variables section of the Edit Component Form: - -![env variables](assets/env-variables.png) - -Click **Update Component** to apply this change - -To verify that the Environment Variable has been set, open a shell and type - -``` -echo $MY_CUSTOM_SETTING -``` - -You should see the value set above (42) as the output diff --git a/versioned_docs/version-v0.1.0-alpha.4/guide-export-kalm-resources.md b/versioned_docs/version-v0.1.0-alpha.4/guide-export-kalm-resources.md deleted file mode 100644 index 2c3efcd..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/guide-export-kalm-resources.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Import / Export ---- - -:::info -This is a temporary solution; Kalm will provide better import and export functions. -::: - -Sometimes we need to export an application and reinstall it on another cluster. This is how to export resources from Kalm. - -## Prerequisites - -- Make sure Kalm is installed on your cluster. 
-- Make sure you have ***kubectl*** installed and configured for your cluster. - -### Export & import an application - -Run the following command; a resource file describing the application, *$KALM_APP_NAME.bak.yaml*, will be created. - -```bash -export KALM_APP_NAME= -curl https://raw.githubusercontent.com/kalmhq/kalm/add_script_to_export_kalm_resources/scripts/export-resources.sh > export-resources.sh ; bash export-resources.sh $KALM_APP_NAME $KALM_APP_NAME.bak.yaml -``` - -Run the following command on another KALM cluster (make sure there is no application with the same name in that KALM, otherwise there will be conflicts when importing the application), and the application you exported will be imported in a few minutes. - -```bash -kubectl apply -f $KALM_APP_NAME.bak.yaml -``` - -### Migrate from one KALM to another - -1. Prepare a new KALM. https://kalm.dev/docs/install -2. Export all resources from the old KALM -3. Import all resources into the new KALM - -Run the following command to export all resources from the old KALM. - -```bash -curl https://raw.githubusercontent.com/kalmhq/kalm/add_script_to_export_kalm_resources/scripts/export-resources.sh > export-resources.sh ; bash export-resources.sh all-application kalm.bak.yaml -``` - -Run the following command to import all resources into the new KALM (make sure there is no application in your new cluster with the same name as one in the old cluster). -*Persistent data, such as PostgreSQL data and files mounted on disks, will not be migrated. If you want to migrate such data, you will need to do so through your own customized process. - -```bash -kubectl apply -f kalm.bak.yaml -``` diff --git a/versioned_docs/version-v0.1.0-alpha.4/guide-logging-for-kalm.md b/versioned_docs/version-v0.1.0-alpha.4/guide-logging-for-kalm.md deleted file mode 100644 index 9c198f3..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/guide-logging-for-kalm.md +++ /dev/null @@ -1,101 +0,0 @@ ---- -title: Logging System ---- - -KALM provides a simple log board for components; you can view a component's logs in this board. - -![guide-logging-for-kalm1.png](assets/guide-logging-for-kalm1.png) - -![guide-logging-for-kalm2.png](assets/guide-logging-for-kalm2.png) - -For production environments, the features of the log board are not enough. Usually, we need a logging system to collect, pipeline, query, and analyze logs. - -EFK (Elasticsearch) and PLG (Loki) are the most popular logging systems at the moment. Each has its own advantages. - -## Using PLG on KALM - -### Prerequisites - -Make sure KALM is installed on your cluster. -Make sure you have ***kubectl*** installed and configured for your cluster. -Make sure your cluster has at least 500m CPU and 800Mi memory available (each node at least 100m CPU and 100Mi memory) to install PLG. - -### Install PLG on KALM - -Run the following command to install PLG. - -```bash -kubectl apply -f https://raw.githubusercontent.com/kalmhq/kalm/master/resources/kalm-install-plg.yaml -``` - -### View & Query logs - -```bash -kubectl -n loki port-forward $(kubectl get pods -n loki -l app=grafana -o jsonpath="{.items[].metadata.name}") 3000:3000 -``` - -Open [http://localhost:3000](http://localhost:3000/login) in a browser, and log in with the username *admin* and the password *password*. -Jump to view logs for the [grafana pod](http://localhost:3000/explore?orgId=1&left=[%22now-1h%22,%22now%22,%22Loki%22,{%22expr%22:%22{job=\%22loki/grafana\%22}%22},{%22mode%22:%22Logs%22},{%22ui%22:[true,true,true,%22none%22]}]).
- -![guide-logging-for-kalm3.png](assets/guide-logging-for-kalm3.png) - -We can enter more complex query statements in the input field. For more on LogQL, see [here](https://github.com/grafana/loki/blob/v1.5.0/docs/logql.md). - -### Persistence and retention for PLG - -By default, we set up a 10Gi disk to store logs for Loki; you can adjust this to a value suitable for your system. - -By default, logs in Loki are never deleted. You may need to delete logs that have existed for a long time. See [more configuration about retention of logs](https://github.com/grafana/loki/blob/master/docs/sources/configuration/_index.md#table_manager_config). - -### PLG documentation - -For more information on PLG, see https://github.com/grafana/loki/blob/master/docs/README.md. - - -## Using EFK on KALM - -### Prerequisites - -Make sure KALM is installed on your cluster. -Make sure you have ***kubectl*** installed and configured for your cluster. -Make sure your cluster has at least 4100m CPU and 8100Mi memory available (each node at least 100m CPU and 100Mi memory) to install EFK. - -### Install EFK on KALM - -Run the following command to install EFK. - -```bash -kubectl apply -f https://raw.githubusercontent.com/kalmhq/kalm/master/resources/kalm-install-efk.yaml -``` - -### View & Query logs - -```bash -kubectl -n efk port-forward $(kubectl get pods -n efk -l app=kibana -o jsonpath="{.items[].metadata.name}") 5601:5601 -``` - -Open [http://localhost:5601](http://localhost:5601/) in a browser. -Jump to view logs in [kibana](http://localhost:5601/app/logs/stream). - -![guide-logging-for-kalm4.png](assets/guide-logging-for-kalm4.png) - -You can use [KQL](https://www.elastic.co/guide/en/kibana/master/kuery-query.html) to query logs. - -![guide-logging-for-kalm5.png](assets/guide-logging-for-kalm5.png) - -### Persistence and retention for EFK - -By default, we set up a 30Gi disk to store logs for Elasticsearch; you can adjust this to a value suitable for your system. - -You can configure index lifecycle management (ILM) policies to automatically manage indices according to your performance, resiliency, and retention requirements. For example, you could use ILM to: - -* Spin up a new index when an index reaches a certain size or number of documents -* Create a new index each day, week, or month and archive previous ones -* Delete stale indices to enforce data retention standards - - See [more information about ILM](https://www.elastic.co/guide/en/elasticsearch/reference/7.8//index-lifecycle-management.html). - -### EFK documentation - -For more information on EFK, see the [Elastic documentation](https://www.elastic.co/guide/index.html). diff --git a/versioned_docs/version-v0.1.0-alpha.4/guide-logs.md b/versioned_docs/version-v0.1.0-alpha.4/guide-logs.md deleted file mode 100644 index 700a899..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/guide-logs.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: View Container Logs ---- -
- -
- -Sometimes it's useful to view the log output of a particular container. Kalm provides a view for quickly viewing logs in the web UI: - -![log button](assets/log-button.png) - -## Example Container Setup - -Let's create a container that logs output every second. Create a New Application, then add a single Component named **logviewer** with the image set to **busybox**. - -In the **Command** input box, enter the following command, which outputs a timestamp every second: - -``` -/bin/sh -c 'i=0; while true; do echo "$i: $(date)"; i=$((i+1)); sleep 1; done' -``` - -![log component](assets/log-component.png) - -Click **Deploy** to instantiate the component container. - -After deployment is complete, click the _Log Icon_: - -## View log - -![log button example](assets/log-button-example.png) - -
-You should see the Log View: - -![log view](assets/log-view.png) - -## Logging system - -You may ask: do I have to view my logs one container at a time? What if I want to view aggregated logs for a specific time period? - -Don't worry; see if the [Logging System](./guide-logging-for-kalm.md) meets your needs. diff --git a/versioned_docs/version-v0.1.0-alpha.4/guide-minikube.md b/versioned_docs/version-v0.1.0-alpha.4/guide-minikube.md deleted file mode 100644 index 7f11e43..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/guide-minikube.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Install on Minikube ---- - -The simplest way to provision a localhost cluster is minikube. - -## Step 1: Install minikube - -Please follow the [minikube official documentation](https://kubernetes.io/docs/tasks/tools/install-minikube/) to install minikube. - -## Step 2: Start a minikube cluster - -We recommend using 8G of memory and 4 CPU cores to test Kalm. Adjust these resources based on your environment. - -```bash -minikube start --memory 8192 --cpus 4 -``` - -After the cluster is up and running, open a new terminal and type the following command (you may be prompted to enter your password). It creates a route to services deployed with type LoadBalancer and sets their Ingress to their ClusterIP. - -```bash -minikube tunnel -``` - -## Step 3: Install Kalm - -Once the cluster is set up, install Kalm with the following command. - -```bash -curl -sL https://get.kalm.dev | bash -``` - -To enable localhost access, open a port via: - -```bash -kubectl port-forward -n kalm-system \ - $(kubectl get pod -n kalm-system \ - -l app=kalm \ - -ojsonpath="{.items[0].metadata.name}") \ - 3010:3010 -``` - -Now open http://localhost:3010/ - -## Clean Up - -Delete the cluster after testing. - -``` -minikube delete -``` - -## Next Step - -You've now set up Kalm on a minikube cluster. To get a sense of how Kalm works, see the [Hello Kalm](./tut-hello.md) tutorial. diff --git a/versioned_docs/version-v0.1.0-alpha.4/https-certs.md b/versioned_docs/version-v0.1.0-alpha.4/https-certs.md deleted file mode 100644 index 59a47bf..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/https-certs.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: HTTPS Certificates ---- - -## Overview - -To enable HTTPS access to your applications, you need a certificate from a Certificate Authority. Kalm can help you create new certificates via Let's Encrypt, or hook up existing ones. Domains with certificates managed by Kalm can be easily routed to any container on your cluster. - -## Automated Certificates with Let's Encrypt - -- Click **Certificates** in the Navigation Sidebar -- Click **New Certificate** -- The Certificate Creation page displays your cluster IP. You need to point the domains you want to configure to this cluster IP by adding an A Record. (Specific instructions depend on your domain provider) - -* Enter a **Certificate Name** -* Enter one or more domains in the **Domains** field. You should get an indicator next to each domain which specifies if the IP is properly configured. If you get a warning symbol, please check your DNS configurations. (DNS records can sometimes take a few moments to update) -* Click **Create Certificate** - -![Create Cert](assets/placeholder.png) - -## Upload Existing Certificate - -If you want to use an existing certificate, click **Use an existing certificate** and paste your Certificate and Private Key.
- -![Upload Cert](assets/placeholder.png) - -## Additional Instructions - -### Using Certified Domains in Routes - -Domains which have certificates properly configured can be used in Routes to handle HTTPS traffic. See the [Routes Guide](/) for more details. - -![Routes HTTPS](assets/placeholder.png) - -### Wildcard Certificates - -We have plans to support wildcard certificates in the near future, and can suggest workarounds in the meantime. Email david@kalm.dev if you need this. diff --git a/versioned_docs/version-v0.1.0-alpha.4/install.md b/versioned_docs/version-v0.1.0-alpha.4/install.md deleted file mode 100644 index 18c600e..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/install.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -title: Installation ---- - -## Compatibility - -Kalm is optimized to work with the latest version of Kubernetes (currently 1.18.x), and is backwards compatible down to 1.14.x. - -| Kalm version | k8s 1.16.x | k8s 1.17.x | k8s 1.18.x | -|--------------|------------|------------|------------| -| v0.1.0 | ✔ | ✔ | ✔ | - -## Step 1: Prerequisites - -### Cluster Setup - -Kalm can be used to manage any kubernetes cluster. -For the purpose of this tutorial, we recommend that you try Kalm on a [Minikube](./guide-minikube.md) localhost cluster first. - -Alternatively, see the References sections for provisioning clusters on [AWS](./guide-aws.md), [Google Cloud](./google-gke.md) and [Azure](./azure-aks.md). - -### Install Kubectl - -Installation of Kalm requires kubectl, which can be installed according to the official Install and Set Up kubectl docs. - -:::note -Please make sure that the version of kubectl is sufficient. It is strongly recommended that you use the version corresponding to the cluster. Using an earlier version of kubectl may cause errors in the installation process. -::: - -## Step 2: Install Kalm - -:::caution -Before proceeding, please make sure that the current context of your kubectl is the correct cluster. -::: - -Kalm can be installed as a [Kubernetes Operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) directly onto your cluster via: - -```bash -curl -sL https://get.kalm.dev | bash -``` - -This command installs Kalm plus a few dependencies, and typically takes 3-5 minutes to complete. Relax in the meantime, or watch this short video on how Kalm works: -
- -
- -
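If you prefer to follow along from a terminal while the script runs, the components land in the `kalm-system` namespace (the same namespace used by the port-forward command below):

```bash
# Watch the Kalm system pods come up in real time (Ctrl+C to stop).
kubectl get pods -n kalm-system --watch
```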
- -The installation script will give you real-time feedback as services spin up. Once you see **Installation Complete**, move on to the next step. - -## Step 3: Access Webserver - -To enable browser access, open a port via: - -```bash -kubectl port-forward -n kalm-system \ - $(kubectl get pod -n kalm-system \ - -l app=kalm \ - -ojsonpath="{.items[0].metadata.name}") \ - 3010:3010 -``` - -Now open http://localhost:3010/ - -![login screen](assets/main-page.png) - -## Next Step - -Congratulations! Kalm is now properly set up and running. Next, let's create our first application to see how Kalm works. diff --git a/versioned_docs/version-v0.1.0-alpha.4/intro.md b/versioned_docs/version-v0.1.0-alpha.4/intro.md deleted file mode 100644 index 79d912c..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/intro.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: What is Kalm -hide_title: false -hide_table_of_contents: true -sidebar_label: What is Kalm - -# SEO options -description: What is kalm -keywords: - - docs - - docusaurus -image: https://i.imgur.com/mErPwqL.png ---- - -Kalm (Kubernetes AppLication Manager) is an open source tool that makes it easier to manage applications on kubernetes without struggling with YAML files. Kalm comes with a web interface for the most common operations, including: - -- Creation of new application deployments -- Deploying, updating, and scaling existing deployments -- Volume, config, and secret management - -Kalm is installed as a kubernetes controller directly on your cluster, and automatically sets up istio and cert-manager, which makes it easy to configure HTTPS certificates, routes, SSO, and a logging system out of the box. - -![Web Interface](assets/kalm.png) - -## Why Kalm - -Kubernetes is a powerful and flexible tool for managing microservices. However, first-time setup and configuration can be daunting. The high upfront cost makes it prohibitive for smaller teams to adopt kubernetes. We made Kalm in an attempt to decrease the cognitive load for developers interacting with kubernetes in both development and production environments. - -Kalm tries to reduce complexity in three different ways: - -1. Provide an intuitive graphical interface for the most common operations. -2. Introduce higher level Custom Resource Definitions such as `Application`. These CRDs help reduce the amount of boilerplate configuration and copy-pasting. -3. Design with popular extensions in mind - Kalm is designed to work with istio, cert-manager, prometheus, and more, which makes setting up a productive stack quick and easy. - -Next, let's install Kalm and go through an example to illustrate how it works. diff --git a/versioned_docs/version-v0.1.0-alpha.4/probes.md b/versioned_docs/version-v0.1.0-alpha.4/probes.md deleted file mode 100644 index c4eef96..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/probes.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: Health Checking Probes ---- - -Kubernetes Liveness and Readiness probes are useful for checking the health of pods. Readiness probes determine if a pod is ready to receive traffic, and Liveness probes signal if a pod's containers should be restarted.
- -Both liveness and readiness probes support a variety of action types to determine if something is healthy: - -- HTTP: Healthy means a request to some specified HTTP endpoint returned a response between 200 and 399 -- Command: Healthy means a command executed successfully (return code 0) -- TCP: Healthy means a specific TCP socket was successfully opened - -## Liveness Probe Example - -Let's go through an example of a liveness probe implemented via a command: - -- Create a new application -- Create a new component, with `busybox` as the image -- Add the command `/bin/sh -c 'touch /tmp/healthy; sleep 10000'` - -This creates a file upon startup, in this case representing the health of our Component. - -- Click the 'Health' Tab -- Select `Command` from the Liveness Probe dropdown. -- Enter `cat /tmp/healthy` as the command - -The `cat` command will execute successfully if the file exists. - -- Decrease the number of consecutive tests from `3` to `1`. This will save us some time to see the results -- Click **Deploy Component** - -The pod should spin up successfully. Now let's delete the file by opening a shell. - -``` -rm /tmp/healthy -``` - -Within 20 seconds, the Terminal will become disconnected because the container is deleted. Go back to the Component view, and you will see the number of Restarts increase from 0 to 1. - -By restarting the pod, the "problem" of the missing /tmp/healthy is fixed, as the file is created by the startup command. This demonstrates the purpose of the livenessProbe: triggering automatic restarts in an attempt to fix problematic pods. - -## Readiness Probe Example - -Readiness Probes are very similar. Let's create one with an HTTP action. - -- Create a new component with `quay.io/openshiftlabs/simpleservice:0.5.0` as the image -- In the **Networking** tab, add a port named `healthport` with Container Port set to `9876` -- In the **Health** tab, create an HTTP readiness probe with `/health` and `9876` for the port - -The pod should be ready according to the probe. - -The image we are using allows us to insert an [artificial delay to /health](https://github.com/mhausenblas/simpleservice#changing-runtime-behaviour) by adding an environment variable: - -- Add an environment variable `HEALTH_MAX` with the value `5000`, which introduces a 5 second delay to /health responses - -The probe should now fail. - -- Remove the environment variable, and the probe should start working again. diff --git a/versioned_docs/version-v0.1.0-alpha.4/ref-crd.md b/versioned_docs/version-v0.1.0-alpha.4/ref-crd.md deleted file mode 100644 index c18fc69..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/ref-crd.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: Custom Resource Definitions ---- - -Discuss: do we need this? What to put here that would be useful and add to transparency? diff --git a/versioned_docs/version-v0.1.0-alpha.4/registry.md b/versioned_docs/version-v0.1.0-alpha.4/registry.md deleted file mode 100644 index 1be524e..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/registry.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Private Image Registry ---- - -If your images reside in a private registry, you must configure permissions, or else you'll get a `PullImageError` when deploying applications. - -Kalm can help you simplify the process of configuring Private Image Registries. - -## Example Usage - -As a prerequisite, you need access to a private registry. One option is to create a free private repository on Docker Hub. - -Let's upload an image to a private Docker Hub registry.
- -Start by pulling down the `busybox` image onto your local machine, then re-tag it and upload it to your private repo. Replace `` with the name of your repository. - -``` -docker pull busybox -docker tag busybox:latest /private-busybox:latest -docker push /private-busybox:latest -``` - -Now let's create a new application with a component using this private image. - -- Create an Application -- Create a Component with `/private-busybox:latest` as the image -- In the command field, enter `/bin/sh -c 'sleep 10000'` to keep the container alive. -- Click **Deploy** - -You should get a "Failed to pull Image..." error. - -![cannot pull error](assets/pull-error.png) - -This error is expected because the pod does not have permission to pull the specified image. - -### Adding a Private Repository - -We can fix the issue by adding a private registry. - -- Click **Registries** in the left navigation sidebar -- Click **Add Registry** -- Enter the username and password for your repository. The host can be blank if you are using hub.docker.com; otherwise enter the full URL (e.g. https://gcr.io) - -- Press **Save** - -![registry form](assets/add-registry-form.png) - -If the login info is correct, you should see the Verified checkbox light up shortly. - -![registry validated](assets/registry-validated.png) - -### Redeploy - -Now let's redeploy our application. - -Go back to the component and **delete the failing pod**. Deleting a pod will trigger a redeployment. This time, the pod should be successfully deployed. diff --git a/versioned_docs/version-v0.1.0-alpha.4/service-account.md b/versioned_docs/version-v0.1.0-alpha.4/service-account.md deleted file mode 100644 index 47ab8f7..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/service-account.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Authorization via Service Account ---- - -## Create a Service Account - -In order to manage applications, Kalm requires _cluster-admin_ privileges on the cluster. To keep things clean, we recommend creating a _Service Account_ for Kalm. - -To create a service account with cluster-admin privileges: - -```bash -kubectl create sa kalm-admin-user -kubectl create clusterrolebinding \ - kalm-admin-binding \ - --user=system:serviceaccount:default:kalm-admin-user \ - --clusterrole=cluster-admin -``` - -### Retrieve Token - -_Note: the following command utilizes jq, a useful JSON processor which can be found at [https://stedolan.github.io/jq/](https://stedolan.github.io/jq/)_ - -To use the service account we've just created, we need to retrieve the token: - -``` -KALM_ADMIN_SECRET=$(kubectl get sa kalm-admin-user -o json | jq -r .secrets\[\].name) -KALM_ADMIN_TOKEN=$(kubectl get secret $KALM_ADMIN_SECRET -o json | jq -r '.data["token"]' | base64 -d) -echo $KALM_ADMIN_TOKEN -``` - -\ -You should see a token similar to: - -![example token](assets/example-token.png) -\ -Copy the token, paste it into the input field, and press **Login** - -![token input](assets/token-input.png) - -You should see the main page - -![main page](assets/main-page.png) diff --git a/versioned_docs/version-v0.1.0-alpha.4/tut-bookinfo.md b/versioned_docs/version-v0.1.0-alpha.4/tut-bookinfo.md deleted file mode 100644 index 421877f..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/tut-bookinfo.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Application and Routing ---- - -Now that you've seen how Kalm works, let's set up a more representative Application.
- -About Bookinfo - -This tutorial will teach you: - -- how to deploy a representative application with multiple pods working together -- how to configure pods to work with each other -- how to edit existing applications -- how to set up external routing for applications diff --git a/versioned_docs/version-v0.1.0-alpha.4/tut-create.md b/versioned_docs/version-v0.1.0-alpha.4/tut-create.md deleted file mode 100644 index d052f9a..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/tut-create.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: Creating an Application ---- - -For this tutorial let's create an application \ No newline at end of file diff --git a/versioned_docs/version-v0.1.0-alpha.4/tut-deploy.md b/versioned_docs/version-v0.1.0-alpha.4/tut-deploy.md deleted file mode 100644 index 1626954..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/tut-deploy.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: Deploy ---- - -Some Text \ No newline at end of file diff --git a/versioned_docs/version-v0.1.0-alpha.4/tut-hello.md b/versioned_docs/version-v0.1.0-alpha.4/tut-hello.md deleted file mode 100644 index b25dd93..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/tut-hello.md +++ /dev/null @@ -1,130 +0,0 @@ ---- -title: Hello Kalm ---- - -Let's go through a simple example of deploying a single pod holding an nginx container. - -This tutorial will teach you: - -- The basics of how Kalm organizes deployments into applications -- How to create application configurations -- How to open a port and direct traffic to an application - -
- -
- -## Step 1: Create Application - -The main page of Kalm shows a list of applications. Press the **Add Application** button - -![add application](assets/add-app.png) - -Enter a name for your application, then press **Create Application** - -![name application](assets/name-app.png) - -
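If you are curious what this did on the cluster, here is a quick check from the terminal (this assumes, as in recent Kalm versions, that each application is backed by its own namespace):

```bash
# The new application should show up in the namespace list.
kubectl get namespaces
```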
- -## Step 2: Create Component - -An application is made of one or more _components_. Each component typically holds a docker image. In this example, we will create a component which contains the official nginx image - -Under the Basic Information section, enter a name such as _"webserver"_ into the **Name** field and **_nginx:alpine_** into the **Image** field. Leave all other fields as is and click **Deploy Component**. - -![create component](assets/create-comp.png) - -After a few seconds, a single Pod holding the nginx:alpine image will be deployed to your cluster. - -![first pod](assets/first-pod.png) - -
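You can also cross-check from the CLI; this is a sketch that assumes the component name `webserver` chosen above:

```bash
# Find the nginx pod Kalm created for the component.
kubectl get pods --all-namespaces | grep webserver
```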
- -## Step 3: Examining and Updating - -Let's examine our pod to see if nginx is really deployed. Kalm provides a handy terminal for quick examinations. Open a shell to our pod by clicking on the shell icon. - -![shell button](assets/shell-button.png) - -Run a few commands, for example: - -``` -cd /etc/nginx -ls -cat nginx.conf -``` - -It seems that nginx is correctly installed! - -![shell commands](assets/shell-cmd.png) - -Next, let's go back to the Components list by clicking on **Components** in the navigation sidebar. - -Currently one pod is running. Let's scale up our application. Click the **Edit** button. - -![edit component](assets/edit-comp.png) - -Change the number of replicas to **3** and click Deploy. - -![increase replicas](assets/increase-replicas.png) - -After a few moments, there should be three pods running. - -![three pods](assets/three-pods.png) - -Kubernetes is declarative, which means you specify the end result ("I want 3 pods"), and the kubernetes control plane figures out how best to achieve the end result for you ("let's add 2 more"). - -## Step 4: Ports and Routing - -Let's make our application accessible in the browser. We can do this by opening a port on our component, then adding a route to it. - -First we'll open a port. Once again, we'll go to the components area and click on **Edit**, then click on the **Networking** tab. - -![networking tab](assets/networking-tab.png) - -To open a port, we need to specify a `Container Port` and a `Service Port`. Kalm tries to be helpful by providing a visual reminder of which is which. - -The Container port should be **80** because it's what the `nginx:alpine` image defaults to. The Service port can be an arbitrary port of your choosing, such as **8080**. - -![specify ports](assets/ports.png) - -Click **Deploy** to apply changes. - -:::note -During the deployment you may notice that the number of pods temporarily exceeds three. Kalm by default uses `rolling update`, which means pods are incrementally added and removed one by one, resulting in zero downtime. -::: - - -![rolling updates](assets/rolling-update.png) - -
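Under the hood, opening a port creates a Kubernetes Service. If you'd like to verify this before wiring up a route, here is a sketch (the service's name and namespace depend on the application and component names you chose):

```bash
# Confirm a service exposing port 8080 now exists for the component.
kubectl get svc --all-namespaces | grep webserver
```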
- -Now that the port is open, let's add a route. Click **Routes** in the navigation sidebar, then click **Add Route** - -![add route](assets/add-route.png) - -Let's enter "\*" for host, which allows us to visit directly via IP address. - -![specify host](assets/specify-host.png) - -In the _Targets_ section, select our _"webserver"_ component from the dropdown. - -![add target](assets/add-target.png) - -Click **Update Route**. Our route is now completely set up. - -Click **Open in Browser** to check it out. - -![open in browser](assets/open-in-browser.png) - -Great, our application is working! - -![nginx success](assets/nginx-success.png) - -You've just installed an application on your cluster, modified it, scaled it, and set up routing to make it accessible from the outside world! - -All the heavy lifting is done via kubernetes and istio. Kalm is simply applying the appropriate configurations to your cluster. In fact, anyone familiar with kubernetes should be able to create the same application configuration with a text editor and `kubectl`. We encourage this as an _exercise for the reader_. - -## Next Step - -Admittedly our "application" is rather trivial. In the next tutorial, let's go over a more representative application containing multiple microservices working together. diff --git a/versioned_docs/version-v0.1.0-alpha.4/tut-overview.md b/versioned_docs/version-v0.1.0-alpha.4/tut-overview.md deleted file mode 100644 index f902b11..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/tut-overview.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: Basic Tutorial Overview ---- - -Overview of steps in the tutorial \ No newline at end of file diff --git a/versioned_docs/version-v0.1.0-alpha.4/tut-rollback.md b/versioned_docs/version-v0.1.0-alpha.4/tut-rollback.md deleted file mode 100644 index 6fa0432..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/tut-rollback.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Rollback ---- - -- Something broke -- Rollback \ No newline at end of file diff --git a/versioned_docs/version-v0.1.0-alpha.4/tut-wordpress.md b/versioned_docs/version-v0.1.0-alpha.4/tut-wordpress.md deleted file mode 100644 index 2c3f4d4..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/tut-wordpress.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -title: "Demo: Hosting Wordpress on K8S" ---- - -![Wordpress with Kubernetes](assets/wp-tut-0.jpeg) - -While hosting a blog on Kubernetes might be somewhat overkill, it can make for a good learning experience. In this tutorial, let's deploy Wordpress with Kalm in 5 minutes. - -## Create New Application - -The first thing we need is a new Application for the two components that make up our WordPress site. - -- In your Kalm application dashboard, click _Create App_ and enter your application's name - -We used the name `another-blog` for this tutorial. - -![Create new application](assets/wp-tut-1.png) - -## Add The Database Component - -Once you create the application, you'll be prompted to add a component to it. We'll start by adding our Database Component. - -- For the component _Name_, enter `wordpress-mysql` -- For the component _Image_, enter `mysql:5.6` - -### Configurations - -Next we'll move into the _Config_ tab. - -- Under Environment Variables, click _+ New Variable_ - - For the _Name_, enter `MYSQL_ROOT_PASSWORD` - - For the _Value_, enter `mysql-pass` - -![Add Database Component 1](assets/wp-tut-2.png) - -### Networking - -Now we'll expose a Container Port to the cluster so we can access this image.
Click the _Networking_ tab. - -- Under Ports, click _+ ADD_ - - For the _Protocol_, keep the default `http` - - For the _Container Port_, enter `3306` - - You can keep the _Service Port_ blank - -![Add Database Component 2](assets/wp-tut-3.png) - -### Disks - -Lastly, we'll add some storage for this image. Click on the _Disks_ tab. - -- Under Disks, click _+ Add_ - - For the _Type_, keep the default `Create and mount disk` - - For the _Mount Path_, enter `/var/lib/mysql` - - For the _Storage Class_, keep the default `kalm-hdd` - - For the _Size_, enter `1Gi` - -![Add Database Component 3](assets/wp-tut-4.png) - -Click _Deploy Component_ and our pod will start spinning up. While this is deploying, we'll set up our second component. - -## Add The Wordpress Component - -To add our second component, we'll click _Add Component_ from the Application Components page. - -- For the component _Name_, enter `wordpress` -- For the component _Image_, also enter `wordpress` - -### Configurations - -Once again we'll move to the _Config_ tab. This time we're adding two Environment Variables. - -- Under Environment Variables, click _+ New Variable_ - - For the _Name_, enter `MYSQL_ROOT_HOST` - - For the _Value_, enter `wordpress-mysql` -- Click _+ New Variable_ again to add a second Environment Variable - - For the _Name_, enter `MYSQL_ROOT_PASSWORD` - - For the _Value_, enter `mysql-pass` - -![Add Database Component 3](assets/wp-tut-5.png) - -### Networking - -Once again we'll click the _Networking_ tab and expose a container port to the cluster. - -- Under Ports click _+ ADD_ - - For the _Protocol_, keep the default `http` - - For the _Container Port_, enter `80` - - You can keep the _Service Port_ blank again - -![Add Database Component 2](assets/wp-tut-6.png) - -### Disks - -Again we'll click the _Disks_ tab to add storage for this component. - -- Under Disks, click _+ Add_ - - For the _Type_, keep the default `Create and mount disk` - - For the _Mount Path_, enter `/var/www/html` - - For the _Storage Class_, keep the default `kalm-hdd` - - For the _Size_, enter `1Gi` - -![Add Database Component 3](assets/wp-tut-7.png) - -## Add Route - -Lastly, we need to open a Route for our WordPress component so we can access it via a web browser. Click the _Routes_ tab on the left navigation menu. - -- Click `Add Route` - - For _Hosts_, you can either use your own domain or just click the ingress IP quick action (the cluster IP under the Hosts input field) - - For Targets, click the _Choose a target_ dropdown and select your `wordpress:80` component from the menu. - - You can leave all of the other options as their defaults - - Click `Create Route` to create your route - -![Add Database Component 3](assets/wp-tut-8.png) - -## Rock it! - -After a few seconds, all of the components should be up and running and there should be a green checkmark next to your new route. - -![Add Database Component 3](assets/wp-tut-9.png) - -You can click the domain to open up and play with your new WordPress site! - -![Add Database Component 3](assets/wp-tut-10.png) diff --git a/versioned_docs/version-v0.1.0-alpha.4/volumes.md b/versioned_docs/version-v0.1.0-alpha.4/volumes.md deleted file mode 100644 index 0ed4462..0000000 --- a/versioned_docs/version-v0.1.0-alpha.4/volumes.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: Mount Disks ---- - -In Kubernetes, volumes are directories available to containers to use as a filesystem. Volumes persist beyond the lifecycle of pods. - -There are many types of Volumes.
Kalm simplifies the typical use case by representing them as "Disks". - -Let's create an application with 2 containers, and share a Disk between them. - -- create an application -- create a component called `comp1` with `centos:7` as the image -- one caveat is that containers must have a running process, or they will be terminated immediately after creation. Therefore, even though we don't need the container to do anything, we still need to give it a command to stay alive: -- enter `/bin/bash -c 'sleep 100000'` for the command, basically telling it to sleep - -![create comp1](assets/create-comp1.png) - -Now let's create and mount a disk - -- Go to the **Disks** tab -- Click **Add** Disk -- Select `Create and mount disk` in the Type dropdown -- enter `/tmp/folder1` for Mount Path -- select `standard` for Storage Class -- enter `0.1` (Gi) for Size -- click **Update Component** - -![create and mount](assets/create-and-mount-disk.png) - -After the component is created, open a shell, navigate to the mounted disk, and create a file representing application data. - -```bash -cd /tmp/folder1 -echo "Some Data" > mydata -``` - -Now let's create a second component, identical to the first one: - -- create a second component called `comp2` with `centos:7` as the image -- enter `/bin/bash -c 'sleep 100000'` for the command - -This time, let's mount the existing disk we created above with `comp1` - -- Go to the **Disks** tab -- Click **Add** Disk -- Select `Mount an existing disk` in the Type dropdown -- enter `/tmp/folder2` for Mount Path -- select the option that starts with `pvc-comp1-...` in the Claim Name dropdown -- click **Update Component** - -![mount existing disk](assets/mount-existing-disk.png) - -Let's delete comp1 - -![delete comp 1](assets/delete-comp1.png) - -After comp2 is created, open a shell to the pod and navigate to the mounted disk - -``` -cd /tmp/folder2 -``` - -Let's check to see if the file we created by shelling into comp1's container exists here - -``` -cat mydata -``` - -The data created while we were attached to comp1 (which no longer exists) is available in comp2. - -![comp 2 data](assets/comp2-data.png) - -## Recap - -In this guide, we created two components attached to the same Disk, showed that data is shared across the two volumes, and saw that the disk survives the deletion of components. diff --git a/versioned_docs/version-v0.1.0-alpha.5/amazon-eks.md b/versioned_docs/version-v0.1.0-alpha.5/amazon-eks.md deleted file mode 100644 index a966abc..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/amazon-eks.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: Install on Amazon EKS ---- - -There are many different ways to create a Kubernetes Cluster on Amazon. We will cover installing with terraform and kops _(coming soon)_. - -## Step 1: Install Prerequisites - -- Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) -- Install [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) -- Install [Amazon CLI](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) - -## Step 2: AWS Setup - -You will need an AWS service account. Configure [service credentials](https://console.aws.amazon.com/iam/home?#/security_credentials), then configure the AWS CLI with: - -```bash -aws configure -``` - -Enter your Access key ID and secret.
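Before moving on, it is worth confirming that the CLI is actually authenticated; the standard STS identity check works well here:

```bash
# Should print the account ID and ARN of the credentials you just configured.
aws sts get-caller-identity
```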
- -## Step 3: Terraform Apply - -Clone the repository below and `cd` into the eks directory - -```bash -git clone https://github.com/kalmhq/tf-scripts -cd tf-scripts/eks -``` - -Install the cluster with the following commands. - -```bash -terraform init -terraform apply -``` - -Type `yes` to confirm the installation. - -After 5-10 minutes, the cluster should be created. - -View the newly cluster with: - -```bash -aws eks list-clusters -``` - -Configure kubectl to use the new cluster. - -```bash -aws eks --region us-west-2 update-kubeconfig --name NAME_OF_YOUR_CLUSTER -``` - -Verify the cluster is properly setup and accessible. - -```sh -kubectl get nodes -``` - -## Step 4: Install Kalm - -Once the cluster is setup, install Kalm with the following command. - -```bash -curl -sL https://get.kalm.dev | bash -``` - -To enable localhost access, open a port via: - -```bash -kubectl port-forward -n kalm-system \ - $(kubectl get pod -n kalm-system \ - -l app=kalm \ - -ojsonpath="{.items[0].metadata.name}") \ - 3010:3010 -``` - -Now open http://localhost:3010/ - you can now access Kalm on your Amazon EKS cluster! - -## Clean Up - -Delete the cluster to avoid resource charges. - -```bash -terraform destroy -``` - -## Next Step - -You've now setup Kalm on an Amazon EKS cluster. To get a greater sense of how Kalm works, see the [Hello Kalm](/docs/tut-hello) tutorial. diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/add-app.png b/versioned_docs/version-v0.1.0-alpha.5/assets/add-app.png deleted file mode 100644 index 0ac2519..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/add-app.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/add-registry-form.png b/versioned_docs/version-v0.1.0-alpha.5/assets/add-registry-form.png deleted file mode 100644 index 9785114..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/add-registry-form.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/add-route.png b/versioned_docs/version-v0.1.0-alpha.5/assets/add-route.png deleted file mode 100644 index 3963583..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/add-route.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/add-target.png b/versioned_docs/version-v0.1.0-alpha.5/assets/add-target.png deleted file mode 100644 index 91ea018..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/add-target.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/comp2-data.png b/versioned_docs/version-v0.1.0-alpha.5/assets/comp2-data.png deleted file mode 100644 index 395018c..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/comp2-data.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/completed-jobs.png b/versioned_docs/version-v0.1.0-alpha.5/assets/completed-jobs.png deleted file mode 100644 index 7b94bba..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/completed-jobs.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/create-and-mount-disk.png b/versioned_docs/version-v0.1.0-alpha.5/assets/create-and-mount-disk.png deleted file mode 100644 index 33dd278..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/create-and-mount-disk.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/create-certificate.png b/versioned_docs/version-v0.1.0-alpha.5/assets/create-certificate.png deleted file mode 
100644 index cca4806..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/create-certificate.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/create-comp.png b/versioned_docs/version-v0.1.0-alpha.5/assets/create-comp.png deleted file mode 100644 index f7a2c60..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/create-comp.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/create-comp1.png b/versioned_docs/version-v0.1.0-alpha.5/assets/create-comp1.png deleted file mode 100644 index 0ab63b3..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/create-comp1.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/delete-comp1.png b/versioned_docs/version-v0.1.0-alpha.5/assets/delete-comp1.png deleted file mode 100644 index c898db5..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/delete-comp1.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/delete-pod.png b/versioned_docs/version-v0.1.0-alpha.5/assets/delete-pod.png deleted file mode 100644 index 08f4828..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/delete-pod.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/edit-comp.png b/versioned_docs/version-v0.1.0-alpha.5/assets/edit-comp.png deleted file mode 100644 index 41e127e..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/edit-comp.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/edit-config-file.png b/versioned_docs/version-v0.1.0-alpha.5/assets/edit-config-file.png deleted file mode 100644 index 59d8ab6..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/edit-config-file.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/env-variables.png b/versioned_docs/version-v0.1.0-alpha.5/assets/env-variables.png deleted file mode 100644 index dca489f..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/env-variables.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/example-token.png b/versioned_docs/version-v0.1.0-alpha.5/assets/example-token.png deleted file mode 100644 index 835db9a..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/example-token.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/existing-certificate.png b/versioned_docs/version-v0.1.0-alpha.5/assets/existing-certificate.png deleted file mode 100644 index 44212d1..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/existing-certificate.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/first-pod.png b/versioned_docs/version-v0.1.0-alpha.5/assets/first-pod.png deleted file mode 100644 index 9791c02..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/first-pod.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/guide-logging-for-kalm1.png b/versioned_docs/version-v0.1.0-alpha.5/assets/guide-logging-for-kalm1.png deleted file mode 100644 index babf08c..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/guide-logging-for-kalm1.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/guide-logging-for-kalm2.png b/versioned_docs/version-v0.1.0-alpha.5/assets/guide-logging-for-kalm2.png deleted file mode 100644 index 03cbec0..0000000 Binary files 
a/versioned_docs/version-v0.1.0-alpha.5/assets/guide-logging-for-kalm2.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/guide-logging-for-kalm3.png b/versioned_docs/version-v0.1.0-alpha.5/assets/guide-logging-for-kalm3.png deleted file mode 100644 index deb83cf..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/guide-logging-for-kalm3.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/guide-logging-for-kalm4.png b/versioned_docs/version-v0.1.0-alpha.5/assets/guide-logging-for-kalm4.png deleted file mode 100644 index 51e17fb..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/guide-logging-for-kalm4.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/guide-logging-for-kalm5.png b/versioned_docs/version-v0.1.0-alpha.5/assets/guide-logging-for-kalm5.png deleted file mode 100644 index e785f6e..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/guide-logging-for-kalm5.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/increase-replicas.png b/versioned_docs/version-v0.1.0-alpha.5/assets/increase-replicas.png deleted file mode 100644 index a239801..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/increase-replicas.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/job-component.png b/versioned_docs/version-v0.1.0-alpha.5/assets/job-component.png deleted file mode 100644 index d7fb268..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/job-component.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/kalm-empty-state.png b/versioned_docs/version-v0.1.0-alpha.5/assets/kalm-empty-state.png deleted file mode 100644 index ede1ca1..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/kalm-empty-state.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/kalm.png b/versioned_docs/version-v0.1.0-alpha.5/assets/kalm.png deleted file mode 100644 index 1b4a3ad..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/kalm.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/log-button-example.png b/versioned_docs/version-v0.1.0-alpha.5/assets/log-button-example.png deleted file mode 100644 index 9c855b0..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/log-button-example.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/log-button.png b/versioned_docs/version-v0.1.0-alpha.5/assets/log-button.png deleted file mode 100644 index 47f170b..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/log-button.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/log-component.png b/versioned_docs/version-v0.1.0-alpha.5/assets/log-component.png deleted file mode 100644 index fdedac2..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/log-component.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/log-view.png b/versioned_docs/version-v0.1.0-alpha.5/assets/log-view.png deleted file mode 100644 index 89b8dbe..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/log-view.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/login-screen.png b/versioned_docs/version-v0.1.0-alpha.5/assets/login-screen.png deleted file mode 100644 index a7006dc..0000000 Binary files 
a/versioned_docs/version-v0.1.0-alpha.5/assets/login-screen.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/main-page.png b/versioned_docs/version-v0.1.0-alpha.5/assets/main-page.png deleted file mode 100644 index 96d6e6a..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/main-page.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/mount-existing-disk.png b/versioned_docs/version-v0.1.0-alpha.5/assets/mount-existing-disk.png deleted file mode 100644 index 2dc4f0b..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/mount-existing-disk.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/name-app.png b/versioned_docs/version-v0.1.0-alpha.5/assets/name-app.png deleted file mode 100644 index 832eaaf..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/name-app.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/networking-tab.png b/versioned_docs/version-v0.1.0-alpha.5/assets/networking-tab.png deleted file mode 100644 index b30b51a..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/networking-tab.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/nginx-success.png b/versioned_docs/version-v0.1.0-alpha.5/assets/nginx-success.png deleted file mode 100644 index 4911c10..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/nginx-success.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/open-in-browser.png b/versioned_docs/version-v0.1.0-alpha.5/assets/open-in-browser.png deleted file mode 100644 index 640456d..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/open-in-browser.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/placeholder.png b/versioned_docs/version-v0.1.0-alpha.5/assets/placeholder.png deleted file mode 100644 index 48e510c..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/placeholder.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/ports.png b/versioned_docs/version-v0.1.0-alpha.5/assets/ports.png deleted file mode 100644 index bcfe75a..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/ports.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/pull-error.png b/versioned_docs/version-v0.1.0-alpha.5/assets/pull-error.png deleted file mode 100644 index 023e44b..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/pull-error.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/redis-command.png b/versioned_docs/version-v0.1.0-alpha.5/assets/redis-command.png deleted file mode 100644 index f3b3b0e..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/redis-command.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/registry-validated.png b/versioned_docs/version-v0.1.0-alpha.5/assets/registry-validated.png deleted file mode 100644 index 0986305..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/registry-validated.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/rolling-update.png b/versioned_docs/version-v0.1.0-alpha.5/assets/rolling-update.png deleted file mode 100644 index a9e62a3..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/rolling-update.png and /dev/null differ diff --git 
a/versioned_docs/version-v0.1.0-alpha.5/assets/routes-and-certs.png b/versioned_docs/version-v0.1.0-alpha.5/assets/routes-and-certs.png deleted file mode 100644 index 2e92205..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/routes-and-certs.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/setup-domain.png b/versioned_docs/version-v0.1.0-alpha.5/assets/setup-domain.png deleted file mode 100644 index c063c6a..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/setup-domain.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/shell-button.png b/versioned_docs/version-v0.1.0-alpha.5/assets/shell-button.png deleted file mode 100644 index 05be278..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/shell-button.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/shell-cmd.png b/versioned_docs/version-v0.1.0-alpha.5/assets/shell-cmd.png deleted file mode 100644 index ee9dd49..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/shell-cmd.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/specify-host.png b/versioned_docs/version-v0.1.0-alpha.5/assets/specify-host.png deleted file mode 100644 index 9880f6e..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/specify-host.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/specify-ip.png b/versioned_docs/version-v0.1.0-alpha.5/assets/specify-ip.png deleted file mode 100644 index 22852eb..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/specify-ip.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/three-pods.png b/versioned_docs/version-v0.1.0-alpha.5/assets/three-pods.png deleted file mode 100644 index d3b445a..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/three-pods.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/token-input.png b/versioned_docs/version-v0.1.0-alpha.5/assets/token-input.png deleted file mode 100644 index 3368aab..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/token-input.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-0.jpeg b/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-0.jpeg deleted file mode 100644 index 2eca3f4..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-0.jpeg and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-1.png b/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-1.png deleted file mode 100644 index 8921876..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-1.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-10.png b/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-10.png deleted file mode 100644 index 2e32027..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-10.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-2.png b/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-2.png deleted file mode 100644 index 0c4e508..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-2.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-3.png b/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-3.png deleted file mode 100644 index d478e4a..0000000 Binary files 
a/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-3.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-4.png b/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-4.png deleted file mode 100644 index 9f6fbbc..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-4.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-5.png b/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-5.png deleted file mode 100644 index 992339e..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-5.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-6.png b/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-6.png deleted file mode 100644 index e13680e..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-6.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-7.png b/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-7.png deleted file mode 100644 index 5dec571..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-7.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-8.png b/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-8.png deleted file mode 100644 index 9875c61..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-8.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-9.png b/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-9.png deleted file mode 100644 index 42e8ddd..0000000 Binary files a/versioned_docs/version-v0.1.0-alpha.5/assets/wp-tut-9.png and /dev/null differ diff --git a/versioned_docs/version-v0.1.0-alpha.5/azure-aks.md b/versioned_docs/version-v0.1.0-alpha.5/azure-aks.md deleted file mode 100644 index a79dabc..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/azure-aks.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -title: Install on Azure ---- - -There are a few different ways to create a Kubernetes Cluster on Azure. The following guide utilizes Terraform to provision an Azure AKS cluster. - -## Step 1: Install Prerequisits - -- Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) -- Install [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli?in=terraform/azure-get-started) -- Install [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) - -## Step 2: Azure Setup - -You need an Azure account which you can log into the azure CLI with: - -```sh -az login -``` - -Next, create a service principal account so Terraform can authenticate to Azure: - -```sh -az ad sp create-for-rbac --skip-assignment -``` - -Note: the resulting output only appears once. Save the appId and password immediately. Otherwise it takes non-trivial effort to retrieve the information. - -## Step 3: Terraform Apply - -Clone the repository below and `cd` into the aks directory - -```sh -git clone https://github.com/kalmhq/tf-scripts -cd tf-scripts/aks -``` - -Open 'terraform.tfvars', and paste in the appId and password from Step 2. - -``` -# terraform.tfvars -appId = "REPLACE_WITH_YOUR_APP_ID" -password = "REPLACE_WITH_YOUR_PASSWORD" -``` - -Install the cluster with the following commands. - -```bash -terraform init -terraform apply -``` - -Type `yes` to confirm the installation. - -After 5-10 minutes, the cluster should be created. 
Once complete, record the **kubernetes_cluster_name** and **resource_group_name** from the terraform output. - -```sh -terraform output -``` - -Configure kubectl to use the new cluster. - -```bash -az aks get-credentials --resource-group NAME_OF_YOUR_RESOURCE_GROUP --name NAME_OF_YOUR_CLUSTER -``` - -Verify the cluster is properly set up and accessible. - -```sh -kubectl cluster-info -``` - -## Step 4: Install Kalm - -Once the cluster is set up, install Kalm with the following command. - -```bash -curl -sL https://get.kalm.dev | bash -``` - -To enable localhost access, open a port via: - -```bash -kubectl port-forward -n kalm-system \ - $(kubectl get pod -n kalm-system \ - -l app=kalm \ - -ojsonpath="{.items[0].metadata.name}") \ - 3010:3010 -``` - -Now open http://localhost:3010/ - you can now test Kalm on your Azure AKS cluster! - -## Clean Up - -Delete the cluster to avoid resource charges. - -```bash -terraform destroy -``` - -## Next Step - -You've now set up Kalm on an Azure AKS cluster. To get a greater sense of how Kalm works, see the [Hello Kalm](/docs/tut-hello) tutorial. diff --git a/versioned_docs/version-v0.1.0-alpha.5/cronjob.md b/versioned_docs/version-v0.1.0-alpha.5/cronjob.md deleted file mode 100644 index b4c4ce7..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/cronjob.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: Schedule CronJobs ---- - -Kubernetes CronJobs run defined tasks on one or more pods which are terminated once the task is over. They can be scheduled to run at specified intervals (e.g. every minute), and are perfect for periodic/recurring tasks (e.g. email notifications, collecting data, etc.). - -In this guide, we'll walk you through how to set up a basic Kubernetes CronJob in Kalm. - -## Example Container Setup - -We'll create a simple application with a recurring component that echoes "performing a job" before shutting the pod down. - -- create a new Application -- create a new Component -- select `busybox` as the Image -- select `Cronjob` as the Type -- enter `* * * * *` (every minute) for the Cronjob Schedule -- enter `/bin/sh -c 'echo performing a job'` under Config Command - -![job component](assets/job-component.png) - -- click `Deploy Component` - -Shortly after deploying you will see a list of jobs appearing 1 minute apart, each with a `Terminated: Completed` status - -![job complete](assets/completed-jobs.png) - -If you click the `logs` action next to each pod, you should see "performing a job" echoed as expected. diff --git a/versioned_docs/version-v0.1.0-alpha.5/faq-auth.md b/versioned_docs/version-v0.1.0-alpha.5/faq-auth.md deleted file mode 100644 index 21dfb8d..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/faq-auth.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Authorization ---- - -Kapp uses k8s [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) for authorization. Generally, authorization info is passed through the `Authorization` header. A user should provide enough info to construct this header for kapp, and kapp will then pass it to the Kubernetes API server. If you are running kapp behind a proxy which is in charge of authentication and provides the `Authorization` header, kapp will use the header directly. The Kubernetes API server needs to be configured properly to accept these tokens. - -_IMAGE_PLACE_HOLDER_ - -### Username and password: -You need to configure a static user for k8s first. This approach is easy to understand but rarely used, as extra configuration and a restart are required.
Learn more from https://kubernetes.io/docs/reference/access-authn-authz/authentication/#static-password-file - -### Token -Kubernetes has various ways to configure tokens. -- [Static Token](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#static-token-file). This method requires extra configuration on your API server, and a restart is required. -- [Bootstrap Tokens](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#bootstrap-tokens) -- [Service Account Tokens](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#service-account-tokens). The following sections show how to log in using a service account token. -- [OIDC Tokens](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#openid-connect-tokens). This is an advanced way to authorize users. There is another article about using OIDC with kapp; you can find more details there. TODO - -## Create a test user - -IMPORTANT: This is for testing only! Do not create tokens this way on a production cluster. Make sure that you know what you are doing before proceeding. Granting admin privileges to the Dashboard's Service Account can be a security risk. - -To bypass this configuration and restart, this guide shows how to create a new user using the Service Account mechanism of Kubernetes, grant this user admin permissions, and log in to the Kapp Dashboard using the bearer token tied to this user. - -The commands should be executed in the same shell session. - -1. Create a service account - -```bash -kubectl create sa kapp-sample-user -``` - -2. Grant admin permission to the service account - -```bash -kubectl create clusterrolebinding kapp-sample-user-admin --user=system:serviceaccount:default:kapp-sample-user --clusterrole=cluster-admin -``` - -3. Get the service account secret name - -``` -secret=$(kubectl get sa kapp-sample-user -o json | jq -r .secrets\[\].name) -echo $secret -``` - -You will see a token name like `kapp-sample-user-token-vbhwr` - -4. Get the secret token - -``` -secret_token=$(kubectl get secret $secret -o json | jq -r '.data["token"]' | base64 -D) -echo $secret_token -``` - -5. Use the token you got to log in - -_IMAGE_PLACEHOLDER_ - -You should now be logged in successfully. - -_IMAGE_PLACEHOLDER_ diff --git a/versioned_docs/version-v0.1.0-alpha.5/faq-permissions.md b/versioned_docs/version-v0.1.0-alpha.5/faq-permissions.md deleted file mode 100644 index adcc788..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/faq-permissions.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Permissions ---- - -## How Permission Works - -Permissions are a big topic. This part will be split into two parts. The first is "How to configure user permissions when using kapp?" The second is "How to build/integrate kapp and k8s with an IdP (Identity Provider)?" - -The following topics are all related to permissions. We will reorganize them later.
- 

-- Aladdin research on rbac: https://quip.com/FycNArbIZh7v -- Wanglei's note about using keycloak: #11 -- David's thoughts about kapp architecture: #13 - -Reference: -- https://kubernetes.io/docs/reference/access-authn-authz/authentication/#openid-connect-tokens -- https://github.com/kubernetes/dashboard/blob/master/docs/user/access-control/README.md#authorization-header \ No newline at end of file diff --git a/versioned_docs/version-v0.1.0-alpha.5/first-deploy.md b/versioned_docs/version-v0.1.0-alpha.5/first-deploy.md deleted file mode 100644 index e69de29..0000000 diff --git a/versioned_docs/version-v0.1.0-alpha.5/google-gke.md b/versioned_docs/version-v0.1.0-alpha.5/google-gke.md deleted file mode 100644 index 36317bf..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/google-gke.md +++ /dev/null @@ -1,152 +0,0 @@ ---- -title: Install on Google Kubernetes Engine ---- - -The simplest way to provision a cluster on Google Cloud Platform is via Google Kubernetes Engine. - -As a prerequisite, please install and authenticate the `gcloud` command line tool. Instructions can be found [here](https://cloud.google.com/sdk/docs). - -## Step 1: Create a GKE Cluster - -There are a few different ways to create a GKE Cluster. You can create one using the gcloud CLI alone, the web interface, or Terraform. - -### Option A - gcloud command line - -To begin, choose a Google Cloud project: - -```bash -export PROJECT_ID=hello-kalm -``` - -Note: If you don't have an existing Google Cloud project, you can create one with: - -```bash -export PROJECT_ID=hello-kalm -gcloud projects create $PROJECT_ID -``` - -Make sure billing is enabled. - -You need to enable the Kubernetes Engine API as well: - -```bash -gcloud services enable container.googleapis.com -```
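-If you're not sure whether billing is active on the project, one way to check is a quick sketch using the `gcloud beta billing` surface (this may prompt you to install the beta component):
-
-```bash
-# Print whether billing is enabled for the project; "True" means you can proceed.
-gcloud beta billing projects describe $PROJECT_ID \
-  --format="value(billingEnabled)"
-```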
- -Next, provision a cluster with 4 nodes - -```bash -export M_TYPE=n1-standard-2 && \ -export ZONE=us-west2-a && \ -export CLUSTER_NAME=${PROJECT_ID}-${RANDOM} && \ -gcloud container clusters create $CLUSTER_NAME \ - --cluster-version latest \ - --machine-type=$M_TYPE \ - --num-nodes 4 \ - --zone $ZONE \ - --project $PROJECT_ID -``` - -The creation of the cluster will take a few minutes. Once complete, configure kubectl to use the new cluster: - -```bash -gcloud container clusters get-credentials $CLUSTER_NAME \ - --zone $ZONE \ - --project $PROJECT_ID -``` - -Verify the cluster is properly setup and accessible. - -```sh -kubectl cluster-info -``` - -### Option B - Terraform - -If you are more familiar with Terraform, you can provision a demo cluster with the following steps. - -First, install Terraform. - -Give Terraform access to the Application Default Credentials (ADC). - -```bash -gcloud auth application-default login -``` - -Clone the repository below. - -```bash -git clone https://github.com/kalmhq/tf-scripts -cd tf-scripts/gke -``` - -Open 'terraform.tfvars', and specify the ID of the Google Cloud project you would like to install to. - -``` -# terraform.tfvars -project_id = "REPLACE_ME" -region = "us-west2" -``` - -Install the cluster with the following commands. - -```bash -terraform init -terraform apply -``` - -Type `yes` to confirm the installation. - -After 5-10 minutes, the cluster should be created. Once complete, retrieve the name of the newly created cluster. - -```bash -terraform output -``` - -Configure kubectl to use the new cluster. - -```bash -gcloud container clusters get-credentials NAME_OF_YOUR_CLUSTER --zone ZONE_OF_CLUSTER -``` - -Verify the cluster is properly setup and accessible. - -```sh -kubectl cluster-info -``` - -## Step 2: Install Kalm - -Once the cluster is setup, install Kalm with the following command. - -```bash -curl -sL https://get.kalm.dev | bash -``` - -To enable localhost access, open a port via: - -```bash -kubectl port-forward -n kalm-system \ - $(kubectl get pod -n kalm-system \ - -l app=kalm \ - -ojsonpath="{.items[0].metadata.name}") \ - 3010:3010 -``` - -Now open http://localhost:3010/ - you can now test Kalm on your Google GKE cluster! - -## Clean Up - -Delete the cluster to avoid resource charges. - -If you created the cluster with Terraform: - -```bash -terraform destroy -``` - -## Next Step - -You've now setup Kalm on a GKE cluster. To get a greater sense of how Kalm works, see the [Hello Kalm](/docs/tut-hello) tutorial. diff --git a/versioned_docs/version-v0.1.0-alpha.5/guide-aws.md b/versioned_docs/version-v0.1.0-alpha.5/guide-aws.md deleted file mode 100644 index f4d3da0..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/guide-aws.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: Install on AWS ---- - -Work In Progress - -## Via KOPS - -## Via diff --git a/versioned_docs/version-v0.1.0-alpha.5/guide-config.md b/versioned_docs/version-v0.1.0-alpha.5/guide-config.md deleted file mode 100644 index 5d733b8..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/guide-config.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -title: Add Env Variables & Configs ---- - -A well designed application is likely to have configurations which vary between deployments. Kubernetes makes it easy to override configurations specified in images. - -
- -
- 

-In this guide you will: - -- Create a container with the _redis_ image -- Change a variable using the CLI (ephemeral state) -- Add a Config File to change a variable (with persistent state) -- Create an Environment Variable - -## Example Container Setup - -Let's go through an example of configuring Redis. Our goal is to change the `maxmemory` parameter. (Imagine that you need a larger amount of memory for production vs dev) - -Create a new Application - -Create a new Component - -Set the image to _"redis"_ - -## Container Lifecycle - -To understand why configuration should be set up outside the container itself, let's first try something that doesn't work: configuring the application right in the container. - -Click the _Shell_ icon to open a shell attached to our redis container. In the shell, enter: - -``` -redis-cli CONFIG SET maxmemory 2mb -``` - -Verify that the config has been correctly set - -``` -redis-cli CONFIG GET maxmemory -``` - -Everything seems to be working. However, let's try deleting the pod. - -Go back to your application **Components** and click on the **Delete** icon - -![delete-pod](assets/delete-pod.png) - -As soon as the pod is deleted, Kubernetes will schedule a new one to be deployed, automatically maintaining the desired state of 1 pod running. - -After the new pod is created, click on the _Shell_ icon next to it. - -Check the `maxmemory` setting via - -``` -redis-cli CONFIG GET maxmemory -``` - -So what happened to our setting? It was lost when the pod was destroyed and recreated. Since pods and containers are ephemeral, we can't store configuration states within the pod. - -## Adding a Config File - -The correct way to configure containers is via environment variables and configuration files. Redis happens to use config files. - -Go to **Components**, click on **Edit** - -In the Config Files section click **New File** - -For Mount Path, type `/redis-config/redis.conf` - -Click on the **Edit** Icon - -A modal editor will pop up. Inside the editor, enter our desired config -of `maxmemory 5mb` - -![edit config file](assets/edit-config-file.png) - -Click **Save** - -We've just created a config file. Now we need to override the startup command to tell redis to use it. - -In the **Command** input box, type `redis-server "/redis-config/redis.conf"` - -![redis command](assets/redis-command.png) - -Click **Update Component** - -After the update is complete, open the shell again and type - -``` -redis-cli CONFIG GET maxmemory -``` - -Our configuration is picked up, this time via the configuration file. - -Now if we delete the pod again (go ahead and try it), the configuration will still be there. - -## Environment Variables - -Other Applications may use environment variables instead of config files.
To add an environment variable, click _New Variable_ in the Environment Variables section of the Edit Component Form: - -![env variables](assets/env-variables.png) - -For this example, you can enter a _Name_ of `MY_CUSTOM_SETTING` and a _Value_ of `42` - -Click **Update Component** to apply this change - -To verify that the Environment Variable has been set, open a shell and type - -``` -echo $MY_CUSTOM_SETTING -``` - -You should see the value set above (42) as the output diff --git a/versioned_docs/version-v0.1.0-alpha.5/guide-export-kalm-resources.md b/versioned_docs/version-v0.1.0-alpha.5/guide-export-kalm-resources.md deleted file mode 100644 index db1cc36..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/guide-export-kalm-resources.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Import / Export ---- - -:::info -This is a temporary solution; Kalm will provide better import and export functions soon. -::: - -Sometimes you need to export an application and reinstall it on another cluster. We'll walk you through how to export resources from KALM. - -## Prerequisites - -- Make sure KALM is installed on your cluster. -- Make sure you have ***kubectl*** installed and configured for your cluster. - -### Export & import an application - -Run the following command. A resource file that describes the application will be created: *$KALM_APP_NAME.bak.yaml* - -```bash -export KALM_APP_NAME= -curl https://raw.githubusercontent.com/kalmhq/kalm/add_script_to_export_kalm_resources/scripts/export-resources.sh > export-resources.sh ; bash export-resources.sh $KALM_APP_NAME $KALM_APP_NAME.bak.yaml -``` - -Now run the following command in another instance of KALM (make sure that there is no application with the same name in KALM, otherwise there will be conflicts when importing the application). - -```bash -kubectl apply -f $KALM_APP_NAME.bak.yaml -``` - -The application you exported will be imported to your other Kalm instance within a few minutes. - -### Migrate from one KALM to another - -1. Prepare a new KALM. https://kalm.dev/docs/install -2. Export all resources from the old KALM -3. Import all resources to the new KALM - -Run the following command to export all resources from the old KALM. - -```bash -curl https://raw.githubusercontent.com/kalmhq/kalm/add_script_to_export_kalm_resources/scripts/export-resources.sh > export-resources.sh ; bash export-resources.sh all-application kalm.bak.yaml -``` - -Run the following command to import all resources to the new KALM (make sure in your new cluster, there is no application with the same name as the old cluster). -*Persistent data, such as PostgreSQL data and files that you mount on disks, will not be migrated. If you want to migrate this data, you will need to do so through your own customized process. - -```bash -kubectl apply -f kalm.bak.yaml -``` diff --git a/versioned_docs/version-v0.1.0-alpha.5/guide-logging-for-kalm.md b/versioned_docs/version-v0.1.0-alpha.5/guide-logging-for-kalm.md deleted file mode 100644 index a584c3c..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/guide-logging-for-kalm.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: Logging System ---- - -For production environments, it's common to use a more complex logging system (beyond basic container level logging). Often logging systems need to query and collect data, access entire pipelines, and generate analysis logs. For this type of advanced aggregated logging across the entire cluster, KALM provides some simple out of the box logging system solutions.
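-As a baseline for comparison, basic container-level logging is what plain `kubectl` already gives you. A minimal sketch (the pod and namespace names are placeholders):
-
-```bash
-# Tail the most recent log lines of a single container and follow new output.
-kubectl logs -n my-namespace my-pod --tail=100 -f
-
-# Or limit output to the last hour instead of following.
-kubectl logs -n my-namespace my-pod --since=1h
-```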
- -EFK (Elasticsearch) and PLG (Loki) are the most popular logging systems at the moment. They each have their own advantages and Kalm supports them both. - -## Using PLG (Promtail + Loki + Grafana) - -The key component in this stack is Loki. Loki has a set of components which are internally referred to as modules. Each component spawns a gRPC server for internal traffic and an HTTP/1 server for external API requests. - -There are two ways to deploy Loki - monolithic mode and microservices mode. - -### Monolithic mode (single process mode) - -Monolithic mode (single process mode) is ideally suited for local development, small workloads, and for evaluation purposes. - -#### Prerequisites - -- Make sure KALM is installed on your cluster. -- Make sure you have ***kubectl*** and it is configured to your cluster. -- Make sure your cluster has at least 500m cpu and 800Mi memory (each node has at least 100m cpu and 100Mi) to install PLG. - -#### Install PLG on KALM - -Run the following command to install the monolithic mode PLG. _In the future, this step will be able to be done through the web interface._ - -```bash -kubectl apply -f - < - -
- 

-Sometimes it's useful to view the log output of a particular container. You can quickly view logs for any container within Kalm through the log button: - -![log button](assets/log-button.png) - -In this guide you will: - -- Create an application component that continuously logs output -- View the container logs within Kalm - -## Example Container Setup - -Let's create a container that logs output every second. Create a new application, then add a single Component named **logviewer** with the image set to **busybox**. - -In the **Command** input box, enter the following command, which outputs a timestamp every second: - -``` -/bin/sh -c 'i=0; while true; do echo "$i: $(date)"; i=$((i+1)); sleep 1; done' -``` - -![log component](assets/log-component.png) - -Click **Deploy Component** to instantiate the component container. - -## View Log - -After your component is successfully deployed, click the _Log Icon_: - -![log button example](assets/log-button-example.png) - -
- 
You should see the Log View: - -![log view](assets/log-view.png) - -## Logging System - -While it's helpful to view logs for a single container individually as shown above, often you may want to view aggregated logs for multiple containers for specific time periods. For more on setting up a more robust logging system through Kalm, head to our [Logging System](./guide-logging-for-kalm.md) section. diff --git a/versioned_docs/version-v0.1.0-alpha.5/guide-minikube.md b/versioned_docs/version-v0.1.0-alpha.5/guide-minikube.md deleted file mode 100644 index 9358678..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/guide-minikube.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: Install on Minikube ---- - -The simplest way to provision a localhost cluster is through minikube. - -## Step 1: Install minikube - -Please follow the [minikube official documentation](https://kubernetes.io/docs/tasks/tools/install-minikube/) to install minikube. - -## Step 2: Start a minikube cluster - -It is recommended to use 8G of memory and 4 CPU cores to test Kalm with. You can always adjust resources based on your environment. - -```bash -minikube start --memory 8192 --cpus 4 -``` - -After the cluster is up and running, open a new terminal and type the following command. _You may be prompted to enter your password._ - -```bash -minikube tunnel -``` - -This creates a route to services deployed with type LoadBalancer and sets their Ingress to their ClusterIP. - -## Step 3: Install Kalm - -Once the cluster is set up, install Kalm with the following command: - -```bash -curl -sL https://get.kalm.dev | bash -``` - -To enable localhost access, open a port via: - -```bash -kubectl port-forward -n kalm-system \ - $(kubectl get pod -n kalm-system \ - -l app=kalm \ - -ojsonpath="{.items[0].metadata.name}") \ - 3010:3010 -``` - -Now open http://localhost:3010/ - you can now play around with Kalm on your localhost cluster! - -## Clean Up - -Delete the cluster after you're finished testing. - -``` -minikube delete -``` - -## Next Step - -You've now set up Kalm on a minikube cluster. To get a greater sense of how Kalm works, see the [Hello Kalm](./tut-hello.md) tutorial. diff --git a/versioned_docs/version-v0.1.0-alpha.5/https-certs.md b/versioned_docs/version-v0.1.0-alpha.5/https-certs.md deleted file mode 100644 index ff70046..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/https-certs.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: HTTPS Certificates ---- - -## Overview - -To enable HTTPS access to your applications, you need a certificate from a Certificate Authority. Kalm can help you create a new certificate via Let's Encrypt, or hook up existing ones. Domains with certificates managed by Kalm can be easily routed to any container on your cluster. - -## Automated Certificates with Let's Encrypt - -- Click **Certificates** in the Navigation Sidebar -- Click **New Certificate** -- The Certificate Creation page displays your cluster IP. You need to point domains you want to configure to this cluster IP by adding an A Record. _(Specific instructions depend on your domain provider)_ - -* Enter a **Certificate Name** -* Enter one or more domains in the **Domains** field. You should get an indicator next to each domain which specifies if the IP is properly configured. If you get a warning symbol, please check your DNS configurations.
(DNS records can sometimes take a few moments to update) -* Click **Create** to create your certificate - -![Create Cert](assets/create-certificate.png) - -## Upload Existing Certificate - -If you want to use an existing certificate, click **Use an existing certificate** and paste your Certificate and Private Key. - -![Upload Cert](assets/existing-certificate.png) - -## Additional Instructions - -### Using Certified Domains in Routes - -Domains which have certificates properly configured can be used in Routes to handle HTTPS traffic. See the [Routes Guide](/) _(coming soon!)_ for more details. - -![Routes HTTPS](assets/routes-and-certs.png) - -The screenshot above shows that the "test" domain can be used in Routes because we have a certificate created for it. However, the "random" domain does not have a certificate, so it displays a warning flag. - -### Wildcard Certificates - -We have plans to support wildcard certificates in the near future, and can suggest workarounds in the meantime. Email david@kalm.dev if you need this. diff --git a/versioned_docs/version-v0.1.0-alpha.5/install.md b/versioned_docs/version-v0.1.0-alpha.5/install.md deleted file mode 100644 index 075efe1..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/install.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: Installation ---- - -## Compatibility - -Kalm is optimized to work with the latest version of Kubernetes (currently 1.18.x), and is backwards compatible down to 1.15.x. - -| Kalm version | k8s 1.15.x | k8s 1.16.x | k8s 1.17.x | k8s 1.18.x | -| ------------ | ---------- | ---------- | ---------- | ---------- | -| v0.1.0 | ✔ | ✔ | ✔ | ✔ | - -For smooth performance, we recommend a Kubernetes cluster with at least 4 vCPUs and 8G of memory. - -## Step 1: Prerequisites - -### Cluster Setup - -Kalm can be used to manage any Kubernetes cluster. -For the purpose of this tutorial, we recommend that you try Kalm using a [Minikube](./guide-minikube.md) localhost cluster first. - -Alternatively, see the References section for provisioning clusters on [AWS](./amazon-eks.md), [Google Cloud](./google-gke.md) and [Azure](./azure-aks.md). - -### Install Kubectl - -Installation of Kalm requires kubectl, which can be installed according to the official Install and Set Up kubectl docs. - -:::note -Please make sure that the version of kubectl is sufficient. It is strongly recommended that you use the version corresponding to the cluster. Using an earlier version of kubectl may cause errors in the installation process. -::: - -## Step 2: Install Kalm - -:::caution -Before proceeding, please make sure that the current context of your kubectl is set to the correct cluster. -::: - -Kalm can be installed as a [Kubernetes Operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) directly onto your cluster via: - -```bash -curl -sL https://get.kalm.dev | bash -``` - -This command installs Kalm plus a few dependencies, and typically takes 3-5 minutes to complete. Relax in the meantime, or watch this short video on how Kalm works: -
- -
- -
- 

-The installation script will give you real-time feedback of services spinning up. Once you see **Installation Complete**, move on to the next step. - -## Step 3: Access Webserver - -To enable browser access, open a port via: - -```bash -kubectl port-forward -n kalm-system \ - $(kubectl get pod -n kalm-system \ - -l app=kalm \ - -ojsonpath="{.items[0].metadata.name}") \ - 3010:3010 -``` - -Now open http://localhost:3010/ - -![login screen](assets/kalm-empty-state.png) - -## Step 4: (Optional) Configure Access - -Although we can visit Kalm through localhost port forwarding, it is a good idea to set up a domain and basic login, so you can access Kalm on any computer and share it with colleagues. - -To set up permanent access, click the **FINISH THE SETUP STEPS** button in the top navigation bar and follow the onscreen directions. - -![setup domain](assets/setup-domain.png) - -Point a domain to the cluster ip. If you don't have a domain, you can use the wildcard DNS nip.io: - -_.nip.io_ - -:::note -For minikube, if no ip is shown, run `minikube tunnel` and refresh the page. -::: - -Click **Check and Continue**. After configuration is complete, record the generated **Email** and **Password** login information. From this point on, port-forwarding is no longer required, and you should be able to log in via the domain you specified. - -## Next Step - -Congratulations! Kalm is now properly set up and running. Next, let's create our first application to see how Kalm works. diff --git a/versioned_docs/version-v0.1.0-alpha.5/intro.md b/versioned_docs/version-v0.1.0-alpha.5/intro.md deleted file mode 100644 index f478f28..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/intro.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: What is Kalm -hide_title: false -hide_table_of_contents: true -sidebar_label: What is Kalm - -# SEO options -description: What is kalm -keywords: - - docs - - docusaurus -image: https://i.imgur.com/mErPwqL.png ---- - -Kalm (Kubernetes AppLication Manager) is an open source tool that makes it easier to manage applications on kubernetes without struggling with yamls. Kalm comes with a web interface for the most common operations including: - -- Creation of new application deployments -- Deploying, updating, and scaling existing deployments -- Volume, config, and secret management - -Kalm is installed as a kubernetes controller directly on your cluster, and automatically sets up istio and cert-manager, which makes it easy to configure HTTPS certificates, routes, SSO, and a logging system out of the box. - -![Web Interface](assets/kalm.png) - -## Why Kalm - -Kubernetes is a powerful and flexible tool for managing microservices. However, first-time setup and configuration can be daunting. The high upfront cost makes it prohibitive for smaller teams to adopt kubernetes. We made kalm in an attempt to decrease the cognitive load for developers to interact with kubernetes in both development and production environments. - -Kalm tries to reduce complexity in three different ways: - -1. Provide an intuitive graphical interface for the most common operations. -2. Introduce higher level Custom Resource Definitions such as `Application`. These CRDs help reduce the amount of boilerplate configuration and copy-pasting. -3. Designed with popular extensions in mind - Kalm is designed to work with istio, cert-manager, prometheus and more, which makes setting up a productive stack quick and easy. - -Next, let's install Kalm and go through an example to illustrate how it works.
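-(If you are curious what these higher-level CRDs look like on a live cluster, one quick way to peek once Kalm is installed, assuming `kubectl` points at that cluster, is to list the registered CRDs:
-
-```bash
-# Kalm's CRDs (such as Application) appear alongside those of istio and cert-manager.
-kubectl get crds | grep -i kalm
-```
-)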
diff --git a/versioned_docs/version-v0.1.0-alpha.5/probes.md b/versioned_docs/version-v0.1.0-alpha.5/probes.md deleted file mode 100644 index 9545459..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/probes.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: Health Checking Probes ---- - -Kubernetes Liveness and Readiness probes are useful for checking the health of pods. Readiness probes determine if a pod is ready to receive traffic, and Liveness probes signal if a pod's containers should be restarted. - -Both liveness and readiness probes support a variety of action types to determine if a pod is healthy: - -- HTTP: Healthy means a request to some specified HTTP endpoint returned a response between 200 and 399 -- Command: Healthy means a command executed successfully (return code 0) -- TCP: Healthy means a specific TCP socket was successfully opened - -## Liveness Probe Example - -Let's go through an example of a liveness probe implemented via a command. - -- Create a new application -- Create a new component, with `busybox` as the image -- Add the command `/bin/sh -c 'touch /tmp/healthy; sleep 10000'` - -This creates a file upon startup, in this case representing the health of our Component. - -- Click the 'Health' Tab -- Select `Command` from the Liveness Probe dropdown. -- Enter `cat /tmp/healthy` as the command - -The `cat` command will execute successfully if the file exists. - -- Decrease the number of consecutive tests from `3` to `1`. This will save us some time to see the results -- Click **Deploy Component** - -The pod should spin up successfully. Now let's delete the file by opening a shell. - -``` -rm /tmp/healthy -``` - -Within 20 seconds, the Terminal will become disconnected because the container is restarted. Go back to the Component view, and you will see the number of Restarts increase from 0 to 1. - -By restarting the pod, the "problem" of the missing /tmp/healthy is fixed, as the file is created by the startup command. This demonstrates the purpose of the livenessProbe: triggering automatic restarts in an attempt to fix problematic pods. - -## Readiness Probe Example - -Readiness Probes are very similar. Let's create one with an HTTP action. - -- Create a new component with `quay.io/openshiftlabs/simpleservice:0.5.0` as the image -- In the **Networking** tab, add a port named `healthport` with Container Port set to `9876` -- In the **Health** tab, create an HTTP readiness probe with `/health` and `9876` for the port - -The pod should be ready according to the probe. - -The image we are using allows us to insert an [artificial delay to /health](https://github.com/mhausenblas/simpleservice#changing-runtime-behaviour) by adding an environment variable: - -- Add an environment variable `HEALTH_MAX` with the value `5000`, which introduces a delay of up to 5 seconds, enough to make the probe time out - -The probe should now fail. - -- Remove the environment variable and the probe should start working again. diff --git a/versioned_docs/version-v0.1.0-alpha.5/ref-crd.md b/versioned_docs/version-v0.1.0-alpha.5/ref-crd.md deleted file mode 100644 index c18fc69..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/ref-crd.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: Custom Resource Definitions ---- - -Discuss: do we need this? What to put here that would be useful and add to transparency?
diff --git a/versioned_docs/version-v0.1.0-alpha.5/registry.md b/versioned_docs/version-v0.1.0-alpha.5/registry.md deleted file mode 100644 index 557d335..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/registry.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Private Image Registry ---- - -If your image resides in a private registry, you must configure permissions or you'll receive a `PullImageError` when deploying applications. - -Kalm can help you simplify the process of configuring Private Image Registries. - -## Example Usage - -As a prerequisite you need access to a private registry. One option is to create a free private repository on Docker Hub. - -Let's upload an image to a private dockerhub registry. - -Start by pulling down the `busybox` image onto your local machine, then re-tag it and upload to your private repo. Replace `REPO_NAME` with the name of your repository. - -``` -docker pull busybox -docker tag busybox:latest REPO_NAME/private-busybox:latest -docker push REPO_NAME/private-busybox:latest -``` - -Now let's create a new application with a component using this private image. - -- Create an Application -- Create a Component with `REPO_NAME/private-busybox:latest` as the image -- In the command field, enter /bin/sh -c \`sleep 10000\` to keep the container alive. -- Click **Deploy Component** - -You should get a "Failed to pull Image..." error. - -![cannot pull error](assets/pull-error.png) - -This error is expected because the pod does not have permission to pull the specified image. - -### Adding a Private Repository - -We can fix the issue by adding a private registry. - -- Click **Registries** in the left navigation sidebar -- Click **Add Registry** -- Enter the username and password for your repository. The host can be blank if you are using hub.docker.com, otherwise enter the full URL (e.g. https://gcr.io) - -- Press **Save** - -![registry form](assets/add-registry-form.png) - -If the login info is correct, you should see the Verified checkbox light up shortly. - -![registry validated](assets/registry-validated.png) - -### Redeploy - -Now let's redeploy our application. - -Go back to the component and **delete the failing pod**. Deleting a pod will trigger a redeployment. This time, the pod should be successfully deployed. diff --git a/versioned_docs/version-v0.1.0-alpha.5/service-account.md b/versioned_docs/version-v0.1.0-alpha.5/service-account.md deleted file mode 100644 index 47ab8f7..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/service-account.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Authorization via Service Account ---- - -## Create a Service Account - -In order to manage applications, Kalm requires _cluster-admin_ privileges on the cluster. To keep things clean, we recommend creating a _Service Account_ for Kalm.
To create a service account with cluster-admin privileges: - -```bash -kubectl create sa kalm-admin-user -kubectl create clusterrolebinding \ - kalm-admin-binding \ - --user=system:serviceaccount:default:kalm-admin-user \ - --clusterrole=cluster-admin -``` - -### Retrieve Token - -_Note: the following command utilizes jq, a useful json processor which can be found at [https://stedolan.github.io/jq/](https://stedolan.github.io/jq/)_ - -To use the service account we've just created, we need to retrieve the token - -``` -KALM_ADMIN_SECRET=$(kubectl get sa kalm-admin-user -o json | jq -r .secrets\[\].name) -KALM_ADMIN_TOKEN=$(kubectl get secret $KALM_ADMIN_SECRET -o json | jq -r '.data["token"]' | base64 -d) -echo $KALM_ADMIN_TOKEN -``` - -\
You should see a token similar to: - -![example token](assets/example-token.png) -\
Copy the token, paste it into the input field, and press **Login** - -![token input](assets/token-input.png) - -You should see the main page - -![main page](assets/main-page.png) diff --git a/versioned_docs/version-v0.1.0-alpha.5/tut-bookinfo.md b/versioned_docs/version-v0.1.0-alpha.5/tut-bookinfo.md deleted file mode 100644 index 13b6ecf..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/tut-bookinfo.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Bookinfo Application ---- - -Now that you've seen how Kalm works, let's set up a more representative Application. - -About Bookinfo - -This example will teach you: - -- deploying a representative application with multiple pods working together -- configuring pods to work with each other -- editing existing applications -- setting up external routing for applications diff --git a/versioned_docs/version-v0.1.0-alpha.5/tut-create.md b/versioned_docs/version-v0.1.0-alpha.5/tut-create.md deleted file mode 100644 index d052f9a..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/tut-create.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: Creating an Application ---- - -For this tutorial, let's create an application \ No newline at end of file diff --git a/versioned_docs/version-v0.1.0-alpha.5/tut-deploy.md b/versioned_docs/version-v0.1.0-alpha.5/tut-deploy.md deleted file mode 100644 index 1626954..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/tut-deploy.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: Deploy ---- - -Some Text \ No newline at end of file diff --git a/versioned_docs/version-v0.1.0-alpha.5/tut-hello.md b/versioned_docs/version-v0.1.0-alpha.5/tut-hello.md deleted file mode 100644 index 8d82334..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/tut-hello.md +++ /dev/null @@ -1,133 +0,0 @@ ---- -title: Hello Kalm ---- - -Let's go through a simple example of deploying a single pod holding an nginx container. - -This tutorial will teach you: - -- The basics of how Kalm organizes deployments into applications -- How to create application configurations -- How to open a port and direct traffic to an application - -
- -
- -## Step 1: Create Application - -The main page of Kalm shows a list of applications. Press the **Create App** button - -![add application](assets/add-app.png) - -Enter a name for your application, then press **Create App** - -![name application](assets/name-app.png) - -
- -## Step 2: Create Component - -An application is made of one or more _components_. Each component typically holds a docker image. In this example, we will create a component which contains the official nginx image - -Under the Basic Information section, enter a name such as _"webserver"_ into the **Name** field and **_nginx:alpine_** into the **Image** field. Leave all other fields as is and click **Deploy Component**. - -![create component](assets/create-comp.png) - -After a few seconds, a single pod holding the nginx:alpine image will be deployed to your cluster. - -![first pod](assets/first-pod.png) - -
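-Everything Kalm shows here is backed by ordinary Kubernetes objects, so you can cross-check from the command line. A sketch, under the assumption that Kalm backs each application with a namespace of the same name (here `hello-kalm` is an illustrative application name):
-
-```bash
-# The workload created for the "webserver" component should be listed here
-# (the namespace name is an assumption based on the application name).
-kubectl get pods -n hello-kalm
-```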
- 

-## Step 3: Examining and Updating - -Let's examine our pod to see if nginx is really deployed. Kalm provides a handy terminal for quick examinations. Open a shell to our pod by clicking on the shell icon. - -![shell button](assets/shell-button.png) - -Run a few commands, for example: - -``` -cd /etc/nginx -ls -cat nginx.conf -``` - -It seems that nginx is correctly installed! - -![shell commands](assets/shell-cmd.png) - -Next let's go back to the Components list by clicking on **Components** in the navigation sidebar. - -Currently one pod is running. Let's scale up our application. Click the **Edit** button. - -![edit component](assets/first-pod.png) - -Change the number of replicas to **3** and click Deploy. - -![increase replicas](assets/increase-replicas.png) - -After a few moments, there should be three pods running. - -![three pods](assets/three-pods.png) - -Kubernetes is declarative, which means you specify the end result ("I want 3 pods"), and the Kubernetes control plane figures out how best to achieve the end result for you ("let's add 2 more"). - -## Step 4: Ports and Routing - -Let's make our application accessible in the browser. We can do this by opening a port on our component, then adding a route to it. - -First we'll open a port. Once again, we'll go to the components area and click on **Edit**, then click on the **Networking** tab. - -![networking tab](assets/networking-tab.png) - -To open a port, we need to specify a `Container Port` and a `Service Port`. Kalm tries to be helpful by providing a visual reminder of which is which. - -The Container port should be **80** because it's what the `nginx:alpine` image defaults to. The Service port can be an arbitrary port of your choosing, such as **8080**. - -![specify ports](assets/ports.png) - -Click **Update Component** to apply changes. - -:::note -During the deployment you may notice that the number of pods temporarily exceeds three. Kalm by default uses `rolling update`, which means pods are incrementally added and removed one by one, resulting in zero downtime. -::: - -![rolling updates](assets/rolling-update.png) - -
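-If you want to watch the rolling update happen, you can follow the pods from a second terminal. A sketch, with the namespace name again assumed to match the application name:
-
-```bash
-# Watch pods being added and removed one at a time during the update.
-kubectl get pods -n hello-kalm -w
-```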
- 

-Now that the port is open, let's add a route. Click **Routes** in the navigation sidebar tab, then click **Add Route** - -![add route](assets/add-route.png) - -The cluster IP address should be displayed under the Hosts field. Enter this address in the **Hosts** field. - -![specify host](assets/specify-ip.png) - -:::note -For minikube, if no ip is shown, run `minikube tunnel` and refresh the page. -::: - -In the _Targets_ section, select our _"webserver"_ component from the dropdown. - -![add target](assets/add-target.png) - -Click **Update Route**. Our route is now completely set up. - -There should be a new entry in the Routes table. Click on the link in the _Domain_ column to open our app in a new browser tab. - -![open in browser](assets/open-in-browser.png) - -Great, our nginx webserver app is working! - -![nginx success](assets/nginx-success.png) - -You've just installed an application on your cluster, modified it, scaled it, and set up routing to make it accessible from the outside world! - -All the heavy lifting is done via Kubernetes and istio. Kalm is simply applying the appropriate configurations to your cluster. In fact, anyone familiar with Kubernetes should be able to create the same application configuration with a text editor and `kubectl`. We encourage this as an _exercise for the reader_. - -## Next Step - -Admittedly our "application" is rather trivial. In the next tutorial, let's go over a more representative application containing multiple microservices working together. diff --git a/versioned_docs/version-v0.1.0-alpha.5/tut-overview.md b/versioned_docs/version-v0.1.0-alpha.5/tut-overview.md deleted file mode 100644 index f902b11..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/tut-overview.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: Basic Tutorial Overview ---- - -Overview of steps in the tutorial \ No newline at end of file diff --git a/versioned_docs/version-v0.1.0-alpha.5/tut-rollback.md b/versioned_docs/version-v0.1.0-alpha.5/tut-rollback.md deleted file mode 100644 index 6fa0432..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/tut-rollback.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Rollback ---- - -- Something broke -- Rollback \ No newline at end of file diff --git a/versioned_docs/version-v0.1.0-alpha.5/tut-wordpress.md b/versioned_docs/version-v0.1.0-alpha.5/tut-wordpress.md deleted file mode 100644 index 408f688..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/tut-wordpress.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -title: "Wordpress" ---- - -![Wordpress with Kubernetes](assets/wp-tut-0.jpeg) - -While hosting a blog on Kubernetes might be somewhat overkill, it can make for a good learning experience. In this tutorial, let's deploy Wordpress with Kalm in 5 minutes. - -## Create New Application - -The first thing we need is a new Application for the two components that make up our WordPress site. - -- In your Kalm application dashboard, click _Create App_ and enter your application's name - -We used the name `another-blog` for this tutorial. - -![Create new application](assets/wp-tut-1.png) - -## Add The Database Component - -Once you create the application, you'll be prompted to add a component to it. We'll start by adding our Database Component. - -- For the component _Name_, enter `wordpress-mysql` -- For the component _Image_, enter `mysql:5.6` - -### Configurations - -Next we'll move into the _Config_ tab.
- -- Under Environment Variables, click _+ New Variable_ - - For the _Name_, enter `MYSQL_ROOT_PASSWORD` - - For the _Value_, enter `mysql-pass` - -![Add Database Component 1](assets/wp-tut-2.png) - -### Networking - -Now we'll expose a Container Port to the cluster so we can access this image. Click the _Networking_ tab. - -- Under Ports, click _+ ADD_ - - For the _Protocol_, keep the default `http` - - For the _Container Port_, enter `3306` - - You can keep the _Service Port_ blank - -![Add Database Component 2](assets/wp-tut-3.png) - -### Disks - -Lastly, we'll add some storage for this image. Click on the _Disks_ tab. - -- Under Disks, click _+ Add_ - - For the _Type_, keep the default `Create and mount disk` - - For the _Mount Path_, enter `/var/lib/mysql` - - For the _Storage Class_, keep the default `kalm-hdd` - - For the _Size_, enter `1Gi` - -![Add Database Component 3](assets/wp-tut-4.png) - -Click _Deploy Component_ and our pod will start spinning up. While this is deploying, we'll setup our second component. - -## Add The Wordpress Component - -To add our second component, we'll click _Add Component_ from the Application Components page. - -- For the component _Name_, enter `wordpress` -- For the component _Image_, also enter `wordpress` - -### Configurations - -Once again we'll move to the _Config_ tab. This time we're adding two Environment Variables. - -- Under Environment Variables, click _+ New Variable_ - - For the _Name_, enter `MYSQL_ROOT_HOST` - - For the _Value_, enter `wordpress-mysql` -- Click _+ New Variable_ again to add a second Environment Variable - - For the _Name_, enter `MYSQL_ROOT_PASSWORD` - - For the _Value_, enter `mysql-pass` - -![Add Database Component 3](assets/wp-tut-5.png) - -### Networking - -Once again we'll click the _Networking_ tab and expose a container port to the cluster. - -- Under Ports click _+ ADD_ - - For the _Protocol_, keep the default `http` - - For the _Container Port_, enter `80` - - You can keep the _Service Port_ blank again - -![Add Database Component 2](assets/wp-tut-6.png) - -### Disks - -Again we'll click the _Disks_ tab to add storage for this component. - -- Under Disks, click _+ Add_ - - For the _Type_, keep the default `Create and mount disk` - - For the _Mount Path_, enter `/var/www/html` - - For the _Storage Class_, keep the default `kalm-hdd` - - For the _Size_, enter `1Gi` - -![Add Database Component 3](assets/wp-tut-7.png) - -## Add Route - -Lastly, we need to open a Route for our WordPress component so we can access it via a web browser. Click the _Routes_ tab on the left navigation menu. - -- Click `Add Route` - - For _Hosts_, you can either use your own domain or just click the ingress ip quick action (the cluster IP under the Hosts input field) - - For Targets, click the _Choose a target_ dropdown and select your `wordpress:80` component from the menu. - - You can leave all of the other options as their defaults - - Click `Create Route` to create your route - -![Add Database Component 3](assets/wp-tut-8.png) - -## Rock it! - -After a few seconds, all of the components should be up and running and there should be a green checkmark next to your new route. - -![Add Database Component 3](assets/wp-tut-9.png) - -You can click the domain to open up and play with your new WordPress site! 
- 

-![Add Database Component 3](assets/wp-tut-10.png) diff --git a/versioned_docs/version-v0.1.0-alpha.5/volumes.md b/versioned_docs/version-v0.1.0-alpha.5/volumes.md deleted file mode 100644 index 6b95760..0000000 --- a/versioned_docs/version-v0.1.0-alpha.5/volumes.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: Mount Disks ---- - -In Kubernetes, volumes are directories available to containers to use as a filesystem. Volumes persist beyond the lifecycle of pods. - -There are many types of Volumes. Kalm simplifies the typical use cases by representing them as "Disks". - -Let's create an application with 2 containers, and share a Disk between them. - -- create an application -- create a component called `comp1` with `centos:7` as the image -- one caveat is that containers must have a running process, or they will be terminated immediately after creation. Therefore even though we don't need the container to do anything, we still need to give it a command to stay alive: -- enter `/bin/bash -c 'sleep 100000'` for the command, basically telling it to sleep - -![create comp1](assets/create-comp1.png) - -Now let's create and mount a disk. - -- Go to the **Disks** tab -- Click **Add** Disk -- Select `Create and mount disk` in the Type dropdown -- enter `/tmp/folder1` for Mount Path -- select `standard` for Storage Class -- enter `0.1` (Gi) for Size -- click **Deploy Component** - -![create and mount](assets/create-and-mount-disk.png) - -After the component is created, open a shell, navigate to the mounted disk and create a file representing application data. - -```bash -cd /tmp/folder1 -echo "Some Data" > mydata -``` - -Now let's create a second component identical to the first one: - -- create a second component called `comp2` with `centos:7` as the image -- enter `/bin/bash -c 'sleep 100000'` for the command - -This time, let's mount the existing disk we created above with `comp1`: - -- Go to the **Disks** tab -- Click **Add** Disk -- Select `Mount an existing disk` in the Type dropdown -- enter `/tmp/folder2` for Mount Path -- select the option that starts with `pvc-comp1-...` in the Claim Name dropdown -- click **Deploy Component** - -![mount existing disk](assets/mount-existing-disk.png) - -Now let's delete comp1 - -![delete comp 1](assets/delete-comp1.png) - -After comp2 is created, open a shell to the pod and navigate to the mounted disk - -``` -cd /tmp/folder2 -``` - -Let's check to see if the file we created by shelling into comp1's container exists here - -``` -cat mydata -``` - -Sure enough, the data created while we were attached to comp1 (which no longer exists) is still available in comp2. - -![comp 2 data](assets/comp2-data.png) - -## Recap - -In this guide, we created two components attached to the same Disk, showed that data is shared across the two volumes, and learned that disks survive the deletion of components.
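-One way to confirm from outside the UI that the disk outlives its component is to list the PersistentVolumeClaims directly. A sketch, with the application namespace name assumed (replace `my-app` with yours):
-
-```bash
-# The claim created for comp1 remains Bound even after comp1 is deleted.
-kubectl get pvc -n my-app
-
-# The backing PersistentVolume is visible cluster-wide.
-kubectl get pv
-```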
diff --git a/versioned_sidebars/version-v0.1.0-alpha.4-sidebars.json b/versioned_sidebars/version-v0.1.0-alpha.4-sidebars.json deleted file mode 100644 index 461d96f..0000000 --- a/versioned_sidebars/version-v0.1.0-alpha.4-sidebars.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "version-v0.1.0-alpha.4/docs": [ - { - "type": "category", - "label": "Getting Started", - "collapsed": false, - "items": [ - "version-v0.1.0-alpha.4/intro", - "version-v0.1.0-alpha.4/install", - "version-v0.1.0-alpha.4/tut-hello" - ] - }, - { - "type": "category", - "label": "Guides", - "collapsed": false, - "items": [ - "version-v0.1.0-alpha.4/guide-logs", - "version-v0.1.0-alpha.4/guide-config", - "version-v0.1.0-alpha.4/cronjob", - "version-v0.1.0-alpha.4/volumes", - "version-v0.1.0-alpha.4/registry", - "version-v0.1.0-alpha.4/probes", - "version-v0.1.0-alpha.4/https-certs" - ] - }, - { - "type": "category", - "label": "Examples", - "items": [ - "version-v0.1.0-alpha.4/tut-bookinfo", - "version-v0.1.0-alpha.4/tut-wordpress" - ] - }, - { - "type": "category", - "label": "Operations", - "items": [ - "version-v0.1.0-alpha.4/guide-export-kalm-resources", - "version-v0.1.0-alpha.4/guide-logging-for-kalm" - ] - }, - { - "type": "category", - "label": "Platform Setup", - "items": [ - "version-v0.1.0-alpha.4/guide-minikube", - "version-v0.1.0-alpha.4/amazon-eks", - "version-v0.1.0-alpha.4/google-gke", - "version-v0.1.0-alpha.4/azure-aks" - ] - } - ] -} diff --git a/versioned_sidebars/version-v0.1.0-alpha.5-sidebars.json b/versioned_sidebars/version-v0.1.0-alpha.5-sidebars.json deleted file mode 100644 index a4918fe..0000000 --- a/versioned_sidebars/version-v0.1.0-alpha.5-sidebars.json +++ /dev/null @@ -1,107 +0,0 @@ -{ - "version-v0.1.0-alpha.5/docs": [ - { - "collapsed": false, - "type": "category", - "label": "Getting Started", - "items": [ - { - "type": "doc", - "id": "version-v0.1.0-alpha.5/intro" - }, - { - "type": "doc", - "id": "version-v0.1.0-alpha.5/install" - }, - { - "type": "doc", - "id": "version-v0.1.0-alpha.5/tut-hello" - } - ] - }, - { - "collapsed": false, - "type": "category", - "label": "Guides", - "items": [ - { - "type": "doc", - "id": "version-v0.1.0-alpha.5/guide-logs" - }, - { - "type": "doc", - "id": "version-v0.1.0-alpha.5/guide-config" - }, - { - "type": "doc", - "id": "version-v0.1.0-alpha.5/cronjob" - }, - { - "type": "doc", - "id": "version-v0.1.0-alpha.5/volumes" - }, - { - "type": "doc", - "id": "version-v0.1.0-alpha.5/registry" - }, - { - "type": "doc", - "id": "version-v0.1.0-alpha.5/probes" - }, - { - "type": "doc", - "id": "version-v0.1.0-alpha.5/https-certs" - } - ] - }, - { - "collapsed": true, - "type": "category", - "label": "Examples", - "items": [ - { - "type": "doc", - "id": "version-v0.1.0-alpha.5/tut-wordpress" - } - ] - }, - { - "collapsed": true, - "type": "category", - "label": "Operations", - "items": [ - { - "type": "doc", - "id": "version-v0.1.0-alpha.5/guide-export-kalm-resources" - }, - { - "type": "doc", - "id": "version-v0.1.0-alpha.5/guide-logging-for-kalm" - } - ] - }, - { - "collapsed": true, - "type": "category", - "label": "Platform Setup", - "items": [ - { - "type": "doc", - "id": "version-v0.1.0-alpha.5/guide-minikube" - }, - { - "type": "doc", - "id": "version-v0.1.0-alpha.5/amazon-eks" - }, - { - "type": "doc", - "id": "version-v0.1.0-alpha.5/google-gke" - }, - { - "type": "doc", - "id": "version-v0.1.0-alpha.5/azure-aks" - } - ] - } - ] -} diff --git a/versions.json b/versions.json index 5bd134d..41b42e6 100644 --- a/versions.json +++ b/versions.json @@ -1,4 
+1,3 @@ [ - "v0.1.0-alpha.5", - "v0.1.0-alpha.4" + ]