From ec87329e7c896bf8b71d8610dbcb61e37a8695b4 Mon Sep 17 00:00:00 2001
From: Natalie Somersall
Date: Wed, 17 Jan 2024 09:15:18 -0700
Subject: [PATCH 1/2] bump runner, compose (#228)

---
 images/ghes-demo.Dockerfile             | 4 ++--
 images/rootless-ubuntu-jammy.Dockerfile | 4 ++--
 images/ubi8.Dockerfile                  | 2 +-
 images/ubi9.Dockerfile                  | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/images/ghes-demo.Dockerfile b/images/ghes-demo.Dockerfile
index 430e3e6..c9e8fce 100644
--- a/images/ghes-demo.Dockerfile
+++ b/images/ghes-demo.Dockerfile
@@ -2,7 +2,7 @@ FROM ubuntu:22.04
 
 # GitHub runner arguments
 ARG RUNNER_ARCH=linux/amd64
-ARG RUNNER_VERSION=2.311.0
+ARG RUNNER_VERSION=2.312.0
 ARG RUNNER_CONTAINER_HOOKS_VERSION=0.5.0
 
 # CodeQL arguments
@@ -10,7 +10,7 @@ ARG RUNNER_CONTAINER_HOOKS_VERSION=0.5.0
 
 # Docker and Compose arguments
 ARG DOCKER_VERSION=24.0.7
-ARG COMPOSE_VERSION=v2.23.2
+ARG COMPOSE_VERSION=v2.24.0
 
 # Dumb-init version
 ARG DUMB_INIT_VERSION=1.2.5
diff --git a/images/rootless-ubuntu-jammy.Dockerfile b/images/rootless-ubuntu-jammy.Dockerfile
index 82f1263..c95f4cf 100644
--- a/images/rootless-ubuntu-jammy.Dockerfile
+++ b/images/rootless-ubuntu-jammy.Dockerfile
@@ -2,12 +2,12 @@ FROM ubuntu:22.04
 
 # GitHub runner arguments
 ARG RUNNER_ARCH=linux/amd64
-ARG RUNNER_VERSION=2.311.0
+ARG RUNNER_VERSION=2.312.0
 ARG RUNNER_CONTAINER_HOOKS_VERSION=0.5.0
 
 # Docker and Compose arguments
 ARG DOCKER_VERSION=24.0.7
-ARG COMPOSE_VERSION=v2.23.3
+ARG COMPOSE_VERSION=v2.24.0
 
 # Dumb-init version
 ARG DUMB_INIT_VERSION=1.2.5
diff --git a/images/ubi8.Dockerfile b/images/ubi8.Dockerfile
index 04ab8b3..d7b7a97 100644
--- a/images/ubi8.Dockerfile
+++ b/images/ubi8.Dockerfile
@@ -10,7 +10,7 @@ LABEL org.opencontainers.image.documentation /~https://github.com/some-natalie/kub
 
 # Arguments
 ARG TARGETPLATFORM=linux/amd64
-ARG RUNNER_VERSION=2.311.0
+ARG RUNNER_VERSION=2.312.0
 ARG RUNNER_CONTAINER_HOOKS_VERSION=0.5.0
 
 # Shell setup
diff --git a/images/ubi9.Dockerfile b/images/ubi9.Dockerfile
index 65de442..d925f6d 100644
--- a/images/ubi9.Dockerfile
+++ b/images/ubi9.Dockerfile
@@ -10,7 +10,7 @@ LABEL org.opencontainers.image.documentation /~https://github.com/some-natalie/kub
 
 # Arguments
 ARG TARGETPLATFORM=linux/amd64
-ARG RUNNER_VERSION=2.311.0
+ARG RUNNER_VERSION=2.312.0
 ARG RUNNER_CONTAINER_HOOKS_VERSION=0.5.0
 
 # Shell setup

From 17f49912a0b1176472e83db4f7ab676877f7b61c Mon Sep 17 00:00:00 2001
From: Shaker Gilbert <24681097+shakerg@users.noreply.github.com>
Date: Wed, 17 Jan 2024 11:18:07 -0500
Subject: [PATCH 2/2] Updating the notes for using the community version of ARC with OpenShift (#227)

* Update Dockerfile

Rolled up 'action-runner'.

* Update Dockerfile

Rolled up 'actions-runner' and 'podman' versions.

* Update Multitenancy.md

* Update Multitenancy.md

* Update openshift-docker-runner.yaml

* Update openshift-podman-deployment.yaml

* Rename openshift-docker-runner.yaml to openshift-docker-runnerdeployment.yaml

* Update and rename openshift-podman-deployment.yaml to openshift-podman-runnerdeployment.yaml

* Update docker-runner-autoscaler.yaml

* Update openshift-docker-runnerdeployment.yaml

* Update docker-runner-autoscaler.yaml

* Update openshift-podman-runnerdeployment.yaml

* Update podman-runner-autoscaler.yaml

* Update Multitenancy.md

* Update README.md

* consolidated notes

* Update README.md

---------

Co-authored-by: Kevin Alwell
---
 openshift/Multitenancy.md                     | 103 ------------
 openshift/README.md                           | 157 +++++++++---------
 .../builds/openshift-custom-runner/Dockerfile |  14 +-
 .../builds/podman-custom-runner/Dockerfile    |  13 +-
 .../manifests/docker-runner-autoscaler.yaml   |  22 ++-
 .../manifests/openshift-docker-runner.yaml    |  45 -----
 .../openshift-docker-runnerdeployment.yaml    |  32 ++++
 .../openshift-podman-deployment.yaml          |  60 -------
 .../openshift-podman-runnerdeployment.yaml    |  27 +++
 .../manifests/podman-runner-autoscaler.yaml   |   4 +-
 10 files changed, 164 insertions(+), 313 deletions(-)
 delete mode 100644 openshift/Multitenancy.md
 delete mode 100644 openshift/manifests/openshift-docker-runner.yaml
 create mode 100644 openshift/manifests/openshift-docker-runnerdeployment.yaml
 delete mode 100644 openshift/manifests/openshift-podman-deployment.yaml
 create mode 100644 openshift/manifests/openshift-podman-runnerdeployment.yaml

diff --git a/openshift/Multitenancy.md b/openshift/Multitenancy.md
deleted file mode 100644
index 50f1ad6..0000000
--- a/openshift/Multitenancy.md
+++ /dev/null
@@ -1,103 +0,0 @@
-# Multitenancy on OpenShift (GitHub Apps)
-
-With a few changes we can leverage a single ARC controller-manager across multiple orgazations. A quick prereq is that the controller must be on
-version 0.26.0+. The initial advantage of this is no having the overhead of multiple controllers and crd's that need to be managed, being our of
-sync with multiple deployments causes issues with your runner deployments.
-
-### Cert-Manager Installation
-Prior to installing ARC, you will need to install and configure cert-manager, this can be done by installing the `cert-manager` operator from the Operator Hub. Once the operator is installed (using the defaults), we will need to setup the private CA cert & key.
-
-1. Copy your ca.crt & ca.key files locally
-
-2. Create a SECRET with these files in the openshift-operators namespace \
-   `oc create secret tls ca-key-pair --cert=ca.crt --key=ca.key`
-
-3. I chose to provide acces to the cluster by creating a kind ClusterIssuer
-   ```
-   kind: ClusterIssuer
-   apiVersion: cert-manager.io/v1
-   metadata:
-     name: redcloud-clusterissuer
-   spec:
-     selfSigned:
-     ca:
-       secretName: ca-key-pair
-```
-
-## ARC to 0.26.0
-1. If this is your initial deployment just instal 0.26.0 \
-`kubectl replace --force -f /~https://github.com/actions/actions-runner-controller/releases/download/v0.26.0/actions-runner-controller.yam` \
-
-   [Notes](#Troubleshooting) - If you are upgrading to multitenancy, you must remove all of your runnerdeployments and horizontalrunnerautoscale
-   deployments prior to upgrading. Not doing this could cause your reinstall to hang and fail. Additionaly, we use the "replace --force" to install the
-   controller on OCP or it'll complain _"metadata.annotations: Too long: must have at most 262144 bytes"
-
-
-2. When deploying the solution for a GHES environment you need to provide an additional environment variable as part of the controller deployment \
-`kubectl set env deploy controller-manager -c manager GITHUB_ENTERPRISE_URL=https://${YOUR_GHES_SERVER} --namespace actions-runner-system`
-
-3. Set _privileged_ access \
-`oc adm policy add-scc-to-user privileged -z default -n actions-runner-system`
-
-4. Create a PAT using an Admin that has access to the orgs you'll be deploying ARC into. \
-   admin:org, admin:org_hook, notifications, read:public_key, read:repo_hook, repo, workflow
-
-5. Set the controller-manager secret using this PAT \
-   `oc create secret generic controller-manager --from-literal=github_token=${GITHUB_TOKEN}`
-
-6. Each Organzation will require it's own GitHub App \
-   Replace the ${PARTS} of the following URL with your GHES address & org name before opening it.
-   Then enter any unique name in the "GitHub App name" field, and hit the "Create GitHub App" button at the bottom of the page to create a GitHub App.
-
-   `https://${YOUR_GHES_SERVER}/organizations/${YOUR_ORG}/settings/apps/new?url=http://github.com/actions/actions-runner-controller&webhook_active=false&public=false&administration=write&organization_self_hosted_runners=write&actions=read&checks=read`
-
-   You will see an App ID on the page of the GitHub App you created as follows, the value of this App ID will be used later.
-
-7. Download the private key file by pushing the "Generate a private key" button at the bottom of the GitHub App page. This file will also be used later.
-
-8. Go to the "Install App" tab on the left side of the page and install the GitHub App that you created for your account or organization.
-
-9. Register the App ID `${APP_ID}`, Installation ID `${INSTALLATION_ID}`, and the downloaded private key file `${PRIVATE_KEY_FILE_PATH}` to OpenShift as a secret.
-   ```
-   $ kubectl create secret generic org1-github-app \
-     -n actions-runner-system \
-     --from-literal=github_app_id=${APP_ID} \
-     --from-literal=github_app_installation_id=${INSTALLATION_ID} \
-     --from-file=github_app_private_key=${PRIVATE_KEY_FILE_PATH}
-   ```
-10. You'll now call out org1-github-app in your manifests for RunnerDeployment and HorizonalRunnerAutoscaler
-   ```
-   Example:
-   ---
-   kind: RunnerDeployment
-   metadata:
-     namespace: org1-runners
-   spec:
-     template:
-       spec:
-         githubAPICredentialsFrom:
-           secretRef:
-             name: org1-github-app
-   ---
-   kind: HorizontalRunnerAutoscaler
-   metadata:
-     namespace: org1-runners
-   spec:
-     githubAPICredentialsFrom:
-       secretRef:
-         name: org1-github-app
-   ```
-   👉 Repeat for each Org GitHub App (RunnerDeployment/HorizontalRunnerAutoscaler)
-
-
---------
-
-## Troubleshooting
-1. You upgraded to 0.26.0 without removing your deployments beforehand and the removal has hung.
-   If your pods are in a 'Terminating' state, select the pod, switch to YAML and then remove finalizsers, save and move to the next pod. This should
-   remove them one-by-one.
-2. During the replace phase, your upgrade stops deleting CRD's.
-   Search your CRD's for runners \
-   `oc get crd | grep runner`
-   Edit the CRD and remove the finalizers, when you save/exit the CRD will be removed and the install should complete.
-
diff --git a/openshift/README.md b/openshift/README.md
index dda1d81..142744d 100644
--- a/openshift/README.md
+++ b/openshift/README.md
@@ -1,59 +1,57 @@
-## GitHub Actions Runner Controller (ARC) for OpenShift 4.X
+# Multitenancy on OpenShift (Community Version)
 
-Source : /~https://github.com/actions-runner-controller/actions-runner-controller
-
-There are multiple ways of installing ARC, I have chosen to use GitHub Apps to store credentials and access controls and to configure my runners at the *Org* level so that all repos underneath could have access to them. I also will install everything using .yaml files vs Helm.
+With a few changes we can leverage a single ARC controller-manager across multiple organizations. A quick prereq is that the controller must be on
+version 0.26.0+. The initial advantage of this is not having the overhead of multiple controllers and CRDs that need to be managed; falling out of
+sync across multiple deployments causes issues with your runner deployments.
 
 ### Cert-Manager Installation
-Prior to installing ARC, you will need to install and configure cert-manager, this can be done by installing the `cert-manager` operator from the Operator Hub. Once the operator is installed (using the defaults), we will need to setup the private CA cert & key.
+Prior to installing ARC, it's highly recommended to install and configure cert-manager; this can be done by installing the `cert-manager` operator from the Operator Hub. Once the operator is installed (using the defaults), you will need to set up your Issuer. I've chosen to use a ClusterIssuer so it will apply to all namespaces.
 
-1. Copy your ca.crt & ca.key files locally
+1. Copy any private ca.crt & ca.key files locally, or configure for [ACME](https://cert-manager.io/docs/configuration/acme/).
 
 2. Create a SECRET with these files in the openshift-operators namespace \
    `oc create secret tls ca-key-pair --cert=ca.crt --key=ca.key`
 
 3. I chose to provide acces to the cluster by creating a kind ClusterIssuer
-   ```
-   kind: ClusterIssuer
-   apiVersion: cert-manager.io/v1
-   metadata:
-     name: redcloud-clusterissuer
-   spec:
-     selfSigned:
-     ca:
-       secretName: ca-key-pair
-   ```
-
-### ARC Installation w/ GitHub Apps Authentication
-Releases : /~https://github.com/actions/actions-runner-controller/releases/
+    ```
+    kind: ClusterIssuer
+    apiVersion: cert-manager.io/v1
+    metadata:
+      name: my-clusterissuer
+    spec:
+      selfSigned:
+      ca:
+        secretName: ca-key-pair
+    ```
 
-1. Install the current release, we'll use the "replace --force" to install the controller on OCP or it'll complain that _"metadata.annotations: Too long: must have at most 262144 bytes"_ \
-   `kubectl replace --force -f /~https://github.com/actions/actions-runner-controller/releases/download/v0.22.0/actions-runner-controller.yaml`
+## ARC to 0.27.6
+### Using PAT
+1. If this is your initial deployment, install the ARC controller \
+`oc create -f /~https://github.com/actions/actions-runner-controller/releases/download/v0.27.6/actions-runner-controller.yaml` \
 
-2. When deploying the solution for a GHES environment you need to provide an additional environment variable as part of the controller deployment \
-   `kubectl set env deploy controller-manager -c manager GITHUB_ENTERPRISE_URL=https://${YOUR_GHES_SERVER} --namespace actions-runner-system`
+   [Notes](#troubleshooting) - If you are upgrading to multitenancy, you must remove all of your runnerdeployments and horizontalrunnerautoscaler
+   deployments prior to upgrading. Not doing this _could_ cause your reinstall to hang and fail. Additionally, if your controller version complains _"metadata.annotations: Too long: must have at most 262144 bytes"_ then use `kubectl replace --force -f https...` instead of the `oc` command above.
 
-3. Prior to 0.25 you have to set _privileged_ access \
-   `oc adm policy add-scc-to-user privileged -z default -n actions-runner-system`
+2. When deploying the solution for a GHES environment you need to provide an additional environment variable as part of the controller deployment \
+`oc -n actions-runner-system set env deploy controller-manager -c manager GITHUB_ENTERPRISE_URL=https://${YOUR_GHES_SERVER}`
 
-### GitHub App Authentication
-You can create a GitHub App for either your user account or any organization, below are the app permissions required for each supported type of runner.
+3. In this example, we'll set _privileged_ & _anyuid_ access \
+`oc adm policy add-scc-to-user privileged -z default -n actions-runner-system` \
+`oc adm policy add-scc-to-user anyuid -z default -n actions-runner-system`
 
-**Required Permissions for Repository Runners:**
-* Actions (read)
-* Administration (read / write)
-* Checks (read) (if you are going to use Webhook Driven Scaling)
-* Metadata (read)
+Note: If you deploy runners in other (!= actions-runner-system) projects/namespaces, you will need to repeat step 3 in those namespaces to provide access to the 'default' service account. Alternatively, you may manage your own SCC and SA for improved RBAC (out-of-scope).
 
-**Required Permissions for Organization Runners:**
-* Actions (read)
-* Metadata (read)
+4. Since we'll use one controller for all of our jobs, we'll deploy it using a Personal Access Token. Create a PAT using an Admin that has access to the orgs you'll be deploying ARC into. \
+   admin:org, admin:org_hook, notifications, read:public_key, read:repo_hook, repo, workflow
 
-**Organization Permissions**
-* Self-hosted runners (read / write)
+5. Set the controller-manager secret using this PAT \
+   `oc -n actions-runner-system create secret generic controller-manager --from-literal=github_token=${GITHUB_TOKEN}`
+
+### Using GitHub Apps
 
-### GitHub App for your organization
-1. Replace the `${PARTS}` of the following URL with your GHES address & organization name before opening it. Then enter any unique name in the "GitHub App name" field, and hit the "Create GitHub App" button at the bottom of the page to create a GitHub App.
+1. Optionally, if you want a separate controller-manager & namespace for each Organization (runner group), each will require its own GitHub App \
+   Replace the ${PARTS} of the following URL with your GHES address & Org name before opening it in your browser.
+   Then enter any unique name in the "GitHub App name" field, and hit the "Create GitHub App" button at the bottom of the page to create a GitHub App.
 
 `https://${YOUR_GHES_SERVER}/organizations/${YOUR_ORG}/settings/apps/new?url=http://github.com/actions/actions-runner-controller&webhook_active=false&public=false&administration=write&organization_self_hosted_runners=write&actions=read&checks=read`
 
 You will see an App ID on the page of the GitHub App you created as follows, the value of this App ID will be used later.
 
@@ -61,47 +59,50 @@ You can create a GitHub App for either your user account or any organization, be
 
 2. Download the private key file by pushing the "Generate a private key" button at the bottom of the GitHub App page. This file will also be used later.
 
-3. Go to the "Install App" tab on the left side of the page and install the GitHub App that you created for your account or organization. \
-   ##### ```NOTE: You will need to Installation ID, to retrieve it go to your ORG that you've installed the app into, visit Settings > GitHub Apps > {YOUR_APP} > and then when you highlight the URL in your browser. The number at the end would be your Intallation ID, example: https://${YOUR_GHES_SERVER}/organizations/${YOUR_ORG}/settings/installation/${INSTALLATION_ID}.```
+3. Go to the "Install App" tab on the left side of the page and install the GitHub App that you created for your account or organization.
 
 4. Register the App ID `${APP_ID}`, Installation ID `${INSTALLATION_ID}`, and the downloaded private key file `${PRIVATE_KEY_FILE_PATH}` to OpenShift as a secret.
-   ```
-   $ kubectl create secret generic controller-manager \
-     -n actions-runner-system \
-     --from-literal=github_app_id=${APP_ID} \
-     --from-literal=github_app_installation_id=${INSTALLATION_ID} \
-     --from-file=github_app_private_key=${PRIVATE_KEY_FILE_PATH}
-   ```
+    ```
+    $ kubectl create secret generic org1-github-app \
+      -n actions-runner-system \
+      --from-literal=github_app_id=${APP_ID} \
+      --from-literal=github_app_installation_id=${INSTALLATION_ID} \
+      --from-file=github_app_private_key=${PRIVATE_KEY_FILE_PATH}
+    ```
 
-### Runner Deployments
-There are additional ways to launch your runners, here I chose using kind: RunnerDeployment
-#### NOTE: Keep in mind that OpenShift will not natively display your deployments, to view them as well as the later HorizontalRunnerAutoscaler, you'll need to use `oc get runnerdeployment`, `oc get hra` & `oc get horizonalrunnerautoscaler`.
+### Running the deployments - see [manifests](./manifests) for more examples
+5. You'll now reference org1-github-app in your manifests for RunnerDeployment and HorizontalRunnerAutoscaler
+   ```
+   Example:
+   ---
+   kind: RunnerDeployment
+   metadata:
+     name: example-runner
+   spec:
+     template:
+       spec:
+         githubAPICredentialsFrom:
+           secretRef:
+             name: org1-github-app
+   ---
+   kind: HorizontalRunnerAutoscaler
+   metadata:
+     name: example-runner-hra
+   spec:
+     githubAPICredentialsFrom:
+       secretRef:
+         name: org1-github-app
+   ```
+   👉 Repeat for each deployment (RunnerDeployment/HorizontalRunnerAutoscaler)
+
 
-   ```
-   apiVersion: actions.summerwind.dev/v1alpha1
-   kind: RunnerDeployment
-   metadata:
-     name: example-runner-deployment
-   spec:
-     template:
-       spec:
-         repository: example/myrepo
-   ---
-   apiVersion: actions.summerwind.dev/v1alpha1
-   kind: HorizontalRunnerAutoscaler
-   metadata:
-     name: example-runner-deployment-autoscaler
-   spec:
-     scaleTargetRef:
-       name: example-runner-deployment
-       # IMPORTANT : If your HRA is targeting a RunnerSet you must specify the kind in the scaleTargetRef:, uncomment the below
-       #kind: RunnerSet
-     minReplicas: 1
-     maxReplicas: 5
-     metrics:
-     - type: TotalNumberOfQueuedAndInProgressWorkflowRuns
-       repositoryNames:
-       - example/myrepo
+--------
+
+## Troubleshooting
+1. You upgraded to 0.26.0 without removing your deployments beforehand and the removal has hung.
+   If your pods are in a 'Terminating' state, select the pod, switch to YAML and then remove the finalizers, save, and move to the next pod. This should remove them one-by-one.
+2. During the replace phase, your upgrade stops deleting CRDs.
+   Search your CRDs for runners \
+   `oc get crd | grep runner`
+   Edit the CRD and remove the finalizers; when you save/exit, the CRD will be removed and the install should complete.
 
-```
-There are a lot of options here, so I am only showing the defaults, but if you'd like an example I have included my scripts under /manifests.
-Additionally, I have evaluated two custom runners - one based on docker and the other based on podman (buildah). I will include these as examples under /builds.
diff --git a/openshift/builds/openshift-custom-runner/Dockerfile b/openshift/builds/openshift-custom-runner/Dockerfile
index d600e99..31f49dd 100644
--- a/openshift/builds/openshift-custom-runner/Dockerfile
+++ b/openshift/builds/openshift-custom-runner/Dockerfile
@@ -1,5 +1,5 @@
-FROM summerwind/actions-runner:latest
-ENV VERSION_ID="20.04"
+FROM --platform=linux/amd64 summerwind/actions-runner:latest
+ENV VERSION_ID="22.04"
 ENV DEBIAN_FRONTEND=noninteractive
 
 RUN sudo sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list"
@@ -8,13 +8,15 @@ RUN sudo wget -nv https://download.opensuse.org/repositories/devel:kubic:libcont
 
 RUN sudo apt-get update -y \
   && sudo apt-get upgrade -y \
-  && sudo apt-get install buildah podman skopeo uidmap vim -y \
+  && sudo apt-get install uidmap vim -y \
   && sudo rm -rf /var/lib/apt/lists/*
-
-RUN sudo sed -i 's|\[machine\]|\#\[machine\]|g' /usr/share/containers/containers.conf
-
+
 USER root
+# Adding private CA
+# COPY ca.crt /usr/local/share/ca-certificates/ca.crt
+# RUN update-ca-certificates
+
 RUN usermod -aG sudo runner \
   && usermod -aG docker runner \
   && echo "%sudo ALL=(ALL:ALL) NOPASSWD:ALL" > /etc/sudoers
diff --git a/openshift/builds/podman-custom-runner/Dockerfile b/openshift/builds/podman-custom-runner/Dockerfile
index 3f44047..6ff3288 100644
--- a/openshift/builds/podman-custom-runner/Dockerfile
+++ b/openshift/builds/podman-custom-runner/Dockerfile
@@ -1,10 +1,10 @@
-FROM quay.io/podman/stable:v4.3.1
+FROM --platform=linux/amd64 quay.io/podman/stable:v4.8.3
 
 # Target architecture
 ARG TARGETPLATFORM=linux/amd64
 
 # GitHub runner arguments
-ARG RUNNER_VERSION=2.299.1
+ARG RUNNER_VERSION=2.311.0
 
 # Other arguments
 ARG DEBUG=false
@@ -23,10 +23,6 @@ RUN dnf install -y \
     curl \
   && dnf clean all
 
-# Removing kubectl - runner load via GitHub Actions instead
-# RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" \
-#   && mv kubectl /usr/local/bin/
-
 # Runner download supports amd64 as x64
 RUN test -n "$TARGETPLATFORM" || (echo "TARGETPLATFORM must be set" && false)
 RUN ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
@@ -42,9 +38,12 @@ RUN ARCH=$(echo ${TARGETPLATFORM} | cut -d / -f2) \
   && dnf clean all
 
 COPY logger.sh /opt/bash-utils/logger.sh
+# Adding private CA
+# COPY ca.crt /etc/pki/ca-trust/source/anchors/ca.crt
 COPY entrypoint.sh /usr/local/bin/
 
-RUN chmod +x /usr/local/bin/entrypoint.sh \
+RUN update-ca-trust \
+  && chmod +x /usr/local/bin/entrypoint.sh \
   && sed -i 's|\[machine\]|\#\[machine\]|g' /usr/share/containers/containers.conf \
   && sed -i 's|\#ignore_chown_errors = "false"|ignore_chown_errors = "true"|g' /etc/containers/storage.conf
diff --git a/openshift/manifests/docker-runner-autoscaler.yaml b/openshift/manifests/docker-runner-autoscaler.yaml
index 41f8779..00048fd 100644
--- a/openshift/manifests/docker-runner-autoscaler.yaml
+++ b/openshift/manifests/docker-runner-autoscaler.yaml
@@ -1,18 +1,16 @@
 apiVersion: actions.summerwind.dev/v1alpha1
 kind: HorizontalRunnerAutoscaler
 metadata:
-  name: openshift-runner-autoscaler
-  # Optional
-  # namespace:
+  name: openshift-docker-autoscaler
 spec:
-  maxReplicas: 6
-  metrics:
-  - scaleDownFactor: "0.5"
-    scaleDownThreshold: "0.25"
-    scaleUpFactor: "2"
-    scaleUpThreshold: "0.75"
-    type: PercentageRunnersBusy
-  minReplicas: 2
   scaleDownDelaySecondsAfterScaleOut: 300
   scaleTargetRef:
-    name: openshift-runner
+    name: openshift-docker-runners
+  minReplicas: 1
+  maxReplicas: 4
+  metrics:
+    - type: PercentageRunnersBusy
+      scaleUpThreshold: '0.75'
+      scaleDownThreshold: '0.25'
+      scaleUpFactor: '2'
+      scaleDownFactor: '0.5'
diff --git a/openshift/manifests/openshift-docker-runner.yaml b/openshift/manifests/openshift-docker-runner.yaml
deleted file mode 100644
index 265b282..0000000
--- a/openshift/manifests/openshift-docker-runner.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
-apiVersion: actions.summerwind.dev/v1alpha1
-kind: RunnerDeployment
-metadata:
-  name: openshift-runner
-  # Optional namesapce
-  # namespace:
-spec:
-  replicas: 0 # 0 if using HRA
-  template:
-    metadata:
-      annotations:
-        cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
-    spec:
-      containers:
-        # Optional - passing in daemon.json entries via ConfigMap
-        # See daemon-json-configmap.yaml
-        - name: docker
-          resources: {}
-          volumeMounts:
-            - mountPath: /etc/docker/daemon.json
-              name: daemon-json
-              subPath: daemon.json
-      dockerdContainerResources: {}
-      # GitHub Runner Group
-      group: OpenShift
-      image: ghcr.io/ocpdude/actions-custom-runner:latest
-      imagePullPolicy: Always
-      # Lables to focus jobs
-      labels:
-        - docker,openshift
-      # GitHub Org
-      organization: ocpdude
-      resources:
-        limits:
-          cpu: "2"
-          memory: 4Gi
-        requests:
-          cpu: "1"
-          memory: 2Gi
-      securityContext:
-        fsGroup: 1000
-      volumes:
-        - configMap:
-            name: daemon-json
-          name: daemon-json
diff --git a/openshift/manifests/openshift-docker-runnerdeployment.yaml b/openshift/manifests/openshift-docker-runnerdeployment.yaml
new file mode 100644
index 0000000..4c142ce
--- /dev/null
+++ b/openshift/manifests/openshift-docker-runnerdeployment.yaml
@@ -0,0 +1,32 @@
+apiVersion: actions.summerwind.dev/v1alpha1
+kind: RunnerDeployment
+metadata:
+  name: openshift-docker-runners
+spec:
+  # Using "HorizontalRunnerAutoscaler"
+  # replicas: 1
+  template:
+    metadata:
+      annotations:
+        cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
+    spec:
+      group: OpenShift
+      labels:
+        - docker,openshift
+      organization: ${ORG_NAME}
+      imagePullPolicy: Always
+      resources:
+        limits:
+          cpu: "250m"
+          memory: "500Mi"
+      image: ${PATH_TO_IMAGE}
+      containers:
+        - name: docker
+          volumeMounts:
+            - mountPath: /etc/docker/daemon.json
+              name: daemon-json
+              subPath: daemon.json
+      volumes:
+        - name: daemon-json
+          configMap:
+            name: daemon-json
diff --git a/openshift/manifests/openshift-podman-deployment.yaml b/openshift/manifests/openshift-podman-deployment.yaml
deleted file mode 100644
index 242fcb4..0000000
--- a/openshift/manifests/openshift-podman-deployment.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-apiVersion: actions.summerwind.dev/v1alpha1
-kind: RunnerDeployment
-metadata:
-  name: openshift-podman-runner
-  # Claim namespace if needed
-  # namespace:
-spec:
-  # Using "HorizontalRunnerAutoscaler"
-  replicas: 0 # 0 if using HRA
-  template:
-    metadata:
-      annotations:
-        cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
-    spec:
-      # Assign runner group
-      group: OpenShift
-      # Add lables for targeting
-      labels:
-        - podman,openshift
-      # Adding the security context
-      securityContext:
-        privileged: true
-        fsGroup: 1000
-      # Name of the GitHub Org
-      organization: ocpdude
-      imagePullPolicy: Always
-      resources:
-        limits:
-          cpu: "4"
-          memory: "8Gi"
-        requests:
-          cpu: "2"
-          memory: "2Gi"
-      image: ghcr.io/ocpdude/podman-runner:6
-      # Optionally break out the storage layer for podman,
-      # handy for large builds that need persistant data
# handy for large builds that need persistant data - # between runners -# containers: -# - name: runner -# volumeMounts: -# - mountPath: /home/podman/.local/share/containers/storage -# name: podman-storage -# volumes: -# - name: podman-storage -# persistentVolumeClaim: -# claimName: "podman-runner-pvc" - -# --- -# apiVersion: "v1" -# kind: "PersistentVolumeClaim" -# metadata: -# name: "podman-runner-pvc" -# spec: -# accessModes: -# - ReadWriteOnce -# resources: -# requests: -# storage: 1Gi -# storageClassName: "thin" -# volumeMode: Filesystem diff --git a/openshift/manifests/openshift-podman-runnerdeployment.yaml b/openshift/manifests/openshift-podman-runnerdeployment.yaml new file mode 100644 index 0000000..a2f9d68 --- /dev/null +++ b/openshift/manifests/openshift-podman-runnerdeployment.yaml @@ -0,0 +1,27 @@ +apiVersion: actions.summerwind.dev/v1alpha1 +kind: RunnerDeployment +metadata: + name: openshift-podman-runner +spec: + # Using "HorizontalRunnerAutoscaler" + # replicas: 0 + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + spec: + # Assign runner group + dockerdWithinRunnerContainer: false #disable the docker container + group: ${RUNNER_GROUP_NAME} + labels: + - podman + organization: ${ORG} + imagePullPolicy: Always + resources: + limits: + cpu: "1" + memory: "1Gi" + requests: + cpu: "250m" + memory: "250Mi" + image: ${PATH_TO_IMAGE} diff --git a/openshift/manifests/podman-runner-autoscaler.yaml b/openshift/manifests/podman-runner-autoscaler.yaml index 8dceca4..999bece 100644 --- a/openshift/manifests/podman-runner-autoscaler.yaml +++ b/openshift/manifests/podman-runner-autoscaler.yaml @@ -1,13 +1,13 @@ apiVersion: actions.summerwind.dev/v1alpha1 kind: HorizontalRunnerAutoscaler metadata: - name: podman-runner-autoscaler + name: openshift-podman-autoscaler spec: scaleDownDelaySecondsAfterScaleOut: 300 scaleTargetRef: name: openshift-podman-runner minReplicas: 1 - maxReplicas: 5 + maxReplicas: 3 metrics: - type: PercentageRunnersBusy scaleUpThreshold: '0.75'