Delete acceptance directory

This commit is contained in:
parent ddc2918a48
commit e2ebc9c0c2
@@ -1,100 +0,0 @@
#!/usr/bin/env bash
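
# Assumed usage (inferred from the variables used below; the tunnel itself must
# already exist, since its credentials are read from ~/.cloudflared):
#   TUNNEL_ID=<uuid> TUNNEL_NAME=<name> TUNNEL_HOSTNAME=<host> OP=apply <this script>
# OP is passed straight to kubectl, so presumably either "apply" or "delete".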

# See https://developers.cloudflare.com/cloudflare-one/tutorials/many-cfd-one-tunnel/

kubectl create ns tunnel || :

kubectl -n tunnel delete secret tunnel-credentials || :

kubectl -n tunnel create secret generic tunnel-credentials \
  --from-file=credentials.json=$HOME/.cloudflared/${TUNNEL_ID}.json || :

cat <<MANIFEST | kubectl -n tunnel ${OP} -f -
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cloudflared
spec:
  selector:
    matchLabels:
      app: cloudflared
  replicas: 2 # You could also consider elastic scaling for this deployment
  template:
    metadata:
      labels:
        app: cloudflared
    spec:
      containers:
      - name: cloudflared
        image: cloudflare/cloudflared:latest
        args:
        - tunnel
        # Points cloudflared to the config file, which configures what
        # cloudflared will actually do. This file is created by a ConfigMap
        # below.
        - --config
        - /etc/cloudflared/config/config.yaml
        - run
        livenessProbe:
          httpGet:
            # Cloudflared has a /ready endpoint which returns 200 if and only if
            # it has an active connection to the edge.
            path: /ready
            port: 2000
          failureThreshold: 1
          initialDelaySeconds: 10
          periodSeconds: 10
        volumeMounts:
        - name: config
          mountPath: /etc/cloudflared/config
          readOnly: true
        # Each tunnel has an associated "credentials file" which authorizes machines
        # to run the tunnel. cloudflared will read this file from its local filesystem,
        # and it'll be stored in a k8s secret.
        - name: creds
          mountPath: /etc/cloudflared/creds
          readOnly: true
      volumes:
      - name: creds
        secret:
          secretName: tunnel-credentials
      # Create a config.yaml file from the ConfigMap below.
      - name: config
        configMap:
          name: cloudflared
          items:
          - key: config.yaml
            path: config.yaml
---
# This ConfigMap is just a way to define the cloudflared config.yaml file in k8s.
# It's useful to define it in k8s, rather than as a stand-alone .yaml file, because
# this lets you use various k8s templating solutions (e.g. Helm charts) to
# parameterize your config, instead of just using string literals.
apiVersion: v1
kind: ConfigMap
metadata:
  name: cloudflared
data:
  config.yaml: |
    # Name of the tunnel you want to run
    tunnel: ${TUNNEL_NAME}
    credentials-file: /etc/cloudflared/creds/credentials.json
    # Serves the metrics server under /metrics and the readiness server under /ready
    metrics: 0.0.0.0:2000
    # Autoupdates applied in a k8s pod will be lost when the pod is removed or restarted, so
    # autoupdate doesn't make sense in Kubernetes. However, outside of Kubernetes, we strongly
    # recommend using autoupdate.
    no-autoupdate: true
    ingress:
    # The first rule proxies traffic to the httpbin sample Service defined in app.yaml
    - hostname: ${TUNNEL_HOSTNAME}
      service: http://actions-runner-controller-actions-metrics-server.actions-runner-system:80
      path: /metrics$
    - hostname: ${TUNNEL_HOSTNAME}
      service: http://actions-runner-controller-github-webhook-server.actions-runner-system:80
    # This rule matches any traffic which didn't match a previous rule, and responds with HTTP 404.
    - service: http_status:404
MANIFEST

kubectl -n tunnel delete po -l app=cloudflared || :
@@ -1,84 +0,0 @@
#!/usr/bin/env bash
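
# Assumed inputs (inferred from the checks below): TEST_REPO enables the repo-level
# test; USE_RUNNERSET, when set, verifies a RunnerSet instead of a RunnerDeployment.
# Each branch polls up to ~30 times for the custom resource and its backing pod,
# then waits for the pod to become ready.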

set +e

repo_runnerdeployment_passed="skipped"
repo_runnerset_passed="skipped"

echo "Checking if RunnerDeployment repo test is set"
if [ "${TEST_REPO}" ] && [ ! "${USE_RUNNERSET}" ]; then
  runner_name=
  count=0
  while [ $count -le 30 ]; do
    echo "Finding Runner ..."
    runner_name=$(kubectl get runner --output=jsonpath="{.items[*].metadata.name}")
    if [ "${runner_name}" ]; then
      while [ $count -le 30 ]; do
        runner_pod_name=
        echo "Found Runner \"${runner_name}\""
        echo "Finding underlying pod ..."
        runner_pod_name=$(kubectl get pod --output=jsonpath="{.items[*].metadata.name}" | grep ${runner_name})
        if [ "${runner_pod_name}" ]; then
          echo "Found underlying pod \"${runner_pod_name}\""
          echo "Waiting for pod \"${runner_pod_name}\" to become ready..."
          kubectl wait pod/${runner_pod_name} --for condition=ready --timeout 270s
          break 2
        fi
        sleep 1
        let "count=count+1"
      done
    fi
    sleep 1
    let "count=count+1"
  done
  if [ $count -ge 30 ]; then
    repo_runnerdeployment_passed=false
  else
    repo_runnerdeployment_passed=true
  fi
elif [ "${TEST_REPO}" ] && [ "${USE_RUNNERSET}" ]; then
  echo "Checking if RunnerSet repo test is set"
  runnerset_name=
  count=0
  while [ $count -le 30 ]; do
    echo "Finding RunnerSet ..."
    runnerset_name=$(kubectl get runnerset --output=jsonpath="{.items[*].metadata.name}")
    if [ "${runnerset_name}" ]; then
      while [ $count -le 30 ]; do
        runnerset_pod_name=
        echo "Found RunnerSet \"${runnerset_name}\""
        echo "Finding underlying pod ..."
        runnerset_pod_name=$(kubectl get pod --output=jsonpath="{.items[*].metadata.name}" | grep ${runnerset_name})
        if [ "${runnerset_pod_name}" ]; then
          echo "Found underlying pod \"${runnerset_pod_name}\""
          echo "Waiting for pod \"${runnerset_pod_name}\" to become ready..."
          kubectl wait pod/${runnerset_pod_name} --for condition=ready --timeout 270s
          break 2
        fi
        sleep 1
        let "count=count+1"
      done
    fi
    sleep 1
    let "count=count+1"
  done
  if [ $count -ge 30 ]; then
    repo_runnerset_passed=false
  else
    repo_runnerset_passed=true
  fi
fi

if { [ ${repo_runnerset_passed} == true ] || [ ${repo_runnerset_passed} == "skipped" ]; } && \
   { [ ${repo_runnerdeployment_passed} == true ] || [ ${repo_runnerdeployment_passed} == "skipped" ]; }; then
  echo "INFO : All tests passed or skipped"
  echo "RunnerSet Repo Test Status : ${repo_runnerset_passed}"
  echo "RunnerDeployment Repo Test Status : ${repo_runnerdeployment_passed}"
else
  echo "ERROR : Some tests failed"
  echo "RunnerSet Repo Test Status : ${repo_runnerset_passed}"
  echo "RunnerDeployment Repo Test Status : ${repo_runnerdeployment_passed}"
  exit 1
fi
@@ -1,130 +0,0 @@
#!/usr/bin/env bash
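
# Assumed usage (inferred from the variables used below):
#   ACCEPTANCE_TEST_SECRET_TYPE=token GITHUB_TOKEN=<pat> \
#   ACCEPTANCE_TEST_DEPLOYMENT_TOOL=helm NAME=<image repo> VERSION=<image tag> <this script>
# With ACCEPTANCE_TEST_SECRET_TYPE=app, set APP_ID, APP_INSTALLATION_ID, and
# APP_PRIVATE_KEY_FILE instead of GITHUB_TOKEN. Any other deployment tool value
# falls back to `kubectl apply` of release/actions-runner-controller.yaml.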

set -e

tpe=${ACCEPTANCE_TEST_SECRET_TYPE}

VALUES_FILE=${VALUES_FILE:-$(dirname $0)/values.yaml}

kubectl delete secret -n actions-runner-system controller-manager || :

if [ "${tpe}" == "token" ]; then
  if ! kubectl get secret controller-manager -n actions-runner-system >/dev/null; then
    kubectl create secret generic controller-manager \
      -n actions-runner-system \
      --from-literal=github_token=${GITHUB_TOKEN:?GITHUB_TOKEN must not be empty}
  fi
elif [ "${tpe}" == "app" ]; then
  kubectl create secret generic controller-manager \
    -n actions-runner-system \
    --from-literal=github_app_id=${APP_ID:?must not be empty} \
    --from-literal=github_app_installation_id=${APP_INSTALLATION_ID:?must not be empty} \
    --from-file=github_app_private_key=${APP_PRIVATE_KEY_FILE:?must not be empty}
else
  echo "ACCEPTANCE_TEST_SECRET_TYPE must be set to either \"token\" or \"app\"" 1>&2
  exit 1
fi

if [ -n "${WEBHOOK_GITHUB_TOKEN}" ]; then
  kubectl -n actions-runner-system delete secret \
    github-webhook-server || :
  kubectl -n actions-runner-system create secret generic \
    github-webhook-server \
    --from-literal=github_token=${WEBHOOK_GITHUB_TOKEN:?WEBHOOK_GITHUB_TOKEN must not be empty}
else
  echo 'Skipped deploying secret "github-webhook-server". Set WEBHOOK_GITHUB_TOKEN to deploy.' 1>&2
fi

if [ -n "${WEBHOOK_GITHUB_TOKEN}" ] && [ -z "${CREATE_SECRETS_USING_HELM}" ]; then
  kubectl -n actions-runner-system delete secret \
    actions-metrics-server || :
  kubectl -n actions-runner-system create secret generic \
    actions-metrics-server \
    --from-literal=github_token=${WEBHOOK_GITHUB_TOKEN:?WEBHOOK_GITHUB_TOKEN must not be empty}
else
  echo 'Skipped deploying secret "actions-metrics-server". Set WEBHOOK_GITHUB_TOKEN to deploy.' 1>&2
fi

tool=${ACCEPTANCE_TEST_DEPLOYMENT_TOOL}

TEST_ID=${TEST_ID:-default}

if [ "${tool}" == "helm" ]; then
  set -v

  CHART=${CHART:-charts/actions-runner-controller}

  flags=()
  if [ "${IMAGE_PULL_SECRET}" != "" ]; then
    flags+=( --set imagePullSecrets[0].name=${IMAGE_PULL_SECRET})
    flags+=( --set image.actionsRunnerImagePullSecrets[0].name=${IMAGE_PULL_SECRET})
    flags+=( --set githubWebhookServer.imagePullSecrets[0].name=${IMAGE_PULL_SECRET})
    flags+=( --set actionsMetricsServer.imagePullSecrets[0].name=${IMAGE_PULL_SECRET})
  fi
  if [ "${WATCH_NAMESPACE}" != "" ]; then
    flags+=( --set watchNamespace=${WATCH_NAMESPACE} --set singleNamespace=true)
  fi
  if [ "${CHART_VERSION}" != "" ]; then
    flags+=( --version ${CHART_VERSION})
  fi
  if [ "${LOG_FORMAT}" != "" ]; then
    flags+=( --set logFormat=${LOG_FORMAT})
    flags+=( --set githubWebhookServer.logFormat=${LOG_FORMAT})
    flags+=( --set actionsMetricsServer.logFormat=${LOG_FORMAT})
  fi
  if [ "${ADMISSION_WEBHOOKS_TIMEOUT}" != "" ]; then
    flags+=( --set admissionWebHooks.timeoutSeconds=${ADMISSION_WEBHOOKS_TIMEOUT})
  fi
  if [ -n "${CREATE_SECRETS_USING_HELM}" ]; then
    if [ -z "${WEBHOOK_GITHUB_TOKEN}" ]; then
      echo 'Failed deploying secret "actions-metrics-server" using helm. Set WEBHOOK_GITHUB_TOKEN to deploy.' 1>&2
      exit 1
    fi
    flags+=( --set actionsMetricsServer.secret.create=true)
    flags+=( --set actionsMetricsServer.secret.github_token=${WEBHOOK_GITHUB_TOKEN})
  fi
  if [ -n "${GITHUB_WEBHOOK_SERVER_ENV_NAME}" ] && [ -n "${GITHUB_WEBHOOK_SERVER_ENV_VALUE}" ]; then
    flags+=( --set githubWebhookServer.env[0].name=${GITHUB_WEBHOOK_SERVER_ENV_NAME})
    flags+=( --set githubWebhookServer.env[0].value=${GITHUB_WEBHOOK_SERVER_ENV_VALUE})
  fi

  set -vx

  helm upgrade --install actions-runner-controller \
    ${CHART} \
    -n actions-runner-system \
    --create-namespace \
    --set syncPeriod=${SYNC_PERIOD} \
    --set authSecret.create=false \
    --set image.repository=${NAME} \
    --set image.tag=${VERSION} \
    --set podAnnotations.test-id=${TEST_ID} \
    --set githubWebhookServer.podAnnotations.test-id=${TEST_ID} \
    --set actionsMetricsServer.podAnnotations.test-id=${TEST_ID} \
    ${flags[@]} --set image.imagePullPolicy=${IMAGE_PULL_POLICY} \
    --set image.dindSidecarRepositoryAndTag=${DIND_SIDECAR_REPOSITORY_AND_TAG} \
    -f ${VALUES_FILE}
  set +v
  # To prevent `CustomResourceDefinition.apiextensions.k8s.io "runners.actions.summerwind.dev" is invalid: metadata.annotations: Too long: must have at most 262144 bytes`
  # errors
  kubectl create -f charts/actions-runner-controller/crds || kubectl replace -f charts/actions-runner-controller/crds
  # This wait fails due to timeout when it's already in CrashLoopBackOff and this update doesn't change the image tag.
  # That's why we add `|| :`. With that we prevent stopping the script in case of timeout and
  # proceed to delete (possibly in CrashLoopBackOff and/or running with an outdated image) pods so that they are recreated by K8s.
  kubectl -n actions-runner-system wait deploy/actions-runner-controller --for condition=available --timeout 60s || :
else
  kubectl apply \
    -n actions-runner-system \
    -f release/actions-runner-controller.yaml
  kubectl -n actions-runner-system wait deploy/controller-manager --for condition=available --timeout 120s || :
fi

# Restart all ARC pods
kubectl -n actions-runner-system delete po -l app.kubernetes.io/name=actions-runner-controller

echo Waiting for all ARC pods to be up and running after restart

kubectl -n actions-runner-system wait deploy/actions-runner-controller --for condition=available --timeout 120s

# Ad-hoc wait for some time until actions-runner-controller's admission webhook gets ready
sleep 20
@@ -1,64 +0,0 @@
#!/usr/bin/env bash
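
# Assumed usage (inferred from the variables used below): set TEST_REPO, TEST_ORG,
# and/or TEST_ENTERPRISE to pick the runner scopes to deploy; USE_RUNNERSET=false
# switches from RunnerSets (the default here) to RunnerDeployments; OP defaults to
# apply and can be set to delete for teardown.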

set -e

OP=${OP:-apply}

RUNNER_LABEL=${RUNNER_LABEL:-self-hosted}

# See https://github.com/actions/actions-runner-controller/issues/2123
kubectl delete secret docker-config || :
kubectl create secret generic docker-config --from-file .dockerconfigjson=<(jq -M 'del(.aliases)' $HOME/.docker/config.json) --type=kubernetes.io/dockerconfigjson || :

cat acceptance/testdata/kubernetes_container_mode.envsubst.yaml | NAMESPACE=${RUNNER_NAMESPACE} envsubst | kubectl apply -f -

if [ -n "${TEST_REPO}" ]; then
  if [ "${USE_RUNNERSET}" != "false" ]; then
    cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_ORG= RUNNER_MIN_REPLICAS=${REPO_RUNNER_MIN_REPLICAS} NAME=repo-runnerset envsubst | kubectl ${OP} -f -
  else
    echo "Running ${OP} runnerdeployment and hra. Set USE_RUNNERSET if you want to deploy runnerset instead."
    cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_ORG= RUNNER_MIN_REPLICAS=${REPO_RUNNER_MIN_REPLICAS} NAME=repo-runnerdeploy envsubst | kubectl ${OP} -f -
  fi
else
  echo "Skipped ${OP} for runnerdeployment and hra. Set TEST_REPO to \"yourorg/yourrepo\" to deploy."
fi

if [ -n "${TEST_ORG}" ]; then
  if [ "${USE_RUNNERSET}" != "false" ]; then
    cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} NAME=org-runnerset envsubst | kubectl ${OP} -f -
  else
    cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} NAME=org-runnerdeploy envsubst | kubectl ${OP} -f -
  fi

  if [ -n "${TEST_ORG_GROUP}" ]; then
    if [ "${USE_RUNNERSET}" != "false" ]; then
      cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ORG_GROUP} NAME=orggroup-runnerset envsubst | kubectl ${OP} -f -
    else
      cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ENTERPRISE= TEST_REPO= RUNNER_MIN_REPLICAS=${ORG_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ORG_GROUP} NAME=orggroup-runnerdeploy envsubst | kubectl ${OP} -f -
    fi
  else
    echo "Skipped ${OP} on organizational group runnerdeployment. Set TEST_ORG_GROUP to ${OP}."
  fi
else
  echo "Skipped ${OP} on organizational runnerdeployment. Set TEST_ORG to ${OP}."
fi

if [ -n "${TEST_ENTERPRISE}" ]; then
  if [ "${USE_RUNNERSET}" != "false" ]; then
    cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} NAME=enterprise-runnerset envsubst | kubectl ${OP} -f -
  else
    cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} NAME=enterprise-runnerdeploy envsubst | kubectl ${OP} -f -
  fi

  if [ -n "${TEST_ENTERPRISE_GROUP}" ]; then
    if [ "${USE_RUNNERSET}" != "false" ]; then
      cat acceptance/testdata/runnerset.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ENTERPRISE_GROUP} NAME=enterprisegroup-runnerset envsubst | kubectl ${OP} -f -
    else
      cat acceptance/testdata/runnerdeploy.envsubst.yaml | TEST_ORG= TEST_REPO= RUNNER_MIN_REPLICAS=${ENTERPRISE_RUNNER_MIN_REPLICAS} TEST_GROUP=${TEST_ENTERPRISE_GROUP} NAME=enterprisegroup-runnerdeploy envsubst | kubectl ${OP} -f -
    fi
  else
    echo "Skipped ${OP} on enterprise group runnerdeployment. Set TEST_ENTERPRISE_GROUP to ${OP}."
  fi
else
  echo "Skipped ${OP} on enterprise runnerdeployment. Set TEST_ENTERPRISE to ${OP}."
fi
@@ -1,10 +0,0 @@
apiVersion: kind.x-k8s.io/v1alpha4
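# Assumption based on the acceptance values.yaml alongside this config: host port
# 31000 is forwarded so the github-webhook-server NodePort service (nodePort: 31000)
# is reachable from outside the kind cluster.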
kind: Cluster
nodes:
- role: control-plane
  extraPortMappings:
  - containerPort: 31000
    hostPort: 31000
    listenAddress: "0.0.0.0"
    protocol: tcp
#- role: worker
@@ -1,38 +0,0 @@
name: EKS Integration Tests
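# Assumed setup note: the empty env values below are deliberate placeholders; fill
# in IRSA_ROLE_ARN, ASSUME_ROLE_ARN, and AWS_REGION for your EKS cluster before
# dispatching this workflow.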

on:
  workflow_dispatch:

env:
  IRSA_ROLE_ARN:
  ASSUME_ROLE_ARN:
  AWS_REGION:

jobs:
  assume-role-in-runner-test:
    runs-on: ["self-hosted", "Linux"]
    steps:
      - name: Test aws-actions/configure-aws-credentials Action
        # https://github.com/aws-actions/configure-aws-credentials/releases/tag/v4.1.0
        uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722
        with:
          aws-region: ${{ env.AWS_REGION }}
          role-to-assume: ${{ env.ASSUME_ROLE_ARN }}
          role-duration-seconds: 900
  assume-role-in-container-test:
    runs-on: ["self-hosted", "Linux"]
    container:
      image: amazon/aws-cli
      env:
        AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
        AWS_ROLE_ARN: ${{ env.IRSA_ROLE_ARN }}
      volumes:
        - /var/run/secrets/eks.amazonaws.com/serviceaccount/token:/var/run/secrets/eks.amazonaws.com/serviceaccount/token
    steps:
      - name: Test aws-actions/configure-aws-credentials Action in container
        # https://github.com/aws-actions/configure-aws-credentials/releases/tag/v4.1.0
        uses: aws-actions/configure-aws-credentials@ececac1a45f3b08a01d2dd070d28d111c5fe6722
        with:
          aws-region: ${{ env.AWS_REGION }}
          role-to-assume: ${{ env.ASSUME_ROLE_ARN }}
          role-duration-seconds: 900
@@ -1,84 +0,0 @@
name: Runner Integration Tests
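# Assumed context: these jobs are expected to run on the self-hosted runners that
# the acceptance scripts deploy; each job exercises one common action (container
# jobs, setup-python, setup-node, setup-ruby, the python shell) end to end.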

on:
  workflow_dispatch:

env:
  ImageOS: ubuntu18 # Used by ruby/setup-ruby action | Update me for the runner OS version you are testing against

jobs:
  run-step-in-container-test:
    runs-on: ["self-hosted", "Linux"]
    container:
      image: alpine
    steps:
      - name: Test we are working in the container
        run: |
          if [[ $(sed -n '2p' < /etc/os-release | cut -d "=" -f2) != "alpine" ]]; then
            echo "::error ::Failed OS detection test, could not match /etc/os-release with alpine. Are we really running in the container?"
            echo "/etc/os-release below:"
            cat /etc/os-release
            exit 1
          fi
  setup-python-test:
    runs-on: ["self-hosted", "Linux"]
    steps:
      - name: Print native Python environment
        run: |
          which python
          python --version
      - uses: actions/setup-python@v2
        with:
          python-version: 3.9
      - name: Test actions/setup-python works
        run: |
          VERSION=$(python --version 2>&1 | cut -d ' ' -f2 | cut -d '.' -f1-2)
          if [[ $VERSION != '3.9' ]]; then
            echo "Python version detected : $(python --version 2>&1)"
            echo "::error ::Detected python failed setup version test, could not match version with version specified in the setup action"
            exit 1
          else
            echo "Python version detected : $(python --version 2>&1)"
          fi
  setup-node-test:
    runs-on: ["self-hosted", "Linux"]
    steps:
      - uses: actions/setup-node@v2
        with:
          node-version: "12"
      - name: Test actions/setup-node works
        run: |
          VERSION=$(node --version | cut -c 2- | cut -d '.' -f1)
          if [[ $VERSION != '12' ]]; then
            echo "Node version detected : $(node --version 2>&1)"
            echo "::error ::Detected node failed setup version test, could not match version with version specified in the setup action"
            exit 1
          else
            echo "Node version detected : $(node --version 2>&1)"
          fi
  setup-ruby-test:
    runs-on: ["self-hosted", "Linux"]
    steps:
      # https://github.com/ruby/setup-ruby/releases/tag/v1.227.0
      - uses: ruby/setup-ruby@1a615958ad9d422dd932dc1d5823942ee002799f
        with:
          ruby-version: 3.0
          bundler-cache: true
      - name: Test ruby/setup-ruby works
        run: |
          VERSION=$(ruby --version | cut -d ' ' -f2 | cut -d '.' -f1-2)
          if [[ $VERSION != '3.0' ]]; then
            echo "Ruby version detected : $(ruby --version 2>&1)"
            echo "::error ::Detected ruby failed setup version test, could not match version with version specified in the setup action"
            exit 1
          else
            echo "Ruby version detected : $(ruby --version 2>&1)"
          fi
  python-shell-test:
    runs-on: ["self-hosted", "Linux"]
    steps:
      - name: Test Python shell works
        run: |
          import os
          print(os.environ['PATH'])
        shell: python
@@ -1,86 +0,0 @@
# USAGE:
#   cat acceptance/testdata/kubernetes_container_mode.envsubst.yaml | NAMESPACE=default envsubst | kubectl apply -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: k8s-mode-runner
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "create", "delete"]
- apiGroups: [""]
  resources: ["pods/exec"]
  verbs: ["get", "create"]
- apiGroups: [""]
  resources: ["pods/log"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["batch"]
  resources: ["jobs"]
  verbs: ["get", "list", "create", "delete"]
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get", "list", "create", "delete"]
# Needed to report test success by creating a cm from within a workflow job step
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: runner-status-updater
rules:
- apiGroups: ["actions.summerwind.dev"]
  resources: ["runners/status"]
  verbs: ["get", "update", "patch"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: ${RUNNER_SERVICE_ACCOUNT_NAME}
  namespace: ${NAMESPACE}
---
# To verify it's working, try:
#   kubectl auth can-i --as system:serviceaccount:default:runner get pod
# If incomplete, workflows and jobs would fail with an error message like:
#   Error: Error: The Service account needs the following permissions [{"group":"","verbs":["get","list","create","delete"],"resource":"pods","subresource":""},{"group":"","verbs":["get","create"],"resource":"pods","subresource":"exec"},{"group":"","verbs":["get","list","watch"],"resource":"pods","subresource":"log"},{"group":"batch","verbs":["get","list","create","delete"],"resource":"jobs","subresource":""},{"group":"","verbs":["create","delete","get","list"],"resource":"secrets","subresource":""}] on the pod resource in the 'default' namespace. Please contact your self hosted runner administrator.
#   Error: Process completed with exit code 1.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: runner-k8s-mode-runner
  namespace: ${NAMESPACE}
subjects:
- kind: ServiceAccount
  name: ${RUNNER_SERVICE_ACCOUNT_NAME}
  namespace: ${NAMESPACE}
roleRef:
  kind: ClusterRole
  name: k8s-mode-runner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: runner-runner-status-updater
  namespace: ${NAMESPACE}
subjects:
- kind: ServiceAccount
  name: ${RUNNER_SERVICE_ACCOUNT_NAME}
  namespace: ${NAMESPACE}
roleRef:
  kind: ClusterRole
  name: runner-status-updater
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: org-runnerdeploy-runner-work-dir
  labels:
    content: org-runnerdeploy-runner-work-dir
provisioner: rancher.io/local-path
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
@@ -1,166 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ${NAME}-runner-work-dir
  labels:
    content: ${NAME}-runner-work-dir
provisioner: rancher.io/local-path
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ${NAME}-rootless-dind-work-dir
  labels:
    content: ${NAME}-rootless-dind-work-dir
provisioner: rancher.io/local-path
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerDeployment
metadata:
  name: ${NAME}
spec:
  # replicas: 1
  template:
    spec:
      enterprise: ${TEST_ENTERPRISE}
      group: ${TEST_GROUP}
      organization: ${TEST_ORG}
      repository: ${TEST_REPO}

      #
      # Custom runner image
      #
      image: ${RUNNER_NAME}:${RUNNER_TAG}
      imagePullPolicy: IfNotPresent

      ephemeral: ${TEST_EPHEMERAL}

      #
      # dockerd within runner container
      #
      ## Replace `mumoshu/actions-runner-dind:dev` with your dind image
      #dockerdWithinRunnerContainer: true
      #image: mumoshu/actions-runner-dind:dev
      dockerdWithinRunnerContainer: ${RUNNER_DOCKERD_WITHIN_RUNNER_CONTAINER}

      #
      # Set the MTU used by dockerd-managed network interfaces (including docker-build-ubuntu)
      #
      #dockerMTU: 1450

      # Runner group
      # labels:
      #   - "mylabel 1"
      #   - "mylabel 2"
      labels:
        - "${RUNNER_LABEL}"

      serviceAccountName: ${RUNNER_SERVICE_ACCOUNT_NAME}
      terminationGracePeriodSeconds: ${RUNNER_TERMINATION_GRACE_PERIOD_SECONDS}

      env:
        - name: RUNNER_GRACEFUL_STOP_TIMEOUT
          value: "${RUNNER_GRACEFUL_STOP_TIMEOUT}"
        - name: ROLLING_UPDATE_PHASE
          value: "${ROLLING_UPDATE_PHASE}"
        - name: ARC_DOCKER_MTU_PROPAGATION
          value: "true"
        # https://github.com/docker/docs/issues/8663
        - name: DOCKER_DEFAULT_ADDRESS_POOL_BASE
          value: "172.17.0.0/12"
        - name: DOCKER_DEFAULT_ADDRESS_POOL_SIZE
          value: "24"
        - name: WAIT_FOR_DOCKER_SECONDS
          value: "3"

      dockerMTU: 1400
      dockerEnv:
        - name: RUNNER_GRACEFUL_STOP_TIMEOUT
          value: "${RUNNER_GRACEFUL_STOP_TIMEOUT}"

      # Fix the following no space left errors with rootless-dind runners that can happen while running buildx build:
      # ------
      #  > [4/5] RUN go mod download:
      # ------
      # ERROR: failed to solve: failed to prepare yxsw8lv9hqnuafzlfta244l0z: mkdir /home/runner/.local/share/docker/vfs/dir/yxsw8lv9hqnuafzlfta244l0z/usr/local/go/src/cmd/compile/internal/types2/testdata: no space left on device
      # Error: Process completed with exit code 1.
      #
      volumeMounts:
        - name: rootless-dind-work-dir
          # Omit the /share/docker part of the /home/runner/.local/share/docker as
          # that part is created by dockerd.
          mountPath: /home/runner/.local
          readOnly: false
        # See https://github.com/actions/actions-runner-controller/issues/2123
        # Be sure to omit the "aliases" field from the config.json.
        # Otherwise you may encounter nasty errors like:
        #   $ docker build
        #   docker: 'buildx' is not a docker command.
        #   See 'docker --help'
        # due to the incompatibility between your host docker config.json and the runner environment.
        # That is, your host docker config.json might contain this:
        #   "aliases": {
        #     "builder": "buildx"
        #   }
        # And this results in the above error when the runner does not have buildx installed yet.
        - name: docker-config
          mountPath: /home/runner/.docker/config.json
          subPath: config.json
          readOnly: true
        - name: docker-config-root
          mountPath: /home/runner/.docker
      volumes:
        - name: rootless-dind-work-dir
          ephemeral:
            volumeClaimTemplate:
              spec:
                accessModes: [ "ReadWriteOnce" ]
                storageClassName: "${NAME}-rootless-dind-work-dir"
                resources:
                  requests:
                    storage: 3Gi
        - name: docker-config
          # Refer to .dockerconfigjson/.docker/config.json
          secret:
            secretName: docker-config
            items:
              - key: .dockerconfigjson
                path: config.json
        - name: docker-config-root
          emptyDir: {}

      #
      # Non-standard working directory
      #
      # workDir: "/"

      # # Uncomment the below to enable the kubernetes container mode
      # # See https://github.com/actions/actions-runner-controller#runner-with-k8s-jobs
      containerMode: ${RUNNER_CONTAINER_MODE}
      workVolumeClaimTemplate:
        accessModes:
          - ReadWriteOnce
        storageClassName: "${NAME}-runner-work-dir"
        resources:
          requests:
            storage: 10Gi
---
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: ${NAME}
spec:
  scaleTargetRef:
    name: ${NAME}
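  # Assumed semantics of this webhook-driven trigger: each matching workflow_job
  # event adds `amount` replicas, and that extra capacity expires after `duration`
  # (10m here) so stray events don't leak runners.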
  scaleUpTriggers:
  - githubEvent:
      workflowJob: {}
    amount: 1
    duration: "10m"
  minReplicas: ${RUNNER_MIN_REPLICAS}
  maxReplicas: 10
  scaleDownDelaySecondsAfterScaleOut: ${RUNNER_SCALE_DOWN_DELAY_SECONDS_AFTER_SCALE_OUT}
@@ -1,312 +0,0 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ${NAME}-runner-work-dir
  labels:
    content: ${NAME}-runner-work-dir
provisioner: rancher.io/local-path
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ${NAME}
  # In kind environments, the provider writes:
  #   /var/lib/docker/volumes/KIND_NODE_CONTAINER_VOL_ID/_data/local-path-provisioner/PV_NAME
  # It can be hundreds of gigabytes depending on what you cache in the test workflow. Beware to not encounter `no space left on device` errors!
  # If you did encounter no space errors try:
  #   docker system prune
  #   docker buildx prune #=> frees up /var/lib/docker/volumes/buildx_buildkit_container-builder0_state
  #   sudo rm -rf /var/lib/docker/volumes/KIND_NODE_CONTAINER_VOL_ID/_data/local-path-provisioner #=> frees up local-path-provisioner's data
provisioner: rancher.io/local-path
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ${NAME}-var-lib-docker
  labels:
    content: ${NAME}-var-lib-docker
provisioner: rancher.io/local-path
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ${NAME}-cache
  labels:
    content: ${NAME}-cache
provisioner: rancher.io/local-path
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ${NAME}-runner-tool-cache
  labels:
    content: ${NAME}-runner-tool-cache
provisioner: rancher.io/local-path
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ${NAME}-rootless-dind-work-dir
  labels:
    content: ${NAME}-rootless-dind-work-dir
provisioner: rancher.io/local-path
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: actions.summerwind.dev/v1alpha1
kind: RunnerSet
metadata:
  name: ${NAME}
spec:
  # MANDATORY because it is based on StatefulSet: Results in the below error when omitted:
  #   missing required field "selector" in dev.summerwind.actions.v1alpha1.RunnerSet.spec
  selector:
    matchLabels:
      app: ${NAME}

  # MANDATORY because it is based on StatefulSet: Results in the below error when omitted:
  #   missing required field "serviceName" in dev.summerwind.actions.v1alpha1.RunnerSet.spec
  serviceName: ${NAME}

  #replicas: 1

  # From my limited testing, `ephemeral: true` is more reliable.
  # Sometimes, updating already deployed runners from `ephemeral: false` to `ephemeral: true` seems to
  # result in queued jobs hanging forever.
  ephemeral: ${TEST_EPHEMERAL}

  enterprise: ${TEST_ENTERPRISE}
  group: ${TEST_GROUP}
  organization: ${TEST_ORG}
  repository: ${TEST_REPO}

  #
  # Custom runner image
  #
  image: ${RUNNER_NAME}:${RUNNER_TAG}

  #
  # dockerd within runner container
  #
  ## Replace `mumoshu/actions-runner-dind:dev` with your dind image
  #dockerdWithinRunnerContainer: true
  dockerdWithinRunnerContainer: ${RUNNER_DOCKERD_WITHIN_RUNNER_CONTAINER}

  #
  # Set the MTU used by dockerd-managed network interfaces (including docker-build-ubuntu)
  #
  #dockerMTU: 1450

  # Runner group
  # labels:
  #   - "mylabel 1"
  #   - "mylabel 2"
  labels:
    - "${RUNNER_LABEL}"

  #
  # Non-standard working directory
  #
  # workDir: "/"

  template:
    metadata:
      labels:
        app: ${NAME}
    spec:
      serviceAccountName: ${RUNNER_SERVICE_ACCOUNT_NAME}
      terminationGracePeriodSeconds: ${RUNNER_TERMINATION_GRACE_PERIOD_SECONDS}
      containers:
      # # Uncomment only when non-dind-runner / you're using docker sidecar
      # - name: docker
      #   # Image is required for the dind sidecar definition within RunnerSet spec
      #   image: "docker:dind"
      #   env:
      #   - name: RUNNER_GRACEFUL_STOP_TIMEOUT
      #     value: "${RUNNER_GRACEFUL_STOP_TIMEOUT}"
      - name: runner
        imagePullPolicy: IfNotPresent
        env:
        - name: RUNNER_GRACEFUL_STOP_TIMEOUT
          value: "${RUNNER_GRACEFUL_STOP_TIMEOUT}"
        - name: RUNNER_FEATURE_FLAG_EPHEMERAL
          value: "${RUNNER_FEATURE_FLAG_EPHEMERAL}"
        - name: GOMODCACHE
          value: "/home/runner/.cache/go-mod"
        - name: ROLLING_UPDATE_PHASE
          value: "${ROLLING_UPDATE_PHASE}"
        # PV-backed runner work dir
        volumeMounts:
        # Comment out the ephemeral work volume if you're going to test the kubernetes container mode.
        # The volume and mount with the same names will be created by workVolumeClaimTemplate and the kubernetes container mode support.
        # - name: work
        #   mountPath: /runner/_work
        # Cache docker image layers, in case dockerdWithinRunnerContainer=true
        - name: var-lib-docker
          mountPath: /var/lib/docker
        # Cache go modules and builds
        # - name: gocache
        #   # Run `goenv | grep GOCACHE` to verify the path is correct for your env
        #   mountPath: /home/runner/.cache/go-build
        # - name: gomodcache
        #   # Run `goenv | grep GOMODCACHE` to verify the path is correct for your env
        #   # mountPath: /home/runner/go/pkg/mod
        - name: cache
          # go: could not create module cache: stat /home/runner/.cache/go-mod: permission denied
          mountPath: "/home/runner/.cache"
        - name: runner-tool-cache
          # This corresponds to our runner image's default setting of RUNNER_TOOL_CACHE=/opt/hostedtoolcache.
          #
          # In case you customize the envvar in both runner and docker containers of the runner pod spec,
          # You'd need to change this mountPath accordingly.
          #
          # The tool cache directory is defined in actions/toolkit's tool-cache module:
          #   https://github.com/actions/toolkit/blob/2f164000dcd42fb08287824a3bc3030dbed33687/packages/tool-cache/src/tool-cache.ts#L621-L638
          #
          # Many setup-* actions like setup-go utilize the tool-cache module to download and cache installed binaries:
          #   https://github.com/actions/setup-go/blob/56a61c9834b4a4950dbbf4740af0b8a98c73b768/src/installer.ts#L144
          mountPath: "/opt/hostedtoolcache"
        # Valid only when dockerdWithinRunnerContainer=false
        # - name: docker
        #   # PV-backed runner work dir
        #   volumeMounts:
        #   - name: work
        #     mountPath: /runner/_work
        #   # Cache docker image layers, in case dockerdWithinRunnerContainer=false
        #   - name: var-lib-docker
        #     mountPath: /var/lib/docker
        #   # image: mumoshu/actions-runner-dind:dev

        # # For buildx cache
        # - name: cache
        #   mountPath: "/home/runner/.cache"

        # For fixing no space left error on rootless dind runner
        - name: rootless-dind-work-dir
          # Omit the /share/docker part of the /home/runner/.local/share/docker as
          # that part is created by dockerd.
          mountPath: /home/runner/.local
          readOnly: false

      # Comment out the ephemeral work volume if you're going to test the kubernetes container mode
      # volumes:
      # - name: work
      #   ephemeral:
      #     volumeClaimTemplate:
      #       spec:
      #         accessModes:
      #         - ReadWriteOnce
      #         storageClassName: "${NAME}-runner-work-dir"
      #         resources:
      #           requests:
      #             storage: 10Gi

      # Fix the following no space left errors with rootless-dind runners that can happen while running buildx build:
      # ------
      #  > [4/5] RUN go mod download:
      # ------
      # ERROR: failed to solve: failed to prepare yxsw8lv9hqnuafzlfta244l0z: mkdir /home/runner/.local/share/docker/vfs/dir/yxsw8lv9hqnuafzlfta244l0z/usr/local/go/src/cmd/compile/internal/types2/testdata: no space left on device
      # Error: Process completed with exit code 1.
      #
      volumes:
      - name: rootless-dind-work-dir
        ephemeral:
          volumeClaimTemplate:
            spec:
              accessModes: [ "ReadWriteOnce" ]
              storageClassName: "${NAME}-rootless-dind-work-dir"
              resources:
                requests:
                  storage: 3Gi
  volumeClaimTemplates:
  - metadata:
      name: vol1
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 10Mi
      storageClassName: ${NAME}
      ## Dunno which provider supports auto-provisioning with selector.
      ## At least the rancher local path provider stopped with:
      ##   waiting for a volume to be created, either by external provisioner "rancher.io/local-path" or manually created by system administrator
      # selector:
      #   matchLabels:
      #     runnerset-volume-id: ${NAME}-vol1
  - metadata:
      name: vol2
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 10Mi
      storageClassName: ${NAME}
      # selector:
      #   matchLabels:
      #     runnerset-volume-id: ${NAME}-vol2
  - metadata:
      name: var-lib-docker
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 10Mi
      storageClassName: ${NAME}-var-lib-docker
  - metadata:
      name: cache
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 10Mi
      storageClassName: ${NAME}-cache
  - metadata:
      name: runner-tool-cache
      # It turns out labels don't distinguish PVs across PVCs and the
      # end result is PVs are reused by wrong PVCs.
      # The correct way seems to be to differentiate storage class per pvc template.
      # labels:
      #   id: runner-tool-cache
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 10Mi
      storageClassName: ${NAME}-runner-tool-cache
---
apiVersion: actions.summerwind.dev/v1alpha1
kind: HorizontalRunnerAutoscaler
metadata:
  name: ${NAME}
spec:
  scaleTargetRef:
    kind: RunnerSet
    name: ${NAME}
  scaleUpTriggers:
  - githubEvent:
      workflowJob: {}
    amount: 1
    duration: "10m"
  minReplicas: ${RUNNER_MIN_REPLICAS}
  maxReplicas: 10
  scaleDownDelaySecondsAfterScaleOut: ${RUNNER_SCALE_DOWN_DELAY_SECONDS_AFTER_SCALE_OUT}
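  # Assumed semantics of the metric below: the desired replica count is multiplied
  # by scaleUpFactor when more than 75% of runners are busy and by scaleDownFactor
  # when fewer than 25% are, clamped to minReplicas..maxReplicas.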
  # Comment out the whole metrics if you'd like to solely test webhook-based scaling
  metrics:
  - type: PercentageRunnersBusy
    scaleUpThreshold: '0.75'
    scaleDownThreshold: '0.25'
    scaleUpFactor: '2'
    scaleDownFactor: '0.5'
@@ -1,55 +0,0 @@
# Set actions-runner-controller settings for testing
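# Assumption tying this file to the kind cluster config in this directory: the
# NodePort 31000 below matches the kind extraPortMappings (and the cloudflared
# tunnel ingress), so GitHub can reach the webhook server from outside the
# cluster; the actions metrics server is exposed on 31001.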
logLevel: "-4"
imagePullSecrets: []
image:
  # This needs to be an empty array rather than a single-item array with empty name.
  # Otherwise you end up with the following error on helm-upgrade:
  #   Error: UPGRADE FAILED: failed to create patch: map: map[] does not contain declared merge key: name && failed to create patch: map: map[] does not contain declared merge key: name
  actionsRunnerImagePullSecrets: []
runner:
  statusUpdateHook:
    enabled: true
rbac:
  allowGrantingKubernetesContainerModePermissions: true
githubWebhookServer:
  imagePullSecrets: []
  logLevel: "-4"
  enabled: true
  labels: {}
  replicaCount: 1
  syncPeriod: 10m
  useRunnerGroupsVisibility: true
  secret:
    enabled: true
    # create: true
    name: "github-webhook-server"
    ### GitHub Webhook Configuration
    #github_webhook_secret_token: ""
  service:
    type: NodePort
    ports:
      - port: 80
        targetPort: http
        protocol: TCP
        name: http
        nodePort: 31000
actionsMetricsServer:
  imagePullSecrets: []
  logLevel: "-4"
  enabled: true
  labels: {}
  replicaCount: 1
  secret:
    enabled: true
    # create: true
    name: "actions-metrics-server"
    ### GitHub Webhook Configuration
    #github_webhook_secret_token: ""
  service:
    type: NodePort
    ports:
      - port: 80
        targetPort: http
        protocol: TCP
        name: http
        nodePort: 31001