## githubConfigUrl is the GitHub URL where you want to configure runners
## ex: https://github.com/myorg/myrepo or https://github.com/myorg
#githubConfigUrl: ""
## githubConfigSecret is the Kubernetes secret to use when authenticating with the GitHub API.
## You can choose to use a GitHub App or a PAT token
#githubConfigSecret:
  ### GitHub Apps Configuration
  ## NOTE: IDs MUST be strings, use quotes
  #github_app_id: ""
  #github_app_installation_id: ""
  #github_app_private_key: |
  ### GitHub PAT Configuration
  # github_token: ""
## If you have a pre-defined Kubernetes secret in the same namespace where the gha-runner-scale-set is going to be deployed,
## you can also reference it via `githubConfigSecret: pre-defined-secret`.
## You need to make sure your pre-defined secret has all the required secret data set properly.
## For a pre-defined secret using a GitHub PAT, the secret needs to be created like this:
## > kubectl create secret generic pre-defined-secret --namespace=my_namespace --from-literal=github_token='ghp_your_pat'
## For a pre-defined secret using a GitHub App, the secret needs to be created like this:
## > kubectl create secret generic pre-defined-secret --namespace=my_namespace --from-literal=github_app_id=123456 --from-literal=github_app_installation_id=654321 --from-literal=github_app_private_key='-----BEGIN CERTIFICATE-----*******'
# githubConfigSecret: pre-defined-secret
## proxy can be used to define proxy settings that will be used by the
## controller, the listener and the runner of this scale set.
#
# proxy:
#   http:
#     url: http://proxy.com:1234
#     credentialSecretRef: proxy-auth # a secret with `username` and `password` keys
#   https:
#     url: http://proxy.com:1234
#     credentialSecretRef: proxy-auth # a secret with `username` and `password` keys
#   noProxy:
#     - example.com
#     - example.org
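## The credential secret referenced above can be created like this (a sketch;
## the namespace, username and password are placeholders):
## > kubectl create secret generic proxy-auth --namespace=my_namespace --from-literal=username='proxy_user' --from-literal=password='proxy_password'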
## maxRunners is the max number of runners the autoscaling runner set will scale up to.
# maxRunners: 5
## minRunners is the min number of idle runners. The target number of runners created will be
## calculated as the sum of minRunners and the number of jobs assigned to the scale set.
# minRunners: 0
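## e.g. with minRunners: 2 and 5 jobs assigned to the scale set, the target is 2 + 5 = 7 runners (capped by maxRunners)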
# runnerGroup: "default"
## name of the runner scale set to create. Defaults to the Helm release name
# runnerScaleSetName: ""
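## Workflow jobs target this scale set by its name via runs-on. A minimal
## sketch, assuming the installed scale set is named "arc-runner-set":
#
# jobs:
#   build:
#     runs-on: arc-runner-set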
## A self-signed CA certificate for communication with the GitHub server can be
## provided using a config map key selector. If `runnerMountPath` is set, for
## each runner pod ARC will:
## - create a `github-server-tls-cert` volume containing the certificate
## specified in `certificateFrom`
## - mount that volume on path `runnerMountPath`/{certificate name}
## - set NODE_EXTRA_CA_CERTS environment variable to that same path
## - set RUNNER_UPDATE_CA_CERTS environment variable to "1" (as of version
## 2.303.0 this will instruct the runner to reload certificates on the host)
##
## If any of the above has already been set by the user in the runner pod
## template, ARC will respect those values and not overwrite them.
## Example configuration:
#
# githubServerTLS:
#   certificateFrom:
#     configMapKeyRef:
#       name: config-map-name
#       key: ca.crt
#   runnerMountPath: /usr/local/share/ca-certificates/
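## The config map referenced above can be created from a CA certificate file
## like this (a sketch; the names match the example above, the file path is a placeholder):
## > kubectl create configmap config-map-name --namespace=my_namespace --from-file=ca.crt=./ca.crt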
## containerMode is an object that provides out-of-the-box configuration
## for dind and kubernetes mode. The template will be modified as documented
## under the template object below.
##
## If any customization is required for dind or kubernetes mode, containerMode should remain
## empty, and the configuration should be applied to the template instead.
containerMode:
  type: "kubernetes" ## type can be set to dind or kubernetes
  ## the following is required when containerMode.type=kubernetes
  kubernetesModeWorkVolumeClaim:
    accessModes: ["ReadWriteMany"]
    ## For local testing, https://github.com/openebs/dynamic-localpv-provisioner/blob/develop/docs/quickstart.md
    ## can provide dynamically provisioned volumes with storageClassName: openebs-hostpath
    storageClassName: "github-azurefile-premium" # or "github-azurefile" for Standard_LRS
    resources:
      requests:
        storage: 100Gi # 100Gi minimum for the Premium_LRS class; any size when using the Standard_LRS "github-azurefile" storage class
  # kubernetesModeServiceAccount:
  #   annotations: ""
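## A sketch of an AKS StorageClass that could back "github-azurefile-premium",
## assuming the Azure Files CSI driver (file.csi.azure.com); the uid/gid mount
## options match the runner user (1001) and docker group (123) in the default runner image:
#
# apiVersion: storage.k8s.io/v1
# kind: StorageClass
# metadata:
#   name: github-azurefile-premium
# provisioner: file.csi.azure.com
# parameters:
#   skuName: Premium_LRS
# reclaimPolicy: Delete
# allowVolumeExpansion: true
# mountOptions:
#   - uid=1001
#   - gid=123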
## listenerTemplate is the PodSpec for each listener Pod
## For reference: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec
# listenerTemplate:
#   spec:
#     containers:
#       # Use this section to append additional configuration to the listener container.
#       # If you change the name of the container, the configuration will not be applied to the listener,
#       # and it will be treated as a side-car container.
#       - name: listener
#         securityContext:
#           runAsUser: 1000
#       # Use this section to add the configuration of a side-car container.
#       # Comment it out or remove it if you don't need it.
#       # Spec for this container will be applied as is without any modifications.
#       - name: side-car
#         image: example-sidecar
## template is the PodSpec for each runner Pod
## For reference: https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec
template:
  ## template.spec will be modified if you change the container mode
  ## with containerMode.type=dind, we will populate the template.spec with the following pod spec
  ## template:
  ##   spec:
  ##     initContainers:
  ##       - name: init-dind-externals
  ##         image: ghcr.io/actions/actions-runner:latest
  ##         command: ["cp", "-r", "-v", "/home/runner/externals/.", "/home/runner/tmpDir/"]
  ##         volumeMounts:
  ##           - name: dind-externals
  ##             mountPath: /home/runner/tmpDir
  ##     containers:
  ##       - name: runner
  ##         image: ghcr.io/actions/actions-runner:latest
  ##         command: ["/home/runner/run.sh"]
  ##         env:
  ##           - name: DOCKER_HOST
  ##             value: unix:///var/run/docker.sock
  ##         volumeMounts:
  ##           - name: work
  ##             mountPath: /home/runner/_work
  ##           - name: dind-sock
  ##             mountPath: /var/run
  ##       - name: dind
  ##         image: docker:dind
  ##         args:
  ##           - dockerd
  ##           - --host=unix:///var/run/docker.sock
  ##           - --group=$(DOCKER_GROUP_GID)
  ##         env:
  ##           - name: DOCKER_GROUP_GID
  ##             value: "123"
  ##         securityContext:
  ##           privileged: true
  ##         volumeMounts:
  ##           - name: work
  ##             mountPath: /home/runner/_work
  ##           - name: dind-sock
  ##             mountPath: /var/run
  ##           - name: dind-externals
  ##             mountPath: /home/runner/externals
  ##     volumes:
  ##       - name: work
  ##         emptyDir: {}
  ##       - name: dind-sock
  ##         emptyDir: {}
  ##       - name: dind-externals
  ##         emptyDir: {}
  ######################################################################################################
  ## with containerMode.type=kubernetes, we will populate the template.spec with the following pod spec
  ## template:
  ##   spec:
  ##     containers:
  ##       - name: runner
  ##         image: ghcr.io/actions/actions-runner:latest
  ##         command: ["/home/runner/run.sh"]
  ##         env:
  ##           - name: ACTIONS_RUNNER_CONTAINER_HOOKS
  ##             value: /home/runner/k8s/index.js
  ##           - name: ACTIONS_RUNNER_POD_NAME
  ##             valueFrom:
  ##               fieldRef:
  ##                 fieldPath: metadata.name
  ##           - name: ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER
  ##             value: "true"
  ##         volumeMounts:
  ##           - name: work
  ##             mountPath: /home/runner/_work
  ##     volumes:
  ##       - name: work
  ##         ephemeral:
  ##           volumeClaimTemplate:
  ##             spec:
  ##               accessModes: ["ReadWriteOnce"]
  ##               storageClassName: "local-path"
  ##               resources:
  ##                 requests:
  ##                   storage: 1Gi
  spec:
    securityContext:
      fsGroup: 123 # GID of the docker group in the default GitHub runner image
    containers:
      - name: runner
        image: ghcr.io/actions/actions-runner:latest
        command: ["/home/runner/run.sh"]
        env:
          - name: ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER
            value: "false"
          - name: ACTIONS_RUNNER_CONTAINER_HOOK_TEMPLATE
            value: "/home/runner/container-config/container-podspec.yaml"
        volumeMounts:
          - name: "container-podspec-volume"
            mountPath: "/home/runner/container-config"
            readOnly: true
          - name: azurefile
            mountPath: "/home/runner/.nuget/"
    volumes:
      - name: "container-podspec-volume"
        configMap:
          name: hook-extension
      - name: azurefile
        persistentVolumeClaim:
          claimName: azurefile
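## ACTIONS_RUNNER_CONTAINER_HOOK_TEMPLATE above points at a pod template that the
## kubernetes-mode container hooks merge into each job pod. A minimal sketch of the
## "hook-extension" config map mounted above (the namespace and resource values are
## assumptions; "$job" is the hooks' placeholder for the job container name):
#
# apiVersion: v1
# kind: ConfigMap
# metadata:
#   name: hook-extension
#   namespace: arc-runners # assumption: the namespace this scale set is deployed to
# data:
#   container-podspec.yaml: |
#     spec:
#       containers:
#         - name: "$job"
#           resources:
#             requests:
#               cpu: "1"
#               memory: "2Gi"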
## Optional controller service account that needs to have the required Role and RoleBinding
## to operate this gha-runner-scale-set installation.
## The Helm chart will try to find the controller deployment and its service account at installation time.
## In case the Helm chart can't find the right service account, you can explicitly pass in the following values
## to help it finish the RoleBinding with the right service account.
## Note: if your controller is installed to only watch a single namespace, you have to pass these values explicitly.
#controllerServiceAccount:
#  namespace: arc-systems
#  name: "arc-runner-set-gha-sa"