1 | #!/bin/bash |
---|
2 | # |
---|
3 | # No changes on next lines are needed |
---|
4 | # |
---|
5 | # Between |
---|
6 | # |
---|
7 | # Begin ########################################### |
---|
8 | # and |
---|
9 | # End ########################################### |
---|
10 | # |
---|
11 | # |
---|
12 | OPTION=`echo ${1:-'--buildimages=no'}| sed -e 's/./\l&/g'` |
---|
13 | case ${OPTION} in |
---|
14 | #( |
---|
15 | --buildimages=no | buildimages=no |\ |
---|
16 | --buildimages=yes | buildimages=yes ) |
---|
17 | TOBUILD=`IFS=\= ; set -- ${OPTION} ; echo ${2}` ;export TOBUILD |
---|
18 | ;; |
---|
19 | #( |
---|
20 | * ) |
---|
21 | echo Usage: `basename ${0}` with building images --buildimages=yes |
---|
22 | echo ' without omit option or use --buildimages=no' |
---|
23 | exit 1 |
---|
24 | ;; |
---|
25 | esac |
---|
26 | # |
---|
27 | BASE_WORKING_DIR="${BASE_WORKING_DIR:-`dirname \`pwd\``}"/to_container ; export BASE_WORKING_DIR |
---|
28 | # |
---|
29 | # |
---|
30 | # Louis Mulder September 2020 |
---|
31 | # |
---|
32 | # Deploy a Kubernetes base VDI environment on cluster |
---|
33 | # Below there are some variables to fill-in or to change |
---|
34 | # depending on your situation. |
---|
35 | # |
---|
36 | # Script is provided as it is. |
---|
37 | # |
---|
38 | # Check if you are running as root on the master |
---|
39 | # |
---|
40 | # Xpra is released under the terms of the GNU GPL v2, or, at your option, any |
---|
41 | # later version. See the file COPYING for details. |
---|
42 | # |
---|
43 | MASTER_IP_ADDR="`exec 2> /dev/null; kubectl cluster-info|grep -i master| sed -e 's%^.*//%%' -e 's%:.*$%%'`" |
---|
44 | # |
---|
45 | if ! ip a | grep "${MASTER_IP_ADDR}" 1> /dev/null 2>&1 || \ |
---|
46 | [ `id | sed -e 's/^.*=//' -e 's/(.*$//'` != 0 ] |
---|
47 | then |
---|
48 | MASTER_NAME=`set -- \`getent hosts ${MASTER_IP_ADDR}\` ; echo ${2}` |
---|
49 | echo "You must run `basename ${0}` as root on server ${MASTER_NAME:-'???'} with ip ${MASTER_IP_ADDR}" |
---|
50 | exit 1 |
---|
51 | fi |
---|
52 | DISTNAME='vdi-dist' ; export DISTNAME |
---|
53 | OLDIFS="${IFS}" ; export OLDIFS |
---|
54 | PROG=${0} |
---|
55 | BPROG=`basename ${PROG}` |
---|
56 | DPROG=`dirname ${PROG}` |
---|
57 | ABS_PATH=`(cd ${DPROG}; pwd)` |
---|
58 | export PROG |
---|
59 | case ${ABS_PATH} in |
---|
60 | #( |
---|
61 | /*/${DISTNAME}/* ) |
---|
62 | ( echo "You must first copy the content of `dirname ${ABS_PATH}`" |
---|
63 | echo "to new directory ending in directory-name which will be used as a" |
---|
64 | echo "new NAMESPACE name. Then go to this directory/deploy and" |
---|
65 | echo "adjust xpra-proxy.sh, and run it with ./xpra-proxy.sh from this position" |
---|
66 | ) 1>&2 |
---|
67 | exit 1 |
---|
68 | ;; |
---|
69 | esac |
---|
70 | # |
---|
71 | # |
---|
72 | # |
---|
73 | # Xpra-proxy server will be exposed as service |
---|
74 | # And will accessable by external ip-addresses |
---|
75 | # |
---|
76 | # Be sure that this address is configured as a VIP on a worker/master |
---|
77 | # For HA use for example keepalived. |
---|
78 | # |
---|
79 | # |
---|
80 | EXTERNALIPS='10.7.6.237' # Fill in the addresses separated by a space |
---|
81 | # |
---|
82 | # Ip address(es) where the service xpra-proxy can be accessed |
---|
83 | # from outside the cluster. |
---|
84 | # Portnumber on the outside is ${PROXY_PORT} |
---|
85 | # Proxy instance will be exposed internally with 8${PROXY_PORT} |
---|
86 | # |
---|
87 | PROXY_PORT=443 ;export PROXY_PORT |
---|
88 | # |
---|
89 | # Variables specified above the line OLDENV="`set | sed -e '.... |
---|
90 | # will NOT included in the ../etc/xpra-vars.sh profile !! |
---|
91 | # If you want to add variables which must be used in the scripts etc. |
---|
92 | # in the session-containers or proxy-container(s) specify them after |
---|
93 | # the line # End ########################################### |
---|
94 | # |
---|
95 | # Don't remove next 3 lines !!!! |
---|
96 | OLDENV="`set | sed -e 's/=.*$/=/' -e '/^'"'"'/d'\ |
---|
97 | -e 's%^%/^%' -e 's%$%/d%'`" # Don't remove this lines !!!! |
---|
98 | export OLDENV |
---|
99 | # |
---|
100 | # End ########################################### |
---|
101 | # |
---|
102 | # BEGINVARS Don't remove this line !!!! |
---|
103 | # Inventory where the user sessions may run. |
---|
104 | # |
---|
105 | # If XPRA_WORKERS is empty or has the value all (lower or uppercase) |
---|
106 | # No labeling will take place and session(s) may run everywhere. |
---|
107 | # if it contains a list of servernames these servers will be labeled |
---|
108 | # with xpra-worker=${NAMESPACE} |
---|
109 | # |
---|
110 | XPRA_WORKERS='' |
---|
111 | # |
---|
112 | export XPRA_WORKERS |
---|
113 | # |
---|
114 | XPRA_LOCAL_TIME=/usr/share/zoneinfo/Europe/Amsterdam |
---|
115 | # |
---|
116 | # localtime zone setting |
---|
117 | # Must be an absolute path to the timezone |
---|
118 | # |
---|
119 | # If empty default zone = /usr/share/zoneinfo/Europe/Amsterdam |
---|
120 | # |
---|
121 | export XPRA_LOCAL_TIME |
---|
122 | # |
---|
123 | # Default namespace will be derived off the current directory, remove 'deploy' and |
---|
124 | # take the basename of the result. |
---|
125 | # |
---|
126 | # Proxy Ingress server(s) will run in the namespace ingress-${NAMESPACE} |
---|
127 | # |
---|
128 | NAMESPACE="${NAMESPACE:-`basename \`dirname \\\`pwd\\\`\``}" ; export NAMESPACE |
---|
129 | # |
---|
130 | # |
---|
131 | XPRA_DEPLOYNAME='xpra-proxy'; export XPRA_DEPLOYNAME |
---|
132 | # |
---|
133 | # TOPDIRS and NFS server configuration |
---|
134 | # |
---|
135 | # Shared via a mount on the underlaying server (worker) |
---|
136 | # |
---|
137 | # BASE_WORKING_DIR == Full path of shared storage on the servers |
---|
138 | # |
---|
139 | # XPRA_TOPDIR_INT == mountpath in the pod/container of the distribution |
---|
140 | # |
---|
141 | SRV_NFS_SERVER='kubemaster01.vdi.xpra.demo' ; export SRV_NFS_SERVER |
---|
142 | SRV_NFS_SERVER_INT="${XPRA_SRV_NFS_SERVER_INT:-/srv}" ; export SRV_NFS_SERVER_INT |
---|
143 | SRV_NFS_SERVER_EXT="/export/data/srv/${NAMESPACE}/to_container" ; export SRV_NFS_SERVER_EXT |
---|
144 | # |
---|
145 | XPRA_TOPDIR_INT=${SRV_NFS_SERVER_INT:-/srv} ; export XPRA_TOPDIR_INT |
---|
146 | XPRA_TOPDIR_EXT=${SRV_NFS_SERVER_EXT:-/srv} ; export XPRA_TOPDIR_EXT |
---|
147 | # |
---|
148 | # If variable XPRA_DATALOCK_NFS_SERVER is specified and has the value of servername which is exporting |
---|
149 | # a directory it will be mounted under /incoming as readonly on a session pod which has also |
---|
150 | # shared homedir is mounted. (sessiontype with a prefix mhd) |
---|
151 | # |
---|
152 | # XPRA_DATALOCK_NFS_SERVER= ; export XPRA_DATALOCK_NFS_SERVER # No datalock server |
---|
153 | # |
---|
154 | DATALOCK_NFS_SERVER='vdi-worker01.vdi.xpra.demo' ; export DATALOCK_NFS_SERVER |
---|
155 | DATALOCK_NFS_SERVER_EXT='/export/home/outgoing' ; export DATALOCK_NFS_SERVER_EXT |
---|
156 | DATALOCK_NFS_SERVER_INT='/incoming' ; export DATALOCK_NFS_SERVER_EXT |
---|
157 | DATALOCK_NFS_SERVER_OPTIONS='readOnly: true'; export DATALOCK_NFS_SERVER_OPTIONS |
---|
158 | # |
---|
159 | # Proxy may create user directories |
---|
160 | # In session set it to readonly true |
---|
161 | # |
---|
162 | # If session is specified as mhd-XXXXX. XXXX stands for example desktop, seamless etc. |
---|
163 | # A shared directory/storage will be mounted in the pod and will be used as |
---|
164 | # placeholder for persitent homedirs. |
---|
165 | # |
---|
166 | # In case high-secured is desired remove all the sessions startups begginning with mhd- |
---|
167 | # out of the directory ../session_types (seen from the current ../deploy directory. |
---|
168 | # |
---|
169 | # |
---|
170 | HOME_NFS_SERVER='vdi-worker01.vdi.xpra.demo' ; export HOME_NFS_SERVER |
---|
171 | HOME_NFS_SERVER_EXT="${HOME_NFS_SERVER_EXT:-/export/home}" ;export HOME_NFS_SERVER_EXT |
---|
172 | HOME_NFS_SERVER_INT="${HOME_NFS_SERVER_INT:-/home}" ;export HOME_NFS_SERVER_INT |
---|
173 | HOME_NFS_SERVER_EXT_OPT_NFS='readOnly: false' export HOME_NFS_SERVER_EXT_OPT_NFS |
---|
174 | # |
---|
175 | # If user activate session with a prefix 'mhd' and it is permitted to mount |
---|
176 | # his/her global homedir set XPRA_USER_MHD to Y |
---|
177 | # |
---|
178 | XPRA_USER_MHD=Y ;export XPRA_USER_MHD |
---|
179 | # |
---|
180 | # |
---|
181 | # See also comment around the variable XPRA_SCRATCH_EXT |
---|
182 | # |
---|
183 | if [ "${XPRA_SCRATCH_EXT}" != '' -o "${XPRA_SCRATCH_EXT}" != 'N' -o "${XPRA_SCRATCH_EXT}" != 'n' ] |
---|
184 | then |
---|
185 | SHRDTMP_NFS_SERVER='kubemaster01.vdi.xpra.demo' ; export SHRDTMP_NFS_SERVER |
---|
186 | SHRDTMP_NFS_SERVER_INT="/shrd-tmp" ; export SHRDTMP_NFS_SERVER_INT |
---|
187 | SHRDTMP_NFS_SERVER_EXT="/export/data/srv/${NAMESPACE}/to_container/scratch/tmp" ; export SHRDTMP_NFS_SERVER_EXT |
---|
188 | SHRDTMP_NFS_SERVER_OPTIONS='readOnly: false'; export SHRDTMP_NFS_SERVER_OPTIONS |
---|
189 | # |
---|
190 | fi |
---|
191 | # |
---|
192 | SRVETC_NFS_SERVER='kubemaster01.vdi.xpra.demo' ; export SRVETC_NFS_SERVER |
---|
193 | SRVETC_NFS_SERVER_INT="/srv/etc" ; export SRVETC_NFS_SERVER_INT |
---|
194 | SRVETC_NFS_SERVER_EXT="/export/data/srv/${NAMESPACE}/to_container/etc" ; export SRVETC_NFS_SERVER_EXT |
---|
195 | SRVETC_NFS_SERVER_OPTIONS='readOnly: true'; export SRVETC_NFS_SERVER_OPTIONS |
---|
196 | # |
---|
197 | # |
---|
198 | SRVBIN_NFS_SERVER='kubemaster01.vdi.xpra.demo' ; export SRVBIN_NFS_SERVER |
---|
199 | SRVBIN_NFS_SERVER_INT="/srv/bin" ; export SRVBIN_NFS_SERVER_INT |
---|
200 | SRVBIN_NFS_SERVER_EXT="/export/data/srv/${NAMESPACE}/to_container/bin" ; export SRVBIN_NFS_SERVER_EXT |
---|
201 | SRVBIN_NFS_SERVER_OPTIONS='readOnly: true'; export SRVBIN_NFS_SERVER_OPTIONS |
---|
202 | # |
---|
203 | # |
---|
204 | STAGING_NFS_SERVER='vdi-worker01.vdi.xpra.demo' ; export STAGING_NFS_SERVER |
---|
205 | STAGING_NFS_SERVER_INT="/staging" ; export STAGING_NFS_SERVER_INT |
---|
206 | STAGING_NFS_SERVER_EXT="/export/home/staging" ; export STAGING_NFS_SERVER_EXT |
---|
207 | STAGING_NFS_SERVER_OPTIONS='readOnly: true'; export STAGING_NFS_SERVER_OPTIONS |
---|
208 | # |
---|
209 | # Don't expose staging in session-pods !!! |
---|
210 | # |
---|
211 | # Image registry server |
---|
212 | # Format FQDN:PORTNUMBER |
---|
213 | # |
---|
214 | XPRA_REGISTRY_SRV='registry.vdi.xpra.demo:5000' |
---|
215 | # |
---|
216 | export XPRA_REGISTRY_SRV |
---|
217 | # A default image must be specified and available |
---|
218 | # |
---|
219 | IMAGE_DEFAULT="${XPRA_REGISTRY_SRV}/vdi-xfce4" ;export IMAGE_DEFAULT |
---|
220 | # |
---|
221 | # Specific images per session type if desired. |
---|
222 | # |
---|
223 | IMAGE_DESKTOP_OFFICE="${XPRA_REGISTRY_SRV}/vdi-office" ;export IMAGE_DESKTOP_OFFICE |
---|
224 | IMAGE_SEAMLESS_OFFICE="${XPRA_REGISTRY_SRV}/vdi-office" ;export IMAGE_SEAMLESS_OFFICE |
---|
225 | IMAGE_DESKTOP="${XPRA_REGISTRY_SRV}/vdi-xfce4" ;export IMAGE_DESKTOP |
---|
226 | IMAGE_SEAMLESS="${XPRA_REGISTRY_SRV}/vdi-xfce4" ;export IMAGE_SEAMLESS |
---|
227 | IMAGE_XPRA_PROXY=${IMAGE_XPRA_PROXY:-"${XPRA_REGISTRY_SRV}/vdi-base"};export IMAGE_XPRA_PROXY |
---|
228 | #IMAGE_XPRA_PROXY="${XPRA_REGISTRY_SRV}/vdi-base-ubuntu" ; export IMAGE_XPRA_PROXY |
---|
229 | PRESTOP_CMD="${PRESTOP_CMD:-xpra stop}" ; export PRESTOP_CMD |
---|
230 | # |
---|
231 | #registry.do.not.where:5000/vdi-base |
---|
232 | # |
---|
233 | # Certificates |
---|
234 | # Accessing the xpra-proxy with websocket etc. must be done on a secure way !! |
---|
235 | # |
---|
236 | # Place the cert. files in ../ssl |
---|
237 | # |
---|
238 | SSL=on |
---|
239 | SSL_CERT=/etc/xpra/ssl/server.crt |
---|
240 | SSL_KEY=/etc/xpra/ssl/server.key |
---|
241 | # |
---|
242 | # If XPRA_DEMO_USERS=Y and on the .../etc directory the files |
---|
243 | # demousers-passwd and demousers-shadow are available |
---|
244 | # these files will be appended in the pod in /etc/passwd and |
---|
245 | # /etc/shadow |
---|
246 | # |
---|
247 | # Format is the same as for shadow and passwd |
---|
248 | # |
---|
249 | # With a useradd and chpasswd a existing passwd/shadow can be append |
---|
250 | # in the first stage. With a copy/paste insert the created demo-users |
---|
251 | # in demousers-passwd and ---shadow. |
---|
252 | # |
---|
253 | # default added a demouser-passwd/--shadow with xpra-user01--05 and |
---|
254 | # a simple password 'only4now' |
---|
255 | # |
---|
256 | XPRA_DEMO_USERS=Y |
---|
257 | export XPRA_DEMO_USERS |
---|
258 | # |
---|
259 | # |
---|
260 | # Domainname (DNS) |
---|
261 | # |
---|
262 | DOMNAME="${DOMNAME:-vdi.xpra.demo}" |
---|
263 | # |
---|
264 | # Sessions using IDM/IPA |
---|
265 | # If IDM_DOMAIN = empty only the |
---|
266 | # local passwd file will be used to determine |
---|
267 | # the users UID/GID and validating |
---|
268 | # Idm/Freeipa is also installed in the session-pods |
---|
269 | # |
---|
270 | # See also the variable SESSION_USING_IDM |
---|
271 | # If you using for large amounts of users be sure you have |
---|
272 | # more freeipa/IDM servers. |
---|
273 | # If users has a 2 factor auth. The pam_auth xpra-module cannot |
---|
274 | # handle this. |
---|
275 | # But: If a OTP is found in the users credentials the user needs |
---|
276 | # only this as password for validation. See also the pam service |
---|
277 | # xpra. The standard generated OTP will not work because it length |
---|
278 | # is too long (key), it is not compatible with the oath pam-module. |
---|
279 | # How to work around is as follows: |
---|
280 | # |
---|
281 | # Install the packages gen-oath-safe and oathtool |
---|
282 | # Generate a token and use the key as key input for freeipa/IDM server |
---|
283 | # during the creation of a token for a user. |
---|
284 | # |
---|
285 | # Example: |
---|
286 | # Generate key with |
---|
287 | # on the shell-prompt: |
---|
288 | # gen-oath-safe totp |
---|
289 | # |
---|
290 | # INFO: Bad or no token type specified, using TOTP. |
---|
291 | # INFO: No secret provided, generating random secret. |
---|
292 | # |
---|
293 | # Key in Hex: 7b43210b0d981195b5e68875eb1f94b28d6a6103 |
---|
294 | # Key in b32: PNBSCCYNTAIZLNPGRB26WH4UWKGWUYID (checksum: 8) |
---|
295 | # |
---|
296 | # URI: otpauth://totp/totp?secret=PNBSCCYNTAIZLNPGRB26WH4UWKGWUYID |
---|
297 | # |
---|
298 | # <DISPLAY OF THE QR-CODE> |
---|
299 | # |
---|
300 | # users.oath / otp.users configuration: |
---|
301 | # HOTP/T30 totp - 7b43210b0d981195b5e68875eb1f94b28d6a6103 |
---|
302 | # |
---|
303 | # take the string: PNBSCCYNTAIZLNPGRB26WH4UWKGWUYID paste it in |
---|
304 | # keyfield of the popup of the OTP-window or use it as |
---|
305 | # option value when creating a user with the CLI of ipa/idm. |
---|
306 | # |
---|
307 | # The script used by PAM /bin/pre-auth.sh searches the |
---|
308 | # LDAP environment for the OTP of a user and place it in /etc/oath/users.oath |
---|
309 | # The pam-module pam_oath.so will look in this file. |
---|
310 | # |
---|
311 | # It works for the tcp_auth option and when you are using ssh as transport-channel. |
---|
312 | # However by using ssh you need give two times a OTP. In most cases you need |
---|
313 | # generate the OTP twice. |
---|
314 | # |
---|
315 | # For generating a OTP use FreeOtp or on a Linux command prompt you can generate |
---|
316 | # the OTP with oathtool |
---|
317 | # For example: |
---|
318 | # oathtool -w0 --totp -b PNBSCCYNTAIZLNPGRB26WH4UWKGWUYID |
---|
319 | # |
---|
320 | # And will give you the otp as output -- 683205 |
---|
321 | # |
---|
322 | # Using: websocket or ssl (creates a ssl-tunnel between the proxy-server and your workplace) |
---|
323 | # |
---|
324 | # Obtaining the ca-certificate can be done by first logging in with a browser |
---|
325 | # and exporting the certificate. |
---|
326 | # |
---|
327 | # xpra attach wss://....@srvname:portnr/session-type --ssl-ca-certs=<CA Cert file> |
---|
328 | # |
---|
329 | # xpra attach ssh://....@srvname:portnr/session-type |
---|
330 | # If a OTP is created in the ipa/idm environment you need to generate a OTP 2 times. |
---|
331 | # |
---|
332 | # Xpra will allow you to use a plain in the clear data transmission ! |
---|
333 | # |
---|
334 | # In most cases the ipa/idm domainame is equal to the DNS-domainname |
---|
335 | # If not change the line below or when NOT using ipa/idm set the variable |
---|
336 | # to empty |
---|
337 | # |
---|
338 | IDM_DOMAIN="${IDM_DOMAIN:-${DOMNAME}}" ; export IDM_DOMAIN |
---|
339 | #IDM_DOMAIN="" ; export IDM_DOMAIN |
---|
340 | # |
---|
341 | # IDM_ADMIN_PASSWORD, password of dirsrv specified as base64 |
---|
342 | # string |
---|
343 | # Example: On the prompt |
---|
344 | # echo -n changeMe | base64 |
---|
345 | # And take the result. |
---|
346 | # |
---|
347 | # Don't forget the '-n' argument of echo |
---|
348 | # |
---|
349 | IDM_ADMIN_USER=${IDM_ADMIN_USER:-'YWRtaW4='} ; export IDM_ADMIN_USER |
---|
350 | IDM_ADMIN_PASSWORD='b25seTRub3c=' ; export IDM_ADMIN_PASSWORD |
---|
351 | # |
---|
352 | # Todo future |
---|
353 | # |
---|
354 | XPRA_VALIDATE_USER='Y' ; export XPRA_VALIDATE_USER |
---|
355 | # |
---|
356 | XPRA_AUTH_METHOD="${XPRA_AUTH_METHOD:-tcp,pamexec:service=login:command=/srv/bin/start_pod.sh:timeout=900}" |
---|
357 | export XPRA_AUTH_METHOD |
---|
358 | # |
---|
359 | # if SESSION_USING_IDM=Y and IDM_DOMAIN is NOT empty idm/Freeipa will be |
---|
360 | # configured in the session-pod of the user |
---|
361 | # if SESSION_USING_IDM=N and IDM_DOMAIN is NOT empty idm/Freeipa will be |
---|
362 | # not installed in the session-pod. Idm/Freeipa will be activated only in |
---|
363 | # the XPRA proxy server. User uid/gid information will be provided but passing |
---|
364 | # a Env. variable to the session-pod. |
---|
365 | # |
---|
366 | SESSION_USING_IDM='N' ; export SESSION_USING_IDM |
---|
367 | # |
---|
368 | # Shared scratch directory |
---|
369 | # |
---|
370 | # ${XPRA_SCRATCH_EXT}/tmp will be mounted in the pod |
---|
371 | # as /shrd-tmp and is read/writeable like /tmp |
---|
372 | # Users can change files etc. between them. |
---|
373 | # |
---|
374 | # However if it is forbidden to copy in or out data from |
---|
375 | # underlying server(s) set it to N |
---|
376 | # |
---|
377 | # |
---|
378 | XPRA_SCRATCH_EXT=${BASE_WORKING_DIR}/scratch/tmp ;export XPRA_SCRATCH_EXT |
---|
379 | # |
---|
380 | # If next variables contains an external directory |
---|
381 | # Sockets and Status of Xpra will outside the container |
---|
382 | # available. |
---|
383 | # |
---|
384 | XPRA_STATUS_DIR=${XPRA_TOPDIR_INT}/xpra-status ; export XPRA_STATUS_DIR |
---|
385 | XPRA_SOCKET_DIR=${XPRA_TOPDIR_INT}/xpra-socket ; export XPRA_SOCKET_DIR |
---|
386 | # |
---|
387 | # Number of proxy-instances initial |
---|
388 | # For explanation see the Kubernetes documentation |
---|
389 | # |
---|
390 | REPLICAS=1 |
---|
391 | # |
---|
392 | # Default xpra has a maximum of 100 concurrent connections |
---|
393 | # To overrule with XPRA_MAX_CONCURRENT_CONNECTIONS |
---|
394 | # |
---|
395 | XPRA_MAX_CONCURRENT_CONNECTIONS=${XPRA_MAX_CONCURRENT_CONNECTIONS:-1024} |
---|
396 | # |
---|
397 | export XPRA_MAX_CONCURRENT_CONNECTIONS |
---|
398 | # |
---|
399 | # Special settings XPRA and waiting times (sleep) |
---|
400 | # may delete# |
---|
401 | # Don't change unless you know what you are doing.... |
---|
402 | # |
---|
403 | SECRET_NAME_PROXY="${XPRA_DEPLOYNAME}-certs" ; export SECRET_NAME_PROXY |
---|
404 | SECRET_NAME_CERTS="${SECRET_NAME_PROXY}" ; export SECRET_NAME_CERTS |
---|
405 | SECRET_NAME_KUBE="${XPRA_DEPLOYNAME}-kube" ; export XPRA_DEPLOYNAME |
---|
406 | XPRA_SOCKET_TIMEOUT=180 ; export XPRA_SOCKET_TIMEOUT |
---|
407 | XPRA_PING_TIMEOUT=120 ; export XPRA_PING_TIMEOUT |
---|
408 | XPRA_PROXY_SOCKET_TIMEOUT=0.8 ;export XPRA_PROXY_SOCKET_TIMEOUT |
---|
409 | XPRA_PROXY_WS_TIMEOUT=0.8;export XPRA_PROXY_WS_TIMEOUT |
---|
410 | XPRA_CONNECT_TIMEOUT=60 ; export XPRA_CONNECT_TIMEOUT |
---|
411 | XPRA_EXEC_AUTH_TIMEOUT=900; export XPRA_EXEC_AUTH_TIMEOUT |
---|
412 | # |
---|
413 | XPRA_STARTUP_PROXY="${XPRA_TOPDIR_INT}/bin/startup_proxy.sh ${XPRA_TOPDIR_INT}/bin/start_or_get_pod.sh" |
---|
414 | #XPRA_STARTUP_PROXY="/bin/sleep 17200" # Startup proxy for debugging etc. Pod will startup only with a sleep of 17200 sec. |
---|
415 | PRE_STOP_CMD="/usr/bin/xpra stop" |
---|
416 | LIVENESS_PROBE_CMD="${XPRA_TOPDIR_INT}/bin/health_check_xpra-proxy.sh" |
---|
417 | PORT="${PORT:-14500}" ;export PORT |
---|
418 | # |
---|
419 | XPRA_SERVER_CRT="${BASE_WORKING_DIR}/../ssl/server.crt" |
---|
420 | XPRA_SERVER_KEY="${BASE_WORKING_DIR}/../ssl/server.key" |
---|
421 | # may delete# |
---|
422 | # may deleteVOLUME_SRVDIR="${BASE_WORKING_DIR}" ; export VOLUME_SRVDIR |
---|
423 | # may deleteVOLUME_SSLDIR="${BASE_WORKING_DIR}"'/ssl'; export VOLUME_SSLDIR |
---|
424 | # may delete# |
---|
425 | # |
---|
426 | EMPTYDIR="/tmp/em${$}ty" export EMPTYDIR |
---|
427 | # |
---|
428 | SECRET_NAME_IDM="" |
---|
429 | [ "${IDM_DOMAIN}" != '' ] && \ |
---|
430 | SECRET_NAME_IDM="join-idm-`echo ${IDM_DOMAIN}| sed -e 's/\./-/g'`" |
---|
431 | export SECRET_NAME_IDM |
---|
432 | # |
---|
433 | export SSL SSL_CERT SSL_KEY DOMNAME |
---|
434 | export XPRA_SERVER_CRT XPRA_SERVER_KEY |
---|
435 | # |
---|
436 | # Be sure directories are available |
---|
437 | # |
---|
438 | [ "${XPRA_SCRATCH_EXT}" != '' -a ! -d "${XPRA_SCRATCH_EXT}" ] && mkdir -p ${XPRA_SCRATCH_EXT} 2> /dev/null |
---|
439 | # |
---|
440 | if [ -d "${XPRA_SCRATCH_EXT}/." ] |
---|
441 | then |
---|
442 | chown root:root ${XPRA_SCRATCH_EXT} |
---|
443 | chmod 700 `dirname ${XPRA_SCRATCH_EXT}` |
---|
444 | chown root:root ${XPRA_SCRATCH_EXT} |
---|
445 | chmod 1777 ${XPRA_SCRATCH_EXT} |
---|
446 | fi |
---|
447 | |
---|
448 | [ ! -d "${BASE_WORKING_DIR}/save-states/." ] && mkdir -p "${BASE_WORKING_DIR}/save-states" |
---|
449 | |
---|
450 | # |
---|
451 | # Source general SHELL functions |
---|
452 | # |
---|
453 | if [ -f "${BASE_WORKING_DIR}/etc/xpra-functions.sh" ] |
---|
454 | then |
---|
455 | . "${BASE_WORKING_DIR}/etc/xpra-functions.sh" |
---|
456 | else |
---|
457 | echo "Can't find file ${BASE_WORKING_DIR}/etc/xpra-functions.sh" 1>&2 |
---|
458 | exit 3 |
---|
459 | fi |
---|
460 | generate_xpra_proxy () { # Don't remove this line and must be begin at column 0 !!! |
---|
461 | # |
---|
462 | ( |
---|
463 | XPRA_MODE='proxy' ; export XPRA_MODE |
---|
464 | cat <<EOB |
---|
465 | kind: Namespace |
---|
466 | apiVersion: v1 |
---|
467 | metadata: |
---|
468 | name: ingress-${NAMESPACE} |
---|
469 | labels: |
---|
470 | name: ingress-${NAMESPACE} |
---|
471 | --- |
---|
472 | kind: Namespace |
---|
473 | apiVersion: v1 |
---|
474 | metadata: |
---|
475 | name: ${NAMESPACE} |
---|
476 | labels: |
---|
477 | name: ${NAMESPACE} |
---|
478 | `if [ "${IDM_DOMAIN}" != '' ] |
---|
479 | then |
---|
480 | echo '---' |
---|
481 | echo 'apiVersion: v1' |
---|
482 | echo 'kind: Secret' |
---|
483 | echo 'metadata:' |
---|
484 | echo ' name: "'"${SECRET_NAME_IDM}"'"' |
---|
485 | echo ' namespace: '"ingress-${NAMESPACE}" |
---|
486 | echo 'data:' |
---|
487 | echo ' dirsrv-password: "'"${IDM_ADMIN_PASSWORD}"'"' |
---|
488 | echo ' idm-admin-password: "'"${IDM_ADMIN_PASSWORD}"'"' |
---|
489 | echo ' idm-admin-user: "'"${IDM_ADMIN_USER}"'"' |
---|
490 | echo 'type: Opaque' |
---|
491 | echo '---' |
---|
492 | echo 'apiVersion: v1' |
---|
493 | echo 'data:' |
---|
494 | echo ' idm-admin-user: "'"${IDM_ADMIN_USER}"'"' |
---|
495 | echo ' idm-admin-password: "'"${IDM_ADMIN_PASSWORD}"'"' |
---|
496 | echo 'kind: Secret' |
---|
497 | echo 'metadata:' |
---|
498 | echo ' name: '"${SECRET_NAME_IDM}" |
---|
499 | echo ' namespace: '"${NAMESPACE}" |
---|
500 | echo 'type: Opaque' |
---|
501 | fi` |
---|
502 | --- |
---|
503 | apiVersion: v1 |
---|
504 | kind: Service |
---|
505 | metadata: |
---|
506 | labels: |
---|
507 | app: ${XPRA_DEPLOYNAME}-8${PROXY_PORT} |
---|
508 | name: ${XPRA_DEPLOYNAME}-8${PROXY_PORT} |
---|
509 | namespace: ingress-${NAMESPACE} |
---|
510 | spec: |
---|
511 | externalIPs: |
---|
512 | `for ip in ${EXTERNALIPS} |
---|
513 | do |
---|
514 | echo " - ${ip}" |
---|
515 | done` |
---|
516 | externalTrafficPolicy: Cluster |
---|
517 | ports: |
---|
518 | - port: ${PROXY_PORT:-443} |
---|
519 | protocol: TCP |
---|
520 | targetPort: 8${PROXY_PORT:-443} |
---|
521 | selector: |
---|
522 | app: ${XPRA_DEPLOYNAME}-8${PROXY_PORT:-443} |
---|
523 | type: NodePort |
---|
524 | --- |
---|
525 | apiVersion: v1 |
---|
526 | kind: Service |
---|
527 | metadata: |
---|
528 | labels: |
---|
529 | app: ${XPRA_DEPLOYNAME}-8${PROXY_PORT} |
---|
530 | name: ${XPRA_DEPLOYNAME}-8`expr \`echo ${PROXY_PORT:-443}\` + 1` |
---|
531 | namespace: ingress-${NAMESPACE} |
---|
532 | spec: |
---|
533 | externalIPs: |
---|
534 | `for ip in ${EXTERNALIPS} |
---|
535 | do |
---|
536 | echo " - ${ip}" |
---|
537 | done` |
---|
538 | externalTrafficPolicy: Cluster |
---|
539 | ports: |
---|
540 | - port: `expr \`echo ${PROXY_PORT:-443}\` + 1` |
---|
541 | protocol: TCP |
---|
542 | targetPort: 8444 |
---|
543 | selector: |
---|
544 | app: ${XPRA_DEPLOYNAME}-8${PROXY_PORT:-443} |
---|
545 | type: NodePort |
---|
546 | --- |
---|
547 | apiVersion: v1 |
---|
548 | kind: ConfigMap |
---|
549 | metadata: |
---|
550 | name: xpra-env |
---|
551 | namespace: ingress-${NAMESPACE} |
---|
552 | data: |
---|
553 | SESSION_USING_IDM: "${SESSION_USING_IDM}" |
---|
554 | POD_FROM_NAMESPACE: "ingress-${NAMESPACE}" |
---|
555 | SRC_PORT: "8${PROXY_PORT}" |
---|
556 | PROTO_TYPE: "${PROTO_TYPE:-tcp}" |
---|
557 | PASSWD_ENTRY: "${PASSWD_ENTRY}" |
---|
558 | SSL: "${SSL}" |
---|
559 | SSL_CERT: "${SSL_CERT}" |
---|
560 | SSL_KEY: "${SSL_KEY}" |
---|
561 | GROUP_ENTRIES: "${GROUP_ENTRIES}" |
---|
562 | SECRET_NAME_CERTS: "${SECRET_NAME_CERTS}" |
---|
563 | |
---|
564 | `for var in \`env | sort -u | sed -e '/\(^[Xx][Pp][Rr][Aa]_[A-Za-z0-9_][A-Za-z0-9_]*\)\(=\)\(..*\)/!d' \ |
---|
565 | -e 's/=.*$//'\` |
---|
566 | do |
---|
567 | eval echo '\ \ \ '"${var}"': \"''$'"${var}"'\"' |
---|
568 | done` |
---|
569 | |
---|
570 | `if [ "${IDM_DOMAIN}" != '' ] |
---|
571 | then |
---|
572 | echo ' IDM_DOMAIN: "'"${IDM_DOMAIN}"'"' |
---|
573 | echo ' USE_OTP_PW: "'"${USE_OTP_PW}"'"' |
---|
574 | echo ' NAMESPACE: "'"${NAMESPACE}"'"' |
---|
575 | fi` |
---|
576 | --- |
---|
577 | apiVersion: apps/v1 |
---|
578 | kind: Deployment |
---|
579 | metadata: |
---|
580 | name: ${XPRA_DEPLOYNAME}-8${PROXY_PORT:-443} |
---|
581 | namespace: ingress-${NAMESPACE} |
---|
582 | labels: |
---|
583 | app: ${XPRA_DEPLOYNAME}-8${PROXY_PORT:-443} |
---|
584 | spec: |
---|
585 | replicas: ${REPLICAS} |
---|
586 | selector: |
---|
587 | matchLabels: |
---|
588 | app: ${XPRA_DEPLOYNAME}-8${PROXY_PORT:-443} |
---|
589 | template: |
---|
590 | metadata: |
---|
591 | labels: |
---|
592 | app: ${XPRA_DEPLOYNAME}-8${PROXY_PORT:-443} |
---|
593 | spec: |
---|
594 | containers: |
---|
595 | - name: ${XPRA_DEPLOYNAME}-8${PROXY_PORT:-443} |
---|
596 | image: "${IMAGE_XPRA_PROXY:-${IMAGE_DEFAULT}}" |
---|
597 | securityContext: |
---|
598 | capabilities: |
---|
599 | add: ["NET_ADMIN", "SYS_TIME","CAP_SYS_ADMIN","SYS_ADMIN"] |
---|
600 | command: ["/bin/bash","-c" ] |
---|
601 | args: ["${XPRA_STARTUP_PROXY}"] |
---|
602 | envFrom: |
---|
603 | - configMapRef: |
---|
604 | name: xpra-env |
---|
605 | lifecycle: |
---|
606 | preStop: |
---|
607 | exec: |
---|
608 | # SIGTERM triggers a quick exit; gracefully terminate instead |
---|
609 | command: ["/bin/bash", "-c", "${PRE_STOP_CMD}"] |
---|
610 | livenessProbe: |
---|
611 | initialDelaySeconds: 90 |
---|
612 | periodSeconds: 10 |
---|
613 | timeoutSeconds: 20 |
---|
614 | failureThreshold: 5 |
---|
615 | exec: |
---|
616 | command: |
---|
617 | - "${LIVENESS_PROBE_CMD}" |
---|
618 | volumeMounts: |
---|
619 | `nfs_generate_volume_mounts` |
---|
620 | - mountPath: /sys/fs/cgroup |
---|
621 | name: sys-fs-cgroup |
---|
622 | readOnly: true |
---|
623 | - mountPath: /dev/shm |
---|
624 | name: dshm |
---|
625 | `if [ "${IDM_DOMAIN}" != '' ] |
---|
626 | then |
---|
627 | echo ' - mountPath: /etc/join-idm-'\`echo "${IDM_DOMAIN}"| sed -e 's/\./-/g'\` |
---|
628 | echo ' name: join-idm-'\`echo "${IDM_DOMAIN}"| sed -e 's/\./-/g'\` |
---|
629 | echo ' readOnly: true' |
---|
630 | fi` |
---|
631 | - mountPath: `(IFS=\:;set -- \`getent passwd root\`; echo ${6})`/.kube |
---|
632 | name: ${SECRET_NAME_KUBE} |
---|
633 | readOnly: true |
---|
634 | `if [ "${SECRET_NAME_CERTS}" != '' ] |
---|
635 | then |
---|
636 | echo ' - mountPath: /etc/xpra/ssl/crt' |
---|
637 | echo ' name: "'"${SECRET_NAME_CERTS}-crt"'"' |
---|
638 | echo ' readOnly: true' |
---|
639 | echo ' - mountPath: /etc/xpra/ssl/key' |
---|
640 | echo ' name: "'"${SECRET_NAME_CERTS}-key"'"' |
---|
641 | echo ' readOnly: true' |
---|
642 | fi` |
---|
643 | volumes: |
---|
644 | `nfs_generate_volumes` |
---|
645 | - name: sys-fs-cgroup |
---|
646 | hostPath: |
---|
647 | path: /sys/fs/cgroup |
---|
648 | type: Directory |
---|
649 | - name: dshm |
---|
650 | emptyDir: |
---|
651 | medium: Memory |
---|
652 | ####### |
---|
653 | `if [ "${IDM_DOMAIN}" != '' ] |
---|
654 | then |
---|
655 | echo ' - name: "'"${SECRET_NAME_IDM}"'"' |
---|
656 | echo ' secret:' |
---|
657 | echo ' secretName: "'"${SECRET_NAME_IDM}"'"' |
---|
658 | echo ' defaultMode: 256' |
---|
659 | fi` |
---|
660 | - name: "${SECRET_NAME_KUBE}" |
---|
661 | secret: |
---|
662 | secretName: "${SECRET_NAME_KUBE}" |
---|
663 | defaultMode: 256 |
---|
664 | `if [ "${SECRET_NAME_CERTS}" != '' ] |
---|
665 | then |
---|
666 | echo ' - name: "'"${SECRET_NAME_CERTS}-key"'"' |
---|
667 | echo ' secret:' |
---|
668 | echo ' secretName: "'"${SECRET_NAME_CERTS}-key"'"' |
---|
669 | echo ' defaultMode: 256' |
---|
670 | echo ' - name: "'"${SECRET_NAME_CERTS}-crt"'"' |
---|
671 | echo ' secret:' |
---|
672 | echo ' secretName: "'"${SECRET_NAME_CERTS}-crt"'"' |
---|
673 | echo ' defaultMode: 292' |
---|
674 | fi` |
---|
675 | `if [ "${XPRA_WORKERS}" != '' -o "\`echo "${XPRA_WORKERS}" | sed -e 's/./\l&/g'\`" != 'all' ] |
---|
676 | then |
---|
677 | echo ' nodeSelector:' |
---|
678 | fi` |
---|
679 | restartPolicy: Always |
---|
680 | EOB |
---|
681 | ) |
---|
682 | } |
---|
683 | |
---|
684 | |
---|
685 | node_labeling() { |
---|
686 | |
---|
687 | for srv in ${XPRA_WORKERS} |
---|
688 | do |
---|
689 | kubectl label nodes ${srv} "xpra_run_${NAMESPACE}"'=true' --overwrite=true |
---|
690 | done |
---|
691 | } |
---|
692 | |
---|
693 | do_some_hardening() { |
---|
694 | |
---|
695 | [ ${BASE_WORKING_DIR} != '' -a -d ${BASE_WORKING_DIR}/. ] && chmod 755 ${BASE_WORKING_DIR}/. |
---|
696 | |
---|
697 | EXCLUDE=`basename ${BASE_WORKING_DIR}` |
---|
698 | |
---|
699 | for dir in ${BASE_WORKING_DIR}/../* |
---|
700 | do |
---|
701 | if [ -d "${dir}"/. -a "`basename ${dir}`" != "${EXCLUDE}" ] |
---|
702 | then |
---|
703 | chown root:root "${dir}"/. |
---|
704 | chmod 700 "${dir}"/. |
---|
705 | fi |
---|
706 | done |
---|
707 | if [ "${XPRA_SCRATCH_EXT}" != '' ] |
---|
708 | then |
---|
709 | [ "{XPRA_SCRATCH_EXT}" != '' -a ! -d `dirname "${XPRA_SCRATCH_EXT}"`/. ] && mkdir -p "${XPRA_SCRATCH_EXT}" |
---|
710 | chmod 700 `dirname "${XPRA_SCRATCH_EXT}"`/. |
---|
711 | chmod 1777 "${XPRA_SCRATCH_EXT}"/. |
---|
712 | fi |
---|
713 | if [ "${XPRA_STATUS_DIR}" != '' ] |
---|
714 | then |
---|
715 | SRC_PTH="${BASE_WORKING_DIR}/`basename ${XPRA_STATUS_DIR}`" |
---|
716 | [ ! -d "${SRC_PTH}" ] && mkdir -p "${SRC_PTH}" |
---|
717 | chmod 1777 "${SRC_PTH}" |
---|
718 | fi |
---|
719 | if [ "${XPRA_SOCKET_DIR}" != '' ] |
---|
720 | then |
---|
721 | SRC_PTH="${BASE_WORKING_DIR}/`basename ${XPRA_SOCKET_DIR}`" |
---|
722 | [ ! -d "${SRC_PTH}" ] && mkdir -p "${SRC_PTH}" |
---|
723 | chmod 1777 "${SRC_PTH}" |
---|
724 | fi |
---|
725 | [ "${SRC_PTH}" != '' ] && unset SRC_PTH |
---|
726 | } |
---|
727 | |
---|
728 | generate_xpra_vars_sh () { |
---|
729 | # |
---|
730 | # unset all shell functions |
---|
731 | # |
---|
732 | cat <<EOB |
---|
733 | #!/bin/sh |
---|
734 | # |
---|
735 | #--------------------------------------------------------------------------------# |
---|
736 | # Lines below may change depending on your Caas/Openshift/Kubernetes environment # |
---|
737 | # Don't remove the if statement with corresponding fi statement. When script # |
---|
738 | # is starting up under UID 0 (root) it will switch over to non-root user. # |
---|
739 | # (${RUNADUSER}) # |
---|
740 | # Starting up a Pod with be done with the user as specified in the variable # |
---|
741 | # USERNAME_RUNASUSER. Be sure this user has a ${HOME}/.kube directory containing # |
---|
742 | # a valid config readable file. (Copy of the kubemaster /etcubernetes/admin.conf# |
---|
743 | # # |
---|
744 | # Louis Mulder 2020 # |
---|
745 | # Xpra is released under the terms of the GNU GPL v2, or, at your option, any # |
---|
746 | # later version. See the file COPYING for details. # |
---|
747 | #--------------------------------------------------------------------------------# |
---|
748 | # |
---|
749 | EOB |
---|
750 | ( |
---|
751 | for fnc in `declare -F | sed 's/.* //'` |
---|
752 | do |
---|
753 | unset "${fnc}" |
---|
754 | done |
---|
755 | unset fnc |
---|
756 | set | sed -e '{ |
---|
757 | '"${OLDENV}"' |
---|
758 | /^[A-Za-z0-9_].*[Pp][Aa][Ss][Ss][Ww][Oo][Rr][Dd].*$/d |
---|
759 | /^[Pp][Aa][Ss][Ss][Ww][Oo][Rr][Dd].*$/d |
---|
760 | /^[Pp][Aa][Ss][Ss][Ww][Oo][Rr][Dd].*$/d |
---|
761 | /\/\^/d |
---|
762 | /^'"'"'/d |
---|
763 | /^[Pp][Ii][Pp][Ee][Ss][Tt][Aa][Tt][Uu][Ss]=.*/d |
---|
764 | /^[Ff][Uu][Nn][Cc][Nn][Aa][Mm][Ee]/d |
---|
765 | /^_=/d |
---|
766 | s/\(^[A-Za-z_]\)\([0-9A-Za-z_]*\)\(=\)\(.*$\)/\1\2\3\4 ; export \1\2/ |
---|
767 | }' |
---|
768 | ) |
---|
769 | } |
---|
770 | |
---|
771 | gen_namespaces_certs_secrets () { |
---|
772 | # |
---|
773 | SSLDIR_TMP=/tmp/ssl${$} |
---|
774 | mkdir -p ${SSLDIR_TMP} |
---|
775 | cp ${XPRA_SERVER_KEY} ${SSLDIR_TMP}/. |
---|
776 | cp ${XPRA_SERVER_CRT} ${SSLDIR_TMP}/. |
---|
777 | |
---|
778 | OLDPWD=`pwd` |
---|
779 | if cd ${SSLDIR_TMP} |
---|
780 | then |
---|
781 | ( |
---|
782 | cat <<EOB |
---|
783 | kind: Namespace |
---|
784 | apiVersion: v1 |
---|
785 | metadata: |
---|
786 | name: ingress-${NAMESPACE} |
---|
787 | labels: |
---|
788 | name: ingress-${NAMESPACE} |
---|
789 | --- |
---|
790 | kind: Namespace |
---|
791 | apiVersion: v1 |
---|
792 | metadata: |
---|
793 | name: ${NAMESPACE} |
---|
794 | labels: |
---|
795 | name: ${NAMESPACE} |
---|
796 | EOB |
---|
797 | ) | kubectl apply -f - |
---|
798 | # |
---|
799 | cat <<EOF >./kustomization.yaml |
---|
800 | secretGenerator: |
---|
801 | - name: ${SECRET_NAME_PROXY}-key |
---|
802 | namespace: ${NAMESPACE} |
---|
803 | files: |
---|
804 | - `basename ${XPRA_SERVER_KEY}` |
---|
805 | EOF |
---|
806 | kubectl apply -k . |
---|
807 | sec_name="`kubectl -n ${NAMESPACE} get secrets | grep "${SECRET_NAME_PROXY}-key-"| sed -e 's/ *[Oo][Pp].*$//'| head -1`" |
---|
808 | DATA="`kubectl -n ${NAMESPACE} get secrets "${sec_name}" -o yaml`" |
---|
809 | # |
---|
810 | for sec in ${NAMESPACE} ingress-${NAMESPACE} |
---|
811 | do |
---|
812 | echo "${DATA}" |\ |
---|
813 | sed -e '/[Kk][Ii][Nn][Dd].*[Ss][Ee][Cc][Rr][Ee][Tt]/,$d' \ |
---|
814 | -e 's/^[Dd][Aa][Tt][Aa]/kind: Secret\nmetadata:\n name: '"${SECRET_NAME_PROXY}-key"'\n namespace: '"${sec}"'\ntype: Opaque\n&/' |
---|
815 | echo '---' |
---|
816 | done | kubectl apply -f - |
---|
817 | kubectl -n ${NAMESPACE} delete secrets "${sec_name}" |
---|
818 | # |
---|
819 | cat <<EOF >./kustomization.yaml |
---|
820 | secretGenerator: |
---|
821 | - name: ${SECRET_NAME_PROXY}-crt |
---|
822 | namespace: ${NAMESPACE} |
---|
823 | files: |
---|
824 | - `basename ${XPRA_SERVER_CRT}` |
---|
825 | EOF |
---|
826 | kubectl apply -k . |
---|
827 | sec_name="`kubectl -n ${NAMESPACE} get secrets | grep "${SECRET_NAME_PROXY}-crt-"| sed -e 's/ *[Oo][Pp].*$//'| head -1`" |
---|
828 | DATA="`kubectl -n ${NAMESPACE} get secrets "${sec_name}" -o yaml`" |
---|
829 | # |
---|
830 | for sec in ${NAMESPACE} ingress-${NAMESPACE} |
---|
831 | do |
---|
832 | echo "${DATA}" |\ |
---|
833 | sed -e '/[Kk][Ii][Nn][Dd].*[Ss][Ee][Cc][Rr][Ee][Tt]/,$d' \ |
---|
834 | -e 's/^[Dd][Aa][Tt][Aa]/kind: Secret\nmetadata:\n name: '"${SECRET_NAME_PROXY}-crt"'\n namespace: '"${sec}"'\ntype: Opaque\n&/' |
---|
835 | echo '---' |
---|
836 | done | kubectl apply -f - |
---|
837 | kubectl -n ${NAMESPACE} delete secrets "${sec_name}" |
---|
838 | # |
---|
839 | unset DATA sec_name sec |
---|
840 | fi |
---|
841 | [ "${SSLDIR_TMP}" != '' -a -d "${SSLDIR_TMP}"/. ] && rm -rf "${SSLDIR_TMP}" |
---|
842 | unset SSLDIR_TMP |
---|
843 | cd ${OLDPWD} |
---|
844 | } |
---|
845 | gen_namespaces_kube_config_secrets() { |
---|
846 | # |
---|
847 | DIR_TMP=/tmp/kube${$} |
---|
848 | mkdir -p ${DIR_TMP} |
---|
849 | KUBE_CONFIG=${KUBE_CONFIG:-/etc/kubernetes/admin.conf} |
---|
850 | # |
---|
851 | if [ -f ${KUBE_CONFIG} ] |
---|
852 | then |
---|
853 | cp "${KUBE_CONFIG}" ${DIR_TMP}/config |
---|
854 | else |
---|
855 | echo 'Are you on a kubemaster, no '${KUBE_CONFIG}' found' 1>&2 |
---|
856 | [ "${DIR_TMP}" != '' -a -d "${DIR_TMP}" ] && rm -rf "${DIR_TMP}" |
---|
857 | exit 1 |
---|
858 | fi |
---|
859 | OLDPWD=`pwd` |
---|
860 | if cd ${DIR_TMP} |
---|
861 | then |
---|
862 | ( |
---|
863 | cat <<EOB |
---|
864 | kind: Namespace |
---|
865 | apiVersion: v1 |
---|
866 | metadata: |
---|
867 | name: ingress-${NAMESPACE} |
---|
868 | labels: |
---|
869 | name: ingress-${NAMESPACE} |
---|
870 | --- |
---|
871 | kind: Namespace |
---|
872 | apiVersion: v1 |
---|
873 | metadata: |
---|
874 | name: ${NAMESPACE} |
---|
875 | labels: |
---|
876 | name: ${NAMESPACE} |
---|
877 | EOB |
---|
878 | ) | kubectl apply -f - |
---|
879 | # |
---|
880 | cat <<EOF >./kustomization.yaml |
---|
881 | secretGenerator: |
---|
882 | - name: ${SECRET_NAME_KUBE} |
---|
883 | namespace: ingress-${NAMESPACE} |
---|
884 | files: |
---|
885 | - config |
---|
886 | EOF |
---|
887 | cp ./kustomization.yaml /var/tmp/kust2 |
---|
888 | kubectl apply -k . |
---|
889 | sec_name="`kubectl -n ingress-${NAMESPACE} get secrets | grep "${SECRET_NAME_KUBE}-" | sed -e 's/ *[Oo][Pp].*$//'| head -1`" |
---|
890 | DATA="`kubectl -n ingress-${NAMESPACE} get secrets "${sec_name}" -o yaml`" |
---|
891 | |
---|
892 | sec=ingress-${NAMESPACE} |
---|
893 | ( |
---|
894 | echo "${DATA}" |\ |
---|
895 | sed -e '/[Kk][Ii][Nn][Dd].*[Ss][Ee][Cc][Rr][Ee][Tt]/,$d' \ |
---|
896 | -e 's/^[Dd][Aa][Tt][Aa]/kind: Secret\nmetadata:\n name: '"${SECRET_NAME_KUBE}"'\n namespace: '"${sec}"'\ntype: Opaque\n&/' |
---|
897 | echo '---' |
---|
898 | ) | kubectl apply -f - |
---|
899 | kubectl -n ingress-${NAMESPACE} delete secrets "${sec_name}" |
---|
900 | unset DATA sec_name sec |
---|
901 | fi |
---|
902 | [ "${DIR_TMP}" != '' -a -d "${DIR_TMP}"/. ] && rm -rf "${DIR_TMP}" |
---|
903 | unset DIR_TMP |
---|
904 | cd ${OLDPWD} |
---|
905 | } |
---|
906 | # |
---|
907 | build_images () { |
---|
908 | if cd `dirname ${BASE_WORKING_DIR}`/images |
---|
909 | then |
---|
910 | |
---|
911 | # find the base |
---|
912 | VDI_BASE='' ;export VDI_BASE |
---|
913 | VDI_BUILDLIST='' ;export VDI_BUILDLIST |
---|
914 | |
---|
915 | for d in * |
---|
916 | do |
---|
917 | if [ -d ${d}/. ] |
---|
918 | then |
---|
919 | case ${d} in |
---|
920 | #( |
---|
921 | *[Bb][Aa][Ss][Ee]* ) |
---|
922 | VDI_BASE="${d}" |
---|
923 | ;; |
---|
924 | #( |
---|
925 | * ) |
---|
926 | VDI_BUILDLIST="${VDI_BUILDLIST:+${VDI_BUILDLIST} ${d}}" |
---|
927 | VDI_BUILDLIST="${VDI_BUILDLIST:-${d}}" |
---|
928 | ;; |
---|
929 | esac |
---|
930 | fi |
---|
931 | done |
---|
932 | |
---|
933 | if [ "${VDI_BASE}" = '' ] |
---|
934 | then |
---|
935 | echo "No base docker build image directory find `pwd` "'(vdi-base ???)' 1>&2 |
---|
936 | exit 1 |
---|
937 | fi |
---|
938 | |
---|
939 | # Perform the base |
---|
940 | if cd ${VDI_BASE} |
---|
941 | then |
---|
942 | sed -e '1 i\'"FROM centos:7" < Dockerfile.tmpl > Dockerfile |
---|
943 | docker build -t ${XPRA_REGISTRY_SRV}/${VDI_BASE} . |
---|
944 | docker push ${XPRA_REGISTRY_SRV}/${VDI_BASE} |
---|
945 | fi |
---|
946 | cd .. |
---|
947 | for dir in ${VDI_BUILDLIST} |
---|
948 | do |
---|
949 | if cd ${dir} |
---|
950 | then |
---|
951 | sed -e '1 i\'"FROM ${XPRA_REGISTRY_SRV}/${VDI_BASE}" < Dockerfile.tmpl > Dockerfile |
---|
952 | docker build -t ${XPRA_REGISTRY_SRV}/${dir} . |
---|
953 | docker push ${XPRA_REGISTRY_SRV}/${dir} |
---|
954 | cd .. |
---|
955 | fi |
---|
956 | done |
---|
957 | unset d dir VDI_BUILDLIST VDI_BASE |
---|
958 | else |
---|
959 | echo "Huh no `dirname ${BASE_WORKING_DIR}`/images directory....." 1>&2 |
---|
960 | exit 1 |
---|
961 | fi |
---|
962 | } |
---|
963 | # |
---|
964 | TODO=`grep -n 'ADJUST' < ${PROG} | sed -e 's/=.*$//' -e '/^[0-9][0-9]*:TODO/d'` |
---|
965 | if [ "${TODO}" != '' ] |
---|
966 | then |
---|
967 | ( |
---|
968 | for item in ${TODO} |
---|
969 | do |
---|
970 | eval `IFS=':' ; set -- ${item} ; echo "LINE=${1};VAR=${2}"` |
---|
971 | echo 'You need to adjust or give (a) values(s) for parameter '${VAR} at line ${LINE} |
---|
972 | done |
---|
973 | ) 1>&2 |
---|
974 | exit 1 |
---|
975 | fi |
---|
976 | # |
---|
977 | [ "${TOBUILD}" = 'yes' ] && build_images |
---|
978 | gen_namespaces_certs_secrets |
---|
979 | gen_namespaces_kube_config_secrets |
---|
980 | generate_xpra_vars_sh > ${BASE_WORKING_DIR}/etc/xpra-vars.sh |
---|
981 | generate_xpra_proxy | tee ${BASE_WORKING_DIR}/../yaml/xpra-proxy-8${PROXY_PORT}.yaml | kubectl apply -f - |
---|
982 | do_some_hardening |
---|
983 | node_labeling |
---|
984 | # |
---|