#!/bin/bash
# Set up the nodes for the experiment.
# This script does the following:
# 0. Stop previous experiments
# 1. Parse the experiment-info.json from jFed to get
#    - The user who runs the experiment (used for NFS) - env: G5K_USER
#    - Which Grid5000 node is which node in jFed (used to run the correct script later)
#      This also sets variables like PRIMARY_EXCHANGE, NUM_<NODES>, NODE_NAME (jFed),
#      <NODE>_HOSTS and G5K_HOST (e.g. dahu-3)
#    - <NODE>_HOSTS is used in run.sh to identify which role-specific script to call
#    - NUM_<NODES> is particularly useful for e.g. sharding, to know how many shards
#      to create on the main database node
#    - PRIMARY_EXCHANGE is mainly used when multiple Exchange nodes share key
#      material over NFS; the primary one is then responsible for generating it
#    - G5K_HOST can be used to identify the nodes, but is not used in any script
# 2. Set up the directory where application logs are stored
#    (/home/G5K_USER/exp-logs, or /tmp/taler when there is no NFS home)
# 3. Export all environment variables to ~/.env and /etc/environment
# 4. Update the g5k repo from taler.net and copy the configurations (g5k-repo/configs) to /
# 5. Configure DNS and start the DNS server on the DNS node
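# For orientation, the relevant shape of experiment-info.json (illustrative
# excerpt; the field names follow the jq queries below, the values are made up):
#   {
#     "user": { "name": "jdoe" },
#     "nodes": {
#       "Exchange-1": {
#         "ssh_login": [ { "hostname": "access.grid5000.fr" },
#                        { "hostname": "dahu-1.grenoble.grid5000.fr" } ]
#       }
#     }
#   }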
# Set the current user
echo "G5K_USER=$(cat ~/experiment-info.json | jq -r '.user.name')" >> ~/.env
source ~/.env
set -euax
# Parse and export the experiment nodes specified in the meta file
# experiment-info.json
function parse_experiment_nodes() {
  # Get the nodes and write them to nodes.json in the form:
  # {node: jFed node-name, host: Grid5000 node}
  if [ ! -f ~/nodes.json ]; then
    jq '[.nodes | to_entries | .[] | {node: .key, host: .value.ssh_login[1].hostname}]' \
      ~/experiment-info.json > ~/nodes.json
  fi
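  # Illustrative content of ~/nodes.json (hostnames are made up):
  #   [
  #     { "node": "Exchange-1", "host": "dahu-1.grenoble.grid5000.fr" },
  #     { "node": "Database",   "host": "dahu-2.grenoble.grid5000.fr" }
  #   ]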
  # Read the jFed node names defined in the NODES env variable
  # and export their Grid5000 hostnames to .env.
  # Given the jFed nodes Exchange-1 and Exchange-2, "Exchange" must be listed
  # in NODES; the resulting env entry is then:
  # EXCHANGE_HOSTS=node-1.site-1.grid5000.fr|node-2.site-2.grid5000.fr
  # When jFed has only Exchange, the entry is just
  # EXCHANGE_HOSTS=node-1.site-1.grid5000.fr
  # This is used in run.sh to determine which role script to execute.
  for NODE in ${NODES}; do
    NODES_STR=$(\
      jq --arg NODE "${NODE}.*" -r \
        'map(select(.node | test($NODE)) | .host) |
         join("|") |
         select(. != "") // "none"' \
        ~/nodes.json
    )
    echo "${NODE^^}_HOSTS=\"${NODES_STR}\"" >> ~/.env
    echo "NUM_${NODE^^}S=$(echo "${NODES_STR}" | grep -v "none" | awk -F '|' '{print NF}')" >> ~/.env
  done
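  # With NODES="Exchange" and two Exchange nodes, ~/.env now contains,
  # for example (hostnames illustrative):
  #   EXCHANGE_HOSTS="dahu-1.grenoble.grid5000.fr|dahu-2.grenoble.grid5000.fr"
  #   NUM_EXCHANGES=2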
  # Export NODE_NAME, which can be used to set log directories, for example
  jq -r '.[] | .node, .host' ~/nodes.json | \
  while read -r NODE; read -r HOST; do
    if [[ "${HOST}" =~ "${HOSTNAME}" ]]; then
      echo "NODE_NAME=\"${NODE,,}\"" >> ~/.env
      if grep -q "Red Hat" /proc/version; then
        # The hostname on CentOS is set to e.g. dahu-8 only;
        # override it with the FQDN from the grid
        echo HOSTNAME="${HOST}" >> ~/.env
      fi
    fi
  done
  echo "PRIMARY_EXCHANGE=${PRIMARY_EXCH,,}.${DNS_ZONE}" >> ~/.env
  echo "WALLET_HOSTS=*" >> ~/.env
  echo "G5K_HOST=\"$(hostname | cut -d '.' -f1)\"" >> ~/.env
}
# Determine and create the base log directory.
# If NFS exists, it is created in the user's home directory on the NFS.
function setup_log_dir() {
  LOG_DIR=/home/${G5K_USER}/exp-logs
  if [ -d "${LOG_DIR}" ]; then
    # If multiple nodes delete the same directory we run into errors;
    # let it fail safely with || true
    rm -rf "${LOG_DIR}"/* || true
  elif [ ! -d /home/${G5K_USER} ]; then
    LOG_DIR=/tmp/taler
  fi
  mkdir -p "${LOG_DIR}" || true
  echo "LOG_DIR=${LOG_DIR}" >> ~/.env
}
# Set up the environment configuration
function setup_environment() {
  set +x
  # Determine the database port the taler-exchange-* services should use
  if [ "$USE_PGBOUNCER" = "true" ]; then
    echo "DB_PORT=6432" >> ~/.env
  else
    echo "DB_PORT=5432" >> ~/.env
  fi
  echo "START_TIME=$(date +%s)" >> ~/.env
  # Needed for envsubst to work
  export DNS_ZONE=${DNS_ZONE}
  # Complete the hostnames by substituting ${DNS_ZONE}.
  # Important: don't use 'cat env | envsubst > env' - this truncates env
  # before it is read, leaving it empty in the end
  envsubst < ~/.env > /tmp/.env && mv /tmp/.env ~/.env
  # Add the environment config for subsequent shells
  grep -v API_KEY ~/.env | tee /etc/environment
  # Reload the env since HOST_* and *_DOMAIN entries were added
  source ~/.env
  set -x
}
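# Illustration of the envsubst pass above (variable name and values are
# hypothetical): an ~/.env line written with a literal ${DNS_ZONE}, such as
#   EXCHANGE_DOMAIN=exchange.${DNS_ZONE}
# becomes, with DNS_ZONE=perf.taler,
#   EXCHANGE_DOMAIN=exchange.perf.taler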
# Set up shared configurations such as the ones from configs/*
function setup_config() {
  # Temporarily check out the feature branch
  cd "${G5K_HOME}" && git checkout "${G5K_COMMIT_SHA}" && git pull && cd
  # Remove the default nginx config
  rm /etc/nginx/sites-enabled/default > /dev/null 2>&1 || true
  # Override default configurations with the ones from this repository
  cp -r "${G5K_HOME}"/configs/* /
  find /usr/lib/systemd/system/ -iname "taler-exchange-httpd*.service" \
    -exec sed -i "s|<CMD_PREFIX_HERE>|${EXCHANGE_CMD_PREFIX} |g" {} \;
  if [ -f ~/scripts/taler-perf.sh ]; then
    mv ~/scripts/taler-perf.sh /usr/local/bin/taler-perf
  fi
}
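# Illustration of the sed over the exchange units above (unit content and
# prefix value are hypothetical): an ExecStart line such as
#   ExecStart=<CMD_PREFIX_HERE>/usr/bin/taler-exchange-httpd
# becomes, with EXCHANGE_CMD_PREFIX="numactl --interleave=all",
#   ExecStart=numactl --interleave=all /usr/bin/taler-exchange-httpd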
# Configure the experiment's DNS
function setup_dns() {
  NS_IP=$(host "${DNS_HOSTS}" | sed -n 1p | awk '{print $4}')
  echo "DNS_IP=${NS_IP}" >> ~/.env
  echo "DNS_IP=${NS_IP}" >> /etc/environment
  # Make our DNS the only server queried by the stub resolver
  if ! grep -Fxq "server=${NS_IP}" /etc/dnsmasq.conf ; then
    echo "server=${NS_IP}" >> /etc/dnsmasq.conf
  fi
  # Set dnsmasq as our only resolver (stub)
  if ! grep -Fxq "nameserver 127.0.0.1" /etc/resolv.conf ; then
    mv /etc/resolv.conf /etc/resolv.conf.bak
    echo "nameserver 127.0.0.1" > /etc/resolv.conf
  fi
  # Add the Grid5000 DNS servers as forwarders to our DNS so Grid5000 names
  # are still resolved correctly
  BIND_SERVERS=$(grep nameserver /etc/resolv.conf.bak | awk -v ORS='; ' '{print $2}')
  sed -i "s/<DNS_ZONE_HERE>/${DNS_ZONE}/g" \
    /etc/bind/named.conf.local
  sed -i "s/<GRID_DNS_HERE>/${BIND_SERVERS}/g" \
    /etc/bind/named.conf.options
  sed -i -e "s/<DNS_ZONE_HERE>/${DNS_ZONE}/g" \
    -e "s/<NS_IP_HERE>/${NS_IP}/g" \
    /var/lib/bind/perf.taler
  # Remove potentially expired zone journals so bind will not complain
  # (this can happen when the ESpec - or just this script - is executed
  # multiple times)
  if [ -f /var/lib/bind/perf.taler.jnl ]; then
    rm -f /var/lib/bind/perf.taler.jnl
  fi
  systemctl daemon-reload
  if ! [[ "${HOSTNAME}" =~ ${DNS_HOSTS} ]]; then
    # Wait for named to be ready before starting dnsmasq
    sleep 10
    systemctl restart dnsmasq
  else
    # Start the DNS server when we are the DNS host
    systemctl restart named
  fi
}
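# The resulting resolution chain on every node:
#   application -> 127.0.0.1 (dnsmasq stub) -> ${NS_IP} (bind on the DNS node)
#   -> Grid5000 resolvers (forwarders) for names outside ${DNS_ZONE}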
# Stop and 'unconfigure' all important services
# so we start on an 'empty' playground
function clean_previous_setup() {
  # (not all services are present on every node; || true ignores the errors)
  systemctl stop taler-exchange-* \
    taler-wallet* \
    prometheus \
    prometheus-*-exporter \
    postgresql* \
    promtail \
    loki \
    || true
  # Remove access to postgres for all nodes
  HBA_FILE=/etc/postgresql/${POSTGRES_VERSION}/main/pg_hba.conf
  if grep -q "Red Hat" /proc/version; then
    HBA_FILE=/tmp/postgresql/${POSTGRES_VERSION}/data/pg_hba.conf
  fi
  sed -i "/172.16.0.0\/12/d" "${HBA_FILE}" || true
}
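# The sed above removes grid-internal pg_hba.conf entries added by earlier
# runs, e.g. a rule like (illustrative, the auth method may differ):
#   host    all    all    172.16.0.0/12    trust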
clean_previous_setup
# Check if binaries need to be rebuilt on Debian-based operating systems
if ! grep -q "Red Hat" /proc/version; then
  source ~/scripts/install.sh
fi
parse_experiment_nodes
setup_log_dir
setup_environment
setup_config
setup_dns
if ! grep -q "Red Hat" /proc/version; then
  # Only works on Debian-based operating systems
  exec ~/scripts/createusers.sh
fi