Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions ansible/files/wiab_server_nftables.conf.j2
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,9 @@ table ip nat {
chain POSTROUTING {
type nat hook postrouting priority 100;
oifname != docker0 ip saddr 172.17.0.0/16 counter masquerade
{% if not (private_deployment | default(true) | bool) %}
oifname $INF_WAN counter masquerade comment "{{ wire_comment }} masquerade outgoing traffic"
{% endif %}
}
chain DOCKER {
iifname docker0 counter return
Expand Down
4 changes: 3 additions & 1 deletion ansible/inventory/demo/wiab-staging.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,4 +6,6 @@ wiab-staging:
ansible_user: 'demo'
ansible_ssh_private_key_file: "~/.ssh/id_ed25519"
vars:
artifact_hash: 2b117e47a00e0cb6bf72dc629f8c970be1682a76
artifact_hash: 6b0f54979289f7031308bcde3a2c50822b160211
# when true, the WAN SNAT/masquerade rule is omitted so VMs stay on the private network
private_deployment: true
3 changes: 1 addition & 2 deletions ansible/wiab-staging-provision.yml
Original file line number Diff line number Diff line change
Expand Up @@ -297,9 +297,8 @@
kubenode2_ip: "{{ kubenode_ip_result.results[1].stdout }}"
kubenode3_ip: "{{ kubenode_ip_result.results[2].stdout }}"
wire_comment: "wiab-stag"

tags: always

- name: Configure nftables
import_playbook: ./wiab-staging-nftables.yaml
tags: nftables
tags: [never, nftables]
6 changes: 3 additions & 3 deletions bin/debug_logs.sh
Original file line number Diff line number Diff line change
Expand Up @@ -4,14 +4,14 @@ set -euo pipefail
echo "Printing all pods status"
kubectl get pods --all-namespaces
echo "------------------------------------"
namespaces=$(kubectl get ns -o=jsonpath='{.items[*].metadata.name}')
namespaces="cert-manager-ns default"
echo "Namespaces = $namespaces"
for ns in $namespaces; do
pods=$(kubectl get pods --all-namespaces -o=jsonpath='{.items[*].metadata.name}')
pods=$(kubectl get pods -n "$ns" -o=jsonpath='{.items[*].metadata.name}')
echo "Pods in namespace: $ns = $pods"
for pod in $pods; do
echo "Logs for pod: $pod"
kubectl logs --all-containers -n "$ns" "$pod" || true
kubectl logs --tail 30 --all-containers -n "$ns" "$pod" || true
echo "Description for pod: $pod"
kubectl describe pod -n "$ns" "$pod" || true
echo "------------------------------------"
Expand Down
78 changes: 60 additions & 18 deletions bin/helm-operations.sh
Original file line number Diff line number Diff line change
Expand Up @@ -3,31 +3,64 @@
set -Eeo pipefail

# Configuration comes from environment variables, with defaults so the
# script can run unattended inside the deploy container.
BASE_DIR="${BASE_DIR:-/wire-server-deploy}"
TARGET_SYSTEM="${TARGET_SYSTEM:-example.com}"
CERT_MASTER_EMAIL="${CERT_MASTER_EMAIL:-certmaster@example.com}"

# DEPLOY_CERT_MANAGER decides whether the cert-manager and
# nginx-ingress-services charts get deployed (default: TRUE).
DEPLOY_CERT_MANAGER="${DEPLOY_CERT_MANAGER:-TRUE}"

# DUMP_LOGS_ON_FAIL dumps cluster debug logs when a step fails
# (default: FALSE).
DUMP_LOGS_ON_FAIL="${DUMP_LOGS_ON_FAIL:-FALSE}"

# HOST_IP should match the DNS A record value for TARGET_SYSTEM — assumed
# to be the public address clients use to reach the deployment. When unset,
# it is auto-detected via api.ipify.org (requires outbound internet access).
HOST_IP="${HOST_IP:-}"

if [ -z "$HOST_IP" ]; then
  HOST_IP=$(wget -qO- https://api.ipify.org)
fi

# ERR-trap handler: when DUMP_LOGS_ON_FAIL=TRUE, collect cluster debug logs
# via debug_logs.sh, then propagate the failing command's exit status.
dump_debug_logs() {
  local rc=$?
  if [[ "$DUMP_LOGS_ON_FAIL" == "TRUE" ]]; then
    "$BASE_DIR"/bin/debug_logs.sh
  fi
  return $rc
}
trap dump_debug_logs ERR

# Pick the node that hosts calling traffic (the last / 3rd kube worker node).
# The '|| true' keeps the script alive under 'set -Eeo pipefail' when kubectl
# fails, so the explicit error message below is actually reachable.
CALLING_NODE=$(kubectl get nodes --no-headers | tail -n 1 | awk '{print $1}' || true)
if [[ -z "$CALLING_NODE" ]]; then
  echo "Error: could not determine the last kube worker node via kubectl" >&2
  exit 1
fi

# Copy the PostgreSQL password from the in-cluster secret (created by the
# databases-ephemeral chart) into wire-server's secrets.yaml, so brig, galley
# and background-worker can authenticate against PostgreSQL.
# Best-effort: returns 0 and only warns when the secret is absent.
sync_pg_secrets() {
  echo "Retrieving PostgreSQL password from databases-ephemeral for wire-server deployment..."
  if kubectl get secret wire-postgresql-external-secret &>/dev/null; then
    # Usage: sync-k8s-secret-to-wire-secrets.sh <secret-name> <secret-key> <yaml-file> <yaml-path's>
    "$BASE_DIR/bin/sync-k8s-secret-to-wire-secrets.sh" \
      wire-postgresql-external-secret password \
      "$BASE_DIR/values/wire-server/secrets.yaml" \
      .brig.secrets.pgPassword .galley.secrets.pgPassword .background-worker.secrets.pgPassword
  else
    # The warning must name the secret actually queried above
    # (previously it said 'wire-postgresql-secret', which does not exist).
    echo "⚠️ Warning: PostgreSQL secret 'wire-postgresql-external-secret' not found, skipping secret sync"
    echo " Make sure databases-ephemeral chart is deployed before wire-server"
  fi
}

# Creates values.yaml from prod-values.example.yaml and secrets.yaml from prod-secrets.example.yaml
# Works on all chart directories in $BASE_DIR/values/
process_values() {

ENV=$1
TYPE=$2
charts=(fake-aws demo-smtp databases-ephemeral reaper wire-server webapp account-pages team-settings smallstep-accomp ingress-nginx-controller nginx-ingress-services coturn sftd cert-manager)
charts=(fake-aws demo-smtp rabbitmq databases-ephemeral reaper wire-server webapp account-pages team-settings ingress-nginx-controller nginx-ingress-services coturn sftd cert-manager)

if [[ "$ENV" != "prod" ]] || [[ -z "$TYPE" ]] ; then
echo "Error: This function only supports prod deployments with TYPE as values or secrets. ENV must be 'prod', got: '$ENV' and '$TYPE'"
Expand Down Expand Up @@ -147,7 +180,7 @@ deploy_charts() {
deploy_cert_manager() {

kubectl get namespace cert-manager-ns || kubectl create namespace cert-manager-ns
helm upgrade --install -n cert-manager-ns cert-manager "$BASE_DIR/charts/cert-manager" --values "$BASE_DIR/values/cert-manager/values.yaml"
helm upgrade --install --wait --timeout=5m0s -n cert-manager-ns cert-manager "$BASE_DIR/charts/cert-manager" --values "$BASE_DIR/values/cert-manager/values.yaml"

# display running pods
kubectl get pods --sort-by=.metadata.creationTimestamp -n cert-manager-ns
Expand All @@ -158,36 +191,45 @@ deploy_calling_services() {
echo "Deploying sftd and coturn"
# select the node to deploy sftd
kubectl annotate node "$CALLING_NODE" wire.com/external-ip="$HOST_IP" --overwrite
helm upgrade --install sftd "$BASE_DIR/charts/sftd" --set "nodeSelector.kubernetes\\.io/hostname=$CALLING_NODE" --values "$BASE_DIR/values/sftd/values.yaml"
helm upgrade --install --wait --timeout=5m0s sftd "$BASE_DIR/charts/sftd" --set "nodeSelector.kubernetes\\.io/hostname=$CALLING_NODE" --values "$BASE_DIR/values/sftd/values.yaml"

kubectl annotate node "$CALLING_NODE" wire.com/external-ip="$HOST_IP" --overwrite
helm upgrade --install coturn "$BASE_DIR/charts/coturn" --set "nodeSelector.kubernetes\\.io/hostname=$CALLING_NODE" --values "$BASE_DIR/values/coturn/values.yaml" --values "$BASE_DIR/values/coturn/secrets.yaml"
helm upgrade --install --wait --timeout=5m0s coturn "$BASE_DIR/charts/coturn" --set "nodeSelector.kubernetes\\.io/hostname=$CALLING_NODE" --values "$BASE_DIR/values/coturn/values.yaml" --values "$BASE_DIR/values/coturn/secrets.yaml"

# display running pods post deploying all helm charts in default namespace
kubectl get pods --sort-by=.metadata.creationTimestamp
}

# Orchestrates the full deployment: render chart values/secrets, configure
# them, then deploy datastores, wire-server, (optionally) cert-manager and
# ingress, and finally the calling services.
main() {

  # Create values.yaml from prod-values.example.yaml (backing up existing files)
  process_values "prod" "values"
  # Create secrets.yaml from prod-secrets.example.yaml (backing up existing files)
  process_values "prod" "secrets"

  # Sync postgresql secret — currently disabled; re-enable once wire-server
  # should pick up the in-cluster PostgreSQL password.
  # sync_pg_secrets

  # configure chart specific variables for each chart in values.yaml file
  configure_values

  # deploying with external datastores, useful for prod setup
  deploy_charts cassandra-external elasticsearch-external minio-external fake-aws demo-smtp rabbitmq-external databases-ephemeral reaper wire-server webapp account-pages team-settings ingress-nginx-controller

  # deploying cert-manager only when the env var DEPLOY_CERT_MANAGER is set to TRUE
  if [[ "$DEPLOY_CERT_MANAGER" == "TRUE" ]]; then
    # deploying cert manager to issue certs, by default letsencrypt-http01 issuer is configured
    deploy_cert_manager

    # nginx-ingress-services chart needs cert-manager to be deployed
    deploy_charts nginx-ingress-services

    # print status of certs
    kubectl get certificate
  fi

  # deploying the sftd and coturn calling services on the selected node
  deploy_calling_services
}

# Single entry-point invocation (the diff residue duplicated this line,
# which would have run the whole deployment twice).
main "$@"
2 changes: 1 addition & 1 deletion bin/offline-deploy.sh
Original file line number Diff line number Diff line change
Expand Up @@ -41,4 +41,4 @@ fi

$DOCKER_RUN_BASE $SSH_MOUNT $WSD_CONTAINER ./bin/offline-cluster.sh

sudo docker run --network=host -v $PWD:/wire-server-deploy $WSD_CONTAINER ./bin/helm-operations.sh
sudo docker run --network=host -v $PWD:/wire-server-deploy $WSD_CONTAINER sh -c 'TARGET_SYSTEM="example.dev" CERT_MASTER_EMAIL="certmaster@example.dev" DEPLOY_CERT_MANAGER=TRUE DUMP_LOGS_ON_FAIL=TRUE ./bin/helm-operations.sh'
Binary file added offline/architecture-wiab-stag.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading