diff --git a/customize_fusion_values.sh b/customize_fusion_values.sh
index 9d1bb93..3f3e6ed 100755
--- a/customize_fusion_values.sh
+++ b/customize_fusion_values.sh
@@ -13,6 +13,7 @@ CHART_VERSION="5.1.1"
 NAMESPACE=default
 OUTPUT_SCRIPT=""
 ADDITIONAL_VALUES=()
+KUBECTL="kubectl"
 
 function print_usage() {
   CMD="$1"
@@ -25,6 +26,7 @@
   echo -e "\nUse this script to create a custom Fusion values yaml from a template"
   echo -e "\nUsage: $CMD [OPTIONS] ... where OPTIONS include:\n"
   echo -e " -c Cluster name (required)\n"
+  echo -e " -b The Kubernetes command line tool executable to use, defaults to 'kubectl'\n"
   echo -e " -n Kubernetes namespace to install Fusion 5 into, defaults to 'default'\n"
   echo -e " -r Helm release name for installing Fusion 5; defaults to the namespace, see -n option\n"
   echo -e " --provider Name of your K8s provider, e.g. eks, aks, gke; defaults to 'gke'\n"
@@ -68,6 +70,14 @@ if [ $# -gt 1 ]; then
            CLUSTER_NAME="$2"
            shift 2
        ;;
+        -b)
+            if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
+              print_usage "$SCRIPT_CMD" "Missing value for the -b parameter!"
+              exit 1
+            fi
+            KUBECTL="$2"
+            shift 2
+        ;;
         -n)
            if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
              print_usage "$SCRIPT_CMD" "Missing value for the -n parameter!"
@@ -337,5 +347,6 @@
 else
   sed -i '' -e "s|<ADDITIONAL_VALUES>|${ADDITIONAL_VALUES_STRING}|g" "$OUTPUT_SCRIPT"
 fi
+sed -i -e "s|<KUBECTL>|${KUBECTL}|g" "$OUTPUT_SCRIPT"
 
 echo -e "\nCreate $OUTPUT_SCRIPT for upgrading you Fusion cluster. Please keep this script along with your custom values yaml file(s) in version control.\n"
diff --git a/install_prom.sh b/install_prom.sh
index e9ca156..506e775 100755
--- a/install_prom.sh
+++ b/install_prom.sh
@@ -2,6 +2,7 @@
 
 PROVIDER=gke
 NODE_POOL=""
+KUBECTL="kubectl"
 
 function print_usage() {
   CMD="$1"
@@ -14,6 +15,7 @@
   echo -e "\nUse this script to install Prometheus and Grafana into an existing Fusion 5 cluster"
   echo -e "\nUsage: $CMD [OPTIONS] ... where OPTIONS include:\n"
   echo -e " -c Name of the K8s cluster (required)\n"
+  echo -e " -b The Kubernetes command line tool executable to use, defaults to 'kubectl'\n"
   echo -e " -n Kubernetes namespace to install Fusion 5 into (required)\n"
   echo -e " -r Helm release name for installing Fusion 5; defaults to the namespace, see -n option\n"
   echo -e " --node-pool Node pool label to assign pods to specific nodes, this option is only useful for existing clusters"
@@ -32,6 +34,14 @@ if [ $# -gt 0 ]; then
            CLUSTER_NAME="$2"
            shift 2
        ;;
+        -b)
+            if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
+              print_usage "$SCRIPT_CMD" "Missing value for the -b parameter!"
+              exit 1
+            fi
+            KUBECTL="$2"
+            shift 2
+        ;;
         -n)
            if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
              print_usage "$SCRIPT_CMD" "Missing value for the -n parameter!"
@@ -119,8 +129,8 @@ if ! helm repo list | grep -q "https://kubernetes-charts.storage.googleapis.com"
   helm repo add stable https://kubernetes-charts.storage.googleapis.com
 fi
 
-if ! kubectl get namespace "${NAMESPACE}" > /dev/null 2>&1; then
-  kubectl create namespace "${NAMESPACE}"
+if ! ${KUBECTL} get namespace "${NAMESPACE}" > /dev/null 2>&1; then
+  ${KUBECTL} create namespace "${NAMESPACE}"
   if [ "$PROVIDER" == "gke" ]; then
     who_am_i=$(gcloud auth list --filter=status:ACTIVE --format="value(account)")
   else
@@ -128,12 +138,12 @@ if ! kubectl get namespace "${NAMESPACE}" > /dev/null 2>&1; then
   fi
   OWNER_LABEL="${who_am_i//@/-}"
   if [ "${OWNER_LABEL}" != "" ]; then
-    kubectl label namespace "${NAMESPACE}" "owner=${OWNER_LABEL}"
+    ${KUBECTL} label namespace "${NAMESPACE}" "owner=${OWNER_LABEL}"
   fi
   echo -e "\nCreated namespace ${NAMESPACE} with owner label ${OWNER_LABEL}\n"
 fi
 
-if kubectl get sts -n "${NAMESPACE}" -l "app=prometheus" -o "jsonpath={.items[0].metadata.labels['release']}" 2>&1 | grep -q "${RELEASE}-monitoring"; then
+if ${KUBECTL} get sts -n "${NAMESPACE}" -l "app=prometheus" -o "jsonpath={.items[0].metadata.labels['release']}" 2>&1 | grep -q "${RELEASE}-monitoring"; then
   echo -e "\nERROR: There is already a Prometheus StatefulSet in namespace: ${NAMESPACE} with release name: ${RELEASE}-monitoring\n"
   exit 1
 fi
diff --git a/setup_f5_k8s.sh b/setup_f5_k8s.sh
index 44e0dcc..86a0c3d 100755
--- a/setup_f5_k8s.sh
+++ b/setup_f5_k8s.sh
@@ -21,6 +21,7 @@ DRY_RUN=""
 SOLR_DISK_GB=50
 SOLR_REPLICAS=1
 NODE_POOL="{}"
+KUBECTL="kubectl"
 
 function print_usage() {
   CMD="$1"
@@ -35,6 +36,7 @@
   echo -e " -c Name of the K8s cluster (required)\n"
   echo -e " -r Helm release name for installing Fusion 5, defaults to 'f5'\n"
   echo -e " -n Kubernetes namespace to install Fusion 5 into, defaults to 'default'\n"
+  echo -e " -b The Kubernetes command line tool executable to use, defaults to 'kubectl'\n"
   echo -e " --provider Lowercase label for your K8s platform provider, e.g. eks, aks, gke; defaults to 'k8s'\n"
   echo -e " --node-pool Node pool label to assign pods to specific nodes, this option is only useful for existing clusters"
   echo -e " where you defined a custom node pool, wrap the arg in double-quotes\n"
@@ -82,6 +84,14 @@ if [ $# -gt 0 ]; then
            RELEASE="$2"
            shift 2
        ;;
+        -b)
+            if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
+              print_usage "$SCRIPT_CMD" "Missing value for the -b parameter!"
+              exit 1
+            fi
+            KUBECTL="$2"
+            shift 2
+        ;;
         --provider)
            if [[ -z "$2" || "${2:0:1}" == "-" ]]; then
              print_usage "$SCRIPT_CMD" "Missing value for the --provider parameter!"
@@ -220,10 +230,10 @@ DEFAULT_MY_VALUES="${PROVIDER}_${CLUSTER_NAME}_${RELEASE}_fusion_values.yaml"
 UPGRADE_SCRIPT="${PROVIDER}_${CLUSTER_NAME}_${RELEASE}_upgrade_fusion.sh"
 
 # Check our prerequisites are in place
-hash kubectl
+hash ${KUBECTL}
 has_prereq=$?
 if [ $has_prereq == 1 ]; then
-  echo -e "\nERROR: Must install kubectl before proceeding with this script!"
+  echo -e "\nERROR: Must install ${KUBECTL} before proceeding with this script!"
   exit 1
 fi
 
@@ -235,7 +245,7 @@ if [ $has_prereq == 1 ]; then
 fi
 
 # Log our current kube context for the user
-current=$(kubectl config current-context)
+current=$(${KUBECTL} config current-context)
 echo -e "Using kubeconfig: $current"
 
 # Setup our owner label so we can check ownership of namespaces
@@ -252,12 +262,12 @@ is_helm_v3=$(helm version --short | grep v3)
 
 if [ "${is_helm_v3}" == "" ]; then
   # see if Tiller is deployed ...
-  kubectl rollout status deployment/tiller-deploy --timeout=10s -n kube-system > /dev/null 2>&1
+  ${KUBECTL} rollout status deployment/tiller-deploy --request-timeout=10s -n kube-system > /dev/null 2>&1
   rollout_status=$?
   if [ $rollout_status != 0 ]; then
     echo -e "\nSetting up Helm Tiller ..."
-    kubectl create serviceaccount --namespace kube-system tiller
-    kubectl create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
+    ${KUBECTL} create serviceaccount --namespace kube-system tiller
+    ${KUBECTL} create clusterrolebinding tiller-cluster-rule --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
     helm init --service-account tiller --wait
     helm version
   fi
@@ -268,20 +278,20 @@ fi
 # If we are upgrading
 if [ "${UPGRADE}" == "1" ]; then
   # Make sure the namespace exists
-  if ! kubectl get namespace "${NAMESPACE}" > /dev/null 2>&1; then
+  if ! ${KUBECTL} get namespace "${NAMESPACE}" > /dev/null 2>&1; then
     echo -e "\nNamespace ${NAMESPACE} not found, if this is a new cluster please run an install first"
     exit 1
   fi
 
   # Check if the owner label on the namespace is the same as we are, so we cannot
   # accidentally upgrade a release from someone elses namespace
-  namespace_owner=$(kubectl get namespace "${NAMESPACE}" -o 'jsonpath={.metadata.labels.owner}')
+  namespace_owner=$(${KUBECTL} get namespace "${NAMESPACE}" -o 'jsonpath={.metadata.labels.owner}')
   if [ "${namespace_owner}" != "${OWNER_LABEL}" ] && [ "${FORCE}" != "1" ]; then
     echo -e "Namespace ${NAMESPACE} is owned by: ${namespace_owner}, by we are: ${OWNER_LABEL} please provide the --force parameter if you are sure you wish to upgrade this namespace"
     exit 1
   fi
 elif [ "$PURGE" == "1" ]; then
-  kubectl get namespace "${NAMESPACE}"
+  ${KUBECTL} get namespace "${NAMESPACE}"
   namespace_exists=$?
   if [ "$namespace_exists" != "0" ]; then
     echo -e "\nNamespace ${NAMESPACE} not found so assuming ${RELEASE_NAME} has already been purged"
@@ -290,7 +300,7 @@ elif [ "$PURGE" == "1" ]; then
 
   # Check if the owner label on the namespace is the same as we are, so we cannot
   # accidentally purge someone elses release
-  namespace_owner=$(kubectl get namespace "${NAMESPACE}" -o 'jsonpath={.metadata.labels.owner}')
+  namespace_owner=$(${KUBECTL} get namespace "${NAMESPACE}" -o 'jsonpath={.metadata.labels.owner}')
   if [ "${namespace_owner}" != "${OWNER_LABEL}" ] && [ "${FORCE}" != "1" ]; then
     echo -e "Namespace ${NAMESPACE} is owned by: ${namespace_owner}, by we are: ${OWNER_LABEL} please provide the --force parameter if you are sure you wish to purge this namespace"
     exit 1
@@ -309,16 +319,16 @@ elif [ "$PURGE" == "1" ]; then
   else
     helm del --purge "${RELEASE}"
   fi
-  kubectl delete deployments -l app.kubernetes.io/part-of=fusion --namespace "${NAMESPACE}" --grace-period=0 --force --timeout=5s
-  kubectl delete job "${RELEASE}-api-gateway" --namespace "${NAMESPACE}" --grace-period=0 --force --timeout=1s
-  kubectl delete svc -l app.kubernetes.io/part-of=fusion --namespace "${NAMESPACE}" --grace-period=0 --force --timeout=2s
-  kubectl delete pvc -l app.kubernetes.io/part-of=fusion --namespace "${NAMESPACE}" --grace-period=0 --force --timeout=5s
-  kubectl delete pvc -l "release=${RELEASE}" --namespace "${NAMESPACE}" --grace-period=0 --force --timeout=5s
-  kubectl delete pvc -l "app.kubernetes.io/instance=${RELEASE}" --namespace "${NAMESPACE}" --grace-period=0 --force --timeout=5s
-  kubectl delete pvc -l app=prometheus --namespace "${NAMESPACE}" --grace-period=0 --force --timeout=5s
-  kubectl delete serviceaccount --namespace "${NAMESPACE}" "${RELEASE}-api-gateway-jks-create"
+  ${KUBECTL} delete deployments -l app.kubernetes.io/part-of=fusion --namespace "${NAMESPACE}" --grace-period=0 --force --request-timeout=5s
+  ${KUBECTL} delete job "${RELEASE}-api-gateway" --namespace "${NAMESPACE}" --grace-period=0 --force --request-timeout=1s
+  ${KUBECTL} delete svc -l app.kubernetes.io/part-of=fusion --namespace "${NAMESPACE}" --grace-period=0 --force --request-timeout=2s
+  ${KUBECTL} delete pvc -l app.kubernetes.io/part-of=fusion --namespace "${NAMESPACE}" --grace-period=0 --force --request-timeout=5s
+  ${KUBECTL} delete pvc -l "release=${RELEASE}" --namespace "${NAMESPACE}" --grace-period=0 --force --request-timeout=5s
+  ${KUBECTL} delete pvc -l "app.kubernetes.io/instance=${RELEASE}" --namespace "${NAMESPACE}" --grace-period=0 --force --request-timeout=5s
+  ${KUBECTL} delete pvc -l app=prometheus --namespace "${NAMESPACE}" --grace-period=0 --force --request-timeout=5s
+  ${KUBECTL} delete serviceaccount --namespace "${NAMESPACE}" "${RELEASE}-api-gateway-jks-create"
   if [ "${NAMESPACE}" != "default" ] && [ "${NAMESPACE}" != "kube-public" ] && [ "${NAMESPACE}" != "kube-system" ]; then
-    kubectl delete namespace "${NAMESPACE}" --grace-period=0 --force --timeout=10s
+    ${KUBECTL} delete namespace "${NAMESPACE}" --grace-period=0 --force --request-timeout=10s
   fi
 fi
 exit 0
@@ -337,10 +347,10 @@ else
   fi
 
   # There isn't let's check if there is a fusion deployment in the namespace already
-  if ! kubectl get deployment -n "${NAMESPACE}" -l "app.kubernetes.io/component=query-pipeline,app.kubernetes.io/part-of=fusion" 2>&1 | grep -q "No resources"; then
+  if ! ${KUBECTL} get deployment -n "${NAMESPACE}" -l "app.kubernetes.io/component=query-pipeline,app.kubernetes.io/part-of=fusion" 2>&1 | grep -q "No resources"; then
    # There is a fusion deployed into this namespace, try and protect against two releases being installed into
    # The same namespace
-    instance=$(kubectl get deployment -n "${NAMESPACE}" -l "app.kubernetes.io/component=query-pipeline,app.kubernetes.io/part-of=fusion" -o "jsonpath={.items[0].metadata.labels['app\.kubernetes\.io/instance']}")
+    instance=$(${KUBECTL} get deployment -n "${NAMESPACE}" -l "app.kubernetes.io/component=query-pipeline,app.kubernetes.io/part-of=fusion" -o "jsonpath={.items[0].metadata.labels['app\.kubernetes\.io/instance']}")
     echo -e "\nERROR: There is already a fusion deployment in namespace: ${NAMESPACE} with release name: ${instance}, please choose a new namespace\n"
     exit 1
   fi
@@ -350,19 +360,19 @@ fi
 
 # report_ns logs a message to the user informing them how to change the default namespace
 function report_ns() {
   if [ "${NAMESPACE}" != "default" ]; then
-    echo -e "\nNote: Change the default namespace for kubectl to ${NAMESPACE} by doing:\n kubectl config set-context --current --namespace=${NAMESPACE}\n"
+    echo -e "\nNote: Change the default namespace for ${KUBECTL} to ${NAMESPACE} by doing:\n ${KUBECTL} config set-context --current --namespace=${NAMESPACE}\n"
   fi
 }
 
 # proxy_url prints how to access the proxy via a LoadBalancer service
 function proxy_url() {
   if [ "${PROVIDER}" == "eks" ]; then
-    export PROXY_HOST=$(kubectl --namespace "${NAMESPACE}" get service proxy -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
+    export PROXY_HOST=$(${KUBECTL} --namespace "${NAMESPACE}" get service proxy -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
   else
-    export PROXY_HOST=$(kubectl --namespace "${NAMESPACE}" get service proxy -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+    export PROXY_HOST=$(${KUBECTL} --namespace "${NAMESPACE}" get service proxy -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
   fi
-  export PROXY_PORT=$(kubectl --namespace "${NAMESPACE}" get service proxy -o jsonpath='{.spec.ports[?(@.protocol=="TCP")].port}')
+  export PROXY_PORT=$(${KUBECTL} --namespace "${NAMESPACE}" get service proxy -o jsonpath='{.spec.ports[?(@.protocol=="TCP")].port}')
   export PROXY_URL="$PROXY_HOST:$PROXY_PORT"
 
   if [ "$PROXY_URL" != ":" ]; then
@@ -380,7 +390,7 @@ function ingress_setup() {
   echo -ne "\nWaiting for the Loadbalancer IP to be assigned"
   loops=24
   while (( loops > 0 )); do
-    ingressIp=$(kubectl --namespace "${NAMESPACE}" get ingress "${RELEASE}-api-gateway" -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+    ingressIp=$(${KUBECTL} --namespace "${NAMESPACE}" get ingress "${RELEASE}-api-gateway" -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
     if [[ ! -z ${ingressIp} ]]; then
       export INGRESS_IP="${ingressIp}"
       break
@@ -392,7 +402,7 @@ function ingress_setup() {
   echo -e "\n\nFusion 5 Gateway service exposed at: ${INGRESS_HOSTNAME}\n"
   echo -e "Please ensure that the public DNS record for ${INGRESS_HOSTNAME} is updated to point to ${INGRESS_IP}"
   if [ "$TLS_ENABLED" == "1" ]; then
-    echo -e "An SSL certificate will be automatically generated once the public DNS record has been updated,\nthis may take up to an hour after DNS has updated to be issued.\nYou can use kubectl get managedcertificates -o yaml to check the status of the certificate issue process."
+    echo -e "An SSL certificate will be automatically generated once the public DNS record has been updated,\nthis may take up to an hour after DNS has updated to be issued.\nYou can use ${KUBECTL} get managedcertificates -o yaml to check the status of the certificate issue process."
   fi
   report_ns
 }
@@ -430,11 +440,11 @@ if [ "$UPGRADE" != "1" ]; then
    #Adding a retry loop because EKS takes more time to create nodes.
    retries=2
    while (( retries > 0 )); do
-      find_nodes=$(kubectl get nodes -l "${node_selector}" | grep -i ready)
+      find_nodes=$(${KUBECTL} get nodes -l "${node_selector}" | grep -i ready)
      has_nodes=$?
      if [ "${has_nodes}" == "0" ]; then
        echo -e "Found at least one healthy node matching nodeSelector: ${NODE_POOL}"
-        num_nodes=$(kubectl get nodes -l "${node_selector}" | grep -i ready | wc -l)
+        num_nodes=$(${KUBECTL} get nodes -l "${node_selector}" | grep -i ready | wc -l)
        retries=-1
      else
        echo -e "\nERROR: No 'Ready' nodes found matching nodeSelector: ${node_selector}! Retrying in 30 seconds"
@@ -448,7 +458,7 @@ if [ "$UPGRADE" != "1" ]; then
      exit 1
    fi
   else
-    num_nodes=$(kubectl get nodes | grep -i ready | wc -l)
+    num_nodes=$(${KUBECTL} get nodes | grep -i ready | wc -l)
   fi
 
   ( "${SCRIPT_DIR}/customize_fusion_values.sh" "${DEFAULT_MY_VALUES}" -c "${CLUSTER_NAME}" -n "${NAMESPACE}" -r "${RELEASE}" --provider "${PROVIDER}" --prometheus "${PROMETHEUS_ON}" \
@@ -462,7 +472,7 @@ fi
 # just let the user do that manually with Helm as needed
 if [ "$UPGRADE" != "1" ] && [ "${PROMETHEUS}" != "none" ]; then
   if [ "${PROMETHEUS}" == "install" ]; then
-    ( "${SCRIPT_DIR}/install_prom.sh" -c "${CLUSTER_NAME}" -n "${NAMESPACE}" -r "${RELEASE}" --provider "${PROVIDER}" --node-pool "${NODE_POOL}" )
+    ( "${SCRIPT_DIR}/install_prom.sh" -b "${KUBECTL}" -c "${CLUSTER_NAME}" -n "${NAMESPACE}" -r "${RELEASE}" --provider "${PROVIDER}" --node-pool "${NODE_POOL}" )
   fi
 fi
diff --git a/upgrade_fusion.sh.example b/upgrade_fusion.sh.example
index 5c9d774..855f611 100755
--- a/upgrade_fusion.sh.example
+++ b/upgrade_fusion.sh.example
@@ -7,6 +7,7 @@
 CLUSTER_NAME=
 RELEASE=
 NAMESPACE=
 CHART_VERSION=
+KUBECTL=<KUBECTL>
 
 MY_VALUES=""
@@ -24,14 +25,14 @@ if [ ! -z "${DRY_RUN_REQUESTED}" ]; then
   DRY_RUN="--dry-run"
 fi
 
-current_context=$(kubectl config current-context | grep "$CLUSTER_NAME")
+current_context=$(${KUBECTL} config current-context | grep "$CLUSTER_NAME")
 if [ "${current_context}" == "" ]; then
   echo -e "\nERROR: Current kubeconfig not pointing to the $CLUSTER_NAME cluster!\nPlease update your current config to the correct cluster for upgrading Fusion.\n"
   exit 1
 fi
 
-if ! kubectl get namespace "${NAMESPACE}" > /dev/null 2>&1; then
-  kubectl create namespace "${NAMESPACE}"
+if ! ${KUBECTL} get namespace "${NAMESPACE}" > /dev/null 2>&1; then
+  ${KUBECTL} create namespace "${NAMESPACE}"
   if [ "$PROVIDER" == "gke" ]; then
     who_am_i=$(gcloud auth list --filter=status:ACTIVE --format="value(account)")
   else
@@ -39,7 +40,7 @@ if ! kubectl get namespace "${NAMESPACE}" > /dev/null 2>&1; then
   fi
   OWNER_LABEL="${who_am_i//@/-}"
   if [ "${OWNER_LABEL}" != "" ]; then
-    kubectl label namespace "${NAMESPACE}" "owner=${OWNER_LABEL}"
+    ${KUBECTL} label namespace "${NAMESPACE}" "owner=${OWNER_LABEL}"
   fi
   echo -e "\nCreated namespace ${NAMESPACE} with owner label ${OWNER_LABEL}\n"
 fi
@@ -56,8 +57,8 @@ helm repo update
 
 if [ "$PROVIDER" == "gke" ]; then
   # Make sure that the metric server is running
-  metrics_deployment=$(kubectl get deployment -n kube-system | grep metrics-server | cut -d ' ' -f1 -)
-  kubectl rollout status deployment/${metrics_deployment} --timeout=60s --namespace "kube-system"
+  metrics_deployment=$(${KUBECTL} get deployment -n kube-system | grep metrics-server | cut -d ' ' -f1 -)
+  ${KUBECTL} rollout status deployment/${metrics_deployment} --timeout=60s --namespace "kube-system"
   echo ""
 fi
@@ -67,13 +68,13 @@ echo -e "\nNOTE: If this will be a long-running cluster for production purposes,
 helm upgrade ${DRY_RUN} ${RELEASE} "${lw_helm_repo}/fusion" --install --namespace "${NAMESPACE}" --version "${CHART_VERSION}" ${MY_VALUES}
 
 echo -e "\nWaiting up to 10 minutes to see the Fusion API Gateway deployment come online ...\n"
-kubectl rollout status deployment/${RELEASE}-api-gateway --timeout=600s --namespace "${NAMESPACE}"
+${KUBECTL} rollout status deployment/${RELEASE}-api-gateway --timeout=600s --namespace "${NAMESPACE}"
 echo -e "\nWaiting up to 5 minutes to see the Fusion Indexing deployment come online ...\n"
-kubectl rollout status deployment/${RELEASE}-fusion-indexing --timeout=300s --namespace "${NAMESPACE}"
+${KUBECTL} rollout status deployment/${RELEASE}-fusion-indexing --timeout=300s --namespace "${NAMESPACE}"
 
-current_ns=$(kubectl config view --minify --output 'jsonpath={..namespace}')
+current_ns=$(${KUBECTL} config view --minify --output 'jsonpath={..namespace}')
 if [ "$NAMESPACE" != "$current_ns" ]; then
-  kubectl config set-context --current --namespace=${NAMESPACE}
+  ${KUBECTL} config set-context --current --namespace=${NAMESPACE}
 fi
 echo ""
 helm ls