proxy acme-client -> certbot

Michael Brachmann 2019-09-18 09:04:03 -04:00
parent fdd52375d0
commit 106564f2d3
3 changed files with 45 additions and 20 deletions


@@ -4,25 +4,25 @@ GIT_PASS=$2
 #mimir-async
 cd ./mimir
-sudo docker build -t 127.0.0.1:32000/vizier-mimir-async-spark ./ --build-arg gituser=$GIT_USER --build-arg gitpass=$GIT_PASS --build-arg MIMIR_BRANCH=pdf-datasource
+sudo docker build -t docker.mimirdb.info/vizier-mimir-async-spark ./ --build-arg gituser=$GIT_USER --build-arg gitpass=$GIT_PASS --build-arg MIMIR_BRANCH=pdf-datasource
 #api-async
 cd ../api-async
-sudo docker build -t 127.0.0.1:32000/vizier-api-async-spark ./ --build-arg gituser=$GIT_USER --build-arg gitpass=$GIT_PASS --build-arg API_BRANCH=master
+sudo docker build -t docker.mimirdb.info/vizier-api-async-spark ./ --build-arg gituser=$GIT_USER --build-arg gitpass=$GIT_PASS --build-arg API_BRANCH=master
 #ui-async
 cd ../ui-nginx
-sudo docker build -t 127.0.0.1:32000/vizier-ui-async ./ --build-arg UI_BRANCH=master
+sudo docker build -t docker.mimirdb.info/vizier-ui-async ./ --build-arg UI_BRANCH=master
 #analytics
 cd ../analytics-nginx
-#sudo docker build -t 127.0.0.1:32000/vizier-analytics ./
+sudo docker build -t docker.mimirdb.info/vizier-analytics ./
 #spark-master and spark-worker
 cd ../spark-docker
-#sudo docker build -t 127.0.0.1:32000/spark-hadoop ./
+#sudo docker build -t docker.mimirdb.info/spark-hadoop ./
 #proxy
 cd ../vizier-nginx-proxy
-sudo docker build -t 127.0.0.1:32000/vizier-proxy ./
+sudo docker build -t docker.mimirdb.info/vizier-proxy ./
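The build script above now tags every image for the docker.mimirdb.info registry instead of the local 127.0.0.1:32000 one. A minimal follow-up sketch, assuming docker.mimirdb.info is a private registry the build host can push to; the login and push steps are not part of this commit:

    # assumption: credentials for docker.mimirdb.info are already available
    sudo docker login docker.mimirdb.info
    for img in vizier-mimir-async-spark vizier-api-async-spark vizier-ui-async vizier-analytics vizier-proxy; do
      sudo docker push "docker.mimirdb.info/$img"
    done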


@@ -1,12 +1,13 @@
 #run the containers
 #spark-master
-#kubectl run namenode --image=localhost:32000/spark-hadoop --replicas=1 --port=22 --port=6066 --port=7077 --port=8020 --port=8080 --port=50070 --env="MASTER=spark://namenode:7077" --env="SPARK_CONF_DIR=/conf" --env="SPARK_PUBLIC_DNS=127.0.0.1" --env="LD_LIBRARY_PATH=/usr/local/hadoop/lib/native/" --env="SPARK_EXECUTOR_MEMORY=8g" --env="SPARK_DAEMON_MEMORY=8g" --env="SPARK_DRIVER_MEMORY=8g" --env="SPARK_WORKER_MEMORY=8g" --env="HDFS_CONF_dfs_client_use_datanode_hostname=true" --env="AWS_ECS=false" --command /usr/local/spark-2.2.0-bin-without-hadoop/master.sh
+#kubectl run namenode --image=docker.mimirdb.info/spark-hadoop --replicas=1 --port=22 --port=6066 --port=7077 --port=8020 --port=8080 --port=50070 --env="MASTER=spark://namenode:7077" --env="SPARK_CONF_DIR=/conf" --env="SPARK_PUBLIC_DNS=127.0.0.1" --env="LD_LIBRARY_PATH=/usr/local/hadoop/lib/native/" --env="SPARK_EXECUTOR_MEMORY=8g" --env="SPARK_DAEMON_MEMORY=8g" --env="SPARK_DRIVER_MEMORY=8g" --env="SPARK_WORKER_MEMORY=8g" --env="HDFS_CONF_dfs_client_use_datanode_hostname=true" --env="AWS_ECS=false" --command /usr/local/spark-2.2.0-bin-without-hadoop/master.sh
 kind: PersistentVolume
 apiVersion: v1
 metadata:
   name: nn-pv-1
   labels:
     type: namenode
+  namespace: vizier
 spec:
   capacity:
     storage: 5Gi
@@ -20,6 +21,7 @@ kind: PersistentVolume
 apiVersion: v1
 metadata:
   name: dn-pv-1
+  namespace: vizier
 spec:
   capacity:
     storage: 3Gi
@@ -33,6 +35,7 @@ kind: PersistentVolume
 apiVersion: v1
 metadata:
   name: dn-pv-2
+  namespace: vizier
 spec:
   capacity:
     storage: 1Gi
@@ -46,6 +49,7 @@ kind: PersistentVolumeClaim
 apiVersion: v1
 metadata:
   name: hdfs-name
+  namespace: vizier
 spec:
   selector:
     matchLabels:
@@ -61,6 +65,7 @@ kind: Deployment
 apiVersion: extensions/v1beta1
 metadata:
   name: namenode
+  namespace: vizier
 spec:
   replicas: 1
   selector:
@@ -70,6 +75,7 @@ spec:
     metadata:
       labels:
         component: namenode
+      namespace: vizier
     spec:
       volumes:
       - name: hdfs-name
@@ -132,7 +138,7 @@ metadata:
     app: namenode
     hasuraService: custom
   name: namenode
-  namespace: default
+  namespace: vizier
 spec:
   ports:
   - name: ssh
@@ -164,7 +170,7 @@ spec:
   type: ClusterIP
 ---
 #spark-worker
-#kubectl run $HOSTNAME --image=localhost:32000/spark-hadoop --replicas=2 --port=$WORKER_PORT --port=$DATANODE_PORT --env="SPARK_CONF_DIR=/conf" --env="SPARK_PUBLIC_DNS=127.0.0.1" --env="SPARK_WORKER_CORES=4" --env="SPARK_WORKER_PORT=$WORKER_PORT" --env="SPARK_WORKER_WEBUI_PORT=$WORKER_WEBUI_PORT" --env="LD_LIBRARY_PATH=/usr/local/hadoop/lib/native/" --env="HDFS_DATA_HOST=$HOSTNAME" --env="HDFS_HOST=spark-master" --env="HDFS_CONF_dfs_datanode_address=0.0.0.0:$DATANODE_PORT" --env="SPARK_EXECUTOR_MEMORY=8g" --env="SPARK_DAEMON_MEMORY=8g" --env="SPARK_DRIVER_MEMORY=8g" --env="SPARK_WORKER_MEMORY=8g" --env="HDFS_CONF_dfs_client_use_datanode_hostname=true" --env="AWS_ECS=false" --command /usr/local/spark-2.2.0-bin-without-hadoop/worker.sh
+#kubectl run $HOSTNAME --image=docker.mimirdb.info/spark-hadoop --replicas=2 --port=$WORKER_PORT --port=$DATANODE_PORT --env="SPARK_CONF_DIR=/conf" --env="SPARK_PUBLIC_DNS=127.0.0.1" --env="SPARK_WORKER_CORES=4" --env="SPARK_WORKER_PORT=$WORKER_PORT" --env="SPARK_WORKER_WEBUI_PORT=$WORKER_WEBUI_PORT" --env="LD_LIBRARY_PATH=/usr/local/hadoop/lib/native/" --env="HDFS_DATA_HOST=$HOSTNAME" --env="HDFS_HOST=spark-master" --env="HDFS_CONF_dfs_datanode_address=0.0.0.0:$DATANODE_PORT" --env="SPARK_EXECUTOR_MEMORY=8g" --env="SPARK_DAEMON_MEMORY=8g" --env="SPARK_DRIVER_MEMORY=8g" --env="SPARK_WORKER_MEMORY=8g" --env="HDFS_CONF_dfs_client_use_datanode_hostname=true" --env="AWS_ECS=false" --command /usr/local/spark-2.2.0-bin-without-hadoop/worker.sh
 kind: Service
 apiVersion: v1
 metadata:
@@ -173,7 +179,7 @@ metadata:
     app: datanode
     hasuraService: custom
   name: datanode
-  namespace: default
+  namespace: vizier
 spec:
   ports:
   - name: sparkblock
@@ -221,7 +227,7 @@ metadata:
     app: datanode
     hasuraService: custom
   name: datanode
-  namespace: default
+  namespace: vizier
 spec:
   serviceName: "datanode"
   replicas: 1
@@ -230,6 +236,7 @@ spec:
       creationTimestamp: null
       labels:
         app: datanode
+      namespace: vizier
     spec:
       containers:
      - name: datanode
@@ -287,6 +294,7 @@ spec:
   volumeClaimTemplates:
   - metadata:
       name: hdfs-data
+      namespace: vizier
     spec:
       storageClassName: persist
       accessModes:
@@ -296,13 +304,14 @@ spec:
           storage: 1Gi
 ---
 #mimir
-#kubectl run vizier-mimir --image=localhost:32000/vizier-mimir-spark --replicas=1 --port=9001 --port=33388 --expose --env="RESTORE_BACKUP=false" --env="PULL_MIMIR=false" --env="AWS_ACCESS_KEY_ID=AKIAJ7MLFSPYLYG47ARQ" --env="AWS_SECRET_ACCESS_KEY=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" --env="S3_BUCKET_NAME=vizier-data-test"
+#kubectl run vizier-mimir --image=docker.mimirdb.info/vizier-mimir-spark --replicas=1 --port=9001 --port=33388 --expose --env="RESTORE_BACKUP=false" --env="PULL_MIMIR=false" --env="AWS_ACCESS_KEY_ID=AKIAJ7MLFSPYLYG47ARQ" --env="AWS_SECRET_ACCESS_KEY=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" --env="S3_BUCKET_NAME=vizier-data-test"
 kind: PersistentVolume
 apiVersion: v1
 metadata:
   name: vizier-data-volume
   labels:
     type: local
+  namespace: vizier
 spec:
   storageClassName: persist
   capacity:
@@ -316,6 +325,7 @@ kind: PersistentVolumeClaim
 apiVersion: v1
 metadata:
   name: vizier-data-pv-claim
+  namespace: vizier
 spec:
   storageClassName: persist
   accessModes:
@@ -328,6 +338,7 @@ apiVersion: v1
 kind: Secret
 metadata:
   name: s3-credentials
+  namespace: vizier
 data:
   access-key-id: QUtJQUo3TUxGU1BZTFlHNDdBUlEK
   access-key-secret: ZEw3OXFKR3lMa1VGeVl2bW1nM2hFbjhiSWtsU2FUa3JmRzBJWHVraQo=
@@ -336,6 +347,7 @@ kind: Deployment
 apiVersion: extensions/v1beta1
 metadata:
   name: vizier-mimir
+  namespace: vizier
 spec:
   replicas: 1
   selector:
@@ -345,6 +357,7 @@ spec:
     metadata:
       labels:
         component: vizier-mimir
+      namespace: vizier
     spec:
       volumes:
       - name: vizier-data-pv-storage
@@ -352,7 +365,7 @@ spec:
           claimName: vizier-data-pv-claim
       containers:
       - name: vizier-mimir
-        image: localhost:32000/vizier-mimir-async-spark
+        image: docker.mimirdb.info/vizier-mimir-async-spark
         ports:
         - containerPort: 9001
         - containerPort: 4041
@@ -399,6 +412,7 @@ kind: Service
 apiVersion: v1
 metadata:
   name: vizier-mimir
+  namespace: vizier
 spec:
   ports:
   - name: mimirapi
@@ -420,11 +434,12 @@ spec:
     component: vizier-mimir
 ---
 #api
-#kubectl run vizier-api --image=localhost:32000/vizier-api-spark --replicas=1 --port=9001 --port=80 --port=443 --expose --env="APP_PATH=" --env="API_SERVER=localhost" --env="API_LOCAL_PORT=443" --env="API_PORT=443" --env="API_SCHEME=http" --env="AWS_ACCESS_KEY_ID=AKIAJ7MLFSPYLYG47ARQ" --env="AWS_SECRET_ACCESS_KEY=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" --env="S3_BUCKET_NAME=vizier-data-test"
+#kubectl run vizier-api --image=docker.mimirdb.info/vizier-api-spark --replicas=1 --port=9001 --port=80 --port=443 --expose --env="APP_PATH=" --env="API_SERVER=localhost" --env="API_LOCAL_PORT=443" --env="API_PORT=443" --env="API_SCHEME=http" --env="AWS_ACCESS_KEY_ID=AKIAJ7MLFSPYLYG47ARQ" --env="AWS_SECRET_ACCESS_KEY=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" --env="S3_BUCKET_NAME=vizier-data-test"
 kind: Deployment
 apiVersion: extensions/v1beta1
 metadata:
   name: vizier-api
+  namespace: vizier
 spec:
   replicas: 1
   selector:
@@ -441,7 +456,7 @@ spec:
           claimName: vizier-data-pv-claim
       containers:
       - name: vizier-api
-        image: localhost:32000/vizier-api-async-spark
+        image: docker.mimirdb.info/vizier-api-async-spark
         ports:
         - containerPort: 80
         - containerPort: 9001
@@ -485,6 +500,7 @@ kind: Service
 apiVersion: v1
 metadata:
   name: vizier-api
+  namespace: vizier
 spec:
   ports:
   - name: api
@@ -497,11 +513,12 @@ spec:
     component: vizier-api
 ---
 #ui
-#kubectl run vizier-ui --image=localhost:32000/vizier-ui --replicas=1 --port=9001 --port=80 --port=443 --expose
+#kubectl run vizier-ui --image=docker.mimirdb.info/vizier-ui --replicas=1 --port=9001 --port=80 --port=443 --expose
 kind: Deployment
 apiVersion: extensions/v1beta1
 metadata:
   name: vizier-ui
+  namespace: vizier
 spec:
   replicas: 1
   selector:
@@ -511,11 +528,12 @@ spec:
     metadata:
       labels:
         component: vizier-ui
+      namespace: vizier
     spec:
       hostname: vizier-ui
       containers:
       - name: vizier-ui
-        image: localhost:32000/vizier-ui-async
+        image: docker.mimirdb.info/vizier-ui-async
         ports:
         - containerPort: 80
         - containerPort: 443
@@ -537,6 +555,7 @@ kind: Service
 apiVersion: v1
 metadata:
   name: vizier-ui
+  namespace: vizier
 spec:
   ports:
   - name: ui
@@ -552,11 +571,12 @@ spec:
     component: vizier-ui
 ---
 #analytics
-#kubectl run vizier-analytics --image=localhost:32000/vizier-analytics --replicas=1 --port=9001 --port=80 --expose
+#kubectl run vizier-analytics --image=docker.mimirdb.info/vizier-analytics --replicas=1 --port=9001 --port=80 --expose
 kind: Deployment
 apiVersion: extensions/v1beta1
 metadata:
   name: vizier-analytics
+  namespace: vizier
 spec:
   replicas: 1
   selector:
@@ -566,10 +586,11 @@ spec:
     metadata:
       labels:
         component: vizier-analytics
+      namespace: vizier
     spec:
       containers:
      - name: vizier-analytics
-        image: localhost:32000/vizier-analytics
+        image: docker.mimirdb.info/vizier-analytics
         ports:
         - containerPort: 80
         - containerPort: 443
@@ -582,6 +603,7 @@ kind: Service
 apiVersion: v1
 metadata:
   name: vizier-analytics
+  namespace: vizier
 spec:
   ports:
   - name: analytics
@@ -602,6 +624,7 @@ kind: Deployment
 apiVersion: extensions/v1beta1
 metadata:
   name: vizier-proxy
+  namespace: vizier
 spec:
   replicas: 1
   selector:
@@ -611,6 +634,7 @@ spec:
     metadata:
       labels:
         component: vizier-proxy
+      namespace: vizier
     spec:
       containers:
       - name: vizier-proxy
@@ -633,6 +657,7 @@ kind: Service
 apiVersion: v1
 metadata:
   name: vizier-proxy
+  namespace: vizier
 spec:
   ports:
   - name: ssh
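Every object in the manifests above is now pinned to the vizier namespace, but this file does not add a Namespace object itself. A minimal sketch of the implied prerequisite, assuming the namespace is created out of band before the manifests are applied; the manifest filename below is a placeholder, since the real filename is not shown in this diff:

    # assumption: the vizier namespace is created once, before kubectl apply
    kubectl create namespace vizier
    kubectl apply -f vizier-deployment.yaml   # placeholder name for the manifest file above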


@@ -3,7 +3,7 @@
 hosts=$ACME_HOSTS
 for host in $hosts; do
-  acme-client -a https://letsencrypt.org/documents/LE-SA-v1.2-November-15-2017.pdf -Nnmv $host && renew=1
+  sudo /usr/local/bin/certbot-auto renew
 done
 [ "$renew" = 1 ] && rc-service nginx reload