Kubernetes deployment to AWS EKS

Mike 2019-02-12 13:57:18 -05:00
parent b3a936ec46
commit 4276e98749
9 changed files with 705 additions and 22 deletions

View file

@@ -4,7 +4,7 @@ FROM docker.mimirdb.info/alpine_oraclejdk8
LABEL base.image="docker.mimirdb.info/alpine_oraclejdk8"
LABEL version="0.2"
LABEL software="Vizier"
LABEL software.version="0.2.20190122"
LABEL software.version="0.2.20190212"
LABEL description="an open source, provenance aware, iterative data cleaning tool"
LABEL website="http://vizierdb.info"
LABEL sourcecode="https://github.com/VizierDB"

View file

@@ -1,13 +1,13 @@
sed -i "s/http:\/\/localhost/$API_SCHEME:\/\/$API_SERVER/g" /usr/local/source/web-api/config/config-default.yaml
sed -i "s/server_port: 5000/server_port: $API_PORT/g" /usr/local/source/web-api/config/config-default.yaml
sed -i "s/server_local_port: 5000/server_local_port: $API_LOCAL_PORT/g" /usr/local/source/web-api/config/config-default.yaml
sed -i "s/app_path: '.*'/app_path: '$APP_PATH'/g" /usr/local/source/web-api/config/config-default.yaml
sed -i "s/http:\/\/localhost/$API_SCHEME:\/\/$API_SERVER/g" /usr/local/source/web-api/config/config-mimir.yaml
sed -i "s/server_port: 5000/server_port: $API_PORT/g" /usr/local/source/web-api/config/config-mimir.yaml
sed -i "s/server_local_port: 5000/server_local_port: $API_LOCAL_PORT/g" /usr/local/source/web-api/config/config-mimir.yaml
sed -i "s/app_path: '.*'/app_path: '$APP_PATH'/g" /usr/local/source/web-api/config/config-mimir.yaml
sed -i "s/http:\/\/localhost/$API_SCHEME:\/\/$API_SERVER/g" /usr/local/source/web-api/vizier/config.yaml
sed -i "s/server_port: 5000/server_port: $API_PORT/g" /usr/local/source/web-api/vizier/config.yaml
sed -i "s/server_local_port: 5000/server_local_port: $API_LOCAL_PORT/g" /usr/local/source/web-api/vizier/config.yaml
sed -i "s/app_path: '.*'/app_path: '$APP_PATH'/g" /usr/local/source/web-api/vizier/config.yaml
sed -i "s/jvm_gateway('127.0.0.1', 33388)/jvm_gateway('$MIMIR_HOST', 33388)/g" /opt/conda/envs/vizier/lib/python2.7/site-packages/vistrails/packages/mimir/init.py
sed -i "s#http://localhost#$API_SCHEME://$API_SERVER#g" /usr/local/source/web-api/config/config-default.yaml
sed -i "s#server_port: 5000#server_port: $API_PORT#g" /usr/local/source/web-api/config/config-default.yaml
sed -i "s#server_local_port: 5000#server_local_port: $API_LOCAL_PORT#g" /usr/local/source/web-api/config/config-default.yaml
sed -i "s#app_path: '.*'#app_path: '$APP_PATH'#g" /usr/local/source/web-api/config/config-default.yaml
sed -i "s#http://localhost#$API_SCHEME://$API_SERVER#g" /usr/local/source/web-api/config/config-mimir.yaml
sed -i "s#server_port: 5000#server_port: $API_PORT#g" /usr/local/source/web-api/config/config-mimir.yaml
sed -i "s#server_local_port: 5000#server_local_port: $API_LOCAL_PORT#g" /usr/local/source/web-api/config/config-mimir.yaml
sed -i "s#app_path: '.*'#app_path: '$APP_PATH'#g" /usr/local/source/web-api/config/config-mimir.yaml
sed -i "s#http://localhost#$API_SCHEME://$API_SERVER#g" /usr/local/source/web-api/vizier/config.yaml
sed -i "s#server_port: 5000#server_port: $API_PORT#g" /usr/local/source/web-api/vizier/config.yaml
sed -i "s#server_local_port: 5000#server_local_port: $API_LOCAL_PORT#g" /usr/local/source/web-api/vizier/config.yaml
sed -i "s#app_path: '.*'#app_path: '$APP_PATH'#g" /usr/local/source/web-api/vizier/config.yaml
sed -i "s#jvm_gateway('127.0.0.1', 33388)#jvm_gateway('$MIMIR_HOST', 33388)#g" /opt/conda/envs/vizier/lib/python2.7/site-packages/vistrails/packages/mimir/init.py
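# Illustrative only (not part of this commit): the substitutions above expect
# the endpoint variables to be supplied by the container environment at
# startup, e.g. with the values used by the vizier-api Deployment added below:
#   API_SCHEME=https API_SERVER=demo.vizier.io/api API_PORT=80 \
#   API_LOCAL_PORT=80 APP_PATH= MIMIR_HOST=vizier-mimir sh config.sh
# (the script name "config.sh" is assumed here for the example; the switch to
# "#" as the sed delimiter is what allows slashes in $API_SERVER)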

View file

@@ -0,0 +1,630 @@
#run the containers
#spark-master
#kubectl run namenode --image=docker.mimirdb.info/spark-hadoop --replicas=1 --port=22 --port=6066 --port=7077 --port=8020 --port=8080 --port=50070 --env="MASTER=spark://namenode:7077" --env="SPARK_CONF_DIR=/conf" --env="SPARK_PUBLIC_DNS=127.0.0.1" --env="LD_LIBRARY_PATH=/usr/local/hadoop/lib/native/" --env="SPARK_EXECUTOR_MEMORY=8g" --env="SPARK_DAEMON_MEMORY=8g" --env="SPARK_DRIVER_MEMORY=8g" --env="SPARK_WORKER_MEMORY=8g" --env="HDFS_CONF_dfs_client_use_datanode_hostname=true" --env="AWS_ECS=false" --command /usr/local/spark-2.2.0-bin-without-hadoop/master.sh
kind: PersistentVolume
apiVersion: v1
metadata:
name: nn-pv-1
labels:
type: namenode
spec:
capacity:
storage: 5Gi
accessModes:
- ReadWriteOnce
storageClassName: persist
hostPath:
path: "/mnt/hdfs-data/name"
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: dn-pv-1
spec:
capacity:
storage: 3Gi
accessModes:
- ReadWriteOnce
storageClassName: persist
hostPath:
path: "/mnt/hdfs-data/1"
---
kind: PersistentVolume
apiVersion: v1
metadata:
name: dn-pv-2
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
storageClassName: persist
hostPath:
path: "/mnt/hdfs-data/2"
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: hdfs-name
spec:
selector:
matchLabels:
type: namenode
accessModes:
- ReadWriteOnce
storageClassName: persist
resources:
requests:
storage: 1Gi
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
name: namenode
spec:
replicas: 1
selector:
matchLabels:
component: namenode
template:
metadata:
labels:
component: namenode
spec:
volumes:
- name: hdfs-name
persistentVolumeClaim:
claimName: hdfs-name
containers:
- name: namenode
image: docker.mimirdb.info/spark-hadoop
command: ["/usr/local/spark-2.4.0-bin-without-hadoop/master.sh"]
ports:
- containerPort: 22
- containerPort: 6066
- containerPort: 7001
- containerPort: 7005
- containerPort: 7077
- containerPort: 8020
- containerPort: 8080
- containerPort: 50070
resources:
requests:
cpu: 100m
env:
- name: MASTER
value: "spark://namenode:7077"
- name: MASTER_IP
value: "127.0.0.1"
- name: HDFS_HOST
value: "namenode"
- name: SPARK_CONF_DIR
value: "/conf"
- name: SPARK_PUBLIC_DNS
value: "namenode"
- name: LD_LIBRARY_PATH
value: "/usr/local/hadoop/lib/native/"
- name: SPARK_EXECUTOR_MEMORY
value: "8g"
- name: SPARK_DAEMON_MEMORY
value: "8g"
- name: SPARK_DRIVER_MEMORY
value: "8g"
- name: SPARK_WORKER_MEMORY
value: "8g"
- name: SPARK_WORKER_PORT
value: "30001"
- name: HDFS_CONF_dfs_client_use_datanode_hostname
value: "true"
- name: HDFS_CONF_dfs_datanode_use_datanode_hostname
value: "true"
- name: AWS_ECS
value: "true"
volumeMounts:
- name: hdfs-name
mountPath: /hadoop/dfs/name
---
kind: Service
apiVersion: v1
metadata:
creationTimestamp: null
labels:
app: namenode
hasuraService: custom
name: namenode
namespace: default
spec:
ports:
- name: ssh
port: 22
targetPort: 22
- name: hadoop
port: 6066
targetPort: 6066
- name: spark
port: 7077
targetPort: 7077
- name: sparkdriver
port: 7001
targetPort: 7001
- name: sparkblock
port: 7005
targetPort: 7005
- name: hdfs
port: 8020
targetPort: 8020
- name: sparkui
port: 8080
targetPort: 8080
- name: hadoopui
port: 50070
targetPort: 50070
selector:
component: namenode
type: ClusterIP
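# Illustrative check (comment only, not part of the original manifest): the
# Service above is ClusterIP-only, so to reach the Spark master UI (8080) or
# the HDFS namenode UI (50070) from outside the cluster, port-forward through
# it, e.g.:
#   kubectl port-forward service/namenode 8080:8080 50070:50070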
---
#spark-worker
#kubectl run $HOSTNAME --image=docker.mimirdb.info/spark-hadoop --replicas=2 --port=$WORKER_PORT --port=$DATANODE_PORT --env="SPARK_CONF_DIR=/conf" --env="SPARK_PUBLIC_DNS=127.0.0.1" --env="SPARK_WORKER_CORES=4" --env="SPARK_WORKER_PORT=$WORKER_PORT" --env="SPARK_WORKER_WEBUI_PORT=$WORKER_WEBUI_PORT" --env="LD_LIBRARY_PATH=/usr/local/hadoop/lib/native/" --env="HDFS_DATA_HOST=$HOSTNAME" --env="HDFS_HOST=spark-master" --env="HDFS_CONF_dfs_datanode_address=0.0.0.0:$DATANODE_PORT" --env="SPARK_EXECUTOR_MEMORY=8g" --env="SPARK_DAEMON_MEMORY=8g" --env="SPARK_DRIVER_MEMORY=8g" --env="SPARK_WORKER_MEMORY=8g" --env="HDFS_CONF_dfs_client_use_datanode_hostname=true" --env="AWS_ECS=false" --command /usr/local/spark-2.2.0-bin-without-hadoop/worker.sh
kind: Service
apiVersion: v1
metadata:
creationTimestamp: null
labels:
app: datanode
hasuraService: custom
name: datanode
namespace: default
spec:
ports:
- name: sparkblock
port: 7005
targetPort: 7005
- name: hdfsd1
port: 7022
targetPort: 7022
- name: hdfsd2
port: 7023
targetPort: 7023
- name: hdfsd3
port: 7024
targetPort: 7024
- name: hdfsd4
port: 7025
targetPort: 7025
- name: hdfsd5
port: 7026
targetPort: 7026
- name: hdfsdd
port: 8882
targetPort: 8882
- name: hdfsweb
port: 8082
targetPort: 8082
- name: sparkworker
port: 30001
targetPort: 30001
- name: hdfsdp
port: 50010
targetPort: 50010
- name: hdfswp
port: 50075
targetPort: 50075
selector:
app: datanode
clusterIP: None
---
kind: StatefulSet
apiVersion: apps/v1beta1
metadata:
creationTimestamp: null
labels:
app: datanode
hasuraService: custom
name: datanode
namespace: default
spec:
serviceName: "datanode"
replicas: 1
template:
metadata:
creationTimestamp: null
labels:
app: datanode
spec:
containers:
- name: datanode
image: docker.mimirdb.info/spark-hadoop
command: ["/usr/local/spark-2.4.0-bin-without-hadoop/worker.sh"]
ports:
- containerPort: 7005
- containerPort: 7022
- containerPort: 7023
- containerPort: 7024
- containerPort: 7025
- containerPort: 7026
- containerPort: 8882
- containerPort: 8082
- containerPort: 30001
- containerPort: 50010
- containerPort: 50075
resources:
requests:
cpu: 100m
env:
- name: SPARK_CONF_DIR
value: "/conf"
- name: SPARK_WORKER_CORES
value: "4"
- name: SPARK_WORKER_PORT
value: "8882"
- name: SPARK_WORKER_WEBUI_PORT
value: "8082"
- name: LD_LIBRARY_PATH
value: "/usr/local/hadoop/lib/native/"
- name: HDFS_HOST
value: "namenode"
- name: HDFS_CONF_dfs_datanode_address
value: "0.0.0.0:50010"
- name: SPARK_EXECUTOR_MEMORY
value: "8g"
- name: SPARK_DAEMON_MEMORY
value: "8g"
- name: SPARK_DRIVER_MEMORY
value: "8g"
- name: SPARK_WORKER_MEMORY
value: "8g"
- name: SPARK_WORKER_PORT
value: "30001"
- name: HDFS_CONF_dfs_client_use_datanode_hostname
value: "true"
- name: HDFS_CONF_dfs_datanode_use_datanode_hostname
value: "true"
- name: AWS_ECS
value: "true"
volumeMounts:
- name: hdfs-data
mountPath: /hadoop/dfs/data
volumeClaimTemplates:
- metadata:
name: hdfs-data
spec:
storageClassName: persist
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
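# Note (comment only, not part of the original manifest): each datanode
# replica gets its own PersistentVolumeClaim from the volumeClaimTemplates
# above, so additional HDFS capacity can be added later by scaling, e.g.:
#   kubectl scale statefulset datanode --replicas=3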
---
#mimir
#kubectl run vizier-mimir --image=docker.mimirdb.info/vizier-mimir-spark --replicas=1 --port=9001 --port=33388 --expose --env="RESTORE_BACKUP=false" --env="PULL_MIMIR=false" --env="AWS_ACCESS_KEY_ID=AKIAJ7MLFSPYLYG47ARQ" --env="AWS_SECRET_ACCESS_KEY=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" --env="S3_BUCKET_NAME=vizier-data-test"
kind: PersistentVolume
apiVersion: v1
metadata:
name: vizier-data-volume
labels:
type: local
spec:
storageClassName: persist
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
hostPath:
path: "/mnt/vizier-data"
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: vizier-data-pv-claim
spec:
storageClassName: persist
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
name: vizier-mimir
spec:
replicas: 1
selector:
matchLabels:
component: vizier-mimir
template:
metadata:
labels:
component: vizier-mimir
spec:
volumes:
- name: vizier-data-pv-storage
persistentVolumeClaim:
claimName: vizier-data-pv-claim
containers:
- name: vizier-mimir
image: docker.mimirdb.info/vizier-mimir-spark
ports:
- containerPort: 9001
- containerPort: 4041
- containerPort: 7001
- containerPort: 7005
- containerPort: 33388
resources:
requests:
cpu: 100m
volumeMounts:
- mountPath: "/usr/local/source/web-api/.vizierdb"
name: vizier-data-pv-storage
env:
- name: RESTORE_BACKUP
value: "false"
- name: PULL_MIMIR
value: "false"
- name: AWS_ACCESS_KEY_ID
value: "AKIAJ7MLFSPYLYG47ARQ"
- name: AWS_SECRET_ACCESS_KEY
value: "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"
- name: S3_BUCKET_NAME
value: "vizier-data-test"
- name: USE_S3_VOLUME
value: "false"
- name: HDFS_CONF_dfs_client_use_datanode_hostname
value: "true"
- name: DATA_STAGING_TYPE
value: "s3"
- name: MIMIR_HOST
value: "vizier-mimir"
---
kind: Service
apiVersion: v1
metadata:
name: vizier-mimir
spec:
ports:
- name: mimir
port: 33388
targetPort: 33388
- name: sparkui
port: 4041
targetPort: 4041
- name: sparkdriver
port: 7001
targetPort: 7001
- name: sparkblock
port: 7005
targetPort: 7005
- name: supervisor
port: 9001
targetPort: 9001
selector:
component: vizier-mimir
---
#api
#kubectl run vizier-api --image=docker.mimirdb.info/vizier-api-spark --replicas=1 --port=9001 --port=80 --port=443 --expose --env="APP_PATH=" --env="API_SERVER=localhost" --env="API_LOCAL_PORT=443" --env="API_PORT=443" --env="API_SCHEME=http" --env="AWS_ACCESS_KEY_ID=AKIAJ7MLFSPYLYG47ARQ" --env="AWS_SECRET_ACCESS_KEY=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" --env="S3_BUCKET_NAME=vizier-data-test"
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
name: vizier-api
spec:
replicas: 1
selector:
matchLabels:
component: vizier-api
template:
metadata:
labels:
component: vizier-api
spec:
volumes:
- name: vizier-data-pv-storage
persistentVolumeClaim:
claimName: vizier-data-pv-claim
containers:
- name: vizier-api
image: docker.mimirdb.info/vizier-api-spark
ports:
- containerPort: 80
- containerPort: 9001
resources:
requests:
cpu: 100m
volumeMounts:
- mountPath: "/usr/local/source/web-api/.vizierdb"
name: vizier-data-pv-storage
env:
- name: MIMIR_HOST
value: "vizier-mimir"
- name: APP_PATH
value: ""
- name: API_SERVER
value: "demo.vizier.io/api"
- name: API_LOCAL_PORT
value: "80"
- name: API_PORT
value: "80"
- name: API_SCHEME
value: "https"
- name: AWS_ACCESS_KEY_ID
value: "AKIAJ7MLFSPYLYG47ARQ"
- name: AWS_SECRET_ACCESS_KEY
value: "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"
- name: S3_BUCKET_NAME
value: "vizier-data-test"
- name: USE_S3_VOLUME
value: "false"
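# Comment only (interpretation of the values above): at startup the web-api
# config script updated in this commit rewrites config-default.yaml and
# config-mimir.yaml with these values, so the advertised endpoint becomes
# https://demo.vizier.io/api while the server listens locally on port 80
# behind vizier-proxy.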
---
kind: Service
apiVersion: v1
metadata:
name: vizier-api
spec:
ports:
- name: api
port: 80
targetPort: 80
- name: supervisor
port: 9001
targetPort: 9001
selector:
component: vizier-api
---
#ui
#kubectl run vizier-ui --image=docker.mimirdb.info/vizier-ui --replicas=1 --port=9001 --port=80 --port=443 --expose
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
name: vizier-ui
spec:
replicas: 1
selector:
matchLabels:
component: vizier-ui
template:
metadata:
labels:
component: vizier-ui
spec:
hostname: vizier-ui
containers:
- name: vizier-ui
image: docker.mimirdb.info/vizier-ui
ports:
- containerPort: 80
- containerPort: 443
- containerPort: 9001
resources:
requests:
cpu: 100m
env:
- name: API_SERVER
value: "demo.vizier.io"
- name: API_PORT
value: "443"
- name: API_SCHEME
value: "https"
- name: APP_PATH
value: "/api/"
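# Comment only (interpretation of the values above): the UI container's
# config.sh, also updated in this commit, combines these values into the API
# base URL written to env.js, i.e. 'https://demo.vizier.io:443/api/'.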
---
kind: Service
apiVersion: v1
metadata:
name: vizier-ui
spec:
ports:
- name: ui
port: 80
targetPort: 80
- name: uis
port: 443
targetPort: 443
- name: supervisor
port: 9001
targetPort: 9001
selector:
component: vizier-ui
---
#analytics
#kubectl run vizier-analytics --image=docker.mimirdb.info/vizier-analytics --replicas=1 --port=9001 --port=80 --expose
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
name: vizier-analytics
spec:
replicas: 1
selector:
matchLabels:
component: vizier-analytics
template:
metadata:
labels:
component: vizier-analytics
spec:
containers:
- name: vizier-analytics
image: docker.mimirdb.info/vizier-analytics
ports:
- containerPort: 80
- containerPort: 443
- containerPort: 9001
resources:
requests:
cpu: 100m
---
kind: Service
apiVersion: v1
metadata:
name: vizier-analytics
spec:
ports:
- name: analytics
port: 80
targetPort: 80
- name: analyticss
port: 443
targetPort: 443
- name: supervisor
port: 9001
targetPort: 9001
selector:
component: vizier-analytics
---
#proxy
#sudo docker run -d -p 80:80 -p 443:443 -p 9001:9001 -h vizier-proxy --network spark-net ${PROXY_IMAGE}
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
name: vizier-proxy
spec:
replicas: 1
selector:
matchLabels:
component: vizier-proxy
template:
metadata:
labels:
component: vizier-proxy
spec:
containers:
- name: vizier-proxy
image: docker.mimirdb.info/vizier-proxy
ports:
- containerPort: 22
- containerPort: 80
- containerPort: 443
- containerPort: 9001
resources:
requests:
cpu: 100m
env:
- name: VIZIER_CONFIG
value: "vizier_k8s.conf"
- name: VIZIER_DOMAIN
value: "vizier.io"
---
kind: Service
apiVersion: v1
metadata:
name: vizier-proxy
namespace: default
annotations:
service.beta.kubernetes.io/aws-load-balancer-type: nlb
spec:
type: LoadBalancer
ports:
- name: ssh
port: 22
targetPort: 22
- name: proxy
port: 80
targetPort: 80
- name: proxys
port: 443
targetPort: 443
- name: supervisor
port: 9001
targetPort: 9001
selector:
component: vizier-proxy
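# Illustrative deployment steps for EKS (commands and file name below are
# assumptions, not part of this commit):
#   kubectl apply -f vizier-eks.yaml
#   kubectl get pods                    # wait for all components to be Running
#   kubectl get service vizier-proxy    # EXTERNAL-IP shows the provisioned NLB
# The aws-load-balancer-type: nlb annotation on the vizier-proxy Service makes
# EKS front it with a Network Load Balancer.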

View file

@@ -439,7 +439,7 @@ spec:
- name: APP_PATH
value: ""
- name: API_SERVER
value: "api.vizier.dev"
value: "demo.vizier.dev/api"
- name: API_LOCAL_PORT
value: "80"
- name: API_PORT
@@ -499,11 +499,13 @@ spec:
cpu: 100m
env:
- name: API_SERVER
value: "api.vizier.dev"
value: "demo.vizier.dev"
- name: API_PORT
value: "443"
- name: API_SCHEME
value: "https"
- name: APP_PATH
value: "/api/"
---
kind: Service
apiVersion: v1

View file

@@ -2,9 +2,9 @@ FROM docker.mimirdb.info/alpine_oraclejdk8_nginx
# Metadata
LABEL base.image="docker.mimirdb.info/alpine_oraclejdk8_nginx"
LABEL version="0.2"
LABEL version="0.3"
LABEL software="Vizier"
LABEL software.version="0.2.20181206"
LABEL software.version="0.2.20190208"
LABEL description="an open source, provenance aware, iterative data cleaning tool"
LABEL website="http://vizierdb.info"
LABEL sourcecode="https://github.com/VizierDB"
@@ -14,8 +14,6 @@ LABEL tags="CSV,Data Cleaning,Databases,Provenance,Workflow,Machine Learning"
ARG gituser
ARG gitpass
ARG MIMIR_BRANCH=master
ARG MIMIR_XARGS
ARG MIMIR_ARGS
ENV RUN_SSH=true
ENV PULL_MIMIR=true

View file

@@ -1,6 +1,6 @@
FROM nginx:alpine
LABEL software.version="0.2.20181126"
LABEL software.version="0.2.20190208"
EXPOSE 80
EXPOSE 22

View file

@@ -1,5 +1,5 @@
#!/bin/sh
sed -ri "s/'https?:\/\/[a-zA-Z0-9.-]*[:0-9]*\/?.*'/'$API_SCHEME:\/\/$API_SERVER:$API_PORT$APP_PATH'/g" /usr/local/source/web-ui/build/env.js
sed -ri "s#'https?://[a-zA-Z0-9.-]*[:0-9]*/?.*'#'$API_SCHEME://$API_SERVER:$API_PORT$APP_PATH'#g" /usr/local/source/web-ui/build/env.js
sed -ri "s/ANALYTICS_URL: '.*'/ANALYTICS_URL: '${ANALYTICS_URL}'/g" /usr/local/source/web-ui/build/env.js
sed -ri "s/ANALYTICS_SITE_ID: '.*'/ANALYTICS_SITE_ID: '${ANALYTICS_SITE_ID}'/g" /usr/local/source/web-ui/build/env.js
/usr/bin/supervisord

View file

@@ -23,6 +23,11 @@ http {
client_max_body_size 0;
keepalive_timeout 65;
proxy_connect_timeout 600;
proxy_send_timeout 600;
proxy_read_timeout 600;
send_timeout 600;
map $http_upgrade $connection_upgrade {
default upgrade;

View file

@@ -6,6 +6,10 @@ upstream mimir-supervisor {
server vizier-mimir:9001;
}
upstream mimir-driver {
server vizier-mimir:4041;
}
upstream proxy-supervisor {
server 127.0.0.1:9001;
}
@@ -118,6 +122,50 @@ server {
}
}
server {
listen 80;
server_name driver.vizier.app;
# For Lets Encrypt, this needs to be served via HTTP
location ^~ /.well-known/acme-challenge {
allow all;
alias /var/www/acme;
}
location / {
return 301 https://driver.vizier.app$request_uri;
}
}
server {
listen 443 ssl;
server_name driver.vizier.app;
server_tokens off;
ssl on;
ssl_certificate /etc/ssl/acme/mimir.vizier.app/fullchain.pem;
ssl_certificate_key /etc/ssl/acme/private/mimir.vizier.app/privkey.pem;
#auth_basic "Vizier Demo";
#auth_basic_user_file /etc/nginx/.htpasswd;
location / {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Frame-Options SAMEORIGIN;
proxy_pass http://mimir-driver;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header X-Real-IP $remote_addr;
}
}
server {
listen 80;
server_name proxy.vizier.app;