# docker/kubernetes/run-containers-microk8s.yaml
# 2019-02-13 12:12:53 -05:00
# 648 lines / 17 KiB / YAML
#run the containers
#spark-master
#kubectl run namenode --image=docker.mimirdb.info/spark-hadoop --replicas=1 --port=22 --port=6066 --port=7077 --port=8020 --port=8080 --port=50070 --env="MASTER=spark://namenode:7077" --env="SPARK_CONF_DIR=/conf" --env="SPARK_PUBLIC_DNS=127.0.0.1" --env="LD_LIBRARY_PATH=/usr/local/hadoop/lib/native/" --env="SPARK_EXECUTOR_MEMORY=8g" --env="SPARK_DAEMON_MEMORY=8g" --env="SPARK_DRIVER_MEMORY=8g" --env="SPARK_WORKER_MEMORY=8g" --env="HDFS_CONF_dfs_client_use_datanode_hostname=true" --env="AWS_ECS=false" --command /usr/local/spark-2.2.0-bin-without-hadoop/master.sh
# Host-path volume backing the HDFS namenode metadata directory.
# Labelled so the hdfs-name PVC can bind to it via a label selector.
kind: PersistentVolume
apiVersion: v1
metadata:
  name: nn-pv-1
  labels:
    type: namenode
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  storageClassName: persist
  hostPath:
    path: "/mnt/hdfs-data/name"
---
# Host-path volume for the first HDFS datanode data directory
# (consumed by the datanode StatefulSet's volumeClaimTemplates).
kind: PersistentVolume
apiVersion: v1
metadata:
  name: dn-pv-1
spec:
  capacity:
    storage: 3Gi
  accessModes:
    - ReadWriteOnce
  storageClassName: persist
  hostPath:
    path: "/mnt/hdfs-data/1"
---
# Host-path volume for a second HDFS datanode data directory.
kind: PersistentVolume
apiVersion: v1
metadata:
  name: dn-pv-2
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  storageClassName: persist
  hostPath:
    path: "/mnt/hdfs-data/2"
---
# Claim for the namenode metadata volume; the label selector pins it
# to the nn-pv-1 PersistentVolume (type: namenode).
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: hdfs-name
spec:
  selector:
    matchLabels:
      type: namenode
  accessModes:
    - ReadWriteOnce
  storageClassName: persist
  resources:
    requests:
      storage: 1Gi
---
# Spark master + HDFS namenode. Runs master.sh from the Spark 2.4.0
# distribution and mounts the namenode metadata volume.
kind: Deployment
# apps/v1 replaces the deprecated extensions/v1beta1 (removed in k8s 1.16);
# the required spec.selector was already present.
apiVersion: apps/v1
metadata:
  name: namenode
spec:
  replicas: 1
  selector:
    matchLabels:
      component: namenode
  template:
    metadata:
      labels:
        component: namenode
    spec:
      volumes:
        - name: hdfs-name
          persistentVolumeClaim:
            claimName: hdfs-name
      containers:
        - name: namenode
          image: docker.mimirdb.info/spark-hadoop
          command: ["/usr/local/spark-2.4.0-bin-without-hadoop/master.sh"]
          ports:
            - containerPort: 22
            - containerPort: 6066
            - containerPort: 7001
            - containerPort: 7005
            - containerPort: 7077
            - containerPort: 8020
            - containerPort: 8080
            - containerPort: 50070
          resources:
            requests:
              cpu: 100m
          env:
            - name: MASTER
              value: "spark://namenode:7077"
            - name: MASTER_IP
              value: "127.0.0.1"
            - name: HDFS_HOST
              value: "namenode"
            - name: SPARK_CONF_DIR
              value: "/conf"
            - name: SPARK_PUBLIC_DNS
              value: "namenode"
            - name: LD_LIBRARY_PATH
              value: "/usr/local/hadoop/lib/native/"
            - name: SPARK_EXECUTOR_MEMORY
              value: "8g"
            - name: SPARK_DAEMON_MEMORY
              value: "8g"
            - name: SPARK_DRIVER_MEMORY
              value: "8g"
            - name: SPARK_WORKER_MEMORY
              value: "8g"
            - name: SPARK_WORKER_PORT
              value: "30001"
            # use hostnames (not pod IPs) for HDFS client/datanode traffic so
            # the headless-service DNS names resolve across pods
            - name: HDFS_CONF_dfs_client_use_datanode_hostname
              value: "true"
            - name: HDFS_CONF_dfs_datanode_use_datanode_hostname
              value: "true"
            - name: AWS_ECS
              value: "true"
          volumeMounts:
            - name: hdfs-name
              mountPath: /hadoop/dfs/name
---
# ClusterIP service exposing the Spark master / namenode ports.
# (creationTimestamp: null was a kubectl-serialization artifact; removed.)
kind: Service
apiVersion: v1
metadata:
  labels:
    app: namenode
    hasuraService: custom
  name: namenode
  namespace: default
spec:
  ports:
    - name: ssh
      port: 22
      targetPort: 22
    - name: hadoop
      port: 6066
      targetPort: 6066
    - name: spark
      port: 7077
      targetPort: 7077
    - name: sparkdriver
      port: 7001
      targetPort: 7001
    - name: sparkblock
      port: 7005
      targetPort: 7005
    - name: hdfs
      port: 8020
      targetPort: 8020
    - name: sparkui
      port: 8080
      targetPort: 8080
    - name: hadoopui
      port: 50070
      targetPort: 50070
  selector:
    component: namenode
  type: ClusterIP
---
#spark-worker
#kubectl run $HOSTNAME --image=docker.mimirdb.info/spark-hadoop --replicas=2 --port=$WORKER_PORT --port=$DATANODE_PORT --env="SPARK_CONF_DIR=/conf" --env="SPARK_PUBLIC_DNS=127.0.0.1" --env="SPARK_WORKER_CORES=4" --env="SPARK_WORKER_PORT=$WORKER_PORT" --env="SPARK_WORKER_WEBUI_PORT=$WORKER_WEBUI_PORT" --env="LD_LIBRARY_PATH=/usr/local/hadoop/lib/native/" --env="HDFS_DATA_HOST=$HOSTNAME" --env="HDFS_HOST=spark-master" --env="HDFS_CONF_dfs_datanode_address=0.0.0.0:$DATANODE_PORT" --env="SPARK_EXECUTOR_MEMORY=8g" --env="SPARK_DAEMON_MEMORY=8g" --env="SPARK_DRIVER_MEMORY=8g" --env="SPARK_WORKER_MEMORY=8g" --env="HDFS_CONF_dfs_client_use_datanode_hostname=true" --env="AWS_ECS=false" --command /usr/local/spark-2.2.0-bin-without-hadoop/worker.sh
# Headless service (clusterIP: None) for the datanode StatefulSet —
# gives each pod a stable per-pod DNS name.
kind: Service
apiVersion: v1
metadata:
  labels:
    app: datanode
    hasuraService: custom
  name: datanode
  namespace: default
spec:
  ports:
    - name: sparkblock
      port: 7005
      targetPort: 7005
    - name: hdfsd1
      port: 7022
      targetPort: 7022
    - name: hdfsd2
      port: 7023
      targetPort: 7023
    - name: hdfsd3
      port: 7024
      targetPort: 7024
    - name: hdfsd4
      port: 7025
      targetPort: 7025
    - name: hdfsd5
      port: 7026
      targetPort: 7026
    - name: hdfsdd
      port: 8882
      targetPort: 8882
    - name: hdfsweb
      port: 8082
      targetPort: 8082
    - name: sparkworker
      port: 30001
      targetPort: 30001
    - name: hdfsdp
      port: 50010
      targetPort: 50010
    - name: hdfswp
      port: 50075
      targetPort: 50075
  selector:
    app: datanode
  clusterIP: None
---
# Spark worker + HDFS datanode. StatefulSet so each replica gets a stable
# hostname (via the headless "datanode" service) and its own data volume.
kind: StatefulSet
# apps/v1 replaces the deprecated apps/v1beta1; apps/v1 requires an explicit
# spec.selector, added here to match the template labels.
apiVersion: apps/v1
metadata:
  labels:
    app: datanode
    hasuraService: custom
  name: datanode
  namespace: default
spec:
  serviceName: "datanode"
  replicas: 1
  selector:
    matchLabels:
      app: datanode
  template:
    metadata:
      labels:
        app: datanode
    spec:
      containers:
        - name: datanode
          image: docker.mimirdb.info/spark-hadoop
          command: ["/usr/local/spark-2.4.0-bin-without-hadoop/worker.sh"]
          ports:
            - containerPort: 7005
            - containerPort: 7022
            - containerPort: 7023
            - containerPort: 7024
            - containerPort: 7025
            - containerPort: 7026
            - containerPort: 8882
            - containerPort: 8082
            - containerPort: 30001
            - containerPort: 50010
            - containerPort: 50075
          resources:
            requests:
              cpu: 100m
          env:
            - name: SPARK_CONF_DIR
              value: "/conf"
            - name: SPARK_WORKER_CORES
              value: "4"
            # SPARK_WORKER_PORT was listed twice ("8882" then "30001");
            # the last entry wins, so only the effective value is kept.
            - name: SPARK_WORKER_PORT
              value: "30001"
            - name: SPARK_WORKER_WEBUI_PORT
              value: "8082"
            - name: LD_LIBRARY_PATH
              value: "/usr/local/hadoop/lib/native/"
            - name: HDFS_HOST
              value: "namenode"
            - name: HDFS_CONF_dfs_datanode_address
              value: "0.0.0.0:50010"
            - name: SPARK_EXECUTOR_MEMORY
              value: "8g"
            - name: SPARK_DAEMON_MEMORY
              value: "8g"
            - name: SPARK_DRIVER_MEMORY
              value: "8g"
            - name: SPARK_WORKER_MEMORY
              value: "8g"
            - name: HDFS_CONF_dfs_client_use_datanode_hostname
              value: "true"
            - name: HDFS_CONF_dfs_datanode_use_datanode_hostname
              value: "true"
            - name: AWS_ECS
              value: "true"
          volumeMounts:
            - name: hdfs-data
              mountPath: /hadoop/dfs/data
  volumeClaimTemplates:
    - metadata:
        name: hdfs-data
      spec:
        storageClassName: persist
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 1Gi
---
#mimir
#kubectl run vizier-mimir --image=docker.mimirdb.info/vizier-mimir-spark --replicas=1 --port=9001 --port=33388 --expose --env="RESTORE_BACKUP=false" --env="PULL_MIMIR=false" --env="AWS_ACCESS_KEY_ID=AKIAJ7MLFSPYLYG47ARQ" --env="AWS_SECRET_ACCESS_KEY=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" --env="S3_BUCKET_NAME=vizier-data-test"
# Host-path volume shared by the vizier-mimir and vizier-api deployments
# (mounted as the .vizierdb data directory).
kind: PersistentVolume
apiVersion: v1
metadata:
  name: vizier-data-volume
  labels:
    type: local
spec:
  storageClassName: persist
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/mnt/vizier-data"
---
# Claim bound to vizier-data-volume via the shared "persist" storage class.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: vizier-data-pv-claim
spec:
  storageClassName: persist
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
---
# NOTE(review): live AWS credentials committed to version control —
# these should be rotated and injected from a secret store instead.
apiVersion: v1
kind: Secret
metadata:
  name: s3-credentials
data:
  # Re-encoded without the trailing newline the previous values carried
  # (an artifact of `echo | base64`): they decoded to "...ARQ\n" /
  # "...Xuki\n", which breaks AWS signature validation. The raw values in
  # the kubectl comments above confirm no newline is intended.
  access-key-id: QUtJQUo3TUxGU1BZTFlHNDdBUlE=
  access-key-secret: ZEw3OXFKR3lMa1VGeVl2bW1nM2hFbjhiSWtsU2FUa3JmRzBJWHVraQ==
---
# Mimir service: Spark-backed lens/caveat engine used by the Vizier API.
kind: Deployment
# apps/v1 replaces the deprecated extensions/v1beta1 (removed in k8s 1.16).
apiVersion: apps/v1
metadata:
  name: vizier-mimir
spec:
  replicas: 1
  selector:
    matchLabels:
      component: vizier-mimir
  template:
    metadata:
      labels:
        component: vizier-mimir
    spec:
      volumes:
        - name: vizier-data-pv-storage
          persistentVolumeClaim:
            claimName: vizier-data-pv-claim
      containers:
        - name: vizier-mimir
          image: docker.mimirdb.info/vizier-mimir-spark
          ports:
            - containerPort: 9001
            - containerPort: 4041
            - containerPort: 7001
            - containerPort: 7005
            - containerPort: 33388
          resources:
            requests:
              cpu: 100m
          volumeMounts:
            - mountPath: "/usr/local/source/web-api/.vizierdb"
              name: vizier-data-pv-storage
          env:
            - name: RESTORE_BACKUP
              value: "false"
            - name: PULL_MIMIR
              value: "false"
            - name: S3A_ENDPOINT
              value: "https://s3.vizier.io/"
            # AWS credentials come from the s3-credentials Secret rather
            # than inline literals
            - name: AWS_ACCESS_KEY_ID
              valueFrom:
                secretKeyRef:
                  name: s3-credentials
                  key: access-key-id
            - name: AWS_SECRET_ACCESS_KEY
              valueFrom:
                secretKeyRef:
                  name: s3-credentials
                  key: access-key-secret
            - name: S3_BUCKET_NAME
              value: "vizier-data-test"
            - name: USE_S3_VOLUME
              value: "false"
            - name: HDFS_CONF_dfs_client_use_datanode_hostname
              value: "true"
            - name: DATA_STAGING_TYPE
              value: "s3"
            - name: MIMIR_HOST
              value: "vizier-mimir"
---
# Service exposing Mimir's API (33388), Spark ports and supervisord (9001).
kind: Service
apiVersion: v1
metadata:
  name: vizier-mimir
spec:
  ports:
    - name: mimir
      port: 33388
      targetPort: 33388
    - name: sparkui
      port: 4041
      targetPort: 4041
    - name: sparkdriver
      port: 7001
      targetPort: 7001
    - name: sparkblock
      port: 7005
      targetPort: 7005
    - name: supervisor
      port: 9001
      targetPort: 9001
  selector:
    component: vizier-mimir
---
#api
#kubectl run vizier-api --image=docker.mimirdb.info/vizier-api-spark --replicas=1 --port=9001 --port=80 --port=443 --expose --env="APP_PATH=" --env="API_SERVER=localhost" --env="API_LOCAL_PORT=443" --env="API_PORT=443" --env="API_SCHEME=http" --env="AWS_ACCESS_KEY_ID=AKIAJ7MLFSPYLYG47ARQ" --env="AWS_SECRET_ACCESS_KEY=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" --env="S3_BUCKET_NAME=vizier-data-test"
# Vizier web API; shares the .vizierdb volume with vizier-mimir.
kind: Deployment
# apps/v1 replaces the deprecated extensions/v1beta1 (removed in k8s 1.16).
apiVersion: apps/v1
metadata:
  name: vizier-api
spec:
  replicas: 1
  selector:
    matchLabels:
      component: vizier-api
  template:
    metadata:
      labels:
        component: vizier-api
    spec:
      volumes:
        - name: vizier-data-pv-storage
          persistentVolumeClaim:
            claimName: vizier-data-pv-claim
      containers:
        - name: vizier-api
          image: docker.mimirdb.info/vizier-api-spark
          ports:
            - containerPort: 80
            - containerPort: 9001
          resources:
            requests:
              cpu: 100m
          volumeMounts:
            - mountPath: "/usr/local/source/web-api/.vizierdb"
              name: vizier-data-pv-storage
          env:
            - name: MIMIR_HOST
              value: "vizier-mimir"
            - name: APP_PATH
              value: ""
            # external URL advertised to clients; TLS is terminated at the
            # proxy, so the API itself listens on plain port 80
            - name: API_SERVER
              value: "demo.vizier.dev/api"
            - name: API_LOCAL_PORT
              value: "80"
            - name: API_PORT
              value: "80"
            - name: API_SCHEME
              value: "https"
            - name: AWS_ACCESS_KEY_ID
              valueFrom:
                secretKeyRef:
                  name: s3-credentials
                  key: access-key-id
            - name: AWS_SECRET_ACCESS_KEY
              valueFrom:
                secretKeyRef:
                  name: s3-credentials
                  key: access-key-secret
            - name: S3_BUCKET_NAME
              value: "vizier-data-test"
            - name: USE_S3_VOLUME
              value: "false"
---
# Service for the Vizier API (HTTP 80) and supervisord (9001).
kind: Service
apiVersion: v1
metadata:
  name: vizier-api
spec:
  ports:
    - name: api
      port: 80
      targetPort: 80
    - name: supervisor
      port: 9001
      targetPort: 9001
  selector:
    component: vizier-api
---
#ui
#kubectl run vizier-ui --image=docker.mimirdb.info/vizier-ui --replicas=1 --port=9001 --port=80 --port=443 --expose
# Vizier front-end, configured to reach the API at demo.vizier.dev/api.
kind: Deployment
# apps/v1 replaces the deprecated extensions/v1beta1 (removed in k8s 1.16).
apiVersion: apps/v1
metadata:
  name: vizier-ui
spec:
  replicas: 1
  selector:
    matchLabels:
      component: vizier-ui
  template:
    metadata:
      labels:
        component: vizier-ui
    spec:
      hostname: vizier-ui
      containers:
        - name: vizier-ui
          image: docker.mimirdb.info/vizier-ui
          ports:
            - containerPort: 80
            - containerPort: 443
            - containerPort: 9001
          resources:
            requests:
              cpu: 100m
          env:
            - name: API_SERVER
              value: "demo.vizier.dev"
            - name: API_PORT
              value: "443"
            - name: API_SCHEME
              value: "https"
            - name: APP_PATH
              value: "/api/"
---
# Service for the UI (HTTP/HTTPS) and supervisord (9001).
kind: Service
apiVersion: v1
metadata:
  name: vizier-ui
spec:
  ports:
    - name: ui
      port: 80
      targetPort: 80
    - name: uis
      port: 443
      targetPort: 443
    - name: supervisor
      port: 9001
      targetPort: 9001
  selector:
    component: vizier-ui
---
#analytics
#kubectl run vizier-analytics --image=docker.mimirdb.info/vizier-analytics --replicas=1 --port=9001 --port=80 --expose
# Analytics side-service; no volumes or environment configuration needed.
kind: Deployment
# apps/v1 replaces the deprecated extensions/v1beta1 (removed in k8s 1.16).
apiVersion: apps/v1
metadata:
  name: vizier-analytics
spec:
  replicas: 1
  selector:
    matchLabels:
      component: vizier-analytics
  template:
    metadata:
      labels:
        component: vizier-analytics
    spec:
      containers:
        - name: vizier-analytics
          image: docker.mimirdb.info/vizier-analytics
          ports:
            - containerPort: 80
            - containerPort: 443
            - containerPort: 9001
          resources:
            requests:
              cpu: 100m
---
# Service for the analytics container (HTTP/HTTPS) and supervisord (9001).
kind: Service
apiVersion: v1
metadata:
  name: vizier-analytics
spec:
  ports:
    - name: analytics
      port: 80
      targetPort: 80
    - name: analyticss
      port: 443
      targetPort: 443
    - name: supervisor
      port: 9001
      targetPort: 9001
  selector:
    component: vizier-analytics
---
#proxy
#sudo docker run -d -p 80:80 -p 443:443 -p 9001:9001 -h vizier-proxy --network spark-net ${PROXY_IMAGE}
# Front proxy routing external traffic for the vizier.dev domain to the
# UI/API/analytics services.
kind: Deployment
# apps/v1 replaces the deprecated extensions/v1beta1 (removed in k8s 1.16).
apiVersion: apps/v1
metadata:
  name: vizier-proxy
spec:
  replicas: 1
  selector:
    matchLabels:
      component: vizier-proxy
  template:
    metadata:
      labels:
        component: vizier-proxy
    spec:
      containers:
        - name: vizier-proxy
          image: docker.mimirdb.info/vizier-proxy
          ports:
            - containerPort: 22
            - containerPort: 80
            - containerPort: 443
            - containerPort: 9001
          resources:
            requests:
              cpu: 100m
          env:
            - name: VIZIER_CONFIG
              value: "vizier_k8s.conf"
            - name: VIZIER_DOMAIN
              value: "vizier.dev"
---
# Service for the proxy: SSH, HTTP, HTTPS and supervisord.
kind: Service
apiVersion: v1
metadata:
  name: vizier-proxy
spec:
  ports:
    - name: ssh
      port: 22
      targetPort: 22
    - name: proxy
      port: 80
      targetPort: 80
    - name: proxys
      port: 443
      targetPort: 443
    - name: supervisor
      port: 9001
      targetPort: 9001
  selector:
    component: vizier-proxy