Commit 5fd76139 authored by 陈健

Template chart package created with helm

parent d66af736
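
For context, the new files in this diff match the stock scaffold produced by helm create; a minimal sketch of regenerating it, assuming Helm 3 is installed and using a placeholder directory name (the chart name is then set to oidcExample in Chart.yaml by hand):

helm create oidc-example
# replace the generated Chart.yaml, values.yaml and templates/ with the files below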
@@ -19,3 +19,4 @@
.project
.idea/
*.tmproj
.vscode/
apiVersion: v1
appVersion: "1.0"
description: A Helm chart for Java Oidc Example
description: A Helm chart for Kubernetes
name: oidcExample
version: "1.1"
version: "0.1.0"
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range .Values.ingress.hosts }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ . }}{{ $.Values.ingress.path }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "setup.fullname" . }})
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "oidcExample.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of it by running 'kubectl get svc -w {{ template "setup.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "setup.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "oidcExample.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "oidcExample.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "setup.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "oidcExample.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl port-forward $POD_NAME 8080:80
{{- end }}
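
NOTES.txt is only rendered when the chart is installed. A quick way to preview the text above without touching the cluster, assuming Helm 3 and placeholder release/chart names:

helm install oidc-example ./oidcExample --dry-run
# or, after a real install:
helm get notes oidc-example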
@@ -2,7 +2,7 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "setup.name" -}}
{{- define "oidcExample.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
@@ -11,7 +11,7 @@ Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "setup.fullname" -}}
{{- define "oidcExample.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
@@ -27,6 +27,19 @@ If release name contains chart name it will be used as a full name.
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "setup.chart" -}}
{{- define "oidcExample.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "oidcExample.labels" -}}
app.kubernetes.io/name: {{ include "oidcExample.name" . }}
helm.sh/chart: {{ include "oidcExample.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "setup.fullname" . }}
name: {{ include "oidcExample.fullname" . }}
labels:
app: {{ template "setup.name" . }}
chart: {{ template "setup.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{ include "oidcExample.labels" . | indent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: {{ template "setup.name" . }}
release: {{ .Release.Name }}
app.kubernetes.io/name: {{ include "oidcExample.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ template "setup.name" . }}
release: {{ .Release.Name }}
app.kubernetes.io/name: {{ include "oidcExample.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: {{ template "setup.name" . }}
image: "{{ .Values.global.hub }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}"
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 8080
protocol: TCP
{{- if .Values.livenessProbe.enabled }}
livenessProbe:
initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
httpGet:
path: {{ .Values.livenessProbe.path }}
path: /health
port: http
{{- end }}
{{- if .Values.readinessProbe.enabled }}
readinessProbe:
initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
httpGet:
path: {{ .Values.readinessProbe.path }}
path: /health
port: http
{{- end }}
volumeMounts:
- name: docker-sock-file
mountPath: /var/run/docker.sock # docker sock file
- name: localtime
mountPath: /etc/localtime
resources:
{{ toYaml .Values.resources | indent 12 }}
volumes:
- name: docker-sock-file
hostPath:
path: {{ .Values.volumes.hostPath.path }}
- name: localtime
hostPath:
path: /etc/localtime
{{- with .Values.nodeSelector }}
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
\ No newline at end of file
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
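
Once a release using this Deployment is up, the rollout and the /health probes can be verified with kubectl; a sketch assuming the oidc-example name set via nameOverride/fullnameOverride in values.yaml below and the default namespace:

kubectl rollout status deployment/oidc-example
kubectl get pods -l app.kubernetes.io/name=oidc-example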
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "setup.fullname" . -}}
{{- $ingressPath := .Values.ingress.path -}}
{{- $fullName := include "oidcExample.fullname" . -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
app: {{ template "setup.name" . }}
chart: {{ template "setup.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{- with .Values.ingress.annotations }}
{{ include "oidcExample.labels" . | indent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ . }}
- host: {{ .host | quote }}
http:
paths:
- path: {{ $ingressPath }}
{{- range .paths }}
- path: {{ . }}
backend:
serviceName: {{ $fullName }}
servicePort: http
{{- end }}
{{- end }}
{{- end }}
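
The rewritten host/paths loop expects ingress.hosts to be a list of objects with host and paths keys (see values.yaml below). To check how it renders, the ingress template can be rendered in isolation; a sketch assuming Helm 3 and that the file is stored as templates/ingress.yaml (the path and the example host are assumptions):

helm template oidc-example ./oidcExample \
  --set ingress.enabled=true \
  --set 'ingress.hosts[0].host=chart-example.local' \
  --set 'ingress.hosts[0].paths[0]=/' \
  --show-only templates/ingress.yaml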
apiVersion: v1
kind: Service
metadata:
name: {{ template "setup.fullname" . }}
name: {{ include "oidcExample.fullname" . }}
labels:
app: {{ template "setup.name" . }}
chart: {{ template "setup.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
{{ include "oidcExample.labels" . | indent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
nodePort: {{ .Values.service.nodePort }}
protocol: TCP
nodePort: {{ .Values.service.nodePort }}
name: http
selector:
app: {{ template "setup.name" . }}
release: {{ .Release.Name }}
app.kubernetes.io/name: {{ include "oidcExample.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "oidcExample.fullname" . }}-test-connection"
labels:
{{ include "oidcExample.labels" . | indent 4 }}
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "oidcExample.fullname" . }}:{{ .Values.service.port }}']
restartPolicy: Never
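
This Pod is registered as a Helm test hook, so it only runs on demand; with Helm 3 it can be triggered after an install (the release name is a placeholder):

helm test oidc-example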
# Default values for oidcExample.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
global:
hub: harbor.dev.k2paas.com/paas-dev
image:
repository: oidc-example
repository: harbor.dev.k2paas.com/paas-dev/oidc-example
tag: dev
pullPolicy: IfNotPresent
imagePullSecrets: []
nameOverride: "oidc-example"
fullnameOverride: "oidc-example"
service:
type: NodePort
type: ClusterIP
port: 80
nodePort: 8080
ingress:
enabled: true
annotations:
nginx.ingress.kubernetes.io/proxy-body-size: "100m"
nginx.ingress.kubernetes.io/client-body-buffer-size: "10m"
nginx.ingress.kubernetes.io/proxy-connect-timeout: "120"
nginx.ingress.kubernetes.io/proxy-read-timeout: "120"
nginx.ingress.kubernetes.io/proxy-send-timeout: "120"
#nginx.ingress.kubernetes.io/proxy-buffering: "on"
#nginx.ingress.kubernetes.io/proxy-buffer-size: "10m"
nginx.ingress.kubernetes.io/enable-cors: "true"
nginx.ingress.kubernetes.io/cors-allow-methods: "PUT, GET, POST, DELETE, OPTIONS"
nginx.ingress.kubernetes.io/cors-allow-credentials: "true"
nginx.ingress.kubernetes.io/cors-allow-headers: "DNT,X-CustomHeader,X-LANG,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,X-Api-Key,X-Device-Id,Authorization,Access-Control-Allow-Origin,pageSize,pageIndex,content-type,pageindex,pagesize"
#kubernetes.io/ingress.class: nginx
#kubernetes.io/tls-acme: "true"
path: /
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- exmaple.dev.k2paas.com
tls: []
- host: exmaple.dev.k2paas.com
paths: []
persistence:
enabled: false
local:
enabled: false # whether to enable local storage
name: setup-pvc # name of the corresponding local storage
storageClass: "vsphere-dynamic-class" # cluster shared storage
accessMode: ReadWriteOnce # storage access mode
size: 30Mi # requested storage size
annotations: {}
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources:
requests: # minimum resources requested; if these are not available the application cannot start successfully
memory: 200Mi
cpu: 200m
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
volumes:
hostPath:
path: /var/vcap/sys/run/docker/docker.sock
tolerations: []
livenessProbe: # liveness probe
enabled: true
path: /health # health-check endpoint; probed via httpGet by default
initialDelaySeconds: 5 # seconds to wait after the container starts before the first probe
periodSeconds: 30 # how often to probe; default is 10s, minimum 1s
timeoutSeconds: 5 # probe timeout; default is 1s, minimum 1s
successThreshold: 1 # minimum consecutive successes after a failure to be considered successful; default is 1
failureThreshold: 5 # minimum consecutive failures after a success to be considered failed; default is 3
readinessProbe: # readiness probe
enabled: true
path: /health # health-check endpoint; probed via httpGet by default
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
affinity: {}
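
Any of the defaults above can be overridden per environment at install or upgrade time, either inline with --set or from an extra values file; a sketch in Helm 3 syntax (the values file name and the values shown are examples only):

helm install oidc-example ./oidcExample \
  -f values-dev.yaml \
  --set image.tag=dev \
  --set service.type=NodePort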