# USAGE:
#
# This is Part-1 of deploying an SFTPPlus application backed by a shared
# persistent disk exposed via an NFS server.
#
# It uses a Compute Engine disk located in a single zone.
#
# This file handles the creation of the NFS server.
# Part-2 handles the creation of the SFTPPlus application.
#
# After updating the content of this file, copy it to a system that has
# kubectl access to your Kubernetes cluster and apply the changes using:
#
#     kubectl apply -f sftpplus-nfs-service.yaml
#
# WARNING: Following both parts deploys the SFTPPlus application with
# public access over the internet, using the default username and password.
#
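# Once applied, the rollout can be checked with standard kubectl commands
# (a minimal sketch; the resource names and labels match the definitions
# below):
#
#     kubectl get deployment,service nfs-server
#     kubectl get pods -l role=nfs-server
#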
# Claim for the Compute Engine persistent disk that backs the NFS export.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-disk-single-zone-claim
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
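# The claim above is provisioned through the cluster's default StorageClass;
# on GKE this is typically a zonal Compute Engine persistent disk, matching
# the single-zone note in the header. Whether it has bound can be checked
# with the command below; with a WaitForFirstConsumer StorageClass it stays
# Pending until the nfs-server pod is scheduled:
#
#     kubectl get pvc nfs-disk-single-zone-claim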
---
# ClusterIP Service exposing the standard NFS ports (nfs, mountd and rpcbind)
# to clients inside the cluster.
apiVersion: v1
kind: Service
metadata:
  labels:
    role: nfs-server
  name: nfs-server
  namespace: default
spec:
  ports:
    - name: 2049-to-2049-tcp
      port: 2049
      protocol: TCP
      targetPort: 2049
    - name: 20048-to-20048-tcp
      port: 20048
      protocol: TCP
      targetPort: 20048
    - name: 111-to-111-tcp
      port: 111
      protocol: TCP
      targetPort: 111
  selector:
    role: nfs-server
  sessionAffinity: None
  type: ClusterIP
---
# Single-replica NFS server that exports the claimed disk from /exports.
# The container runs privileged so it can serve NFS from inside the pod.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nfs-server
  name: nfs-server
  namespace: default
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      role: nfs-server
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      labels:
        role: nfs-server
    spec:
      containers:
        - image: gcr.io/google_containers/volume-nfs:0.8
          imagePullPolicy: IfNotPresent
          name: nfs-server
          ports:
            - containerPort: 2049
              name: nfs
              protocol: TCP
            - containerPort: 20048
              name: mountd
              protocol: TCP
            - containerPort: 111
              name: rpcbind
              protocol: TCP
          resources: {}
          securityContext:
            privileged: true
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            - mountPath: /exports
              name: nfs-disk-single-zone
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
      volumes:
        - name: nfs-disk-single-zone
          persistentVolumeClaim:
            claimName: nfs-disk-single-zone-claim
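# A hedged sketch (not part of this file, kept commented out) of how the
# SFTPPlus pods in Part-2 could mount the export served by the nfs-server
# Service above. The volume name "sftpplus-data" is only illustrative, the
# DNS name assumes the "default" namespace, some clusters need the Service's
# ClusterIP instead of its DNS name for NFS mounts, and the export path may
# be "/" or "/exports" depending on the image's export configuration:
#
#   volumes:
#     - name: sftpplus-data
#       nfs:
#         server: nfs-server.default.svc.cluster.local
#         path: "/"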