-# Calico Version v2.4.1
-# https://docs.projectcalico.org/v2.4/releases#v2.4.1
+# Calico Version v2.6.3
+# https://docs.projectcalico.org/v2.6/releases#v2.6.3
# This manifest includes the following component versions:
-# calico/node:v2.4.1
-# calico/cni:v1.10.0
-# calico/kube-policy-controller:v0.7.0
+# calico/node:v2.6.3
+# calico/cni:v1.11.1
+# calico/kube-controllers:v1.0.1
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
spec:
# Only run this pod on the master.
tolerations:
+      # This taint is set by all kubelets running `--cloud-provider=external`,
+      # so tolerate it so that the Calico pods can be scheduled.
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
- key: node-role.kubernetes.io/master
effect: NoSchedule
# Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
spec:
hostNetwork: true
tolerations:
+      # This taint is set by all kubelets running `--cloud-provider=external`,
+      # so tolerate it so that the Calico pods can be scheduled.
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
- key: node-role.kubernetes.io/master
effect: NoSchedule
# Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
- key: CriticalAddonsOnly
operator: Exists
serviceAccountName: calico-cni-plugin
+ # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
+ # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
+ terminationGracePeriodSeconds: 0
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
- image: quay.io/calico/node:v2.4.1
+ image: quay.io/calico/node:v2.6.3
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "kubeadm,bgp"
+            # Set noderef for node controller: expose the name of the
+            # Kubernetes Node this pod runs on via the downward API
+            # (fieldRef: spec.nodeName).
+            - name: CALICO_K8S_NODE_REF
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "info"
- - name: FELIX_HEALTHENABLED
- value: "true"
# Auto-detect the BGP IP address.
- name: IP
value: ""
+ - name: FELIX_HEALTHENABLED
+ value: "true"
securityContext:
privileged: true
resources:
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
- image: quay.io/calico/cni:v1.10.0
+ image: quay.io/calico/cni:v1.11.1
command: ["/install-cni.sh"]
env:
# The location of the Calico etcd cluster.
---
-# This manifest deploys the Calico policy controller on Kubernetes.
-# See https://github.com/projectcalico/k8s-policy
+# This manifest deploys the Calico Kubernetes controllers.
+# See https://github.com/projectcalico/kube-controllers
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
- name: calico-policy-controller
+ name: calico-kube-controllers
namespace: kube-system
labels:
- k8s-app: calico-policy
+ k8s-app: calico-kube-controllers
spec:
- # The policy controller can only have a single active instance.
+ # The controllers can only have a single active instance.
replicas: 1
strategy:
type: Recreate
template:
metadata:
- name: calico-policy-controller
+ name: calico-kube-controllers
namespace: kube-system
labels:
- k8s-app: calico-policy-controller
+ k8s-app: calico-kube-controllers
annotations:
# Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
# reserves resources for critical add-on pods so that they can be rescheduled after
# a failure. This annotation works in tandem with the toleration below.
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
- # The policy controller must run in the host network namespace so that
+      # The calico-kube-controllers deployment must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
tolerations:
+      # This taint is set by all kubelets running `--cloud-provider=external`,
+      # so tolerate it so that the Calico pods can be scheduled.
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
- key: node-role.kubernetes.io/master
effect: NoSchedule
# Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
# This, along with the annotation above marks this pod as a critical add-on.
- key: CriticalAddonsOnly
operator: Exists
- serviceAccountName: calico-policy-controller
+ serviceAccountName: calico-kube-controllers
containers:
- - name: calico-policy-controller
- image: quay.io/calico/kube-policy-controller:v0.7.0
+ - name: calico-kube-controllers
+ image: quay.io/calico/kube-controllers:v1.0.1
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
# service for API access.
- name: K8S_API
value: "https://kubernetes.default:443"
+            # Choose which controllers to run.
+            - name: ENABLED_CONTROLLERS
+              # Quoted to match the file's convention for env values and to
+              # keep the comma-separated list an unambiguous YAML string.
+              value: "policy,profile,workloadendpoint,node"
# Since we're running in the host namespace and might not have KubeDNS
# access, configure the container's /etc/hosts to resolve
# kubernetes.default to the correct service clusterIP.
- name: CONFIGURE_ETC_HOSTS
value: "true"
+
---
+
+# This deployment turns off the old "policy-controller". It should remain at 0 replicas, and then
+# be removed entirely once the new kube-controllers deployment has been deployed above.
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: calico-policy-controller
+  namespace: kube-system
+  labels:
+    k8s-app: calico-policy-controller
+spec:
+  # Turn this deployment off in favor of the kube-controllers deployment above.
+  replicas: 0
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      name: calico-policy-controller
+      namespace: kube-system
+      labels:
+        k8s-app: calico-policy-controller
+    spec:
+      hostNetwork: true
+      # Uses the new kube-controllers service account; the old
+      # calico-policy-controller RBAC objects are replaced elsewhere in this
+      # manifest.
+      serviceAccountName: calico-kube-controllers
+      containers:
+        - name: calico-policy-controller
+          image: quay.io/calico/kube-controllers:v1.0.1
+          env:
+            # The location of the Calico etcd cluster.
+            - name: ETCD_ENDPOINTS
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: etcd_endpoints
+
+---
+
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
- kind: ServiceAccount
name: calico-cni-plugin
namespace: kube-system
+
---
+
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-cni-plugin
- namespace: kube-system
rules:
- apiGroups: [""]
resources:
- nodes
verbs:
- get
+
---
+
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-cni-plugin
namespace: kube-system
+
---
+
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
- name: calico-policy-controller
+ name: calico-kube-controllers
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
- name: calico-policy-controller
+ name: calico-kube-controllers
subjects:
- kind: ServiceAccount
- name: calico-policy-controller
+ name: calico-kube-controllers
namespace: kube-system
+
---
+
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
- name: calico-policy-controller
- namespace: kube-system
+ name: calico-kube-controllers
rules:
- apiGroups:
- ""
- pods
- namespaces
- networkpolicies
+ - nodes
verbs:
- watch
- list
+
---
+
apiVersion: v1
kind: ServiceAccount
metadata:
- name: calico-policy-controller
+ name: calico-kube-controllers
namespace: kube-system