From df4099a85b982497c3bcb466d51ba2bb35f3b231 Mon Sep 17 00:00:00 2001 From: Casey Davenport Date: Mon, 24 Jul 2017 15:46:59 -0700 Subject: [PATCH] Changes for Calico release v2.4.0-rc1 --- _config.yml | 5 + _data/v2_4/navbars/getting-started.yml | 167 +++++ _data/v2_4/navbars/introduction.yml | 7 + _data/v2_4/navbars/reference.yml | 127 ++++ _data/v2_4/navbars/releases.yml | 7 + _data/v2_4/navbars/usage.yml | 57 ++ _data/versions.yml | 48 ++ _includes/v2.4/install-k8s-addons.md | 34 ++ _includes/v2.4/ports.md | 6 + _includes/v2.4/selectors.md | 15 + calico_node/Makefile | 2 +- .../bare-metal/bare-metal-install.md | 142 +++++ v2.4/getting-started/bare-metal/bare-metal.md | 431 +++++++++++++ v2.4/getting-started/docker/index.md | 57 ++ .../docker/installation/manual.md | 71 +++ .../docker/installation/requirements.md | 44 ++ .../installation/vagrant-coreos/Vagrantfile | 75 +++ .../installation/vagrant-coreos/index.md | 92 +++ .../vagrant-coreos/user-data-first | 83 +++ .../vagrant-coreos/user-data-others | 78 +++ .../installation/vagrant-ubuntu/Vagrantfile | 89 +++ .../installation/vagrant-ubuntu/index.md | 89 +++ .../getting-started/docker/troubleshooting.md | 4 + v2.4/getting-started/docker/tutorials/ipam.md | 44 ++ ...curity-using-calico-profiles-and-policy.md | 325 ++++++++++ .../security-using-calico-profiles.md | 106 ++++ ...y-using-docker-labels-and-calico-policy.md | 187 ++++++ v2.4/getting-started/docker/upgrade.md | 4 + v2.4/getting-started/index.md | 19 + v2.4/getting-started/kubernetes/index.md | 41 ++ .../kubernetes/installation/aws.md | 35 ++ .../kubernetes/installation/gce.md | 6 + .../installation/hosted/calico.yaml | 317 ++++++++++ .../installation/hosted/calicoctl.yaml | 25 + .../kubernetes/installation/hosted/hosted.md | 22 + .../kubernetes/installation/hosted/index.md | 126 ++++ .../hosted/kubeadm/1.5/calico.yaml | 285 +++++++++ .../hosted/kubeadm/1.6/calico.yaml | 376 ++++++++++++ .../installation/hosted/kubeadm/index.md | 70 +++ .../calico-networking/1.5/calico.yaml | 164 +++++ .../calico-networking/1.6/calico.yaml | 182 ++++++ .../kubernetes-datastore/calicoctl.yaml | 22 + .../hosted/kubernetes-datastore/index.md | 161 +++++ .../policy-only/1.5/calico.yaml | 161 +++++ .../policy-only/1.6/calico.yaml | 179 ++++++ .../kubernetes/installation/hosted/rbac.yaml | 100 +++ .../kubernetes/installation/index.md | 45 ++ .../kubernetes/installation/integration.md | 227 +++++++ .../installation/manifests/skydns.yaml | 124 ++++ .../installation/policy-controller.yaml | 54 ++ .../kubernetes/installation/rbac.yaml | 62 ++ .../installation/vagrant/Vagrantfile | 58 ++ .../kubernetes/installation/vagrant/index.md | 115 ++++ .../installation/vagrant/master-config.yaml | 130 ++++ .../installation/vagrant/node-config.yaml | 87 +++ .../kubernetes/troubleshooting.md | 55 ++ .../kubernetes/tutorials/advanced-policy.md | 279 +++++++++ .../kubernetes/tutorials/simple-policy.md | 145 +++++ .../tutorials/stars-policy/index.md | 96 +++ .../stars-policy/manifests/00-namespace.yaml | 4 + .../manifests/01-management-ui.yaml | 39 ++ .../stars-policy/manifests/02-backend.yaml | 34 ++ .../stars-policy/manifests/03-frontend.yaml | 34 ++ .../stars-policy/manifests/04-client.yaml | 40 ++ .../policies/allow-ui-client.yaml | 12 + .../stars-policy/policies/allow-ui.yaml | 12 + .../stars-policy/policies/backend-policy.yaml | 17 + .../stars-policy/policies/default-deny.yaml | 6 + .../policies/frontend-policy.yaml | 17 + .../tutorials/stars-policy/reset.sh | 27 + .../kubernetes/tutorials/using-calicoctl.md | 57 
++ v2.4/getting-started/kubernetes/upgrade.md | 206 +++++++ v2.4/getting-started/mesos/index.md | 42 ++ .../mesos/installation/dc-os/custom.md | 116 ++++ .../mesos/installation/dc-os/framework.md | 61 ++ .../mesos/installation/dc-os/index.md | 122 ++++ .../mesos/installation/integration.md | 79 +++ .../mesos/installation/prerequisites.md | 86 +++ .../installation/vagrant-centos/Vagrantfile | 220 +++++++ .../vagrant-centos/calico.service | 26 + .../installation/vagrant-centos/index.md | 66 ++ .../vagrant-centos/marathon-lb.service | 17 + .../vagrant-centos/mesos-dns.service | 8 + .../mesos/tutorials/connecting-tasks.md | 132 ++++ .../mesos/tutorials/launching-tasks.md | 116 ++++ .../tutorials/policy/docker-containerizer.md | 11 + .../policy/universal-containerizer.md | 157 +++++ .../getting-started/openshift/installation.md | 91 +++ .../getting-started/openstack/connectivity.md | 218 +++++++ v2.4/getting-started/openstack/index.md | 44 ++ .../openstack/installation/chef.md | 155 +++++ .../openstack/installation/devstack.md | 75 +++ .../openstack/installation/fuel.md | 331 ++++++++++ .../openstack/installation/index.md | 53 ++ .../openstack/installation/juju.md | 27 + .../openstack/installation/redhat.md | 515 ++++++++++++++++ .../openstack/installation/ubuntu.md | 409 +++++++++++++ v2.4/getting-started/openstack/neutron-api.md | 204 +++++++ v2.4/getting-started/openstack/tutorials.md | 119 ++++ v2.4/getting-started/openstack/upgrade.md | 245 ++++++++ .../getting-started/openstack/verification.md | 176 ++++++ v2.4/getting-started/rkt/index.md | 38 ++ .../rkt/installation/manual.md | 180 ++++++ .../installation/vagrant-coreos/Vagrantfile | 46 ++ .../vagrant-coreos/first-node-config.yaml | 55 ++ .../rkt/installation/vagrant-coreos/index.md | 113 ++++ .../vagrant-coreos/other-node-config.yaml | 49 ++ v2.4/getting-started/rkt/troubleshooting.md | 17 + v2.4/getting-started/rkt/tutorials/basic.md | 172 ++++++ v2.4/index.html | 9 + v2.4/introduction/index.html | 12 + v2.4/reference/advanced/etcd-rbac.md | 27 + v2.4/reference/architecture/components.md | 69 +++ v2.4/reference/architecture/data-path.md | 72 +++ v2.4/reference/architecture/index.md | 212 +++++++ v2.4/reference/calicoctl/commands/apply.md | 100 +++ v2.4/reference/calicoctl/commands/config.md | 126 ++++ v2.4/reference/calicoctl/commands/create.md | 101 ++++ v2.4/reference/calicoctl/commands/delete.md | 130 ++++ v2.4/reference/calicoctl/commands/get.md | 231 +++++++ v2.4/reference/calicoctl/commands/index.md | 59 ++ .../calicoctl/commands/ipam/index.md | 36 ++ .../calicoctl/commands/ipam/release.md | 52 ++ .../reference/calicoctl/commands/ipam/show.md | 60 ++ .../calicoctl/commands/node/checksystem.md | 32 + .../calicoctl/commands/node/diags.md | 69 +++ .../calicoctl/commands/node/index.md | 43 ++ v2.4/reference/calicoctl/commands/node/run.md | 358 +++++++++++ .../calicoctl/commands/node/status.md | 42 ++ v2.4/reference/calicoctl/commands/replace.md | 97 +++ v2.4/reference/calicoctl/commands/version.md | 43 ++ v2.4/reference/calicoctl/index.md | 31 + v2.4/reference/calicoctl/resources/bgppeer.md | 58 ++ .../calicoctl/resources/hostendpoint.md | 52 ++ v2.4/reference/calicoctl/resources/index.md | 87 +++ v2.4/reference/calicoctl/resources/ippool.md | 63 ++ v2.4/reference/calicoctl/resources/node.md | 52 ++ v2.4/reference/calicoctl/resources/policy.md | 111 ++++ v2.4/reference/calicoctl/resources/profile.md | 99 +++ .../calicoctl/resources/workloadendpoint.md | 63 ++ v2.4/reference/calicoctl/setup/etcdv2.md | 133 ++++ 
v2.4/reference/calicoctl/setup/index.md | 35 ++ v2.4/reference/calicoctl/setup/kubernetes.md | 95 +++ v2.4/reference/cni-plugin/configuration.md | 279 +++++++++ v2.4/reference/contribute.md | 52 ++ v2.4/reference/felix/configuration.md | 129 ++++ v2.4/reference/felix/prometheus.md | 108 ++++ v2.4/reference/index.md | 6 + v2.4/reference/involved.md | 42 ++ v2.4/reference/license.md | 70 +++ v2.4/reference/node/configuration.md | 119 ++++ .../policy-controller/configuration.md | 66 ++ v2.4/reference/previous-releases.md | 6 + .../private-cloud/l2-interconnect-fabric.md | 245 ++++++++ .../private-cloud/l3-interconnect-fabric.md | 572 ++++++++++++++++++ v2.4/reference/public-cloud/aws.md | 101 ++++ v2.4/reference/public-cloud/gce.md | 53 ++ v2.4/reference/repo-structure.md | 64 ++ v2.4/reference/requirements.md | 116 ++++ v2.4/reference/supported-platforms.md | 10 + v2.4/releases/index.md | 21 + v2.4/usage/calicoctl/container.md | 23 + .../calicoctl/install-and-configuration.md | 76 +++ v2.4/usage/configuration/as-service.md | 112 ++++ v2.4/usage/configuration/bgp.md | 256 ++++++++ v2.4/usage/configuration/conntrack.md | 14 + v2.4/usage/configuration/ip-in-ip.md | 114 ++++ v2.4/usage/configuration/mtu.md | 90 +++ v2.4/usage/configuration/node.md | 126 ++++ v2.4/usage/decommissioning-a-node.md | 80 +++ v2.4/usage/external-connectivity.md | 84 +++ v2.4/usage/index.md | 5 + v2.4/usage/ipv6.md | 95 +++ v2.4/usage/openstack/configuration.md | 63 ++ v2.4/usage/openstack/floating-ips.md | 86 +++ v2.4/usage/openstack/host-routes.md | 77 +++ v2.4/usage/openstack/kuryr.md | 37 ++ v2.4/usage/openstack/semantics.md | 119 ++++ v2.4/usage/openstack/service-ips.md | 360 +++++++++++ v2.4/usage/routereflector/bird-rr-config.md | 186 ++++++ .../routereflector/calico-routereflector.md | 323 ++++++++++ v2.4/usage/routereflector/mesh-topology.png | Bin 0 -> 93744 bytes v2.4/usage/troubleshooting/faq.md | 364 +++++++++++ v2.4/usage/troubleshooting/index.md | 92 +++ v2.4/usage/troubleshooting/logging.md | 85 +++ 185 files changed, 19044 insertions(+), 1 deletion(-) create mode 100644 _data/v2_4/navbars/getting-started.yml create mode 100644 _data/v2_4/navbars/introduction.yml create mode 100644 _data/v2_4/navbars/reference.yml create mode 100644 _data/v2_4/navbars/releases.yml create mode 100644 _data/v2_4/navbars/usage.yml create mode 100644 _includes/v2.4/install-k8s-addons.md create mode 100644 _includes/v2.4/ports.md create mode 100644 _includes/v2.4/selectors.md create mode 100644 v2.4/getting-started/bare-metal/bare-metal-install.md create mode 100644 v2.4/getting-started/bare-metal/bare-metal.md create mode 100644 v2.4/getting-started/docker/index.md create mode 100644 v2.4/getting-started/docker/installation/manual.md create mode 100644 v2.4/getting-started/docker/installation/requirements.md create mode 100644 v2.4/getting-started/docker/installation/vagrant-coreos/Vagrantfile create mode 100644 v2.4/getting-started/docker/installation/vagrant-coreos/index.md create mode 100644 v2.4/getting-started/docker/installation/vagrant-coreos/user-data-first create mode 100644 v2.4/getting-started/docker/installation/vagrant-coreos/user-data-others create mode 100644 v2.4/getting-started/docker/installation/vagrant-ubuntu/Vagrantfile create mode 100644 v2.4/getting-started/docker/installation/vagrant-ubuntu/index.md create mode 100644 v2.4/getting-started/docker/troubleshooting.md create mode 100644 v2.4/getting-started/docker/tutorials/ipam.md create mode 100644 
v2.4/getting-started/docker/tutorials/security-using-calico-profiles-and-policy.md create mode 100644 v2.4/getting-started/docker/tutorials/security-using-calico-profiles.md create mode 100644 v2.4/getting-started/docker/tutorials/security-using-docker-labels-and-calico-policy.md create mode 100644 v2.4/getting-started/docker/upgrade.md create mode 100644 v2.4/getting-started/index.md create mode 100644 v2.4/getting-started/kubernetes/index.md create mode 100644 v2.4/getting-started/kubernetes/installation/aws.md create mode 100644 v2.4/getting-started/kubernetes/installation/gce.md create mode 100644 v2.4/getting-started/kubernetes/installation/hosted/calico.yaml create mode 100644 v2.4/getting-started/kubernetes/installation/hosted/calicoctl.yaml create mode 100644 v2.4/getting-started/kubernetes/installation/hosted/hosted.md create mode 100644 v2.4/getting-started/kubernetes/installation/hosted/index.md create mode 100644 v2.4/getting-started/kubernetes/installation/hosted/kubeadm/1.5/calico.yaml create mode 100644 v2.4/getting-started/kubernetes/installation/hosted/kubeadm/1.6/calico.yaml create mode 100644 v2.4/getting-started/kubernetes/installation/hosted/kubeadm/index.md create mode 100644 v2.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.5/calico.yaml create mode 100644 v2.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.6/calico.yaml create mode 100644 v2.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calicoctl.yaml create mode 100644 v2.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/index.md create mode 100644 v2.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/policy-only/1.5/calico.yaml create mode 100644 v2.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/policy-only/1.6/calico.yaml create mode 100644 v2.4/getting-started/kubernetes/installation/hosted/rbac.yaml create mode 100644 v2.4/getting-started/kubernetes/installation/index.md create mode 100644 v2.4/getting-started/kubernetes/installation/integration.md create mode 100644 v2.4/getting-started/kubernetes/installation/manifests/skydns.yaml create mode 100644 v2.4/getting-started/kubernetes/installation/policy-controller.yaml create mode 100644 v2.4/getting-started/kubernetes/installation/rbac.yaml create mode 100644 v2.4/getting-started/kubernetes/installation/vagrant/Vagrantfile create mode 100644 v2.4/getting-started/kubernetes/installation/vagrant/index.md create mode 100644 v2.4/getting-started/kubernetes/installation/vagrant/master-config.yaml create mode 100644 v2.4/getting-started/kubernetes/installation/vagrant/node-config.yaml create mode 100644 v2.4/getting-started/kubernetes/troubleshooting.md create mode 100644 v2.4/getting-started/kubernetes/tutorials/advanced-policy.md create mode 100644 v2.4/getting-started/kubernetes/tutorials/simple-policy.md create mode 100644 v2.4/getting-started/kubernetes/tutorials/stars-policy/index.md create mode 100644 v2.4/getting-started/kubernetes/tutorials/stars-policy/manifests/00-namespace.yaml create mode 100644 v2.4/getting-started/kubernetes/tutorials/stars-policy/manifests/01-management-ui.yaml create mode 100644 v2.4/getting-started/kubernetes/tutorials/stars-policy/manifests/02-backend.yaml create mode 100644 v2.4/getting-started/kubernetes/tutorials/stars-policy/manifests/03-frontend.yaml create mode 100644 v2.4/getting-started/kubernetes/tutorials/stars-policy/manifests/04-client.yaml create mode 
100644 v2.4/getting-started/kubernetes/tutorials/stars-policy/policies/allow-ui-client.yaml create mode 100644 v2.4/getting-started/kubernetes/tutorials/stars-policy/policies/allow-ui.yaml create mode 100644 v2.4/getting-started/kubernetes/tutorials/stars-policy/policies/backend-policy.yaml create mode 100644 v2.4/getting-started/kubernetes/tutorials/stars-policy/policies/default-deny.yaml create mode 100644 v2.4/getting-started/kubernetes/tutorials/stars-policy/policies/frontend-policy.yaml create mode 100755 v2.4/getting-started/kubernetes/tutorials/stars-policy/reset.sh create mode 100644 v2.4/getting-started/kubernetes/tutorials/using-calicoctl.md create mode 100644 v2.4/getting-started/kubernetes/upgrade.md create mode 100644 v2.4/getting-started/mesos/index.md create mode 100644 v2.4/getting-started/mesos/installation/dc-os/custom.md create mode 100644 v2.4/getting-started/mesos/installation/dc-os/framework.md create mode 100644 v2.4/getting-started/mesos/installation/dc-os/index.md create mode 100644 v2.4/getting-started/mesos/installation/integration.md create mode 100644 v2.4/getting-started/mesos/installation/prerequisites.md create mode 100644 v2.4/getting-started/mesos/installation/vagrant-centos/Vagrantfile create mode 100644 v2.4/getting-started/mesos/installation/vagrant-centos/calico.service create mode 100644 v2.4/getting-started/mesos/installation/vagrant-centos/index.md create mode 100644 v2.4/getting-started/mesos/installation/vagrant-centos/marathon-lb.service create mode 100644 v2.4/getting-started/mesos/installation/vagrant-centos/mesos-dns.service create mode 100644 v2.4/getting-started/mesos/tutorials/connecting-tasks.md create mode 100644 v2.4/getting-started/mesos/tutorials/launching-tasks.md create mode 100644 v2.4/getting-started/mesos/tutorials/policy/docker-containerizer.md create mode 100644 v2.4/getting-started/mesos/tutorials/policy/universal-containerizer.md create mode 100644 v2.4/getting-started/openshift/installation.md create mode 100644 v2.4/getting-started/openstack/connectivity.md create mode 100644 v2.4/getting-started/openstack/index.md create mode 100644 v2.4/getting-started/openstack/installation/chef.md create mode 100644 v2.4/getting-started/openstack/installation/devstack.md create mode 100644 v2.4/getting-started/openstack/installation/fuel.md create mode 100644 v2.4/getting-started/openstack/installation/index.md create mode 100644 v2.4/getting-started/openstack/installation/juju.md create mode 100644 v2.4/getting-started/openstack/installation/redhat.md create mode 100644 v2.4/getting-started/openstack/installation/ubuntu.md create mode 100644 v2.4/getting-started/openstack/neutron-api.md create mode 100644 v2.4/getting-started/openstack/tutorials.md create mode 100644 v2.4/getting-started/openstack/upgrade.md create mode 100644 v2.4/getting-started/openstack/verification.md create mode 100644 v2.4/getting-started/rkt/index.md create mode 100644 v2.4/getting-started/rkt/installation/manual.md create mode 100644 v2.4/getting-started/rkt/installation/vagrant-coreos/Vagrantfile create mode 100644 v2.4/getting-started/rkt/installation/vagrant-coreos/first-node-config.yaml create mode 100644 v2.4/getting-started/rkt/installation/vagrant-coreos/index.md create mode 100644 v2.4/getting-started/rkt/installation/vagrant-coreos/other-node-config.yaml create mode 100644 v2.4/getting-started/rkt/troubleshooting.md create mode 100644 v2.4/getting-started/rkt/tutorials/basic.md create mode 100644 v2.4/index.html create mode 100644 
v2.4/introduction/index.html create mode 100644 v2.4/reference/advanced/etcd-rbac.md create mode 100644 v2.4/reference/architecture/components.md create mode 100644 v2.4/reference/architecture/data-path.md create mode 100644 v2.4/reference/architecture/index.md create mode 100644 v2.4/reference/calicoctl/commands/apply.md create mode 100644 v2.4/reference/calicoctl/commands/config.md create mode 100644 v2.4/reference/calicoctl/commands/create.md create mode 100644 v2.4/reference/calicoctl/commands/delete.md create mode 100644 v2.4/reference/calicoctl/commands/get.md create mode 100644 v2.4/reference/calicoctl/commands/index.md create mode 100644 v2.4/reference/calicoctl/commands/ipam/index.md create mode 100644 v2.4/reference/calicoctl/commands/ipam/release.md create mode 100644 v2.4/reference/calicoctl/commands/ipam/show.md create mode 100644 v2.4/reference/calicoctl/commands/node/checksystem.md create mode 100644 v2.4/reference/calicoctl/commands/node/diags.md create mode 100644 v2.4/reference/calicoctl/commands/node/index.md create mode 100644 v2.4/reference/calicoctl/commands/node/run.md create mode 100644 v2.4/reference/calicoctl/commands/node/status.md create mode 100644 v2.4/reference/calicoctl/commands/replace.md create mode 100644 v2.4/reference/calicoctl/commands/version.md create mode 100644 v2.4/reference/calicoctl/index.md create mode 100644 v2.4/reference/calicoctl/resources/bgppeer.md create mode 100644 v2.4/reference/calicoctl/resources/hostendpoint.md create mode 100644 v2.4/reference/calicoctl/resources/index.md create mode 100644 v2.4/reference/calicoctl/resources/ippool.md create mode 100644 v2.4/reference/calicoctl/resources/node.md create mode 100644 v2.4/reference/calicoctl/resources/policy.md create mode 100644 v2.4/reference/calicoctl/resources/profile.md create mode 100644 v2.4/reference/calicoctl/resources/workloadendpoint.md create mode 100644 v2.4/reference/calicoctl/setup/etcdv2.md create mode 100644 v2.4/reference/calicoctl/setup/index.md create mode 100644 v2.4/reference/calicoctl/setup/kubernetes.md create mode 100644 v2.4/reference/cni-plugin/configuration.md create mode 100644 v2.4/reference/contribute.md create mode 100644 v2.4/reference/felix/configuration.md create mode 100644 v2.4/reference/felix/prometheus.md create mode 100644 v2.4/reference/index.md create mode 100644 v2.4/reference/involved.md create mode 100644 v2.4/reference/license.md create mode 100644 v2.4/reference/node/configuration.md create mode 100644 v2.4/reference/policy-controller/configuration.md create mode 100644 v2.4/reference/previous-releases.md create mode 100644 v2.4/reference/private-cloud/l2-interconnect-fabric.md create mode 100644 v2.4/reference/private-cloud/l3-interconnect-fabric.md create mode 100644 v2.4/reference/public-cloud/aws.md create mode 100644 v2.4/reference/public-cloud/gce.md create mode 100644 v2.4/reference/repo-structure.md create mode 100644 v2.4/reference/requirements.md create mode 100644 v2.4/reference/supported-platforms.md create mode 100644 v2.4/releases/index.md create mode 100644 v2.4/usage/calicoctl/container.md create mode 100644 v2.4/usage/calicoctl/install-and-configuration.md create mode 100644 v2.4/usage/configuration/as-service.md create mode 100644 v2.4/usage/configuration/bgp.md create mode 100644 v2.4/usage/configuration/conntrack.md create mode 100644 v2.4/usage/configuration/ip-in-ip.md create mode 100644 v2.4/usage/configuration/mtu.md create mode 100644 v2.4/usage/configuration/node.md create mode 100644 
v2.4/usage/decommissioning-a-node.md create mode 100644 v2.4/usage/external-connectivity.md create mode 100644 v2.4/usage/index.md create mode 100644 v2.4/usage/ipv6.md create mode 100644 v2.4/usage/openstack/configuration.md create mode 100644 v2.4/usage/openstack/floating-ips.md create mode 100644 v2.4/usage/openstack/host-routes.md create mode 100644 v2.4/usage/openstack/kuryr.md create mode 100644 v2.4/usage/openstack/semantics.md create mode 100644 v2.4/usage/openstack/service-ips.md create mode 100644 v2.4/usage/routereflector/bird-rr-config.md create mode 100644 v2.4/usage/routereflector/calico-routereflector.md create mode 100644 v2.4/usage/routereflector/mesh-topology.png create mode 100644 v2.4/usage/troubleshooting/faq.md create mode 100644 v2.4/usage/troubleshooting/index.md create mode 100644 v2.4/usage/troubleshooting/logging.md diff --git a/_config.yml b/_config.yml index 119982b69ae..e50a9dd02ba 100644 --- a/_config.yml +++ b/_config.yml @@ -65,3 +65,8 @@ defaults: path: v2.3 values: version: v2.3 + - + scope: + path: v2.4 + values: + version: v2.4 diff --git a/_data/v2_4/navbars/getting-started.yml b/_data/v2_4/navbars/getting-started.yml new file mode 100644 index 00000000000..fb356dfc68b --- /dev/null +++ b/_data/v2_4/navbars/getting-started.yml @@ -0,0 +1,167 @@ +bigheader: "Getting Started" +abstract: "All you need to know to get started with Calico" +path: /getting-started +order: 1 +toc: +- title: Getting Started with Calico + path: /getting-started/ +- title: Kubernetes + section: + - title: Overview + path: /getting-started/kubernetes/ + - title: Installation + section: + - title: Introduction + path: /getting-started/kubernetes/installation/ + - title: Hosted (via kubectl) + section: + - title: Introduction + path: /getting-started/kubernetes/installation/hosted/ + - title: Standard Hosted Install + path: /getting-started/kubernetes/installation/hosted/hosted + - title: Kubeadm Hosted Install + path: /getting-started/kubernetes/installation/hosted/kubeadm/ + - title: Kubernetes Datastore Hosted Install + path: /getting-started/kubernetes/installation/hosted/kubernetes-datastore/ + - title: Integration Guide + path: /getting-started/kubernetes/installation/integration + - title: AWS + path: /getting-started/kubernetes/installation/aws + - title: GCE + path: /getting-started/kubernetes/installation/gce + - title: 'Vagrant/VirtualBox: CoreOS' + path: /getting-started/kubernetes/installation/vagrant/ + - title: Tutorials + section: + - title: Using calicoctl + path: /getting-started/kubernetes/tutorials/using-calicoctl + - title: Simple Policy Demo + path: /getting-started/kubernetes/tutorials/simple-policy + - title: Stars Policy Demo + path: /getting-started/kubernetes/tutorials/stars-policy/ + - title: Advanced network policy + path: /getting-started/kubernetes/tutorials/advanced-policy + - title: Troubleshooting / FAQ + path: /getting-started/kubernetes/troubleshooting + - title: Upgrade + path: /getting-started/kubernetes/upgrade + +- title: OpenShift + section: + - title: Installation + path: /getting-started/openshift/installation + +- title: Mesos + section: + - title: Overview + path: /getting-started/mesos/ + - title: Installation + section: + - title: Requirements + path: /getting-started/mesos/installation/prerequisites + - title: Integration Guide + path: /getting-started/mesos/installation/integration + - title: DC/OS + section: + - title: Overview + path: /getting-started/mesos/installation/dc-os/ + - title: Universe Package Install + path: 
/getting-started/mesos/installation/dc-os/framework + - title: Customize Universe Package Install + path: /getting-started/mesos/installation/dc-os/custom + - title: 'Vagrant/VirtualBox: CentOS' + path: /getting-started/mesos/installation/vagrant-centos/ + - title: Tutorials + section: + - title: Launching Tasks + path: /getting-started/mesos/tutorials/launching-tasks + - title: Connecting to Tasks + path: /getting-started/mesos/tutorials/connecting-tasks + - title: Configuring Policy + section: + - title: Universal Containerizer + path: /getting-started/mesos/tutorials/policy/universal-containerizer + - title: Docker Containerizer + path: /getting-started/mesos/tutorials/policy/docker-containerizer + +- title: Docker + section: + - title: Overview + path: /getting-started/docker/ + - title: Installation + section: + - title: Requirements + path: /getting-started/docker/installation/requirements + - title: Manual + path: /getting-started/docker/installation/manual + - title: 'Vagrant/VirtualBox: CoreOS' + path: /getting-started/docker/installation/vagrant-coreos/ + - title: 'Vagrant/VirtualBox: Ubuntu' + path: /getting-started/docker/installation/vagrant-ubuntu/ + - title: Tutorials + section: + - title: Security using Calico Profiles + path: /getting-started/docker/tutorials/security-using-calico-profiles + - title: Security using Calico Profiles and Policy + path: /getting-started/docker/tutorials/security-using-calico-profiles-and-policy + - title: Security using Docker Labels and Calico Policy + path: /getting-started/docker/tutorials/security-using-docker-labels-and-calico-policy + - title: IPAM + path: /getting-started/docker/tutorials/ipam + +- title: OpenStack + section: + - title: Overview + path: /getting-started/openstack/ + - title: Installation + section: + - title: Overview + path: /getting-started/openstack/installation/ + - title: Ubuntu + path: /getting-started/openstack/installation/ubuntu + - title: Red Hat Enterprise Linux 7 + path: /getting-started/openstack/installation/redhat + - title: Juju Charms + path: /getting-started/openstack/installation/juju + - title: Fuel + path: /getting-started/openstack/installation/fuel + - title: DevStack + path: /getting-started/openstack/installation/devstack + - title: Chef + path: /getting-started/openstack/installation/chef + - title: Upgrade + path: /getting-started/openstack/upgrade + - title: Tutorials + path: /getting-started/openstack/tutorials + - title: Troubleshooting + path: /getting-started/openstack/verification + - title: IP Addressing and Connectivity + path: /getting-started/openstack/connectivity + - title: Use of the Neutron API + path: /getting-started/openstack/neutron-api + +- title: rkt + section: + - title: Overview + path: /getting-started/rkt/ + - title: Installation + section: + - title: 'Manual Installation' + path: /getting-started/rkt/installation/manual + - title: 'Vagrant/VirtualBox: CoreOS' + path: /getting-started/rkt/installation/vagrant-coreos/ + - title: Tutorials + section: + - title: Basic Network Isolation + path: /getting-started/rkt/tutorials/basic + - title: Troubleshooting + path: /getting-started/rkt/troubleshooting + +- title: Host Protection + section: + - title: Overview + path: /getting-started/bare-metal/bare-metal + - title: Installation + section: + - title: Installing Felix as a static binary + path: /getting-started/bare-metal/bare-metal-install diff --git a/_data/v2_4/navbars/introduction.yml b/_data/v2_4/navbars/introduction.yml new file mode 100644 index 
00000000000..938031d11b2 --- /dev/null +++ b/_data/v2_4/navbars/introduction.yml @@ -0,0 +1,7 @@ +bigheader: "Introduction" +abstract: "Background information on Calico" +path: /introduction +order: 0 +toc: +- title: Introduction + path: /introduction/ diff --git a/_data/v2_4/navbars/reference.yml b/_data/v2_4/navbars/reference.yml new file mode 100644 index 00000000000..55e76086b8f --- /dev/null +++ b/_data/v2_4/navbars/reference.yml @@ -0,0 +1,127 @@ +bigheader: "Reference" +abstract: "Reference Information for Calico" +path: /reference +order: 3 +toc: +- title: Reference + path: /reference/ +- title: Supported Platforms + path: /reference/supported-platforms +- title: calicoctl + section: + - title: Overview + path: /reference/calicoctl/ + - title: Command Reference + path: /reference/calicoctl/commands + section: + - title: Overview + path: /reference/calicoctl/commands/ + - title: create + path: /reference/calicoctl/commands/create + - title: replace + path: /reference/calicoctl/commands/replace + - title: apply + path: /reference/calicoctl/commands/apply + - title: delete + path: /reference/calicoctl/commands/delete + - title: get + path: /reference/calicoctl/commands/get + - title: config + path: /reference/calicoctl/commands/config + - title: ipam + section: + - title: Overview + path: /reference/calicoctl/commands/ipam/ + - title: release + path: /reference/calicoctl/commands/ipam/release + - title: show + path: /reference/calicoctl/commands/ipam/show + - title: node + section: + - title: Overview + path: /reference/calicoctl/commands/node/ + - title: run + path: /reference/calicoctl/commands/node/run + - title: status + path: /reference/calicoctl/commands/node/status + - title: diags + path: /reference/calicoctl/commands/node/diags + - title: checksystem + path: /reference/calicoctl/commands/node/checksystem + - title: version + path: /reference/calicoctl/commands/version + - title: Resource Definitions + path: /reference/calicoctl/resources + section: + - title: Overview + path: /reference/calicoctl/resources/ + - title: BGP Peer + path: /reference/calicoctl/resources/bgppeer + - title: Host Endpoint + path: /reference/calicoctl/resources/hostendpoint + - title: IP Pool + path: /reference/calicoctl/resources/ippool + - title: Node + path: /reference/calicoctl/resources/node + - title: Policy + path: /reference/calicoctl/resources/policy + - title: Profile + path: /reference/calicoctl/resources/profile + - title: Workload Endpoint + path: /reference/calicoctl/resources/workloadendpoint + - title: Setup + section: + - title: Overview + path: /reference/calicoctl/setup/ + - title: etcdv2 datastore + path: /reference/calicoctl/setup/etcdv2 + - title: kubernetes api datastore + path: /reference/calicoctl/setup/kubernetes +- title: calico/node + section: + - title: Configuration + path: /reference/node/configuration +- title: felix + section: + - title: Configuration + path: /reference/felix/configuration + - title: Prometheus Statistics + path: /reference/felix/prometheus +- title: Calico CNI Plugins + path: /reference/cni-plugin/configuration +- title: Calico Policy Controller + path: /reference/policy-controller/configuration +- title: Deploying on Public Cloud + section: + - title: AWS + path: /reference/public-cloud/aws + - title: GCE + path: /reference/public-cloud/gce +- title: Deploying on Private Cloud + section: + - title: Calico Over Ethernet Fabrics + path: /reference/private-cloud/l2-interconnect-fabric + - title: Calico Over IP Fabrics + path: 
/reference/private-cloud/l3-interconnect-fabric +- title: Architecture + section: + - title: Introduction + path: /reference/architecture/ + - title: calico/node + path: /reference/architecture/components + - title: Data Path + path: /reference/architecture/data-path +- title: Advanced + section: + - title: RBAC for etcdv2 + path: /reference/advanced/etcd-rbac +- title: System Requirements + path: /reference/requirements +- title: Repo Structure + path: /reference/repo-structure +- title: Contribute + path: /reference/contribute +- title: Involved + path: /reference/involved +- title: License + path: /reference/license diff --git a/_data/v2_4/navbars/releases.yml b/_data/v2_4/navbars/releases.yml new file mode 100644 index 00000000000..cf05a4a29ae --- /dev/null +++ b/_data/v2_4/navbars/releases.yml @@ -0,0 +1,7 @@ +bigheader: Releases +abstract: Versioning information for Calico Components +path: /releases +order: 4 +toc: +- title: Releases + path: /releases/ diff --git a/_data/v2_4/navbars/usage.yml b/_data/v2_4/navbars/usage.yml new file mode 100644 index 00000000000..1aa458be775 --- /dev/null +++ b/_data/v2_4/navbars/usage.yml @@ -0,0 +1,57 @@ +bigheader: "Usage" +abstract: "How to configure and troubleshoot Calico systems" +path: /usage +order: 2 +toc: +- title: Using Calico + path: /usage/ +- title: calicoctl + section: + - title: Installing and Configuring calicoctl + path: /usage/calicoctl/install-and-configuration + - title: calico/ctl container + path: /usage/calicoctl/container +- title: Calico and Systemd + path: /usage/configuration/as-service +- title: Configuring BGP Peers + path: /usage/configuration/bgp +- title: Route Reflector + section: + - title: Configuring BIRD + path: /usage/routereflector/bird-rr-config + - title: The calico/routereflector container + path: /usage/routereflector/calico-routereflector +- title: IPv6 + path: /usage/ipv6 +- title: External Connectivity + path: /usage/external-connectivity +- title: Configuring IP-in-IP + path: /usage/configuration/ip-in-ip +- title: Configuring a Node IP Address and Subnet + path: /usage/configuration/node +- title: Configuring MTU + path: /usage/configuration/mtu +- title: Decommissioning a Node + path: /usage/decommissioning-a-node +- title: Configuring Conntrack + path: /usage/configuration/conntrack +- title: Calico for OpenStack + section: + - title: Configuration + path: /usage/openstack/configuration + - title: Detailed Semantics + path: /usage/openstack/semantics + - title: Floating IPs + path: /usage/openstack/floating-ips + - title: Service IPs + path: /usage/openstack/service-ips + - title: Host routes + path: /usage/openstack/host-routes + - title: Kuryr + path: /usage/openstack/kuryr +- title: Troubleshooting + path: /usage/troubleshooting/ +- title: Logging + path: /usage/troubleshooting/logging +- title: Frequently Asked Questions + path: /usage/troubleshooting/faq diff --git a/_data/versions.yml b/_data/versions.yml index 84b74cb1a60..921dc06c073 100644 --- a/_data/versions.yml +++ b/_data/versions.yml @@ -708,6 +708,54 @@ v2.1: version: 1.4.1 url: http://git.openstack.org/cgit/openstack/networking-calico/commit/?h=1.4.1 + +v2.4: +- title: v2.4.0-rc1 + note: | + 24 July 2017 + + ### Changes / New Features + + Work-in-progress release notes for the upcoming v2.4.0 release can be found on GitHub. 
+ + components: + felix: + version: 2.4.0-rc1 + url: https://github.com/projectcalico/felix/releases/tag/2.4.0-rc1 + typha: + version: v0.3.0 + url: https://github.com/projectcalico/typha/releases/tag/v0.3.0 + calicoctl: + version: v1.4.0-rc1 + url: https://github.com/projectcalico/calicoctl/releases/tag/v1.4.0-rc1 + download_url: https://github.com/projectcalico/calicoctl/releases/download/v1.4.0-rc1/calicoctl + calico/node: + version: v2.4.0-rc1 + calico/cni: + version: v1.10.0 + url: https://github.com/projectcalico/cni-plugin/releases/tag/v1.10.0 + download_calico_url: https://github.com/projectcalico/cni-plugin/releases/download/v1.10.0/calico + download_calico_ipam_url: https://github.com/projectcalico/cni-plugin/releases/download/v1.10.0/calico-ipam + calico-bird: + version: v0.3.1 + url: https://github.com/projectcalico/calico-bird/releases/tag/v0.3.1 + calico-bgp-daemon: + version: v0.2.1 + url: https://github.com/projectcalico/calico-bgp-daemon/releases/tag/v0.2.1 + libnetwork-plugin: + version: v1.1.0 + url: https://github.com/projectcalico/libnetwork-plugin/releases/tag/v1.1.0 + calico/kube-policy-controller: + version: v0.7.0 + url: https://github.com/projectcalico/k8s-policy/releases/tag/v0.7.0 + networking-calico: + version: 1.4.2 + url: http://git.openstack.org/cgit/openstack/networking-calico/commit/?h=1.4.2 + calico/routereflector: + version: v0.3.0 + url: "" + + # The master release stream is used to generate the master version of the docs, # as well as for builds of `calico/node:master` via CI. Submit PRs to update the # versions when a component changes. diff --git a/_includes/v2.4/install-k8s-addons.md b/_includes/v2.4/install-k8s-addons.md new file mode 100644 index 00000000000..a6a39705c2a --- /dev/null +++ b/_includes/v2.4/install-k8s-addons.md @@ -0,0 +1,34 @@ +### Install Calico +Calico can be installed on Kubernetes using Kubernetes resources (DaemonSets, etc). + +The Calico self-hosted installation consists of three objects in the `kube-system` Namespace: + +- A `ConfigMap` which contains the Calico configuration. +- A `DaemonSet` which installs the `calico/node` pod and CNI plugin. +- A `ReplicaSet` which installs the `calico/kube-policy-controller` pod. + +Install the Calico manifest: + +```shell +kubectl apply -f {{site.url}}/{{page.version}}/getting-started/kubernetes/installation/hosted/calico.yaml +``` + +You should see the pods start in the `kube-system` Namespace: + +```shell +$ kubectl get pods --namespace=kube-system +NAME READY STATUS RESTARTS AGE +calico-node-1f4ih 2/2 Running 0 1m +calico-node-hor7x 2/2 Running 0 1m +calico-node-si5br 2/2 Running 0 1m +calico-policy-controller-so4gl 1/1 Running 0 1m + info: 1 completed object(s) was(were) not shown in pods list. Pass --show-all to see all objects. +``` + +### Install DNS + +To install KubeDNS, use the provided manifest. This enables Kubernetes Service discovery. + +```shell +kubectl apply -f {{site.url}}/{{page.version}}/getting-started/kubernetes/installation/manifests/skydns.yaml +``` diff --git a/_includes/v2.4/ports.md b/_includes/v2.4/ports.md new file mode 100644 index 00000000000..47478577f0c --- /dev/null +++ b/_includes/v2.4/ports.md @@ -0,0 +1,6 @@ +Calico supports the following syntaxes for expressing ports. 
+ +| Syntax | Example | Description | +|------------|-----------|-------------| +| int | 80 | The exact port specified +| start:end | 6040:6050 | All ports within the range start <= x <= end diff --git a/_includes/v2.4/selectors.md b/_includes/v2.4/selectors.md new file mode 100644 index 00000000000..b63d1033a20 --- /dev/null +++ b/_includes/v2.4/selectors.md @@ -0,0 +1,15 @@ +A label selector is an expression which either matches or does not match an endpoint based on its labels. + +Calico label selectors support a number of syntactic primitives. Each of the following +primitive expressions can be combined using the logical operator `&&`. + +| Syntax | Meaning | +|---------------------|-----------------------------| +| k == 'v' | Matches any endpoint with the label 'k' and value 'v'. +| k != 'v' | Matches any endpoint with the label 'k' and value that is _not_ 'v'. +| has(k) | Matches any endpoint with label 'k', independent of value. +| !has(k) | Matches any endpoint that does not have label 'k' +| k in { 'v1', 'v2' } | Matches any endpoint with label 'k' and value in the given set +| k not in { 'v1', 'v2' } | Matches any endpoint without label 'k' or any with label 'k' and value _not_ in the given set + + diff --git a/calico_node/Makefile b/calico_node/Makefile index e1bdf739557..6c3f42a4a57 100644 --- a/calico_node/Makefile +++ b/calico_node/Makefile @@ -1,6 +1,6 @@ ############################################################################### # Versions: -RELEASE_STREAM?=v2.3 +RELEASE_STREAM?=v2.4 GO_BUILD_VER:=v0.7 CALICO_BUILD?=calico/go-build:$(GO_BUILD_VER) diff --git a/v2.4/getting-started/bare-metal/bare-metal-install.md b/v2.4/getting-started/bare-metal/bare-metal-install.md new file mode 100644 index 00000000000..e8821507df1 --- /dev/null +++ b/v2.4/getting-started/bare-metal/bare-metal-install.md @@ -0,0 +1,142 @@ +--- +title: Installing Felix as a static binary +--- + +These instructions will take you through a first-time install of +Calico's per-host daemon, Felix, starting with the calico-felix binary. +These instructions apply to Calico v2.1.0 and above. Older versions + were packaged differently. + +> **NOTE** +> +> This install process is most suited to bare-metal-only +> installations where Felix is to be used to control policy for the +> host's interfaces. For OpenStack and containers there are +> additional daemons that need to be installed, which are not +> covered here. +> + +## Prerequisites + +Felix has the following pre-requisites: + +- For IPv4 support, Linux kernel v2.6.32 is required. We have tested + against v2.6.32-573+. Note: if you intend to run containers, Docker + requires kernel >=v3.10. The kernel's version can be checked with + `uname -a`. +- For IPv6 support, Linux kernel 3.10+ is required (due to the lack of + reverse path filtering for IPv6 in older versions). +- [conntrack-tools](http://conntrack-tools.netfilter.org/); in + particular, the `conntrack` command must be available. We test + against >=v1.4.1. To check the version, run `conntrack --version`. +- [iptables](http://www.netfilter.org/projects/iptables/index.html); + for IPv6 support, the `ip6tables` command must be available. We test + against >=v1.4.7. To check the version, run `iptables --version`. +- [ipset](http://ipset.netfilter.org/); we test against >=v6.11. To + check the version, run `ipset --version`. +- The conntrack, iptables and ipsets kernel modules must be available + (or compiled-in). 
+- An [etcd](https://github.com/coreos/etcd/releases/) cluster that
+  supports the etcdv2 protocol. We recommend running the latest
+  stable release of etcd v3.x. To check the version, run
+  `etcd --version`.
+
+> **NOTE**
+>
+> If any of the commands above fail when run with the `--version`
+> flag then you have an old version that doesn't support reporting
+> its version.
+>
+
+## Download and install the binary
+
+The calico-felix binary is distributed via the
+[GitHub releases page](https://github.com/projectcalico/felix/releases).
+Download the calico-felix attachment to your system, then make it executable
+and move it to a location on your path, for example:
+
+    curl -o calico-felix
+    chmod +x calico-felix
+    sudo cp calico-felix /usr/local/bin
+
+## Create a start-up script
+
+Felix should be started at boot by your init system, and the init system
+**must** be configured to restart Felix if it stops. Felix relies on
+that behaviour for certain configuration changes.
+
+If your distribution uses systemd, then you could use the following unit
+file:
+
+    [Unit]
+    Description=Calico Felix agent
+    After=syslog.target network.target
+
+    [Service]
+    User=root
+    ExecStartPre=/usr/bin/mkdir -p /var/run/calico
+    ExecStart=/usr/local/bin/calico-felix
+    KillMode=process
+    Restart=on-failure
+    LimitNOFILE=32000
+
+    [Install]
+    WantedBy=multi-user.target
+
+Or, for upstart:
+
+    description "Felix (Calico agent)"
+    author "Project Calico Maintainers "
+
+    start on stopped rc RUNLEVEL=[2345]
+    stop on runlevel [!2345]
+
+    limit nofile 32000 32000
+
+    respawn
+    respawn limit 5 10
+
+    chdir /var/run
+
+    pre-start script
+      mkdir -p /var/run/calico
+      chown root:root /var/run/calico
+    end script
+
+    exec /usr/local/bin/calico-felix
+
+## Configure Felix
+
+Optionally, you can create a file at `/etc/calico/felix.cfg` to
+configure Felix. The configuration file, as well as other options for
+configuring Felix (including environment variables), are described in
+[this]({{site.baseurl}}/{{page.version}}/reference/felix/configuration) document.
+
+If etcd is not running on the local machine, it's essential to configure
+the `EtcdAddr` or `EtcdEndpoints` setting to tell Felix how to reach
+etcd.
+
+Felix tries to detect whether IPv6 is available on your platform, but
+the detection can fail on older (or more unusual) systems. If Felix
+exits soon after startup with `ipset` or `iptables` errors, try
+setting the `Ipv6Support` setting to `false`.
+
+## Start Felix
+
+Once you've configured Felix, start it up via your init system.
+
+For systemd, with the above unit file installed, you could run:
+
+    systemctl start calico-felix
+
+For upstart:
+
+    start calico-felix
+
+## Running Felix manually
+
+For debugging, it's sometimes useful to run Felix manually and tell it
+to emit its logs to screen. You can do that with the following command:
+
+    FELIX_LOGSEVERITYSCREEN=INFO /usr/local/bin/calico-felix
diff --git a/v2.4/getting-started/bare-metal/bare-metal.md b/v2.4/getting-started/bare-metal/bare-metal.md
new file mode 100644
index 00000000000..fdb053750f4
--- /dev/null
+++ b/v2.4/getting-started/bare-metal/bare-metal.md
@@ -0,0 +1,431 @@
+---
+title: Using Calico to Secure Host Interfaces
+---
+
+This guide describes how to use Calico to secure the network interfaces
+of the host itself (as opposed to those of any container/VM workloads
+that are present on the host). We call such interfaces "host endpoints",
+to distinguish them from "workload endpoints" (such as containers or VMs).
+
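+As a quick way to see the two kinds of endpoint side by side, you can
+list both resource types with `calicoctl`. This is a sketch only; it
+assumes `calicoctl` is already installed and configured against your
+etcd cluster, as described later in this guide:
+
+```
+# Host endpoints: host interfaces that Calico has been told to police.
+calicoctl get hostEndpoint
+
+# Workload endpoints: container/VM interfaces managed by Calico.
+calicoctl get workloadEndpoint
+```
+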
+Calico supports the same rich security policy model for host endpoints
+that it supports for workload endpoints. Host endpoints can have labels, and
+their labels are in the same "namespace" as those of workload endpoints. This
+allows security rules for either type of endpoint to refer to the other type
+(or a mix of the two) using labels and selectors.
+
+Calico does not support setting IPs or policing MAC addresses for host
+interfaces; it assumes that the interfaces are configured by the
+underlying network fabric.
+
+Calico distinguishes workload endpoints from host endpoints by a configurable
+prefix. Unless you happen to have host interfaces whose name matches the
+default for that prefix (`cali`), you won't need to change it. In case you do,
+see the `InterfacePrefix` configuration value at [Configuring
+Felix]({{site.baseurl}}/{{page.version}}/reference/felix/configuration).
+Interfaces that start with a value listed in `InterfacePrefix` are assumed to
+be workload interfaces. Others are treated as host interfaces.
+
+Calico blocks all traffic to/from workload interfaces by default,
+allowing traffic only if the interface is known and policy is in place.
+However, for host endpoints, Calico is more lenient; it only polices
+traffic to/from interfaces that it's been explicitly told about. Traffic
+to/from other interfaces is left alone.
+
+As of Calico v2.1.0, Calico applies host endpoint security policy both to traffic
+that is terminated locally and to traffic that is forwarded between host
+endpoints. Previously, policy was only applied to traffic that was terminated
+locally. The change allows Calico to be used to secure a NAT gateway or router.
+Calico supports selector-based policy as normal when running on a gateway or router,
+allowing for rich, dynamic security policy based on the labels attached to your
+workloads.
+
+> **NOTE**
+>
+> If you have a host with workloads on it then traffic that is forwarded to
+> workloads bypasses the policy applied to host endpoints. If that weren't the
+> case, the host endpoint policy would need to be very broad to allow all
+> traffic destined for any possible workload.
+>
+> Since version 2.1.0, Calico applies host endpoint policy to traffic that is
+> being forwarded between host interfaces.
+>
+> ![]({{site.baseurl}}/images/bare-metal-packet-flows.png)
+
+## Installation overview
+
+To make use of Calico's host endpoint support, you will need to follow
+these steps, described in more detail below:
+
+- download the calicoctl binary
+- create an etcd cluster, if you haven't already
+- install Calico's Felix daemon on each host
+- initialize the etcd database
+- add policy to allow basic connectivity and Calico function
+- create host endpoint objects in etcd for each interface you want
+  Calico to police (in a later release, we plan to support interface
+  templates to remove the need to explicitly configure
+  every interface)
+- insert policy into etcd for Calico to apply
+- decide whether to disable "failsafe SSH/etcd" access.
+
+### Download the calicoctl binary
+
+Download the calicoctl binary onto your host.
+
+    wget {{site.data.versions[page.version].first.components.calicoctl.download_url}}
+    chmod +x calicoctl
+
+This binary should be placed in your `$PATH` so it can be run from any
+directory.
+
+## Creating an etcd cluster
+
+If you haven't already created an etcd cluster for your Calico
+deployment, you'll need to create one.
+
+To create a single-node etcd cluster for testing, download an etcd v3.x
+release from [the etcd releases archive](https://github.com/coreos/etcd/releases); we recommend using
+the most recent bugfix release. Then follow the instructions on that
+page to unpack and run the etcd binary.
+
+To create a production cluster, you should follow the guidance in the
+[etcd manual](https://coreos.com/etcd/docs/latest/); in particular, see the
+[clustering guide](https://coreos.com/etcd/docs/latest/).
+
+## Installing Felix
+
+{% include ppa_repo_name %}
+
+There are several ways to install Felix.
+
+- if you are running Ubuntu 14.04 or 16.04, you can install from our PPA:
+
+      sudo add-apt-repository ppa:project-calico/{{ ppa_repo_name }}
+      sudo apt-get update
+      sudo apt-get upgrade
+      sudo apt-get install calico-felix
+
+- if you are running a RedHat 7-derived distribution, you can install
+  from our RPM repository:
+
+      cat > /etc/yum.repos.d/calico.repo <
+EOF
+```
+
+If you check the felix logfile after this step, the logs should
+transition from periodic notifications that felix is in state
+"wait-for-ready" to a stream of initialisation messages.
+
+## Creating basic connectivity and Calico policy
+
+When a host endpoint is added, if there is no security policy for that
+endpoint, Calico will default to denying traffic to/from that endpoint,
+except for traffic that is allowed by the [failsafe rules](#failsafe-rules).
+
+While the [failsafe rules](#failsafe-rules) provide protection against removing all
+connectivity to a host,
+
+- they are overly broad in allowing inbound SSH on any interface and
+  allowing traffic out to etcd's ports on any interface
+- depending on your network, they may not cover all the ports that are
+  required; for example, your network may rely on allowing ICMP,
+  or DHCP.
+
+Therefore, we recommend creating a failsafe Calico security policy that
+is tailored to your environment. The command below shows one way you
+might do that; it uses `calicoctl` to:
+
+- Create a single policy resource, which
+  - applies to all known endpoints
+  - allows inbound SSH access from a defined "management" subnet
+  - allows outbound connectivity to etcd on a particular IP; if
+    you have multiple etcd servers you should duplicate the rule
+    for each destination
+  - allows inbound ICMP
+  - allows outbound UDP on port 67, for DHCP.
+
+When running this command, replace the placeholders in angle brackets with
+appropriate values for your deployment.
+
+```
+cat << EOF | calicoctl create -f -
+- apiVersion: v1
+  kind: policy
+  metadata:
+    name: failsafe
+  spec:
+    selector: "all()"
+    order: 0
+    ingress:
+    - action: allow
+      protocol: tcp
+      source:
+        nets:
+        - "<management-cidr>"
+      destination:
+        ports: [22]
+    - action: allow
+      protocol: icmp
+    egress:
+    - action: allow
+      protocol: tcp
+      destination:
+        nets:
+        - "<etcd-ip>/32"
+        ports: [<etcd-ports>]
+    - action: allow
+      protocol: udp
+      destination:
+        ports: [67]
+EOF
+```
+
+Once you have such a policy in place, you may want to disable the
+[failsafe rules](#failsafe-rules).
+
+> **NOTE**
+>
+> Packets that reach the end of the list of rules fall through to the next policy (sorted by the order field).
+>
+> The selector in the policy, `all()`, will match *all* endpoints,
+> including any workload endpoints. If you have workload endpoints as
+> well as host endpoints then you may wish to use a more restrictive
+> selector.
+> For example, you could label management interfaces with
+> label `endpoint_type = management` and then use selector
+> `endpoint_type == "management"`.
+>
+> If you are using Calico for networking workloads, you should add
+> inbound and outbound rules to allow BGP: add an ingress and egress rule
+> to allow TCP traffic to destination port 179.
+
+## Creating host endpoint objects
+
+For each host endpoint that you want Calico to secure, you'll need to
+create a host endpoint object in etcd. Use the `calicoctl create` command
+to create a host endpoint resource (hostEndpoint).
+
+There are two ways to specify the interface that a host endpoint should
+refer to. You can either specify the name of the interface or its
+expected IP address. In either case, you'll also need to know the name given to
+the Calico node running on the host that owns the interface; in most cases this
+will be the same as the hostname of the host.
+
+For example, to secure the interface named `eth0` with IP 10.0.0.1 on
+host `my-host`, run the command below. The name of the endpoint is an
+arbitrary name required for endpoint identification.
+
+When running this command, replace the placeholders in angle brackets with
+appropriate values for your deployment.
+
+```
+cat << EOF | calicoctl create -f -
+- apiVersion: v1
+  kind: hostEndpoint
+  metadata:
+    name: <name>
+    node: <node>
+    labels:
+      role: webserver
+      environment: production
+  spec:
+    interfaceName: eth0
+    profiles: [<profiles>]
+    expectedIPs: ["10.0.0.1"]
+EOF
+```
+
+> **NOTE**
+>
+> Felix tries to detect the correct hostname for a system. It logs
+> out the value it has determined at start-of-day in the following
+> format:
+>
+> `2015-10-20 17:42:09,813 \[INFO\]\[30149/5\] calico.felix.config 285: Parameter FelixHostname (Felix compute host hostname) has value 'my-hostname' read from None`
+>
+> The value (in this case "my-hostname") needs to match the hostname
+> used in etcd. Ideally, the host's system hostname should be set
+> correctly, but if that's not possible, the Felix value can be
+> overridden with the FelixHostname configuration setting. See
+> configuration for more details.
+
+Where `<profiles>` is an optional list of security profiles
+to apply to the endpoint and `labels` contains a set of arbitrary
+key/value pairs that can be used in selector expressions.
+
+> **Warning**
+>
+> When rendering security rules on other hosts, Calico uses the
+> `expectedIPs` field to resolve label selectors
+> to IP addresses. If the `expectedIPs` field is omitted
+> then security rules that use labels will fail to match
+> this endpoint.
+
+Or, if you knew that the IP address should be 10.0.0.1, but not the name
+of the interface:
+
+```
+cat << EOF | calicoctl create -f -
+- apiVersion: v1
+  kind: hostEndpoint
+  metadata:
+    name: <name>
+    node: <node>
+    labels:
+      role: webserver
+      environment: production
+  spec:
+    profiles: [<profiles>]
+    expectedIPs: ["10.0.0.1"]
+EOF
+```
+
+After you create a host endpoint object, Felix will start policing
+traffic to/from that interface. If you have no policy or profiles in
+place, then you should see traffic being dropped on the interface.
+
+> **NOTE**
+>
+> By default, Calico has a failsafe in place that whitelists certain
+> traffic such as SSH. See below for more details on
+> disabling/configuring the failsafe rules.
+>
+
+If you don't see traffic being dropped, check the hostname, IP address
+and (if used) the interface name in the configuration.
+If there is something wrong with the endpoint data, Felix will log a
+validation error at `WARNING` level and it will ignore the endpoint:
+
+    $ grep "Validation failed" /var/log/calico/felix.log
+    2016-05-31 12:16:21,651 [WARNING][8657/3] calico.felix.fetcd 1017:
+    Validation failed for host endpoint HostEndpointId, treating as
+    missing: 'name' or 'expected_ipvX_addrs' must be present.;
+    '{ "labels": {"foo": "bar"}, "profile_ids": ["prof1"]}'
+
+The error can be quite long, but it should log the precise cause of the
+rejection; in this case "'name' or 'expected\_ipvX\_addrs' must be
+present" tells us that either the interface's name or its expected IP
+address must be specified.
+
+## Creating more security policy
+
+We recommend using selector-based security policy with
+bare-metal workloads. This allows ordered policy to be applied to
+endpoints that match particular label selectors.
+
+For example, you could add a second policy for webserver access:
+
+```
+cat << EOF | calicoctl create -f -
+- apiVersion: v1
+  kind: policy
+  metadata:
+    name: webserver
+  spec:
+    selector: "role==\"webserver\""
+    order: 100
+    ingress:
+    - action: allow
+      protocol: tcp
+      destination:
+        ports: [80]
+    egress:
+    - action: allow
+EOF
+```
+
+## Failsafe rules
+
+To avoid completely cutting off a host via incorrect or malformed
+policy, Calico has a failsafe mechanism that keeps various pinholes open
+in the firewall.
+
+By default, Calico keeps port 22 inbound open on *all* host endpoints,
+which allows access to SSH, as well as outbound communication to ports
+2379, 2380, 4001 and 7001, which allows access to etcd's default ports.
+
+The lists of failsafe ports can be configured via the configuration parameters
+described in [Configuring
+Felix]({{site.baseurl}}/{{page.version}}/reference/felix/configuration). They
+can be disabled by setting each configuration value to "none".
+
+> **WARNING**
+>
+> Removing the inbound failsafe rules can leave a host inaccessible.
+>
+> Removing the outbound failsafe rules can leave Felix unable to connect
+> to etcd.
+>
+> Before disabling the failsafe rules, we recommend creating a policy to
+> replace them with more-specific rules for your environment: see [above](#creating-basic-connectivity-and-calico-policy).
+
+## Untracked policy
+
+Policy for host endpoints can be marked as 'doNotTrack'. This means that rules
+in that policy should be applied before any data plane connection tracking, and
+that packets allowed by these rules should not be tracked.
+
+A typical scenario for using 'doNotTrack' policy would be a server, running
+directly on a host, that accepts a very high rate of short-lived connections,
+such as `memcached`. On Linux, if those connections are tracked, the conntrack
+table can fill up and then Linux may drop packets for further connection
+attempts, meaning that those newer connections will fail. If you are using
+Calico to secure that server's host, you can avoid this problem by defining a
+policy that allows access to the server's ports and is marked as 'doNotTrack'.
+
+Since there is no connection tracking for a 'doNotTrack' policy, it is
+important that the policy's ingress and egress rules are specified
+symmetrically. For example, for a server on port 999, the policy must include
+an ingress rule allowing access *to* port 999 and an egress rule allowing
+outbound traffic *from* port 999.
diff --git a/v2.4/getting-started/docker/index.md b/v2.4/getting-started/docker/index.md
new file mode 100644
index 00000000000..c1f5aec5026
--- /dev/null
+++ b/v2.4/getting-started/docker/index.md
@@ -0,0 +1,57 @@
+---
+title: Calico with Docker
+---
+
+Calico implements a Docker network plugin that can be used to provide routing and advanced network policy for Docker containers.
+
+Use the navigation bar on the left to view information on Calico for Docker,
+or continue reading for an overview of recommended guides to get started.
+
+
+## Installation
+
+#### [Requirements](installation/requirements)
+
+Information on running etcd and configuring Docker for multi-host networking.
+
+#### [Installation Guide]({{site.baseurl}}/{{page.version}}/getting-started/docker/installation/manual)
+
+Learn the two-step process for launching Calico for Docker.
+
+## Quickstart with "Calico-Ready" Clusters
+
+#### [Vagrant/VirtualBox: Container Linux by CoreOS](installation/vagrant-coreos)
+
+Follow this guide to launch a local 2-node CoreOS Container Linux cluster with everything
+you need to install and use Calico.
+
+#### [Vagrant/VirtualBox: Ubuntu](installation/vagrant-ubuntu)
+
+Follow this guide to launch a local 2-node Ubuntu cluster with everything
+you need to install and use Calico.
+
+## Tutorials
+
+#### [Security using Calico Profiles]({{site.baseurl}}/{{page.version}}/getting-started/docker/tutorials/security-using-calico-profiles)
+
+The above guide demonstrates Calico connectivity across hosts, and how to limit
+that connectivity using simple Calico profiles. One profile is created for
+each network and the connectivity is defined as policy on each profile.
+
+#### [Security using Calico Profiles and Policy]({{site.baseurl}}/{{page.version}}/getting-started/docker/tutorials/security-using-calico-profiles-and-policy)
+
+The above guide digs deeper into advanced policy configurations for workloads.
+There is still one profile created for each network, but now the profiles define
+labels that are inherited by each container added to the network. The policy uses
+the labels in selectors to configure connectivity.
+
+#### [Security using Docker Labels and Calico Policy]({{site.baseurl}}/{{page.version}}/getting-started/docker/tutorials/security-using-docker-labels-and-calico-policy)
+
+The above guide demonstrates Calico connectivity between containers without using
+profiles at all. Instead, Calico policies are defined which apply to
+containers depending on the labels assigned to them at runtime. This allows
+policy adjustment at the container level rather than at the network level.
+
+#### [IPAM]({{site.baseurl}}/{{page.version}}/getting-started/docker/tutorials/ipam)
+
+This guide walks through configuring a Docker network for use with Calico, and
+how to statically assign IP addresses from that network.
diff --git a/v2.4/getting-started/docker/installation/manual.md b/v2.4/getting-started/docker/installation/manual.md
new file mode 100644
index 00000000000..d58ea2d5b5d
--- /dev/null
+++ b/v2.4/getting-started/docker/installation/manual.md
@@ -0,0 +1,71 @@
+---
+title: Installing Calico for Docker
+---
+
+Calico runs as a Docker container on each host. The `calicoctl` command line
+tool can be used to launch the `calico/node` container.
+
+> Before following the steps here, ensure that you have satisfied these
+[requirements]({{site.baseurl}}/{{page.version}}/getting-started/docker/installation/requirements).
+
+## Using calicoctl
+
+1. Download the calicoctl binary:
+
+   ```
+   sudo wget -O /usr/local/bin/calicoctl {{site.data.versions[page.version].first.components.calicoctl.download_url}}
+   sudo chmod +x /usr/local/bin/calicoctl
+   ```
+
+2. Configure access to your etcd cluster; see [calicoctl - etcd datastore]({{site.baseurl}}/{{page.version}}/reference/calicoctl/setup/etcdv2).
+3. Launch `calico/node`:
+
+   ```
+   sudo calicoctl node run --node-image=quay.io/calico/node:{{site.data.versions[page.version].first.title}}
+   ```
+
+Check that `calico/node` is now running:
+
+```
+vagrant@calico-01:~$ docker ps
+CONTAINER ID        IMAGE                                COMMAND        CREATED           STATUS           PORTS   NAMES
+408bd2b9ba53        quay.io/calico/node:{{site.data.versions[page.version].first.title}}   "start_runit"  About an hour ago Up About an hour         calico-node
+```
+
+Furthermore, check that the `calico/node` container is functioning properly
+with the following command:
+
+```
+sudo calicoctl node status
+```
+
+## Using "docker run"
+
+For more control over the Calico startup process, and to simplify binding
+startup to an init system, `calicoctl` can print the command it uses
+to launch `calico/node`.
+
+To print the command `calicoctl node run` uses to launch Calico on this host,
+run the command with the `--init-system` and `--dryrun` flags:
+
+```
+$ calicoctl node run --init-system --dryrun --node-image=quay.io/calico/node:{{site.data.versions[page.version].first.title}}
+Use the following command to start the calico/node container:
+
+docker run --net=host --privileged --name=calico-node --rm -e ETCD_AUTHORITY=127.0.0.1:2379 -e ETCD_SCHEME=http -e ETCD_ENDPOINTS= -e NODENAME=calico -e CALICO_NETWORKING_BACKEND=bird -e NO_DEFAULT_POOLS= -e CALICO_LIBNETWORK_ENABLED=true -e CALICO_LIBNETWORK_IFPREFIX=cali -v /var/run/calico:/var/run/calico -v /lib/modules:/lib/modules -v /var/log/calico:/var/log/calico -v /run/docker/plugins:/run/docker/plugins -v /var/run/docker.sock:/var/run/docker.sock quay.io/calico/node:{{site.data.versions[page.version].first.title}}

Use the following command to stop the calico/node container:

docker stop calico-node
+```
+
+Pair the printed command with your favorite init system to ensure Calico is
+always running on each host.
+
+See [additional information on binding to an init system]({{site.baseurl}}/{{page.version}}/usage/configuration/as-service).
+
+## Next Steps
+
+With `calico/node` running, you are ready to start using Calico by following
+[Security using Calico Profiles]({{site.baseurl}}/{{page.version}}/getting-started/docker/tutorials/security-using-calico-profiles).
diff --git a/v2.4/getting-started/docker/installation/requirements.md b/v2.4/getting-started/docker/installation/requirements.md
new file mode 100644
index 00000000000..be2be06fd1e
--- /dev/null
+++ b/v2.4/getting-started/docker/installation/requirements.md
@@ -0,0 +1,44 @@
+---
+title: Requirements
+---
+
+The following information details basic prerequisites that must be met
+in order for Calico to function properly with Docker.
+
+### Host IP Connectivity
+
+As with all Calico clusters, all hosts should have IP connectivity between them.
+
+### etcd
+
+You will also need an etcd cluster accessible from each host, which Calico
+uses for coordinating state between the nodes. See the [etcd documentation][etcd]
+for details on setting up a cluster.
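+
+For a quick test environment (not suitable for production), one way to get a
+single-node etcd running is to download a release tarball and start it in the
+background, as the Vagrant tutorials in this section do; `<host-ip>` below is
+a placeholder for your host's address:
+
+```
+curl -L --silent https://github.com/coreos/etcd/releases/download/v2.2.0/etcd-v2.2.0-linux-amd64.tar.gz -o etcd-v2.2.0-linux-amd64.tar.gz
+tar xzvf etcd-v2.2.0-linux-amd64.tar.gz
+nohup etcd-v2.2.0-linux-amd64/etcd --addr=<host-ip>:2379 > etcd.log &
+```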
+
+### Docker with Multi-host Networking
+
+Each server should have Docker 1.9 or greater installed.
+Follow the [instructions for installing Docker][docker].
+
+To use Calico as a Docker network plugin, the Docker daemon must be configured
+with a cluster store. If using etcd as a cluster store,
+configure the `cluster-store` on the Docker daemon to `etcd://<etcd-ip>:<etcd-port>`,
+replacing `<etcd-ip>` and `<etcd-port>` with the appropriate address and client
+port for your etcd cluster. If your etcd is configured with TLS then you must
+also [configure the Docker daemon][daemon-cert-config] with the correct
+certificates to allow access.
+
+> For Docker 1.10+, you can use the [daemon configuration file][daemon-config-file],
+> or for 1.9 see the appropriate 'Configuring Docker' section in [configuring docker][configuring-docker-1.9].
+
+## Next Steps
+
+With etcd running and Docker configured, you are ready to
+[install Calico](manual).
+
+
+[etcd]: https://coreos.com/etcd/docs/latest/
+[docker]: https://docs.docker.com/engine/installation/
+[daemon-config-file]: https://docs.docker.com/engine/reference/commandline/dockerd/#/daemon-configuration-file
+[daemon-cert-config]: https://docs.docker.com/engine/reference/commandline/dockerd/#nodes-discovery
+[configuring-docker-1.9]: https://docs.docker.com/v1.9/engine/articles/configuring/
diff --git a/v2.4/getting-started/docker/installation/vagrant-coreos/Vagrantfile b/v2.4/getting-started/docker/installation/vagrant-coreos/Vagrantfile
new file mode 100644
index 00000000000..fb287f50ab0
--- /dev/null
+++ b/v2.4/getting-started/docker/installation/vagrant-coreos/Vagrantfile
@@ -0,0 +1,75 @@
+---
+layout: null
+---
+# -*- mode: ruby -*-
+# # vi: set ft=ruby :
+
+# To skip Docker pre-load of calico/node and busybox, run vagrant up with:
+#   vagrant up --provision-with file,shell
+
+# The version of the calico docker images to install. This is used to pre-load
+# the calico/node image, which slows down the install process but speeds up
+# the tutorial.
+#
+# This version should match the version required by calicoctl installed in the
+# cloud config files.
+calico_node_ver = "{{site.data.versions[page.version].first.title}}"
+
+# Size of the cluster created by Vagrant
+num_instances=2
+
+# Change basename of the VM
+instance_name_prefix="calico"
+
+# Official CoreOS channel from which updates should be downloaded
+update_channel='stable'
+
+Vagrant.configure("2") do |config|
+  # always use Vagrant's insecure key
+  config.ssh.insert_key = false
+
+  config.vm.box = "coreos-%s" % update_channel
+  config.vm.box_version = ">= 1122.0.0"
+  config.vm.box_url = "http://%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json" % update_channel
+
+  config.vm.provider :virtualbox do |v|
+    # On VirtualBox, we don't have guest additions or a functional vboxsf
+    # in CoreOS Container Linux, so tell Vagrant that so it can be smarter.
+    v.check_guest_additions = false
+    v.functional_vboxsf = false
+  end
+
+  # Work around a conflict with the vagrant-vbguest plugin.
+  if Vagrant.has_plugin?("vagrant-vbguest") then
+    config.vbguest.auto_update = false
+  end
+
+  # Set up each box
+  (1..num_instances).each do |i|
+    vm_name = "%s-%02d" % [instance_name_prefix, i]
+    config.vm.define vm_name do |host|
+      host.vm.hostname = vm_name
+
+      ip = "172.17.8.#{i+100}"
+      host.vm.network :private_network, ip: ip
+      # Workaround VirtualBox issue where eth1 has 2 IP addresses at startup
+      host.vm.provision :shell, :inline => "sudo /usr/bin/ip addr flush dev eth1"
+      host.vm.provision :shell, :inline => "sudo /usr/bin/ip addr add #{ip}/24 dev eth1"
+
+      # Pre-load the calico/node image. This slows down the vagrant up
+      # command, but speeds up the actual tutorial.
+      host.vm.provision :docker, images: ["quay.io/calico/node:#{calico_node_ver}", "busybox:latest"]
+
+      # Use a different cloud-init on the first server.
+      if i == 1
+        host.vm.provision :file, :source => "user-data-first", :destination => "/tmp/vagrantfile-user-data"
+        host.vm.provision :shell, :inline => "mv /tmp/vagrantfile-user-data /var/lib/coreos-vagrant/", :privileged => true
+      else
+        host.vm.provision :file, :source => "user-data-others", :destination => "/tmp/vagrantfile-user-data"
+        host.vm.provision :shell, :inline => "mv /tmp/vagrantfile-user-data /var/lib/coreos-vagrant/", :privileged => true
+      end
+
+      config.vm.post_up_message = "Vagrant has finished but cloud-init might still be executing.
+      Check the progress using systemctl status -f"
+    end
+  end
end
diff --git a/v2.4/getting-started/docker/installation/vagrant-coreos/index.md b/v2.4/getting-started/docker/installation/vagrant-coreos/index.md
new file mode 100644
index 00000000000..109134dff45
--- /dev/null
+++ b/v2.4/getting-started/docker/installation/vagrant-coreos/index.md
@@ -0,0 +1,92 @@
+---
+title: Running the Calico tutorials on CoreOS Container Linux using Vagrant and VirtualBox
+---
+
+These instructions allow you to set up a CoreOS Container Linux cluster ready to network Docker containers with
+Calico, using Vagrant.
+
+## 1. Streamlined setup of the VMs
+
+### 1.1 Install dependencies
+
+* [VirtualBox][virtualbox] 5.1.8 or greater.
+* [Vagrant][vagrant] 1.8.5 or greater.
+* [Curl][curl]
+
+### 1.2 Download the source files
+
+    mkdir demo; cd demo
+    curl -O {{site.url}}{{page.dir}}Vagrantfile
+    curl -O {{site.url}}{{page.dir}}user-data-first
+    curl -O {{site.url}}{{page.dir}}user-data-others
+
+### 1.3 Startup and SSH
+
+Run the following:
+
+    vagrant up
+
+To connect to your servers:
+
+* Linux/Mac OS X
+    * run `vagrant ssh <hostname>`
+* Windows
+    * Follow instructions from https://github.com/nickryand/vagrant-multi-putty
+    * run `vagrant putty <hostname>`
+
+### 1.4 Verify environment
+
+You should now have two CoreOS Container Linux servers, with a single-node etcd
+cluster running on the first server. The servers are named calico-01 and
+calico-02 and have IP addresses 172.17.8.101 and 172.17.8.102.
+
+At this point, it's worth checking that your servers can ping each other.
+
+From calico-01
+
+    ping 172.17.8.102
+
+From calico-02
+
+    ping 172.17.8.101
+
+If you see ping failures, the likely culprit is a problem with the VirtualBox network between the VMs. You should
+check that each host is connected to the same virtual network adapter in VirtualBox; rebooting the host may also
+help. Remember to shut down the VMs with `vagrant halt` before you reboot.
+
+You should also verify each host can access etcd. The following will
+return an error if etcd is not available.
+ + curl -L http://172.17.8.101:2379/version + +And finally check that Docker is running on both hosts by running + + docker ps + +## 2. Install Calico + +With your VMs running, and connectivity between them established, +it is time to launch `calico/node`. + +The Vagrant machines already have `calicoctl` installed. Use it to launch `calico/node`: + + sudo ETCD_ENDPOINTS=http://172.17.8.101:2379 calicoctl node run --node-image=quay.io/calico/node:{{site.data.versions[page.version].first.title}} + +This will start the `calico/node` container on this host. Check it is running: + + docker ps + +You should see output like this on each node + + vagrant@calico-01:~$ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 408bd2b9ba53 quay.io/calico/node:{{site.data.versions[page.version].first.title}} "start_runit" About an hour ago Up About an hour calico-node + +## Next Steps + +Now that you have a basic two node CoreOS Container Linux cluster setup, see +[Security using Calico Profiles]({{site.baseurl}}/{{page.version}}/getting-started/docker/tutorials/security-using-calico-profiles) +for a simple security walkthrough. + +[virtualbox]: https://www.virtualbox.org/ +[vagrant]: https://www.vagrantup.com/downloads.html +[using-coreos]: http://coreos.com/using-coreos/ +[curl]: https://curl.haxx.se/ diff --git a/v2.4/getting-started/docker/installation/vagrant-coreos/user-data-first b/v2.4/getting-started/docker/installation/vagrant-coreos/user-data-first new file mode 100644 index 00000000000..ab67261c12c --- /dev/null +++ b/v2.4/getting-started/docker/installation/vagrant-coreos/user-data-first @@ -0,0 +1,83 @@ +--- +layout: null +--- +#cloud-config + +coreos: + update: + reboot-strategy: 'off' + etcd2: + advertise-client-urls: http://$private_ipv4:2379 + listen-client-urls: http://0.0.0.0:2379 + fleet: + public-ip: $public_ipv4 + etcd_servers: http://$private_ipv4:2379 + units: + - name: etcd2.service + command: start + - name: fleet.service + command: start + - name: download-reqs.service + command: start + content: |- + [Unit] + Description=Download and unpack the prereqs + Wants=network-online.target + After=network-online.target + + [Service] + RemainAfterExit=yes + Type=oneshot + ExecStart=/home/core/add_path.sh + ExecStart=/home/core/get_calicoctl.sh + - name: docker.service + command: restart + content: |- + [Unit] + Description=Docker Application Container Engine + Documentation=http://docs.docker.com + After=containerd.service docker.socket network.target download-reqs.service + Requires=containerd.service docker.socket download-reqs.service + + [Service] + Type=notify + + # the default is not to use systemd for cgroups because the delegate issues still + # exists and systemd currently does not support the cgroup feature set required + # for containers run by docker + ExecStart=/usr/lib/coreos/dockerd --cluster-store=etcd://172.17.8.101:2379 --host=fd:// --containerd=/var/run/docker/libcontainerd/docker-containerd.sock $DOCKER_OPTS $DOCKER_CGROUPS $DOCKER_OPT_BIP $DOCKER_OPT_MTU $DOCKER_OPT_IPMASQ + ExecReload=/bin/kill -s HUP $MAINPID + LimitNOFILE=1048576 + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNPROC=infinity + LimitCORE=infinity + # Uncomment TasksMax if your systemd version supports it. + # Only systemd 226 and above support this version. 
+ TasksMax=infinity + TimeoutStartSec=0 + # set delegate yes so that systemd does not reset the cgroups of docker containers + Delegate=yes + + [Install] + WantedBy=multi-user.target +write_files: +- path: /home/core/add_path.sh + permissions: 777 + owner: root + content: | + #!/usr/bin/bash -e + # Add /opt/bin to the _front_ of the PATH. + # Can't directly write to .profile since it's a symlink to a RO filesystem + mkdir -p /opt/bin + rm /home/core/.bashrc + echo 'PATH=$PATH:/opt/bin' > /home/core/.bashrc + echo 'export ETCD_ENDPOINTS="http://$private_ipv4:2379"' >> /home/core/.bashrc + echo 'Defaults env_keep +="ETCD_ENDPOINTS"' >>/etc/sudoers.d/etcd +- path: /home/core/get_calicoctl.sh + permissions: 777 + owner: root + content: | + #!/usr/bin/bash -e + wget -O /opt/bin/calicoctl {{site.data.versions[page.version].first.components.calicoctl.download_url}} + chmod +x /opt/bin/calicoctl diff --git a/v2.4/getting-started/docker/installation/vagrant-coreos/user-data-others b/v2.4/getting-started/docker/installation/vagrant-coreos/user-data-others new file mode 100644 index 00000000000..b8e182ca81f --- /dev/null +++ b/v2.4/getting-started/docker/installation/vagrant-coreos/user-data-others @@ -0,0 +1,78 @@ +--- +layout: null +--- +#cloud-config + +coreos: + update: + reboot-strategy: 'off' + fleet: + public-ip: $public_ipv4 + etcd_servers: http://172.17.8.101:2379 + units: + - name: fleet.service + command: start + - name: download-reqs.service + command: start + content: |- + [Unit] + Description=Download and unpack the prereqs + Wants=network-online.target + After=network-online.target + + [Service] + RemainAfterExit=yes + Type=oneshot + ExecStart=/home/core/add_path.sh + ExecStart=/home/core/get_calicoctl.sh + - name: docker.service + command: restart + content: |- + [Unit] + Description=Docker Application Container Engine + Documentation=http://docs.docker.com + After=containerd.service docker.socket network.target download-reqs.service + Requires=containerd.service docker.socket download-reqs.service + + [Service] + Type=notify + + # the default is not to use systemd for cgroups because the delegate issues still + # exists and systemd currently does not support the cgroup feature set required + # for containers run by docker + ExecStart=/usr/lib/coreos/dockerd --cluster-store=etcd://172.17.8.101:2379 --host=fd:// --containerd=/var/run/docker/libcontainerd/docker-containerd.sock $DOCKER_OPTS $DOCKER_CGROUPS $DOCKER_OPT_BIP $DOCKER_OPT_MTU $DOCKER_OPT_IPMASQ + ExecReload=/bin/kill -s HUP $MAINPID + LimitNOFILE=1048576 + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNPROC=infinity + LimitCORE=infinity + # Uncomment TasksMax if your systemd version supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + TimeoutStartSec=0 + # set delegate yes so that systemd does not reset the cgroups of docker containers + Delegate=yes + + [Install] + WantedBy=multi-user.target +write_files: +- path: /home/core/add_path.sh + permissions: 777 + owner: root + content: | + #!/usr/bin/bash -e + # Add /opt/bin to the _front_ of the PATH. 
+ # Can't directly write to .profile since it's a symlink to a RO filesystem + mkdir -p /opt/bin + rm /home/core/.bashrc + echo 'PATH=$PATH:/opt/bin' > /home/core/.bashrc + echo 'export ETCD_ENDPOINTS="http://172.17.8.101:2379"' >> /home/core/.bashrc + echo 'Defaults env_keep +="ETCD_ENDPOINTS"' >>/etc/sudoers.d/etcd +- path: /home/core/get_calicoctl.sh + permissions: 777 + owner: root + content: | + #!/usr/bin/bash -e + wget -O /opt/bin/calicoctl {{site.data.versions[page.version].first.components.calicoctl.download_url}} + chmod +x /opt/bin/calicoctl diff --git a/v2.4/getting-started/docker/installation/vagrant-ubuntu/Vagrantfile b/v2.4/getting-started/docker/installation/vagrant-ubuntu/Vagrantfile new file mode 100644 index 00000000000..cba98c16ab3 --- /dev/null +++ b/v2.4/getting-started/docker/installation/vagrant-ubuntu/Vagrantfile @@ -0,0 +1,89 @@ +--- +layout: null +--- +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# The calicoctl download URL. +calicoctl_url = "{{site.data.versions[page.version].first.components.calicoctl.download_url}}" + +# The version of the calico docker images to install. This is used to pre-load +# the calico/node image which slows down the install process, but speeds up the tutorial. +# +# This version should match the version required by calicoctl installed from +# calicoctl_url. +calico_node_ver = "{{site.data.versions[page.version].first.title}}" + +# Size of the cluster created by Vagrant +num_instances=2 + +# Change basename of the VM +instance_name_prefix="calico" + +# The IP address of the first server +primary_ip = "172.17.8.101" + +Vagrant.configure(2) do |config| + # always use Vagrants insecure key + config.ssh.insert_key = true + config.ssh.username = "vagrant" + + # Use Bento Ubuntu 16.04 box (officially-recommended box by Vagrant) + # https://www.vagrantup.com/boxes.html + config.vm.box = "bento/ubuntu-16.04" + + # Workaround 16.04 issue with Virtualbox where Box waits 5 minutes to start + # if network "cable" is not connected: https://github.com/chef/bento/issues/682 + config.vm.provider "virtualbox" do |vb| + vb.customize ["modifyvm", :id, "--cableconnected1", "on"] + end + + # Set up each box + (1..num_instances).each do |i| + vm_name = "%s-%02d" % [instance_name_prefix, i] + config.vm.define vm_name do |host| + host.vm.hostname = vm_name + + ip = "172.17.8.#{i+100}" + host.vm.network :private_network, ip: ip + + # Fix stdin: is not a tty error (http://foo-o-rama.com/vagrant--stdin-is-not-a-tty--fix.html) + config.vm.provision "fix-no-tty", type: "shell" do |s| + s.privileged = false + s.inline = "sudo sed -i '/tty/!s/mesg n/tty -s \\&\\& mesg n/' /root/.profile" + end + + # The docker provisioner installs docker. + host.vm.provision :docker, images: [ + "busybox:latest", + "quay.io/calico/node:#{calico_node_ver}" + ] + + # Calico uses etcd for calico and docker clustering. Install it on the first host only. + if i == 1 + # Download etcd and start. + host.vm.provision :shell, inline: <<-SHELL + # sudo apt-get install -y unzip + curl -L --silent https://github.com/coreos/etcd/releases/download/v2.2.0/etcd-v2.2.0-linux-amd64.tar.gz -o etcd-v2.2.0-linux-amd64.tar.gz + tar xzvf etcd-v2.2.0-linux-amd64.tar.gz + nohup etcd-v2.2.0-linux-amd64/etcd --addr=#{primary_ip}:2379 > etcd.log & + SHELL + end + + # Set Docker to use etcd for multihost, then reload systemctl and restart Docker. 
+      host.vm.provision :shell, inline: "mkdir -p /etc/systemd/system/docker.service.d/"
+      host.vm.provision :shell, inline: %Q|sudo sh -c 'printf "[Service]\nExecStart=\nExecStart=/usr/bin/dockerd -H fd:// --cluster-store=etcd://#{primary_ip}:2379" > /etc/systemd/system/docker.service.d/10-execstart.conf'|
+      host.vm.provision :shell, inline: "systemctl daemon-reload"
+      host.vm.provision :shell, inline: "systemctl restart docker.service"
+
+      # Download calicoctl.
+      host.vm.provision :shell, inline: "curl -L --silent #{calicoctl_url} -o /usr/local/bin/calicoctl"
+      host.vm.provision :shell, inline: "chmod +x /usr/local/bin/calicoctl"
+
+      # Ensure the vagrant and root users get the ETCD_ENDPOINTS environment.
+      host.vm.provision :shell, inline: %Q|echo 'export ETCD_ENDPOINTS="http://#{primary_ip}:2379"' >> /home/vagrant/.profile|
+      host.vm.provision :shell, inline: %Q|sudo sh -c 'echo "Defaults env_keep +=\"ETCD_ENDPOINTS\"" >>/etc/sudoers'|
+    end
+  end
+end
diff --git a/v2.4/getting-started/docker/installation/vagrant-ubuntu/index.md b/v2.4/getting-started/docker/installation/vagrant-ubuntu/index.md
new file mode 100644
index 00000000000..35278730c7b
--- /dev/null
+++ b/v2.4/getting-started/docker/installation/vagrant-ubuntu/index.md
@@ -0,0 +1,89 @@
+---
+title: Running the Calico tutorials on Ubuntu using Vagrant and VirtualBox
+---
+
+These instructions allow you to set up an Ubuntu cluster ready to network Docker containers with
+Calico, using Vagrant.
+
+## 1. Streamlined setup of the VMs
+
+### 1.1 Install dependencies
+
+* [VirtualBox][virtualbox] 5.1.8 or greater.
+* [Vagrant][vagrant] 1.8.5 or greater.
+* [Curl][curl]
+
+### 1.2 Download the source files
+
+    mkdir demo; cd demo
+    curl -O {{site.url}}{{page.dir}}Vagrantfile
+
+### 1.3 Startup and SSH
+
+Run the following:
+
+    vagrant up
+
+To connect to your servers:
+
+* Linux/Mac OS X
+    * run `vagrant ssh <hostname>`
+* Windows
+    * Follow instructions from [https://github.com/nickryand/vagrant-multi-putty](https://github.com/nickryand/vagrant-multi-putty)
+    * run `vagrant putty <hostname>`
+
+### 1.4 Verify environment
+
+You should now have two Ubuntu servers, with etcd running on the first server.
+
+At this point, it's worth checking that your servers can ping each other.
+
+From calico-01
+
+    ping 172.17.8.102
+
+From calico-02
+
+    ping 172.17.8.101
+
+If you see ping failures, the likely culprit is a problem with the VirtualBox network between the VMs. You should
+check that each host is connected to the same virtual network adapter in VirtualBox; rebooting the host may also
+help. Remember to shut down the VMs with `vagrant halt` before you reboot.
+
+You should also verify each host can access etcd. The following will return an error if etcd is not available.
+
+    curl -L http://172.17.8.101:2379/version
+
+And finally check that Docker is running on both hosts by running
+
+    docker ps
+
+## 2. Install Calico
+
+With your VMs running, and connectivity between them established,
+it is time to launch `calico/node`.
+
+The Vagrant machines already have `calicoctl` installed. Use it to launch `calico/node`:
+
+    sudo ETCD_ENDPOINTS=http://172.17.8.101:2379 calicoctl node run --node-image=quay.io/calico/node:{{site.data.versions[page.version].first.title}}
+
+This will start the `calico/node` container on this host.
Check it is running: + + docker ps + +You should see output like this on each node + + vagrant@calico-01:~$ docker ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 408bd2b9ba53 quay.io/calico/node:{{site.data.versions[page.version].first.title}} "start_runit" About an hour ago Up About an hour calico-node + +## Next Steps + +Now that you have a basic two node Ubuntu cluster setup, see +[Security using Calico Profiles]({{site.baseurl}}/{{page.version}}/getting-started/docker/tutorials/security-using-calico-profiles) + +[libnetwork]: https://github.com/docker/libnetwork +[experimental-channel]: https://github.com/docker/docker/tree/master/experimental +[virtualbox]: https://www.virtualbox.org/ +[vagrant]: https://www.vagrantup.com/downloads.html +[curl]: https://curl.haxx.se/ diff --git a/v2.4/getting-started/docker/troubleshooting.md b/v2.4/getting-started/docker/troubleshooting.md new file mode 100644 index 00000000000..9a7a4a1690e --- /dev/null +++ b/v2.4/getting-started/docker/troubleshooting.md @@ -0,0 +1,4 @@ +--- +title: Troubleshooting Calico for Docker +--- +Information coming soon! diff --git a/v2.4/getting-started/docker/tutorials/ipam.md b/v2.4/getting-started/docker/tutorials/ipam.md new file mode 100644 index 00000000000..a2405457355 --- /dev/null +++ b/v2.4/getting-started/docker/tutorials/ipam.md @@ -0,0 +1,44 @@ +--- +title: IPAM +--- + +With the release of Docker 1.10, support has been added to allow users to +select a specific IP address when creating a container. In order to use +this feature, Docker requires that you specify the `--subnet` parameter when running +`docker network create`. + +Calico requires that the passed `--subnet` value be the same CIDR as an existing +Calico IP pool. + +## Example + +#### 1. Create a Calico IP pool + +``` +cat << EOF | calicoctl create -f - +- apiVersion: v1 + kind: ipPool + metadata: + cidr: 192.0.2.0/24 +EOF +``` + +#### 2. Create a Docker network using the IP pool + +``` +docker network create --driver calico --ipam-driver calico-ipam --subnet=192.0.2.0/24 my_net +``` + +>Notice that our `--subnet` value is identical to our `cidr` above. + +#### 3. Create a container using a specific IP address from the pool + +``` +docker run --net my_net --name my_workload --ip 192.0.2.100 -tid busybox +``` + +#### 4. Verify that the IP address was assigned to the container + +``` +docker inspect -f {%raw%}'{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}'{%endraw%} my_workload +``` diff --git a/v2.4/getting-started/docker/tutorials/security-using-calico-profiles-and-policy.md b/v2.4/getting-started/docker/tutorials/security-using-calico-profiles-and-policy.md new file mode 100644 index 00000000000..a2132780de7 --- /dev/null +++ b/v2.4/getting-started/docker/tutorials/security-using-calico-profiles-and-policy.md @@ -0,0 +1,325 @@ +--- +title: Security using Calico Profiles and Policy +--- + +## Background + +With Calico as a Docker network plugin, Calico uses an identically named +[profile]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/profile) +to represent each Docker network. This profile is applied to each container +in that network and the profile is used by Calico to configure access policy +for that container. The Calico network plugin will automatically create the associated profile if it +does not exist when the container is attached to the network. 
By default, the profile contains rules that allow full
+egress traffic but allow ingress traffic only from containers within the same
+network and no other source. Custom policy for a network can be configured by
+creating in advance, or editing, the profile associated with the Docker network.
+
+There are two ways in which the policy that defines the Docker network can be modified:
+
+1. Modify the profile policy rules. This policy is applied directly to each container
+   in the associated Docker network. This approach is simple, but not very flexible,
+   as the profile must describe the full set of rules that apply to the containers in
+   the network.
+
+2. Assign labels to the profile, and define global selector-based policy. The
+   (Calico-specific) labels are assigned to containers in the associated Docker network.
+   The globally defined policy uses selectors to determine which subset of the policy
+   is applied to each container based on their labels. This approach provides a powerful
+   way to group together all of your network policy, makes it easy to reuse policy in
+   different networks, and makes it easier to define policy that extends across
+   different orchestration systems that use Calico.
+
+## Managing Calico policy for a network
+
+This section provides worked examples of applying policy using the two
+approaches described above.
+
+In both cases we create a Calico Docker network and use the `calicoctl` tool to
+achieve the required isolation.
+
+For the worked examples, let's assume that we want to provide the following
+isolation between a set of database containers and a set of frontend containers:
+
+- Frontend containers can only access the database containers over TCP to port 3306.
+  For now we'll assume no other connectivity is allowed to/from the frontend.
+- Database containers have no isolation between themselves (to handle synchronization
+  within a cluster). This could be improved by locking down the port ranges and
+  protocols, but for brevity we'll just allow full access between database
+  containers.
+
+### a) Policy applied directly by the profile
+
+In this example we apply the policy for containers in both networks using only
+profiles. Each network has an associated, identically named profile that
+consists of a set of labels and policy rules. We set the labels and policy
+rules for each of the two network profiles to provide the required isolation.
+
+#### a.1 Create the Docker networks
+
+On any host in your Calico / Docker network, run the following commands:
+
+```
+docker network create --driver calico --ipam-driver calico-ipam database
+docker network create --driver calico --ipam-driver calico-ipam frontend
+```
+
+#### a.2 Create the profiles
+
+Create the profiles for each of these networks.
+
+We set labels on each profile indicating the network role, in our case frontend
+or database. Each profile also includes a set of ingress and egress rules and
+actions, where each rule can filter packets based on a variety of source or
+destination attributes (which includes selector-based filtering using label
+selection). The labels and rules are applied directly to each container in the
+corresponding network.
+
+The labels themselves are arbitrary key/value pairs, and we have decided here to
+use the key `role` indicating the network role and a value of either `frontend`
+or `database`.
+ +Use `calicoctl apply` to create or update the profiles: + +``` +cat << EOF | calicoctl apply -f - +- apiVersion: v1 + kind: profile + metadata: + name: database + labels: + role: database + spec: + ingress: + - action: allow + protocol: tcp + source: + selector: role == 'frontend' + destination: + ports: + - 3306 + - action: allow + source: + selector: role == 'database' + egress: + - action: allow + destination: + selector: role == 'database' +- apiVersion: v1 + kind: profile + metadata: + name: frontend + labels: + role: frontend + spec: + egress: + - action: allow + protocol: tcp + destination: + selector: role == 'database' + ports: + - 3306 +EOF +``` + +The above profiles provide the required isolation between the frontend and database +containers. This works as follows: + +- Containers in the "database" Docker network are assigned the "database" + Calico profile. +- Containers in the "frontend" Docker network are assigned the "frontend" + Calico profile. +- Each container in the "database" network inherits the label `role = database` + from its profile. +- Each container in the "frontend" network inherits the label `role = frontend` + from its profile. +- The "database" profile applies ingress and egress policy: + - An ingress rule to allow TCP traffic to port 3306 from endpoints that have + the label `role = frontend` (i.e. from frontend containers since they are + the only ones with the label `role = frontend`) + - An ingress and egress rule to allow all traffic from and to endpoints that + have the label `role = database` (i.e. from database containers). +- The "frontend" profile applies a single egress rule to allow all TCP traffic + to port 3306 on endpoints that have the label `role = database` (i.e. to + database containers) + +For details on all of the possible match criteria, see the +[profile resource]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/profile) +documentation. + +### b) Global policy applied through label selection + +The same example can be demonstrated using global selector-based policy. +In this case we use the network profiles to apply labels (as in the previous +example), but define a set of global policy resources that use selectors to +determine which subset of the policy applies to each container based on the +labels applied by the profile. + +> The advantage of using this approach is that by sharing the same labels +> across different Docker networks, we can re-use globally defined policy without +> having to re-specify it. + +#### b.1 Create the Docker networks + +On any host in your Calico / Docker network, run the following commands: + +``` +docker network create --driver calico --ipam-driver calico-ipam database +docker network create --driver calico --ipam-driver calico-ipam frontend +``` + +#### b.2 Create the profiles + +Create the profiles for each of these networks. + +We set labels on each profile indicating the network role, in our case frontend +or database. The labels are applied directly to each container in the +corresponding network. + +As with the previous example we have decided to use the key `role` indicating +the network role and a value of either `frontend` or `database`. Unlike the +previous, we do not define any policy rules within the profile. 
+
+Use `calicoctl apply` to create or update the profiles:
+
+```
+cat << EOF | calicoctl apply -f -
+- apiVersion: v1
+  kind: profile
+  metadata:
+    name: database
+    labels:
+      role: database
+- apiVersion: v1
+  kind: profile
+  metadata:
+    name: frontend
+    labels:
+      role: frontend
+EOF
+```
+
+#### b.3 Create policy
+
+Create the global policy to provide the required network isolation.
+
+Policy resources are defined globally and, like profiles, include a set of
+ingress and egress rules and actions, where each rule can filter packets based
+on a variety of source or destination attributes (which includes selector-based
+filtering using label selection).
+
+Each policy resource also has a "main" selector that is used to determine which
+endpoints the policy is applied to, based on the labels applied by the network
+profiles.
+
+We can use `calicoctl create` to create two new policies for this:
+
+```
+cat << EOF | calicoctl create -f -
+- apiVersion: v1
+  kind: policy
+  metadata:
+    name: database
+  spec:
+    order: 0
+    selector: role == 'database'
+    ingress:
+    - action: allow
+      protocol: tcp
+      source:
+        selector: role == 'frontend'
+      destination:
+        ports:
+        - 3306
+    - action: allow
+      source:
+        selector: role == 'database'
+    egress:
+    - action: allow
+      destination:
+        selector: role == 'database'
+- apiVersion: v1
+  kind: policy
+  metadata:
+    name: frontend
+  spec:
+    order: 0
+    selector: role == 'frontend'
+    egress:
+    - action: allow
+      protocol: tcp
+      destination:
+        selector: role == 'database'
+        ports:
+        - 3306
+EOF
+```
+
+The above policies provide the same isolation as the previous example.
+This works as follows:
+
+- Containers in the "database" Docker network are assigned the "database"
+  Calico profile.
+- Containers in the "frontend" Docker network are assigned the "frontend"
+  Calico profile.
+- Each container in the "database" network inherits the label `role = database`
+  from its profile.
+- Each container in the "frontend" network inherits the label `role = frontend`
+  from its profile.
+- The global policy resource "database" uses the selector `role == database` to
+  select containers with label `role = database` and applies ingress and egress
+  policy:
+  - An ingress rule to allow TCP traffic to port 3306 from endpoints that have
+    the label `role = frontend` (i.e. from frontend containers, since they are
+    the only ones with the label `role = frontend`).
+  - An ingress and egress rule to allow all traffic from and to endpoints that
+    have the label `role = database` (i.e. from database containers).
+- The global policy resource "frontend" uses the selector `role == frontend` to
+  select containers with label `role = frontend` and applies a single egress
+  rule to allow all TCP traffic to port 3306 on endpoints that have the label
+  `role = database` (i.e. to database containers).
+
+For details on all of the possible match criteria, see the
+[policy resource]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/policy)
+documentation.
+
+## Multiple networks
+
+Whilst the Docker API supports the ability to attach a container to multiple
+networks, it is not possible to use this feature of Docker when using Calico.
+
+However, using the selector-based approach to defining network policy, it is
+possible to achieve the same effect as overlapping networks, but with a far
+richer policy set.
+ +Extending the previous example, suppose we introduce another network that is +used for system backups and that we want some of our database containers to be +on both the database network and the backup network (so that they are able to +back up the database). + +One approach for doing this is as follows: + +- Define a new label, say `backup = true` to indicate that a particular + endpoint should be allowed access to the backup network. +- Define global policy "backupnetwork" that allows full access between all + components with the `backup = true` label. +- Create a Docker network "backups" for backups and update the associated + profile to assign the `backup = true` label +- Create a Docker network "database-backup" for database _and_ backup access, + and update the associated profile to assign both the `backup = true` and + `role = database` labels. + +For your database containers that also need to be on the backup network, use the +"database-backup" network. Since containers in this network will have the two +labels assigned to it, they will pick up policy that selects both labels - in +other words they will have the locked down database access plus access to the +backup network. + +Obviously, the example of allowing full access between everything on the "backup" +network is probably a little too permissive, so you can lock down the access within +the backup network by modifying the global policy selected by the `backup = true` +label. + +## Further Reading + +For details on configuring advanced policy using container labels, see +[Security using Docker Labels and Calico Policy]({{site.baseurl}}/{{page.version}}/getting-started/docker/tutorials/security-using-docker-labels-and-calico-policy). diff --git a/v2.4/getting-started/docker/tutorials/security-using-calico-profiles.md b/v2.4/getting-started/docker/tutorials/security-using-calico-profiles.md new file mode 100644 index 00000000000..399ad2d2b03 --- /dev/null +++ b/v2.4/getting-started/docker/tutorials/security-using-calico-profiles.md @@ -0,0 +1,106 @@ +--- +title: Security using Calico Profiles +--- + +## Background + +With Calico as a Docker network plugin, Calico uses an identically named +[profile]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/profile) +to represent each Docker network. This profile is applied to each container +in that network and the profile is used by Calico to configure access policy +for that container. The Calico network plugin will automatically create the +associated profile if it does not exist when the container is attached to the +network. By default, the profile contains rules that allow full egress traffic +but allow ingress traffic only from containers within the same network and no +other source. Custom policy for a network can be configured by creating in +advance, or editing, the profile associated with the Docker network. + +## Tutorial + +To run through the worked example in this tutorial you will need to set up two hosts +with calico installed. + +Follow the +[Manual setup]({{site.baseurl}}/{{page.version}}/getting-started/docker/installation/manual) +if you don't already have a cluster prepared. + +Or alternatively, use one of the two quickstart clusters: + +- [Vagrant install with Container Linux by CoreOS]({{site.baseurl}}/{{page.version}}/getting-started/docker/installation/vagrant-coreos/) +- [Vagrant install with Ubuntu]({{site.baseurl}}/{{page.version}}/getting-started/docker/installation/vagrant-ubuntu) + +### 1. 
Create the networks
+
+To create the networks, run the following commands on one of the hosts:
+
+    docker network create --driver calico --ipam-driver calico-ipam net1
+    docker network create --driver calico --ipam-driver calico-ipam net2
+    docker network create --driver calico --ipam-driver calico-ipam net3
+
+> Note: To allocate from a specific Calico IP pool, the `--subnet a.b.c.d/xx`
+> option can be passed to `docker network create`. For more details, see the
+> [IPAM tutorial]({{site.baseurl}}/{{page.version}}/getting-started/docker/tutorials/ipam).
+
+### 2. Create the workloads in the networks
+
+With the networks created, let's start some containers on each host spread
+between these networks.
+
+On calico-01
+
+    docker run --net net1 --name workload-A -tid busybox
+    docker run --net net2 --name workload-B -tid busybox
+    docker run --net net1 --name workload-C -tid busybox
+
+On calico-02
+
+    docker run --net net3 --name workload-D -tid busybox
+    docker run --net net1 --name workload-E -tid busybox
+
+By default, networks are configured so that their members can communicate with
+one another, but workloads in other networks cannot reach them. A, C and E are
+all in the same network so should be able to ping each other. B and D are in
+their own networks so shouldn't be able to ping anyone else.
+
+### 3. Check Workload Connectivity
+
+On calico-01, check that A can ping C and E. We can ping workloads within a
+container's network by name.
+
+    docker exec workload-A ping -c 4 workload-C.net1
+    docker exec workload-A ping -c 4 workload-E.net1
+
+Also check that A cannot ping B or D. This is slightly trickier because the
+hostnames for different networks will not be added to the host configuration of
+the container - so we need to determine the IP addresses assigned to containers
+B and D.
+
+Since A and B are on the same host, we can run a single command that inspects
+the IP address and issues the ping from A to B. These pings will fail. On calico-01, run:
+
+    docker exec workload-A ping -c 2 `docker inspect --format "{% raw %}{{ .NetworkSettings.Networks.net2.IPAddress }}{% endraw %}" workload-B`
+
+To test connectivity between A and D, which are on different hosts, it is
+necessary to run the `docker inspect` command on the host for D (calico-02)
+and then run the ping command on the host for A (calico-01).
+
+On calico-02:
+
+    docker inspect --format "{% raw %}{{ .NetworkSettings.Networks.net3.IPAddress }}{% endraw %}" workload-D
+
+This returns the IP address of workload-D.
+
+On calico-01:
+
+    docker exec workload-A ping -c 2 <IP_OF_WORKLOAD_D>
+
+replacing `<IP_OF_WORKLOAD_D>` with the IP address returned for D. These pings
+will fail.
+
+To see the list of networks, use:
+
+    docker network ls
+
+## Further Reading
+
+For details on configuring more advanced policy, see
+[Security using Calico Profiles and Policy]({{site.baseurl}}/{{page.version}}/getting-started/docker/tutorials/security-using-calico-profiles-and-policy).
diff --git a/v2.4/getting-started/docker/tutorials/security-using-docker-labels-and-calico-policy.md b/v2.4/getting-started/docker/tutorials/security-using-docker-labels-and-calico-policy.md new file mode 100644 index 00000000000..5586765ca4d --- /dev/null +++ b/v2.4/getting-started/docker/tutorials/security-using-docker-labels-and-calico-policy.md @@ -0,0 +1,187 @@ +--- +title: Security using Docker Labels and Calico Policy +--- + +## Background + +With Calico as a Docker network plugin, Calico can be configured to extract the +labels on a container and apply them to the workload endpoint for use with Calico +[policy]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/policy). +By default, Calico blocks all traffic unless it has been explicitly allowed +through configuration of the globally +defined policy which uses selectors to determine which subset of the policy is +applied to each container based on their labels. This approach provides a +powerful way to group together all of your network Policy, makes it easy to +reuse policy in different networks, and makes it easier to define policy that +extends across different orchestration systems that use Calico. + +When Calico is configured to use container labels, profiles are not created and +have no impact on any container traffic. + +## Enabling Docker Networking Container Labels Policy + +To enable labels to be used in Policy selectors the flag +`--use-docker-networking-container-labels` must be passed when starting +calico/node with the `calicoctl node run` command. All calico/node instances +should be started with the flag to avoid a mix of labels and profiles. + +## Managing Calico policy for a network + +This section provides an example applying policy using the approach described +above once container labels are enabled. + +We create a Calico-Docker network and use the `calicoctl` tool to set policies +that achieve the required isolation and allowances. + +For the example let's assume that we want to provide the following isolation +between a set of database containers and a set of frontend containers: + +- Frontend containers can only access the Database containers over TCP to port 3306. + For now we'll assume no other connectivity is allowed to/from the frontend. +- Database containers have no isolation between themselves (to handle synchronization + within a cluster). This could be improved by locking down the port ranges and + protocols, but for brevity we'll just allow full access between database + containers. + +### Global policy applied through label selection + +This example demonstrates using global selector-based policy with labels +extracted from the Docker containers. + +#### 1. Create the Docker network + +On any host in your Calico / Docker network, run the following command: + +``` +docker network create --driver calico --ipam-driver calico-ipam net1 +``` + +#### 2. Create the Labeled Workloads + +We set labels on each container indicating the role, in our case frontend +or database. The labels are applied directly to each container and must be +prefixed with `org.projectcalico.label.` for them to be extracted and applied +to the workload endpoint. + +We have decided to use the label `role` indicating the role and a value of +either `frontend` or `database`. + +Create the workloads as docker containers with appropriate labels. 
+
+```
+docker run --label org.projectcalico.label.role=frontend --net net1 --name frontend-A -tid busybox
+docker run --label org.projectcalico.label.role=database --net net1 --name database-A -tid busybox
+```
+
+#### 3. Create policy
+
+Create the global policy to provide the required network isolation.
+
+Policy resources are defined globally, and include a set of ingress and egress
+rules and actions, where each rule can filter packets based on a variety
+of source or destination attributes (which includes selector-based filtering
+using label selection).
+
+Each policy resource also has a "main" selector that is used to determine which
+endpoints the policy is applied to, based on the applied labels.
+
+We can use `calicoctl create` to create two new policies for this:
+
+```
+cat << EOF | calicoctl create -f -
+- apiVersion: v1
+  kind: policy
+  metadata:
+    name: database
+  spec:
+    order: 0
+    selector: role == 'database'
+    ingress:
+    - action: allow
+      protocol: tcp
+      source:
+        selector: role == 'frontend'
+      destination:
+        ports:
+        - 3306
+    - action: allow
+      source:
+        selector: role == 'database'
+    egress:
+    - action: allow
+      destination:
+        selector: role == 'database'
+- apiVersion: v1
+  kind: policy
+  metadata:
+    name: frontend
+  spec:
+    order: 0
+    selector: role == 'frontend'
+    egress:
+    - action: allow
+      protocol: tcp
+      destination:
+        selector: role == 'database'
+        ports:
+        - 3306
+EOF
+```
+
+This works as follows:
+
+- Each database container is given the label `role = database`.
+- Each frontend container is given the label `role = frontend`.
+- The global policy resource "database" uses the selector `role == database` to
+  select containers with label `role = database` and applies ingress and egress
+  policy:
+  - An ingress rule to allow TCP traffic to port 3306 from endpoints that have
+    the label `role = frontend` (i.e. from frontend containers, since they are
+    the only ones with the label `role = frontend`).
+  - An ingress and egress rule to allow all traffic from and to endpoints that
+    have the label `role = database` (i.e. from database containers).
+- The global policy resource "frontend" uses the selector `role == frontend` to
+  select containers with label `role = frontend` and applies a single egress
+  rule to allow all TCP traffic to port 3306 on endpoints that have the label
+  `role = database` (i.e. to database containers).
+
+For details on all of the possible match criteria, see the
+[policy resource]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/policy)
+documentation.
+
+## Multiple networks
+
+While some network providers tend to use multiple networks to enforce
+isolation, the Calico philosophy is instead to put all containers in the same
+flat network, where they are isolated by default, and then connect them using
+Calico policy. For this reason, Calico does not support attaching a container
+to multiple Docker networks.
+
+Extending the previous example, suppose we introduce another label that is
+used for system backups and that we want some of our database containers to
+have the database label and a backup label (so they have database policy and
+backup policy applied).
+
+One approach for doing this is as follows:
+
+- Define a new label, say `backup = true`, to indicate that a particular
+  endpoint should be allowed access to the backup network.
+- Define global policy "backupnetwork" that allows full access between all
+  components with the `backup = true` label; a sketch of such a policy follows
+  this list.
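+
+A minimal sketch of such a "backupnetwork" policy might look like the
+following; the `order` value is illustrative, and the selector assumes the
+`backup = true` label described above:
+
+```
+cat << EOF | calicoctl create -f -
+- apiVersion: v1
+  kind: policy
+  metadata:
+    name: backupnetwork
+  spec:
+    order: 0
+    selector: backup == 'true'
+    ingress:
+    - action: allow
+      source:
+        selector: backup == 'true'
+    egress:
+    - action: allow
+      destination:
+        selector: backup == 'true'
+EOF
+```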
+ +For your database containers that also need to be able to access the backup +endpoints, launch them assigning both the `role = database` and `backup = true` +labels. + +``` +docker run --label org.projectcalico.label.role=database --label org.projectcalico.label.backup=true --net net1 --name database-B -tid busybox +``` + +Since containers started like this will have the two labels assigned to them, +they will pick up policy that selects both labels - in other words they will +have the locked down database access plus access to the backup network. + +Obviously, the example of allowing full access between everything on the "backup" +network is probably a little too permissive, so you can lock down the access within +the backup network by modifying the global policy selected by the `backup = true` +label. diff --git a/v2.4/getting-started/docker/upgrade.md b/v2.4/getting-started/docker/upgrade.md new file mode 100644 index 00000000000..e46479ef49b --- /dev/null +++ b/v2.4/getting-started/docker/upgrade.md @@ -0,0 +1,4 @@ +--- +title: Upgrading Calico for Docker +--- +Information coming soon! diff --git a/v2.4/getting-started/index.md b/v2.4/getting-started/index.md new file mode 100644 index 00000000000..d16023d9afb --- /dev/null +++ b/v2.4/getting-started/index.md @@ -0,0 +1,19 @@ +--- +title: Calico Integrations +--- + +To get started using Calico, we recommend running through one or more of the +available tutorials linked below. + +These tutorials will help you understand the different environment options when +using Calico. In most cases we provide worked examples using manual setup on +your own servers, a quick set-up in a virtualized environment using Vagrant and +a number of cloud services. + +- [Calico with Kubernetes](kubernetes) +- [Calico with Mesos](mesos) + - [Calico with DC/OS](mesos/installation/dc-os) +- [Calico with Docker](docker) +- [Calico with OpenStack](openstack) +- [Calico with rkt](rkt) +- [Host protection](bare-metal/bare-metal) diff --git a/v2.4/getting-started/kubernetes/index.md b/v2.4/getting-started/kubernetes/index.md new file mode 100644 index 00000000000..d6d95488a72 --- /dev/null +++ b/v2.4/getting-started/kubernetes/index.md @@ -0,0 +1,41 @@ +--- +title: Calico for Kubernetes +--- + +Calico enables networking and network policy in Kubernetes clusters across the cloud. Calico works +everywhere - on all major public cloud providers and private cloud as well. + +Calico uses a pure IP networking fabric to provide high performance networking, and its battle-tested policy engine +enforces high-level, intent-focused network policy. Together, Calico and Kubernetes provide a secure, +cloud-native platform that can scale your infrastructure to hundreds of thousands of workloads. + +## Installing Calico for Kubernetes + +There are a number of ways to install Calico and Kubernetes. The [installation documentation](installation) +includes links to a number of popular guides and installers which use Calico. It also +includes information on installing Calico on a from-scratch Kubernetes cluster using either a self-hosted Kubernetes manifest, +or by integrating Calico into your own configuration management scripts. + +## Using Calico with Kubernetes + +Once you have a Kubernetes cluster with Calico installed, the following articles will help you +get familiar with Calico and make the most of the features that Calico provides. 
+
+##### Tutorials
+
+**[Using the NetworkPolicy API](tutorials/simple-policy)**: this guide explains how to use Calico to secure a simple two-tier application
+using the Kubernetes NetworkPolicy API.
+
+**[Advanced Calico Policy](tutorials/advanced-policy)**: this guide explains how to use Calico to provide policy features beyond
+what can be done with the Kubernetes NetworkPolicy API, such as egress and CIDR-based policy.
+
+**[Stars Demo](tutorials/stars-policy/)**: this demo features a UI which actively shows blocked and allowed connections as policy is implemented.
+
+##### Usage Reference
+
+**[Using the calicoctl CLI tool][calicoctl]**: reference documentation for the Calico CLI tool, calicoctl.
+
+**[Configuring BGP Peering][bgp-peering]**: this guide is for users on private cloud who want to configure Calico to peer with their underlying infrastructure.
+
+[calicoctl]: {{site.baseurl}}/{{page.version}}/reference/calicoctl/
+[bgp-peering]: {{site.baseurl}}/{{page.version}}/usage/configuration/bgp
diff --git a/v2.4/getting-started/kubernetes/installation/aws.md b/v2.4/getting-started/kubernetes/installation/aws.md
new file mode 100644
index 00000000000..c60295a0470
--- /dev/null
+++ b/v2.4/getting-started/kubernetes/installation/aws.md
@@ -0,0 +1,35 @@
+---
+title: Deploying Calico and Kubernetes on AWS
+---
+
+There are a number of solutions for deploying Calico and Kubernetes on AWS. We recommend taking
+a look at the following guides and tools, which install Calico for networking and network policy on AWS.
+
+Make sure you've read the [Calico AWS reference guide][aws-reference] for details on how to configure Calico and AWS.
+
+#### Popular guides and tools
+
+**[Heptio AWS Quickstart][heptio]** uses kubeadm and CloudFormation to build Kubernetes clusters on AWS using Calico
+for networking and network policy enforcement.
+
+
+**[Kops][kops]** is a popular Kubernetes project for launching production-ready clusters on AWS,
+as well as other public and private cloud environments.
+
+
+**[kube-aws][kube-aws]** is a command-line tool by CoreOS to create, update, and destroy production-ready
+Container Linux-based Kubernetes clusters on AWS.
+
+#### More installation options
+
+If the out-of-the-box solutions listed above don't meet your requirements, you can install Calico for Kubernetes
+on AWS using one of our [self-hosted manifests][self-hosted], or by [integrating Calico with your own configuration management][integration-guide].
+
+[heptio]: https://s3.amazonaws.com/quickstart-reference/heptio/latest/doc/heptio-kubernetes-on-the-aws-cloud.pdf
+[kops]: https://github.com/kubernetes/kops/blob/master/docs/networking.md#calico-example-for-cni-and-network-policy
+[kube-aws]: https://github.com/coreos/kube-aws/#getting-started
+
+[self-hosted]: hosted
+[integration-guide]: integration
+
+[aws-reference]: {{site.baseurl}}/{{page.version}}/reference/public-cloud/aws
diff --git a/v2.4/getting-started/kubernetes/installation/gce.md b/v2.4/getting-started/kubernetes/installation/gce.md
new file mode 100644
index 00000000000..bed736b8b01
--- /dev/null
+++ b/v2.4/getting-started/kubernetes/installation/gce.md
@@ -0,0 +1,6 @@
+---
+title: Deploying Calico and Kubernetes on GCE
+---
+
+See [this page]({{site.baseurl}}/{{page.version}}/reference/public-cloud/gce)
+for info on running Calico on GCE.
diff --git a/v2.4/getting-started/kubernetes/installation/hosted/calico.yaml b/v2.4/getting-started/kubernetes/installation/hosted/calico.yaml new file mode 100644 index 00000000000..f4340f1767c --- /dev/null +++ b/v2.4/getting-started/kubernetes/installation/hosted/calico.yaml @@ -0,0 +1,317 @@ +--- +layout: null +--- +# Calico Version {{site.data.versions[page.version].first.title}} +# http://docs.projectcalico.org/{{page.version}}/releases#{{site.data.versions[page.version].first.title}} +# This manifest includes the following component versions: +# calico/node:{{site.data.versions[page.version].first.title}} +# calico/cni:{{site.data.versions[page.version].first.components["calico/cni"].version}} +# calico/kube-policy-controller:{{site.data.versions[page.version].first.components["calico/kube-policy-controller"].version}} + +# This ConfigMap is used to configure a self-hosted Calico installation. +kind: ConfigMap +apiVersion: v1 +metadata: + name: calico-config + namespace: kube-system +data: + # Configure this with the location of your etcd cluster. + etcd_endpoints: "http://127.0.0.1:2379" + + # Configure the Calico backend to use. + calico_backend: "bird" + + # The CNI network configuration to install on each node. + cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.1.0", + "type": "calico", + "etcd_endpoints": "__ETCD_ENDPOINTS__", + "etcd_key_file": "__ETCD_KEY_FILE__", + "etcd_cert_file": "__ETCD_CERT_FILE__", + "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__", + "log_level": "info", + "mtu": 1500, + "ipam": { + "type": "calico-ipam" + }, + "policy": { + "type": "k8s", + "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", + "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + } + + # If you're using TLS enabled etcd uncomment the following. + # You must also populate the Secret below with these files. + etcd_ca: "" # "/calico-secrets/etcd-ca" + etcd_cert: "" # "/calico-secrets/etcd-cert" + etcd_key: "" # "/calico-secrets/etcd-key" + +--- + +# The following contains k8s Secrets for use with a TLS enabled etcd cluster. +# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/ +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: calico-etcd-secrets + namespace: kube-system +data: + # Populate the following files with etcd TLS configuration if desired, but leave blank if + # not using TLS for etcd. + # This self-hosted install expects three files with the following names. The values + # should be base64 encoded strings of the entire contents of each file. + # etcd-key: null + # etcd-cert: null + # etcd-ca: null + +--- + +# This manifest installs the calico/node container, as well +# as the Calico CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. +kind: DaemonSet +apiVersion: extensions/v1beta1 +metadata: + name: calico-node + namespace: kube-system + labels: + k8s-app: calico-node +spec: + selector: + matchLabels: + k8s-app: calico-node + template: + metadata: + labels: + k8s-app: calico-node + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + scheduler.alpha.kubernetes.io/tolerations: | + [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, + {"key":"CriticalAddonsOnly", "operator":"Exists"}] + spec: + hostNetwork: true + serviceAccountName: calico-node + containers: + # Runs calico/node container on each Kubernetes node. 
This + # container programs network policy and routes on each + # host. + - name: calico-node + image: quay.io/calico/node:{{site.data.versions[page.version].first.title}} + env: + # The location of the Calico etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + # Choose the backend to use. + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + # Configure the IP Pool from which Pod IPs will be chosen. + - name: CALICO_IPV4POOL_CIDR + value: "192.168.0.0/16" + - name: CALICO_IPV4POOL_IPIP + value: "always" + # Disable IPv6 on Kubernetes. + - name: FELIX_IPV6SUPPORT + value: "false" + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: "info" + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU + value: "1440" + # Location of the CA certificate for etcd. + - name: ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_ca + # Location of the client key for etcd. + - name: ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_key + # Location of the client certificate for etcd. + - name: ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_cert + # Auto-detect the BGP IP address. + - name: IP + value: "" + securityContext: + privileged: true + resources: + requests: + cpu: 250m + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /calico-secrets + name: etcd-certs + # This container installs the Calico CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: quay.io/calico/cni:{{site.data.versions[page.version].first.components["calico/cni"].version}} + command: ["/install-cni.sh"] + env: + # The location of the Calico etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: calico-config + key: cni_network_config + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + - mountPath: /calico-secrets + name: etcd-certs + volumes: + # Used by calico/node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + # Mount in the etcd TLS secrets. + - name: etcd-certs + secret: + secretName: calico-etcd-secrets + +--- + +# This manifest deploys the Calico policy controller on Kubernetes. 
+# See https://github.com/projectcalico/k8s-policy +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: calico-policy-controller + namespace: kube-system + labels: + k8s-app: calico-policy + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + scheduler.alpha.kubernetes.io/tolerations: | + [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, + {"key":"CriticalAddonsOnly", "operator":"Exists"}] +spec: + # The policy controller can only have a single active instance. + replicas: 1 + strategy: + type: Recreate + template: + metadata: + name: calico-policy-controller + namespace: kube-system + labels: + k8s-app: calico-policy + spec: + # The policy controller must run in the host network namespace so that + # it isn't governed by policy that would prevent it from working. + hostNetwork: true + serviceAccountName: calico-policy-controller + containers: + - name: calico-policy-controller + image: quay.io/calico/kube-policy-controller:{{site.data.versions[page.version].first.components["calico/kube-policy-controller"].version}} + env: + # The location of the Calico etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + # Location of the CA certificate for etcd. + - name: ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_ca + # Location of the client key for etcd. + - name: ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_key + # Location of the client certificate for etcd. + - name: ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_cert + # The location of the Kubernetes API. Use the default Kubernetes + # service for API access. + - name: K8S_API + value: "https://kubernetes.default:443" + # Since we're running in the host namespace and might not have KubeDNS + # access, configure the container's /etc/hosts to resolve + # kubernetes.default to the correct service clusterIP. + - name: CONFIGURE_ETC_HOSTS + value: "true" + volumeMounts: + # Mount in the etcd TLS secrets. + - mountPath: /calico-secrets + name: etcd-certs + volumes: + # Mount in the etcd TLS secrets. 
+ - name: etcd-certs + secret: + secretName: calico-etcd-secrets + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-policy-controller + namespace: kube-system + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-node + namespace: kube-system diff --git a/v2.4/getting-started/kubernetes/installation/hosted/calicoctl.yaml b/v2.4/getting-started/kubernetes/installation/hosted/calicoctl.yaml new file mode 100644 index 00000000000..ed37131f21e --- /dev/null +++ b/v2.4/getting-started/kubernetes/installation/hosted/calicoctl.yaml @@ -0,0 +1,25 @@ +--- +layout: null +--- +# Calico Version {{site.data.versions[page.version].first.title}} +# http://docs.projectcalico.org/{{page.version}}/releases#{{site.data.versions[page.version].first.title}} +# This manifest includes the following component versions: +# calico/ctl:{{site.data.versions[page.version].first.components["calicoctl"].version}} + +apiVersion: v1 +kind: Pod +metadata: + name: calicoctl + namespace: kube-system +spec: + hostNetwork: true + containers: + - name: calicoctl + image: quay.io/calico/ctl:{{site.data.versions[page.version].first.components["calicoctl"].version}} + command: ["/bin/sh", "-c", "while true; do sleep 3600; done"] + env: + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints diff --git a/v2.4/getting-started/kubernetes/installation/hosted/hosted.md b/v2.4/getting-started/kubernetes/installation/hosted/hosted.md new file mode 100644 index 00000000000..3e9e64daf34 --- /dev/null +++ b/v2.4/getting-started/kubernetes/installation/hosted/hosted.md @@ -0,0 +1,22 @@ +--- +title: Standard Hosted Install +--- + +To install Calico as a Kubernetes add-on using your own etcd cluster: + +1. Download [calico.yaml](calico.yaml) +2. Configure `etcd_endpoints` in the provided ConfigMap to match your etcd cluster. + +Then simply apply the manifest: + +```shell +kubectl apply -f calico.yaml +``` + +> **NOTE** +> +> Make sure you configure the provided ConfigMap with the location of your etcd cluster before running the above command. + +## Configuration Options + +The above manifest supports a number of configuration options documented [here](index#configuration-options) diff --git a/v2.4/getting-started/kubernetes/installation/hosted/index.md b/v2.4/getting-started/kubernetes/installation/hosted/index.md new file mode 100644 index 00000000000..e4a94b8ce46 --- /dev/null +++ b/v2.4/getting-started/kubernetes/installation/hosted/index.md @@ -0,0 +1,126 @@ +--- +title: Calico Kubernetes Hosted Install +--- + +Calico can be installed on a Kubernetes cluster with a single command. + +``` +kubectl apply -f calico.yaml +``` + +We maintain several manifests. Which one you use depends on the specific +requirements of your Calico installation: + +#### [Standard Hosted Install](hosted) + +This manifest installs Calico for use with an existing etcd cluster. This is +the recommended hosted approach for deploying Calico in production. + +#### [Kubeadm Hosted Install](kubeadm/) + +This manifest installs Calico as well as a single node etcd cluster. This is the recommended hosted approach +for getting started quickly with Calico in conjunction with tools like kubeadm. + +#### [Kubernetes Datastore](kubernetes-datastore/) + +This manifest installs Calico in a mode where it does not require its own etcd cluster. + +## How it works + +Each manifest contains all the necessary resources for installing Calico on each node in your Kubernetes cluster. 
+
+It installs the following Kubernetes resources:
+
+- The `calico-config` ConfigMap, which contains parameters for configuring the install.
+- A DaemonSet which runs the `calico/node` container on each host, and which also
+  installs the Calico CNI binaries and network config on each host via an
+  `install-cni` container.
+- A Deployment which runs the `calico/kube-policy-controller`.
+- The `calico-etcd-secrets` Secret, which optionally allows for providing etcd TLS assets.
+
+## Configuration options
+
+The ConfigMap in `calico.yaml` provides a way to configure a Calico self-hosted installation. It exposes
+the following configuration parameters:
+
+### Configuring the Pod IP range
+
+Calico IPAM assigns IP addresses from [IP pools]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/ippool).
+
+To change the default IP range used for pods, modify the `CALICO_IPV4POOL_CIDR` section of the calico.yaml manifest. For more
+information, see the [calico/node configuration reference]({{site.baseurl}}/{{page.version}}/reference/node/configuration).
+
+### Configuring IP-in-IP
+
+By default, the self-hosted manifests enable IP-in-IP encapsulation across subnets. Many users may
+want to disable IP-in-IP encapsulation, for example if:
+
+- Their cluster is [running in a properly configured AWS VPC]({{site.baseurl}}/{{page.version}}/reference/public-cloud/aws).
+- All their Kubernetes nodes are connected to the same L2 network.
+- They intend to use BGP peering to make their underlying infrastructure aware of Pod IP addresses.
+
+To disable IP-in-IP encapsulation, modify the `CALICO_IPV4POOL_IPIP` section of the manifest. For more
+information, see the [calico/node configuration reference]({{site.baseurl}}/{{page.version}}/reference/node/configuration).
+
+### Etcd Configuration
+
+By default, these manifests do not configure secure access to etcd and assume an etcd proxy is running on each host. The following configuration
+options let you specify custom etcd cluster endpoints as well as TLS.
+
+The following table outlines the supported ConfigMap options for etcd:
+
+| Option | Description | Default
+|------------------------|----------------|----------
+| etcd_endpoints | A comma-separated list of etcd nodes. | http://127.0.0.1:2379
+| etcd_ca | The location of the CA certificate mounted in the pods deployed by the DaemonSet. | None
+| etcd_key | The location of the client key mounted in the pods deployed by the DaemonSet. | None
+| etcd_cert | The location of the client certificate mounted in the pods deployed by the DaemonSet. | None
+
+To use these manifests with a TLS-enabled etcd cluster you must do the following:
+
+- Populate the `calico-etcd-secrets` Secret with the contents of the following files:
+  - `etcd-ca`
+  - `etcd-key`
+  - `etcd-cert`
+- Populate the following options in the ConfigMap, which will trigger the various services to expect the provided TLS assets:
+  - `etcd_ca: /calico-secrets/etcd-ca`
+  - `etcd_key: /calico-secrets/etcd-key`
+  - `etcd_cert: /calico-secrets/etcd-cert`
+
+### Authorization Options
+
+Calico's manifests assign its components one of two service accounts.
+Depending on your cluster's authorization mode, you'll want to back these
+ServiceAccounts with the necessary permissions.
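+
+As a quick sanity check after applying `calico.yaml`, you can confirm that both
+service accounts exist (a sketch only; the account names below come from the
+manifest itself):
+
+```
+# Expect to see calico-node and calico-policy-controller listed.
+kubectl get serviceaccounts -n kube-system | grep calico
+```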
+ +#### RBAC + +If using Calico with RBAC, apply the `ClusterRole` and `ClusterRoleBinding` specs: + +``` +kubectl apply -f {{site.url}}/{{page.version}}/getting-started/kubernetes/installation/rbac.yaml +``` + +### Other Configuration Options + +The following table outlines the remaining supported ConfigMap options: + +| Option | Description | Default +|------------------------|---------------------|---------- +| calico_backend | The backend to use. | bird +| cni_network_config | The CNI Network config to install on each node. Supports templating as described below. | + +### CNI Network Config Template Support + +The `cni_network_config` configuration option supports the following template fields, which will +be filled in automatically by the `calico/cni` container: + +| Field | Substituted with +|---------------------------------------|---------------------------------- +| `__KUBERNETES_SERVICE_HOST__` | The Kubernetes Service ClusterIP. e.g 10.0.0.1 +| `__KUBERNETES_SERVICE_PORT__` | The Kubernetes Service port. e.g 443 +| `__SERVICEACCOUNT_TOKEN__` | The serviceaccount token for the namespace, if one exists. +| `__ETCD_ENDPOINTS__` | The etcd endpoints specified in etcd_endpoints. +| `__KUBECONFIG_FILEPATH__` | The path to the automatically generated kubeconfig file in the same directory as the CNI network config file. +| `__ETCD_KEY_FILE__` | The path to the etcd key file installed to the host, empty if no key present. +| `__ETCD_CERT_FILE__` | The path to the etcd cert file installed to the host, empty if no cert present. +| `__ETCD_CA_CERT_FILE__` | The path to the etcd CA file installed to the host, empty if no CA present. diff --git a/v2.4/getting-started/kubernetes/installation/hosted/kubeadm/1.5/calico.yaml b/v2.4/getting-started/kubernetes/installation/hosted/kubeadm/1.5/calico.yaml new file mode 100644 index 00000000000..dbc158a5128 --- /dev/null +++ b/v2.4/getting-started/kubernetes/installation/hosted/kubeadm/1.5/calico.yaml @@ -0,0 +1,285 @@ +--- +layout: null +--- +# Calico Version {{site.data.versions[page.version].first.title}} +# http://docs.projectcalico.org/{{page.version}}/releases#{{site.data.versions[page.version].first.title}} +# This manifest includes the following component versions: +# calico/node:{{site.data.versions[page.version].first.title}} +# calico/cni:{{site.data.versions[page.version].first.components["calico/cni"].version}} +# calico/kube-policy-controller:{{site.data.versions[page.version].first.components["calico/kube-policy-controller"].version}} + +# This ConfigMap is used to configure a self-hosted Calico installation. +kind: ConfigMap +apiVersion: v1 +metadata: + name: calico-config + namespace: kube-system +data: + # The location of your etcd cluster. This uses the Service clusterIP + # defined below. + etcd_endpoints: "http://10.96.232.136:6666" + + # Configure the Calico backend to use. + calico_backend: "bird" + + # The CNI network configuration to install on each node. + cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.1.0", + "type": "calico", + "etcd_endpoints": "__ETCD_ENDPOINTS__", + "log_level": "info", + "mtu": 1500, + "ipam": { + "type": "calico-ipam" + }, + "policy": { + "type": "k8s", + "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", + "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" + }, + "kubernetes": { + "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__" + } + } + +--- + +# This manifest installs the Calico etcd on the kubeadm master. 
This uses a DaemonSet
+# to force it to run on the master even when the master isn't schedulable, and uses
+# nodeSelector to ensure it only runs on the master.
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+  name: calico-etcd
+  namespace: kube-system
+  labels:
+    k8s-app: calico-etcd
+spec:
+  template:
+    metadata:
+      labels:
+        k8s-app: calico-etcd
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+        scheduler.alpha.kubernetes.io/tolerations: |
+          [{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
+           {"key":"CriticalAddonsOnly", "operator":"Exists"}]
+    spec:
+      # Only run this pod on the master.
+      nodeSelector:
+        kubeadm.alpha.kubernetes.io/role: master
+      hostNetwork: true
+      containers:
+        - name: calico-etcd
+          image: quay.io/coreos/etcd:v3.1.10
+          env:
+            - name: CALICO_ETCD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+          command: ["/bin/sh","-c"]
+          args: ["/usr/local/bin/etcd --name=calico --data-dir=/var/etcd/calico-data --advertise-client-urls=http://$CALICO_ETCD_IP:6666 --listen-client-urls=http://0.0.0.0:6666 --listen-peer-urls=http://0.0.0.0:6667"]
+          volumeMounts:
+            - name: var-etcd
+              mountPath: /var/etcd
+      volumes:
+        - name: var-etcd
+          hostPath:
+            path: /var/etcd
+
+---
+
+# This manifest installs the Service which gets traffic to the Calico
+# etcd.
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    k8s-app: calico-etcd
+  name: calico-etcd
+  namespace: kube-system
+spec:
+  # Select the calico-etcd pod running on the master.
+  selector:
+    k8s-app: calico-etcd
+  # This ClusterIP needs to be known in advance, since we cannot rely
+  # on DNS to get access to etcd.
+  clusterIP: 10.96.232.136
+  ports:
+    - port: 6666
+
+---
+
+# This manifest installs the calico/node container, as well
+# as the Calico CNI plugins and network config on
+# each master and worker node in a Kubernetes cluster.
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+  name: calico-node
+  namespace: kube-system
+  labels:
+    k8s-app: calico-node
+spec:
+  selector:
+    matchLabels:
+      k8s-app: calico-node
+  template:
+    metadata:
+      labels:
+        k8s-app: calico-node
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+        scheduler.alpha.kubernetes.io/tolerations: |
+          [{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
+           {"key":"CriticalAddonsOnly", "operator":"Exists"}]
+    spec:
+      hostNetwork: true
+      containers:
+        # Runs calico/node container on each Kubernetes node. This
+        # container programs network policy and routes on each
+        # host.
+        - name: calico-node
+          image: quay.io/calico/node:{{site.data.versions[page.version].first.title}}
+          env:
+            # The location of the Calico etcd cluster.
+            - name: ETCD_ENDPOINTS
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: etcd_endpoints
+            # Enable BGP. Disable to enforce policy only.
+            - name: CALICO_NETWORKING_BACKEND
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: calico_backend
+            # Cluster type to identify the deployment type
+            - name: CLUSTER_TYPE
+              value: "kubeadm"
+            # Disable file logging so `kubectl logs` works.
+            - name: CALICO_DISABLE_FILE_LOGGING
+              value: "true"
+            # Set Felix endpoint to host default action to ACCEPT.
+            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
+              value: "ACCEPT"
+            # Configure the IP Pool from which Pod IPs will be chosen.
+            - name: CALICO_IPV4POOL_CIDR
+              value: "192.168.0.0/16"
+            - name: CALICO_IPV4POOL_IPIP
+              value: "always"
+            # Disable IPv6 on Kubernetes.
+ - name: FELIX_IPV6SUPPORT + value: "false" + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU + value: "1440" + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: "info" + # Auto-detect the BGP IP address. + - name: IP + value: "" + securityContext: + privileged: true + resources: + requests: + cpu: 250m + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + # This container installs the Calico CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: quay.io/calico/cni:{{site.data.versions[page.version].first.components["calico/cni"].version}} + command: ["/install-cni.sh"] + env: + # The location of the Calico etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: calico-config + key: cni_network_config + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + volumes: + # Used by calico/node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + +--- + +# This manifest deploys the Calico policy controller on Kubernetes. +# See https://github.com/projectcalico/k8s-policy +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: calico-policy-controller + namespace: kube-system + labels: + k8s-app: calico-policy +spec: + # The policy controller can only have a single active instance. + replicas: 1 + strategy: + type: Recreate + template: + metadata: + name: calico-policy-controller + namespace: kube-system + labels: + k8s-app: calico-policy-controller + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + scheduler.alpha.kubernetes.io/tolerations: | + [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, + {"key":"CriticalAddonsOnly", "operator":"Exists"}] + spec: + # The policy controller must run in the host network namespace so that + # it isn't governed by policy that would prevent it from working. + hostNetwork: true + containers: + - name: calico-policy-controller + image: quay.io/calico/kube-policy-controller:{{site.data.versions[page.version].first.components["calico/kube-policy-controller"].version}} + env: + # The location of the Calico etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + # The location of the Kubernetes API. Use the default Kubernetes + # service for API access. + - name: K8S_API + value: "https://kubernetes.default:443" + # Since we're running in the host namespace and might not have KubeDNS + # access, configure the container's /etc/hosts to resolve + # kubernetes.default to the correct service clusterIP. 
+ - name: CONFIGURE_ETC_HOSTS + value: "true" diff --git a/v2.4/getting-started/kubernetes/installation/hosted/kubeadm/1.6/calico.yaml b/v2.4/getting-started/kubernetes/installation/hosted/kubeadm/1.6/calico.yaml new file mode 100644 index 00000000000..c039fee0c29 --- /dev/null +++ b/v2.4/getting-started/kubernetes/installation/hosted/kubeadm/1.6/calico.yaml @@ -0,0 +1,376 @@ +--- +layout: null +--- +# Calico Version {{site.data.versions[page.version].first.title}} +# http://docs.projectcalico.org/{{page.version}}/releases#{{site.data.versions[page.version].first.title}} +# This manifest includes the following component versions: +# calico/node:{{site.data.versions[page.version].first.title}} +# calico/cni:{{site.data.versions[page.version].first.components["calico/cni"].version}} +# calico/kube-policy-controller:{{site.data.versions[page.version].first.components["calico/kube-policy-controller"].version}} + +# This ConfigMap is used to configure a self-hosted Calico installation. +kind: ConfigMap +apiVersion: v1 +metadata: + name: calico-config + namespace: kube-system +data: + # The location of your etcd cluster. This uses the Service clusterIP + # defined below. + etcd_endpoints: "http://10.96.232.136:6666" + + # Configure the Calico backend to use. + calico_backend: "bird" + + # The CNI network configuration to install on each node. + cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.1.0", + "type": "calico", + "etcd_endpoints": "__ETCD_ENDPOINTS__", + "log_level": "info", + "mtu": 1500, + "ipam": { + "type": "calico-ipam" + }, + "policy": { + "type": "k8s", + "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", + "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" + }, + "kubernetes": { + "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__" + } + } + +--- + +# This manifest installs the Calico etcd on the kubeadm master. This uses a DaemonSet +# to force it to run on the master even when the master isn't schedulable, and uses +# nodeSelector to ensure it only runs on the master. +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: calico-etcd + namespace: kube-system + labels: + k8s-app: calico-etcd +spec: + template: + metadata: + labels: + k8s-app: calico-etcd + annotations: + # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler + # reserves resources for critical add-on pods so that they can be rescheduled after + # a failure. This annotation works in tandem with the toleration below. + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + # Only run this pod on the master. + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode. + # This, along with the annotation above marks this pod as a critical add-on. 
+ - key: CriticalAddonsOnly + operator: Exists + nodeSelector: + node-role.kubernetes.io/master: "" + hostNetwork: true + containers: + - name: calico-etcd + image: quay.io/coreos/etcd:v3.1.10 + env: + - name: CALICO_ETCD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + command: ["/bin/sh","-c"] + args: ["/usr/local/bin/etcd --name=calico --data-dir=/var/etcd/calico-data --advertise-client-urls=http://$CALICO_ETCD_IP:6666 --listen-client-urls=http://0.0.0.0:6666 --listen-peer-urls=http://0.0.0.0:6667"] + volumeMounts: + - name: var-etcd + mountPath: /var/etcd + volumes: + - name: var-etcd + hostPath: + path: /var/etcd + +--- + +# This manifest installs the Service which gets traffic to the Calico +# etcd. +apiVersion: v1 +kind: Service +metadata: + labels: + k8s-app: calico-etcd + name: calico-etcd + namespace: kube-system +spec: + # Select the calico-etcd pod running on the master. + selector: + k8s-app: calico-etcd + # This ClusterIP needs to be known in advance, since we cannot rely + # on DNS to get access to etcd. + clusterIP: 10.96.232.136 + ports: + - port: 6666 + +--- + +# This manifest installs the calico/node container, as well +# as the Calico CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. +kind: DaemonSet +apiVersion: extensions/v1beta1 +metadata: + name: calico-node + namespace: kube-system + labels: + k8s-app: calico-node +spec: + selector: + matchLabels: + k8s-app: calico-node + template: + metadata: + labels: + k8s-app: calico-node + annotations: + # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler + # reserves resources for critical add-on pods so that they can be rescheduled after + # a failure. This annotation works in tandem with the toleration below. + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + hostNetwork: true + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode. + # This, along with the annotation above marks this pod as a critical add-on. + - key: CriticalAddonsOnly + operator: Exists + serviceAccountName: calico-cni-plugin + containers: + # Runs calico/node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: quay.io/calico/node:{{site.data.versions[page.version].first.title}} + env: + # The location of the Calico etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + # Enable BGP. Disable to enforce policy only. + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "kubeadm" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + # Configure the IP Pool from which Pod IPs will be chosen. + - name: CALICO_IPV4POOL_CIDR + value: "192.168.0.0/16" + - name: CALICO_IPV4POOL_IPIP + value: "always" + # Disable IPv6 on Kubernetes. + - name: FELIX_IPV6SUPPORT + value: "false" + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU + value: "1440" + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: "info" + # Auto-detect the BGP IP address. 
+ - name: IP + value: "" + securityContext: + privileged: true + resources: + requests: + cpu: 250m + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + # This container installs the Calico CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: quay.io/calico/cni:{{site.data.versions[page.version].first.components["calico/cni"].version}} + command: ["/install-cni.sh"] + env: + # The location of the Calico etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: calico-config + key: cni_network_config + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + volumes: + # Used by calico/node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + +--- + +# This manifest deploys the Calico policy controller on Kubernetes. +# See https://github.com/projectcalico/k8s-policy +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: calico-policy-controller + namespace: kube-system + labels: + k8s-app: calico-policy +spec: + # The policy controller can only have a single active instance. + replicas: 1 + strategy: + type: Recreate + template: + metadata: + name: calico-policy-controller + namespace: kube-system + labels: + k8s-app: calico-policy-controller + annotations: + # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler + # reserves resources for critical add-on pods so that they can be rescheduled after + # a failure. This annotation works in tandem with the toleration below. + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + # The policy controller must run in the host network namespace so that + # it isn't governed by policy that would prevent it from working. + hostNetwork: true + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode. + # This, along with the annotation above marks this pod as a critical add-on. + - key: CriticalAddonsOnly + operator: Exists + serviceAccountName: calico-policy-controller + containers: + - name: calico-policy-controller + image: quay.io/calico/kube-policy-controller:{{site.data.versions[page.version].first.components["calico/kube-policy-controller"].version}} + env: + # The location of the Calico etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + # The location of the Kubernetes API. Use the default Kubernetes + # service for API access. + - name: K8S_API + value: "https://kubernetes.default:443" + # Since we're running in the host namespace and might not have KubeDNS + # access, configure the container's /etc/hosts to resolve + # kubernetes.default to the correct service clusterIP. 
+            - name: CONFIGURE_ETC_HOSTS
+              value: "true"
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: calico-cni-plugin
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: calico-cni-plugin
+subjects:
+- kind: ServiceAccount
+  name: calico-cni-plugin
+  namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: calico-cni-plugin
+  namespace: kube-system
+rules:
+  - apiGroups: [""]
+    resources:
+      - pods
+      - nodes
+    verbs:
+      - get
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: calico-cni-plugin
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: calico-policy-controller
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: calico-policy-controller
+subjects:
+- kind: ServiceAccount
+  name: calico-policy-controller
+  namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: calico-policy-controller
+  namespace: kube-system
+rules:
+  - apiGroups:
+    - ""
+    - extensions
+    resources:
+      - pods
+      - namespaces
+      - networkpolicies
+    verbs:
+      - watch
+      - list
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: calico-policy-controller
+  namespace: kube-system
diff --git a/v2.4/getting-started/kubernetes/installation/hosted/kubeadm/index.md b/v2.4/getting-started/kubernetes/installation/hosted/kubeadm/index.md
new file mode 100644
index 00000000000..25aefc73455
--- /dev/null
+++ b/v2.4/getting-started/kubernetes/installation/hosted/kubeadm/index.md
@@ -0,0 +1,70 @@
+---
+title: Kubeadm Hosted Install
+---
+
+This document outlines how to install Calico, as well as a single-node
+etcd cluster for use by Calico, on a Kubernetes cluster created by kubeadm.
+
+Users who have deployed their own etcd cluster outside of kubeadm should
+use the [Calico only manifest](../hosted) instead, as it does not deploy its
+own etcd.
+
+You can easily create a cluster compatible with this manifest by following [the official kubeadm guide](http://kubernetes.io/docs/getting-started-guides/kubeadm/).
+
+
+#### Installation
+
+To install Calico and a single-node etcd, run one of the following commands,
+depending on your kubeadm / Kubernetes version:
+
+For Kubeadm 1.6 with Kubernetes 1.6+:
+
+```
+kubectl apply -f {{site.url}}/{{page.version}}/getting-started/kubernetes/installation/hosted/kubeadm/1.6/calico.yaml
+```
+
+>[Click here to view the above yaml directly.](1.6/calico.yaml)
+
+For Kubeadm 1.5 with Kubernetes 1.5.x:
+
+```
+kubectl apply -f {{site.url}}/{{page.version}}/getting-started/kubernetes/installation/hosted/kubeadm/1.5/calico.yaml
+```
+
+>[Click here to view the above yaml directly.](1.5/calico.yaml)
+
+## Using calicoctl in a Kubeadm Cluster
+
+The simplest way to use calicoctl in a kubeadm cluster is by running it as a pod.
+See [using calicoctl with Kubernetes](../../../tutorials/using-calicoctl#b-running-calicoctl-as-a-kubernetes-pod) for more information.
+
+## About
+
+This manifest deploys the standard Calico components described
+[here]({{site.baseurl}}/{{page.version}}/getting-started/kubernetes/installation/hosted)
+as well as a dedicated Calico etcd node on the Kubernetes master. Note that in a production cluster, it is
+recommended you use a secure, replicated etcd cluster.
+
+This manifest uses a node label to select the master node on which Calico's etcd is run.
This label is configured
+automatically on the master when using kubeadm.
+
+To check if the required label is applied, run the following command and
+inspect the output for the correct label:
+
+```shell
+$ kubectl get node -o yaml
+```
+
+### Requirements / Limitations
+
+* This install does not configure etcd TLS.
+* This install expects that one Kubernetes master node has been labeled with:
+  * For Kubeadm 1.5: `kubeadm.alpha.kubernetes.io/role: master`
+  * For Kubeadm 1.6: `node-role.kubernetes.io/master: ""`
+* This install assumes no other pod network has been installed.
+* The CIDR(s) specified with the flag `--cluster-cidr` (pre 1.6) or
+  `--pod-network-cidr` (1.6+) must match the Calico IP Pools for network
+  policy to function correctly. The default is `192.168.0.0/16`.
+* The CIDR specified with the flag `--service-cidr` should not overlap with the Calico IP Pool.
+  * The default CIDR for `--service-cidr` is `10.96.0.0/12`.
+  * The linked calico.yaml manifests set the Calico IP Pool to `192.168.0.0/16`.
diff --git a/v2.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.5/calico.yaml b/v2.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.5/calico.yaml
new file mode 100644
index 00000000000..1c320d2a292
--- /dev/null
+++ b/v2.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.5/calico.yaml
@@ -0,0 +1,164 @@
+---
+layout: null
+---
+# Calico Version {{site.data.versions[page.version].first.title}}
+# http://docs.projectcalico.org/{{page.version}}/releases#{{site.data.versions[page.version].first.title}}
+# This manifest includes the following component versions:
+# calico/node:{{site.data.versions[page.version].first.title}}
+# calico/cni:{{site.data.versions[page.version].first.components["calico/cni"].version}}
+
+# This ConfigMap is used to configure a self-hosted Calico installation.
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: calico-config
+  namespace: kube-system
+data:
+  # The CNI network configuration to install on each node.
+  cni_network_config: |-
+    {
+      "name": "k8s-pod-network",
+      "cniVersion": "0.1.0",
+      "type": "calico",
+      "log_level": "debug",
+      "datastore_type": "kubernetes",
+      "nodename": "__KUBERNETES_NODE_NAME__",
+      "mtu": 1500,
+      "ipam": {
+        "type": "host-local",
+        "subnet": "usePodCidr"
+      },
+      "policy": {
+        "type": "k8s",
+        "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
+      },
+      "kubernetes": {
+        "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
+        "kubeconfig": "__KUBECONFIG_FILEPATH__"
+      }
+    }
+
+---
+
+# This manifest installs the calico/node container, as well
+# as the Calico CNI plugins and network config on
+# each master and worker node in a Kubernetes cluster.
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+  name: calico-node
+  namespace: kube-system
+  labels:
+    k8s-app: calico-node
+spec:
+  selector:
+    matchLabels:
+      k8s-app: calico-node
+  template:
+    metadata:
+      labels:
+        k8s-app: calico-node
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+        scheduler.alpha.kubernetes.io/tolerations: |
+          [{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
+           {"key":"CriticalAddonsOnly", "operator":"Exists"}]
+    spec:
+      hostNetwork: true
+      containers:
+        # Runs calico/node container on each Kubernetes node. This
+        # container programs network policy and routes on each
+        # host.
+ - name: calico-node + image: quay.io/calico/node:{{site.data.versions[page.version].first.title}} + env: + # Use Kubernetes API as the backing datastore. + - name: DATASTORE_TYPE + value: "kubernetes" + # Enable felix info logging. + - name: FELIX_LOGSEVERITYSCREEN + value: "info" + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + # Disable IPV6 on Kubernetes. + - name: FELIX_IPV6SUPPORT + value: "false" + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU + value: "1440" + # Wait for the datastore. + - name: WAIT_FOR_DATASTORE + value: "true" + # The Calico IPv4 pool to use. This should match `--cluster-cidr` + - name: CALICO_IPV4POOL_CIDR + value: "192.168.0.0/16" + # Enable IPIP + - name: CALICO_IPV4POOL_IPIP + value: "always" + # Enable IP-in-IP within Felix. + - name: FELIX_IPINIPENABLED + value: "true" + # Set based on the k8s node name. + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # No IP address needed. + - name: IP + value: "" + securityContext: + privileged: true + resources: + requests: + cpu: 250m + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + # This container installs the Calico CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: quay.io/calico/cni:{{site.data.versions[page.version].first.components["calico/cni"].version}} + command: ["/install-cni.sh"] + env: + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: calico-config + key: cni_network_config + # Set the hostname based on the k8s node name. + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + volumes: + # Used by calico/node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d diff --git a/v2.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.6/calico.yaml b/v2.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.6/calico.yaml new file mode 100644 index 00000000000..ba82747b5ca --- /dev/null +++ b/v2.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.6/calico.yaml @@ -0,0 +1,182 @@ +--- +layout: null +--- +# Calico Version {{site.data.versions[page.version].first.title}} +# http://docs.projectcalico.org/{{page.version}}/releases#{{site.data.versions[page.version].first.title}} +# This manifest includes the following component versions: +# calico/node:{{site.data.versions[page.version].first.title}} +# calico/cni:{{site.data.versions[page.version].first.components["calico/cni"].version}} + +# This ConfigMap is used to configure a self-hosted Calico installation. +kind: ConfigMap +apiVersion: v1 +metadata: + name: calico-config + namespace: kube-system +data: + # The CNI network configuration to install on each node. 
+ cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.1.0", + "type": "calico", + "log_level": "debug", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": 1500, + "ipam": { + "type": "host-local", + "subnet": "usePodCidr" + }, + "policy": { + "type": "k8s", + "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" + }, + "kubernetes": { + "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + } + +--- + +# This manifest installs the calico/node container, as well +# as the Calico CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. +kind: DaemonSet +apiVersion: extensions/v1beta1 +metadata: + name: calico-node + namespace: kube-system + labels: + k8s-app: calico-node +spec: + selector: + matchLabels: + k8s-app: calico-node + template: + metadata: + labels: + k8s-app: calico-node + annotations: + # This, along with the CriticalAddonsOnly toleration below, + # marks the pod as a critical add-on, ensuring it gets + # priority scheduling and that its resources are reserved + # if it ever gets evicted. + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + hostNetwork: true + serviceAccountName: calico-node + tolerations: + # Allow the pod to run on the master. This is required for + # the master to communicate with pods. + - key: node-role.kubernetes.io/master + effect: NoSchedule + # Mark the pod as a critical add-on for rescheduling. + - key: "CriticalAddonsOnly" + operator: "Exists" + containers: + # Runs calico/node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: quay.io/calico/node:{{site.data.versions[page.version].first.title}} + env: + # Use Kubernetes API as the backing datastore. + - name: DATASTORE_TYPE + value: "kubernetes" + # Enable felix info logging. + - name: FELIX_LOGSEVERITYSCREEN + value: "info" + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + # Disable IPV6 on Kubernetes. + - name: FELIX_IPV6SUPPORT + value: "false" + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU + value: "1440" + # Wait for the datastore. + - name: WAIT_FOR_DATASTORE + value: "true" + # The Calico IPv4 pool to use. This should match `--cluster-cidr` + - name: CALICO_IPV4POOL_CIDR + value: "192.168.0.0/16" + # Enable IPIP + - name: CALICO_IPV4POOL_IPIP + value: "always" + # Enable IP-in-IP within Felix. + - name: FELIX_IPINIPENABLED + value: "true" + # Set based on the k8s node name. + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # No IP address needed. + - name: IP + value: "" + securityContext: + privileged: true + resources: + requests: + cpu: 250m + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + # This container installs the Calico CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: quay.io/calico/cni:{{site.data.versions[page.version].first.components["calico/cni"].version}} + command: ["/install-cni.sh"] + env: + # The CNI network config to install on each node. 
+ - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: calico-config + key: cni_network_config + # Set the hostname based on the k8s node name. + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + volumes: + # Used by calico/node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-node + namespace: kube-system diff --git a/v2.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calicoctl.yaml b/v2.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calicoctl.yaml new file mode 100644 index 00000000000..5893c52828e --- /dev/null +++ b/v2.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calicoctl.yaml @@ -0,0 +1,22 @@ +--- +layout: null +--- +# Calico Version {{site.data.versions[page.version].first.title}} +# http://docs.projectcalico.org/{{page.version}}/releases#{{site.data.versions[page.version].first.title}} +# This manifest includes the following component versions: +# calico/ctl:{{site.data.versions[page.version].first.components["calicoctl"].version}} + +apiVersion: v1 +kind: Pod +metadata: + name: calicoctl + namespace: kube-system +spec: + hostNetwork: true + containers: + - name: calicoctl + image: quay.io/calico/ctl:{{site.data.versions[page.version].first.components["calicoctl"].version}} + command: ["/bin/sh", "-c", "while true; do sleep 3600; done"] + env: + - name: DATASTORE_TYPE + value: kubernetes diff --git a/v2.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/index.md b/v2.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/index.md new file mode 100644 index 00000000000..4f13fb68a56 --- /dev/null +++ b/v2.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/index.md @@ -0,0 +1,161 @@ +--- +title: Kubernetes Datastore +--- + +This document describes how to install Calico on Kubernetes in a mode that does not require access to an etcd cluster. +This mode uses the Kubernetes API as the datastore. + +Note that this feature currently comes with a number of limitations, namely: + +- It does not yet support Calico IPAM. It is recommended to use `host-local` IPAM in conjunction with Kubernetes pod CIDR assignments. +- It does not yet support the full set of `calicoctl` commands. +- It does not yet support the full set of calico/node options (such as IP autodiscovery). +- Calico networking support is in Beta and has limited configuration options: + - it only supports a full BGP node-to-node mesh + - it does not yet support BGP peer configuration. + +## Requirements + +The provided manifest configures Calico to use host-local IPAM in conjunction with the Kubernetes assigned +pod CIDRs for each node. + +You must have a cluster which meets the following requirements: + +- You have a Kubernetes cluster configured to use CNI network plugins (i.e. by passing `--network-plugin=cni` to the kubelet) +- Your Kubernetes controller manager is configured to allocate pod CIDRs (i.e. by passing `--allocate-node-cidrs=true` to the controller manager) +- Your Kubernetes controller manager has been provided a cluster-cidr (i.e. 
by passing `--cluster-cidr=192.168.0.0/16`, which the manifest expects by default).
+
+> Note: If you are upgrading from Calico v2.1, the cluster-cidr selected for your controller manager should remain
+> unchanged from the v2.1 install (the v2.1 manifests default to `10.244.0.0/16`).
+
+## Installation
+
+This document describes three installation options for Calico using the Kubernetes API as the datastore:
+
+1. Calico policy with Calico networking (beta)
+2. Calico policy-only with user-supplied networking
+3. Calico policy-only with flannel networking
+
+Ensure you have a cluster which meets the above requirements. There may be additional requirements based on the installation option you choose.
+
+> Note: There is currently no upgrade path to switch between different installation options. Therefore,
+> if you are upgrading from Calico v2.1, use the [Calico policy-only with user-supplied networking](#2-calico-policy-only-with-user-supplied-networking) installation instructions
+> to upgrade Calico policy-only, which leaves the networking solution unchanged.
+
+### 1. Calico policy with Calico networking (Beta)
+
+With Kubernetes as the Calico datastore, Calico has beta support for Calico networking. This provides BGP-based
+networking with a full node-to-node mesh. It is not currently possible to configure the Calico BGP network to peer with
+other routers; future releases of Calico are expected to bring feature parity with the etcd-backed Calico.
+
+To install Calico with Calico networking, run one of the following commands based on your Kubernetes version:
+
+For **Kubernetes 1.6+** clusters:
+
+```
+kubectl apply -f {{site.url}}/{{page.version}}/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.6/calico.yaml
+```
+
+>[Click here to view the above yaml directly.](calico-networking/1.6/calico.yaml)
+
+For **Kubernetes 1.5** clusters:
+
+```
+kubectl apply -f {{site.url}}/{{page.version}}/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.5/calico.yaml
+```
+
+>[Click here to view the above yaml directly.](calico-networking/1.5/calico.yaml)
+
+#### Calico policy with Calico networking on kubeadm
+
+The above manifests are compatible with kubeadm clusters initialized with a
+pod-network-cidr matching the default pool of `192.168.0.0/16`, as follows:
+
+```
+kubeadm init --pod-network-cidr=192.168.0.0/16
+```
+
+### 2. Calico policy-only with user-supplied networking
+
+If you run Calico in policy-only mode, it is necessary to configure your network to route pod traffic based on pod
+CIDR allocations, either through static routes, a Kubernetes cloud-provider integration, or flannel (self-installed).
+
+To install Calico in policy-only mode, run one of the following commands based on your Kubernetes version:
+
+For **Kubernetes 1.6+** clusters:
+
+```
+kubectl apply -f {{site.url}}/{{page.version}}/getting-started/kubernetes/installation/hosted/kubernetes-datastore/policy-only/1.6/calico.yaml
+```
+
+>[Click here to view the above yaml directly.](policy-only/1.6/calico.yaml)
+
+For **Kubernetes 1.5** clusters:
+
+```
+kubectl apply -f {{site.url}}/{{page.version}}/getting-started/kubernetes/installation/hosted/kubernetes-datastore/policy-only/1.5/calico.yaml
+```
+
+>[Click here to view the above yaml directly.](policy-only/1.5/calico.yaml)
+
+### 3. Calico policy-only with flannel networking
+
+The [Canal](https://github.com/projectcalico/canal) project provides a way to easily deploy
+Calico with flannel networking.
+
+Refer to the following [Kubernetes self-hosted install guide](https://github.com/projectcalico/canal/blob/master/k8s-install/README.md)
+in the Canal project for details on installing Calico with flannel.
+
+### RBAC
+
+If your Kubernetes cluster has RBAC enabled, you'll need to create RBAC roles for Calico.
+Apply the following manifest to create these RBAC roles.
+
+>Note: The following RBAC policy is compatible with the Kubernetes v1.6+ manifests only.
+
+```
+kubectl apply -f {{site.url}}/{{page.version}}/getting-started/kubernetes/installation/hosted/rbac.yaml
+```
+
+>[Click here to view the above yaml directly.](../rbac.yaml)
+
+Once installed, you can try out NetworkPolicy by following the [simple policy guide](../../../tutorials/simple-policy).
+
+Below are a few examples to help you get started.
+
+## Configuration details
+
+The following environment variable configuration options are supported by the various Calico components **when running without etcd**.
+
+| Option            | Description    | Examples
+|-------------------|----------------|----------
+| DATASTORE_TYPE    | Indicates the datastore to use | kubernetes
+| KUBECONFIG        | When using the Kubernetes datastore, the location of a kubeconfig file to use. | /path/to/kube/config
+| K8S_API_ENDPOINT  | Location of the Kubernetes API. Not required if using kubeconfig. | https://kubernetes-api:443
+| K8S_CERT_FILE     | Location of a client certificate for accessing the Kubernetes API. | /path/to/cert
+| K8S_KEY_FILE      | Location of a client key for accessing the Kubernetes API. | /path/to/key
+| K8S_CA_FILE       | Location of a CA for accessing the Kubernetes API. | /path/to/ca
+| K8S_TOKEN         | Token to be used for accessing the Kubernetes API. |
+
+An example using `calicoctl`:
+
+```shell
+$ export DATASTORE_TYPE=kubernetes
+$ export KUBECONFIG=~/.kube/config
+$ calicoctl get workloadendpoints
+
+HOSTNAME                       ORCHESTRATOR   WORKLOAD                                        NAME
+kubernetes-minion-group-tbmi   k8s            kube-system.kube-dns-v20-jhk10                  eth0
+kubernetes-minion-group-x7ce   k8s            kube-system.kubernetes-dashboard-v1.4.0-wtrtm   eth0
+```
+
+## How it works
+
+Calico typically uses `etcd` to store information about Kubernetes Pods, Namespaces, and NetworkPolicies. This information
+is written to etcd by the Calico CNI plugin and policy controller, and is interpreted by Felix and BIRD to program the dataplane on
+each host in the cluster.
+
+The manifests above instead deploy Calico such that Felix uses the Kubernetes API directly to learn the required information to enforce policy,
+removing Calico's dependency on etcd and the need for the Calico Kubernetes policy controller.
+
+The Calico CNI plugin is still required to configure each pod's virtual ethernet device and network namespace.
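+
+As a quick sanity check after applying one of the manifests above, you can confirm that the `calico-node` pods are
+running and were deployed with the Kubernetes datastore configuration. This is a minimal sketch; the DaemonSet name,
+namespace, and label below come from the manifests on this page, and the first container is assumed to be `calico-node`.
+
+```shell
+# Check that the calico-node pods are running on each node.
+kubectl get pods -n kube-system -l k8s-app=calico-node -o wide
+
+# Confirm the DaemonSet configures DATASTORE_TYPE=kubernetes.
+kubectl get daemonset calico-node -n kube-system \
+  -o jsonpath='{.spec.template.spec.containers[0].env[?(@.name=="DATASTORE_TYPE")].value}'
+```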
diff --git a/v2.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/policy-only/1.5/calico.yaml b/v2.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/policy-only/1.5/calico.yaml new file mode 100644 index 00000000000..b530b027dea --- /dev/null +++ b/v2.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/policy-only/1.5/calico.yaml @@ -0,0 +1,161 @@ +--- +layout: null +--- +# Calico Version {{site.data.versions[page.version].first.title}} +# http://docs.projectcalico.org/{{page.version}}/releases#{{site.data.versions[page.version].first.title}} +# This manifest includes the following component versions: +# calico/node:{{site.data.versions[page.version].first.title}} +# calico/cni:{{site.data.versions[page.version].first.components["calico/cni"].version}} + +# This ConfigMap is used to configure a self-hosted Calico installation. +kind: ConfigMap +apiVersion: v1 +metadata: + name: calico-config + namespace: kube-system +data: + # The CNI network configuration to install on each node. + cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.1.0", + "type": "calico", + "log_level": "debug", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": 1500, + "ipam": { + "type": "host-local", + "subnet": "usePodCidr" + }, + "policy": { + "type": "k8s", + "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" + }, + "kubernetes": { + "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + } + +--- + +# This manifest installs the calico/node container, as well +# as the Calico CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. +kind: DaemonSet +apiVersion: extensions/v1beta1 +metadata: + name: calico-node + namespace: kube-system + labels: + k8s-app: calico-node +spec: + selector: + matchLabels: + k8s-app: calico-node + template: + metadata: + labels: + k8s-app: calico-node + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + scheduler.alpha.kubernetes.io/tolerations: | + [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, + {"key":"CriticalAddonsOnly", "operator":"Exists"}] + spec: + hostNetwork: true + containers: + # Runs calico/node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: quay.io/calico/node:{{site.data.versions[page.version].first.title}} + env: + # Use Kubernetes API as the backing datastore. + - name: DATASTORE_TYPE + value: "kubernetes" + # Enable felix info logging. + - name: FELIX_LOGSEVERITYSCREEN + value: "info" + # Don't enable BGP. + - name: CALICO_NETWORKING_BACKEND + value: "none" + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s,policyonly" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + # Disable IPV6 on Kubernetes. + - name: FELIX_IPV6SUPPORT + value: "false" + # Wait for the datastore. + - name: WAIT_FOR_DATASTORE + value: "true" + # The Calico IPv4 pool to use. This should match `--cluster-cidr` + - name: CALICO_IPV4POOL_CIDR + value: "192.168.0.0/16" + # Enable IPIP + - name: CALICO_IPV4POOL_IPIP + value: "always" + # Set based on the k8s node name. 
+ - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # No IP address needed. + - name: IP + value: "" + securityContext: + privileged: true + resources: + requests: + cpu: 250m + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + # This container installs the Calico CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: quay.io/calico/cni:{{site.data.versions[page.version].first.components["calico/cni"].version}} + command: ["/install-cni.sh"] + env: + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: calico-config + key: cni_network_config + # Set the hostname based on the k8s node name. + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + volumes: + # Used by calico/node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d diff --git a/v2.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/policy-only/1.6/calico.yaml b/v2.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/policy-only/1.6/calico.yaml new file mode 100644 index 00000000000..34fca1817a5 --- /dev/null +++ b/v2.4/getting-started/kubernetes/installation/hosted/kubernetes-datastore/policy-only/1.6/calico.yaml @@ -0,0 +1,179 @@ +--- +layout: null +--- +# Calico Version {{site.data.versions[page.version].first.title}} +# http://docs.projectcalico.org/{{page.version}}/releases#{{site.data.versions[page.version].first.title}} +# This manifest includes the following component versions: +# calico/node:{{site.data.versions[page.version].first.title}} +# calico/cni:{{site.data.versions[page.version].first.components["calico/cni"].version}} + +# This ConfigMap is used to configure a self-hosted Calico installation. +kind: ConfigMap +apiVersion: v1 +metadata: + name: calico-config + namespace: kube-system +data: + # The CNI network configuration to install on each node. + cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.1.0", + "type": "calico", + "log_level": "debug", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": 1500, + "ipam": { + "type": "host-local", + "subnet": "usePodCidr" + }, + "policy": { + "type": "k8s", + "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" + }, + "kubernetes": { + "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + } + +--- + +# This manifest installs the calico/node container, as well +# as the Calico CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. +kind: DaemonSet +apiVersion: extensions/v1beta1 +metadata: + name: calico-node + namespace: kube-system + labels: + k8s-app: calico-node +spec: + selector: + matchLabels: + k8s-app: calico-node + template: + metadata: + labels: + k8s-app: calico-node + annotations: + # This, along with the CriticalAddonsOnly toleration below, + # marks the pod as a critical add-on, ensuring it gets + # priority scheduling and that its resources are reserved + # if it ever gets evicted. 
+ scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + hostNetwork: true + serviceAccountName: calico-node + tolerations: + # Allow the pod to run on the master. This is required for + # the master to communicate with pods. + - key: node-role.kubernetes.io/master + effect: NoSchedule + # Mark the pod as a critical add-on for rescheduling. + - key: "CriticalAddonsOnly" + operator: "Exists" + containers: + # Runs calico/node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: quay.io/calico/node:{{site.data.versions[page.version].first.title}} + env: + # Use Kubernetes API as the backing datastore. + - name: DATASTORE_TYPE + value: "kubernetes" + # Enable felix info logging. + - name: FELIX_LOGSEVERITYSCREEN + value: "info" + # Don't enable BGP. + - name: CALICO_NETWORKING_BACKEND + value: "none" + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s,policyonly" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + # Disable IPV6 on Kubernetes. + - name: FELIX_IPV6SUPPORT + value: "false" + # Wait for the datastore. + - name: WAIT_FOR_DATASTORE + value: "true" + # The Calico IPv4 pool to use. This should match `--cluster-cidr` + - name: CALICO_IPV4POOL_CIDR + value: "192.168.0.0/16" + # Enable IPIP + - name: CALICO_IPV4POOL_IPIP + value: "always" + # Set based on the k8s node name. + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # No IP address needed. + - name: IP + value: "" + securityContext: + privileged: true + resources: + requests: + cpu: 250m + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + # This container installs the Calico CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: quay.io/calico/cni:{{site.data.versions[page.version].first.components["calico/cni"].version}} + command: ["/install-cni.sh"] + env: + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: calico-config + key: cni_network_config + # Set the hostname based on the k8s node name. + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + volumes: + # Used by calico/node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + # Used to install CNI. 
+ - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-node + namespace: kube-system diff --git a/v2.4/getting-started/kubernetes/installation/hosted/rbac.yaml b/v2.4/getting-started/kubernetes/installation/hosted/rbac.yaml new file mode 100644 index 00000000000..2a4e3bbe2f5 --- /dev/null +++ b/v2.4/getting-started/kubernetes/installation/hosted/rbac.yaml @@ -0,0 +1,100 @@ +--- +layout: null +--- +# Calico Version {{site.data.versions[page.version].first.title}} +# http://docs.projectcalico.org/{{page.version}}/releases#{{site.data.versions[page.version].first.title}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: calico-node + namespace: kube-system +rules: + - apiGroups: [""] + resources: + - namespaces + verbs: + - get + - list + - watch + - apiGroups: [""] + resources: + - pods/status + verbs: + - update + - apiGroups: [""] + resources: + - pods + verbs: + - get + - list + - watch + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - update + - watch + - apiGroups: ["extensions"] + resources: + - thirdpartyresources + verbs: + - create + - get + - list + - watch + - apiGroups: ["extensions"] + resources: + - networkpolicies + verbs: + - get + - list + - watch + - apiGroups: ["projectcalico.org"] + resources: + - globalbgppeers + verbs: + - get + - list + - apiGroups: ["projectcalico.org"] + resources: + - globalconfigs + - globalbgpconfigs + verbs: + - create + - get + - list + - update + - watch + - apiGroups: ["projectcalico.org"] + resources: + - ippools + verbs: + - create + - get + - list + - update + - watch + - apiGroups: ["alpha.projectcalico.org"] + resources: + - systemnetworkpolicies + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: calico-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: calico-node + namespace: kube-system diff --git a/v2.4/getting-started/kubernetes/installation/index.md b/v2.4/getting-started/kubernetes/installation/index.md new file mode 100644 index 00000000000..1f9ecd4d86c --- /dev/null +++ b/v2.4/getting-started/kubernetes/installation/index.md @@ -0,0 +1,45 @@ +--- +title: Installing Calico on Kubernetes +--- + +Calico can be installed on a Kubernetes cluster in a number of configurations. This document +gives an overview of the most popular approaches, and provides links to each for more detailed +information. + +## Requirements + +Calico can run on any Kubernetes cluster which meets the following criteria. + +- The kubelet must be configured to use CNI network plugins (e.g `--network-plugin=cni`). +- The kube-proxy must be started in `iptables` proxy mode. This is the default as of Kubernetes v1.2.0. +- The kube-proxy must be started without the `--masquerade-all` flag, which conflicts with Calico policy. +- The Kubernetes NetworkPolicy API requires at least Kubernetes version v1.3.0. +- When RBAC is enabled, the proper accounts, roles, and bindings must be defined + and utilized by the Calico components. See the cluster roles and bindings used + in the [hosted](hosted#rbac) approach for an example. + +## [Calico Hosted Install](hosted) + +Installs the Calico components as a DaemonSet entirely using Kubernetes manifests through a single +kubectl command. 
This method is supported for Kubernetes versions >= v1.4.0.
+
+## [Custom Installation](integration)
+
+In addition to the hosted approach above, the Calico components can also be installed using your
+own orchestration mechanisms (e.g. Ansible, Chef, or bash scripts).
+
+Follow the [integration guide](integration) if you're using a Kubernetes version < v1.4.0, or if you would like
+to integrate Calico into your own installation or deployment scripts.
+
+## Third Party Integrations
+
+A number of popular Kubernetes installers use Calico to provide networking and/or network policy.
+Here are a few, listed alphabetically.
+
+- [Apprenda Kismatic Enterprise Toolkit](https://github.com/apprenda/kismatic)
+- [Container Linux by CoreOS](https://coreos.com/kubernetes/docs/latest/)
+- [GCE](http://kubernetes.io/docs/getting-started-guides/network-policy/calico/)
+- [Gravitational Telekube](http://gravitational.com/blog/gravitational-tigera-partnership/)
+- [Kargo](https://github.com/kubernetes-incubator/kargo)
+- [Kops](https://github.com/kubernetes/kops)
+- [StackPointCloud](https://stackpoint.io)
diff --git a/v2.4/getting-started/kubernetes/installation/integration.md b/v2.4/getting-started/kubernetes/installation/integration.md
new file mode 100644
index 00000000000..8f637f8c204
--- /dev/null
+++ b/v2.4/getting-started/kubernetes/installation/integration.md
@@ -0,0 +1,227 @@
+---
+title: Integration Guide
+---
+
+
+This document explains the components necessary to install Calico on Kubernetes when integrating
+with custom configuration management.
+
+The [hosted installation method](hosted/) will perform these steps automatically for you and is recommended
+for most users.
+
+* TOC
+{:toc}
+
+## Requirements
+
+- An existing Kubernetes cluster running Kubernetes >= v1.1. To use NetworkPolicy, Kubernetes >= v1.3.0 is required.
+- An `etcd` cluster accessible by all nodes in the Kubernetes cluster
+  - Calico can share the etcd cluster used by Kubernetes, but in some cases it's recommended that a separate cluster is set up.
+    A number of production users do share the etcd cluster between the two, but separating them gives better performance at high scale.
+
+> **NOTE:**
+>
+> Calico can also enforce network policy [without a dependency on etcd](hosted/kubernetes-datastore/).
+
+## About the Calico Components
+
+There are three components of a Calico / Kubernetes integration.
+
+- The Calico per-node Docker container, [calico/node](https://quay.io/repository/calico/node?tab=tags)
+- The [cni-plugin](https://github.com/projectcalico/cni-plugin) network plugin binaries.
+  - This is the combination of two binary executables and a configuration file.
+- When using Kubernetes NetworkPolicy, the Calico policy controller is also required.
+
+The `calico/node` Docker container must be run on the Kubernetes master and each
+Kubernetes node in your cluster. It contains the BGP agent necessary for Calico routing to occur,
+and the Felix agent which programs network policy rules.
+
+The `cni-plugin` network plugin integrates directly with the Kubernetes `kubelet` process
+on each node to discover which pods have been created, and adds them to Calico networking.
+
+The `calico/kube-policy-controller` container runs as a pod on top of Kubernetes and implements
+the NetworkPolicy API. This component requires Kubernetes >= 1.3.0.
+
+## Installing `calico/node`
+
+### Run `calico/node` and configure the node.
+
+The Kubernetes master and each Kubernetes node require the `calico/node` container.
+Each node must also be recorded in the Calico datastore.
+
+The `calico/node` container can be run directly through Docker, or it can be
+launched using the `calicoctl` utility.
+
+```
+# Download and install `calicoctl`
+wget {{site.data.versions[page.version].first.components.calicoctl.download_url}}
+sudo chmod +x calicoctl
+
+# Run the calico/node container
+sudo ETCD_ENDPOINTS=http://<ETCD_IP>:<ETCD_PORT> ./calicoctl node run --node-image=quay.io/calico/node:{{site.data.versions[page.version].first.title}}
+```
+
+See the [`calicoctl node run` documentation]({{site.baseurl}}/{{page.version}}/reference/calicoctl/commands/node/)
+for more information.
+
+### Example systemd unit file (calico-node.service)
+
+If you're using systemd as your init system, the following service file can be used.
+
+```bash
+[Unit]
+Description=calico node
+After=docker.service
+Requires=docker.service
+
+[Service]
+User=root
+Environment=ETCD_ENDPOINTS=http://<ETCD_IP>:<ETCD_PORT>
+PermissionsStartOnly=true
+ExecStart=/usr/bin/docker run --net=host --privileged --name=calico-node \
+  -e ETCD_ENDPOINTS=${ETCD_ENDPOINTS} \
+  -e NODENAME=${HOSTNAME} \
+  -e IP= \
+  -e NO_DEFAULT_POOLS= \
+  -e AS= \
+  -e CALICO_LIBNETWORK_ENABLED=true \
+  -e IP6= \
+  -e CALICO_NETWORKING_BACKEND=bird \
+  -e FELIX_DEFAULTENDPOINTTOHOSTACTION=ACCEPT \
+  -v /var/run/calico:/var/run/calico \
+  -v /lib/modules:/lib/modules \
+  -v /run/docker/plugins:/run/docker/plugins \
+  -v /var/run/docker.sock:/var/run/docker.sock \
+  -v /var/log/calico:/var/log/calico \
+  quay.io/calico/node:{{site.data.versions[page.version].first.title}}
+ExecStop=/usr/bin/docker rm -f calico-node
+Restart=always
+RestartSec=10
+
+[Install]
+WantedBy=multi-user.target
+```
+> Replace `<ETCD_IP>:<ETCD_PORT>` with your etcd configuration.
+
+> **NOTE:**
+>
+> To ensure reasonable dataplane programming latency on a system under load,
+> `calico/node` requires a CPU reservation of at least 0.25 cores with additional
+> benefits up to 0.5 cores.
+
+## Installing the Calico CNI plugins
+
+The Kubernetes `kubelet` should be configured to use the `calico` and `calico-ipam` plugins.
+
+### Install the Calico plugins
+
+Download the binaries and make sure they're executable:
+
+```bash
+wget -N -P /opt/cni/bin {{site.data.versions[page.version].first.components["calico/cni"].download_calico_url}}
+wget -N -P /opt/cni/bin {{site.data.versions[page.version].first.components["calico/cni"].download_calico_ipam_url}}
+chmod +x /opt/cni/bin/calico /opt/cni/bin/calico-ipam
+```
+
+The Calico CNI plugins require a standard CNI config file. The `policy` section is only required when
+deploying the `calico/kube-policy-controller` for NetworkPolicy.
+
+```bash
+mkdir -p /etc/cni/net.d
+cat >/etc/cni/net.d/10-calico.conf <<EOF
+{
+    "name": "calico-k8s-network",
+    "cniVersion": "0.1.0",
+    "type": "calico",
+    "etcd_endpoints": "http://<ETCD_IP>:<ETCD_PORT>",
+    "log_level": "info",
+    "ipam": {
+        "type": "calico-ipam"
+    },
+    "policy": {
+        "type": "k8s"
+    },
+    "kubernetes": {
+        "kubeconfig": "<KUBECONFIG_FILEPATH>"
+    }
+}
+EOF
+```
+
+Replace `<ETCD_IP>:<ETCD_PORT>` with your etcd configuration.
+Replace `<KUBECONFIG_FILEPATH>` with your kubeconfig file. See [kubernetes kubeconfig](http://kubernetes.io/docs/user-guide/kubeconfig-file/) for more information about kubeconfig.
+
+For more information on configuring the Calico CNI plugins, see the [configuration guide]({{site.baseurl}}/{{page.version}}/reference/cni-plugin/configuration)
+
+### Install standard CNI lo plugin
+
+In addition to the CNI plugin specified by the CNI config file, Kubernetes requires the standard CNI loopback plugin.
+
+Download the `loopback` binary and copy it to the CNI binary directory.
+
+```bash
+wget https://github.com/containernetworking/cni/releases/download/v0.3.0/cni-v0.3.0.tgz
+tar -zxvf cni-v0.3.0.tgz
+sudo cp loopback /opt/cni/bin/
+```
+
+## Installing the Calico network policy controller
+
+The `calico/kube-policy-controller` implements the Kubernetes NetworkPolicy API by watching the
+Kubernetes API for Pod, Namespace, and NetworkPolicy events and configuring Calico in response. It runs as
+a single pod managed by a Deployment.
+
+To install the policy controller:
+
+- Download the [policy controller manifest](policy-controller.yaml).
+- Modify `<ETCD_ENDPOINTS>` to point to your etcd cluster.
+- Install it using `kubectl`.
+
+```shell
+$ kubectl create -f policy-controller.yaml
+```
+
+After a few moments, you should see the policy controller enter `Running` state:
+
+```shell
+$ kubectl get pods --namespace=kube-system
+NAME                       READY     STATUS    RESTARTS   AGE
+calico-policy-controller   1/1       Running   0          1m
+```
+
+For more information on how to configure the policy controller,
+see the [configuration guide]({{site.baseurl}}/{{page.version}}/reference/policy-controller/configuration).
+
+## Configuring Kubernetes
+
+### Configuring the Kubelet
+
+The Kubelet needs to be configured to use the Calico network plugin when starting pods.
+
+The `kubelet` can be configured to use Calico by starting it with the following options:
+
+- `--network-plugin=cni`
+- `--cni-conf-dir=/etc/cni/net.d`
+- `--cni-bin-dir=/opt/cni/bin`
+
+For Kubernetes versions prior to v1.4.0, the `cni-conf-dir` and `cni-bin-dir` options are
+not supported. Use `--network-plugin-dir=/etc/cni/net.d` instead.
+
+See the [`kubelet` documentation](http://kubernetes.io/docs/admin/kubelet/)
+for more details.
+
+### Configuring the Kube-Proxy
+
+In order to use Calico policy with Kubernetes, the `kube-proxy` component must
+be configured to leave the source address of service-bound traffic intact.
+This feature was first officially supported in Kubernetes v1.1.0 and is the default mode starting
+in Kubernetes v1.2.0.
+
+We highly recommend using the latest stable Kubernetes release, but if you're using an older release
+there are two ways to enable this behavior.
+
+- Option 1: Start the `kube-proxy` with the `--proxy-mode=iptables` option.
+- Option 2: Annotate the Kubernetes Node API object with `net.experimental.kubernetes.io/proxy-mode` set to `iptables` (see the example below).
+
+See the [kube-proxy documentation](http://kubernetes.io/docs/admin/kube-proxy/)
+for more details.
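+
+As a rough sketch of Option 2 (the node name `k8s-node-01` is illustrative), the annotation can be applied with
+`kubectl`:
+
+```bash
+# Annotate a node so its kube-proxy uses the iptables proxy mode.
+kubectl annotate node k8s-node-01 net.experimental.kubernetes.io/proxy-mode=iptables
+
+# Verify the annotation was applied.
+kubectl get node k8s-node-01 -o jsonpath='{.metadata.annotations}'
+```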
diff --git a/v2.4/getting-started/kubernetes/installation/manifests/skydns.yaml b/v2.4/getting-started/kubernetes/installation/manifests/skydns.yaml new file mode 100644 index 00000000000..1caac5f6276 --- /dev/null +++ b/v2.4/getting-started/kubernetes/installation/manifests/skydns.yaml @@ -0,0 +1,124 @@ +apiVersion: v1 +kind: Service +metadata: + name: kube-dns + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "KubeDNS" +spec: + selector: + k8s-app: kube-dns + clusterIP: 10.100.0.10 + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP +--- + +apiVersion: v1 +kind: ReplicationController +metadata: + name: kube-dns-v19 + namespace: kube-system + labels: + k8s-app: kube-dns + version: v19 + kubernetes.io/cluster-service: "true" +spec: + replicas: 1 + selector: + k8s-app: kube-dns + version: v19 + template: + metadata: + labels: + k8s-app: kube-dns + version: v19 + kubernetes.io/cluster-service: "true" + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' + spec: + containers: + - name: kubedns + image: gcr.io/google_containers/kubedns-amd64:1.7 + resources: + # TODO: Set memory limits when we've profiled the container for large + # clusters, then set request = limit to keep this container in + # guaranteed class. Currently, this container falls into the + # "burstable" category so the kubelet doesn't backoff from restarting it. + limits: + cpu: 100m + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + livenessProbe: + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /readiness + port: 8081 + scheme: HTTP + # we poll on pod startup for the Kubernetes master service and + # only setup the /readiness HTTP server once that's available. + initialDelaySeconds: 30 + timeoutSeconds: 5 + args: + # command = "/kube-dns" + - --domain=cluster.local + - --dns-port=10053 + ports: + - containerPort: 10053 + name: dns-local + protocol: UDP + - containerPort: 10053 + name: dns-tcp-local + protocol: TCP + - name: dnsmasq + image: gcr.io/google_containers/kube-dnsmasq-amd64:1.3 + args: + - --cache-size=1000 + - --no-resolv + - --server=127.0.0.1#10053 + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - name: healthz + image: gcr.io/google_containers/exechealthz-amd64:1.1 + resources: + # keep request = limit to keep this container in guaranteed class + limits: + cpu: 10m + memory: 50Mi + requests: + cpu: 10m + # Note that this container shouldn't really need 50Mi of memory. The + # limits are set higher than expected pending investigation on #29688. + # The extra memory was stolen from the kubedns container to keep the + # net memory requested by the pod constant. + memory: 50Mi + args: + - -cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1 >/dev/null && nslookup kubernetes.default.svc.cluster.local 127.0.0.1:10053 >/dev/null + - -port=8080 + - -quiet + ports: + - containerPort: 8080 + protocol: TCP + dnsPolicy: Default # Don't use cluster DNS. 
diff --git a/v2.4/getting-started/kubernetes/installation/policy-controller.yaml b/v2.4/getting-started/kubernetes/installation/policy-controller.yaml
new file mode 100644
index 00000000000..2a9e4bb97ec
--- /dev/null
+++ b/v2.4/getting-started/kubernetes/installation/policy-controller.yaml
@@ -0,0 +1,54 @@
+---
+layout: null
+---
+# Calico Version {{site.data.versions[page.version].first.title}}
+# http://docs.projectcalico.org/{{page.version}}/releases#{{site.data.versions[page.version].first.title}}
+# This manifest includes the following component versions:
+#   calico/kube-policy-controller:{{site.data.versions[page.version].first.components["calico/kube-policy-controller"].version}}
+
+# Create this manifest using kubectl to deploy
+# the Calico policy controller on Kubernetes.
+# It deploys a single instance of the policy controller.
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: calico-policy-controller
+  namespace: kube-system
+  labels:
+    k8s-app: calico-policy
+spec:
+  # Only a single instance of the policy controller should be
+  # active at a time. Since this pod is run as a Deployment,
+  # Kubernetes will ensure the pod is recreated in case of failure,
+  # removing the need for passive backups.
+  replicas: 1
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      name: calico-policy-controller
+      namespace: kube-system
+      labels:
+        k8s-app: calico-policy
+    spec:
+      hostNetwork: true
+      containers:
+        - name: calico-policy-controller
+          # Make sure to pin this to your desired version.
+          image: quay.io/calico/kube-policy-controller:{{site.data.versions[page.version].first.components["calico/kube-policy-controller"].version}}
+          env:
+            # Configure the policy controller with the location of
+            # your etcd cluster.
+            - name: ETCD_ENDPOINTS
+              value: "<ETCD_ENDPOINTS>"
+            # Location of the Kubernetes API - this shouldn't need to be
+            # changed so long as it is used in conjunction with
+            # CONFIGURE_ETC_HOSTS="true".
+            - name: K8S_API
+              value: "https://kubernetes.default:443"
+            # Configure /etc/hosts within the container to resolve
+            # the kubernetes.default Service to the correct clusterIP
+            # using the environment provided by the kubelet.
+            # This removes the need for KubeDNS to resolve the Service.
+ - name: CONFIGURE_ETC_HOSTS + value: "true" diff --git a/v2.4/getting-started/kubernetes/installation/rbac.yaml b/v2.4/getting-started/kubernetes/installation/rbac.yaml new file mode 100644 index 00000000000..21ed8633db8 --- /dev/null +++ b/v2.4/getting-started/kubernetes/installation/rbac.yaml @@ -0,0 +1,62 @@ +--- +layout: null +--- +# Calico Version {{site.data.versions[page.version].first.title}} +# http://docs.projectcalico.org/{{page.version}}/releases#{{site.data.versions[page.version].first.title}} + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: calico-policy-controller + namespace: kube-system +rules: + - apiGroups: + - "" + - extensions + resources: + - pods + - namespaces + - networkpolicies + verbs: + - watch + - list +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: calico-policy-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-policy-controller +subjects: +- kind: ServiceAccount + name: calico-policy-controller + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: calico-node + namespace: kube-system +rules: + - apiGroups: [""] + resources: + - pods + - nodes + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: calico-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: calico-node + namespace: kube-system diff --git a/v2.4/getting-started/kubernetes/installation/vagrant/Vagrantfile b/v2.4/getting-started/kubernetes/installation/vagrant/Vagrantfile new file mode 100644 index 00000000000..4e1d91201ad --- /dev/null +++ b/v2.4/getting-started/kubernetes/installation/vagrant/Vagrantfile @@ -0,0 +1,58 @@ +# Size of the cluster created by Vagrant +num_instances=3 + +# Change basename of the VM +instance_name_prefix="k8s-node" + +# Official CoreOS channel from which updates should be downloaded +update_channel='stable' + +Vagrant.configure("2") do |config| + # always use Vagrants insecure key + config.ssh.insert_key = false + + config.vm.box = "coreos-%s" % update_channel + config.vm.box_version = ">= 1122.0.0" + config.vm.box_url = "http://%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json" % update_channel + + config.vm.provider :virtualbox do |v| + # On VirtualBox, we don't have guest additions or a functional vboxsf + # in CoreOS, so tell Vagrant that so it can be smarter. + v.check_guest_additions = false + v.memory = 1024 + v.cpus = 1 + v.functional_vboxsf = false + end + + # Set up each box + (1..num_instances).each do |i| + if i == 1 + vm_name = "k8s-master" + else + vm_name = "%s-%02d" % [instance_name_prefix, i-1] + end + + config.vm.define vm_name do |host| + host.vm.hostname = vm_name + + ip = "172.18.18.#{i+100}" + host.vm.network :private_network, ip: ip + # Workaround VirtualBox issue where eth1 has 2 IP Addresses at startup + host.vm.provision :shell, :inline => "sudo /usr/bin/ip addr flush dev eth1" + host.vm.provision :shell, :inline => "sudo /usr/bin/ip addr add #{ip}/24 dev eth1" + + if i == 1 + # Configure the master. 
+        host.vm.provision :file, :source => "master-config.yaml", :destination => "/tmp/vagrantfile-user-data"
+        host.vm.provision :shell, :inline => "mv /tmp/vagrantfile-user-data /var/lib/coreos-vagrant/", :privileged => true
+
+        host.vm.provision :shell, :inline => "echo '127.0.0.1\tlocalhost' > /etc/hosts", :privileged => true
+        host.vm.provision :shell, :inline => "mkdir -p /etc/kubernetes/manifests/", :privileged => true
+      else
+        # Configure a node.
+        host.vm.provision :file, :source => "node-config.yaml", :destination => "/tmp/vagrantfile-user-data"
+        host.vm.provision :shell, :inline => "mv /tmp/vagrantfile-user-data /var/lib/coreos-vagrant/", :privileged => true
+      end
+    end
+  end
+end
diff --git a/v2.4/getting-started/kubernetes/installation/vagrant/index.md b/v2.4/getting-started/kubernetes/installation/vagrant/index.md
new file mode 100644
index 00000000000..f534b48517b
--- /dev/null
+++ b/v2.4/getting-started/kubernetes/installation/vagrant/index.md
@@ -0,0 +1,115 @@
+---
+title: Deploying Calico and Kubernetes on Container Linux by CoreOS using Vagrant and VirtualBox
+---
+
+These instructions allow you to set up a Kubernetes cluster with Calico networking using Vagrant and the [Calico CNI plugin][cni-plugin]. This guide does not set up TLS between Kubernetes components.
+
+## 1. Deploy cluster using Vagrant
+
+### 1.1 Install dependencies
+
+* [VirtualBox][virtualbox] 5.0.0 or greater.
+* [Vagrant][vagrant] 1.7.4 or greater.
+* [Curl][curl]
+
+### 1.2 Download the source files
+
+    curl -O {{site.url}}{{page.dir}}Vagrantfile
+    curl -O {{site.url}}{{page.dir}}master-config.yaml
+    curl -O {{site.url}}{{page.dir}}node-config.yaml
+
+### 1.3 Startup and SSH
+
+Run
+
+    vagrant up
+
+> *Note*: This will deploy a Kubernetes master and two Kubernetes nodes. To run more nodes, modify the value `num_instances` in the Vagrantfile before running `vagrant up`.
+
+To connect to your servers
+
+* Linux/Mac OS X
+    * run `vagrant ssh <hostname>`
+* Windows
+    * Follow instructions from https://github.com/nickryand/vagrant-multi-putty
+    * run `vagrant putty <hostname>`
+
+### 1.4 Verify environment
+
+You should now have three CoreOS Container Linux servers:
+
+| Hostname    | IP            |
+|-------------|---------------|
+| k8s-master  | 172.18.18.101 |
+| k8s-node-01 | 172.18.18.102 |
+| k8s-node-02 | 172.18.18.103 |
+
+At this point, it's worth checking that your servers can ping each other.
+
+From k8s-master
+
+    ping 172.18.18.102
+    ping 172.18.18.103
+
+From k8s-node-01
+
+    ping 172.18.18.101
+    ping 172.18.18.103
+
+From k8s-node-02
+
+    ping 172.18.18.101
+    ping 172.18.18.102
+
+If you see ping failures, the likely culprit is a problem with the VirtualBox network between the VMs. Check
+that each host is connected to the same virtual network adapter in VirtualBox; rebooting the VMs may also help.
+Remember to shut down the VMs with `vagrant halt` before you reboot.
+
+You should also verify each host can access etcd. The following will return an error if etcd is not available.
+
+    curl -L http://172.18.18.101:2379/version
+
+Finally, check that Docker is running on each host by running
+
+    docker ps
+
+## 2. Configuring the Cluster and `kubectl`
+
+Let's configure `kubectl` so you can access the cluster from your local machine. Make sure you have `kubectl` installed locally. The version you choose depends on your host OS.
+ +For Mac: + +```shell +wget http://storage.googleapis.com/kubernetes-release/release/v1.7.0/bin/darwin/amd64/kubectl +chmod +x ./kubectl +``` + +For Linux: + +```shell +wget http://storage.googleapis.com/kubernetes-release/release/v1.7.0/bin/linux/amd64/kubectl +chmod +x ./kubectl +``` + +Then, tell `kubectl` to use the Vagrant cluster we just created. + +```shell +kubectl config set-cluster vagrant-cluster --server=http://172.18.18.101:8080 +kubectl config set-context vagrant-system --cluster=vagrant-cluster +kubectl config use-context vagrant-system +``` + +## 3. Install Addons + +{% include {{page.version}}/install-k8s-addons.md %} + +## Next Steps + +You should now have a fully functioning Kubernetes cluster using Calico for networking. You're ready to use your cluster. + +We recommend you try using [Calico for Kubernetes NetworkPolicy]({{site.baseurl}}/{{page.version}}/getting-started/kubernetes/tutorials/simple-policy). + +[cni-plugin]: https://github.com/projectcalico/cni-plugin +[virtualbox]: https://www.virtualbox.org/ +[vagrant]: https://www.vagrantup.com/downloads.html +[curl]: https://curl.haxx.se/ diff --git a/v2.4/getting-started/kubernetes/installation/vagrant/master-config.yaml b/v2.4/getting-started/kubernetes/installation/vagrant/master-config.yaml new file mode 100644 index 00000000000..4e52142c72d --- /dev/null +++ b/v2.4/getting-started/kubernetes/installation/vagrant/master-config.yaml @@ -0,0 +1,130 @@ +#cloud-config +--- +coreos: + update: + reboot-strategy: off + units: + - name: etcd-member.service + command: start + drop-ins: + - name: 10-etcd.conf + content: | + [Service] + Environment="ETCD_OPTS=--name \"etcdserver\" --listen-client-urls=http://0.0.0.0:2379 --advertise-client-urls=http://$private_ipv4:2379 --initial-cluster=etcdserver=http://$private_ipv4:2380 --initial-advertise-peer-urls=http://$private_ipv4:2380 --listen-client-urls=http://0.0.0.0:2379,http://0.0.0.0:4001 --listen-peer-urls=http://0.0.0.0:2380" + Environment="ETCD_IMAGE_TAG=v3.0.12" + + - name: kube-apiserver.service + command: start + content: | + [Unit] + Description=Kubernetes API Server + Documentation=https://github.com/GoogleCloudPlatform/kubernetes + Requires=etcd-member.service + After=etcd-member.service + [Service] + TimeoutStartSec=1800 + ExecStartPre=/usr/bin/wget -N -P /opt/bin https://storage.googleapis.com/kubernetes-release/release/v1.7.0/bin/linux/amd64/kubectl + ExecStartPre=/usr/bin/wget -N -P /opt/bin https://storage.googleapis.com/kubernetes-release/release/v1.7.0/bin/linux/amd64/kube-apiserver + ExecStartPre=/usr/bin/chmod +x /opt/bin/kubectl + ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-apiserver + ExecStart=/opt/bin/kube-apiserver \ + --allow-privileged=true \ + --etcd-servers=http://$private_ipv4:2379 \ + --runtime-config=extensions/v1beta1=true,extensions/v1beta1/networkpolicies=true \ + --insecure-bind-address=0.0.0.0 \ + --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota \ + --advertise-address=$private_ipv4 \ + --service-cluster-ip-range=10.100.0.0/24 \ + --logtostderr=true + Restart=always + RestartSec=10 + + - name: kube-controller-manager.service + command: start + content: | + [Unit] + Description=Kubernetes Controller Manager + Documentation=https://github.com/GoogleCloudPlatform/kubernetes + Requires=kube-apiserver.service + After=kube-apiserver.service + [Service] + TimeoutStartSec=1800 + ExecStartPre=/usr/bin/wget -N -P /opt/bin 
https://storage.googleapis.com/kubernetes-release/release/v1.7.0/bin/linux/amd64/kube-controller-manager + ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-controller-manager + # --cluster-cidr must match the IP Pool defined in the manifest + ExecStart=/opt/bin/kube-controller-manager \ + --master=$private_ipv4:8080 \ + --service-account-private-key-file=/var/run/kubernetes/apiserver.key \ + --root-ca-file=/var/run/kubernetes/apiserver.crt \ + --logtostderr=true \ + --allocate-node-cidrs=true \ + --cluster-cidr="192.168.0.0/16" + Restart=always + RestartSec=10 + + - name: kube-scheduler.service + command: start + content: | + [Unit] + Description=Kubernetes Scheduler + Documentation=https://github.com/GoogleCloudPlatform/kubernetes + Requires=kube-apiserver.service + After=kube-apiserver.service + [Service] + TimeoutStartSec=1800 + ExecStartPre=/usr/bin/wget -N -P /opt/bin https://storage.googleapis.com/kubernetes-release/release/v1.7.0/bin/linux/amd64/kube-scheduler + ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-scheduler + ExecStart=/opt/bin/kube-scheduler --master=$private_ipv4:8080 + Restart=always + RestartSec=10 + + - name: kubelet.service + runtime: true + command: start + content: | + [Unit] + Description=Kubernetes Kubelet + Documentation=https://github.com/kubernetes/kubernetes + After=docker.service + Requires=docker.service + + [Service] + TimeoutStartSec=1800 + ExecStartPre=/usr/bin/wget -N -P /opt/bin https://storage.googleapis.com/kubernetes-release/release/v1.7.0/bin/linux/amd64/kubelet + ExecStartPre=/usr/bin/chmod +x /opt/bin/kubelet + ExecStart=/opt/bin/kubelet \ + --address=0.0.0.0 \ + --allow-privileged=true \ + --api-servers=http://127.0.0.1:8080 \ + --cluster-dns=10.100.0.10 \ + --cluster-domain=cluster.local \ + --hostname-override=$private_ipv4 \ + --logtostderr=true \ + --register-node=true \ + --register-schedulable=false + Restart=always + RestartSec=10 + + [Install] + WantedBy=multi-user.target + + - name: kube-proxy.service + command: start + content: | + [Unit] + Description=Kubernetes Proxy + Documentation=https://github.com/GoogleCloudPlatform/kubernetes + Requires=kubelet.service + After=kubelet.service + [Service] + TimeoutStartSec=1800 + ExecStartPre=/usr/bin/wget -N -P /opt/bin https://storage.googleapis.com/kubernetes-release/release/v1.7.0/bin/linux/amd64/kube-proxy + ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-proxy + # --cluster-cidr must match the IP Pool defined in the manifest + ExecStart=/opt/bin/kube-proxy \ + --master=http://$private_ipv4:8080 \ + --cluster-cidr="192.168.0.0/16" \ + --proxy-mode=iptables \ + --logtostderr=true + Restart=always + RestartSec=10 diff --git a/v2.4/getting-started/kubernetes/installation/vagrant/node-config.yaml b/v2.4/getting-started/kubernetes/installation/vagrant/node-config.yaml new file mode 100644 index 00000000000..e27a8321ae9 --- /dev/null +++ b/v2.4/getting-started/kubernetes/installation/vagrant/node-config.yaml @@ -0,0 +1,87 @@ +#cloud-config +--- +write_files: + # Kubeconfig file. 
+  - path: /etc/kubernetes/worker-kubeconfig.yaml
+    owner: root
+    permissions: 0755
+    content: |
+      apiVersion: v1
+      kind: Config
+      clusters:
+      - name: local
+        cluster:
+          server: http://172.18.18.101:8080
+      users:
+      - name: kubelet
+      contexts:
+      - context:
+          cluster: local
+          user: kubelet
+        name: kubelet-context
+      current-context: kubelet-context
+
+coreos:
+  update:
+    reboot-strategy: off
+  units:
+    - name: etcd-member.service
+      command: start
+      drop-ins:
+        - name: 10-etcd-member_proxy.conf
+          content: |
+            [Service]
+            Environment="ETCD_OPTS=--proxy on --listen-client-urls http://127.0.0.1:2379 --initial-cluster=etcdserver=http://172.18.18.101:2380"
+            Environment="ETCD_IMAGE_TAG=v3.0.12"
+    - name: kubelet.service
+      runtime: true
+      command: start
+      content: |
+        [Unit]
+        Description=Kubernetes Kubelet
+        Documentation=https://github.com/kubernetes/kubernetes
+        After=docker.service
+        Requires=docker.service
+
+        [Service]
+        TimeoutStartSec=1800
+        ExecStartPre=/usr/bin/wget -N -P /opt/cni/bin https://github.com/containernetworking/cni/releases/download/v0.5.1/cni-v0.5.1.tgz
+        ExecStartPre=/usr/bin/tar -xvf /opt/cni/bin/cni-v0.5.1.tgz -C /opt/cni/bin
+        ExecStartPre=/usr/bin/wget -N -P /opt/bin https://storage.googleapis.com/kubernetes-release/release/v1.7.0/bin/linux/amd64/kubelet
+        ExecStartPre=/usr/bin/chmod +x /opt/bin/kubelet
+        ExecStartPre=/usr/bin/mkdir -p /opt/cni/bin
+        ExecStart=/opt/bin/kubelet \
+        --address=0.0.0.0 \
+        --allow-privileged=true \
+        --api-servers=http://172.18.18.101:8080 \
+        --cluster-dns=10.100.0.10 \
+        --cluster-domain=cluster.local \
+        --hostname-override=$private_ipv4 \
+        --logtostderr=true \
+        --network-plugin=cni
+        Restart=always
+        RestartSec=10
+
+        [Install]
+        WantedBy=multi-user.target
+
+    - name: kube-proxy.service
+      command: start
+      content: |
+        [Unit]
+        Description=Kubernetes Proxy
+        Documentation=https://github.com/GoogleCloudPlatform/kubernetes
+        Requires=kubelet.service
+        After=kubelet.service
+        [Service]
+        TimeoutStartSec=1800
+        ExecStartPre=/usr/bin/wget -N -P /opt/bin https://storage.googleapis.com/kubernetes-release/release/v1.7.0/bin/linux/amd64/kube-proxy
+        ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-proxy
+        # --cluster-cidr must match the IP Pool defined in the manifest
+        ExecStart=/opt/bin/kube-proxy \
+        --master=http://172.18.18.101:8080 \
+        --cluster-cidr="192.168.0.0/16" \
+        --proxy-mode=iptables \
+        --logtostderr=true
+        Restart=always
+        RestartSec=10
diff --git a/v2.4/getting-started/kubernetes/troubleshooting.md b/v2.4/getting-started/kubernetes/troubleshooting.md
new file mode 100644
index 00000000000..d3f6ee0257d
--- /dev/null
+++ b/v2.4/getting-started/kubernetes/troubleshooting.md
@@ -0,0 +1,55 @@
+---
+title: Troubleshooting Calico for Kubernetes
+---
+
+This article contains Kubernetes-specific troubleshooting advice for Calico and
+frequently asked questions.
+See also the [main Calico troubleshooting](../../usage/troubleshooting) pages.
+
+## Frequently Asked Questions
+
+#### Why isn't Calico working on CoreOS Container Linux / hyperkube?
+
+The Calico hosted install places the necessary CNI binaries and config on each
+Kubernetes node in a directory on the host, as specified in the manifest. By
+default, it places binaries in `/opt/cni/bin` and config in `/etc/cni/net.d`.
+
+When running the kubelet as a container using hyperkube, as is common on CoreOS Container Linux,
+you need to make sure that the containerized kubelet can see the CNI network
+plugins and config that have been installed by mounting them into the kubelet container.
+
+For example, add the following arguments to the kubelet-wrapper service:
+
+```
+--volume /etc/cni/net.d:/etc/cni/net.d \
+--volume /opt/cni/bin:/opt/cni/bin \
+```
+
+Without the above volume mounts, the kubelet will not call the Calico CNI binaries, and so
+Calico [workload endpoints]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/workloadendpoint) will
+not be created, and Calico policy will not be enforced.
+
+#### How do I view Calico CNI logs?
+
+The Calico CNI plugin emits logs to stderr, which are then logged out by the kubelet. Where these logs end up
+depends on how your kubelet is configured. For deployments using `systemd`, you can view them via `journalctl`.
+
+The log level can be configured via the CNI network configuration file, by changing the value of the
+key `log_level`. See [the configuration guide]({{site.baseurl}}/{{page.version}}/reference/cni-plugin/configuration) for more information.
+
+#### How do I configure the Pod IP range?
+
+When using Calico IPAM, IP addresses are assigned from [IP Pools]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/ippool).
+
+By default, all enabled IP Pools are used. However, you can specify which IP Pools to use for IP address management in the [CNI network config]({{site.baseurl}}/{{page.version}}/reference/cni-plugin/configuration#ipam),
+or on a per-Pod basis using [Kubernetes annotations]({{site.baseurl}}/{{page.version}}/reference/cni-plugin/configuration#ipam-manipulation-with-kubernetes-annotations).
+
+#### How do I assign a specific IP address to a pod?
+
+For most use-cases it's not necessary to assign specific IP addresses to a Kubernetes Pod, and it's recommended to use Kubernetes Services instead.
+However, if you do need to assign a particular address to a Pod, Calico provides two ways of doing this:
+
+- You can request an IP that is available in Calico IPAM using the `cni.projectcalico.org/ipAddrs` annotation.
+- You can request an IP using the `cni.projectcalico.org/ipAddrsNoIpam` annotation. Note that this annotation bypasses the configured IPAM plugin, and thus in most cases it is recommended to use the above annotation.
+
+See the [Requesting a Specific IP address]({{site.baseurl}}/{{page.version}}/reference/cni-plugin/configuration#requesting-a-specific-ip-address) section in the CNI plugin reference documentation for more details.
diff --git a/v2.4/getting-started/kubernetes/tutorials/advanced-policy.md b/v2.4/getting-started/kubernetes/tutorials/advanced-policy.md
new file mode 100644
index 00000000000..162c735e4cc
--- /dev/null
+++ b/v2.4/getting-started/kubernetes/tutorials/advanced-policy.md
@@ -0,0 +1,279 @@
+---
+title: Going Beyond `NetworkPolicy` with Calico
+---
+
+The Kubernetes NetworkPolicy API allows users to express ingress policy to Kubernetes pods
+based on labels and ports. Calico implements this API, but also supports a number of
+policy features which are not currently expressible through the NetworkPolicy API, such as CIDR-based
+policy and egress policy.
+
+This guide walks through using the Calico APIs directly in conjunction with Kubernetes NetworkPolicy
+in order to define more complex network policies.
+
+### Requirements
+
+- This guide assumes you have a working Kubernetes cluster with Calico for policy. (See [installation]({{site.baseurl}}/{{page.version}}/getting-started/kubernetes/installation) for help.)
+- This guide assumes that your pods have connectivity to the public internet.
+- This guide assumes you are familiar with [Kubernetes NetworkPolicy](simple-policy).
+- This guide assumes you are using etcdv2 (or v3) as the Calico backend datastore.
+- You must have configured kubectl access to the cluster.
+- You must have installed and [configured the calicoctl tool]({{site.baseurl}}/{{page.version}}/reference/calicoctl/setup/etcdv2).
+
+### Setup
+
+#### Create the Namespace
+
+We'll use a new namespace for this guide. Run the following command to create it.
+
+```
+kubectl create ns advanced-policy-demo
+```
+
+And then enable isolation on the Namespace using a [default policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/#default-policies).
+
+```
+kubectl create -f - <<EOF
+kind: NetworkPolicy
+apiVersion: extensions/v1beta1
+metadata:
+  name: default-deny
+  namespace: advanced-policy-demo
+spec:
+  podSelector:
+    matchLabels: {}
+EOF
+```
+
+Now run two nginx pods in the Namespace and expose them as a Service named `nginx`.
+
+```
+kubectl run --namespace=advanced-policy-demo nginx --replicas=2 --image=nginx
+kubectl expose --namespace=advanced-policy-demo deployment nginx --port=80
+```
+
+> **Note:**
+>
+> This requires the [calicoctl tool to be configured]({{site.baseurl}}/{{page.version}}/reference/calicoctl/setup/etcdv2).
+> For example: `export ETCD_ENDPOINTS=http://10.96.232.136:6666`
+
+Now that we've created a Namespace and a set of pods, we should see those objects show up in the
+Calico API using `calicoctl`.
+
+We can see that the Namespace has a corresponding [Profile]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/profile).
+
+```shell
+$ calicoctl get profile -o wide
+NAME                          TAGS
+k8s_ns.advanced-policy-demo   k8s_ns.advanced-policy-demo
+k8s_ns.default                k8s_ns.default
+k8s_ns.kube-system            k8s_ns.kube-system
+```
+
+Because we've enabled isolation on the Namespace, the profile denies all ingress
+traffic and allows all egress traffic.
+
+```
+$ calicoctl get profile k8s_ns.advanced-policy-demo -o yaml
+- apiVersion: v1
+  kind: profile
+  metadata:
+    name: k8s_ns.advanced-policy-demo
+    tags:
+    - k8s_ns.advanced-policy-demo
+  spec:
+    egress:
+    - action: allow
+      destination: {}
+      source: {}
+    ingress:
+    - action: deny
+      destination: {}
+      source: {}
+```
+
+We can see that this is the case by running another pod in the Namespace and attempting to
+access the nginx Service.
+
+```
+$ kubectl run --namespace=advanced-policy-demo access --rm -ti --image busybox /bin/sh
+Waiting for pod advanced-policy-demo/access-472357175-y0m47 to be running, status is Pending, pod ready: false
+
+If you don't see a command prompt, try pressing enter.
+
+/ # wget -q --timeout=5 nginx -O -
+wget: download timed out
+/ #
+```
+
+We can also see that the two nginx pods are represented as [WorkloadEndpoints]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/workloadendpoint) in the Calico API.
+
+```
+$ calicoctl get workloadendpoint
+NODE          ORCHESTRATOR   WORKLOAD                                     NAME
+k8s-node-01   k8s            advanced-policy-demo.nginx-701339712-x1uqe   eth0
+k8s-node-02   k8s            advanced-policy-demo.nginx-701339712-xeeay   eth0
+k8s-node-01   k8s            kube-system.kube-dns-v19-mjd8x               eth0
+```
+
+Taking a closer look, we can see that they reference the correct profile for the Namespace,
+and that the correct label information has been filled in. Notice that the endpoint also
+includes a special label `calico/k8s_ns`, which is automatically populated with the
+pod's Kubernetes Namespace.
+
+```
+$ calicoctl get wep --workload advanced-policy-demo.nginx-701339712-x1uqe -o yaml
+- apiVersion: v1
+  kind: workloadEndpoint
+  metadata:
+    labels:
+      calico/k8s_ns: advanced-policy-demo
+      pod-template-hash: "701339712"
+      run: nginx
+    name: eth0
+    node: k8s-node-01
+    orchestrator: k8s
+    workload: advanced-policy-demo.nginx-701339712-x1uqe
+  spec:
+    interfaceName: cali347609b8bd7
+    ipNetworks:
+    - 192.168.44.65/32
+    mac: 56:b5:54:be:b2:a2
+    profiles:
+    - k8s_ns.advanced-policy-demo
+```
+
+### Define Kubernetes policy
+
+We'll define some network policy through the Kubernetes API. Run the following to create
+a NetworkPolicy which allows traffic to nginx pods from any pods in the advanced-policy-demo Namespace.
+
+```shell
+kubectl create -f - <<EOF
+kind: NetworkPolicy
+apiVersion: extensions/v1beta1
+metadata:
+  name: access-nginx
+  namespace: advanced-policy-demo
+spec:
+  podSelector:
+    matchLabels:
+      run: nginx
+  ingress:
+    - from:
+      - podSelector: {}
+EOF
+```
+
+> **NOTE**
+>
+> The `k8s-policy-no-match` policy is used to send all unmatched traffic to the corresponding per-namespace Profile. This policy is created automatically by the calico/kube-policy-controller.
+
+After creating the policy, we can now access the nginx Service. We also see that the pod can
+access google.com on the public internet. This is because we have not defined any egress policy.
+
+```
+$ kubectl run --namespace=advanced-policy-demo access --rm -ti --image busybox /bin/sh
+Waiting for pod advanced-policy-demo/access-472357175-y0m47 to be running, status is Pending, pod ready: false
+
+If you don't see a command prompt, try pressing enter.
+
+/ # wget -q --timeout=5 nginx -O -
+...
+/ # ping google.com
+PING google.com (216.58.219.206): 56 data bytes
+64 bytes from 216.58.219.206: seq=0 ttl=61 time=14.365 ms
+```
+
+### Prevent outgoing connections from pods
+
+Kubernetes NetworkPolicy does not provide a way to prevent outgoing connections from pods. However,
+Calico does. In this section we'll create a Policy using `calicoctl` which prevents all outgoing
+connections from Kubernetes pods in the advanced-policy-demo Namespace.
+
+To do this, we'll need to create a Policy which selects all pods in the Namespace, and denies
+traffic that doesn't match another Pod in the Namespace.
+
+```
+calicoctl apply -f - <
+```
+
+> Notice the NetworkPolicy allows traffic from Pods with the label `run: access` to Pods with the label `run: nginx`. These are the labels automatically added to Pods started via `kubectl run` based on the name of the `Deployment`.
+
+
+We should now be able to access the Service from the `access` Pod.
+
+```
+# Run a Pod and try to access the `nginx` Service.
+$ kubectl run --namespace=policy-demo access --rm -ti --image busybox /bin/sh
+Waiting for pod policy-demo/access-472357175-y0m47 to be running, status is Pending, pod ready: false
+
+If you don't see a command prompt, try pressing enter.
+
+/ # wget -q --timeout=5 nginx -O -
+```
+
+However, we still cannot access the Service from a Pod without the label `run: access`:
+
+```
+# Run a Pod and try to access the `nginx` Service.
+$ kubectl run --namespace=policy-demo cant-access --rm -ti --image busybox /bin/sh
+Waiting for pod policy-demo/cant-access-472357175-y0m47 to be running, status is Pending, pod ready: false
+
+If you don't see a command prompt, try pressing enter.
+
+/ # wget -q --timeout=5 nginx -O -
+wget: download timed out
+/ #
+```
+
+You can clean up the demo by deleting the demo Namespace:
+
+```shell
+kubectl delete ns policy-demo
+```
+
+This was just a simple example of the Kubernetes NetworkPolicy API and how Calico can secure your Kubernetes cluster.
For more
+information on network policy in Kubernetes, see the [Kubernetes user-guide](http://kubernetes.io/docs/user-guide/networkpolicies/).
+
+For a slightly more detailed demonstration of Policy, check out the [stars demo](stars-policy).
diff --git a/v2.4/getting-started/kubernetes/tutorials/stars-policy/index.md b/v2.4/getting-started/kubernetes/tutorials/stars-policy/index.md
new file mode 100644
index 00000000000..0625891c41c
--- /dev/null
+++ b/v2.4/getting-started/kubernetes/tutorials/stars-policy/index.md
@@ -0,0 +1,96 @@
+---
+title: Stars Policy Demo
+---
+The included demo sets up a frontend and backend service, as well as a client service, all
+running on Kubernetes. It then configures network policy on each service.
+
+## Pre-requisites
+
+To create a Kubernetes cluster which supports the Kubernetes network policy API, follow
+one of our [getting started guides]({{site.baseurl}}/{{page.version}}/getting-started/kubernetes).
+
+## Running the stars example
+
+### 1) Create the frontend, backend, client, and management-ui apps.
+
+```shell
+kubectl create -f {{site.url}}/{{page.version}}/getting-started/kubernetes/tutorials/stars-policy/manifests/00-namespace.yaml
+kubectl create -f {{site.url}}/{{page.version}}/getting-started/kubernetes/tutorials/stars-policy/manifests/01-management-ui.yaml
+kubectl create -f {{site.url}}/{{page.version}}/getting-started/kubernetes/tutorials/stars-policy/manifests/02-backend.yaml
+kubectl create -f {{site.url}}/{{page.version}}/getting-started/kubernetes/tutorials/stars-policy/manifests/03-frontend.yaml
+kubectl create -f {{site.url}}/{{page.version}}/getting-started/kubernetes/tutorials/stars-policy/manifests/04-client.yaml
+```
+
+Wait for all the pods to enter `Running` state.
+
+```shell
+kubectl get pods --all-namespaces --watch
+```
+> Note that it may take several minutes to download the necessary Docker images for this demo.
+
+The management UI runs as a `NodePort` Service on Kubernetes, and shows the connectivity
+of the Services in this example.
+
+You can view the UI by visiting `http://<k8s-node-ip>:30002` in a browser.
+
+Once all the pods are started, they should have full connectivity. You can see this by visiting the UI. Each service is
+represented by a single node in the graph.
+
+- `backend` -> Node "B"
+- `frontend` -> Node "F"
+- `client` -> Node "C"
+
+### 2) Enable isolation
+
+Running the following commands will prevent all access to the frontend, backend, and client Services.
+
+```shell
+kubectl create -n stars -f {{site.url}}/{{page.version}}/getting-started/kubernetes/tutorials/stars-policy/policies/default-deny.yaml
+kubectl create -n client -f {{site.url}}/{{page.version}}/getting-started/kubernetes/tutorials/stars-policy/policies/default-deny.yaml
+```
+
+#### Confirm isolation
+
+Refresh the management UI (it may take up to 10 seconds for changes to be reflected in the UI).
+Now that we've enabled isolation, the UI can no longer access the pods, and so they will no longer show up in the UI.
+
+### 3) Allow the UI to access the Services using NetworkPolicy objects
+
+```shell
+# Allow access from the management UI.
+kubectl create -f {{site.url}}/{{page.version}}/getting-started/kubernetes/tutorials/stars-policy/policies/allow-ui.yaml
+kubectl create -f {{site.url}}/{{page.version}}/getting-started/kubernetes/tutorials/stars-policy/policies/allow-ui-client.yaml
+```
+
+After a few seconds, refresh the UI - it should now show the Services, but they should not be able to access each other any more.
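+
+To double-check that the policies landed in the right Namespaces, you can list them with
+`kubectl` (a quick sanity check; the resource names assume the manifests above were applied
+unmodified):
+
+```shell
+# Each namespace should now contain a default-deny and an allow-ui policy.
+kubectl get networkpolicy --namespace=stars
+kubectl get networkpolicy --namespace=client
+```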
+
+### 4) Apply the backend-policy.yaml manifest to allow traffic from the frontend to the backend.
+
+```shell
+kubectl create -f {{site.url}}/{{page.version}}/getting-started/kubernetes/tutorials/stars-policy/policies/backend-policy.yaml
+```
+
+Refresh the UI. You should see the following:
+
+- The frontend can now access the backend (on TCP port 6379 only).
+- The backend cannot access the frontend at all.
+- The client cannot access the frontend, nor can it access the backend.
+
+### 5) Expose the frontend service to the `client` namespace.
+
+```shell
+kubectl create -f {{site.url}}/{{page.version}}/getting-started/kubernetes/tutorials/stars-policy/policies/frontend-policy.yaml
+```
+
+The client can now access the frontend, but not the backend. Neither the frontend nor the backend
+can initiate connections to the client. The frontend can still access the backend.
+
+To use Calico to enforce egress policy on Kubernetes pods, see [the advanced policy demo]({{site.baseurl}}/{{page.version}}/getting-started/kubernetes/tutorials/advanced-policy).
+
+### 6) (Optional) Clean up the demo environment.
+
+You can clean up the demo by deleting the demo Namespaces:
+
+```shell
+kubectl delete ns client stars management-ui
+```
diff --git a/v2.4/getting-started/kubernetes/tutorials/stars-policy/manifests/00-namespace.yaml b/v2.4/getting-started/kubernetes/tutorials/stars-policy/manifests/00-namespace.yaml
new file mode 100644
index 00000000000..3fb3b76b626
--- /dev/null
+++ b/v2.4/getting-started/kubernetes/tutorials/stars-policy/manifests/00-namespace.yaml
@@ -0,0 +1,4 @@
+kind: Namespace
+apiVersion: v1
+metadata:
+  name: stars
diff --git a/v2.4/getting-started/kubernetes/tutorials/stars-policy/manifests/01-management-ui.yaml b/v2.4/getting-started/kubernetes/tutorials/stars-policy/manifests/01-management-ui.yaml
new file mode 100644
index 00000000000..9a86e4b3218
--- /dev/null
+++ b/v2.4/getting-started/kubernetes/tutorials/stars-policy/manifests/01-management-ui.yaml
@@ -0,0 +1,39 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: management-ui
+  labels:
+    role: management-ui
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: management-ui
+  namespace: management-ui
+spec:
+  type: NodePort
+  ports:
+  - port: 9001
+    targetPort: 9001
+    nodePort: 30002
+  selector:
+    role: management-ui
+---
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: management-ui
+  namespace: management-ui
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        role: management-ui
+    spec:
+      containers:
+      - name: management-ui
+        image: calico/star-collect:v0.1.0
+        imagePullPolicy: Always
+        ports:
+        - containerPort: 9001
diff --git a/v2.4/getting-started/kubernetes/tutorials/stars-policy/manifests/02-backend.yaml b/v2.4/getting-started/kubernetes/tutorials/stars-policy/manifests/02-backend.yaml
new file mode 100644
index 00000000000..4b349018fad
--- /dev/null
+++ b/v2.4/getting-started/kubernetes/tutorials/stars-policy/manifests/02-backend.yaml
@@ -0,0 +1,34 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: backend
+  namespace: stars
+spec:
+  ports:
+  - port: 6379
+    targetPort: 6379
+  selector:
+    role: backend
+---
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: backend
+  namespace: stars
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        role: backend
+    spec:
+      containers:
+      - name: backend
+        image: calico/star-probe:v0.1.0
+        imagePullPolicy: Always
+        command:
+        - probe
+        - --http-port=6379
+        - 
--urls=http://frontend.stars:80/status,http://backend.stars:6379/status,http://client.client:9000/status + ports: + - containerPort: 6379 diff --git a/v2.4/getting-started/kubernetes/tutorials/stars-policy/manifests/03-frontend.yaml b/v2.4/getting-started/kubernetes/tutorials/stars-policy/manifests/03-frontend.yaml new file mode 100644 index 00000000000..181746aacd8 --- /dev/null +++ b/v2.4/getting-started/kubernetes/tutorials/stars-policy/manifests/03-frontend.yaml @@ -0,0 +1,34 @@ +apiVersion: v1 +kind: Service +metadata: + name: frontend + namespace: stars +spec: + ports: + - port: 80 + targetPort: 80 + selector: + role: frontend +--- +apiVersion: v1 +kind: ReplicationController +metadata: + name: frontend + namespace: stars +spec: + replicas: 1 + template: + metadata: + labels: + role: frontend + spec: + containers: + - name: frontend + image: calico/star-probe:v0.1.0 + imagePullPolicy: Always + command: + - probe + - --http-port=80 + - --urls=http://frontend.stars:80/status,http://backend.stars:6379/status,http://client.client:9000/status + ports: + - containerPort: 80 diff --git a/v2.4/getting-started/kubernetes/tutorials/stars-policy/manifests/04-client.yaml b/v2.4/getting-started/kubernetes/tutorials/stars-policy/manifests/04-client.yaml new file mode 100644 index 00000000000..8c6a9dfd7d4 --- /dev/null +++ b/v2.4/getting-started/kubernetes/tutorials/stars-policy/manifests/04-client.yaml @@ -0,0 +1,40 @@ +kind: Namespace +apiVersion: v1 +metadata: + name: client + labels: + role: client +--- +apiVersion: v1 +kind: ReplicationController +metadata: + name: client + namespace: client +spec: + replicas: 1 + template: + metadata: + labels: + role: client + spec: + containers: + - name: client + image: calico/star-probe:v0.1.0 + imagePullPolicy: Always + command: + - probe + - --urls=http://frontend.stars:80/status,http://backend.stars:6379/status + ports: + - containerPort: 9000 +--- +apiVersion: v1 +kind: Service +metadata: + name: client + namespace: client +spec: + ports: + - port: 9000 + targetPort: 9000 + selector: + role: client diff --git a/v2.4/getting-started/kubernetes/tutorials/stars-policy/policies/allow-ui-client.yaml b/v2.4/getting-started/kubernetes/tutorials/stars-policy/policies/allow-ui-client.yaml new file mode 100644 index 00000000000..9e56fbd6ae2 --- /dev/null +++ b/v2.4/getting-started/kubernetes/tutorials/stars-policy/policies/allow-ui-client.yaml @@ -0,0 +1,12 @@ +kind: NetworkPolicy +apiVersion: extensions/v1beta1 +metadata: + namespace: client + name: allow-ui +spec: + podSelector: + ingress: + - from: + - namespaceSelector: + matchLabels: + role: management-ui diff --git a/v2.4/getting-started/kubernetes/tutorials/stars-policy/policies/allow-ui.yaml b/v2.4/getting-started/kubernetes/tutorials/stars-policy/policies/allow-ui.yaml new file mode 100644 index 00000000000..036353ec650 --- /dev/null +++ b/v2.4/getting-started/kubernetes/tutorials/stars-policy/policies/allow-ui.yaml @@ -0,0 +1,12 @@ +kind: NetworkPolicy +apiVersion: extensions/v1beta1 +metadata: + namespace: stars + name: allow-ui +spec: + podSelector: + ingress: + - from: + - namespaceSelector: + matchLabels: + role: management-ui diff --git a/v2.4/getting-started/kubernetes/tutorials/stars-policy/policies/backend-policy.yaml b/v2.4/getting-started/kubernetes/tutorials/stars-policy/policies/backend-policy.yaml new file mode 100644 index 00000000000..663e1ff2aa5 --- /dev/null +++ b/v2.4/getting-started/kubernetes/tutorials/stars-policy/policies/backend-policy.yaml @@ -0,0 +1,17 @@ +kind: NetworkPolicy 
+apiVersion: extensions/v1beta1
+metadata:
+  namespace: stars
+  name: backend-policy
+spec:
+  podSelector:
+    matchLabels:
+      role: backend
+  ingress:
+  - from:
+    - podSelector:
+        matchLabels:
+          role: frontend
+    ports:
+    - protocol: TCP
+      port: 6379
diff --git a/v2.4/getting-started/kubernetes/tutorials/stars-policy/policies/default-deny.yaml b/v2.4/getting-started/kubernetes/tutorials/stars-policy/policies/default-deny.yaml
new file mode 100644
index 00000000000..5a80034b9f8
--- /dev/null
+++ b/v2.4/getting-started/kubernetes/tutorials/stars-policy/policies/default-deny.yaml
@@ -0,0 +1,6 @@
+kind: NetworkPolicy
+apiVersion: extensions/v1beta1
+metadata:
+  name: default-deny
+spec:
+  podSelector:
diff --git a/v2.4/getting-started/kubernetes/tutorials/stars-policy/policies/frontend-policy.yaml b/v2.4/getting-started/kubernetes/tutorials/stars-policy/policies/frontend-policy.yaml
new file mode 100644
index 00000000000..18de4290090
--- /dev/null
+++ b/v2.4/getting-started/kubernetes/tutorials/stars-policy/policies/frontend-policy.yaml
@@ -0,0 +1,17 @@
+kind: NetworkPolicy
+apiVersion: extensions/v1beta1
+metadata:
+  namespace: stars
+  name: frontend-policy
+spec:
+  podSelector:
+    matchLabels:
+      role: frontend
+  ingress:
+  - from:
+    - namespaceSelector:
+        matchLabels:
+          role: client
+    ports:
+    - protocol: TCP
+      port: 80
diff --git a/v2.4/getting-started/kubernetes/tutorials/stars-policy/reset.sh b/v2.4/getting-started/kubernetes/tutorials/stars-policy/reset.sh
new file mode 100755
index 00000000000..7f900f8429f
--- /dev/null
+++ b/v2.4/getting-started/kubernetes/tutorials/stars-policy/reset.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright (c) 2016 Tigera, Inc. All rights reserved.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+kubectl delete rc,svc --all --namespace=client
+kubectl delete rc,svc --all --namespace=management-ui
+kubectl delete rc,svc --all --namespace=stars
+
+kubectl delete ns stars
+kubectl delete ns client
+kubectl delete ns management-ui
+
+kubectl delete networkpolicy frontend-policy --namespace=stars
+kubectl delete networkpolicy backend-policy --namespace=stars
+kubectl delete networkpolicy allow-ui --namespace=stars
+kubectl delete networkpolicy allow-ui --namespace=client
diff --git a/v2.4/getting-started/kubernetes/tutorials/using-calicoctl.md b/v2.4/getting-started/kubernetes/tutorials/using-calicoctl.md
new file mode 100644
index 00000000000..4962d714679
--- /dev/null
+++ b/v2.4/getting-started/kubernetes/tutorials/using-calicoctl.md
@@ -0,0 +1,57 @@
+---
+title: Using calicoctl in Kubernetes
+---
+
+There are two ways to run `calicoctl` in Kubernetes:
+
+- As a standalone binary
+- As a Kubernetes pod
+
+### a. Running calicoctl as a standalone binary
+
+You can install calicoctl by [downloading the appropriate release]({{site.baseurl}}/{{page.version}}/releases) onto any
+machine with access to your etcd cluster. Point it at the cluster by setting `ETCD_ENDPOINTS`.
For example:
+
+```
+ETCD_ENDPOINTS=http://etcd:2379 calicoctl get profile
+```
+
+> **Note for kubeadm deployments**
+>
+> Calico is not configured to use the etcd run by kubeadm on the Kubernetes master.
+> Instead, it launches its own instance of etcd as a pod, available at
+> `http://10.96.232.136:6666`.
+> Ensure you are connecting to the correct etcd or you will not see any of the expected data.
+
+### b. Running calicoctl as a Kubernetes Pod
+
+The `calico/ctl` docker image can be deployed as a pod and used to run calicoctl
+commands. This pod will need to be configured for the Kubernetes environment it is in.
+
+> **Note**
+>
+> When calicoctl is run as a Pod, the `calicoctl node ...` suite of commands is not available.
+
+For the **etcd backend** (e.g. kubeadm):
+
+```
+kubectl apply -f {{site.url}}/{{page.version}}/getting-started/kubernetes/installation/hosted/calicoctl.yaml
+```
+
+For the **Kubernetes Datastore Backend**:
+
+```
+kubectl apply -f {{site.url}}/{{page.version}}/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calicoctl.yaml
+```
+
+You can then run `calicoctl` commands through the Pod using `kubectl`:
+
+```
+$ kubectl exec -ti -n kube-system calicoctl -- /calicoctl get profiles -o wide
+NAME                 TAGS
+k8s_ns.default       k8s_ns.default
+k8s_ns.kube-system   k8s_ns.kube-system
+```
+
+See the [calicoctl reference guide]({{site.baseurl}}/{{page.version}}/reference/calicoctl) for more information.
diff --git a/v2.4/getting-started/kubernetes/upgrade.md b/v2.4/getting-started/kubernetes/upgrade.md
new file mode 100644
index 00000000000..9b7fc53374f
--- /dev/null
+++ b/v2.4/getting-started/kubernetes/upgrade.md
@@ -0,0 +1,206 @@
+---
+title: Upgrading Calico for Kubernetes
+---
+
+This document covers upgrading the Calico components in a Kubernetes deployment. This
+upgrade procedure is supported for Calico v1.6+.
+
+It is possible to upgrade the Calico components on a single node without affecting connectivity or
+network policy for any existing pods. However, it is recommended that you do not deploy
+new pods to a node that is being upgraded.
+
+It is recommended to upgrade one node at a time, making each node
+unschedulable with [kubectl cordon](http://kubernetes.io/docs/user-guide/kubectl/v1.6/#cordon)
+before upgrading it, and then making it schedulable again with
+[kubectl uncordon](http://kubernetes.io/docs/user-guide/kubectl/v1.6/#uncordon) once the
+upgrade is complete.
+
+> **NOTE**
+>
+> When upgrading to etcd v3, as long as the cluster is migrated with the
+> `etcdctl migrate` command, the v2 data will remain untouched and the etcd v3
+> server will continue to speak the v2 protocol, so the upgrade should have no
+> impact on Calico.
+
+> **NOTE**
+>
+> When upgrading Calico using the Kubernetes datastore driver from a version < v2.3.0
+> to a version >= v2.3.0, or when upgrading Calico using the etcd datastore from a version < v2.4.0
+> to a version >= v2.4.0, you should follow the steps for [upgrading to v1 NetworkPolicy semantics](#upgrading-to-v1-networkpolicy-semantics).
+
+## Upgrading a Hosted Installation of Calico
+
+This section covers upgrading a [self-hosted]({{site.baseurl}}/{{page.version}}/getting-started/kubernetes/installation/hosted) Calico installation.
+
+Note that while a self-hosted installation of Calico is typically done all at once (via calico.yaml), it is
+recommended to perform upgrades one component at a time.
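+
+Before upgrading anything, it can help to record the image versions that are currently
+deployed. One way to do this (a sketch, assuming the standard resource names from the hosted
+manifests: a `calico-node` DaemonSet and a `calico-policy-controller` Deployment in
+`kube-system`) is to query the images with `kubectl`:
+
+```shell
+# Print the images currently used by the calico-node DaemonSet.
+kubectl get daemonset calico-node -n kube-system \
+  -o jsonpath='{.spec.template.spec.containers[*].image}'
+
+# Print the image currently used by the policy controller Deployment.
+kubectl get deployment calico-policy-controller -n kube-system \
+  -o jsonpath='{.spec.template.spec.containers[*].image}'
+```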
+
+#### Upgrading the Calico policy controller
+
+In a self-hosted Calico installation, the calico/kube-policy-controller is run under a Deployment. As such,
+it can be upgraded via the standard [Deployment mechanism](http://kubernetes.io/docs/user-guide/deployments/#updating-a-deployment).
+
+To upgrade the policy controller, simply apply changes to the Deployment specification and Kubernetes will
+do the rest.
+
+```
+kubectl apply -f policy-controller.yaml
+```
+
+> **NOTE**
+>
+> The Deployment must use `.spec.strategy.type==Recreate` to ensure that at most one instance of the
+> controller is running at a time.
+
+##### Upgrading from pre-2.0
+
+_Some earlier versions of the calico/kube-policy-controller were deployed as a ReplicaSet rather than a Deployment.
+To upgrade from the ReplicaSet to a Deployment, follow these steps:_
+
+- _Scale the existing ReplicaSet to 0 replicas_
+
+```
+kubectl scale rs -n kube-system calico-policy-controller --replicas=0
+```
+
+- _Deploy the new policy controller as a Deployment_
+
+```
+kubectl apply -f policy-controller.yaml
+```
+
+#### Upgrading the Calico DaemonSet
+
+Upgrading the CNI plugin or calico/node image is done through a DaemonSet. DaemonSets do not
+currently support an update operation, and as such must be updated manually.
+
+To upgrade the DaemonSet:
+
+##### 1. Apply changes to the existing DaemonSet via kubectl apply.
+
+Modify the DaemonSet manifest and run:
+
+```
+kubectl apply -f calico-node.yaml
+```
+
+> Alternatively, you can use `kubectl edit` to modify the DaemonSet.
+
+##### 2. Upgrade each node.
+
+Perform the following steps on each node one at a time.
+
+First make the node unschedulable:
+
+```
+kubectl cordon node-01
+```
+
+Delete the calico-node pod running on the cordoned node and wait for the
+DaemonSet controller to deploy a replacement.
+
+```
+kubectl delete pod -n kube-system calico-node-ajzy6e3t
+```
+
+Once the new calico-node Pod has started, make the node schedulable again.
+
+```
+kubectl uncordon node-01
+```
+
+> **NOTE**
+>
+> You may want to pre-fetch the new Docker image to ensure the new node image is started
+> within BIRD's graceful restart period of 90 seconds.
+
+#### Updating the Calico ConfigMap
+
+Most self-hosted Calico deployments use a ConfigMap for configuration of the Calico
+components.
+
+To update the ConfigMap, make any desired changes and apply the new ConfigMap using
+kubectl. You will need to restart the policy controller and each calico/node instance
+as described above before the new config is reflected.
+
+## Upgrading Components Individually
+
+This section covers upgrading each component individually for use with custom configuration
+management tools.
+
+#### Upgrading the calico/node container
+
+The calico/node container runs on each node in a Kubernetes cluster. It runs Felix for policy
+enforcement and BIRD for BGP networking (when enabled).
+
+To upgrade the calico/node container:
+
+- Pull the new version of the calico/node image to each node, e.g. `docker pull quay.io/calico/node:vA.B.C`.
+- Update the image in your process management to reference the new version.
+- Stop the running calico/node container, and start it with the newly pulled version.
+
+#### Upgrading the Calico CNI plugins
+
+The Calico CNI plugins (calico and calico-ipam) are typically installed in /opt/cni/bin, though
+this can vary based on deployment.
+
+To upgrade the plugins, simply remove the existing binaries and replace them with the desired version.
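+
+For example, on a node whose plugins live in the default `/opt/cni/bin` directory, the swap
+might look like the following (a sketch only; the download URLs are the same ones used
+elsewhere in these guides, and you may want to cordon the node first to avoid racing pod
+creation against the replacement):
+
+```shell
+# Fetch the new plugin binaries to a staging location.
+curl -L -o /tmp/calico {{site.data.versions[page.version].first.components["calico/cni"].download_calico_url}}
+curl -L -o /tmp/calico-ipam {{site.data.versions[page.version].first.components["calico/cni"].download_calico_ipam_url}}
+chmod +x /tmp/calico /tmp/calico-ipam
+
+# Swap them in over the old binaries.
+mv /tmp/calico /tmp/calico-ipam /opt/cni/bin/
+```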
+
+To upgrade the CNI config (typically located in /etc/cni/net.d) simply make the desired changes to the
+config file. It will be picked up by the kubelet automatically for Kubernetes v1.4.0+. For older versions
+of Kubernetes you must restart the kubelet for changes to be applied.
+
+#### Upgrading the Calico Policy Controller
+
+The calico/kube-policy-controller can be stopped and restarted without affecting connectivity or
+policy on existing pods. New pods in existing Namespaces will correctly have
+existing policy applied even when the controller is not running. However, when the
+policy controller is not running:
+
+- New NetworkPolicies will not be applied.
+- New Pods in new Namespaces will not get network connectivity.
+- Label changes to existing pods will not be reflected in the applied policy.
+
+> **NOTE**
+>
+> Only one instance of the controller should ever be active at a time.
+
+To upgrade the policy controller:
+
+- Pull the new version of the calico/kube-policy-controller image to each node.
+- Update the image in your process management to reference the new version.
- Stop the running container, and start it with the newly pulled version.
+
+We recommend running the policy controller as a Kubernetes Deployment with the `Recreate` strategy type, in which
+case the upgrade can be handled entirely through the
+standard [Deployment mechanism](http://kubernetes.io/docs/user-guide/deployments/#updating-a-deployment).
+
+## Upgrading to v1 NetworkPolicy semantics
+
+Calico v2.3.0 (when using the Kubernetes datastore driver) and Calico v2.4.0 (when using the etcd datastore driver)
+interpret the Kubernetes `NetworkPolicy` differently than previous releases, as specified
+in [upstream Kubernetes](https://github.com/kubernetes/kubernetes/pull/39164#issue-197243974).
+
+To maintain behavior when upgrading, you should follow these steps prior to upgrading Calico to ensure your configured policy is
+enforced consistently throughout the upgrade process.
+
+- In any Namespace that previously did _not_ have a "DefaultDeny" annotation:
+  - Delete any NetworkPolicy objects in that Namespace. After upgrade, these policies will become active and may block traffic that was previously allowed.
+- In any Namespace that previously had a "DefaultDeny" annotation:
+  - Create a NetworkPolicy which matches all pods but does not allow any traffic. After upgrade, the Namespace annotation will have no effect, but this empty NetworkPolicy will provide the same behavior.
+
+Here is an example of a NetworkPolicy which selects all pods in the Namespace, but does not allow any traffic:
+
+```yaml
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+  name: default-deny
+spec:
+  podSelector:
+```
+
+> **Note**:
+>
+> The above steps should be followed when upgrading to Calico v2.3.0+ using the Kubernetes
+> datastore driver, and Calico v2.4.0+ using the etcd datastore,
+> independent of the Kubernetes version being used.
diff --git a/v2.4/getting-started/mesos/index.md b/v2.4/getting-started/mesos/index.md
new file mode 100644
index 00000000000..0ab510e59a3
--- /dev/null
+++ b/v2.4/getting-started/mesos/index.md
@@ -0,0 +1,42 @@
+---
+title: Integration Guide
+---
+
+Calico introduces IP-per-container and fine-grained security policies to Mesos, while
+maintaining speed and scalability and rendering port-forwarding obsolete.
+
+Use the navigation bar on the left to view information on Calico's Mesos
+integration, or continue reading for an overview of recommended guides to get
+started.
+
+## Installation
+
+#### [Requirements](installation/prerequisites)
+
+Information on running etcd and configuring Docker for multi-host networking.
+
+#### [Integration Guide](installation/integration)
+
+This method walks through the necessary manual steps to integrate Calico with your own
+deployment scripts and tools. Follow this guide if you're integrating Calico with your
+own configuration management.
+
+#### [DC/OS Installation Guide](installation/dc-os)
+
+This guide shows how to launch Calico's Installation Framework from the DC/OS Universe.
+
+This install can be customized to lessen service impact
+and improve reliability. See additional information on
+[Customizing Calico's DC/OS Installation Framework](installation/dc-os/custom).
+
+## Quickstart with "Calico-Ready" Clusters
+
+#### [Vagrant/VirtualBox: CentOS](installation/vagrant-centos)
+
+Follow this guide to launch a local 2-node Mesos cluster on CentOS VMs with everything
+you need to install and use Calico.
+
+## Tutorials
+
+- [Launching Tasks](tutorials/launching-tasks)
+- [Connecting to Tasks](tutorials/connecting-tasks)
+- [Configuring Policy for Docker Containerizer Tasks](tutorials/policy/docker-containerizer)
+- [Configuring Policy for Universal Containerizer Tasks](tutorials/policy/universal-containerizer)
diff --git a/v2.4/getting-started/mesos/installation/dc-os/custom.md b/v2.4/getting-started/mesos/installation/dc-os/custom.md
new file mode 100644
index 00000000000..1a0350f280f
--- /dev/null
+++ b/v2.4/getting-started/mesos/installation/dc-os/custom.md
@@ -0,0 +1,116 @@
+---
+title: Customizing the Calico Universe Framework
+---
+
+The Calico Universe Framework includes customization options which support
+more stable deployments for users who want to minimize impact on cluster
+availability during installation.
+
+#### Custom etcd
+
+By default, Calico will run etcd in proxy mode on every agent, forwarding requests
+made to `http://localhost:2379` on to the etcd cluster launched by Universe,
+which is accessible via an SRV entry.
+
+The Calico Universe Framework can alternatively be configured to connect directly
+to an etcd instance launched outside of Universe, removing
+the need for the etcd proxy:
+
+1. Run an etcd cluster across your masters. Follow the
+   [official etcd clustering guide](https://coreos.com/etcd/docs/latest/clustering.html#static)
+   for information on how to run an HA etcd cluster.
+
+   For demo purposes, we'll run one single instance of etcd on our first master
+   (available at http://m1.dcos:2379):
+
+   ```shell
+   docker run -d --net=host --name=etcd quay.io/coreos/etcd:v3.1.10 \
+     --advertise-client-urls "http://m1.dcos:2379" \
+     --listen-client-urls "http://m1.dcos:2379,http://127.0.0.1:2379"
+   ```
+
+2. Launch the Calico Universe Framework with the following configuration:
+
+   ```json
+   {
+     "Etcd Settings": {
+       "run-proxy": false,
+       "etcd-endpoints": "http://m1.dcos:2379"
+     }
+   }
+   ```
+
+#### Configure Docker with Cluster-Store
+
+The Docker engine must be restarted after its cluster-store configuration changes.
+Users who want to minimize impact on cluster availability during installation
+can perform the docker cluster-store configuration manually.
+
+1. On each agent, create or modify `/etc/docker/daemon.json` with the following content:
+
+   ```json
+   {
+     "cluster-store": "etcd://m1.dcos:2379"
+   }
+   ```
+
+2. Restart docker:
+
+   ```
+   systemctl restart docker
+   ```
+
+   Ensure it has picked up the changes:
+
+   ```
+   docker info | grep -i "cluster store"
+   ```
+
+3. When launching the Calico Universe Framework, disable the Docker Cluster-Store configuration step:
+
+   ```json
+   {
+     "Configure Docker Cluster-Store": {
+       "enable": false
+     }
+   }
+   ```
+
+#### Install the Calico CNI Plugins
+
+Installation of CNI plugins requires a restart of the Mesos-Agent process.
+Users who want to minimize impact on cluster availability during installation
+can install the Calico plugin manually by performing the following steps
+on each agent:
+
+1. Download Calico's CNI plugin binaries:
+
+   ```shell
+   curl -L -o /opt/mesosphere/active/cni/calico {{site.data.versions[page.version].first.components["calico/cni"].download_calico_url}}
+   curl -L -o /opt/mesosphere/active/cni/calico-ipam {{site.data.versions[page.version].first.components["calico/cni"].download_calico_ipam_url}}
+   ```
+
+2. Create a standard Calico CNI network configuration:
+
+   ```shell
+   cat <<EOF > /opt/mesosphere/etc/dcos/network/cni/calico.conf
+   {
+     "name": "calico",
+     "cniVersion": "0.1.0",
+     "type": "calico",
+     "ipam": {
+       "type": "calico-ipam"
+     },
+     "etcd_endpoints": "http://m1.dcos:2379"
+   }
+   EOF
+   ```
+
+3. When launching the Calico Universe Framework, disable the CNI plugin installation step:
+
+   ```json
+   {
+     "Install CNI": {
+       "enable": false
+     }
+   }
+   ```
diff --git a/v2.4/getting-started/mesos/installation/dc-os/framework.md b/v2.4/getting-started/mesos/installation/dc-os/framework.md
new file mode 100644
index 00000000000..1c8d221a86a
--- /dev/null
+++ b/v2.4/getting-started/mesos/installation/dc-os/framework.md
@@ -0,0 +1,61 @@
+---
+title: Calico DC/OS Installation Guide
+---
+
+The following guide walks through installing Calico for DC/OS using the Universe
+package repository.
+
+#### Installing etcd
+
+To get started, first install etcd from Universe:
+
+![Installing etcd from Universe]({{site.baseurl}}/images/dcos-install-etcd.gif)
+
+#### Installing Calico
+
+Then install Calico from Universe.
+
+![Installing Calico from Universe]({{site.baseurl}}/images/dcos-install-calico.gif)
+
+It will take a few minutes for Calico to finish
+installing on your cluster. You can check the status of the installation by
+visiting Calico's web status interface:
+
+ - Go to the **Services** tab.
+ - Select "calico-install-framework" in the list of running services
+   (note that it may take a few minutes for Calico
+   to appear).
+ - Once the Calico service is `Healthy`,
+   select the "calico-install-framework" task.
+ - Click the Endpoint URL to open the Calico status page in a new tab.
+
+![sample demonstrating how to locate the framework service page]({{site.baseurl}}/images/dcos-calico-status.gif)
+
+## Further Reading
+
+This concludes the installation of Calico for DC/OS! Before you start
+launching IP-per-container applications with Calico policy,
+review the following information which may apply to your deployment.
+
+#### AWS
+
+DC/OS users on Amazon Web Services should view
+[Calico's AWS reference]({{site.baseurl}}/{{page.version}}/reference/public-cloud/aws)
+for information on how to configure AWS networking for use with Calico.
+
+#### Note on Cluster Impact
+
+The installation method detailed above will affect availability of all Agents
+in the cluster in order to work around two limitations in DC/OS 1.8:
+
+1. [Mesos-Agents require a restart to detect newly added CNI networks](https://issues.apache.org/jira/browse/MESOS-6567).
+2. [DC/OS does not configure Docker with a Cluster-Store](https://dcosjira.atlassian.net/browse/DCOS-155),
+   a [requirement for Multi-host docker networking](https://docs.docker.com/engine/userguide/networking/get-started-overlay/#/overlay-networking-with-an-external-key-value-store).
+
+Because of these two limitations, Calico-DC/OS will restart each agent process
+and restart each docker daemon. Learn how to handle these installation steps manually
+and prevent cluster availability impact by viewing the [Custom Install Guide](custom).
+
+#### Deploying Applications
+
+Once installed, see the [standard usage guides]({{site.baseurl}}/{{page.version}}/getting-started/mesos#tutorials).
diff --git a/v2.4/getting-started/mesos/installation/dc-os/index.md b/v2.4/getting-started/mesos/installation/dc-os/index.md
new file mode 100644
index 00000000000..1bb610cc38a
--- /dev/null
+++ b/v2.4/getting-started/mesos/installation/dc-os/index.md
@@ -0,0 +1,122 @@
+---
+title: Overview of Calico for DC/OS
+---
+
+The following information details Calico's installation and runtime dependencies
+in DC/OS, and looks at how to leverage the Calico-DC/OS Framework to get up and running.
+
+## Overview
+
+Calico provides multi-host networking for DC/OS, giving each task its own IP
+address and isolated networking namespace, with highly flexible policy configuration.
+
+Calico has the following prerequisites in DC/OS:
+
+- An available etcd store
+- Docker configured with a cluster-store (if networking Docker Tasks)
+
+Since many default DC/OS clusters do not meet these basic requirements, Calico
+maintains a simple Universe package for DC/OS that can get Calico
+installed and running in one click. The package performs the following
+steps on every agent in the cluster:
+
+1. Run etcd (in proxy mode)
+2. Configure docker with a cluster store
+3. Install Calico CNI binaries and configs (for Unified Containerizer networking)
+4. Run calico-libnetwork (for Docker Containerizer networking)
+5. Run calico-node.
+
+The framework is flexible, allowing users to enable, disable, or customize each step.
+Below, we'll see what each step does, and how it can be modified.
+
+The framework runs Calico (and its configuration) **within DC/OS.**
+This means it registers as a Mesos Framework, and uses Mesos resource offers
+to run and configure the cluster with Calico. As an alternative to this approach,
+Calico can be manually installed directly onto Agents as a daemon service integrated
+with the OS (using systemd) to ensure it is available when tasks are eventually
+provisioned.
+
+### Note on rp_filter in DC/OS
+
+Containers with the `CAP_NET_RAW` capability can spoof their IP address if the
+`rp_filter` kernel setting is set to 'loose'. Typically, `rp_filter` is
+configured to 'strict', preventing this behavior.
+[DC/OS, however, arbitrarily sets `rp_filter` to 'loose' across all interfaces](https://dcosjira.atlassian.net/browse/DCOS-265), including the interfaces
+Calico creates and uses. By default, [Felix notices this and refuses to launch](https://github.com/projectcalico/calicoctl/issues/1082#issue-168163079). In DC/OS, however, we configure Felix to ignore this by setting
+[IgnoreLooseRPF](https://github.com/projectcalico/felix/blob/ab8799eaea66627e5db7717e62fca61fd9c08646/python/calico/felix/config.py#L198) to true. As a result, be cautious when granting containers `CAP_NET_RAW` since, if compromised, these
+containers will be able to spoof their IP address, potentially allowing them to bypass firewall restrictions.
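+
+To check how an agent is currently configured, you can inspect the kernel setting directly
+(`eth0` here is just an example interface name; a value of `1` means strict and `2` means loose):
+
+```shell
+sysctl net.ipv4.conf.all.rp_filter
+sysctl net.ipv4.conf.eth0.rp_filter
+```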
+
+Next, we'll dive into each task the Framework performs.
+
+### etcd
+
+Calico uses etcd as its central database. There are two popular ways to run
+etcd in DC/OS:
+
+1. **Use the Universe etcd package**
+
+   The Universe etcd package launches a Mesos Framework that uses Mesos resource
+   offers to spawn a multi-node etcd cluster.
+   The endpoint address can be resolved via an SRV lookup of
+   `_etcd-server._tcp.etcd.mesos`.
+
+   Calico doesn't support connections to etcd via
+   SRV record, so the Calico-DC/OS Framework first runs its own instance
+   of etcd in proxy mode on every agent, which it relies on to forward requests
+   made to `localhost:2379` onwards to the actual etcd cluster.
+
+2. **Manually running etcd**
+
+   Running the etcd cluster manually across all masters can be considered more
+   stable than the previous option, as the endpoint address is static.
+   Users launching etcd in this way can skip running etcd in proxy mode, and
+   can simply change `ETCD_ENDPOINTS` to point directly at their static
+   etcd cluster.
+
+### Docker Cluster Store
+
+Calico networks Docker Containerizer tasks at the Docker-engine layer.
+To do multi-host networking in Docker, each docker engine must be configured
+to use the same cluster-store.
+
+By default, the Calico-DC/OS Framework will parse the value set for `ETCD_ENDPOINTS`,
+configure Docker to use it by adding it to `/etc/docker/daemon.json`,
+and finally restart Docker.
+
+Users can set `override-docker-cluster-store` to manually choose a different
+cluster store (e.g. the existing zookeeper on master), or, if they are only
+planning to use Calico for Unified Containerizer networking,
+can disable modification of the docker daemon altogether.
+
+### Calico CNI Installation
+
+To perform networking on Unified Containerizer tasks, Calico's CNI binaries and
+configuration file must be installed on every agent, and the slave process must
+be restarted to pick up the change. The Framework then performs the following steps:
+
+1. Download [`calico`]({{site.data.versions[page.version].first.components["calico/cni"].download_calico_url}}) to `/opt/mesosphere/active/cni/`
+2. Download [`calico-ipam`]({{site.data.versions[page.version].first.components["calico/cni"].download_calico_ipam_url}}) to `/opt/mesosphere/active/cni/`
+3. Create the following JSON file at `/opt/mesosphere/etc/dcos/network/cni/calico.cni`:
+
+   ```json
+   {
+     "name": "calico",
+     "type": "calico",
+     "etcd_endpoints": "http://localhost:2379",
+     "ipam": {
+       "type": "calico-ipam"
+     }
+   }
+   ```
+
+   > Note: If not running etcd in proxy mode, be sure to change `etcd_endpoints`
+   > to your correct etcd endpoint address.
+
+4. Restart the slave process with `systemctl restart dcos-mesos-slave`.
+
+### Run Calico Node
+
+This task ensures that Calico's core process, `calico/node`, is running.
+
+## Next Steps: Installing
+
+For installation instructions, see [The Calico DC/OS Install Guide]({{site.baseurl}}/{{page.version}}/getting-started/mesos/installation/dc-os/framework).
diff --git a/v2.4/getting-started/mesos/installation/integration.md b/v2.4/getting-started/mesos/installation/integration.md
new file mode 100644
index 00000000000..a1fe182d9c6
--- /dev/null
+++ b/v2.4/getting-started/mesos/installation/integration.md
@@ -0,0 +1,79 @@
+---
+title: Integration Guide
+---
+
+This guide explains how to integrate Calico networking and policy on an existing
+Mesos cluster. These instructions should be followed on each **Agent**.
+
+Ensure you've met the [prerequisites](prerequisites) before continuing, namely that
+you have etcd running.
+
+Calico runs as a Docker container on each host. The `calicoctl` command line tool can be used to launch the `calico/node` container.
+
+1. Download the calicoctl binary:
+
+   ```
+   sudo wget -O /usr/local/bin/calicoctl {{site.data.versions[page.version].first.components.calicoctl.download_url}}
+   sudo chmod +x /usr/local/bin/calicoctl
+   ```
+
+2. Launch `calico/node`:
+
+   ```
+   sudo ETCD_ENDPOINTS=http://$ETCD_IP:$ETCD_PORT calicoctl node run --node-image=quay.io/calico/node:{{site.data.versions[page.version].first.title}}
+   ```
+
+   > Note: Ensure you've set or replaced `$ETCD_IP` and `$ETCD_PORT` to point to
+   > [your etcd cluster](prerequisites).
+
+   Check that `calico/node` is now running:
+
+   ```
+   vagrant@calico-01:~$ docker ps
+   CONTAINER ID   IMAGE                                                                   COMMAND         CREATED         STATUS         PORTS   NAMES
+   408bd2b9ba53   quay.io/calico/node:{{site.data.versions[page.version].first.title}}   "start_runit"   3 seconds ago   Up 2 seconds           calico-node
+   ```
+
+   Furthermore, check that the `calico/node` container is functioning properly
+   with the following command:
+
+   ```
+   sudo calicoctl node status
+   ```
+
+3. Download the Calico CNI plugins to the
+   [`$NETWORK_CNI_PLUGINS_DIR` you configured for Mesos](prerequisites).
+   You may skip this step if you do not plan on using the Unified Containerizer.
+
+   ```shell
+   curl -L -o $NETWORK_CNI_PLUGINS_DIR/calico \
+     {{site.data.versions[page.version].first.components["calico/cni"].download_calico_url}}
+   curl -L -o $NETWORK_CNI_PLUGINS_DIR/calico-ipam \
+     {{site.data.versions[page.version].first.components["calico/cni"].download_calico_ipam_url}}
+   chmod +x $NETWORK_CNI_PLUGINS_DIR/calico
+   chmod +x $NETWORK_CNI_PLUGINS_DIR/calico-ipam
+   ```
+
+4. Create a Calico CNI configuration in the [`$NETWORK_CNI_CONFIG_DIR` you configured for Mesos](prerequisites), replacing `http://master.mesos:2379` with
+   etcd's address:
+
+   ```shell
+   cat > $NETWORK_CNI_CONFIG_DIR/calico.conf <<EOF
+   {
+     "name": "calico",
+     "type": "calico",
+     "ipam": {
+       "type": "calico-ipam"
+     },
+     "etcd_endpoints": "http://master.mesos:2379"
+   }
+   EOF
+   ```
+
+> Again, set or replace `$ETCD_IP` and `$ETCD_PORT` with the appropriate address of your
+etcd cluster.
+
+Restart docker, then ensure it has picked up the changes:
+
+```
+$ docker info | grep -i "cluster store"
+Cluster Store: etcd://10.0.0.1:2379
+```
+
+#### 3. Docker Containerizer Enabled for Mesos Agents
+
+By default, Mesos only enables the "Mesos" Containerizer. Ensure
+the Docker Containerizer is also enabled on each Agent.
+
+> Note: You may skip this step if you do not plan on using the Docker Containerizer.
+
+If you are using the default `mesos-init-wrapper` from the official Mesos package,
+you can enable the Docker Containerizer with the following commands:
+
+```shell
+$ sh -c 'echo docker,mesos > /etc/mesos-slave/containerizers'
+$ systemctl restart mesos-slave.service
+```
+
+#### 4. CNI Isolator Enabled for Mesos Agents
+
+If you are planning to use Calico with the Unified containerizer,
+[enable the CNI Isolator on each agent](http://mesos.apache.org/documentation/latest/cni/#usage).
+
+> Note: You may skip this step if you do not plan on using the Unified Containerizer.
+
+When enabling CNI, you will have specified a `network_cni_config_dir`
+and `network_cni_plugins_dir`. We'll refer to these going forward as
+`$NETWORK_CNI_CONFIG_DIR` and `$NETWORK_CNI_PLUGINS_DIR`, respectively.
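+
+If you used `mesos-init-wrapper` from the official packages, the CNI flags can be set the
+same way as the containerizer setting above. A sketch only (the directory paths are
+illustrative; use the locations you actually chose, and merge `network/cni` into any
+isolation list you have already configured):
+
+```shell
+# Enable the network/cni isolator and point the agent at the CNI directories.
+sh -c 'echo network/cni > /etc/mesos-slave/isolation'
+sh -c 'echo /etc/mesos/cni/conf > /etc/mesos-slave/network_cni_config_dir'
+sh -c 'echo /etc/mesos/cni/plugins > /etc/mesos-slave/network_cni_plugins_dir'
+systemctl restart mesos-slave.service
+```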
+ +## Next Steps + +Once you have met the prerequisites, view the [Integration Guide](integration) + +[slack]: https://slack.projectcalico.org diff --git a/v2.4/getting-started/mesos/installation/vagrant-centos/Vagrantfile b/v2.4/getting-started/mesos/installation/vagrant-centos/Vagrantfile new file mode 100644 index 00000000000..c6717a179c7 --- /dev/null +++ b/v2.4/getting-started/mesos/installation/vagrant-centos/Vagrantfile @@ -0,0 +1,220 @@ +--- +layout: null +--- +# -*- mode: ruby -*- +# vi: set ft=ruby : +# Size of the cluster created by Vagrant +num_instances = 2 + +# VM Basename +instance_name_prefix="calico" + +# Version of mesos to install from official mesos repo +mesos_version = "1.1.0" + +# Download URL for Mesos DNS. +mesos_dns_url = "https://github.com/mesosphere/mesos-dns/releases/download/v0.5.0/mesos-dns-v0.5.0-linux-amd64" + +# The calicoctl download URL. +calicoctl_url = "{{site.data.versions[page.version].first.components.calicoctl.download_url}}" + +# The version of the calico docker images to install. This is used to pre-load +# the calico/node image which slows down the install process, but speeds up the tutorial. +# +# This version should match the version required by calicoctl installed from +# calicoctl_url. +calico_node_ver = "{{site.data.versions[page.version].first.title}}" + +# Script to write out the Calico environment file. +$write_calico_env=< diff --git a/v2.4/introduction/index.html b/v2.4/introduction/index.html new file mode 100644 index 00000000000..82e5c34f428 --- /dev/null +++ b/v2.4/introduction/index.html @@ -0,0 +1,12 @@ +--- +title: Project Calico Documentation +description: Home +layout: docwithnav +--- +

Calico is a new approach to virtual networking and network security for containers, VMs, and bare metal servers, providing a rich set of security enforcement capabilities running on top of a highly scalable and efficient virtual network fabric. Calico includes pre-integration with Kubernetes and Mesos (as a CNI network plugin), Docker (as a libnetwork plugin), and OpenStack (as a Neutron plugin). Calico supports a broad range of deployment options, including on-premises and public cloud (AWS, GCE, etc.), providing the same rich set of features across all.

+ +

Calico's network policy enforcement ensures that the only packets that flow to/from a workload are the ones the developer or operator expects. This is achieved by mapping high-level developer/operator intent to fully distributed ACLs running on every host. You can think of Calico as providing every workload with a fully automated virtual firewall to protect the workload and to protect the rest of your application should that workload become compromised. Calico automatically maps any network policy concepts from the orchestration environment into Calico network policy. (For example, Calico was selected as the reference implementation of network policy for Kubernetes.) Calico network policy can also be specified using the Calico command line tools and APIs, either in place of or augmenting the policy concepts provided by the orchestration system.

+ +

Calico's highly scalable network fabric is built using the same principles as the internet - the most highly scaled network in existence. In contrast to most virtual networking solutions, Calico provides a flat IP network that can typically be run without any encapsulation (no overlays). The ability to run without an overlay provides exceptional throughput characteristics, and for large-scale service operators makes diagnosing network connectivity issues a breeze. In addition, Calico's network fabric also includes the ability to route packets using a stateless IP-in-IP overlay for any scenarios where an overlay network might be preferred.

+ +

Calico's network policy enforcement can also be combined with other virtual networking solutions. For example, the Canal project adds Calico policy enforcement to Flannel virtual networking. This exciting project brings rich network policy to Flannel users, and brings more network connectivity options to Calico users.

diff --git a/v2.4/reference/advanced/etcd-rbac.md b/v2.4/reference/advanced/etcd-rbac.md
new file mode 100644
index 00000000000..ad96626678e
--- /dev/null
+++ b/v2.4/reference/advanced/etcd-rbac.md
@@ -0,0 +1,27 @@
+---
+title: Configuring a Calico Role for etcdv2 RBAC
+---
+
+Calico writes all of its data in a `/calico/` directory of etcd.
+To function properly with [etcdv2's RBAC](https://coreos.com/etcd/docs/latest/authentication.html),
+it will need the following permissions:
+
+- R/W access to `/calico`
+- R/W access to `/calico/*`
+
+The following example will create a role called `calico-role` with the necessary
+permissions:
+
+```
+$ etcdctl role add calico-role
+$ etcdctl role grant calico-role -path '/calico' -readwrite
+$ etcdctl role grant calico-role -path '/calico/*' -readwrite
+```
+
+### Configuring calicoctl to use authenticated etcd access
+
+To configure Calico to use the newly created role, each component will
+individually need to be supplied with the role name and password. See the relevant
+component configuration guide:
+
+- [calicoctl]({{site.baseurl}}/{{page.version}}/reference/calicoctl/setup/etcdv2)
diff --git a/v2.4/reference/architecture/components.md b/v2.4/reference/architecture/components.md
new file mode 100644
index 00000000000..e528dcb9f7c
--- /dev/null
+++ b/v2.4/reference/architecture/components.md
@@ -0,0 +1,69 @@
+---
+title: Anatomy of a calico-node container
+---
+
+`calico/node` can be regarded as a helper container that bundles together the
+various components required for networking containers with Calico. The key
+components are:
+
+- Felix
+- BIRD
+- confd
+
+In addition, we use runit for logging (`svlogd`) and init (`runsv`) services.
+
+The [calicoctl repository](https://github.com/projectcalico/calicoctl) contains the Dockerfile for `calico/node` along with various
+configuration files that are used to configure and "glue" these components
+together.
+
+> Note that `calico/node` may be run in *policy only mode*, in which Felix
+> runs but both BIRD and confd are removed. This provides policy management
+> without route distribution between hosts. This mode can be enabled by
+> setting the environment variable `CALICO_NETWORKING=false` before starting
+> the node with `calicoctl node run`.
+
+#### Calico Felix agent
+
+The Felix daemon is the heart of Calico networking. Felix's primary job is to
+program routes and ACLs on a workload host to provide desired connectivity to
+and from workloads on the host.
+
+Felix also programs interface information to the kernel for outgoing endpoint
+traffic. Felix instructs the host to respond to ARPs for workloads with the
+MAC address of the host.
+
+For more details about Felix, please refer to the core [calico project](https://github.com/projectcalico/felix).
+
+#### BIRD/BIRD6 internet routing daemon
+
+BIRD is an open source BGP client that is used to exchange routing information
+between hosts. The routes that Felix programs into the kernel for endpoints
+are picked up by BIRD and distributed to BGP peers on the network, which
+provides inter-host routing.
+
+There are two BIRD processes running in the `calico-node` container. One for
+IPv4 (bird) and one for IPv6 (bird6).
+
+For more information on BIRD, please refer to the [BIRD internet routing daemon project](http://bird.network.cz/).
+
+Calico uses a fork of the main BIRD repo to include an additional feature
+required for IPIP support when running Calico in a cloud environment.
Refer
+to the [calico-bird repo](https://github.com/projectcalico/calico-bird) for more details.
+
+#### confd templating engine
+
+The confd templating engine monitors the etcd datastore for any changes to BGP
+configuration (and some top level global default configuration such as AS
+Number, logging levels, and IPAM information).
+
+Confd dynamically generates BIRD configuration files based on the data in etcd,
+triggered automatically from updates to the data. When the configuration file
+changes, confd triggers BIRD to load the new files.
+
+For more information on confd, please refer to the [confd project](https://github.com/kelseyhightower/confd).
+
+Calico uses a fork of the main confd repo which includes an additional change
+to improve performance when handling watch prefixes. See the
+[calico-bird repo](https://github.com/projectcalico/calico-bird) for more details.
diff --git a/v2.4/reference/architecture/data-path.md b/v2.4/reference/architecture/data-path.md
new file mode 100644
index 00000000000..70b001d7516
--- /dev/null
+++ b/v2.4/reference/architecture/data-path.md
@@ -0,0 +1,72 @@
+---
+title: 'The Calico Data Path: IP Routing and iptables'
+---
+
+One of Calico's key features is how packets flow between workloads in a
+data center, or between a workload and the Internet, without additional
+encapsulation.
+
+In the Calico approach, IP packets to or from a workload are routed and
+firewalled by the Linux routing table and iptables infrastructure on the
+workload's host. For a workload that is sending packets, Calico ensures
+that the host is always returned as the next hop MAC address regardless
+of whatever routing the workload itself might configure. For packets
+addressed to a workload, the last IP hop is that from the destination
+workload's host to the workload itself.
+
+![Calico datapath]({{site.baseurl}}/images/calico-datapath.png)
+
+Suppose that IPv4 addresses for the workloads are allocated from a
+datacenter-private subnet of 10.65/16, and that the hosts have IP
+addresses from 172.18.203/24. If you look at the routing table on a host
+you will see something like this:
+
+```
+ubuntu@calico-ci02:~$ route -n
+Kernel IP routing table
+Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
+0.0.0.0         172.18.203.1    0.0.0.0         UG    0      0        0 eth0
+10.65.0.0       0.0.0.0         255.255.0.0     U     0      0        0 ns-db03ab89-b4
+10.65.0.21      172.18.203.126  255.255.255.255 UGH   0      0        0 eth0
+10.65.0.22      172.18.203.129  255.255.255.255 UGH   0      0        0 eth0
+10.65.0.23      172.18.203.129  255.255.255.255 UGH   0      0        0 eth0
+10.65.0.24      0.0.0.0         255.255.255.255 UH    0      0        0 tapa429fb36-04
+172.18.203.0    0.0.0.0         255.255.255.0   U     0      0        0 eth0
+```
+
+There is one workload on this host, with IP address 10.65.0.24, which is
+accessible from the host via a TAP (or veth, etc.) interface named
+tapa429fb36-04. Hence there is a direct route for 10.65.0.24, through
+tapa429fb36-04. Other workloads, with the .21, .22 and .23 addresses,
+are hosted on two other hosts (172.18.203.126 and .129), so the routes
+for those workload addresses are via those hosts.
+
+The direct routes are set up by a Calico agent named Felix when it is
+asked to provision connectivity for a particular workload. A BGP client
+(such as BIRD) then notices those and distributes them – perhaps via a
+route reflector – to BGP clients running on other hosts, and hence the
+indirect routes appear also.
+
+## Bookended security
+
+The routing above in principle allows any workload in a data center to
+communicate with any other – but in general, an operator will want to
+restrict that; for example, so as to isolate customer A's workloads from
+those of customer B. Therefore Calico also programs iptables on each
+host, to specify the IP addresses (and optionally ports etc.) that each
+workload is allowed to send to or receive from. This programming is
+'bookended' in that the traffic between workloads X and Y will be
+firewalled by both X's host and Y's host – this helps to keep unwanted
+traffic off the data center's core network, and acts as a secondary defense
+in case a rogue workload manages to compromise its local host.
+
+## Is that all?
+
+As far as the static data path is concerned, yes. It's just a
+combination of responding to workload ARP requests with the host MAC, IP
+routing and iptables. There's a great deal more to Calico in terms of
+how the required routing and security information is managed, and for
+handling dynamic things such as workload migration – but the basic data
+path really is that simple.
diff --git a/v2.4/reference/architecture/index.md b/v2.4/reference/architecture/index.md
new file mode 100644
index 00000000000..37ff65632a0
--- /dev/null
+++ b/v2.4/reference/architecture/index.md
@@ -0,0 +1,212 @@
+---
+title: Calico Architecture
+---
+
+This document discusses the various pieces of Calico's architecture,
+with a focus on what specific role each component plays in
+the Calico network.
+
+# Components
+
+Calico is made up of the following interdependent components:
+
+- [Felix](#felix), the primary Calico agent that runs on each
+  machine that hosts endpoints.
+- The [Orchestrator plugin](#orchestrator-plugin),
+  orchestrator-specific code that tightly integrates Calico into
+  that orchestrator.
+- [etcd](#etcd), the data store.
+- [BIRD](#bgp-client-bird), a BGP client that
+  distributes routing information.
+- [BGP Route Reflector (BIRD)](#bgp-route-reflector-bird), an optional BGP
+  route reflector for higher scale.
+
+The following sections break down each component in more detail.
+
+## Felix
+
+Felix is a daemon that runs on every machine that provides endpoints: in
+most cases that means on nodes that host containers or VMs. It is
+responsible for programming routes and ACLs, and anything else required
+on the host, in order to provide the desired connectivity for the
+endpoints on that host.
+
+Depending on the specific orchestrator environment, Felix is responsible
+for the following tasks:
+
+#### Interface Management
+
+Felix programs some information about interfaces into the kernel in
+order to get the kernel to correctly handle the traffic emitted by that
+endpoint. In particular, it will ensure that the host responds to ARP
+requests from each workload with the MAC of the host, and will enable IP
+forwarding for interfaces that it manages.
+
+It also monitors for interfaces to appear and disappear so that it can
+ensure that the programming for those interfaces is applied at the
+appropriate time.
+
+#### Route Programming
+
+Felix is responsible for programming routes to the endpoints on its host
+into the Linux kernel FIB (Forwarding Information Base). This ensures that packets destined for those
+endpoints that arrive at the host are forwarded accordingly.
+
+#### ACL Programming
+
+Felix is also responsible for programming ACLs into the Linux kernel.
+These ACLs are used to ensure that only valid traffic can be sent
+between endpoints, and ensure that endpoints are not capable of
+circumventing Calico's security measures.
+
+#### State Reporting
+
+Felix is responsible for providing data about the health of the network.
+In particular, it reports errors and problems with configuring its host.
+This data is written into etcd, to make it visible to other components
+and operators of the network.
+
+## Orchestrator Plugin
+
+Unlike Felix, there is no single 'orchestrator plugin': instead, there
+are separate plugins for each major cloud orchestration platform (e.g.
+OpenStack, Kubernetes). The purpose of these plugins is to bind Calico
+more tightly into the orchestrator, allowing users to manage the Calico
+network just as they'd manage network tools that were built into the
+orchestrator.
+
+A good example of an orchestrator plugin is the Calico Neutron ML2
+mechanism driver. This component integrates with Neutron's ML2 plugin,
+and allows users to configure the Calico network by making Neutron API
+calls. This provides seamless integration with Neutron.
+
+The orchestrator plugin is responsible for the following tasks:
+
+#### API Translation
+
+The orchestrator will inevitably have its own set of APIs for managing
+networks. The orchestrator plugin's primary job is to translate those
+APIs into Calico's data-model and then store it in Calico's datastore.
+
+Some of this translation will be very simple, other bits may be more
+complex in order to render a single complex operation (e.g. live
+migration) into the series of simpler operations the rest of the Calico
+network expects.
+
+#### Feedback
+
+If necessary, the orchestrator plugin will provide feedback from the
+Calico network into the orchestrator. Examples include: providing
+information about Felix liveness; marking certain endpoints as failed if
+network setup failed.
+
+## etcd
+
+etcd is a distributed key-value store that has a focus on consistency.
+Calico uses etcd to provide the communication between components and as
+a consistent data store, which ensures Calico can always build an
+accurate network.
+
+Depending on the orchestrator plugin, etcd may either be the master data
+store or a lightweight mirror of a separate data store. For example, in
+an OpenStack deployment, the OpenStack database is considered the
+"source of truth" and etcd is used to mirror information about the
+network to the other Calico components.
+
+The etcd component is distributed across the entire deployment. It is
+divided into two groups of machines: the core cluster, and the proxies.
+
+For small deployments, the core cluster can be an etcd cluster of one
+node (which would typically be co-located with the
+[orchestrator plugin](#orchestrator-plugin) component). This deployment model is simple but provides no redundancy for etcd -- in the case of etcd failure the
+[orchestrator plugin](#orchestrator-plugin) would have to rebuild the database, which, as noted for OpenStack, simply requires that the plugin resynchronize
+state to etcd from the OpenStack database.
+
+In larger deployments, the core cluster can be scaled up, as per the
+[etcd admin guide](https://coreos.com/etcd/docs/latest/admin_guide.html#optimal-cluster-size).
+
+Additionally, on each machine that hosts either a [Felix](#felix)
+or a [plugin](#orchestrator-plugin), we run an etcd proxy. This reduces the load
+on the core cluster and shields nodes from the specifics of the etcd
+cluster.
In the case where the etcd cluster has a member on the same +machine as a [plugin](#orchestrator-plugin), we can forgo the proxy on that +machine. + +etcd is responsible for performing the following tasks: + +#### Data Storage + +etcd stores the data for the Calico network in a distributed, +consistent, fault-tolerant manner (for cluster sizes of at least three +etcd nodes). This set of properties ensures that the Calico network is +always in a known-good state, while allowing for some number of the +machines hosting etcd to fail or become unreachable. + +This distributed storage of Calico data also improves the ability of the +Calico components to read from the database (which is their most common +operation), as they can distribute their reads around the cluster. + +#### Communication + +etcd is also used as a communication bus between components. We do this +by having the non-etcd components watch certain points in the keyspace +to ensure that they see any changes that have been made, allowing them +to respond to those changes in a timely manner. This allows the act of +committing the state to the database to cause that state to be programmed +into the network. + + + +## BGP Client (BIRD) + +Calico deploys a BGP client on every node that also hosts a [Felix](#felix). The role of the BGP client is to read routing state that [Felix](#felix) programs into the kernel and +distribute it around the data center. + +In Calico, this BGP component is most commonly +[BIRD](http://bird.network.cz/), though any BGP client, such as [GoBGP](https://github.com/osrg/gobgp), that can draw +routes from the kernel and distribute them is suitable in this role. + +The BGP client is responsible for performing the following task: + +#### Route Distribution + +When [Felix](#felix) inserts routes into the Linux kernel FIB, +the BGP client will pick them up and distribute them to the other nodes +in the deployment. This ensures that traffic is efficiently routed +around the deployment. + + +## BGP Route Reflector (BIRD) + +For larger deployments, simple BGP can become a limiting factor because +it requires every BGP client to be connected to every other BGP client +in a mesh topology. The number of connections grows with the square of +the number of nodes (N^2), which rapidly becomes difficult to maintain. + +For that reason, in larger deployments, Calico will deploy a BGP route +reflector. This component, commonly used in the Internet, acts as a +central point to which the BGP clients connect, preventing them from +needing to talk to every single BGP client in the cluster. + +For redundancy, multiple BGP route reflectors can be deployed +seamlessly. The route reflectors are purely involved in the control of +the network: no endpoint data passes through them. + +In Calico, this BGP component is also most commonly +[BIRD](http://bird.network.cz/), configured as a route reflector rather +than as a standard BGP client. + +The BGP route reflector is responsible for the following task: + +#### Centralized Route Distribution + +When the [Calico BGP client](#bgp-client-bird) advertises routes +from its FIB to the route reflector, the route reflector advertises +those routes out to the other nodes in the deployment.
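+ +For example, to move a deployment from the full node-to-node mesh to a +route reflector topology, you might disable the mesh and then configure a +single global BGP peer that every node will use. The following commands are +an illustrative sketch only: the peer IP address and AS number are +placeholders, and a route reflector is assumed to already be running at +that address. + +``` +$ calicoctl config set nodeToNodeMesh off +$ calicoctl create -f - <<EOF +apiVersion: v1 +kind: bgpPeer +metadata: + scope: global + peerIP: 192.0.2.1 +spec: + asNumber: 64512 +EOF +```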
diff --git a/v2.4/reference/calicoctl/commands/apply.md b/v2.4/reference/calicoctl/commands/apply.md new file mode 100644 index 00000000000..5d1bacc6df6 --- /dev/null +++ b/v2.4/reference/calicoctl/commands/apply.md @@ -0,0 +1,100 @@ +--- +title: calicoctl apply +--- + +This section describes the `calicoctl apply` command. + +Read the [calicoctl command line interface user reference]({{site.baseurl}}/{{page.version}}/reference/calicoctl/) +for a full list of calicoctl commands. + +## Displaying the help text for 'calicoctl apply' command + +Run `calicoctl apply --help` to display the following help menu for the +command. + +``` +Usage: + calicoctl apply --filename=<FILENAME> [--config=<CONFIG>] + +Examples: + # Apply a policy using the data in policy.yaml. + calicoctl apply -f ./policy.yaml + + # Apply a policy based on the JSON passed into stdin. + cat policy.json | calicoctl apply -f - + +Options: + -h --help Show this screen. + -f --filename=<FILENAME> Filename to use to apply the resource. If set to + "-" loads from stdin. + -c --config=<CONFIG> Path to the file containing connection + configuration in YAML or JSON format. + [default: /etc/calico/calicoctl.cfg] + +Description: + The apply command is used to create or replace a set of resources by filename + or stdin. JSON and YAML formats are accepted. + + Valid resource types are: + + * node + * bgpPeer + * hostEndpoint + * workloadEndpoint + * ipPool + * policy + * profile + + When applying a resource: + - if the resource does not already exist (as determined by its primary + identifiers) then it is created + - if the resource already exists then the specification for that resource is + replaced in its entirety by the new resource specification. + + The output of the command indicates how many resources were successfully + applied, and the error reason if an error occurred. + + The resources are applied in the order they are specified. In the event of a + failure applying a specific resource it is possible to work out which + resource failed based on the number of resources successfully applied. + + When applying a resource to perform an update, the complete resource spec + must be provided; it is not sufficient to supply only the fields that are + being updated. +``` + +### Examples + +``` +# Apply a set of resources (of mixed type) using the data in resources.yaml. +# Results indicate that 8 resources were successfully applied. +$ calicoctl apply -f ./resources.yaml +Successfully applied 8 resource(s) + +# Apply two policy resources based on the JSON passed into stdin. +$ cat policy.json | calicoctl apply -f - +Successfully applied 2 'policy' resource(s) +``` + +### Options + +``` +-f --filename=<FILENAME> Filename to use to apply the resource. If set to + "-" loads from stdin. +``` + +### General options + +``` +-c --config=<CONFIG> Path to the file containing connection + configuration in YAML or JSON format. + [default: /etc/calico/calicoctl.cfg] +``` + +## See also + +- [Resources]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/) for details on all valid resources, including file format + and schema +- [Policy]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/policy) for details on the Calico selector-based policy model +- [calicoctl configuration]({{site.baseurl}}/{{page.version}}/reference/calicoctl/setup) for details on configuring `calicoctl` to access + the Calico datastore.
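+ +## Example resource file + +The examples above reference a `policy.yaml` file. The following is a rough +sketch of what such a file might contain; the policy name, selectors and port +are illustrative placeholders, and the [Policy]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/policy) resource reference describes the full schema. + +```yaml +apiVersion: v1 +kind: policy +metadata: + name: allow-tcp-6379 +spec: + selector: role == 'database' + ingress: + - action: allow + protocol: tcp + source: + selector: role == 'frontend' + destination: + ports: + - 6379 +```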
diff --git a/v2.4/reference/calicoctl/commands/config.md b/v2.4/reference/calicoctl/commands/config.md new file mode 100644 index 00000000000..7ac9fe9a206 --- /dev/null +++ b/v2.4/reference/calicoctl/commands/config.md @@ -0,0 +1,126 @@ +--- +title: calicoctl config +--- + +This section describes the `calicoctl config` commands. + +The `calicoctl config` command allows users to view or modify +low-level component configurations for Felix and BGP. + +Read the [calicoctl Overview]({{site.baseurl}}/{{page.version}}/reference/calicoctl/) for a full list of calicoctl commands. + +## Displaying the help text for 'calicoctl config' commands + +Run `calicoctl config --help` to display the following help menu for the +commands. + +``` +Usage: + calicoctl config set <NAME> <VALUE> [--node=<NODE>] + [--raw=(bgp|felix)] + [--config=<CONFIG>] + calicoctl config unset <NAME> [--node=<NODE>] + [--raw=(bgp|felix)] + [--config=<CONFIG>] + calicoctl config get <NAME> [--node=<NODE>] + [--raw=(bgp|felix)] + [--config=<CONFIG>] + +Examples: + # Turn off the full BGP node-to-node mesh + calicoctl config set nodeToNodeMesh off + + # Set global log level to warning + calicoctl config set logLevel warning + + # Set log level to info for node "node1" + calicoctl config set logLevel info --node=node1 + + # Display the current setting for the nodeToNodeMesh + calicoctl config get nodeToNodeMesh + +Options: + -n --node=<NODE> The node name. + --raw=(bgp|felix) Apply raw configuration for the specified component. + This option should be used with care; the data is not + validated and it is possible to configure or remove + data that may prevent the component from working as + expected. + -c --config=<CONFIG> Path to the file containing connection configuration in + YAML or JSON format. + [default: /etc/calico/calicoctl.cfg] + +Description: + +These commands can be used to manage global system-wide configuration and some +node-specific low level configuration. + +The --node option is used to specify the node name for low-level configuration +that is specific to a particular node. + +For configuration that has both global values and node-specific values, the +--node parameter is optional: including the parameter will manage the +node-specific value, excluding it will manage the global value. For these +options, if the node-specific value is unset, the global value will be used on +the node. + +For configuration that is only global, the --node option should not be +included. Unsetting the global value will return it to its original default. + +For configuration that is node-specific only, the --node option should be +included. Unsetting the node value will remove the configuration, and for +supported configuration will then inherit the value from the global settings. + +The table below details the valid config options. + + Name | Scope | Value | +-----------------+-------------+----------------------------------------+ + logLevel | global,node | none,debug,info,warning,error,critical | + nodeToNodeMesh | global | on,off | + asNumber | global | 0-4294967295 | + ipip | global | on,off | +``` + +### Examples + +``` +# Turn off the full BGP node-to-node mesh +$ calicoctl config set nodeToNodeMesh off + +# Set global log level to warning +$ calicoctl config set logLevel warning + +# Set log level to info for node "node1" +$ calicoctl config set logLevel info --node=node1 + +# Display the current setting for the nodeToNodeMesh +$ calicoctl config get nodeToNodeMesh +off +``` + +### Options + +``` +-n --node=<NODE> The node name. + --raw=(bgp|felix) Apply raw configuration for the specified component.
+ This option should be used with care; the data is not + validated and it is possible to configure or remove + data that may prevent the component from working as + expected. +``` + +### General options + +``` +-c --config=<CONFIG> Path to the file containing connection + configuration in YAML or JSON format. + [default: /etc/calico/calicoctl.cfg] +``` + +## See also + +- [Resources]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/) for details on all valid resources, including file format + and schema +- [Policy]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/policy) for details on the Calico selector-based policy model +- [calicoctl configuration]({{site.baseurl}}/{{page.version}}/reference/calicoctl/setup) for details on configuring `calicoctl` to access + the Calico datastore. diff --git a/v2.4/reference/calicoctl/commands/create.md b/v2.4/reference/calicoctl/commands/create.md new file mode 100644 index 00000000000..5848049d0e7 --- /dev/null +++ b/v2.4/reference/calicoctl/commands/create.md @@ -0,0 +1,101 @@ +--- +title: calicoctl create +--- + +This section describes the `calicoctl create` command. + +Read the [calicoctl command line interface user reference]({{site.baseurl}}/{{page.version}}/reference/calicoctl/) +for a full list of calicoctl commands. + +## Displaying the help text for 'calicoctl create' command + +Run `calicoctl create --help` to display the following help menu for the +command. + +``` +Usage: + calicoctl create --filename=<FILENAME> [--skip-exists] [--config=<CONFIG>] + +Examples: + # Create a policy using the data in policy.yaml. + calicoctl create -f ./policy.yaml + + # Create a policy based on the JSON passed into stdin. + cat policy.json | calicoctl create -f - + +Options: + -h --help Show this screen. + -f --filename=<FILENAME> Filename to use to create the resource. If set to + "-" loads from stdin. + --skip-exists Skip over and treat as successful any attempts to + create an entry that already exists. + -c --config=<CONFIG> Path to the file containing connection + configuration in YAML or JSON format. + [default: /etc/calico/calicoctl.cfg] + +Description: + The create command is used to create a set of resources by filename or stdin. + JSON and YAML formats are accepted. + + Valid resource types are: + + * node + * bgpPeer + * hostEndpoint + * workloadEndpoint + * ipPool + * policy + * profile + + Attempting to create a resource that already exists is treated as a + terminating error unless the --skip-exists flag is set. If this flag is set, + resources that already exist are skipped. + + The output of the command indicates how many resources were successfully + created, and the error reason if an error occurred. If the --skip-exists + flag is set then skipped resources are included in the success count. + + The resources are created in the order they are specified. In the event of a + failure creating a specific resource it is possible to work out which + resource failed based on the number of resources successfully created. +``` + +### Examples + +``` +# Create a set of resources (of mixed type) using the data in resources.yaml. +# Results indicate that 8 resources were successfully created. +$ calicoctl create -f ./resources.yaml +Successfully created 8 resource(s) + +# Create the same set of resources reading from stdin. +# Results indicate failure because the first resource (in this case a Profile) +# already exists.
+$ cat resources.yaml | calicoctl create -f - +Failed to create any resources: resource already exists: Profile(name=profile1) +``` + +### Options + +``` +-f --filename=<FILENAME> Filename to use to create the resource. If set to + "-" loads from stdin. + --skip-exists Skip over and treat as successful any attempts to + create an entry that already exists. +``` + +### General options + +``` +-c --config=<CONFIG> Path to the file containing connection + configuration in YAML or JSON format. + [default: /etc/calico/calicoctl.cfg] +``` + +## See also + +- [Resources]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/) for details on all valid resources, including file format + and schema +- [Policy]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/policy) for details on the Calico selector-based policy model +- [calicoctl configuration]({{site.baseurl}}/{{page.version}}/reference/calicoctl/setup) for details on configuring `calicoctl` to access + the Calico datastore. diff --git a/v2.4/reference/calicoctl/commands/delete.md b/v2.4/reference/calicoctl/commands/delete.md new file mode 100644 index 00000000000..ef70e5bad80 --- /dev/null +++ b/v2.4/reference/calicoctl/commands/delete.md @@ -0,0 +1,130 @@ +--- +title: calicoctl delete +--- + +This section describes the `calicoctl delete` command. + +Read the [calicoctl command line interface user reference]({{site.baseurl}}/{{page.version}}/reference/calicoctl/) +for a full list of calicoctl commands. + +## Displaying the help text for 'calicoctl delete' command + +Run `calicoctl delete --help` to display the following help menu for the +command. + +``` +Usage: + calicoctl delete ([--scope=<SCOPE>] [--node=<NODE>] [--orchestrator=<ORCH>] + [--workload=<WORKLOAD>] (<KIND> [<NAME>]) | + --filename=<FILENAME>) + [--skip-not-exists] [--config=<CONFIG>] + +Examples: + # Delete a policy using the type and name specified in policy.yaml. + calicoctl delete -f ./policy.yaml + + # Delete a policy based on the type and name in the YAML passed into stdin. + cat policy.yaml | calicoctl delete -f - + + # Delete policy with name "foo" + calicoctl delete policy foo + +Options: + -h --help Show this screen. + -s --skip-not-exists Skip over and treat as successful, resources that + don't exist. + -f --filename=<FILENAME> Filename to use to delete the resource. If set to + "-" loads from stdin. + -n --node=<NODE> The node (this may be the hostname of the compute + server if your installation does not explicitly set + the names of each Calico node). + --orchestrator=<ORCH> The orchestrator (valid for workload endpoints). + --workload=<WORKLOAD> The workload (valid for workload endpoints). + --scope=<SCOPE> The scope of the resource type. One of global, + node. This is only valid for BGP peers and is used + to indicate whether the peer is a global peer or + node-specific. + -c --config=<CONFIG> Path to the file containing connection + configuration in YAML or JSON format. + [default: /etc/calico/calicoctl.cfg] + +Description: + The delete command is used to delete a set of resources by filename or stdin, + or by type and identifiers. JSON and YAML formats are accepted for file and + stdin format. + + Valid resource types are: + + * node + * bgpPeer + * hostEndpoint + * workloadEndpoint + * ipPool + * policy + * profile + + The resource type is case insensitive and may be pluralized. + + Attempting to delete a resource that does not exist is treated as a + terminating error unless the --skip-not-exists flag is set. If this flag is + set, resources that do not exist are skipped.
+ + When deleting resources by type, only a single type may be specified at a + time. The name is required along with any other identifiers required to + uniquely identify a resource of the specified type. + + The output of the command indicates how many resources were successfully + deleted, and the error reason if an error occurred. If the --skip-not-exists + flag is set then skipped resources are included in the success count. + + The resources are deleted in the order they are specified. In the event of a + failure deleting a specific resource it is possible to work out which + resource failed based on the number of resources successfully deleted. +``` + +### Examples + +``` +# Delete a set of resources (of mixed type) using the data in resources.yaml. +# Results indicate that 8 resources were successfully deleted. +$ calicoctl delete -f ./resources.yaml +Successfully deleted 8 resource(s) + +# Delete a policy resource by name. The policy is called "policy1". +$ calicoctl delete policy policy1 +Successfully deleted 1 'policy' resource(s) +``` + +### Options + +``` +-s --skip-not-exists Skip over and treat as successful, resources that + don't exist. +-f --filename=<FILENAME> Filename to use to delete the resource. If set to + "-" loads from stdin. +-n --node=<NODE> The node (this may be the hostname of the compute + server if your installation does not explicitly set + the names of each Calico node). + --orchestrator=<ORCH> The orchestrator (valid for workload endpoints). + --workload=<WORKLOAD> The workload (valid for workload endpoints). + --scope=<SCOPE> The scope of the resource type. One of global, + node. This is only valid for BGP peers and is used + to indicate whether the peer is a global peer or + node-specific. +``` + +### General options + +``` +-c --config=<CONFIG> Path to the file containing connection + configuration in YAML or JSON format. + [default: /etc/calico/calicoctl.cfg] +``` + +## See also + +- [Resources]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/) for details on all valid resources, including file format + and schema +- [Policy]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/policy) for details on the Calico selector-based policy model +- [calicoctl configuration]({{site.baseurl}}/{{page.version}}/reference/calicoctl/setup) for details on configuring `calicoctl` to access + the Calico datastore. diff --git a/v2.4/reference/calicoctl/commands/get.md b/v2.4/reference/calicoctl/commands/get.md new file mode 100644 index 00000000000..d67b2a5d528 --- /dev/null +++ b/v2.4/reference/calicoctl/commands/get.md @@ -0,0 +1,231 @@ +--- +title: calicoctl get +--- + +This section describes the `calicoctl get` command. + +Read the [calicoctl command line interface user reference]({{site.baseurl}}/{{page.version}}/reference/calicoctl/) +for a full list of calicoctl commands. + +## Displaying the help text for 'calicoctl get' command + +Run `calicoctl get --help` to display the following help menu for the +command. + +``` +Usage: + calicoctl get ([--scope=<SCOPE>] [--node=<NODE>] [--orchestrator=<ORCH>] + [--workload=<WORKLOAD>] (<KIND> [<NAME>]) | + --filename=<FILENAME>) + [--output=<OUTPUT>] [--config=<CONFIG>] + +Examples: + # List all policy in default output format. + calicoctl get policy + + # List a specific policy in YAML format + calicoctl get -o yaml policy my-policy-1 + +Options: + -h --help Show this screen. + -f --filename=<FILENAME> Filename to use to get the resource. If set to + "-" loads from stdin. + -o --output=<OUTPUT> Output format. One of: yaml, json, ps, wide, + custom-columns=..., go-template=..., + go-template-file=...
[Default: ps] + -n --node=<NODE> The node (this may be the hostname of the + compute server if your installation does not + explicitly set the names of each Calico node). + --orchestrator=<ORCH> The orchestrator (valid for workload endpoints). + --workload=<WORKLOAD> The workload (valid for workload endpoints). + --scope=<SCOPE> The scope of the resource type. One of global, + node. This is only valid for BGP peers and is + used to indicate whether the peer is a global + peer or node-specific. + -c --config=<CONFIG> Path to the file containing connection + configuration in YAML or JSON format. + [default: /etc/calico/calicoctl.cfg] + +Description: + The get command is used to display a set of resources by filename or stdin, + or by type and identifiers. JSON and YAML formats are accepted for file and + stdin format. + + Valid resource types are: + + * node + * bgpPeer + * hostEndpoint + * workloadEndpoint + * ipPool + * policy + * profile + + The resource type is case insensitive and may be pluralized. + + Attempting to get resources that do not exist will simply return no results. + + When getting resources by type, only a single type may be specified at a + time. The name and other identifiers (hostname, scope) are optional, and are + wildcarded when omitted. Thus if you specify no identifiers at all (other + than type), then all configured resources of the requested type will be + returned. + + By default the results are output in a ps-style table output. There are + alternative ways to display the data using the --output option: + + ps Display the results in ps-style output. + wide As per the ps option, but includes more headings. + custom-columns As per the ps option, but only display the columns + that are requested in the comma-separated list. + golang-template Display the results using the specified golang + template. This can be used to filter results, for + example to return a specific value. + golang-template-file Display the results using the golang template that is + contained in the specified file. + yaml Display the results in YAML output format. + json Display the results in JSON output format. + + Note that the data output using YAML or JSON format is always valid to use as + input to all of the resource management commands (create, apply, replace, + delete, get). + + Please refer to the docs at http://docs.projectcalico.org for more details on + the output formats, including example outputs, resource structure (required + for the golang template definitions) and the valid column names (required for + the custom-columns option). +``` + +### Options +``` +-h --help Show this screen. +-f --filename=<FILENAME> Filename to use to get the resource. If set to + "-" loads from stdin. +-o --output=<OUTPUT> Output format. One of: yaml, json, ps, wide, + custom-columns=..., go-template=..., + go-template-file=... [Default: ps] +-n --node=<NODE> The node (this may be the hostname of the + compute server if your installation does not + explicitly set the names of each Calico node). + --orchestrator=<ORCH> The orchestrator (valid for workload endpoints). + --workload=<WORKLOAD> The workload (valid for workload endpoints). + --scope=<SCOPE> The scope of the resource type. One of global, + node. This is only valid for BGP peers and is + used to indicate whether the peer is a global + peer or node-specific. +``` + +### General options + +``` +-c --config=<CONFIG> Path to the file containing connection + configuration in YAML or JSON format. + [default: /etc/calico/calicoctl.cfg] +``` + +### Output options + +#### `ps` + +This is the default output format.
It displays output in a ps-style table with sufficient columns to +uniquely identify the resource. + +The headings displayed for each resource type are fixed. However, see the `wide` option for displaying additional +columns, and `custom-columns` for selecting which columns to display. + +Example +``` +$ calicoctl get hostEndpoint +HOSTNAME NAME +host1 endpoint1 +myhost eth0 +``` + +#### `wide` + +Similar to the `ps` format, the `wide` option displays output in a ps-style table but with additional columns. + +The headings displayed for each resource type are fixed. See `custom-columns` for selecting which columns to display. + +Example +``` +$ calicoctl get hostEndpoint --output=wide +HOSTNAME NAME INTERFACE IPS PROFILES +host1 endpoint1 1.2.3.4,0:bb::aa prof1,prof2 +myhost eth0 profile1 +``` + +#### `custom-columns` + +Similar to the `ps` format, the `custom-columns` option displays output in a ps-style table but allows the user +to specify an ordered, comma-separated list of columns to display in the output. The valid heading names for each +resource type are documented in the [Resources]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/) guide. + +Example +``` +$ calicoctl get hostEndpoint --output=custom-columns=NAME,IPS +NAME IPS +endpoint1 1.2.3.4,0:bb::aa +eth0 +``` + +#### `yaml / json` + +The `yaml` and `json` options display the output as a list of YAML documents or JSON dictionaries. The fields for +each resource type are documented in the [Resources]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/) guide, or alternatively view the structure +definitions (implemented in golang) in the [libcalico API](https://github.com/projectcalico/libcalico-go/tree/master/lib/api). + +The output from either of these formats may be used as input for all of the resource management commands. + +Example +``` +$ calicoctl get hostEndpoint --output=yaml +- apiVersion: v1 + kind: hostEndpoint + metadata: + hostname: host1 + labels: + type: database + name: endpoint1 + spec: + expectedIPs: + - 1.2.3.4 + - 0:bb::aa + profiles: + - prof1 + - prof2 +- apiVersion: v1 + kind: hostEndpoint + metadata: + hostname: myhost + name: eth0 + spec: + profiles: + - profile1 +``` + +#### `go-template / go-template-file` + +The `go-template` and `go-template-file` options display the output using a golang template specified as a string +on the CLI, or defined in a separate file. +When writing a template, be aware that the data passed to the template is a golang slice of resource-lists. The +resource-lists are defined in the [libcalico API]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/) and there is a resource-list defined for +each resource type. A resource-list contains an Items field which is itself a slice of resources. Thus, to output +the "Name" field from the supplied data, it is necessary to enumerate over the slice of resource-lists and the items +within that list.
+ +Example +{% raw %} +``` +$ calicoctl get hostEndpoint --output=go-template="{{range .}}{{range .Items}}{{.Metadata.Name}},{{end}}{{end}}" +endpoint1,eth0, +``` +{% endraw %} + +## See also + +- [Resources]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/) for details on all valid resources, including file format + and schema +- [Policy]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/policy) for details on the Calico selector-based policy model +- [calicoctl configuration]({{site.baseurl}}/{{page.version}}/reference/calicoctl/setup) for details on configuring `calicoctl` to access + the Calico datastore. diff --git a/v2.4/reference/calicoctl/commands/index.md b/v2.4/reference/calicoctl/commands/index.md new file mode 100644 index 00000000000..bdd0b1ef281 --- /dev/null +++ b/v2.4/reference/calicoctl/commands/index.md @@ -0,0 +1,59 @@ +--- +title: Command Reference +--- + +The command line tool, `calicoctl`, makes it easy to manage Calico network +and security policy. + +This section is a command line reference for calicoctl, organized based on +the command hierarchy. + +## Top level help + +Run `calicoctl --help` to display the following help menu for the top level +calicoctl commands. + +``` +Usage: + calicoctl [options] <command> [<args>...] + + create Create a resource by filename or stdin. + replace Replace a resource by filename or stdin. + apply Apply a resource by filename or stdin. This creates a resource + if it does not exist, and replaces a resource if it does exist. + delete Delete a resource identified by file, stdin or resource type and + name. + get Get a resource identified by file, stdin or resource type and + name. + config Manage system-wide and low-level node configuration options. + ipam IP address management. + node Calico node management. + version Display the version of calicoctl. + +Options: + -h --help Show this screen. + -l --log-level=<level> Set the log level (one of panic, fatal, error, + warn, info, debug) [default: panic] + +Description: + The calicoctl command line tool is used to manage Calico network and security + policy, to view and manage endpoint configuration, and to manage a Calico + node instance. + + See 'calicoctl <command> --help' to read about a specific subcommand. +``` + +## Top level command line options + +Details on the `calicoctl` commands are described in the documents linked below +organized by top level command.
+ +- [calicoctl create]({{site.baseurl}}/{{page.version}}/reference/calicoctl/commands/create) +- [calicoctl replace]({{site.baseurl}}/{{page.version}}/reference/calicoctl/commands/replace) +- [calicoctl apply]({{site.baseurl}}/{{page.version}}/reference/calicoctl/commands/apply) +- [calicoctl delete]({{site.baseurl}}/{{page.version}}/reference/calicoctl/commands/delete) +- [calicoctl get]({{site.baseurl}}/{{page.version}}/reference/calicoctl/commands/get) +- [calicoctl config]({{site.baseurl}}/{{page.version}}/reference/calicoctl/commands/config) +- [calicoctl ipam]({{site.baseurl}}/{{page.version}}/reference/calicoctl/commands/ipam) +- [calicoctl node]({{site.baseurl}}/{{page.version}}/reference/calicoctl/commands/node) +- [calicoctl version]({{site.baseurl}}/{{page.version}}/reference/calicoctl/commands/version) diff --git a/v2.4/reference/calicoctl/commands/ipam/index.md b/v2.4/reference/calicoctl/commands/ipam/index.md new file mode 100644 index 00000000000..f5782ad0297 --- /dev/null +++ b/v2.4/reference/calicoctl/commands/ipam/index.md @@ -0,0 +1,36 @@ +--- +title: calicoctl ipam +--- + +This section describes the `calicoctl ipam` commands. + +Read the [calicoctl Overview]({{site.baseurl}}/{{page.version}}/reference/calicoctl/) for a full list of calicoctl commands. + +## Displaying the help text for 'calicoctl ipam' commands + +Run `calicoctl ipam --help` to display the following help menu for the +commands. + +``` +Usage: + calicoctl ipam <command> [<args>...] + + release Release a Calico assigned IP address. + show Show details of a Calico assigned IP address. + +Options: + -h --help Show this screen. + +Description: + IP Address Management specific commands for calicoctl. + + See 'calicoctl ipam <command> --help' to read about a specific subcommand. +``` + +## IPAM specific commands + +Details on the `calicoctl ipam` commands are described in the documents linked below +organized by sub command. + +- [calicoctl ipam release]({{site.baseurl}}/{{page.version}}/reference/calicoctl/commands/ipam/release) +- [calicoctl ipam show]({{site.baseurl}}/{{page.version}}/reference/calicoctl/commands/ipam/show) diff --git a/v2.4/reference/calicoctl/commands/ipam/release.md b/v2.4/reference/calicoctl/commands/ipam/release.md new file mode 100644 index 00000000000..c4992be3e02 --- /dev/null +++ b/v2.4/reference/calicoctl/commands/ipam/release.md @@ -0,0 +1,52 @@ +--- +title: calicoctl ipam release +--- + +This section describes the `calicoctl ipam release` command. + +Read the [calicoctl Overview]({{site.baseurl}}/{{page.version}}/reference/calicoctl/) for a full list of calicoctl commands. + +## Displaying the help text for 'calicoctl ipam release' command + +Run `calicoctl ipam release --help` to display the following help menu for the +command. + +``` +Usage: + calicoctl ipam release --ip=<IP> [--config=<CONFIG>] + +Options: + -h --help Show this screen. + --ip=<IP> IP address to release. + -c --config=<CONFIG> Path to the file containing connection + configuration in YAML or JSON format. + [default: /etc/calico/calicoctl.cfg] + +Description: + The ipam release command releases an IP address from the Calico IP Address + Manager that has been previously assigned to an endpoint. When an IP address + is released, it becomes available for assignment to any endpoint. + + Note that this does not remove the IP from any existing endpoints that may be + using it, so only use this command to clean up addresses from endpoints that + were not cleanly removed from Calico.
+``` + +### Examples + +``` +$ calicoctl ipam release --ip=192.168.1.2 +``` + +### General options + +``` +-c --config=<CONFIG> Path to the file containing connection + configuration in YAML or JSON format. + [default: /etc/calico/calicoctl.cfg] +``` + +## See also + +- [calicoctl configuration]({{site.baseurl}}/{{page.version}}/reference/calicoctl/setup) for details on configuring `calicoctl` to access + the Calico datastore. diff --git a/v2.4/reference/calicoctl/commands/ipam/show.md b/v2.4/reference/calicoctl/commands/ipam/show.md new file mode 100644 index 00000000000..3840989e5f0 --- /dev/null +++ b/v2.4/reference/calicoctl/commands/ipam/show.md @@ -0,0 +1,60 @@ +--- +title: calicoctl ipam show +--- + +This section describes the `calicoctl ipam show` command. + +Read the [calicoctl Overview]({{site.baseurl}}/{{page.version}}/reference/calicoctl/) for a full list of calicoctl commands. + +## Displaying the help text for 'calicoctl ipam show' command + +Run `calicoctl ipam show --help` to display the following help menu for the +command. + +``` +Usage: + calicoctl ipam show --ip=<IP> [--config=<CONFIG>] + +Options: + -h --help Show this screen. + --ip=<IP> IP address to show. + -c --config=<CONFIG> Path to the file containing connection + configuration in YAML or JSON format. + [default: /etc/calico/calicoctl.cfg] + +Description: + The ipam show command prints information about a given IP address, such as + special attributes defined for the IP or whether the IP has been reserved by + a user of the Calico IP Address Manager. +``` + +### Examples + +``` +# IP is not assigned to an endpoint +$ calicoctl ipam show --ip=192.168.1.2 +IP 192.168.1.2 is not currently assigned + +# Basic Docker container has the assigned IP +$ calicoctl ipam show --ip=192.168.1.1 +No attributes defined for 192.168.1.1 +``` + +### Options + +``` + --ip=<IP> IP address to show. +``` + +### General options + +``` +-c --config=<CONFIG> Path to the file containing connection + configuration in YAML or JSON format. + [default: /etc/calico/calicoctl.cfg] +``` + +## See also + +- [calicoctl configuration]({{site.baseurl}}/{{page.version}}/reference/calicoctl/setup) for details on configuring `calicoctl` to access + the Calico datastore. diff --git a/v2.4/reference/calicoctl/commands/node/checksystem.md b/v2.4/reference/calicoctl/commands/node/checksystem.md new file mode 100644 index 00000000000..7f0b78f493f --- /dev/null +++ b/v2.4/reference/calicoctl/commands/node/checksystem.md @@ -0,0 +1,32 @@ +--- +title: calicoctl node checksystem +--- + +This section describes the `calicoctl node checksystem` command. + +Read the [calicoctl Overview]({{site.baseurl}}/{{page.version}}/reference/calicoctl) +for a full list of calicoctl commands. + +## Displaying the help text for 'calicoctl node checksystem' command + +Run `calicoctl node checksystem --help` to display the following help menu for the +command. + +``` +Usage: + calicoctl node checksystem + +Options: + -h --help Show this screen. + +Description: + Check the compatibility of this compute host to run a Calico node instance. +``` + +### Examples: + +``` +$ calicoctl node checksystem +WARNING: Unable to detect the xt_set module. Load with `modprobe xt_set` +WARNING: Unable to detect the ipip module.
Load with `modprobe ipip` +``` diff --git a/v2.4/reference/calicoctl/commands/node/diags.md b/v2.4/reference/calicoctl/commands/node/diags.md new file mode 100644 index 00000000000..f28e656f523 --- /dev/null +++ b/v2.4/reference/calicoctl/commands/node/diags.md @@ -0,0 +1,69 @@ +--- +title: calicoctl node diags +--- + +This section describes the `calicoctl node diags` command. + +Read the [calicoctl Overview]({{site.baseurl}}/{{page.version}}/reference/calicoctl) +for a full list of calicoctl commands. + +## Displaying the help text for 'calicoctl node diags' command + +Run `calicoctl node diags --help` to display the following help menu for the +command. + +``` +Usage: + calicoctl node diags [--log-dir=<LOG_DIR>] + +Options: + -h --help Show this screen. + --log-dir=<LOG_DIR> The directory containing Calico logs + [default: /var/log/calico] + +Description: + This command is used to gather diagnostic information from a Calico node. + This is usually used when trying to diagnose an issue that may be related to + your Calico network. + + The output of the command explains how to automatically upload the + diagnostics to http://transfer.sh for easy sharing of the data. Note that the + uploaded files will be deleted after 14 days. + + This command must be run on the specific Calico node that you are gathering + diagnostics for. +``` + +### Examples + +``` +$ sudo calicoctl node diags +Collecting diagnostics +Using temp dir: /tmp/calico676127473 +Dumping netstat +Dumping routes (IPv4) +Dumping routes (IPv6) +Dumping interface info (IPv4) +Dumping interface info (IPv6) +Dumping iptables (IPv4) +Dumping iptables (IPv6) +Dumping ipsets +exit status 1 +Dumping ipsets (container) +Copying journal for calico-node.service +Dumping felix stats +Copying Calico logs + +Diags saved to /tmp/calico676127473/diags-20170522_151219.tar.gz +If required, you can upload the diagnostics bundle to a file sharing service +such as transfer.sh using curl or similar. For example: + + curl --upload-file /tmp/calico676127473/diags-20170522_151219.tar.gz https://transfer.sh//tmp/calico676127473/diags-20170522_151219.tar.gz +``` + +### Options + +``` + --log-dir=<LOG_DIR> The directory containing Calico logs. + [default: /var/log/calico] +``` diff --git a/v2.4/reference/calicoctl/commands/node/index.md b/v2.4/reference/calicoctl/commands/node/index.md new file mode 100644 index 00000000000..3540aeedfd5 --- /dev/null +++ b/v2.4/reference/calicoctl/commands/node/index.md @@ -0,0 +1,43 @@ +--- +title: calicoctl node +--- + +This section describes the `calicoctl node` commands. + +Read the [calicoctl Overview]({{site.baseurl}}/{{page.version}}/reference/calicoctl/) +for a full list of calicoctl commands. + +Note that if you run `calicoctl` in a container, `calicoctl node ...` commands will +not work (they need access to parts of the host filesystem). + +## Displaying the help text for 'calicoctl node' commands + +Run `calicoctl node --help` to display the following help menu for the +commands. + +``` +Usage: + calicoctl node <command> [<args>...] + + status View the current status of a Calico node. + diags Gather a diagnostics bundle for a Calico node. + checksystem Verify the compute host is able to run a Calico node instance. + +Options: + -h --help Show this screen. + +Description: + Node specific commands for calicoctl. These commands must be run directly on + the compute host running the Calico node instance. + + See 'calicoctl node <command> --help' to read about a specific subcommand.
+``` + +## Node specific commands + +Details on the `calicoctl node` commands are described in the documents linked below +organized by sub command. + +- [calicoctl node status]({{site.baseurl}}/{{page.version}}/reference/calicoctl/commands/node/status) +- [calicoctl node diags]({{site.baseurl}}/{{page.version}}/reference/calicoctl/commands/node/diags) +- [calicoctl node checksystem]({{site.baseurl}}/{{page.version}}/reference/calicoctl/commands/node/checksystem) diff --git a/v2.4/reference/calicoctl/commands/node/run.md b/v2.4/reference/calicoctl/commands/node/run.md new file mode 100644 index 00000000000..64e191649d1 --- /dev/null +++ b/v2.4/reference/calicoctl/commands/node/run.md @@ -0,0 +1,358 @@ +--- +title: calicoctl node run +--- + +This section describes the `calicoctl node run` command. + +Read the [calicoctl Overview]({{site.baseurl}}/{{page.version}}/reference/calicoctl) +for a full list of calicoctl commands. + +## Displaying the help text for 'calicoctl node run' command + +Run `calicoctl node run --help` to display the following help menu for the +command. + +``` +Usage: + calicoctl node run [--ip=<IP>] [--ip6=<IP6>] [--as=<AS_NUM>] + [--name=<NAME>] + [--ip-autodetection-method=<IP_AUTODETECTION_METHOD>] + [--ip6-autodetection-method=<IP6_AUTODETECTION_METHOD>] + [--log-dir=<LOG_DIR>] + [--node-image=<DOCKER_IMAGE_NAME>] + [--backend=(bird|gobgp|none)] + [--config=<CONFIG>] + [--no-default-ippools] + [--dryrun] + [--init-system] + [--disable-docker-networking] + [--docker-networking-ifprefix=<IFPREFIX>] + [--use-docker-networking-container-labels] + +Options: + -h --help Show this screen. + --name=<NAME> The name of the Calico node. If this is not + supplied it defaults to the host name. + --as=<AS_NUM> Set the AS number for this node. If omitted, it + will use the value configured on the node resource. + If there is no configured value and the --as option + is omitted, the node will inherit the global AS + number (see 'calicoctl config' for details). + --ip=<IP> Set the local IPv4 routing address for this node. + If omitted, it will use the value configured on the + node resource. If there is no configured value + and the --ip option is omitted, the node will + attempt to autodetect an IP address to use. Use a + value of 'autodetect' to always force autodetection + of the IP each time the node starts. + --ip6=<IP6> Set the local IPv6 routing address for this node. + If omitted, it will use the value configured on the + node resource. If there is no configured value + and the --ip6 option is omitted, the node will not + route IPv6. Use a value of 'autodetect' to force + autodetection of the IP each time the node starts. + --ip-autodetection-method=<IP_AUTODETECTION_METHOD> + Specify the autodetection method for detecting the + local IPv4 routing address for this node. The valid + options are: + > first-found + Use the first valid IP address on the first + enumerated interface (common known exceptions are + filtered out, e.g. the docker bridge). It is not + recommended to use this if you have multiple + external interfaces on your host. + > can-reach=<IP OR DOMAIN NAME> + Use the interface determined by your host routing + tables that will be used to reach the supplied + destination IP or domain name. + > interface=<IFACE NAME REGEX LIST> + Use the first valid IP address found on interfaces + named as per the first matching supplied interface + name regex. Regexes are separated by commas + (e.g. eth.*,enp0s.*). + > skip-interface=<IFACE NAME REGEX LIST> + Use the first valid IP address on the first + enumerated interface (same logic as first-found + above) that does NOT match with any of the + specified interface name regexes. Regexes are + separated by commas (e.g. eth.*,enp0s.*).
[default: first-found] + --ip6-autodetection-method=<IP6_AUTODETECTION_METHOD> + Specify the autodetection method for detecting the + local IPv6 routing address for this node. See + ip-autodetection-method flag for valid options. + [default: first-found] + --log-dir=<LOG_DIR> The directory containing Calico logs. + [default: /var/log/calico] + --node-image=<DOCKER_IMAGE_NAME> + Docker image to use for Calico's per-node container. + [default: quay.io/calico/node:latest] + --backend=(bird|gobgp|none) + Specify which networking backend to use. When set + to "none", Calico node runs in policy only mode. + The option to run with gobgp is currently + experimental. + [default: bird] + --dryrun Output the appropriate command, without starting the + container. + --init-system Run the appropriate command to use with an init + system. + --no-default-ippools Do not create default pools upon startup. + Default IP pools will be created if this is not set + and there are no pre-existing Calico IP pools. + --disable-docker-networking + Disable Docker networking. + --docker-networking-ifprefix=<IFPREFIX> + Interface prefix to use for the network interface + within the Docker containers that have been networked + by the Calico driver. + [default: cali] + --use-docker-networking-container-labels + Extract the Calico-namespaced Docker container labels + (org.projectcalico.label.*) and apply them to the + container endpoints for use with Calico policy. + This option is only valid when using Calico Docker + networking, and when enabled traffic must be + explicitly allowed by configuring Calico policies. + -c --config=<CONFIG> Path to the file containing connection + configuration in YAML or JSON format. + [default: /etc/calico/calicoctl.cfg] + +Description: + This command is used to start a calico/node container instance which provides + Calico networking and network policy on your compute host. +``` + +### Kubernetes as the datastore + +When Calico is configured to use the Kubernetes API as the datastore, BGP routing is *currently* +not supported. Many of the command line options related to BGP routing will +have no effect. These include: +- `--ip`, `--ip6`, `--ip-autodetection-method`, `--ip6-autodetection-method` +- `--as` +- `--backend` + +### Examples + +``` +# Start the Calico node with a pre-configured IPv4 address for BGP. +$ sudo calicoctl node run +Running command to load modules: modprobe -a xt_set ip6_tables +Enabling IPv4 forwarding +Enabling IPv6 forwarding +Increasing conntrack limit +Running the following command: + +docker run --net=host --privileged --name=calico-node -d --restart=always -e ETCD_SCHEME=http -e HOSTNAME=calico -e CALICO_LIBNETWORK_ENABLED=true -e ETCD_AUTHORITY=127.0.0.1:2379 -e AS= -e NO_DEFAULT_POOLS= -e ETCD_ENDPOINTS= -e IP= -e IP6= -e CALICO_NETWORKING_BACKEND=bird -v /var/run/docker.sock:/var/run/docker.sock -v /var/run/calico:/var/run/calico -v /lib/modules:/lib/modules -v /var/log/calico:/var/log/calico -v /run/docker/plugins:/run/docker/plugins quay.io/calico/node:{{site.data.versions[page.version].first.title}} + +Waiting for etcd connection... +Using configured IPv4 address: 192.0.2.0 +No IPv6 address configured +Using global AS number +WARNING: Could not confirm that the provided IPv4 address is assigned to this host. +Calico node name: calico +CALICO_LIBNETWORK_ENABLED is true - start libnetwork service +Calico node started successfully +``` + +#### IP Autodetection method examples + +The node resource includes IPv4 and IPv6 routing IP addresses that should +match those on one of the host interfaces.
These IP addresses may be +configured in advance by configuring the node resource prior to starting the +calico/node service; alternatively, the addresses may either be explicitly +specified or autodetected through options on the `calicoctl node run` command. + +There are different autodetection methods available and you should use the one +best suited to your deployment. If you are able to explicitly specify the IP +addresses, that is always preferred over autodetection. This section describes +the available methods for autodetecting the host's IP addresses. + +An IPv4 address is always required, and so if no address was previously +configured in the node resource, and no address was specified on the CLI, then +we will attempt to autodetect an IPv4 address. An IPv6 address, however, will +only be autodetected when explicitly requested. + +To force autodetection of an IPv4 address, use the option `--ip=autodetect`. To +force autodetection of an IPv6 address, use the option `--ip6=autodetect`. + +To set the autodetection method for IPv4, use the `--ip-autodetection-method` option. +To set the autodetection method for IPv6, use the `--ip6-autodetection-method` option. + +> **Note** +> If you are starting the calico/node container directly (and not using the +> `calicoctl node run` helper command), the options are passed in as environment +> variables. These are described in the [calico/node configuration guide]({{site.baseurl}}/{{page.version}}/reference/node/configuration). + +**first-found** + +The `first-found` option enumerates all interface IP addresses and returns the +first valid IP address (based on IP version and type of address) on +the first valid interface. Certain known "local" interfaces +are omitted, such as the docker bridge. The order that both the interfaces +and the IP addresses are listed is system dependent. + +This is the default detection method. However, since this method only makes a +very simplified guess, it is recommended to either configure the node with a +specific IP address, or to use one of the other detection methods. + +e.g. + +``` +# First-found auto detection method explicitly specified +sudo calicoctl node run --ip autodetect --ip-autodetection-method first-found +``` + +**can-reach=DESTINATION** + +The `can-reach` method uses your local routing to determine which IP address +will be used to reach the supplied destination. Both IP addresses and domain +names may be used. + +e.g. + +``` +# IP detection using a can-reach IP address +sudo calicoctl node run --ip autodetect --ip-autodetection-method can-reach=8.8.8.8 + +# IP detection using a can-reach domain name +sudo calicoctl node run --ip autodetect --ip-autodetection-method can-reach=www.google.com +``` + +**interface=INTERFACE-REGEX,INTERFACE-REGEX,...** + +The `interface` method uses the supplied interface regular expressions (golang +syntax) to enumerate matching interfaces and to return the first IP address on +the first interface that matches any of the interface regexes provided. The +order that both the interfaces and the IP addresses are listed is system +dependent. + +e.g. + +``` +# IP detection on interface eth0 +sudo calicoctl node run --ip autodetect --ip-autodetection-method interface=eth0 + +# IP detection on interfaces eth0, eth1, eth2 etc. +sudo calicoctl node run --ip autodetect --ip-autodetection-method interface=eth.* + +# IP detection on interfaces eth0, eth1, eth2 etc.
and wlp2s0 +sudo calicoctl node run --ip autodetect --ip-autodetection-method interface=eth.*,wlp2s0 +``` + +**skip-interface=INTERFACE-REGEX,INTERFACE-REGEX,...** + +The `skip-interface` method uses the supplied interface regular expressions (golang +syntax) to enumerate all interface IP addresses and returns the first valid IP address +(based on IP version and type of address) that does not match the listed regular +expressions. Like the `first-found` option, it also skips by default certain known +"local" interfaces such as the docker bridge. The order that both the interfaces +and the IP addresses are listed is system dependent. + +This method accepts multiple regular expressions separated by `,`. A single +regular expression for interfaces to skip may also be supplied, in which case +no terminating `,` character is required. + +### Options + +``` + --name=<NAME> The name of the Calico node. If this is not + supplied it defaults to the host name. + --as=<AS_NUM> Set the AS number for this node. If omitted, it + will use the value configured on the node resource. + If there is no configured value and the --as option + is omitted, the node will inherit the global AS + number (see 'calicoctl config' for details). + --ip=<IP> Set the local IPv4 routing address for this node. + If omitted, it will use the value configured on the + node resource. If there is no configured value + and the --ip option is omitted, the node will + attempt to autodetect an IP address to use. Use a + value of 'autodetect' to always force autodetection + of the IP each time the node starts. + --ip6=<IP6> Set the local IPv6 routing address for this node. + If omitted, it will use the value configured on the + node resource. If there is no configured value + and the --ip6 option is omitted, the node will not + route IPv6. Use a value of 'autodetect' to force + autodetection of the IP each time the node starts. + --ip-autodetection-method=<IP_AUTODETECTION_METHOD> + Specify the autodetection method for detecting the + local IPv4 routing address for this node. The valid + options are: + > first-found + Use the first valid IP address on the first + enumerated interface (common known exceptions are + filtered out, e.g. the docker bridge). It is not + recommended to use this if you have multiple + external interfaces on your host. + > can-reach=<IP OR DOMAIN NAME> + Use the interface determined by your host routing + tables that will be used to reach the supplied + destination IP or domain name. + > interface=<IFACE NAME REGEX LIST> + Use the first valid IP address found on interfaces + named as per the first matching supplied interface + name regex. Regexes are separated by commas + (e.g. eth.*,enp0s.*). + > skip-interface=<IFACE NAME REGEX LIST> + Use the first valid IP address on the first + enumerated interface (same logic as first-found + above) that does NOT match with any of the + specified interface name regexes. Regexes are + separated by commas (e.g. eth.*,enp0s.*). + [default: first-found] + --ip6-autodetection-method=<IP6_AUTODETECTION_METHOD> + Specify the autodetection method for detecting the + local IPv6 routing address for this node. See + ip-autodetection-method flag for valid options. + [default: first-found] + --log-dir=<LOG_DIR> The directory containing Calico logs. + [default: /var/log/calico] + --node-image=<DOCKER_IMAGE_NAME> + Docker image to use for Calico's per-node container. + [default: quay.io/calico/node:latest] + --backend=(bird|gobgp|none) + Specify which networking backend to use. When set + to "none", Calico node runs in policy only mode. + The option to run with gobgp is currently + experimental.
[default: bird] + --dryrun Output the appropriate command, without starting the + container. + --init-system Run the appropriate command to use with an init + system. + --no-default-ippools Do not create default pools upon startup. + Default IP pools will be created if this is not set + and there are no pre-existing Calico IP pools. + --disable-docker-networking + Disable Docker networking. + --docker-networking-ifprefix=<IFPREFIX> + Interface prefix to use for the network interface + within the Docker containers that have been networked + by the Calico driver. + [default: cali] + --use-docker-networking-container-labels + Extract the Calico-namespaced Docker container labels + (org.projectcalico.label.*) and apply them to the + container endpoints for use with Calico policy. + This option is only valid when using Calico Docker + networking, and when enabled traffic must be + explicitly allowed by configuring Calico policies. +``` + +### General options + +``` +-c --config=<CONFIG> Path to the file containing connection + configuration in YAML or JSON format. + [default: /etc/calico/calicoctl.cfg] +``` + +## See also + +- [Resources]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/) for details on all valid resources, including file format + and schema +- [Policy]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/policy) for details on the Calico selector-based policy model +- [calicoctl configuration]({{site.baseurl}}/{{page.version}}/reference/calicoctl/setup) for details on configuring `calicoctl` to access + the Calico datastore. diff --git a/v2.4/reference/calicoctl/commands/node/status.md b/v2.4/reference/calicoctl/commands/node/status.md new file mode 100644 index 00000000000..f82e9fe862c --- /dev/null +++ b/v2.4/reference/calicoctl/commands/node/status.md @@ -0,0 +1,42 @@ +--- +title: calicoctl node status +--- + +This section describes the `calicoctl node status` command. + +Read the [calicoctl Overview]({{site.baseurl}}/{{page.version}}/reference/calicoctl) +for a full list of calicoctl commands. + +## Displaying the help text for 'calicoctl node status' command + +Run `calicoctl node status --help` to display the following help menu for the +command. + +``` +Usage: + calicoctl node status + +Options: + -h --help Show this screen. + +Description: + Check the status of the Calico node instance. This includes the status and + uptime of the node instance, and BGP peering states. +``` + +### Examples + +``` +$ sudo calicoctl node status +Calico process is running. + +IPv4 BGP status ++--------------+-------------------+-------+----------+-------------+ +| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO | ++--------------+-------------------+-------+----------+-------------+ +| 172.17.8.102 | node-to-node mesh | up | 23:30:04 | Established | ++--------------+-------------------+-------+----------+-------------+ + +IPv6 BGP status +No IPv6 peers found. +``` diff --git a/v2.4/reference/calicoctl/commands/replace.md b/v2.4/reference/calicoctl/commands/replace.md new file mode 100644 index 00000000000..31593a13493 --- /dev/null +++ b/v2.4/reference/calicoctl/commands/replace.md @@ -0,0 +1,97 @@ +--- +title: calicoctl replace +--- + +This section describes the `calicoctl replace` command. + +Read the [calicoctl command line interface user reference]({{site.baseurl}}/{{page.version}}/reference/calicoctl/) +for a full list of calicoctl commands.
+
+## Displaying the help text for 'calicoctl replace' command
+
+Run `calicoctl replace --help` to display the following help menu for the
+command.
+
+```
+Usage:
+  calicoctl replace --filename=<FILENAME> [--config=<CONFIG>]
+
+Examples:
+  # Replace a policy using the data in policy.yaml.
+  calicoctl replace -f ./policy.yaml
+
+  # Replace a policy based on the JSON passed into stdin.
+  cat policy.json | calicoctl replace -f -
+
+Options:
+  -h --help                 Show this screen.
+  -f --filename=<FILENAME>  Filename to use to replace the resource. If set
+                            to "-" loads from stdin.
+  -c --config=<CONFIG>      Path to the file containing connection
+                            configuration in YAML or JSON format.
+                            [default: /etc/calico/calicoctl.cfg]
+
+Description:
+  The replace command is used to replace a set of resources by filename or
+  stdin. JSON and YAML formats are accepted.
+
+  Valid resource types are:
+
+    * node
+    * bgpPeer
+    * hostEndpoint
+    * workloadEndpoint
+    * ipPool
+    * policy
+    * profile
+
+  Attempting to replace a resource that does not exist is treated as a
+  terminating error.
+
+  The output of the command indicates how many resources were successfully
+  replaced, and the error reason if an error occurred.
+
+  The resources are replaced in the order they are specified. In the event of
+  a failure replacing a specific resource it is possible to work out which
+  resource failed based on the number of resources successfully replaced.
+
+  When replacing a resource, the complete resource spec must be provided; it is
+  not sufficient to supply only the fields that are being updated.
+```
+
+### Examples
+
+```
+# Replace a set of resources (of mixed type) using the data in resources.yaml.
+# Results indicate that 8 resources were successfully replaced.
+$ calicoctl replace -f ./resources.yaml
+Successfully replaced 8 resource(s)
+
+# Replace a policy based on the JSON passed into stdin.
+# Results indicate the policy does not exist.
+$ cat policy.json | calicoctl replace -f -
+Failed to replace any 'policy' resources: resource does not exist: Policy(name=dbPolicy)
+```
+
+### Options
+
+```
+-f --filename=<FILENAME>  Filename to use to replace the resource. If set
+                          to "-" loads from stdin.
+```
+
+### General options
+
+```
+-c --config=<CONFIG>      Path to the file containing connection
+                          configuration in YAML or JSON format.
+                          [default: /etc/calico/calicoctl.cfg]
+```
+
+## See also
+
+- [Resources]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/) for details on all valid resources, including file format
+  and schema
+- [Policy]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/policy) for details on the Calico selector-based policy model
+- [calicoctl configuration]({{site.baseurl}}/{{page.version}}/reference/calicoctl/setup) for details on configuring `calicoctl` to access
+  the Calico datastore.
diff --git a/v2.4/reference/calicoctl/commands/version.md b/v2.4/reference/calicoctl/commands/version.md
new file mode 100644
index 00000000000..7d95d65aea9
--- /dev/null
+++ b/v2.4/reference/calicoctl/commands/version.md
@@ -0,0 +1,43 @@
+---
+title: calicoctl version
+---
+
+This section describes the `calicoctl version` command.
+
+Read the [calicoctl Overview]({{site.baseurl}}/{{page.version}}/reference/calicoctl/)
+for a full list of calicoctl commands.
+
+## Displaying the help text for 'calicoctl version' command
+
+Run `calicoctl version --help` to display the following help menu for the
+command.
+
+```
+Usage:
+  calicoctl version [--config=<CONFIG>]
+
+Options:
+  -h --help                 Show this screen.
+  -c --config=<CONFIG>      Path to the file containing connection configuration in
+                            YAML or JSON format.
+                            [default: /etc/calico/calicoctl.cfg]
+
+Description:
+  Display the version of calicoctl.
+```
+
+### Examples
+
+```
+$ calicoctl version
+Client Version: v1.4.0
+Build date: 2017-07-21T19:33:04+0000
+Git commit: d2babb6
+Cluster Version: v2.4.0
+Cluster Type: KDD,hosted
+```
+
+## See also
+
+- [calicoctl configuration]({{site.baseurl}}/{{page.version}}/reference/calicoctl/setup) for details on configuring `calicoctl` to access
+  the Calico datastore.
diff --git a/v2.4/reference/calicoctl/index.md b/v2.4/reference/calicoctl/index.md
new file mode 100644
index 00000000000..da9f414be3d
--- /dev/null
+++ b/v2.4/reference/calicoctl/index.md
@@ -0,0 +1,31 @@
+---
+title: calicoctl user reference
+---
+
+The command line tool, `calicoctl`, makes it easy to manage Calico network
+and security policy.
+
+It can be downloaded from the [releases page of the
+calicoctl repository](https://github.com/projectcalico/calicoctl/releases/latest/).
+
+Alternatively, you can run it as a Docker container: the image is `calico/ctl`
+on Docker Hub and Quay. Note that, due to limitations imposed by running in a
+container, the containerized tool does not have the full functionality of the
+binary running directly on the host (notably, the `calicoctl node ...` commands
+do not work in a container).
+
+Follow the setup in the [Configuring calicoctl]({{site.baseurl}}/{{page.version}}/reference/calicoctl/setup) section,
+which describes the initial setup of calicoctl: configuring
+the connection information for your Calico datastore.
+
+The calicoctl command line interface provides a number of resource management
+commands to allow you to create, modify, delete and view the different Calico
+resources.
+
+The full list of commands is described in the
+[Command Reference]({{site.baseurl}}/{{page.version}}/reference/calicoctl/commands/)
+section.
+
+The full list of resources that can be managed, including a description of each,
+is described in the [Resource Definitions]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/)
+section.
diff --git a/v2.4/reference/calicoctl/resources/bgppeer.md b/v2.4/reference/calicoctl/resources/bgppeer.md
new file mode 100644
index 00000000000..efb24e1f2ae
--- /dev/null
+++ b/v2.4/reference/calicoctl/resources/bgppeer.md
@@ -0,0 +1,58 @@
+---
+title: BGP Peer Resource (bgpPeer)
+---
+
+A BGP Peer resource (bgpPeer) represents a remote BGP peer with which the node(s) in a Calico
+cluster will peer. Configuring BGP peers allows you to peer a
+Calico network with your datacenter fabric (e.g. a ToR switch). For more
+information on cluster layouts, see Calico's documentation on
+[L3 Topologies]({{site.baseurl}}/{{page.version}}/reference/private-cloud/l3-interconnect-fabric).
+
+For `calicoctl` commands that specify a resource type on the CLI, the following
+aliases are supported (all case insensitive): `bgppeer`, `bgppeers`, `bgpp`, `bgpps`, `bp`, `bps`.
+
+### Sample YAML
+
+```yaml
+apiVersion: v1
+kind: bgpPeer
+metadata:
+  scope: node
+  node: rack1-host1
+  peerIP: 192.168.1.1
+spec:
+  asNumber: 63400
+```
+
+### BGP Peer Definition
+
+#### Metadata
+
+| Field       | Description                 | Accepted Values   | Schema |
+|-------------|-----------------------------|-------------------|--------|
+| scope       | Determines the Calico nodes to which this peer applies. | global, node | string |
+| node        | Must be specified if scope is node, and must be omitted when scope is global. | The hostname of the node to which this peer applies. | string |
+| peerIP      | The IP address of this peer. | Valid IPv4 or IPv6 address. | string |
+
+#### Spec
+
+| Field       | Description                 | Accepted Values   | Schema | Default    |
+|-------------|-----------------------------|-------------------|--------|------------|
+| asNumber    | The AS Number of this peer. | A valid AS Number, may be specified in dotted notation. | integer/string | |
+
+### Peer Scopes
+
+BGP Peers can exist at either a global scope or a node scope. A peer's scope
+determines which Calico nodes will attempt to establish a BGP session with that peer.
+
+#### Global Peer
+
+If this is a `global` scoped BGP peer, all nodes in the cluster will attempt to
+establish a BGP connection with it.
+
+#### Node Peer
+
+A BGP peer can also be added at the `node` scope, meaning only a single specified
+node will peer with it. BGP peer resources of this nature must specify a `node`
+to inform Calico which node this peer is targeting.
+
diff --git a/v2.4/reference/calicoctl/resources/hostendpoint.md b/v2.4/reference/calicoctl/resources/hostendpoint.md
new file mode 100644
index 00000000000..d78682a2b7f
--- /dev/null
+++ b/v2.4/reference/calicoctl/resources/hostendpoint.md
@@ -0,0 +1,52 @@
+---
+title: Host Endpoint Resource (hostEndpoint)
+---
+
+A Host Endpoint resource (hostEndpoint) represents an interface attached to a host that is running Calico.
+
+Each host endpoint may include a set of labels and a list of profiles that Calico
+will use to apply
+[policy]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/policy)
+to the interface. If no profiles or labels are applied, Calico will not apply
+any policy.
+
+For `calicoctl` commands that specify a resource type on the CLI, the following
+aliases are supported (all case insensitive): `hostendpoint`, `hostendpoints`, `hep`, `heps`.
+
+### Sample YAML
+
+```yaml
+apiVersion: v1
+kind: hostEndpoint
+metadata:
+  name: eth0
+  node: myhost
+  labels:
+    type: production
+spec:
+  interfaceName: eth0
+  expectedIPs:
+  - 192.168.0.1
+  - 192.168.0.2
+  profiles:
+  - profile1
+  - profile2
+```
+
+### HostEndpoint Definition
+
+#### Metadata
+
+| Field       | Description                 | Accepted Values   | Schema  |
+|-------------|-----------------------------|-------------------|---------|
+| name        | The name of this hostEndpoint. | | string |
+| node        | The name of the node where this hostEndpoint resides. | | string |
+| labels      | A set of labels to apply to this endpoint. | | map |
+
+#### Spec
+
+| Field       | Description                 | Accepted Values   | Schema | Default    |
+|-------------|-----------------------------|-------------------|--------|------------|
+| interfaceName | The name of the interface on which to apply policy. | | string |
+| expectedIPs | The expected IP addresses associated with the interface. | Valid IPv4 or IPv6 address | list |
+| profiles    | The list of profiles to apply to the endpoint. | | list |
diff --git a/v2.4/reference/calicoctl/resources/index.md b/v2.4/reference/calicoctl/resources/index.md
new file mode 100644
index 00000000000..d7b5b3bc18b
--- /dev/null
+++ b/v2.4/reference/calicoctl/resources/index.md
@@ -0,0 +1,87 @@
+---
+title: Resource Definitions
+---
+
+This section describes the set of valid resource types that can be managed
+through `calicoctl`.
+
+While resources may be supplied in YAML or JSON format, this guide provides examples in YAML.
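+
+For reference, a resource expressed in JSON has the same structure as its YAML
+form. A minimal sketch, using a hypothetical profile named `profile1`:
+
+```json
+{
+  "apiVersion": "v1",
+  "kind": "profile",
+  "metadata": {
+    "name": "profile1"
+  },
+  "spec": {
+    "egress": [
+      {
+        "action": "allow"
+      }
+    ]
+  }
+}
+```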
+
+## Overview of resource structure
+
+The calicoctl commands for resource management (create, apply, delete, replace, get)
+all take resource manifests as input.
+
+Each manifest may contain a single resource
+(e.g. a profile resource), or a list of multiple resources (e.g. a profile and two
+hostEndpoint resources).
+
+The general structure of a single resource is as follows:
+
+```yaml
+apiVersion: v1
+kind: <kind>
+metadata:
+  # Identifying information
+  name: <name>
+  ...
+spec:
+  # Specification of the resource
+  ...
+```
+
+### Schema
+
+| Field    | Description           | Accepted Values              | Schema |
+|----------|-----------------------|------------------------------|--------|
+| apiVersion | Indicates the version of the API that the data corresponds to. | v1 | string |
+| kind     | Specifies the type of resource described by the YAML document. | | [kind](#supported-kinds) |
+| metadata | Contains information used to uniquely identify the particular instance of the resource. | | map |
+| spec     | Contains the resource specification. | | map |
+
+### Supported Kinds
+
+The following resources are supported:
+
+- [bgpPeer]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/bgppeer)
+- [hostEndpoint]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/hostendpoint)
+- [policy]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/policy)
+- [ipPool]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/ippool)
+- [profile]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/profile)
+- [workloadEndpoint]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/workloadendpoint)
+
+### Multiple resources in a single file
+
+A file may contain multiple resource documents specified in a YAML list format. For example, the following is the contents of a file containing two `hostEndpoint` resources:
+
+```yaml
+- apiVersion: v1
+  kind: hostEndpoint
+  metadata:
+    name: endpoint1
+    node: host1
+    labels:
+      type: database
+  spec:
+    interfaceName: eth0
+    profiles:
+    - prof1
+    - prof2
+    expectedIPs:
+    - 1.2.3.4
+    - "00:bb::aa"
+- apiVersion: v1
+  kind: hostEndpoint
+  metadata:
+    name: endpoint2
+    node: host1
+    labels:
+      type: frontend
+  spec:
+    interfaceName: eth1
+    profiles:
+    - prof1
+    - prof2
+    expectedIPs:
+    - 1.2.3.5
+```
diff --git a/v2.4/reference/calicoctl/resources/ippool.md b/v2.4/reference/calicoctl/resources/ippool.md
new file mode 100644
index 00000000000..5333ff43cc2
--- /dev/null
+++ b/v2.4/reference/calicoctl/resources/ippool.md
@@ -0,0 +1,63 @@
+---
+title: IP Pool Resource (ipPool)
+---
+
+An IP pool resource (ipPool) represents a collection of IP addresses from which Calico expects
+endpoint IPs to be assigned.
+
+For `calicoctl` commands that specify a resource type on the CLI, the following
+aliases are supported (all case insensitive): `ippool`, `ippools`, `ipp`, `ipps`, `pool`, `pools`.
+
+### Sample YAML
+
+```yaml
+apiVersion: v1
+kind: ipPool
+metadata:
+  cidr: 10.1.0.0/16
+spec:
+  ipip:
+    enabled: true
+    mode: cross-subnet
+  nat-outgoing: true
+  disabled: false
+```
+
+### IP Pool Definition
+
+#### Metadata
+
+| Field       | Description                 | Accepted Values   | Schema |
+|-------------|-----------------------------|-------------------|--------|
+| cidr        | IP range to use for this pool. | A valid IPv4 or IPv6 CIDR. | string |
+
+#### Spec
+
+| Field       | Description                 | Accepted Values   | Schema | Default    |
+|-------------|-----------------------------|-------------------|--------|------------|
+| ipip        | ipip tunneling configuration for this pool. If not specified, ipip tunneling is disabled for this pool. | | [IPIP](#ipip) |
+| nat-outgoing | When enabled, packets sent from calico networked containers in this pool to destinations outside of this pool will be masqueraded. | true, false | boolean | false
+| disabled    | When set to true, Calico IPAM will not assign addresses from this pool. | true, false | boolean | false
+
+#### IPIP
+
+| Field    | Description                 | Accepted Values | Schema  | Default    |
+|----------|-----------------------------|--------------|---------|------------|
+| enabled  | When set to true, ipip encapsulation will be used. | true, false | boolean | true |
+| mode     | The IPIP mode defining when IPIP will be used. | always, cross-subnet | string | always |
+
+Routing of packets using IP in IP will be used when the destination IP address
+is in an IP Pool that has IPIP enabled. In addition, if the `mode` is set to `cross-subnet`,
+Calico will only route using IP in IP if the IP address of the destination node is in a different
+subnet. The subnet of each node is configured on the node resource (which may be automatically
+determined when running the calico/node service).
+
+For details on configuring IP-in-IP on your deployment, please read the
+[Configuring IP-in-IP guide]({{site.baseurl}}/{{page.version}}/usage/configuration/ip-in-ip).
+
+> **NOTE**
+>
+> Setting `nat-outgoing` is recommended on any IP Pool with `ipip` enabled.
+> When `ipip` is enabled without `nat-outgoing`, routing between Workloads and
+> Hosts running Calico is asymmetric and may cause traffic to be filtered due to
+> [RPF](https://en.wikipedia.org/wiki/Reverse_path_forwarding) checks failing.
diff --git a/v2.4/reference/calicoctl/resources/node.md b/v2.4/reference/calicoctl/resources/node.md
new file mode 100644
index 00000000000..1b6bd52c69b
--- /dev/null
+++ b/v2.4/reference/calicoctl/resources/node.md
@@ -0,0 +1,52 @@
+---
+title: Node Resource (node)
+---
+
+A Node resource (node) represents a node running Calico. When adding a host
+to a Calico cluster, a Node resource needs to be created which contains the
+configuration for the Calico Node instance running on the host.
+
+When starting a Calico node instance, the name supplied to the instance should
+match the name configured in the Node resource.
+
+By default, starting a `calico/node` instance will automatically create a node resource
+using the `hostname` of the compute host.
+
+For `calicoctl` commands that specify a resource type on the CLI, the following
+aliases are supported (all case insensitive): `node`, `nodes`, `no`, `nos`.
+
+### Sample YAML
+
+```yaml
+apiVersion: v1
+kind: node
+metadata:
+  name: node-hostname
+spec:
+  bgp:
+    asNumber: 64512
+    ipv4Address: 10.244.0.1/24
+    ipv6Address: 2001:db8:85a3::8a2e:370:7334/120
+```
+
+### Definition
+
+#### Metadata
+
+| Field       | Description                 | Accepted Values   | Schema |
+|-------------|-----------------------------|-------------------|--------|
+| name        | The name of this node. | The value passed to the node instance on the host. | string |
+
+#### Spec
+
+| Field       | Description                 | Accepted Values   | Schema | Default    |
+|-------------|-----------------------------|-------------------|--------|------------|
+| bgp         | BGP configuration for this node. Omit if using Calico for policy only. | | [BGP](#bgp) |
+
+#### BGP
+
+| Field       | Description                 | Accepted Values   | Schema | Default    |
+|-------------|-----------------------------|-------------------|--------|------------|
+| asNumber    | The AS Number of your Calico node. | Optional. If omitted, the global value is used (see [calicoctl config]({{site.baseurl}}/{{page.version}}/reference/calicoctl/commands/config) for details). | integer |
+| ipv4Address | The IPv4 address and subnet exported as the next-hop for the Calico endpoints on the host. | The IPv4 address must be specified if BGP is enabled. | string |
+| ipv6Address | The IPv6 address and subnet exported as the next-hop for the Calico endpoints on the host. | Optional. | string |
diff --git a/v2.4/reference/calicoctl/resources/policy.md b/v2.4/reference/calicoctl/resources/policy.md
new file mode 100644
index 00000000000..5b7b8f15d2e
--- /dev/null
+++ b/v2.4/reference/calicoctl/resources/policy.md
@@ -0,0 +1,111 @@
+---
+title: Policy Resource (policy)
+---
+
+A Policy resource (policy) represents an ordered set of rules which are applied
+to a collection of endpoints that match a [label selector](#selector).
+
+Policy resources can be used to define network connectivity rules between groups of Calico endpoints and host endpoints, and
+take precedence over [Profile resources]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/profile) if any are defined.
+
+For `calicoctl` commands that specify a resource type on the CLI, the following
+aliases are supported (all case insensitive): `policy`, `policies`, `pol`, `pols`.
+
+### Sample YAML
+
+This sample policy allows TCP traffic from `frontend` endpoints to port 6379 on
+`database` endpoints.
+
+```yaml
+apiVersion: v1
+kind: policy
+metadata:
+  name: allow-tcp-6379
+spec:
+  selector: role == 'database'
+  ingress:
+  - action: allow
+    protocol: tcp
+    source:
+      selector: role == 'frontend'
+    destination:
+      ports:
+      - 6379
+  egress:
+  - action: allow
+```
+
+### Definition
+
+#### Metadata
+
+| Field | Description  | Accepted Values   | Schema |
+|-------|--------------|-------------------|--------|
+| name | The name of the policy. | | string |
+| annotations | Opaque key/value information to be used by clients. | | map |
+
+
+#### Spec
+
+| Field      | Description | Accepted Values | Schema | Default |
+|------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----------------+-----------------------+---------|
+| order      | (Optional) Indicates priority of this policy, with lower order taking precedence. No value indicates highest order (lowest precedence) | | float | |
+| selector   | Selects the endpoints to which this policy applies. | | [selector](#selector) | all() |
+| ingress    | Ordered list of ingress rules applied by policy. | | List of [Rule](#rule) | |
+| egress     | Ordered list of egress rules applied by this policy. | | List of [Rule](#rule) | |
+| doNotTrack | Indicates that the rules in this policy should be applied before any data plane connection tracking, and that packets allowed by these rules should not be tracked. | true, false | boolean | false |
+
+The `doNotTrack` field is meaningful for [host
+endpoints]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/hostendpoint)
+only. It does not apply at all to [workload
+endpoints]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/workloadendpoint);
+connection tracking is always used for flows to and from those.
+
+[Untracked policy]({{site.baseurl}}/{{page.version}}/getting-started/bare-metal/bare-metal) explains more about how `doNotTrack` can be useful for host endpoints.
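+
+As an illustrative sketch (the policy name, label, and port below are
+assumptions, not taken from this reference), an untracked policy for host
+endpoints might look like:
+
+```yaml
+apiVersion: v1
+kind: policy
+metadata:
+  name: allow-ssh-untracked
+spec:
+  order: 10
+  # Assumes host endpoints carrying a hypothetical 'role' label.
+  selector: role == 'bastion'
+  doNotTrack: true
+  ingress:
+  - action: allow
+    protocol: tcp
+    destination:
+      ports:
+      - 22
+  egress:
+  - action: allow
+    protocol: tcp
+    source:
+      ports:
+      - 22
+```
+
+Because these rules bypass connection tracking, return traffic is not
+automatically allowed, so untracked policies typically need explicit rules in
+both directions (the egress rule above allows the SSH replies).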
+
+#### Rule
+
+| Field       | Description                 | Accepted Values   | Schema | Default    |
+|-------------|-----------------------------|-------------------|--------|------------|
+| action      | Action to perform when matching this rule. | allow, deny, log, pass | string | |
+| protocol    | Positive protocol match. | tcp, udp, icmp, icmpv6, sctp, udplite, integer 1-255. | string | |
+| notProtocol | Negative protocol match. | tcp, udp, icmp, icmpv6, sctp, udplite, integer 1-255. | string | |
+| icmp        | ICMP match criteria. | | [ICMP](#icmp) | |
+| notICMP     | Negative match on ICMP. | | [ICMP](#icmp) | |
+| source      | Source match parameters. | | [EntityRule](#entityrule) | |
+| destination | Destination match parameters. | | [EntityRule](#entityrule) | |
+
+An `action` of `pass` will skip over the remaining Policies and jump to the
+first Profile assigned to the endpoint, applying the policy configured in the
+Profile; if there are no Profiles configured for the endpoint, the default
+action is deny.
+
+#### ICMP
+
+| Field       | Description                 | Accepted Values   | Schema | Default    |
+|-------------|-----------------------------|-------------------|--------|------------|
+| type        | Match on ICMP type. | Can be integer 1-255 | integer |
+| code        | Match on ICMP code. | Can be integer 1-255 | integer |
+
+#### EntityRule
+
+| Field       | Description                 | Accepted Values   | Schema | Default    |
+|-------------|-----------------------------|-------------------|--------|------------|
+| tag (deprecated) | Match on tag. | | string | |
+| notTag (deprecated) | Negative match on tag. | | string | |
+| nets | Match packets with IP in any of the listed CIDRs. | List of valid IPv4 or IPv6 CIDRs | list of cidrs |
+| net | Deprecated (use "nets" instead): Match on CIDR. | Valid IPv4 or IPv6 CIDR | cidr | |
+| notNets | Negative match on CIDRs. Match packets with IP not in any of the listed CIDRs. | List of valid IPv4 or IPv6 CIDRs | list of cidrs |
+| notNet | Deprecated (use "notNets" instead): Negative match on CIDR. | Valid IPv4 or IPv6 CIDR | cidr | |
+| selector | Positive match on selected endpoints. | Valid selector | [selector](#selector) | |
+| notSelector | Negative match on selected endpoints. | Valid selector | [selector](#selector) | |
+| ports | Positive match on the specified ports. | | list of [ports](#ports) | |
+| notPorts | Negative match on the specified ports. | | list of [ports](#ports) | |
+
+#### Selector
+
+{% include {{page.version}}/selectors.md %}
+
+#### Ports
+
+{% include {{page.version}}/ports.md %}
diff --git a/v2.4/reference/calicoctl/resources/profile.md b/v2.4/reference/calicoctl/resources/profile.md
new file mode 100644
index 00000000000..9985e5d2b93
--- /dev/null
+++ b/v2.4/reference/calicoctl/resources/profile.md
@@ -0,0 +1,99 @@
+---
+title: Profile Resource (profile)
+---
+
+A Profile resource (profile) represents a set of rules which are applied
+to the individual endpoints to which this profile has been assigned.
+
+Each Calico endpoint or host endpoint can be assigned to zero or more profiles.
+
+Also see the [Policy resource]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/policy)
+which provides an alternate way to select what policy is applied to an endpoint.
+
+For `calicoctl` commands that specify a resource type on the CLI, the following
+aliases are supported (all case insensitive): `profile`, `profiles`, `pro`, `pros`.
+
+### Sample YAML
+
+The following sample profile allows all traffic from endpoints that
+have the profile label set to `profile1` (i.e.
endpoints that reference this profile),
+except that *all* traffic from 10.0.20.0/24 is denied.
+
+```yaml
+apiVersion: v1
+kind: profile
+metadata:
+  name: profile1
+  labels:
+    profile: profile1
+spec:
+  ingress:
+  - action: deny
+    source:
+      nets:
+      - 10.0.20.0/24
+  - action: allow
+    source:
+      selector: profile == 'profile1'
+  egress:
+  - action: allow
+```
+
+### Definition
+
+#### Metadata
+
+| Field       | Description                 | Accepted Values   | Schema | Default    |
+|-------------|-----------------------------|-------------------|--------|------------|
+| name        | The name of the profile. | | string |
+| labels      | A set of labels to apply to endpoints using this profile. | | map of string key to string values |
+| tags (deprecated) | A list of tag names to apply to endpoints using this profile. | | list of strings |
+
+#### Spec
+
+| Field       | Description                 | Accepted Values   | Schema | Default    |
+|-------------|-----------------------------|-------------------|--------|------------|
+| ingress     | The ingress rules belonging to this profile. | | List of [Rule](#rule) |
+| egress      | The egress rules belonging to this profile. | | List of [Rule](#rule) |
+
+#### Rule
+
+| Field       | Description                 | Accepted Values   | Schema | Default    |
+|-------------|-----------------------------|-------------------|--------|------------|
+| action      | Action to perform when matching this rule. | allow, deny, log | string | |
+| protocol    | Positive protocol match. | tcp, udp, icmp, icmpv6, sctp, udplite, integer 1-255. | string | |
+| notProtocol | Negative protocol match. | tcp, udp, icmp, icmpv6, sctp, udplite, integer 1-255. | string | |
+| icmp        | ICMP match criteria. | | [ICMP](#icmp) | |
+| notICMP     | Negative match on ICMP. | | [ICMP](#icmp) | |
+| source      | Source match parameters. | | [EntityRule](#entityrule) | |
+| destination | Destination match parameters. | | [EntityRule](#entityrule) | |
+
+#### ICMP
+
+| Field       | Description                 | Accepted Values   | Schema | Default    |
+|-------------|-----------------------------|-------------------|--------|------------|
+| type        | Match on ICMP type. | Can be integer 1-255 | integer |
+| code        | Match on ICMP code. | Can be integer 1-255 | integer |
+
+#### EntityRule
+
+| Field       | Description                 | Accepted Values   | Schema | Default    |
+|-------------|-----------------------------|-------------------|--------|------------|
+| tag (deprecated) | Positive match on tag. | | string | |
+| notTag (deprecated) | Negative match on tag. | | string | |
+| nets | Match packets with IP in any of the listed CIDRs. | List of valid IPv4 or IPv6 CIDRs | list of cidrs |
+| net | Deprecated (use "nets" instead): Match on CIDR. | Valid IPv4 or IPv6 CIDR | cidr | |
+| notNets | Negative match on CIDRs. Match packets with IP not in any of the listed CIDRs. | List of valid IPv4 or IPv6 CIDRs | list of cidrs |
+| notNet | Deprecated (use "notNets" instead): Negative match on CIDR. | Valid IPv4 or IPv6 CIDR | cidr | |
+| selector | Positive match on selected endpoints. | | [selector](#selector) | |
+| notSelector | Negative match on selected endpoints. | | [selector](#selector) | |
+| ports | Positive match on the specified ports. | | list of [ports](#ports) | |
+| notPorts | Negative match on the specified ports. | | list of [ports](#ports) | |
+
+#### Selector
+
+{% include {{page.version}}/selectors.md %}
+
+#### Ports
+
+{% include {{page.version}}/ports.md %}
diff --git a/v2.4/reference/calicoctl/resources/workloadendpoint.md b/v2.4/reference/calicoctl/resources/workloadendpoint.md
new file mode 100644
index 00000000000..c7b1c2cd5a3
--- /dev/null
+++ b/v2.4/reference/calicoctl/resources/workloadendpoint.md
@@ -0,0 +1,63 @@
+---
+title: Workload Endpoint Resource (workloadEndpoint)
+---
+
+A Workload Endpoint resource (workloadEndpoint) represents an interface
+connecting a Calico networked container or VM to its host.
+
+Each endpoint may specify a set of labels and a list of profiles that Calico will use
+to apply policy to the interface.
+
+For `calicoctl` commands that specify a resource type on the CLI, the following
+aliases are supported (all case insensitive): `workloadendpoint`, `workloadendpoints`, `wep`, `weps`.
+
+> **Note**
+>
+> While `calicoctl` allows the user to fully manage Workload Endpoint resources,
+> the lifecycle of these resources is generally handled by an orchestrator-specific
+> plugin such as the Calico CNI plugin, the Calico Docker network plugin,
+> or the Calico OpenStack Neutron Driver. In general, we recommend that you only
+> use `calicoctl` to view this resource type.
+
+### Sample YAML
+
+```yaml
+apiVersion: v1
+kind: workloadEndpoint
+metadata:
+  name: eth0
+  workload: default.frontend-5gs43
+  orchestrator: k8s
+  node: rack1-host1
+  labels:
+    app: frontend
+    calico/k8s_ns: default
+spec:
+  interfaceName: cali0ef24ba
+  mac: ca:fe:1d:52:bb:e9
+  ipNetworks:
+  - 192.168.0.0/16
+  profiles:
+  - profile1
+```
+
+### Definitions
+
+#### Metadata
+
+| Field       | Description                 | Accepted Values   | Schema | Default    |
+|-------------|-----------------------------|-------------------|--------|------------|
+| name        | The name of this endpoint resource. | | string |
+| workload    | The name of the workload to which this endpoint belongs. | | string |
+| orchestrator | The orchestrator that created this endpoint. | | string |
+| node        | The node where this endpoint resides. | | string |
+| labels      | A set of labels to apply to this endpoint. | | map |
+
+#### Spec
+
+| Field       | Description                 | Accepted Values   | Schema | Default    |
+|-------------|-----------------------------|-------------------|--------|------------|
+| ipNetworks  | The CIDRs assigned to the interface. | | List of strings |
+| profiles    | List of profiles assigned to this endpoint. | | List of strings |
+| interfaceName | The name of the host-side interface attached to the workload. | | string |
+| mac         | The source MAC address of traffic generated by the workload. | | IEEE 802 MAC-48, EUI-48, or EUI-64 |
diff --git a/v2.4/reference/calicoctl/setup/etcdv2.md b/v2.4/reference/calicoctl/setup/etcdv2.md
new file mode 100644
index 00000000000..92c33144ef9
--- /dev/null
+++ b/v2.4/reference/calicoctl/setup/etcdv2.md
@@ -0,0 +1,133 @@
+---
+title: Configuring calicoctl - etcdv2 datastore
+---
+
+This document covers the configuration options for calicoctl when using an etcdv2 datastore.
+
+There are two ways to configure calicoctl with your etcdv2 cluster details:
+configuration file or environment variables.
+
+
+## Configuration File
+
+By default `calicoctl` looks for a configuration file at `/etc/calico/calicoctl.cfg`.
+
+The file location may be overridden using the `--config` option on commands that require
+datastore access.
+
+The config file is a YAML or JSON document in the following format:
+
+```
+apiVersion: v1
+kind: calicoApiConfig
+metadata:
+spec:
+  datastoreType: "etcdv2"
+  etcdEndpoints: "http://etcd1:2379,http://etcd2:2379"
+  ...
+```
+
+See the table below for details on the etcdv2 specific fields that may be included in
+the spec section.
+
+If the file exists, then it must be valid and readable by calicoctl.
+
+## Environment variables
+
+If you are not using a config file to specify your access information, calicoctl
+will check a particular set of environment variables.
+
+See the table below for details on the etcdv2 specific environment variables.
+
+> Note that if neither file nor environment variables are set, calicoctl defaults to
+> using etcdv2 with a single endpoint of http://127.0.0.1:2379.
+
+## Complete list of etcdv2 connection configuration
+
+| Spec field      | Environment        | Description                                                   | Examples |
+|-----------------|--------------------|---------------------------------------------------------------|----------|
+| datastoreType   | DATASTORE_TYPE     | Indicates the datastore to use (optional, defaults to etcdv2) | etcdv2
+| etcdEndpoints   | ETCD_ENDPOINTS     | A comma separated list of etcd endpoints (optional, defaults to http://127.0.0.1:2379) | http://etcd1:2379
+| etcdUsername    | ETCD_USERNAME      | Username for RBAC (optional) | "user"
+| etcdPassword    | ETCD_PASSWORD      | Password for the given username (optional) | "password"
+| etcdKeyFile     | ETCD_KEY_FILE      | Path to the etcd key file (optional) | /etc/calico/key.pem
+| etcdCertFile    | ETCD_CERT_FILE     | Path to the etcd client cert (optional) | /etc/calico/cert.pem
+| etcdCACertFile  | ETCD_CA_CERT_FILE  | Path to the etcd CA file (optional) | /etc/calico/ca.pem
+
+> **NOTES**
+>
+> 1. If you are running with TLS enabled, ensure your endpoint addresses use https.
+> 2. When specifying through environment variables, the DATASTORE_TYPE environment
+>    variable is not required for etcdv2.
+> 3. All environment variables may also be prefixed with "CALICO_", for example
+>    "CALICO_DATASTORE_TYPE" and "CALICO_ETCD_ENDPOINTS". This is useful if the
+>    non-prefixed names clash with environment variables already defined on your
+>    system.
+> 4. Previous versions of calicoctl supported ETCD_SCHEME and ETCD_AUTHORITY environment
+>    variables as a mechanism for specifying the etcd endpoints. These variables are
+>    deprecated in favor of the ETCD_ENDPOINTS list.
+
+## Examples
+
+#### Example configuration file
+
+```yaml
+apiVersion: v1
+kind: calicoApiConfig
+metadata:
+spec:
+  etcdEndpoints: http://etcd1:2379,http://etcd2:2379,http://etcd3:2379
+  etcdKeyFile: /etc/calico/key.pem
+  etcdCertFile: /etc/calico/cert.pem
+  etcdCACertFile: /etc/calico/ca.pem
+```
+
+#### Example using environment variables
+
+```
+ETCD_ENDPOINTS=http://myhost1:2379 calicoctl get bgppeers
+```
+
+#### Example using IPv6
+
+Create a single node etcd cluster listening on IPv6 localhost `[::1]`.
+
+```
+etcd --listen-client-urls=http://[::1]:2379 --advertise-client-urls=http://[::1]:2379
+```
+
+Use the etcd IPv6 cluster:
+
+```
+ETCD_ENDPOINTS=http://[::1]:2379 calicoctl get bgppeers
+```
+
+#### Example using mixed IPv4/IPv6
+
+Create a single node etcd cluster listening on both IPv4 localhost `127.0.0.1` and IPv6 localhost `[::1]`.
+
+```
+etcd --listen-client-urls=http://[::1]:2379,http://127.0.0.1:2379 --advertise-client-urls=http://[::1]:2379
+```
+
+Use the IPv6 endpoint:
+
+```
+ETCD_ENDPOINTS=http://[::1]:2379 calicoctl get bgppeers
+```
+
+Use the IPv4 endpoint:
+
+```
+ETCD_ENDPOINTS=http://127.0.0.1:2379 calicoctl get bgppeers
+```
+
+## calico/node
+
+It is important to note that not only will calicoctl use the specified keys directly
+on the host to access etcd, **it will also pass on these environment variables
+and volume mount the keys into the started calico-node container.**
+
+Therefore, configuring calico-node for etcd is easily accomplished by running
+`calicoctl node run` with the parameters set correctly.
diff --git a/v2.4/reference/calicoctl/setup/index.md b/v2.4/reference/calicoctl/setup/index.md
new file mode 100644
index 00000000000..ee1a2ae520a
--- /dev/null
+++ b/v2.4/reference/calicoctl/setup/index.md
@@ -0,0 +1,35 @@
+---
+title: Calicoctl Configuration Overview
+---
+
+The `calicoctl` command line tool needs to be configured with details of
+your datastore so that it can manage system configuration and
+resources.
+
+Configuration may be specified either using a YAML or JSON input file, or through
+environment variables. Configuration is determined as follows:
+
+- if a configuration file is present, the file is read and that configuration
+  is used, otherwise
+- if the environment variables are set, those are used, otherwise
+- a default etcdv2 endpoint at http://127.0.0.1:2379 is assumed.
+
+Calico currently supports the following datastores:
+
+- [etcdv2](etcdv2) (default, recommended)
+- [Kubernetes API](kubernetes)
+
+Calico supports, but does not require:
+
+- role-based authentication using username and password
+- certificate and key authentication.
+
+
+## Configuring datastore access
+
+For detailed information on configuring calicoctl, see the documentation for your chosen
+datastore.
+
+- [etcdv2](etcdv2) (default, recommended)
+- [Kubernetes API](kubernetes)
+
diff --git a/v2.4/reference/calicoctl/setup/kubernetes.md b/v2.4/reference/calicoctl/setup/kubernetes.md
new file mode 100644
index 00000000000..013d13fbd6e
--- /dev/null
+++ b/v2.4/reference/calicoctl/setup/kubernetes.md
@@ -0,0 +1,95 @@
+---
+title: Configuring calicoctl - Kubernetes datastore
+layout: docwithnav
+---
+
+This document covers the configuration options for calicoctl when using the Kubernetes API as a datastore.
+
+> **Note**
+>
+> If running Calico on Kubernetes with the etcdv2 datastore, see the [etcdv2 configuration document](etcdv2) instead.
+> For more information on running with the Kubernetes datastore, see [the installation guide](/{{page.version}}/getting-started/kubernetes/installation/hosted/kubernetes-datastore/)
+
+There are two ways to configure calicoctl with your Kubernetes API details:
+configuration file or environment variables.
+
+## Configuration file
+
+By default `calicoctl` looks for a configuration file at `/etc/calico/calicoctl.cfg`.
+
+The file location may be overridden using the `--config` option on commands that require
+datastore access.
+
+The config file is a YAML or JSON document in the following format:
+
+```
+apiVersion: v1
+kind: calicoApiConfig
+metadata:
+spec:
+  datastoreType: "kubernetes"
+  kubeconfig: "/path/to/kubeconfig"
+  ...
+```
+
+See the table below for details on the Kubernetes API specific fields that may be included in
+the spec section.
+
+If the file exists, then it must be valid and readable by calicoctl.
+If the file does not exist, calicoctl will read access details from the environment variables.
+
+## Environment variables
+
+If you are not using a config file to specify your access information, calicoctl
+will check a particular set of environment variables.
+
+See the table below for details on the Kubernetes specific environment variables.
+
+> Note that if neither file nor environment variables are set, calicoctl defaults to
+> using etcdv2 as the datastore with a single endpoint of http://127.0.0.1:2379.
+
+## Complete list of Kubernetes API connection configuration
+
+| Spec field     | Environment      | Description                                                                        | Examples |
+|----------------|------------------|------------------------------------------------------------------------------------|----------|
+| datastoreType  | DATASTORE_TYPE   | Indicates the datastore to use (required for kubernetes as the default is etcdv2) | kubernetes
+| kubeconfig     | KUBECONFIG       | When using the kubernetes datastore, the location of a kubeconfig file to use. | /path/to/kube/config
+| k8sAPIEndpoint | K8S_API_ENDPOINT | Location of the Kubernetes API. Not required if using kubeconfig. | https://kubernetes-api:443
+| k8sCertFile    | K8S_CERT_FILE    | Location of a client certificate for accessing the Kubernetes API. | /path/to/cert
+| k8sKeyFile     | K8S_KEY_FILE     | Location of a client key for accessing the Kubernetes API. | /path/to/key
+| k8sCAFile      | K8S_CA_FILE      | Location of a CA for accessing the Kubernetes API. | /path/to/ca
+| k8sToken       | K8S_TOKEN        | Token to be used for accessing the Kubernetes API. |
+
+> Note that all environment variables may also be prefixed with "CALICO_", for
+> example "CALICO_DATASTORE_TYPE" and "CALICO_KUBECONFIG" may also be used.
+> This is useful if the non-prefixed names clash with environment variables
+> already defined on your system.
+
+## Examples
+
+#### Example configuration file
+
+```yaml
+apiVersion: v1
+kind: calicoApiConfig
+metadata:
+spec:
+  datastoreType: "kubernetes"
+  kubeconfig: "/path/to/.kube/config"
+```
+
+#### Example using environment variables
+
+```shell
+$ export DATASTORE_TYPE=kubernetes
+$ export KUBECONFIG=~/.kube/config
+$ calicoctl get workloadendpoints
+```
+
+And using `CALICO_` prefixed names:
+
+```shell
+$ export CALICO_DATASTORE_TYPE=kubernetes
+$ export CALICO_KUBECONFIG=~/.kube/config
+$ calicoctl get workloadendpoints
+```
diff --git a/v2.4/reference/cni-plugin/configuration.md b/v2.4/reference/cni-plugin/configuration.md
new file mode 100644
index 00000000000..1a7708fed0d
--- /dev/null
+++ b/v2.4/reference/cni-plugin/configuration.md
@@ -0,0 +1,279 @@
+---
+title: Configuring the Calico CNI plugins
+---
+
+The Calico CNI plugin is configured through the standard CNI [configuration mechanism](https://github.com/containernetworking/cni/blob/master/SPEC.md#network-configuration).
+
+A minimal configuration file that uses Calico for networking and IPAM looks like this:
+
+```json
+{
+  "name": "any_name",
+  "cniVersion": "0.1.0",
+  "type": "calico",
+  "ipam": {
+    "type": "calico-ipam"
+  }
+}
+```
+
+If the `calico-node` container on a node was registered with a `NODENAME` other than the node hostname, the CNI plugin on that node must be configured with the same `nodename`:
+
+```json
+{
+  "name": "any_name",
+  "nodename": "<NODENAME>",
+  "type": "calico",
+  "ipam": {
+    "type": "calico-ipam"
+  }
+}
+```
+
+Additional configuration can be added as detailed below.
+
+## Generic
+
+### Datastore type
+
+The following option allows configuration of the Calico datastore type.
+
+* `datastore_type` (default: etcdv2)
+
+The Calico CNI plugin supports the following datastore types:
+
+* etcdv2 (default)
+* kubernetes
+
+### Etcd location
+
+The following options are valid when `datastore_type` is `etcdv2`.
+
+Configure access to your etcd cluster using the following options:
+
+* `etcd_endpoints` (no default. Format is a comma separated list of etcd servers, e.g. `http://1.2.3.4:2379,http://5.6.7.8:2379`)
+* `etcd_key_file` (no default. Format is an absolute path to a file)
+* `etcd_cert_file` (no default. Format is an absolute path to a file)
+* `etcd_ca_cert_file` (no default. Format is an absolute path to a file)
+
+The following deprecated options are also supported:
+
+* `etcd_authority` (default is `127.0.0.1:2379`)
+  * If `etcd_authority` is set at the same time as `etcd_endpoints` then `etcd_endpoints` is used.
+* `etcd_scheme` (default is `http`)
+
+### Logging
+
+* Logging is always to `stderr`.
+* Logging level can be controlled by setting `"log_level"` in the netconf. Allowed levels are
+  * `WARNING` - the default.
+  * `INFO` - Enables some additional logging from the CNI plugin.
+  * `DEBUG` - Enables lots of debug logging from both the CNI plugin and the underlying libcalico library.
+
+```json
+{
+  "name": "any_name",
+  "cniVersion": "0.1.0",
+  "type": "calico",
+  "log_level": "DEBUG",
+  "ipam": {
+    "type": "calico-ipam"
+  }
+}
+```
+
+### IPAM
+
+When using Calico IPAM, the following flags determine what IP addresses should be assigned. NOTE: These flags are strings and not boolean values.
+
+* `assign_ipv4` (default: `"true"`)
+* `assign_ipv6` (default: `"false"`)
+
+A specific IP address can be chosen by using [`CNI_ARGS`](https://github.com/appc/cni/blob/master/SPEC.md#parameters) and setting `IP` to the desired value.
+
+By default, Calico IPAM will assign IP addresses from all the available IP Pools.
+
+Optionally, the list of possible IPv4 and IPv6 pools can also be specified via the following properties:
+
+* `ipv4_pools`: An array of CIDR strings (e.g. `"ipv4_pools": ["10.0.0.0/24", "20.0.0.0/16"]`)
+* `ipv6_pools`: An array of CIDR strings (e.g. `"ipv6_pools": ["2001:db8::1/120"]`)
+
+Example CNI config:
+
+```json
+{
+  "name": "any_name",
+  "cniVersion": "0.1.0",
+  "type": "calico",
+  "ipam": {
+    "type": "calico-ipam",
+    "assign_ipv4": "true",
+    "assign_ipv6": "true",
+    "ipv4_pools": ["10.0.0.0/24", "20.0.0.0/16"],
+    "ipv6_pools": ["2001:db8::1/120"]
+  }
+}
+```
+
+> **NOTE**
+>
+> `ipv6_pools` will be respected only when `assign_ipv6` is set to `"true"`.
+
+Any IP Pools specified in the CNI config must have already been created. It is an error to specify IP Pools in the config that do not exist.
+
+## Kubernetes specific
+
+When using the Calico CNI plugin with Kubernetes, the plugin must be able to access the Kubernetes API server in order to find the labels assigned to the Kubernetes pods. The recommended way to configure access is through a `kubeconfig` file specified in the `kubernetes` section of the network config, e.g.
+
+```json
+{
+  "name": "any_name",
+  "cniVersion": "0.1.0",
+  "type": "calico",
+  "kubernetes": {
+    "kubeconfig": "/path/to/kubeconfig"
+  },
+  "ipam": {
+    "type": "calico-ipam"
+  }
+}
+```
+
+As a convenience, the API location can also be configured directly, e.g.
+
+```json
+{
+  "name": "any_name",
+  "cniVersion": "0.1.0",
+  "type": "calico",
+  "kubernetes": {
+    "k8s_api_root": "http://127.0.0.1:8080"
+  },
+  "ipam": {
+    "type": "calico-ipam"
+  }
+}
+```
+
+### Enabling Kubernetes Policy
+
+If you wish to use the Kubernetes NetworkPolicy API then you must set a policy type in the network config.
+There is a single supported policy type, `k8s`, which uses the Kubernetes NetworkPolicy API in conjunction with the `calico/kube-policy-controller`.
+
+```json
+{
+  "name": "any_name",
+  "cniVersion": "0.1.0",
+  "type": "calico",
+  "policy": {
+    "type": "k8s",
+    "k8s_api_root": "http://127.0.0.1:8080"
+  },
+  "ipam": {
+    "type": "calico-ipam"
+  }
+}
+```
+
+When using `type: k8s`, the Calico CNI plugin requires read-only Kubernetes API access to the `Pods` resource in all namespaces.
+
+Previous versions of the plugin (`v1.3.1` and earlier) supported an alternative type called [`k8s-annotations`](https://github.com/projectcalico/calicoctl/blob/v0.20.0/docs/cni/kubernetes/AnnotationPolicy.md). This used annotations on pods to specify network policy, but it is no longer supported.
+
+### Deprecated ways of specifying Kubernetes API access details
+
+From the examples above, you can see that the `k8s_api_root` can appear in either the `kubernetes` or `policy` configuration blocks.
+
+* `k8s_api_root` (default `http://127.0.0.1:8080`)
+
+In addition, the following methods are supported in the `policy` section of the CNI network config only. None of them have default values.
+
+* `k8s_auth_token`
+* `k8s_client_certificate`
+* `k8s_client_key`
+* `k8s_certificate_authority`
+
+### IPAM
+
+When using the CNI `host-local` IPAM plugin, a special value `usePodCidr` is allowed for the subnet field. This tells the plugin to determine the subnet to use from the Kubernetes API based on the Node.podCIDR field.
+
+* `node_name`
+  * The node name to use when looking up the `usePodCidr` value (defaults to current hostname)
+
+```json
+{
+  "name": "any_name",
+  "cniVersion": "0.1.0",
+  "type": "calico",
+  "kubernetes": {
+    "kubeconfig": "/path/to/kubeconfig",
+    "node_name": "node-name-in-k8s"
+  },
+  "ipam": {
+    "type": "host-local",
+    "subnet": "usePodCidr"
+  }
+}
+```
+
+When making use of the `usePodCidr` option, the Calico CNI plugin requires read-only Kubernetes API access to the `Nodes` resource.
+
+### IPAM Manipulation with Kubernetes Annotations
+
+#### Specifying IP Pools on a per-Pod basis
+
+In addition to specifying IP Pools in the CNI config as discussed above, Calico IPAM supports specifying IP Pools per-Pod using the following [Kubernetes annotations](https://kubernetes.io/docs/user-guide/annotations/).
+
+- `cni.projectcalico.org/ipv4pools`: A list of configured IPv4 Pools from which to choose an address for the Pod.
+
+   Example:
+
+```yaml
+annotations:
+    "cni.projectcalico.org/ipv4pools": "[\"192.168.0.0/16\"]"
+```
+
+- `cni.projectcalico.org/ipv6pools`: A list of configured IPv6 Pools from which to choose an address for the Pod.
+
+   Example:
+
+```yaml
+annotations:
+    "cni.projectcalico.org/ipv6pools": "[\"2001:db8::1/120\"]"
+```
+
+If provided, these IP Pools will override any IP Pools specified in the CNI config.
+
+> **Note:**
+>
+> This requires the IP Pools to exist before `ipv4pools` or `ipv6pools` annotations are used.
+> Requesting a subset of an IP Pool is not supported. IP Pools requested in the annotations must exactly match a configured [IP Pool]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/ippool).
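+
+As a sketch of where these annotations live, they are set in the pod's
+metadata (the pod name, image, and pool below are assumptions):
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: frontend
+  annotations:
+    "cni.projectcalico.org/ipv4pools": "[\"192.168.0.0/16\"]"
+spec:
+  containers:
+  - name: frontend
+    image: nginx
+```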
+
+#### Requesting a Specific IP address
+
+You can also request a specific IP address through [Kubernetes annotations](https://kubernetes.io/docs/user-guide/annotations/) with Calico IPAM.
+There are two annotations to request a specific IP address:
+
+- `cni.projectcalico.org/ipAddrs`: A list of IPv4 and/or IPv6 addresses to assign to the Pod. The requested IP addresses will be assigned from Calico IPAM and must exist within a configured IP Pool.
+
+   Example:
+
+```yaml
+annotations:
+    "cni.projectcalico.org/ipAddrs": "[\"192.168.0.1\"]"
+```
+
+- `cni.projectcalico.org/ipAddrsNoIpam`: A list of IPv4 and/or IPv6 addresses to assign to the Pod, bypassing IPAM. Any IP conflicts and routing have to be taken care of manually or by some other system.
+Calico will only distribute routes to a Pod if its IP address falls within a Calico IP Pool. If you assign an IP address that is not in a Calico IP Pool, you must ensure that routing to that IP address is taken care of through another mechanism.
+
+   Example:
+
+```yaml
+annotations:
+    "cni.projectcalico.org/ipAddrsNoIpam": "[\"10.0.0.1\"]"
+```
+
+> **Note:**
+>
+> - The `ipAddrs` and `ipAddrsNoIpam` annotations can't be used together.
+> - You can only specify one IPv4/IPv6 or one IPv4 and one IPv6 address with these annotations.
+> - When `ipAddrs` or `ipAddrsNoIpam` is used with `ipv4pools` or `ipv6pools`, `ipAddrs` / `ipAddrsNoIpam` take priority.
diff --git a/v2.4/reference/contribute.md b/v2.4/reference/contribute.md
new file mode 100644
index 00000000000..e9aa2826c7a
--- /dev/null
+++ b/v2.4/reference/contribute.md
@@ -0,0 +1,52 @@
+---
+title: Contribution Guidelines
+---
+
+Features or any changes to the codebase should be done as follows:
+
+1. Pull the latest code in the **master** branch and create a feature
+   branch off this.
+2. Implement your feature. Commits are cheap in Git; try to split your
+   work into many small ones. It makes reviewing easier and merging
+   saner.
+   - If your commit fixes an existing issue \#123, include the text
+     "fixes \#123" in at least one of your commit messages. This
+     ensures the pull request is attached to the existing issue (see
+     [How do you attach a new pull request to an existing issue on
+     GitHub?](http://stackoverflow.com/questions/4528869/how-do-you-attach-a-new-pull-request-to-an-existing-issue-on-github)).
+
+3. Push your feature branch to GitHub. Note that before we can accept
+   your changes, you need to agree to one of our
+   contributor agreements. See [below](#contributor-agreements).
+4. Create a pull request using GitHub, from your branch to master.
+5. Reviewer process:
+   - Receive notice of review by GitHub email, GitHub notification,
+     or by checking your assigned issues.
+   - Leave review comments on the pull request (either line
+     comments or top-level comments).
+   - Make a top-level comment saying something along the lines of
+     “Fine; some minor comments” or “Some issues to address
+     before merging”.
+   - If there are no issues, merge the pull request and close the
+     branch. Otherwise, assign the pull request to the developer and
+     leave this to them.
+
+6. Developer process:
+   - Await review.
+   - Address code review issues on your feature branch.
+   - Push your changes to the feature branch on GitHub. This
+     automatically updates the pull request.
+   - If necessary, make a top-level comment along the lines of
+     “Please re-review”, assign back to the reviewer, and repeat
+     the above.
+   - If no further review is necessary and you have the necessary
+     privileges, merge the pull request and close the branch.
+     Otherwise, make a top-level comment and assign back to the
+     reviewer as above.
+
+## Contributor Agreements
+
+If you plan to contribute in the form of documentation or code, we need
+you to sign our Contributor License Agreement before we can accept your
+contribution. You will be prompted to do this as part of the PR process
+on GitHub.
diff --git a/v2.4/reference/felix/configuration.md b/v2.4/reference/felix/configuration.md
new file mode 100644
index 00000000000..f4bd8859ae5
--- /dev/null
+++ b/v2.4/reference/felix/configuration.md
@@ -0,0 +1,129 @@
+---
+title: Configuring Felix
+---
+
+Configuration for Felix is read from one of four possible locations, in
+order, as follows.
+
+1. Environment variables.
+2. The Felix configuration file.
+3. Host specific configuration in etcd.
+4. Global configuration in etcd.
+
+The value of any configuration parameter is the value read from the
+*first* location containing a value. If not set in any of these
+locations, most configuration parameters have defaults, and it should be
+rare to have to explicitly set them.
+
+The full list of parameters which can be set is as follows.
+
+#### Global configuration
+
+| Setting | Environment variable | Default | Meaning |
+|-----------------------------------------|-----------------------------------------|--------------------------------------|-----------------------------------------|
+| DatastoreType | FELIX_DATASTORETYPE | etcdv2 | One of "etcdv2" or "kubernetes". The datastore that Felix should read endpoints and policy information from. |
+| FelixHostname | FELIX_FELIXHOSTNAME | socket.gethostname() | The hostname Felix reports to the plugin. Should be used if the hostname Felix autodetects is incorrect or does not match what the plugin will expect. |
+| LogFilePath | FELIX_LOGFILEPATH | /var/log/calico/felix.log | The full path to the Felix log. Set to "none" to disable file logging. |
+| LogSeveritySys | FELIX_LOGSEVERITYSYS | INFO | The log severity above which logs are sent to the syslog. Valid values are DEBUG, INFO, WARNING, ERROR and CRITICAL, or NONE for no logging to syslog (all values case insensitive). |
+| LogSeverityFile | FELIX_LOGSEVERITYFILE | INFO | The log severity above which logs are sent to the log file. Valid values as for LogSeveritySys. |
+| LogSeverityScreen | FELIX_LOGSEVERITYSCREEN | INFO | The log severity above which logs are sent to stdout. Valid values as for LogSeveritySys. |
+| StartupCleanupDelay | FELIX_STARTUPCLEANUPDELAY | 30 | Delay, in seconds, before Felix does its start-of-day cleanup to remove orphaned iptables chains and ipsets. Before the first cleanup, Felix operates in "graceful restart" mode, during which it preserves any pre-existing chains and ipsets. In a large deployment you may want to increase this value to give Felix more time to load the initial snapshot from etcd before cleaning up. |
+| PrometheusMetricsEnabled | FELIX_PROMETHEUSMETRICSENABLED | "false" | Set to "true" to enable the experimental Prometheus metrics server in Felix. |
+| PrometheusMetricsPort | FELIX_PROMETHEUSMETRICSPORT | 9091 | Experimental: TCP port that the Prometheus metrics server should bind to. |
+| PrometheusGoMetricsEnabled | FELIX_PROMETHEUSGOMETRICSENABLED | "true" | Set to "false" to disable Go runtime metrics collection, which the Prometheus client does by default. This reduces the number of metrics reported, reducing Prometheus load. |
+| PrometheusProcessMetricsEnabled | FELIX_PROMETHEUSPROCESSMETRICSENABLED | "true" | Set to "false" to disable process metrics collection, which the Prometheus client does by default. This reduces the number of metrics reported, reducing Prometheus load. |
+| UsageReportingEnabled | FELIX_USAGEREPORTINGENABLED | "true" | Reports anonymous Calico version number and cluster size to projectcalico.org. Logs warnings returned by the usage server, for example, if a significant security vulnerability has been discovered in the version of Calico being used. |
+| FailsafeInboundHostPorts | FELIX_FAILSAFEINBOUNDHOSTPORTS | tcp:22, udp:68 | Comma-delimited list of UDP/TCP ports that Felix will allow incoming traffic to host endpoints on irrespective of the security policy. This is useful to avoid accidentally cutting off a host with incorrect configuration. Each port should be specified as `tcp:<port-number>` or `udp:<port-number>`. For back-compatibility, if the protocol is not specified, it defaults to "tcp". To disable all inbound host ports, use the value "none". The default value allows ssh access and DHCP. |
+| FailsafeOutboundHostPorts | FELIX_FAILSAFEOUTBOUNDHOSTPORTS | tcp:2379, tcp:2380, tcp:4001, tcp:7001, udp:53, udp:67 | Comma-delimited list of UDP/TCP ports that Felix will allow outgoing traffic from host endpoints to irrespective of the security policy. This is useful to avoid accidentally cutting off a host with incorrect configuration. Each port should be specified as `tcp:<port-number>` or `udp:<port-number>`. For back-compatibility, if the protocol is not specified, it defaults to "tcp". To disable all outbound host ports, use the value "none". The default value opens etcd's standard ports to ensure that Felix does not get cut off from etcd as well as allowing DHCP and DNS. |
+| ReportingIntervalSecs | FELIX_REPORTINGINTERVALSECS | 30 | Interval at which Felix reports its status into the datastore, or 0 to disable. Must be non-zero in OpenStack deployments. |
+| ReportingTTLSecs | FELIX_REPORTINGTTLSECS | 90 | Time-to-live setting for process-wide status reports. |
+| IpInIpMtu | FELIX_IPINIPMTU | 1440 | The MTU to set on the tunnel device. See [Configuring MTU]({{site.baseurl}}/{{page.version}}/usage/configuration/mtu) |
+
+#### etcdv2 datastore configuration
+
+| Setting | Environment variable | Default | Meaning |
+|-----------------------------------------|-----------------------------------------|--------------------------------------|-----------------------------------------|
+| EtcdEndpoints | FELIX_ETCDENDPOINTS | "EtcdScheme://EtcdAddr" | Comma-delimited list of etcd endpoints to connect to; for example "http://etcd1:2379,http://etcd2:2379". |
+| _Deprecated_ EtcdAddr | FELIX_ETCDADDR | 127.0.0.1:2379 | The location (IP / hostname and port) of the etcd node or proxy that Felix should connect to. |
+| _Deprecated_ EtcdScheme | FELIX_ETCDSCHEME | http | The protocol type (http or https) of the etcd node or proxy that Felix connects to. |
+| EtcdKeyFile | FELIX_ETCDKEYFILE | None | The full path to the etcd public key file, as described in usingtlswithetcd |
+| EtcdCertFile | FELIX_ETCDCERTFILE | None | The full path to the etcd certificate file, as described in usingtlswithetcd |
+| EtcdCaFile | FELIX_ETCDCAFILE | "/etc/ssl/certs/ca-certificates.crt" | The full path to the etcd Certificate Authority certificate file, as described in usingtlswithetcd. The default value is the standard location of the system trust store. To disable authentication of the server by Felix, set the value to "none". |
+
+#### Kubernetes datastore configuration
+
+| Setting | Environment variable | Default | Meaning |
+|-----------------------------------------|-----------------------------------------|--------------------------------------|-----------------------------------------|
+| N/A | N/A | | The Kubernetes datastore driver reads its configuration from Kubernetes-provided environment variables. |
+
+
+#### iptables dataplane configuration
+
+| Setting | Environment variable | Default | Meaning |
+|-----------------------------------------|-----------------------------------------|--------------------------------------|-----------------------------------------|
+| DefaultEndpointToHostAction | FELIX_DEFAULTENDPOINTTOHOSTACTION | DROP | This parameter controls what happens to traffic that goes from a workload endpoint to the host itself (after the traffic hits the endpoint egress policy). By default, Calico blocks traffic from workload endpoints to the host itself with an iptables "DROP" action. If you want to allow some or all traffic from endpoint to host, set this parameter to "RETURN" or "ACCEPT". Use "RETURN" if you have your own rules in the iptables "INPUT" chain; Calico will insert its rules at the top of that chain, then "RETURN" packets to the "INPUT" chain once it has completed processing workload endpoint egress policy. Use "ACCEPT" to unconditionally accept packets from workloads after processing workload endpoint egress policy. |
+| IptablesAllowAction | FELIX_IPTABLESALLOWACTION | ACCEPT | This parameter controls what happens to traffic that is accepted by a Felix policy chain. The default will immediately ACCEPT the traffic. Use RETURN to punt the traffic back up to the system chains for further processing. |
+| IptablesMarkMask | FELIX_IPTABLESMARKMASK | 0xff000000 | Mask that Felix selects its iptables mark bits from. Should be a 32-bit hexadecimal number with at least 8 bits set, none of which clash with any other mark bits in use on the system. |
+| IptablesRefreshInterval | FELIX_IPTABLESREFRESHINTERVAL | 90 | Period, in seconds, at which Felix re-checks all iptables state to ensure that no other process has accidentally broken Calico's rules. Set to 0 to disable iptables refresh. |
+| IptablesPostWriteCheckIntervalSecs | FELIX_IPTABLESPOSTWRITECHECKINTERVALSECS | 1 | Period, in seconds, after Felix has done a write to the dataplane at which it schedules an extra read back to check that the write was not clobbered by another process. This should only occur if another application on the system doesn't respect the iptables lock. |
+| RouteRefreshInterval | FELIX_ROUTEREFRESHINTERVAL | 90 | Period, in seconds, at which Felix re-checks the routes in the dataplane to ensure that no other process has accidentally broken Calico's rules. Set to 0 to disable route refresh. |
+| IpsetsRefreshInterval | FELIX_IPSETSREFRESHINTERVAL | 10 | Period, in seconds, at which Felix re-checks the IP sets in the dataplane to ensure that no other process has accidentally broken Calico's rules. Set to 0 to disable IP sets refresh. Note: the default for this value is lower than the other refresh intervals as a workaround for a [Linux kernel bug](https://github.com/projectcalico/felix/issues/1347) that was fixed in kernel version 4.11. If you are using v4.11 or greater, you may want to set this to a higher value to reduce Felix CPU usage. |
+| MaxIpsetSize | FELIX_MAXIPSETSIZE | 1048576 | Maximum size for the ipsets used by Felix to implement tags. Should be set to a number that is greater than the maximum number of IP addresses that are ever expected in a tag. |
+| ChainInsertMode | FELIX_CHAININSERTMODE | insert | One of "insert" or "append". Controls whether Felix hooks the kernel's top-level iptables chains by inserting a rule at the top of the chain or by appending a rule at the bottom. "insert" is the safe default since it prevents Calico's rules from being bypassed. If you switch to "append" mode, be sure that the other rules in the chains signal acceptance by falling through to the Calico rules, otherwise the Calico policy will be bypassed. |
+| LogPrefix | FELIX_LOGPREFIX | calico-packet | The log prefix that Felix uses when rendering LOG rules. |
+| IptablesLockTimeoutSecs | FELIX_IPTABLESLOCKTIMEOUTSECS | 0 (disabled) | Time, in seconds, that Felix will wait for the iptables lock, or 0 to disable. To use this feature, Felix must share the iptables lock file with all other processes that also take the lock. When running Felix inside a container, this requires the /run directory of the host to be mounted into the calico/node or calico/felix container. |
+| IptablesLockFilePath | FELIX_IPTABLESLOCKFILEPATH | /run/xtables.lock | Location of the iptables lock file. You may need to change this if the lock file is not in its standard location (for example, if you have mapped it into Felix's container at a different path). |
+| IptablesLockProbeIntervalMillis | FELIX_IPTABLESLOCKPROBEINTERVALMILLIS | 50 | Time, in milliseconds, that Felix will wait between attempts to acquire the iptables lock if it is not available. Lower values make Felix more responsive when the lock is contended, but use more CPU. |
+
+
+#### OpenStack specific configuration
+
+| Setting | Environment variable | Default | Meaning |
+|-----------------------------------------|-----------------------------------------|--------------------------------------|-----------------------------------------|
+| MetadataAddr | FELIX_METADATAADDR | 127.0.0.1 | The IP address or domain name of the server that can answer VM queries for cloud-init metadata. In OpenStack, this corresponds to the machine running nova-api (or in Ubuntu, nova-api-metadata). A value of 'None' (case insensitive) means that Felix should not set up any NAT rule for the metadata path. |
+| MetadataPort | FELIX_METADATAPORT | 8775 | The port of the metadata server. This, combined with global.MetadataAddr (if not 'None'), is used to set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort. In most cases this should not need to be changed. |
+
+#### Bare metal specific configuration
+
+| Setting | Environment variable | Default | Meaning |
+|-----------------------------------------|-----------------------------------------|--------------------------------------|-----------------------------------------|
+| InterfacePrefix | FELIX_INTERFACEPREFIX | cali | The interface name prefix that identifies workload endpoints and so distinguishes them from host endpoint interfaces. Note: in environments other than bare metal, the orchestrators configure this appropriately. For example, our Kubernetes and Docker integrations set the 'cali' value, and our OpenStack integration sets the 'tap' value. |
+
+Environment variables
+---------------------
+
+Configuration read from environment variables takes the highest
+priority. To set a configuration parameter via an environment variable,
+set the environment variable formed by taking `FELIX_` and appending the
+uppercase form of the parameter name. For example, to set the etcd
+address, set the environment variable `FELIX_ETCDADDR`. Other examples
+include `FELIX_ETCDSCHEME`, `FELIX_ETCDKEYFILE`, `FELIX_ETCDCERTFILE`,
+`FELIX_ETCDCAFILE`, `FELIX_FELIXHOSTNAME`, `FELIX_LOGFILEPATH` and
+`FELIX_METADATAADDR`.
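+
+For illustration, a couple of parameters might be set via the
+environment like this (the values here are examples only):
+
+```
+export FELIX_LOGSEVERITYSCREEN=DEBUG
+export FELIX_ETCDENDPOINTS=http://etcd1:2379,http://etcd2:2379
+```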
+
+### Configuration file
+
+On startup, Felix reads an ini-style configuration file. The path to
+this file defaults to `/etc/calico/felix.cfg` but can be overridden
+using the `-c` or `--config-file` options on the command line. If the
+file exists, then it is read (ignoring section names) and all parameters
+are set from it.
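+
+For illustration, a minimal configuration file might look like the
+following sketch (the section name is arbitrary and the values are
+examples only):
+
+```
+[global]
+LogSeverityFile = WARNING
+EtcdEndpoints = http://etcd1:2379,http://etcd2:2379
+```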
+
+In OpenStack, we recommend putting all configuration into configuration
+files, since the etcd database is transient (and may be recreated by the
+OpenStack plugin in certain error cases). However, in a Docker
+environment, the use of environment variables or etcd is often more
+convenient.
+
+### Datastore
+
+Felix also reads configuration parameters from the datastore. It supports
+a global setting and a per-host override. Datastore-based configuration
+can be set using the `--raw=felix` option of the calicoctl tool. For example,
+to set a per-host override for "myhost" to move the log file to /tmp/felix.log:
+
+    ./calicoctl config set --raw=felix --node=myhost LogFilePath /tmp/felix.log
+
+(For a global setting, omit the `--node=` option.)
+
+For more information, see the [calicoctl config documentation](../calicoctl/commands/config).
diff --git a/v2.4/reference/felix/prometheus.md b/v2.4/reference/felix/prometheus.md
new file mode 100644
index 00000000000..e9681d4faee
--- /dev/null
+++ b/v2.4/reference/felix/prometheus.md
@@ -0,0 +1,108 @@
+---
+title: Felix Prometheus Statistics
+---
+
+Felix can be configured to report a number of metrics through Prometheus. See the
+[configuration reference](configuration) for how to enable metrics reporting.
+
+## Metric Reference
+
+#### Felix Specific
+
+Felix exports a number of Prometheus metrics. The current set is as follows. Since some metrics
+are tied to particular implementation choices inside Felix, we can't make any hard guarantees that
+metrics will persist across releases. However, we aim not to make any spurious changes to
+existing metrics.
+
+| Name          | Description     |
+| ------------- | --------------- |
+| `felix_active_local_endpoints` | Number of active endpoints on this host. |
+| `felix_active_local_policies` | Number of active policies on this host. |
+| `felix_active_local_selectors` | Number of active selectors on this host. |
+| `felix_active_local_tags` | Number of active tags on this host. |
+| `felix_calc_graph_output_events` | Number of events emitted by the calculation graph. |
+| `felix_calc_graph_update_time_seconds` | Seconds to update calculation graph for each datastore OnUpdate call. |
+| `felix_calc_graph_updates_processed` | Number of datastore updates processed by the calculation graph. |
+| `felix_cluster_num_host_endpoints` | Total number of host endpoints cluster-wide. |
+| `felix_cluster_num_hosts` | Total number of calico hosts in the cluster. |
+| `felix_cluster_num_workload_endpoints` | Total number of workload endpoints cluster-wide. |
+| `felix_exec_time_micros` | Summary of time taken to fork/exec child processes. |
+| `felix_int_dataplane_addr_msg_batch_size` | Number of interface address messages processed in each batch. Higher values indicate we're doing more batching to try to keep up. |
+| `felix_int_dataplane_apply_time_seconds` | Time in seconds that it took to apply a dataplane update. |
+| `felix_int_dataplane_failures` | Number of times dataplane updates failed and will be retried. |
+| `felix_int_dataplane_iface_msg_batch_size` | Number of interface state messages processed in each batch. Higher values indicate we're doing more batching to try to keep up. |
+| `felix_int_dataplane_messages` | Number of dataplane messages by type. |
+| `felix_int_dataplane_msg_batch_size` | Number of messages processed in each batch. Higher values indicate we're doing more batching to try to keep up. |
+| `felix_ipset_calls` | Number of ipset commands executed. |
+| `felix_ipset_errors` | Number of ipset command failures. |
+| `felix_ipset_lines_executed` | Number of ipset operations executed. |
+| `felix_ipsets_calico` | Number of active Calico IP sets. |
+| `felix_ipsets_total` | Total number of active IP sets. |
+| `felix_iptables_chains` | Number of active iptables chains. |
+| `felix_iptables_lines_executed` | Number of iptables rule updates executed. |
+| `felix_iptables_restore_calls` | Number of iptables-restore calls. |
+| `felix_iptables_restore_errors` | Number of iptables-restore errors. |
+| `felix_iptables_rules` | Number of active iptables rules. |
+| `felix_iptables_save_calls` | Number of iptables-save calls. |
+| `felix_iptables_save_errors` | Number of iptables-save errors. |
+| `felix_resync_state` | Current datastore state. |
+| `felix_resyncs_started` | Number of times Felix has started resyncing with the datastore. |
+| `felix_route_table_list_seconds` | Time taken to list all the interfaces during a resync. |
+| `felix_route_table_per_iface_sync_seconds` | Time taken to sync each interface. |
+
+Prometheus metrics are self-documenting. With metrics turned on, `curl` can be used to list the
+metrics along with their help text and type information:
+
+```
+$ curl -s http://localhost:9091/metrics | head
+# HELP felix_active_local_endpoints Number of active endpoints on this host.
+# TYPE felix_active_local_endpoints gauge
+felix_active_local_endpoints 91
+# HELP felix_active_local_policies Number of active policies on this host.
+# TYPE felix_active_local_policies gauge
+felix_active_local_policies 0
+# HELP felix_active_local_selectors Number of active selectors on this host.
+# TYPE felix_active_local_selectors gauge
+felix_active_local_selectors 82
+# HELP felix_active_local_tags Number of active tags on this host.
+...
+```
+
+#### CPU / Memory metrics
+
+Felix also exports the default set of metrics that the Prometheus client library makes available.
+Currently, those include:
+
+| Name          | Description     |
+| ------------- | --------------- |
+| `go_gc_duration_seconds` | A summary of the GC invocation durations. |
+| `go_goroutines` | Number of goroutines that currently exist. |
+| `go_memstats_alloc_bytes` | Number of bytes allocated and still in use. |
+| `go_memstats_alloc_bytes_total` | Total number of bytes allocated, even if freed. |
+| `go_memstats_buck_hash_sys_bytes` | Number of bytes used by the profiling bucket hash table. |
+| `go_memstats_frees_total` | Total number of frees. |
+| `go_memstats_gc_sys_bytes` | Number of bytes used for garbage collection system metadata. |
+| `go_memstats_heap_alloc_bytes` | Number of heap bytes allocated and still in use. |
+| `go_memstats_heap_idle_bytes` | Number of heap bytes waiting to be used. |
+| `go_memstats_heap_inuse_bytes` | Number of heap bytes that are in use. |
+| `go_memstats_heap_objects` | Number of allocated objects. |
+| `go_memstats_heap_released_bytes_total` | Total number of heap bytes released to OS. |
+| `go_memstats_heap_sys_bytes` | Number of heap bytes obtained from system. |
+| `go_memstats_last_gc_time_seconds` | Number of seconds since 1970 of last garbage collection. |
+| `go_memstats_lookups_total` | Total number of pointer lookups. |
+| `go_memstats_mallocs_total` | Total number of mallocs. |
+| `go_memstats_mcache_inuse_bytes` | Number of bytes in use by mcache structures. |
+| `go_memstats_mcache_sys_bytes` | Number of bytes used for mcache structures obtained from system. |
+| `go_memstats_mspan_inuse_bytes` | Number of bytes in use by mspan structures. |
+| `go_memstats_mspan_sys_bytes` | Number of bytes used for mspan structures obtained from system. |
+| `go_memstats_next_gc_bytes` | Number of heap bytes when next garbage collection will take place. |
+| `go_memstats_other_sys_bytes` | Number of bytes used for other system allocations. |
+| `go_memstats_stack_inuse_bytes` | Number of bytes in use by the stack allocator. |
+| `go_memstats_stack_sys_bytes` | Number of bytes obtained from system for stack allocator. |
+| `go_memstats_sys_bytes` | Number of bytes obtained by system. Sum of all system allocations. |
+| `process_cpu_seconds_total` | Total user and system CPU time spent in seconds. |
+| `process_max_fds` | Maximum number of open file descriptors. |
+| `process_open_fds` | Number of open file descriptors. |
+| `process_resident_memory_bytes` | Resident memory size in bytes. |
+| `process_start_time_seconds` | Start time of the process since unix epoch in seconds. |
+| `process_virtual_memory_bytes` | Virtual memory size in bytes. |
diff --git a/v2.4/reference/index.md b/v2.4/reference/index.md
new file mode 100644
index 00000000000..e6716f41dd5
--- /dev/null
+++ b/v2.4/reference/index.md
@@ -0,0 +1,6 @@
+---
+title: Reference
+noversion: yes
+---
+
+This section contains reference information.
diff --git a/v2.4/reference/involved.md b/v2.4/reference/involved.md
new file mode 100644
index 00000000000..d8f271ecb34
--- /dev/null
+++ b/v2.4/reference/involved.md
@@ -0,0 +1,42 @@
+---
+title: Getting Involved
+---
+
+Calico is an open source project, and we'd love you to get involved,
+whether by reading and participating on our Slack or by diving into the
+code to propose enhancements or integrate with other systems. To see the
+options for getting involved with the Calico project, take a look at the
+following.
+
+## Join us on Slack
+
+Our [public Slack](https://slack.projectcalico.org) is the quickest way to get
+in touch for help debugging any issues with Calico.
+
+## Read the Source, Luke!
+
+All of Calico's code is on [GitHub](https://github.com/projectcalico). The following
+list contains the most commonly encountered repositories:
+
+Repository | Description
+-------------------|----------------------------
+[felix](https://github.com/projectcalico/felix) | The Felix policy enforcement agent.
+[calicoctl](https://github.com/projectcalico/calicoctl) | Home of the calico/node and calicoctl components.
+[cni-plugin](https://github.com/projectcalico/cni-plugin) | The Calico CNI plugin.
+[libnetwork-plugin](https://github.com/projectcalico/libnetwork-plugin) | The Calico libnetwork plugin for Docker.
+[k8s-policy](https://github.com/projectcalico/k8s-policy) | The Kubernetes policy controller.
+[libcalico](https://github.com/projectcalico/libcalico) | The Python Calico library.
+[libcalico-go](https://github.com/projectcalico/libcalico-go) | The Golang Calico library.
+
+## Contributing
+
+Calico follows the "Fork & Pull" model of collaborative development,
+with changes being offered to the main Calico codebase via Pull
+Requests. You can contribute a fix, change, or enhancement by forking
+one of our repositories and making a GitHub pull request. If you're
+interested in doing that:
+
+- Thanks!
+- See the [GitHub docs](https://help.github.com/articles/using-pull-requests) for how
+  to create a Pull Request.
+- Check our [contribution guide](contribute) for more information.
diff --git a/v2.4/reference/license.md b/v2.4/reference/license.md
new file mode 100644
index 00000000000..8daa9fc55f8
--- /dev/null
+++ b/v2.4/reference/license.md
@@ -0,0 +1,70 @@
+---
+title: Third Party Software Attributions
+---
+
+
+The following third party components are included with a complete
+Project Calico distribution, and are made available by third parties
+under separate license terms. Applicable license terms and information
+regarding the authors and copyright holders are provided along with the
+applicable source files and binary packages included in this
+distribution.
+
+The licenses under which these software components are distributed are
+reproduced below for clarity.
+
+## OpenStack
+
+    Copyright (c) 2012 OpenStack Foundation.
+    All Rights Reserved.
+
+    Licensed under the Apache License, Version 2.0 (the "License"); you may
+    not use this file except in compliance with the License. You may obtain
+    a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+    License for the specific language governing permissions and limitations
+    under the License.
+
+## Dnsmasq
+
+    dnsmasq is Copyright (c) 2000-2014 Simon Kelley
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; version 2 dated June, 1991, or
+    (at your option) version 3 dated 29 June, 2007.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+## BIRD
+
+    BIRD Internet Routing Daemon
+
+    (c) 1998--2008 Martin Mares
+    (c) 1998--2000 Pavel Machek
+    (c) 1998--2008 Ondrej Filip
+    (c) 2009--2013 CZ.NIC z.s.p.o.
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
diff --git a/v2.4/reference/node/configuration.md b/v2.4/reference/node/configuration.md
new file mode 100644
index 00000000000..5019c9144b0
--- /dev/null
+++ b/v2.4/reference/node/configuration.md
@@ -0,0 +1,119 @@
+---
+title: Configuring calico/node
+---
+
+The `calico/node` container is primarily configured through environment variables.
+
+
+## Environment Variables
+
+| Environment | Description | Schema | Default |
+| ------------- | -------- | ------ | ----- |
+| NODENAME | A unique identifier for this host. | string | |
+| NO_DEFAULT_POOLS | Prevents Calico from creating a default pool if one does not exist. | string | |
+| HOSTNAME [Deprecated] | The hostname of this host. This is used as a unique identifier for the node. This value is overridden by NODENAME. When omitted, if NODENAME has not been specified, this value defaults to the actual hostname of this host. | string | |
+| IP | The IPv4 address to assign this host. When specified, the address is saved in the [node resource configuration]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/node) for this host, overriding any previously configured value. When omitted, if an address **has** been saved in the node resource, then that value will be used. When omitted, if an address **has not** yet been configured in the node resource, the node will auto-detect an IPv4 address and configure the node resource with that address. This autodetection can be forced (even if a value has already been set in the node resource) by setting IP to "autodetect". Doing so will overwrite any value configured in the node resource. | IPv4 | |
+| IP6 | The IPv6 address that Calico will bind to. When specified, the address is saved in the [node resource configuration]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/node) for this host, overriding any previously configured value. When omitted, if an address has not yet been configured in the node resource, IPv6 routing is not enabled. When omitted, if an IPv6 address has been previously configured in the node resource, IPv6 is enabled using the already configured address. | IPv6 | |
+| IP_AUTODETECTION_METHOD | The method to use to autodetect the IPv4 address for this host. This is only used when the IPv4 address is being autodetected. See [IP Autodetection methods](#ip-autodetection-methods) for details of the valid methods. | string | first-found |
+| IP6_AUTODETECTION_METHOD | The method to use to autodetect the IPv6 address for this host. This is only used when the IPv6 address is being autodetected. See [IP Autodetection methods](#ip-autodetection-methods) for details of the valid methods. | string | first-found |
+| DISABLE_NODE_IP_CHECK | Skips checks for duplicate Node IPs. This can reduce the load on the cluster when a large number of Nodes are restarting. | bool | false |
+| AS | The AS number for this node. When specified, the value is saved in the node resource configuration for this host, overriding any previously configured value. When omitted, if an AS number has been previously configured in the node resource, that AS number is used for the peering. When omitted, if an AS number has not yet been configured in the node resource, the node will use the global value (managed through `calicoctl config set/get asnumber`). | int | |
+| DATASTORE_TYPE | Type of datastore. | kubernetes, etcdv2 | etcdv2 |
+| WAIT_FOR_DATASTORE | Wait for connection to datastore before starting. If a successful connection is not made, the node will shut down. | boolean | false |
+| CALICO_LIBNETWORK_CREATE_PROFILES | Enables creating a Calico profile resource for each Docker network. When disabled, no profiles will be processed even if manually created. | boolean | true |
+| CALICO_LIBNETWORK_LABEL_ENDPOINTS | Enables copying a subset of the Docker container labels for use as Calico labels on workload endpoints. | boolean | false |
+| CALICO_LIBNETWORK_ENABLED | Enables running the docker-libnetwork plugin directly in the calico/node container. | boolean | true |
+| CALICO_LIBNETWORK_IFPREFIX | Interface prefix to use for the network interface within the Docker containers that have been networked by the Calico driver. | string | cali |
+| CALICO_NETWORKING_BACKEND | Describes which BGP networking backend to use. | gobgp, bird, none | bird |
+| CALICO_IPV4POOL_CIDR | The IPv4 pool to create if none exists at start up. It is invalid to define this variable and NO_DEFAULT_POOLS. | IPv4 CIDR | 192.168.0.0/16 |
+| CALICO_IPV6POOL_CIDR | The IPv6 pool to create if none exists at start up. It is invalid to define this variable and NO_DEFAULT_POOLS. | IPv6 CIDR | fd80:24e2:f998:72d6::/64 |
+| CALICO_IPV4POOL_IPIP | IPIP mode to use for the IPv4 pool created at start up. | off, always, cross-subnet | off |
+| CALICO_IPV4POOL_NAT_OUTGOING | Controls NAT outgoing for the IPv4 pool created at start up. | boolean | true |
+| CALICO_IPV6POOL_NAT_OUTGOING | Controls NAT outgoing for the IPv6 pool created at start up. | boolean | false |
+| CALICO_STARTUP_LOGLEVEL | The log severity above which startup calico/node logs are sent to stdout. | string | ERROR |
+| CLUSTER_TYPE | A comma-delimited list of indicators about this cluster. Expected to contain information like hosted, services, kubeadm, tectonic, or datastore type. If the Kubernetes API is used as the datastore, then KDD will automatically be added to this list. | k8s, policy, KDD, canal, mesos, docker, rkt, acs, acse, dcos, kubeadm, kubespray, kops, tectonic | |
+| ETCD_ENDPOINTS | A comma-separated list of etcd endpoints (optional). | string | http://127.0.0.1:2379 |
+| ETCD_KEY_FILE | Path to the etcd key file, e.g. `/etc/calico/key.pem` (optional). | string | |
+| ETCD_CERT_FILE | Path to the etcd client cert, e.g. `/etc/calico/cert.pem` (optional). | string | |
+| ETCD_CA_CERT_FILE | Path to the etcd CA file, e.g. `/etc/calico/ca.pem` (optional). | string | |
+| KUBECONFIG | When using the Kubernetes datastore, the location of a kubeconfig file to use. | string | |
+| K8S_API_ENDPOINT | Location of the Kubernetes API. Not required if using kubeconfig. | string | |
+| K8S_CERT_FILE | Location of a client certificate for accessing the Kubernetes API. | string | |
+| K8S_KEY_FILE | Location of a client key for accessing the Kubernetes API. | string | |
+| K8S_CA_FILE | Location of a CA for accessing the Kubernetes API. | string | |
+| K8S_TOKEN | Token to be used for accessing the Kubernetes API. | string | |
+
+In addition to the above, `calico/node` also supports [the standard Felix configuration environment variables](../felix/configuration).
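+
+For illustration, a minimal sketch of launching `calico/node` directly
+with Docker using a few of these variables might look like the following
+(the values and image tag here are examples only; in practice,
+`calicoctl node run` generates the appropriate command for you):
+
+```
+docker run --net=host --privileged --name=calico-node -d \
+  -e NODENAME=my-host-1 \
+  -e IP=autodetect \
+  -e IP_AUTODETECTION_METHOD=can-reach=8.8.8.8 \
+  -e ETCD_ENDPOINTS=http://127.0.0.1:2379 \
+  -v /var/log/calico:/var/log/calico \
+  -v /var/run/calico:/var/run/calico \
+  -v /lib/modules:/lib/modules \
+  quay.io/calico/node:v2.4.0
+```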
+
+> Note: When Calico is configured to use the Kubernetes API as the datastore, the environment
+> variables used for BGP configuration are ignored; this includes selection of the node AS number (AS)
+> and all of the IP selection options (IP, IP6, IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD).
+
+### IP Autodetection methods
+
+When Calico is used for routing, each node must be configured with the IPv4
+address (and IPv6 address if using IPv6) that will be used to route between
+nodes. To eliminate node-specific IP address configuration, the calico/node
+container can be configured to autodetect these IP addresses. In many systems,
+there might be multiple physical interfaces on a host, or possibly multiple IP
+addresses configured on a physical interface. In these cases, there are
+multiple addresses to choose from and so autodetection of the correct address
+can be tricky.
+
+The IP autodetection methods are provided to improve the selection of the
+correct address, by limiting the selection based on suitable criteria for your
+deployment.
+
+The following sections describe the available IP autodetection methods.
+
+#### first-found
+
+The `first-found` option enumerates all interface IP addresses and returns the
+first valid IP address (based on IP version and type of address) on
+the first valid interface. Certain known "local" interfaces
+are omitted, such as the docker bridge. The order in which both the interfaces
+and the IP addresses are listed is system dependent.
+
+This is the default detection method. However, since this method only makes a
+very simplified guess, it is recommended to either configure the node with a
+specific IP address, or to use one of the other detection methods.
+
+For example:
+
+```
+IP_AUTODETECTION_METHOD=first-found
+IP6_AUTODETECTION_METHOD=first-found
+```
+
+#### can-reach=DESTINATION
+
+The `can-reach` method uses your local routing to determine which IP address
+will be used to reach the supplied destination. Both IP addresses and domain
+names may be used.
+
+For example:
+
+```
+# Using IP addresses
+IP_AUTODETECTION_METHOD=can-reach=8.8.8.8
+IP6_AUTODETECTION_METHOD=can-reach=2001:4860:4860::8888
+
+# Using domain names
+IP_AUTODETECTION_METHOD=can-reach=www.google.com
+IP6_AUTODETECTION_METHOD=can-reach=www.google.com
+```
+
+#### interface=INTERFACE-REGEX
+
+The `interface` method uses the supplied interface regular expression (golang
+syntax) to enumerate matching interfaces and to return the first IP address on
+the first matching interface. The order in which both the interfaces
+and the IP addresses are listed is system dependent.
+
+For example:
+
+```
+# Valid IP address on interface eth0, eth1, eth2 etc.
+IP_AUTODETECTION_METHOD=interface=eth.*
+IP6_AUTODETECTION_METHOD=interface=eth.*
+```
diff --git a/v2.4/reference/policy-controller/configuration.md b/v2.4/reference/policy-controller/configuration.md
new file mode 100644
index 00000000000..7352df42051
--- /dev/null
+++ b/v2.4/reference/policy-controller/configuration.md
@@ -0,0 +1,66 @@
+---
+title: Configuring the Calico policy controller
+---
+
+The policy controller is primarily configured through environment variables. When running
+the policy controller as a Kubernetes pod, this is accomplished through the pod manifest `env`
+section.
+
+## The calico/kube-policy-controller container
+
+### Configuring etcd access
+
+The policy controller supports the following environment variables to configure
+etcd access:
+
+* `ETCD_ENDPOINTS`: The list of etcd nodes in your cluster, e.g. `http://10.0.0.1:2379,http://10.0.0.2:2379`.
+* `ETCD_CA_CERT_FILE`: The full path to the CA certificate file for the Certificate Authority that signed the etcd server key/certificate pair.
+* `ETCD_CERT_FILE`: The full path to the client certificate file for accessing the etcd cluster.
+* `ETCD_KEY_FILE`: The full path to the client key file for accessing the etcd cluster.
+
+> NOTE: When running etcd with TLS enabled, the addresses in ETCD_ENDPOINTS must be hostnames (for example, `etcd-host:2379`), not IP addresses.
+
+The `*_FILE` variables are _paths_ to the corresponding certificates / keys. As such, when the policy controller is running as a Kubernetes pod, you
+must ensure that the files exist within the pod. This is usually done in one of two ways:
+
+* Mount the certificates from the host. This requires that the certs be present on the host that the policy controller is scheduled to run on.
+* Use Kubernetes [Secrets](http://kubernetes.io/docs/user-guide/secrets/) to mount the certificates into the Pod as files.
+
+### Configuring Kubernetes API access
+
+The policy controller must access the Kubernetes API in order to learn about NetworkPolicy, Pod, and Namespace events.
+
+The following environment variables are useful for configuring API access:
+
+* `K8S_API`: The location of the Kubernetes API, including transport and port, e.g. `https://kubernetes.default:443`.
+* `CONFIGURE_ETC_HOSTS`: Whether or not the policy controller should configure its /etc/hosts file to resolve the Kubernetes Service clusterIP. When "true", the policy controller will resolve `kubernetes.default` to the configured clusterIP of the Kubernetes API.
+
+It is recommended to use the following configuration for API access:
+
+```
+- name: K8S_API
+  value: "https://kubernetes.default:443"
+- name: CONFIGURE_ETC_HOSTS
+  value: "true"
+```
+
+## The leader election container
+
+The leader election container is an optional sidecar container which performs leader election using the Kubernetes API.
+This ensures that only a single instance of the policy controller is ever active.
+
+The leader election container is only recommended when running the policy controller as a static pod in a multi-master deployment.
+Otherwise, it is recommended to instead use a `ReplicaSet` with a single replica, which ensures that one instance
+will always be running without the need for leader election.
+
+### Kubernetes API access
+
+The leader election container also needs Kubernetes API access, which can be configured through a `kubeconfig` file placed in
+the root directory of the container. This can be done by mounting a file from the host, or using Kubernetes [ConfigMap resources](http://kubernetes.io/docs/user-guide/configmap/).
+
+### Other configuration
+
+* `LOG_LEVEL`: Supports the standard Python log levels, e.g. `LOG_LEVEL=debug`. Defaults to `info`.
+
+More information on leader election can be found in the [kubernetes/contrib](https://github.com/kubernetes/contrib/tree/master/election#simple-leader-election-with-kubernetes-and-docker) repository.
diff --git a/v2.4/reference/previous-releases.md b/v2.4/reference/previous-releases.md
new file mode 100644
index 00000000000..f8947a64a57
--- /dev/null
+++ b/v2.4/reference/previous-releases.md
@@ -0,0 +1,6 @@
+---
+title: Previous releases
+---
+
+
+You are currently viewing the documentation for Calico version {{ page.version }}. There are no previous documentation releases at this time.
diff --git a/v2.4/reference/private-cloud/l2-interconnect-fabric.md b/v2.4/reference/private-cloud/l2-interconnect-fabric.md
new file mode 100644
index 00000000000..0fc13107b27
--- /dev/null
+++ b/v2.4/reference/private-cloud/l2-interconnect-fabric.md
@@ -0,0 +1,245 @@
+---
+subtitle: 'At scale, and no, we''re not joking'
+title: Calico over an Ethernet interconnect fabric
+---
+
+
+This is the first of a few *tech notes* that I will be authoring to
+discuss some of the various interconnect fabric options in a Calico
+network.
+
+Any technology that is capable of transporting IP packets can be used as
+the interconnect fabric in a Calico network (the first person to test
+and publish the results of using [IP over Avian
+Carrier](http://tools.ietf.org/html/rfc1149) as a transport for Calico
+will earn a very nice dinner on or with the core Calico team). This
+means that the standard tools used to transport IP, such as MPLS and
+Ethernet, can be used in a Calico network.
+
+In this note, I'm going to focus on Ethernet as the interconnect
+network. Most of the at-scale cloud operators we talk to have converted
+to IP fabrics, and as we will cover in the next blog post, that
+infrastructure will work for Calico as well. However, the concerns that
+drove most of those operators to IP as the interconnection network in
+their pods are largely ameliorated by Project Calico, allowing Ethernet
+to be viably considered as a Calico interconnect, even in large-scale
+deployments.
+
+## Concerns over Ethernet at scale
+
+It has been acknowledged by the industry for years that, beyond a
+certain size, classical Ethernet networks are unsuitable for production
+deployment. Although there have been
+[multiple](http://en.wikipedia.org/wiki/Provider_Backbone_Bridge_Traffic_Engineering)
+[attempts](http://www.cisco.com/web/about/ac123/ac147/archived_issues/ipj_14-3/143_trill.html) [to address](http://en.wikipedia.org/wiki/Virtual_Private_LAN_Service)
+these issues, the scale-out networking community has largely abandoned
+Ethernet for anything other than providing physical point-to-point links
+in the networking fabric. The principal reasons for Ethernet failures at
+large scale are:
+
+1. Large numbers of *end points* [^1]. Each switch in an Ethernet
+   network must learn the path to all Ethernet endpoints that are
+   connected to the Ethernet network. Learning this amount of state can
+   become a substantial task when we are talking about hundreds of
+   thousands of *end points*.
+2. High rate of *churn*, or change in the network. With that many end
+   points, most of them being ephemeral (such as virtual machines or
+   containers), there is a large amount of *churn* in the network. That
+   load of re-learning paths can be a substantial burden on the control
+   plane processor of most Ethernet switches.
+3. High volumes of broadcast traffic. As each node on the Ethernet
+   network must use broadcast packets to locate peers, and many use
+   broadcast for other purposes, the resultant packet replication to
+   each and every end point can lead to *broadcast storms* in large
+   Ethernet networks, effectively consuming most, if not all, resources
+   in the network and the attached end points.
+4. Spanning tree. Spanning tree is the protocol used to keep an
+   Ethernet network from forming loops. The protocol was designed in
+   the era of smaller, simpler networks, and it has not aged well. As
+   the number of links and interconnects in an Ethernet network goes
+   up, many implementations of spanning tree become more *fragile*.
+   Unfortunately, when spanning tree fails in an Ethernet network, the
+   effect is a catastrophic loop or partition (or both) in the network
+   that is, in most cases, difficult to troubleshoot or resolve.
+
+While many of these issues are crippling at *VM scale* (tens of
+thousands of end points that live for hours, days, or weeks), they will
+be absolutely lethal at *container scale* (hundreds of thousands of end
+points that live for seconds, minutes, or days).
+
+If you weren't ready to turn off your Ethernet data center network
+before this, I bet you are now. Before you do, however, let's look at
+how Project Calico can mitigate these issues, even in very large
+deployments.
+
+## How does Calico tame the Ethernet daemons?
+
+First, let's look at how Calico uses an Ethernet interconnect fabric.
+It's important to remember that an Ethernet network *sees* nothing on
+the other side of an attached IP router; the Ethernet network just
+*sees* the router itself. This is why Ethernet switches can be used at
+Internet peering points, where large fractions of Internet traffic are
+exchanged. The switches only see the routers from the various ISPs, not
+those ISPs' customers' nodes. We leverage the same effect in Calico.
+
+With the issues outlined above in mind, let's revisit them in a Calico
+context.
+
+1. Large numbers of end points. In a Calico network, the Ethernet
+   interconnect fabric only sees the routers/compute servers, not the
+   end points. In a standard cloud model, where there are tens of VMs
+   per server (or hundreds of containers), this reduces the number of
+   nodes that the Ethernet network sees (and has to learn) by one to
+   two orders of magnitude. Even in very large pods (say twenty
+   thousand servers), the Ethernet network would still only see a few
+   tens of thousands of end points, which is well within the scale of
+   any competent data center Ethernet top-of-rack (ToR) switch.
+2. High rate of *churn*. In a classical Ethernet data center fabric,
+   there is a *churn* event each time an end point is created,
+   destroyed, or moved. In a large data center, with hundreds of
+   thousands of endpoints, this *churn* could run into tens of events
+   per second, every second of the day, with peaks easily in the
+   hundreds or thousands of events per second. In a Calico network,
+   however, the *churn* is very low. The only event that would lead to
+   *churn* in a Calico network's Ethernet fabric would be the addition
+   or loss of a compute server, switch, or physical connection. In a
+   twenty thousand server pod, even with a 5% daily failure rate (a few
+   orders of magnitude more than what is normally experienced), there
+   would only be two thousand events per **day**. Any switch that
+   cannot handle that volume of change in the network should not be
+   used for any application.
+3. High volume of broadcast traffic. Since the first (and last) hop for
+   any traffic in a Calico network is an IP hop, and IP hops terminate
+   broadcast traffic, there is no endpoint broadcast network in the
+   Ethernet fabric, period. In fact, the only broadcast traffic that
+   should be seen in the Ethernet fabric is the ARPs of the compute
+   servers locating each other. If the traffic pattern is fairly
+   consistent, the steady-state ARP rate should be almost zero. Even in
+   a pathological case, the ARP rate should be well within normally
+   accepted boundaries.
+4. Spanning tree. Depending on the architecture chosen for the Ethernet
+   fabric, it may even be possible to turn off spanning tree.
+   However, even if it is left on, the reduction in node count and
+   churn means that most competent spanning tree implementations should
+   be able to handle the load without stress.
+
+With these considerations in mind, it should be evident that an Ethernet
+interconnect fabric in Calico is not only possible but practical, and it
+should be seriously considered as the interconnect fabric for a Calico
+network.
+
+As mentioned in the IP fabric post, an IP fabric is also quite feasible
+for Calico, but there are more considerations that must be taken into
+account. The Ethernet fabric option has fewer architectural
+considerations in its design.
+
+## A brief note about Ethernet topology
+
+As mentioned elsewhere in the Calico documentation, since Calico can use
+most of the standard IP tooling, some interesting options regarding
+fabric topology become possible.
+
+We assume that an Ethernet fabric for Calico would most likely be
+constructed as a *leaf/spine* architecture. Other options are possible,
+but the *leaf/spine* is the predominant architectural model in use in
+scale-out infrastructure today.
+
+Since Calico is an IP routed fabric, a Calico network can use
+[ECMP](http://en.wikipedia.org/wiki/Equal-cost_multi-path_routing) to
+distribute traffic across multiple links (instead of using Ethernet
+techniques such as MLAG). By leveraging ECMP load balancing on the
+Calico compute servers, it is possible to build the fabric out of
+multiple *independent* leaf/spine planes using no technologies other
+than IP routing in the Calico nodes, and basic Ethernet switching in the
+interconnect fabric. These planes would operate completely independently
+and could be designed such that they would not share a fault domain.
+This would allow for the catastrophic failure of one (or more) plane(s)
+of Ethernet interconnect fabric without the loss of the pod (the failure
+would just decrease the amount of interconnect bandwidth in the pod).
+This is a gentler failure mode than the pod-wide IP or Ethernet failure
+that is possible with today's designs.
+
+A more in-depth discussion is possible, so if you'd like, please make a
+request, and I will put up a post or white paper. In the meantime, it
+may be interesting to venture over to Facebook's [blog
+post](https://code.facebook.com/posts/360346274145943/introducing-data-center-fabric-the-next-generation-facebook-data-center-network/)
+on their fabric approach. A quick picture to visualize the idea is shown
+below.
+
+![A diagram showing the Ethernet spine planes. Each color represents a
+distinct Ethernet network, transporting a unique IP
+network.]({{site.baseurl}}/images/l2-spine-planes.png)
+
+I am not showing the end points in this diagram, and the end points
+would be unaware of anything in the fabric (as noted above).
+
+In the particular case of this diagram, each ToR is segmented into four
+logical switches (possibly by using 'port VLANs'), [^2] and each compute
+server has a connection to each of those logical switches. We will
+identify those logical switches by their color. Each ToR would then have
+a blue, green, orange, and red logical switch. Those 'colors' would be
+members of a given *plane*, so there would be a blue plane, a green
+plane, an orange plane, and a red plane. Each plane would have a
+dedicated spine switch, and each ToR in a given plane would be connected
+to its spine, and only its spine.
+
+Each plane would constitute an IP network, so the blue plane would be
+2001:db8:1000::/36, the green would be 2001:db8:2000::/36, and the
+orange and red planes would be 2001:db8:3000::/36 and 2001:db8:4000::/36
+respectively. [^3]
+
+Each IP network (plane) requires its own BGP route reflectors. Those
+route reflectors need to be peered with each other within the plane, but
+the route reflectors in each plane do not need to be peered with one
+another. Therefore, a fabric of four planes would have four route
+reflector meshes. Each compute server, border router, *etc.* would need
+to be a route reflector client of at least one route reflector in each
+plane, and very preferably two or more in each plane.
+
+A diagram that visualizes the route reflector environment can be found
+below.
+
+![A diagram showing the route reflector topology in the l2 spine plane
+architecture. The dashed diamonds are the route reflectors, with one or
+more per L2 spine plane. All compute servers are peered to all route
+reflectors, and all the route reflectors in a given plane are also
+meshed. However, the route reflectors in each spine plane are not meshed
+together (*e.g.* the *blue* route reflectors are not peered or meshed
+with the *red* route reflectors). The route reflectors themselves could
+be daemons running on the actual compute servers or on other dedicated
+or networking hardware.]({{site.baseurl}}/images/l2-rr-spine-planes.png)
+
+These route reflectors could be dedicated hardware connected to the
+spine switches (or the spine switches themselves), or physical or
+virtual route reflectors connected to the necessary logical leaf
+switches (blue, green, orange, and red). That may be a route reflector
+running on a compute server and connected directly to the correct plane
+link, and not routed through the vRouter, to avoid the chicken and egg
+problem that would occur if the route reflector were "behind" the Calico
+network.
+
+Other physical and logical configurations and counts are, of course,
+possible; this is just an example.
+
+In the logical configuration, each compute server would then have an
+address on each plane's subnet and would announce its end points on each
+subnet. If ECMP is then turned on, the compute servers would distribute
+the load across all planes.
+
+If a plane were to fail (say due to a spanning tree failure), then only
+that one plane would fail. The remaining planes would stay running.
+
+[^1]: In this document (and in all Calico documents) we tend to use the
+    term *end point* to refer to a virtual machine, container,
+    appliance, bare metal server, or any other entity that is connected
+    to a Calico network. If we are referring to a specific type of end
+    point, we will call that out (such as referring to the behavior of
+    VMs as distinct from containers).
+
+[^2]: We are using logical switches in this example. Physical ToRs could
+    also be used, or a mix of the two (say 2 logical switches hosted on
+    each physical switch).
+
+[^3]: We use IPv6 here purely as an example. IPv4 would be configured
+    similarly. I welcome your questions, either here on the blog, or via
+    the Project Calico mailing list.
diff --git a/v2.4/reference/private-cloud/l3-interconnect-fabric.md b/v2.4/reference/private-cloud/l3-interconnect-fabric.md
new file mode 100644
index 00000000000..1b5cc727a18
--- /dev/null
+++ b/v2.4/reference/private-cloud/l3-interconnect-fabric.md
@@ -0,0 +1,572 @@
+---
+title: IP Interconnect Fabrics in Calico
+lead_text: 'Where large-scale IP networks and hardware collide'
+---
+
+
+Calico provides an end-to-end IP network that interconnects the
+endpoints [^1] in a scale-out or cloud environment. To do that, it needs
+an *interconnect fabric* to provide the physical networking layer on
+which Calico operates [^2].
+
+While Calico is designed to work with any underlying interconnect fabric
+that can support IP traffic, the fabric with the fewest implementation
+considerations is an Ethernet fabric, as discussed in our earlier
+[technical note]({{site.baseurl}}/{{page.version}}/reference/private-cloud/l2-interconnect-fabric).
+
+In most cases, the Ethernet fabric is the appropriate choice, but there
+are infrastructures where L3 (an IP fabric) has already been deployed,
+or will be deployed, and it makes sense for Calico to operate in those
+environments.
+
+However, since Calico is, itself, a routed infrastructure, there are
+more engineering, architecture, and operations considerations that have
+to be weighed when running Calico with an IP routed interconnection
+fabric. We will briefly outline those in the rest of this post. That
+said, Calico operates equally well with Ethernet or IP interconnect
+fabrics.
+
+## In this post
+{: .no_toc}
+
+* TOC
+{:toc}
+
+## Background
+
+### Basic Calico architecture overview
+
+A description of the Calico architecture can be found in our
+[architectural overview]({{site.baseurl}}/{{page.version}}/reference/architecture).
+However, a brief discussion of the routing and data paths is useful for
+the discussion.
+
+In a Calico network, each compute server acts as a router for all of the
+endpoints that are hosted on that compute server. We call that function
+a vRouter. The data path is provided by the Linux kernel, the control
+plane by a BGP protocol server, and the management plane by Calico's
+on-server agent, *Felix*.
+
+Each endpoint can only communicate through its local vRouter, and the
+first and last *hop* in any Calico packet flow is an IP router hop
+through a vRouter. Each vRouter announces all of the endpoints it is
+attached to, to all the other vRouters and other routers on the
+infrastructure fabric, using BGP, usually with BGP route reflectors to
+increase scale. A discussion of why we use BGP can be found in the [Why
+BGP?](http://www.projectcalico.org/why-bgp/) blog post.
+
+Access control lists (ACLs) enforce security (and other) policy as
+directed by whatever cloud orchestrator is in use. There are other
+components in the Calico architecture, but they are irrelevant to the
+interconnect network fabric discussion.
+
+### Overview of current common IP scale-out fabric architectures
+
+There are two approaches to building an IP fabric for a scale-out
+infrastructure. However, both of them, to date, have assumed that the
+edge router in the infrastructure is the top of rack (TOR) switch. In
+the Calico model, that function is pushed to the compute server itself.
+
+Furthermore, in most current virtualized environments, the actual
+endpoint is not addressed by the fabric.
+If it is a VM, it is usually
+encapsulated in an overlay, and if it is a container, it may be
+encapsulated in an overlay, or NATed by some form of proxy, such as is
+done in the [weave](http://www.weave.works/) project network model, or
+by the router in standard [docker](http://www.docker.io/) networking.
+
+The two approaches are outlined below; in this technical note, we will
+cover the second option, as it is more common in the scale-out world. If
+there is interest in the first approach, please contact Project Calico
+and we can discuss it; if there is enough interest, maybe we will do
+another technical note on that approach. If you know of other approaches
+in use, we would be happy to host a guest technical note.
+
+1. The routing infrastructure is based on some form of IGP. Due to the
+   limitations in scale of IGP networks (see the [why BGP
+   post](http://www.projectcalico.org/why-bgp/) for discussion of this
+   topic), the Project Calico team does not believe that using an IGP
+   to distribute endpoint reachability information will adequately
+   scale in a Calico environment. However, it is possible to use a
+   combination of IGP and BGP in the interconnect fabric, where an IGP
+   communicates the path to the *next-hop* router (in Calico, this is
+   often the destination compute server) and BGP is used to distribute
+   the actual next-hop for a given endpoint. This is a valid model,
+   and, in fact, is the most common approach in a widely distributed IP
+   network (say a carrier's backbone network). The design of these
+   networks is somewhat complex though, and will not be addressed
+   further in this technical note. [^3]
+2. The other model, and the one that this note concerns itself with, is
+   one where the routing infrastructure is based entirely on BGP. In
+   this model, the IP network is "tight enough" or has a small enough
+   diameter that BGP can be used to distribute endpoint routes, and the
+   paths to the next-hops for those routes are known to all of the
+   routers in the network (in a Calico network this includes the
+   compute servers). This is the network model that this note
+   will address.
+
+### BGP-only interconnect fabrics
+
+There are multiple methods to build a BGP-only interconnect fabric. We
+will focus on three models, each with two widely viable variations.
+There are other options, and we will briefly touch on why we didn't
+include some of them in the [Other Options appendix](#other-options).
+
+The first two models are:
+
+1. A BGP fabric where each of the TOR switches (and their subsidiary
+   compute servers) are a unique [Autonomous
+   System (AS)](http://en.wikipedia.org/wiki/Autonomous_System_(Internet))
+   and they are interconnected via either an Ethernet switching plane
+   provided by the spine switches in a
+   [leaf/spine](http://bradhedlund.com/2012/10/24/video-a-basic-introduction-to-the-leafspine-data-center-networking-fabric-design/)
+   architecture, or via a set of spine switches, each of which is also
+   a unique AS. We'll refer to this as the *AS per rack* model. This
+   model is detailed in [this IETF working group draft](https://tools.ietf.org/html/draft-ietf-rtgwg-bgp-routing-large-dc).
+2. A BGP fabric where each of the compute servers is a unique AS, and
+   the TOR switches make up a transit AS. We'll refer to this as the
+   *AS per server* model.
+
+Each of these models can have either an Ethernet or an IP spine.
+In the case of an Ethernet spine, each spine switch provides an isolated
+Ethernet connection *plane*, as in the Calico Ethernet interconnect
+fabric model, and each TOR switch is connected to each spine switch.
+
+Another model is where each spine switch is a unique AS, and each TOR
+switch BGP peers with each spine switch. In both cases, the TOR switches
+use ECMP to load-balance traffic between all available spine switches.
+
+### Some BGP network design considerations
+
+Contrary to popular opinion, BGP is actually a fairly simple protocol.
+For example, the BGP configuration on a Calico compute server is
+approximately sixty lines long, not counting comments. The perceived
+complexity is due to the things that you can *do* with BGP. Many uses of
+BGP involve complex policy rules, where the behavior of BGP can be
+modified to meet technical (or business, financial, political, *etc.*)
+requirements. A default Calico network does not venture into those
+areas, [^4] and therefore is fairly straightforward.
+
+That said, there are a few design rules for BGP that need to be kept in
+mind when designing an IP fabric that will interconnect nodes in a
+Calico network. These BGP design requirements *can* be worked around, if
+necessary, but doing so takes the designer out of the standard BGP
+*envelope* and should only be done by an implementer who is *very*
+comfortable with advanced BGP design.
+
+These considerations are:
+
+AS continuity
+
+:   or *AS puddling*. Any router in an AS *must* be able to communicate
+    with any other router in that same AS without transiting another AS.
+
+Next hop behavior
+
+:   By default, BGP routers do not change the *next hop* of a route if
+    they are peering with another router in their same AS. The inverse
+    is also true: a BGP router will set itself as the *next hop* of a
+    route if it is peering with a router in another AS.
+
+Route reflection
+
+:   All BGP routers in a given AS must *peer* with all the other routers
+    in that AS. This is referred to as a *complete BGP mesh*. This can
+    become problematic as the number of routers in the AS scales up. The
+    use of *route reflectors* reduces the need for the complete BGP mesh.
+    However, route reflectors also have scaling considerations.
+
+Endpoints
+
+:   In a Calico network, each endpoint is a route. Hardware networking
+    platforms are constrained by the number of routes they can learn.
+    This is usually in the range of tens of thousands to hundreds of
+    thousands of routes. Route aggregation can help, but that is usually
+    dependent on the capabilities of the scheduler used by the
+    orchestration software (*e.g.* OpenStack).
+
+A deeper discussion of these considerations can be found in the IP
+Fabric Design Considerations appendix.
+
+The designs discussed below address these considerations.
+
+### The *AS Per Rack* model
+
+This model is the closest to the model suggested by the [IETF's Routing
+Area Working Group draft on BGP use in data
+centers](https://tools.ietf.org/html/draft-ietf-rtgwg-bgp-routing-large-dc).
+
+As mentioned earlier, there are two versions of this model, one with a
+set of Ethernet planes interconnecting the ToR switches, and the other
+where the core planes are also routers. The following diagrams may be
+useful for the discussion.
+
+![]({{site.baseurl}}/images/l3-fabric-diagrams-as-rack-l2-spine.png)
+
+> This diagram shows the *AS per rack model* where the ToR switches are
+> physically meshed via a set of Ethernet switching planes.
+
+![]({{site.baseurl}}/images/l3-fabric-diagrams-as-rack-l3-spine.png)
+
+> This diagram shows the *AS per rack model* where the ToR switches are
+> physically meshed via a set of discrete BGP spine routers, each in
+> their own AS.
+
+In this approach, every ToR-ToR or ToR-Spine (in the case of an AS per
+spine) link is an eBGP peering, which means that there is no
+route-reflection possible (using standard BGP route reflectors) *north*
+of the ToR switches.
+
+If the L2 spine option is used, the result is that each ToR must
+peer with every other ToR switch in the cluster (which could be
+hundreds of peers).
+
+If the AS per spine option is used, then each ToR only has to peer with
+each spine (there are usually somewhere between two and sixteen spine
+switches in a pod). However, the spine switches must peer with all ToR
+switches (again, that would be hundreds, but most spine switches have
+more control plane capacity than the average ToR, so this might be more
+scalable in many circumstances).
+
+Within the rack, the configuration is the same for both variants, and is
+somewhat different than the configuration north of the ToR.
+
+Every router within the rack, which, in the case of Calico, is every
+compute server, shares the same AS as the ToR that they are connected
+to. That connection is in the form of an Ethernet switching layer. Each
+router in the rack must be directly connected to enable the AS to remain
+contiguous. The ToR's *router* function is then connected to that
+Ethernet switching layer as well. The actual configuration of this is
+dependent on the ToR in use, but usually it means that the ports that
+are connected to the compute servers are treated as *subnet* or
+*segment* ports, and then the ToR's *router* function has a single
+interface into that subnet.
+
+This configuration allows each compute server to connect to each other
+compute server in the rack without going through the ToR router, but it
+will, of course, go through the ToR switching function. The compute
+servers and the ToR router could all be directly meshed, or a route
+reflector could be used within the rack, either hosted on the ToR
+itself, or as a virtual function hosted on one or more compute servers
+within the rack.
+
+The ToR, as the eBGP router, redistributes all of the routes from other
+ToRs as well as routes external to the data center to the compute
+servers that are in its AS, and announces all of the routes from within
+the AS (rack) to the other ToRs and the larger world. This means that
+each compute server will see the ToR as the next hop for all external
+routes, and the individual compute servers are the next hop for all
+routes internal to the rack.
+
+### The *AS per Compute Server* model
+
+This model takes the concept of an AS per rack to its logical
+conclusion. In the earlier referenced [IETF
+draft](https://tools.ietf.org/html/draft-ietf-rtgwg-bgp-routing-large-dc)
+the assumption in the overall model is that the ToR is the first-tier
+aggregation and routing element. In Calico, the ToR, if it is an L3
+router, is actually the second tier. Remember, in Calico, the compute
+server is always the first/last router for an endpoint, and is also the
+first/last point of aggregation.
+
+Therefore, if we follow the architecture of the draft, the compute
+server, not the ToR, should be the AS boundary. The differences can be
+seen in the following two diagrams.
+
+![]({{site.baseurl}}/images/l3-fabric-diagrams-as-server-l2-spine.png)
+
+> This diagram shows the *AS per compute server model* where the ToR
+> switches are physically meshed via a set of Ethernet switching planes.
+
+![]({{site.baseurl}}/images/l3-fabric-diagrams-as-server-l3-spine.png)
+
+> This diagram shows the *AS per compute server model* where the ToR
+> switches are physically connected to a set of independent routing
+> planes.
+
+As can be seen in these diagrams, there are still the same two variants
+as in the *AS per rack* model, one where the spine switches provide a
+set of independent Ethernet planes to interconnect the ToR switches, and
+the other where that is done by a set of independent routers.
+
+The real difference in this model is that the compute servers as well
+as the ToR switches are all independent autonomous systems. To make this
+work at scale, four byte AS numbers, as discussed in
+[RFC 4893](http://www.faqs.org/rfcs/rfc4893.html "RFC 4893"), are
+required. Without using four byte AS numbering, the total number of ToRs
+and compute servers in a Calico fabric would be limited to the
+approximately five thousand available private AS [^5] numbers. If four
+byte AS numbers are used, there are approximately ninety-two million
+private AS numbers available. This should be sufficient for any given
+Calico fabric.
+
+The other difference in this model *vs.* the AS per rack model is that
+there are no route reflectors used, as all BGP peerings are eBGP. In
+this case, each compute server in a given rack peers with its ToR
+switch, which is also acting as an eBGP router. For two servers within
+the same rack to communicate, they will be routed through the ToR.
+Therefore, each server will have one peering to each ToR it is connected
+to, and each ToR will have a peering with each compute server that it is
+connected to (normally, all the compute servers in the rack).
+
+The inter-ToR connectivity considerations are the same in scale and
+scope as in the AS per rack model.
+
+### The *Downward Default* model
+
+The final model is a bit different. Whereas, in the previous models, all
+of the routers in the infrastructure carry full routing tables and
+leave their AS paths intact, this model [^6] removes the AS numbers at
+each stage of the routing path. This prevents routes from other nodes
+in the network from being rejected as coming from the *local* AS
+(since the source and destination of the route would share the same AS
+number).
+
+The following diagram shows the AS relationships in this model.
+
+![]({{site.baseurl}}/images/l3-fabric-downward-default.png)
+
+> In this diagram, we are showing that all Calico nodes share the same
+> AS number, as do all ToR switches. However, those ASs are different
+> (*A1* is not the same network as *A2*, even though they both share the
+> same AS number *A*).
+
+While the use of a single AS for all ToR switches, and another for all
+compute servers, simplifies deployment (standardized configuration), the
+real benefit comes in the offloading of the routing tables in the ToR
+switches.
+
+In this model, each router announces all of its routes to its upstream
+peer (the Calico routers to their ToR, the ToRs to the spine switches).
+However, in return, the upstream router only announces a default route.
+In this case, a given Calico router only has routes for the endpoints
+that are locally hosted on it, as well as the default from the ToR.
+
+Since the ToR is the only path from the Calico network to the rest of
+the network, this matches reality. The same happens between the ToR
+switches and the spine. This means that the ToR only has to install the
+routes that are for endpoints that are hosted on its downstream Calico
+nodes. Even if we were to host 200 endpoints per Calico node, and stuff
+80 Calico nodes in each rack, that would still limit the routing table
+on the ToR to a maximum of 16,000 entries (well within the capabilities
+of even the most modest of switches).
+
+Since the default route is originated by the spine, there is no
+chance for a downward-announced route to originate from the recipient's
+AS, preventing the *AS puddling* problem.
+
+There is one (minor) drawback to this model, in that all traffic that is
+destined for an invalid destination (the destination IP does not exist)
+will be forwarded to the spine switches before being dropped.
+
+It should also be noted that the spine switches do need to carry all of
+the Calico network routes, just as they do in the routed spines in the
+previous examples. In short, this model imposes no more load on the
+spines than they already would have, and substantially reduces the
+amount of routing table space used on the ToR switches. It also reduces
+the number of routes in the Calico nodes, but, as we have discussed
+before, that is not a concern in most deployments as the amount of
+memory consumed by a full routing table in Calico is a fraction of the
+total memory available on a modern compute server.
+
+## Recommendation
+
+The Project Calico team recommends the use of the [AS per rack](#the-as-per-rack-model) model if
+the resultant routing table size can be accommodated by the ToR and
+spine switches, remembering to account for projected growth.
+
+If there is concern about the route table size in the ToR switches, the
+team recommends the [Downward Default](#the-downward-default-model) model.
+
+If there are concerns about both the spine and ToR switch route table
+capacity, or there is a desire to run a very simple L2 fabric to connect
+the Calico nodes, then the user should consider the Ethernet fabric as
+detailed in [this post]({{site.baseurl}}/{{page.version}}/reference/private-cloud/l2-interconnect-fabric).
+
+If a Calico user is interested in the AS per compute server model, the
+Project Calico team would be very interested in discussing the
+deployment of that model.
+
+## Appendix
+
+### Other Options
+
+As the physical and logical connectivity is laid out in this note and in
+the [Ethernet fabric note]({{site.baseurl}}/{{page.version}}/reference/private-cloud/l2-interconnect-fabric),
+the next-hop router for a given route is always directly connected to
+the router receiving that route. This removes the need for another
+protocol to distribute the next-hop routes.
+
+However, in many (or most) WAN BGP networks, the routers within a given
+AS may not be directly adjacent. Therefore, a router may receive a route
+with a next hop address that it is not directly adjacent to. In those
+cases, an IGP, such as OSPF or IS-IS, is used by the routers within a
+given AS to determine the path to the BGP next hop route.
+
+There may be Calico architectures with similar models, where the
+routers within a given AS are not directly adjacent. In those
+models, the use of an IGP in Calico may be warranted. The configuration
+of those protocols is, however, beyond the scope of this technical
+note.
+
+### IP Fabric Design Considerations
+
+#### AS puddling
+
+The first consideration is that an AS must be kept contiguous. This
+means that any two nodes in a given AS must be able to communicate
+without traversing any other AS. If this rule is not observed, the
+effect is often referred to as *AS puddling* and the network will *not*
+function correctly.
+
+A corollary of that rule is that any two administrative regions that
+share the same AS number are in the same AS, even if that was not the
+desire of the designer. BGP has no way of identifying if an AS is local
+or foreign other than the AS number. Therefore, re-use of an AS number
+for two *networks* that are not directly connected, but only connected
+through another *network* or AS, will not work without substantial
+policy changes on the BGP routers.
+
+Another corollary of that rule is that a BGP router will not propagate a
+route to a peer if the route has an AS in its path that is the same AS
+as the peer. This prevents loops from forming in the network. The effect
+of this is to prevent two routers in the same AS from transiting another
+router (whether in that AS or not).
+
+#### Next hop behavior
+
+Another consideration is based on the differences between iBGP and eBGP.
+BGP operates in two modes: if two routers are BGP peers but share the
+same AS number, then they are considered to be in an *internal* BGP (or
+iBGP) peering relationship. If they are members of different ASs, then
+they are in an *external* or eBGP relationship.
+
+BGP's original design model was that all BGP routers within a given AS
+would know how to get to one another (via static routes, IGP [^7]
+routing protocols, or the like), and that routers in different ASs would
+not know how to reach one another unless they were directly connected.
+
+Based on that design point, routers in an iBGP peering relationship
+assume that they do not transit traffic for other iBGP routers in a
+given AS (i.e. A can communicate with C, and therefore will not need to
+route through B), and therefore do not change the *next hop* attribute
+in BGP [^8].
+
+A router with an eBGP peering, on the other hand, assumes that its eBGP
+peer will not know how to reach the next hop route, and therefore will
+substitute its own address in the next hop field. This is often referred
+to as *next hop self*.
+
+In the Calico [Ethernet fabric]({{site.baseurl}}/{{page.version}}/reference/private-cloud/l2-interconnect-fabric)
+model, all of the compute servers (the routers in a Calico network) are
+directly connected over one or more Ethernet network(s) and therefore
+are directly reachable. In this case, a router in the Calico network
+does not need to set *next hop self* within the Calico fabric.
+
+The models we present in this technical note ensure that all routes that
+may traverse a non-Calico router are eBGP routes, and therefore *next
+hop self* is automatically set correctly. If a deployment of Calico in
+an IP interconnect fabric does not satisfy that constraint, then *next
+hop self* must be appropriately configured.
+
+#### Route reflection
+
+As mentioned above, BGP expects that all of the iBGP routers in a
+network can see (and speak) directly to one another; this is referred to
+as a *BGP full mesh*. In small networks this is not a problem, but it
+does become interesting as the number of routers increases. For example,
+if you have 99 BGP routers in an AS and wish to add one more, you would
+have to configure the peering to that new router on each of the 99
+existing routers.
+Not only is this a problem at configuration time, it
+means that each router is maintaining 100 protocol adjacencies, which
+can start being a drain on constrained resources in a router. While this
+might be *interesting* at 100 routers, it becomes an impossible task
+with 1,000s or 10,000s of routers (the potential size of a Calico
+network).
+
+Conveniently, large scale/Internet scale networks solved this problem
+almost 20 years ago by deploying BGP route reflection as described in
+[RFC 1966](http://www.faqs.org/rfcs/rfc1966.html "RFC 1966"). This is a
+technique supported by almost all BGP routers today. In a large network,
+a number of route reflectors [^9] are evenly distributed and each iBGP
+router is *peered* with one or more route reflectors (usually 2 or 3).
+Each route reflector can handle 10s or 100s of route reflector clients
+(in Calico's case, the compute servers), depending on the route
+reflector being used. Those route reflectors are, in turn, peered with
+each other. This means that there are an order of magnitude fewer route
+reflectors that need to be completely meshed, and each route reflector
+client is only configured to peer with 2 or 3 route reflectors. This is
+much easier to manage.
+
+Other route reflector architectures are possible, but those are beyond
+the scope of this document.
+
+#### Endpoints
+
+The final consideration is the number of endpoints in a Calico network.
+In the [Ethernet fabric]({{site.baseurl}}/{{page.version}}/reference/private-cloud/l2-interconnect-fabric)
+case the number of endpoints is not constrained by the interconnect
+fabric, as the interconnect fabric does not *see* the actual endpoints;
+it only *sees* the vRouters, or compute servers. This is not the
+case in an IP fabric, however. IP networks forward by using the
+destination IP address in the packet, which, in Calico's case, is the
+destination endpoint. That means that the IP fabric nodes (ToR switches
+and/or spine switches, for example) must know the routes to each
+endpoint in the network. They learn this by participating as route
+reflector clients in the BGP mesh, just as the Calico vRouter/compute
+server does.
+
+However, unlike a compute server, which has a relatively unconstrained
+amount of memory, a physical switch is either memory constrained or
+quite expensive. This means that the physical switch has a limit on how
+many *routes* it can handle. The current industry standard for modern
+commodity switches is in the range of 128,000 routes. This means that,
+without other routing *tricks*, such as aggregation, a Calico
+installation that uses an IP fabric will be limited to the routing table
+size of its constituent network hardware, with a reasonable upper limit
+today of 128,000 endpoints.
+
+[^1]: In Calico's terminology, an endpoint is an IP address and
+    interface. It could refer to a VM, a container, or even a process
+    bound to an IP address running on a bare metal server.
+
+[^2]: This interconnect fabric provides the connectivity between the
+    Calico (v)Router (in almost all cases, the compute servers) nodes,
+    as well as any other elements in the fabric (*e.g.* bare metal
+    servers, border routers, and appliances).
+
+[^3]: If there is interest in a discussion of this approach, please let
+    us know. The Project Calico team could either arrange a discussion,
+    or if there was enough interest, publish a follow-up tech note.
+
+[^4]: However, those tools are available if a given Calico instance
+    needs to utilize those policy constructs.
+
+[^5]: The two byte AS space reserves approximately the last five
+    thousand AS numbers for private use. There is no technical reason
+    why other AS numbers could not be used. However, the re-use of
+    globally scoped AS numbers within a private infrastructure is
+    strongly discouraged. The chance for routing system failure or
+    incorrect routing is substantial, and not restricted to the entity
+    that is doing the reuse.
+
+[^6]: We first saw this design in a customer's lab, and thought it
+    innovative enough to share (we asked them first, of course). Similar
+    *AS Path Stripping* approaches are used in ISP networks, however.
+
+[^7]: An Interior Gateway Protocol is a local routing protocol that does
+    not cross an AS boundary. The primary IGPs in use today are OSPF and
+    IS-IS. While complex iBGP networks still use IGP routing protocols,
+    a data center is normally a fairly simple network, even if it has
+    many routers in it. Therefore, in the data center case, the use of
+    an IGP can often be dispensed with.
+
+[^8]: A next hop is an attribute of a route announced by a routing
+    protocol. In simple terms, a route is defined by a *target*, or the
+    destination that is to be reached, and a *next hop*, which is the
+    next router in the path to reach that target. There are many other
+    characteristics in a route, but those are well beyond the scope of
+    this post.
+
+[^9]: A route reflector may be a physical router, a software appliance,
+    or simply a BGP daemon. It only processes routing messages, and does
+    not pass actual data plane traffic. However, some route reflectors
+    are co-resident on regular routers that do pass data plane traffic.
+    While they may sit on one platform, the functions are distinct.
diff --git a/v2.4/reference/public-cloud/aws.md b/v2.4/reference/public-cloud/aws.md
new file mode 100644
index 00000000000..61ef68c5eed
--- /dev/null
+++ b/v2.4/reference/public-cloud/aws.md
@@ -0,0 +1,101 @@
+---
+title: AWS
+---
+
+Calico provides the following advantages when running in AWS:
+
+- **Network Policy for Containers:** Calico provides fine-grained network security policy for individual containers.
+- **No Overlays:** Within each VPC subnet Calico doesn't need an overlay, which means high performance networking for your containers.
+- **No 50 Node Limit:** Calico allows you to surpass the 50 node limit, which exists as a consequence of the [AWS 50 route limit](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Appendix_Limits.html#vpc-limits-route-tables) when using the VPC routing table.
+
+## Requirements
+
+To deploy Calico in AWS, you must ensure that the proper security group rules
+have been created and that traffic between containers on different hosts is
+not dropped by the VPC. There are a few different options for doing this
+depending on your deployment.
+
+#### Configure Security Groups
+
+Calico requires the following security group exceptions to function properly
+in AWS.
+
+| Description | Type | Protocol | Port Range |
+|:-----------------|:----------------|:---------|:-----------|
+| BGP | Custom TCP Rule | TCP | 179 |
+| \*IPIP | Custom Protocol | IPIP | all |
+
+>\*IPIP: This rule is required only when using Calico with IPIP encapsulation.
+Keep reading for information on when IPIP is required in AWS.
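+
+As an illustration, the following AWS CLI commands would add both rules to an
+existing security group, allowing this traffic between members of that group.
+This is a sketch only: it assumes the AWS CLI is installed and configured, and
+the group ID `sg-0123abcd` is hypothetical - substitute your own.
+
+```
+# Allow BGP (TCP 179) between instances in the security group.
+aws ec2 authorize-security-group-ingress --group-id sg-0123abcd \
+  --protocol tcp --port 179 --source-group sg-0123abcd
+
+# Allow IP-in-IP (IP protocol number 4); only needed when using IPIP.
+aws ec2 authorize-security-group-ingress --group-id sg-0123abcd \
+  --protocol 4 --source-group sg-0123abcd
+```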
+
+#### Routing Traffic Within a Single VPC Subnet
+
+Since Calico assigns IP addresses outside the range used by AWS for EC2
+instances, you must disable AWS src/dst checks on each EC2 instance in
+your cluster
+[as described in the AWS documentation](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html#EIP_Disable_SrcDestCheck). This
+allows Calico to route traffic natively within a single VPC subnet without
+using an overlay or any of the limited VPC routing table entries.
+
+#### Routing Traffic Across Different VPC Subnets / VPCs
+
+If you need to split your deployment across multiple AZs for high
+availability, then each AZ will have its own VPC subnet. To use Calico
+across multiple different VPC subnets or
+[peered VPCs](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-peering.html),
+in addition to disabling src/dst checks as described above, you must also
+enable IPIP encapsulation and outgoing NAT on your Calico IP pools.
+
+See the [IP pool configuration reference]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/ippool)
+for information on how to configure Calico IP pools.
+
+By default, Calico's IPIP encapsulation applies to all container-to-container traffic. However,
+encapsulation is only required for container traffic that crosses a VPC subnet boundary. For better
+performance, you can configure Calico to perform IPIP encapsulation only across VPC subnet boundaries.
+
+To enable the "cross-subnet" IPIP feature, configure your Calico IP pool resources
+to enable IPIP and set the mode to "cross-subnet".
+
+> This feature was introduced in Calico v2.1. If your deployment was created with
+> an older version of Calico, or if you are unsure whether your deployment
+> is configured correctly, follow the [Configuring IP-in-IP guide]({{site.baseurl}}/{{page.version}}/usage/configuration/ip-in-ip)
+> which discusses this in more detail.
+
+The following `calicoctl` command will create or modify an IPv4 pool with
+CIDR 192.168.0.0/16 using IPIP mode `cross-subnet`. Adjust the pool CIDR for your deployment.
+
+```
+$ calicoctl apply -f - << EOF
+apiVersion: v1
+kind: ipPool
+metadata:
+  cidr: 192.168.0.0/16
+spec:
+  ipip:
+    enabled: true
+    mode: cross-subnet
+EOF
+```
+
+#### Enabling Workload-to-WAN Traffic
+
+To allow Calico networked containers to reach resources outside of AWS,
+you must configure outgoing NAT on your [Calico IP pool]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/ippool).
+
+AWS will perform outbound NAT on any traffic which has the source address of an EC2 virtual
+machine instance. By enabling outgoing NAT on your Calico IP pool, Calico will
+NAT any outbound traffic from the containers hosted on the EC2 virtual machine instances.
+
+The following `calicoctl` command will create or modify an IPv4 pool with
+CIDR 192.168.0.0/16 using IPIP mode `cross-subnet` and enables outgoing NAT.
+Adjust the pool CIDR for your deployment.
+
+```
+$ calicoctl apply -f - << EOF
+apiVersion: v1
+kind: ipPool
+metadata:
+  cidr: 192.168.0.0/16
+spec:
+  ipip:
+    enabled: true
+    mode: cross-subnet
+  nat-outgoing: true
+EOF
+```
diff --git a/v2.4/reference/public-cloud/gce.md b/v2.4/reference/public-cloud/gce.md
new file mode 100644
index 00000000000..b8b694b3219
--- /dev/null
+++ b/v2.4/reference/public-cloud/gce.md
@@ -0,0 +1,53 @@
+---
+title: Deploying Calico on GCE
+---
+
+To deploy Calico in [Google Compute Engine][GCE], you must ensure that the
+proper firewall rules have been created and that traffic between containers on
+different hosts is not dropped by the GCE fabric. There are a few different
+options for doing this depending on your deployment.
+
+#### Configure GCE Firewall Rules
+
+Calico requires the following firewall rules to function in GCE.
+
+| Description | Protocol | Port Range |
+|:-----------------|:---------|:-----------|
+| BGP | TCP | 179 |
+| \*IPIP | 4 | all |
+
+>\*IPIP: This rule is required only when using Calico with IPIP encapsulation.
+Keep reading for information on when IPIP is required in GCE.
+
+#### Routing Traffic
+
+One of the following options must be used when deploying Calico in
+GCE to ensure container traffic is allowed by the GCE network fabric.
+
+##### IP-in-IP encapsulation
+
+Container traffic routing can be enabled by setting [IP-in-IP encapsulation][IPIP]
+and NAT outgoing on the configured Calico IP pools.
+
+See the [IP pool configuration reference][IPPool]
+for information on how to configure Calico IP pools.
+
+##### GCE cloud routes
+
+Traffic routing in GCE can be achieved by utilizing GCE cloud routes and
+running Calico in policy-only mode. The Kubernetes GCE cloud provider
+integration simplifies route configuration by enabling Kubernetes to handle
+creating routes.
+
+#### Enabling Workload-to-WAN Traffic
+
+To allow Calico networked containers to reach resources outside of GCE,
+you must configure outgoing NAT on your [Calico IP pool][IPPool].
+
+GCE will perform outbound NAT on any traffic which has the source address of a virtual
+machine instance. By enabling outgoing NAT on your Calico IP pool, Calico will
+NAT any outbound traffic from the containers hosted on the virtual machine instances.
+
+[IPIP]: {{site.baseurl}}/{{page.version}}/usage/configuration/ip-in-ip
+[IPPool]: {{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/ippool
+[GCE]: https://cloud.google.com/compute/
diff --git a/v2.4/reference/repo-structure.md b/v2.4/reference/repo-structure.md
new file mode 100644
index 00000000000..ebed7b67a56
--- /dev/null
+++ b/v2.4/reference/repo-structure.md
@@ -0,0 +1,64 @@
+---
+title: Calico Repositories
+---
+
+The following information details which artifacts are built from which
+repositories.
+
+## Core Components
+
+### [Felix](https://github.com/projectcalico/felix)
+
+| Artifact | Type | Description |
+|---------|-------|-----------|
+| felix | Binary | Felix interfaces with the Linux kernel to configure routes and ACLs that control network policy and connectivity. |
+
+
+### [calicoctl](https://github.com/projectcalico/calicoctl)
+
+| Artifact | Type | Description |
+|---------|-------|-----------|
+| calico/node | Docker Image | The Docker image used to run Felix and the BIRD BGP agent (for distributing routes between hosts). See [calico/node reference]({{site.baseurl}}/{{page.version}}/reference/architecture/components) for details. |
+| calicoctl | Binary | The command line tool for interacting with the Calico API. See [calicoctl reference]({{site.baseurl}}/{{page.version}}/reference/calicoctl) for more info. |
+
+## Libraries
+
+### [libcalico](https://github.com/projectcalico/libcalico)
+
+| Artifact | Type | Description |
+|---------|-------|-----------|
+| libcalico | pypi library | Contains a variety of helper methods and classes for manipulating Calico data in an etcd datastore, IP address management, and namespace utilities for manipulating container interfaces. |
+| calico/test | Docker Image | Contains useful shared testing framework code and dependencies for running unit and system tests of Calico in Docker. |
+| calico/build | Docker Image | Build image which includes libcalico and the necessary python tooling to produce binaries from python source code. |
+
+### [libcalico-go](https://github.com/projectcalico/libcalico-go)
+
+| Artifact | Type | Description |
+|---------|-------|-----------|
+| libcalico-go | golang library | Contains a variety of helper methods and classes for interacting with the Calico API. |
+
+## Orchestrator Plugins
+
+There are several integrations available for Calico in a containerized
+environment. The repositories below hold the plugin code for these
+integrations.
+
+### [cni-plugin](https://github.com/projectcalico/cni-plugin)
+
+| Artifact | Type | Description |
+|---------|-------|-----------|
+| calico | binary | Calico networking plugin for any orchestrator that uses the [Container Network Interface](https://github.com/appc/cni), e.g. [rkt](https://github.com/coreos/rkt), [Kubernetes](https://github.com/kubernetes/kubernetes), and [Mesos](https://github.com/apache/mesos). |
+| calico-ipam | binary | Calico CNI IP address management plugin. |
+
+### [libnetwork-plugin](https://github.com/projectcalico/libnetwork-plugin)
+
+| Artifact | Type | Description |
+|---------|-------|-----------|
+| libnetwork-plugin | binary | Docker networking plugin for use with Docker and Docker Swarm. It provides both network and IPAM drivers which may be used when creating networks through Docker. |
+
+
+### [k8s-policy](https://github.com/projectcalico/k8s-policy)
+
+| Artifact | Type | Description |
+|----------|-----|-------------|
+| calico/kube-policy-controller | Docker Image | Implements the Kubernetes Network Policy API. |
diff --git a/v2.4/reference/requirements.md b/v2.4/reference/requirements.md
new file mode 100644
index 00000000000..4c5dc22c034
--- /dev/null
+++ b/v2.4/reference/requirements.md
@@ -0,0 +1,116 @@
+---
+title: Calico System Requirements
+---
+
+Depending on the Calico functionality you are using, there are some requirements your system needs to meet in order for Calico to work properly.
+The calico/node container image ships with the following: `ip6tables`, `ipset`, `iputils`, `iproute2`, and `conntrack-tools`.
+
+## Minimum Linux kernel versions
+
+IPv4 only: `2.6.32`
+
+IPv6: `3.10`
+
+## Requirements for Calico Policy:
+
+### iproute2
+
+ iproute2 is a collection of utilities for controlling TCP/IP networking and traffic control in Linux.
+
+ **Shared libraries dependencies**:
+ - `glibc`
+ - `libelf`
+
+### iputils
+
+The iputils package is a set of small useful utilities for Linux networking.
+
+ **Shared libraries dependencies**:
+ - `libcap`
+ - `libidn`
+ - `openssl`
+ - `sysfsutils`
+
+### conntrack
+
+The [conntrack-tools](http://www.netfilter.org/projects/conntrack-tools/index.html) are a set of tools to manage the in-kernel connection tracking state table from userspace.
+
+ **Minimum required version**: `1.4.1`
+
+ **Kernel dependencies**:
+ - `nf_conntrack_netlink` subsystem
+ - `nf_conntrack`
+ - `nfnetlink`
+
+ **Shared libraries dependencies**:
+ - `libnetfilter_conntrack`
+ - `libnfnetlink`
+ - `libmnl`
+ - `libnetfilter_cttimeout`
+
+This is included in kernel version `2.6.18` and above.
+
+### iptables / ip6tables
+
+[iptables](http://www.netfilter.org/projects/iptables/index.html) is a command line utility for configuring the Linux kernel firewall implemented within the [Netfilter](http://www.netfilter.org) project.
+
+ **Minimum required version**: `1.4.7`
+
+ **Kernel dependencies**:
+ - `ip_tables` (for IPv4)
+ - `x_tables`
+ - `ip6_tables` (for IPv6)
+ - `x_tables`
+
+ **Shared libraries dependencies**:
+ - `glibc`
+ - `libnftnl`
+ - `libpcap`
+
+`x_tables` has the shared code used by `iptables` modules.
+ This is included in kernel version `2.4` and above.
+
+### ipset
+
+[ipset](http://ipset.netfilter.org/) is used to set up, maintain and inspect so-called IP sets in the Linux kernel.
+
+ **Minimum required version**: `6.11`
+
+ **Kernel modules dependencies**:
+ - `ip_set`
+ - `nfnetlink`
+
+### iptables match features
+
+`xt_mark`
+ - `x_tables`
+
+`xt_addrtype` (`ipt_addrtype`, `ip6t_addrtype`)
+ - `x_tables`
+
+`xt_multiport`
+ - `x_tables`
+
+### Other required kernel features
+
+`xt_set`: Kernel module which implements the set match and SET target for netfilter/iptables.
+ - `ip_set`
+ - `x_tables`
+
+`ipt_set`: Kernel module to match an IP set.
+ - `x_tables`
+ - `ip_set`
+
+`ipt_rpfilter`: Kernel module to match RPF.
+
+`ipt_REJECT`: Kernel module to reject packets.
+
+## Requirements for Calico Networking:
+
+### IP-in-IP Tunneling
+
+The IP tunnel driver provides an IP tunnel through which you can tunnel network traffic transparently across subnets.
+
+ - `ipip`
+ - `ip_tunnel`
+ - `tunnel4`
diff --git a/v2.4/reference/supported-platforms.md b/v2.4/reference/supported-platforms.md
new file mode 100644
index 00000000000..5b79c7fd1b5
--- /dev/null
+++ b/v2.4/reference/supported-platforms.md
@@ -0,0 +1,10 @@
+---
+title: Supported Platforms
+---
+
+Calico version {{ page.version }} supports integration with the following platforms.
+
+- Kubernetes v1.1+
+- Mesos
+- Docker
+- OpenStack Mitaka, Liberty, Kilo, Juno, and Icehouse
diff --git a/v2.4/releases/index.md b/v2.4/releases/index.md
new file mode 100644
index 00000000000..d69cf0aa969
--- /dev/null
+++ b/v2.4/releases/index.md
@@ -0,0 +1,21 @@
+---
+title: Releases
+---
+
+The following table shows component versioning for Calico **{{ page.version }}**.
+
+Use the version selector at the top-right of this page to view a different release.
+
+{% for release in site.data.versions[page.version] %}
+## {{ release.title }}
+{% unless release.title == "master" %}
+[Release archive](https://github.com/projectcalico/calico/releases/download/{{ release.title }}/release-{{ release.title }}.tgz) with Kubernetes manifests, Docker images and binaries.
+{% endunless %}
+
+{{ release.note }}
+
+| Component | Version |
+|------------------------|---------|{% for component_name in release.components %}
+| {{ component_name[0] }} | [{{ component_name[1].version }}]({{ component_name[1].url }}) |{% endfor %}
+
+{% endfor %}
diff --git a/v2.4/usage/calicoctl/container.md b/v2.4/usage/calicoctl/container.md
new file mode 100644
index 00000000000..126e0053101
--- /dev/null
+++ b/v2.4/usage/calicoctl/container.md
@@ -0,0 +1,23 @@
+---
+title: calico/ctl container
+---
+
+With each release of calicoctl, the Docker container `calico/ctl` is released to
+Docker Hub and Quay and can be used for running calicoctl commands.
+See the
+[calicoctl reference overview]({{site.baseurl}}/{{page.version}}/reference/calicoctl/)
+for caveats when using a containerized version.
+
+#### Configuring the calico/ctl container
+
+See [Configuring calicoctl]({{site.baseurl}}/{{page.version}}/reference/calicoctl/setup)
+for guidance on manually configuring a calico/ctl container. Keep in mind
+when using a container that any environment variables and configuration files
+must be passed to the container so they are available to the process inside.
+
+#### Keeping a configured calico/ctl running
+
+It can be useful to keep a running container (that sleeps) configured
+for your datastore; it is then possible to `exec` into the container and
+have an already configured environment. If using Kubernetes, see
+[Running calicoctl as a Kubernetes Pod]({{site.baseurl}}/{{page.version}}/getting-started/kubernetes/tutorials/using-calicoctl).
diff --git a/v2.4/usage/calicoctl/install-and-configuration.md b/v2.4/usage/calicoctl/install-and-configuration.md
new file mode 100644
index 00000000000..ade1e8bb1d4
--- /dev/null
+++ b/v2.4/usage/calicoctl/install-and-configuration.md
@@ -0,0 +1,76 @@
+---
+title: Installing and Configuring calicoctl
+---
+
+This document outlines how to install and configure calicoctl, which is the
+primary tool for viewing, manipulating, and creating Calico objects on the
+command line.
+
+### Where to run calicoctl
+
+Calicoctl's primary function is to read or manipulate state stored in the
+datastore. As such, it can run from any host with network access to the
+datastore. There are also the `node` sub-commands of calicoctl that are for
+starting and checking the calico/node container. To use this functionality,
+the calicoctl tool must be run on the host where the container will run or
+is running.
+
+### Installing calicoctl
+
+The calicoctl tool can be downloaded from the
+[release page of the calicoctl repository]({{site.data.versions[page.version].first.components.calicoctl.url}}),
+made executable, and then it is ready to run.
+
+```
+curl -O -L {{site.data.versions[page.version].first.components.calicoctl.download_url}}
+chmod +x calicoctl
+```
+
+> *Note:* Move calicoctl to a directory in your PATH or add the directory
+ it is in to your PATH to avoid prepending the path to invocations of
+ calicoctl.
+
+### Datastore configuration
+
+Datastore configuration may be as simple as using the defaults, but in most
+cases the endpoint will need to be specified, and possibly other settings too,
+all of which depend on how your datastore is configured.
+
+Here is a simple etcdv2 example.
+
+```
+ETCD_ENDPOINTS=http://etcd:2379 calicoctl get nodes
+```
+
+Here is a simple Kubernetes datastore example.
+
+```
+DATASTORE_TYPE=kubernetes KUBECONFIG=~/.kube/config calicoctl get nodes
+```
+
+For the possible options and configuration guidance see
+[Configuring calicoctl]({{site.baseurl}}/{{page.version}}/reference/calicoctl/setup).
+
+### Checking the configuration
+
+Here is a simple command to check that the installation and configuration is
+correct.
+
+```
+calicoctl get nodes
+```
+
+A correct setup will yield a list of the nodes that have registered. If an
+empty list is returned, you are either pointed at the wrong datastore or no
+nodes have registered. If an error is returned, attempt to correct the
+issue and try again.
+
+### Next steps
+
+Now you are ready to read and configure most aspects of Calico. You can
+find the full list of commands in the
+[Command Reference]({{site.baseurl}}/{{page.version}}/reference/calicoctl/commands/).
+
+The full list of resources that can be managed, including a description of each,
+can be found in the
+[Resource Definitions]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/).
diff --git a/v2.4/usage/configuration/as-service.md b/v2.4/usage/configuration/as-service.md
new file mode 100644
index 00000000000..ca1ebda003e
--- /dev/null
+++ b/v2.4/usage/configuration/as-service.md
@@ -0,0 +1,112 @@
+---
+title: Running Calico Node Container as a Service
+---
+
+This guide explains how to run Calico as a system process or service,
+with a focus on running in a Dockerized deployment. We include
+examples for Systemd, but the commands can be applied to other init
+daemons such as upstart as well.
+
+## Running the Calico Node Container as a Service
+This section describes how to run the Calico node as a Docker container
+in Systemd. Included here is an EnvironmentFile that defines the environment
+variables for Calico, and a sample Systemd service file that uses the
+environment file and starts the Calico node image as a service.
+
+`calico.env` - the EnvironmentFile:
+
+```shell
+ETCD_ENDPOINTS=http://localhost:2379
+ETCD_CA_CERT_FILE=""
+ETCD_CERT_FILE=""
+ETCD_KEY_FILE=""
+CALICO_NODENAME=""
+CALICO_NO_DEFAULT_POOLS=""
+CALICO_IP=""
+CALICO_IP6=""
+CALICO_AS=""
+CALICO_LIBNETWORK_ENABLED=true
+CALICO_NETWORKING_BACKEND=bird
+```
+
+Be sure to update this environment file as necessary, such as modifying
+ETCD_ENDPOINTS to point at the correct etcd cluster endpoints.
+
+> Note: The ETCD_CA_CERT_FILE, ETCD_CERT_FILE, and ETCD_KEY_FILE
+> environment variables are required when using etcd with SSL/TLS. The values
+> here are standard values for a non-SSL version of etcd, but you can use this
+> template to define your SSL values if desired.
+>
+> If CALICO_NODENAME is blank, the compute server hostname will be used
+> to identify the Calico node.
+>
+> If CALICO_IP or CALICO_IP6 are left blank, Calico will use the currently
+> configured values for the next hop IP addresses for this node - these can
+> be configured through the node resource. If no next hop addresses have
+> been configured, Calico will automatically determine an IPv4 next hop address
+> by querying the host interfaces (and it will configure this value in the
+> node resource). You may set CALICO_IP to `autodetect` to force
+> auto-detection of the IP address every time the node starts. If you set IP
+> addresses through these environment variables, it will reconfigure any
+> values currently set through the node resource.
+>
+> If CALICO_AS is left blank, Calico will use the currently configured value
+> for the AS Number for the node BGP client - this can be configured through
+> the node resource. If no value is set, Calico will inherit the AS Number
+> from the global default value. If you set a value through this environment
+> variable, it will reconfigure any value currently set through the node
+> resource.
+>
+> The CALICO_NETWORKING_BACKEND defaults to use Bird as the routing daemon.
+> This may also be set to gobgp (to use gobgp as the routing daemon, but note
+> that this does not support IP in IP), or none (if routing is handled by an
+> alternative mechanism).
+
+### Systemd Service Example
+
+`calico-node.service` - the Systemd service:
+
+```shell
+[Unit]
+Description=calico-node
+After=docker.service
+Requires=docker.service
+
+[Service]
+EnvironmentFile=/etc/calico/calico.env
+ExecStartPre=-/usr/bin/docker rm -f calico-node
+ExecStart=/usr/bin/docker run --net=host --privileged \
+  --name=calico-node \
+  -e NODENAME=${CALICO_NODENAME} \
+  -e IP=${CALICO_IP} \
+  -e IP6=${CALICO_IP6} \
+  -e CALICO_NETWORKING_BACKEND=${CALICO_NETWORKING_BACKEND} \
+  -e AS=${CALICO_AS} \
+  -e NO_DEFAULT_POOLS=${CALICO_NO_DEFAULT_POOLS} \
+  -e CALICO_LIBNETWORK_ENABLED=${CALICO_LIBNETWORK_ENABLED} \
+  -e ETCD_ENDPOINTS=${ETCD_ENDPOINTS} \
+  -e ETCD_CA_CERT_FILE=${ETCD_CA_CERT_FILE} \
+  -e ETCD_CERT_FILE=${ETCD_CERT_FILE} \
+  -e ETCD_KEY_FILE=${ETCD_KEY_FILE} \
+  -v /var/log/calico:/var/log/calico \
+  -v /run/docker/plugins:/run/docker/plugins \
+  -v /lib/modules:/lib/modules \
+  -v /var/run/calico:/var/run/calico \
+  quay.io/calico/node:{{site.data.versions[page.version].first.title}}
+
+ExecStop=-/usr/bin/docker stop calico-node
+
+[Install]
+WantedBy=multi-user.target
+```
+
+The Systemd service above does the following on start:
+ - Confirm docker is installed under the `[Unit]` section
+ - Get environment variables from the environment file above
+ - Remove any existing `calico-node` container
+ - Start `calico/node`
+
+The service will also stop the calico-node container when the service is
+stopped.
+
+**Note**: Depending on how you've installed Docker, the name of the Docker service
+under the `[Unit]` section may be different (such as `docker-engine.service`).
+Be sure to check this before starting the service.
diff --git a/v2.4/usage/configuration/bgp.md b/v2.4/usage/configuration/bgp.md
new file mode 100644
index 00000000000..d6217aaf8e2
--- /dev/null
+++ b/v2.4/usage/configuration/bgp.md
@@ -0,0 +1,256 @@
+---
+title: Configuring BGP Peers
+---
+
+This document describes the commands available in `calicoctl` for managing BGP. It
+is intended primarily for users who are running on private cloud
+and would like to peer Calico with their underlying infrastructure.
+
+This document covers configuration of:
+
+- Global default node AS Number
+- The full node-to-node mesh
+- Global BGP Peers
+- Node-specific BGP Peers
+
+### Concepts
+
+**AS Number**
+
+The global default node AS Number is the AS Number used by the BGP agent on a
+Calico node when it has not been explicitly specified. Setting this value
+simplifies configuration when your network topology allows all of your Calico
+nodes to use the same AS Number.
+
+**Node-to-Node Mesh**
+
+The full node-to-node mesh option provides a mechanism to automatically
+configure peering between all Calico nodes. When enabled, each Calico node
+automatically sets up a BGP peering with every other Calico node in the
+network. By default this is enabled.
+
+The full node-to-node mesh provides a simple mechanism for auto-configuring
+the BGP network in small scale deployments (say 50 nodes - although this limit
+is not set in stone and Calico has been deployed with over 100 nodes in a full
+mesh topology).
+
+For large-scale deployments, or for deployments where you require a more specific
+BGP topology (e.g. peering with ToR switches) the full node-to-node mesh should be
+disabled and explicit BGP peers configured for your Calico nodes. A BGP peer may
+be configured in your Calico network as a Global BGP Peer or a Per-Node BGP Peer.
+
+**Global BGP Peers**
+
+A global BGP peer is a BGP agent that peers with every Calico node in the
+network. A typical use case for a global peer might be a mid-scale
+deployment where all of the Calico nodes are on the same L2 network and are
+each peering with the same Route Reflector (or set of Route Reflectors).
+
+**Per-Node BGP Peers**
+
+At scale, different network topologies come into play. For example, in the
+[AS per Rack model]({{site.baseurl}}/{{page.version}}/reference/private-cloud/l3-interconnect-fabric#the-as-per-rack-model)
+discussed in the reference material, each Calico node peers with
+a Route Reflector in the Top of Rack (ToR) switch. In this case the BGP
+peerings are configured on a per-node basis (i.e. these are node-specific
+peers). In the AS Per Rack model, each Calico node in a rack will be
+configured with a node-specific peering to the ToR Route Reflector.
+
+### Configuring the default node AS number
+
+When creating a Calico node, you can optionally specify an AS number to use for
+the node. If no AS number is specified, the node will use the global default
+value.
+
+Use the `calicoctl config set asNumber` command to set the global default AS
+number. If no value is configured, the default AS number is 64512.
+
+If all of your Calico nodes are within the same AS, but you require a
+different AS number to be used (e.g. because you are peering with a border
+router), changing the default AS number to the value you require eliminates
+the need to explicitly set it on a per Calico node basis. For more
+complicated topologies where you are explicitly setting the AS number on each
+node, the default value will not be used and therefore using this command is
+not necessary.
+
+> Prior to version 2.0.0, calicoctl and calico/node set the global default
+> AS number to 64511. Updating your deployment from a pre-2.0.0 version to use
+> the 2.0.0+ calicoctl and calico/node container images will not affect the
+> global value that was previously set.
+
+#### Example
+
+To set the default AS number to 64513, run the following calicoctl command on
+any node:
+
+    $ calicoctl config set asNumber 64513
+
+To view the current default value, run the command without specifying an AS
+number; the command will output the current value.
+
+    $ calicoctl config get asNumber
+    64513
+
+
+### Disabling the full node-to-node BGP mesh
+
+If you are explicitly configuring the BGP topology for your Calico network,
+you may wish to disable the full node-to-node mesh. Use the
+`calicoctl config set nodeToNodeMesh` command to disable or re-enable the mesh.
+
+If you are building your network from scratch and do not need the full
+node-to-node mesh we recommend turning off the mesh before configuring your
+nodes. If you are updating your network from a full-mesh topology to a
+different topology (e.g.
+to start using a cluster of route reflectors to
+increase scaling), configure the appropriate peers before disabling the mesh
+to ensure continuity of service.
+
+#### Example
+
+To turn off the full BGP node-to-node mesh run the following command on any
+node:
+
+    $ calicoctl config set nodeToNodeMesh off
+
+If you need to turn the full BGP node-to-node mesh back on run the following
+command on any node:
+
+    $ calicoctl config set nodeToNodeMesh on
+
+To view whether the BGP node-to-node mesh is on or off, enter the command
+without specifying the parameter; the command will output the current state.
+
+    $ calicoctl config get nodeToNodeMesh
+    on
+
+
+### Configuring a global BGP peer
+
+If your network topology includes BGP speakers that will be peered with *every*
+Calico node in your deployment, you can use the `calicoctl` resource management
+commands to set up the peering on your Calico nodes. We refer to these types
+of peer as global peers because they are configured in Calico once (globally)
+and Calico will peer every Calico node with these peers.
+
+Two situations where global BGP peer configuration is useful are (1) when adding
+a border router that is peering into a full node-to-node mesh, or (2) configuring
+a Calico network that uses one or two route reflectors to provide moderate
+scale-out capabilities. In the latter case, each Calico node would peer to
+each of the Route Reflectors and the full node-to-node mesh would be
+disabled.
+
+#### Example
+
+To add a global BGP peer at IP address 192.20.30.40 with AS number 64567 run
+the following command on any node:
+
+```
+$ cat << EOF | calicoctl create -f -
+apiVersion: v1
+kind: bgpPeer
+metadata:
+  peerIP: 192.20.30.40
+  scope: global
+spec:
+  asNumber: 64567
+EOF
+```
+
+To remove a global BGP peer that was configured with IP address 192.20.30.40
+(the AS number is not required when deleting) run the following command on any
+node:
+
+    $ calicoctl delete bgpPeer 192.20.30.40 --scope=global
+
+To view the current list of global peers run the following command on any node:
+
+```
+$ calicoctl get bgpPeer --scope=global
+SCOPE    PEERIP         NODE   ASN
+global   192.20.30.40          64567
+```
+
+### Configuring a node specific BGP peer
+
+If your network topology requires specific peerings for each Calico node, you
+can use the `calicoctl` resource management commands to set up peers
+specific to a Calico node. We refer to these as node specific peers.
+
+Configuring node specific peers is necessary when the BGP topology is more
+complicated and requires different peerings on different nodes. For example,
+the
+[AS per Rack model]({{site.baseurl}}/{{page.version}}/reference/private-cloud/l3-interconnect-fabric#the-as-per-rack-model)
+or
+[AS per Compute Server model]({{site.baseurl}}/{{page.version}}/reference/private-cloud/l3-interconnect-fabric#the-as-per-compute-server-model)
+described in the reference material.
+
+#### Example
+
+To add a BGP peer at IP address aa:bb::ff with AS number 64514,
+peering with Calico node "node1", run the following command on any node:
+
+```
+$ cat << EOF | calicoctl create -f -
+apiVersion: v1
+kind: bgpPeer
+metadata:
+  peerIP: aa:bb::ff
+  scope: node
+  node: node1
+spec:
+  asNumber: 64514
+EOF
+```
+
+To remove a BGP peer that was configured with IP address aa:bb::ff (the AS
+number is not required), peering with Calico node "node1", run the following
+command from any node:
+
+    $ calicoctl delete bgpPeer aa:bb::ff --scope=node --node=node1
+
+To view the current list of node specific peers that are configured to peer
+with Calico node "node1", run the following command from any node:
+
+```
+$ calicoctl get bgpPeer --node=node1
+SCOPE   PEERIP      NODE    ASN
+node    aa:bb::ff   node1   64514
+```
+
+### Checking the status of the BGP peers
+
+To display the status of all BGP peerings for a specific node, use the
+`calicoctl node status` command. This displays the status of all BGP peers for
+that node - this includes the peers that are automatically configured as part
+of the full node-to-node mesh and the explicitly configured global peers and
+node specific peers.
+
+Understanding the status of the BGP peerings is a useful first step in
+diagnosing why routes may not be advertised across your network, resulting in
+incorrect connectivity between your workloads.
+
+#### Example
+
+To check the status of the peerings on Calico node "node1", run
+the following command from the "node1" command line:
+
+```
+$ sudo calicoctl node status
+Calico process is running.
+
+IPv4 BGP status
++--------------+-------------------+-------+----------+-------------+
+| PEER ADDRESS |     PEER TYPE     | STATE |  SINCE   |    INFO     |
++--------------+-------------------+-------+----------+-------------+
+| 172.17.8.102 | node-to-node mesh | up    | 23:30:04 | Established |
+| 10.20.30.40  | global            | start | 16:28:38 | Connect     |
+| 192.10.0.0   | node specific     | start | 16:28:57 | Connect     |
++--------------+-------------------+-------+----------+-------------+
+
+IPv6 BGP status
++--------------+-------------------+-------+----------+-------------+
+| PEER ADDRESS |     PEER TYPE     | STATE |  SINCE   |    INFO     |
++--------------+-------------------+-------+----------+-------------+
+| aa:bb::ff    | node-to-node mesh | up    | 16:17:26 | Established |
++--------------+-------------------+-------+----------+-------------+
+```
diff --git a/v2.4/usage/configuration/conntrack.md b/v2.4/usage/configuration/conntrack.md
new file mode 100644
index 00000000000..3c26aac237d
--- /dev/null
+++ b/v2.4/usage/configuration/conntrack.md
@@ -0,0 +1,14 @@
+---
+title: Configuring Conntrack
+---
+
+A common problem on Linux systems is running out of space in the
+conntrack table, which can cause poor iptables performance. This can
+happen if you run a lot of workloads on a given host, or if your
+workloads create a lot of TCP connections or bidirectional UDP streams.
+
+To avoid this becoming a problem, we recommend increasing the conntrack
+table size.
+To do so, run the following commands:
+
+    sysctl -w net.netfilter.nf_conntrack_max=1000000
+    echo "net.netfilter.nf_conntrack_max=1000000" >> /etc/sysctl.conf
diff --git a/v2.4/usage/configuration/ip-in-ip.md b/v2.4/usage/configuration/ip-in-ip.md
new file mode 100644
index 00000000000..0f73f635d83
--- /dev/null
+++ b/v2.4/usage/configuration/ip-in-ip.md
@@ -0,0 +1,114 @@
+---
+title: Configuring IP-in-IP
+---
+
+If your network fabric performs source/destination address checks
+and drops traffic when those addresses are not recognized, it may be necessary to
+enable IP-in-IP encapsulation of the inter-workload traffic.
+
+This is often the case for public-cloud environments where you have limited control
+over the network, and in particular you have no option to set up BGP peering between
+your Calico nodes and the network routers.
+
+Calico can be configured to use IP-in-IP encapsulation by enabling the IPIP option
+on the [IP pool resource]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/ippool).
+When enabled, Calico will use IP-in-IP encapsulation when routing packets *to*
+workload IPs falling in the IP pool range.
+
+An optional `mode` field toggles when IP-in-IP is used; see the following
+sections for details.
+
+### Configuring IP-in-IP for all inter-workload traffic
+
+With the IP-in-IP `mode` set to `always`, Calico will route using IP-in-IP for
+all traffic originating from a Calico enabled host to all Calico networked containers
+and VMs within the IP Pool.
+
+The following `calicoctl` command will create or modify an IPv4 pool with
+CIDR 192.168.0.0/16 to use IP-in-IP with mode `always`:
+
+```
+$ calicoctl apply -f - << EOF
+apiVersion: v1
+kind: ipPool
+metadata:
+  cidr: 192.168.0.0/16
+spec:
+  ipip:
+    enabled: true
+    mode: always
+  nat-outgoing: true
+EOF
+```
+
+> Note that the default value for `mode` is `always`, and therefore may be omitted
+> from the request. It is included above for clarity.
+
+### Configuring cross-subnet IP-in-IP
+
+IP-in-IP encapsulation can also be performed selectively, only for traffic crossing
+subnet boundaries. This provides better performance in AWS multi-AZ deployments,
+and in general when deploying on networks where pools of nodes with L2 connectivity
+are connected via a router.
+
+To enable this feature, use an IP-in-IP `mode` of `cross-subnet`.
+
+The following `calicoctl` command will create or modify an IPv4 pool with
+CIDR 192.168.0.0/16 to use IP-in-IP with mode `cross-subnet`:
+
+
+```
+$ calicoctl apply -f - << EOF
+apiVersion: v1
+kind: ipPool
+metadata:
+  cidr: 192.168.0.0/16
+spec:
+  ipip:
+    enabled: true
+    mode: cross-subnet
+  nat-outgoing: true
+EOF
+```
+
+> **Note**
+>
+> The `cross-subnet` mode option requires each Calico node to be configured
+> with the IP address and subnet of the host. However, the subnet configuration
+> was only introduced in Calico v2.1. If any nodes in your deployment were originally
+> created with an older version of Calico, or if you are unsure whether
+> your deployment is configured correctly, follow the steps in
+> [Upgrading from pre-v2.1](#upgrading-from-pre-v21) before enabling "cross-subnet" IPIP.
+
+#### Upgrading from pre-v2.1
+
+If you are planning to use cross-subnet IPIP, your entire deployment must be running with
+Calico v2.1 or higher. See the [releases page]({{site.baseurl}}/{{page.version}}/releases)
+for details on the component versions for each release.
+
+Upgrade your deployment to use the latest Calico versions - the process for this
+depends on your orchestration system (if you are using one).
+
+Prior to Calico v2.1, the subnet information was not detected and stored in the
+node configuration. Thus, if you have calico/node instances that were deployed
+prior to v2.1, the node configuration may need updating to fix the host subnet.
+The subnet configuration must be set correctly for each node before `cross-subnet`
+IPIP mode is enabled.
+
+You can verify which of your nodes is correctly configured using calicoctl.
+
+Run `calicoctl get nodes --output=wide` to check the configuration. For example:
+
+```
+$ calicoctl get nodes --output=wide
+NAME    ASN         IPV4           IPV6
+node1   (64512)     10.0.2.15/24
+node2   (64512)     10.0.2.10/32
+```
+
+In this example, node1 has the correct subnet information whereas node2 needs
+to be fixed.
+
+The subnet configuration may be fixed in a few different ways depending on how
+you have deployed your calico/node containers. This is discussed in the
+[Configuring a Node IP Address and Subnet guide]({{site.baseurl}}/{{page.version}}/usage/configuration/node).
diff --git a/v2.4/usage/configuration/mtu.md b/v2.4/usage/configuration/mtu.md
new file mode 100644
index 00000000000..73fd71a4f3e
--- /dev/null
+++ b/v2.4/usage/configuration/mtu.md
@@ -0,0 +1,90 @@
+---
+title: Configuring MTU
+---
+
+Depending on the environment into which Calico is deployed, it may be
+helpful or even necessary to configure the MTU of the veth (or TAP) that is
+attached to each workload, and of the tunnel devices if IP-in-IP is enabled.
+
+### Selecting MTU size
+
+Typically the MTU for your workload interfaces should match the network MTU.
+If you need IP-in-IP, then the MTU size for both the workload **and** tunnel
+interfaces should be 20 bytes less than the network MTU for your network.
+This is due to the extra 20-byte header that the tunnel will add to each
+packet.
+
+#### Common MTU sizes
+
+| Network MTU      | Calico MTU | Calico MTU with IP-in-IP | Calico MTU with VXLAN (IPv4) |
+|------------------|------------|--------------------------|------------------------------|
+| 1500             | 1500       | 1480                     | 1450                         |
+| 9000             | 9000       | 8980                     | 8950                         |
+| 1460 (GCE)       | 1460       | 1440                     | 1410                         |
+| 9001 (AWS Jumbo) | 9001       | 8981                     | 8951                         |
+
+#### Default MTU sizes
+
+The default MTU for workload interfaces is 1500, to match the most
+common network MTU size. The default MTU for the IP-in-IP tunnel device
+is 1440 to match the value needed in GCE.
+
+### Setting MTU for workload network interfaces
+
+It is the job of the network plugin to create new interfaces; the current
+major plugins are CNI and libnetwork. Currently Docker and the Mesos Docker
+Containerizer integration use libnetwork, which does **not** support setting MTU.
+CNI, which is used by Kubernetes and the Mesos Unified Containerizer, supports
+configuring the MTU through the CNI configuration file.
+
+#### MTU configuration with CNI
+
+To set the MTU when using CNI, the line `"mtu": <MTU size>` must be added to
+the CNI configuration file.
+
+Example CNI configuration:
+
+```json
+{
+    "name": "any_name",
+    "cniVersion": "0.1.0",
+    "type": "calico",
+    "mtu": 1480,
+    "ipam": {
+        "type": "calico-ipam"
+    }
+}
+```
+
+> **Note:** The MTU on existing workloads will not be updated with this
+> change. To have all workloads use the new MTU, they must be restarted.
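+
+As a quick check after changing the MTU, you can confirm the value on the
+Calico workload interfaces of a host. This is a sketch; it assumes the default
+`cali` interface prefix, which may be different in your deployment:
+
+```
+# List each Calico veth and its current MTU.
+ip -o link show | awk '/cali/ {print $2, $4, $5}'
+```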
+
+### Setting MTU for tunnel network interfaces
+
+If IP-in-IP is enabled and the MTU needs to be modified, this must be
+configured by setting a Felix environment variable or by using calicoctl to set
+the proper configuration variable. Felix will set the tunnel interfaces to
+the specified MTU.
+
+#### Setting the MTU through a Felix environment variable
+
+Passing in the environment variable `FELIX_IPINIPMTU` when running the
+calico/node container will set the MTU for Felix to use.
+
+#### Setting the MTU with calicoctl
+
+To set the IP-in-IP MTU value for all Calico nodes in your cluster, use the
+following command to set the global config value:
+
+```
+calicoctl config set --raw=felix IpInIpMtu 1480
+```
+
+> **Note:** Setting the `IpInIpMtu` config option will result in an immediate
+> update to the tunnel interface MTU on all of the active nodes in your cluster.
+
+### Configuring MTU in Kubernetes self-hosted manifests
+
+When using self-hosted manifests with Kubernetes, the MTU should be set by
+updating the Calico manifest, applying the manifest with those changes, and
+then restarting each of the calico-node pods.
diff --git a/v2.4/usage/configuration/node.md b/v2.4/usage/configuration/node.md
new file mode 100644
index 00000000000..4ba65524642
--- /dev/null
+++ b/v2.4/usage/configuration/node.md
@@ -0,0 +1,126 @@
+---
+title: Configuring a Node IP Address and Subnet
+---
+
+By default, Calico automatically detects each Node's IP address and subnet. In most cases,
+this auto-detection is enough and you will not need to change the value picked by Calico.
+However, there are some scenarios where the default autodetection may not choose the right
+address. For example:
+
+- Your host has multiple external interfaces.
+- Your host may have multiple IP addresses assigned to each interface.
+- You want to change the subnet configuration of each Node to use Calico's
+  [cross-subnet IPIP]({{site.baseurl}}/{{page.version}}/usage/configuration/ip-in-ip) feature.
+- You have changed the IP of your host.
+
+This guide explains the various methods for configuring a Node's IP and subnet.
+
+### Understanding `calico/node` IP Autodetection Logic
+
+When `calico/node` is started, it determines the IP and subnet configuration using the
+following sequence:
+
+- If an IP and subnet are explicitly specified using the `IP` (or `IP6`) environment variable (passed through
+  to the container), the container will use this value *and* update the node
+  resource with that value: therefore a query of the node resource will always tell you what
+  value the calico/node container is currently using.
+- If the `IP` (or `IP6`) environment variable is set to `autodetect`, calico/node will autodetect
+  the IP and subnet configuration using the requested autodetection method when the
+  container starts, *and* update the node resource with the detected value.
+- If the `IP` (or `IP6`) environment variable is not set, and there *is* an `IPv4Address` (or `IPv6Address`)
+  value configured in the node resource, that value will be used for routing.
+- If the `IP` environment variable is not set, and there is no `IPv4Address` value configured in the node
+  resource, calico/node will autodetect an IPv4 address and subnet *and* update the
+  node resource with the detected values so that the value is persisted.
+- If the `IP6` environment variable is not set, and there is no `IPv6Address` value configured in the node
+  resource, calico/node will not perform IP6 routing on that node.
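+
+For example, to see the values that are currently persisted for a node (the
+node name `node1` here is illustrative):
+
+```
+calicoctl get node node1 -o yaml
+```
+
+The `bgp` section of the output shows the `ipv4Address` and `ipv6Address`
+values that calico/node is currently using.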
+
+> If you are starting the calico/node container using the `calicoctl node run` command,
+> there is a direct mapping between the command line switches and the environment variables that are
+> passed through to the `calico/node` container. These are listed below:
+>
+> | Environment              | CLI                        |
+> |--------------------------|----------------------------|
+> | IP                       | --ip                       |
+> | IP6                      | --ip6                      |
+> | IP_AUTODETECTION_METHOD  | --ip-autodetection-method  |
+> | IP6_AUTODETECTION_METHOD | --ip6-autodetection-method |
+
+The following subsections describe different ways to configure your deployment to
+specify the IP addresses for your nodes.
+
+#### a) Configure the IP and subnet through environment variables
+
+The IPv4 address and subnet may be explicitly specified using the `--ip` option on
+`calicoctl node run` or the `IP` environment variable if you are starting the container
+directly. For IPv6, use the equivalent `--ip6` option and `IP6` environment variable.
+
+If you omit the subnet, it is assumed to be /32 for IPv4 and /128 for IPv6 - it is
+recommended to include the subnet information if you specify the IP addresses using
+this approach.
+
+For example (if using calicoctl node run):
+```
+calicoctl node run --ip=10.0.2.10/24
+```
+
+#### b) Autodetect the IP and subnet
+
+The `calico/node` container can be configured to autodetect the IPv4 address and subnet every time it
+is restarted. Use a value of `autodetect` for the IP address in the `--ip` option
+on `calicoctl node run` or the `IP` environment variable if you are starting the container
+directly.
+
+In addition, the `--ip-autodetection-method` argument or the `IP_AUTODETECTION_METHOD`
+environment variable can be used to specify the method used to autodetect the host address
+and subnet. See the [calico/node configuration guide]({{site.baseurl}}/{{page.version}}/reference/node/configuration)
+and [calicoctl command reference]({{site.baseurl}}/{{page.version}}/reference/calicoctl/commands/node/run)
+for details.
+
+For IPv6, use the equivalent `--ip6` and `--ip6-autodetection-method` options,
+and `IP6` and `IP6_AUTODETECTION_METHOD` environment variables.
+
+For example (if using calicoctl node run):
+```
+calicoctl node run --ip=autodetect --ip-autodetection-method=can-reach=8.8.8.8
+```
+
+#### c) Manually configure the node resource
+
+The IP addresses may also be set by updating the node resource.
+
+You can use `calicoctl` to query the current configuration and then apply updates.
+For example:
+
+```
+# Start by querying the current node configuration
+$ calicoctl get node node2 -o yaml
+- apiVersion: v1
+  kind: node
+  metadata:
+    name: node2
+  spec:
+    bgp:
+      ipv4Address: 10.0.2.10/32
+      ipv6Address: fd80:24e2:f998:72d6::/128
+
+# Now reconfigure the node with updated ipv4Address to include the correct
+# subnet.
+$ calicoctl apply -f - << EOF
+- apiVersion: v1
+  kind: node
+  metadata:
+    name: node2
+  spec:
+    bgp:
+      ipv4Address: 10.0.2.10/24
+      ipv6Address: fd80:24e2:f998:72d6::/120
+EOF
+```
+
+> Note that if you plan to edit the resource to configure the IP addresses, make sure
+> you are not specifying the IP address options or environment variables when starting the
+> `calico/node` container - otherwise those values will overwrite the values
+> configured through the resource.
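+
+For completeness, approach a) when starting the container directly with Docker
+(rather than via `calicoctl node run`) looks like the following sketch; the
+image tag, etcd endpoint, addresses, and mounted volumes are illustrative and
+will vary with your deployment:
+
+```
+# Start calico/node with an explicitly configured IPv4 address and subnet.
+docker run -d --net=host --privileged --name=calico-node \
+  -e IP=10.0.2.10/24 \
+  -e ETCD_ENDPOINTS=http://127.0.0.1:2379 \
+  -v /var/run/calico:/var/run/calico \
+  -v /lib/modules:/lib/modules \
+  calico/node:v2.4.0
+```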
+
+
diff --git a/v2.4/usage/decommissioning-a-node.md b/v2.4/usage/decommissioning-a-node.md
new file mode 100644
index 00000000000..791cf5d51c4
--- /dev/null
+++ b/v2.4/usage/decommissioning-a-node.md
@@ -0,0 +1,80 @@
+---
+title: Decommissioning a Node
+---
+
+### Why you might be interested in this guide
+
+- You are decommissioning a host running calico/node or removing it from your
+  cluster.
+- You are renaming a Node.
+- You are receiving an error about an IP address already in use.
+- Hosts are regularly added and removed from your cluster.
+
+### Purpose of this page
+
+This page provides guidance on how to remove a host that is part of a Calico
+cluster and clean up the associated [Node resource][Node resource reference]
+information.
+
+### Prerequisites
+
+- Prior to removing any Node resource from the datastore, the calico/node
+  container should be stopped on the corresponding host, and it should be
+  ensured that it will not be restarted.
+- You must have [calicoctl configured][calicoctl setup] and operational to run
+  the commands listed here.
+
+### Removing a Calico Node resource
+
+**Note:**
+Removing a Node resource will also remove the Workload Endpoint, Host
+Endpoint, and IP Address resources and any other sub-configuration items
+associated with that Node.
+
+**Warning**
+- Deleting a Node resource may be service impacting if the host is still in
+  service. Ensure that the host is no longer in service before deleting the
+  Node resource.
+- Any configuration specific to the node will be removed. This would be
+  configuration like node BGP peerings or custom Felix configs.
+
+### Removing a single Calico Node resource
+
+See the example below for how to remove a node with the calicoctl command.
+
+**Caution:** see the [Warning](#removing-a-calico-node-resource) above.
+
+```
+calicoctl delete node <nodeName>
+```
+
+### Removing multiple Calico Node resources
+
+To remove several Nodes, a file can be created with several Node resources and
+then passed to the `calicoctl delete` command with the `-f` flag.
+Below is an example of how to create a file of Nodes and delete them.
+
+1. Create a file with the [Node resources][Node resource reference] that need
+   to be removed. For example:
+
+   ```
+   - apiVersion: v1
+     kind: node
+     metadata:
+       name: node-02
+   - apiVersion: v1
+     kind: node
+     metadata:
+       name: node-03
+   ```
+
+2. To delete the Nodes listed in the file, pass it to `calicoctl delete` as
+   shown below.
+
+   **Caution:** see the [Warning](#removing-a-calico-node-resource) above.
+
+   ```
+   calicoctl delete -f nodes_to_delete.yaml
+   ```
+
+[Node resource reference]: {{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/node
+[calicoctl setup]: {{site.baseurl}}/{{page.version}}/usage/calicoctl/install-and-configuration
diff --git a/v2.4/usage/external-connectivity.md b/v2.4/usage/external-connectivity.md
new file mode 100644
index 00000000000..b08cfd90115
--- /dev/null
+++ b/v2.4/usage/external-connectivity.md
@@ -0,0 +1,84 @@
+---
+title: External Connectivity
+---
+Calico creates a routed network on which your containers look like normal IP
+speakers. You can connect to them from a host in your cluster (assuming the
+network policy you've assigned allows this) using their IP address.
+
+This document discusses connectivity between Calico endpoints and hosts outside
+the cluster.
+
+## Outbound connectivity
+
+Outbound connectivity refers to connections originating from Calico endpoints
+to destinations outside the cluster.
+
+The easiest way to get outbound connectivity is to turn on NAT Outgoing on all
+Calico pools from which you want to be able to access the internet. First,
+list your configured pools:
+
+```shell
+calicoctl get ipPool
+```
+
+Then, for each pool that needs connectivity:
+
+```shell
+cat << EOF | calicoctl apply -f -
+- apiVersion: v1
+  kind: ipPool
+  metadata:
+    cidr: 192.168.0.0/16
+  spec:
+    nat-outgoing: true
+EOF
+```
+
+(Also set `ipip:` `enabled: true` in the pool spec if IP-in-IP is needed.)
+
+Please note that many solutions for inbound connectivity will also provide
+outbound connectivity.
+
+## Inbound connectivity
+
+Inbound connectivity refers to connections to Calico endpoints originating from
+outside the cluster.
+
+There are two main approaches: BGP peering into your network infrastructure, or
+using orchestrator-specific options.
+
+Remember to configure your network policy to allow traffic from the internet!
+
+### BGP peering
+
+This requires access to BGP-capable switches or routers in front of your Calico
+cluster.
+
+In general, this will involve peering the nodes in your Calico cluster with
+BGP-capable switches, which act as the gateway to reach Calico endpoints in the
+cluster from outside.
+
+A common scenario is for your container hosts to be on their own isolated layer
+2 network, like a rack in your server room or an entire data center. Access to
+that network is via a router, which is also the default router for all the
+container hosts.
+
+![hosts-on-layer-2-network]({{site.baseurl}}/images/hosts-on-layer-2-network.png)
+
+See the [BGP peering document]({{site.baseurl}}/{{page.version}}/usage/configuration/bgp)
+for information on how to set up the Calico node sides of the sessions.
+Consult the documentation for your BGP-capable switch/router to set up the
+switch sides of the sessions.
+
+If you have a small number of hosts, you can configure BGP sessions between
+your router and each Calico-enabled host. With many hosts, you may wish to use
+a route reflector or set up a Layer 3 topology.
+
+There's further advice on network topologies in the [private cloud reference documentation]({{site.baseurl}}/{{page.version}}/reference/).
+We'd also encourage you to [get in touch](http://www.projectcalico.org/contact/)
+to discuss your environment.
+
+### Orchestrator specific
+
+Calico supports a number of orchestrator-specific options for inbound
+connectivity, such as Kubernetes service IPs, or OpenStack floating IPs.
+
+Consult the [documentation for your orchestrator]({{site.baseurl}}/{{page.version}}/getting-started) for more
+information.
diff --git a/v2.4/usage/index.md b/v2.4/usage/index.md
new file mode 100644
index 00000000000..e4cd18c7640
--- /dev/null
+++ b/v2.4/usage/index.md
@@ -0,0 +1,5 @@
+---
+title: Using Calico
+---
+
+This section contains information on using Calico.
diff --git a/v2.4/usage/ipv6.md b/v2.4/usage/ipv6.md
new file mode 100644
index 00000000000..60ea1c5c2e5
--- /dev/null
+++ b/v2.4/usage/ipv6.md
@@ -0,0 +1,95 @@
+---
+title: IPv6 Support
+---
+
+Calico supports connectivity over IPv6, between compute hosts, and
+between compute hosts and their VMs. This means that, subject to
+security configuration, a VM can initiate an IPv6 connection to another
+VM, or to an IPv6 destination outside the data center; and that a VM can
+terminate an IPv6 connection from outside.
+
+## Requirements for containers
+
+Containers have no specific requirements for utilising IPv6
+connectivity.
+
+## Requirements for guest VM images
+
+When using Calico with a VM platform (e.g. OpenStack), obtaining IPv6
+connectivity requires certain configuration in the guest VM image:
+
+- When it boots up, the VM should issue a DHCPv6 request for each of
+  its interfaces, so that it can learn the IPv6 addresses that
+  OpenStack has allocated for it.
+- The VM must be configured to accept Router Advertisements.
+- If it uses the widely deployed DHCP client from ISC, the VM must
+  have a fix or workaround for [this known
+  issue](https://kb.isc.org/article/AA-01141/31/How-to-workaround-IPv6-prefix-length-issues-with-ISC-DHCP-clients.html).
+
+These requirements are not yet all met in common cloud images - but it
+is easy to remedy that by launching an image, making appropriate changes
+to its configuration files, taking a snapshot, and then using that
+snapshot thereafter instead of the original image.
+
+For example, starting from the Ubuntu 14.04 cloud image, the following
+changes will suffice to meet the requirements just listed.
+
+- In `/etc/network/interfaces.d/eth0.cfg`, add:
+
+      iface eth0 inet6 dhcp
+          accept_ra 1
+
+- In `/sbin/dhclient-script`, add at the start of the script:
+
+      new_ip6_prefixlen=128
+
+- In `/etc/sysctl.d`, create a file named `30-eth0-rs-delay.conf` with
+  contents:
+
+      net.ipv6.conf.eth0.router_solicitation_delay = 10
+
+## Implementation details
+
+Following are the key points of how IPv6 connectivity is currently
+implemented in Calico.
+
+- IPv6 forwarding is globally enabled on each compute host.
+- Felix (the Calico agent):
+  - runs `ip -6 neigh add <ip> lladdr <mac> dev <device>`, instead of the
+    IPv4 case's `arp -s`, for each endpoint that is created with an IPv6
+    address
+  - adds a static route for the endpoint's IPv6 address, via its tap
+    or veth device, just as for IPv4.
+- Dnsmasq provides both Router Advertisements and DHCPv6 service
+  (neither of which are required for container environments).
+  - Router Advertisements, without SLAAC or on-link flags, cause
+    each VM to create a default route to the link-local address of
+    the VM's TAP device on the compute host.
+  - DHCPv6 allows VMs to get their orchestrator-allocated
+    IPv6 address.
+- For container environments, we don't use Dnsmasq:
+  - rather than using Router Advertisements to create the default
+    route, we use Proxy NDP to ensure that routes to all machines go via
+    the compute host.
+  - rather than using DHCPv6 to allocate IPv6 addresses, we allocate
+    the IPv6 address directly to the container interface before we
+    move it into the container.
+- BIRD6 runs between the compute hosts to distribute routes.
+
+## OpenStack specific details
+
+In OpenStack, IPv6 connectivity requires defining an IPv6 subnet, in
+each Neutron network, with:
+
+- the IPv6 address range that you want your VMs to use
+- DHCP enabled
+- (from Juno onwards) IPv6 address mode set to DHCPv6 stateful.
+
+We suggest initially configuring both IPv4 and IPv6 subnets in each
+network. This allows handling VM images that support only IPv4 alongside
+those that support both IPv4 and IPv6, and allows a VM to be accessed
+over IPv4 in case this is needed to troubleshoot any issues with its
+IPv6 configuration.
+
+In principle, though, we are not aware of any problems with configuring
+and using IPv6-only networks in OpenStack.
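+
+For example, a Neutron subnet along the following lines would satisfy the
+requirements listed above (the network name and CIDR here are illustrative):
+
+    neutron subnet-create --ip-version 6 --enable-dhcp \
+        --ipv6-address-mode dhcpv6-stateful --ipv6-ra-mode dhcpv6-stateful \
+        --name demo-v6 demo-net fd5f:5d21:845:1c2e::/64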
diff --git a/v2.4/usage/openstack/configuration.md b/v2.4/usage/openstack/configuration.md
new file mode 100644
index 00000000000..621a9cdbce3
--- /dev/null
+++ b/v2.4/usage/openstack/configuration.md
@@ -0,0 +1,63 @@
+---
+title: Configuring Systems for use with Calico
+---
+
+When running Calico with OpenStack, you also need to configure various
+OpenStack components, as follows.
+
+### Nova (/etc/nova/nova.conf)
+
+Calico uses the Nova metadata service to provide metadata to VMs,
+without any proxying by Neutron. To make that work:
+
+- An instance of the Nova metadata API must run on every compute node.
+- `/etc/nova/nova.conf` must not set `service_neutron_metadata_proxy`
+  or `service_metadata_proxy` to `True`. (The default `False` value is
+  correct for a Calico cluster.)
+
+### Neutron server (/etc/neutron/neutron.conf)
+
+In `/etc/neutron/neutron.conf` you need the following settings to
+configure the Neutron service.
+
+| Setting     | Value                                | Meaning        |
+|-------------|--------------------------------------|----------------|
+| core_plugin | neutron.plugins.ml2.plugin.ML2Plugin | Use ML2 plugin |
+
+With OpenStack releases earlier than Liberty you will also need:
+
+| Setting                 | Value | Meaning                                 |
+|-------------------------|-------|-----------------------------------------|
+| dhcp_agents_per_network | 9999  | Allow unlimited DHCP agents per network |
+
+Optionally -- depending on how you want the Calico mechanism driver to
+connect to the etcd cluster -- you can also set the following options in
+the `[calico]` section of `/etc/neutron/neutron.conf`.
+
+| Setting   | Default Value | Meaning                                    |
+|-----------|---------------|--------------------------------------------|
+| etcd_host | localhost     | The hostname or IP of the etcd node/proxy  |
+| etcd_port | 4001          | The port to use for the etcd node/proxy    |
+
+### ML2 (.../ml2_conf.ini)
+
+In `/etc/neutron/plugins/ml2/ml2_conf.ini` you need the following
+settings to configure the ML2 plugin.
+
+| Setting              | Value       | Meaning                           |
+|----------------------|-------------|-----------------------------------|
+| mechanism_drivers    | calico      | Use Calico                        |
+| type_drivers         | local, flat | Allow 'local' and 'flat' networks |
+| tenant_network_types | local, flat | Allow 'local' and 'flat' networks |
+
+### DHCP agent (.../dhcp_agent.ini)
+
+With OpenStack releases earlier than Liberty, in
+`/etc/neutron/dhcp_agent.ini` you need the following setting to
+configure the Neutron DHCP agent.
+
+| Setting          | Value                 | Meaning                                                                                              |
+|------------------|-----------------------|------------------------------------------------------------------------------------------------------|
+| interface_driver | RoutedInterfaceDriver | Use Calico's modified DHCP agent support for TAP interfaces that are routed instead of being bridged  |
diff --git a/v2.4/usage/openstack/floating-ips.md b/v2.4/usage/openstack/floating-ips.md
new file mode 100644
index 00000000000..f3e593cfc94
--- /dev/null
+++ b/v2.4/usage/openstack/floating-ips.md
@@ -0,0 +1,86 @@
+---
+title: Floating IPs
+---
+
+networking-calico includes beta support for floating IPs. Currently this
+requires running Calico as a Neutron core plugin (i.e. `core_plugin =
+calico`) instead of as an ML2 mechanism driver.
+
+> **Note:** We would like it to work as an ML2 mechanism driver too - patches
+> and/or advice welcome!
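+
+Concretely, that means setting the core plugin in `/etc/neutron/neutron.conf`;
+a sketch of the relevant setting only:
+
+    [DEFAULT]
+    core_plugin = calico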
+
+To set up a floating IP, you need the same pattern of Neutron data model
+objects as you do for Neutron in general, which means:
+
+- a tenant network, with an instance attached to it, that will be the target of
+  the floating IP
+
+- a Neutron router, with the tenant network connected to it
+
+- a provider network with `router:external True` that is set as the
+  router's gateway (e.g. with `neutron router-gateway-set`), and with a
+  subnet with a CIDR that floating IPs will be allocated from
+
+- a floating IP, allocated from the provider network subnet, that maps onto the
+  instance attached to the tenant network.
+
+For example:
+
+    # Create tenant network and subnet
+    neutron net-create --shared calico
+    neutron subnet-create --gateway 10.65.0.1 --enable-dhcp --ip-version 4 --name calico-v4 calico 10.65.0.0/24
+
+    # Boot a VM on that network, and find its Neutron port ID.
+    nova boot [...]
+    neutron port-list
+
+    # Create external network and subnet - this is where floating
+    # IPs will be allocated from.
+    neutron net-create public --router:external True
+    neutron subnet-create public 172.16.1.0/24
+
+    # Create a router connecting the tenant and external networks.
+    neutron router-create router1
+    neutron router-interface-add router1 <tenant-subnet-id>
+    neutron router-gateway-set router1 public
+
+    # Create a floating IP and associate it with the target VM.
+    neutron floatingip-create public
+    neutron floatingip-associate <floating-ip-id> <port-id>
+
+Then the Calico agents will arrange that the floating IP is routed to the
+instance's compute host, and then DNAT'd to the instance's fixed IP address:
+
+    core@compute-node01:~$ ip r
+    default via 10.240.0.1 dev eth0
+    10.65.0.13 dev tap9a7e0868-da scope link
+    10.65.0.14 via 192.168.8.4 dev l2tpeth8-3 proto bird
+    10.65.0.23 via 192.168.8.4 dev l2tpeth8-3 proto bird
+    10.240.0.1 dev eth0 scope link
+    172.16.1.3 dev tap9a7e0868-da scope link
+    192.168.8.0/24 dev l2tpeth8-3 proto kernel scope link src 192.168.8.3
+    192.168.122.0/24 dev virbr0 proto kernel scope link src 192.168.122.1
+
+    core@compute-node01:~$ sudo iptables -L -n -v -t nat
+    [...]
+    Chain felix-FIP-DNAT (2 references)
+     pkts bytes target     prot opt in     out     source               destination
+        0     0 DNAT       all  --  *      *       0.0.0.0/0            172.16.1.3           to:10.65.0.13
+
+    Chain felix-FIP-SNAT (1 references)
+     pkts bytes target     prot opt in     out     source               destination
+        0     0 SNAT       all  --  *      *       10.65.0.13           10.65.0.13           to:172.16.1.3
+
+    Chain felix-OUTPUT (1 references)
+     pkts bytes target     prot opt in     out     source               destination
+        1    60 felix-FIP-DNAT  all  --  *      *       0.0.0.0/0            0.0.0.0/0
+
+    Chain felix-POSTROUTING (1 references)
+     pkts bytes target     prot opt in     out     source               destination
+        1    60 felix-FIP-SNAT  all  --  *      *       0.0.0.0/0            0.0.0.0/0
+
+    Chain felix-PREROUTING (1 references)
+     pkts bytes target     prot opt in     out     source               destination
+        0     0 felix-FIP-DNAT  all  --  *      *       0.0.0.0/0            0.0.0.0/0
+        0     0 DNAT       tcp  --  *      *       0.0.0.0/0            169.254.169.254      tcp dpt:80 to:127.0.0.1:8775
+    [...]
diff --git a/v2.4/usage/openstack/host-routes.md b/v2.4/usage/openstack/host-routes.md
new file mode 100644
index 00000000000..dc6788fdb22
--- /dev/null
+++ b/v2.4/usage/openstack/host-routes.md
@@ -0,0 +1,77 @@
+---
+title: Host routes
+---
+
+Neutron allows "host routes" to be configured on a subnet, with each host route
+comprising
+
+- an IP destination prefix
+- a next hop IP for routing to that prefix.
+
+When an instance is launched and gets an IP from that subnet, Neutron arranges,
+via DHCP, that the instance's routing table gets those routes.
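+
+For example, a host route can be configured on a subnet like this (a sketch;
+the subnet ID is a placeholder, and the prefix and next hop match the example
+later in this page):
+
+    neutron subnet-update <subnet-id> --host-routes type=dict list=true \
+        destination=11.11.0.0/16,nexthop=11.8.0.1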
+ +With Calico, a host route's next hop IP should be the local host +---------------------------------------------------------------- + +networking-calico supports host routes, but it's important to note that a host +route is only consistent with Calico when its next hop IP represents the local +hypervisor. This is because the local hypervisor, in a Calico setup, *always* +routes all data from an instance and so is always the next hop IP for data to +any destination. If the instance's routing table has a route with some other +next hop IP, that next hop IP address will effectively be ignored, and the data +will likely *not* pass through the implied router; instead the data will go +first to the hypervisor, and then the hypervisor's routing table will determine +its next IP hop from there. + +Specifically, each host route's next hop IP should be the gateway IP of the +subnet that the desired instance NIC is attached to, and from which it got its +IP address - where 'desired instance NIC' means the one that you want data for +that host route to go through. In networking-calico's usage, subnet gateway +IPs represent the local hypervisor, because data sent by an instance is always +routed there. + +> **Note:** networking-calico avoids unnecessary IP usage by using the subnet +> gateway IP to represent the local compute host, on every compute host where +> that subnet is being used. Although that might initially sound odd, it works +> because no data is ever sent to or from the gateway IP address; the gateway +> IP is only used as the next hop address for the first IP hop from an instance +> to its compute host, and then the compute host routes the data again, +> according to its routing table, to wherever it needs to go. This also means +> that the gateway IP address really is functioning as each instance's default +> gateway, in the generally understood sense. + +When are host routes useful with Calico? +---------------------------------------- + +Host routes are useful with Calico when an instance has multiple NICs and you +want to specify which NIC should be used for data to particular prefixes. + +When an instance has multiple NICs, it should have a default route through only +one of those NICs, and use non-default routes to direct appropriate traffic +through the other NICs. Neutron host routes can be used to establish those +non-default routes; alternatively they can also be programmed manually in the +instance. + +For example, suppose an instance has eth0 attached to a subnet with gateway +10.65.0.1, eth1 attached to a subnet with gateway 11.8.0.1, and a default route +via eth0. Then a host route like + + 11.11.0.0/16,11.8.0.1 + +can be configured for the subnet, to say that data to 11.11.0.0/16 should go +out through eth1. The instance's routing table will then be: + + default via 10.65.0.1 dev eth0 + 10.65.0.0/24 dev eth0 + 11.8.0.0/24 dev eth1 + 11.11.0.0/16 via 11.8.0.1 dev eth1 + +When an instance only has a single network attachment, and so a single NIC, +host routes cannot make any difference to how data is routed, so it is +unhelpful (although also harmless) to configure them. Regardless of what the +instance's routing table says, data must exit over the single NIC, and is +always layer-2-terminated and rerouted by the host according to the host's +routing table. It's required for the host's routing table to cover whatever +destinations instances may want to send to, and host routes don't add anything +to that. 
diff --git a/v2.4/usage/openstack/kuryr.md b/v2.4/usage/openstack/kuryr.md
new file mode 100644
index 00000000000..3ba90b8da49
--- /dev/null
+++ b/v2.4/usage/openstack/kuryr.md
@@ -0,0 +1,37 @@
+---
+title: Kuryr
+---
+
+networking-calico works with Kuryr; this means using Neutron, with the Calico
+ML2 driver, to provide networking for container workloads.
+
+You can use DevStack to install a single-node Calico/Kuryr system, with a
+`local.conf` file like this:
+
+    [[local|localrc]]
+    ADMIN_PASSWORD=015133ea2bdc46ed434c
+    DATABASE_PASSWORD=d0060b07d3f3631ece78
+    RABBIT_PASSWORD=6366743536a8216bde26
+    SERVICE_PASSWORD=91eb72bcafb4ddf246ab
+    SERVICE_TOKEN=c5680feca5e2c9c8f820
+
+    enable_plugin networking-calico git://git.openstack.org/openstack/networking-calico
+    enable_plugin kuryr git://git.openstack.org/openstack/kuryr
+    enable_service kuryr
+    enable_service etcd-server
+    enable_service docker-engine
+
+    LOGFILE=stack.log
+    LOG_COLOR=False
+
+Please follow general Kuryr instructions for creating a Docker network that
+uses Kuryr as its backend, and for launching containers on that network. Then
+if you look at the IP routing table and iptables, you will see Calico routes to
+the containers.
+
+Calico for containers without Kuryr
+-----------------------------------
+
+The Calico project also provides networking for containers more directly,
+without Neutron and Kuryr as intermediaries. Please see [Getting
+Started]({{site.baseurl}}/{{page.version}}/getting-started) for details.
diff --git a/v2.4/usage/openstack/semantics.md b/v2.4/usage/openstack/semantics.md
new file mode 100644
index 00000000000..ad51ca386c8
--- /dev/null
+++ b/v2.4/usage/openstack/semantics.md
@@ -0,0 +1,119 @@
+---
+title: Detailed Semantics
+---
+
+A 'Calico' network is a Neutron network (either provider or tenant) whose
+connectivity is implemented, on every compute host with instances attached to
+that network, by the `calico` plugin or ML2 mechanism driver. There can be
+just one Calico network, or any number of them. This page describes the
+connectivity that Calico provides between instances attached to the same
+network, between instances attached to different Calico networks, and
+between instances and the Internet; and it explains how and why this
+connectivity differs in some details from traditional Neutron API semantics.
+
+## Connectivity between instances on the same network
+
+Calico provides IP connectivity, but not layer 2 (L2) adjacency, between
+instances attached to the same Calico network. This means that:
+
+- An instance can ping the IP of another instance, and make other IP-based
+  connections to other instances. (Unless restricted by security group
+  configuration.)
+
+- If an instance probes the IP path to another instance, it will find that
+  there are intermediate IP hops in the path; or in other words, that the
+  instances are not directly connected.
+
+- Applications or protocols that actually require L2 adjacency - such as
+  routing protocols like OSPF - will not run successfully on instances on a
+  Calico network. But the vast majority of applications that are IP-based will
+  be just fine.
+
+Traditionally, a Neutron network has always provided L2 adjacency between its
+instances, so this is the first way that Calico differs from traditional
+Neutron semantics. Up to and including the Mitaka release, L2 adjacency was an
+assumed property of a Neutron network; so deployments using Calico simply had
+to *understand* that Calico networks were different in this detail.
+
+As of the Newton release, Calico's IP-only connectivity is expressible in the
+Neutron API, as a Network whose `l2_adjacency` property is `False`.
+However, work is still needed to make Calico networks report `l2_adjacency
+False`, so at the moment - unfortunately - it *still* has to be understood that
+Calico networks do not provide L2 adjacency, even though they report
+`l2_adjacency True` when queried on the API.
+
+> **Note:** Calico's connectivity design, based on IP routing, allows unicast IP
+> and anycast IP. Anycast IP also requires support for allowed-address-pairs,
+> or some other way of assigning the same IP address to more than one instance;
+> work for allowed-address-pairs support is in progress at
+> https://review.openstack.org/#/c/344008/. Multicast IP support is on our
+> roadmap but not yet implemented. Broadcast IP is not possible because it
+> depends on L2 adjacency.
+
+## Connectivity between different Calico networks
+
+Calico provides *exactly* the same connectivity between instances on different
+Calico networks as it does between instances on the same Calico network.
+
+It is important to note that this is equally true for 'provider' and 'tenant'
+Calico networks (i.e. for networks that are provisioned by the cloud operator,
+or by a particular tenant or project), and for connectivity between any mix of
+those networks. Standard Neutron API semantics say that a tenant network that
+is not connected to a router is isolated at the connectivity level; with
+Calico there is no way to get a tenant network that is isolated by default in
+that sense - even if the Calico tenant network is *not* connected to a Neutron
+router, or if there are no Neutron routers in the deployment at all.
+
+Calico works this way because it targets use cases where instances are attached
+either to provider networks directly, or (in Neutron data model terms) to
+tenant networks that *are* attached through a router to a provider network.
+One reason for the latter case is to use floating IPs with Calico, because in
+current Neutron the target of a floating IP has to be an instance attached to a
+tenant network. For more on this, see [Floating
+IPs]({{site.baseurl}}/{{page.version}}/usage/openstack/floating-ips).
+
+## Flat IP addressing
+
+An implication of that connectivity between networks is that Calico assumes
+that the IP addresses it handles are all in a single, flat address space. For
+example, if one network has a subnet with CIDR 10.65.0.0/24, and another
+network has a subnet with CIDR 172.18.0.0/16, an instance with IP 10.65.0.2 can
+directly address an instance on the other network with IP 172.18.3.23, and the
+IP packet will travel all the way between them with source IP 10.65.0.2 and
+destination IP 172.18.3.23. There is no NAT anywhere along this datapath.
+
+## Evaluation against Neutron semantics
+
+Calico targets use cases that correspond to two Neutron data model patterns.
+
+Firstly, where instances are attached directly to provider networks:
+
+![]({{site.baseurl}}/images/networking-calico/calico-provider.png)
+
+Secondly, where instances are attached to an externally-connected tenant
+network:
+
+![]({{site.baseurl}}/images/networking-calico/calico-tenant.png)
+
+In the general case those patterns may be combined - so in general there may be
+any number of Calico provider networks, and any number of Calico tenant
+networks, so long as each of the tenant networks is connected through some
+router to a provider network.
+The purpose of using tenant networks - instead
+of always using provider networks - is only to enable floating IPs. The
+purpose of provisioning multiple networks of either kind - instead of just
+one - is typically to allow the user to control what kind of fixed IP an
+instance gets.
+
+However many Calico networks there are, all their IP addresses (in associated
+Neutron subnet or subnet pool objects) must be defined or understood as
+belonging to a single, flat address space.
+
+Finally, it must be understood that there is no L2 adjacency between any
+instances, even those that are attached to the same network. In a future
+OpenStack release, we hope to make this explicit, by arranging for Calico
+networks to report `l2_adjacency False`.
+
+Subject to those restrictions and understandings, we believe that
+networking-calico fully implements Neutron semantics, i.e. that it provides the
+connectivity that an operator would expect for a given sequence of Neutron API
+setup calls.
diff --git a/v2.4/usage/openstack/service-ips.md b/v2.4/usage/openstack/service-ips.md
new file mode 100644
index 00000000000..1896b4afb0c
--- /dev/null
+++ b/v2.4/usage/openstack/service-ips.md
@@ -0,0 +1,360 @@
+---
+title: Service IPs
+---
+
+Calico supports two approaches for assigning a service IP to a Calico-networked
+VM:
+
+- using a floating IP
+
+- using an additional fixed IP on the relevant Neutron port.
+
+Both of these are standard Neutron practice - in other words, operations that
+have long been supported on the Neutron API. They are not Calico-specific,
+except insofar as the Calico driver needs to implement some of the low-level
+operations that are needed to make the expected semantics work.
+
+The key semantic difference between those approaches is that:
+
+- With a floating IP, the target VM itself is not aware of the service IP.
+  Instead, data sent to the floating IP is DNAT'd, to the target VM's fixed IP,
+  before that data reaches the target VM. So the target VM only ever sees data
+  addressed to its fixed IP.
+
+- With the service IP as an additional fixed IP, the target VM is (and must be)
+  aware of the service IP, because data addressed to the service IP reaches the
+  target VM without any DNAT.
+
+The use of floating IPs is already well known, so we won't labour over how to
+use them here. For some additional information on how Calico supports floating
+IPs, see [Floating
+IPs]({{site.baseurl}}/{{page.version}}/usage/openstack/floating-ips).
+
+The use and maintenance of additional fixed IPs, however, is not so well
+known, so in the following transcripts we demonstrate this approach for
+assigning a service IP to a Calico-networked VM.
+
+We begin by creating a test VM that will be the target of the service IP.
+
+Creating a test VM
+------------------
+
+Check the name of the available CirrOS image:
+
+    core@access-node$ nova image-list
+    WARNING: Command image-list is deprecated and will be removed after Nova 15.0.0 is released. Use python-glanceclient or openstackclient instead.
+ +--------------------------------------+---------------------+--------+--------+ + | ID | Name | Status | Server | + +--------------------------------------+---------------------+--------+--------+ + | b69ab3bd-2bbc-4086-b4ae-f01d9f6b5078 | cirros-0.3.2-x86_64 | ACTIVE | | + | 866879b9-532b-44c6-a547-ac59de68df2d | ipv6_enabled_image | ACTIVE | | + +--------------------------------------+---------------------+--------+--------+ + +Boot a VM: + + core@access-node$ nova boot --flavor m1.tiny --image cirros-0.3.2-x86_64 --nic net-name=demo-net testvm1 + +--------------------------------------+------------------------------------------------------------+ + | Property | Value | + +--------------------------------------+------------------------------------------------------------+ + | OS-DCF:diskConfig | MANUAL | + | OS-EXT-AZ:availability_zone | nova | + | OS-EXT-SRV-ATTR:host | - | + | OS-EXT-SRV-ATTR:hypervisor_hostname | - | + | OS-EXT-SRV-ATTR:instance_name | instance-0000000d | + | OS-EXT-STS:power_state | 0 | + | OS-EXT-STS:task_state | scheduling | + | OS-EXT-STS:vm_state | building | + | OS-SRV-USG:launched_at | - | + | OS-SRV-USG:terminated_at | - | + | accessIPv4 | | + | accessIPv6 | | + | adminPass | HKLzcUT5L52B | + | config_drive | | + | created | 2017-01-13T13:50:32Z | + | flavor | m1.tiny (1) | + | hostId | | + | id | b6d8a3c4-9674-4972-9151-11107b60d622 | + | image | cirros-0.3.2-x86_64 (b69ab3bd-2bbc-4086-b4ae-f01d9f6b5078) | + | key_name | - | + | metadata | {} | + | name | testvm1 | + | os-extended-volumes:volumes_attached | [] | + | progress | 0 | + | security_groups | default | + | status | BUILD | + | tenant_id | 26778b0f745143c5a9b0c7e1a621bb80 | + | updated | 2017-01-13T13:50:32Z | + | user_id | 7efbea74c20a4eeabc00b7740aa4d353 | + +--------------------------------------+------------------------------------------------------------+ + +Check when the VM has booted: + + core@access-node$ nova list + +--------------------------------------+---------+--------+------------+-------------+----------------------------------------------+ + | ID | Name | Status | Task State | Power State | Networks | + +--------------------------------------+---------+--------+------------+-------------+----------------------------------------------+ + | b6d8a3c4-9674-4972-9151-11107b60d622 | testvm1 | ACTIVE | - | Running | demo-net=10.28.0.13, fd5f:5d21:845:1c2e:2::d | + +--------------------------------------+---------+--------+------------+-------------+----------------------------------------------+ + + core@access-node$ nova show testvm1 + +--------------------------------------+------------------------------------------------------------+ + | Property | Value | + +--------------------------------------+------------------------------------------------------------+ + | OS-DCF:diskConfig | MANUAL | + | OS-EXT-AZ:availability_zone | neil-fv-0-ubuntu-kilo-compute-node01 | + | OS-EXT-SRV-ATTR:host | neil-fv-0-ubuntu-kilo-compute-node01 | + | OS-EXT-SRV-ATTR:hypervisor_hostname | neil-fv-0-ubuntu-kilo-compute-node01 | + | OS-EXT-SRV-ATTR:instance_name | instance-0000000d | + | OS-EXT-STS:power_state | 1 | + | OS-EXT-STS:task_state | - | + | OS-EXT-STS:vm_state | active | + | OS-SRV-USG:launched_at | 2017-01-13T13:50:39.000000 | + | OS-SRV-USG:terminated_at | - | + | accessIPv4 | | + | accessIPv6 | | + | config_drive | | + | created | 2017-01-13T13:50:32Z | + | demo-net network | 10.28.0.13, fd5f:5d21:845:1c2e:2::d | + | flavor | m1.tiny (1) | + | hostId | 
bf3ce3c7146ba6cafd43be03886de8755e2b5c8e9f71aa9bfafde9a0 |
+    | id                                   | b6d8a3c4-9674-4972-9151-11107b60d622                       |
+    | image                                | cirros-0.3.2-x86_64 (b69ab3bd-2bbc-4086-b4ae-f01d9f6b5078) |
+    | key_name                             | -                                                          |
+    | metadata                             | {}                                                         |
+    | name                                 | testvm1                                                    |
+    | os-extended-volumes:volumes_attached | []                                                         |
+    | progress                             | 0                                                          |
+    | security_groups                      | default                                                    |
+    | status                               | ACTIVE                                                     |
+    | tenant_id                            | 26778b0f745143c5a9b0c7e1a621bb80                           |
+    | updated                              | 2017-01-13T13:50:39Z                                       |
+    | user_id                              | 7efbea74c20a4eeabc00b7740aa4d353                           |
+    +--------------------------------------+------------------------------------------------------------+
+
+The VM has been given a fixed IP of 10.28.0.13. Let's look at the corresponding Neutron port:
+
+    core@access-node$ neutron port-list
+    +--------------------------------------+------+-------------------+------------------------------------------------------------------------------------------------+
+    | id                                   | name | mac_address       | fixed_ips                                                                                      |
+    +--------------------------------------+------+-------------------+------------------------------------------------------------------------------------------------+
+    | 656b3617-570d-473e-a5dd-90b61cb0c49f |      | fa:16:3e:4d:d5:25 |                                                                                                |
+    | 9a7e0868-da7a-419e-a7ad-9d37e11091b8 |      | fa:16:3e:28:a9:a4 | {"subnet_id": "0a1221f2-e6ed-413d-a040-62a266bd0d8f", "ip_address": "10.28.0.13"}             |
+    |                                      |      |                   | {"subnet_id": "345fec2e-6493-44de-a489-97b755c16dd4", "ip_address": "fd5f:5d21:845:1c2e:2::d"} |
+    | a4b26bcc-ba94-4033-a9fc-edaf151c0c20 |      | fa:16:3e:74:46:bd |                                                                                                |
+    | a772a5e1-2f13-4fc3-96d5-fa1c29717637 |      | fa:16:3e:c9:c6:8f |                                                                                                |
+    +--------------------------------------+------+-------------------+------------------------------------------------------------------------------------------------+
+
+Adding a service IP to the Neutron port as an extra fixed IP
+------------------------------------------------------------
+
+Now we want to set up a service IP - let's say `10.28.0.23` - that
+initially points to that VM, `testvm1`.
One way to do that is to add the +service IP as a second 'fixed IP' on the Neutron port: + + core@access-node$ neutron port-update --fixed-ip subnet_id=0a1221f2-e6ed-413d-a040-62a266bd0d8f,ip_address=10.28.0.13 --fixed-ip subnet_id=0a1221f2-e6ed-413d-a040-62a266bd0d8f,ip_address=10.28.0.23 9a7e0868-da7a-419e-a7ad-9d37e11091b8 + Updated port: 9a7e0868-da7a-419e-a7ad-9d37e11091b8 + + core@access-node$ neutron port-show 9a7e0868-da7a-419e-a7ad-9d37e11091b8 + +-----------------------+-----------------------------------------------------------------------------------+ + | Field | Value | + +-----------------------+-----------------------------------------------------------------------------------+ + | admin_state_up | True | + | allowed_address_pairs | | + | binding:host_id | neil-fv-0-ubuntu-kilo-compute-node01 | + | binding:profile | {} | + | binding:vif_details | {"port_filter": true, "mac_address": "00:61:fe:ed:ca:fe"} | + | binding:vif_type | tap | + | binding:vnic_type | normal | + | device_id | b6d8a3c4-9674-4972-9151-11107b60d622 | + | device_owner | compute:None | + | extra_dhcp_opts | | + | fixed_ips | {"subnet_id": "0a1221f2-e6ed-413d-a040-62a266bd0d8f", "ip_address": "10.28.0.13"} | + | | {"subnet_id": "0a1221f2-e6ed-413d-a040-62a266bd0d8f", "ip_address": "10.28.0.23"} | + | id | 9a7e0868-da7a-419e-a7ad-9d37e11091b8 | + | mac_address | fa:16:3e:28:a9:a4 | + | name | | + | network_id | 60651076-af2a-4c6d-8d64-500b53a4e547 | + | security_groups | 75fccd0a-ef3d-44cd-91ec-ef22941f50f5 | + | status | ACTIVE | + | tenant_id | 26778b0f745143c5a9b0c7e1a621bb80 | + +-----------------------+-----------------------------------------------------------------------------------+ + +Now look at local IP routes, and we see that we have a route to `10.28.0.23`: + + core@access-node$ ip r + default via 10.240.0.1 dev eth0 proto static metric 100 + 10.28.0.13 via 192.168.8.3 dev l2tpeth8-1 proto bird + 10.28.0.23 via 192.168.8.3 dev l2tpeth8-1 proto bird + [...] + +Note that, on the machine where we're running these commands: + +- BIRD is running, peered with the BIRDs that Calico runs on each compute node. + That is what causes VM routes (including `10.28.0.23`) to appear here. + +- 192.168.8.3 is the IP of the compute node that is hosting `testvm1`. 
+ +We can also double check that `10.28.0.23` has appeared as a local device +route on the relevant compute node: + + core@access-node$ ssh core@192.168.8.3 ip r + default via 10.240.0.1 dev eth0 + 10.28.0.13 dev tap9a7e0868-da scope link + 10.28.0.23 dev tap9a7e0868-da scope link + 10.240.0.1 dev eth0 scope link + 192.168.8.0/24 dev l2tpeth8-3 proto kernel scope link src 192.168.8.3 + 192.168.122.0/24 dev virbr0 proto kernel scope link src 192.168.122.1 + +We also need - because with this approach, data that is addressed to +`10.28.0.23` will be routed to the VM without any NAT - to tell the VM +itself that it has the extra `10.28.0.23` address: + + core@access-node$ ssh cirros@10.28.0.13 + cirros@10.28.0.13's password: + $ ip a + 1: lo: mtu 16436 qdisc noqueue + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever + 2: eth0: mtu 1500 qdisc pfifo_fast qlen 1000 + link/ether fa:16:3e:28:a9:a4 brd ff:ff:ff:ff:ff:ff + inet 10.28.0.13/16 brd 10.28.255.255 scope global eth0 + inet6 fe80::f816:3eff:fe28:a9a4/64 scope link + valid_lft forever preferred_lft forever + $ sudo ip a a 10.28.0.23/16 dev eth0 + $ ip a + 1: lo: mtu 16436 qdisc noqueue + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever + 2: eth0: mtu 1500 qdisc pfifo_fast qlen 1000 + link/ether fa:16:3e:28:a9:a4 brd ff:ff:ff:ff:ff:ff + inet 10.28.0.13/16 brd 10.28.255.255 scope global eth0 + inet 10.28.0.23/16 scope global secondary eth0 + inet6 fe80::f816:3eff:fe28:a9a4/64 scope link + valid_lft forever preferred_lft forever + $ Connection to 10.28.0.13 closed. + +And now we can access the VM on its service IP: + + core@access-node$ ssh cirros@10.28.0.23 + The authenticity of host '10.28.0.23 (10.28.0.23)' can't be established. + RSA key fingerprint is 65:a5:b0:0c:e2:c4:ac:94:2a:0c:64:b8:bc:5a:aa:66. + Are you sure you want to continue connecting (yes/no)? yes + + Warning: Permanently added '10.28.0.23' (RSA) to the list of known hosts. 
cirros@10.28.0.23's password:
+    $
+
+Note that we already have security set up that allows SSH to the instance from
+our access machine (`192.168.8.1`):
+
+    core@access-node$ neutron security-group-list
+    +--------------------------------------+---------+----------------------------------------------------------------------+
+    | id                                   | name    | security_group_rules                                                 |
+    +--------------------------------------+---------+----------------------------------------------------------------------+
+    | 75fccd0a-ef3d-44cd-91ec-ef22941f50f5 | default | egress, IPv4                                                         |
+    |                                      |         | egress, IPv6                                                         |
+    |                                      |         | ingress, IPv4, 22/tcp, remote_ip_prefix: 192.168.8.1/32              |
+    |                                      |         | ingress, IPv4, remote_group_id: 75fccd0a-ef3d-44cd-91ec-ef22941f50f5 |
+    |                                      |         | ingress, IPv6, remote_group_id: 75fccd0a-ef3d-44cd-91ec-ef22941f50f5 |
+    | 903d9936-ce72-4756-a2cc-7c95a846e7e5 | default | egress, IPv4                                                         |
+    |                                      |         | egress, IPv6                                                         |
+    |                                      |         | ingress, IPv4, 22/tcp, remote_ip_prefix: 192.168.8.1/32              |
+    |                                      |         | ingress, IPv4, remote_group_id: 903d9936-ce72-4756-a2cc-7c95a846e7e5 |
+    |                                      |         | ingress, IPv6, remote_group_id: 903d9936-ce72-4756-a2cc-7c95a846e7e5 |
+    +--------------------------------------+---------+----------------------------------------------------------------------+
+
+Moving the service IP to another VM
+-----------------------------------
+
+Service IPs are often used for HA, so they need to be moved to target a
+different VM if the first one fails for some reason (or if the HA system just
+decides to cycle the active VM).
+
+To demonstrate that, we create a second test VM:
+
+    core@access-node$ nova boot --flavor m1.tiny --image cirros-0.3.2-x86_64 --nic net-name=demo-net testvm2
+    [...]
+    core@access-node$ nova list
+    +--------------------------------------+---------+--------+------------+-------------+----------------------------------------------+
+    | ID                                   | Name    | Status | Task State | Power State | Networks                                     |
+    +--------------------------------------+---------+--------+------------+-------------+----------------------------------------------+
+    | b6d8a3c4-9674-4972-9151-11107b60d622 | testvm1 | ACTIVE | -          | Running     | demo-net=10.28.0.13, 10.28.0.23              |
+    | bb4ef5e3-dc77-472e-af6f-3f0d8c3e5a6d | testvm2 | ACTIVE | -          | Running     | demo-net=10.28.0.14, fd5f:5d21:845:1c2e:2::e |
+    +--------------------------------------+---------+--------+------------+-------------+----------------------------------------------+
+    core@access-node$ neutron port-list
+    +--------------------------------------+------+-------------------+------------------------------------------------------------------------------------------------+
+    | id                                   | name | mac_address       | fixed_ips                                                                                      |
+    +--------------------------------------+------+-------------------+------------------------------------------------------------------------------------------------+
+    | 656b3617-570d-473e-a5dd-90b61cb0c49f |      | fa:16:3e:4d:d5:25 |                                                                                                |
+    | 7627a298-a2db-4a1a-bc07-9f0f10f58363 |      | fa:16:3e:8e:dc:33 | {"subnet_id": "0a1221f2-e6ed-413d-a040-62a266bd0d8f", "ip_address": "10.28.0.14"}             |
+    |                                      |      |                   | {"subnet_id": "345fec2e-6493-44de-a489-97b755c16dd4", "ip_address": "fd5f:5d21:845:1c2e:2::e"} |
+    | 9a7e0868-da7a-419e-a7ad-9d37e11091b8 |      | fa:16:3e:28:a9:a4 | {"subnet_id": "0a1221f2-e6ed-413d-a040-62a266bd0d8f", "ip_address": "10.28.0.13"}             |
+    |                                      |      |                   | {"subnet_id": "0a1221f2-e6ed-413d-a040-62a266bd0d8f", "ip_address": "10.28.0.23"}             |
+    | a4b26bcc-ba94-4033-a9fc-edaf151c0c20 |      | fa:16:3e:74:46:bd |                                                                                                |
+    | a772a5e1-2f13-4fc3-96d5-fa1c29717637 |      | fa:16:3e:c9:c6:8f |                                                                                                |
+--------------------------------------+------+-------------------+------------------------------------------------------------------------------------------------+ + +Remove the service IP from the first VM: + + core@access-node$ neutron port-update --fixed-ip subnet_id=0a1221f2-e6ed-413d-a040-62a266bd0d8f,ip_address=10.28.0.13 9a7e0868-da7a-419e-a7ad-9d37e11091b8 + +And add it to the second: + + core@access-node$ neutron port-update --fixed-ip subnet_id=0a1221f2-e6ed-413d-a040-62a266bd0d8f,ip_address=10.28.0.14 --fixed-ip subnet_id=0a1221f2-e6ed-413d-a040-62a266bd0d8f,ip_address=10.28.0.23 7627a298-a2db-4a1a-bc07-9f0f10f58363 + +And tell `testvm2` that it now has the service IP `10.28.0.23`: + + core@access-node$ ssh cirros@10.28.0.14 + The authenticity of host '10.28.0.14 (10.28.0.14)' can't be established. + RSA key fingerprint is 6a:02:7f:3a:bf:0c:91:de:c4:d6:e7:f6:81:3f:6a:85. + Are you sure you want to continue connecting (yes/no)? yes + + Warning: Permanently added '10.28.0.14' (RSA) to the list of known hosts. + cirros@10.28.0.14's password: + $ sudo ip a a 10.28.0.23/16 dev eth0 + +Now connections to `10.28.0.23` go to `testvm2`: + + core@access-node$ ssh cirros@10.28.0.23 + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + @ WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED! @ + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + IT IS POSSIBLE THAT SOMEONE IS DOING SOMETHING NASTY! + Someone could be eavesdropping on you right now (man-in-the-middle attack)! + It is also possible that a host key has just been changed. + The fingerprint for the RSA key sent by the remote host is + 6a:02:7f:3a:bf:0c:91:de:c4:d6:e7:f6:81:3f:6a:85. + Please contact your system administrator. + Add correct host key in /home/core/.ssh/known_hosts to get rid of this message. + Offending RSA key in /home/core/.ssh/known_hosts:4 + RSA host key for 10.28.0.23 has changed and you have requested strict checking. + Host key verification failed. + core@access-node$ rm ~/.ssh/known_hosts + core@access-node$ ssh cirros@10.28.0.23 + The authenticity of host '10.28.0.23 (10.28.0.23)' can't be established. + RSA key fingerprint is 6a:02:7f:3a:bf:0c:91:de:c4:d6:e7:f6:81:3f:6a:85. + Are you sure you want to continue connecting (yes/no)? yes + + Warning: Permanently added '10.28.0.23' (RSA) to the list of known hosts. + cirros@10.28.0.23's password: + $ hostname + testvm2 + $ ip a + 1: lo: mtu 16436 qdisc noqueue + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever + 2: eth0: mtu 1500 qdisc pfifo_fast qlen 1000 + link/ether fa:16:3e:8e:dc:33 brd ff:ff:ff:ff:ff:ff + inet 10.28.0.14/16 brd 10.28.255.255 scope global eth0 + inet 10.28.0.23/16 scope global secondary eth0 + inet6 fe80::f816:3eff:fe8e:dc33/64 scope link + valid_lft forever preferred_lft forever + $ diff --git a/v2.4/usage/routereflector/bird-rr-config.md b/v2.4/usage/routereflector/bird-rr-config.md new file mode 100644 index 00000000000..66e3e68d126 --- /dev/null +++ b/v2.4/usage/routereflector/bird-rr-config.md @@ -0,0 +1,186 @@ +--- +title: 'Configuring BIRD as a BGP Route Reflector' +--- + +For many Calico deployments, the use of a Route Reflector is not required. +However, for large scale deployments a full mesh of BGP peerings between each +of your Calico nodes may become untenable. In this case, route reflectors +allow you to remove the full mesh and scale up the size of the cluster. 
+
+These instructions will take you through installing BIRD as a BGP route
+reflector, and updating your other BIRD instances to speak to your new
+route reflector. The instructions are valid for both Ubuntu 14.04 and
+RHEL 7.
+
+For a container-based deployment, using the calico/node container, check
+out the [Calico BIRD Route Reflector container](calico-routereflector).
+
+## Prerequisites
+
+Before starting this you will need the following:
+
+- A machine running either Ubuntu 14.04 or RHEL 7 that is not already
+  being used as a compute host.
+- SSH access to the machine.
+
+## Installation
+
+### Step 1: Install BIRD
+
+#### Ubuntu 14.04
+
+Add the official [BIRD](http://bird.network.cz/) PPA. This PPA contains
+fixes to BIRD that are not yet available in Ubuntu 14.04. To add the
+PPA, run:
+
+    sudo add-apt-repository ppa:cz.nic-labs/bird
+
+Once that's done, update your package manager and install BIRD (the
+single `bird` package installs both IPv4 and IPv6 BIRD):
+
+    sudo apt-get update
+    sudo apt-get install bird
+
+#### RHEL 7
+
+First, install EPEL. Depending on your system, the following command may
+be sufficient:
+
+    sudo yum install epel-release
+
+If that fails, try the following instead:
+
+    sudo yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+
+With that complete, you can now install BIRD:
+
+    yum install -y bird{,6}
+
+### Step 2: Set your BIRD IPv4 configuration
+
+Before doing this, you'll need to take note of what BGP AS number you've
+used in your compute node install.
+
+Open `/etc/bird/bird.conf` on your route reflector system and initially
+fill it with the following template, replacing `<router_id>` with the
+IPv4 address of your route reflector:
+
+    # Configure logging
+    log syslog { debug, trace, info, remote, warning, error, auth, fatal, bug };
+    log stderr all;
+
+    # Override router ID
+    router id <router_id>;
+
+    filter import_kernel {
+      if ( net != 0.0.0.0/0 ) then {
+        accept;
+      }
+      reject;
+    }
+
+    # Turn on global debugging of all protocols
+    debug protocols all;
+
+    # This pseudo-protocol watches all interface up/down events.
+    protocol device {
+      scan time 2;    # Scan interfaces every 2 seconds
+    }
+
+Then, at the end, for each compute node in your deployment add one of
+the following blocks, replacing `<node_shortname>` with a purely
+alphabetical name for the host (this must be unique for each host, but
+the shortname is only used within this file), `<node_ip>` with the
+node's IPv4 address, and `<as_number>` with the AS number you're using:
+
+    protocol bgp <node_shortname> {
+      description "<node_ip>";
+      local as <as_number>;
+      neighbor <node_ip> as <as_number>;
+      multihop;
+      rr client;
+      graceful restart;
+      import all;
+      export all;
+    }
+
+### Step 3 (Optional): Set your BIRD IPv6 configuration
+
+If you want to use IPv6 connectivity, you'll need to repeat step 2 but
+using `/etc/bird/bird6.conf`. The *only* differences between the two
+are:
+
+- the filter needs to filter out ::/0 instead of 0.0.0.0/0
+- where before you set `<node_ip>` to the compute node's IPv4 address,
+  this time you need to set it to the compute node's IPv6 address
+
+Note that `<router_id>` should still be set to the route reflector's
+IPv4 address: you cannot use an IPv6 address in that field.
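+
+As a concrete illustration of steps 2 and 3, a hypothetical compute node with
+shortname `nodeone`, IPv4 address `192.0.2.11`, and AS number `64511` (all
+example values only) would get the following block in `/etc/bird/bird.conf`;
+the `bird6.conf` entry would be identical except that the `neighbor` address
+would be the node's IPv6 address:
+
+    # Example peering for a hypothetical node; substitute your own values.
+    protocol bgp nodeone {
+      description "192.0.2.11";
+      local as 64511;
+      neighbor 192.0.2.11 as 64511;
+      multihop;
+      rr client;
+      graceful restart;
+      import all;
+      export all;
+    }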
+
+### Step 4: Restart BIRD
+
+#### Ubuntu 14.04
+
+Restart BIRD:
+
+    sudo service bird restart
+
+Optionally, if you configured IPv6 in step 3, also restart BIRD6:
+
+    sudo service bird6 restart
+
+#### RHEL 7
+
+Restart BIRD:
+
+    systemctl restart bird
+    systemctl enable bird
+
+Optionally, if you configured IPv6 in step 3, also restart BIRD6:
+
+    systemctl restart bird6
+    systemctl enable bird6
+
+### Step 5: Reconfigure compute nodes
+
+#### OpenStack deployments
+
+If you used the `calico-gen-bird-conf.sh` script to configure your
+compute hosts, and you used the route reflector IP when you did, you do
+not need to do anything further.
+
+Otherwise, on each of your compute nodes, edit `/etc/bird/bird.conf`
+(and, if you're using IPv6, `/etc/bird/bird6.conf`) to remove all their
+peer relationships (the blocks beginning with `protocol bgp`) except for
+one. Edit that one's `neighbor` field IP address to be the IP address of
+the route reflector (either IPv4 or IPv6). Then, restart their BIRD
+instances as detailed in step 4.
+
+#### Container-based deployments
+
+For container-based deployments using the `calico/node` container, use
+`calicoctl` to disable the full mesh between each node and configure the
+route reflector as a global peer.
+
+To disable the node-to-node mesh:
+
+```
+$ calicoctl config set nodeToNodeMesh off
+```
+
+To create a global peer for the route reflector:
+
+```
+$ cat << EOF | calicoctl create -f -
+apiVersion: v1
+kind: bgpPeer
+metadata:
+  peerIP: 192.20.30.40
+  scope: global
+spec:
+  asNumber: 64567
+EOF
+```
+
+For more details/options refer to the [BGP configuration guide]({{site.baseurl}}/{{page.version}}/usage/configuration/bgp).
diff --git a/v2.4/usage/routereflector/calico-routereflector.md b/v2.4/usage/routereflector/calico-routereflector.md
new file mode 100644
index 00000000000..acb2c4f7054
--- /dev/null
+++ b/v2.4/usage/routereflector/calico-routereflector.md
@@ -0,0 +1,323 @@
+---
+title: 'Calico BIRD Route Reflector container'
+---
+
+For many Calico deployments, the use of a Route Reflector is not required.
+However, for large scale deployments a full mesh of BGP peerings between each
+of your Calico nodes may become untenable. In this case, route reflectors
+allow you to remove the full mesh and scale up the size of the cluster.
+
+This guide discusses the calico/routereflector image: a container image that
+packages up the `BIRD` BGP daemon along with the `confd` templating engine to
+provide a simple route reflector image which may be used for scaled-out Calico
+deployments.
+
+The image is currently experimental and has some key limitations discussed below.
+However, it may provide a useful framework for future development.
+
+These instructions are focused around container-based deployments that use the
+calico/node container image.
+
+For an OpenStack deployment, read [Configuring BIRD as a BGP Route Reflector](bird-rr-config).
+
+> NOTE: The API and behavior of the calico/routereflector is likely to change in
+> future releases.
+
+#### Known limitations
+
+- The calico/routereflector instance will automatically peer with the Calico
+  nodes, but it currently has no mechanism to configure peerings with non-Calico
+  BGP speakers (e.g. edge routers).
+- There is no `calicoctl` integration or similar.
+- If you are using the Kubernetes API as the Calico datastore, the Route Reflector container
+  currently only supports running as a single instance.
+- For etcdv2, the Route Reflector container may be used to form a cluster of route reflectors that
+  automatically create a full mesh between each Route Reflector.
+  - Note that there is no `calicoctl` integration and to form a cluster it is necessary to
+    configure data directly into the `etcd` datastore for each Route Reflector instance.
+  - It is not possible to form multiple separate meshed groups
+    of Route Reflectors using this image.
+
+## Starting and configuring your route reflectors
+
+Follow the appropriate section to start and configure your route reflectors depending on
+the datastore you are using for Calico:
+
+- [Using etcdv2 as the Calico datastore](#using-etcdv2-as-the-calico-datastore)
+- [Using the Kubernetes API as the Calico datastore](#using-the-kubernetes-api-as-the-calico-datastore)
+
+### Using etcdv2 as the Calico datastore
+
+#### Starting a Route Reflector instance
+
+On your Route Reflector host, ensure you have [Docker v1.6](http://www.docker.com) or greater
+installed.
+
+Run the following command to start the Route Reflector container image.
+
+```
+docker run --privileged --net=host -d \
+           -e IP=<IPv4_RR> \
+           [-e IP6=<IPv6_RR>] \
+           -e ETCD_ENDPOINTS=<http://ETCD_IP:PORT> \
+           calico/routereflector:{{site.data.versions[page.version].first.components["calico/routereflector"].version}}
+```
+
+Where:
+
+- `[]` indicates an optional parameter
+- `<IPv4_RR>` is the IPv4 address of the RR host (the BIRD instance binds to
+  the host's IPv4 address)
+- `<IPv6_RR>` is the *optional* IPv6 address of the RR host (the BIRD6 instance
+  binds to the host's IPv6 address)
+- `<ETCD_IP:PORT>` is the colon-separated IPv4 address and port of an etcd
+  node in the etcd cluster. A comma-separated list of endpoints may be
+  specified.
+
+> Note: If you require TLS/SSL enabled etcd, see the [section below](#route-reflector-with-tlsssl-etcd)
+> for details on how to start the route reflector.
+
+#### Configuring a cluster of Route Reflectors
+
+If you want to use more than one route reflector, the Route Reflector container supports
+running as a single cluster of route reflectors. The Calico BIRD Route Reflector
+takes care of creating a full mesh between all of the route reflectors in the
+cluster.
+
+To operate a cluster of these route reflectors, it is necessary to explicitly
+add an entry into etcd for each route reflector. The following steps indicate how
+to add an entry into etcd.
+
+The configuration for the Route Reflector is stored for IPv4 at:
+
+    /calico/bgp/v1/rr_v4/<RR_IPv4>
+
+and IPv6 at:
+
+    /calico/bgp/v1/rr_v6/<RR_IPv6>
+
+In all cases, the data is a JSON blob in the form:
+
+    {
+      "ip": "<RR IP address>",
+      "cluster_id": "<Cluster ID for this RR>"
+    }
+
+To add this entry into etcd, you could use the following commands:
+
+```
+# IPv4 entries
+curl -L http://<ETCD_IP:PORT>/v2/keys/calico/bgp/v1/rr_v4/<RR_IPv4> -XPUT -d value="{\"ip\":\"<RR_IPv4>\",\"cluster_id\":\"<CLUSTER_ID>\"}"
+
+# IPv6 entries
+curl -L http://<ETCD_IP:PORT>/v2/keys/calico/bgp/v1/rr_v6/<RR_IPv6> -XPUT -d value="{\"ip\":\"<RR_IPv6>\",\"cluster_id\":\"<CLUSTER_ID>\"}"
+```
+
+Replacing `<ETCD_IP:PORT>`, `<RR_IPv4>` (or `<RR_IPv6>`), and `<CLUSTER_ID>` as required. For
+example, for a Route Reflector with the values:
+
+- etcd running at http://192.0.2.10:2379
+- The Route Reflector IP address of 192.0.2.50
+- A Cluster ID of 1.0.0.1
+
+the following command would be used to configure the Route Reflector.
+
+```
+curl -L http://192.0.2.10:2379/v2/keys/calico/bgp/v1/rr_v4/192.0.2.50 -XPUT -d value="{\"ip\":\"192.0.2.50\",\"cluster_id\":\"1.0.0.1\"}"
+```
+
+See [below](#example-topology--multiple-cluster-ids) for details
+about large networks and the use and format of the cluster ID.
+
+Repeat the above instructions for every Route Reflector in the cluster.
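+
+To sanity-check an entry after it has been written, you can read the key back
+with a standard etcd v2 GET; etcd returns the stored JSON blob. Continuing
+the example values above:
+
+```
+# Read back the Route Reflector entry written above (example values).
+curl -L http://192.0.2.10:2379/v2/keys/calico/bgp/v1/rr_v4/192.0.2.50
+```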
+
+#### Route Reflector with TLS/SSL Etcd
+
+If you are running secure etcd, you will need to pass in additional options
+and set environment variables for the certificate and key files associated
+with your etcd instance.
+
+When starting the Route Reflector container image, you need to mount the
+certificate files and set an environment variable with the filepath for each
+file:
+
+```
+docker run --privileged --net=host -d \
+           -e IP=<IPv4_RR> \
+           [-e IP6=<IPv6_RR>] \
+           -e ETCD_ENDPOINTS=<https://ETCD_IP:PORT> \
+           -v <FULL_PATH_TO_CERT_DIR>:<MOUNT_DIR> \
+           -e ETCD_CA_CERT_FILE=<MOUNT_DIR>/<CA_FILE> \
+           -e ETCD_CERT_FILE=<MOUNT_DIR>/<CERT_FILE> \
+           -e ETCD_KEY_FILE=<MOUNT_DIR>/<KEY_FILE> \
+           calico/routereflector:{{site.data.versions[page.version].first.components["calico/routereflector"].version}}
+```
+
+Where `<FULL_PATH_TO_CERT_DIR>` is a directory on the host that contains
+the certificate files (you can mount multiple directories with additional
+`-v` parameters if they are in separate directories, but be sure
+to choose different `<MOUNT_DIR>` locations if this is the case).
+
+You will also need to pass the certificate and key files as parameters
+in the curl statement when adding entries:
+
+```
+# IPv4 entries
+curl --cacert <CA_FILE> --cert <CERT_FILE> --key <KEY_FILE> -L https://<ETCD_IP>:2379/v2/keys/calico/bgp/v1/rr_v4/<RR_IPv4> -XPUT -d value="{\"ip\":\"<RR_IPv4>\",\"cluster_id\":\"<CLUSTER_ID>\"}"
+# IPv6 entries
+curl --cacert <CA_FILE> --cert <CERT_FILE> --key <KEY_FILE> -L https://<ETCD_IP>:2379/v2/keys/calico/bgp/v1/rr_v6/<RR_IPv6> -XPUT -d value="{\"ip\":\"<RR_IPv6>\",\"cluster_id\":\"<CLUSTER_ID>\"}"
+```
+
+### Using the Kubernetes API as the Calico datastore
+
+If you are using Kubernetes as the datastore for Calico, the Calico Route
+Reflector container only supports running as a single route reflector. It is not
+possible with this image to set up a cluster of route reflectors.
+
+#### Starting up the Route Reflector
+
+On your Route Reflector host, ensure you have [Docker v1.6](http://www.docker.com) or greater
+installed.
+
+You will need a kubeconfig file, which you will mount into the route reflector
+container.
+
+Run the following command to start the Route Reflector container image.
+
+```
+docker run --privileged --net=host -d \
+           -e DATASTORE_TYPE=kubernetes \
+           -e KUBECONFIG=/kubeconfig \
+           -e IP=<IPv4_RR> \
+           -v <KUBECONFIG_FILE_PATH>:/kubeconfig \
+           calico/routereflector:{{site.data.versions[page.version].first.components["calico/routereflector"].version}}
+```
+
+Where:
+
+- `<IPv4_RR>` is the IPv4 address of the RR host (the BIRD instance binds to
+  the host's IPv4 address)
+- `<KUBECONFIG_FILE_PATH>` is the path to the kubeconfig file.
+
+When using the Kubernetes API as the datastore, this route reflector image only works
+as a single standalone reflector.
+
+
+## Configuring Calico to use the route reflectors
+
+Run through this section to set up the global Calico configuration
+before configuring any nodes. This only needs to be done once:
+
+- Disable the full node-to-node BGP mesh.
+- Configure the default node AS number for your network (this is used by
+  the Route Reflector image when setting up the Route Reflector full mesh).
+
+If you have a small cluster of Route Reflectors and you intend to have every
+Calico Docker node peer with every Route Reflector, set this up one time as
+global configuration.
+
+
+### Turn off the full node-to-node mesh
+
+From any Calico Docker node, run the following:
+
+    calicoctl config set nodeToNodeMesh off
+
+
+### Determine the AS number for your network
+
+From any Calico Docker node, run the following:
+
+    calicoctl get nodes --output=wide
+
+This returns a table of all configured Calico node instances and includes the AS
+number for each node.
+
+### Peering with every Route Reflector (optional)
+
+If you have a small cluster of Route Reflectors (e.g. a single RR or a pair of
+RRs for redundancy) and you intend to have every Calico Docker node peer with
+each of the Route Reflectors, you can set up the peerings as a one-time set of
+global configuration.
+
+Use `calicoctl` to configure each route reflector as a global peer (i.e. it
+peers with every node in the deployment):
+
+```
+$ calicoctl create -f - << EOF
+apiVersion: v1
+kind: bgpPeer
+metadata:
+  peerIP: <IP_RR>
+  scope: global
+spec:
+  asNumber: <AS_NUM>
+EOF
+```
+
+Where:
+- `<IP_RR>` is the IPv4 or IPv6 address of the Route Reflector.
+- `<AS_NUM>` is the AS number to use for the network (set or determined
+  above).
+
+## Setting up node-specific peering
+
+If you are deploying a cluster of Route Reflectors, with each Calico node
+peering to a subset of Route Reflectors, it will be necessary to set up the
+peerings on a node-by-node basis.
+
+This would be the typical situation when scaling out to a very large size. For
+example, you may have:
+
+- a cluster of 100 route reflectors connected in a full mesh
+- a network of 100,000 Calico Docker nodes
+- each Calico Docker node is connected to two or three different Route
+  Reflectors.
+
+### Configuring a node-specific Route Reflector peering
+
+To configure a Route Reflector as a peer of a specific node, run the following
+*from the node*:
+
+```
+$ cat << EOF | calicoctl create -f -
+apiVersion: v1
+kind: bgpPeer
+metadata:
+  peerIP: <IP_RR>
+  scope: node
+  node: <NODENAME>
+spec:
+  asNumber: <AS_NUM>
+EOF
+```
+
+Where:
+- `<IP_RR>` is the IPv4 or IPv6 address of the Route Reflector.
+- `<AS_NUM>` is the AS number to use for the network (set or determined
+  above).
+- `<NODENAME>` is the name of the node.
+
+Run this separately for each Route Reflector that you want to peer with the
+node.
+
+## Additional information
+
+### Example topology / multiple cluster IDs
+
+When the topology includes a cluster of Route Reflectors, BGP uses the concept
+of a cluster ID to ensure there are no routing loops when distributing routes.
+
+The Route Reflector image provided assumes that it has a fixed cluster ID for
+each Route Reflector rather than being configurable on a per-peer basis.
+
+For example, the topology outlined in the diagram below is based on the Top of
+Rack model:
+
+- Each rack is assigned its own cluster ID (a unique number in IPv4 address
+  format).
+- Each node (server in the rack) peers with a redundant set of route
+  reflectors specific to that rack.
+- All of the ToR route reflectors form a full mesh with each other.
+
+![Example scale topology](mesh-topology.png)
diff --git a/v2.4/usage/routereflector/mesh-topology.png b/v2.4/usage/routereflector/mesh-topology.png
new file mode 100644
index 0000000000000000000000000000000000000000..ed6916f3e3602b69053e6dc9a1c79b1d2354095e
GIT binary patch
literal 93744
ztD;_~{&sXGR#o&JW^KJ)tz+XKy(IPCqFYjw6u|Xm?zEW88rPN?ffq!caZZ6*W|js` zYg1@e*iFrAxjcZj&=?AbOKm-&D|W4(+O}nWn)xp?u->I_cdxxD7FBfPqJQGJiW+)I z5V=&Tkm+m`13szK#*COMEgCOCYRmTSis(cP#Qj}Os^$-TRDa=X2)kM*D&jn^5AHPu zUaau|SzP7{uw+ICdNv{|V^kh`vz{R;10*D#OW3aN3!KA5aymtSfg=B4!+JWAue`xG4UQ7NY2`6Ri^#5qzsnLBU4_$OGC5 zZ9?ZyAIDr%i&SWUAj2SG8R|+6EzH|ppiT6$HB~WBIM)yHQb65P=ypq-#%jK5w&SAy zIU8F1I$}tX_KSEHHSbkqN|eV462{QIm-4&)S}rP6)6ozIP&6?*T=#u8|(!O6C5#pq=hKe9Ua&+rI!7vP_*xg~aVY5(r2|c>zRL zGhY59NWbL$FJ(Ae*3C9~aYe*n$wtxx!2O=KS|C)eAB$^%X(=dlY&FC}W&(#4&0>>Z zSJdwMrR(y&(>Z6eu#eJ7&U4m1$R+{#hpn5ueN-pqI>ZmzpWi~4K z<6Yb3EzuQs|DUYMC(r?k-D)%T(K<;tCb+*QFDA#(hVPQ+n{FGr4eyRJen^ zkF-JvnJ7EH%8Vg5k14Qfh*kHkU0F>CUgzbk$a)490J}a) z$n*?gh#uL7*U>*W!*nsFKE8>-S^pbEq)d6hUdO=bH+ufwjTV|H^M9^rF8qgeuAD)e z$Nv{vc3agn7C-%xcg>f1=%?w%1r#;67woxsGNyC2s~}C%gVW}-XVYMG-@&u^-jS86 zhWFP}9~3fDrm)Zd;=L*xqOJZWEW;{i(S9h~lY?( z(=>Vf72WC5nw9Si&Fh;T`0d#G8hOYi&}UMksE+KC1B*ZWIdthyekbKZMfPk-?e5Tu z<6_-JyG&3=ds|)d;|KnTeeha)|6Y&pr>s?+bGrqwq@B?pJ3;ZZ)AJ0ME`MTv(!$xz%z?Q%|?@)_Dj0*st2qdG>YHg^4MAy>lNFWOf zqiQ^8JxEHG_Ud0-XTqM5sZ!PdS|W+5CqL?@K3(!+TFLfYvB>&kuX;q8xgT;t`8iJe|H8po@e88d@VKpFR_}cH``zM*W6W8oZGP4MoCNH#-I&^{2#X!EJQH#)V7Vz0(-J(00W`;KTz+ zDXZ_)9&>PAR^-yd&Z6b>N5>pd;+Q_FfBM-zjhOAKn*ny|K>@q#hFq_|PX*m>Je(#~ zd-)oWBy1QGd|=^`5?wMa^;<2(6c<*GZ&^IH0djr{bBNs~h7dW#V07zQ6D)puV{=~$ zb6kz*h+hwJH*(_2LHMSI$PQiUz6V&k{&`>wU+t zpyC4MVZMw>(J0er`14aXu)RBasTQi(VS|0;iqsG7I&ub_m&4z&CPyha|;zr>^;3tE{uLV~W6+qQl8oNc}D~8FJ)`xO0br%T( z$kd@mKbjpw-=%u?SrHvWfL$$(a_j=S9Nh<&@(aPUV=hX61{(xyQ?5*nvQ@(yGGTh-bsqoQM4KaN5NV4GWuKYI(mZ9tcim($;ag_7b;GSHYJ{ z0l$7H<*)SI5*S>T%Jx?ql=h=nU}y$y8%UPx*keL|QSX}{JGWVQ&kU-NrYWM4>aN6-|p4!noeCS zwY}_4rL3UW6SLgwY(4-ZnPA;#Zd=)?fd`1HlqN^l10p4HZBTY#8O$WW%*%cUKwWmJ zbv!E4d_(eTY*b_wkg)!yWUJ&krV}SU0dFL3K3@$`%;6L%s@b9ZruAjVXX^Uz2ZpM~ z;8d2OP_0wmj!SXEQFGaknrp%36Sbh(xtx(JnbJ*A=F+Uw0KgdE(^myj9%3Um$sENz zAi~L!a;}Z#uByL#7dt@=jYC4L3 zT*9q7yumNVBX=9>PE`KxSwrl(D#LWObzH|!ni+si@Qg*ILcB8f+#s#yV4Y)Sq+cD! 
z*p>QNZKnuo*AbO6Ejg=Y6OZFo`)s9Svf4n)$h@Y71N{6wJC~*mGE|?ZB6OkRRb@>& zt?)IjYJu?&>CSoEM)hxc?4lmJ?R4?zd{h>@ME-WK{rB0kbbZ9-;EsmhNvDE;0mfsn zE7y9qw^T_k`Z8)j?N!VLzmZQX7jAL4?~M+628=jjkcEN;vTG^>pro(>&SvSHP{7sv z1^Igw-zTm~l7nzF){RsKWTZ`~6)Tl44tIT;(~}qt@Pd0IH?JL~d5~DX(UsvQ*v+65 zsJsFS*5fxxyXcl78qEsN?OY}lXj#;bij-!?_^?216A=}9d85px0Q4Bp+I6wn+r0<< zig&mb+oB#nMw%jFB%sG$I94SId0G@#CY+MjYymwJN7*=X3oObOzREjns`%7=wOrju zRI&2~bx=<(+X`sh8~i4?k$u|g^YUAZRz+2`blQ>^Hm31P@tD(o%MSJRZM6T?xRN8%mUw)K~7=S!J0t#&y5s@cLzhOyrR`JN1e%(SzLC|_wK=> zx?ItHE?4U!tz?agvc@W@iRICM*BX` zzjwn+hV%YNinLA`I7R6fa^P=g^-QMp6xxJ<&MS7`(BbkuN<-K?9YQAYd_^m@qhhr+ zc8WI+c(HmBFZtA)$7|Z~DIcQ=hryrypQkcIQ&$~V*sCxz2wsf3DUqyIZlgGFoDv%v#g9Y5ZL~t^qVf zeUMHaUb;V0M7DdbP+9?^l=)}CsAE;UQ@+$#&qywP(=I2=Uibal8awfATPZP)Pk;Ad z?tO0t-MqK#&_VUeiUOx%yw=(l&N%C&{(f!fLEKD#Y()xmVQig~vd5ak8hQEJRv%VN zt)Ix({w_|8;XXogmyRShOihClv~tG{mEk!}P%IhioswzzqECh$a1Wo@GU2R#ZBQTa zBj`{eU7)4TQCJ4jjL+`q{DUR(QlCv|xaK~xt6vi6x9ebLvQU-Wm`xXPlI^0Wz9XRc=|Uj+|byy%Menp@5X>#BF-j9l{GLuM%dh6>{>1opw6IWKqE>8}4%pgso{ zHT%$CL;Q3xF@08E9Q%(L@%;8{r`z6g6-n*=>amv%&FfI5ASp9qsz|K*JjBWicw5tP zE=syH8X^sbqQYuW&hq}hZ4M@E^tY4?x_*QhSh^5_F7F#AKE=?h4N@-|TclcN$^P?P zWy1mD>+D`Kz@6fU=B-2viCf0i)r0EOFzIwlu`dd9-DcEGOV<&5PUdH*ierN@22O@fwf^5(<=oa~y#?~Xs;r}ezHzL%_Rt~Mh zfGQ4>a**5C2~F=1+r82dcswlv#Lx{nti5mJgl|N+$O8T#fCH zIw2;=yRe}mXk=~MwO^4~qYM66z6mJ;;RnPMHm|-RmWd%KbH7z8my@lZn6+$B|cB!6WHfqC;p@5;#%RvH@8!?RF8;3 zXaO*Q=Ku_Xg`N;DQj@KVTwmIL>|A`Zbc%%UW18L3WrtsboT!)xv`NMj7RUVD{HyFz z=3xZttS_t*tfwaE07@bW&k=lo^lE2%=G5Z=D63F=71$}S^!_q|&iij!9wKSUw|EoT zkI11_;w@_iM&dSXMO_P-c@wRt=78?EfvN)i=#qe;%xD+!HD!9Y0sX5dx`*?>;j@qD zg>8Zrb*#QSo3!;hM3()O>BC8%&3eSzKq8tB# zwCt6AcZIu`h&~{;$v3}uDjci;&|`j87kn1Km$(VKwvLbgUyDNRU>w%3z}%Ey%%)QH zW@v@TM?dXml-K{SWp8-KzIUY>=>-#Ya`sQJBoVkreGG;Y!1^vT6dUn|jL83b2}Fo~ zRrN{d$}d5_f?agNZ^%rGU4-nhs+Uvsfc2tSe!gybIs0UwsY(*(3tN*q&_kHt;iE6y z`L2f~ZBQ_>R|2;OUNgb!R%BGd@s5Ru;BD>%rHepht$Kk`e}bgzoZ|X%|tZ zD~i2|0@hN(L(1@qooy`aDERmBkOszQHO-8dXwSso*hF-N5TYD`O3*M*cQe_7!6t4s zPh6Gg3Wu<3#15Dg#2VNH>PLg8nkq7_fj%i$!yab0`(%q}>?Ps6LF47Ln#tkQT6rX! 
ztBfe?84t?4YUga`B|<|D6_3;QANeNZNCZI}?bUTdM|+K_sZz97sI^rBRqQP+<<&vL z9e787;&ljj3BQQv`%2uFkJyx%)GKsQVQMw7$6RW9$UdlIhAi@CNWew%C2JuaUrugJ1?q%{{mM^>mMnn-mfc?O5d zS{`DAZZXxYc+FTFN(|0dX%nYBOn&=haWI;pJ`iwWcbawUc$fkGd?tC$IL0eVJeRv% zmGVp<|Ae%ZuuKBp8%#m^A)lwZ%9GRQgj~GhwQryt1FJ2F}`H^HDK^!|mPZlNWR2@oX($RK;D&3YKlm@t25@S@X;A z5WwtYaETGOt`TM+San8_73tQMfiyg)LRRk7e7-hbivA;WXPDG0L8Q$C2u4MU5>T0G zD{6fw?n9MJURf`)^czXwD=olw3C&QQ+P`|=Rc~-4FQXaRVxl#H@l|nkU7KRZY;=LN z(Nt1snxbQF-Ndz3pM7iDu<3b-QTj5??G-Roe6pQG`yA9ZvYT_Kh|zQ8s_YN^5cJ^g z!JPI{wPp1SB_`I>^SP?r*^Qvf0Are*zLwu?Rl)hx4qQ_-R6D~_Cr_rH(r zg0(G6Kof_dt1e|$E8U|g@gv==pHG&PN)t;Qg>La;4Y+m@FBp+EOUq@atd-zYz(_aA z@JQEFn|g&XuOUNo$=d#uemkDgWeVCvL+w7kp}NU?)*Z}CK-;+~HXWU|d4iAgy>K$j z%FeNuwd;YH$XVU!i@36D?CMwbDP29`)Lc)dEc7wYsmmg)a{w$?n~C^+E$8D{V;k^t zj;CQ(vDwAKJ6@|v#NDS{$hW6!9$w)UvvWAK*JK8kZg3(J3P4^E?&7s?rp3wAmi|8jBDC-IL z>SJpqLlM_kT0LfvoKTAFTupggVKY~F*Vy3Le(<8E2bmb$$Z#ZL`eR<$Y_lEJHN{pHfkGrOdOt~L(Kgn1^0 zTYXf|XX+cai<$ha%~1hm1GOrLye^}X_RScScq{{>fIpQldeOEi#Q zfa-W5C$gsb3qmbVl7lxN`5T!1eJUb6{Y&%BwZ681=6xRNHJqezHh12UIco zEqSGUV=>-f^?GaxW+-~lxhJy_x&?|LaFB9UkR2_3y6y?)aeUrfBLdj4wY#Epi z_rNtdE0c8igN|l0=Vo4yOLoxb6`pNVJx|?s(9&l#*u#L8h(TE&RQ~D~*crOZ8<49i zl@{mqJ70}xG&k~eshYiWD=7gTjsxt}`9}Q$d)J0J@t+uMi?d%ku4p5}I(xmM@ylF2 z;>o||fTb+60b*24n&!IMg4wH^U?|71VCItz)>_W*zEw$eV8Jdr`VAq28mD|ty`h6&*O+TIEC%N;D?uV65`q& zPM{m8`ob({a*3trxeFj@0$J^4K|Q)6dnQi%hoDTCqTr6>Tc{oHKXEgljP823`BWu^ zKXH8LZ*8~E&z4F; zMzXXs=mDA3PIFD*%3p=r&fpTxLe{pBDOu5c1w@w?9s@k={?V_M1kn}7v$qK1Xivyu zAjjYOqL`(eH_^Z$up<*2&s%|XT0>H%gU6PwsP&PE*b00=!s+%=}T zHd{&q?as$wOU()bS+a&ytk9_iL$d6hwB!u2Mdl#lilBMH!@m(m%%rxN@tpGGk_%PB zT}=*xStlXr-Pc38vpvkCE(SGL@SK-&GcO0u#gq$*OT101@1$bCe!)>J2p~p==l=@7 z{pJVru*?RTQ&vSo6g)?#cH{=1IT7hRNj51VVn;~^K)Uvd=};t@t2(od-CnK9w=UoZ3KFqKt8~H@GzPK@)^zb+%K9y*lodRd^J*Ith0$DLqiM zb81zptOU)01;(_at94(~hNV+Wzc!!O@~?tiPbj_~R*`?v5sC~FT>m-g;9_h+rz>Iq zD7(_Sw?Fd-G)v_NQ8lkk5ds@Fyiyc7|3@RME)VjA2%@{8I(IE%cCCPpZuH?}fxntd z$E>ThYa!4?BYjlp%JDoHJyiSu65dv)ajvDlHvx>omvmFqhJ*!VB;D}{s8ZUlJbPoG z9GiLo1{f;2C(%r_RzGkTWCmwVRape{dK3k!%KOCTM#muUuo84M?Jb5&-Yi(l&TWXw zsseU_>5kD9)-%#=aUF^uVBq^p58zOsDAn%%h(8zbhd{#8lqH$FJ?SVpKxOVEX-3^v zybv0p;^cNjfAcWKdZ;r&H$SlsC9QyTZ=PnoXTWFkrdanlZm}qP?zzb6QS2aJFLNbB z#mM^&9Q$$zO9+&}BQ!8?HCAaC6G}mW38=&eVeRes6}0L24+YV?`tZ^9moe=jmF2- zE%6Vwylp9$P>Uq|A#@9Wj=)Dz21lr4CXCl$NWnJs>{lGlQfxHGqJId$bsq=MUIzc z!4!r+Ys>0Sk{lJSo{nV`IF*2UMBl(-Eu|;j4|Lby%GAa-NKAYptj{anz}s8T$|DgZ zvqzLuY~3q=$$=%SNfu!587_ACK^tU|9FeQ|J~#?*>v-3O0RMf)EKc9SMiV>qNR;3D=3H`o6RL?}4vITb zh#-7$EdmcH?glgdogzgd{$t#~;EaI7Zg#Vcy|z>w{WLDmfP0(}h@@E9D3 z4_4WB)GL)HL3f$;eyhE!_+ny8;h`Q&rlVC@LYzmwk~R4>L{E>nD{mz1cEq6mF-WH zy0IS22YeeD~PR0hEf>s7$5I?v(ydC zDHhBsC>BOOWv$-N{REJ{rDszcp2&R6p~1`50~j24Jsuo6b?o%Ws~3;RLmtu|g>#7i zd*o};em0jb!v6ua>F*aOMcbPwcY;A(l-!4iI`nKc6$#p9g{dj$q=NDD!HP_iw9Z9r z89{9;==6z)mDU;X7weY-GJV#}5?_ zAjM)-Uw1wp`D5iRFX}Ij$<>xS#o&#B?pS*w-Y zHXedH3Fg5L#04>R;ahk}NW2iAht0#R^Y}O^W;DRR#)U z){3cMxU5GknVX7%WF{fg@lcb7X8ydcVv&*14Q(Q}n<)cAY*1l@kRR7PVA2;g5%>ZK zFR9fL_}yFhTvgniya>WifVT*SuEMXS=gQ_qcCEV4LR$a*;J#JtS}xPmUNEK}vYyY7`LBW;rC*W77*Bs~jm8mxaQjiY?vyWSV% zOdIePEptjX-40c@6bC=rd|z_RpkN9IX0Y<3dMF3R}LmQ&S7lH~G zfu9dJ$$p!cbOw_)LzGNP0R~&5fd%S>`3b7=Fv>`um2>@x5ZHS;#q%}l<@1-B9IS$=Aqoo|?TJa@g3yQ#O z2hcmj>Zb9ff6SjP_jUSed%Sr9NpU3aQ{(m}^nIt7o{SXy@OZPxmTWgr`xxV_4OABQ)%9 zq)l)5)3yQcv4eHEE3l$HP+YcyK^yfZ&L?J`NV$e6eqzx6J`p`H5fWmW5Gs#jav%$vu`mUiE?u2Z^zRq1t1L|EkCeY94| z+gOi{GEnrt7WY>o(w7RJra9O&si0Uh#;Xk7gSKDRJk0E~GxIjm=5!Bu8zS$YBQ+3- zkbz(#(E2X)yz`Nk(QLi$`cYxe1A}?$3Ps;x*g_kz9LQCGunKLVna-;i9|L zc(gr-3qh{Eek`XeARpDiZ6 
zk0l`i|B-D`27CPB?!65=cSIdfKJ0Adm{IR+ld1n`Y^{3JENkKQ~Y_#&7g(KtPxK>+jM@2IXot zGegmW5cgfb00_u>QAP)7;Tr%Hq%=fI3lDIEfWy^pKnn0w+VTk~$_{Vd{7P<>q6nwl z%KEa|3>s3<{1*7~qdL|oeG?mG-gV=j#|1|z?lE4v@jaQ^@yl!&w`(+ZKM{S>z~|(@ zg*WJBaZ0TLL>vi`84Zw9K4##r$N{o z&T;z>sJ9I_mIm6(3D>r5S$k1nIku8>jkaEmn&A8W2z(R2Dwz;BFf>GgpbCU!*E5$k z1duU-zS61f4;MOmpkyuP1*{AM+E)06~>;Eec$GK_tl3;oHdpS;@5e*qL_E#axES z%qJj{FUqCsq2uX8}OH}(Qka{C%&P_~@Ha(^B+_W&m-4z?nMw(61;zihu` zb|AHXYyMIDuc98x8d~gS3F=rxIb_RvU2;mlFv?)()zUOSlMr{Jo{WZT{If{i`%>8P zsEG$Dc8MSYsHoR0$r_-OdndRP4;vhj1Rr9YazMVn7KcvqntNM&yu`+fSa-)f@P<@c zDY!K9ZKPQmdCsYdd&cG{8ao5*Pp;aFxBGVK*c_`mSIALi+2SG zIxC=tfaR2RYDFtHavp!}sT=%N=B;r#NM3YnKdQh~bZ;ak2}nEkfGOM5bB5+82l6rX z-x}MNW&VtQtKab0L4UVb{3#(mKI2|d>i`)tmXZA1OclySp>+A(`IkL*m^3uW=AEP# z7&@d{n(Y;i= zgPV$JrX0LfF5867{NYyk8SOq1J#4z;uxY=7)X6Plhf}!cdf8mZ>1E(OpZ6ytgJxW+ zKb*iqP?kLBH`e%%e@V5Z$3KccPe3|1YP_3HjTk$Dk1E{KG4Xd|L2dG;&*i8LwJ$W- zx=a~A!rc*YN_BjNlB4>xc7zsP5mBs8b&|nLgzP5z&&zb&idH^EQfeZTdkPgy2ae$D zTMxI7fr2#cSoS+&0?4OH!)H5T1zAf6U^}Jw`B-G`Gk^~l2}#~B_R7WNFRuP*@Oyvk zLpKH6!M~ov5#w>N69xQ_6Z$s@6E9@CHxm%Q)!OTq8eMLu$?GUa&wS{$y{_iz#A1O?+U|Jw`Azkyjx(&#nL0gp;42yFUA~PsvDc z24$)JIU_tirQbv$TtSZ59_P8@{&J?ybIYa~1FJPN?hT+=cZ`}){LD%4oq1rRNc%W_ z@+Uf4tNu&ab=0QPsE+q^`hH%=ql#>cMcC~aLLCx45hqRiNb$Ur-aSUnCB-?X)Y$qH z{J!X7nx{joNq)CG2X2tCFbJ;^@8Ug&4#w~x=vUn_TjUn;n$#W~6Y6D* z)*6zNJ{E7dwNH+Z_gHSo=kdF=1oPm0`;-XdGcdoYOp`|5#=mmH`U__6(#nE)`W)ZY zp%{)o1jFIHw|7v7;du5P6yk)kgShkl79;Sd@2mJCRJ0_11q?n4&6|0VudzozUN^5= z2ip89Ra;o$Ggp{O2jQ73`E1mI>uOC?WlF2l>F-|7 zNQanf>T`$Uc`q4h=&W}RSRawS(al4!$!K5&Ngi$^J2uLImR;*R*|(V_PT~}{J`wz| zi?1TJi*VKX*^3{5iP_@dBS#KJZw4nK*-Hbhe?PR+`=X;cdr^u>U~T8*0=sg|LTV2u z!iZJh`}|5*VqX*6LyRP%Hekph!~?ba?r{-%ox(U-s>)&}+Z#4#xsE%g9zI$@S*ft6 zn^0x2RN&SGdqswR6mR)>Esw-zxJhF4f({SSM%sWEpMSbp*FWdDH31(ku|C=71d+?N z4)yxDVcqQEH@Byx$pm(>!y-q%vIhRfYkwzhVv#ZT7VL@rW9lCwmk_E?u+msngu#fr zVyxFRdVPl*dy?_5g}T$p#b#{w*Zi5$=I`=^kqO&T+#qj{vZUY(A3R(WsX?$s5L##`naIlH z=1^7^7%R=HJ&RlH{uc8v+>JsEVA<3sFIM>nBajJA@t9AUl&Oy5pg5A^UZTuY+JzRD z{Z@M?G_9YgKFO=qYb@fkulMS=s9ht z1q#LbV(EM2jU7sdYrD91*fCQIHryMxgfLR0i8fple3+bPfM%cEA41>EpA)ZEP(;AC z=9=+0<(ghhZ}1KjJ0|}ssS=1BCyI}odAkPG({gX!vYd$k`IPO6@AuoU(A>csS_D2S z-hW_63+!D?dP}4>YTJAT#(QfUalI~U9WM1b-^LKejP%~E;Ujudg>HNXn@ceK@S><& z_w9r6IP`2~BgVwOY-Tg3a1VjnMTMKg6CH-#RknlI(8nF@-W#qC!(P`O7Be<|Egf~= zbG~&DzbVSmy0s%ZQy`*<8mqTbp|vc9!ZV=i*-G=M%0$<^9;HxppiQ>hi9Z;{p%-W7 zg<;3e2!xo&?VGSg3(}R*`ECa@MMO^x!{`F+hPdi^0-E4!7{q|9g>GV8K-{-Be3vO{ z*Eu2EcSli@_{;diNGrqw(gY7Y?2;_yGK4t6QvS_kIMXgIzEy z{cn`7sbc@vranY*9ig2qMaHdzhV9WSUg1LCOGX@Gj=k--d-|?7cGz^Ir>eeIsa+;c zz2Mn*91}Lh#yunE8X?5h*ufbbgEnQLsA%Ifb10?>r4xitSsUwgg@zSjddsse!Fd#q3<7~ zbrCl5<{c%9h=0{V|47oDb^*00N-HR_Q9iSLu-d^6{LEChkbZpB%gSKpEc>>dc-!%B zF1Q^g>|`%u(9^pBM|+pX2lY}Vu`&yA8@_63UK?W-(x@s5Jy!~0RKP8RkU`92(V}V% zD?i20pSv91J{jaW^i66h@Hgq45^tnKn+86@*qp00?lMrrtjCOwv2by}+otTOTNTaR zLf+$gN2{yLJy>;8kHn+a)jpG2{yK(VI$(Sw-g(a|z~my{eL>8};OJ&c-9#6{e(1+^ zu*pfJL>JRRLVD*6UYezkK>dF1No%pQto ziPO=V_ioJ$?k7qQm!T?M82!M`GvVgnPTPc>-eD2CNWw3)+WQ@ylg&D~U zwLIqQmB80{QuT!nt>|Wn*BiMaH&y5pUN=Ak6^;Wx^pr}EUP?M*8vJWp{nScdAd>x4 z-+>;*?-6&db~i2W!vf`W`1Zlqv6t>E`0F_L9$OCUZ6qkh!Yap}l)(fMlRHn`X)M?r zJzmW`DB|R9Mh3P%g-fRB8Lp-dNDF^-osfF3VLUh2s-`j)95Z?{i2w6hA&Ry6PeU*d z7mk2{;`~wg%(WXev#+lE59fQ=GY0f}2G9I2F>rVzu@ShEK3;GeET{SlR z-=yG`!XBhoY!+-*RK8 zXy$P%j^Pg}tvCBy9QTPodBRqm(d^rVDTr97ZeQ=iN7W+O-p9E$i<%SqOQ+#B(qcZC-(bb?gjZ0gs$Z*zb>B@3v)w9bRC z5inYjJ>!cgo*Pk(`p!Pb_i(WXtJAa|*^~@T9iH^TK`;>J1~j#zTC1_zFXYX8%_bHr zBAqquy4Eo|--S3im)vKyV68Uw#>vpBa*kF^J;Brhof5@wuQI)t@#?7eZUC~XiI?ut zW@*s(@CUT3eek)Z!fNr#4Uwlub@(*|wii9ReJdn!CX?3l)u5I4*80f%9Uvora#rE3 
zp5ePLHkCVYBM)Xa-c}aH(MRJ+JI@jEYvNUiz|h5GDcM8h)6)wu{}!sYixG1^;{`5m zywuJfq*BSbbdUKZzLK}K-s_yi3br^NbiO+oK;U|%t#OA)5Mkysa0A{{vV%JldN z?>W!Ky%9i5QY=1oa=-uGkljhT5~`vwDB18oA>CY_-m1g9qW5+tv?X>^jUHO z%eapx>_(Nj@X%aDdDv>Ni`2$w_sag%AB6FU_!|Vu3yocM#TQF-&q#70{=&FJorn%5 zzq&O!bXL67eR{32rjFTPhAi4rf9%+5G~cd_=)i@wIiXaPYJly_Hj+l3EC1q&q>H*i z=1x}ugFTJyb|RwY&*#Q>3~d_P-ggGXj#KNnsQ+04hS>*AO(3dZ%EKj%+vlW^`G^)q zZhQs;v%1vX_*?vDhwFfzg};-J>EVVPX3v8ta$v&}C}q^in%;)W=aAm7ld>q&1?Nx$ zd(=qqCX)6RaYry->QegzsqHrP@%_sPynZZ-beT{o1E8aQ3MP0SQx|uCFP#1j{x>zg z-S-KIEAU;zTv^#RZ@K+)Uh0~QZh7K__s&FY!+tt7e%&Ol*?W%B+=U|frL8{Ek<6q8#o`W0&~y)fIv6+iEAv zQlqZ^325(C)FIm8z9nvN6wbsf9>fIULnL%SaQ${_CW*mv~^K z6mQVjWiv=zQNMylZY~G%YZH0XN7Q<$F33ppTOd>9d6XAE`F2cynkM?OH%YMJlW#qc zoL=HD{$)Vy`G|Y?-OEDr%FtY~1G^Ftw1H7%P#w4c~BJJ$nSUH-ZR( z2ANCqL~IH_9#h;tJd*Vng($$>wa!XGk<#ok)FN6+EuRh0bcCB%mZ9Cete_BB+(jj_ z7>=;l&PRc1=*qJCx3N9`Bk@LAX>~)=G4k!X6dbHf-h}z`2AxCCtdez>*x7s|$oMM@FoJ*ArI>8{w!hv88dT9T7<@lumZSy8w3OnT&tKZxvi{I9yf_@U6^ntcRz zFcwc8TRpR?J+|y_<@yA3IAdgSpR_*POvPf;IJgc{4&|`0i=(PX)nVhL74M2dwInuDE>}D(LbB`{ z4Q?Q)Fd^!j5ixfz9#pT0jKqBEP(vmU>(-81V@*0iurHFG+}Zvc9$UdUaP*ElN*Ep6 z@y>9}*2pVBW6Rk`e=q*l30lYvXU5R-{e%@5ly8T_y}b0ih+(+0J`tzl;h6^|dG-`R zI`zC&;isA7mAFTV70SSAbtE*Anv(cEQJ94fgSaO z2ijtPh989PA22G^@LLIjDUDZ-de=3ZLC7!L@HD(m@ICVuKHg}VFhS}x1jzPlO!~kw z>bG>(G2bsatq0ESiJ<-d^)yRmW#ps^(G>DW^wFLDKiZB5=X1>lrnIByS7ox5K+4*M zNn~b;7cx?=gQPJR2zB?m0; zCp2wyPEAhnCfpZq>vy?9Sx0^G!YkrLOVnFg;>1DmLpWIj9Qzy$>9|gGM1H6u0YbyM zWIAn79Y88i8{&I_H1n&|%dqFaGHMZ}^1?+oCDM8)ehE(DtQnf)b&2ae1uf$%j@*)9 zHvglpIW`fp@c{K(hClPY+y_V;ZjjXxq#8uvWjzi(Wp5FMQWY^+CF0ptYN{{abJb4@5Zo5uttA=EjA{_#^}IYtf8z~0kl|oylkzWittx}NB zLM-Y`8XjQFur-xj=lD~&=2EVxBcd}`ReC^QB$(d8;S%wDcCEnJf%;s**ZisaA(jGV zai5OFzX_vL7RCo(flLEMIbZmcAZBPhtIzNenunL3_g3%vQin6NQBkQ+)(yIJM8C2w^(9KPw`c`r0WdxzR zm8($3DqzWVzHB&jlnW@*nyc$N4%Z;rT#?cb6~+g+>N{4*Xl}Gkb}^>8-Cq>6cWyQb^?ai~-WgI_D+BcLm~YUGU}OT|SiF)KMUqcZ?XS z1)T_<;8VI^E1|Ky&JFf9yB-v`y{7QR&_5v`wSD-KIc@Y$ z9?+McroVZjBAcec=Y*B(*SwE+pwCfE{jP~CydoM$k!JtimlfY$e5LZ8pK-t#7jX^f z>z{|+dGU}ps?YXSp*NY!9O}dkeFgl@`XO1C%nQ%o&)O04gZ_qWX`|1+?VbxQx>Bto zTl}LU5`R78iWmV0U1ijU-Mu@X&FthHv zq@U%n4?QU&ax+klXX2N>-}WhwwTxlgAs$Z?SGJA>6uL1LvU;c98QO%QVNh8WoQ ze}VtsoB#h6*|+iN%tM_>^+KTeUApS!6MO{|wnyuS>wrx?5;Vf^>v%Qb*%8+aE878Y zmR%1H@)d&mk$$)iVv4(ivWXio&-tc-X@P>8)TI($*zp7z1S$+`Szo7yI+YA(MiGUx zcguS=%@IjMW&BBacLeR*aJB^YQy~&O)?23i^tvpX_6f=wu(=g0mBq(7*kRf48f(xY z8erx{(=_+Ql9P(v!;Yo{`|$1BNw{yZLnPpMz}m)L-qK`nG<6V=B+X_M40{`y$IK7A zG+|dus-{bi?|ca(L>}j(N^C7Ne(54LmZcIbPbE#4rrahwk#{5BA>~N%mDkL#OxYM` z4i85msrW0jPe1i)@?ZkrF$F7~_ZR-#7lK2({)fOubwub2OViK{3i0fU3(@KSrIaY|I7M%R~#%(&j7+NRkzo$KMzD?i|c zq6KP^pzfi?@-x@Ai`j432tIA}zIo&&ROu*&*KyAzarmOp*sq|spUwD_X%5304}q|AuCh;$^MTzz(Sq4w?pt6caJn(Ndnqy8g-!vXq!ia)%kvzM`kjBI4?A5waG z*5?Fzy`{NJIJ~0KhsoTH+jPjj9q+oZs5`>_zg{PO(cca4zb=8BGu<=7R5XqR)<3bL z3JHaz@s70I`VxQEgt+*Q-Yu%*5c*(mrv9%7O5{}mBi4}snnAB1+??a_9qin9Q{Uwv z3no<@flo=?mmtY#n!`>aqizwN+z6YCvZ3$rYA| ziKn#%t+CP42b<~6_J_mxYQnKG*1w3~?Zl%yeqk9pv&Oy+i{ znv1#&*}7^nEOc2*vlCJ`JZx0L_vZzdUyq~J5Ki)bsP&t$$!~=32Ih)lAM|G- zuTq`r(lYH&R;)z0=qpM*{#UdY;jgfx{&erMfPw?mX~{~olF$9nO5lnzmsu!0C-M2e zTxUhr?DsU~=4(>M2&;M#`Q~ragRte%Xx21LW#D4qDXM4pcTCrgUVyK7!G-?4u9F3C z&j{DWN{9~0usFr*mps{Q%?fTe$fdS8 z12T4ocz$={n?X&w#J}`d-{WAii^2q#8)C4J-xRIrb&CZy?^*6~QuQ;fv_4ZwUP2*f zCCgQ{!0i*zd@{&uBW5M?Zq8E@OV$qoqY#R(2%m?Ge!GqQqR$Xb!_n}TsChMKYrZv; zVx>wUwfuA4~vnh9f{#D_`kiQ2?7xo`R%?0SjaSi*S< zcmLAA;BzhhOVC?=vs9&#R+J>nLG#ZVN@VDw(9|ifIqI85Y-a8h->2Che~?A_*&w*} zm;UuKig0fgzepw43>J>D%ufSC_Kp1BL zW@5#W)|>p-<(X3u>P$=XmVETE|BSOOqkG48Mx`w*L_tzavHDGQB88FH@k+@ClD=xd 
zyoif=t?ji)6Rqt}Bx7bmZ;nY-B}`4*OE610H4IH`r?Ha`;qt8C81pm!Wj*j|GfZ+L zP9{4Xh2vNa5^Jrp*JlhlrA~Wyg^{hPoK}DDX~w9*!#(T4IsP*Pb@Yh~Q^ZWeq5Ur& z+FB%Kuq~r7)lT`Xm4;iRy+hi7A~9Q$t~|QW5%bJgjHZ_?Xm2_*M1gv^nV7jM#-y{E z!B#bZu)h{6GZj+UX2nTmPDyy>Q5I#Nq~brmiMIbLfl>EHvX3V+=nJ+|wo&l0rM4nNMGkt$r!w zxswxUhOQv?`{0>SZ!QhLlKf_R;em9JMzhV}4!ZozqLQ8j z5YBvSo(!m5Y1v$Antip?lD^ICr7KRZX_v>S=_#b#It_^J24R?Ft2;mTljhc!D)^EV zFE#SZJNV3h1Rq1T`Tx37|La8mZv^50!|+K}GHHpOeVYZvn=59t;}P_amt8-~**0Fu zdh{ZZnX2$CR;2GQGA}?pnoaKsXi)_efIwU0b!tzlK=n! literal 0 HcmV?d00001 diff --git a/v2.4/usage/troubleshooting/faq.md b/v2.4/usage/troubleshooting/faq.md new file mode 100644 index 00000000000..ee46e8780f3 --- /dev/null +++ b/v2.4/usage/troubleshooting/faq.md @@ -0,0 +1,364 @@ +--- +title: Frequently Asked Questions +layout: docwithnav +--- + +* TOC +{:toc} + +## "Why use Calico?" + +The problem Calico tries to solve is the networking of workloads (VMs, +containers, etc) in a high scale environment. Existing L2 based methods +for solving this problem have problems at high scale. Compared to these, +we think Calico is more scalable, simpler and more flexible. We think +you should look into it if you have more than a handful of nodes on a +single site. + +Calico also provides a rich network security model that +allows operators and developers to declare intent-based network security +policy that is automatically rendered into distributed firewall rules +across a cluster of containers, VMs, and/or servers. + +For a more detailed discussion of this topic, see our blog post at +[Why Calico?](http://www.projectcalico.org/why-calico/). + +## "Does Calico work with IPv6?" + +Yes! Calico's core components support IPv6 out-of-the box. However, +not all orchestrators that we integrate with support IPv6 yet. + +## "Why does my container have a route to 169.254.1.1?" + +In a Calico network, each host acts as a gateway router for the +workloads that it hosts. In container deployments, Calico uses +169.254.1.1 as the address for the Calico router. By using a +link-local address, Calico saves precious IP addresses and avoids +burdening the user with configuring a suitable address. + +While the routing table may look a little odd to someone who is used to +configuring LAN networking, using explicit routes rather than +subnet-local gateways is fairly common in WAN networking. + +## Why can't I see the 169.254.1.1 address mentioned above on my host? + +Calico tries hard to avoid interfering with any other configuration +on the host. Rather than adding the gateway address to the host side +of each workload interface, Calico sets the `proxy_arp` flag on the +interface. This makes the host behave like a gateway, responding to +ARPs for 169.254.1.1 without having to actually allocate the IP address +to the interface. + +## Can I prevent my Kubernetes pods from initiating outgoing connections? + +The Kubernetes [NetworkPolicy](http://kubernetes.io/docs/api-reference/extensions/v1beta1/definitions/#_v1beta1_networkpolicy) +API doesn't currently support this. However, +Calico does! You can use `calicoctl` to configure egress policy to prevent +Kubernetes pods from initiating outgoing connections based on the full set of +supported Calico policy primitives including labels, Kubernetes namespaces, +CIDRs, and ports. + +## I've heard Calico uses proxy ARP, doesn't proxy ARP cause a lot of problems? + +It can, but not in the way that Calico uses it. + +In container deployments, Calico only uses proxy ARP for resolving the +169.254.1.1 address. 
+
+## "Is Calico compliant with PCI/DSS requirements?"
+
+PCI certification applies to the whole end-to-end system, of which
+Calico would be a part. We understand that most current solutions use
+VLANs, but after studying the PCI requirements documents, we believe
+that Calico does meet those requirements and that nothing in the
+documents *mandates* the use of VLANs.
+
+## How do I enable IPIP and NAT Outgoing on an IP Pool?
+
+1. Retrieve current IP Pool config
+
+   ```shell
+   $ calicoctl get ipPool -o yaml > pool.yaml
+   ```
+
+2. Modify IP Pool config
+
+   Modify the pool's spec to enable IP-IP and nat-outgoing. (See
+   [IP Pools]({{site.baseurl}}/{{page.version}}/reference/calicoctl/resources/ippool)
+   for other settings that can be edited.)
+
+   ```shell
+   - apiVersion: v1
+     kind: ipPool
+     metadata:
+       cidr: 192.168.0.0/16
+     spec:
+       ipip:
+         enabled: true
+       nat-outgoing: true
+   ```
+
+3. Load the modified file.
+
+   ```shell
+   $ calicoctl replace -f pool.yaml
+   ```
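+
+To confirm the change took effect, you can re-run the command from
+step 1 and check that the pool's spec now shows `ipip` enabled and
+`nat-outgoing: true`:
+
+```shell
+$ calicoctl get ipPool -o yaml
+```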
+
+## "How does Calico maintain saved state?"
+
+State is saved in a few places in a Calico deployment, depending on
+whether it's global or local state.
+
+Local state is state that belongs on a single compute host, associated
+with a single running Felix instance (things like kernel routes, tap
+devices etc.). Local state is entirely stored by the Linux kernel on the
+host, with Felix storing it only as a temporary mirror. This makes Felix
+effectively stateless, with the kernel acting as a backing data store on
+one side and etcd as a data source on the other.
+
+If Felix is restarted, it learns current local state by interrogating
+the kernel at start up. It then reads from `etcd` all the local state
+which it should have, and updates the kernel to match. This approach has
+strong resiliency benefits, in that if Felix restarts you don't suddenly
+lose access to your VMs or containers. As long as the Linux kernel is
+running, you've still got full functionality.
+
+The bulk of global state is mastered in whatever component hosts the
+plugin.
+
+- In the case of OpenStack, this means a Neutron database. Our
+  OpenStack plugin (more strictly a Neutron ML2 driver) queries the
+  Neutron database to find out state about the entire deployment. That
+  state is then reflected to `etcd` and so to Felix.
+- In certain cases, `etcd` itself contains the master copy of
+  the data. This is because some Docker deployments have an `etcd`
+  cluster that has the required resiliency characteristics, used to
+  store all system configuration, and so `etcd` is configured to
+  be a suitable store for critical data.
+- In other orchestration systems, it may be stored in distributed
+  databases, either owned directly by the plugin or by the
+  orchestrator itself.
+
+The only other state storage in a Calico network is in the BGP sessions,
+which approximate a distributed database of routes. This BGP state is
+simply a replicated copy of the per-host routes configured by Felix
+based on the global state provided by the orchestrator.
+
+This makes the Calico design very simple, because we store very little
+state. All of our components can be shut down and restarted without risk,
+because they resynchronize state as necessary. This makes modelling
+their behaviour extremely simple, reducing the scope for bugs.
+
+## "I heard Calico is suggesting layer 2: I thought you were layer 3!
+What's happening?"
+
+It's important to distinguish what Calico provides to the workloads
+hosted in a data center (a purely layer 3 network) from what the Calico
+project *recommends* operators use to build their underlying network
+fabric.
+
+Calico's core principle is that *applications* and *workloads*
+overwhelmingly need only IP connectivity to communicate. For this reason
+we build an IP-forwarded network to connect the tenant applications and
+workloads to each other, and the broader world.
+
+However, the underlying physical fabric obviously needs to be set up
+too. Here, Calico has discussed how either a layer 2 (see
+[here]({{site.baseurl}}/{{page.version}}/reference/private-cloud/l2-interconnect-fabric))
+or a layer 3 (see
+[here]({{site.baseurl}}/{{page.version}}/reference/private-cloud/l3-interconnect-fabric))
+fabric could be integrated with Calico. This is one of the great
+strengths of the Calico model: it allows the infrastructure to be
+decoupled from what we show to the tenant applications and workloads.
+
+We have some thoughts on different interconnect approaches (as noted
+above), but just because we say that there are layer 2 and layer 3 ways
+of building the fabric, and that those decisions may have an impact on
+route scale, does not mean that Calico is "going back to Ethernet" or
+that we're recommending layer 2 for tenant applications. In all cases we
+forward on IP packets, no matter what architecture is used to build the
+fabric.
+
+## "How do I control policy/connectivity without virtual/physical firewalls?"
+
+Calico provides an extremely rich security policy model, applying policy
+at the first and last hop of the routed traffic within the Calico
+network (the source and destination compute hosts).
+
+This model is substantially more robust to failure than a centralised
+firewall-based model. In particular, the Calico approach has no
+single point of failure: if the device enforcing the firewall has failed
+then so has one of the workloads involved in the traffic (because the
+firewall is enforced by the compute host).
+
+This model is also extremely amenable to scaling out. Because we have a
+central repository of policy configuration, but apply it at the edges of
+the network (the hosts) where it is needed, we automatically ensure that
+the rules match the topology of the data center. This allows easy
+scaling out, and gives us all the advantages of a single firewall (one
+place to manage the rules), but none of the disadvantages (single points
+of failure, state sharing, hairpinning of traffic, etc.).
+
+Lastly, we decouple the reachability of nodes and the policy applied to
+them. We use BGP to distribute the topology of the network, telling
+every node how to get to every endpoint in case two endpoints need to
+communicate. We use policy to decide *if* those two nodes should
+communicate, and if so, how. If policy changes and two endpoints should
+now communicate, where before they shouldn't have, all we have to do is
+update policy: the reachability information does not change. If later
+they should be denied the ability to communicate, the policy is updated
+again, and again the reachability doesn't have to change.
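+
+As a sketch of what such policy looks like, the following Calico policy
+resource allows inbound TCP traffic to workloads labelled
+`role == 'database'` only from workloads labelled `role == 'frontend'`
+(the labels and port here are illustrative, not part of any default
+configuration):
+
+```shell
+cat << EOF | calicoctl create -f -
+apiVersion: v1
+kind: policy
+metadata:
+  name: allow-frontend-to-database
+spec:
+  selector: role == 'database'
+  ingress:
+  - action: allow
+    protocol: tcp
+    source:
+      selector: role == 'frontend'
+    destination:
+      ports:
+      - 6379
+  egress:
+  - action: allow
+EOF
+```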
+
+## "How does Calico interact with the Neutron API?"
+
+[This document]({{site.baseurl}}/{{page.version}}/getting-started/openstack/neutron-api)
+goes into extensive detail about how
+various Neutron API calls translate into Calico actions.
+
+## Why isn't the `-p` flag on `docker run` working as expected?
+
+The `-p` flag tells Docker to set up port mapping to connect a port on the
+Docker host to a port on your container via the `docker0` bridge.
+
+If a host's containers are connected to the `docker0` bridge interface, Calico
+would be unable to enforce security rules between workloads on the same host;
+all containers on the bridge would be able to communicate with one another.
+
+You can securely configure port mapping by following our [guide on Exposing
+Container Ports to the Internet]({{site.baseurl}}/{{page.version}}/usage/external-connectivity).
+
+## Can Calico containers use any IP address within a pool, even subnet network/broadcast addresses?
+
+Yes! Calico is fully routed, so all IP addresses within a Calico pool are usable as
+private IP addresses to assign to a workload. This means addresses commonly
+reserved in an L2 subnet, such as IPv4 addresses ending in .0 or .255, are perfectly
+okay to use.
+
+## How do I get network traffic into and out of my Calico cluster?
+
+The recommended way to get traffic to/from your Calico network is by peering to
+your existing data center L3 routers using BGP and by assigning globally
+routable IPs (public IPs) to containers that need to be accessed from the internet.
+This allows incoming traffic to be routed directly to your containers without the
+need for NAT. This flat L3 approach delivers exceptional network scalability
+and performance.
+
+A common scenario is for your container hosts to be on their own
+isolated layer 2 network, like a rack in your server room or an entire data
+center. Access to that network is via a router, which is also the default
+router for all the container hosts.
+
+If this describes your infrastructure, the
+[External Connectivity tutorial]({{site.baseurl}}/{{page.version}}/usage/external-connectivity) explains in more detail
+what to do. Otherwise, if you have a layer 3 (IP) fabric, then there are
+detailed data center networking recommendations given
+in [this article]({{site.baseurl}}/{{page.version}}/reference/private-cloud/l3-interconnect-fabric).
+We'd also encourage you to [get in touch](http://www.projectcalico.org/contact/)
+to discuss your environment.
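+
+As a minimal sketch of the BGP peering described above, you could
+configure your data center routers as global BGP peers (the peer IP and
+AS number below are examples; substitute your own):
+
+```shell
+cat << EOF | calicoctl create -f -
+apiVersion: v1
+kind: bgpPeer
+metadata:
+  peerIP: 192.0.2.254
+  scope: global
+spec:
+  asNumber: 64567
+EOF
+```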
+
+### How can I enable NAT for outgoing traffic from containers with private IP addresses?
+
+If you want to allow containers with private IP addresses to access the
+internet, then you can use your data center's existing outbound NAT capabilities
+(typically provided by the data center's border routers).
+
+Alternatively you can use Calico's built-in outbound NAT capability by enabling it on any
+Calico IP pool. In this case Calico will perform outbound NAT locally on the compute
+node on which each container is hosted.
+
+```
+cat << EOF | calicoctl apply -f -
+apiVersion: v1
+kind: ipPool
+metadata:
+  cidr: <CIDR>
+spec:
+  nat-outgoing: true
+EOF
+```
+
+Where `<CIDR>` is the CIDR of your IP pool, for example `192.168.0.0/16`.
+
+Remember: the security profile for the container will need to allow traffic to the
+internet as well. Refer to the appropriate guide for your orchestration
+system for details on how to configure policy.
+
+### How can I enable NAT for incoming traffic to containers with private IP addresses?
+
+As discussed, the recommended way to get traffic to containers that
+need to be accessed from the internet is to give them public IP addresses and
+to configure Calico to peer with the data center's existing L3 routers.
+
+In cases where this is not possible then you can configure incoming NAT
+(also known as DNAT) on your data center's existing border routers. Alternatively
+you can configure incoming NAT with port mapping on the host on which the container
+is running.
+
+```
+# First create a new chain called "expose-ports" to hold the NAT rules
+# and jump to that chain from the OUTPUT and PREROUTING chains.
+# The OUTPUT chain is hit by traffic originating on the host itself;
+# the PREROUTING chain is hit by traffic coming from elsewhere.
+iptables -t nat -N expose-ports
+iptables -t nat -A OUTPUT -j expose-ports
+iptables -t nat -A PREROUTING -j expose-ports
+
+# Then, for each port you want to expose, add a rule to the
+# expose-ports chain, replacing <host-ip> with the host IP that you
+# want to use to expose the port and <host-port> with the host port.
+iptables -t nat -A expose-ports -p tcp --destination <host-ip> --dport <host-port> -j DNAT --to <container-ip>:<container-port>
+```
+
+For example, suppose you have a container to which you've assigned the CALICO_IP
+of 192.168.7.4, and you have NGINX running on port 8080 inside the container.
+If you want to expose this service on port 80 and your host has IP 192.0.2.1,
+then you could run the following commands:
+
+```
+iptables -t nat -N expose-ports
+iptables -t nat -A OUTPUT -j expose-ports
+iptables -t nat -A PREROUTING -j expose-ports
+
+iptables -t nat -A expose-ports -p tcp --destination 192.0.2.1 --dport 80 -j DNAT --to 192.168.7.4:8080
+```
+
+The commands will need to be run each time the host is restarted.
+
+Remember: the security profile for the container will need to allow traffic to the exposed port as well.
+Refer to the appropriate guide for your orchestration system for details on how to configure policy.
+
+### Can I run Calico in a public cloud environment?
+
+Yes. If you are running in a public cloud that doesn't allow either L3 peering or L2 connectivity between Calico hosts, then you can enable `ipip` in your Calico IP pool:
+
+```shell
+cat << EOF | calicoctl apply -f -
+apiVersion: v1
+kind: ipPool
+metadata:
+  cidr: <CIDR>
+spec:
+  ipip:
+    enabled: true
+  nat-outgoing: true
+EOF
+```
+
+Calico will then route traffic between Calico hosts using IP in IP.
+
+In AWS, you can disable `Source/Dest. Check` instead of using IP in IP, as long as all your instances are in the same subnet of your VPC. This will provide the best performance. You can disable this with the CLI, or right-click the instance in the EC2 console and select `Change Source/Dest. Check` from the `Networking` submenu.
+
+```shell
+aws ec2 modify-instance-attribute --instance-id <instance-id> --source-dest-check "{\"Value\": false}"
+
+cat << EOF | calicoctl apply -f -
+apiVersion: v1
+kind: ipPool
+metadata:
+  cidr: <CIDR>
+spec:
+  nat-outgoing: true
+EOF
+```
diff --git a/v2.4/usage/troubleshooting/index.md b/v2.4/usage/troubleshooting/index.md
new file mode 100644
index 00000000000..92682cffae7
--- /dev/null
+++ b/v2.4/usage/troubleshooting/index.md
@@ -0,0 +1,92 @@
+---
+title: Troubleshooting
+---
+
+## Running `sudo calicoctl ...` with Environment Variables
+
+If you use `sudo` for commands like `calicoctl node run`, remember that your environment
+variables will not be transferred to the `sudo` environment. You can run `sudo` with
+the `-E` flag to include your environment variables:
+
+```shell
+ sudo -E calicoctl node run
+```
+
+or you can set environment variables for `sudo` commands like this:
+
+```shell
+ sudo ETCD_ENDPOINTS=http://172.25.0.1:2379 calicoctl node run
+```
+
+Also be aware that connection information can be specified as a config
+file rather than using environment variables. See the
+[Calicoctl Configuration Overview]({{site.baseurl}}/{{page.version}}/reference/calicoctl/setup)
+guide for details.
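+
+For instance, a minimal config file sketch for the etcd endpoint used
+in the example above (by default `calicoctl` reads
+`/etc/calico/calicoctl.cfg`; see the guide linked above for the full
+set of options):
+
+```yaml
+# /etc/calico/calicoctl.cfg
+apiVersion: v1
+kind: calicoApiConfig
+metadata:
+spec:
+  datastoreType: "etcdv2"
+  etcdEndpoints: "http://172.25.0.1:2379"
+```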
+
+## Ubuntu (or GNOME) NetworkManager
+
+Disable [NetworkManager](https://help.ubuntu.com/community/NetworkManager) before
+attempting to use Calico networking.
+
+NetworkManager manipulates the routing table for interfaces in the default network
+namespace where Calico veth pairs are anchored for connections to containers.
+This can interfere with the Calico agent's ability to route correctly.
+
+You can configure interfaces in the `/etc/network/interfaces` file if
+NetworkManager removes your host's interfaces. See the Debian
+[NetworkConfiguration](https://wiki.debian.org/NetworkConfiguration)
+guide for more information.
+
+## etcd.EtcdException: No more machines in the cluster
+
+If you see this exception, it means `calicoctl` can't communicate with your etcd
+cluster. Ensure etcd is up and listening on `localhost:2379`, or set
+`ETCD_ENDPOINTS` to point `calicoctl` at your etcd cluster as described above.
+
+## No ping between containers on different hosts
+
+If you have connectivity between containers on the same host, and between
+containers and the Internet, but not between containers on different hosts, it
+probably indicates a problem in the BIRD setup.
+
+Look at `calicoctl node status` on each host. It should include output like this:
+
+```
+Calico process is running.
+
+IPv4 BGP status
++--------------+-------------------+-------+----------+-------------+
+| PEER ADDRESS |     PEER TYPE     | STATE |  SINCE   |    INFO     |
++--------------+-------------------+-------+----------+-------------+
+| 172.17.8.102 | node-to-node mesh | up    | 23:30:04 | Established |
++--------------+-------------------+-------+----------+-------------+
+
+IPv6 BGP status
+No IPv6 peers found.
+```
+
+If you do not see this, please check the following.
+
+- Can your hosts ping each other? There must be IP connectivity between the
+  hosts.
+
+- Your hosts' names must be different. Calico uses hostname as a key in the
+  etcd data, and the etcd data is used to autogenerate the correct BIRD
+  config, so a duplicate hostname will prevent correct BIRD setup.
+
+- There must not be iptables rules, or any kind of firewall, preventing
+  communication between the hosts on TCP port 179. (179 is the BGP port.)
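+
+A quick way to check the last point is to test the BGP port directly
+from one host to another, assuming a netcat variant that supports `-z`
+is installed (172.17.8.102 is the example peer from the output above):
+
+```shell
+# Reports success if a TCP connection to port 179 can be opened.
+nc -zv 172.17.8.102 179
+```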
+
+## Basic checks
+
+Running `ip route` shows what routes have been programmed. Routes from other hosts
+should show that they are programmed by bird.
+
+If your hosts reboot themselves with a message from `locksmithd`, your cached CoreOS Container Linux
+image is out of date. Use `vagrant box update` to pull the new version. We
+recommend doing a `vagrant destroy; vagrant up` to start from a clean slate afterwards.
+
+If you hit issues, please raise tickets. Diags can be collected with the
+`calicoctl node diags` command. This should be run with superuser privileges,
+for example:
+
+    sudo calicoctl node diags
diff --git a/v2.4/usage/troubleshooting/logging.md b/v2.4/usage/troubleshooting/logging.md
new file mode 100644
index 00000000000..410b874b838
--- /dev/null
+++ b/v2.4/usage/troubleshooting/logging.md
@@ -0,0 +1,85 @@
+---
+title: Logging
+---
+
+## The calico-node container
+
+The components in the calico-node container all log to the directories under
+`/var/log/calico` inside the container. By default this is mapped to the
+`/var/log/calico` directory on the host but can be changed by specifying a
+`--log-dir` parameter on the `calicoctl node run` command.
+
+Each component (described below) logs to its own directory. Files are
+automatically rotated, and by default 10 files of 1MB each are kept. The
+current log file is called `current`, and rotated files have `@` followed by a
+timestamp detailing when the file was rotated in [tai64n](http://cr.yp.to/libtai/tai64.html#tai64n) format.
+
+All logging is done using [svlogd](http://smarden.org/runit/svlogd.8.html).
+Each component can be configured by dropping a file named `config` into that
+component's logging directory.
+
+For example, to configure bird to keep only 4 log files of 10KB each:
+
+```shell
+#/var/log/calico/bird/config
+s10000
+n4
+```
+
+svlogd can also be configured to forward logs to syslog, to prefix each line
+and to filter logs. See the [documentation](http://smarden.org/runit/svlogd.8.html)
+for further details.
+
+See the following sub-sections for details on configuring the log level for
+each calico-node component.
+
+### Bird/Bird6
+
+Bird and Bird6 are used for distributing IPv4 and IPv6 routes between
+Calico-enabled hosts. The logs are output in the `bird` and `bird6` sub-directories
+of the calico/node logging directory.
+
+Use the `calicoctl config set logLevel` command on any host to change the
+log level across all Calico nodes, _or_ use the same command with the `--node`
+option to run the command for that specific node. This command affects the
+logging level for both Bird/Bird6 and Felix.
+
+Valid log levels are: none, debug, info, warning, error, critical. For example:
+
+    calicoctl config set logLevel error
+    calicoctl config set logLevel debug --node=Calico-Node-1
+
+### Felix
+
+Felix is the primary Calico agent that runs on each machine that hosts
+endpoints. Felix is responsible for the programming of iptables rules on the
+host. The logs are output in the `felix` sub-directory of the calico/node
+logging directory.
+
+Use the `calicoctl config set logLevel` command on any host to change the
+log level across all Calico nodes, _or_ use the same command with the `--node`
+option to run the command for that specific node. This command affects the
+logging level for both Bird/Bird6 and Felix.
+
+Valid log levels are: none, debug, info, warning, error, critical. For example:
+
+    calicoctl config set logLevel none
+    calicoctl config set logLevel error --node=Calico-Node-1
+
+### confd
+
+The confd agent generates configuration files for Felix and Bird using
+configuration data present in the etcd datastore. The logs are output in the
+`confd` sub-directory of the calico/node logging directory.
+
+By default, the confd logging level is "debug" and cannot be changed without
+editing configuration within the node image.
+
+For more information on the allowed levels, see the
+[documentation](https://github.com/kelseyhightower/confd/blob/master/docs/configuration-guide.md).
+
+## Docker network and IPAM driver
+
+When running Calico as a Docker network plugin, the Calico network driver runs
+inside the calico/node container. The logs are output in the `libnetwork` sub-directory
+of the calico/node logging directory.
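+
+For example, to follow the network driver's current log file (assuming
+the default host log directory described above):
+
+```shell
+tail -f /var/log/calico/libnetwork/current
+```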