flanneld 100% 2131 2.1KB/s 00:00
+ NEED_RECONFIG_DOCKER=true
+ CNI_PLUGIN_CONF=
+ EXTRA_SANS=(IP:${MASTER_IP} IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1 DNS:kubernetes DNS:kubernetes.default DNS:kubernetes.default.svc DNS:kubernetes.default.svc.cluster.local)
++ tr ' ' ,
++ echo IP:10.204.22.202 IP:192.168.3.1 DNS:kubernetes DNS:kubernetes.default DNS:kubernetes.default.svc DNS:kubernetes.default.svc.cluster.local
+ EXTRA_SANS=IP:10.204.22.202,IP:192.168.3.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local
+ BASH_DEBUG_FLAGS=true
+ [[ false == \t\r\u\e ]]
+ ssh -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=ERROR -t karan.singhal@10.204.22.202 '
set +e
true
source ~/kube/util.sh
setClusterInfo
create-etcd-opts '\''10.204.22.202'\''
create-kube-apiserver-opts '\''192.168.3.0/24'\'' '\''NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,SecurityContextDeny'\'' '\''30000-32767'\'' '\''10.204.22.202'\''
create-kube-controller-manager-opts '\''10.204.22.202'\''
create-kube-scheduler-opts
create-kubelet-opts '\''10.204.22.202'\'' '\''10.204.22.202'\'' '\''192.168.3.10'\'' '\''cluster.local'\'' '\'''\'' '\'''\''
create-kube-proxy-opts '\''10.204.22.202'\'' '\''10.204.22.202'\'' '\'''\''
create-flanneld-opts '\''127.0.0.1'\'' '\''10.204.22.202'\''
FLANNEL_OTHER_NET_CONFIG= sudo -E -p '\''[sudo] password to start master: '\'' -- /bin/bash -ce '\''
cp ~/kube/default/* /etc/default/
cp ~/kube/init_conf/* /etc/init/
cp ~/kube/init_scripts/* /etc/init.d/
groupadd -f -r kube-cert
DEBUG='\''false'\'' ~/kube/make-ca-cert.sh "10.204.22.202" "IP:10.204.22.202,IP:192.168.3.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local"
mkdir -p /opt/bin/
cp ~/kube/master/* /opt/bin/
cp ~/kube/minion/* /opt/bin/
service etcd start
echo Deploying' master and node on machine....................................... true 'failed
if true; then FLANNEL_NET="172.16.0.0/16" KUBE_CONFIG_FILE="./../cluster/../cluster/ubuntu/config-default.sh" DOCKER_OPTS="" ~/kube/reconfDocker.sh ai; fi
'\'''
usage: dzdo -K | -k | -V
usage: dzdo -v [-AknS] [-p prompt]
usage: dzdo -l[l] [-AknS] [-g groupname|#gid] [-p prompt] [-u user name|#uid] [-g groupname|#gid] [command]
usage: dzdo [-AbHknPS] [-C fd] [-g groupname|#gid] [-p prompt] [-u user name|#uid] [-g groupname|#gid] [VAR=value] [-i|-s] []
usage: dzdo -e [-AknS] [-C fd] [-g groupname|#gid] [-p prompt] [-u user name|#uid] file ...
usage: dzdo -h [user@] [-W,ssh-option]
usage: dzdo --help
There are a few places in the kube-up Ubuntu scripts where `sudo -E` is used. If you've aliased `sudo` to `dzdo`, I'm guessing that won't work (I don't see a `-E` option in that usage output).
You can try to get those scripts to work for you locally, and then feel free to send a PR.
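One possible direction for a local experiment (purely a sketch, not something I've tested with Centrify's dzdo) is to drop the unsupported `-E` and instead hand the variables the remote command needs to dzdo explicitly as `VAR=value` arguments, which the usage output above does appear to accept. The variable names and commands below are just the ones visible in the trace; adjust them to whatever your copy of the script actually passes:

#!/bin/bash
# Sketch only: rework the environment-preserving "sudo -E ... /bin/bash -ce"
# call from the kube-up Ubuntu scripts into a dzdo call that passes each
# needed variable explicitly (dzdo's usage lists "[VAR=value]" but no -E).

MASTER_IP="10.204.22.202"
FLANNEL_NET="172.16.0.0/16"
KUBE_CONFIG_FILE="./../cluster/../cluster/ubuntu/config-default.sh"
DOCKER_OPTS=""

# Original form from the trace (fails when sudo is really dzdo):
#   sudo -E -p '[sudo] password to start master: ' -- /bin/bash -ce '...'

# Possible dzdo form: same prompt, variables passed explicitly instead of -E.
dzdo -p '[dzdo] password to start master: ' \
  FLANNEL_NET="${FLANNEL_NET}" \
  KUBE_CONFIG_FILE="${KUBE_CONFIG_FILE}" \
  DOCKER_OPTS="${DOCKER_OPTS}" \
  /bin/bash -ce '
    cp ~/kube/default/* /etc/default/
    cp ~/kube/init_conf/* /etc/init/
    cp ~/kube/init_scripts/* /etc/init.d/
    service etcd start
  '

Whether dzdo then forwards those assignments into the child environment depends on your Centrify policy, so treat this strictly as a starting point for local tinkering.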