Overview

This blueprint describes an end-to-end technology solution for a mobile game deployed across multiple heterogeneous edge nodes reached over different access networks, such as mobile (cellular) and WiFi. It demonstrates how an application can leverage a distributed, multi-access network edge environment to obtain the benefits of edge computing.

This is the first release of this blueprint within the Akraino PCEI family. The focus of this release is to enable open-source Karmada-based cloud federation and KubeEdge/EdgeMesh-based East-West networking functionality.

Multi-cluster Management

Environment Information (CentOS 8.0, 4 vCPUs / 8 GB RAM)

Username & Password: root/Akraino2021

Host Name            | Role                  | IP             | Workload
kubeedge-karamda-001 | Karmada Control Plane | 159.138.22.176 | Karmada controllers, Karmada API server
kubeedge-karamda-002 | Cluster02             | 182.160.12.59  | K8s, Docker
kubeedge-karamda-003 | Cluster01             | 159.138.43.243 | K8s, Docker

Deployment Manual

Karmada control plane deployment

# systemctl disable firewalld

# yum install wget container-selinux -y

# wget https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.6-3.3.el7.x86_64.rpm

# yum erase runc -y

# rpm -ivh containerd.io-1.2.6-3.3.el7.x86_64.rpm

Note: The steps above are not required on CentOS 7.

# update-alternatives --set iptables /usr/sbin/iptables-legacy

# yum install -y yum-utils device-mapper-persistent-data lvm2 && yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo && yum makecache

# yum -y install docker-ce

# systemctl enable docker.service && systemctl start docker

# curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.10.0/kind-linux-amd64

# chmod +x ./kind

# mv ./kind /usr/local/bin/kind

# kind version

# wget https://golang.google.cn/dl/go1.17.0.linux-amd64.tar.gz

# tar -zxvf go1.17.0.linux-amd64.tar.gz -C /usr/local


# vi /etc/profile

Add the following at the end of the file:

# golang env

export GOROOT=/usr/local/go

export GOPATH=/data/gopath

export PATH=$PATH:$GOROOT/bin:$GOPATH/bin


# source /etc/profile

# mkdir -p /data/gopath && cd /data/gopath

# mkdir -p src pkg bin
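To confirm that the Go toolchain is now on the PATH (after sourcing the profile above), a quick check:

# go version

# go env GOROOT GOPATH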

# cat <<EOF > /etc/yum.repos.d/kubernetes.repo

[kubernetes]

name=Kubernetes

baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64

enabled=1

gpgcheck=1

repo_gpgcheck=1

gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg

EOF

# yum makecache

# yum install -y kubectl

# git clone https://github.com/karmada-io/karmada

# cd karmada

# hack/local-up-karmada.sh
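After the script finishes, the Karmada control plane can be sanity-checked by querying its API server. This assumes local-up-karmada.sh writes its kubeconfig to $HOME/.kube/karmada.config, the same path used by the join step below; at this point the cluster list is expected to be empty.

# kubectl --kubeconfig=$HOME/.kube/karmada.config get clusters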

Edge cluster deployment

# systemctl disable firewalld

# yum install wget container-selinux -y

# wget https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.6-3.3.el7.x86_64.rpm

# yum erase runc -y

# rpm -ivh containerd.io-1.2.6-3.3.el7.x86_64.rpm

Note: The steps above are not required on CentOS 7.


# update-alternatives --set iptables /usr/sbin/iptables-legacy

# yum install -y yum-utils device-mapper-persistent-data lvm2 && yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo && yum makecache

# yum -y install docker-ce

# systemctl enable docker.service && systemctl start docker

[root@kubeEdge-cloud ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo

[kubernetes]

name=Kubernetes

baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64

enabled=1

gpgcheck=1

repo_gpgcheck=1

gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg

EOF

# yum makecache

# yum install kubelet-1.21.0-0.x86_64 kubeadm-1.21.0-0.x86_64 kubectl-1.21.0-0.x86_64

[root@kubeEdge-cloud ~]# cat <<EOF >  /etc/sysctl.d/k8s.conf

net.bridge.bridge-nf-call-ip6tables = 1

net.bridge.bridge-nf-call-iptables = 1

vm.swappiness=0

EOF


[root@kubeEdge-cloud ~]# sysctl --system

[root@kubeEdge-cloud ~]# modprobe br_netfilter

[root@kubeEdge-cloud ~]# sysctl -p /etc/sysctl.d/k8s.conf


Load the IPVS-related kernel modules


These modules need to be reloaded after a reboot; they can be added to /etc/rc.local to load automatically at boot (see the example after the modprobe commands below).


[root@ke-cloud ~]# modprobe ip_vs

[root@ke-cloud ~]# modprobe ip_vs_rr

[root@ke-cloud ~]# modprobe ip_vs_wrr

[root@ke-cloud ~]# modprobe ip_vs_sh

[root@ke-cloud ~]# modprobe nf_conntrack_ipv4
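To make the module loading persistent across reboots, the same modprobe calls can be appended to /etc/rc.local, for example (a sketch; on CentOS, /etc/rc.local must also be made executable):

# cat <<EOF >> /etc/rc.local
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack_ipv4
EOF

# chmod +x /etc/rc.local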


Check whether the loading is successful

[root@ke-cloud ~]# lsmod | grep ip_vs

[root@kubeEdge-cloud ~]# kubeadm config images list

[root@kubeEdge-cloud ~]# kubeadm config images pull

1) Get the Docker cgroup driver

# DOCKER_CGROUPS=$(docker info | grep 'Cgroup Driver' | cut -d' ' -f4)

# echo $DOCKER_CGROUPS

cgroupfs

2) Configure the cgroup driver for kubelet

# cat >/etc/sysconfig/kubelet<<EOF

KUBELET_EXTRA_ARGS="--cgroup-driver=$DOCKER_CGROUPS --pod-infra-container-image=k8s.gcr.io/pause:3.5"

EOF

3) Start kubelet

# systemctl daemon-reload

# systemctl enable kubelet && systemctl start kubelet

# kubeadm init --kubernetes-version=v1.21.0 \
    --pod-network-cidr=10.244.0.0/16 \
    --apiserver-advertise-address=192.168.0.238 \
    --ignore-preflight-errors=Swap
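Once kubeadm init completes, set up the kubeconfig for kubectl; these are the standard steps kubeadm prints at the end of its output:

# mkdir -p $HOME/.kube

# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

# chown $(id -u):$(id -g) $HOME/.kube/config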

1) Download the YAML file of the flannel plug-in.

# cd ~ && mkdir flannel && cd flannel

# curl -O https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

2) Start it up.

# kubectl apply -f ~/flannel/kube-flannel.yml
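Once the flannel pods are running, the node should report Ready; this can be checked with:

# kubectl get pods --all-namespaces | grep flannel

# kubectl get nodes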

# hack/create-cluster.sh member1 $HOME/.kube/karmada.config

# go get github.com/karmada-io/karmada/cmd/karmadactl

# karmadactl join cluster01 --cluster-kubeconfig=$HOME/.kube/karmada.config
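Whether the join succeeded can be verified against the Karmada API server; the newly registered cluster should be listed with a Ready condition:

# kubectl --kubeconfig=$HOME/.kube/karmada.config get clusters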

Nginx application deployment
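The manifests for this step are not reproduced here; the sketch below shows one possible approach, assuming the Karmada kubeconfig at $HOME/.kube/karmada.config and a member cluster named cluster01 as used in the join step above. An nginx Deployment is submitted to the Karmada API server together with a PropagationPolicy that selects which member clusters should run it:

# cat <<EOF > nginx-demo.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.19
---
apiVersion: policy.karmada.io/v1alpha1
kind: PropagationPolicy
metadata:
  name: nginx-propagation
spec:
  resourceSelectors:
  - apiVersion: apps/v1
    kind: Deployment
    name: nginx
  placement:
    clusterAffinity:
      clusterNames:
      - cluster01
EOF

# kubectl --kubeconfig=$HOME/.kube/karmada.config apply -f nginx-demo.yaml

# kubectl --kubeconfig=$HOME/.kube/karmada.config get deployment nginx

The propagated Deployment can then be inspected in the member cluster using that cluster's own kubeconfig.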

East-West Edge-to-Edge Networking

Environment Information (CentOS 8.0, 4 vCPUs / 8 GB RAM)

Username & Password: root/Akraino2021

Host Name             | Role      | IP              | Workload
Kubeedge-akraino-0001 | Cloud     | 159.138.149.190 | K8s, Docker, cloudcore
Kubeedge-akraino-0002 | Edge Node | 119.8.35.111    | Docker, edgecore, edgemesh
Kubeedge-akraino-0003 | Edge Node | 182.160.10.130  | Docker, edgecore, edgemesh

Deployment Manual

Deploy cloud nodes (kubeEdge-cloud)

# systemctl disable firewalld

# yum install wget container-selinux -y

# wget https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.6-3.3.el7.x86_64.rpm

# yum erase runc -y

# rpm -ivh containerd.io-1.2.6-3.3.el7.x86_64.rpm

Note: The steps above are not required on CentOS 7.


# update-alternatives --set iptables /usr/sbin/iptables-legacy

# yum install -y yum-utils device-mapper-persistent-data lvm2 && yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo && yum makecache

# yum -y install docker-ce

# systemctl enable docker.service && systemctl start docker

[root@kubeEdge-cloud ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo

[kubernetes]

name=Kubernetes

baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64

enabled=1

gpgcheck=1

repo_gpgcheck=1

gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg

EOF

# yum makecache

# yum install kubelet-1.21.0-0.x86_64 kubeadm-1.21.0-0.x86_64 kubectl-1.21.0-0.x86_64

Note: The current KubeEdge version is not compatible with Kubernetes 1.22, so the latest Kubernetes release cannot be installed here.

[root@kubeEdge-cloud ~]# cat <<EOF >  /etc/sysctl.d/k8s.conf

net.bridge.bridge-nf-call-ip6tables = 1

net.bridge.bridge-nf-call-iptables = 1

vm.swappiness=0

EOF


[root@kubeEdge-cloud ~]# sysctl --system

[root@kubeEdge-cloud ~]# modprobe br_netfilter

[root@kubeEdge-cloud ~]# sysctl -p /etc/sysctl.d/k8s.conf


Load the IPVS-related kernel modules


These modules need to be reloaded after a reboot; they can be added to /etc/rc.local to load automatically at boot.


[root@ke-cloud ~]# modprobe ip_vs

[root@ke-cloud ~]# modprobe ip_vs_rr

[root@ke-cloud ~]# modprobe ip_vs_wrr

[root@ke-cloud ~]# modprobe ip_vs_sh

[root@ke-cloud ~]# modprobe nf_conntrack_ipv4


Check whether loading is successful

[root@ke-cloud ~]# lsmod | grep ip_vs

[root@kubeEdge-cloud ~]# kubeadm config images list

[root@kubeEdge-cloud ~]# kubeadm config images pull

[root@ke-cloud ~]# kubeadm init --kubernetes-version=v1.21.0 \
    --pod-network-cidr=10.244.0.0/16 \
    --apiserver-advertise-address=192.168.0.238 \
    --ignore-preflight-errors=Swap

Prerequisite: Golang must be installed (see the Go installation steps earlier in this document).

[root@kubeEdge-cloud ~]# git clone https://github.com/kubeedge/kubeedge $GOPATH/src/github.com/kubeedge/kubeedge

[root@kubeEdge-cloud ~]# cd $GOPATH/src/github.com/kubeedge/kubeedge

[root@kubeEdge-cloud ~]# make all WHAT=keadm

[root@kubeEdge-cloud ~]# keadm init --advertise-address="192.168.0.238"
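keadm init starts cloudcore on the cloud node; a quick check that the process is running and listening on the default WebSocket port 10000:

# ps -ef | grep cloudcore | grep -v grep

# ss -ntlp | grep 10000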

Deploy edge nodes (EdgeMesh version: release 1.7)

[root@kubeEdge-cloud ~]# keadm gettoken

[root@kubeEdge-cloud ~]# keadm join --cloudcore-ipport=192.168.1.66:10000 --token="<token obtained in the previous step>"
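After edgecore starts on the edge node, it registers with cloudcore and the edge node should appear on the cloud side:

[root@kubeEdge-cloud ~]# kubectl get nodes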


[root@kubeEdge-cloud ~]# vim /etc/kubeedge/config/edgecore.yaml

modules:

  ..

  edgeMesh:

    enable: false

  metaManager:

    metaServer:

      enable: true

..

[root@kubeEdge-cloud ~]# systemctl restart edgecore

[root@kubeEdge-cloud ~]# vim /etc/kubeedge/config/cloudcore.yaml

modules:

  ..

  dynamicController:

    enable: true

..
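The docker build below assumes the EdgeMesh source tree is already present on the build host; it can be obtained from the standalone kubeedge/edgemesh repository (the exact branch or tag to check out for this release is an assumption):

# git clone https://github.com/kubeedge/edgemesh.git

# cd edgemesh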

[root@kubeEdge-cloud ~]# docker build -t edgemesh:0.1 -f build/Dockerfile .

Modify the following configuration file (03-configmap.yaml) so that service-cluster-ip-range matches the cluster's service CIDR.
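If the cluster's service CIDR is not known, on a kubeadm-based cluster it can usually be read from the kube-apiserver flags, for example:

# kubectl cluster-info dump | grep -m 1 service-cluster-ip-range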

[root@kubeEdge-cloud ~]# kubectl apply -f build/kubernetes/edgemesh/03-configmap.yaml

[root@kubeEdge-cloud ~]# kubectl apply -f build/kubernetes/edgemesh/04-daemonset.yaml
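Once the DaemonSet is applied, the edgemesh pods should be scheduled onto the edge nodes; this can be checked from the cloud node without assuming a particular namespace:

# kubectl get pods --all-namespaces -o wide | grep edgemesh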

Deploy ROS application

Add the following configuration at the end of the file to configure the ROS master access address and the slave addresses:

set_base_frame:=tb3_1/base_footprint set_odom_frame:=tb3_1/odom set_map_frame:=tb3_1/map

Then open the graphical scan view of the other room.