Personal notes

  • K8S HA Cluster Deployment Guide

    Node planning

    10.88.29.46 master etcd

    10.88.29.47 master etcd

    10.88.29.48 master etcd

    10.88.31.27 node

    10.88.31.28 node

    10.88.31.30 node

    1、cluster IP range

    10.96.0.1/16

    Certificate generation tools (cfssl)

    wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64

    chmod +x cfssl_linux-amd64

    mv cfssl_linux-amd64 /usr/local/bin/cfssl

    wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64

    chmod +x cfssljson_linux-amd64

    mv cfssljson_linux-amd64 /usr/local/bin/cfssljson

    wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64

    chmod +x cfssl-certinfo_linux-amd64

    mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo

    Certificate planning

    Certificate directory

    /etc/kubernetes/ssl

    mkdir -pv /etc/kubernetes/ssl

    a、Generate the CA config file

    cat > ca-config.json <<EOF
    {
      "signing": {
        "default": {
          "expiry": "87600h"
        },
        "profiles": {
          "kubernetes": {
            "usages": [
              "signing",
              "key encipherment",
              "server auth",
              "client auth"
            ],
            "expiry": "87600h"
          }
        }
      }
    }
    EOF

    b、Create the CA certificate signing request

    cat > ca-csr.json <<EOF
    {
      "CN": "kubernetes",
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "Shanghai",
          "L": "Shanghai",
          "O": "k8s",
          "OU": "System"
        }
      ]
    }
    EOF

    c、Generate the CA certificate and private key

    cfssl gencert -initca ca-csr.json | cfssljson -bare ca

    d、Generate the k8s master certificate

    Note: the IP addresses in the hosts list must be the master node IPs (10.88.29.43 is the HA VIP that clients use to reach the apiservers). 10.96.0.1 is the first IP of this cluster's cluster IP range, which by default is assigned to the kubernetes apiserver service.

    cat > kubernetes-csr.json <<EOF
    {
      "CN": "kubernetes",
      "hosts": [
        "127.0.0.1",
        "10.88.29.46",
        "10.88.29.47",
        "10.88.29.48",
        "10.88.29.43",
        "10.96.0.1",
        "kubernetes",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local"
      ],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "Shanghai",
          "L": "Shanghai",
          "O": "k8s",
          "OU": "System"
        }
      ]
    }
    EOF

    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
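
    To confirm that all the SANs made it into the signed certificate, it can be decoded with the cfssl-certinfo tool installed earlier:

    cfssl-certinfo -cert kubernetes.pem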

    e、Generate the etcd cluster certificate

    cat > etcd-csr.json <<EOF
    {
      "CN": "etcd",
      "hosts": [
        "127.0.0.1",
        "10.88.29.46",
        "10.88.29.47",
        "10.88.29.48"
      ],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "Shanghai",
          "L": "Shanghai",
          "O": "k8s",
          "OU": "System"
        }
      ]
    }
    EOF

    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd

    f、Generate the kubectl admin certificate

    cat > admin-csr.json <<EOF
    {
      "CN": "admin",
      "hosts": [
        "127.0.0.1",
        "10.88.29.46",
        "10.88.29.47",
        "10.88.29.48"
      ],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "Shanghai",
          "L": "Shanghai",
          "O": "system:masters",
          "OU": "System"
        }
      ]
    }
    EOF

    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

    g、Generate the kube-proxy certificate

    cat > kube-proxy-csr.json <<EOF
    {
      "CN": "system:kube-proxy",
      "hosts": [],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "Shanghai",
          "L": "Shanghai",
          "O": "k8s",
          "OU": "System"
        }
      ]
    }
    EOF
    

    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

    Distribute the certificates to the relevant nodes
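
    The original lists no commands for this step; a minimal sketch, assuming passwordless SSH as root and that all the *.pem files were generated in the current directory:

    for host in 10.88.29.46 10.88.29.47 10.88.29.48 10.88.31.27 10.88.31.28 10.88.31.30; do
      ssh root@${host} "mkdir -pv /etc/kubernetes/ssl"
      scp *.pem root@${host}:/etc/kubernetes/ssl/
    done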

    Kernel upgrade

    rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org

    rpm -Uvh https://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm

    yum --enablerepo=elrepo-kernel -y install kernel-lt

    sed -i 's/saved/0/' /etc/default/grub
    grub2-mkconfig -o /boot/grub2/grub.cfg

    shutdown -r now

    Kernel parameter tuning

    cat > /etc/sysctl.d/k8s.conf <<EOF
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    net.ipv4.ip_forward = 1
    vm.swappiness = 0
    net.ipv4.tcp_timestamps = 0
    net.ipv4.tcp_tw_recycle = 0
    net.ipv4.tcp_tw_reuse = 0
    net.ipv4.ip_local_port_range = 10240 60999
    net.ipv4.ip_local_reserved_ports = 30000-34001
    EOF
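
    Load the br_netfilter module (the bridge-nf-call-* keys do not apply without it) and reload the settings:

    modprobe br_netfilter
    sysctl --system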

    Install etcd

    Prepare the installation files
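
    The original doesn't show where the binaries come from; a minimal sketch, assuming the etcd v3.3.10 release tarball from GitHub:

    wget https://github.com/etcd-io/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz
    tar -xzf etcd-v3.3.10-linux-amd64.tar.gz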

    mv etcd-v3.3.10-linux-amd64/etcd* /usr/local/bin/

    Create the etcd.service file

    Environment variable file /etc/etcd/etcd.conf (adjust ETCD_NAME and the IPs per node; the example below is for etcd-host0):

    # [member]
    ETCD_NAME=etcd-host0
    ETCD_DATA_DIR="/service/software/etcd"
    ETCD_LISTEN_PEER_URLS="https://10.88.29.46:2380"
    ETCD_LISTEN_CLIENT_URLS="https://10.88.29.46:2379"
    # [cluster]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.88.29.46:2380"
    ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    ETCD_ADVERTISE_CLIENT_URLS="https://10.88.29.46:2379"

    /usr/lib/systemd/system/etcd.service

    [Unit]
    Description=Etcd Server
    After=network.target
    After=network-online.target
    Wants=network-online.target
    Documentation=https://github.com/coreos

    [Service]
    Type=notify
    WorkingDirectory=/service/software/etcd/
    EnvironmentFile=-/etc/etcd/etcd.conf
    ExecStart=/usr/local/bin/etcd \
    --name ${ETCD_NAME} \
    --cert-file=/etc/kubernetes/ssl/etcd.pem \
    --key-file=/etc/kubernetes/ssl/etcd-key.pem \
    --peer-cert-file=/etc/kubernetes/ssl/etcd.pem \
    --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \
    --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
    --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
    --initial-advertise-peer-urls ${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
    --listen-peer-urls ${ETCD_LISTEN_PEER_URLS} \
    --listen-client-urls ${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
    --advertise-client-urls ${ETCD_ADVERTISE_CLIENT_URLS} \
    --initial-cluster-token ${ETCD_INITIAL_CLUSTER_TOKEN} \
    --initial-cluster etcd-host0=https://10.88.29.46:2380,etcd-host1=https://10.88.29.47:2380,etcd-host2=https://10.88.29.48:2380 \
    --initial-cluster-state new \
    --data-dir=${ETCD_DATA_DIR}
    Restart=on-failure
    RestartSec=5
    LimitNOFILE=65536

    [Install]
    WantedBy=multi-user.target

    Start etcd

    systemctl daemon-reload

    systemctl enable etcd

    systemctl start etcd

    systemctl status etcd

    Check cluster health

    etcdctl \
    --ca-file=/etc/kubernetes/ssl/ca.pem \
    --cert-file=/etc/kubernetes/ssl/etcd.pem \
    --key-file=/etc/kubernetes/ssl/etcd-key.pem \
    cluster-health
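
    cluster-health is an etcd v2 API command; the equivalent check against the v3 API (a sketch, assuming the v3.3 etcdctl installed above) is:

    ETCDCTL_API=3 etcdctl \
    --cacert=/etc/kubernetes/ssl/ca.pem \
    --cert=/etc/kubernetes/ssl/etcd.pem \
    --key=/etc/kubernetes/ssl/etcd-key.pem \
    --endpoints=https://10.88.29.46:2379,https://10.88.29.47:2379,https://10.88.29.48:2379 \
    endpoint health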

    Generate the kubectl config

    export CA_DIR="/etc/kubernetes/ssl"

    kubectl config set-cluster kubernetes \
      --certificate-authority=${CA_DIR}/ca.pem \
      --embed-certs=true \
      --server=https://10.88.29.43:6443

    kubectl config set-credentials admin \
      --client-certificate=${CA_DIR}/admin.pem \
      --embed-certs=true \
      --client-key=${CA_DIR}/admin-key.pem

    kubectl config set-context kubernetes \
      --cluster=kubernetes --user=admin

    kubectl config use-context kubernetes

    Install the master components

    Generate the kubelet bootstrap token

    export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')

    cat > token.csv <<EOF
    ${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
    EOF
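
    kube-apiserver below reads --token-auth-file from /etc/kubernetes/ssl/token.csv, so copy the file there on every master and note the token value for the kubelet bootstrap step later:

    cp token.csv /etc/kubernetes/ssl/
    echo ${BOOTSTRAP_TOKEN}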

    Deploy kube-apiserver

    export CA_DIR="/etc/kubernetes/ssl"

    cat > /usr/lib/systemd/system/kube-apiserver.service <<EOF
    [Unit]
    Description=Kubernetes API Server
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    After=network.target
    After=etcd.service

    [Service]
    ExecStart=/usr/local/bin/kube-apiserver \
    --enable-admission-plugins=NodeRestriction,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota \
    --advertise-address=10.88.29.43 \
    --insecure-port=8080 \
    --bind-address=0.0.0.0 \
    --secure-port=6443 \
    --insecure-bind-address=127.0.0.1 \
    --authorization-mode=Node,RBAC \
    --runtime-config=rbac.authorization.k8s.io/v1 \
    --kubelet-https=true \
    --anonymous-auth=false \
    --enable-bootstrap-token-auth \
    --token-auth-file=/etc/kubernetes/ssl/token.csv \
    --service-cluster-ip-range=10.96.0.1/16 \
    --service-node-port-range=30000-50000 \
    --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
    --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
    --client-ca-file=/etc/kubernetes/ssl/ca.pem \
    --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
    --etcd-quorum-read=true \
    --storage-backend=etcd3 \
    --etcd-cafile=/etc/kubernetes/ssl/ca.pem \
    --etcd-certfile=/etc/kubernetes/ssl/etcd.pem \
    --etcd-keyfile=/etc/kubernetes/ssl/etcd-key.pem \
    --etcd-servers=https://10.88.29.46:2379,https://10.88.29.47:2379,https://10.88.29.48:2379 \
    --enable-swagger-ui=true \
    --allow-privileged=true \
    --apiserver-count=3 \
    --audit-log-maxage=30 \
    --audit-log-maxbackup=3 \
    --audit-log-maxsize=100 \
    --audit-log-path=/var/lib/audit.log \
    --event-ttl=1h \
    --v=2
    Restart=on-failure
    RestartSec=5
    Type=notify
    LimitNOFILE=65536

    [Install]
    WantedBy=multi-user.target
    EOF

    systemctl daemon-reload

    systemctl enable kube-apiserver

    systemctl start kube-apiserver

    systemctl status kube-apiserver
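
    A quick health check against the local insecure port (127.0.0.1:8080 per the unit above):

    curl http://127.0.0.1:8080/healthz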

    Deploy kube-controller-manager

    export CA_DIR="/etc/kubernetes/ssl"

    cat > /usr/lib/systemd/system/kube-controller-manager.service <<EOF
    [Unit]
    Description=Kubernetes Controller Manager
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    After=network.target
    After=etcd.service
    After=kube-apiserver.service

    [Service]
    ExecStart=/usr/local/bin/kube-controller-manager \
    --address=0.0.0.0 \
    --master=http://127.0.0.1:8080 \
    --allocate-node-cidrs=true \
    --service-cluster-ip-range=10.96.0.1/16 \
    --cluster-cidr=10.102.0.0/16 \
    --cluster-name=kubernetes \
    --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
    --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
    --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
    --root-ca-file=/etc/kubernetes/ssl/ca.pem \
    --leader-elect=true \
    --node-cidr-mask-size=24 \
    --node-monitor-period=5s \
    --node-monitor-grace-period=15s \
    --node-startup-grace-period=25s \
    --pod-eviction-timeout=5m0s \
    --v=2 \
    --horizontal-pod-autoscaler-downscale-delay=20m \
    --concurrent-deployment-syncs=20 \
    --concurrent-endpoint-syncs=20 \
    --concurrent-replicaset-syncs=20 \
    --concurrent-resource-quota-syncs=20 \
    --concurrent-service-syncs=20 \
    --concurrent-namespace-syncs=20 \
    --concurrent-serviceaccount-token-syncs=20 \
    --kube-api-burst=50 \
    --kube-api-qps=40
    Restart=on-failure
    RestartSec=5

    [Install]
    WantedBy=multi-user.target
    EOF

    systemctl daemon-reload

    systemctl enable kube-controller-manager

    systemctl start kube-controller-manager

    systemctl status kube-controller-manager

    Deploy kube-scheduler

    cat > /usr/lib/systemd/system/kube-scheduler.service <<EOF
    [Unit]
    Description=Kubernetes Scheduler
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    After=network.target
    After=etcd.service
    After=kube-apiserver.service
    After=kube-controller-manager.service

    [Service]
    ExecStart=/usr/local/bin/kube-scheduler \
    --address=0.0.0.0 \
    --master=http://127.0.0.1:8080 \
    --leader-elect=true \
    --v=2
    Restart=on-failure
    RestartSec=5

    [Install]
    WantedBy=multi-user.target
    EOF

    systemctl daemon-reload

    systemctl enable kube-scheduler

    systemctl start kube-scheduler

    systemctl status kube-scheduler

    Install Docker

    a、Prepare the yum repo

    wget -P /etc/yum.repos.d/ http://mirrors.ustc.edu.cn/docker-ce/linux/centos/docker-ce.repo

    sed -i 's#download.docker.com#mirrors.ustc.edu.cn/docker-ce#g' /etc/yum.repos.d/docker-ce.repo

    b、Install Docker

    yum install https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-selinux-17.03.2.ce-1.el7.centos.noarch.rpm -y

    yum install docker-ce-17.03.2.ce -y

    c、Add the Docker daemon config

    tee /etc/docker/daemon.json <<-'EOF'
    {
        "registry-mirrors": ["https://k6jyerxi.mirror.aliyuncs.com"]
    }
    EOF
    systemctl daemon-reload && systemctl restart docker
    

    d、Modify the docker.service config

    cat > /usr/lib/systemd/system/docker.service <<EOF
    [Unit]
    Description=Docker Application Container Engine
    Documentation=https://docs.docker.com
    After=network.target firewalld.service

    [Service]
    Type=notify
    # the default is not to use systemd for cgroups because the delegate issues still
    # exists and systemd currently does not support the cgroup feature set required
    # for containers run by docker
    ExecStartPost=/sbin/iptables -A FORWARD -s 0.0.0.0/0 -j ACCEPT
    ExecStart=/usr/bin/dockerd --userland-proxy=false --exec-opt native.cgroupdriver=systemd
    ExecReload=/bin/kill -s HUP \$MAINPID
    # Having non-zero Limit*s causes performance problems due to accounting overhead
    # in the kernel. We recommend using cgroups to do container-local accounting.
    LimitNOFILE=infinity
    LimitNPROC=infinity
    LimitCORE=infinity
    # Uncomment TasksMax if your systemd version supports it.
    # Only systemd 226 and above support this version.
    #TasksMax=infinity
    TimeoutStartSec=0
    # set delegate yes so that systemd does not reset the cgroups of docker containers
    Delegate=yes
    # kill only the docker process, not all processes in the cgroup
    KillMode=process

    [Install]
    WantedBy=multi-user.target
    EOF

    systemctl daemon-reload

    systemctl enable docker

    systemctl restart docker

    systemctl status docker

    Deploy kubelet on the nodes

    a、Bind the kubelet-bootstrap user role

    # The user matches the one configured in token.csv on the master; this binding only needs to be created once.

    kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

    b、Create the kubelet bootstrap.kubeconfig

    # Create the kubelet bootstrap.kubeconfig file; the token and username must match the token.csv created on the API server (see the sketch after these commands).

    kubectl config set-cluster kubernetes \
      --certificate-authority=/etc/kubernetes/ssl/ca.pem \
      --embed-certs=true \
      --server=https://10.88.29.43:6443 \
      --kubeconfig=/etc/kubernetes/bootstrap.kubeconfig

    kubectl config set-credentials kubelet-bootstrap \
      --token=5df4ad30059e58363e25e8602742390b \
      --kubeconfig=/etc/kubernetes/bootstrap.kubeconfig

    kubectl config set-context default \
      --cluster=kubernetes \
      --user=kubelet-bootstrap \
      --kubeconfig=/etc/kubernetes/bootstrap.kubeconfig

    kubectl config use-context default --kubeconfig=/etc/kubernetes/bootstrap.kubeconfig
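
    The --token above must be the first field of the token.csv generated on the master. Rather than hard-coding it, it can be read from the distributed file, assuming token.csv was copied to /etc/kubernetes/ssl/:

    export BOOTSTRAP_TOKEN=$(awk -F',' '{print $1}' /etc/kubernetes/ssl/token.csv)
    kubectl config set-credentials kubelet-bootstrap \
      --token=${BOOTSTRAP_TOKEN} \
      --kubeconfig=/etc/kubernetes/bootstrap.kubeconfig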

    c、Deploy kubelet.service

    export NODE_IP=$(ifconfig | grep "inet 10.88" | awk '{print $2}')

    mkdir -pv /var/lib/kubelet

    cat > /usr/lib/systemd/system/kubelet.service <<EOF
    [Unit]
    Description=Kubernetes Kubelet Server
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    After=docker.service
    Requires=docker.service

    [Service]
    WorkingDirectory=/var/lib/kubelet
    ExecStart=/usr/local/bin/kubelet \
    --address=$NODE_IP \
    --hostname-override=$NODE_IP \
    --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 \
    --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
    --kubeconfig=/etc/kubernetes/kubelet.conf \
    --cert-dir=/etc/kubernetes/ssl/ \
    --network-plugin=cni \
    --cni-conf-dir=/etc/cni/net.d \
    --cni-bin-dir=/opt/cni/bin \
    --hairpin-mode hairpin-veth \
    --allow-privileged=true \
    --fail-swap-on=false \
    --logtostderr=true \
    --cgroup-driver=systemd \
    --cluster-dns=10.96.0.1 \
    --cluster-domain=cluster.local
    Restart=on-failure
    RestartSec=5

    [Install]
    WantedBy=multi-user.target
    EOF

    systemctl daemon-reload

    systemctl enable kubelet

    systemctl start kubelet

    systemctl status kubelet

    d、Approve the node from the master

    kubectl get csr

    kubectl certificate approve node-csr-g61myOSl2MvYCH8fA9CzQHTqVXhvNjtclEZ0m-p_WuU
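
    Once the CSR is approved, the node should register within a few seconds:

    kubectl get nodes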

    Once approved, the node automatically generates its kubeconfig and keys:

    # config file
    ls /etc/kubernetes/kubelet.config
    /etc/kubernetes/kubelet.config

    Deploy kube-proxy

    a、Generate the kube-proxy kubeconfig

    kubectl config set-cluster kubernetes \
      --certificate-authority=/etc/kubernetes/ssl/ca.pem \
      --embed-certs=true \
      --server=https://10.88.29.43:6443 \
      --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

    kubectl config set-credentials kube-proxy \
      --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
      --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
      --embed-certs=true \
      --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

    kubectl config set-context default \
      --cluster=kubernetes \
      --user=kube-proxy \
      --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

    kubectl config use-context default --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

    b、Create kube-proxy.service

    export NODE_IP=$(ifconfig | grep "inet 10.88" | awk '{print $2}')

    mkdir -pv /var/lib/kube-proxy

    cat > /usr/lib/systemd/system/kube-proxy.service <<EOF
    [Unit]
    Description=Kubernetes Kube-Proxy Server
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    After=network.target

    [Service]
    WorkingDirectory=/var/lib/kube-proxy
    ExecStart=/usr/local/bin/kube-proxy \
    --proxy-mode=iptables \
    --bind-address=$NODE_IP \
    --hostname-override=$NODE_IP \
    --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \
    --logtostderr=true \
    --conntrack-max-per-core=196608 \
    --conntrack-tcp-timeout-close-wait=10m \
    --conntrack-tcp-timeout-established=20m \
    --cluster-cidr=10.102.0.0/16
    Restart=on-failure
    RestartSec=5
    LimitNOFILE=65536

    [Install]
    WantedBy=multi-user.target
    EOF

    systemctl daemon-reload

    systemctl enable kube-proxy

    systemctl restart kube-proxy

    systemctl status kube-proxy

    Deploy the Calico network

    Apply the Calico manifests

    kubectl apply -f rbac.yaml -f calico.yaml
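
    The rbac.yaml and calico.yaml manifests are not reproduced here. After applying them, check that a Calico pod is running on every node:

    kubectl get pods -n kube-system -o wide | grep calico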

    Deploy the dashboard

    a、Prepare the dashboard YAML files

    cat>kubernetes-dashboard.yaml<<EOF

    EOF

    cat> kubernetes-dashboard-admin.rbac.yaml <<EOF

    EOF

    b、Deploy the dashboard

    kubectl apply -f kubernetes-dashboard.yaml -f kubernetes-dashboard-admin.rbac.yaml
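
    A quick check that the dashboard pod and service came up (the exact names depend on the manifests, whose contents are not reproduced above):

    kubectl get pods,svc -n kube-system | grep dashboard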

    K8S cluster high availability

    a、HAproxy+Keepalived-VIP
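
    The original gives no configuration for this step; a minimal haproxy.cfg sketch, assuming HAProxy and Keepalived run on a separate load-balancer pair holding the VIP 10.88.29.43 (the address used as --server throughout this guide) and forwarding TCP to the three apiservers:

    defaults
        mode tcp
        timeout connect 5s
        timeout client 30s
        timeout server 30s

    frontend k8s-apiserver
        bind 10.88.29.43:6443
        default_backend k8s-masters

    backend k8s-masters
        balance roundrobin
        server master1 10.88.29.46:6443 check
        server master2 10.88.29.47:6443 check
        server master3 10.88.29.48:6443 check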

    b、Query cluster component status
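
    On any master:

    kubectl get componentstatuses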
