
December 2023

vim

Building Vim 9

Download the latest source and extract it. Some Vim plugins need Python 3, so build with Python 3 support. If Vim was built before, run make uninstall first. It is best to export LDFLAGS="-rdynamic" before configuring; otherwise the Python 3 headers may not be found.

  • Configure
./configure --with-features=huge \
--enable-multibyte \
--enable-python3interp=yes \
--with-python3-config-dir=/usr/local/lib/python3.12/config-3.12-x86_64-linux-gnu/ \
--with-python3-command=python3 \
--enable-cscope \
--enable-gui=auto \
--enable-gtk3-check \
--enable-fontset \
--enable-largefile \
--disable-netbeans \
--enable-fail-if-missing \
--prefix=/usr/local
  • Build and install
make && make install
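
After installation, a quick check confirms that Python 3 support was compiled in:

# '+python3' in the output means the Python 3 interpreter was linked in
vim --version | grep -i python3
# or from inside vim:  :echo has('python3')  prints 1 when available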

Setting up Elasticsearch with Docker

docker-compose.yml

version: '2.2'
services:
  es01:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.12.0
    container_name: es01
    environment:
      - "ES_JAVA_OPTS=-Xms128m -Xmx128m"
      - bootstrap.memory_lock=true
      - cluster.initial_master_nodes=es01,es02
      - cluster.name=mycluster
      - node.name=es01
      - discovery.seed_hosts=es02,es03,es04
    storage_opt:
      size: '5G'
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - data01:/usr/share/elasticsearch/data
    ports:
      - 9200:9200
    networks:
      - elastic
  es02:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.12.0
    container_name: es02
    environment:
      - "ES_JAVA_OPTS=-Xms128m -Xmx128m"
      - bootstrap.memory_lock=true
      - cluster.initial_master_nodes=es01,es02
      - cluster.name=mycluster
      - node.name=es02
      - discovery.seed_hosts=es01,es03,es04
    storage_opt:
      size: '5G'
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - data02:/usr/share/elasticsearch/data
    networks:
      - elastic
  es03:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.12.0
    container_name: es03
    environment:
      - "ES_JAVA_OPTS=-Xms128m -Xmx128m"
      - bootstrap.memory_lock=true
      - cluster.initial_master_nodes=es01,es02
      - cluster.name=mycluster
      - node.name=es03
      - discovery.seed_hosts=es01,es02,es04
    storage_opt:
      size: '5G'
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - data03:/usr/share/elasticsearch/data
    networks:
      - elastic
  es04:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.12.0
    container_name: es04
    environment:
      - "ES_JAVA_OPTS=-Xms128m -Xmx128m"
      - bootstrap.memory_lock=true
      - cluster.initial_master_nodes=es01,es02
      - cluster.name=mycluster
      - node.name=es04
      - discovery.seed_hosts=es01,es02,es03
    storage_opt:
      size: '5G'
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - data04:/usr/share/elasticsearch/data
    networks:
      - elastic
  kibana7.12.0:
    image: docker.elastic.co/kibana/kibana:7.12.0
    container_name: kibana7.12.0
    environment:
      - ELASTICSEARCH_HOSTS=http://es01:9200
    ports:
        - 5601:5601
    networks:
      - elastic

volumes:
  data01:
    driver: local
  data02:
    driver: local
  data03:
    driver: local
  data04:
    driver: local

networks:
  elastic:
    driver: bridge
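
To bring the cluster up and verify it formed, something like the following works. Note that Elasticsearch's bootstrap checks require vm.max_map_count of at least 262144 on the host:

sudo sysctl -w vm.max_map_count=262144   # required by the ES bootstrap checks
docker-compose up -d
# wait a moment, then check cluster health; expect "number_of_nodes" : 4
curl http://localhost:9200/_cluster/health?pretty
# Kibana is available at http://localhost:5601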

Setting up RocketMQ with Docker

Prerequisites

mkdir -p data/logs data/store
mkdir -p data/brokerconf   # holds the broker.conf mounted in docker-compose.yml below
chmod -R u+x data/logs
chmod -R u+x data/store

Configuration files

broker.conf

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.


# Name of the cluster this broker belongs to
brokerClusterName=DefaultCluster

# Broker name. Note this differs per config file: use broker-a
# in broker-a.properties and broker-b in broker-b.properties
brokerName=broker-a

# 0 means Master, > 0 means Slave
brokerId=0

# NameServer addresses, separated by semicolons
# namesrvAddr=rocketmq-nameserver1:9876;rocketmq-nameserver2:9876

# Advertised IP. If Docker reports com.alibaba.rocketmq.remoting.exception.RemotingConnectException: connect to <192.168.0.120:10909> failed,
# either call producer.setVipChannelEnabled(false) in the client, or set brokerIP1 to the host IP instead of the container-internal IP.
# Change this to your own host IP
brokerIP1=192.168.0.111

# Number of queues created when a topic is auto-created on send
defaultTopicQueueNums=4

# Whether the broker may auto-create topics; recommended on in development, off in production
autoCreateTopicEnable=true

# Whether the broker may auto-create subscription groups; recommended on in development, off in production
autoCreateSubscriptionGroup=true

# Port the broker listens on
listenPort=10911

# Time of day to delete expired files; default is 4 AM
deleteWhen=04

# File retention time in hours; default is 48
fileReservedTime=120

# Size of each commitLog file; default is 1 GB
mapedFileSizeCommitLog=1073741824

# Entries per ConsumeQueue file; default is 300,000, tune for your workload
mapedFileSizeConsumeQueue=300000

# destroyMapedFileIntervalForcibly=120000
# redeleteHangedFileInterval=120000
# Disk usage ratio threshold for cleanup
diskMaxUsedSpaceRatio=88
# Storage root path
# storePathRootDir=/home/ztztdata/rocketmq-all-4.1.0-incubating/store
# commitLog storage path
# storePathCommitLog=/home/ztztdata/rocketmq-all-4.1.0-incubating/store/commitlog
# Consume queue storage path
# storePathConsumeQueue=/home/ztztdata/rocketmq-all-4.1.0-incubating/store/consumequeue
# Message index storage path
# storePathIndex=/home/ztztdata/rocketmq-all-4.1.0-incubating/store/index
# checkpoint file path
# storeCheckpoint=/home/ztztdata/rocketmq-all-4.1.0-incubating/store/checkpoint
# abort file path
# abortFile=/home/ztztdata/rocketmq-all-4.1.0-incubating/store/abort
# Maximum message size in bytes
# maxMessageSize=65536
maxMessageSize=4194384

# flushCommitLogLeastPages=4
# flushConsumeQueueLeastPages=2
# flushCommitLogThoroughInterval=10000
# flushConsumeQueueThoroughInterval=60000

# Broker role
# - ASYNC_MASTER: master with asynchronous replication
# - SYNC_MASTER: master with synchronous dual-write
# - SLAVE
brokerRole=ASYNC_MASTER

# Flush mode
# - ASYNC_FLUSH: asynchronous flush to disk
# - SYNC_FLUSH: synchronous flush to disk
flushDiskType=ASYNC_FLUSH

# Send-message thread pool size
# sendMessageThreadPoolNums=128
# Pull-message thread pool size
# pullMessageThreadPoolNums=128

# Storage root path
# storePathRootDir=/opt/store/root
# Consume queue storage path
# storePathConsumeQueue=/opt/store/consumequeue
# Message index storage path
# storePathIndex=/opt/store/index
# checkpoint file path
# storeCheckpoint=/opt/store/checkpoint
# abort file path
# abortFile=/opt/store/abort

# commitLog storage path
# storePathCommitLog=/opt/logs/commitlog

docker-compose.yml

version: '3.5'
services:
  rmqnamesrv:
    image: foxiswho/rocketmq:server
    container_name: rmqnamesrv
    ports:
      - 9876:9876
    volumes:
      # - ./data/logs:/opt/logs
      # - ./data/store:/opt/store
      - ./data/logs/commitlog:/opt/logs/commitlog
      - ./data/store/abort:/opt/store/abort
      - ./data/store/checkpoint:/opt/store/checkpoint
      - ./data/store/consumequeue:/opt/store/consumequeue
      - ./data/store/index:/opt/store/index
      - ./data/store/root:/opt/store/root
    networks:
        rmq:
          aliases:
            - rmqnamesrv

  rmqbroker:
    image: foxiswho/rocketmq:broker
    container_name: rmqbroker
    ports:
      - 10909:10909
      - 10911:10911
    volumes:
      # - ./data/logs:/opt/logs
      # - ./data/store:/opt/store
      - ./data/logs/commitlog:/opt/logs/commitlog
      - ./data/store/abort:/opt/store/abort
      - ./data/store/checkpoint:/opt/store/checkpoint
      - ./data/store/consumequeue:/opt/store/consumequeue
      - ./data/store/index:/opt/store/index
      - ./data/store/root:/opt/store/root
      - ./data/brokerconf/broker.conf:/etc/rocketmq/broker.conf
    environment:
        NAMESRV_ADDR: "rmqnamesrv:9876"
        JAVA_OPTS: " -Duser.home=/opt"
        JAVA_OPT_EXT: "-server -Xms128m -Xmx128m -Xmn128m"
    command: mqbroker -c /etc/rocketmq/broker.conf
    depends_on:
      - rmqnamesrv
    networks:
      rmq:
        aliases:
          - rmqbroker

  rmqconsole:
    image: styletang/rocketmq-console-ng
    container_name: rmqconsole
    ports:
      - 18080:8080
    environment:
        JAVA_OPTS: "-Drocketmq.namesrv.addr=rmqnamesrv:9876 -Dcom.rocketmq.sendMessageWithVIPChannel=false"
    depends_on:
      - rmqnamesrv
    networks:
      rmq:
        aliases:
          - rmqconsole

networks:
  rmq:
    name: rmq
    driver: bridge
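
After docker-compose up -d, the console at http://localhost:18080 should list the broker. The cluster can also be queried with mqadmin from inside the broker container; the RocketMQ install path inside the foxiswho image is an assumption here:

docker-compose up -d
# list the brokers registered with the nameserver
# (adjust the mqadmin path to wherever the image installs RocketMQ)
docker exec rmqbroker sh -c 'cd /opt/rocketmq*/bin && ./mqadmin clusterList -n rmqnamesrv:9876'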

Fonts

Installing fonts on Linux

Steps

1. Download the fonts, copy them into the fonts directory, and cd into it

cp *.ttf /usr/share/fonts
cd /usr/share/fonts

2. Font installer and index files

sudo apt install ttf-mscorefonts-installer
# (creates the fonts.scale file, which indexes scalable fonts)
sudo mkfontscale

# (creates the fonts.dir file, which indexes all fonts for X)
sudo mkfontdir

3. Build the font cache

sudo apt install fontconfig

# rebuild the font cache
sudo fc-cache -fv

# list installed fonts
fc-list
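
fc-list can also filter the output; for example, to list fonts that cover Chinese, or to look for a particular family (the family name below is just an example):

fc-list :lang=zh
fc-list | grep -i "dejavu"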

Installing Oh My Zsh

Reference

https://github.com/ohmyzsh/ohmyzsh
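
The one-line installer from the README (current at the time of writing):

sh -c "$(curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)"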

Plugin reference

https://github.com/ohmyzsh/ohmyzsh/wiki/Plugins
  • Third-party plugins

1. zsh-autosuggestions (suggests commands as you type)

git clone https://github.com/zsh-users/zsh-autosuggestions $ZSH_CUSTOM/plugins/zsh-autosuggestions

2. Syntax highlighting

git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-syntax-highlighting

3. autojump

sudo apt-get install autojump
  • My plugin configuration
plugins=(
        git
        z
        autojump
        zsh-autosuggestions
        zsh-syntax-highlighting
        sudo
)
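
After editing the plugin list in ~/.zshrc, reload the configuration:

source ~/.zshrc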

Theme reference

https://github.com/ohmyzsh/ohmyzsh/wiki/Themes

Third-party theme reference

powerlevel10k

https://github.com/romkatv/powerlevel10k
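
Per the powerlevel10k README, clone it as a custom theme and enable it in ~/.zshrc:

git clone --depth=1 https://github.com/romkatv/powerlevel10k.git ${ZSH_CUSTOM:-$HOME/.oh-my-zsh/custom}/themes/powerlevel10k
# then set in ~/.zshrc:
#   ZSH_THEME="powerlevel10k/powerlevel10k"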

Setting up Git on CentOS 7

Installing the latest Git

If installing Git with yum from the EPEL repository still does not give you the latest version, you can compile Git from source. The steps to build the latest Git on CentOS 7:

  1. Open a terminal and log in as root.

  2. Install the required build tools and dependencies:

yum install curl-devel expat-devel gettext-devel openssl-devel zlib-devel gcc perl-ExtUtils-MakeMaker -y
  3. Go to the Git download page (https://git-scm.com/downloads) and copy the download link for the latest release.

  4. Download the source tarball (replace the URL with the link copied in step 3):

curl -LO https://github.com/git/git/archive/refs/tags/v2.40.1.tar.gz
  5. Extract the tarball:
tar xzf *.gz
  6. Enter the source directory:
cd git-*
  7. Build and install Git:
make prefix=/usr/local/git all
make prefix=/usr/local/git install
  8. Add Git to the PATH environment variable:
echo 'export PATH=$PATH:/usr/local/git/bin' >> ~/.bashrc
source ~/.bashrc
  9. Verify the installation:
git --version

If the version shown is the latest, Git has been installed successfully on CentOS 7. Note that because CentOS 7 ships fairly old software, you may need to upgrade other dependencies to satisfy Git's requirements. If the build fails, check the error messages and resolve them.

git

Git environment

Git on Windows shows the current branch name in the prompt; the following makes Linux do the same.

vim ~/.bashrc

  • Scheme 1:
function git-branch-name {
  git symbolic-ref HEAD 2>/dev/null | cut -d"/" -f 3
}
function git-branch-prompt {
  local branch=`git-branch-name`
  if [ "$branch" ]; then printf " [%s] " "$branch"; fi
}

if [ "$color_prompt" = yes ]; then
    PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]$(git-branch-prompt)\$ '
else
    PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w$(git-branch-prompt)\$ '
fi
  • Scheme 2: show the branch name

parse_git_branch() {
    git branch --no-color 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/(\1)/'
}

PS1="\[\033[36m\]\u@\h\[\033[m\]:\[\033[32m\]\w\[\033[m\]\$(parse_git_branch)\$ "
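
With either scheme, apply the change to the current shell:

source ~/.bashrc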

Colors

Config file

vim /home/[username]/.gitconfig

[color]
    status = auto
    diff = auto
    ui = true
    interactive = auto
    branch = auto

Changes take effect immediately.

Setting via commands

git config --global color.status auto
git config --global color.diff auto
git config --global color.branch auto
git config --global color.ui true
git config --global color.interactive auto

1 Remote operations

1.1 Show remote repository URLs

git remote -v

1.2 Local repository created first

If the local repository was created before the remote one, the first pull fails; force it with:

git pull <remote-repo-url> --allow-unrelated-histories

1.3 Set the upstream branch for the current branch

git branch --set-upstream-to=origin/<remote-branch>

1.4 Delete a remote branch

git push origin --delete <remote-branch>

1.5 Roll back a remote branch

  1. Reset to the commit to roll back to
git reset --hard <commit>
  2. Push the rollback
# option 1
git push origin HEAD --force
# option 2
git push --force

1.6 Revert as a new commit, avoiding conflicts when the bad commit has already been pushed

git revert <commit>     # the rollback is recorded as a regular commit
git push                # pushed as a normal commit, so no conflict on the server

1.7 Change the remote URL

git remote set-url origin <url>

or

git remote remove origin
git remote add origin <url>

1.8 Clone a single branch

git clone -b <branch> <remote_repo>
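
Note that -b only selects which branch is checked out; the clone still fetches all branches. To limit the clone to that one branch:

git clone -b <branch> --single-branch <remote_repo>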

2 Local operations

2.1 Fetch all branches of another repository

git fetch <repo-url>

2.2 Apply a commit from another branch onto the current branch

git cherry-pick <commit>  # a branch name also works (picks that branch's tip)
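
cherry-pick also accepts a commit range; note that A..B excludes A itself:

git cherry-pick A..B    # every commit after A, up to and including B
git cherry-pick A^..B   # include A as well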

3 Tagging

3.1 List tags
$ git tag
3.2 Create a tag
$ git tag -a v1.0 -m "xxx"
3.3 List tags matching a prefix
$ git tag -l "v1.0*"
3.4 Show a tag's details
$ git show v1.0
3.5 Delete a tag
$ git tag -d v1.0
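
Tags are not pushed by default; push them explicitly:

$ git push origin v1.0      # push a single tag
$ git push origin --tags    # push all tags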
git submodule

Add a submodule

git submodule add <submodule> <path>

Pull the whole project (including submodules)

git submodule update --init --recursive
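
Cloning and initializing submodules can also be done in one step:

git clone --recurse-submodules <remote_repo>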

Remove a submodule

git submodule deinit <path>
git rm <path>

If update has already been run, also delete the submodule's entry under .git/modules.

Replace a submodule

Remove it first, then add the new one:

  1. git submodule deinit -f {submodule_name}
  2. Remove the corresponding entry from .gitmodules
  3. rm -rf .git/modules/{submodule_name}
  4. Remove the corresponding entry from .git/config, if present
  5. Add the new submodule

Change a submodule's path

git mv has supported this since git 1.8.5; removing and re-adding the submodule is still the recommended route.

git mv {oldPath} {newPath}
# if the submodule name should stay consistent with the path:
mv .git/modules/{oldPath} .git/modules/{newPath}

Quick Hadoop environment with docker-compose

I only use this for quick local testing of Hadoop features, and it is entirely sufficient for that.

I tried several versions; this one is confirmed to work.

Based on https://cloud.tencent.com/developer/beta/article/1150829

Docker images

docker pull bde2020/hadoop-namenode:1.1.0-hadoop2.7.1-java8
docker pull bde2020/hadoop-datanode:1.1.0-hadoop2.7.1-java8
docker pull bde2020/hadoop-resourcemanager:1.1.0-hadoop2.7.1-java8
docker pull bde2020/hadoop-historyserver:1.1.0-hadoop2.7.1-java8
docker pull bde2020/hadoop-nodemanager:1.1.0-hadoop2.7.1-java8

hadoop.env

CORE_CONF_fs_defaultFS=hdfs://namenode:8020
CORE_CONF_hadoop_http_staticuser_user=root
CORE_CONF_hadoop_proxyuser_hue_hosts=*
CORE_CONF_hadoop_proxyuser_hue_groups=*

HDFS_CONF_dfs_webhdfs_enabled=true
HDFS_CONF_dfs_permissions_enabled=false

YARN_CONF_yarn_log___aggregation___enable=true
YARN_CONF_yarn_resourcemanager_recovery_enabled=true
YARN_CONF_yarn_resourcemanager_store_class=org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore
YARN_CONF_yarn_resourcemanager_fs_state___store_uri=/rmstate
YARN_CONF_yarn_nodemanager_remote___app___log___dir=/app-logs
YARN_CONF_yarn_log_server_url=http://historyserver:8188/applicationhistory/logs/
YARN_CONF_yarn_timeline___service_enabled=true
YARN_CONF_yarn_timeline___service_generic___application___history_enabled=true
YARN_CONF_yarn_resourcemanager_system___metrics___publisher_enabled=true
YARN_CONF_yarn_resourcemanager_hostname=resourcemanager
YARN_CONF_yarn_timeline___service_hostname=historyserver
YARN_CONF_yarn_resourcemanager_address=resourcemanager:8032
YARN_CONF_yarn_resourcemanager_scheduler_address=resourcemanager:8030
YARN_CONF_yarn_resourcemanager_resource___tracker_address=resourcemanager:8031

docker-compose.yml

Port 8020 is exposed below for debugging.

version: "2"

services:
  namenode:
    image: bde2020/hadoop-namenode:1.1.0-hadoop2.7.1-java8
    container_name: namenode
    ports:
      - 8020:8020
    volumes:
      - hadoop_namenode:/hadoop/dfs/name
    environment:
      - CLUSTER_NAME=test
    env_file:
      - ./hadoop.env

  resourcemanager:
    image: bde2020/hadoop-resourcemanager:1.1.0-hadoop2.7.1-java8
    container_name: resourcemanager
    depends_on:
      - namenode
      - datanode1
      - datanode2
      - datanode3
    env_file:
      - ./hadoop.env

  historyserver:
    image: bde2020/hadoop-historyserver:1.1.0-hadoop2.7.1-java8
    container_name: historyserver
    depends_on:
      - namenode
      - datanode1
      - datanode2
      - datanode3
    volumes:
      - hadoop_historyserver:/hadoop/yarn/timeline
    env_file:
      - ./hadoop.env

  nodemanager1:
    image: bde2020/hadoop-nodemanager:1.1.0-hadoop2.7.1-java8
    container_name: nodemanager1
    depends_on:
      - namenode
      - datanode1
      - datanode2
      - datanode3
    env_file:
      - ./hadoop.env

  datanode1:
    image: bde2020/hadoop-datanode:1.1.0-hadoop2.7.1-java8
    container_name: datanode1
    depends_on:
      - namenode
    volumes:
      - hadoop_datanode1:/hadoop/dfs/data
    env_file:
      - ./hadoop.env

  datanode2:
    image: bde2020/hadoop-datanode:1.1.0-hadoop2.7.1-java8
    container_name: datanode2
    depends_on:
      - namenode
    volumes:
      - hadoop_datanode2:/hadoop/dfs/data
    env_file:
      - ./hadoop.env

  datanode3:
    image: bde2020/hadoop-datanode:1.1.0-hadoop2.7.1-java8
    container_name: datanode3
    depends_on:
      - namenode
    volumes:
      - hadoop_datanode3:/hadoop/dfs/data
    env_file:
      - ./hadoop.env

volumes:
  hadoop_namenode:
  hadoop_datanode1:
  hadoop_datanode2:
  hadoop_datanode3:
  hadoop_historyserver:

Start

docker-compose up -d

Check the status

$ docker-compose ps
NAME                COMMAND                  SERVICE             STATUS              PORTS
datanode1           "/entrypoint.sh /run…"   datanode1           running (healthy)   50075/tcp
datanode2           "/entrypoint.sh /run…"   datanode2           running (healthy)   50075/tcp
datanode3           "/entrypoint.sh /run…"   datanode3           running (healthy)   50075/tcp
historyserver       "/entrypoint.sh /run…"   historyserver       exited (139)
namenode            "/entrypoint.sh /run…"   namenode            running (healthy)   50070/tcp
nodemanager1        "/entrypoint.sh /run…"   nodemanager1        running (healthy)   8042/tcp
resourcemanager     "/entrypoint.sh /run…"   resourcemanager     running (healthy)   8088/tcp

My historyserver exits with code 139; I have not figured out why.

Submitting a job

sudo docker exec -it namenode /bin/bash

Prepare data and submit a job

cd /opt/hadoop-2.7.1

# create the user directories
hdfs dfs -mkdir /user
hdfs dfs -mkdir /user/root

# prepare the input data
hdfs dfs -mkdir input
hdfs dfs -put etc/hadoop/*.xml input

# submit the job
hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.1.jar grep input output 'dfs[a-z.]+'

# view the job output
hdfs dfs -cat output/*

Stopping the cluster

The cluster can be stopped with Ctrl+C or with "sudo docker-compose stop".
Stopping does not delete the containers; remove stopped containers with "sudo docker-compose rm", or use "sudo docker-compose down" to stop and remove them in one step.
After the containers are removed, "sudo docker volume ls" shows the volumes the cluster used, and "sudo docker volume rm <volume>" deletes them.

gorm study notes and caveats

Example project

https://github.com/gofulljs/g_gorm_study

Query caveats

  1. If the model struct has a primary key, gorm derives WHERE conditions from every non-zero field of the struct passed to Model, Take, First, or Last:
info := &model.Userinfos{
    Class: "一一班",
    No:    3,
}

err := model.DB.Take(info).Error

Generated SQL:

SELECT * FROM `userinfos` WHERE `userinfos`.`class` = '一一班' AND `userinfos`.`no` = 3 LIMIT 1
  2. To query on a single field of info, without the other non-zero fields (including the primary key) becoming conditions, there are two ways.

For example, we only want some student from class 一一班, but info has other fields set.

Option 1: the Table method + Scan method

info := &model.Userinfos{
        Class: "一一班",
        No:    4,
    }

err := model.DB.Table(info.TableName()).Where("class = ?", info.Class).Scan(info).Error
// 或
err := model.DB.Table(info.TableName()).Where(info, model.UserinfosColumns.Class).Scan(info).Error

Option 2: pass Model a fresh struct holding only the query fields; the downside is that the earlier struct cannot be reused, which costs a little extra allocation.

info := &model.Userinfos{
    Class: "一一班",
    No:    4,
}

// if the condition is a non-primary-key field, only the Where forms below work

err := model.DB.Model(&model.Userinfos{Class: info.Class}).Scan(info).Error
// or
err := model.DB.Model(&model.Userinfos{}).Where(info, model.UserinfosColumns.Class).Scan(info).Error
// or 
err := model.DB.Model(&model.Userinfos{}).Where("class = ?", info.Class).Scan(info).Error

scoop usage

Official site https://scoop.sh/

Changing the scoop install path

Skip this step to use the default location. If scoop is already installed, uninstall it first.

$env:SCOOP='E:\Scoop'
[environment]::setEnvironmentVariable('SCOOP',$env:SCOOP,'User')

Install per the official site

It is recommended to run PowerShell as a normal user, not as administrator

Set-ExecutionPolicy RemoteSigned -Scope CurrentUser
irm get.scoop.sh | iex

Uninstall

# remove installed packages first
scoop uninstall <xxx>
# uninstall scoop itself
scoop uninstall scoop
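
Everyday commands, for reference:

scoop search git      # search for a package
scoop install git     # install a package
scoop update *        # update all installed packages
scoop list            # list installed packages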