From 36b4d38adc13b54503d7ae25a65e66d5eda2e933 Mon Sep 17 00:00:00 2001
From: judasn
Date: Tue, 11 Dec 2018 17:32:04 +0800
Subject: [PATCH 001/124] :construction: MySQL

---
 markdown-file/Mysql-Install-And-Settings.md | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git a/markdown-file/Mysql-Install-And-Settings.md b/markdown-file/Mysql-Install-And-Settings.md
index 1af74312..98fdbf34 100644
--- a/markdown-file/Mysql-Install-And-Settings.md
+++ b/markdown-file/Mysql-Install-And-Settings.md
@@ -192,7 +192,7 @@ table_open_cache=256
 - 在终端中执行(CentOS 7):`systemctl stop mysql`
 - 在终端中执行(前面添加的 Linux 用户 mysql 必须存在):`/usr/local/mysql/bin/mysqld --skip-grant-tables --user=mysql`
     - 此时 MySQL 服务会一直处于监听状态,你需要另起一个终端窗口来执行接下来的操作
-    - 在终端中执行:`mysql -u root mysql`
+    - 在终端中执行:`mysql -u root mysql` 或者:`mysql -h 127.0.0.1 -u root -P 3306 -p`
 - 把密码改为:123456,进入 MySQL 命令行后执行:`UPDATE user SET Password=PASSWORD('123456') where USER='root';FLUSH PRIVILEGES;`
 - 然后重启 MySQL 服务(CentOS 6):`service mysql restart`
 - 然后重启 MySQL 服务(CentOS 7):`systemctl restart mysql`
@@ -221,6 +221,24 @@ set password = password('新密码');
 FLUSH PRIVILEGES;
 ```
 
+## MySQL 5.7 的 only_full_group_by 报错
+
+- 报错内容:
+
+```
+Expression #1 of ORDER BY clause is not in GROUP BY clause and contains nonaggregated column 'youmeek.nm.id'
+which is not functionally dependent on columns in GROUP BY clause;
+this is incompatible with sql_mode=only_full_group_by
+```
+
+- 查一下自己当前的模式:`select version(), @@sql_mode;`
+- 解决办法:修改 my.cnf,在 [mysqld] 下增加这一行(注意:my.cnf 里行尾不要带分号),然后重启 MySQL:
+
+```
+sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION
+```
+
+
 ## 小内存机子,MySQL 频繁挂掉解决办法(1G + CentOS 7.4)
 
 - 保存系统日志到本地进行查看:`cd /var/log/ && sz messages`

From 435aa451da88e6ccf9690af4ea27ea4ffc7fef7d Mon Sep 17 00:00:00 2001
From: judasn
Date: Wed, 12 Dec 2018 11:01:22 +0800
Subject: [PATCH 002/124] :construction: wrk

---
 markdown-file/wrk-Install-And-Settings.md | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/markdown-file/wrk-Install-And-Settings.md b/markdown-file/wrk-Install-And-Settings.md
index 6e3dbe5f..0b96c3c7 100644
--- a/markdown-file/wrk-Install-And-Settings.md
+++ b/markdown-file/wrk-Install-And-Settings.md
@@ -41,6 +41,19 @@ Requests/sec:    755.26
 Transfer/sec:     11.08MB
 ```
 
+#### 使用 lua 脚本(发送一个 post 请求)
+
+- 创建:`vim /opt/post-wrk.lua`
+
+```
+wrk.method = "POST"
+wrk.body = "hms_user_id=222222&routing_key=ad.sys_user.add"
+wrk.headers["Content-Type"] = "application/x-www-form-urlencoded"
+```
+
+- 测试:`wrk -t10 -c100 -d15s --script=/opt/post-wrk.lua --latency http://127.0.0.1:9090/websocket/api/send-by-user-id`
+
+
 ## 其他说明
 
 - wrk 使用的是 HTTP/1.1,缺省开启的是长连接
@@ -48,4 +61,5 @@ Transfer/sec:     11.08MB
 
 ## 资料
 
-- 
\ No newline at end of file
+- 
+- 
\ No newline at end of file

From 43f1a2db31f3c23130eaeed2cddce8d317b5aedb Mon Sep 17 00:00:00 2001
From: "Judas.n"
Date: Fri, 14 Dec 2018 12:08:19 +0800
Subject: [PATCH 003/124] Update README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index b5a6a443..af84c871 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
 
 ## 初衷(Original Intention)
 
-- 整理下自己所学
+- 整理下自己所学。**但是比较随意,所以很多地方不够严谨,请带着批评的思维阅读。**
 - 带动更多的人进入 Linux 世界,特别是做 Java 开发的人
 - Github 项目地址,欢迎 `Fork`、`Star`:
 - 文章中用到的一些安装包归纳整理:

From a4249b2b54cb9b03c8621bde9fb39c5a638ec12f Mon Sep 17 00:00:00 2001
From: judasn
Date: Mon, 17 Dec 2018 18:37:09 +0800
Subject: [PATCH 004/124] :construction: bash

---
 markdown-file/CentOS6-and-CentOS7.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/markdown-file/CentOS6-and-CentOS7.md b/markdown-file/CentOS6-and-CentOS7.md
index 87afe8d8..7dd26640 100644
--- a/markdown-file/CentOS6-and-CentOS7.md
+++ b/markdown-file/CentOS6-and-CentOS7.md
@@ -36,6 +36,9 @@
 
 ### 开放端口
 
+- 一般设置软件端口有一个原则:
+    - 0 ~ 1024 系统保留,一般不要用到
+    - 1024 ~ 65535(即 2^16 - 1)可以随意用
 - 添加单个端口:`firewall-cmd --zone=public --add-port=8883/tcp --permanent`
 - 添加范围端口:`firewall-cmd --zone=public --add-port=8883-8885/tcp --permanent`
 - 删除端口:`firewall-cmd --zone=public --remove-port=8883/tcp --permanent`

From 98e46c7b574364cef38ad155b3a23d6ff15c1fad Mon Sep 17 00:00:00 2001
From: judasn
Date: Mon, 17 Dec 2018 19:00:21 +0800
Subject: [PATCH 005/124] :construction: hadoop

---
 markdown-file/Hadoop-Install-And-Settings.md | 415 +++++++++++++++++++
 1 file changed, 415 insertions(+)
 create mode 100644 markdown-file/Hadoop-Install-And-Settings.md

diff --git a/markdown-file/Hadoop-Install-And-Settings.md b/markdown-file/Hadoop-Install-And-Settings.md
new file mode 100644
index 00000000..0f8fafc6
--- /dev/null
+++ b/markdown-file/Hadoop-Install-And-Settings.md
@@ -0,0 +1,415 @@
+# Hadoop 安装和配置
+
+
+## Hadoop 说明
+
+- Hadoop 官网:<https://hadoop.apache.org/>
+- Hadoop 官网下载:<https://hadoop.apache.org/releases.html>
+
+## 基础环境
+
+- 学习机器 2C4G(生产最少 8G):
+    - 172.16.0.17
+    - 172.16.0.43
+    - 172.16.0.180
+- 操作系统:CentOS 7.5
+    - root 用户
+- 所有机子必备:Java:1.8
+    - 确保:`echo $JAVA_HOME` 能查看到路径,并把路径记下来
+- Hadoop:2.6.5
+- 关闭所有机子的防火墙:`systemctl stop firewalld.service`
+
+## 集群环境设置
+
+- Hadoop 集群具体来说包含两个集群:HDFS 集群和 YARN 集群,两者逻辑上分离,但物理上常在一起
+    - HDFS 集群:负责海量数据的存储,集群中的角色主要有 NameNode / DataNode
+    - YARN 集群:负责海量数据运算时的资源调度,集群中的角色主要有 ResourceManager / NodeManager
+    - HDFS 采用 master/worker 架构。一个 HDFS 集群由一个 Namenode 和一定数目的 Datanode 组成。Namenode 是一个中心服务器,负责管理文件系统的命名空间 (namespace) 以及客户端对文件的访问。集群中的 Datanode 一般是一个节点一个,负责管理它所在节点上的存储。
+- 分别给三台机子设置 hostname
+
+```
+hostnamectl --static set-hostname hadoop-master
+hostnamectl --static set-hostname hadoop-node1
+hostnamectl --static set-hostname hadoop-node2
+```
+
+
+- 修改 hosts
+
+```
+就按这个来,其他多余的别加,不然可能也会有影响
+vim /etc/hosts
+172.16.0.17 hadoop-master
+172.16.0.43 hadoop-node1
+172.16.0.180 hadoop-node2
+```
+
+
+- 对 hadoop-master 设置免密:
+
+```
+生成密钥对
+ssh-keygen -t rsa
+
+
+公钥内容写入 authorized_keys
+cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
+
+测试:
+ssh localhost
+
+```
+
+- 将公钥复制到两台 slave
+
+```
+ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@172.16.0.43,根据提示输入 hadoop-node1 机器的 root 密码,成功会有相应提示
+ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@172.16.0.180,根据提示输入 hadoop-node2 机器的 root 密码,成功会有相应提示
+
+
+在 hadoop-master 上测试:
+ssh hadoop-node1
+ssh hadoop-node2
+
+```
+
+
+
+## Hadoop 安装
+
+- 关于版本这件事,主要看你的技术生态圈。如果你的其他技术,比如 Spark,Flink 等不支持最新版,则就只能向下考虑。
+- 我这里技术栈,目前只能到:2.6.5,所以下面的内容都是基于 2.6.5 版本
+- 官网说明:
+- 分别在三台机子上都创建目录:
+
+```
+mkdir -p /data/hadoop/hdfs/name /data/hadoop/hdfs/data /data/hadoop/hdfs/tmp
+```
+
+- 下载 Hadoop:
+- 先在 hadoop-master 机子上安装
+
+```
+cd /usr/local && wget http://apache.claz.org/hadoop/common/hadoop-2.6.5/hadoop-2.6.5.tar.gz
+tar zxvf hadoop-2.6.5.tar.gz,压缩包有 191M 左右
+```
+
+- **给三台机子都先设置 HADOOP_HOME**
+
+```
+vim /etc/profile
+
+export HADOOP_HOME=/usr/local/hadoop-2.6.5
+export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
+
+source /etc/profile
+```
+
+
+## 修改 hadoop-master 配置
+
+
+```
+修改 JAVA_HOME
+vim $HADOOP_HOME/etc/hadoop/hadoop-env.sh
+
+把 25 行的
+export JAVA_HOME=${JAVA_HOME}
+改为
+export JAVA_HOME=/usr/local/jdk1.8.0_181
+
+
+vim $HADOOP_HOME/etc/hadoop/yarn-env.sh
+
+加一行 export JAVA_HOME=/usr/local/jdk1.8.0_181
+
+```
+
+- hadoop.tmp.dir == 指定 hadoop 运行时产生文件的存储目录
+
+```
+
+vim $HADOOP_HOME/etc/hadoop/core-site.xml,改为:
+
+<configuration>
+    <property>
+        <name>hadoop.tmp.dir</name>
+        <value>file:/data/hadoop/hdfs/tmp</value>
+    </property>
+    <property>
+        <name>io.file.buffer.size</name>
+        <value>131072</value>
+    </property>
+    <property>
+        <name>fs.defaultFS</name>
+        <value>hdfs://hadoop-master:9000</value>
+    </property>
+    <property>
+        <name>hadoop.proxyuser.root.hosts</name>
+        <value>*</value>
+    </property>
+    <property>
+        <name>hadoop.proxyuser.root.groups</name>
+        <value>*</value>
+    </property>
+</configuration>
+```
+
+
+- 配置包括副本数量
+    - 最大值是 datanode 的个数
+- 数据存放目录
+
+```
+vim $HADOOP_HOME/etc/hadoop/hdfs-site.xml
+
+<configuration>
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+    </property>
+    <property>
+        <name>dfs.namenode.name.dir</name>
+        <value>file:/data/hadoop/hdfs/name</value>
+        <final>true</final>
+    </property>
+    <property>
+        <name>dfs.datanode.data.dir</name>
+        <value>file:/data/hadoop/hdfs/data</value>
+        <final>true</final>
+    </property>
+    <property>
+        <name>dfs.webhdfs.enabled</name>
+        <value>true</value>
+    </property>
+    <property>
+        <name>dfs.permissions</name>
+        <value>false</value>
+    </property>
+</configuration>
+
+```
+
+
+- 设置 YARN
+
+```
+新创建:vim $HADOOP_HOME/etc/hadoop/mapred-site.xml
+
+<configuration>
+    <property>
+        <name>mapreduce.framework.name</name>
+        <value>yarn</value>
+    </property>
+</configuration>
+```
+
+
+- yarn.resourcemanager.hostname == 指定 YARN 的老大(ResourceManager)的地址
+- yarn.nodemanager.aux-services == NodeManager 上运行的附属服务,需配置成 mapreduce_shuffle 才可运行 MapReduce 程序,默认值为 ""
+
+```
+vim $HADOOP_HOME/etc/hadoop/yarn-site.xml
+
+<configuration>
+    <property>
+        <name>yarn.resourcemanager.hostname</name>
+        <value>hadoop-master</value>
+    </property>
+    <property>
+        <name>yarn.nodemanager.aux-services</name>
+        <value>mapreduce_shuffle</value>
+    </property>
+</configuration>
+
+```
+
+
+- 配置 slave 相关信息
+
+
+```
+vim $HADOOP_HOME/etc/hadoop/slaves
+
+把默认的配置里面的 localhost 删除,换成:
+hadoop-node1
+hadoop-node2
+
+```
+
+- 把配置好的 Hadoop 整个目录复制到两台 slave:
+
+```
+scp -r /usr/local/hadoop-2.6.5 root@hadoop-node1:/usr/local/
+
+scp -r /usr/local/hadoop-2.6.5 root@hadoop-node2:/usr/local/
+
+```
+
+
+## 在 hadoop-master 机子上运行
+
+```
+格式化 HDFS
+hdfs namenode -format
+
+```
+
+- 输出结果:
+
+```
+[root@hadoop-master hadoop-2.6.5]# hdfs namenode -format
+18/12/17 17:47:17 INFO namenode.NameNode: STARTUP_MSG:
+/************************************************************
+STARTUP_MSG: Starting NameNode
+STARTUP_MSG:   host = localhost/127.0.0.1
+STARTUP_MSG:   args = [-format]
+STARTUP_MSG:   version = 2.6.5
+STARTUP_MSG:   classpath = /usr/local/hadoop-2.6.5/etc/hadoop:...(中间省略大量 jar 包路径,不影响阅读)...:/usr/local/hadoop-2.
6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-plugins-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.6.5-tests.jar:/usr/local/hadoop-2.6.5/contrib/capacity-scheduler/*.jar +STARTUP_MSG: build = https://github.com/apache/hadoop.git -r e8c9fe0b4c252caf2ebf1464220599650f119997; compiled by 'sjlee' on 2016-10-02T23:43Z +STARTUP_MSG: java = 1.8.0_181 +************************************************************/ +18/12/17 17:47:17 INFO namenode.NameNode: registered UNIX signal handlers for [TERM, HUP, INT] +18/12/17 17:47:17 INFO namenode.NameNode: createNameNode [-format] +Formatting using clusterid: CID-beba43b4-0881-48b4-8eda-5c3bca046398 +18/12/17 17:47:17 INFO namenode.FSNamesystem: No KeyProvider found. +18/12/17 17:47:17 INFO namenode.FSNamesystem: fsLock is fair:true +18/12/17 17:47:17 INFO blockmanagement.DatanodeManager: dfs.block.invalidate.limit=1000 +18/12/17 17:47:17 INFO blockmanagement.DatanodeManager: dfs.namenode.datanode.registration.ip-hostname-check=true +18/12/17 17:47:17 INFO blockmanagement.BlockManager: dfs.namenode.startup.delay.block.deletion.sec is set to 000:00:00:00.000 +18/12/17 17:47:17 INFO blockmanagement.BlockManager: The block deletion will start around 2018 Dec 17 17:47:17 +18/12/17 17:47:17 INFO util.GSet: Computing capacity for map BlocksMap +18/12/17 17:47:17 INFO util.GSet: VM type = 64-bit +18/12/17 17:47:17 INFO util.GSet: 2.0% max memory 889 MB = 17.8 MB +18/12/17 17:47:17 INFO util.GSet: capacity = 2^21 = 2097152 entries +18/12/17 17:47:17 INFO blockmanagement.BlockManager: dfs.block.access.token.enable=false +18/12/17 17:47:17 INFO blockmanagement.BlockManager: defaultReplication = 2 +18/12/17 17:47:17 INFO blockmanagement.BlockManager: maxReplication = 512 +18/12/17 17:47:17 INFO blockmanagement.BlockManager: minReplication = 1 +18/12/17 17:47:17 INFO blockmanagement.BlockManager: maxReplicationStreams = 2 +18/12/17 17:47:17 INFO blockmanagement.BlockManager: replicationRecheckInterval = 3000 +18/12/17 17:47:17 INFO blockmanagement.BlockManager: encryptDataTransfer = false +18/12/17 17:47:17 INFO blockmanagement.BlockManager: maxNumBlocksToLog = 1000 +18/12/17 17:47:17 INFO namenode.FSNamesystem: fsOwner = root (auth:SIMPLE) +18/12/17 17:47:17 INFO namenode.FSNamesystem: supergroup = supergroup +18/12/17 17:47:17 INFO namenode.FSNamesystem: isPermissionEnabled = false +18/12/17 17:47:17 INFO namenode.FSNamesystem: HA Enabled: false +18/12/17 17:47:17 INFO namenode.FSNamesystem: Append Enabled: true +18/12/17 17:47:17 INFO util.GSet: Computing capacity for map INodeMap +18/12/17 17:47:17 INFO util.GSet: VM type = 64-bit +18/12/17 17:47:17 INFO util.GSet: 1.0% max memory 889 MB = 8.9 MB +18/12/17 17:47:17 INFO util.GSet: capacity = 2^20 = 1048576 entries +18/12/17 17:47:17 INFO namenode.NameNode: Caching file names occuring more than 10 times +18/12/17 17:47:17 INFO util.GSet: Computing capacity for map cachedBlocks +18/12/17 17:47:17 INFO util.GSet: VM type = 64-bit +18/12/17 17:47:17 INFO util.GSet: 0.25% max memory 889 MB = 2.2 MB +18/12/17 17:47:17 INFO util.GSet: capacity = 2^18 = 262144 entries +18/12/17 17:47:17 INFO namenode.FSNamesystem: dfs.namenode.safemode.threshold-pct = 0.9990000128746033 +18/12/17 17:47:17 INFO namenode.FSNamesystem: 
dfs.namenode.safemode.min.datanodes = 0 +18/12/17 17:47:17 INFO namenode.FSNamesystem: dfs.namenode.safemode.extension = 30000 +18/12/17 17:47:17 INFO namenode.FSNamesystem: Retry cache on namenode is enabled +18/12/17 17:47:17 INFO namenode.FSNamesystem: Retry cache will use 0.03 of total heap and retry cache entry expiry time is 600000 millis +18/12/17 17:47:17 INFO util.GSet: Computing capacity for map NameNodeRetryCache +18/12/17 17:47:17 INFO util.GSet: VM type = 64-bit +18/12/17 17:47:17 INFO util.GSet: 0.029999999329447746% max memory 889 MB = 273.1 KB +18/12/17 17:47:17 INFO util.GSet: capacity = 2^15 = 32768 entries +18/12/17 17:47:17 INFO namenode.NNConf: ACLs enabled? false +18/12/17 17:47:17 INFO namenode.NNConf: XAttrs enabled? true +18/12/17 17:47:17 INFO namenode.NNConf: Maximum size of an xattr: 16384 +18/12/17 17:47:17 INFO namenode.FSImage: Allocated new BlockPoolId: BP-233285725-127.0.0.1-1545040037972 +18/12/17 17:47:18 INFO common.Storage: Storage directory /data/hadoop/hdfs/name has been successfully formatted. +18/12/17 17:47:18 INFO namenode.FSImageFormatProtobuf: Saving image file /data/hadoop/hdfs/name/current/fsimage.ckpt_0000000000000000000 using no compression +18/12/17 17:47:18 INFO namenode.FSImageFormatProtobuf: Image file /data/hadoop/hdfs/name/current/fsimage.ckpt_0000000000000000000 of size 321 bytes saved in 0 seconds. +18/12/17 17:47:18 INFO namenode.NNStorageRetentionManager: Going to retain 1 images with txid >= 0 +18/12/17 17:47:18 INFO util.ExitUtil: Exiting with status 0 +18/12/17 17:47:18 INFO namenode.NameNode: SHUTDOWN_MSG: +/************************************************************ +SHUTDOWN_MSG: Shutting down NameNode at localhost/127.0.0.1 +************************************************************/ + +``` + +- 启动 + +``` +启动:start-dfs.sh,根据提示一路 yes +hadoop-master 会启动:NameNode 和 SecondaryNameNode +从节点启动:DataNode + +查看:jps,可以看到: +21922 Jps +21603 NameNode +21787 SecondaryNameNode + + +然后再从节点可以 jps 可以看到: +19728 DataNode +19819 Jps + +``` + +``` + +查看运行更多情况:hdfs dfsadmin -report + +Configured Capacity: 0 (0 B) +Present Capacity: 0 (0 B) +DFS Remaining: 0 (0 B) +DFS Used: 0 (0 B) +DFS Used%: NaN% +Under replicated blocks: 0 +Blocks with corrupt replicas: 0 +Missing blocks: 0 +``` + + +``` + +如果需要停止:stop-dfs.sh + +查看 log:cd $HADOOP_HOME/logs + + +``` + +## YARN 运行 + +``` +start-yarn.sh +然后 jps 你会看到一个:ResourceManager + +从节点你会看到:NodeManager + +停止:stop-yarn.sh + + +``` + +- 可以看到当前运行的所有端口:`netstat -tpnl | grep java` + + + +查看HDFS管理界面:http://hadoop-master:50070 +访问YARN管理界面:http://hadoop-master:8088 + + + +搭建完成之后,我们运行一个Mapreduce作业感受一下: +hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.5.jar pi 5 10 +hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.5.jar wordcount /data/input /data/output/result + + +## 资料 + +- +- +- \ No newline at end of file From 48bafd15e091f37d676b3bdf1a451752ed931e5a Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 18 Dec 2018 09:46:38 +0800 Subject: [PATCH 006/124] :construction: bash --- markdown-file/Bash.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/markdown-file/Bash.md b/markdown-file/Bash.md index 1017bb15..874d390e 100644 --- a/markdown-file/Bash.md +++ b/markdown-file/Bash.md @@ -130,7 +130,8 @@ drwxr-xr-x. 
5 root root 4096 3月 26 10:57,其中最前面的 d 表示这是 - `kill` - `kill 1234`,结束 pid 为 1234 的进程 - `kill -9 1234`,强制结束 pid 为 1234 的进程(慎重) - - `killall java`,杀死同一进程组内的所有为 java 进程 + - `killall java`,结束同一进程组内的所有为 java 进程 + - `ps -ef|grep hadoop|grep -v grep|cut -c 9-15|xargs kill -9`,结束包含关键字 hadoop 的所有进程 - `head` - `head -n 10 spring.ini`,查看当前文件的前 10 行内容 - `tail` From f98180771c59db6277d60f1af2c553bfa0baff34 Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 18 Dec 2018 11:21:29 +0800 Subject: [PATCH 007/124] :construction: bash --- markdown-file/Bash.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/markdown-file/Bash.md b/markdown-file/Bash.md index 874d390e..396526d4 100644 --- a/markdown-file/Bash.md +++ b/markdown-file/Bash.md @@ -140,6 +140,8 @@ drwxr-xr-x. 5 root root 4096 3月 26 10:57,其中最前面的 d 表示这是 ## 用户、权限-相关命令 +- 使用 pem 证书登录:`ssh -i /opt/mykey.pem root@192.168.0.70` + - 证书权限不能太大,不然无法使用:`chmod 600 mykey.pem` - `hostname`,查看当前登陆用户全名 - `cat /etc/group`,查看所有组 - `cat /etc/passwd`,查看所有用户 From 0a01ab9035be00fb4f71e495570ed7a2bc81a44d Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 18 Dec 2018 11:35:16 +0800 Subject: [PATCH 008/124] :construction: SSH --- markdown-file/SSH-login-without-password.md | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/markdown-file/SSH-login-without-password.md b/markdown-file/SSH-login-without-password.md index ec33a561..b13e35cd 100644 --- a/markdown-file/SSH-login-without-password.md +++ b/markdown-file/SSH-login-without-password.md @@ -14,6 +14,8 @@ - 在 A 机器上输入命令:`ssh-keygen` - 根据提示回车,共有三次交互提示,都回车即可。 - 生成的密钥目录在:**/root/.ssh** +- 写入:`cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys` +- 测试:`ssh localhost` ## 把 A 的公钥发给 B @@ -23,4 +25,17 @@ ## 测试 A 免密登录到 B -- 在 A 机器上输入命令:`ssh -p 22 root@192.168.1.105`,则会相应登录成功的提示 \ No newline at end of file +- 在 A 机器上输入命令:`ssh -p 22 root@192.168.1.105`,则会相应登录成功的提示 + +------------------------------------------------------------------- + +## 如果是用 pem 登录的话,用 ssh-copy-id 是无法使用的 + +- 先保存 A 的 pub 到本地:`sz /root/.ssh/id_rsa.pub` +- 登录 B 机子:`cd /root/.ssh/` +- 如果 B 机子没有 authorized_keys 文件则创建:`touch /root/.ssh/authorized_keys` + - 设置权限:`chmod 600 /root/.ssh/authorized_keys ` +- 上传 pub 文件到 B 机子,并在 B 机子上执行:`cd /root/.ssh/ && cat id_rsa.pub >> authorized_keys` + + + From a252c14cde61fd88194393097407c89e0c51358a Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 18 Dec 2018 15:06:47 +0800 Subject: [PATCH 009/124] :construction: Ansible --- README.md | 1 + SUMMARY.md | 3 +- TOC.md | 3 +- markdown-file/Ansible-Install-And-Settings.md | 214 ++++++++++++++++++ 4 files changed, 219 insertions(+), 2 deletions(-) create mode 100644 markdown-file/Ansible-Install-And-Settings.md diff --git a/README.md b/README.md index af84c871..154c9bf3 100644 --- a/README.md +++ b/README.md @@ -99,6 +99,7 @@ - [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) - [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) - [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md) +- [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md) ## 联系(Contact) diff --git a/SUMMARY.md b/SUMMARY.md index 9402b78b..ebe606dc 100644 --- a/SUMMARY.md +++ b/SUMMARY.md @@ -84,4 +84,5 @@ * [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md) * [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) * [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) -* [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md) \ No newline at end of file +* [Grafana 
安装和配置](markdown-file/Grafana-Install-And-Settings.md) +* [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md) \ No newline at end of file diff --git a/TOC.md b/TOC.md index 8cb10242..d87f0ab8 100644 --- a/TOC.md +++ b/TOC.md @@ -81,4 +81,5 @@ - [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md) - [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) - [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) -- [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md) \ No newline at end of file +- [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md) +- [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md) \ No newline at end of file diff --git a/markdown-file/Ansible-Install-And-Settings.md b/markdown-file/Ansible-Install-And-Settings.md new file mode 100644 index 00000000..91011077 --- /dev/null +++ b/markdown-file/Ansible-Install-And-Settings.md @@ -0,0 +1,214 @@ +# Ansible 安装和配置 + + +## Ansible 说明 + +- Ansible 官网: +- Ansible 官网 Github: +- Ansible 官网文档: +- 简单讲:它的作用就是把写 shell 这件事变成标准化、模块化。方便更好的自动化运维 + +## 安装 + +- 官网说明: +- CentOS:`sudo yum install -y ansible` + - 查看版本:`ansible --version` + +------------------------------------------------------------------- + +## 配置基本概念 + +#### Ansible 基本配置文件顺序 + +- Ansible 执行的时候会按照以下顺序查找配置项,所以修改的时候要特别注意改的是哪个文件 + +``` +ANSIBLE_CONFIG (环境变量) +ansible.cfg (脚本所在当前目录下) +~/.ansible.cfg (用户家目录下,默认没有) +/etc/ansible/ansible.cfg(安装后会自动生成) +``` + + +#### 配置远程主机地址 (Ansible 称这些地址为 Inventory) + +- 假设我有 3 台机子: + - 192.168.0.223 + - 192.168.0.70 + - 192.168.0.103 +- 官网对此的配置说明: + +###### 给这三台机子设置免密登录的情况(一般推荐方式) + +- 编辑 Ansible 配置文件:`vim /etc/ansible/hosts` +- 添加如下内容 + +``` +[hadoop-host] +192.168.0.223 +192.168.0.70 +192.168.0.103 +``` + +- 其中 `[hadoop-host]` 表示这些主机代表的一个组名 + + +###### 如果不设置免密,直接采用账号密码(容易泄露信息) + + +- 编辑 Ansible 配置文件:`vim /etc/ansible/hosts` +- 添加如下内容 + +``` +[hadoop-host] +hadoop-master ansible_host=192.168.0.223 ansible_user=root ansible_ssh_pass=123456 +hadoop-node1 ansible_host=192.168.0.70 ansible_user=root ansible_ssh_pass=123456 +hadoop-node2 ansible_host=192.168.0.103 ansible_user=root ansible_ssh_pass=123456 +``` + + + +## 简单使用(`ad hoc`方式) + +- ad hoc 官网: + +##### 运行 Ansible + +- 运行 Ansible 的 `ping` 命令,看看配置正确时输出如下: + +``` +sudo ansible --private-key ~/.ssh/id_rsa all -m ping +``` + +- 让远程所有主机都执行 `ps` 命令,输出如下 + +``` +ansible all -a 'ps' +``` + +- 让远程所有 hadoop-host 组的主机都执行 `ps` 命令,输出如下 + +``` +ansible hadoop-host -a 'ps' +``` + +------------------------------------------------------------------- + +## Playbook 脚本方式 + +- 官网: +- 一些语法: +- playbook(剧本),顾名思义,就是需要定义一个脚本或者说配置文件,然后定义好要做什么。之后 ansible 就会根据 playbook 脚本对远程主机进行操作 + +#### 简单脚本 + +- 下面脚本让所有远程主机执行 `whoami` 命令,并把结果(当前用户名)输出到 `/opt/whoami.txt` 文件 +- 创建脚本文件:`vim /opt/simple-playbook.yml` + +``` +- hosts: all + tasks: + - name: whoami + shell: 'whoami > /opt/whoami.txt' +``` + +- 执行命令:`ansible-playbook /opt/simple-playbook.yml`,结果如下,并且 opt 下也有文件生成 + +``` +PLAY [all] ************************************************************************************************************************** + +TASK [Gathering Facts] ************************************************************************************************************** +ok: [192.168.0.223] +ok: [192.168.0.103] +ok: [192.168.0.70] + +TASK [whoami] *********************************************************************************************************************** +changed: [192.168.0.103] +changed: [192.168.0.223] +changed: [192.168.0.70] + +PLAY RECAP 
************************************************************************************************************************** +192.168.0.103 : ok=2 changed=1 unreachable=0 failed=0 +192.168.0.223 : ok=2 changed=1 unreachable=0 failed=0 +192.168.0.70 : ok=2 changed=1 unreachable=0 failed=0 +``` + +------------------------------------------------------------------- + +## 平时用来测试 + +- 创建脚本文件:`vim /opt/test-playbook.yml` + +``` +- hosts: hadoop-test + remote_user: root + vars: + java_install_folder: /usr/local + tasks: + # 按行的方式写入 + - name: Set JAVA_HOME 1 + lineinfile: + dest=/etc/profile + line="JAVA_HOME={{ java_install_folder }}/jdk1.8.0_181" + # 按块的方式写入,#{mark} 会被自动替换成:begin 和 end 字符来包裹整块内容(我这里自己定义了词语) + - name: Set JAVA_HOME 2 + blockinfile: + path: /etc/profile + marker: "#{mark} JDK ENV" + marker_begin: "开始" + marker_end: "结束" + block: | + export JAVA_HOME={{ java_install_folder }}/jdk1.8.0_181 + export PATH=$PATH:$JAVA_HOME/bin +``` + +- 执行命令:`ansible-playbook /opt/test-playbook.yml` + +------------------------------------------------------------------- + +## 更多 playbook 实战 + +#### 部署 JDK + +- 创建脚本文件:`vim /opt/jdk8-playbook.yml` + +``` +- hosts: hadoop-host + remote_user: root + vars: + java_install_folder: /usr/local + tasks: + - name: copy jdk + copy: src=/opt/jdk-8u181-linux-x64.tar.gz dest={{ java_install_folder }} + + - name: tar jdk + shell: chdir={{ java_install_folder }} tar zxf jdk-8u181-linux-x64.tar.gz + + - name: Set JAVA_HOME + blockinfile: + path: /etc/profile + marker: "#{mark} JDK ENV" + block: | + JAVA_HOME={{ java_install_folder }}/jdk1.8.0_181 + JRE_HOME=$JAVA_HOME/jre + PATH=$PATH:$JAVA_HOME/bin + CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar + export JAVA_HOME + export JRE_HOME + export PATH + export CLASSPATH + + - name: source profile + shell: source /etc/profile +``` + + +- 执行命令:`ansible-playbook /opt/jdk8-playbook.yml` + + +## 资料 + + +- +- +- \ No newline at end of file From 16c09a80ec4b53716597e87fe7b727e6d129d771 Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 18 Dec 2018 15:48:58 +0800 Subject: [PATCH 010/124] :construction: hadoop --- README.md | 1 + SUMMARY.md | 1 + TOC.md | 1 + markdown-file/Hadoop-Install-And-Settings.md | 75 ++++++++++++++------ 4 files changed, 55 insertions(+), 23 deletions(-) diff --git a/README.md b/README.md index 154c9bf3..4aca2d77 100644 --- a/README.md +++ b/README.md @@ -94,6 +94,7 @@ - [CI 整套服务安装和使用](markdown-file/CI-Install-And-Usage.md) - [YApi 安装和配置](markdown-file/YApi-Install-And-Settings.md) - [Kafka 安装和配置](markdown-file/Kafka-Install-And-Settings.md) +- [Hadoop 安装和配置](markdown-file/Hadoop-Install-And-Settings.md) - [Showdoc 安装和配置](markdown-file/Showdoc-Install-And-Settings.md) - [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md) - [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) diff --git a/SUMMARY.md b/SUMMARY.md index ebe606dc..91dfcd0a 100644 --- a/SUMMARY.md +++ b/SUMMARY.md @@ -80,6 +80,7 @@ * [CI 整套服务安装和使用](markdown-file/CI-Install-And-Usage.md) * [YApi 安装和配置](markdown-file/YApi-Install-And-Settings.md) * [Kafka 安装和配置](markdown-file/Kafka-Install-And-Settings.md) +* [Hadoop 安装和配置](markdown-file/Hadoop-Install-And-Settings.md) * [Showdoc 安装和配置](markdown-file/Showdoc-Install-And-Settings.md) * [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md) * [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) diff --git a/TOC.md b/TOC.md index d87f0ab8..1b0ff377 100644 --- a/TOC.md +++ b/TOC.md @@ -77,6 +77,7 @@ - [Node.js 
安装和使用](markdown-file/Node-Install-And-Usage.md) - [YApi 安装和配置](markdown-file/YApi-Install-And-Settings.md) - [Kafka 安装和配置](markdown-file/Kafka-Install-And-Settings.md) +- [Hadoop 安装和配置](markdown-file/Hadoop-Install-And-Settings.md) - [Showdoc 安装和配置](markdown-file/Showdoc-Install-And-Settings.md) - [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md) - [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) diff --git a/markdown-file/Hadoop-Install-And-Settings.md b/markdown-file/Hadoop-Install-And-Settings.md index 0f8fafc6..8f9542e0 100644 --- a/markdown-file/Hadoop-Install-And-Settings.md +++ b/markdown-file/Hadoop-Install-And-Settings.md @@ -61,6 +61,7 @@ ssh localhost ``` - 将公钥复制到两台 slave + - 如果你是采用 pem 登录的,可以看这个:[SSH 免密登录](SSH-login-without-password.md) ``` ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@172.16.0.43,根据提示输入 hadoop-node1 机器的 root 密码,成功会有相应提示 @@ -95,6 +96,7 @@ tar zxvf hadoop-2.6.5.tar.gz,有 191M 左右 ``` - **给三台机子都先设置 HADOOP_HOME** + - 会 ansible playbook 会方便点:[Ansible 安装和配置](Ansible-Install-And-Settings.md) ``` vim /etc/profile @@ -338,29 +340,31 @@ SHUTDOWN_MSG: Shutting down NameNode at localhost/127.0.0.1 ``` -- 启动 +## HDFS 启动 + +- 启动:start-dfs.sh,根据提示一路 yes ``` -启动:start-dfs.sh,根据提示一路 yes -hadoop-master 会启动:NameNode 和 SecondaryNameNode -从节点启动:DataNode +这个命令效果: +主节点会启动任务:NameNode 和 SecondaryNameNode +从节点会启动任务:DataNode + -查看:jps,可以看到: +主节点查看:jps,可以看到: 21922 Jps 21603 NameNode 21787 SecondaryNameNode -然后再从节点可以 jps 可以看到: +从节点查看:jps 可以看到: 19728 DataNode 19819 Jps - ``` -``` -查看运行更多情况:hdfs dfsadmin -report +- 查看运行更多情况:`hdfs dfsadmin -report` +``` Configured Capacity: 0 (0 B) Present Capacity: 0 (0 B) DFS Remaining: 0 (0 B) @@ -371,15 +375,9 @@ Blocks with corrupt replicas: 0 Missing blocks: 0 ``` +- 如果需要停止:`stop-dfs.sh` +- 查看 log:`cd $HADOOP_HOME/logs` -``` - -如果需要停止:stop-dfs.sh - -查看 log:cd $HADOOP_HOME/logs - - -``` ## YARN 运行 @@ -391,22 +389,53 @@ start-yarn.sh 停止:stop-yarn.sh +``` + +## 端口情况 +- 主节点当前运行的所有端口:`netstat -tpnl | grep java` +- 会用到端口(为了方便展示,整理下顺序): + +``` +tcp 0 0 172.16.0.17:9000 0.0.0.0:* LISTEN 22932/java >> NameNode +tcp 0 0 0.0.0.0:50070 0.0.0.0:* LISTEN 22932/java >> NameNode +tcp 0 0 0.0.0.0:50090 0.0.0.0:* LISTEN 23125/java >> SecondaryNameNode +tcp6 0 0 172.16.0.17:8030 :::* LISTEN 23462/java >> ResourceManager +tcp6 0 0 172.16.0.17:8031 :::* LISTEN 23462/java >> ResourceManager +tcp6 0 0 172.16.0.17:8032 :::* LISTEN 23462/java >> ResourceManager +tcp6 0 0 172.16.0.17:8033 :::* LISTEN 23462/java >> ResourceManager +tcp6 0 0 172.16.0.17:8088 :::* LISTEN 23462/java >> ResourceManager +``` + +- 从节点当前运行的所有端口:`netstat -tpnl | grep java` +- 会用到端口(为了方便展示,整理下顺序): + +``` +tcp 0 0 0.0.0.0:50010 0.0.0.0:* LISTEN 14545/java >> DataNode +tcp 0 0 0.0.0.0:50020 0.0.0.0:* LISTEN 14545/java >> DataNode +tcp 0 0 0.0.0.0:50075 0.0.0.0:* LISTEN 14545/java >> DataNode +tcp6 0 0 :::8040 :::* LISTEN 14698/java >> NodeManager +tcp6 0 0 :::8042 :::* LISTEN 14698/java >> NodeManager +tcp6 0 0 :::13562 :::* LISTEN 14698/java >> NodeManager +tcp6 0 0 :::37481 :::* LISTEN 14698/java >> NodeManager ``` -- 可以看到当前运行的所有端口:`netstat -tpnl | grep java` +------------------------------------------------------------------- +## 管理界面 +- 查看 HDFS 管理界面: +- 访问 YARN 管理界面: -查看HDFS管理界面:http://hadoop-master:50070 -访问YARN管理界面:http://hadoop-master:8088 +------------------------------------------------------------------- +## 运行作业 -搭建完成之后,我们运行一个Mapreduce作业感受一下: -hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.5.jar pi 5 10 -hadoop jar 
$HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.5.jar wordcount /data/input /data/output/result +- 运行一个 Mapreduce 作业试试: + - `hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.5.jar pi 5 10` +------------------------------------------------------------------- ## 资料 From c9033c181cedff12ce3c88b4eb093e447692e98b Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 18 Dec 2018 18:48:42 +0800 Subject: [PATCH 011/124] :construction: hadoop --- markdown-file/Hadoop-Install-And-Settings.md | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/markdown-file/Hadoop-Install-And-Settings.md b/markdown-file/Hadoop-Install-And-Settings.md index 8f9542e0..d39a2e42 100644 --- a/markdown-file/Hadoop-Install-And-Settings.md +++ b/markdown-file/Hadoop-Install-And-Settings.md @@ -432,8 +432,21 @@ tcp6 0 0 :::37481 :::* LISTEN ## 运行作业 +- 在主节点上操作 - 运行一个 Mapreduce 作业试试: - - `hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.5.jar pi 5 10` + - 计算 π:`hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.5.jar pi 5 10` +- 运行一个文件相关作业: + - 由于运行 hadoop 时指定的输入文件只能是 HDFS 文件系统中的文件,所以我们必须将要进行 wordcount 的文件从本地文件系统拷贝到 HDFS 文件系统中。 + - 查看目前根目录结构:`hadoop fs -ls /` + - 创建目录:`hadoop fs -mkdir -p /tmp/zch/wordcount_input_dir` + - 上传文件:`hadoop fs -put /opt/input.txt /tmp/zch/wordcount_input_dir` + - 查看上传的目录下是否有文件:`hadoop fs -ls /tmp/zch/wordcount_input_dir` + - 向 yarn 提交作业,计算单词个数:`hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.5.jar wordcount /tmp/zch/wordcount_input_dir /tmp/zch/wordcount_output_dir` + - 查看计算结果输出的目录:`hadoop fs -ls /tmp/zch/wordcount_output_dir` + - 查看计算结果输出内容:`hadoop fs -cat /tmp/zch/wordcount_output_dir/part-r-00000` +- 查看正在运行的 Hadoop 任务:`yarn application -list` +- 关闭 Hadoop 任务进程:`yarn application -kill 你的ApplicationId` + ------------------------------------------------------------------- @@ -441,4 +454,5 @@ tcp6 0 0 :::37481 :::* LISTEN - - -- \ No newline at end of file +- +- \ No newline at end of file From 2ee0154670983f8e42c165bcb8cd4ce01b427f77 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 19 Dec 2018 11:01:33 +0800 Subject: [PATCH 012/124] :construction: docker --- .../shell/install_docker_disable_firewalld_centos7-aliyun.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/favorite-file/shell/install_docker_disable_firewalld_centos7-aliyun.sh b/favorite-file/shell/install_docker_disable_firewalld_centos7-aliyun.sh index 35080fed..587ca77b 100644 --- a/favorite-file/shell/install_docker_disable_firewalld_centos7-aliyun.sh +++ b/favorite-file/shell/install_docker_disable_firewalld_centos7-aliyun.sh @@ -45,7 +45,7 @@ docker run hello-world echo "-----------------------------------------安装 docker compose" echo "docker compose 的版本检查:https://docs.docker.com/compose/install/#install-compose" -curl -L https://github.com/docker/compose/releases/download/1.21.2/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose +curl -L "https://github.com/docker/compose/releases/download/1.23.1/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose chmod +x /usr/local/bin/docker-compose From 9d28869cc887408ea362e85ce7f22f3c41919984 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 19 Dec 2018 11:17:21 +0800 Subject: [PATCH 013/124] :construction: kafka --- markdown-file/Kafka-Install-And-Settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/markdown-file/Kafka-Install-And-Settings.md 
b/markdown-file/Kafka-Install-And-Settings.md index c23626e6..ecb40028 100644 --- a/markdown-file/Kafka-Install-And-Settings.md +++ b/markdown-file/Kafka-Install-And-Settings.md @@ -392,7 +392,7 @@ wurstmeister/kafka:latest ---------------------------------------------------------------------------------------------- -## Kafka 1.0.1 源码安装(也支持 1.0.2、0.11.0.3) +## Kafka 1.0.1 源码安装(也支持 1.0.2、0.11.0.3、0.10.2.2) - 测试环境:2G 内存足够 - 一台机子:CentOS 7.4,根据文章最开头,已经修改了 hosts From a366bc4944c95112d26a9c6f708d95f1209ae5f3 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 19 Dec 2018 11:19:25 +0800 Subject: [PATCH 014/124] :construction: Flink --- markdown-file/Flink-Install-And-Settings.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/markdown-file/Flink-Install-And-Settings.md b/markdown-file/Flink-Install-And-Settings.md index c2ea84d3..a50c9a27 100644 --- a/markdown-file/Flink-Install-And-Settings.md +++ b/markdown-file/Flink-Install-And-Settings.md @@ -19,14 +19,15 @@ - 最终我选择了:Apache 1.7.0 Flink only Scala 2.11,共:240M - 解压:`tar zxf flink-*.tgz` - 进入根目录:`cd flink-1.7.0`,完整路径:`cd /usr/local/flink-1.7.0` -- 启动:`cd /usr/local/flink-1.7.0 && ./bin/start-cluster.sh` -- 停止:`cd /usr/local/flink-1.7.0 && ./bin/stop-cluster.sh` +- 改下目录名方便后面书写:`mv /usr/local/flink-1.7.0 /usr/local/flink` +- 启动:`cd /usr/local/flink && ./bin/start-cluster.sh` +- 停止:`cd /usr/local/flink && ./bin/stop-cluster.sh` - 查看日志:`tail -300f log/flink-*-standalonesession-*.log` - 浏览器访问 WEB 管理:`http://192.168.0.105:8081` ## Demo -- 运行程序解压包下也有一些 jar demo:`cd /usr/local/flink-1.7.0/examples` +- 运行程序解压包下也有一些 jar demo:`cd /usr/local/flink/examples` - 官网: - DataStream API: - DataSet API: @@ -53,7 +54,7 @@ mvn archetype:generate \ - 可以直接在 IntelliJ IDEA 上 run - 也可以交给服务器上 flink 执行,也有两种方式: - - 把 jar 自己上传 Flink 服务器运行:`cd /usr/local/flink-1.7.0 && ./bin/flink run -c com.youmeek.WordCount /opt/flink-simple-demo-1.0-SNAPSHOT.jar` + - 把 jar 自己上传 Flink 服务器运行:`cd /usr/local/flink && ./bin/flink run -c com.youmeek.WordCount /opt/flink-simple-demo-1.0-SNAPSHOT.jar` - 也可以通过 WEB UI 上传 jar: - 有一个 `Add New` 按钮可以上传 jar 包,然后填写 Class 路径:`com.youmeek.WordCount` - `parallelism` 表示并行度,填写数字,一般并行度设置为集群 CPU 核数总和的 2-3 倍(如果是单机模式不需要设置并行度) From aef0b8f1176c451449bc7f97020554385aeb386a Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 19 Dec 2018 11:40:53 +0800 Subject: [PATCH 015/124] :construction: MySQL --- markdown-file/Mysql-Install-And-Settings.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/markdown-file/Mysql-Install-And-Settings.md b/markdown-file/Mysql-Install-And-Settings.md index 98fdbf34..cc79011c 100644 --- a/markdown-file/Mysql-Install-And-Settings.md +++ b/markdown-file/Mysql-Install-And-Settings.md @@ -5,7 +5,7 @@ - 关掉:SELinux - 创建本地数据存储 + 配置文件目录:`mkdir -p /data/docker/mysql/datadir /data/docker/mysql/conf /data/docker/mysql/log` -- 在宿主机上创建一个配置文件:`vim /data/docker/mysql/conf/mycat-mysql-1.cnf`,内容如下: +- 在宿主机上创建一个配置文件:`vim /data/docker/mysql/conf/mysql-1.cnf`,内容如下: ``` # 该编码设置是我自己配置的 @@ -36,10 +36,11 @@ max_allowed_packet = 50M - 赋权(避免挂载的时候,一些程序需要容器中的用户的特定权限使用):`chmod -R 777 /data/docker/mysql/datadir /data/docker/mysql/log` - 赋权:`chown -R 0:0 /data/docker/mysql/conf` - - 配置文件的赋权比较特殊,如果是给 777 权限会报:[Warning] World-writable config file '/etc/mysql/conf.d/mycat-mysql-1.cnf' is ignored,所以这里要特殊对待。容器内是用 root 的 uid,所以这里与之相匹配赋权即可。 + - 配置文件的赋权比较特殊,如果是给 777 权限会报:[Warning] World-writable config file '/etc/mysql/conf.d/mysql-1.cnf' is ignored,所以这里要特殊对待。容器内是用 root 的 uid,所以这里与之相匹配赋权即可。 - 我是进入容器 bash 内,输入:`whoami && 
id`,看到默认用户的 uid 是 0,所以这里才 chown 0 -- `docker run -p 3306:3306 --name cloud-mysql -v /data/docker/mysql/datadir:/var/lib/mysql -v /data/docker/mysql/log:/var/log/mysql -v /data/docker/mysql/conf:/etc/mysql/conf.d -e MYSQL_ROOT_PASSWORD=adg123456 -d mysql:5.7` -- 连上容器:`docker exec -it 09747cd7d0bd /bin/bash` +- `docker run -p 3306:3306 --name cloud-mysql -v /data/docker/mysql/datadir:/var/lib/mysql -v /data/docker/mysql/log:/var/log/mysql -v /data/docker/mysql/conf:/etc/mysql/conf.d -e MYSQL_ROOT_PASSWORD=123456 -d mysql:5.7` +- 连上容器:`docker exec -it cloud-mysql /bin/bash` + - 连上 MySQL:`mysql -u root -p` - 关于容器的 MySQL 配置,官网是这样说的: >> The MySQL startup configuration is specified in the file /etc/mysql/my.cnf, and that file in turn includes any files found in the /etc/mysql/conf.d directory that end with .cnf.Settings in files in this directory will augment and/or override settings in /etc/mysql/my.cnf. If you want to use a customized MySQL configuration,you can create your alternative configuration file in a directory on the host machine and then mount that directory location as /etc/mysql/conf.d inside the mysql container. From 90467b586aa2093201cec92b63c5f098be614b3d Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 19 Dec 2018 11:53:24 +0800 Subject: [PATCH 016/124] :construction: MySQL --- markdown-file/Mysql-Install-And-Settings.md | 1 + 1 file changed, 1 insertion(+) diff --git a/markdown-file/Mysql-Install-And-Settings.md b/markdown-file/Mysql-Install-And-Settings.md index cc79011c..3ab9a80a 100644 --- a/markdown-file/Mysql-Install-And-Settings.md +++ b/markdown-file/Mysql-Install-And-Settings.md @@ -41,6 +41,7 @@ max_allowed_packet = 50M - `docker run -p 3306:3306 --name cloud-mysql -v /data/docker/mysql/datadir:/var/lib/mysql -v /data/docker/mysql/log:/var/log/mysql -v /data/docker/mysql/conf:/etc/mysql/conf.d -e MYSQL_ROOT_PASSWORD=123456 -d mysql:5.7` - 连上容器:`docker exec -it cloud-mysql /bin/bash` - 连上 MySQL:`mysql -u root -p` + - 创建表:`CREATE DATABASE wormhole DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;` - 关于容器的 MySQL 配置,官网是这样说的: >> The MySQL startup configuration is specified in the file /etc/mysql/my.cnf, and that file in turn includes any files found in the /etc/mysql/conf.d directory that end with .cnf.Settings in files in this directory will augment and/or override settings in /etc/mysql/my.cnf. If you want to use a customized MySQL configuration,you can create your alternative configuration file in a directory on the host machine and then mount that directory location as /etc/mysql/conf.d inside the mysql container. 
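- 补充一个简单的验证思路(示例:沿用上文的容器名 `cloud-mysql` 和密码 123456,查询的变量以上文 cnf 里配置过的 `max_allowed_packet` 和 `character-set-server` 为例),容器起来后可以直接确认挂载的配置是否生效:

```
# 确认挂载的 cnf 已生效:50M 对应 52428800 字节
docker exec -it cloud-mysql mysql -uroot -p123456 -e "SHOW VARIABLES LIKE 'max_allowed_packet';"

# 顺便确认服务端字符集已经是 utf8mb4
docker exec -it cloud-mysql mysql -uroot -p123456 -e "SHOW VARIABLES LIKE 'character_set_server';"
```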
From 83475e3ad554f95516c5ce35df7b8238a422ccb2 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 19 Dec 2018 12:12:39 +0800 Subject: [PATCH 017/124] :construction: Hadoop --- markdown-file/Hadoop-Install-And-Settings.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/markdown-file/Hadoop-Install-And-Settings.md b/markdown-file/Hadoop-Install-And-Settings.md index d39a2e42..bead4ba5 100644 --- a/markdown-file/Hadoop-Install-And-Settings.md +++ b/markdown-file/Hadoop-Install-And-Settings.md @@ -438,6 +438,9 @@ tcp6 0 0 :::37481 :::* LISTEN - 运行一个文件相关作业: - 由于运行 hadoop 时指定的输入文件只能是 HDFS 文件系统中的文件,所以我们必须将要进行 wordcount 的文件从本地文件系统拷贝到 HDFS 文件系统中。 - 查看目前根目录结构:`hadoop fs -ls /` + - 查看目前根目录结构,另外写法:`hadoop fs -ls hdfs://linux-05:9000/` + - 或者列出目录以及下面的文件:`hadoop fs -ls -R /` + - 更多命令可以看:[hadoop HDFS常用文件操作命令](https://segmentfault.com/a/1190000002672666) - 创建目录:`hadoop fs -mkdir -p /tmp/zch/wordcount_input_dir` - 上传文件:`hadoop fs -put /opt/input.txt /tmp/zch/wordcount_input_dir` - 查看上传的目录下是否有文件:`hadoop fs -ls /tmp/zch/wordcount_input_dir` From aa1854321b9d1e2f5a2e5cb5174c82a38f62dc73 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 19 Dec 2018 14:04:09 +0800 Subject: [PATCH 018/124] :construction: Hadoop --- markdown-file/Hadoop-Install-And-Settings.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/markdown-file/Hadoop-Install-And-Settings.md b/markdown-file/Hadoop-Install-And-Settings.md index bead4ba5..142cc70f 100644 --- a/markdown-file/Hadoop-Install-And-Settings.md +++ b/markdown-file/Hadoop-Install-And-Settings.md @@ -424,8 +424,10 @@ tcp6 0 0 :::37481 :::* LISTEN ## 管理界面 -- 查看 HDFS 管理界面: -- 访问 YARN 管理界面: +- 查看 HDFS NameNode 管理界面: +- 访问 YARN ResourceManager 管理界面: +- 访问 NodeManager-1 管理界面: +- 访问 NodeManager-2 管理界面: ------------------------------------------------------------------- From b0d3119db866e88d0ac5df7ee50e4b44f1a59729 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 19 Dec 2018 14:50:10 +0800 Subject: [PATCH 019/124] :construction: Spark --- markdown-file/Spark-Install-And-Settings.md | 46 +++++++++++++++++++++ markdown-file/monitor.md | 2 +- 2 files changed, 47 insertions(+), 1 deletion(-) create mode 100644 markdown-file/Spark-Install-And-Settings.md diff --git a/markdown-file/Spark-Install-And-Settings.md b/markdown-file/Spark-Install-And-Settings.md new file mode 100644 index 00000000..916a5ef1 --- /dev/null +++ b/markdown-file/Spark-Install-And-Settings.md @@ -0,0 +1,46 @@ +# Spark 安装和配置 + +## 介绍 + +- 2018-12 发布最新:2.4.0 版本 +- 官网: +- 官网文档: +- 官网下载: +- 官网 Github: + +## 本地模式安装 + +- CentOS 7.4 +- IP 地址:`192.168.0.105` +- 必须 JDK 8.x +- 因为个人原因,我这里 Hadoop 还是 2.6.5 版本,Spark 要用的是 2.2.0 +- Spark 2.2.0 官网文档: + - 192M,下载速度有点慢 + - `cd /usr/local && wget https://archive.apache.org/dist/spark/spark-2.2.0/spark-2.2.0-bin-hadoop2.6.tgz` +- 解压:`tar zxvf spark-2.2.0-bin-hadoop2.6.tgz` +- 重命名:`mv /usr/local/spark-2.2.0-bin-hadoop2.6 /usr/local/spark` +- 增加环境变量: + +``` +vim /etc/profile + +SPARK_HOME=/usr/local/spark +PATH=$PATH:${SPARK_HOME}/bin:${SPARK_HOME}/sbin +export SPARK_HOME +export PATH + +source /etc/profile +``` + +- 修改配置:`cp $SPARK_HOME/conf/spark-env.sh.template $SPARK_HOME/conf/spark-env.sh` +- 修改配置:`vim $SPARK_HOME/conf/spark-env.sh` +- 假设我的 hadoop 路径是:/usr/local/hadoop-2.6.5,则最尾巴增加: + +``` +export HADOOP_CONF_DIR=/usr/local/hadoop-2.6.5/etc/hadoop +``` + + +## 资料 + +- diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index 5505e34e..38ae729c 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -568,7 +568,7 @@ TOTAL:(总的流量) 12.9GB 
229Mb 190Mb 193Mb ``` -### 端口使用情况 +### 端口使用情况(也可以用来查看端口占用) #### lsof From 08f48891c0e64c79bfdeb913597707666e92fd1f Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 19 Dec 2018 14:59:54 +0800 Subject: [PATCH 020/124] :construction: Spark --- markdown-file/Spark-Install-And-Settings.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/markdown-file/Spark-Install-And-Settings.md b/markdown-file/Spark-Install-And-Settings.md index 916a5ef1..004ce09c 100644 --- a/markdown-file/Spark-Install-And-Settings.md +++ b/markdown-file/Spark-Install-And-Settings.md @@ -13,6 +13,7 @@ - CentOS 7.4 - IP 地址:`192.168.0.105` - 必须 JDK 8.x +- 已经安装了 hadoop-2.6.5 集群(**这个细节注意**) - 因为个人原因,我这里 Hadoop 还是 2.6.5 版本,Spark 要用的是 2.2.0 - Spark 2.2.0 官网文档: - 192M,下载速度有点慢 @@ -40,6 +41,8 @@ source /etc/profile export HADOOP_CONF_DIR=/usr/local/hadoop-2.6.5/etc/hadoop ``` +- 因为要交给 YARN 作业,所以到这里就好了。 + ## 资料 From 0c8cae6d1366524c9dd5be4de391e499c832f014 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9D=B3=E9=98=B3?= <260893248@qq.com> Date: Tue, 25 Dec 2018 14:47:28 +0800 Subject: [PATCH 021/124] =?UTF-8?q?=E7=96=91=E4=BC=BC=E5=BA=94=E8=AF=A5?= =?UTF-8?q?=E6=98=AF=20'.'=20(=E8=A1=A8=E7=A4=BA=E9=9A=90=E8=97=8F?= =?UTF-8?q?=E6=96=87=E4=BB=B6)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 疑似应该是 '.' (表示隐藏文件) --- markdown-file/Bash.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/markdown-file/Bash.md b/markdown-file/Bash.md index 396526d4..58dfcd55 100644 --- a/markdown-file/Bash.md +++ b/markdown-file/Bash.md @@ -48,7 +48,7 @@ - `last`,显示最近登录的帐户及时间 - `lastlog`,显示系统所有用户各自在最近登录的记录,如果没有登录过的用户会显示 **从未登陆过** - `ls`,列出当前目录下的所有没有隐藏的文件 / 文件夹。 - - `ls -a`,列出包括以.号开头的隐藏文件 / 文件夹(也就是所有文件) + - `ls -a`,列出包括以.号开头的隐藏文件 / 文件夹(也就是所有文件) - `ls -R`,显示出目录下以及其所有子目录的文件 / 文件夹(递归地方式,不显示隐藏的文件) - `ls -a -R`,显示出目录下以及其所有子目录的文件 / 文件夹(递归地方式,显示隐藏的文件) - `ls -al`,列出目录下所有文件(包含隐藏)的权限、所有者、文件大小、修改时间及名称(也就是显示详细信息) From 97e93c69862835abdf39b6f877f54283f30e3141 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9D=B3=E9=98=B3?= <260893248@qq.com> Date: Tue, 25 Dec 2018 18:56:37 +0800 Subject: [PATCH 022/124] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E9=94=99=E5=88=AB?= =?UTF-8?q?=E5=AD=97=E4=B9=8B=E7=B1=BB?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 修改错别字之类 --- markdown-file/Vim-Install-And-Settings.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/markdown-file/Vim-Install-And-Settings.md b/markdown-file/Vim-Install-And-Settings.md index a2a3049d..77560192 100644 --- a/markdown-file/Vim-Install-And-Settings.md +++ b/markdown-file/Vim-Install-And-Settings.md @@ -30,7 +30,7 @@ - `l`,右 - `v`,按 v 之后按方向键可以选中你要选中的文字 - `gg`,跳到第 1 行 - - `G`,跳到第最后行 + - `G`,跳到最后一行 - `16G` 或 `:16`,跳到第 16 行 - `$`,到本行 **行尾** - `0`,到本行 **行头** @@ -94,7 +94,7 @@ - `:s/YouMeek/Judasn/`,把光标当前行第一个 YouMeek 替换为 Judasn - `:s/YouMeek/Judasn/g`,把光标当前行所有 YouMeek 替换为 Judasn - `:s#YouMeek/#Judasn/#`,除了使用斜杠作为分隔符之外,还可以使用 # 作为分隔符,此时中间出现的 / 不会作为分隔符,该命令表示:把光标当前行第一个 YouMeek/ 替换为 Judasn/ - - `:10,31s/YouMeek/Judasng`,把第 10 行到 31 行之间所有 YouMeek 替换为 Judasn + - `:10,31s/YouMeek/Judasn/g`,把第 10 行到 31 行之间所有 YouMeek 替换为 Judasn ## Vim 的特殊复制、黏贴 @@ -114,7 +114,7 @@ - 效果如下: - ![vim-for-server](https://raw.githubusercontent.com/wklken/gallery/master/vim/vim-for-server.png) - 需要特别注意的是,如果你平时粘贴内容到终端 Vim 出现缩进错乱,一般需要这样做: - - 进入 vim 后,按 `F5`,然后 `shirt + insert` 进行粘贴。这种事就不会错乱了。 + - 进入 vim 后,按 `F5`,然后 `shift + insert` 进行粘贴。这种事就不会错乱了。 - 原因是:`vim ~/.vimrc` 中有一行这样的设置:`set pastetoggle=` ## 资料 From 
7e182686f3dc7e103831e023ef38db9c5367ccb8 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 26 Dec 2018 14:38:22 +0800 Subject: [PATCH 023/124] :construction: Kafka --- markdown-file/Kafka-Install-And-Settings.md | 1 + 1 file changed, 1 insertion(+) diff --git a/markdown-file/Kafka-Install-And-Settings.md b/markdown-file/Kafka-Install-And-Settings.md index ecb40028..61993859 100644 --- a/markdown-file/Kafka-Install-And-Settings.md +++ b/markdown-file/Kafka-Install-And-Settings.md @@ -42,6 +42,7 @@ - Partition:是物理上的概念,每个 Topic 包含一个或多个 Partition。一般有几个 Broker,填写分区最好是等于大于节点值。分区目的主要是数据分片,解决水平扩展、高吞吐量。当 Producer 生产消息的时候,消息会被算法计算后分配到对应的分区,Consumer 读取的时候算法也会帮我们找到消息所在分区,这是内部实现的,应用层面不用管。 - Replication-factor:副本。假设有 3 个 Broker 的情况下,当副本为 3 的时候每个 Partition 会在每个 Broker 都会存有一份,目的主要是容错。 - 其中有一个 Leader。 + - 如果你只有一个 Broker,但是创建 Topic 的时候指定 Replication-factor 为 3,则会报错 - Consumer Group:每个 Consumer 属于一个特定的 Consumer Group(可为每个 Consumer 指定 group name,若不指定 group name 则属于默认的 group)一般一个业务系统集群指定同一个一个 group id,然后一个业务系统集群只能一个节点来消费同一个消息。 - Consumer Group 信息存储在 zookeeper 中,需要通过 zookeeper 的客户端来查看和设置 - 如果某 Consumer Group 中 consumer 数量少于 partition 数量,则至少有一个 consumer 会消费多个 partition 的数据 From 67f74ec519d684daa37963d339d893316f5c33cc Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 26 Dec 2018 16:35:29 +0800 Subject: [PATCH 024/124] :construction: Kafka --- .../Wormhole-Install-And-Settings.md | 303 ++++++++++++++++++ 1 file changed, 303 insertions(+) create mode 100644 markdown-file/Wormhole-Install-And-Settings.md diff --git a/markdown-file/Wormhole-Install-And-Settings.md b/markdown-file/Wormhole-Install-And-Settings.md new file mode 100644 index 00000000..47ca8a28 --- /dev/null +++ b/markdown-file/Wormhole-Install-And-Settings.md @@ -0,0 +1,303 @@ +# Wormhole Flink 最佳实践 + +## 前置声明 + +- 需要对流计算的一些基础概念有基础了解,比如:Source、Sink、YARN、Kafka 等 + + +## 基础环境 + +- 参考官网: +- 三台 4C8G 服务器 CentOS 7.4 + - hostname:`linux-05` + - hostname:`linux-06` + - hostname:`linux-07` +- 必须(版本请不要随便用,而是按照如下说明来): + - 一般情况下,我组件都是放在:`/usr/local` + - JDK 1.8(三台) + - Hadoop 集群(HDFS,YARN)(三台):2.6.5 + - Spark 单点(linux-05):2.2.0 + - Flink 单点(linux-05):1.5.1 + - Zookeeper(linux-05):3.4.13 + - Kafka(linux-05):0.10.2.2 + - MySQL(linux-05):5.7 + - 以上组件安装教程可以查看该教程:[点击我](https://github.com/judasn/Linux-Tutorial) +- 非必须: + - Elasticsearch(支持版本 5.x)(非必须,若无则无法查看 wormhole 处理数据的吞吐和延时) + - Grafana (支持版本 4.x)(非必须,若无则无法查看 wormhole 处理数据的吞吐和延时的图形化展示) + +------------------------------------------------------------------- + +## Wormhole 安装 + 配置 + +- 参考官网: +- 最终环境 application.conf 配置文件参考 + +``` + +akka.http.server.request-timeout = 120s + +wormholeServer { + cluster.id = "" #optional global uuid + host = "linux-05" + port = 8989 + ui.default.language = "Chinese" + token.timeout = 1 + token.secret.key = "iytr174395lclkb?lgj~8u;[=L:ljg" + admin.username = "admin" #default admin user name + admin.password = "admin" #default admin user password +} + +mysql = { + driver = "slick.driver.MySQLDriver$" + db = { + driver = "com.mysql.jdbc.Driver" + user = "root" + password = "123456" + url = "jdbc:mysql://localhost:3306/wormhole?useUnicode=true&characterEncoding=UTF-8&useSSL=false" + numThreads = 4 + minConnections = 4 + maxConnections = 10 + connectionTimeout = 3000 + } +} + +ldap = { + enabled = false + user = "" + pwd = "" + url = "" + dc = "" + read.timeout = 3000 + read.timeout = 5000 + connect = { + timeout = 5000 + pool = true + } +} + +spark = { + wormholeServer.user = "root" #WormholeServer linux user + wormholeServer.ssh.port = 22 #ssh port, please set WormholeServer linux user can 
password-less login itself remote + spark.home = "/usr/local/spark" + yarn.queue.name = "default" #WormholeServer submit spark streaming/job queue + wormhole.hdfs.root.path = "hdfs://linux-05/wormhole" #WormholeServer hdfslog data default hdfs root path + yarn.rm1.http.url = "linux-05:8088" #Yarn ActiveResourceManager address + yarn.rm2.http.url = "linux-05:8088" #Yarn StandbyResourceManager address +} + +flink = { + home = "/usr/local/flink" + yarn.queue.name = "default" + feedback.state.count=100 + checkpoint.enable=false + checkpoint.interval=60000 + stateBackend="hdfs://linux-05/flink-checkpoints" + feedback.interval=30 +} + +zookeeper = { + connection.url = "localhost:2181" #WormholeServer stream and flow interaction channel + wormhole.root.path = "/wormhole" #zookeeper +} + +kafka = { + #brokers.url = "localhost:6667" #WormholeServer feedback data store + brokers.url = "linux-05:9092" + zookeeper.url = "localhost:2181" + #topic.refactor = 3 + topic.refactor = 1 + using.cluster.suffix = false #if true, _${cluster.id} will be concatenated to consumer.feedback.topic + consumer = { + feedback.topic = "wormhole_feedback" + poll-interval = 20ms + poll-timeout = 1s + stop-timeout = 30s + close-timeout = 20s + commit-timeout = 70s + wakeup-timeout = 60s + max-wakeups = 10 + session.timeout.ms = 60000 + heartbeat.interval.ms = 50000 + max.poll.records = 1000 + request.timeout.ms = 80000 + max.partition.fetch.bytes = 10485760 + } +} + +#kerberos = { +# keyTab="" #the keyTab will be used on yarn +# spark.principal="" #the principal of spark +# spark.keyTab="" #the keyTab of spark +# server.config="" #the path of krb5.conf +# jaas.startShell.config="" #the path of jaas config file which should be used by start.sh +# jaas.yarn.config="" #the path of jaas config file which will be uploaded to yarn +# server.enabled=false #enable wormhole connect to Kerberized cluster +#} + +# choose monitor method among ES、MYSQL +monitor ={ + database.type="ES" +} + +#Wormhole feedback data store, if doesn't want to config, you will not see wormhole processing delay and throughput +#if not set, please comment it + +#elasticSearch.http = { +# url = "http://localhost:9200" +# user = "" +# password = "" +#} + +#display wormhole processing delay and throughput data, get admin user token from grafana +#garfana should set to be anonymous login, so you can access the dashboard through wormhole directly +#if not set, please comment it + +#grafana = { +# url = "http://localhost:3000" +# admin.token = "jihefouglokoj" +#} + +#delete feedback history data on time +maintenance = { + mysql.feedback.remain.maxDays = 7 + elasticSearch.feedback.remain.maxDays = 7 +} + + +#Dbus integration, support serveral DBus services, if not set, please comment it + +#dbus = { +# api = [ +# { +# login = { +# url = "http://localhost:8080/keeper/login" +# email = "" +# password = "" +# } +# synchronization.namespace.url = "http://localhost:8080/keeper/tables/riderSearch" +# } +# ] +#} +``` + +- 初始化数据库: + - 创建表:`create database wormhole character set utf8;` +- 初始化表结构脚本路径: + - 该脚本存在一个问题:初始化脚本和补丁脚本混在一起,所以直接复制执行会有报错,但是报错的部分是不影响 + - 我是直接把基础 sql 和补丁 sql 分开执行,方便判断。 +- 部署完成,浏览器访问: + +------------------------------------------------------------------- + +## 创建用户 + +- **参考官网,必须先了解下**: +- 必须创建用户,后面才能进入 Project 里面创建 Stream / Flow +- 创建的用户类型必须是:`user` + + +------------------------------------------------------------------- + +## 创建 Source 需要涉及的概念 + +#### 创建 Instance + +- Instance 用于绑定各个组件的所在服务连接 +- 一般我们都会选择 Kafka 作为 source,后面的基础也是基于 Kafka 作为 Source 的场景 +- 
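创建前可先自查一下 Kafka 实例的连通性,脚本见下方补充

(编者补充:一个最简单的连通性自查脚本,属于笔者个人习惯而非官方步骤;其中 linux-05:9092、localhost:2181 沿用上文环境约定,若机器未装 nc 可改用 telnet 验证)

```
#!/bin/bash
# 探测 Kafka 端口是否可达(3 秒超时)
nc -z -w 3 linux-05 9092 && echo "kafka 端口可达" || echo "kafka 端口不可达"

# 列出当前全部 topic,确认 broker 能正常响应请求
/usr/local/kafka/bin/kafka-topics.sh --list --zookeeper localhost:2181
```

- 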
假设填写实例名:`source_kafka` + +#### 创建 Database + +- 各个组件的具体数据库、Topic 等信息 +- 假设填写 topic:`source` + + +#### 创建 Namespace + +- wormhole 抽象出来的概念 +- 用于数据分类 +- 假设填写 Tables:`ums_extension id` +- 配置 schema,记得配置上 ums_ts + +``` +{ + "id": 1, + "name": "test", + "phone": "18074546423", + "city": "Beijing", + "time": "2017-12-22 10:00:00" +} +``` + + +------------------------------------------------------------------- + +## 创建 Sink 需要涉及的概念 + +#### 创建 Instance + +- 假设填写实例名:`sink_mysql` + +#### 创建 Database + +- 假设填写 Database Name:`sink` +- config 参数:`useUnicode=true&characterEncoding=UTF-8&useSSL=false&rewriteBatchedStatements=true` + +#### 创建 Namespace + +- 假设填写 Tables: `user id` + + +------------------------------------------------------------------- + +## 创建 Project + +- 项目标识:`demo` + +------------------------------------------------------------------- + + +## Flink Stream + +- Stream 是在 Project 内容页下才能创建 +- 一个 Stream 可以有多个 Flow +- 并且是 Project 下面的用户才能创建,admin 用户没有权限 +- 要删除 Project 必须先进入 Project 内容页删除所有 Stream 之后 admin 才能删除 Project +- 新建 Stream + - Stream type 类型选择:`Flink` + - 假设填写 Name:`wormhole_stream_test` + +## Flink Flow(流式作业) + +- Flow 是在 Project 内容页下才能创建 +- 并且是 Project 下面的用户才能创建,admin 用户没有权限 +- Flow 会关联 source 和 sink +- 要删除 Project 必须先进入 Project 内容页删除所有 Stream 之后 admin 才能删除 Project +- 基于 Stream 新建 Flow + - Pipeline + - Transformation + - + - NO_SKIP 滑动窗口 + - SKIP_PAST_LAST_EVENT 滚动窗口 + - KeyBy 分组字段 + - Output + - Agg:将匹配的多条数据做聚合,生成一条数据输出,例:field1:avg,field2:max(目前支持 max/min/avg/sum) + - Detail:将匹配的多条数据逐一输出 + - FilteredRow:按条件选择指定的一条数据输出,例:head/last/ field1:min/max + - Confirmation +- 注意:Stream 处于 running 状态时,才可以启动 Flow + + +------------------------------------------------------------------- + +## Kafka 发送测试数据 + +- `cd /usr/local/kafka/bin` +- `./kafka-console-producer.sh --broker-list linux-05:9092 --topic source --property "parse.key=true" --property "key.separator=@@@"` +- 发送 UMS 流消息协议规范格式: + +``` +data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 1, "name": "test", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:00:00"} +``` + From a1e73315d70e6632dafb62b81cda364cea600fec Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 26 Dec 2018 16:37:23 +0800 Subject: [PATCH 025/124] :construction: Kafka --- markdown-file/Wormhole-Install-And-Settings.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/markdown-file/Wormhole-Install-And-Settings.md b/markdown-file/Wormhole-Install-And-Settings.md index 47ca8a28..d3bdad27 100644 --- a/markdown-file/Wormhole-Install-And-Settings.md +++ b/markdown-file/Wormhole-Install-And-Settings.md @@ -14,13 +14,13 @@ - hostname:`linux-07` - 必须(版本请不要随便用,而是按照如下说明来): - 一般情况下,我组件都是放在:`/usr/local` - - JDK 1.8(三台) - - Hadoop 集群(HDFS,YARN)(三台):2.6.5 - - Spark 单点(linux-05):2.2.0 - - Flink 单点(linux-05):1.5.1 - - Zookeeper(linux-05):3.4.13 - - Kafka(linux-05):0.10.2.2 - - MySQL(linux-05):5.7 + - JDK(三台):`1.8.0_181` + - Hadoop 集群(HDFS,YARN)(三台):`2.6.5` + - Spark 单点(linux-05):`2.2.0` + - Flink 单点(linux-05):`1.5.1` + - Zookeeper 单点(linux-05):`3.4.13` + - Kafka 单点(linux-05):`0.10.2.2` + - MySQL 单点(linux-05):`5.7` - 以上组件安装教程可以查看该教程:[点击我](https://github.com/judasn/Linux-Tutorial) - 非必须: - Elasticsearch(支持版本 5.x)(非必须,若无则无法查看 wormhole 处理数据的吞吐和延时) From 8388a7121a61b1f29fc6feb034056c0ae73fb410 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 26 Dec 2018 16:41:47 +0800 Subject: [PATCH 026/124] :construction: Kafka --- markdown-file/Wormhole-Install-And-Settings.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) 
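
(编者补充:验证上文 Flow 时逐条手敲 UMS 测试消息比较费劲,下面是笔者自用的批量发送脚本,属于假设性示例;topic `source`、分隔符 `@@@`、namespace key 与 broker 地址 linux-05:9092 均沿用上文 Wormhole 文档的约定)

```
#!/bin/bash
# 循环生成 10 条符合 UMS 协议的测试消息(id 递增),再整体交给控制台生产者发送
for i in $(seq 1 10); do
  echo "data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{\"id\": ${i}, \"name\": \"test${i}\", \"phone\": \"18074546423\", \"city\": \"Beijing\", \"time\": \"$(date '+%Y-%m-%d %H:%M:%S')\"}"
done | /usr/local/kafka/bin/kafka-console-producer.sh \
  --broker-list linux-05:9092 --topic source \
  --property "parse.key=true" --property "key.separator=@@@"
```

发送后可在 Wormhole 的 Flow 页面观察计数变化,以验证滑动窗口统计是否生效。
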
diff --git a/markdown-file/Wormhole-Install-And-Settings.md b/markdown-file/Wormhole-Install-And-Settings.md index d3bdad27..3c44fcee 100644 --- a/markdown-file/Wormhole-Install-And-Settings.md +++ b/markdown-file/Wormhole-Install-And-Settings.md @@ -4,6 +4,24 @@ - 需要对流计算的一些基础概念有基础了解,比如:Source、Sink、YARN、Kafka 等 +------------------------------------------------------------------- + +## 本文目标 + +- 统计 **滑动窗口** 下的流过的数据量(count) +- 数据格式: + +``` +{ + "id": 1, + "name": "test", + "phone": "18074546423", + "city": "Beijing", + "time": "2017-12-22 10:00:00" +} +``` + +------------------------------------------------------------------- ## 基础环境 From c70186bcbb0f21d6f8897ea8256434028d00bc9a Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 26 Dec 2018 16:42:51 +0800 Subject: [PATCH 027/124] :construction: Kafka --- markdown-file/Wormhole-Install-And-Settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/markdown-file/Wormhole-Install-And-Settings.md b/markdown-file/Wormhole-Install-And-Settings.md index 3c44fcee..fba90057 100644 --- a/markdown-file/Wormhole-Install-And-Settings.md +++ b/markdown-file/Wormhole-Install-And-Settings.md @@ -9,7 +9,7 @@ ## 本文目标 - 统计 **滑动窗口** 下的流过的数据量(count) -- 数据格式: +- 业务数据格式: ``` { From 501691cf4a196abc91cb36dad4149ae6235c6ed5 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 26 Dec 2018 17:23:42 +0800 Subject: [PATCH 028/124] :construction: Kafka --- markdown-file/Wormhole-Install-And-Settings.md | 1 + 1 file changed, 1 insertion(+) diff --git a/markdown-file/Wormhole-Install-And-Settings.md b/markdown-file/Wormhole-Install-And-Settings.md index fba90057..96e96161 100644 --- a/markdown-file/Wormhole-Install-And-Settings.md +++ b/markdown-file/Wormhole-Install-And-Settings.md @@ -39,6 +39,7 @@ - Zookeeper 单点(linux-05):`3.4.13` - Kafka 单点(linux-05):`0.10.2.2` - MySQL 单点(linux-05):`5.7` + - wormhole 单点(linux-05):`0.6.0-beta`,2018-12-06 版本 - 以上组件安装教程可以查看该教程:[点击我](https://github.com/judasn/Linux-Tutorial) - 非必须: - Elasticsearch(支持版本 5.x)(非必须,若无则无法查看 wormhole 处理数据的吞吐和延时) From 046d5cd089d2edaba4301515764423b67579ec6f Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 27 Dec 2018 10:52:42 +0800 Subject: [PATCH 029/124] :construction: Hacked --- markdown-file/Was-Hacked.md | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/markdown-file/Was-Hacked.md b/markdown-file/Was-Hacked.md index 3adb0e70..a803a05a 100644 --- a/markdown-file/Was-Hacked.md +++ b/markdown-file/Was-Hacked.md @@ -31,8 +31,10 @@ - 查看开放的端口,比如常用的80,22,8009,后面的箭头表示端口对应占用的程序:`netstat -lnp` - 检查某个端口的具体信息:`lsof -i :18954` - 检查启动项:`chkconfig` -- 检查定时器:`cat /etc/crontab` -- 检查定时器:`crontab -l` +- 检查定时器(重要):`cat /etc/crontab` +- 检查定时器(重要):`crontab -l` + - `vim /var/spool/cron/crontabs/root` + - `vim /var/spool/cron/root` - 检查其他系统重要文件: - `cat /etc/rc.local` - `cd /etc/init.d;ll` @@ -89,6 +91,25 @@ TOTAL:(总的流量) 12.9GB 229Mb 190Mb 193Mb - yum update openssh-server +## 实战 + +#### 挖矿程序 + +- 先查看调度任务是否有新增内容 + - `vim /var/spool/cron/root` + - `vim /var/spool/cron/crontabs/root` +- 如果有,先停止定时任务:`systemctl stop crond` +- 如果对方有去 wget curl 指定网站,则先在 hosts 里面映射为 127.0.0.1,比如:`127.0.0.1 prax0zma.ru` + - 查看当前最占用 CPU 的进程 PID,加入发现是 22935,则:`cd /proc/22935 && ll`,发现程序目录是:`/root/.tmp00/bash64` + - 我们就把该程序去掉执行任务的权限:`chmod -R -x /root/.tmp00/`,然后再 kill 掉该程序 +- 打开别人的脚本,看下是如何书写的,发现有写入几个目录,这里进行删除: + +``` +rm -rf /tmp/.ha /boot/.b /boot/.0 /root/.tmp00 +``` + +- 最后检查下是否有免密内容被修改:`cd ~/.ssh/ && cat authorized_keys` + ## 资料 - From 7f5687ecfa8c57ce380ae548c8e4083841b8440b Mon Sep 17 00:00:00 
2001 From: judasn Date: Thu, 27 Dec 2018 16:00:44 +0800 Subject: [PATCH 030/124] :construction: Flink --- markdown-file/Flink-Install-And-Settings.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/markdown-file/Flink-Install-And-Settings.md b/markdown-file/Flink-Install-And-Settings.md index a50c9a27..a0f93ccd 100644 --- a/markdown-file/Flink-Install-And-Settings.md +++ b/markdown-file/Flink-Install-And-Settings.md @@ -25,6 +25,14 @@ - 查看日志:`tail -300f log/flink-*-standalonesession-*.log` - 浏览器访问 WEB 管理:`http://192.168.0.105:8081` +## yarn 启动 + +- 安装方式跟上面一样,但是必须保证有 hadoop、yarn 集群 +- 控制台启动:`cd /usr/local/flink && ./bin/yarn-session.sh -n 2 -jm 1024 -tm 1024` +- 守护进程启动:`cd /usr/local/flink && ./bin/yarn-session.sh -n 2 -jm 1024 -tm 1024 -d` +- 有可能会报:`The Flink Yarn cluster has failed`,可能是资源不够 + + ## Demo - 运行程序解压包下也有一些 jar demo:`cd /usr/local/flink/examples` From 6b01f97c3160120c488ba837e1a12c2c000eb4c7 Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 27 Dec 2018 17:12:33 +0800 Subject: [PATCH 031/124] :construction: Flink --- markdown-file/Ansible-Install-And-Settings.md | 26 ++++++ markdown-file/Hadoop-Install-And-Settings.md | 54 +++++------ .../Wormhole-Install-And-Settings.md | 92 ++++++++++--------- 3 files changed, 104 insertions(+), 68 deletions(-) diff --git a/markdown-file/Ansible-Install-And-Settings.md b/markdown-file/Ansible-Install-And-Settings.md index 91011077..6c22a870 100644 --- a/markdown-file/Ansible-Install-And-Settings.md +++ b/markdown-file/Ansible-Install-And-Settings.md @@ -206,6 +206,32 @@ PLAY RECAP ********************************************************************* - 执行命令:`ansible-playbook /opt/jdk8-playbook.yml` +#### 修改 hosts + + +- 创建脚本文件:`vim /opt/hosts-playbook.yml` + +``` +- hosts: all + remote_user: root + tasks: + - name: update hosts + blockinfile: + path: /etc/hosts + block: | + 192.168.0.223 linux01 + 192.168.0.223 linux02 + 192.168.0.223 linux03 + 192.168.0.223 linux04 + 192.168.0.223 linux05 +``` + + +- 执行命令:`ansible-playbook /opt/hosts-playbook.yml` + +------------------------------------------------------------------- + + ## 资料 diff --git a/markdown-file/Hadoop-Install-And-Settings.md b/markdown-file/Hadoop-Install-And-Settings.md index 142cc70f..f75adc34 100644 --- a/markdown-file/Hadoop-Install-And-Settings.md +++ b/markdown-file/Hadoop-Install-And-Settings.md @@ -28,9 +28,9 @@ - 分别给三台机子设置 hostname ``` -hostnamectl --static set-hostname hadoop-master -hostnamectl --static set-hostname hadoop-node1 -hostnamectl --static set-hostname hadoop-node2 +hostnamectl --static set-hostname linux01 +hostnamectl --static set-hostname linux02 +hostnamectl --static set-hostname linux03 ``` @@ -39,13 +39,13 @@ hostnamectl --static set-hostname hadoop-node2 ``` 就按这个来,其他多余的别加,不然可能也会有影响 vim /etc/hosts -172.16.0.17 hadoop-master -172.16.0.43 hadoop-node1 -172.16.0.180 hadoop-node2 +172.16.0.17 linux01 +172.16.0.43 linux02 +172.16.0.180 linux03 ``` -- 对 hadoop-master 设置免密: +- 对 linux01 设置免密: ``` 生产密钥对 @@ -64,13 +64,13 @@ ssh localhost - 如果你是采用 pem 登录的,可以看这个:[SSH 免密登录](SSH-login-without-password.md) ``` -ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@172.16.0.43,根据提示输入 hadoop-node1 机器的 root 密码,成功会有相应提示 -ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@172.16.0.180,根据提示输入 hadoop-node2 机器的 root 密码,成功会有相应提示 +ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@172.16.0.43,根据提示输入 linux02 机器的 root 密码,成功会有相应提示 +ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@172.16.0.180,根据提示输入 linux03 机器的 root 密码,成功会有相应提示 -在 hadoop-master 上测试: -ssh hadoop-node1 -ssh hadoop-node2 +在 linux01 上测试: 
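+# 编者注(补充):若 ssh 仍提示输入密码,先检查 ~/.ssh 目录权限是否为 700、authorized_keys 是否为 600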
+ssh linux02 +ssh linux03 ``` @@ -88,7 +88,7 @@ mkdir -p /data/hadoop/hdfs/name /data/hadoop/hdfs/data /data/hadoop/hdfs/tmp ``` - 下载 Hadoop: -- 现在 hadoop-master 机子上安装 +- 现在 linux01 机子上安装 ``` cd /usr/local && wget http://apache.claz.org/hadoop/common/hadoop-2.6.5/hadoop-2.6.5.tar.gz @@ -108,7 +108,7 @@ source /etc/profile ``` -## 修改 hadoop-master 配置 +## 修改 linux01 配置 ``` @@ -145,12 +145,12 @@ vim $HADOOP_HOME/etc/hadoop/core-site.xml,改为: fs.defaultFS - hdfs://hadoop-master:9000 + hdfs://linux01:9000 hadoop.proxyuser.root.hosts @@ -225,7 +225,7 @@ vim $HADOOP_HOME/etc/hadoop/yarn-site.xml yarn.resourcemanager.hostname - hadoop-master + linux01 @@ -244,21 +244,21 @@ vim $HADOOP_HOME/etc/hadoop/yarn-site.xml vim $HADOOP_HOME/etc/hadoop/slaves 把默认的配置里面的 localhost 删除,换成: -hadoop-node1 -hadoop-node2 +linux02 +linux03 ``` ``` -scp -r /usr/local/hadoop-2.6.5 root@hadoop-node1:/usr/local/ +scp -r /usr/local/hadoop-2.6.5 root@linux02:/usr/local/ -scp -r /usr/local/hadoop-2.6.5 root@hadoop-node2:/usr/local/ +scp -r /usr/local/hadoop-2.6.5 root@linux03:/usr/local/ ``` -## hadoop-master 机子运行 +## linux01 机子运行 ``` 格式化 HDFS @@ -269,7 +269,7 @@ hdfs namenode -format - 输出结果: ``` -[root@hadoop-master hadoop-2.6.5]# hdfs namenode -format +[root@linux01 hadoop-2.6.5]# hdfs namenode -format 18/12/17 17:47:17 INFO namenode.NameNode: STARTUP_MSG: /************************************************************ STARTUP_MSG: Starting NameNode @@ -424,10 +424,10 @@ tcp6 0 0 :::37481 :::* LISTEN ## 管理界面 -- 查看 HDFS NameNode 管理界面: -- 访问 YARN ResourceManager 管理界面: -- 访问 NodeManager-1 管理界面: -- 访问 NodeManager-2 管理界面: +- 查看 HDFS NameNode 管理界面: +- 访问 YARN ResourceManager 管理界面: +- 访问 NodeManager-1 管理界面: +- 访问 NodeManager-2 管理界面: ------------------------------------------------------------------- diff --git a/markdown-file/Wormhole-Install-And-Settings.md b/markdown-file/Wormhole-Install-And-Settings.md index 96e96161..2a55aa81 100644 --- a/markdown-file/Wormhole-Install-And-Settings.md +++ b/markdown-file/Wormhole-Install-And-Settings.md @@ -26,21 +26,32 @@ ## 基础环境 - 参考官网: -- 三台 4C8G 服务器 CentOS 7.4 - - hostname:`linux-05` - - hostname:`linux-06` - - hostname:`linux-07` +- 4 台 8C32G 服务器 CentOS 7.5 + - **为了方便测试,服务器都已经关闭防火墙,并且对外开通所有端口** + - **都做了免密登录** + - hostname:`linux01` + - hostname:`linux02` + - hostname:`linux03` + - hostname:`linux04` + - hostname:`linux05` + - Ansible 批量添加 hosts 请看:[点击我](Ansible-Install-And-Settings.md) - 必须(版本请不要随便用,而是按照如下说明来): - 一般情况下,我组件都是放在:`/usr/local` - - JDK(三台):`1.8.0_181` - - Hadoop 集群(HDFS,YARN)(三台):`2.6.5` - - Spark 单点(linux-05):`2.2.0` - - Flink 单点(linux-05):`1.5.1` - - Zookeeper 单点(linux-05):`3.4.13` - - Kafka 单点(linux-05):`0.10.2.2` - - MySQL 单点(linux-05):`5.7` - - wormhole 单点(linux-05):`0.6.0-beta`,2018-12-06 版本 - - 以上组件安装教程可以查看该教程:[点击我](https://github.com/judasn/Linux-Tutorial) + - JDK(所有服务器):`1.8.0_181` + - 批量添加 JDK 请看:[点击我](Ansible-Install-And-Settings.md) + - Hadoop 集群(HDFS,YARN)(linux01、linux02、linux03):`2.6.5` + - 安装请看:[点击我](Hadoop-Install-And-Settings.md) + - Zookeeper 单点(linux04):`3.4.13` + - 安装请看:[点击我](Zookeeper-Install.md) + - Kafka 单点(linux04):`0.10.2.2` + - 安装请看:[点击我](Kafka-Install-And-Settings.md) + - MySQL 单点(linux04):`5.7` + - 安装请看:[点击我](Mysql-Install-And-Settings.md) + - Spark 单点(linux05):`2.2.0` + - 安装请看:[点击我](Spark-Install-And-Settings.md) + - Flink 单点(linux05):`1.5.1` + - 安装请看:[点击我](Flink-Install-And-Settings.md) + - wormhole 单点(linux05):`0.6.0-beta`,2018-12-06 版本 - 非必须: - Elasticsearch(支持版本 5.x)(非必须,若无则无法查看 wormhole 处理数据的吞吐和延时) - Grafana (支持版本 4.x)(非必须,若无则无法查看 wormhole 
处理数据的吞吐和延时的图形化展示) @@ -50,7 +61,8 @@ ## Wormhole 安装 + 配置 - 参考官网: -- 最终环境 application.conf 配置文件参考 +- 解压:`cd /usr/local && tar -xvf wormhole-0.6.0-beta.tar.gz` +- 修改配置文件:`vim /usr/local/wormhole-0.6.0-beta/conf/application.conf` ``` @@ -58,7 +70,7 @@ akka.http.server.request-timeout = 120s wormholeServer { cluster.id = "" #optional global uuid - host = "linux-05" + host = "linux05" port = 8989 ui.default.language = "Chinese" token.timeout = 1 @@ -73,7 +85,7 @@ mysql = { driver = "com.mysql.jdbc.Driver" user = "root" password = "123456" - url = "jdbc:mysql://localhost:3306/wormhole?useUnicode=true&characterEncoding=UTF-8&useSSL=false" + url = "jdbc:mysql://linux04:3306/wormhole?useUnicode=true&characterEncoding=UTF-8&useSSL=false" numThreads = 4 minConnections = 4 maxConnections = 10 @@ -81,28 +93,28 @@ mysql = { } } -ldap = { - enabled = false - user = "" - pwd = "" - url = "" - dc = "" - read.timeout = 3000 - read.timeout = 5000 - connect = { - timeout = 5000 - pool = true - } -} +#ldap = { +# enabled = false +# user = "" +# pwd = "" +# url = "" +# dc = "" +# read.timeout = 3000 +# read.timeout = 5000 +# connect = { +# timeout = 5000 +# pool = true +# } +#} spark = { wormholeServer.user = "root" #WormholeServer linux user wormholeServer.ssh.port = 22 #ssh port, please set WormholeServer linux user can password-less login itself remote spark.home = "/usr/local/spark" yarn.queue.name = "default" #WormholeServer submit spark streaming/job queue - wormhole.hdfs.root.path = "hdfs://linux-05/wormhole" #WormholeServer hdfslog data default hdfs root path - yarn.rm1.http.url = "linux-05:8088" #Yarn ActiveResourceManager address - yarn.rm2.http.url = "linux-05:8088" #Yarn StandbyResourceManager address + wormhole.hdfs.root.path = "hdfs://linux01/wormhole" #WormholeServer hdfslog data default hdfs root path + yarn.rm1.http.url = "linux01:8088" #Yarn ActiveResourceManager address + yarn.rm2.http.url = "linux01:8088" #Yarn StandbyResourceManager address } flink = { @@ -111,20 +123,18 @@ flink = { feedback.state.count=100 checkpoint.enable=false checkpoint.interval=60000 - stateBackend="hdfs://linux-05/flink-checkpoints" + stateBackend="hdfs://linux01/flink-checkpoints" feedback.interval=30 } zookeeper = { - connection.url = "localhost:2181" #WormholeServer stream and flow interaction channel + connection.url = "linux04:2181" #WormholeServer stream and flow interaction channel wormhole.root.path = "/wormhole" #zookeeper } kafka = { - #brokers.url = "localhost:6667" #WormholeServer feedback data store - brokers.url = "linux-05:9092" - zookeeper.url = "localhost:2181" - #topic.refactor = 3 + brokers.url = "linux04:9092" + zookeeper.url = "linux04:2181" topic.refactor = 1 using.cluster.suffix = false #if true, _${cluster.id} will be concatenated to consumer.feedback.topic consumer = { @@ -156,7 +166,7 @@ kafka = { # choose monitor method among ES、MYSQL monitor ={ - database.type="ES" + database.type="MYSQL" } #Wormhole feedback data store, if doesn't want to config, you will not see wormhole processing delay and throughput @@ -205,7 +215,7 @@ maintenance = { - 初始化表结构脚本路径: - 该脚本存在一个问题:初始化脚本和补丁脚本混在一起,所以直接复制执行会有报错,但是报错的部分是不影响 - 我是直接把基础 sql 和补丁 sql 分开执行,方便判断。 -- 部署完成,浏览器访问: +- 部署完成,浏览器访问: ------------------------------------------------------------------- @@ -313,7 +323,7 @@ maintenance = { ## Kafka 发送测试数据 - `cd /usr/local/kafka/bin` -- `./kafka-console-producer.sh --broker-list linux-05:9092 --topic source --property "parse.key=true" --property "key.separator=@@@"` +- `./kafka-console-producer.sh --broker-list 
linux01:9092 --topic source --property "parse.key=true" --property "key.separator=@@@"` - 发送 UMS 流消息协议规范格式: ``` From ec40f6d8fad47c06b03ab18baa45769fde9b0fc5 Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 27 Dec 2018 18:06:25 +0800 Subject: [PATCH 032/124] :construction: Ansible --- markdown-file/Ansible-Install-And-Settings.md | 107 ++++++++++++++++-- .../Wormhole-Install-And-Settings.md | 1 + 2 files changed, 96 insertions(+), 12 deletions(-) diff --git a/markdown-file/Ansible-Install-And-Settings.md b/markdown-file/Ansible-Install-And-Settings.md index 6c22a870..6188d29c 100644 --- a/markdown-file/Ansible-Install-And-Settings.md +++ b/markdown-file/Ansible-Install-And-Settings.md @@ -168,6 +168,55 @@ PLAY RECAP ********************************************************************* ## 更多 playbook 实战 + +#### 禁用防火墙(CentOS 7.x) + + +- 创建脚本文件:`vim /opt/disable-firewalld-playbook.yml` + +``` +- hosts: all + remote_user: root + tasks: + - name: Disable SELinux at next reboot + selinux: + state: disabled + - name: disable firewalld + command: "{{ item }}" + with_items: + - systemctl stop firewalld + - systemctl disable firewalld + - setenforce 0 +``` + + +- 执行命令:`ansible-playbook /opt/disable-firewalld-playbook.yml` + +#### 修改 hosts + + +- 创建脚本文件:`vim /opt/hosts-playbook.yml` + +``` +- hosts: all + remote_user: root + tasks: + - name: update hosts + blockinfile: + path: /etc/hosts + block: | + 192.168.0.223 linux01 + 192.168.0.223 linux02 + 192.168.0.223 linux03 + 192.168.0.223 linux04 + 192.168.0.223 linux05 +``` + + +- 执行命令:`ansible-playbook /opt/hosts-playbook.yml` + + + #### 部署 JDK - 创建脚本文件:`vim /opt/jdk8-playbook.yml` @@ -184,7 +233,7 @@ PLAY RECAP ********************************************************************* - name: tar jdk shell: chdir={{ java_install_folder }} tar zxf jdk-8u181-linux-x64.tar.gz - - name: Set JAVA_HOME + - name: set JAVA_HOME blockinfile: path: /etc/profile marker: "#{mark} JDK ENV" @@ -206,28 +255,62 @@ PLAY RECAP ********************************************************************* - 执行命令:`ansible-playbook /opt/jdk8-playbook.yml` -#### 修改 hosts +#### 部署 Hadoop 集群 -- 创建脚本文件:`vim /opt/hosts-playbook.yml` +- 创建脚本文件:`vim /opt/hadoop-playbook.yml` ``` -- hosts: all +- hosts: hadoop-host remote_user: root tasks: - - name: update hosts + - name: Creates directory + file: + path: /data/hadoop/hdfs/name + state: directory + - name: Creates directory + file: + path: /data/hadoop/hdfs/data + state: directory + - name: Creates directory + file: + path: /data/hadoop/hdfs/tmp + state: directory + + - name: copy gz file + copy: src=/opt/hadoop-2.6.5.tar.gz dest=/usr/local + + - name: tar gz file + command: cd /usr/local && tar zxf hadoop-2.6.5.tar.gz + + - name: check folder existed + stat: path=/usr/local/hadoop-2.6.5 + register: folder_existed + + - name: rename folder + command: mv /usr/local/hadoop-2.6.5 /usr/local/hadoop + when: folder_existed.stat.exists == true + + - name: set HADOOP_HOME blockinfile: - path: /etc/hosts + path: /etc/profile + marker: "#{mark} HADOOP ENV" block: | - 192.168.0.223 linux01 - 192.168.0.223 linux02 - 192.168.0.223 linux03 - 192.168.0.223 linux04 - 192.168.0.223 linux05 + HADOOP_HOME=/usr/local/hadoop + PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin + export HADOOP_HOME + export PATH + + - name: source profile + shell: source /etc/profile ``` -- 执行命令:`ansible-playbook /opt/hosts-playbook.yml` +- 执行命令:`ansible-playbook /opt/jdk8-playbook.yml` + + + + ------------------------------------------------------------------- diff --git 
a/markdown-file/Wormhole-Install-And-Settings.md b/markdown-file/Wormhole-Install-And-Settings.md index 2a55aa81..41f60410 100644 --- a/markdown-file/Wormhole-Install-And-Settings.md +++ b/markdown-file/Wormhole-Install-And-Settings.md @@ -37,6 +37,7 @@ - Ansible 批量添加 hosts 请看:[点击我](Ansible-Install-And-Settings.md) - 必须(版本请不要随便用,而是按照如下说明来): - 一般情况下,我组件都是放在:`/usr/local` + - 批量部署用的是:Ansible(linux01) - JDK(所有服务器):`1.8.0_181` - 批量添加 JDK 请看:[点击我](Ansible-Install-And-Settings.md) - Hadoop 集群(HDFS,YARN)(linux01、linux02、linux03):`2.6.5` From 2d1498024c29598fd427f3d21717c31ff2f0854a Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 28 Dec 2018 13:47:01 +0800 Subject: [PATCH 033/124] :construction: Wormhole --- markdown-file/Ansible-Install-And-Settings.md | 75 +++- markdown-file/Kafka-Install-And-Settings.md | 2 +- .../Wormhole-Install-And-Settings.md | 367 ++++++++++++++++-- 3 files changed, 392 insertions(+), 52 deletions(-) diff --git a/markdown-file/Ansible-Install-And-Settings.md b/markdown-file/Ansible-Install-And-Settings.md index 6188d29c..aed43c64 100644 --- a/markdown-file/Ansible-Install-And-Settings.md +++ b/markdown-file/Ansible-Install-And-Settings.md @@ -190,7 +190,61 @@ PLAY RECAP ********************************************************************* ``` -- 执行命令:`ansible-playbook /opt/disable-firewalld-playbook.yml` + +#### 基础环境(CentOS 7.x) + + +- 创建脚本文件:`vim /opt/install-basic-playbook.yml` + +``` +- hosts: all + remote_user: root + tasks: + - name: Disable SELinux at next reboot + selinux: + state: disabled + + - name: disable firewalld + command: "{{ item }}" + with_items: + - systemctl stop firewalld + - systemctl disable firewalld + - setenforce 0 + + - name: install-basic + command: "{{ item }}" + with_items: + - yum install -y zip unzip lrzsz git epel-release wget htop deltarpm + + - name: install-vim + shell: "{{ item }}" + with_items: + - yum install -y vim + - curl https://raw.githubusercontent.com/wklken/vim-for-server/master/vimrc > ~/.vimrc + + - name: install-docker + shell: "{{ item }}" + with_items: + - yum install -y yum-utils device-mapper-persistent-data lvm2 + - yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo + - yum makecache fast + - yum install -y docker-ce + - systemctl start docker.service + - docker run hello-world + + - name: install-docker-compose + shell: "{{ item }}" + with_items: + - curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose + - chmod +x /usr/local/bin/docker-compose + - docker-compose --version + - systemctl restart docker.service + - systemctl enable docker.service + +``` + + +- 执行命令:`ansible-playbook /opt/install-basic-playbook.yml` #### 修改 hosts @@ -259,6 +313,7 @@ PLAY RECAP ********************************************************************* #### 部署 Hadoop 集群 - 创建脚本文件:`vim /opt/hadoop-playbook.yml` +- 刚学 Ansible,不好动配置文件,所以就只保留环境部分的设置,其他部分自行手工~ ``` - hosts: hadoop-host @@ -277,20 +332,6 @@ PLAY RECAP ********************************************************************* path: /data/hadoop/hdfs/tmp state: directory - - name: copy gz file - copy: src=/opt/hadoop-2.6.5.tar.gz dest=/usr/local - - - name: tar gz file - command: cd /usr/local && tar zxf hadoop-2.6.5.tar.gz - - - name: check folder existed - stat: path=/usr/local/hadoop-2.6.5 - register: folder_existed - - - name: rename folder - command: mv /usr/local/hadoop-2.6.5 /usr/local/hadoop - when: folder_existed.stat.exists == true - - name: set HADOOP_HOME 
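# 编者注(补充):blockinfile 会以 marker 注释包裹写入内容,重复执行本 playbook 不会重复追加环境变量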
blockinfile: path: /etc/profile @@ -306,7 +347,7 @@ PLAY RECAP ********************************************************************* ``` -- 执行命令:`ansible-playbook /opt/jdk8-playbook.yml` +- 执行命令:`ansible-playbook /opt/hadoop-playbook.yml` @@ -317,7 +358,7 @@ PLAY RECAP ********************************************************************* ## 资料 - +- [ANSIBLE模块 - shell和command区别](https://www.jianshu.com/p/081139f73613) - - - \ No newline at end of file diff --git a/markdown-file/Kafka-Install-And-Settings.md b/markdown-file/Kafka-Install-And-Settings.md index 61993859..ff67a925 100644 --- a/markdown-file/Kafka-Install-And-Settings.md +++ b/markdown-file/Kafka-Install-And-Settings.md @@ -427,7 +427,7 @@ num.partitions=1 # 允许删除topic delete.topic.enable=false # 允许自动创建topic(默认是 true) -auto.create.topics.enable=false +auto.create.topics.enable=true # 磁盘IO不足的时候,可以适当调大该值 ( 当内存足够时 ) #log.flush.interval.messages=10000 #log.flush.interval.ms=1000 diff --git a/markdown-file/Wormhole-Install-And-Settings.md b/markdown-file/Wormhole-Install-And-Settings.md index 41f60410..5c92ac10 100644 --- a/markdown-file/Wormhole-Install-And-Settings.md +++ b/markdown-file/Wormhole-Install-And-Settings.md @@ -1,8 +1,18 @@ -# Wormhole Flink 最佳实践 +# Wormhole + Flink 最佳实践 + + +## 本文声明 + +- **感谢 Wormhole 的官方帮助!官方微信群很友好,这让我很意外,只能感谢了!** +- 本人大数据和 Ansible 刚看,只会皮毛的皮毛。但是也因为这样的契机促使了我写这篇文章。 +- 希望对你们有帮助。 + +------------------------------------------------------------------- ## 前置声明 -- 需要对流计算的一些基础概念有基础了解,比如:Source、Sink、YARN、Kafka 等 +- 需要对 Linux 环境,流计算的一些基础概念有基础了解,比如:Source、Sink、YARN、Zookeeper、Kafka、Ansible 等 +- 如果有欠缺,可以查看本系列文章:[点击我](../README.md) ------------------------------------------------------------------- @@ -23,45 +33,334 @@ ------------------------------------------------------------------- -## 基础环境 +## 服务器基础环境设置 + +#### 特别说明 + +- **5 台 8C32G 服务器 CentOS 7.5,内存推荐 16G 或以上。** + - **为了方便,所有服务器都已经关闭防火墙,并且在云服务上设置安全组对外开通所有端口** +- 整体部署结构图: + +![未命名文件.png](https://upload-images.jianshu.io/upload_images/12159-dc29079158e1e59e.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) + +#### 服务器基础配置 + +- 给对应服务器设置 hostname,方便后面使用: + +``` +hostnamectl --static set-hostname linux01 +hostnamectl --static set-hostname linux02 +hostnamectl --static set-hostname linux03 +hostnamectl --static set-hostname linux04 +hostnamectl --static set-hostname linux05 +``` + +- 给所有服务器设置 hosts:`vim /etc/hosts` + +``` +172.16.0.17 linux01 +172.16.0.43 linux02 +172.16.0.180 linux03 +172.16.0.180 linux04 +172.16.0.180 linux05 +``` + +- 在 linux01 生成密钥对,设置 SSH 免密登录 + +``` +生产密钥对 +ssh-keygen -t rsa + + +公钥内容写入 authorized_keys +cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys + +测试: +ssh localhost + +将公钥复制到其他机子 +ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@linux02(根据提示输入 linux02 密码) +ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@linux03(根据提示输入 linux03 密码) +ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@linux04(根据提示输入 linux04 密码) +ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@linux05(根据提示输入 linux05 密码) + +在 linux01 上测试 +ssh linux02 +ssh linux03 +ssh linux04 +ssh linux05 +``` + +- 安装 Ansible:`yum install -y ansible` +- 测试 Ansible:`ansible all -a 'ps'` +- 配置 Inventory 编辑配置文件:`vim /etc/ansible/hosts` +- 添加如下内容 + +``` +[hadoop-host] +linux01 +linux02 +linux03 +``` + +#### 服务器基础组件(CentOS 7.x) + + +- 创建脚本文件:`vim /opt/install-basic-playbook.yml` + +``` +- hosts: all + remote_user: root + tasks: + - name: Disable SELinux at next reboot + selinux: + state: disabled + + - name: disable firewalld + command: "{{ item }}" + with_items: + - systemctl stop firewalld + - 
systemctl disable firewalld + - setenforce 0 + + - name: install-basic + command: "{{ item }}" + with_items: + - yum install -y zip unzip lrzsz git epel-release wget htop deltarpm + + - name: install-vim + shell: "{{ item }}" + with_items: + - yum install -y vim + - curl https://raw.githubusercontent.com/wklken/vim-for-server/master/vimrc > ~/.vimrc + + - name: install-docker + shell: "{{ item }}" + with_items: + - yum install -y yum-utils device-mapper-persistent-data lvm2 + - yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo + - yum makecache fast + - yum install -y docker-ce + - systemctl start docker.service + - docker run hello-world + + - name: install-docker-compose + shell: "{{ item }}" + with_items: + - curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose + - chmod +x /usr/local/bin/docker-compose + - docker-compose --version + - systemctl restart docker.service + - systemctl enable docker.service + +``` + +- 执行命令:`ansible-playbook /opt/install-basic-playbook.yml` + + +------------------------------------------------------------------- + +## Wormhole 所需组件安装 - 参考官网: -- 4 台 8C32G 服务器 CentOS 7.5 - - **为了方便测试,服务器都已经关闭防火墙,并且对外开通所有端口** - - **都做了免密登录** - - hostname:`linux01` - - hostname:`linux02` - - hostname:`linux03` - - hostname:`linux04` - - hostname:`linux05` - - Ansible 批量添加 hosts 请看:[点击我](Ansible-Install-And-Settings.md) -- 必须(版本请不要随便用,而是按照如下说明来): - - 一般情况下,我组件都是放在:`/usr/local` - - 批量部署用的是:Ansible(linux01) - - JDK(所有服务器):`1.8.0_181` - - 批量添加 JDK 请看:[点击我](Ansible-Install-And-Settings.md) - - Hadoop 集群(HDFS,YARN)(linux01、linux02、linux03):`2.6.5` - - 安装请看:[点击我](Hadoop-Install-And-Settings.md) - - Zookeeper 单点(linux04):`3.4.13` - - 安装请看:[点击我](Zookeeper-Install.md) - - Kafka 单点(linux04):`0.10.2.2` - - 安装请看:[点击我](Kafka-Install-And-Settings.md) - - MySQL 单点(linux04):`5.7` - - 安装请看:[点击我](Mysql-Install-And-Settings.md) - - Spark 单点(linux05):`2.2.0` - - 安装请看:[点击我](Spark-Install-And-Settings.md) - - Flink 单点(linux05):`1.5.1` - - 安装请看:[点击我](Flink-Install-And-Settings.md) - - wormhole 单点(linux05):`0.6.0-beta`,2018-12-06 版本 -- 非必须: - - Elasticsearch(支持版本 5.x)(非必须,若无则无法查看 wormhole 处理数据的吞吐和延时) - - Grafana (支持版本 4.x)(非必须,若无则无法查看 wormhole 处理数据的吞吐和延时的图形化展示) +- 必须组件(版本请不要随便用,而是按照如下说明来): +- 我个人习惯软件都是放在:`/usr/local`,压缩包放在:`/opt` + +#### 关于版本号和端口问题 + +- 百度云打包下载(提取码:8tm3): +- 版本: + - **jdk-8u191-linux-x64.tar.gz** + - **zookeeper-3.4.13(Docker)** + - **kafka_2.11-0.10.2.2.tgz** + - **hadoop-2.6.5.tar.gz** + - **flink-1.5.1-bin-hadoop26-scala_2.11.tgz** + - **spark-2.2.0-bin-hadoop2.6.tgz** + - **mysql-3.7(Docker)** + - **wormhole-0.6.0-beta.tar.gz** +- 端口 + - 都采用组件默认端口 + +#### JDK 安装 + +- JDK(所有服务器):`1.8.0_191` +- 复制压缩包到所有机子的 /opt 目录下: + +``` +scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux02:/opt + +scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux03:/opt + +scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux04:/opt + +scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux05:/opt +``` + +- 创建脚本文件:`vim /opt/jdk8-playbook.yml` + +``` +- hosts: all + remote_user: root + tasks: + - name: copy jdk + copy: src=/opt/jdk-8u191-linux-x64.tar.gz dest=/usr/local + + - name: tar jdk + shell: cd /usr/local && tar zxf jdk-8u191-linux-x64.tar.gz + + - name: set JAVA_HOME + blockinfile: + path: /etc/profile + marker: "#{mark} JDK ENV" + block: | + JAVA_HOME={{ java_install_folder }}/jdk1.8.0_191 + JRE_HOME=$JAVA_HOME/jre + PATH=$PATH:$JAVA_HOME/bin + 
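# 编者注(补充):JDK 8 下 CLASSPATH 并非必需,保留下一行仅为兼容依赖它的旧脚本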
CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar + export JAVA_HOME + export JRE_HOME + export PATH + export CLASSPATH + + - name: source profile + shell: source /etc/profile +``` + + +- 执行命令:`ansible-playbook /opt/jdk8-playbook.yml` + + + +#### Hadoop 集群(HDFS,YARN) + +- Hadoop 集群(HDFS,YARN)(linux01、linux02、linux03):`2.6.5` +- 内容较多,具体参考:[点击我](Hadoop-Install-And-Settings.md) +- Hadoop 环境可以用脚本文件,剩余部分内容请参考上文手工操作:`vim /opt/hadoop-playbook.yml` + +``` +- hosts: hadoop-host + remote_user: root + tasks: + - name: Creates directory + file: + path: /data/hadoop/hdfs/name + state: directory + - name: Creates directory + file: + path: /data/hadoop/hdfs/data + state: directory + - name: Creates directory + file: + path: /data/hadoop/hdfs/tmp + state: directory + + - name: set HADOOP_HOME + blockinfile: + path: /etc/profile + marker: "#{mark} HADOOP ENV" + block: | + HADOOP_HOME=/usr/local/hadoop + PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin + export HADOOP_HOME + export PATH + + - name: source profile + shell: source /etc/profile +``` + + +- 执行命令:`ansible-playbook /opt/hadoop-playbook.yml` + + +#### Zookeeper + +- Zookeeper 单点(linux04):`3.4.13` +- 单个实例:`docker run -d --restart always --name one-zookeeper -p 2181:2181 -v /etc/localtime:/etc/localtime zookeeper:3.4.13` + +#### Kafka + +- Kafka 单点(linux04):`0.10.2.2` +- 上传压缩包到 /opt 目录下 +- 解压:`tar zxvf kafka_2.11-0.10.2.2.tgz` +- 删除压缩包并重命名目录:`rm -rf kafka_2.11-0.10.2.2.tgz && mv /usr/local/kafka_2.11-0.10.2.2 /usr/local/kafka` +- 修改 kafka-server 的配置文件:`vim /usr/local/kafka/config/server.properties` + +``` +listeners=PLAINTEXT://0.0.0.0:9092 +advertised.listeners=PLAINTEXT://linux04:9092 +zookeeper.connect=linux04:2181 +auto.create.topics.enable=true +``` + +- 启动 kafka 服务(必须制定配置文件):`cd /usr/local/kafka && bin/kafka-server-start.sh config/server.properties` + - 后台方式运行 kafka 服务:`cd /usr/local/kafka && bin/kafka-server-start.sh -daemon config/server.properties` + - 停止 kafka 服务:`cd /usr/local/kafka && bin/kafka-server-stop.sh` +- 再开一个终端测试: + - 创建 topic 命令:`cd /usr/local/kafka && bin/kafka-topics.sh --create --zookeeper youmeekhost:2181 --replication-factor 1 --partitions 1 --topic my-topic-test` + - 查看 topic 命令:`cd /usr/local/kafka && bin/kafka-topics.sh --list --zookeeper youmeekhost:2181` + - 删除 topic:`cd /usr/local/kafka && bin/kafka-topics.sh --delete --topic my-topic-test --zookeeper youmeekhost:2181` + - 给 topic 发送消息命令:`cd /usr/local/kafka && bin/kafka-console-producer.sh --broker-list youmeekhost:9092 --topic my-topic-test`,然后在出现交互输入框的时候输入你要发送的内容 + - 再开一个终端,进入 kafka 容器,接受消息:`cd /usr/local/kafka && bin/kafka-console-consumer.sh --bootstrap-server youmeekhost:9092 --topic my-topic-test --from-beginning` + - 此时发送的终端输入一个内容回车,接受消息的终端就可以收到。 + +#### MySQL + +- MySQL 单点(linux04):`5.7` +- 创建本地数据存储 + 配置文件目录:`mkdir -p /data/docker/mysql/datadir /data/docker/mysql/conf /data/docker/mysql/log` +- 在宿主机上创建一个配置文件:`vim /data/docker/mysql/conf/mysql-1.cnf`,内容如下: + +``` +[mysql] +default-character-set = utf8 + +[mysqld] +pid-file = /var/run/mysqld/mysqld.pid +socket = /var/run/mysqld/mysqld.sock +datadir = /var/lib/mysql +symbolic-links=0 + +log-error=/var/log/mysql/error.log +default-storage-engine = InnoDB +collation-server = utf8_unicode_ci +init_connect = 'SET NAMES utf8' +character-set-server = utf8 +lower_case_table_names = 1 +max_allowed_packet = 50M +``` + +- 赋权(避免挂载的时候,一些程序需要容器中的用户的特定权限使用):`chmod -R 777 /data/docker/mysql/datadir /data/docker/mysql/log` +- 赋权:`chown -R 0:0 /data/docker/mysql/conf` +- `docker run -p 3306:3306 --name one-mysql -v 
/data/docker/mysql/datadir:/var/lib/mysql -v /data/docker/mysql/log:/var/log/mysql -v /data/docker/mysql/conf:/etc/mysql/conf.d -e MYSQL_ROOT_PASSWORD=aaabbb123456 -d mysql:5.7` +- 连上容器:`docker exec -it one-mysql /bin/bash` + - 连上 MySQL:`mysql -u root -p` + - 创建表:`CREATE DATABASE wormhole DEFAULT CHARACTER SET utf8 COLLATE utf8_unicode_ci;` +- **确保用 sqlyog 能直接在外网连上,方便后面调试** + + +#### Spark + +- Spark 单点(linux05):`2.2.0` + + + + +#### Flink + + +- Flink 单点(linux05):`1.5.1` + + +#### 非必须组件 + +- Elasticsearch(支持版本 5.x)(非必须,若无则无法查看 wormhole 处理数据的吞吐和延时) +- Grafana (支持版本 4.x)(非必须,若无则无法查看 wormhole 处理数据的吞吐和延时的图形化展示) + ------------------------------------------------------------------- ## Wormhole 安装 + 配置 -- 参考官网: +- wormhole 单点(linux05):`0.6.0-beta`,2018-12-06 版本 - 解压:`cd /usr/local && tar -xvf wormhole-0.6.0-beta.tar.gz` - 修改配置文件:`vim /usr/local/wormhole-0.6.0-beta/conf/application.conf` From 4f32322777c1d7903993d0165c9c9a131ccae085 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 28 Dec 2018 17:30:40 +0800 Subject: [PATCH 034/124] :construction: Wormhole --- README.md | 1 + markdown-file/Flink-Install-And-Settings.md | 2 +- markdown-file/Hadoop-Install-And-Settings.md | 43 +++- .../Wormhole-Install-And-Settings.md | 208 ++++++++++++++---- 4 files changed, 210 insertions(+), 44 deletions(-) diff --git a/README.md b/README.md index 4aca2d77..946d100e 100644 --- a/README.md +++ b/README.md @@ -101,6 +101,7 @@ - [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) - [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md) - [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md) +- [Wormhole + Flink 最佳实践](markdown-file/Wormhole-Install-And-Settings.md) ## 联系(Contact) diff --git a/markdown-file/Flink-Install-And-Settings.md b/markdown-file/Flink-Install-And-Settings.md index a0f93ccd..97d813c0 100644 --- a/markdown-file/Flink-Install-And-Settings.md +++ b/markdown-file/Flink-Install-And-Settings.md @@ -31,7 +31,7 @@ - 控制台启动:`cd /usr/local/flink && ./bin/yarn-session.sh -n 2 -jm 1024 -tm 1024` - 守护进程启动:`cd /usr/local/flink && ./bin/yarn-session.sh -n 2 -jm 1024 -tm 1024 -d` - 有可能会报:`The Flink Yarn cluster has failed`,可能是资源不够 - +- YARN 参数配置可以参考:[点击我](https://sustcoder.github.io/2018/09/27/YARN%20%E5%86%85%E5%AD%98%E5%8F%82%E6%95%B0%E8%AF%A6%E8%A7%A3/) ## Demo diff --git a/markdown-file/Hadoop-Install-And-Settings.md b/markdown-file/Hadoop-Install-And-Settings.md index f75adc34..ad398971 100644 --- a/markdown-file/Hadoop-Install-And-Settings.md +++ b/markdown-file/Hadoop-Install-And-Settings.md @@ -118,12 +118,12 @@ vim $HADOOP_HOME/etc/hadoop/hadoop-env.sh 把 25 行的 export JAVA_HOME=${JAVA_HOME} 都改为 -export JAVA_HOME=/usr/local/jdk1.8.0_181 +export JAVA_HOME=/usr/local/jdk1.8.0_191 vim $HADOOP_HOME/etc/hadoop/yarn-env.sh -加一行 export JAVA_HOME=/usr/local/jdk1.8.0_181 +文件开头加一行 export JAVA_HOME=/usr/local/jdk1.8.0_191 ``` @@ -211,12 +211,34 @@ vim $HADOOP_HOME/etc/hadoop/hdfs-site.xml mapreduce.framework.name yarn + + + mapreduce.map.memory.mb + 4096 + + + + mapreduce.reduce.memory.mb + 8192 + + + + mapreduce.map.java.opts + -Xmx3072m + + + + mapreduce.reduce.java.opts + -Xmx6144m + + ``` - yarn.resourcemanager.hostname == 指定YARN的老大(ResourceManager)的地址 - yarn.nodemanager.aux-services == NodeManager上运行的附属服务。需配置成mapreduce_shuffle,才可运行MapReduce程序默认值:"" +- 32G 内存的情况下配置: ``` vim $HADOOP_HOME/etc/hadoop/yarn-site.xml @@ -233,6 +255,21 @@ vim $HADOOP_HOME/etc/hadoop/yarn-site.xml mapreduce_shuffle + + yarn.nodemanager.vmem-pmem-ratio + 2.1 + + + + yarn.nodemanager.resource.memory-mb + 
20480 + + + + yarn.scheduler.minimum-allocation-mb + 2048 + + ``` @@ -278,7 +315,7 @@ STARTUP_MSG: args = [-format] STARTUP_MSG: version = 2.6.5 STARTUP_MSG: classpath = /usr/local/hadoop-2.6.5/etc/hadoop:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/apacheds-kerberos-codec-2.0.0-M15.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-io-2.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/activation-1.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/netty-3.6.2.Final.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jackson-mapper-asl-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/slf4j-api-1.7.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/junit-4.11.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/curator-recipes-2.6.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jasper-compiler-5.5.23.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jets3t-0.9.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-lang-2.6.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-digester-1.8.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jackson-core-asl-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/apacheds-i18n-2.0.0-M15.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/guava-11.0.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/gson-2.2.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jackson-jaxrs-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jettison-1.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jetty-6.1.26.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/api-util-1.0.0-M20.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/log4j-1.2.17.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-beanutils-core-1.8.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-httpclient-3.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-el-1.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/paranamer-2.3.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/slf4j-log4j12-1.7.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-collections-3.2.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jersey-server-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-net-3.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/hadoop-auth-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jasper-runtime-5.5.23.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jaxb-impl-2.2.3-1.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/hamcrest-core-1.3.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/stax-api-1.0-2.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-beanutils-1.7.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/protobuf-java-2.5.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/curator-framework-2.6.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/xz-1.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jsr305-1.3.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jsp-api-2.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-compress-1.4.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/asm-3.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jsch-0.1.42.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-configuration-1.6.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-cli-1.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jackson-xc-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-logging-1.1.3.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib
/htrace-core-3.0.4.jar:(其后是长达数千字符的依赖 jar 列表,均位于 /usr/local/hadoop-2.6.5/share 之下,此处从略):/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/commons-i
o-2.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/aopalliance-1.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/netty-3.6.2.Final.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/jackson-mapper-asl-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/junit-4.11.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/jackson-core-asl-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/guice-3.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/log4j-1.2.17.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/paranamer-2.3.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/jersey-server-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/hamcrest-core-1.3.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/protobuf-java-2.5.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/xz-1.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/guice-servlet-3.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/commons-compress-1.4.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/asm-3.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/jersey-guice-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/leveldbjni-all-1.8.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/javax.inject-1.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/avro-1.7.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/jersey-core-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/snappy-java-1.0.4.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/hadoop-annotations-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-app-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-common-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-shuffle-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-plugins-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.6.5-tests.jar:/usr/local/hadoop-2.6.5/contrib/capacity-scheduler/*.jar STARTUP_MSG: build = https://github.com/apache/hadoop.git -r e8c9fe0b4c252caf2ebf1464220599650f119997; compiled by 'sjlee' on 2016-10-02T23:43Z -STARTUP_MSG: java = 1.8.0_181 +STARTUP_MSG: java = 1.8.0_191 ************************************************************/ 18/12/17 17:47:17 INFO namenode.NameNode: registered UNIX signal handlers for [TERM, HUP, INT] 18/12/17 17:47:17 INFO namenode.NameNode: createNameNode [-format] diff --git a/markdown-file/Wormhole-Install-And-Settings.md b/markdown-file/Wormhole-Install-And-Settings.md index 5c92ac10..172691f8 100644 --- a/markdown-file/Wormhole-Install-And-Settings.md +++ b/markdown-file/Wormhole-Install-And-Settings.md @@ -39,9 +39,12 @@ - **5 台 8C32G 服务器 CentOS 7.5,内存推荐 16G 或以上。** - **为了方便,所有服务器都已经关闭防火墙,并且在云服务上设置安全组对外开通所有端口** + - **全程 root 用户** - 整体部署结构图: -![未命名文件.png](https://upload-images.jianshu.io/upload_images/12159-dc29079158e1e59e.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) + +![未命名文件(1).png](https://upload-images.jianshu.io/upload_images/12159-7a94673ea075873c.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) + #### 
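编者插注(批量自查,可选)

(补充:进入逐台配置之前,笔者习惯先核对各机器的主机名与系统时间,属个人做法,非官方步骤;脚本假设已能以 root 登录 linux01~linux05)

```
#!/bin/bash
# 依次检查各节点的主机名与系统时间,便于提前发现 hosts 或时钟配置问题
for h in linux01 linux02 linux03 linux04 linux05; do
  echo "==== ${h} ===="
  ssh -o ConnectTimeout=3 root@${h} 'hostname; date'
done
```

#### 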
服务器基础配置 @@ -58,11 +61,11 @@ hostnamectl --static set-hostname linux05 - 给所有服务器设置 hosts:`vim /etc/hosts` ``` -172.16.0.17 linux01 -172.16.0.43 linux02 -172.16.0.180 linux03 -172.16.0.180 linux04 -172.16.0.180 linux05 +172.16.0.55 linux01 +172.16.0.92 linux02 +172.16.0.133 linux03 +172.16.0.159 linux04 +172.16.0.184 linux05 ``` - 在 linux01 生成密钥对,设置 SSH 免密登录 @@ -80,29 +83,78 @@ ssh localhost 将公钥复制到其他机子 ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@linux02(根据提示输入 linux02 密码) + ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@linux03(根据提示输入 linux03 密码) + ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@linux04(根据提示输入 linux04 密码) + ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@linux05(根据提示输入 linux05 密码) 在 linux01 上测试 +ssh linux01 + ssh linux02 + ssh linux03 + ssh linux04 + ssh linux05 ``` +- 安装基础软件:`yum install -y zip unzip lrzsz git epel-release wget htop deltarpm` - 安装 Ansible:`yum install -y ansible` -- 测试 Ansible:`ansible all -a 'ps'` - 配置 Inventory 编辑配置文件:`vim /etc/ansible/hosts` -- 添加如下内容 +- 在文件尾部补上如下内容 ``` [hadoop-host] linux01 linux02 linux03 + +[kafka-host] +linux04 + +[wh-host] +linux05 ``` +- 测试 Ansible:`ansible all -a 'ps'`,必须保证能得到如下结果: + +``` +linux01 | CHANGED | rc=0 >> + PID TTY TIME CMD +11088 pts/7 00:00:00 sh +11101 pts/7 00:00:00 python +11102 pts/7 00:00:00 ps + +linux02 | CHANGED | rc=0 >> + PID TTY TIME CMD +10590 pts/1 00:00:00 sh +10603 pts/1 00:00:00 python +10604 pts/1 00:00:00 ps + +linux05 | CHANGED | rc=0 >> + PID TTY TIME CMD +10573 pts/0 00:00:00 sh +10586 pts/0 00:00:00 python +10587 pts/0 00:00:00 ps + +linux03 | CHANGED | rc=0 >> + PID TTY TIME CMD +10586 pts/1 00:00:00 sh +10599 pts/1 00:00:00 python +10600 pts/1 00:00:00 ps + +linux04 | CHANGED | rc=0 >> + PID TTY TIME CMD +10574 pts/1 00:00:00 sh +10587 pts/1 00:00:00 python +10588 pts/1 00:00:00 ps +``` + + #### 服务器基础组件(CentOS 7.x) @@ -121,7 +173,6 @@ linux03 with_items: - systemctl stop firewalld - systemctl disable firewalld - - setenforce 0 - name: install-basic command: "{{ item }}" @@ -183,8 +234,7 @@ linux03 #### JDK 安装 -- JDK(所有服务器):`1.8.0_191` -- 复制压缩包到所有机子的 /opt 目录下: +- 将 linux01 下的 JDK 压缩包复制到所有机子的 /opt 目录下: ``` scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux02:/opt @@ -196,7 +246,7 @@ scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux04:/opt scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux05:/opt ``` -- 创建脚本文件:`vim /opt/jdk8-playbook.yml` +- 在 linux01 创建脚本文件:`vim /opt/jdk8-playbook.yml` ``` - hosts: all @@ -213,7 +263,7 @@ scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux05:/opt path: /etc/profile marker: "#{mark} JDK ENV" block: | - JAVA_HOME={{ java_install_folder }}/jdk1.8.0_191 + JAVA_HOME=/usr/local/jdk1.8.0_191 JRE_HOME=$JAVA_HOME/jre PATH=$PATH:$JAVA_HOME/bin CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar @@ -228,13 +278,12 @@ scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux05:/opt - 执行命令:`ansible-playbook /opt/jdk8-playbook.yml` - +- 经过试验,发现还是要自己再手动:`source /etc/profile`,原因未知。 #### Hadoop 集群(HDFS,YARN) - Hadoop 集群(HDFS,YARN)(linux01、linux02、linux03):`2.6.5` -- 内容较多,具体参考:[点击我](Hadoop-Install-And-Settings.md) - Hadoop 环境可以用脚本文件,剩余部分内容请参考上文手工操作:`vim /opt/hadoop-playbook.yml` ``` @@ -260,7 +309,11 @@ scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux05:/opt marker: "#{mark} HADOOP ENV" block: | HADOOP_HOME=/usr/local/hadoop + HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop + YARN_CONF_DIR=$HADOOP_HOME/etc/hadoop PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin + export HADOOP_CONF_DIR + export YARN_CONF_DIR export HADOOP_HOME export PATH @@ -270,6 +323,29 @@ scp -r /opt/jdk-8u191-linux-x64.tar.gz 
root@linux05:/opt - 执行命令:`ansible-playbook /opt/hadoop-playbook.yml` +- 剩余内容较多,具体参考:[点击我](Hadoop-Install-And-Settings.md) + - 解压压缩包:`tar zxvf hadoop-2.6.5.tar.gz` + - 这里最好把目录重命名下:`mv /usr/local/hadoop-2.6.5 /usr/local/hadoop` + - 剩下内容从:修改 linux01 配置,开始阅读 + + +#### Flink + +- 须安装在 linux01 +- Flink 单点(linux01):`1.5.1` +- 拷贝:`cd /usr/local/ && cp /opt/flink-1.5.1-bin-hadoop26-scala_2.11.tgz .` +- 解压:`tar zxf flink-*.tgz` +- 修改目录名:`mv /usr/local/flink-1.5.1 /usr/local/flink` +- 修改配置文件:`vim /usr/local/flink/conf/flink-conf.yaml` + - 在文件最前加上:`env.java.home: /usr/local/jdk1.8.0_191` +- 启动:`cd /usr/local/flink && ./bin/start-cluster.sh` +- 停止:`cd /usr/local/flink && ./bin/stop-cluster.sh` +- 查看日志:`tail -300f log/flink-*-standalonesession-*.log` +- 浏览器访问 WEB 管理:`http://linux01:8081/` +- yarn 启动 + - 先停止下本地模式 + - 测试控制台启动:`cd /usr/local/flink && ./bin/yarn-session.sh -n 2 -jm 2024 -tm 2024` + - 有可能会报:`The Flink Yarn cluster has failed`,可能是资源不够,需要调优内存相关参数 #### Zookeeper @@ -281,26 +357,27 @@ scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux05:/opt - Kafka 单点(linux04):`0.10.2.2` - 上传压缩包到 /opt 目录下 +- 拷贝压缩包:`cd /usr/local && cp /opt/kafka_2.11-0.10.2.2.tgz .` - 解压:`tar zxvf kafka_2.11-0.10.2.2.tgz` - 删除压缩包并重命名目录:`rm -rf kafka_2.11-0.10.2.2.tgz && mv /usr/local/kafka_2.11-0.10.2.2 /usr/local/kafka` - 修改 kafka-server 的配置文件:`vim /usr/local/kafka/config/server.properties` ``` -listeners=PLAINTEXT://0.0.0.0:9092 -advertised.listeners=PLAINTEXT://linux04:9092 -zookeeper.connect=linux04:2181 -auto.create.topics.enable=true +034 行:listeners=PLAINTEXT://0.0.0.0:9092 +039 行:advertised.listeners=PLAINTEXT://linux04:9092 +119 行:zookeeper.connect=linux04:2181 +补充 :auto.create.topics.enable=true ``` - 启动 kafka 服务(必须制定配置文件):`cd /usr/local/kafka && bin/kafka-server-start.sh config/server.properties` - 后台方式运行 kafka 服务:`cd /usr/local/kafka && bin/kafka-server-start.sh -daemon config/server.properties` - 停止 kafka 服务:`cd /usr/local/kafka && bin/kafka-server-stop.sh` - 再开一个终端测试: - - 创建 topic 命令:`cd /usr/local/kafka && bin/kafka-topics.sh --create --zookeeper youmeekhost:2181 --replication-factor 1 --partitions 1 --topic my-topic-test` - - 查看 topic 命令:`cd /usr/local/kafka && bin/kafka-topics.sh --list --zookeeper youmeekhost:2181` - - 删除 topic:`cd /usr/local/kafka && bin/kafka-topics.sh --delete --topic my-topic-test --zookeeper youmeekhost:2181` - - 给 topic 发送消息命令:`cd /usr/local/kafka && bin/kafka-console-producer.sh --broker-list youmeekhost:9092 --topic my-topic-test`,然后在出现交互输入框的时候输入你要发送的内容 - - 再开一个终端,进入 kafka 容器,接受消息:`cd /usr/local/kafka && bin/kafka-console-consumer.sh --bootstrap-server youmeekhost:9092 --topic my-topic-test --from-beginning` + - 创建 topic 命令:`cd /usr/local/kafka && bin/kafka-topics.sh --create --zookeeper linux04:2181 --replication-factor 1 --partitions 1 --topic my-topic-test` + - 查看 topic 命令:`cd /usr/local/kafka && bin/kafka-topics.sh --list --zookeeper linux04:2181` + - 删除 topic:`cd /usr/local/kafka && bin/kafka-topics.sh --delete --topic my-topic-test --zookeeper linux04:2181` + - 给 topic 发送消息命令:`cd /usr/local/kafka && bin/kafka-console-producer.sh --broker-list linux04:9092 --topic my-topic-test`,然后在出现交互输入框的时候输入你要发送的内容 + - 再开一个终端,进入 kafka 容器,接受消息:`cd /usr/local/kafka && bin/kafka-console-consumer.sh --bootstrap-server linux04:9092 --topic my-topic-test --from-beginning` - 此时发送的终端输入一个内容回车,接受消息的终端就可以收到。 #### MySQL @@ -339,15 +416,32 @@ max_allowed_packet = 50M #### Spark -- Spark 单点(linux05):`2.2.0` - +- 须安装在 linux01 +- Spark 单点(linux01):`2.2.0` +- 上传压缩包到 /opt 目录下 +- 拷贝压缩包:`cd /usr/local && cp 
/opt/spark-2.2.0-bin-hadoop2.6.tgz .` +- 解压:`tar zxvf spark-2.2.0-bin-hadoop2.6.tgz` +- 重命名:`mv /usr/local/spark-2.2.0-bin-hadoop2.6 /usr/local/spark` +- 增加环境变量: +``` +vim /etc/profile +SPARK_HOME=/usr/local/spark +PATH=$PATH:${SPARK_HOME}/bin:${SPARK_HOME}/sbin +export SPARK_HOME +export PATH -#### Flink +source /etc/profile +``` +- 修改配置:`cp $SPARK_HOME/conf/spark-env.sh.template $SPARK_HOME/conf/spark-env.sh` +- 修改配置:`vim $SPARK_HOME/conf/spark-env.sh` +- 假设我的 hadoop 路径是:/usr/local/hadoop,则最尾巴增加: -- Flink 单点(linux05):`1.5.1` +``` +export HADOOP_CONF_DIR=/usr/local/hadoop/etc/hadoop +``` #### 非必须组件 @@ -360,7 +454,19 @@ max_allowed_packet = 50M ## Wormhole 安装 + 配置 -- wormhole 单点(linux05):`0.6.0-beta`,2018-12-06 版本 +- 须安装在 linux01 +- wormhole 单点(linux01):`0.6.0-beta`,2018-12-06 版本 +- 先在 linux04 机子的 kafka 创建 topic: + +``` +cd /usr/local/kafka && bin/kafka-topics.sh --list --zookeeper linux04:2181 +cd /usr/local/kafka && bin/kafka-topics.sh --create --zookeeper linux04:2181 --replication-factor 1 --partitions 1 --topic source +cd /usr/local/kafka && bin/kafka-topics.sh --create --zookeeper linux04:2181 --replication-factor 1 --partitions 1 --topic wormhole_feedback +cd /usr/local/kafka && bin/kafka-topics.sh --create --zookeeper linux04:2181 --replication-factor 1 --partitions 1 --topic wormhole_heartbeat +``` + +- 上传压缩包到 /opt 目录下 +- 拷贝压缩包:`cd /usr/local && cp /opt/wormhole-0.6.0-beta.tar.gz .` - 解压:`cd /usr/local && tar -xvf wormhole-0.6.0-beta.tar.gz` - 修改配置文件:`vim /usr/local/wormhole-0.6.0-beta/conf/application.conf` @@ -370,7 +476,7 @@ akka.http.server.request-timeout = 120s wormholeServer { cluster.id = "" #optional global uuid - host = "linux05" + host = "linux01" port = 8989 ui.default.language = "Chinese" token.timeout = 1 @@ -384,7 +490,7 @@ mysql = { db = { driver = "com.mysql.jdbc.Driver" user = "root" - password = "123456" + password = "aaabbb123456" url = "jdbc:mysql://linux04:3306/wormhole?useUnicode=true&characterEncoding=UTF-8&useSSL=false" numThreads = 4 minConnections = 4 @@ -510,12 +616,13 @@ maintenance = { #} ``` -- 初始化数据库: - - 创建表:`create database wormhole character set utf8;` - 初始化表结构脚本路径: - 该脚本存在一个问题:初始化脚本和补丁脚本混在一起,所以直接复制执行会有报错,但是报错的部分是不影响 - 我是直接把基础 sql 和补丁 sql 分开执行,方便判断。 +- 启动:`sh /usr/local/wormhole-0.6.0-beta/bin/start.sh` +- 查看 log:`tail -200f /usr/local/wormhole-0.6.0-beta/logs/application.log` - 部署完成,浏览器访问: +- 默认管理员用户名:admin,密码:admin ------------------------------------------------------------------- @@ -524,7 +631,7 @@ maintenance = { - **参考官网,必须先了解下**: - 必须创建用户,后面才能进入 Project 里面创建 Stream / Flow - 创建的用户类型必须是:`user` - +- 假设这里创建的用户叫做:`user1@bg.com` ------------------------------------------------------------------- @@ -535,12 +642,13 @@ maintenance = { - Instance 用于绑定各个组件的所在服务连接 - 一般我们都会选择 Kafka 作为 source,后面的基础也是基于 Kafka 作为 Source 的场景 - 假设填写实例名:`source_kafka` +- URL:`linux04:9092` #### 创建 Database - 各个组件的具体数据库、Topic 等信息 -- 假设填写 topic:`source` - +- 假设填写 Topic Name:`source` +- Partition:1 #### 创建 Namespace @@ -567,12 +675,15 @@ maintenance = { #### 创建 Instance - 假设填写实例名:`sink_mysql` +- URL:`linux04:3306` #### 创建 Database - 假设填写 Database Name:`sink` - config 参数:`useUnicode=true&characterEncoding=UTF-8&useSSL=false&rewriteBatchedStatements=true` + + #### 创建 Namespace - 假设填写 Tables: `user id` @@ -597,8 +708,9 @@ maintenance = { - Stream type 类型选择:`Flink` - 假设填写 Name:`wormhole_stream_test` -## Flink Flow(流式作业) +## Flink Flow +- 假设 Flow name 为:`wormhole_flow_test` - Flow 是在 Project 内容页下才能创建 - 并且是 Project 下面的用户才能创建,admin 用户没有权限 - Flow 会关联 source 和 sink @@ -622,11 +734,27 
@@ maintenance = { ## Kafka 发送测试数据 -- `cd /usr/local/kafka/bin` -- `./kafka-console-producer.sh --broker-list linux01:9092 --topic source --property "parse.key=true" --property "key.separator=@@@"` +- 在 linux04 机子上 +- `cd /usr/local/kafka/bin && ./kafka-console-producer.sh --broker-list linux04:9092 --topic source --property "parse.key=true" --property "key.separator=@@@"` - 发送 UMS 流消息协议规范格式: ``` -data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 1, "name": "test", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:00:00"} +data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 1, "name": "test1", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:01:00"} + +data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 2, "name": "test2", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:02:00"} + +data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 3, "name": "test3", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:03:00"} + +data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 4, "name": "test4", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:04:00"} + +data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 5, "name": "test5", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:05:00"} + +data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 6, "name": "test6", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:06:00"} + +data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 7, "name": "test7", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:07:00"} + +data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 8, "name": "test8", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:08:00"} + +data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 9, "name": "test9", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:09:00"} ``` From 656f0cfd9c03d208d9b4fa896c1a7ca81b800023 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 28 Dec 2018 17:32:09 +0800 Subject: [PATCH 035/124] :construction: Wormhole --- markdown-file/Wormhole-Install-And-Settings.md | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/markdown-file/Wormhole-Install-And-Settings.md b/markdown-file/Wormhole-Install-And-Settings.md index 172691f8..9d464355 100644 --- a/markdown-file/Wormhole-Install-And-Settings.md +++ b/markdown-file/Wormhole-Install-And-Settings.md @@ -37,7 +37,7 @@ #### 特别说明 -- **5 台 8C32G 服务器 CentOS 7.5,内存推荐 16G 或以上。** +- **4 台 8C32G 服务器 CentOS 7.5,内存推荐 16G 或以上。** - **为了方便,所有服务器都已经关闭防火墙,并且在云服务上设置安全组对外开通所有端口** - **全程 root 用户** - 整体部署结构图: @@ -55,7 +55,6 @@ hostnamectl --static set-hostname linux01 hostnamectl --static set-hostname linux02 hostnamectl --static set-hostname linux03 hostnamectl --static set-hostname linux04 -hostnamectl --static set-hostname linux05 ``` - 给所有服务器设置 hosts:`vim /etc/hosts` @@ -65,7 +64,6 @@ hostnamectl --static set-hostname linux05 172.16.0.92 linux02 172.16.0.133 linux03 172.16.0.159 linux04 -172.16.0.184 linux05 ``` - 在 linux01 生成密钥对,设置 SSH 免密登录 @@ -88,7 +86,6 @@ ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@linux03(根据提示输入 linux03 ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@linux04(根据提示输入 linux04 密码) -ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@linux05(根据提示输入 linux05 密码) 在 linux01 上测试 ssh linux01 @@ -98,8 
+95,6 @@ ssh linux02

 ssh linux03

 ssh linux04
-
-ssh linux05
 ```

 - 安装基础软件:`yum install -y zip unzip lrzsz git epel-release wget htop deltarpm`
 - 安装 Ansible:`yum install -y ansible`
 - 配置 Inventory 编辑配置文件:`vim /etc/ansible/hosts`
 - 在文件尾部补上如下内容
@@ -116,8 +111,6 @@ linux03

 [kafka-host]
 linux04

-[wh-host]
-linux05
 ```

 - 测试 Ansible:`ansible all -a 'ps'`,必须保证能得到如下结果:
@@ -135,12 +128,6 @@ linux02 | CHANGED | rc=0 >>
 10603 pts/1    00:00:00 python
 10604 pts/1    00:00:00 ps

-linux05 | CHANGED | rc=0 >>
-  PID TTY          TIME CMD
-10573 pts/0    00:00:00 sh
-10586 pts/0    00:00:00 python
-10587 pts/0    00:00:00 ps
-
 linux03 | CHANGED | rc=0 >>
   PID TTY          TIME CMD
 10586 pts/1    00:00:00 sh
@@ -242,8 +229,6 @@ scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux02:/opt

 scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux03:/opt

 scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux04:/opt
-
-scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux05:/opt

From 02747b9e72a3ad2e6ef7b129a89acbe157d7ae60 Mon Sep 17 00:00:00 2001
From: judasn
Date: Fri, 28 Dec 2018 17:47:45 +0800
Subject: [PATCH 036/124] :construction: Wormhole

---
 markdown-file/Wormhole-Install-And-Settings.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/markdown-file/Wormhole-Install-And-Settings.md b/markdown-file/Wormhole-Install-And-Settings.md
index 9d464355..1a7150bb 100644
--- a/markdown-file/Wormhole-Install-And-Settings.md
+++ b/markdown-file/Wormhole-Install-And-Settings.md
@@ -5,6 +5,7 @@

 - **感谢 Wormhole 的官方帮助!官方微信群很友好,这让我很意外,只能感谢了!**
 - 本人大数据和 Ansible 刚看,只会皮毛的皮毛。但是也因为这样的契机促使了我写这篇文章。
+- 因为刚入门,需要了解细节,所以没用 Ambari 这类工具,已经熟悉的可以考虑使用。
 - 希望对你们有帮助。

 -------------------------------------------------------------------

From 8f202036f2c159834cceca9afba910d395ce1781 Mon Sep 17 00:00:00 2001
From: judasn
Date: Fri, 28 Dec 2018 17:52:20 +0800
Subject: [PATCH 037/124] :construction: Wormhole

---
 markdown-file/Hadoop-Install-And-Settings.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/markdown-file/Hadoop-Install-And-Settings.md b/markdown-file/Hadoop-Install-And-Settings.md
index ad398971..7f42810f 100644
--- a/markdown-file/Hadoop-Install-And-Settings.md
+++ b/markdown-file/Hadoop-Install-And-Settings.md
@@ -494,6 +494,7 @@ tcp6       0      0 :::37481                :::*                    LISTEN

 ## 资料

+- [如何正确的为 MapReduce 配置内存分配](http://loupipalien.com/2018/03/how-to-properly-configure-the-memory-allocations-for-mapreduce/)
 - 
 - 
 - 

From 5d8534bfddae266f257dca8df0c12071a6940bbc Mon Sep 17 00:00:00 2001
From: judasn
Date: Mon, 14 Jan 2019 22:28:12 +0800
Subject: [PATCH 038/124] 2019-01-14

---
 centos-settings/CentOS-Network-Settings.md | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/centos-settings/CentOS-Network-Settings.md b/centos-settings/CentOS-Network-Settings.md
index d94b28ef..ba95ede9 100644
--- a/centos-settings/CentOS-Network-Settings.md
+++ b/centos-settings/CentOS-Network-Settings.md
@@ -44,19 +44,22 @@
 ```

 - 重启网络配置:`service network restart`

-## CentOS 7
+## CentOS 7.x

 ### 命令行下设置网络

 - 查看系统下有哪些网卡:`ls /etc/sysconfig/network-scripts/`,新版本不叫 eth0 这类格式了,比如我当前这个叫做:ifcfg-ens33(你的肯定跟我不一样,但是格式类似)
+- 先备份:`cp /etc/sysconfig/network-scripts/ifcfg-ens33 /etc/sysconfig/network-scripts/ifcfg-ens33.bak`
 - 编辑该文件:`vim /etc/sysconfig/network-scripts/ifcfg-ens33`,改为如下信息:(IP 段自己改为自己的网络情况)

``` ini
TYPE=Ethernet
+PROXY_METHOD=none
+BROWSER_ONLY=no
BOOTPROTO=static
-IPADDR=192.168.1.126
+IPADDR=192.168.0.127
NETMASK=255.255.255.0
-GATEWAY=192.168.1.1
+GATEWAY=192.168.0.1
DNS1=8.8.8.8
DNS2=114.114.114.114
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_PEERDNS=yes
IPV6_PEERROUTES=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
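+# 补充注释(示例说明,非原文):UUID 用来唯一标识这份网卡连接配置,
+# 克隆出来的机器不要沿用原机器的 UUID,可用 uuidgen 命令生成一个新值填入,或直接删掉这一行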
-UUID=15a16b51-0369-44d7-87b4-667f715a68df
+UUID=b9f01b7d-4ebf-4d3a-a4ec-ae203425bb11
DEVICE=ens33
ONBOOT=yes
```

From 24076b9ee0c0f4051b62aecdd53ba2496a927154 Mon Sep 17 00:00:00 2001
From: judasn
Date: Mon, 14 Jan 2019 22:38:46 +0800
Subject: [PATCH 039/124] 2019-01-14

---
 markdown-file/CentOS-Virtual-Machine-Copy-Settings.md | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/markdown-file/CentOS-Virtual-Machine-Copy-Settings.md b/markdown-file/CentOS-Virtual-Machine-Copy-Settings.md
index 7c415bde..3cdbd8f2 100644
--- a/markdown-file/CentOS-Virtual-Machine-Copy-Settings.md
+++ b/markdown-file/CentOS-Virtual-Machine-Copy-Settings.md
@@ -8,8 +8,9 @@

-## 修改方法
+## CentOS 6 修改方法

+- 设置 hostname:`vim /etc/sysconfig/network`,把其中的 HOSTNAME 改为 `HOSTNAME=linux02`(注意:`hostnamectl` 是 CentOS 7 才有的命令,CentOS 6 上不能用)
 - 命令:`sudo vim /etc/udev/rules.d/70-persistent-net.rules`
 - 正常情况下,该文件中此时应该有两行信息
 - 在文件中把 NAME="eth0" 的这一行注释掉
@@ -23,4 +24,10 @@
 - 如果显示两行 UUID 的信息的话,复制不是 System eth0 的那个 UUID 值,下面有用。
 - 编辑:`sudo vim /etc/sysconfig/network-scripts/ifcfg-eth0`
 - 把文件中的 UUID 值 改为上面要求复制的 UUID 值。
-  - 保存配置文件,重启系统,正常应该是可以了。
\ No newline at end of file
+  - 保存配置文件,重启系统,正常应该是可以了。
+
+## CentOS 7 修改方法
+
+- 在 VMware 15 Pro 的情况下,直接 copy 出新虚拟机之后,修改网卡配置即可
+- 编辑该文件:`vim /etc/sysconfig/network-scripts/ifcfg-ens33`
+  - 把 ip 地址修改即可
\ No newline at end of file

From aa91f4622fb12f0373940384c34a865d621a8ee2 Mon Sep 17 00:00:00 2001
From: judasn
Date: Thu, 31 Jan 2019 13:43:01 +0800
Subject: [PATCH 040/124] :construction: K8S

---
 markdown-file/Docker-Install-And-Usage.md | 53 ++++++++++++++++++++++-
 1 file changed, 52 insertions(+), 1 deletion(-)

diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md
index b5d5aa3e..947c59d7 100644
--- a/markdown-file/Docker-Install-And-Usage.md
+++ b/markdown-file/Docker-Install-And-Usage.md
@@ -810,8 +810,53 @@ sudo chmod +x /usr/local/bin/docker-compose
 - `容器聚合`
 - 主要角色:Master、Node
+
+
+#### 安装准备 - Kubernetes 1.13 版本
+
+- 推荐最低 2C2G,优先:2C4G 或以上
+- 特别说明:1.13 之前的版本,由于网络问题,需要各种设置,这里就不再多说了。1.13 之后相对就简单了点。
+- 优先官网软件包:kubeadm
+- 官网资料:
+  - issues 入口:
+  - 源码入口:
+  - 安装指导:
+  - 按官网要求做下检查:
+    - 网络环境:
+    - 端口检查:
+  - 对 Docker 版本的支持,这里官网推荐的是 18.06:
+- 三大核心工具包,都需要各自安装,并且注意版本关系:
+  - `kubeadm`: the command to bootstrap the cluster.
+    - 集群部署、管理工具
+  - `kubelet`: the component that runs on all of the machines in your cluster and does things like starting pods and containers.
+    - 具体执行层面的管理 Pod 和 Docker 工具
+  - `kubectl`: the command line util to talk to your cluster. 
+ - 操作 k8s 的命令行入口工具 +- 官网插件安装过程的故障排查: +- 其他部署方案: + - + - + - + +#### 开始安装 - Kubernetes 1.13.2 版本 + +- 官网最新版本: +- 官网 1.13 版本的 changelog: +- 所有节点安装 Docker 18.06,并设置阿里云源 +- 所有节点设置 kubernetes repo 源,并安装 Kubeadm、Kubelet、Kubectl 都设置阿里云的源 +- Kubeadm 初始化集群过程当中,它会下载很多的镜像,默认也是去 Google 家里下载。但是 1.13 新增了一个配置:`--image-repository` 算是救了命。 + #### 主要概念 +- Master 节点,负责集群的调度、集群的管理 + - 常见组件: + - kube-apiserver:API服务 + - kube-scheduler:调度 + - Kube-Controller-Manager:容器编排 + - Etcd:保存了整个集群的状态 + - Kube-proxy:负责为 Service 提供 cluster 内部的服务发现和负载均衡 + - Kube-DNS:负责为整个集群提供 DNS 服务 +- Workers 节点,负责容器相关的处理 + - `Pods` ``` @@ -946,7 +991,13 @@ Master选举确保kube-scheduler和kube-controller-manager高可用 ## 资料 - 书籍:《第一本 Docker 书》 - +- [如何更“优雅”地部署Kubernetes集群](https://juejin.im/entry/5a03f98d6fb9a04524054516) +- []() +- []() +- []() +- []() +- []() +- []() From fb7c53d797d05be41b1bf6954317e105169159b7 Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 31 Jan 2019 23:05:25 +0800 Subject: [PATCH 041/124] 2019-01-31 --- ...er_k8s_disable_firewalld_centos7-aliyun.sh | 49 +++++ markdown-file/Docker-Install-And-Usage.md | 190 +++++++++++++++++- 2 files changed, 237 insertions(+), 2 deletions(-) create mode 100644 favorite-file/shell/install_docker_k8s_disable_firewalld_centos7-aliyun.sh diff --git a/favorite-file/shell/install_docker_k8s_disable_firewalld_centos7-aliyun.sh b/favorite-file/shell/install_docker_k8s_disable_firewalld_centos7-aliyun.sh new file mode 100644 index 00000000..66adafa2 --- /dev/null +++ b/favorite-file/shell/install_docker_k8s_disable_firewalld_centos7-aliyun.sh @@ -0,0 +1,49 @@ +#!/bin/sh + +echo "-----------------------------------------禁用防火墙" +systemctl stop firewalld.service +systemctl disable firewalld.service + +echo "-----------------------------------------安装 docker 所需环境" + +yum install -y yum-utils device-mapper-persistent-data lvm2 + +echo "-----------------------------------------添加 repo(可能网络会很慢,有时候会报:Timeout,所以要多试几次)" +echo "-----------------------------------------官网的地址 yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo" +echo "-----------------------------------------这里用阿里云进行加速,不然可能会出现无法安装,阿里云官网说明:https://help.aliyun.com/document_detail/60742.html" + +yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo +yum makecache fast + +echo "-----------------------------------------开始安装 docker" + +yum install -y docker-ce-18.06.1.ce-3.el7 + +echo "-----------------------------------------启动 Docker" + +systemctl start docker.service +systemctl enable docker.service + +echo "-----------------------------------------安装结束" + +echo "-----------------------------------------docker 加速" + +touch /etc/docker/daemon.json + +cat << EOF >> /etc/docker/daemon.json +{ + "registry-mirrors": ["https://ldhc17y9.mirror.aliyuncs.com"] +} +EOF + +systemctl daemon-reload +systemctl restart docker + +echo "-----------------------------------------运行 hello world 镜像" + +docker run hello-world + + + + + diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index 947c59d7..29cd34d7 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -839,23 +839,209 @@ sudo chmod +x /usr/local/bin/docker-compose #### 开始安装 - Kubernetes 1.13.2 版本 +- 三台机子: + - master-1:`192.168.0.127` + - node-1:`192.168.0.128` + - node-2:`192.168.0.129` - 官网最新版本: - 官网 1.13 版本的 changelog: - 所有节点安装 Docker 18.06,并设置阿里云源 + - 可以参考:[点击我o(∩_∩)o 
](https://github.com/judasn/Linux-Tutorial/blob/master/favorite-file/shell/install_docker_k8s_disable_firewalld_centos7-aliyun.sh) + - 核心,查看可以安装的 Docker 列表:`yum list docker-ce --showduplicates` - 所有节点设置 kubernetes repo 源,并安装 Kubeadm、Kubelet、Kubectl 都设置阿里云的源 - Kubeadm 初始化集群过程当中,它会下载很多的镜像,默认也是去 Google 家里下载。但是 1.13 新增了一个配置:`--image-repository` 算是救了命。 +- 具体流程: + +``` +主机时间同步 +systemctl start chronyd.service +systemctl enable chronyd.service + +systemctl stop firewalld.service +systemctl disable firewalld.service +systemctl disable iptables.service + + +setenforce 0 + +sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config + +swapoff -a && sysctl -w vm.swappiness=0 + + +hostnamectl --static set-hostname k8s-master-1 +hostnamectl --static set-hostname k8s-node-1 +hostnamectl --static set-hostname k8s-node-2 + + +vim /etc/hosts +192.168.0.127 k8s-master-1 +192.168.0.128 k8s-node-1 +192.168.0.129 k8s-node-2 + +master 免密 +生产密钥对 +ssh-keygen -t rsa + + +公钥内容写入 authorized_keys +cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys + +测试: +ssh localhost + +将公钥复制到其他机子 +ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@k8s-node-1(根据提示输入 k8s-node-1 密码) +ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@k8s-node-2(根据提示输入 k8s-node-2 密码) + + +在 linux01 上测试 +ssh k8s-master-1 +ssh k8s-node-1 +ssh k8s-node-2 + + + +vim /etc/yum.repos.d/kubernetes.repo + +[kubernetes] +name=Kubernetes +baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/ +enabled=1 +gpgcheck=1 +repo_gpgcheck=1 +gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg + + +scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-1:/etc/yum.repos.d/ +scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-2:/etc/yum.repos.d/ + + +所有机子 +yum install -y kubelet-1.13.2 kubeadm-1.13.2 kubectl-1.13.2 --disableexcludes=kubernetes + + +vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +最后一行添加:Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs" + + +systemctl enable kubelet && systemctl start kubelet + +kubeadm version +kubectl version + + +vim /etc/sysctl.d/k8s.conf +net.bridge.bridge-nf-call-ip6tables = 1 +net.bridge.bridge-nf-call-iptables = 1 + +sysctl --system + +``` + +- 初始化 master 节点: + +``` + +推荐: +kubeadm init \ +--image-repository registry.aliyuncs.com/google_containers \ +--pod-network-cidr 10.244.0.0/16 \ +--kubernetes-version 1.13.2 \ +--service-cidr 10.96.0.0/12 \ +--apiserver-advertise-address=0.0.0.0 \ +--ignore-preflight-errors=Swap + +10.244.0.0/16是 flannel 插件固定使用的ip段,它的值取决于你准备安装哪个网络插件 + +终端会输出核心内容: +Your Kubernetes master has initialized successfully! + +To start using your cluster, you need to run the following as a regular user: + + mkdir -p $HOME/.kube + sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + +You should now deploy a pod network to the cluster. 
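+# 补充注释(非 kubeadm 原始输出):上面这行提示就是要求再安装一个 Pod 网络插件;
+# 因为前面 --pod-network-cidr 填的是 10.244.0.0/16,对应的就是下文要装的 flannel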
+Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: + https://kubernetes.io/docs/concepts/cluster-administration/addons/ + +You can now join any number of machines by running the following on each node +as root: + + kubeadm join 192.168.0.127:6443 --token 53mly1.yf9llsghle20p2uq --discovery-token-ca-cert-hash sha256:a9f26eef42c30d9f4b20c52058a2eaa696edc3f63ba20be477fe1494ec0146f7 + + + + +也可以使用另外一个流行网络插件 calico: +kubeadm init --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr=192.168.0.0/16 --kubernetes-version v1.13.2 + + + +mkdir -p $HOME/.kube +sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config +sudo chown $(id -u):$(id -g) $HOME/.kube/config + +export KUBECONFIG=/etc/kubernetes/admin.conf +echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.zshrc +source ~/.zshrc + + +查询我们的 token +kubeadm token list + + +kubectl cluster-info +``` + +- 到 node 节点进行加入: + +``` + +kubeadm join 192.168.0.127:6443 --token 53mly1.yf9llsghle20p2uq --discovery-token-ca-cert-hash sha256:a9f26eef42c30d9f4b20c52058a2eaa696edc3f63ba20be477fe1494ec0146f7 + + +在 master 节点上:kubectl get cs +NAME STATUS MESSAGE ERROR +controller-manager Healthy ok +scheduler Healthy ok +etcd-0 Healthy {"health": "true"} +结果都是 Healthy 则表示可以了,不然就得检查。必要时可以用:`kubeadm reset` 重置,重新进行集群初始化 + + + +master 安装 Flannel +cd /opt && wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml + +kubectl apply -f /opt/kube-flannel.yml + + +验证: +kubectl get pods --all-namespaces +kubectl get nodes +如果还是 NotReady,则查看错误信息: +kubectl describe pod kube-scheduler-master.hanli.com -n kube-system +kubectl logs kube-scheduler-master.hanli.com -n kube-system +tail -f /var/log/messages + +``` + + + #### 主要概念 - Master 节点,负责集群的调度、集群的管理 - - 常见组件: + - 常见组件: - kube-apiserver:API服务 - kube-scheduler:调度 - Kube-Controller-Manager:容器编排 - Etcd:保存了整个集群的状态 - Kube-proxy:负责为 Service 提供 cluster 内部的服务发现和负载均衡 - Kube-DNS:负责为整个集群提供 DNS 服务 -- Workers 节点,负责容器相关的处理 +- node 节点,负责容器相关的处理 - `Pods` From 83581195d6bf13d6f338484113ba495af376b9ed Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 31 Jan 2019 23:23:42 +0800 Subject: [PATCH 042/124] 2019-01-31 --- markdown-file/Docker-Install-And-Usage.md | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index 29cd34d7..1081b11f 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -854,21 +854,21 @@ sudo chmod +x /usr/local/bin/docker-compose ``` 主机时间同步 -systemctl start chronyd.service -systemctl enable chronyd.service +systemctl start chronyd.service && systemctl enable chronyd.service systemctl stop firewalld.service systemctl disable firewalld.service systemctl disable iptables.service -setenforce 0 - -sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config +setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config swapoff -a && sysctl -w vm.swappiness=0 + + + hostnamectl --static set-hostname k8s-master-1 hostnamectl --static set-hostname k8s-node-1 hostnamectl --static set-hostname k8s-node-2 @@ -942,6 +942,7 @@ sysctl --system - 初始化 master 节点: ``` +echo 1 > /proc/sys/net/ipv4/ip_forward 推荐: kubeadm init \ @@ -970,7 +971,7 @@ Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: You can now join any number of machines by running the following on each node as root: - kubeadm join 192.168.0.127:6443 --token 
53mly1.yf9llsghle20p2uq --discovery-token-ca-cert-hash sha256:a9f26eef42c30d9f4b20c52058a2eaa696edc3f63ba20be477fe1494ec0146f7 + kubeadm join 192.168.0.127:6443 --token 84kj2n.1kdj36xcsvyzx29i --discovery-token-ca-cert-hash sha256:bcd2edf9878e82db6f73f1253e8d6b6e7b91160db706f7ee59b9a9e32c6099e3 @@ -999,8 +1000,9 @@ kubectl cluster-info - 到 node 节点进行加入: ``` +echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables -kubeadm join 192.168.0.127:6443 --token 53mly1.yf9llsghle20p2uq --discovery-token-ca-cert-hash sha256:a9f26eef42c30d9f4b20c52058a2eaa696edc3f63ba20be477fe1494ec0146f7 +kubeadm join 192.168.0.127:6443 --token 84kj2n.1kdj36xcsvyzx29i --discovery-token-ca-cert-hash sha256:bcd2edf9878e82db6f73f1253e8d6b6e7b91160db706f7ee59b9a9e32c6099e3 在 master 节点上:kubectl get cs From 8f01db3a8ce86f08caa1685c35b68e7fb5791e46 Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 31 Jan 2019 23:44:39 +0800 Subject: [PATCH 043/124] 2019-01-31 --- markdown-file/Docker-Install-And-Usage.md | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index 1081b11f..8a804f76 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -934,6 +934,7 @@ kubectl version vim /etc/sysctl.d/k8s.conf net.bridge.bridge-nf-call-ip6tables = 1 net.bridge.bridge-nf-call-iptables = 1 +net.ipv4.ip_forward=1 sysctl --system @@ -955,6 +956,7 @@ kubeadm init \ 10.244.0.0/16是 flannel 插件固定使用的ip段,它的值取决于你准备安装哪个网络插件 +这个过程会下载一些 docker 镜像,时间可能会比较久,看你网络情况。 终端会输出核心内容: Your Kubernetes master has initialized successfully! @@ -971,7 +973,7 @@ Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: You can now join any number of machines by running the following on each node as root: - kubeadm join 192.168.0.127:6443 --token 84kj2n.1kdj36xcsvyzx29i --discovery-token-ca-cert-hash sha256:bcd2edf9878e82db6f73f1253e8d6b6e7b91160db706f7ee59b9a9e32c6099e3 + kubeadm join 192.168.0.127:6443 --token r7qkjb.2g1zikg6yvrmwlp6 --discovery-token-ca-cert-hash sha256:14601b9269829cd86756ad30aaeb3199158cbc2c150ef8b9dc2ce00f1fa5c2d0 @@ -980,11 +982,6 @@ as root: kubeadm init --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr=192.168.0.0/16 --kubernetes-version v1.13.2 - -mkdir -p $HOME/.kube -sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config -sudo chown $(id -u):$(id -g) $HOME/.kube/config - export KUBECONFIG=/etc/kubernetes/admin.conf echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.zshrc source ~/.zshrc @@ -1002,7 +999,7 @@ kubectl cluster-info ``` echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables -kubeadm join 192.168.0.127:6443 --token 84kj2n.1kdj36xcsvyzx29i --discovery-token-ca-cert-hash sha256:bcd2edf9878e82db6f73f1253e8d6b6e7b91160db706f7ee59b9a9e32c6099e3 +kubeadm join 192.168.0.127:6443 --token r7qkjb.2g1zikg6yvrmwlp6 --discovery-token-ca-cert-hash sha256:14601b9269829cd86756ad30aaeb3199158cbc2c150ef8b9dc2ce00f1fa5c2d0 在 master 节点上:kubectl get cs From 06e4e0acb4b9bf0031beb29dbb7fff30a3165134 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 1 Feb 2019 10:54:57 +0800 Subject: [PATCH 044/124] :construction: K8S --- markdown-file/Docker-Install-And-Usage.md | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index 8a804f76..1616f42e 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ 
b/markdown-file/Docker-Install-And-Usage.md @@ -947,7 +947,7 @@ echo 1 > /proc/sys/net/ipv4/ip_forward 推荐: kubeadm init \ ---image-repository registry.aliyuncs.com/google_containers \ +--image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \ --pod-network-cidr 10.244.0.0/16 \ --kubernetes-version 1.13.2 \ --service-cidr 10.96.0.0/12 \ @@ -977,14 +977,9 @@ as root: - -也可以使用另外一个流行网络插件 calico: -kubeadm init --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr=192.168.0.0/16 --kubernetes-version v1.13.2 - - -export KUBECONFIG=/etc/kubernetes/admin.conf -echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.zshrc -source ~/.zshrc +mkdir -p $HOME/.kube +sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config +sudo chown $(id -u):$(id -g) $HOME/.kube/config 查询我们的 token From 4bfe839f1bc3e0ea2c90782961dfe40e16e1bbc7 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 1 Feb 2019 22:49:59 +0800 Subject: [PATCH 045/124] 2019-02-01 --- markdown-file/Docker-Install-And-Usage.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index 1616f42e..b97aadcb 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -920,23 +920,25 @@ scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-2:/etc/yum.repos.d/ 所有机子 yum install -y kubelet-1.13.2 kubeadm-1.13.2 kubectl-1.13.2 --disableexcludes=kubernetes - +所有机子 vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 最后一行添加:Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs" - +所有机子 systemctl enable kubelet && systemctl start kubelet kubeadm version kubectl version - +必须配置: vim /etc/sysctl.d/k8s.conf net.bridge.bridge-nf-call-ip6tables = 1 net.bridge.bridge-nf-call-iptables = 1 net.ipv4.ip_forward=1 +vm.swappiness=0 -sysctl --system +modprobe br_netfilter +sysctl -p /etc/sysctl.d/k8s.conf ``` @@ -973,7 +975,7 @@ Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: You can now join any number of machines by running the following on each node as root: - kubeadm join 192.168.0.127:6443 --token r7qkjb.2g1zikg6yvrmwlp6 --discovery-token-ca-cert-hash sha256:14601b9269829cd86756ad30aaeb3199158cbc2c150ef8b9dc2ce00f1fa5c2d0 + kubeadm join 192.168.0.127:6443 --token 6y18dy.oy5bt6d5y4nvop28 --discovery-token-ca-cert-hash sha256:a4e8aed696bc0481bb3f6e0af4256d41a1779141241e2684fdc6aa8bcca4d58b From a8cbffae29d9c4521805335a860049f822c4d63e Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 1 Feb 2019 23:01:53 +0800 Subject: [PATCH 046/124] 2019-02-01 --- markdown-file/Docker-Install-And-Usage.md | 42 +++++++++++++++++++---- 1 file changed, 35 insertions(+), 7 deletions(-) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index b97aadcb..d3770373 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -917,9 +917,37 @@ scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-1:/etc/yum.repos.d/ scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-2:/etc/yum.repos.d/ + +所有机子 +iptables -P FORWARD ACCEPT + 所有机子 yum install -y kubelet-1.13.2 kubeadm-1.13.2 kubectl-1.13.2 --disableexcludes=kubernetes + +所有机子 +vim /etc/cni/net.d/10-flannel.conflist,内容 +{ + "name": "cbr0", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } 
+ ] +} + + + 所有机子 vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 最后一行添加:Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs" @@ -987,8 +1015,14 @@ sudo chown $(id -u):$(id -g) $HOME/.kube/config 查询我们的 token kubeadm token list - kubectl cluster-info + + +master 安装 Flannel +cd /opt && wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml + +kubectl apply -f /opt/kube-flannel.yml + ``` - 到 node 节点进行加入: @@ -1008,12 +1042,6 @@ etcd-0 Healthy {"health": "true"} -master 安装 Flannel -cd /opt && wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml - -kubectl apply -f /opt/kube-flannel.yml - - 验证: kubectl get pods --all-namespaces kubectl get nodes From 90ea22b3296a936f38f8d757008021e574c99562 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 1 Feb 2019 23:10:40 +0800 Subject: [PATCH 047/124] 2019-02-01 --- markdown-file/Docker-Install-And-Usage.md | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index d3770373..30d899b5 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -965,8 +965,11 @@ net.bridge.bridge-nf-call-iptables = 1 net.ipv4.ip_forward=1 vm.swappiness=0 -modprobe br_netfilter -sysctl -p /etc/sysctl.d/k8s.conf + +scp -r /etc/sysctl.d/k8s.conf root@k8s-node-1:/etc/sysctl.d/ +scp -r /etc/sysctl.d/k8s.conf root@k8s-node-2:/etc/sysctl.d/ + +modprobe br_netfilter && sysctl -p /etc/sysctl.d/k8s.conf ``` @@ -1003,10 +1006,11 @@ Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: You can now join any number of machines by running the following on each node as root: - kubeadm join 192.168.0.127:6443 --token 6y18dy.oy5bt6d5y4nvop28 --discovery-token-ca-cert-hash sha256:a4e8aed696bc0481bb3f6e0af4256d41a1779141241e2684fdc6aa8bcca4d58b + kubeadm join 192.168.0.127:6443 --token 6m0emc.x9uim283uzevn3pm --discovery-token-ca-cert-hash sha256:c8b1b72de1eabc71df5490afa7cd8dd1c1952234e65b30262ed5084c9d1f10c2 +master 机子: mkdir -p $HOME/.kube sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config sudo chown $(id -u):$(id -g) $HOME/.kube/config @@ -1030,7 +1034,7 @@ kubectl apply -f /opt/kube-flannel.yml ``` echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables -kubeadm join 192.168.0.127:6443 --token r7qkjb.2g1zikg6yvrmwlp6 --discovery-token-ca-cert-hash sha256:14601b9269829cd86756ad30aaeb3199158cbc2c150ef8b9dc2ce00f1fa5c2d0 +kubeadm join 192.168.0.127:6443 --token 6m0emc.x9uim283uzevn3pm --discovery-token-ca-cert-hash sha256:c8b1b72de1eabc71df5490afa7cd8dd1c1952234e65b30262ed5084c9d1f10c2 在 master 节点上:kubectl get cs From 6e4226de921c1f3991783577040c704998429708 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 1 Feb 2019 23:29:36 +0800 Subject: [PATCH 048/124] 2019-02-01 --- markdown-file/Docker-Install-And-Usage.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index 30d899b5..228d8b1b 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -1047,11 +1047,11 @@ etcd-0 Healthy {"health": "true"} 验证: -kubectl get pods --all-namespaces kubectl get nodes 如果还是 NotReady,则查看错误信息: -kubectl describe pod kube-scheduler-master.hanli.com -n kube-system -kubectl logs kube-scheduler-master.hanli.com -n kube-system +kubectl get pods 
--all-namespaces,其中:Pending/ContainerCreating/ImagePullBackOff 都是 Pod 没有就绪,我们可以这样查看对应 Pod 遇到了什么问题 +kubectl describe pod --namespace=kube-system +或者:kubectl logs -n kube-system tail -f /var/log/messages ``` From 1849cccfbcdeba8d6c8889e3e3bebd4d0554d92f Mon Sep 17 00:00:00 2001 From: judasn Date: Sun, 10 Feb 2019 22:55:24 +0800 Subject: [PATCH 049/124] =?UTF-8?q?2019-02-10=20=E5=AE=8C=E5=96=84=20K8S?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/Docker-Install-And-Usage.md | 402 -------------------- markdown-file/K8S-Install-And-Usage.md | 442 ++++++++++++++++++++++ 2 files changed, 442 insertions(+), 402 deletions(-) create mode 100644 markdown-file/K8S-Install-And-Usage.md diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index 228d8b1b..20db7079 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -796,407 +796,6 @@ sudo chmod +x /usr/local/bin/docker-compose - Docker Swarm 是一个 Docker 集群管理工具 -## Kubernetes - -- 目前流行的容器编排系统 -- 简称:K8S -- 官网: -- 主要解决几个问题: - - `调度` - - `生命周期及健康状况` - - `服务发现` - - `监控` - - `认证` - - `容器聚合` -- 主要角色:Master、Node - - -#### 安装准备 - Kubernetes 1.13 版本 - -- 推荐最低 2C2G,优先:2C4G 或以上 -- 特别说明:1.13 之前的版本,由于网络问题,需要各种设置,这里就不再多说了。1.13 之后相对就简单了点。 -- 优先官网软件包:kubeadm -- 官网资料: - - issues 入口: - - 源码入口: - - 安装指导: - - 按官网要求做下检查: - - 网络环境: - - 端口检查: - - 对 Docker 版本的支持,这里官网推荐的是 18.06: -- 三大核心工具包,都需要各自安装,并且注意版本关系: - - `kubeadm`: the command to bootstrap the cluster. - - 集群部署、管理工具 - - `kubelet`: the component that runs on all of the machines in your cluster and does things like starting pods and containers. - - 具体执行层面的管理 Pod 和 Docker 工具 - - `kubectl`: the command line util to talk to your cluster. 
- - 操作 k8s 的命令行入口工具 -- 官网插件安装过程的故障排查: -- 其他部署方案: - - - - - - - -#### 开始安装 - Kubernetes 1.13.2 版本 - -- 三台机子: - - master-1:`192.168.0.127` - - node-1:`192.168.0.128` - - node-2:`192.168.0.129` -- 官网最新版本: -- 官网 1.13 版本的 changelog: -- 所有节点安装 Docker 18.06,并设置阿里云源 - - 可以参考:[点击我o(∩_∩)o ](https://github.com/judasn/Linux-Tutorial/blob/master/favorite-file/shell/install_docker_k8s_disable_firewalld_centos7-aliyun.sh) - - 核心,查看可以安装的 Docker 列表:`yum list docker-ce --showduplicates` -- 所有节点设置 kubernetes repo 源,并安装 Kubeadm、Kubelet、Kubectl 都设置阿里云的源 -- Kubeadm 初始化集群过程当中,它会下载很多的镜像,默认也是去 Google 家里下载。但是 1.13 新增了一个配置:`--image-repository` 算是救了命。 -- 具体流程: - -``` -主机时间同步 -systemctl start chronyd.service && systemctl enable chronyd.service - -systemctl stop firewalld.service -systemctl disable firewalld.service -systemctl disable iptables.service - - -setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config - -swapoff -a && sysctl -w vm.swappiness=0 - - - - - -hostnamectl --static set-hostname k8s-master-1 -hostnamectl --static set-hostname k8s-node-1 -hostnamectl --static set-hostname k8s-node-2 - - -vim /etc/hosts -192.168.0.127 k8s-master-1 -192.168.0.128 k8s-node-1 -192.168.0.129 k8s-node-2 - -master 免密 -生产密钥对 -ssh-keygen -t rsa - - -公钥内容写入 authorized_keys -cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys - -测试: -ssh localhost - -将公钥复制到其他机子 -ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@k8s-node-1(根据提示输入 k8s-node-1 密码) -ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@k8s-node-2(根据提示输入 k8s-node-2 密码) - - -在 linux01 上测试 -ssh k8s-master-1 -ssh k8s-node-1 -ssh k8s-node-2 - - - -vim /etc/yum.repos.d/kubernetes.repo - -[kubernetes] -name=Kubernetes -baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/ -enabled=1 -gpgcheck=1 -repo_gpgcheck=1 -gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg - - -scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-1:/etc/yum.repos.d/ -scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-2:/etc/yum.repos.d/ - - - -所有机子 -iptables -P FORWARD ACCEPT - -所有机子 -yum install -y kubelet-1.13.2 kubeadm-1.13.2 kubectl-1.13.2 --disableexcludes=kubernetes - - -所有机子 -vim /etc/cni/net.d/10-flannel.conflist,内容 -{ - "name": "cbr0", - "plugins": [ - { - "type": "flannel", - "delegate": { - "hairpinMode": true, - "isDefaultGateway": true - } - }, - { - "type": "portmap", - "capabilities": { - "portMappings": true - } - } - ] -} - - - -所有机子 -vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf -最后一行添加:Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs" - -所有机子 -systemctl enable kubelet && systemctl start kubelet - -kubeadm version -kubectl version - -必须配置: -vim /etc/sysctl.d/k8s.conf -net.bridge.bridge-nf-call-ip6tables = 1 -net.bridge.bridge-nf-call-iptables = 1 -net.ipv4.ip_forward=1 -vm.swappiness=0 - - -scp -r /etc/sysctl.d/k8s.conf root@k8s-node-1:/etc/sysctl.d/ -scp -r /etc/sysctl.d/k8s.conf root@k8s-node-2:/etc/sysctl.d/ - -modprobe br_netfilter && sysctl -p /etc/sysctl.d/k8s.conf - -``` - -- 初始化 master 节点: - -``` -echo 1 > /proc/sys/net/ipv4/ip_forward - -推荐: -kubeadm init \ ---image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \ ---pod-network-cidr 10.244.0.0/16 \ ---kubernetes-version 1.13.2 \ ---service-cidr 10.96.0.0/12 \ ---apiserver-advertise-address=0.0.0.0 \ ---ignore-preflight-errors=Swap - -10.244.0.0/16是 flannel 插件固定使用的ip段,它的值取决于你准备安装哪个网络插件 - -这个过程会下载一些 docker 镜像,时间可能会比较久,看你网络情况。 -终端会输出核心内容: -Your Kubernetes master has 
initialized successfully! - -To start using your cluster, you need to run the following as a regular user: - - mkdir -p $HOME/.kube - sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config - sudo chown $(id -u):$(id -g) $HOME/.kube/config - -You should now deploy a pod network to the cluster. -Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: - https://kubernetes.io/docs/concepts/cluster-administration/addons/ - -You can now join any number of machines by running the following on each node -as root: - - kubeadm join 192.168.0.127:6443 --token 6m0emc.x9uim283uzevn3pm --discovery-token-ca-cert-hash sha256:c8b1b72de1eabc71df5490afa7cd8dd1c1952234e65b30262ed5084c9d1f10c2 - - - -master 机子: -mkdir -p $HOME/.kube -sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config -sudo chown $(id -u):$(id -g) $HOME/.kube/config - - -查询我们的 token -kubeadm token list - -kubectl cluster-info - - -master 安装 Flannel -cd /opt && wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml - -kubectl apply -f /opt/kube-flannel.yml - -``` - -- 到 node 节点进行加入: - -``` -echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables - -kubeadm join 192.168.0.127:6443 --token 6m0emc.x9uim283uzevn3pm --discovery-token-ca-cert-hash sha256:c8b1b72de1eabc71df5490afa7cd8dd1c1952234e65b30262ed5084c9d1f10c2 - - -在 master 节点上:kubectl get cs -NAME STATUS MESSAGE ERROR -controller-manager Healthy ok -scheduler Healthy ok -etcd-0 Healthy {"health": "true"} -结果都是 Healthy 则表示可以了,不然就得检查。必要时可以用:`kubeadm reset` 重置,重新进行集群初始化 - - - -验证: -kubectl get nodes -如果还是 NotReady,则查看错误信息: -kubectl get pods --all-namespaces,其中:Pending/ContainerCreating/ImagePullBackOff 都是 Pod 没有就绪,我们可以这样查看对应 Pod 遇到了什么问题 -kubectl describe pod --namespace=kube-system -或者:kubectl logs -n kube-system -tail -f /var/log/messages - -``` - - - - -#### 主要概念 - -- Master 节点,负责集群的调度、集群的管理 - - 常见组件: - - kube-apiserver:API服务 - - kube-scheduler:调度 - - Kube-Controller-Manager:容器编排 - - Etcd:保存了整个集群的状态 - - Kube-proxy:负责为 Service 提供 cluster 内部的服务发现和负载均衡 - - Kube-DNS:负责为整个集群提供 DNS 服务 -- node 节点,负责容器相关的处理 - -- `Pods` - -``` -创建,调度以及管理的最小单元 -共存的一组容器的集合 -容器共享PID,网络,IPC以及UTS命名空间 -容器共享存储卷 -短暂存在 -``` - -- `Volumes` - -``` -数据持久化 -Pod中容器共享数据 -生命周期 -支持多种类型的数据卷 – emptyDir, hostpath, gcePersistentDisk, awsElasticBlockStore, nfs, iscsi, glusterfs, secrets -``` - -- `Labels` - -``` -用以标示对象(如Pod)的key/value对 -组织并选择对象子集 -``` - -- `Replication Controllers` - -``` -确保在任一时刻运行指定数目的Pod -容器重新调度 -规模调整 -在线升级 -多发布版本跟踪 -``` - -- `Services` - -``` -抽象一系列Pod并定义其访问规则 -固定IP地址和DNS域名 -通过环境变量和DNS发现服务 -负载均衡 -外部服务 – ClusterIP, NodePort, LoadBalancer -``` - - -#### 主要组成模块 - -- `etcd` - -``` -高可用的Key/Value存储 -只有apiserver有读写权限 -使用etcd集群确保数据可靠性 -``` - -- `apiserver` - -``` -Kubernetes系统入口, REST -认证 -授权 -访问控制 -服务帐号 -资源限制 -``` - -- `kube-scheduler` - -``` -资源需求 -服务需求 -硬件/软件/策略限制 -关联性和非关联性 -数据本地化 -``` - -- `kube-controller-manager` - -``` -Replication controller -Endpoint controller -Namespace controller -Serviceaccount controller -``` - -- `kubelet` - -``` -节点管理器 -确保调度到本节点的Pod的运行和健康 -``` - -- `kube-proxy` - -``` -Pod网络代理 -TCP/UDP请求转发 -负载均衡(Round Robin) -``` - -- `服务发现` - -``` -环境变量 -DNS – kube2sky, etcd,skydns -``` - -- `网络` - -``` -容器间互相通信 -节点和容器间互相通信 -每个Pod使用一个全局唯一的IP -``` - -- `高可用` - -``` -kubelet保证每一个master节点的服务正常运行 -系统监控程序确保kubelet正常运行 -Etcd集群 -多个apiserver进行负载均衡 -Master选举确保kube-scheduler和kube-controller-manager高可用 -``` ## Harbor 镜像私有仓库 @@ -1205,7 +804,6 @@ Master选举确保kube-scheduler和kube-controller-manager高可用 ## 资料 - 书籍:《第一本 Docker 书》 -- 
[如何更“优雅”地部署Kubernetes集群](https://juejin.im/entry/5a03f98d6fb9a04524054516) - []() - []() - []() diff --git a/markdown-file/K8S-Install-And-Usage.md b/markdown-file/K8S-Install-And-Usage.md new file mode 100644 index 00000000..263cfe32 --- /dev/null +++ b/markdown-file/K8S-Install-And-Usage.md @@ -0,0 +1,442 @@ + + +# Kubernets(K8S) 使用 + +## 环境说明 + +- CentOS 7.5(不准确地说:要求必须是 CentOS 7 64位) +- Docker + +## Kubernetes + +- 目前流行的容器编排系统 +- 简称:K8S +- 官网: +- 主要解决几个问题: + - `调度` + - `生命周期及健康状况` + - `服务发现` + - `监控` + - `认证` + - `容器聚合` +- 主要角色:Master、Node + + +#### 安装准备 - Kubernetes 1.13 版本 + +- 推荐最低 2C2G,优先:2C4G 或以上 +- 特别说明:1.13 之前的版本,由于网络问题,需要各种设置,这里就不再多说了。1.13 之后相对就简单了点。 +- 优先官网软件包:kubeadm +- 官网资料: + - issues 入口: + - 源码入口: + - 安装指导: + - 按官网要求做下检查: + - 网络环境: + - 端口检查: + - **对 Docker 版本的支持,这里官网推荐的是 18.06**: +- 三大核心工具包,都需要各自安装,并且注意版本关系: + - `kubeadm`: the command to bootstrap the cluster. + - 集群部署、管理工具 + - `kubelet`: the component that runs on all of the machines in your cluster and does things like starting pods and containers. + - 具体执行层面的管理 Pod 和 Docker 工具 + - `kubectl`: the command line util to talk to your cluster. + - 操作 k8s 的命令行入口工具 +- 官网插件安装过程的故障排查: +- 其他部署方案: + - + - + - + +#### 开始安装 - Kubernetes 1.13.2 版本 + +- 三台机子: + - master-1:`192.168.0.127` + - node-1:`192.168.0.128` + - node-2:`192.168.0.129` +- 官网最新版本: +- 官网 1.13 版本的 changelog: +- **所有节点安装 Docker 18.06,并设置阿里云源** + - 可以参考:[点击我o(∩_∩)o ](https://github.com/judasn/Linux-Tutorial/blob/master/favorite-file/shell/install_docker_k8s_disable_firewalld_centos7-aliyun.sh) + - 核心,查看可以安装的 Docker 列表:`yum list docker-ce --showduplicates` +- 所有节点设置 kubernetes repo 源,并安装 Kubeadm、Kubelet、Kubectl 都设置阿里云的源 +- Kubeadm 初始化集群过程当中,它会下载很多的镜像,默认也是去 Google 家里下载。但是 1.13 新增了一个配置:`--image-repository` 算是救了命。 +- 具体流程: + +``` +主机时间同步 +systemctl start chronyd.service && systemctl enable chronyd.service + +systemctl stop firewalld.service +systemctl disable firewalld.service +systemctl disable iptables.service + + +setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config + +swapoff -a && sysctl -w vm.swappiness=0 + + + + + +hostnamectl --static set-hostname k8s-master-1 +hostnamectl --static set-hostname k8s-node-1 +hostnamectl --static set-hostname k8s-node-2 + + +vim /etc/hosts +192.168.0.127 k8s-master-1 +192.168.0.128 k8s-node-1 +192.168.0.129 k8s-node-2 + +master 免密 +生产密钥对 +ssh-keygen -t rsa + + +公钥内容写入 authorized_keys +cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys + +测试: +ssh localhost + +将公钥复制到其他机子 +ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@k8s-node-1(根据提示输入 k8s-node-1 密码) +ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@k8s-node-2(根据提示输入 k8s-node-2 密码) + + +在 linux01 上测试 +ssh k8s-master-1 +ssh k8s-node-1 +ssh k8s-node-2 + + + +vim /etc/yum.repos.d/kubernetes.repo + +[kubernetes] +name=Kubernetes +baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/ +enabled=1 +gpgcheck=1 +repo_gpgcheck=1 +gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg + + +scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-1:/etc/yum.repos.d/ +scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-2:/etc/yum.repos.d/ + + + +所有机子 +iptables -P FORWARD ACCEPT + +所有机子 +yum install -y kubelet-1.13.2 kubeadm-1.13.2 kubectl-1.13.2 --disableexcludes=kubernetes + + +所有机子 +vim /etc/cni/net.d/10-flannel.conflist,内容 +{ + "name": "cbr0", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": 
"portmap", + "capabilities": { + "portMappings": true + } + } + ] +} + + + +所有机子 +vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +最后一行添加:Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs" + +所有机子 +systemctl enable kubelet && systemctl start kubelet + +kubeadm version +kubectl version + +必须配置: +vim /etc/sysctl.d/k8s.conf +net.bridge.bridge-nf-call-ip6tables = 1 +net.bridge.bridge-nf-call-iptables = 1 +net.ipv4.ip_forward=1 +vm.swappiness=0 + + +scp -r /etc/sysctl.d/k8s.conf root@k8s-node-1:/etc/sysctl.d/ +scp -r /etc/sysctl.d/k8s.conf root@k8s-node-2:/etc/sysctl.d/ + +modprobe br_netfilter && sysctl -p /etc/sysctl.d/k8s.conf + +``` + +- 初始化 master 节点: + +``` +echo 1 > /proc/sys/net/ipv4/ip_forward + +推荐: +kubeadm init \ +--image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \ +--pod-network-cidr 10.244.0.0/16 \ +--kubernetes-version 1.13.2 \ +--service-cidr 10.96.0.0/12 \ +--apiserver-advertise-address=0.0.0.0 \ +--ignore-preflight-errors=Swap + +10.244.0.0/16是 flannel 插件固定使用的ip段,它的值取决于你准备安装哪个网络插件 + +这个过程会下载一些 docker 镜像,时间可能会比较久,看你网络情况。 +终端会输出核心内容: +Your Kubernetes master has initialized successfully! + +To start using your cluster, you need to run the following as a regular user: + + mkdir -p $HOME/.kube + sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + +You should now deploy a pod network to the cluster. +Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: + https://kubernetes.io/docs/concepts/cluster-administration/addons/ + +You can now join any number of machines by running the following on each node +as root: + + kubeadm join 192.168.0.127:6443 --token 6m0emc.x9uim283uzevn3pm --discovery-token-ca-cert-hash sha256:c8b1b72de1eabc71df5490afa7cd8dd1c1952234e65b30262ed5084c9d1f10c2 + + + +master 机子: +mkdir -p $HOME/.kube +sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config +sudo chown $(id -u):$(id -g) $HOME/.kube/config + + +查询我们的 token +kubeadm token list + +kubectl cluster-info + + +master 安装 Flannel +cd /opt && wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml + +kubectl apply -f /opt/kube-flannel.yml + +``` + +- 到 node 节点进行加入: + +``` +echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables + +kubeadm join 192.168.0.127:6443 --token 6m0emc.x9uim283uzevn3pm --discovery-token-ca-cert-hash sha256:c8b1b72de1eabc71df5490afa7cd8dd1c1952234e65b30262ed5084c9d1f10c2 + + +在 master 节点上:kubectl get cs +NAME STATUS MESSAGE ERROR +controller-manager Healthy ok +scheduler Healthy ok +etcd-0 Healthy {"health": "true"} +结果都是 Healthy 则表示可以了,不然就得检查。必要时可以用:`kubeadm reset` 重置,重新进行集群初始化 + + + +验证: +kubectl get nodes +如果还是 NotReady,则查看错误信息: +kubectl get pods --all-namespaces,其中:Pending/ContainerCreating/ImagePullBackOff 都是 Pod 没有就绪,我们可以这样查看对应 Pod 遇到了什么问题 +kubectl describe pod --namespace=kube-system +或者:kubectl logs -n kube-system +tail -f /var/log/messages + +``` + + + + +#### 主要概念 + +- Master 节点,负责集群的调度、集群的管理 + - 常见组件: + - kube-apiserver:API服务 + - kube-scheduler:调度 + - Kube-Controller-Manager:容器编排 + - Etcd:保存了整个集群的状态 + - Kube-proxy:负责为 Service 提供 cluster 内部的服务发现和负载均衡 + - Kube-DNS:负责为整个集群提供 DNS 服务 +- node 节点,负责容器相关的处理 + +- `Pods` + +``` +创建,调度以及管理的最小单元 +共存的一组容器的集合 +容器共享PID,网络,IPC以及UTS命名空间 +容器共享存储卷 +短暂存在 +``` + +- `Volumes` + +``` +数据持久化 +Pod中容器共享数据 +生命周期 +支持多种类型的数据卷 – emptyDir, hostpath, gcePersistentDisk, awsElasticBlockStore, nfs, iscsi, glusterfs, secrets +``` + +- `Labels` + +``` +用以标示对象(如Pod)的key/value对 +组织并选择对象子集 +``` + +- `Replication 
Controllers` + +``` +确保在任一时刻运行指定数目的Pod +容器重新调度 +规模调整 +在线升级 +多发布版本跟踪 +``` + +- `Services` + +``` +抽象一系列Pod并定义其访问规则 +固定IP地址和DNS域名 +通过环境变量和DNS发现服务 +负载均衡 +外部服务 – ClusterIP, NodePort, LoadBalancer +``` + + +#### 主要组成模块 + +- `etcd` + +``` +高可用的Key/Value存储 +只有apiserver有读写权限 +使用etcd集群确保数据可靠性 +``` + +- `apiserver` + +``` +Kubernetes系统入口, REST +认证 +授权 +访问控制 +服务帐号 +资源限制 +``` + +- `kube-scheduler` + +``` +资源需求 +服务需求 +硬件/软件/策略限制 +关联性和非关联性 +数据本地化 +``` + +- `kube-controller-manager` + +``` +Replication controller +Endpoint controller +Namespace controller +Serviceaccount controller +``` + +- `kubelet` + +``` +节点管理器 +确保调度到本节点的Pod的运行和健康 +``` + +- `kube-proxy` + +``` +Pod网络代理 +TCP/UDP请求转发 +负载均衡(Round Robin) +``` + +- `服务发现` + +``` +环境变量 +DNS – kube2sky, etcd,skydns +``` + +- `网络` + +``` +容器间互相通信 +节点和容器间互相通信 +每个Pod使用一个全局唯一的IP +``` + +- `高可用` + +``` +kubelet保证每一个master节点的服务正常运行 +系统监控程序确保kubelet正常运行 +Etcd集群 +多个apiserver进行负载均衡 +Master选举确保kube-scheduler和kube-controller-manager高可用 +``` + + +## 资料 + +- [如何更“优雅”地部署Kubernetes集群](https://juejin.im/entry/5a03f98d6fb9a04524054516) +- []() +- []() +- []() +- []() +- []() +- []() + + + + + + + + + + + + + + + + + + + + + From 4273bbe9fdb46933c0d1f1ef6b267209c76227ea Mon Sep 17 00:00:00 2001 From: judasn Date: Sun, 10 Feb 2019 23:12:37 +0800 Subject: [PATCH 050/124] =?UTF-8?q?2019-02-10=20=E5=AE=8C=E5=96=84=20K8S?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/K8S-Install-And-Usage.md | 88 ++++++++++++++++++++++++-- 1 file changed, 82 insertions(+), 6 deletions(-) diff --git a/markdown-file/K8S-Install-And-Usage.md b/markdown-file/K8S-Install-And-Usage.md index 263cfe32..5deab113 100644 --- a/markdown-file/K8S-Install-And-Usage.md +++ b/markdown-file/K8S-Install-And-Usage.md @@ -48,7 +48,7 @@ - - -#### 开始安装 - Kubernetes 1.13.2 版本 +#### 开始安装 - Kubernetes 1.13.3 版本 - 三台机子: - master-1:`192.168.0.127` @@ -133,11 +133,11 @@ scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-2:/etc/yum.repos.d/ iptables -P FORWARD ACCEPT 所有机子 -yum install -y kubelet-1.13.2 kubeadm-1.13.2 kubectl-1.13.2 --disableexcludes=kubernetes +yum install -y kubelet-1.13.3 kubeadm-1.13.3 kubectl-1.13.3 --disableexcludes=kubernetes 所有机子 -vim /etc/cni/net.d/10-flannel.conflist,内容 +mkdir -p /etc/cni/net.d && vim /etc/cni/net.d/10-flannel.conflist,内容 { "name": "cbr0", "plugins": [ @@ -193,7 +193,7 @@ echo 1 > /proc/sys/net/ipv4/ip_forward kubeadm init \ --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \ --pod-network-cidr 10.244.0.0/16 \ ---kubernetes-version 1.13.2 \ +--kubernetes-version 1.13.3 \ --service-cidr 10.96.0.0/12 \ --apiserver-advertise-address=0.0.0.0 \ --ignore-preflight-errors=Swap @@ -202,6 +202,56 @@ kubeadm init \ 这个过程会下载一些 docker 镜像,时间可能会比较久,看你网络情况。 终端会输出核心内容: +[init] Using Kubernetes version: v1.13.3 +[preflight] Running pre-flight checks +[preflight] Pulling images required for setting up a Kubernetes cluster +[preflight] This might take a minute or two, depending on the speed of your internet connection +[preflight] You can also perform this action in beforehand using 'kubeadm config images pull' +[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" +[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" +[kubelet-start] Activating the kubelet service +[certs] Using certificateDir folder "/etc/kubernetes/pki" +[certs] Generating "ca" certificate and key +[certs] Generating "apiserver" certificate and key +[certs] 
apiserver serving cert is signed for DNS names [k8s-master-1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.0.127] +[certs] Generating "apiserver-kubelet-client" certificate and key +[certs] Generating "front-proxy-ca" certificate and key +[certs] Generating "front-proxy-client" certificate and key +[certs] Generating "etcd/ca" certificate and key +[certs] Generating "etcd/healthcheck-client" certificate and key +[certs] Generating "apiserver-etcd-client" certificate and key +[certs] Generating "etcd/server" certificate and key +[certs] etcd/server serving cert is signed for DNS names [k8s-master-1 localhost] and IPs [192.168.0.127 127.0.0.1 ::1] +[certs] Generating "etcd/peer" certificate and key +[certs] etcd/peer serving cert is signed for DNS names [k8s-master-1 localhost] and IPs [192.168.0.127 127.0.0.1 ::1] +[certs] Generating "sa" key and public key +[kubeconfig] Using kubeconfig folder "/etc/kubernetes" +[kubeconfig] Writing "admin.conf" kubeconfig file +[kubeconfig] Writing "kubelet.conf" kubeconfig file +[kubeconfig] Writing "controller-manager.conf" kubeconfig file +[kubeconfig] Writing "scheduler.conf" kubeconfig file +[control-plane] Using manifest folder "/etc/kubernetes/manifests" +[control-plane] Creating static Pod manifest for "kube-apiserver" +[control-plane] Creating static Pod manifest for "kube-controller-manager" +[control-plane] Creating static Pod manifest for "kube-scheduler" +[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" +[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s +[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s +[apiclient] All control plane components are healthy after 32.002189 seconds +[uploadconfig] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace +[kubelet] Creating a ConfigMap "kubelet-config-1.13" in namespace kube-system with the configuration for the kubelets in the cluster +[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "k8s-master-1" as an annotation +[mark-control-plane] Marking the node k8s-master-1 as control-plane by adding the label "node-role.kubernetes.io/master=''" +[mark-control-plane] Marking the node k8s-master-1 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule] +[bootstrap-token] Using token: 3ag6sz.y8rmcz5xec50xkz1 +[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles +[bootstraptoken] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials +[bootstraptoken] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token +[bootstraptoken] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster +[bootstraptoken] creating the "cluster-info" ConfigMap in the "kube-public" namespace +[addons] Applied essential addon: CoreDNS +[addons] Applied essential addon: kube-proxy + Your Kubernetes master has initialized successfully! 
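+
+# 补充(假设性示例,非 kubeadm 原始输出):看到上面的成功提示后,
+# 可以先确认 apiserver 的 6443 端口已经在监听,再继续后面的步骤
+ss -tlnp | grep 6443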
To start using your cluster, you need to run the following as a regular user: @@ -217,7 +267,9 @@ Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: You can now join any number of machines by running the following on each node as root: - kubeadm join 192.168.0.127:6443 --token 6m0emc.x9uim283uzevn3pm --discovery-token-ca-cert-hash sha256:c8b1b72de1eabc71df5490afa7cd8dd1c1952234e65b30262ed5084c9d1f10c2 + kubeadm join 192.168.0.127:6443 --token 3ag6sz.y8rmcz5xec50xkz1 --discovery-token-ca-cert-hash sha256:912c325aee8dc7c583c36a1c15d7ef1d7a1abeea5dc8a19e96d24536c13373e6 + + @@ -245,7 +297,31 @@ kubectl apply -f /opt/kube-flannel.yml ``` echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables -kubeadm join 192.168.0.127:6443 --token 6m0emc.x9uim283uzevn3pm --discovery-token-ca-cert-hash sha256:c8b1b72de1eabc71df5490afa7cd8dd1c1952234e65b30262ed5084c9d1f10c2 +kubeadm join 192.168.0.127:6443 --token 3ag6sz.y8rmcz5xec50xkz1 --discovery-token-ca-cert-hash sha256:912c325aee8dc7c583c36a1c15d7ef1d7a1abeea5dc8a19e96d24536c13373e6 +这时候终端会输出: + +[preflight] Running pre-flight checks +[discovery] Trying to connect to API Server "192.168.0.127:6443" +[discovery] Created cluster-info discovery client, requesting info from "https://192.168.0.127:6443" +[discovery] Requesting info from "https://192.168.0.127:6443" again to validate TLS against the pinned public key +[discovery] Cluster info signature and contents are valid and TLS certificate validates against pinned roots, will use API Server "192.168.0.127:6443" +[discovery] Successfully established connection with API Server "192.168.0.127:6443" +[join] Reading configuration from the cluster... +[join] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml' +[kubelet] Downloading configuration for the kubelet from the "kubelet-config-1.13" ConfigMap in the kube-system namespace +[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" +[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" +[kubelet-start] Activating the kubelet service +[tlsbootstrap] Waiting for the kubelet to perform the TLS Bootstrap... +[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "k8s-node-1" as an annotation + +This node has joined the cluster: +* Certificate signing request was sent to apiserver and a response was received. +* The Kubelet was informed of the new secure connection details. + +Run 'kubectl get nodes' on the master to see this node join the cluster. 
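+
+# 补充(假设性示例):node 加入后,回到 master 用 watch 持续观察节点状态,
+# 新节点一般先显示 NotReady,等 flannel 网络就绪后变为 Ready(Ctrl+C 退出观察)
+watch kubectl get nodes -o wide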
+ + 在 master 节点上:kubectl get cs From 1c77c2a0d2068f1e4ac92ec9885487d010103cd0 Mon Sep 17 00:00:00 2001 From: judasn Date: Sun, 10 Feb 2019 23:33:16 +0800 Subject: [PATCH 051/124] =?UTF-8?q?2019-02-10=20=E5=AE=8C=E5=96=84=20K8S?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/K8S-Install-And-Usage.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/markdown-file/K8S-Install-And-Usage.md b/markdown-file/K8S-Install-And-Usage.md index 5deab113..a43b673f 100644 --- a/markdown-file/K8S-Install-And-Usage.md +++ b/markdown-file/K8S-Install-And-Usage.md @@ -75,7 +75,7 @@ systemctl disable iptables.service setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config swapoff -a && sysctl -w vm.swappiness=0 - +echo "vm.swappiness = 0" >> /etc/sysctl.conf @@ -277,10 +277,10 @@ master 机子: mkdir -p $HOME/.kube sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config sudo chown $(id -u):$(id -g) $HOME/.kube/config - +export KUBECONFIG=$HOME/.kube/config 查询我们的 token -kubeadm token list +kubectl cluster-info kubectl cluster-info @@ -322,6 +322,9 @@ This node has joined the cluster: Run 'kubectl get nodes' on the master to see this node join the cluster. +如果 node 节点加入失败,可以:kubeadm reset,再来重新 join + + 在 master 节点上:kubectl get cs From ea85f55bbae2d0f6a995dcd240aa62021ac424c4 Mon Sep 17 00:00:00 2001 From: judasn Date: Sun, 10 Feb 2019 23:43:09 +0800 Subject: [PATCH 052/124] =?UTF-8?q?2019-02-10=20=E5=AE=8C=E5=96=84=20K8S?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/K8S-Install-And-Usage.md | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/markdown-file/K8S-Install-And-Usage.md b/markdown-file/K8S-Install-And-Usage.md index a43b673f..46f6f622 100644 --- a/markdown-file/K8S-Install-And-Usage.md +++ b/markdown-file/K8S-Install-And-Usage.md @@ -163,13 +163,9 @@ mkdir -p /etc/cni/net.d && vim /etc/cni/net.d/10-flannel.conflist,内容 vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 最后一行添加:Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs" -所有机子 -systemctl enable kubelet && systemctl start kubelet -kubeadm version -kubectl version -必须配置: +所有机子必须配置: vim /etc/sysctl.d/k8s.conf net.bridge.bridge-nf-call-ip6tables = 1 net.bridge.bridge-nf-call-iptables = 1 @@ -182,6 +178,15 @@ scp -r /etc/sysctl.d/k8s.conf root@k8s-node-2:/etc/sysctl.d/ modprobe br_netfilter && sysctl -p /etc/sysctl.d/k8s.conf + + +所有机子 +systemctl enable kubelet && systemctl start kubelet + +kubeadm version +kubectl version + + ``` - 初始化 master 节点: @@ -267,8 +272,7 @@ Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: You can now join any number of machines by running the following on each node as root: - kubeadm join 192.168.0.127:6443 --token 3ag6sz.y8rmcz5xec50xkz1 --discovery-token-ca-cert-hash sha256:912c325aee8dc7c583c36a1c15d7ef1d7a1abeea5dc8a19e96d24536c13373e6 - +kubeadm join 192.168.0.127:6443 --token cdo0z3.msyp89yp8zk6lhmf --discovery-token-ca-cert-hash sha256:c0d8942e801962232f0e02b757d13ed0034fa07ab3953764e6a6c67b6688963c @@ -297,7 +301,8 @@ kubectl apply -f /opt/kube-flannel.yml ``` echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables -kubeadm join 192.168.0.127:6443 --token 3ag6sz.y8rmcz5xec50xkz1 --discovery-token-ca-cert-hash sha256:912c325aee8dc7c583c36a1c15d7ef1d7a1abeea5dc8a19e96d24536c13373e6 +kubeadm join 192.168.0.127:6443 --token cdo0z3.msyp89yp8zk6lhmf 
--discovery-token-ca-cert-hash sha256:c0d8942e801962232f0e02b757d13ed0034fa07ab3953764e6a6c67b6688963c + 这时候终端会输出: [preflight] Running pre-flight checks From 5ffb78e64d2a6cad612eae6d7f21c3292943b264 Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 11 Feb 2019 00:03:57 +0800 Subject: [PATCH 053/124] =?UTF-8?q?2019-02-10=20=E5=AE=8C=E5=96=84=20K8S?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/K8S-Install-And-Usage.md | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/markdown-file/K8S-Install-And-Usage.md b/markdown-file/K8S-Install-And-Usage.md index 46f6f622..b844a9c1 100644 --- a/markdown-file/K8S-Install-And-Usage.md +++ b/markdown-file/K8S-Install-And-Usage.md @@ -48,7 +48,7 @@ - - -#### 开始安装 - Kubernetes 1.13.3 版本 +#### 开始安装 - Kubernetes 1.13.2 版本 - 三台机子: - master-1:`192.168.0.127` @@ -74,8 +74,8 @@ systemctl disable iptables.service setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config -swapoff -a && sysctl -w vm.swappiness=0 echo "vm.swappiness = 0" >> /etc/sysctl.conf +swapoff -a && sysctl -w vm.swappiness=0 @@ -133,7 +133,7 @@ scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-2:/etc/yum.repos.d/ iptables -P FORWARD ACCEPT 所有机子 -yum install -y kubelet-1.13.3 kubeadm-1.13.3 kubectl-1.13.3 --disableexcludes=kubernetes +yum install -y kubelet-1.13.2 kubeadm-1.13.2 kubectl-1.13.2 --disableexcludes=kubernetes 所有机子 @@ -198,7 +198,7 @@ echo 1 > /proc/sys/net/ipv4/ip_forward kubeadm init \ --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \ --pod-network-cidr 10.244.0.0/16 \ ---kubernetes-version 1.13.3 \ +--kubernetes-version 1.13.2 \ --service-cidr 10.96.0.0/12 \ --apiserver-advertise-address=0.0.0.0 \ --ignore-preflight-errors=Swap @@ -207,7 +207,7 @@ kubeadm init \ 这个过程会下载一些 docker 镜像,时间可能会比较久,看你网络情况。 终端会输出核心内容: -[init] Using Kubernetes version: v1.13.3 +[init] Using Kubernetes version: v1.13.2 [preflight] Running pre-flight checks [preflight] Pulling images required for setting up a Kubernetes cluster [preflight] This might take a minute or two, depending on the speed of your internet connection @@ -223,12 +223,12 @@ kubeadm init \ [certs] Generating "front-proxy-ca" certificate and key [certs] Generating "front-proxy-client" certificate and key [certs] Generating "etcd/ca" certificate and key -[certs] Generating "etcd/healthcheck-client" certificate and key -[certs] Generating "apiserver-etcd-client" certificate and key [certs] Generating "etcd/server" certificate and key [certs] etcd/server serving cert is signed for DNS names [k8s-master-1 localhost] and IPs [192.168.0.127 127.0.0.1 ::1] +[certs] Generating "apiserver-etcd-client" certificate and key [certs] Generating "etcd/peer" certificate and key [certs] etcd/peer serving cert is signed for DNS names [k8s-master-1 localhost] and IPs [192.168.0.127 127.0.0.1 ::1] +[certs] Generating "etcd/healthcheck-client" certificate and key [certs] Generating "sa" key and public key [kubeconfig] Using kubeconfig folder "/etc/kubernetes" [kubeconfig] Writing "admin.conf" kubeconfig file @@ -241,14 +241,13 @@ kubeadm init \ [control-plane] Creating static Pod manifest for "kube-scheduler" [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". 
This can take up to 4m0s -[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s -[apiclient] All control plane components are healthy after 32.002189 seconds +[apiclient] All control plane components are healthy after 18.002437 seconds [uploadconfig] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace [kubelet] Creating a ConfigMap "kubelet-config-1.13" in namespace kube-system with the configuration for the kubelets in the cluster [patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "k8s-master-1" as an annotation [mark-control-plane] Marking the node k8s-master-1 as control-plane by adding the label "node-role.kubernetes.io/master=''" [mark-control-plane] Marking the node k8s-master-1 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule] -[bootstrap-token] Using token: 3ag6sz.y8rmcz5xec50xkz1 +[bootstrap-token] Using token: yes6xf.5huewerdtfxafde5 [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles [bootstraptoken] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials [bootstraptoken] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token @@ -272,8 +271,7 @@ Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: You can now join any number of machines by running the following on each node as root: -kubeadm join 192.168.0.127:6443 --token cdo0z3.msyp89yp8zk6lhmf --discovery-token-ca-cert-hash sha256:c0d8942e801962232f0e02b757d13ed0034fa07ab3953764e6a6c67b6688963c - + kubeadm join 192.168.0.127:6443 --token yes6xf.5huewerdtfxafde5 --discovery-token-ca-cert-hash sha256:98dd48ac4298e23f9c275309bfd8b69c5b3166752ccf7a36c2affcb7c1988781 @@ -284,7 +282,7 @@ sudo chown $(id -u):$(id -g) $HOME/.kube/config export KUBECONFIG=$HOME/.kube/config 查询我们的 token -kubectl cluster-info +kubeadm token list kubectl cluster-info @@ -301,7 +299,7 @@ kubectl apply -f /opt/kube-flannel.yml ``` echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables -kubeadm join 192.168.0.127:6443 --token cdo0z3.msyp89yp8zk6lhmf --discovery-token-ca-cert-hash sha256:c0d8942e801962232f0e02b757d13ed0034fa07ab3953764e6a6c67b6688963c +kubeadm join 192.168.0.127:6443 --token yes6xf.5huewerdtfxafde5 --discovery-token-ca-cert-hash sha256:98dd48ac4298e23f9c275309bfd8b69c5b3166752ccf7a36c2affcb7c1988781 这时候终端会输出: From b29d7be1afba72b4d072f39e1c8cea94572ac0c4 Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 11 Feb 2019 22:29:42 +0800 Subject: [PATCH 054/124] =?UTF-8?q?2019-02-11=20=E5=AE=8C=E5=96=84=20K8S?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/K8S-Install-And-Usage.md | 140 ++++++++++++++----------- 1 file changed, 76 insertions(+), 64 deletions(-) diff --git a/markdown-file/K8S-Install-And-Usage.md b/markdown-file/K8S-Install-And-Usage.md index b844a9c1..bd14fb1d 100644 --- a/markdown-file/K8S-Install-And-Usage.md +++ b/markdown-file/K8S-Install-And-Usage.md @@ -48,7 +48,7 @@ - - -#### 开始安装 - Kubernetes 1.13.2 版本 +#### 开始安装 - Kubernetes 1.13.3 版本 - 三台机子: - master-1:`192.168.0.127` @@ -61,25 +61,29 @@ - 核心,查看可以安装的 Docker 列表:`yum list docker-ce --showduplicates` - 所有节点设置 kubernetes repo 源,并安装 Kubeadm、Kubelet、Kubectl 都设置阿里云的源 - Kubeadm 初始化集群过程当中,它会下载很多的镜像,默认也是去 Google 家里下载。但是 1.13 
新增了一个配置:`--image-repository` 算是救了命。 -- 具体流程: -``` -主机时间同步 -systemctl start chronyd.service && systemctl enable chronyd.service +#### 安装具体流程 + +- 同步所有机子时间:`systemctl start chronyd.service && systemctl enable chronyd.service` +- 所有机子禁用防火墙、selinux、swap +``` systemctl stop firewalld.service systemctl disable firewalld.service systemctl disable iptables.service +iptables -P FORWARD ACCEPT setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config echo "vm.swappiness = 0" >> /etc/sysctl.conf swapoff -a && sysctl -w vm.swappiness=0 +``` +- 给各自机子设置 hostname 和 hosts - +``` hostnamectl --static set-hostname k8s-master-1 hostnamectl --static set-hostname k8s-node-1 hostnamectl --static set-hostname k8s-node-2 @@ -89,30 +93,30 @@ vim /etc/hosts 192.168.0.127 k8s-master-1 192.168.0.128 k8s-node-1 192.168.0.129 k8s-node-2 +``` -master 免密 -生产密钥对 -ssh-keygen -t rsa +- 给 master 设置免密 +``` +ssh-keygen -t rsa -公钥内容写入 authorized_keys cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys -测试: + ssh localhost -将公钥复制到其他机子 ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@k8s-node-1(根据提示输入 k8s-node-1 密码) ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@k8s-node-2(根据提示输入 k8s-node-2 密码) - -在 linux01 上测试 ssh k8s-master-1 ssh k8s-node-1 ssh k8s-node-2 +``` +- 给所有机子设置 yum 源 +``` vim /etc/yum.repos.d/kubernetes.repo [kubernetes] @@ -126,18 +130,13 @@ gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-1:/etc/yum.repos.d/ scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-2:/etc/yum.repos.d/ +``` +- 给 master 机子创建 flannel 配置文件 +``` +mkdir -p /etc/cni/net.d && vim /etc/cni/net.d/10-flannel.conflist -所有机子 -iptables -P FORWARD ACCEPT - -所有机子 -yum install -y kubelet-1.13.2 kubeadm-1.13.2 kubectl-1.13.2 --disableexcludes=kubernetes - - -所有机子 -mkdir -p /etc/cni/net.d && vim /etc/cni/net.d/10-flannel.conflist,内容 { "name": "cbr0", "plugins": [ @@ -156,17 +155,15 @@ mkdir -p /etc/cni/net.d && vim /etc/cni/net.d/10-flannel.conflist,内容 } ] } +``` -所有机子 -vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf -最后一行添加:Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs" - - +- 给所有机子创建配置 -所有机子必须配置: +``` vim /etc/sysctl.d/k8s.conf + net.bridge.bridge-nf-call-ip6tables = 1 net.bridge.bridge-nf-call-iptables = 1 net.ipv4.ip_forward=1 @@ -177,16 +174,31 @@ scp -r /etc/sysctl.d/k8s.conf root@k8s-node-1:/etc/sysctl.d/ scp -r /etc/sysctl.d/k8s.conf root@k8s-node-2:/etc/sysctl.d/ modprobe br_netfilter && sysctl -p /etc/sysctl.d/k8s.conf +``` + +- 给所有机子安装组件 + +``` +yum install -y kubelet-1.13.3 kubeadm-1.13.3 kubectl-1.13.3 --disableexcludes=kubernetes +``` + +- 给所有机子添加一个变量 + +``` +vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +最后一行添加:Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs" +``` -所有机子 +- 启动所有机子 + +``` systemctl enable kubelet && systemctl start kubelet kubeadm version kubectl version - ``` - 初始化 master 节点: @@ -194,20 +206,18 @@ kubectl version ``` echo 1 > /proc/sys/net/ipv4/ip_forward -推荐: + kubeadm init \ --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \ --pod-network-cidr 10.244.0.0/16 \ ---kubernetes-version 1.13.2 \ ---service-cidr 10.96.0.0/12 \ ---apiserver-advertise-address=0.0.0.0 \ +--kubernetes-version 1.13.3 \ --ignore-preflight-errors=Swap -10.244.0.0/16是 flannel 插件固定使用的ip段,它的值取决于你准备安装哪个网络插件 +其中 10.244.0.0/16 是 flannel 插件固定使用的ip段,它的值取决于你准备安装哪个网络插件 这个过程会下载一些 docker 镜像,时间可能会比较久,看你网络情况。 终端会输出核心内容: -[init] Using Kubernetes version: v1.13.2 +[init] Using Kubernetes version: 
v1.13.3 [preflight] Running pre-flight checks [preflight] Pulling images required for setting up a Kubernetes cluster [preflight] This might take a minute or two, depending on the speed of your internet connection @@ -216,19 +226,19 @@ kubeadm init \ [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" [kubelet-start] Activating the kubelet service [certs] Using certificateDir folder "/etc/kubernetes/pki" -[certs] Generating "ca" certificate and key -[certs] Generating "apiserver" certificate and key -[certs] apiserver serving cert is signed for DNS names [k8s-master-1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.0.127] -[certs] Generating "apiserver-kubelet-client" certificate and key [certs] Generating "front-proxy-ca" certificate and key [certs] Generating "front-proxy-client" certificate and key [certs] Generating "etcd/ca" certificate and key [certs] Generating "etcd/server" certificate and key [certs] etcd/server serving cert is signed for DNS names [k8s-master-1 localhost] and IPs [192.168.0.127 127.0.0.1 ::1] -[certs] Generating "apiserver-etcd-client" certificate and key [certs] Generating "etcd/peer" certificate and key [certs] etcd/peer serving cert is signed for DNS names [k8s-master-1 localhost] and IPs [192.168.0.127 127.0.0.1 ::1] [certs] Generating "etcd/healthcheck-client" certificate and key +[certs] Generating "apiserver-etcd-client" certificate and key +[certs] Generating "ca" certificate and key +[certs] Generating "apiserver-kubelet-client" certificate and key +[certs] Generating "apiserver" certificate and key +[certs] apiserver serving cert is signed for DNS names [k8s-master-1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.0.127] [certs] Generating "sa" key and public key [kubeconfig] Using kubeconfig folder "/etc/kubernetes" [kubeconfig] Writing "admin.conf" kubeconfig file @@ -241,13 +251,13 @@ kubeadm init \ [control-plane] Creating static Pod manifest for "kube-scheduler" [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". 
This can take up to 4m0s -[apiclient] All control plane components are healthy after 18.002437 seconds +[apiclient] All control plane components are healthy after 19.001686 seconds [uploadconfig] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace [kubelet] Creating a ConfigMap "kubelet-config-1.13" in namespace kube-system with the configuration for the kubelets in the cluster [patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "k8s-master-1" as an annotation [mark-control-plane] Marking the node k8s-master-1 as control-plane by adding the label "node-role.kubernetes.io/master=''" [mark-control-plane] Marking the node k8s-master-1 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule] -[bootstrap-token] Using token: yes6xf.5huewerdtfxafde5 +[bootstrap-token] Using token: 8tpo9l.jlw135r8559kaad4 [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles [bootstraptoken] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials [bootstraptoken] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token @@ -271,35 +281,40 @@ Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: You can now join any number of machines by running the following on each node as root: - kubeadm join 192.168.0.127:6443 --token yes6xf.5huewerdtfxafde5 --discovery-token-ca-cert-hash sha256:98dd48ac4298e23f9c275309bfd8b69c5b3166752ccf7a36c2affcb7c1988781 - + kubeadm join 192.168.0.127:6443 --token 8tpo9l.jlw135r8559kaad4 --discovery-token-ca-cert-hash sha256:d6594ccc1310a45cbebc45f1c93f5ac113873786365ed63efcf667c952d7d197 +``` +- 给 master 机子设置配置 -master 机子: +``` mkdir -p $HOME/.kube sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config sudo chown $(id -u):$(id -g) $HOME/.kube/config export KUBECONFIG=$HOME/.kube/config +``` -查询我们的 token +- 在 master 上查看一些环境 + +``` kubeadm token list kubectl cluster-info +``` +- 给 master 安装 Flannel -master 安装 Flannel +``` cd /opt && wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml kubectl apply -f /opt/kube-flannel.yml - ``` -- 到 node 节点进行加入: +- 到 node 节点加入集群: ``` echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables -kubeadm join 192.168.0.127:6443 --token yes6xf.5huewerdtfxafde5 --discovery-token-ca-cert-hash sha256:98dd48ac4298e23f9c275309bfd8b69c5b3166752ccf7a36c2affcb7c1988781 +kubeadm join 192.168.0.127:6443 --token 8tpo9l.jlw135r8559kaad4 --discovery-token-ca-cert-hash sha256:d6594ccc1310a45cbebc45f1c93f5ac113873786365ed63efcf667c952d7d197 这时候终端会输出: @@ -323,35 +338,32 @@ This node has joined the cluster: * The Kubelet was informed of the new secure connection details. Run 'kubectl get nodes' on the master to see this node join the cluster. 
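+
+# 补充(假设性示例):如果 join 时提示 token 失效(token 默认 24 小时过期),
+# 可以在 master 上重新生成一条完整的加入命令,再拿到 node 上执行
+kubeadm token create --print-join-command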
+``` +- 如果 node 节点加入失败,可以:`kubeadm reset`,再来重新 join +- 在 master 节点上:`kubectl get cs` -如果 node 节点加入失败,可以:kubeadm reset,再来重新 join - - - - -在 master 节点上:kubectl get cs +``` NAME STATUS MESSAGE ERROR controller-manager Healthy ok scheduler Healthy ok etcd-0 Healthy {"health": "true"} 结果都是 Healthy 则表示可以了,不然就得检查。必要时可以用:`kubeadm reset` 重置,重新进行集群初始化 +``` +- 在 master 节点上:`kubectl get nodes` -验证: -kubectl get nodes -如果还是 NotReady,则查看错误信息: -kubectl get pods --all-namespaces,其中:Pending/ContainerCreating/ImagePullBackOff 都是 Pod 没有就绪,我们可以这样查看对应 Pod 遇到了什么问题 +``` +如果还是 NotReady,则查看错误信息:kubectl get pods --all-namespaces +其中:Pending/ContainerCreating/ImagePullBackOff 都是 Pod 没有就绪,我们可以这样查看对应 Pod 遇到了什么问题 kubectl describe pod --namespace=kube-system 或者:kubectl logs -n kube-system tail -f /var/log/messages - ``` - #### 主要概念 - Master 节点,负责集群的调度、集群的管理 From 2c3218cd7712b062fab1dfd3461a4e99d543b8b6 Mon Sep 17 00:00:00 2001 From: "Judas.n" Date: Sun, 24 Feb 2019 15:45:51 +0800 Subject: [PATCH 055/124] Update Vim-Install-And-Settings.md --- markdown-file/Vim-Install-And-Settings.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/markdown-file/Vim-Install-And-Settings.md b/markdown-file/Vim-Install-And-Settings.md index 77560192..dfdd3653 100644 --- a/markdown-file/Vim-Install-And-Settings.md +++ b/markdown-file/Vim-Install-And-Settings.md @@ -117,6 +117,10 @@ - 进入 vim 后,按 `F5`,然后 `shift + insert` 进行粘贴。这种事就不会错乱了。 - 原因是:`vim ~/.vimrc` 中有一行这样的设置:`set pastetoggle=` +## 其他常用命令 + +- 对两个文件进行对比:`vimdiff /opt/1.txt /opt/2.txt` + ## 资料 - [vim几个小技巧(批量替换,列编辑)](http://blogread.cn/it/article/1010?f=sa) From f45dfcc237e20196036bc89ceb05583f35d6d312 Mon Sep 17 00:00:00 2001 From: judasn Date: Sat, 9 Mar 2019 13:15:31 +0800 Subject: [PATCH 056/124] =?UTF-8?q?2019-03-09=20=E8=A1=A5=E5=85=85=20ES=20?= =?UTF-8?q?GUI=20=E5=AE=A2=E6=88=B7=E7=AB=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/Elasticsearch-Base.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md index 0176b74e..a7e98bba 100644 --- a/markdown-file/Elasticsearch-Base.md +++ b/markdown-file/Elasticsearch-Base.md @@ -25,7 +25,7 @@ services: ------------------------------------------------------------------- -## Elasticsearch 6.5.x 安装(适配与 5.5.x) +## Elasticsearch 6.5.x 安装(适配与 5.5.x,6.6.x) #### 环境 @@ -114,6 +114,12 @@ type=rpm-md - `cd /usr/share/elasticsearch && bin/elasticsearch-plugin install x-pack` +#### GUI 客户端工具 + +- 优先推荐: +- + + #### 安装 Chrome 扩展的 Head - 下载地址: From bbde6a08dcdee5169910a594e44453e748cc6c76 Mon Sep 17 00:00:00 2001 From: "Judas.n" Date: Wed, 13 Mar 2019 16:16:26 +0800 Subject: [PATCH 057/124] Update README.md --- README.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 946d100e..477aa85c 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,10 @@ -# Java 程序员眼中的 Linux + +## 团队 DevOps 方案参考 + + + + + ## 初衷(Original Intention) From 1072550e98ca6142ddf679c71f950bbe578a9948 Mon Sep 17 00:00:00 2001 From: judasn Date: Sun, 24 Mar 2019 23:17:07 +0800 Subject: [PATCH 058/124] =?UTF-8?q?2019-03-24=20=E8=A1=A5=E5=85=85=20TPPC-?= =?UTF-8?q?MySQL?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/Mysql-Install-And-Settings.md | 21 +++ markdown-file/Mysql-Test.md | 156 ++++++++++++++++++++ 2 files changed, 177 insertions(+) diff --git a/markdown-file/Mysql-Install-And-Settings.md 
b/markdown-file/Mysql-Install-And-Settings.md index 3ab9a80a..acb54084 100644 --- a/markdown-file/Mysql-Install-And-Settings.md +++ b/markdown-file/Mysql-Install-And-Settings.md @@ -148,6 +148,27 @@ rpm -Uvh https://mirror.webtatic.com/yum/el6/latest.rpm - `ln -s /usr/local/mysql/bin/mysqldump /usr/bin` - `ln -s /usr/local/mysql/bin/mysqlslap /usr/bin` +## MySQL 5.7 YUM 安装 + +- 官网: + +``` + +禁用 selinux:setenforce 0 + +wget https://repo.mysql.com//mysql57-community-release-el7-11.noarch.rpm +yum localinstall mysql57-community-release-el7-11.noarch.rpm +yum install mysql-community-server +一共 194M + +配置文件:/etc/my.cnf +systemctl start mysqld +systemctl status mysqld + +查看初次使用的临时密码:grep 'temporary password' /var/log/mysqld.log + +``` + ------------------------------------------------------------------- diff --git a/markdown-file/Mysql-Test.md b/markdown-file/Mysql-Test.md index 4a6edfb9..1b03d472 100644 --- a/markdown-file/Mysql-Test.md +++ b/markdown-file/Mysql-Test.md @@ -47,6 +47,8 @@ - `--debug-info` 代表要额外输出 CPU 以及内存的相关信息。 - `--only-print` 打印压力测试的时候 mysqlslap 到底做了什么事,通过 sql 语句方式告诉我们。 +------------------------------------------------------------------- + ## sysbench 工具 @@ -148,6 +150,8 @@ Threads fairness: events (avg/stddev): 2748.6000/132.71 --总处理事件数/标准偏差 execution time (avg/stddev): 119.9907/0.00 --总执行时间/标准偏差 +------------------------------------------------------------------- + ## QPS 和 TPS 和说明 ### 基本概念 @@ -171,6 +175,157 @@ Threads fairness: - 每天300wPV的在单台机器上,这台机器需要多少QPS?对于这样的问题,假设每天80%的访问集中在20%的时间里,这20%时间叫做峰值时间。( 3000000 * 0.8 ) / (3600 * 24 * 0.2 ) = 139 (QPS). - 如果一台机器的QPS是58,需要几台机器来支持?答:139 / 58 = 3 +------------------------------------------------------------------- + +## Percona TPCC-MySQL 测试工具(优先推荐) + +- 可以较好地模拟真实测试结果数据 +- 官网主页: + +``` +TPC-C 是专门针对联机交易处理系统(OLTP系统)的规范,一般情况下我们也把这类系统称为业务处理系统。 +TPC-C是TPC(Transaction Processing Performance Council)组织发布的一个测试规范,用于模拟测试复杂的在线事务处理系统。其测试结果包括每分钟事务数(tpmC),以及每事务的成本(Price/tpmC)。 +在进行大压力下MySQL的一些行为时经常使用。 +``` + +### 安装 + +- 先确定本机安装过 MySQL +- 并且安装过:`yum install mysql-devel` + +``` +git clone https://github.com/Percona-Lab/tpcc-mysql +cd tpcc-mysql/src +make + +如果make没报错,就会在tpcc-mysql 根目录文件夹下生成tpcc二进制命令行工具tpcc_load、tpcc_start +``` + +### 测试的几个表介绍 + +``` +tpcc-mysql的业务逻辑及其相关的几个表作用如下: +New-Order:新订单,主要对应 new_orders 表 +Payment:支付,主要对应 orders、history 表 +Order-Status:订单状态,主要对应 orders、order_line 表 +Delivery:发货,主要对应 order_line 表 +Stock-Level:库存,主要对应 stock 表 + +其他相关表: +客户:主要对应customer表 +地区:主要对应district表 +商品:主要对应item表 +仓库:主要对应warehouse表 +``` + +### 准备 + +- 测试阿里云 ECS 与 RDS 是否相通: +- 记得在 RDS 添加账号和给账号配置权限,包括:配置权限、数据权限(默认添加账号后都是没有开启的,还要自己手动开启) +- 还要添加内网 ECS 到 RDS 的白名单 IP 里面 +- 或者在 RDS 上开启外网访问设置,但是也设置 IP 白名单(访问 ip.cn 查看自己的外网 IP 地址,比如:120.85.112.97) + +``` +ping rm-wz9066qo44wn500t55o.mysql.rds.aliyuncs.com + +mysql -h rm-wz9066qo44wn500t55o.mysql.rds.aliyuncs.com -P 3306 -u myaccount -p + +输入密码:Aa123456 +``` + + + +``` +创库,名字为:TPCC: +CREATE DATABASE TPCC DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci; + + +导入项目中的出初始化数据脚本: +创建表:create_table.sql +创建索引和外键:add_fkey_idx.sql +``` + + +### 测试 + +- 数据库:阿里云 RDS-MySQL-5.7-2C4G +- 测试机:阿里云 ECS-4C8G-CentOS7.6 + +- 需要注意的是 tpcc 默认会读取 /var/lib/mysql/mysql.sock 这个 socket 文件。因此,如果你的socket文件不在相应路径的话,可以做个软连接,或者通过TCP/IP的方式连接测试服务器 +- 准备数据: + +``` +cd /opt/tpcc-mysql +./tpcc_load -h rm-wz9066qo44wn500t55o.mysql.rds.aliyuncs.com -P 3306 -d TPCC -u myaccount -p Aa123456 -w 100 +-w 100 表示创建 100 个仓库数据 +这个过程花费时间还是挺长的,我这台 ECS 结果是这样: +差不多 9s == 5000 个数据。 +也就是: +10W 个数据需要 == 20 X 9s == 180s == 3min +1000W == 5h +一共差不多花了 10h 左右。 + +插入过程 
RDS-2C4G 的监控情况: +CPU利用率 4% +内存 18% ~ 40% (随着数据增加而增大) +连接数:1% +IOPS:4% +已使用存储空间:5.5G ~ 10G + +要模拟出够真实的数据,仓库不要太少,一般要大于 100, +当然你也可以 select count(*) from 上面的各个表,看下 100 个库生成的数据,是不是跟你预期数据差不多,是的话就够了。 + +select count(*) from customer; +10s X 10 X 100 = 10000s + +select count(*) from district; +select count(*) from history; +select count(*) from item; + 100 个仓库 == 1000 X 100 == 100000 == 10W +select count(*) from new_orders; +select count(*) from order_line; +select count(*) from orders; +select count(*) from stock; + 100 个仓库 == 100000 X 100 == 10000000 = 1000W +select count(*) from warehouse; +``` + +- 开始测试: + +``` + +tpcc_start -h rm-wz9066qo44wn500t55o.mysql.rds.aliyuncs.com -P 3306 -d TPCC -u myaccount -p Aa123456 -w 100 -c 200 -r 300 -l 2400 -f /opt/mysql_tpcc_100_20190324 + +-w 100 表示 100 个仓库数据 +-c 200 表示并发 200 个线程 +-r 300 表示预热 300 秒 +-l 2400 表示持续压测 2400 秒 + +``` + + +### 报表 + + +``` +行数据表示:10, 1187(0):1.682|2.175, 1187(0):0.336|0.473, 118(0):0.172|0.226, 118(0):1.864|2.122, 119(0):6.953|8.107 + +10:时间戳,每十秒产生一条数据。 +1187(0):1.682|2.175:表示10秒内完成1187笔新订单业务。 +1187(0):0.336|0.473: 支付业务, +118(0):1.864|2.122:查询业务, +118(0):0.172|0.226: 发货业务, +119(0):6.953|8.107: 库存查询业务 + + + + +188.000 TpmC +TpmC结果值(每分钟事务数,该值是第一次统计结果中的新订单事务数除以总耗时分钟数,例如本例中是:372/2=186) +tpmC值在国内外被广泛用于衡量计算机系统的事务处理能力 +``` + + ## 资料 @@ -181,3 +336,4 @@ Threads fairness: - - - +- \ No newline at end of file From 5be37d37b1067de2b93b87c33741b5172e6df457 Mon Sep 17 00:00:00 2001 From: zhang Date: Mon, 25 Mar 2019 17:06:29 +0800 Subject: [PATCH 059/124] =?UTF-8?q?2019-03-25=20=E8=A1=A5=E5=85=85=20TPCC-?= =?UTF-8?q?MySQL?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/Mysql-Test.md | 253 +++++++++++++++++++++++++++++++----- 1 file changed, 221 insertions(+), 32 deletions(-) diff --git a/markdown-file/Mysql-Test.md b/markdown-file/Mysql-Test.md index 1b03d472..2a5799e0 100644 --- a/markdown-file/Mysql-Test.md +++ b/markdown-file/Mysql-Test.md @@ -224,11 +224,12 @@ Stock-Level:库存,主要对应 stock 表 - 记得在 RDS 添加账号和给账号配置权限,包括:配置权限、数据权限(默认添加账号后都是没有开启的,还要自己手动开启) - 还要添加内网 ECS 到 RDS 的白名单 IP 里面 - 或者在 RDS 上开启外网访问设置,但是也设置 IP 白名单(访问 ip.cn 查看自己的外网 IP 地址,比如:120.85.112.97) +- RDS 的内网地址和外网地址不一样,要认真看。 ``` -ping rm-wz9066qo44wn500t55o.mysql.rds.aliyuncs.com +ping rm-wz9v0vej02ys79jbj.mysql.rds.aliyuncs.com -mysql -h rm-wz9066qo44wn500t55o.mysql.rds.aliyuncs.com -P 3306 -u myaccount -p +mysql -h rm-wz9v0vej02ys79jbj.mysql.rds.aliyuncs.com -P 3306 -u myaccount -p 输入密码:Aa123456 ``` @@ -242,65 +243,71 @@ CREATE DATABASE TPCC DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci; 导入项目中的出初始化数据脚本: 创建表:create_table.sql +/usr/bin/mysql -h rm-wz9v0vej02ys79jbj.mysql.rds.aliyuncs.com -u myaccount -p tpcc < /root/tpcc-mysql/create_table.sql + 创建索引和外键:add_fkey_idx.sql +/usr/bin/mysql -h rm-wz9v0vej02ys79jbj.mysql.rds.aliyuncs.com -u myaccount -p tpcc < /root/tpcc-mysql/add_fkey_idx.sql ``` ### 测试 - 数据库:阿里云 RDS-MySQL-5.7-2C4G -- 测试机:阿里云 ECS-4C8G-CentOS7.6 +- 测试机:阿里云 ECS-4C4G-CentOS7.6 +- 根据测试,不同的 ECS 测试机,不同的 RDS 测试结果有时候差距挺大的,这个很蛋疼。 - 需要注意的是 tpcc 默认会读取 /var/lib/mysql/mysql.sock 这个 socket 文件。因此,如果你的socket文件不在相应路径的话,可以做个软连接,或者通过TCP/IP的方式连接测试服务器 - 准备数据: ``` cd /opt/tpcc-mysql -./tpcc_load -h rm-wz9066qo44wn500t55o.mysql.rds.aliyuncs.com -P 3306 -d TPCC -u myaccount -p Aa123456 -w 100 --w 100 表示创建 100 个仓库数据 -这个过程花费时间还是挺长的,我这台 ECS 结果是这样: -差不多 9s == 5000 个数据。 -也就是: -10W 个数据需要 == 20 X 9s == 180s == 3min -1000W == 5h -一共差不多花了 10h 左右。 +./tpcc_load -h rm-wz9v0vej02ys79jbj.mysql.rds.aliyuncs.com -P 3306 -d TPCC -u myaccount -p 
Aa123456 -w 80 +-w 80 表示创建 80 个仓库数据 +这个过程花费时间还是挺长的,建议测试机是高性能计算型。2CPU 差不多要 8h,你自己估量下。 +我这边 RDS 监控中,曲线上每秒 insert 差不多在 2W 差不多,如果你没有这个数,速度可能就很慢了。 +我这边差不多用了 2.5h 完成数据准备。 + 插入过程 RDS-2C4G 的监控情况: -CPU利用率 4% -内存 18% ~ 40% (随着数据增加而增大) +CPU利用率 24% +内存 30% ~ 40% (随着数据增加而增大) 连接数:1% -IOPS:4% +IOPS:9% 已使用存储空间:5.5G ~ 10G 要模拟出够真实的数据,仓库不要太少,一般要大于 100, -当然你也可以 select count(*) from 上面的各个表,看下 100 个库生成的数据,是不是跟你预期数据差不多,是的话就够了。 +下面是基于 80 个库的最终数据: select count(*) from customer; -10s X 10 X 100 = 10000s - + 2400000 select count(*) from district; + 800 select count(*) from history; + 2400000 select count(*) from item; - 100 个仓库 == 1000 X 100 == 100000 == 10W + 100000 select count(*) from new_orders; + 720000 select count(*) from order_line; + 23996450 select count(*) from orders; + 2400000 select count(*) from stock; - 100 个仓库 == 100000 X 100 == 10000000 = 1000W + 8000000 select count(*) from warehouse; + 80 ``` - 开始测试: ``` -tpcc_start -h rm-wz9066qo44wn500t55o.mysql.rds.aliyuncs.com -P 3306 -d TPCC -u myaccount -p Aa123456 -w 100 -c 200 -r 300 -l 2400 -f /opt/mysql_tpcc_100_20190324 +./tpcc_start -h rm-wz9v0vej02ys79jbj.mysql.rds.aliyuncs.com -P 3306 -d TPCC -u myaccount -p Aa123456 -w 80 -c 200 -r 300 -l 1800 -f /opt/mysql_tpcc_100_20190325 -w 100 表示 100 个仓库数据 -c 200 表示并发 200 个线程 -r 300 表示预热 300 秒 --l 2400 表示持续压测 2400 秒 - +-l 1800 表示持续压测 1800 秒 ``` @@ -308,25 +315,207 @@ tpcc_start -h rm-wz9066qo44wn500t55o.mysql.rds.aliyuncs.com -P 3306 -d TPCC -u m ``` -行数据表示:10, 1187(0):1.682|2.175, 1187(0):0.336|0.473, 118(0):0.172|0.226, 118(0):1.864|2.122, 119(0):6.953|8.107 + +188.000 TpmC +TpmC结果值(每分钟事务数,该值是第一次统计结果中的新订单事务数除以总耗时分钟数,例如本例中是:372/2=186) +tpmC值在国内外被广泛用于衡量计算机系统的事务处理能力 +``` -10:时间戳,每十秒产生一条数据。 -1187(0):1.682|2.175:表示10秒内完成1187笔新订单业务。 -1187(0):0.336|0.473: 支付业务, -118(0):1.864|2.122:查询业务, -118(0):0.172|0.226: 发货业务, -119(0):6.953|8.107: 库存查询业务 +- RDS-2C4G-80个仓库结果: +- CPU:100%,内存:34%,连接数:17%,IOPS:62%,磁盘空间:20G +``` +1780, trx: 979, 95%: 1849.535, 99%: 2402.613, max_rt: 3401.947, 986|3248.772, 98|698.821, 103|4202.110, 101|4547.416 +1790, trx: 1021, 95%: 1898.903, 99%: 2700.936, max_rt: 3848.142, 999|3150.117, 100|500.740, 102|3600.104, 100|5551.834 +1800, trx: 989, 95%: 1899.472, 99%: 2847.899, max_rt: 4455.064, 989|3049.921, 101|699.144, 97|3599.021, 102|5151.141 + +STOPPING THREADS........................................................................................................................................................................................................ + + + [0] sc:2 lt:174378 rt:0 fl:0 avg_rt: 1192.8 (5) + [1] sc:253 lt:173935 rt:0 fl:0 avg_rt: 542.7 (5) + [2] sc:4726 lt:12712 rt:0 fl:0 avg_rt: 144.7 (5) + [3] sc:0 lt:17435 rt:0 fl:0 avg_rt: 3029.8 (80) + [4] sc:0 lt:17435 rt:0 fl:0 avg_rt: 3550.7 (20) + in 1800 sec. 
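+
+# 注(补充说明,非 tpcc 原始输出):上下两组 [0]~[4] 五行依次对应
+# New-Order/Payment/Order-Status/Delivery/Stock-Level 五类事务,
+# 其中 sc 为在规定响应时间内完成的事务数,lt 为超时才完成的事务数,rt 为重试次数,fl 为失败次数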
+ + + [0] sc:2 lt:174378 rt:0 fl:0 + [1] sc:254 lt:174096 rt:0 fl:0 + [2] sc:4726 lt:12712 rt:0 fl:0 + [3] sc:0 lt:17437 rt:0 fl:0 + [4] sc:0 lt:17435 rt:0 fl:0 + + (all must be [OK]) + [transaction percentage] + Payment: 43.45% (>=43.0%) [OK] + Order-Status: 4.35% (>= 4.0%) [OK] + Delivery: 4.35% (>= 4.0%) [OK] + Stock-Level: 4.35% (>= 4.0%) [OK] + [response time (at least 90% passed)] + New-Order: 0.00% [NG] * + Payment: 0.15% [NG] * + Order-Status: 27.10% [NG] * + Delivery: 0.00% [NG] * + Stock-Level: 0.00% [NG] * -188.000 TpmC -TpmC结果值(每分钟事务数,该值是第一次统计结果中的新订单事务数除以总耗时分钟数,例如本例中是:372/2=186) -tpmC值在国内外被广泛用于衡量计算机系统的事务处理能力 + 5812.667 TpmC +``` + +- 升级:RDS-4C8G-80个仓库结果 +- CPU:100%,内存:55%,连接数:10%,IOPS:20%,磁盘空间:25G + ``` +1780, trx: 2303, 95%: 796.121, 99%: 1099.640, max_rt: 1596.883, 2293|2249.288, 232|256.393, 230|1694.050, 235|2550.775 +1790, trx: 2336, 95%: 798.030, 99%: 1093.403, max_rt: 1547.840, 2338|2803.739, 234|305.185, 232|1799.869, 228|2453.748 +1800, trx: 2305, 95%: 801.381, 99%: 1048.528, max_rt: 1297.465, 2306|1798.565, 229|304.329, 227|1649.609, 233|2549.599 + +STOPPING THREADS........................................................................................................................................................................................................ + + + [0] sc:7 lt:406567 rt:0 fl:0 avg_rt: 493.7 (5) + [1] sc:10485 lt:395860 rt:0 fl:0 avg_rt: 240.1 (5) + [2] sc:24615 lt:16045 rt:0 fl:0 avg_rt: 49.4 (5) + [3] sc:0 lt:40651 rt:0 fl:0 avg_rt: 1273.6 (80) + [4] sc:0 lt:40656 rt:0 fl:0 avg_rt: 1665.3 (20) + in 1800 sec. + + + [0] sc:7 lt:406569 rt:0 fl:0 + [1] sc:10487 lt:396098 rt:0 fl:0 + [2] sc:24615 lt:16045 rt:0 fl:0 + [3] sc:0 lt:40655 rt:0 fl:0 + [4] sc:0 lt:40659 rt:0 fl:0 + + (all must be [OK]) + [transaction percentage] + Payment: 43.46% (>=43.0%) [OK] + Order-Status: 4.35% (>= 4.0%) [OK] + Delivery: 4.35% (>= 4.0%) [OK] + Stock-Level: 4.35% (>= 4.0%) [OK] + [response time (at least 90% passed)] + New-Order: 0.00% [NG] * + Payment: 2.58% [NG] * + Order-Status: 60.54% [NG] * + Delivery: 0.00% [NG] * + Stock-Level: 0.00% [NG] * + + 13552.467 TpmC +``` +- 升级:RDS-8C16G-80个仓库结果 +- CPU:100%,内存:35%,连接数:5%,IOPS:18%,磁盘空间:30G + +``` +1780, trx: 4502, 95%: 398.131, 99%: 501.634, max_rt: 772.128, 4473|740.073, 446|183.361, 448|1042.264, 442|1302.569 +1790, trx: 4465, 95%: 398.489, 99%: 541.424, max_rt: 803.659, 4476|845.313, 448|152.917, 450|997.319, 454|1250.160 +1800, trx: 4506, 95%: 397.774, 99%: 501.334, max_rt: 747.074, 4508|701.625, 453|108.619, 450|1052.293, 451|1107.277 + +STOPPING THREADS........................................................................................................................................................................................................ + + + [0] sc:20 lt:803738 rt:0 fl:0 avg_rt: 240.5 (5) + [1] sc:13844 lt:789535 rt:0 fl:0 avg_rt: 128.5 (5) + [2] sc:54560 lt:25817 rt:0 fl:0 avg_rt: 22.1 (5) + [3] sc:0 lt:80372 rt:0 fl:0 avg_rt: 739.8 (80) + [4] sc:0 lt:80378 rt:0 fl:0 avg_rt: 771.1 (20) + in 1800 sec. 
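+
+# 注(补充观察):对比前两轮,这一轮 8C16G 的 CPU 依然跑满 100%,而内存/IOPS 余量较大,
+# 说明该压测场景下瓶颈主要在 CPU,TpmC 大致随核数成比例增长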
+ + + [0] sc:20 lt:803747 rt:0 fl:0 + [1] sc:13845 lt:789916 rt:0 fl:0 + [2] sc:54561 lt:25817 rt:0 fl:0 + [3] sc:0 lt:80377 rt:0 fl:0 + [4] sc:0 lt:80381 rt:0 fl:0 + + (all must be [OK]) + [transaction percentage] + Payment: 43.47% (>=43.0%) [OK] + Order-Status: 4.35% (>= 4.0%) [OK] + Delivery: 4.35% (>= 4.0%) [OK] + Stock-Level: 4.35% (>= 4.0%) [OK] + [response time (at least 90% passed)] + New-Order: 0.00% [NG] * + Payment: 1.72% [NG] * + Order-Status: 67.88% [NG] * + Delivery: 0.00% [NG] * + Stock-Level: 0.00% [NG] * + + + 26791.934 TpmC +``` + + +- 升级:RDS-16C64G-80个仓库结果 +- CPU:100%,内存:18%,连接数:2%,IOPS:10%,磁盘空间:40G + +``` +1780, trx: 8413, 95%: 203.560, 99%: 279.322, max_rt: 451.010, 8414|441.849, 841|92.900, 839|583.340, 843|644.276 +1790, trx: 8269, 95%: 204.599, 99%: 282.602, max_rt: 444.075, 8262|412.414, 827|91.551, 831|665.421, 824|616.396 +1800, trx: 8395, 95%: 202.285, 99%: 255.026, max_rt: 436.136, 8404|446.292, 839|87.081, 839|609.221, 842|697.509 + +STOPPING THREADS........................................................................................................................................................................................................ + + + [0] sc:37 lt:1532893 rt:0 fl:0 avg_rt: 124.8 (5) + [1] sc:36091 lt:1496111 rt:0 fl:0 avg_rt: 68.5 (5) + [2] sc:105738 lt:47555 rt:0 fl:0 avg_rt: 11.4 (5) + [3] sc:0 lt:153285 rt:0 fl:0 avg_rt: 404.6 (80) + [4] sc:0 lt:153293 rt:0 fl:0 avg_rt: 389.5 (20) + in 1800 sec. + + + [0] sc:37 lt:1532918 rt:0 fl:0 + [1] sc:36093 lt:1496868 rt:0 fl:0 + [2] sc:105739 lt:47556 rt:0 fl:0 + [3] sc:0 lt:153297 rt:0 fl:0 + [4] sc:0 lt:153298 rt:0 fl:0 + + (all must be [OK]) + [transaction percentage] + Payment: 43.47% (>=43.0%) [OK] + Order-Status: 4.35% (>= 4.0%) [OK] + Delivery: 4.35% (>= 4.0%) [OK] + Stock-Level: 4.35% (>= 4.0%) [OK] + [response time (at least 90% passed)] + New-Order: 0.00% [NG] * + Payment: 2.36% [NG] * + Order-Status: 68.98% [NG] * + Delivery: 0.00% [NG] * + Stock-Level: 0.00% [NG] * + + + 51097.668 TpmC +``` + + +- 几轮下来,最终数据量: + +``` +select count(*) from customer; + 2400000 +select count(*) from district; + 800 +select count(*) from history; + 5779395 +select count(*) from item; + 100000 +select count(*) from new_orders; + 764970 +select count(*) from order_line; + 57453708 +select count(*) from orders; + 5745589 +select count(*) from stock; + 8000000 +select count(*) from warehouse; + 80 +``` + ## 资料 From 667b481dbe45a9d46981811fe5133711ee92cbeb Mon Sep 17 00:00:00 2001 From: zhang Date: Thu, 11 Apr 2019 13:49:25 +0800 Subject: [PATCH 060/124] 2019-04-11 mongo --- markdown-file/MongoDB-Install-And-Settings.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/markdown-file/MongoDB-Install-And-Settings.md b/markdown-file/MongoDB-Install-And-Settings.md index 71cddd81..e25c6ffe 100644 --- a/markdown-file/MongoDB-Install-And-Settings.md +++ b/markdown-file/MongoDB-Install-And-Settings.md @@ -362,6 +362,15 @@ $ne ->not equal 不等于 - Robomongo: +## 基准测试 + +- + +## 随机生成测试数据 + +- + + ## 资料 - From d8f97bb2bd80c13db5799d455fc1eb3620a2e88b Mon Sep 17 00:00:00 2001 From: zhang Date: Thu, 11 Apr 2019 14:12:38 +0800 Subject: [PATCH 061/124] 2019-04-11 mongo --- markdown-file/MongoDB-Install-And-Settings.md | 56 +++++++++++++++++-- 1 file changed, 51 insertions(+), 5 deletions(-) diff --git a/markdown-file/MongoDB-Install-And-Settings.md b/markdown-file/MongoDB-Install-And-Settings.md index e25c6ffe..0a29fdae 100644 --- a/markdown-file/MongoDB-Install-And-Settings.md +++ 
b/markdown-file/MongoDB-Install-And-Settings.md @@ -22,8 +22,9 @@ - 请查看介绍中支持哪个版本: - 目前 201712 支持 MongoDB 3.4 +------------------------------------------------------------------- -## Docker 下安装 MongoDB +## Docker 下安装 MongoDB(方式一) - 先创建一个宿主机以后用来存放数据的目录:`mkdir -p /data/docker/mongo/db` - 赋权:`chmod 777 -R /data/docker/mongo/db` @@ -51,16 +52,61 @@ db.createUser( - 导出:`docker exec -it cloud-mongo mongoexport -h 127.0.0.1 -u 用户名 -p 密码 -d 库名 -c 集合名 -o /data/db/mongodb.json --type json` - 导入:`docker exec -it cloud-mongo mongoimport -h 127.0.0.1 -u 用户名 -p 密码 -d 库名 -c 集合名 --file /data/db/mongodb.json --type json` -## 安装环境 -- CentOS 6 -## MongoDB 安装 +## Docker 下安装 MongoDB(方式二) + +- 先创建一个宿主机以后用来存放数据的目录:`mkdir -p /data/docker/mongo/db` +- 赋权:`chmod 777 -R /data/docker/mongo/db` +- 运行镜像:`docker run --name cloud-mongo2 -p 37017:27017 -v /data/docker/mongo/db:/data/db -d mongo:3.4 --auth` +- 进入容器中 mongo shell 交互界面:`docker exec -it cloud-mongo2 mongo` + - 进入 admin:`use admin` +- 创建一个超级用户: + +``` +db.createUser( + { + user: "mongo-admin", + pwd: "123456", + roles: [ + { role: "root", db: "admin" } + ] + } +) +``` + +- 验证账号:`db.auth("mongo-admin","123456")` + - 使用 db.auth() 可以对数据库中的用户进行验证,如果验证成功则返回 1,否则返回 0 +- 接着创建一个普通数据库和用户: + +``` + +use my_test_db + + +db.createUser( + { + user: "mytestuser", + pwd: "123456", + roles: [ + { role: "dbAdmin", db: "my_test_db" }, + { role: "readWrite", db: "my_test_db" } + ] + } +) + + +db.auth("mytestuser","123456") +``` + +------------------------------------------------------------------- + +## MongoDB 传统方式安装 - 关闭 SELinux - 编辑配置文件:`vim /etc/selinux/config` - 把 `SELINUX=enforcing` 改为 `SELINUX=disabled` -- MongoDB 安装 +- MongoDB 资料 - 官网: - 官网文档: - 此时(20170228) 最新稳定版本为:**3.4.2** From 2a6c1aa8c9418681a7e6a14fd15d39560a7f8037 Mon Sep 17 00:00:00 2001 From: zhang Date: Thu, 11 Apr 2019 14:13:46 +0800 Subject: [PATCH 062/124] 2019-04-11 mongo --- markdown-file/MongoDB-Install-And-Settings.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/markdown-file/MongoDB-Install-And-Settings.md b/markdown-file/MongoDB-Install-And-Settings.md index 0a29fdae..f05e7b74 100644 --- a/markdown-file/MongoDB-Install-And-Settings.md +++ b/markdown-file/MongoDB-Install-And-Settings.md @@ -60,10 +60,11 @@ db.createUser( - 赋权:`chmod 777 -R /data/docker/mongo/db` - 运行镜像:`docker run --name cloud-mongo2 -p 37017:27017 -v /data/docker/mongo/db:/data/db -d mongo:3.4 --auth` - 进入容器中 mongo shell 交互界面:`docker exec -it cloud-mongo2 mongo` - - 进入 admin:`use admin` - 创建一个超级用户: ``` +use admin + db.createUser( { user: "mongo-admin", @@ -73,10 +74,11 @@ db.createUser( ] } ) + +db.auth("mongo-admin","123456") ``` -- 验证账号:`db.auth("mongo-admin","123456")` - - 使用 db.auth() 可以对数据库中的用户进行验证,如果验证成功则返回 1,否则返回 0 +- 使用 db.auth() 可以对数据库中的用户进行验证,如果验证成功则返回 1,否则返回 0 - 接着创建一个普通数据库和用户: ``` From 86ae11355f0b52e1199ce748e2febf50ae840ff8 Mon Sep 17 00:00:00 2001 From: zhang Date: Fri, 12 Apr 2019 16:31:44 +0800 Subject: [PATCH 063/124] 2019-04-12 --- .editorconfig | 25 +++++++ markdown-file/Mysql-Optimize.md | 114 ++++++++++++++++++++++++++++---- 2 files changed, 125 insertions(+), 14 deletions(-) create mode 100644 .editorconfig diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 00000000..bc36a8e5 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,25 @@ +# http://editorconfig.org +# 官网首页有介绍:IntelliJ IDEA,VS Code 默认就支持,无需额外安装插件 +root = true + +# 空格替代Tab缩进在各种编辑工具下效果一致 +[*] +indent_style = space +indent_size = 4 +charset = utf-8 +end_of_line = lf +trim_trailing_whitespace = true 
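+# 保存时在文件末尾补一个换行,避免 diff 出现 "\ No newline at end of file" 的提示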
+insert_final_newline = true +max_line_length = 200 + + + +[*.java] +indent_style = tab + +[*.{json,yml}] +indent_size = 2 + +[*.md] +insert_final_newline = false +trim_trailing_whitespace = false diff --git a/markdown-file/Mysql-Optimize.md b/markdown-file/Mysql-Optimize.md index 1cbad473..583bc9c8 100644 --- a/markdown-file/Mysql-Optimize.md +++ b/markdown-file/Mysql-Optimize.md @@ -3,6 +3,15 @@ - 下面说的优化基于 MySQL 5.6,理论上 5.5 之后的都算适用,具体还是要看官网 +## 优秀材料 + +- +- <> +- <> +- <> +- <> + + ## 服务状态查询 - 查看当前数据库的状态,常用的有: @@ -10,14 +19,23 @@ - 查看刚刚执行 SQL 是否有警告信息:`SHOW WARNINGS;` - 查看刚刚执行 SQL 是否有错误信息:`SHOW ERRORS;` - 查看已经连接的所有线程状况:`SHOW FULL PROCESSLIST;` + - 输出参数说明: + - 可以结束某些连接:`kill id值` - 查看当前连接数量:`SHOW STATUS LIKE 'max_used_connections';` - 查看变量,在 my.cnf 中配置的变量会在这里显示:`SHOW VARIABLES;` + - 查询慢 SQL 配置:`show variables like 'slow%';` + - 开启慢 SQL:`set global slow_query_log='ON'` + - 查询慢 SQL 秒数值:` show variables like 'long%';` + - 调整秒速值:`set long_query_time=1;` - 查看当前MySQL 中已经记录了多少条慢查询,前提是配置文件中开启慢查询记录了. - `SHOW STATUS LIKE '%slow_queries%';` - 查询当前MySQL中查询、更新、删除执行多少条了,可以通过这个来判断系统是侧重于读还是侧重于写,如果是写要考虑使用读写分离。 - `SHOW STATUS LIKE '%Com_select%';` + - `SHOW STATUS LIKE '%Com_insert%';` - `SHOW STATUS LIKE '%Com_update%';` - `SHOW STATUS LIKE '%Com_delete%';` + - 如果 rollback 过多,说明程序肯定哪里存在问题 + - `SHOW STATUS LIKE '%Com_rollback%';` - 显示MySQL服务启动运行了多少时间,如果MySQL服务重启,该时间重新计算,单位秒 - `SHOW STATUS LIKE 'uptime';` - 显示查询缓存的状态情况 @@ -35,23 +53,22 @@ - 6. Qcache_not_cached # 没有进行缓存的查询的数量,通常是这些查询未被缓存或其类型不允许被缓存 - 7. Qcache_queries_in_cache # 在当前缓存的查询(和响应)的数量。 - 8. Qcache_total_blocks #缓存中块的数量。 +- 查询哪些表在被使用,是否有锁表:`SHOW OPEN TABLES WHERE In_use > 0;` +- 查询 innodb 状态(输出内容很多):`SHOW ENGINE INNODB STATUS;` +- 锁性能状态:`SHOW STATUS LIKE 'innodb_row_lock_%';` + - Innodb_row_lock_current_waits:当前等待锁的数量 + - Innodb_row_lock_time:系统启动到现在、锁定的总时间长度 + - Innodb_row_lock_time_avg:每次平均锁定的时间 + - Innodb_row_lock_time_max:最长一次锁定时间 + - Innodb_row_lock_waits:系统启动到现在、总共锁定次数 +- 帮我们分析表,并提出建议:`select * from my_table procedure analyse();` +## 系统表 - -## my.cnf 常配置项 - -- `key_buffer_size`,索引缓冲区大小。 -- `query_cache_size`,查询缓存。 -- `max_connections = 1000`,MySQL 的最大并发连接数 -- ``, -- ``, -- ``, -- ``, -- ``, -- ``, -- ``, -- ``, +- 当前运行的所有事务:`select * from information_schema.INNODB_TRX;` +- 当前事务出现的锁:`select * from information_schema.INNODB_LOCKS;` +- 锁等待的对应关系:`select * from information_schema.INNODB_LOCK_WAITS;` ## 查询优化 @@ -109,6 +126,46 @@ - 优化: - 可以使用连接查询(JOIN)代替子查询,连接查询时不需要建立临时表,其速度比子查询快。 +## 查询不走索引优化 + +- WHERE字句的查询条件里有不等于号(WHERE column!=…),MYSQL将无法使用索引 +- 类似地,如果WHERE字句的查询条件里使用了函数(如:WHERE DAY(column)=…),MYSQL将无法使用索引 +- 在JOIN操作中(需要从多个数据表提取数据时),MYSQL只有在主键和外键的数据类型相同时才能使用索引,否则即使建立了索引也不会使用 +- 如果WHERE子句的查询条件里使用了比较操作符LIKE和REGEXP,MYSQL只有在搜索模板的第一个字符不是通配符的情况下才能使用索引。比如说,如果查询条件是LIKE 'abc%',MYSQL将使用索引;如果条件是LIKE '%abc',MYSQL将不使用索引。 +- 在ORDER BY操作中,MYSQL只有在排序条件不是一个查询条件表达式的情况下才使用索引。尽管如此,在涉及多个数据表的查询里,即使有索引可用,那些索引在加快ORDER BY操作方面也没什么作用。 +- 如果某个数据列里包含着许多重复的值,就算为它建立了索引也不会有很好的效果。比如说,如果某个数据列里包含了净是些诸如“0/1”或“Y/N”等值,就没有必要为它创建一个索引。 +- 索引有用的情况下就太多了。基本只要建立了索引,除了上面提到的索引不会使用的情况下之外,其他情况只要是使用在WHERE条件里,ORDER BY 字段,联表字段,一般都是有效的。 建立索引要的就是有效果。 不然还用它干吗? 
如果不能确定在某个字段上建立的索引是否有效果,只要实际进行测试下比较下执行时间就知道。 +- 如果条件中有or(并且其中有or的条件是不带索引的),即使其中有条件带索引也不会使用(这也是为什么尽量少用or的原因)。注意:要想使用or,又想让索引生效,只能将or条件中的每个列都加上索引 +- 如果列类型是字符串,那一定要在条件中将数据使用引号引用起来,否则不使用索引 +- 如果mysql估计使用全表扫描要比使用索引快,则不使用索引 + + +## 其他查询优化 + +- 关联查询过程 + - 确保 ON 或者 using子句中的列上有索引 + - 确保任何的 groupby 和 orderby 中的表达式只涉及到一个表中的列。 +- count()函数优化 + - count()函数有一点需要特别注意:它是不统计值为NULL的字段的!所以:不能指定查询结果的某一列,来统计结果行数。即 count(xx column) 不太好。 + - 如果想要统计结果集,就使用 count(*),性能也会很好。 +- 分页查询(数据偏移量大的场景) + - 不允许跳页,只能上一页或者下一页 + - 使用 where 加上上一页 ID 作为条件(具体要看 explain 分析效果):`select xxx,xxx from test_table where id < '上页id分界值' order by id desc limit 20;` + +## 创表原则 + +- 所有字段均定义为 NOT NULL ,除非你真的想存 Null。因为表内默认值 Null 过多会影响优化器选择执行计划 + + +## 建立索引原则 + +- 使用区分度高的列作为索引,字段不重复的比例,区分度越高,索引树的分叉也就越多,一次性找到的概率也就越高。 +- 尽量使用字段长度小的列作为索引 +- 使用数据类型简单的列(int 型,固定长度) +- 选用 NOT NULL 的列。在MySQL中,含有空值的列很难进行查询优化,因为它们使得索引、索引的统计信息以及比较运算更加复杂。你应该用0、一个特殊的值或者一个空串代替空值。 +- 尽量的扩展索引,不要新建索引。比如表中已经有a的索引,现在要加(a,b)的索引,那么只需要修改原来的索引即可。这样也可避免索引重复。 + + ## 数据库结构优化 @@ -152,7 +209,36 @@ - 可以看我整理的这篇文章: - 由于 binlog 日志的读写频繁,可以考虑在 my.cnf 中配置,指定这个 binlog 日志到一个 SSD 硬盘上。 + +## 锁相关 + +InnoDB支持事务;InnoDB 采用了行级锁。也就是你需要修改哪行,就可以只锁定哪行。 +在 Mysql 中,行级锁并不是直接锁记录,而是锁索引。索引分为主键索引和非主键索引两种,如果一条sql 语句操作了主键索引,Mysql 就会锁定这条主键索引;如果一条语句操作了非主键索引,MySQL会先锁定该非主键索引,再锁定相关的主键索引。 +InnoDB 行锁是通过给索引项加锁实现的,如果没有索引,InnoDB 会通过隐藏的聚簇索引来对记录加锁。也就是说:如果不通过索引条件检索数据,那么InnoDB将对表中所有数据加锁,实际效果跟表锁一样。因为没有了索引,找到某一条记录就得扫描全表,要扫描全表,就得锁定表。 + + +数据库的增删改操作默认都会加排他锁,而查询不会加任何锁。 + +排他锁:对某一资源加排他锁,自身可以进行增删改查,其他人无法进行任何操作。语法为: +select * from table for update; + +共享锁:对某一资源加共享锁,自身可以读该资源,其他人也可以读该资源(也可以再继续加共享锁,即 共享锁可多个共存),但无法修改。 +要想修改就必须等所有共享锁都释放完之后。语法为: +select * from table lock in share mode; + + + ## 资料 - - +- +- +- +- +- <> +- <> +- <> +- <> +- <> +- <> \ No newline at end of file From 53c4e660f4e8243b94adb45c1acff793a2beb270 Mon Sep 17 00:00:00 2001 From: judasn Date: Sat, 13 Apr 2019 13:15:07 +0800 Subject: [PATCH 064/124] 2019-04-13 --- markdown-file/Mysql-Optimize.md | 154 ++++++++++++++++++++++---------- 1 file changed, 105 insertions(+), 49 deletions(-) diff --git a/markdown-file/Mysql-Optimize.md b/markdown-file/Mysql-Optimize.md index 583bc9c8..51b960aa 100644 --- a/markdown-file/Mysql-Optimize.md +++ b/markdown-file/Mysql-Optimize.md @@ -71,60 +71,106 @@ - 锁等待的对应关系:`select * from information_schema.INNODB_LOCK_WAITS;` -## 查询优化 - -- 使用 EXPLAIN 进行 SQL 语句分析:`EXPLAIN SELECT * FROM sys_user;` -- 得到的结果有下面几列: - - **id**,该列表示当前结果序号,无特殊意义,不重要 +## otpimizer trace + +- 作用:输入我们想要查看优化过程的查询语句,当该查询语句执行完成后,就可以到 information_schema 数据库下的OPTIMIZER_TRACE表中查看 mysql 自己帮我们的完整优化过程 +- 是否打开(默认都是关闭):`SHOW VARIABLES LIKE 'optimizer_trace';` + - one_line的值是控制输出格式的,如果为on那么所有输出都将在一行中展示,不适合人阅读,所以我们就保持其默认值为off吧。 +- 打开配置:`SET optimizer_trace="enabled=on";` +- 关闭配置:`SET optimizer_trace="enabled=off";` +- 查询优化结果:`SELECT * FROM information_schema.OPTIMIZER_TRACE;` + +``` +我们所说的基于成本的优化主要集中在optimize阶段,对于单表查询来说,我们主要关注optimize阶段的"rows_estimation"这个过程,这个过程深入分析了对单表查询的各种执行方案的成本; +对于多表连接查询来说,我们更多需要关注"considered_execution_plans"这个过程,这个过程里会写明各种不同的连接方式所对应的成本。 +反正优化器最终会选择成本最低的那种方案来作为最终的执行计划,也就是我们使用EXPLAIN语句所展现出的那种方案。 +如果有小伙伴对使用EXPLAIN语句展示出的对某个查询的执行计划很不理解,大家可以尝试使用optimizer trace功能来详细了解每一种执行方案对应的成本,相信这个功能能让大家更深入的了解MySQL查询优化器。 +``` + + + +## 查询优化(EXPLAIN 查看执行计划) + +- 使用 EXPLAIN 进行 SQL 语句分析:`EXPLAIN SELECT * FROM sys_user;`,效果如下: + +``` +id|select_type|table |partitions|type|possible_keys|key|key_len|ref|rows|filtered|Extra| +--|-----------|--------|----------|----|-------------|---|-------|---|----|--------|-----| + 1|SIMPLE |sys_user| |ALL | | | | | 2| 100| | +``` + +- 简单描述 + - 
`id`:在一个大的查询语句中每个 SELECT 关键字都对应一个唯一的id + - `select_type`:SELECT 关键字对应的那个查询的类型 + - `table`:表名 + - `partitions`:匹配的分区信息 + - `type`:针对单表的访问方法 + - `possible_keys`:可能用到的索引 + - `key`:实际上使用的索引 + - `key_len`:实际使用到的索引长度 + - `ref`:当使用索引列等值查询时,与索引列进行等值匹配的对象信息 + - `rows`:预估的需要读取的记录条数 + - `filtered`:某个表经过搜索条件过滤后剩余记录条数的百分比 + - `Extra`:一些额外的信息 +- 有多个结果的场景分析 + - 有子查询的一般都会有多个结果,id 是递增值。但是,有些场景查询优化器可能对子查询进行重写,转换为连接查询。所以有时候 id 就不是自增值。 + - 对于连接查询一般也会有多个接口,id 可能是相同值,相同值情况下,排在前面的记录表示驱动表,后面的表示被驱动表 + - UNION 场景会有 id 为 NULL 的情况,这是一个去重后临时表,合并多个结果集的临时表。但是,UNION ALL 不会有这种情况,因为这个不需要去重。 +- 根据具体的描述: + - **id**,该列表示当前结果序号 - **select_type**,表示 SELECT 语句的类型,有下面几种 - - SIMPLE,表示简单查询,其中不包括连接查询和子查询 - - PRIMARY,表示主查询,或者是最外面的查询语句。比如你使用一个子查询语句,比如这条 SQL:`EXPLAIN SELECT * FROM (SELECT sys_user_id FROM sys_user WHERE sys_user_id = 1) AS temp_table;` - - 这条 SQL 有两个结果,其中有一个结果的类型就是 PRIMARY - - UNION,使用 UNION 的 SQL 是这个类型 - - DERIVED,在 SQL 中 From 后面子查询 - - SUBQUERY,子查询 + - `SIMPLE`:表示简单查询,其中不包括 UNION 查询和子查询 + - `PRIMARY`:对于包含UNION、UNION ALL或者子查询的大查询来说,它是由几个小查询组成的,其中最左边的那个查询的select_type值就是PRIMARY + - `UNION`:对于包含UNION或者UNION ALL的大查询来说,它是由几个小查询组成的,其中除了最左边的那个小查询以外,其余的小查询的select_type值就是UNION + - `UNION RESULT`:MySQL选择使用临时表来完成UNION查询的去重工作,针对该临时表的查询的select_type就是UNION RESULT + - `SUBQUERY`:如果包含子查询的查询语句不能够转为对应的semi-join的形式,并且该子查询是不相关子查询,并且查询优化器决定采用将该子查询物化的方案来执行该子查询时,该子查询的第一个SELECT关键字代表的那个查询的select_type就是SUBQUERY + - `DEPENDENT SUBQUERY`:如果包含子查询的查询语句不能够转为对应的semi-join的形式,并且该子查询是相关子查询,则该子查询的第一个SELECT关键字代表的那个查询的select_type就是DEPENDENT SUBQUERY + - `DEPENDENT UNION`:在包含UNION或者UNION ALL的大查询中,如果各个小查询都依赖于外层查询的话,那除了最左边的那个小查询之外,其余的小查询的select_type的值就是DEPENDENT UNION + - `DERIVED`:对于采用物化的方式执行的包含派生表的查询,该派生表对应的子查询的select_type就是DERIVED + - `MATERIALIZED`:当查询优化器在执行包含子查询的语句时,选择将子查询物化之后与外层查询进行连接查询时,该子查询对应的select_type属性就是MATERIALIZED - 还有其他一些 - **table**,表名或者是子查询的一个结果集 - **type**,表示表的链接类型,分别有(以下的连接类型的顺序是从最佳类型到最差类型)**(这个属性重要)**: - 性能好: - - system,表仅有一行,这是 const 类型的特列,平时不会出现,这个也可以忽略不计。 - - const,数据表最多只有一个匹配行,因为只匹配一行数据,所以很快,常用于 PRIMARY KEY 或者 UNIQUE 索引的查询,可理解为 const 是最优化的。 - - eq_ref,mysql 手册是这样说的:"对于每个来自于前面的表的行组合,从该表中读取一行。这可能是最好的联接类型,除了 const 类型。它用在一个索引的所有部分被联接使用并且索引是 UNIQUE(唯一键) 也不是 PRIMARY KEY(主键)"。eq_ref 可以用于使用 = 比较带索引的列。 - - ref,查询条件索引既不是 UNIQUE(唯一键) 也不是 PRIMARY KEY(主键) 的情况。ref 可用于 = 或 < 或 > 操作符的带索引的列。 - - ref_or_null,该联接类型如同 ref,但是添加了 MySQL 可以专门搜索包含 NULL 值的行。在解决子查询中经常使用该联接类型的优化。 + - `system`:当表中只有一条记录并且该表使用的存储引擎的统计数据是精确的,比如MyISAM、Memory,那么对该表的访问方法就是system,平时不会出现,这个也可以忽略不计。 + - `const`:当我们根据主键或者唯一二级索引列与常数进行等值匹配时,对单表的访问方法就是const,常用于 PRIMARY KEY 或者 UNIQUE 索引的查询,可理解为 const 是最优化的。 + - `eq_ref`:在连接查询时,如果被驱动表是通过主键或者唯一二级索引列等值匹配的方式进行访问的(如果该主键或者唯一二级索引是联合索引的话,所有的索引列都必须进行等值比较),则对该被驱动表的访问方法就是eq_ref + - `ref`:当通过普通的二级索引列与常量进行等值匹配时来查询某个表,那么对该表的访问方法就可能是ref。ref 可用于 = 或 < 或 > 操作符的带索引的列。 + - `ref_or_null`:当对普通二级索引进行等值匹配查询,该索引列的值也可以是NULL值时,那么对该表的访问方法就可能是ref_or_null - 性能较差: - - index_merge,该联接类型表示使用了索引合并优化方法。在这种情况下,key 列包含了使用的索引的清单,key_len 包含了使用的索引的最长的关键元素。 - - unique_subquery,该类型替换了下面形式的IN子查询的ref: `value IN (SELECT primary_key FROM single_table WHERE some_expr)`。unique_subquery 是一个索引查找函数,可以完全替换子查询,效率更高。 - - index_subquery,该联接类型类似于 unique_subquery。可以替换 IN 子查询, 但只适合下列形式的子查询中的非唯一索引: `value IN (SELECT key_column FROM single_table WHERE some_expr)` - - range,只检索给定范围的行, 使用一个索引来选择行。 - - index,该联接类型与 ALL 相同, 除了只有索引树被扫描。这通常比 ALL 快, 因为索引文件通常比数据文件小。 + - `index_merge`:该联接类型表示使用了索引合并优化方法。在这种情况下,key 列包含了使用的索引的清单,key_len 包含了使用的索引的最长的关键元素。 + - 
`unique_subquery`:类似于两表连接中被驱动表的eq_ref访问方法,unique_subquery是针对在一些包含IN子查询的查询语句中,如果查询优化器决定将IN子查询转换为EXISTS子查询,而且子查询可以使用到主键进行等值匹配的话,那么该子查询执行计划的type列的值就是unique_subquery + - `index_subquery`:index_subquery与unique_subquery类似,只不过访问子查询中的表时使用的是普通的索引 + - `range`:只检索给定范围的行, 使用一个索引来选择行。 + - `index`:该联接类型与 ALL 相同, 除了只有索引树被扫描。这通常比 ALL 快, 因为索引文件通常比数据文件小。 + - 再一次强调,对于使用InnoDB存储引擎的表来说,二级索引的记录只包含索引列和主键列的值,而聚簇索引中包含用户定义的全部列以及一些隐藏列,所以扫描二级索引的代价比直接全表扫描,也就是扫描聚簇索引的代价更低一些 - 性能最差: - - ALL,对于每个来自于先前的表的行组合, 进行完整的表扫描。(性能最差) - - **possible_keys**,指出 MySQL 能使用哪个索引在该表中找到行。如果该列为 NULL,说明没有使用索引,可以对该列创建索引来提供性能。**(这个属性重要)** - - **key**,显示 MySQL 实际决定使用的键 (索引)。如果没有选择索引, 键是 NULL。**(这个属性重要)** - - **key**_len,显示 MySQL 决定使用的键长度。如果键是 NULL, 则长度为 NULL。注意:key_len 是确定了 MySQL 将实际使用的索引长度。 - - **ref**,显示使用哪个列或常数与 key 一起从表中选择行。 - - **rows**,显示 MySQL 认为它执行查询时必须检查的行数。**(这个属性重要)** - - **Extra**,该列包含 MySQL 解决查询的详细信息: - - Distinct:MySQL 发现第 1 个匹配行后, 停止为当前的行组合搜索更多的行。 - - Not exists:MySQL 能够对查询进行 LEFT JOIN 优化, 发现 1 个匹配 LEFT JOIN 标准的行后, 不再为前面的的行组合在该表内检查更多的行。 - - range checked for each record (index map: #):MySQL 没有发现好的可以使用的索引, 但发现如果来自前面的表的列值已知, 可能部分索引可以使用。 - - Using filesort:MySQL 需要额外的一次传递, 以找出如何按排序顺序检索行。 - - Using index: 从只使用索引树中的信息而不需要进一步搜索读取实际的行来检索表中的列信息。 - - Using temporary: 为了解决查询,MySQL 需要创建一个临时表来容纳结果。 - - Using where:WHERE 子句用于限制哪一个行匹配下一个表或发送到客户。 - - Using sort_union(...), Using union(...), Using intersect(...): 这些函数说明如何为 index_merge 联接类型合并索引扫描。 - - Using index for group-by: 类似于访问表的 Using index 方式,Using index for group-by 表示 MySQL 发现了一个索引, 可以用来查 询 GROUP BY 或 DISTINCT 查询的所有列, 而不要额外搜索硬盘访问实际的表。 -- **了解对索引不生效的查询情况 (这个属性重要)** - - 使用 LIKE 关键字的查询,在使用 LIKE 关键字进行查询的查询语句中,如果匹配字符串的第一个字符为“%”,索引不起作用。只有“%”不在第一个位置,索引才会生效。 - - 使用联合索引的查询,MySQL 可以为多个字段创建索引,一个索引可以包括 16 个字段。对于联合索引,只有查询条件中使用了这些字段中第一个字段时,索引才会生效。 - - 使用 OR 关键字的查询,查询语句的查询条件中只有 OR 关键字,且 OR 前后的两个条件中的列都是索引列时,索引才会生效,否则,索引不生效。 -- 子查询优化 - - MySQL 从 4.1 版本开始支持子查询,使用子查询进行 SELECT 语句嵌套查询,可以一次完成很多逻辑上需要多个步骤才能完成的 SQL 操作。 - - 子查询虽然很灵活,但是执行效率并不高。 - - 执行子查询时,MYSQL 需要创建临时表,查询完毕后再删除这些临时表,所以,子查询的速度会受到一定的影响。 - - 优化: - - 可以使用连接查询(JOIN)代替子查询,连接查询时不需要建立临时表,其速度比子查询快。 + - `ALL`:对于每个来自于先前的表的行组合, 进行完整的表扫描。(性能最差) + - `possible_keys`,指出 MySQL 能使用哪个索引在该表中找到行。如果该列为 NULL,说明没有使用索引,可以对该列创建索引来提供性能。**(这个属性重要)** + - possible_keys列中的值并不是越多越好,可能使用的索引越多,查询优化器计算查询成本时就得花费更长时间,所以如果可以的话,尽量删除那些用不到的索引。 + - `key`,显示 MySQL 实际决定使用的键 (索引)。如果没有选择索引, 键是 NULL。**(这个属性重要)** + - 不过有一点比较特别,就是在使用index访问方法来查询某个表时,possible_keys列是空的,而key列展示的是实际使用到的索引 + - `key_len`,表示当优化器决定使用某个索引执行查询时,该索引记录的最大长度。如果键是可以为 NULL, 则长度多 1。 + - `ref`,显示使用哪个列或常数与 key 一起从表中选择行。 + - `rows`,显示 MySQL 认为它执行查询时必须检查的行数。**(这个属性重要)** + - `Extra`,该列包含 MySQL 解决查询的详细信息: + - `Distinct` MySQL 发现第 1 个匹配行后, 停止为当前的行组合搜索更多的行。 + - `Not exists` 当我们使用左(外)连接时,如果WHERE子句中包含要求被驱动表的某个列等于NULL值的搜索条件,而且那个列又是不允许存储NULL值的,那么在该表的执行计划的Extra列就会提示Not exists额外信息 + - `range checked for each record (index map: #)` MySQL 没有发现好的可以使用的索引, 但发现如果来自前面的表的列值已知, 可能部分索引可以使用。 + - `Using filesort` 有一些情况下对结果集中的记录进行排序是可以使用到索引的 + - 需要注意的是,如果查询中需要使用filesort的方式进行排序的记录非常多,那么这个过程是很耗费性能的,我们最好想办法将使用文件排序的执行方式改为使用索引进行排序。 + - `Using temporary` 在许多查询的执行过程中,MySQL可能会借助临时表来完成一些功能,比如去重、排序之类的,比如我们在执行许多包含DISTINCT、GROUP BY、UNION等子句的查询过程中,如果不能有效利用索引来完成查询,MySQL很有可能寻求通过建立内部的临时表来执行查询。如果查询中使用到了内部的临时表,在执行计划的Extra列将会显示Using temporary提示 + - 如果我们并不想为包含GROUP BY子句的查询进行排序,需要我们显式的写上:ORDER BY NULL + - 执行计划中出现Using temporary并不是一个好的征兆,因为建立与维护临时表要付出很大成本的,所以我们最好能使用索引来替代掉使用临时表 + - `Using join buffer (Block Nested Loop)` 在连接查询执行过程过,当被驱动表不能有效的利用索引加快访问速度,MySQL一般会为其分配一块名叫join buffer的内存块来加快查询速度,也就是我们所讲的基于块的嵌套循环算法 + - `Using where` + - 
当我们使用全表扫描来执行对某个表的查询,并且该语句的WHERE子句中有针对该表的搜索条件时,在Extra列中会提示上述额外信息 + - 当使用索引访问来执行对某个表的查询,并且该语句的WHERE子句中有除了该索引包含的列之外的其他搜索条件时,在Extra列中也会提示上述额外信息 + - `Using sort_union(...), Using union(...), Using intersect(...)` 如果执行计划的Extra列出现了Using intersect(...)提示,说明准备使用Intersect索引合并的方式执行查询,括号中的...表示需要进行索引合并的索引名称;如果出现了Using union(...)提示,说明准备使用Union索引合并的方式执行查询;出现了Using sort_union(...)提示,说明准备使用Sort-Union索引合并的方式执行查询。 + - `Using index condition` 有些搜索条件中虽然出现了索引列,但却不能使用到索引 + - `Using index` 当我们的查询列表以及搜索条件中只包含属于某个索引的列,也就是在可以使用索引覆盖的情况下,在Extra列将会提示该额外信息 + - `Using index for group-by` 类似于访问表的 Using index 方式,Using index for group-by 表示 MySQL 发现了一个索引, 可以用来查 询 GROUP BY 或 DISTINCT 查询的所有列, 而不要额外搜索硬盘访问实际的表。 + ## 查询不走索引优化 @@ -140,6 +186,14 @@ - 如果mysql估计使用全表扫描要比使用索引快,则不使用索引 +## 子查询优化 + +- MySQL 从 4.1 版本开始支持子查询,使用子查询进行 SELECT 语句嵌套查询,可以一次完成很多逻辑上需要多个步骤才能完成的 SQL 操作。 +- 子查询虽然很灵活,但是执行效率并不高。 +- 执行子查询时,MYSQL 需要创建临时表,查询完毕后再删除这些临时表,所以,子查询的速度会受到一定的影响。 +- 优化: + - 可以使用连接查询(JOIN)代替子查询,连接查询时不需要建立临时表,其速度比子查询快。 + ## 其他查询优化 - 关联查询过程 @@ -199,7 +253,9 @@ - 插入数据之前执行禁止事务的自动提交,数据插入完成后再恢复,可以提供插入速度。 - 禁用:`SET autocommit = 0;` - 开启:`SET autocommit = 1;` - + - 插入数据之前执行禁止对外键的检查,数据插入完成后再恢复 + - 禁用:`SET foreign_key_checks = 0;` + - 开启:`SET foreign_key_checks = 1;` ## 服务器优化 @@ -236,7 +292,7 @@ select * from table lock in share mode; - - - -- <> +- - <> - <> - <> From f2cf8a5a83d65204a0e49266ffbaf830f904e4bb Mon Sep 17 00:00:00 2001 From: judasn Date: Sat, 13 Apr 2019 23:22:02 +0800 Subject: [PATCH 065/124] 2019-04-13 --- markdown-file/Mysql-Optimize.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/markdown-file/Mysql-Optimize.md b/markdown-file/Mysql-Optimize.md index 51b960aa..cd0c58d5 100644 --- a/markdown-file/Mysql-Optimize.md +++ b/markdown-file/Mysql-Optimize.md @@ -6,7 +6,7 @@ ## 优秀材料 - -- <> +- - <> - <> - <> From f106cfb4cc4fc1cc4d955e126586f5f56f01c5cb Mon Sep 17 00:00:00 2001 From: "Judas.n" Date: Tue, 16 Apr 2019 14:06:58 +0800 Subject: [PATCH 066/124] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 477aa85c..30c6e32f 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ ## 团队 DevOps 方案参考 - + From 2c318df253789736bfaa24e4a0713714efd37cc4 Mon Sep 17 00:00:00 2001 From: judasn Date: Sat, 20 Apr 2019 13:54:30 +0800 Subject: [PATCH 067/124] 2019-04-20 --- markdown-file/Jira-Install-And-Settings.md | 118 +++++++++++++------- markdown-file/Mysql-Install-And-Settings.md | 29 ++++- 2 files changed, 107 insertions(+), 40 deletions(-) diff --git a/markdown-file/Jira-Install-And-Settings.md b/markdown-file/Jira-Install-And-Settings.md index c3457adc..b831ae36 100644 --- a/markdown-file/Jira-Install-And-Settings.md +++ b/markdown-file/Jira-Install-And-Settings.md @@ -1,42 +1,82 @@ # Jira 安装和配置 +## Jira 7.13.3 -## Jira 安装 - -- Jira 安装 - - 官网: - - 官网下载: - - 中文在线试用: - - 官网帮助说明: - - 官网中文语言包: - - Jira 6.3.6 网盘下载: - - Jira 6.3.6 中文语言包网盘下载: - - 环境要求: - - JDK 7 或更新版本; - - Mysql - - 我们要使用的版本:**atlassian-jira-6.3.6.tar.gz** - - 我个人习惯 `/opt` 目录下创建一个目录 `setups` 用来存放各种软件安装包;在 `/usr` 目录下创建一个 `program` 用来存放各种解压后的软件包,下面的讲解也都是基于此习惯 - - 我个人已经使用了第三方源:`EPEL、RepoForge`,如果你出现 `yum install XXXXX` 安装不成功的话,很有可能就是你没有相关源,请查看我对源设置的文章 - - 解压:`tar zxvf atlassian-jira-6.3.6.tar.gz` - - 修改目录名:`mv atlassian-jira-6.3.6/ jira6.3.6/` - - 移到我个人的安装目录下:`mv jira6.3.6/ /usr/program/` - - 创建存放数据目录:`mkdir -p /usr/program/jira6.3.6/data/` - - 设置环境变量: - - 编辑:`vim /etc/profile` - - 在文件尾部添加: - ``` ini - JIRA_HOME=/usr/program/jira6.3.6/data/ - export JIRA_HOME - ``` - - 刷新配置:`source /etc/profile` - - 
运行:`/usr/program/jira6.3.6/bin/start-jira.sh` - - 访问:`http://192.168.0.111:8080/` - - 汉化:`cp JIRA-6.3.6-language-pack-zh_CN.jar /usr/program/jira6.3.6/atlassian-jira/WEB-INF/lib/` - - 配置过程: - - ![Jira 安装和配置](../images/Jira-Install-And-Settings-a-1.jpg) - - ![Jira 安装和配置](../images/Jira-Install-And-Settings-a-2.jpg) - - ![Jira 安装和配置](../images/Jira-Install-And-Settings-a-3.jpg) - - ![Jira 安装和配置](../images/Jira-Install-And-Settings-a-4.jpg) - - ![Jira 安装和配置](../images/Jira-Install-And-Settings-a-5.jpg) - - ![Jira 安装和配置](../images/Jira-Install-And-Settings-a-6.jpg) - - 重新激活页面地址:`http://192.168.0.111:8090/secure/admin/ViewLicense.jspa` \ No newline at end of file +- 最新 7.13.3 版本时间:2019-04 + +#### 数据库 + +``` +docker run \ + --name mysql-jira \ + --restart always \ + -p 3306:3306 \ + -e MYSQL_ROOT_PASSWORD=adg123456 \ + -e MYSQL_DATABASE=jira_db \ + -e MYSQL_USER=jira_user \ + -e MYSQL_PASSWORD=jira_123456 \ + -d \ + mysql:5.7 +``` + +- 连上容器:`docker exec -it mysql-jira /bin/bash` + - 连上 MySQL:`mysql -u root -p` +- 设置编码:**必须做这一步,不然配置过程会报错,JIRA 的 DB 要求是 utf8mb4** + +``` +SET NAMES 'utf8mb4'; +alter database jira_db character set utf8mb4; +``` + + +#### 安装 + +- 下载: + - 选择:tar.gz 类型下载 +- 解压:`tar zxvf atlassian-jira-software-7.13.3.tar.gz` +- 创建 home 目录:`mkdir /usr/local/atlassian-jira-software-7.13.3-standalone/data` +- 配置 home 变量: + +``` +编辑:vim ~/.zshrc + +在文件尾部添加: + +JIRA_HOME=/usr/local/atlassian-jira-software-7.13.3-standalone/data +export JIRA_HOME + + +刷新配置:`source ~/.zshrc` +``` + +- 设置 MySQL 连接: +- 把 mysql-connector-java-5.1.47.jar 放在目录 `/usr/local/atlassian-jira-software-7.13.3-standalone/atlassian-jira/WEB-INF/lib` + + +#### License 过程 + +- 参考自己的为知笔记 + +#### 运行 + +- 启动:`sh /usr/local/atlassian-jira-software-7.13.3-standalone/bin/start-jira.sh` +- 停止:`sh /usr/local/atlassian-jira-software-7.13.3-standalone/bin/stop-jira.sh` + - `ps -ef | grep java` +- 查看 log:`tail -300f /usr/local/atlassian-jira-software-7.13.3-standalone/logs/catalina.out` +- 访问: + - 注意防火墙配置 +- 如果需要更换端口号可以修改:`/usr/local/atlassian-jira-software-7.13.3-standalone/conf/server.xml` 文件中的内容。 + + +#### 中文化 + +- 从 7.x 版本默认已经有中文支持,不需要再汉化了 +- 在安装后首次进入的时候就可以配置,选择中文了 + + +#### 首次配置 + +- 参考文章: +- 因为步骤一样,所以我就不再截图了。 + + diff --git a/markdown-file/Mysql-Install-And-Settings.md b/markdown-file/Mysql-Install-And-Settings.md index acb54084..5eb73d43 100644 --- a/markdown-file/Mysql-Install-And-Settings.md +++ b/markdown-file/Mysql-Install-And-Settings.md @@ -1,7 +1,34 @@ # MySQL 安装和配置 -## Docker 安装 MySQL +## Docker 安装 MySQL(不带挂载) + +``` +docker run \ + --name mysql-jira \ + --restart always \ + -p 3306:3306 \ + -e MYSQL_ROOT_PASSWORD=adg_123456 \ + -e MYSQL_DATABASE=jira_db \ + -e MYSQL_USER=jira_user \ + -e MYSQL_PASSWORD=jira_123456 \ + -d \ + mysql:5.7 +``` + + +- 连上容器:`docker exec -it mysql-jira /bin/bash` + - 连上 MySQL:`mysql -u root -p` +- 设置编码: + +``` +SET NAMES 'utf8mb4'; +alter database jira_db character set utf8mb4; +``` + + + +## Docker 安装 MySQL(带挂载) - 关掉:SELinux - 创建本地数据存储 + 配置文件目录:`mkdir -p /data/docker/mysql/datadir /data/docker/mysql/conf /data/docker/mysql/log` From 7e0391488b55f74bbdb3df58f5ba3af801449920 Mon Sep 17 00:00:00 2001 From: judasn Date: Sun, 21 Apr 2019 16:04:16 +0800 Subject: [PATCH 068/124] 2019-04-21 --- markdown-file/Bash.md | 1 + 1 file changed, 1 insertion(+) diff --git a/markdown-file/Bash.md b/markdown-file/Bash.md index 58dfcd55..b5eba96e 100644 --- a/markdown-file/Bash.md +++ b/markdown-file/Bash.md @@ -107,6 +107,7 @@ drwxr-xr-x. 5 root root 4096 3月 26 10:57,其中最前面的 d 表示这是 - `find . 
-name "lin*" -exec ls -l {} \;`,当前目录搜索lin开头的文件,然后用其搜索后的结果集,再执行ls -l的命令(这个命令可变,其他命令也可以),其中 -exec 和 {} \; 都是固定格式 - `find /opt -type f -size +800M -print0 | xargs -0 du -h | sort -nr`,找出 /opt 目录下大于 800 M 的文件 - `find / -name "*tower*" -exec rm {} \;`,找到文件并删除 + - `find / -name "*tower*" -exec mv {} /opt \;`,找到文件并移到 opt 目录 - `find . -name "*" |xargs grep "youmeek"`,递归查找当前文件夹下所有文件内容中包含 youmeek 的文件 - `find . -size 0 | xargs rm -f &`,删除当前目录下文件大小为0的文件 - `du -hm --max-depth=2 | sort -nr | head -12`,找出系统中占用容量最大的前 12 个目录 From c457bd24c3a6e39fd5cf4650c03b224d4ebde2a6 Mon Sep 17 00:00:00 2001 From: zhang Date: Fri, 26 Apr 2019 12:57:34 +0800 Subject: [PATCH 069/124] 2019-04-26 --- markdown-file/monitor.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index 38ae729c..66a065a4 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -66,6 +66,8 @@ procs -----------memory---------- ---swap-- -----io---- -system-- ------cpu----- - `bi` 表示从块设备读取数据的量(读磁盘) - `bo` 表示从块设备写入数据的量(写磁盘) - **如果bi和bo两个数字比较高,则说明,磁盘IO压力大。** + - `in` 每秒 CPU 的中断次数,包括时间中断 + - `cs` 每秒上下文切换次数,例如我们调用系统函数,就要进行上下文切换,线程的切换,也要进程上下文切换,这个值要越小越好,太大了,要考虑调低线程或者进程的数目 - `wa` 表示I/O等待所占用CPU的时间比 #### 命令:sar(综合) @@ -266,6 +268,7 @@ atctive 和 passive 的数目通常可以用来衡量服务器的负载:接受 - 在 `top` 命令状态下按 shfit + m 可以按照 **内存使用** 大小排序 - 在 `top` 命令状态下按 shfit + p 可以按照 **CPU 使用** 大小排序 - 展示数据上,%CPU 表示进程占用的 CPU 百分比,%MEM 表示进程占用的内存百分比 +- mac 下不一样:要先输入 o,然后输入 cpu 则按 cpu 使用量排序,输入 rsize 则按内存使用量排序。 #### CPU 其他工具 From f5aa85e220dbb2c61cc9c56ac905137c8e625172 Mon Sep 17 00:00:00 2001 From: zhang Date: Tue, 30 Apr 2019 15:55:28 +0800 Subject: [PATCH 070/124] 2019-04-30 --- markdown-file/monitor.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index 66a065a4..50574c9f 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -576,7 +576,7 @@ TOTAL:(总的流量) 12.9GB 229Mb 190Mb 193Mb #### lsof - 安装 lsof:`yum install -y lsof` -- 查看 3316 端口是否有被使用:`lsof -i:3316`,**有被使用会输出类似如下信息,如果没被使用会没有任何信息返回** +- 查看 3316 端口是否有被使用(macOS 也适用):`lsof -i:3316`,**有被使用会输出类似如下信息,如果没被使用会没有任何信息返回** ``` COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME @@ -596,7 +596,7 @@ docker-pr 13551 root 4u IPv6 2116824 0t0 TCP *:aicc-cmi (LISTEN) #### netstat - 更多用法可以看:[netstat 的10个基本用法](https://linux.cn/article-2434-1.html) -- 查看所有在用的端口:`netstat -ntlp` +- 查看所有在用的端口(macOS 也适用):`netstat -ntlp` ``` Active Internet connections (only servers) From 223b9c610e91c7f48a360018b1fd5428e576df2b Mon Sep 17 00:00:00 2001 From: zhang Date: Tue, 7 May 2019 11:30:01 +0800 Subject: [PATCH 071/124] 2019-05-07 --- markdown-file/Docker-Install-And-Usage.md | 3 ++- markdown-file/Elasticsearch-Base.md | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index 20db7079..3fe4361b 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -774,7 +774,8 @@ logger.warn("-------------maxMemory=" + ((double) maxMemory / (1024 * 1024))); - Docker Compose 主要用于定义和运行多个 Docker 容器的工具,这样可以快速运行一套分布式系统 - 容器之间是有依赖关系,比如我一个 Java web 系统依赖 DB 容器、Redis 容器,必须这些依赖容器先运行起来。 - 一个文件:docker-compose.yml -- 一个命令:docker-compose up +- 一个命令:`docker-compose up` + - 指定文件:`docker-compose -f zookeeper.yml -p zk_test up -d` - 官网安装说明: - 安装方法: diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md index a7e98bba..599a742e 100644 --- 
a/markdown-file/Elasticsearch-Base.md +++ b/markdown-file/Elasticsearch-Base.md @@ -2,6 +2,9 @@ ## Docker 部署 +- `vim ~/elasticsearch-5.6.8-docker.yml` +- 启动:`docker-compose -f ~/elasticsearch-5.6.8-docker.yml -p elasticsearch_5.6.8 up -d` + ``` version: "3" From 79e82dba3168792650906a9e97daa1a1a4a36d6a Mon Sep 17 00:00:00 2001 From: zhang Date: Mon, 13 May 2019 12:27:11 +0800 Subject: [PATCH 072/124] 2019-05-13 --- centos-settings/CentOS-Extra-Packages.md | 16 ++++++++++++++++ markdown-file/Elasticsearch-Base.md | 17 ++++++++++++++++- 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/centos-settings/CentOS-Extra-Packages.md b/centos-settings/CentOS-Extra-Packages.md index def938fe..c00901e3 100644 --- a/centos-settings/CentOS-Extra-Packages.md +++ b/centos-settings/CentOS-Extra-Packages.md @@ -83,6 +83,22 @@ - `sudo yum install -y htop`(htop 官方源是没有的,所以如果能下载下来就表示已经使用了第三方源) +### 禁用源 + +- 编辑:`vim /etc/yum.repos.d/elasticsearch.repo` +- 把 enabled=1 改为 enabled=0 + +``` +[elasticsearch-6.x] +name=Elasticsearch repository for 6.x packages +baseurl=https://artifacts.elastic.co/packages/6.x/yum +gpgcheck=1 +gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch +enabled=1 +autorefresh=1 +type=rpm-md +``` + > 资料: - diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md index 599a742e..1c02a750 100644 --- a/markdown-file/Elasticsearch-Base.md +++ b/markdown-file/Elasticsearch-Base.md @@ -61,10 +61,12 @@ elasticsearch hard memlock unlimited #### 开始安装 +- 检查:`rpm -qa | grep elastic` +- 卸载:`rpm -e --nodeps elasticsearch` - 官网 RPM 安装流程(重要,以下资料都是对官网的总结): - 导入 KEY:`rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch` - 新建文件:`vim /etc/yum.repos.d/elasticsearch.repo` -- 内容如下: +- 内容如下(6.x): ``` [elasticsearch-6.x] @@ -77,6 +79,19 @@ autorefresh=1 type=rpm-md ``` +- 内容如下(5.x): + +``` +[elasticsearch-5.x] +name=Elasticsearch repository for 5.x packages +baseurl=https://artifacts.elastic.co/packages/5.x/yum +gpgcheck=1 +gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch +enabled=1 +autorefresh=1 +type=rpm-md +``` + - 开始安装:`yum install -y elasticsearch`,预计文件有 108M 左右,国内网络安装可能会很慢,慢慢等 - 安装完后会多了一个:elasticsearch 用户和组 - 设置 java 软链接:`ln -s /usr/local/jdk1.8.0_181/jre/bin/java /usr/local/sbin/java` From d206847075699dc8e8f05d86995a236211d701b5 Mon Sep 17 00:00:00 2001 From: zhang Date: Mon, 13 May 2019 14:32:14 +0800 Subject: [PATCH 073/124] 2019-05-13 --- markdown-file/Elasticsearch-Base.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md index 1c02a750..e5f8c287 100644 --- a/markdown-file/Elasticsearch-Base.md +++ b/markdown-file/Elasticsearch-Base.md @@ -124,6 +124,9 @@ type=rpm-md - 默认只能 localhost 访问,修改成支持外网访问 ``` +打开这个注释:#cluster.name: my-application +集群名称最好是自己给定,不然有些 client 端会连不上,或者要求填写 + 打开这个注释:#network.host: 192.168.0.1 改为:network.host: 0.0.0.0 ``` From 6b4f01aa480513dcff499666c63268b5374b66aa Mon Sep 17 00:00:00 2001 From: zhang Date: Mon, 13 May 2019 16:22:28 +0800 Subject: [PATCH 074/124] 2019-05-13 --- markdown-file/Elasticsearch-Base.md | 33 ++++++++++++++++++----------- 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md index e5f8c287..1b1cfa3d 100644 --- a/markdown-file/Elasticsearch-Base.md +++ b/markdown-file/Elasticsearch-Base.md @@ -1,30 +1,39 @@ # Elasticsearch 知识 -## Docker 部署 +## Docker 单节点部署 +- 注意:docker 版本下 client.transport.sniff = true 是无效的。 - `vim 
~/elasticsearch-5.6.8-docker.yml` - 启动:`docker-compose -f ~/elasticsearch-5.6.8-docker.yml -p elasticsearch_5.6.8 up -d` ``` -version: "3" - +version: '3' services: - elasticsearch: - image: elasticsearch:5.6.8 - restart: always - container_name: elasticsearch - hostname: elasticsearch + elasticsearch1: + image: docker.elastic.co/elasticsearch/elasticsearch:5.6.8 + container_name: elasticsearch1 environment: - - 'http.host=0.0.0.0' - - 'transport.host=127.0.0.1' - "ES_JAVA_OPTS=-Xms512m -Xmx512m" + - "cluster.name=elasticsearch" + - "network.host=0.0.0.0" + - "http.host=0.0.0.0" + - "xpack.security.enabled=false" + ulimits: + memlock: + soft: -1 + hard: -1 + nofile: + soft: 65536 + hard: 65536 ports: - - "9200:9200" - - "9300:9300" + - 9200:9200 + - 9300:9300 volumes: - /data/docker/elasticsearch/data:/usr/share/elasticsearch/data + ``` + ------------------------------------------------------------------- From 4e7a01882aa04e6848ffe7d87c35e8df472d14de Mon Sep 17 00:00:00 2001 From: zhang Date: Wed, 29 May 2019 17:21:27 +0800 Subject: [PATCH 075/124] 2019-05-29 --- README.md | 1 + SUMMARY.md | 1 + TOC.md | 1 + .../Confluence-Install-And-Settings.md | 134 ++++++++++++++++++ 4 files changed, 137 insertions(+) create mode 100644 markdown-file/Confluence-Install-And-Settings.md diff --git a/README.md b/README.md index 30c6e32f..b79e908f 100644 --- a/README.md +++ b/README.md @@ -72,6 +72,7 @@ - [MongoDB 安装和配置](markdown-file/MongoDB-Install-And-Settings.md) - [Solr 安装和配置](markdown-file/Solr-Install-And-Settings.md) - [Jira 安装和配置](markdown-file/Jira-Install-And-Settings.md) +- [Confluence 安装和配置](markdown-file/Confluence-Install-And-Settings.md) - [TeamCity 安装和配置](markdown-file/TeamCity-Install-And-Settings.md) - [Nginx 安装和配置](markdown-file/Nginx-Install-And-Settings.md) - [wrk 安装和配置](markdown-file/wrk-Install-And-Settings.md) diff --git a/SUMMARY.md b/SUMMARY.md index 91dfcd0a..91ff9f92 100644 --- a/SUMMARY.md +++ b/SUMMARY.md @@ -51,6 +51,7 @@ * [MongoDB 安装和配置](markdown-file/MongoDB-Install-And-Settings.md) * [Solr 安装和配置](markdown-file/Solr-Install-And-Settings.md) * [Jira 安装和配置](markdown-file/Jira-Install-And-Settings.md) +* [Confluence 安装和配置](markdown-file/Confluence-Install-And-Settings.md) * [Jenkins 安装和配置](markdown-file/Jenkins-Install-And-Settings.md) * [TeamCity 安装和配置](markdown-file/TeamCity-Install-And-Settings.md) * [Nginx 安装和配置](markdown-file/Nginx-Install-And-Settings.md) diff --git a/TOC.md b/TOC.md index 1b0ff377..44bf8f76 100644 --- a/TOC.md +++ b/TOC.md @@ -49,6 +49,7 @@ - [MongoDB 安装和配置](markdown-file/MongoDB-Install-And-Settings.md) - [Solr 安装和配置](markdown-file/Solr-Install-And-Settings.md) - [Jira 安装和配置](markdown-file/Jira-Install-And-Settings.md) +- [Confluence 安装和配置](markdown-file/Confluence-Install-And-Settings.md) - [Jenkins 安装和配置](markdown-file/Jenkins-Install-And-Settings.md) - [TeamCity 安装和配置](markdown-file/TeamCity-Install-And-Settings.md) - [Nginx 安装和配置](markdown-file/Nginx-Install-And-Settings.md) diff --git a/markdown-file/Confluence-Install-And-Settings.md b/markdown-file/Confluence-Install-And-Settings.md new file mode 100644 index 00000000..5a1a6fe1 --- /dev/null +++ b/markdown-file/Confluence-Install-And-Settings.md @@ -0,0 +1,134 @@ +# Confluence 安装和配置 + +## Confluence 6.15.4 + +- 最新 6.15.4 版本时间:2019-05 + +#### 数据库 + +``` +docker run \ + --name mysql-confluence \ + --restart always \ + -p 3316:3306 \ + -e MYSQL_ROOT_PASSWORD=adg123456 \ + -e MYSQL_DATABASE=confluence_db \ + -e MYSQL_USER=confluence_user \ + -e MYSQL_PASSWORD=confluence_123456 \ + -d \ + mysql:5.7 
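+
+# 可选的连通性检查(账号、库名取自上面的示例值,假设宿主机端口映射仍为 3316):
+# docker logs mysql-confluence
+# docker exec -it mysql-confluence mysql -uconfluence_user -pconfluence_123456 confluence_db -e "select 1;"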
+``` + +- 连上容器:`docker exec -it mysql-confluence /bin/bash` + - 连上 MySQL:`mysql -u root -p` +- 设置编码: + - **必须做这一步,不然配置过程会报错,confluence 的 DB 要求是 utf8,还不能是 utf8mb4** + - **并且排序规则还必须是:utf8_bin** + - **数据库必须使用'READ-COMMITTED'作为默认隔离级别** + +``` +SET NAMES 'utf8'; +alter database confluence_db character set utf8 collate utf8_bin; +SET GLOBAL tx_isolation='READ-COMMITTED'; +``` + +#### 安装 + +- 下载: + - 选择:linux64 类型下载 +- 授权:`chmod +x atlassian-confluence-6.15.4-x64.bin` + + +``` +./atlassian-confluence-6.15.4-x64.bin + +开始提示: + +Unpacking JRE ... +Starting Installer ... + +This will install Confluence 6.9.0 on your computer. +OK [o, Enter], Cancel [c] + +>> 输入o或直接回车 + +Click Next to continue, or Cancel to exit Setup. + +Choose the appropriate installation or upgrade option. +Please choose one of the following: +Express Install (uses default settings) [1], +Custom Install (recommended for advanced users) [2, Enter], +Upgrade an existing Confluence installation [3] +1 +>> 这里输入数字1 + +See where Confluence will be installed and the settings that will be used. +Installation Directory: /opt/atlassian/confluence +Home Directory: /var/atlassian/application-data/confluence +HTTP Port: 8090 +RMI Port: 8000 +Install as service: Yes +Install [i, Enter], Exit [e] +i + +>> 输入i或者直接回车 + +Extracting files ... + +Please wait a few moments while we configure Confluence. + +Installation of Confluence 6.9.0 is complete +Start Confluence now? +Yes [y, Enter], No [n] + +>> 输入y或者直接回车 + +Please wait a few moments while Confluence starts up. +Launching Confluence ... + +Installation of Confluence 6.9.0 is complete +Your installation of Confluence 6.9.0 is now ready and can be accessed via +your browser. +Confluence 6.9.0 can be accessed at http://localhost:8090 +Finishing installation ... 
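+
+# 注:以上为安装器的示例输出,输出中的版本号以实际下载的安装包为准(此示例输出来自旧版 6.9.0)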
+ +# 安装完成,访问本机的8090端口进行web端安装 +# 开放防火墙端口 +firewall-cmd --add-port=8090/tcp --permanent +firewall-cmd --add-port=8000/tcp --permanent +firewall-cmd --reload +``` + +- 默认是安装在 /opt 目录下:`/opt/atlassian/confluence/confluence/WEB-INF/lib` +- 启动:`sh /opt/atlassian/confluence/bin/start-confluence.sh` +- 停止:`sh /opt/atlassian/confluence/bin/stop-confluence.sh` +- 查看 log:`tail -300f /opt/atlassian/confluence/logs/catalina.out` +- 卸载:`sh /opt/atlassian/confluence/uninstall` +- 设置 MySQL 连接驱动,把 mysql-connector-java-5.1.47.jar 放在目录 `/opt/atlassian/confluence/confluence/WEB-INF/lib` + +#### 首次配置 + +- 访问: +- 参考文章: +- 参考文章: +- 因为步骤一样,所以我就不再截图了。 + +#### License 过程 + +- 参考自己的为知笔记 + + +## 反向代理的配置可以参考 + +- + + +## 使用 markdown + +- 点击右上角小齿轮 > 管理应用 > 搜索市场应用 > 输入 markdown > 安装 + + +## 其他资料 + +- +- From 8d6879d3d9fdc30bb636160014836fff6115fb6a Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 30 May 2019 23:30:21 +0800 Subject: [PATCH 076/124] 2019-05-30 --- markdown-file/Elasticsearch-Base.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md index 1b1cfa3d..05cc86de 100644 --- a/markdown-file/Elasticsearch-Base.md +++ b/markdown-file/Elasticsearch-Base.md @@ -2,6 +2,10 @@ ## Docker 单节点部署 +- 官网: + - 7.x:7.1.0 + - 6.x:6.8.0 + - 5.x:5.6.8 - 注意:docker 版本下 client.transport.sniff = true 是无效的。 - `vim ~/elasticsearch-5.6.8-docker.yml` - 启动:`docker-compose -f ~/elasticsearch-5.6.8-docker.yml -p elasticsearch_5.6.8 up -d` From 4000f6646c77cffb57e37dc2787de950aaf19b85 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 31 May 2019 00:20:26 +0800 Subject: [PATCH 077/124] 2019-05-31 --- .../SkyWalking-Install-And-Settings.md | 192 ++++++++++++++++++ 1 file changed, 192 insertions(+) create mode 100644 markdown-file/SkyWalking-Install-And-Settings.md diff --git a/markdown-file/SkyWalking-Install-And-Settings.md b/markdown-file/SkyWalking-Install-And-Settings.md new file mode 100644 index 00000000..f47d66b9 --- /dev/null +++ b/markdown-file/SkyWalking-Install-And-Settings.md @@ -0,0 +1,192 @@ +# SkyWalking 安装和配置 + + +## OpenAPM 相关 + +- 目前市场工具一览: +- 目前最活跃的标准:[OpenTracing](https://opentracing.io/) +- 现在比较活跃的应该是: + - [Jaeger](https://www.jaegertracing.io/) + - [SkyWalking](https://skywalking.apache.org/) + + +## 官网资料 + +- 当前时间:2019-05,最新版本:6.1 +- 官网: +- 官网 Github: +- 官网文档: +- 官网下载: + - 该网页显示:官网目前推荐的是通过源码构建出包,docker 镜像推荐 + - 源码构建方法: +- 这里简单抽取下核心内容: +- 至少需要 jdk8 + maven3 +- 需要 Elasticsearch + - Elasticsearch 和 SkyWalking 的所在服务器的时间必须一致 + - 看了下源码依赖的 Elasticsearch 依赖包,目前支持 5.x 和 6.x + +## 基于 IntelliJ IDEA 直接运行、Debug + +- 这里选择 IntelliJ IDEA 运行服务,方便我们 debug 了解 SkyWalking: + +``` +cd skywalking/ + +git submodule init + +git submodule update + +mvn clean package -DskipTests + +因为需要设置 gRPC 的自动生成的代码目录,为源码目录,所以: +手工将下面提到的目录下的 grpc-java 和 java 目录设置为 IntelliJ IDEA 的源码目录(Sources Root) +/skywalking/apm-protocol/apm-network/target/generated-sources/protobuf +/skywalking/oap-server/server-core/target/generated-sources/protobuf +/skywalking/oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf +/skywalking/oap-server/exporter/target/generated-sources/protobuf + + +手工将下面提到的目录下的 antlr4 目录设置为 IntelliJ IDEA 的源码目录(Sources Root) +/skywalking/oap-server/generate-tool-grammar/target/generated-sources + +手工将下面提到的目录下的 oal 目录设置为 IntelliJ IDEA 的源码目录(Sources Root) +/skywalking/oap-server/generated-analysis/target/generated-sources + +``` + +#### 启动 Server 项目 + +- 现在可以通过 IntelliJ IDEA 启动服务: +- 编辑 server 
配置:`/skywalking/oap-server/server-starter/src/main/resources/application.yml` + - 里面有关 Elasticsearch 连接信息的配置,你可以根据自己的情况进行配置。 +- 启动类:`/skywalking/oap-server/server-starter/src/main/java/org/apache/skywalking/oap/server/starter/OAPServerStartUp.java` + - 第一次启动会创建 540 个左右的 Elasticsearch 索引库,会花点时间。 + + +#### 启动 UI 项目 + + +- 现在启动 UI 项目,找到:`/skywalking/apm-webapp/src/main/java/org/apache/skywalking/apm/webapp/ApplicationStartUp.java` +- 访问 UI 地址: + - 用户名:admin + - 密码:admin + + +## Java Agent(探针) + + +#### IntelliJ IDEA 项目调试 + +- 前面构建服务的时候记得构建出 jar 包出来,这里要用到 +- 自己的 Spring Boot 项目 +- 引包: + +``` + + + + org.apache.skywalking + apm-toolkit-trace + 6.1.0 + + +``` + +- 常用注解: + + +``` +@Trace +@ApiOperation(tags = {"用户系统管理->用户管理->用户列表"}, value = "查询所有用户列表", notes = "查询所有用户列表") +@RequestMapping(value = "/list", method = RequestMethod.GET) +@ResponseBody +public List list() { + List sysUserList = sysUserService.findAll(); + ActiveSpan.tag("一共有数据:", sysUserList.size() + "条"); + log.info("当前 traceId={}", TraceContext.traceId()); + return sysUserList; +} + +``` + +- 更多注解的使用: + +- 你的 demo 项目在 IntelliJ IDEA 启动的时候加上 VM 参数上设置: + +``` +-javaagent:/你自己的路径/skywalking-agent.jar -Dskywalking.agent.application_code=my_app_001 -Dskywalking.collector.backend_service=localhost:11800 +``` + +- 默认 11800 是 gRPC 的接收接口 +- 你自己构建出来的 jar 路径一般是:`/skywalking/apm-sniffer/apm-agent/target/skywalking-agent.jar` +- 然后请求你带有 Trace 的 Controller,然后去 UI 界面看统计情况 + +#### jar 包方式 + +- 你的 Spring Boot jar 包 run 之前加上 VM 参数: + +``` +java -javaagent:/你自己的路径/skywalking-agent.jar -Dskywalking.collector.backend_service=localhost:11800 -Dskywalking.agent.application_code=my_app_002 -jar my-project-1.0-SNAPSHOT.jar +``` + + +#### Docker 方式 + +- Dockerfile + +``` +FROM openjdk:8-jre-alpine + +LABEL maintainer="tanjian20150101@gmail.com" + +ENV SW_APPLICATION_CODE=java-agent-demo \ + SW_COLLECTOR_SERVERS=localhost:11800 + +COPY skywalking-agent /apache-skywalking-apm-incubating/agent + +COPY target/sky-demo-1.0-SNAPSHOT.jar /demo.jar + +ENTRYPOINT java -javaagent:/apache-skywalking-apm-incubating/agent/skywalking-agent.jar -Dskywalking.collector.backend_service=${SW_COLLECTOR_SERVERS} \ +-Dskywalking.agent.application_code=${SW_APPLICATION_CODE} -jar /demo.jar +``` + +- 构建镜像: + +``` +docker build -t hello-demo . 
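+
+# 说明:下行 docker run 中的 127.10.0.2:11800(OAP gRPC 地址)与 10101(示例应用端口)均为示例值,请按实际环境替换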
+docker run -p 10101:10101 -e SW_APPLICATION_CODE=hello-world-demo-005 -e SW_COLLECTOR_SERVERS=127.10.0.2:11800 hello-demo +``` + + + +## 构建 jar 部署在服务器 + +- 如果想直接打包出 jar 部署与服务器,只需要这样: + +``` +cd skywalking/ + +git submodule init + +git submodule update + +mvn clean package -DskipTests +``` + + +## 资料 + +- +- +- +- <> +- <> +- <> +- <> +- <> + + + + + + From 6447cef047197c473c3c49a575800c933658ad87 Mon Sep 17 00:00:00 2001 From: zhang Date: Fri, 31 May 2019 10:32:25 +0800 Subject: [PATCH 078/124] 2019-05-31 --- README.md | 3 ++- SUMMARY.md | 4 +++- TOC.md | 4 +++- markdown-file/SkyWalking-Install-And-Settings.md | 13 ++++++++++++- 4 files changed, 20 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index b79e908f..e646a463 100644 --- a/README.md +++ b/README.md @@ -108,7 +108,8 @@ - [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) - [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md) - [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md) -- [Wormhole + Flink 最佳实践](markdown-file/Wormhole-Install-And-Settings.md) +- [Wormhole + Flink 安装和配置](markdown-file/Wormhole-Install-And-Settings.md) +- [SkyWalking 安装和配置](markdown-file/SkyWalking-Install-And-Settings.md) ## 联系(Contact) diff --git a/SUMMARY.md b/SUMMARY.md index 91ff9f92..c62a5b09 100644 --- a/SUMMARY.md +++ b/SUMMARY.md @@ -87,4 +87,6 @@ * [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) * [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) * [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md) -* [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md) \ No newline at end of file +* [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md) +* [Wormhole + Flink 安装和配置](markdown-file/Wormhole-Install-And-Settings.md) +* [SkyWalking 安装和配置](markdown-file/SkyWalking-Install-And-Settings.md) \ No newline at end of file diff --git a/TOC.md b/TOC.md index 44bf8f76..96ab0f35 100644 --- a/TOC.md +++ b/TOC.md @@ -84,4 +84,6 @@ - [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) - [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) - [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md) -- [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md) \ No newline at end of file +- [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md) +- [Wormhole + Flink 安装和配置](markdown-file/Wormhole-Install-And-Settings.md) +- [SkyWalking 安装和配置](markdown-file/SkyWalking-Install-And-Settings.md) \ No newline at end of file diff --git a/markdown-file/SkyWalking-Install-And-Settings.md b/markdown-file/SkyWalking-Install-And-Settings.md index f47d66b9..ad6cd074 100644 --- a/markdown-file/SkyWalking-Install-And-Settings.md +++ b/markdown-file/SkyWalking-Install-And-Settings.md @@ -25,6 +25,13 @@ - Elasticsearch 和 SkyWalking 的所在服务器的时间必须一致 - 看了下源码依赖的 Elasticsearch 依赖包,目前支持 5.x 和 6.x + +## 支持收集的组件列表 + +- 国内常用的组件目前看来都支持了 +- + + ## 基于 IntelliJ IDEA 直接运行、Debug - 这里选择 IntelliJ IDEA 运行服务,方便我们 debug 了解 SkyWalking: @@ -173,13 +180,17 @@ git submodule update mvn clean package -DskipTests ``` +## 告警配置 + +- + ## 资料 - - - -- <> +- - <> - <> - <> From 5cb4b2a51df6d0ab050fdca1f647215029d49d26 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 31 May 2019 22:36:35 +0800 Subject: [PATCH 079/124] 2019-05-31 --- markdown-file/Zsh.md | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/markdown-file/Zsh.md b/markdown-file/Zsh.md index 2186bc74..e0e294a3 100644 --- a/markdown-file/Zsh.md +++ 
b/markdown-file/Zsh.md @@ -64,6 +64,18 @@ - 编辑配置文件:`vim /root/.zshrc`,找到下图的地方,怎么安装,原作者注释写得很清楚了,别装太多了,默认 git 是安装的。 - ![oh-my-zsh 安装](../images/Zsh-c-1.jpg) - 插件推荐: + - `zsh-autosuggestions` + - 这个插件会对历史命令一些补全,类似 fish 终端 + - 插件官网: + - 安装,复制该命令:`git clone https://github.com/zsh-users/zsh-autosuggestions ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-autosuggestions` + - 编辑:`vim ~/.zshrc`,找到这一行,后括号里面的后面添加:`plugins=( 前面的一些插件名称,换行,加上:zsh-autosuggestions)` + - 刷新下配置:`source ~/.zshrc` + - `zsh-syntax-highlighting` + - 这个插件会对终端命令高亮显示,比如正确的拼写会是绿色标识,否则是红色,另外对于一些shell输出语句也会有高亮显示,算是不错的辅助插件 + - 插件官网: + - 安装,复制该命令:`git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-syntax-highlighting` + - 编辑:`vim ~/.zshrc`,找到这一行,后括号里面的后面添加:`plugins=( 前面的一些插件名称,换行,加上:zsh-syntax-highlighting)` + - 刷新下配置:`source ~/.zshrc` - `wd` - 简单地讲就是给指定目录映射一个全局的名字,以后方便直接跳转到这个目录,比如: - 编辑配置文件,添加上 wd 的名字:`vim /root/.zshrc` @@ -80,12 +92,6 @@ - 进入解压后目录并安装:`cd autojump_v21.1.2/ ; ./install.sh` - 再执行下这个:`source /etc/profile.d/autojump.sh` - 编辑配置文件,添加上 autojump 的名字:`vim /root/.zshrc` - - `zsh-syntax-highlighting` - - 这个插件会对终端命令高亮显示,比如正确的拼写会是绿色标识,否则是红色,另外对于一些shell输出语句也会有高亮显示,算是不错的辅助插件 - - 插件官网: - - 安装,复制该命令:'git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-syntax-highlighting' - - 编辑:`vim ~/.zshrc`,找到这一行,后括号里面的后面添加:`plugins=( 前面的一些插件名称 zsh-syntax-highlighting)` - - 刷新下配置:`source ~/.zshrc` ### 主题 From 5f7743f651160d2f0c41e0ca5824ba9ae04a6180 Mon Sep 17 00:00:00 2001 From: zhang Date: Wed, 12 Jun 2019 16:09:30 +0800 Subject: [PATCH 080/124] 2019-06-12 --- README.md | 1 + SUMMARY.md | 1 + TOC.md | 1 + .../PostgreSQL-Install-And-Settings.md | 32 +++++++++++++++++++ 4 files changed, 35 insertions(+) create mode 100644 markdown-file/PostgreSQL-Install-And-Settings.md diff --git a/README.md b/README.md index e646a463..9301dbb6 100644 --- a/README.md +++ b/README.md @@ -63,6 +63,7 @@ - [Jenkins 安装和配置](markdown-file/Jenkins-Install-And-Settings.md) - [Maven 安装和配置](markdown-file/Maven-Install-And-Settings.md) - [Nexus 安装和配置](markdown-file/Nexus-Install-And-Settings.md) +- [PostgreSQL 安装和配置](markdown-file/PostgreSQL-Install-And-Settings.md) - [MySQL 安装和配置](markdown-file/Mysql-Install-And-Settings.md) - [MySQL 优化](markdown-file/Mysql-Optimize.md) - [MySQL 测试](markdown-file/Mysql-Test.md) diff --git a/SUMMARY.md b/SUMMARY.md index c62a5b09..ef4a9f48 100644 --- a/SUMMARY.md +++ b/SUMMARY.md @@ -42,6 +42,7 @@ * [Jenkins 安装和配置](markdown-file/Jenkins-Install-And-Settings.md) * [Maven 安装和配置](markdown-file/Maven-Install-And-Settings.md) * [Nexus 安装和配置](markdown-file/Nexus-Install-And-Settings.md) +* [PostgreSQL 安装和配置](markdown-file/PostgreSQL-Install-And-Settings.md) * [MySQL 安装和配置](markdown-file/Mysql-Install-And-Settings.md) * [MySQL 优化](markdown-file/Mysql-Optimize.md) * [MySQL 测试](markdown-file/Mysql-Test.md) diff --git a/TOC.md b/TOC.md index 96ab0f35..0b6b3709 100644 --- a/TOC.md +++ b/TOC.md @@ -40,6 +40,7 @@ - [Jenkins 安装和配置](markdown-file/Jenkins-Install-And-Settings.md) - [Maven 安装和配置](markdown-file/Maven-Install-And-Settings.md) - [Nexus 安装和配置](markdown-file/Nexus-Install-And-Settings.md) +- [PostgreSQL 安装和配置](markdown-file/PostgreSQL-Install-And-Settings.md) - [MySQL 安装和配置](markdown-file/Mysql-Install-And-Settings.md) - [MySQL 优化](markdown-file/Mysql-Optimize.md) - [MySQL 测试](markdown-file/Mysql-Test.md) diff --git a/markdown-file/PostgreSQL-Install-And-Settings.md b/markdown-file/PostgreSQL-Install-And-Settings.md new file 
mode 100644 markdown-file/PostgreSQL-Install-And-Settings.md

diff --git a/markdown-file/PostgreSQL-Install-And-Settings.md b/markdown-file/PostgreSQL-Install-And-Settings.md
new file mode 100644
index 00000000..b2f3182e
--- /dev/null
+++ b/markdown-file/PostgreSQL-Install-And-Settings.md
@@ -0,0 +1,32 @@
+# PostgreSQL 安装和配置
+
+
+## 官网
+
+- 官网:
+    - 201906 最新版本
+    - 12 beta
+    - 11 release
+- 官网 Docker hub:
+
+
+## Docker 安装 PostgreSQL(带挂载)
+
+```
+docker run \
+    -d \
+    --name pgsql \
+    -p 5432:5432 \
+    -e POSTGRES_USER=adg_user \
+    -e POSTGRES_PASSWORD=adg123456 \
+    -v ~/docker_data/pgsql/data:/var/lib/postgresql/data \
+    postgres:11
+```
+
+- 连上容器:`docker exec -it pgsql /bin/bash`
+    - 连上 PostgreSQL:`psql -h 127.0.0.1 -p 5432 -U adg_user`
+
+
+## 资料
+
+- 

From cc754b7d8f1ba1fdeb3487d42d545a623528e063 Mon Sep 17 00:00:00 2001
From: zhang
Date: Thu, 13 Jun 2019 17:33:36 +0800
Subject: [PATCH 081/124] 2019-06-13

---
 markdown-file/Docker-Install-And-Usage.md | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md
index 3fe4361b..734c11de 100644
--- a/markdown-file/Docker-Install-And-Usage.md
+++ b/markdown-file/Docker-Install-And-Usage.md
@@ -309,6 +309,11 @@ CONTAINER ID NAME CPU % MEM USAGE / LI
 - `docker run -it 镜像ID --link redis-name:myredis /bin/bash`
     - `redis-name` 是容器名称
     - `myredis` 是容器别名,其他容器连接它可以用这个别名来写入到自己的配置文件中
+- 容器与宿主机之间文件的拷贝
+    - `docker cp /www/runoob 96f7f14e99ab:/www/` 将主机 /www/runoob 目录拷贝到容器 96f7f14e99ab 的 /www 目录下
+    - `docker cp /www/runoob 96f7f14e99ab:/www` 将主机 /www/runoob 目录拷贝到容器 96f7f14e99ab 中,目录重命名为 www
+    - `docker cp 96f7f14e99ab:/www /tmp/` 将容器 96f7f14e99ab 的 /www 目录拷贝到主机的 /tmp 目录中
+

#### docker 网络模式

From cd315c62729c3859b7cea78b07962dd12758ccf8 Mon Sep 17 00:00:00 2001
From: judasn
Date: Sun, 23 Jun 2019 10:45:48 +0800
Subject: [PATCH 082/124] 2019-06-23

---
 markdown-file/Jenkins-Install-And-Settings.md | 32 +++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md
index 82cc053c..a4101dea 100644
--- a/markdown-file/Jenkins-Install-And-Settings.md
+++ b/markdown-file/Jenkins-Install-And-Settings.md
@@ -158,6 +158,37 @@ This may also be found at: /root/.jenkins/secrets/initialAdminPassword
 - 如果配置插件过程遇到这个错误:`No valid crumb was included in the request`,则多重试几次。
 - 登录后把: 下面的 `防止跨站点请求伪造` 勾选去掉。遇到问题多试几次。
 
+
+## 忘记 admin 密码进行重置
+
+- 备份配置文件:`cp /root/.jenkins/config.xml /root/.jenkins/config.xml.back`
+- 编辑:`vim /root/.jenkins/config.xml`,删除 config.xml 文件中的这部分内容,在 10 行左右位置
+
+```
+<useSecurity>true</useSecurity>
+
+<authorizationStrategy class="hudson.security.FullControlOnceLoggedInAuthorizationStrategy">
+    <denyAnonymousReadAccess>true</denyAnonymousReadAccess>
+</authorizationStrategy>
+
+<securityRealm class="hudson.security.HudsonPrivateSecurityRealm">
+    <disableSignup>true</disableSignup>
+    <enableCaptcha>false</enableCaptcha>
+</securityRealm>
+```
+
+- 重启服务,进入首页,此时系统是免密状态
+- 选择左侧的 `系统管理`,系统会提示你需要配置安全设置:`全局安全配置`
+    - 勾选 `启用安全`
+    - 安全域 > 勾选 `Jenkins专有用户数据库`
+    - 点击保存
+- 重新点击首页:`系统管理`
+    - 点击 `管理用户`
+    - 在用户列表中点击 admin 右侧齿轮
+    - 修改密码,修改后即可重新登录
+- 选择左侧的 `系统管理`,系统会提示你需要配置安全设置:`全局安全配置`
+    - 勾选 `启用安全`
+    - 授权策略 > 勾选 `登录用户可以做任何事` 或 `安全矩阵`
+    - 点击保存
+
 -------------------------------------------------------------------
 
 ## pipeline 语法
@@ -927,3 +958,4 @@ pipeline {
 - 
 - 
 - 
+- 
\ No newline at end of file

From b6973a021761adeef7baa287e295073901dcc639 Mon Sep 17 00:00:00 2001
From: judasn
Date: Sun, 23 Jun 2019 14:43:43 +0800
Subject: [PATCH 083/124] 2019-06-23

---
 markdown-file/Gitlab-Install-And-Settings.md | 107 ++++++++++++++-----
 1 file changed, 80 insertions(+), 27 deletions(-)

diff --git a/markdown-file/Gitlab-Install-And-Settings.md b/markdown-file/Gitlab-Install-And-Settings.md
index f8f3eaab..c9a4c1e3 100644
--- a/markdown-file/Gitlab-Install-And-Settings.md
+++ b/markdown-file/Gitlab-Install-And-Settings.md
@@ -51,40 +51,27 @@ gitlab-postgresql:
 
 - 本质就是把文件、缓存、数据库抽离出来,然后部署多个 Gitlab 用 nginx 前面做负载。
 
-## 原始安装方式
+## 原始安装方式(推荐)
 
-- 
环境: - - CPU:1 core - - 内存:2G -- 我习惯使用 root 用户 +- 推荐至少内存 4G,它有大量组件 - 有开源版本和收费版本,各版本比较: - 官网: - 中文网: - 官网下载: -- 安装的系统环境要求: - - 从文章看目前要求 ruby 2.3,用 yum 版本过低,那就源码安装 ruby 吧,官网当前最新是:2.4.1(大小:14M) - 官网安装说明: -- 安装 ruby - - 下载: - - 解压:`tar zxvf ruby-2.4.1.tar.gz` - - 编译安装: - - `cd ruby-2.4.1` - - `./configure` - - `make`,过程有点慢 - - `make install` - - 默认安装到这个目录:`/usr/local` - - 查看当前版本号:`ruby -v` -- CentOS 6 安装流程: - - 当前(201703)的版本是:`GitLab Community Edition 9.0.0` - - `sudo yum install -y curl openssh-server openssh-clients postfix cronie` - - `sudo service postfix start` - - `sudo chkconfig postfix on` - - `sudo lokkit -s http -s ssh` - - `curl -sS https://packages.gitlab.com/install/repositories/gitlab/gitlab-ce/script.rpm.sh | sudo bash` - - `sudo yum install gitlab-ce`,软件大小:272M,下载速度不稳定 - - `sudo gitlab-ctl reconfigure`,这个过程比较慢 - 如果上面的下载比较慢,也有国内的镜像: - 清华: +- 参考: + +``` +sudo yum install -y curl policycoreutils-python openssh-server + +sudo systemctl enable sshd +sudo systemctl start sshd + +curl https://packages.gitlab.com/install/repositories/gitlab/gitlab-ce/script.rpm.sh | sudo bash +sudo EXTERNAL_URL="http://192.168.1.123:8181" yum install -y gitlab-ce +``` ## 配置 @@ -92,7 +79,10 @@ gitlab-postgresql: - 配置域名 / IP - 编辑配置文件:`sudo vim /etc/gitlab/gitlab.rb` - 找到 13 行左右:`external_url 'http://gitlab.example.com'`,改为你的域名 / IP - - 重启服务:`sudo gitlab-ctl reconfigure` + - 刷新配置:`sudo gitlab-ctl reconfigure`,第一次这个时间会比较久,我花了好几分钟 + - 启动服务:`sudo gitlab-ctl start` + - 停止服务:`sudo gitlab-ctl stop` + - 重启服务:`sudo gitlab-ctl restart` - 前面的初始化配置完成之后,访问当前机子 IP:`http://192.168.1.111:80` - 默认用户是 `root`,并且没有密码,所以第一次访问是让你设置你的 root 密码,我设置为:gitlab123456(至少 8 位数) - 设置会初始化密码之后,你就需要登录了。输入设置的密码。 @@ -234,6 +224,69 @@ gitlab-postgresql: - +## 接入第三方登录 + +- 官网文档: + - + - + - + +- gitlab 自己本身维护一套用户系统,第三方认证服务一套用户系统,gitlab 可以将两者关联起来,然后用户可以选择其中一种方式进行登录而已。 +- 所以,gitlab 第三方认证只能用于网页登录,clone 时仍然使用用户在 gitlab 的账户密码,推荐使用 ssh-key 来操作仓库,不再使用账户密码。 +- 重要参数:block_auto_created_users=true 的时候则自动注册的账户是被锁定的,需要管理员账户手动的为这些账户解锁,可以改为 false +- 编辑配置文件引入第三方:`sudo vim /etc/gitlab/gitlab.rb`,在 309 行有默认的一些注释配置 + - 其中 oauth2_generic 模块默认是没有,需要自己 gem,其他主流的那些都自带,配置即可使用。 + +``` +gitlab_rails['omniauth_enabled'] = true +gitlab_rails['omniauth_allow_single_sign_on'] = ['google_oauth2', 'facebook', 'twitter', 'oauth2_generic'] +gitlab_rails['omniauth_block_auto_created_users'] = false +gitlab_rails['omniauth_sync_profile_attributes'] = ['email','username'] +gitlab_rails['omniauth_external_providers'] = ['google_oauth2', 'facebook', 'twitter', 'oauth2_generic'] +gitlab_rails['omniauth_providers'] = [ + { + "name"=> "google_oauth2", + "label"=> "Google", + "app_id"=> "123456", + "app_secret"=> "123456", + "args"=> { + "access_type"=> 'offline', + "approval_prompt"=> '123456' + } + }, + { + "name"=> "facebook", + "label"=> "facebook", + "app_id"=> "123456", + "app_secret"=> "123456" + }, + { + "name"=> "twitter", + "label"=> "twitter", + "app_id"=> "123456", + "app_secret"=> "123456" + }, + { + "name" => "oauth2_generic", + "app_id" => "123456", + "app_secret" => "123456", + "args" => { + client_options: { + "site" => "http://sso.cdk8s.com:9090/sso", + "user_info_url" => "/oauth/userinfo" + }, + user_response_structure: { + root_path: ["user_attribute"], + attributes: { + "nickname": "username" + } + } + } + } +] + +``` + ## 资料 From aa7bcfaaad03ef73092e564712d6e01bd3d163e1 Mon Sep 17 00:00:00 2001 From: Jared Tan Date: Thu, 27 Jun 2019 22:57:04 +0800 Subject: [PATCH 084/124] update demo dockerfile. 
according https://github.com/apache/skywalking/blob/master/apm-sniffer/config/agent.config#L18. Env vars has changed. --- markdown-file/SkyWalking-Install-And-Settings.md | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/markdown-file/SkyWalking-Install-And-Settings.md b/markdown-file/SkyWalking-Install-And-Settings.md index ad6cd074..db9cf77c 100644 --- a/markdown-file/SkyWalking-Install-And-Settings.md +++ b/markdown-file/SkyWalking-Install-And-Settings.md @@ -146,22 +146,21 @@ FROM openjdk:8-jre-alpine LABEL maintainer="tanjian20150101@gmail.com" -ENV SW_APPLICATION_CODE=java-agent-demo \ - SW_COLLECTOR_SERVERS=localhost:11800 +ENV SW_AGENT_NAMESPACE=java-agent-demo \ + SW_AGENT_COLLECTOR_BACKEND_SERVICES=localhost:11800 -COPY skywalking-agent /apache-skywalking-apm-incubating/agent +COPY skywalking-agent /apache-skywalking-apm-bin/agent COPY target/sky-demo-1.0-SNAPSHOT.jar /demo.jar -ENTRYPOINT java -javaagent:/apache-skywalking-apm-incubating/agent/skywalking-agent.jar -Dskywalking.collector.backend_service=${SW_COLLECTOR_SERVERS} \ --Dskywalking.agent.application_code=${SW_APPLICATION_CODE} -jar /demo.jar +ENTRYPOINT java -javaagent:/apache-skywalking-apm-bin/agent/skywalking-agent.jar -jar /demo.jar ``` - 构建镜像: ``` docker build -t hello-demo . -docker run -p 10101:10101 -e SW_APPLICATION_CODE=hello-world-demo-005 -e SW_COLLECTOR_SERVERS=127.10.0.2:11800 hello-demo +docker run -p 10101:10101 -e SW_AGENT_NAMESPACE=hello-world-demo-005 -e SW_AGENT_COLLECTOR_BACKEND_SERVICES=127.10.0.2:11800 hello-demo ``` From 70a343c10eac6186d5247e59e70fdafaf32fc669 Mon Sep 17 00:00:00 2001 From: zhang Date: Wed, 3 Jul 2019 11:45:13 +0800 Subject: [PATCH 085/124] 2019-06-13 --- markdown-file/Mysql-Test.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/markdown-file/Mysql-Test.md b/markdown-file/Mysql-Test.md index 2a5799e0..f18d3f0f 100644 --- a/markdown-file/Mysql-Test.md +++ b/markdown-file/Mysql-Test.md @@ -199,6 +199,8 @@ cd tpcc-mysql/src make 如果make没报错,就会在tpcc-mysql 根目录文件夹下生成tpcc二进制命令行工具tpcc_load、tpcc_start + +如果要同时支持 PgSQL 可以考虑:https://github.com/Percona-Lab/sysbench-tpcc ``` ### 测试的几个表介绍 From 3cb44898266debef177522ff5799e9cdec5355a7 Mon Sep 17 00:00:00 2001 From: zhang Date: Fri, 5 Jul 2019 10:58:31 +0800 Subject: [PATCH 086/124] Influxdb --- README.md | 1 + SUMMARY.md | 1 + TOC.md | 1 + .../Influxdb-Install-And-Settings.md | 62 +++++++++++++++++++ 4 files changed, 65 insertions(+) create mode 100644 markdown-file/Influxdb-Install-And-Settings.md diff --git a/README.md b/README.md index 9301dbb6..eaed7384 100644 --- a/README.md +++ b/README.md @@ -107,6 +107,7 @@ - [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md) - [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) - [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) +- [Influxdb 安装和配置](markdown-file/Influxdb-Install-And-Settings.md) - [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md) - [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md) - [Wormhole + Flink 安装和配置](markdown-file/Wormhole-Install-And-Settings.md) diff --git a/SUMMARY.md b/SUMMARY.md index ef4a9f48..c72d20ab 100644 --- a/SUMMARY.md +++ b/SUMMARY.md @@ -87,6 +87,7 @@ * [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md) * [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) * [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) +* [Influxdb 安装和配置](markdown-file/Influxdb-Install-And-Settings.md) * [Grafana 
安装和配置](markdown-file/Grafana-Install-And-Settings.md) * [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md) * [Wormhole + Flink 安装和配置](markdown-file/Wormhole-Install-And-Settings.md) diff --git a/TOC.md b/TOC.md index 0b6b3709..078416b0 100644 --- a/TOC.md +++ b/TOC.md @@ -84,6 +84,7 @@ - [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md) - [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) - [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) +- [Influxdb 安装和配置](markdown-file/Influxdb-Install-And-Settings.md) - [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md) - [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md) - [Wormhole + Flink 安装和配置](markdown-file/Wormhole-Install-And-Settings.md) diff --git a/markdown-file/Influxdb-Install-And-Settings.md b/markdown-file/Influxdb-Install-And-Settings.md new file mode 100644 index 00000000..ea39cc70 --- /dev/null +++ b/markdown-file/Influxdb-Install-And-Settings.md @@ -0,0 +1,62 @@ +# Influxdb 安装和配置 + + + +## Influxdb Docker 安装 + +- 官网库: + + +``` +docker run -d --name influxdb \ +-p 8086:8086 -p 8083:8083 \ +-e INFLUXDB_HTTP_AUTH_ENABLED=true \ +-e INFLUXDB_ADMIN_ENABLED=true -e INFLUXDB_ADMIN_USER=admin -e INFLUXDB_ADMIN_PASSWORD=123456 \ +-e INFLUXDB_DB=mydb1 \ +-v /Users/gitnavi/docker_data/influxdb/data:/var/lib/influxdb influxdb +``` + + +- 进入终端交互: + +``` +docker exec -it influxdb /bin/bash + +输入:influx,开始终端交互 + +auth admin 123456 +show databases; + +如果你要再额外创建数据库: +create database demo + +如果你要再创建用户: +create user "myuser" with password '123456' with all privileges +``` + + +---------------------------------------------------------------------------------------------- + +## 配置 + + + +---------------------------------------------------------------------------------------------- + + + +---------------------------------------------------------------------------------------------- + + +## 其他资料 + +- +- <> +- <> +- <> +- <> +- <> +- <> +- <> +- <> + From 9179150a6522b71aa3ae42f781e0d60eed06dc09 Mon Sep 17 00:00:00 2001 From: zhang Date: Fri, 5 Jul 2019 11:16:30 +0800 Subject: [PATCH 087/124] Influxdb --- markdown-file/Grafana-Install-And-Settings.md | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/markdown-file/Grafana-Install-And-Settings.md b/markdown-file/Grafana-Install-And-Settings.md index 095c2b27..ad5a4416 100644 --- a/markdown-file/Grafana-Install-And-Settings.md +++ b/markdown-file/Grafana-Install-And-Settings.md @@ -6,6 +6,28 @@ - [支持的 Elasticsearch 版本](http://docs.grafana.org/features/datasources/elasticsearch/#elasticsearch-version) +## Grafana Docker 安装 + +- 官网: + +``` +docker run -d --name grafana -p 3000:3000 -v /Users/gitnavi/docker_data/grafana/data grafana/grafana + +docker exec -it grafana /bin/bash + +容器中默认的配置文件位置:/etc/grafana/grafana.ini + +复制出配置文件到宿主机:docker cp grafana:/etc/grafana/grafana.ini /Users/gitnavi/ +``` + +- +- 默认管理账号;admin,密码:admin,第一次登录后需要修改密码,也可以通过配置文件修改 + +``` +[security] +admin_user = admin +admin_password = admin +``` ---------------------------------------------------------------------------------------------- ## Grafana 安装 From 7d28eea60e1d6418178e50d794d29b30c39a87fd Mon Sep 17 00:00:00 2001 From: zhang Date: Fri, 5 Jul 2019 14:15:09 +0800 Subject: [PATCH 088/124] Influxdb --- markdown-file/Influxdb-Install-And-Settings.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/markdown-file/Influxdb-Install-And-Settings.md b/markdown-file/Influxdb-Install-And-Settings.md index ea39cc70..82fe262d 100644 --- 
a/markdown-file/Influxdb-Install-And-Settings.md +++ b/markdown-file/Influxdb-Install-And-Settings.md @@ -27,6 +27,14 @@ docker exec -it influxdb /bin/bash auth admin 123456 show databases; +use springboot +show measurements + +show series from "jvm_buffer_total_capacity" + +select * from "jvm_buffer_total_capacity" + + 如果你要再额外创建数据库: create database demo From 9e9de520bc8529c6d29f9a81ac84677b4aee703c Mon Sep 17 00:00:00 2001 From: zhang Date: Fri, 5 Jul 2019 14:21:31 +0800 Subject: [PATCH 089/124] Influxdb --- markdown-file/Grafana-Install-And-Settings.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/markdown-file/Grafana-Install-And-Settings.md b/markdown-file/Grafana-Install-And-Settings.md index ad5a4416..ed963acd 100644 --- a/markdown-file/Grafana-Install-And-Settings.md +++ b/markdown-file/Grafana-Install-And-Settings.md @@ -84,6 +84,11 @@ sudo systemctl status grafana-server - 个性化设置: - 软件变量: +## 官网 dashboard + +- dashboar仓库地址: +- 本地可以通过输入 dashboard id 导入别人模板 + ---------------------------------------------------------------------------------------------- From a1dac80519573e12a364013d0a60bbd06c1325ec Mon Sep 17 00:00:00 2001 From: zhang Date: Fri, 5 Jul 2019 15:34:17 +0800 Subject: [PATCH 090/124] Prometheus --- README.md | 1 + SUMMARY.md | 1 + TOC.md | 1 + markdown-file/Grafana-Install-And-Settings.md | 3 +- .../Prometheus-Install-And-Settings.md | 81 +++++++++++++++++++ 5 files changed, 86 insertions(+), 1 deletion(-) create mode 100644 markdown-file/Prometheus-Install-And-Settings.md diff --git a/README.md b/README.md index eaed7384..e99786b4 100644 --- a/README.md +++ b/README.md @@ -108,6 +108,7 @@ - [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) - [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) - [Influxdb 安装和配置](markdown-file/Influxdb-Install-And-Settings.md) +- [Prometheus 安装和配置](markdown-file/Prometheus-Install-And-Settings.md) - [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md) - [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md) - [Wormhole + Flink 安装和配置](markdown-file/Wormhole-Install-And-Settings.md) diff --git a/SUMMARY.md b/SUMMARY.md index c72d20ab..d034445b 100644 --- a/SUMMARY.md +++ b/SUMMARY.md @@ -88,6 +88,7 @@ * [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) * [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) * [Influxdb 安装和配置](markdown-file/Influxdb-Install-And-Settings.md) +* [Prometheus 安装和配置](markdown-file/Prometheus-Install-And-Settings.md) * [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md) * [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md) * [Wormhole + Flink 安装和配置](markdown-file/Wormhole-Install-And-Settings.md) diff --git a/TOC.md b/TOC.md index 078416b0..434c20f7 100644 --- a/TOC.md +++ b/TOC.md @@ -85,6 +85,7 @@ - [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) - [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) - [Influxdb 安装和配置](markdown-file/Influxdb-Install-And-Settings.md) +- [Prometheus 安装和配置](markdown-file/Prometheus-Install-And-Settings.md) - [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md) - [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md) - [Wormhole + Flink 安装和配置](markdown-file/Wormhole-Install-And-Settings.md) diff --git a/markdown-file/Grafana-Install-And-Settings.md b/markdown-file/Grafana-Install-And-Settings.md index ed963acd..3370630a 100644 --- a/markdown-file/Grafana-Install-And-Settings.md +++ b/markdown-file/Grafana-Install-And-Settings.md @@ 
-88,7 +88,8 @@ sudo systemctl status grafana-server - dashboar仓库地址: - 本地可以通过输入 dashboard id 导入别人模板 - +- 打开: + - 输入对应的 id,点击 Load 即可 ---------------------------------------------------------------------------------------------- diff --git a/markdown-file/Prometheus-Install-And-Settings.md b/markdown-file/Prometheus-Install-And-Settings.md new file mode 100644 index 00000000..a5d36f4c --- /dev/null +++ b/markdown-file/Prometheus-Install-And-Settings.md @@ -0,0 +1,81 @@ +# Prometheus 安装和配置 + +## Prometheus Docker 安装 + +- 官网: +- 这里以 Spring Boot Metrics 为收集信息 +- 创建配置文件:/Users/gitnavi/docker_data/prometheus/config/prometheus.yml +- 在 scrape_configs 位置下增加我们自己应用的路径信息 + +``` +# my global config +global: + scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute. + evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. + # scrape_timeout is set to the global default (10s). + +# Alertmanager configuration +alerting: + alertmanagers: + - static_configs: + - targets: + # - alertmanager:9093 + +# Load rules once and periodically evaluate them according to the global 'evaluation_interval'. +rule_files: + # - "first_rules.yml" + # - "second_rules.yml" + +# A scrape configuration containing exactly one endpoint to scrape: +# Here it's Prometheus itself. +scrape_configs: + - job_name: 'springboot' + metrics_path: '/tkey-actuator/actuator/prometheus' + static_configs: + - targets: ['192.168.2.225:8811'] +``` + +- 启动 + +``` +docker run -d --name prometheus -p 9091:9090 \ +-v /Users/gitnavi/docker_data/prometheus/config/prometheus.yml:/etc/prometheus/prometheus.yml \ +prom/prometheus +``` + +- 然后配置 Grafana,使用这个 dashboard: + + +---------------------------------------------------------------------------------------------- + +## 配置 + + +### 微服务下的多服务收集 + +- + + +### 告警 + +- +- + +---------------------------------------------------------------------------------------------- + + + +---------------------------------------------------------------------------------------------- + + +## 其他资料 + +- <> +- <> +- <> +- <> +- <> +- <> +- <> +- <> + From b5c52a9ac736fe40f46e1d3145d8bb17b72aa830 Mon Sep 17 00:00:00 2001 From: judasn Date: Sat, 6 Jul 2019 00:26:08 +0800 Subject: [PATCH 091/124] 2019-07-06 --- markdown-file/Grafana-Install-And-Settings.md | 6 +- .../Prometheus-Install-And-Settings.md | 207 +++++++++++++++++- markdown-file/wrk-Install-And-Settings.md | 18 +- 3 files changed, 215 insertions(+), 16 deletions(-) diff --git a/markdown-file/Grafana-Install-And-Settings.md b/markdown-file/Grafana-Install-And-Settings.md index 3370630a..c0c12ae3 100644 --- a/markdown-file/Grafana-Install-And-Settings.md +++ b/markdown-file/Grafana-Install-And-Settings.md @@ -11,12 +11,14 @@ - 官网: ``` -docker run -d --name grafana -p 3000:3000 -v /Users/gitnavi/docker_data/grafana/data grafana/grafana +mkdir -p /data/docker/grafana/data +chmod 777 -R /data/docker/grafana/data + +docker run -d --name grafana -p 3000:3000 -v /data/docker/grafana/data:/var/lib/grafana grafana/grafana docker exec -it grafana /bin/bash 容器中默认的配置文件位置:/etc/grafana/grafana.ini - 复制出配置文件到宿主机:docker cp grafana:/etc/grafana/grafana.ini /Users/gitnavi/ ``` diff --git a/markdown-file/Prometheus-Install-And-Settings.md b/markdown-file/Prometheus-Install-And-Settings.md index a5d36f4c..ea5633f9 100644 --- a/markdown-file/Prometheus-Install-And-Settings.md +++ b/markdown-file/Prometheus-Install-And-Settings.md @@ -1,10 +1,12 @@ # Prometheus 安装和配置 +- 特别说明:一般这类环境要尽可能保证所有服务器时间一致 + ## Prometheus Docker 
安装 - 官网: - 这里以 Spring Boot Metrics 为收集信息 -- 创建配置文件:/Users/gitnavi/docker_data/prometheus/config/prometheus.yml +- 创建配置文件:`vim /data/docker/prometheus/config/prometheus.yml` - 在 scrape_configs 位置下增加我们自己应用的路径信息 ``` @@ -39,7 +41,7 @@ scrape_configs: ``` docker run -d --name prometheus -p 9091:9090 \ --v /Users/gitnavi/docker_data/prometheus/config/prometheus.yml:/etc/prometheus/prometheus.yml \ +-v /data/docker/prometheus/config/prometheus.yml:/etc/prometheus/prometheus.yml \ prom/prometheus ``` @@ -50,18 +52,212 @@ prom/prometheus ## 配置 +- 官网 exporter 列表: +- 官网 exporter 暴露的端口列表: + + +### CentOS7 服务器 + +- 当前最新版本:node_exporter 0.18.1(201907) + +``` +mkdir -p /usr/local/prometheus/node_exporter + +cd /usr/local/prometheus/node_exporter + +wget https://github.com/prometheus/node_exporter/releases/download/v0.18.1/node_exporter-0.18.1.linux-amd64.tar.gz + +tar -zxvf node_exporter-0.18.1.linux-amd64.tar.gz + +``` + + +``` +创建Systemd服务 +vim /etc/systemd/system/node_exporter.service + + + +[Unit] +Description=node_exporter +After=network.target + +[Service] +Type=simple +User=root +ExecStart=/usr/local/prometheus/node_exporter/node_exporter-0.18.1.linux-amd64/node_exporter +Restart=on-failure + +[Install] +WantedBy=multi-user.target +``` + +- 关于 ExecStart 参数,可以再附带一些启动监控的参数,官网介绍: + - 格式:`ExecStart=/usr/local/prometheus/node_exporter/node_exporter-0.18.1.linux-amd64/node_exporter --collectors.enabled meminfo,hwmon,entropy` + + +``` +启动 Node exporter +systemctl start node_exporter + +systemctl daemon-reload + +systemctl status node_exporter + +``` + + +``` +修改prometheus.yml,加入下面的监控目标: + +vim /usr/local/prometheus/prometheus.yml + +scrape_configs: + - job_name: 'centos7' + static_configs: + - targets: ['127.0.0.1:9100'] + labels: + instance: centos7_node1 + +``` + +- 重启 prometheus:`docker restart prometheus` +- Grafana 有现成的 dashboard: + - + - + +---------------------------------------------------------------------------------------------- + + +### Nginx 指标 + +- 这里使用 Nginx VTS exporter: + +- 安装 nginx 模块: + +``` +git clone --depth=1 https://github.com/vozlt/nginx-module-vts.git + + +编译 nginx 的时候加上: +./configure --prefix=/usr/local/nginx --with-http_ssl_module --add-module=/opt/nginx-module-vts + +make(已经安装过了,就不要再 make install) + +``` + +``` +修改Nginx配置 + + +http { + vhost_traffic_status_zone; + vhost_traffic_status_filter_by_host on; + + ... + + server { + + ... + + location /status { + vhost_traffic_status_display; + vhost_traffic_status_display_format html; + } + } +} + + +验证nginx-module-vts模块:http://IP/status + +``` + +``` +如果不想统计流量的server,可以禁用vhost_traffic_status,配置示例: +server { + ... + vhost_traffic_status off; + ... 
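+        # (补充注释,非原文内容)vhost_traffic_status 指令来自 nginx-module-vts 模块,
+        # 这里的 off 只对当前 server 块生效,其他 server 仍会照常统计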
+} +``` + + +- 安装 nginx-vts-exporter + +``` +wget -O nginx-vts-exporter-0.5.zip https://github.com/hnlq715/nginx-vts-exporter/archive/v0.5.zip +unzip nginx-vts-exporter-0.5.zip +mv nginx-vts-exporter-0.5 /usr/local/prometheus/nginx-vts-exporter +chmod +x /usr/local/prometheus/nginx-vts-exporter/bin/nginx-vts-exporter + +``` + +``` +创建Systemd服务 +vim /etc/systemd/system/nginx_vts_exporter.service + + +[Unit] +Description=nginx_exporter +After=network.target + +[Service] +Type=simple +User=root +ExecStart=/usr/local/prometheus/nginx-vts-exporter/bin/nginx-vts-exporter -nginx.scrape_uri=http://localhost/status/format/json +Restart=on-failure + +[Install] +WantedBy=multi-user.target +``` + + +``` +启动nginx-vts-exporter +systemctl start nginx_vts_exporter.service +systemctl status nginx_vts_exporter.service +``` + + +``` +修改prometheus.yml,加入下面的监控目标: + +- job_name: nginx + static_configs: + - targets: ['127.0.0.1:9913'] + labels: + instance: web1 + +``` + +- 重启 prometheus:`docker restart prometheus` +- Grafana 有现成的 dashboard: + - + - + +---------------------------------------------------------------------------------------------- + + ### 微服务下的多服务收集 - +---------------------------------------------------------------------------------------------- + ### 告警 - - ----------------------------------------------------------------------------------------------- +- 告警配置 + +- 告警检测 + +- [Grafana+Prometheus系统监控之邮件报警功能](https://blog.52itstyle.vip/archives/2014/) +- [Grafana+Prometheus系统监控之钉钉报警功能](https://blog.52itstyle.vip/archives/2029/) +- [Grafana+Prometheus系统监控之webhook](https://blog.52itstyle.vip/archives/2068/) @@ -70,8 +266,9 @@ prom/prometheus ## 其他资料 -- <> -- <> +- + - 写得非常非常非常好 +- - <> - <> - <> diff --git a/markdown-file/wrk-Install-And-Settings.md b/markdown-file/wrk-Install-And-Settings.md index 0b96c3c7..f73c6330 100644 --- a/markdown-file/wrk-Install-And-Settings.md +++ b/markdown-file/wrk-Install-And-Settings.md @@ -26,19 +26,19 @@ sudo cp wrk /usr/local/bin ## 使用 -- 启用 10 个线程,每个线程发起 100 个连接,持续 15 秒:`wrk -t10 -c100 -d15s http://www.baidu.com` +- 启用 10 个线程,每个线程发起 100 个连接,持续 15 秒:`wrk -t5 -c5 -d30s http://www.baidu.com` - 最终报告: ``` -Running 15s test @ http://www.baidu.com - 10 threads and 100 connections +Running 30s test @ http://www.baidu.com + 5 threads and 5 connections Thread Stats Avg Stdev Max +/- Stdev - Latency 208.39ms 324.00ms 1.91s 87.70% - Req/Sec 82.68 64.81 414.00 70.60% - 11345 requests in 15.02s, 166.51MB read - Socket errors: connect 0, read 20, write 0, timeout 59 -Requests/sec: 755.26 -Transfer/sec: 11.08MB + Latency 44.59ms 17.41ms 331.91ms 95.66% + Req/Sec 23.11 5.77 30.00 57.04% + 3439 requests in 30.03s, 50.47MB read + Socket errors: connect 0, read 10, write 0, timeout 0 +Requests/sec: 114.52 +Transfer/sec: 1.68MB ``` #### 使用 lua 脚本(发送一个 post 请求) From bdce133e17b61969d4a6b37bc45a250a1804fde3 Mon Sep 17 00:00:00 2001 From: zhang Date: Sat, 6 Jul 2019 17:07:05 +0800 Subject: [PATCH 092/124] Prometheus --- markdown-file/Nginx-Install-And-Settings.md | 20 ++++++++++ .../Prometheus-Install-And-Settings.md | 40 +++++++++++++------ 2 files changed, 47 insertions(+), 13 deletions(-) diff --git a/markdown-file/Nginx-Install-And-Settings.md b/markdown-file/Nginx-Install-And-Settings.md index 71cb04a4..1fdf60cf 100644 --- a/markdown-file/Nginx-Install-And-Settings.md +++ b/markdown-file/Nginx-Install-And-Settings.md @@ -102,6 +102,26 @@ http { ------------------------------------------------------------------- +## Nginx 源码编译安装(带 Prometheus 模块) + +``` +./configure \ 
+--prefix=/usr/local/nginx \ +--pid-path=/var/local/nginx/nginx.pid \ +--lock-path=/var/lock/nginx/nginx.lock \ +--error-log-path=/var/log/nginx/error.log \ +--http-log-path=/var/log/nginx/access.log \ +--with-http_gzip_static_module \ +--http-client-body-temp-path=/var/temp/nginx/client \ +--http-proxy-temp-path=/var/temp/nginx/proxy \ +--http-fastcgi-temp-path=/var/temp/nginx/fastcgi \ +--http-uwsgi-temp-path=/var/temp/nginx/uwsgi \ +--with-http_ssl_module \ +--with-http_stub_status_module \ +--http-scgi-temp-path=/var/temp/nginx/scgi \ +--add-module=/usr/local/nginx-module-vts +``` + ## Nginx 源码编译安装(带监控模块) diff --git a/markdown-file/Prometheus-Install-And-Settings.md b/markdown-file/Prometheus-Install-And-Settings.md index ea5633f9..d7f91e88 100644 --- a/markdown-file/Prometheus-Install-And-Settings.md +++ b/markdown-file/Prometheus-Install-And-Settings.md @@ -110,12 +110,12 @@ systemctl status node_exporter ``` 修改prometheus.yml,加入下面的监控目标: -vim /usr/local/prometheus/prometheus.yml +vim /data/docker/prometheus/config/prometheus.yml scrape_configs: - job_name: 'centos7' static_configs: - - targets: ['127.0.0.1:9100'] + - targets: ['192.168.1.3:9100'] labels: instance: centos7_node1 @@ -143,9 +143,17 @@ git clone --depth=1 https://github.com/vozlt/nginx-module-vts.git ./configure --prefix=/usr/local/nginx --with-http_ssl_module --add-module=/opt/nginx-module-vts make(已经安装过了,就不要再 make install) +``` + + +``` +也有人做好了 docker 镜像: +https://hub.docker.com/r/xcgd/nginx-vts +docker run --name nginx-vts -p 80:80 -v /data/docker/nginx/conf/nginx.conf:/etc/nginx/nginx.conf:ro -d xcgd/nginx-vts ``` + ``` 修改Nginx配置 @@ -168,7 +176,8 @@ http { } -验证nginx-module-vts模块:http://IP/status +验证nginx-module-vts模块:http://192.168.1.3/status,会展示: +Nginx Vhost Traffic Status 统计表 ``` @@ -185,11 +194,13 @@ server { - 安装 nginx-vts-exporter ``` -wget -O nginx-vts-exporter-0.5.zip https://github.com/hnlq715/nginx-vts-exporter/archive/v0.5.zip -unzip nginx-vts-exporter-0.5.zip -mv nginx-vts-exporter-0.5 /usr/local/prometheus/nginx-vts-exporter -chmod +x /usr/local/prometheus/nginx-vts-exporter/bin/nginx-vts-exporter +官网版本:https://github.com/hnlq715/nginx-vts-exporter/releases + +wget https://github.com/hnlq715/nginx-vts-exporter/releases/download/v0.10.3/nginx-vts-exporter-0.10.3.linux-amd64.tar.gz +tar zxvf nginx-vts-exporter-0.10.3.linux-amd64.tar.gz + +chmod +x /usr/local/nginx-vts-exporter-0.10.3.linux-amd64/nginx-vts-exporter ``` ``` @@ -204,7 +215,7 @@ After=network.target [Service] Type=simple User=root -ExecStart=/usr/local/prometheus/nginx-vts-exporter/bin/nginx-vts-exporter -nginx.scrape_uri=http://localhost/status/format/json +ExecStart=/usr/local/nginx-vts-exporter-0.10.3.linux-amd64/nginx-vts-exporter -nginx.scrape_uri=http://192.168.1.3/status/format/json Restart=on-failure [Install] @@ -215,18 +226,21 @@ WantedBy=multi-user.target ``` 启动nginx-vts-exporter systemctl start nginx_vts_exporter.service +systemctl daemon-reload systemctl status nginx_vts_exporter.service ``` ``` -修改prometheus.yml,加入下面的监控目标: +修改 prometheus.yml,加入下面的监控目标: +vim /data/docker/prometheus/config/prometheus.yml -- job_name: nginx +scrape_configs: + - job_name: 'nginx' static_configs: - - targets: ['127.0.0.1:9913'] - labels: - instance: web1 + - targets: ['192.168.1.3:9913'] + labels: + instance: nginx1 ``` From dcf0a257a334135c6cc99bae4d005a2eee3dbd00 Mon Sep 17 00:00:00 2001 From: zhang Date: Sat, 6 Jul 2019 17:42:09 +0800 Subject: [PATCH 093/124] Prometheus --- markdown-file/Nginx-Install-And-Settings.md | 21 +++++++++++++++++++++ 1 
file changed, 21 insertions(+) diff --git a/markdown-file/Nginx-Install-And-Settings.md b/markdown-file/Nginx-Install-And-Settings.md index 1fdf60cf..f54cbdb0 100644 --- a/markdown-file/Nginx-Install-And-Settings.md +++ b/markdown-file/Nginx-Install-And-Settings.md @@ -310,6 +310,27 @@ upgrade: - 更新 `make upgrade` +## 为 Nginx 添加 basic_auth + +``` +yum install httpd-tools + +htpasswd -c /opt/nginx-auth/passwd.db myusername,回车之后输入两次密码 + + +server { + ... + + location / { + auth_basic "please input you user name and password"; + auth_basic_user_file /opt/nginx-auth/passwd.db; + .... + } +} + +``` + + ## Nginx 全局变量 - $arg_PARAMETER #这个变量包含GET请求中,如果有变量PARAMETER时的值。 From 36058aafaf66b2eb9acfa6ba3a11410e40642432 Mon Sep 17 00:00:00 2001 From: zhang Date: Sat, 6 Jul 2019 17:45:22 +0800 Subject: [PATCH 094/124] Prometheus --- markdown-file/Prometheus-Install-And-Settings.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/markdown-file/Prometheus-Install-And-Settings.md b/markdown-file/Prometheus-Install-And-Settings.md index d7f91e88..36d2e642 100644 --- a/markdown-file/Prometheus-Install-And-Settings.md +++ b/markdown-file/Prometheus-Install-And-Settings.md @@ -242,6 +242,19 @@ scrape_configs: labels: instance: nginx1 + +如果nginx 有加 basic auth,则需要这样: +scrape_configs: + - job_name: "nginx" + metrics_path: /status/format/prometheus + basic_auth: + username: youmeek + password: '123456' + static_configs: + - targets: ['192.168.1.3:9913'] + labels: + instance: 'nginx1' + ``` - 重启 prometheus:`docker restart prometheus` From 8a5b8aa26b7ce60dbb97eff6b8724c92304a4458 Mon Sep 17 00:00:00 2001 From: zhang Date: Sat, 6 Jul 2019 18:15:27 +0800 Subject: [PATCH 095/124] Prometheus --- markdown-file/Prometheus-Install-And-Settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/markdown-file/Prometheus-Install-And-Settings.md b/markdown-file/Prometheus-Install-And-Settings.md index 36d2e642..9ccc8c6f 100644 --- a/markdown-file/Prometheus-Install-And-Settings.md +++ b/markdown-file/Prometheus-Install-And-Settings.md @@ -296,7 +296,7 @@ scrape_configs: - - 写得非常非常非常好 - -- <> +- - <> - <> - <> From 8c97b20723e931d68567015a6bb91e16169ebfb9 Mon Sep 17 00:00:00 2001 From: judasn Date: Sat, 6 Jul 2019 22:44:26 +0800 Subject: [PATCH 096/124] Prometheus --- markdown-file/Prometheus-Install-And-Settings.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/markdown-file/Prometheus-Install-And-Settings.md b/markdown-file/Prometheus-Install-And-Settings.md index 9ccc8c6f..93b0b9b3 100644 --- a/markdown-file/Prometheus-Install-And-Settings.md +++ b/markdown-file/Prometheus-Install-And-Settings.md @@ -1,6 +1,9 @@ # Prometheus 安装和配置 +- 不错的发展史说明: - 特别说明:一般这类环境要尽可能保证所有服务器时间一致 +- Prometheus 本地存储不适合存长久数据,一般存储一个月就够了。要永久存储需要用到远端存储,远端存储可以用 OpenTSDB +- Prometheus 也不适合做日志存储,日志存储还是推荐 ELK 方案 ## Prometheus Docker 安装 @@ -287,6 +290,10 @@ scrape_configs: - [Grafana+Prometheus系统监控之webhook](https://blog.52itstyle.vip/archives/2068/) +## 远端存储方案 + +- + ---------------------------------------------------------------------------------------------- From e924e95351adc3f7eb6a43ccc8ba96ca69e61d41 Mon Sep 17 00:00:00 2001 From: zhang Date: Wed, 17 Jul 2019 10:20:26 +0800 Subject: [PATCH 097/124] Elasticsearch --- markdown-file/Elasticsearch-Base.md | 39 +++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md index 05cc86de..b9ef2d45 100644 --- a/markdown-file/Elasticsearch-Base.md +++ 
b/markdown-file/Elasticsearch-Base.md @@ -3,10 +3,15 @@ ## Docker 单节点部署 - 官网: +- 官网列表: +- 阿里云支持版本: - 7.x:7.1.0 - 6.x:6.8.0 - 5.x:5.6.8 - 注意:docker 版本下 client.transport.sniff = true 是无效的。 + +#### 5.6.x + - `vim ~/elasticsearch-5.6.8-docker.yml` - 启动:`docker-compose -f ~/elasticsearch-5.6.8-docker.yml -p elasticsearch_5.6.8 up -d` @@ -38,6 +43,40 @@ services: ``` +#### 6.7.x + +- `vim ~/elasticsearch-6.7.2-docker.yml` +- 启动:`docker-compose -f ~/elasticsearch-6.7.2-docker.yml -p elasticsearch_6.7.2 up -d` +- `mkdir -p /data/docker/elasticsearch-6.7.2/data` + +``` +version: '3' +services: + elasticsearch1: + image: docker pull docker.elastic.co/elasticsearch/elasticsearch:6.7.2 + container_name: elasticsearch1 + environment: + - "ES_JAVA_OPTS=-Xms512m -Xmx512m" + - "cluster.name=elasticsearch" + - "network.host=0.0.0.0" + - "http.host=0.0.0.0" + - "xpack.security.enabled=false" + ulimits: + memlock: + soft: -1 + hard: -1 + nofile: + soft: 65536 + hard: 65536 + ports: + - 9200:9200 + - 9300:9300 + volumes: + - /data/docker/elasticsearch-6.7.2/data:/usr/share/elasticsearch/data + +``` + + ------------------------------------------------------------------- From 5694f0a85641e9cfeaf923aff88304e70c83b777 Mon Sep 17 00:00:00 2001 From: zhang Date: Wed, 17 Jul 2019 10:23:07 +0800 Subject: [PATCH 098/124] Elasticsearch --- markdown-file/Elasticsearch-Base.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md index b9ef2d45..36c86f1a 100644 --- a/markdown-file/Elasticsearch-Base.md +++ b/markdown-file/Elasticsearch-Base.md @@ -20,7 +20,7 @@ version: '3' services: elasticsearch1: image: docker.elastic.co/elasticsearch/elasticsearch:5.6.8 - container_name: elasticsearch1 + container_name: elasticsearch-5.6.8 environment: - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - "cluster.name=elasticsearch" @@ -54,7 +54,7 @@ version: '3' services: elasticsearch1: image: docker pull docker.elastic.co/elasticsearch/elasticsearch:6.7.2 - container_name: elasticsearch1 + container_name: elasticsearch-6.7.2 environment: - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - "cluster.name=elasticsearch" From 91a1876f352f68745d1e380836d8dab32c883219 Mon Sep 17 00:00:00 2001 From: zhang Date: Wed, 17 Jul 2019 10:23:55 +0800 Subject: [PATCH 099/124] Elasticsearch --- markdown-file/Elasticsearch-Base.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md index 36c86f1a..bb0f52c6 100644 --- a/markdown-file/Elasticsearch-Base.md +++ b/markdown-file/Elasticsearch-Base.md @@ -53,7 +53,7 @@ services: version: '3' services: elasticsearch1: - image: docker pull docker.elastic.co/elasticsearch/elasticsearch:6.7.2 + image: docker.elastic.co/elasticsearch/elasticsearch:6.7.2 container_name: elasticsearch-6.7.2 environment: - "ES_JAVA_OPTS=-Xms512m -Xmx512m" From c13cd7fe4830e9231fa31ba0220c9dcd690fecd3 Mon Sep 17 00:00:00 2001 From: zhang Date: Wed, 17 Jul 2019 10:38:41 +0800 Subject: [PATCH 100/124] Elasticsearch --- markdown-file/Elasticsearch-Base.md | 1 + 1 file changed, 1 insertion(+) diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md index bb0f52c6..ae1bf8f9 100644 --- a/markdown-file/Elasticsearch-Base.md +++ b/markdown-file/Elasticsearch-Base.md @@ -48,6 +48,7 @@ services: - `vim ~/elasticsearch-6.7.2-docker.yml` - 启动:`docker-compose -f ~/elasticsearch-6.7.2-docker.yml -p elasticsearch_6.7.2 up -d` - `mkdir -p 
/data/docker/elasticsearch-6.7.2/data` +- 如果官网镜像比较慢可以换成阿里云:`registry.cn-hangzhou.aliyuncs.com/elasticsearch/elasticsearch:6.7.2` ``` version: '3' From 15086fcde61f2252ca06edf782c2200cdbe14335 Mon Sep 17 00:00:00 2001 From: zhang Date: Wed, 17 Jul 2019 16:01:18 +0800 Subject: [PATCH 101/124] Elasticsearch --- markdown-file/Elasticsearch-Base.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md index ae1bf8f9..f66d8d28 100644 --- a/markdown-file/Elasticsearch-Base.md +++ b/markdown-file/Elasticsearch-Base.md @@ -5,9 +5,7 @@ - 官网: - 官网列表: - 阿里云支持版本: - - 7.x:7.1.0 - - 6.x:6.8.0 - - 5.x:5.6.8 + - 阿里云有一个 `插件配置` 功能,常用的 Elasticsearch 插件都带了,勾选下即可安装。也支持上传安装。 - 注意:docker 版本下 client.transport.sniff = true 是无效的。 #### 5.6.x From 7431a20d35067f52b2c55d7b05227d59c3ecd102 Mon Sep 17 00:00:00 2001 From: judasn Date: Sun, 21 Jul 2019 19:45:05 +0800 Subject: [PATCH 102/124] Elasticsearch --- markdown-file/Elasticsearch-Base.md | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md index f66d8d28..959a7a93 100644 --- a/markdown-file/Elasticsearch-Base.md +++ b/markdown-file/Elasticsearch-Base.md @@ -41,12 +41,13 @@ services: ``` -#### 6.7.x +#### 6.7.x(带 ik 分词) - `vim ~/elasticsearch-6.7.2-docker.yml` - 启动:`docker-compose -f ~/elasticsearch-6.7.2-docker.yml -p elasticsearch_6.7.2 up -d` - `mkdir -p /data/docker/elasticsearch-6.7.2/data` - 如果官网镜像比较慢可以换成阿里云:`registry.cn-hangzhou.aliyuncs.com/elasticsearch/elasticsearch:6.7.2` +- 下载 ik 分词(版本必须和 Elasticsearch 版本对应,包括小版本号): ``` version: '3' @@ -72,7 +73,26 @@ services: - 9300:9300 volumes: - /data/docker/elasticsearch-6.7.2/data:/usr/share/elasticsearch/data + - /data/docker/ik:/usr/share/elasticsearch/plugins/ik +``` + +- Elasticsearch Head 插件地址: +- 测试: + + +``` +http://localhost:9200/ +_analyze?pretty POST + +{"analyzer":"ik_smart","text":"安徽省长江流域"} +``` + +- ik_max_word 和 ik_smart 什么区别? + +``` +ik_max_word: 会将文本做最细粒度的拆分,比如会将“中华人民共和国国歌”拆分为“中华人民共和国,中华人民,中华,华人,人民共和国,人民,人,民,共和国,共和,和,国国,国歌”,会穷尽各种可能的组合,适合 Term Query; +ik_smart: 会做最粗粒度的拆分,比如会将“中华人民共和国国歌”拆分为“中华人民共和国,国歌”,适合 Phrase 查询。 ``` From e6559ace64791755c5871b9db88273c8d9008df6 Mon Sep 17 00:00:00 2001 From: judasn Date: Sat, 27 Jul 2019 08:59:52 +0800 Subject: [PATCH 103/124] Elasticsearch --- markdown-file/Prometheus-Install-And-Settings.md | 1 + 1 file changed, 1 insertion(+) diff --git a/markdown-file/Prometheus-Install-And-Settings.md b/markdown-file/Prometheus-Install-And-Settings.md index 93b0b9b3..7780838c 100644 --- a/markdown-file/Prometheus-Install-And-Settings.md +++ b/markdown-file/Prometheus-Install-And-Settings.md @@ -8,6 +8,7 @@ ## Prometheus Docker 安装 - 官网: +- Docker 官方镜像: - 这里以 Spring Boot Metrics 为收集信息 - 创建配置文件:`vim /data/docker/prometheus/config/prometheus.yml` - 在 scrape_configs 位置下增加我们自己应用的路径信息 From 31a876dfae112ba1de1ae997c54894db7732b55c Mon Sep 17 00:00:00 2001 From: "Judas.n" Date: Tue, 6 Aug 2019 10:12:57 +0800 Subject: [PATCH 104/124] Update README.md --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index e99786b4..ed57b20f 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,11 @@ +## 声明 + +- 2019-08-19 周一 +- 我将发布这两年来第一个新的 **大专题**,还是跟 IT 行业相关,请关注!!! 
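> 补充示例(非原文内容,仅演示上文 ik 分词部分提到的 ik_smart 与 ik_max_word 的差异;假设 Elasticsearch 按上文 docker-compose 方式跑在本机 9200 端口,且已装好同版本号的 ik 插件):

```
# 粗粒度切分:ik_smart
curl -X POST 'http://localhost:9200/_analyze?pretty' \
  -H 'Content-Type: application/json' \
  -d '{"analyzer":"ik_smart","text":"中华人民共和国国歌"}'

# 最细粒度切分:ik_max_word,对比两次返回的 tokens 即可看到差异
curl -X POST 'http://localhost:9200/_analyze?pretty' \
  -H 'Content-Type: application/json' \
  -d '{"analyzer":"ik_max_word","text":"中华人民共和国国歌"}'
```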
+ ## 初衷(Original Intention) - 整理下自己所学。**但是比较随意,所以很多地方不够严谨,所以请带着批评的思维阅读。** From f227d7a07b751d3308d7dc3d62270a43716bfab5 Mon Sep 17 00:00:00 2001 From: judasn Date: Sun, 11 Aug 2019 22:00:48 +0800 Subject: [PATCH 105/124] WordPress --- markdown-file/WordPress-Install-And-Settings.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/markdown-file/WordPress-Install-And-Settings.md b/markdown-file/WordPress-Install-And-Settings.md index 39fdc9c2..c11835da 100644 --- a/markdown-file/WordPress-Install-And-Settings.md +++ b/markdown-file/WordPress-Install-And-Settings.md @@ -194,6 +194,19 @@ systemctl enable httpd.service - 我是托管到 DNSPOD,重新指向到新 IP 地址即可 +## 常用插件 + +- JP Markdown +- WP Code Highlight.js +- FooBox Image Lightbox +- WP Super Cache + +## 常见问题 + +- 安装插件出现:`WordPress需要访问您网页服务器的权限。 请输入您的FTP登录凭据以继续` +- 解决办法:`chown -R apache:apache /var/www/html` + + ## 资料 - From 0b3e9155204cd9425df4e2ebeaef76a0f05aef66 Mon Sep 17 00:00:00 2001 From: zhang Date: Mon, 12 Aug 2019 18:01:03 +0800 Subject: [PATCH 106/124] Gravitee --- .../gravitee-docker-compose/README.md | 32 +++++ .../environments/ci/docker-compose.yml | 62 ++++++++ .../environments/demo/common.yml | 62 ++++++++ .../demo/docker-compose-local.yml | 76 ++++++++++ .../demo/docker-compose-traefik-latest.yml | 76 ++++++++++ .../demo/docker-compose-traefik-nightly.yml | 79 ++++++++++ .../environments/demo/launch.sh | 91 ++++++++++++ .../docker-compose-sample-apis.yml | 47 ++++++ .../platform/docker-compose.yml | 135 ++++++++++++++++++ .../create-index.js | 92 ++++++++++++ .../platform/nginx/nginx.conf | 133 +++++++++++++++++ .../platform/nginx/ssl/gio-selfsigned.crt | 27 ++++ .../platform/nginx/ssl/gio-selfsigned.key | 27 ++++ .../platform/nginx/ssl/gio.pem | 8 ++ .../platform/prometheus.yml | 8 ++ 15 files changed, 955 insertions(+) create mode 100644 favorite-file/gravitee-docker-compose/README.md create mode 100644 favorite-file/gravitee-docker-compose/environments/ci/docker-compose.yml create mode 100644 favorite-file/gravitee-docker-compose/environments/demo/common.yml create mode 100644 favorite-file/gravitee-docker-compose/environments/demo/docker-compose-local.yml create mode 100644 favorite-file/gravitee-docker-compose/environments/demo/docker-compose-traefik-latest.yml create mode 100644 favorite-file/gravitee-docker-compose/environments/demo/docker-compose-traefik-nightly.yml create mode 100755 favorite-file/gravitee-docker-compose/environments/demo/launch.sh create mode 100644 favorite-file/gravitee-docker-compose/environments/sample-apis/docker-compose-sample-apis.yml create mode 100644 favorite-file/gravitee-docker-compose/platform/docker-compose.yml create mode 100644 favorite-file/gravitee-docker-compose/platform/mongo/docker-entrypoint-initdb.d/create-index.js create mode 100644 favorite-file/gravitee-docker-compose/platform/nginx/nginx.conf create mode 100644 favorite-file/gravitee-docker-compose/platform/nginx/ssl/gio-selfsigned.crt create mode 100644 favorite-file/gravitee-docker-compose/platform/nginx/ssl/gio-selfsigned.key create mode 100644 favorite-file/gravitee-docker-compose/platform/nginx/ssl/gio.pem create mode 100644 favorite-file/gravitee-docker-compose/platform/prometheus.yml diff --git a/favorite-file/gravitee-docker-compose/README.md b/favorite-file/gravitee-docker-compose/README.md new file mode 100644 index 00000000..e4983ec9 --- /dev/null +++ b/favorite-file/gravitee-docker-compose/README.md @@ -0,0 +1,32 @@ +# graviteeio api gateway docker-compose running + +fork from graviteeio project && change 
some deps image + +- +- + +## how to run + +```code +cd platform && docker-compose up -d +``` + +## manager ui + +* api portal + +```code +open https://localhost/apim/portal +``` + +* access manager ui + +```code +open https://localhost/am/ui/ +``` + +## Note: + +- environments directory has some demos with ci && traefik gateway +- portal account admin amdin +- access manager ui account admin adminadmin \ No newline at end of file diff --git a/favorite-file/gravitee-docker-compose/environments/ci/docker-compose.yml b/favorite-file/gravitee-docker-compose/environments/ci/docker-compose.yml new file mode 100644 index 00000000..37c9c07c --- /dev/null +++ b/favorite-file/gravitee-docker-compose/environments/ci/docker-compose.yml @@ -0,0 +1,62 @@ +#------------------------------------------------------------------------------- +# Copyright (C) 2015 The Gravitee team (http://gravitee.io) +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------- +version: '2' + +services: + ci: + image: graviteeio/jenkins:latest + container_name: ci + network_mode: "bridge" + expose: + - 50022 + ports: + - "50022:50022" + labels: + - "traefik.frontend.rule=Host:ci.gravitee.io" + - "traefik.port=8080" + volumes: + - /var/jenkins_home:/var/jenkins_home + - /var/run/docker.sock:/var/run/docker.sock + links: + - redis-test + - qa + + redis-test: + image: redis:3 + container_name: redis-test + network_mode: "bridge" + + qa: + image: sonarqube:alpine + container_name: qa + network_mode: "bridge" + environment: + - SONARQUBE_JDBC_URL=jdbc:postgresql://sonarqube-db:5432/sonar + labels: + - "traefik.frontend.rule=Host:qa.gravitee.io" + volumes: + - /opt/sonarqube/conf:/opt/sonarqube/conf + - /opt/sonarqube/data:/opt/sonarqube/data + - /opt/sonarqube/extensions:/opt/sonarqube/extensions + - /opt/sonarqube/bundled-plugins:/opt/sonarqube/lib/bundled-plugins + links: + - sonarqube-db + + sonarqube-db: + image: postgres:alpine + network_mode: "bridge" + environment: + - POSTGRES_USER=sonar + - POSTGRES_PASSWORD=sonar + volumes: + - /opt/sonarqube/postgresql/data:/var/lib/postgresql/data diff --git a/favorite-file/gravitee-docker-compose/environments/demo/common.yml b/favorite-file/gravitee-docker-compose/environments/demo/common.yml new file mode 100644 index 00000000..a1d7c696 --- /dev/null +++ b/favorite-file/gravitee-docker-compose/environments/demo/common.yml @@ -0,0 +1,62 @@ +#------------------------------------------------------------------------------- +# Copyright (C) 2015 The Gravitee team (http://gravitee.io) +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------- +version: '2' + +volumes: + datamongo: {} + dataelasticsearch: {} + +services: + elasticsearch: + hostname: demo-elasticsearch + image: docker.elastic.co/elasticsearch/elasticsearch:5.4.3 + volumes: + - dataelasticsearch:/usr/share/elasticsearch/data + environment: + - http.host=0.0.0.0 + - transport.host=0.0.0.0 + - xpack.security.enabled=false + - xpack.monitoring.enabled=false + - cluster.name=elasticsearch + - bootstrap.memory_lock=true + - "ES_JAVA_OPTS=-Xms512m -Xmx512m" + ulimits: + memlock: + soft: -1 + hard: -1 + nofile: 65536 + + mongodb: + hostname: demo-mongodb + image: mongo:3.4 + volumes: + - datamongo:/data/db + + gateway: + hostname: demo-gateway + image: graviteeio/gateway:latest + environment: + - gravitee_management_mongodb_uri=mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_ratelimit_mongodb_uri=mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_reporters_elasticsearch_endpoints_0=http://elasticsearch:9200 + + managementui: + hostname: demo-managementui + image: graviteeio/management-ui:latest + + managementapi: + hostname: demo-managementapi + image: graviteeio/management-api:latest + environment: + - gravitee_management_mongodb_uri=mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_analytics_elasticsearch_endpoints_0=http://elasticsearch:9200 diff --git a/favorite-file/gravitee-docker-compose/environments/demo/docker-compose-local.yml b/favorite-file/gravitee-docker-compose/environments/demo/docker-compose-local.yml new file mode 100644 index 00000000..38c34e4c --- /dev/null +++ b/favorite-file/gravitee-docker-compose/environments/demo/docker-compose-local.yml @@ -0,0 +1,76 @@ +#------------------------------------------------------------------------------- +# Copyright (C) 2015 The Gravitee team (http://gravitee.io) +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
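+# (补充注释,非原文件内容)下方 gravitee_management_mongodb_uri 这类小写环境变量
+# 是 Gravitee 官方镜像约定的配置覆盖方式:变量名中的下划线对应 gravitee.yml
+# 里属性层级的点号,可以不改配置文件直接覆盖对应配置项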
+#------------------------------------------------------------------------------- +version: '2' + +volumes: + local_datamongo: {} + local_dataelasticsearch: {} + +services: + local_elasticsearch: + extends: + file: common.yml + service: elasticsearch + volumes: + - local_dataelasticsearch:/usr/share/elasticsearch/data + - ./logs/elasticsearch:/var/log/elasticsearch + + local_mongodb: + extends: + file: common.yml + service: mongodb + volumes: + - local_datamongo:/data/db + - ./logs/mongodb:/var/log/mongodb + + local_gateway: + extends: + file: common.yml + service: gateway + links: + - "local_mongodb:demo-mongodb" + - "local_elasticsearch:demo-elasticsearch" + ports: + - "8000:8082" + volumes: + - ./logs/gateway:/etc/gravitee.io/log + environment: + - gravitee_management_mongodb_uri=mongodb://demo-mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_ratelimit_mongodb_uri=mongodb://demo-mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_reporters_elasticsearch_endpoints_0=http://demo-elasticsearch:9200 + + local_managementui: + extends: + file: common.yml + service: managementui + ports: + - "8002:80" + volumes: + - ./logs/management-ui:/var/log/httpd + environment: + - MGMT_API_URL=http:\/\/localhost:8005\/management\/ + + local_managementapi: + extends: + file: common.yml + service: managementapi + ports: + - "8005:8083" + volumes: + - ./logs/management-api:/home/gravitee/logs + links: + - "local_mongodb:demo-mongodb" + - "local_elasticsearch:demo-elasticsearch" + environment: + - gravitee_management_mongodb_uri=mongodb://demo-mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_analytics_elasticsearch_endpoints_0=http://demo-elasticsearch:9200 diff --git a/favorite-file/gravitee-docker-compose/environments/demo/docker-compose-traefik-latest.yml b/favorite-file/gravitee-docker-compose/environments/demo/docker-compose-traefik-latest.yml new file mode 100644 index 00000000..e3ea6bce --- /dev/null +++ b/favorite-file/gravitee-docker-compose/environments/demo/docker-compose-traefik-latest.yml @@ -0,0 +1,76 @@ +#------------------------------------------------------------------------------- +# Copyright (C) 2015 The Gravitee team (http://gravitee.io) +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
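+# (补充注释,非原文件内容)下方 traefik.backend、traefik.frontend.rule 等 label
+# 是 Traefik 1.x 的路由写法,需要另外运行一个启用 docker provider 的 Traefik
+# 实例来读取这些 label;Traefik 2.x 起已改为 traefik.http.routers.* 语法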
+#------------------------------------------------------------------------------- +version: '2' + +volumes: + latest_datamongo: {} + latest_dataelasticsearch: {} + +services: + latest_elasticsearch: + network_mode: "bridge" + extends: + file: common.yml + service: elasticsearch + volumes: + - latest_dataelasticsearch:/usr/share/elasticsearch/data + + latest_mongodb: + network_mode: "bridge" + extends: + file: common.yml + service: mongodb + volumes: + - latest_datamongo:/data/db + + latest_gateway: + network_mode: "bridge" + extends: + file: common.yml + service: gateway + links: + - latest_mongodb + - latest_elasticsearch + labels: + - "traefik.backend=graviteeio-gateway" + - "traefik.frontend.rule=Host:demo.gravitee.io;PathPrefixStrip:/gateway" + environment: + - gravitee_management_mongodb_uri=mongodb://demo-mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_ratelimit_mongodb_uri=mongodb://demo-mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_reporters_elasticsearch_endpoints_0=http://demo-elasticsearch:9200 + + latest_managementui: + network_mode: "bridge" + extends: + file: common.yml + service: managementui + labels: + - "traefik.backend=graviteeio-managementui" + - "traefik.frontend.rule=Host:demo.gravitee.io" + environment: + - MGMT_API_URL=https:\/\/demo.gravitee.io\/management\/ + + latest_managementapi: + network_mode: "bridge" + extends: + file: common.yml + service: managementapi + labels: + - "traefik.backend=graviteeio-managementapi" + - "traefik.frontend.rule=Host:demo.gravitee.io;PathPrefix:/management" + links: + - latest_mongodb + - latest_elasticsearch + environment: + - gravitee_management_mongodb_uri=mongodb://demo-mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_analytics_elasticsearch_endpoints_0=http://demo-elasticsearch:9200 \ No newline at end of file diff --git a/favorite-file/gravitee-docker-compose/environments/demo/docker-compose-traefik-nightly.yml b/favorite-file/gravitee-docker-compose/environments/demo/docker-compose-traefik-nightly.yml new file mode 100644 index 00000000..2369851c --- /dev/null +++ b/favorite-file/gravitee-docker-compose/environments/demo/docker-compose-traefik-nightly.yml @@ -0,0 +1,79 @@ +#------------------------------------------------------------------------------- +# Copyright (C) 2015 The Gravitee team (http://gravitee.io) +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
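+# (补充注释,非原文件内容)本文件与 docker-compose-traefik-latest.yml 结构基本一致,
+# 差异主要在镜像 tag 换成 :nightly、Host 规则换成 nightly.gravitee.io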
+#------------------------------------------------------------------------------- +version: '2' + +volumes: + nightly_datamongo: {} + nightly_dataelasticsearch: {} + +services: + nightly_elasticsearch: + network_mode: "bridge" + extends: + file: common.yml + service: elasticsearch + volumes: + - nightly_dataelasticsearch:/usr/share/elasticsearch/data + + nightly_mongodb: + network_mode: "bridge" + extends: + file: common.yml + service: mongodb + volumes: + - nightly_datamongo:/data/db + + nightly_gateway: + image: graviteeio/gateway:nightly + network_mode: "bridge" + extends: + file: common.yml + service: gateway + links: + - nightly_mongodb + - nightly_elasticsearch + labels: + - "traefik.backend=nightly-graviteeio-gateway" + - "traefik.frontend.rule=Host:nightly.gravitee.io;PathPrefixStrip:/gateway" + environment: + - gravitee_management_mongodb_uri=mongodb://demo-mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_ratelimit_mongodb_uri=mongodb://demo-mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_reporters_elasticsearch_endpoints_0=http://demo-elasticsearch:9200 + + nightly_managementui: + image: graviteeio/management-ui:nightly + network_mode: "bridge" + extends: + file: common.yml + service: managementui + labels: + - "traefik.backend=nightly-graviteeio-managementui" + - "traefik.frontend.rule=Host:nightly.gravitee.io" + environment: + - MGMT_API_URL=https:\/\/nightly.gravitee.io\/management\/ + + nightly_managementapi: + image: graviteeio/management-api:nightly + network_mode: "bridge" + extends: + file: common.yml + service: managementapi + labels: + - "traefik.backend=nightly-graviteeio-managementapi" + - "traefik.frontend.rule=Host:nightly.gravitee.io;PathPrefix:/management" + links: + - nightly_mongodb + - nightly_elasticsearch + environment: + - gravitee_management_mongodb_uri=mongodb://demo-mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_analytics_elasticsearch_endpoints_0=http://demo-elasticsearch:9200 \ No newline at end of file diff --git a/favorite-file/gravitee-docker-compose/environments/demo/launch.sh b/favorite-file/gravitee-docker-compose/environments/demo/launch.sh new file mode 100755 index 00000000..ff51ff04 --- /dev/null +++ b/favorite-file/gravitee-docker-compose/environments/demo/launch.sh @@ -0,0 +1,91 @@ +#!/bin/bash +#------------------------------------------------------------------------------- +# Copyright (C) 2015 The Gravitee team (http://gravitee.io) +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------- + +readonly WORKDIR="$HOME/graviteeio-demo" +readonly DIRNAME=`dirname $0` +readonly PROGNAME=`basename $0` +readonly color_title='\033[32m' +readonly color_text='\033[1;36m' + +# OS specific support (must be 'true' or 'false'). 
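+# (补充注释,非原脚本内容)下面先按 uname 识别平台;init_env 中再用
+# groups | grep -c docker 判断当前用户是否在 docker 组,若不在且非 macOS,
+# 就给 docker-compose 命令自动加上 sudo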
+declare cygwin=false +declare darwin=false +declare linux=false +declare dc_exec="docker-compose -f docker-compose-local.yml up" + +welcome() { + echo + echo -e " ${color_title} _____ _____ __ _______ _______ ______ ______ _____ ____ \033[0m" + echo -e " ${color_title} / ____| __ \ /\ \ / /_ _|__ __| ____| ____| |_ _/ __ \ \033[0m" + echo -e " ${color_title}| | __| |__) | / \ \ / / | | | | | |__ | |__ | || | | | \033[0m" + echo -e " ${color_title}| | |_ | _ / / /\ \ \/ / | | | | | __| | __| | || | | | \033[0m" + echo -e " ${color_title}| |__| | | \ \ / ____ \ / _| |_ | | | |____| |____ _ _| || |__| | \033[0m" + echo -e " ${color_title} \_____|_| \_\/_/ \_\/ |_____| |_| |______|______(_)_____\____/ \033[0m" + echo -e " ${color_title} | | \033[0m${color_text}http://gravitee.io\033[0m" + echo -e " ${color_title} __| | ___ _ __ ___ ___ \033[0m" + echo -e " ${color_title} / _\` |/ _ \ '_ \` _ \ / _ \ \033[0m" + echo -e " ${color_title}| (_| | __/ | | | | | (_) | \033[0m" + echo -e " ${color_title} \__,_|\___|_| |_| |_|\___/ \033[0m" + echo +} + +init_env() { + local dockergrp + # define env + case "`uname`" in + CYGWIN*) + cygwin=true + ;; + + Darwin*) + darwin=true + ;; + + Linux) + linux=true + ;; + esac + + # test if docker must be run with sudo + dockergrp=$(groups | grep -c docker) + if [[ $darwin == false && $dockergrp == 0 ]]; then + dc_exec="sudo $dc_exec"; + fi +} + +init_dirs() { + echo "Init log directory in $WORKDIR ..." + mkdir -p "$WORKDIR/logs/" + echo +} + +main() { + welcome + init_env + if [[ $? != 0 ]]; then + exit 1 + fi + set -e + init_dirs + pushd $WORKDIR > /dev/null + echo "Download docker compose files ..." + curl -L https://raw.githubusercontent.com/gravitee-io/gravitee-docker/master/environments/demo/common.yml -o "common.yml" + curl -L https://raw.githubusercontent.com/gravitee-io/gravitee-docker/master/environments/demo/docker-compose-local.yml -o "docker-compose-local.yml" + echo + echo "Launch GraviteeIO demo ..." + $dc_exec + popd > /dev/null +} + +main diff --git a/favorite-file/gravitee-docker-compose/environments/sample-apis/docker-compose-sample-apis.yml b/favorite-file/gravitee-docker-compose/environments/sample-apis/docker-compose-sample-apis.yml new file mode 100644 index 00000000..10c1a074 --- /dev/null +++ b/favorite-file/gravitee-docker-compose/environments/sample-apis/docker-compose-sample-apis.yml @@ -0,0 +1,47 @@ +#------------------------------------------------------------------------------- +# Copyright (C) 2015 The Gravitee team (http://gravitee.io) +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
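+# (补充注释,非原文件内容)下方每个示例 API 都通过 Traefik 的 Host 加 PathPrefix
+# 规则暴露在 api.gravitee.io 下,例如 echo 服务对应 https://api.gravitee.io/echo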
+#------------------------------------------------------------------------------- +version: '2' + +services: + + sample-api-index: + image: graviteeio/gravitee-sample-index:latest + network_mode: "bridge" + labels: + - "traefik.backend=gravitee-sample-index" + - "traefik.frontend.rule=Host:api.gravitee.io" + - "traefik.frontend.entryPoints=https" + + sample-api-echo: + image: graviteeio/gravitee-echo-api:nightly + network_mode: "bridge" + labels: + - "traefik.backend=gravitee-echo-api" + - "traefik.frontend.rule=Host:api.gravitee.io;PathPrefix:/echo" + - "traefik.frontend.entryPoints=https" + + sample-api-whoami: + image: graviteeio/gravitee-whoami-api:latest + network_mode: "bridge" + labels: + - "traefik.backend=gravitee-whoami-api" + - "traefik.frontend.rule=Host:api.gravitee.io;PathPrefix:/whoami" + - "traefik.frontend.entryPoints=https" + + sample-api-whattimeisit: + image: graviteeio/gravitee-whattimeisit-api:latest + network_mode: "bridge" + labels: + - "traefik.backend=gravitee-whattimeisit-api" + - "traefik.frontend.rule=Host:api.gravitee.io;PathPrefix:/whattimeisit" + - "traefik.frontend.entryPoints=https" diff --git a/favorite-file/gravitee-docker-compose/platform/docker-compose.yml b/favorite-file/gravitee-docker-compose/platform/docker-compose.yml new file mode 100644 index 00000000..3771102c --- /dev/null +++ b/favorite-file/gravitee-docker-compose/platform/docker-compose.yml @@ -0,0 +1,135 @@ +version: '3' + +networks: + default: + +services: + nginx: + image: nginx:1.15-alpine + container_name: gio_platform_nginx + volumes: + - ./nginx/nginx.conf:/etc/nginx/nginx.conf + - ./nginx/ssl/gio-selfsigned.crt:/etc/ssl/certs/gio-selfsigned.crt + - ./nginx/ssl/gio-selfsigned.key:/etc/ssl/private/gio-selfsigned.key + - ./nginx/ssl/gio.pem:/etc/ssl/certs/gio.pem + ports: + - "80:80" + - "443:443" + depends_on: + - apim_gateway + - apim_portal + - apim_management + - am_gateway + - am_management + - am_webui + + mongodb: + image: mongo:3.4 + container_name: gio_platform_mongo + ports: + - 27017:27017 + environment: + - MONGO_INITDB_DATABASE=gravitee + volumes: + - ./mongo/docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d + - ./data/mongo:/data/db + - ./logs/mongodb:/var/log/mongodb + + elasticsearch: + image: elasticsearch:6.4.0 + container_name: gio_platform_elasticsearch + ports: + - 9200:9200 + environment: + - http.host=0.0.0.0 + - transport.host=0.0.0.0 + - xpack.security.enabled=false + - xpack.monitoring.enabled=false + - cluster.name=elasticsearch + ulimits: + nofile: 65536 + volumes: + - ./data/elasticsearch:/usr/share/elasticsearch/data + - ./logs/elasticsearch:/var/log/elasticsearch + + apim_gateway: + image: graviteeio/gateway:latest + container_name: gio_platform_apim_gateway + volumes: + - ./logs/apim-gateway:/opt/graviteeio-gateway/logs + environment: + - gravitee_management_mongodb_uri=mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_ratelimit_mongodb_uri=mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_reporters_elasticsearch_endpoints_0=http://elasticsearch:9200 + - gravitee_services_metrics_enabled=true + - gravitee_services_metrics_prometheus.enabled=true + depends_on: + - mongodb + - elasticsearch + + apim_portal: + image: graviteeio/management-ui:latest + container_name: gio_platform_apim_portal + environment: + - MGMT_API_URL=https:\/\/localhost\/apim\/management\/ + depends_on: + - apim_management + + apim_management: + 
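+    # (补充注释,非原文件内容)management-api 提供 REST 管理接口,由上面 nginx 的
+    # /apim/management/ 前缀反向代理,所以下面 jwt cookiepath 也配成 /apim/management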
image: graviteeio/management-api:latest + container_name: gio_platform_apim_mgmt_api + volumes: + - ./logs/apim-management-api:/opt/graviteeio-management-api/logs + environment: + - gravitee_management_mongodb_uri=mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_ratelimit_mongodb_uri=mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_analytics_elasticsearch_endpoints_0=http://elasticsearch:9200 + - gravitee_jwt_cookiepath=/apim/management + - gravitee_jwt_cookiesecure=true + depends_on: + - mongodb + - elasticsearch + + am_gateway: + image: graviteeio/am-gateway:2 + container_name: gio_platform_am_gateway + volumes: + - ./logs/am-gateway:/opt/graviteeio-am-gateway/logs + environment: + - gravitee_management_mongodb_uri=mongodb://mongodb:27017/gravitee-am?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_oauth2_mongodb_uri=mongodb://mongodb:27017/gravitee-am?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + depends_on: + - mongodb + + am_management: + image: graviteeio/am-management-api:2 + container_name: gio_platform_am_management + volumes: + - ./logs/am-management-api:/opt/graviteeio-am-management-api/logs + environment: + - gravitee_management_mongodb_uri=mongodb://mongodb:27017/gravitee-am?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_oauth2_mongodb_uri=mongodb://mongodb:27017/gravitee-am?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_jwt_cookiepath=/am/management + - gravitee_jwt_cookiesecure=true + depends_on: + - mongodb + grafana: + image: grafana/grafana + ports: + - "3000:3000" + prometheus: + image: prom/prometheus + volumes: + - "./prometheus.yml:/etc/prometheus/prometheus.yml" + ports: + - "9090:9090" + am_webui: + image: graviteeio/am-management-ui:2 + container_name: gio_platform_am_webui + environment: + - MGMT_API_URL=https:\/\/localhost\/am\/ + - MGMT_UI_URL=https:\/\/localhost\/am\/ui\/ + volumes: + - ./logs/am-webui:/var/log/nginx + depends_on: + - am_management diff --git a/favorite-file/gravitee-docker-compose/platform/mongo/docker-entrypoint-initdb.d/create-index.js b/favorite-file/gravitee-docker-compose/platform/mongo/docker-entrypoint-initdb.d/create-index.js new file mode 100644 index 00000000..b6f2d379 --- /dev/null +++ b/favorite-file/gravitee-docker-compose/platform/mongo/docker-entrypoint-initdb.d/create-index.js @@ -0,0 +1,92 @@ + +// "apis" collection +db.apis.dropIndexes(); +db.apis.createIndex( { "visibility" : 1 } ); +db.apis.createIndex( { "group" : 1 } ); +db.apis.reIndex(); + +// "applications" collection +db.applications.dropIndexes(); +db.applications.createIndex( { "group" : 1 } ); +db.applications.createIndex( { "name" : 1 } ); +db.applications.createIndex( { "status" : 1 } ); +db.applications.reIndex(); + +// "events" collection +db.events.dropIndexes(); +db.events.createIndex( { "type" : 1 } ); +db.events.createIndex( { "updatedAt" : 1 } ); +db.events.createIndex( { "properties.api_id" : 1 } ); +db.events.createIndex( { "properties.api_id":1, "type":1} ); +db.events.reIndex(); + +// "plans" collection +db.plans.dropIndexes(); +db.plans.createIndex( { "apis" : 1 } ); +db.plans.reIndex(); + +// "subscriptions" collection +db.subscriptions.dropIndexes(); +db.subscriptions.createIndex( { "plan" : 1 } ); +db.subscriptions.createIndex( { "application" : 1 } ); 
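+// (补充注释,非原脚本内容)reIndex() 会重建该集合的全部索引,执行期间会阻塞
+// 该集合的读写;这里只在容器初始化(docker-entrypoint-initdb.d)时执行一次,线上慎用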
+db.subscriptions.reIndex(); + +// "keys" collection +db.keys.dropIndexes(); +db.keys.createIndex( { "plan" : 1 } ); +db.keys.createIndex( { "application" : 1 } ); +db.keys.createIndex( { "updatedAt" : 1 } ); +db.keys.createIndex( { "revoked" : 1 } ); +db.keys.createIndex( { "plan" : 1 , "revoked" : 1, "updatedAt" : 1 } ); +db.keys.reIndex(); + +// "pages" collection +db.pages.dropIndexes(); +db.pages.createIndex( { "api" : 1 } ); +db.pages.reIndex(); + +// "memberships" collection +db.memberships.dropIndexes(); +db.memberships.createIndex( {"_id.userId":1, "_id.referenceId":1, "_id.referenceType":1}, { unique: true } ); +db.memberships.createIndex( {"_id.referenceId":1, "_id.referenceType":1} ); +db.memberships.createIndex( {"_id.referenceId":1, "_id.referenceType":1, "roles":1} ); +db.memberships.createIndex( {"_id.userId":1, "_id.referenceType":1} ); +db.memberships.createIndex( {"_id.userId":1, "_id.referenceType":1, "roles":1} ); +db.memberships.reIndex(); + +// "roles" collection +db.roles.dropIndexes(); +db.roles.createIndex( {"_id.scope": 1 } ); +db.roles.reIndex(); + +// "audits" collection +db.audits.dropIndexes(); +db.audits.createIndex( { "referenceType": 1, "referenceId": 1 } ); +db.audits.createIndex( { "createdAt": 1 } ); +db.audits.reIndex(); + +// "rating" collection +db.rating.dropIndexes(); +db.rating.createIndex( { "api" : 1 } ); +db.rating.reIndex(); + +// "ratingAnswers" collection +db.ratingAnswers.dropIndexes(); +db.ratingAnswers.createIndex( { "rating" : 1 } ); + +// "portalnotifications" collection +db.portalnotifications.dropIndexes(); +db.portalnotifications.createIndex( { "user" : 1 } ); +db.portalnotifications.reIndex(); + +// "portalnotificationconfigs" collection +db.portalnotificationconfigs.dropIndexes(); +db.portalnotificationconfigs.createIndex( {"_id.user":1, "_id.referenceId":1, "_id.referenceType":1}, { unique: true } ); +db.portalnotificationconfigs.createIndex( {"_id.referenceId":1, "_id.referenceType":1, "hooks":1}); +db.portalnotificationconfigs.reIndex(); + +// "genericnotificationconfigs" collection +db.genericnotificationconfigs.dropIndexes(); +db.genericnotificationconfigs.createIndex( {"referenceId":1, "referenceType":1, "hooks":1}); +db.genericnotificationconfigs.createIndex( {"referenceId":1, "referenceType":1}); +db.genericnotificationconfigs.reIndex(); diff --git a/favorite-file/gravitee-docker-compose/platform/nginx/nginx.conf b/favorite-file/gravitee-docker-compose/platform/nginx/nginx.conf new file mode 100644 index 00000000..d08fc26c --- /dev/null +++ b/favorite-file/gravitee-docker-compose/platform/nginx/nginx.conf @@ -0,0 +1,133 @@ +worker_processes 4; + +events { worker_connections 1024; } + +http { + include /etc/nginx/mime.types; + resolver 127.0.0.11 ipv6=off; + + upstream apim_management { + server apim_management:8083; + } + + upstream apim_gateway { + server apim_gateway:8082; + } + + upstream apim_portal { + server apim_portal:80; + } + + upstream am_management { + server am_management:8093; + } + + upstream am_gateway { + server am_gateway:8092; + } + + upstream am_webui { + server am_webui:80; + } + + server { + listen 80; + server_name localhost; + return 301 https://$server_name$request_uri; #Redirection + } + + server { + listen 443 ssl; + listen [::]:443 ssl; + + server_name localhost; + + ssl_certificate /etc/ssl/certs/gio-selfsigned.crt; + ssl_certificate_key /etc/ssl/private/gio-selfsigned.key; + ssl_dhparam /etc/ssl/certs/gio.pem; + + error_page 500 502 503 504 /50x.html; + + location /apim/portal/ { + proxy_pass 
http://apim_portal/; + proxy_redirect $scheme://$host:$server_port/ $scheme://$http_host/apim/portal/; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Host $server_name; + } + + location /apim/management/ { + proxy_pass http://apim_management/management/; + proxy_redirect $scheme://$host:$server_port/management/ /apim/management/; + sub_filter "/management/" "/apim/management/"; + sub_filter_types application/json; + sub_filter_once off; + proxy_cookie_path /management /apim/management; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Host $server_name; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /apim/ { + proxy_pass http://apim_gateway/; + proxy_cookie_path / /apim; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Host $server_name; + } + + location /am/ui/ { + proxy_pass http://am_webui/; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Host $server_name; + sub_filter ' Date: Mon, 19 Aug 2019 18:30:38 +0800 Subject: [PATCH 107/124] 2019-08-19 --- favorite-file/Nginx-Settings/nginx-front.conf | 45 +++++++++++-------- 1 file changed, 26 insertions(+), 19 deletions(-) diff --git a/favorite-file/Nginx-Settings/nginx-front.conf b/favorite-file/Nginx-Settings/nginx-front.conf index 82894983..8b49d8f9 100644 --- a/favorite-file/Nginx-Settings/nginx-front.conf +++ b/favorite-file/Nginx-Settings/nginx-front.conf @@ -22,28 +22,35 @@ http { log_format main '$remote_addr - $remote_user [$time_local] "$request" ' '$status $body_bytes_sent "$http_referer" ' '"$http_user_agent" "$http_x_forwarded_for" "$request_time"'; - + access_log /var/log/nginx/access.log main; error_log /var/log/nginx/error.log; - + gzip on; gzip_buffers 8 16k; gzip_min_length 512; gzip_disable "MSIE [1-6]\.(?!.*SV1)"; gzip_http_version 1.1; gzip_types text/plain text/css application/javascript application/x-javascript application/json application/xml; - + server { - + listen 8001; server_name localhost 127.0.0.1 139.159.190.24 platform.gitnavi.com; - + location / { root /root/.jenkins/workspace/nestle-platform-front-test/dist; index index.html index.htm; try_files $uri /index.html; } - + + ## 二级目录方式,记得 package.json 添加:"homepage": "cdk8s-markdown", + location ^~ /cdk8s-markdown { + root /root/.jenkins/workspace; + index index.html; + try_files $uri /cdk8s-markdown/index.html; + } + location ^~ /platform/ { proxy_pass http://127.0.0.1:28081; proxy_redirect off; @@ -51,36 +58,36 @@ http { proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; } - + location ~ .*\.(js|css)?$ { root /root/.jenkins/workspace/nestle-platform-front-test/dist; } - + location ~ .*\.(gif|jpg|jpeg|png|bmp|swf|ico|woff|woff2|ttf|eot|txt|svg)$ { root /root/.jenkins/workspace/nestle-platform-front-test/dist; } - + error_page 404 /404.html; location = /usr/share/nginx/html/40x.html { } - + error_page 500 502 503 504 /50x.html; location = /usr/share/nginx/html/50x.html { } } - + server { - + listen 8002; server_name localhost 127.0.0.1 139.159.190.24 store.gitnavi.com; - + location / { root 
/root/.jenkins/workspace/nestle-store-front-test/dist; index index.html index.htm; try_files $uri /index.html; } - + location ^~ /store/ { proxy_pass http://127.0.0.1:28082; proxy_redirect off; @@ -88,22 +95,22 @@ http { proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; } - + location ~ .*\.(js|css)?$ { root /root/.jenkins/workspace/nestle-store-front-test/dist; } - + location ~ .*\.(gif|jpg|jpeg|png|bmp|swf|ico|woff|woff2|ttf|eot|txt|svg)$ { root /root/.jenkins/workspace/nestle-store-front-test/dist; } - + error_page 404 /404.html; location = /usr/share/nginx/html/40x.html { } - + error_page 500 502 503 504 /50x.html; location = /usr/share/nginx/html/50x.html { } } -} \ No newline at end of file +} From 5ca8e8b6e72398eb3764736ae991e36706a1955c Mon Sep 17 00:00:00 2001 From: "Judas.n" Date: Mon, 19 Aug 2019 22:36:14 +0800 Subject: [PATCH 108/124] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index ed57b20f..06409a8d 100644 --- a/README.md +++ b/README.md @@ -8,8 +8,8 @@ ## 声明 -- 2019-08-19 周一 -- 我将发布这两年来第一个新的 **大专题**,还是跟 IT 行业相关,请关注!!! +- 后续我将在新的地方,以新的方式重新开始,感谢一直以来的信任 ! +- CDK8S: ## 初衷(Original Intention) From 5737ad495ebb96b8b3cb1313972b155effd71be2 Mon Sep 17 00:00:00 2001 From: "Judas.n" Date: Sat, 5 Oct 2019 23:18:46 +0800 Subject: [PATCH 109/124] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 06409a8d..dcc2662c 100644 --- a/README.md +++ b/README.md @@ -6,10 +6,10 @@ -## 声明 +## 新的起点 -- 后续我将在新的地方,以新的方式重新开始,感谢一直以来的信任 ! - CDK8S: +- TKey: ## 初衷(Original Intention) From dd7514316f27e78df6c6c690af60bd92dc06eb68 Mon Sep 17 00:00:00 2001 From: "Judas.n" Date: Sat, 21 Dec 2019 15:20:28 +0800 Subject: [PATCH 110/124] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index dcc2662c..3f311512 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,7 @@ ## 新的起点 +- [Sculptor Boot:项目思维化的《代码生成器》体系(未来可期,做个朋友吧)](https://github.com/cdk8s/sculptor-boot-generator) - CDK8S: - TKey: From e95b2b7a1d6594103583076733b3ac6e6f905415 Mon Sep 17 00:00:00 2001 From: cdk8s-zelda <51701412+cdk8s-zelda@users.noreply.github.com> Date: Thu, 9 Jan 2020 16:11:46 +0800 Subject: [PATCH 111/124] Update README.md --- README.md | 118 ++++-------------------------------------------------- 1 file changed, 8 insertions(+), 110 deletions(-) diff --git a/README.md b/README.md index 3f311512..e33ec9a0 100644 --- a/README.md +++ b/README.md @@ -5,6 +5,13 @@ +## 只有上云才能撑住规模化后的发展 + +- 初期技术选型上尽可能寻找云支持的 +- 在公司规模小,自建服务基本都做不到 99.999% 高可用 +- 在公司规模发展变迅速时,如果云技术和已有技术契合,迁移成本会低很多很多 +- 目前暂定只选择:[阿里云服务](https://www.aliyun.com/minisite/goods?userCode=v2zozyxz) +- 这里罗列了阿里云常用的一些:[产品](https://github.com/cdk8s/cdk8s-team-style/blob/master/ops/aliyun.md) ## 新的起点 @@ -48,113 +55,4 @@ - [Linux 下常用压缩文件的解压、压缩](markdown-file/File-Extract-Compress.md) - [Yum 下载安装包及对应依赖包](markdown-file/Off-line-Yum-Install.md) - [Zsh 入门](markdown-file/Zsh.md) -- [终端测速](markdown-file/speedtest.md) -- [日常维护](markdown-file/maintenance.md) -- [日常监控](markdown-file/monitor.md) -- [nmon 系统性能监控工具](markdown-file/Nmon.md) -- [Glances 安装和配置](markdown-file/Glances-Install-And-Settings.md) -- [SSH(Secure Shell)介绍](markdown-file/SSH.md) -- [FTP(File Transfer Protocol)介绍](markdown-file/FTP.md) -- [VPN(Virtual Private Network)介绍](markdown-file/VPN.md) -- [NFS(Network FileSystem)介绍](markdown-file/NFS.md) -- [NTP(Network Time 
Protocol)介绍](markdown-file/NTP.md) -- [Samba 介绍](markdown-file/Samba.md) -- [Crontab 介绍](markdown-file/Crontab.md) -- [Iptables 介绍](markdown-file/Iptables.md) -- [花生壳-安装介绍](markdown-file/Hsk-Install.md) -- [JDK 安装](markdown-file/JDK-Install.md) -- [Java bin 目录下的工具](markdown-file/Java-bin.md) -- [SVN 安装和配置](markdown-file/SVN-Install-And-Settings.md) -- [Tomcat 安装和配置、优化](markdown-file/Tomcat-Install-And-Settings.md) -- [Jenkins 安装和配置](markdown-file/Jenkins-Install-And-Settings.md) -- [Maven 安装和配置](markdown-file/Maven-Install-And-Settings.md) -- [Nexus 安装和配置](markdown-file/Nexus-Install-And-Settings.md) -- [PostgreSQL 安装和配置](markdown-file/PostgreSQL-Install-And-Settings.md) -- [MySQL 安装和配置](markdown-file/Mysql-Install-And-Settings.md) -- [MySQL 优化](markdown-file/Mysql-Optimize.md) -- [MySQL 测试](markdown-file/Mysql-Test.md) -- [MySQL 教程](markdown-file/Mysql-Tutorial.md) -- [Percona XtraDB Cluster(PXC)安装和配置](markdown-file/PXC-Install-And-Settings.md) -- [Redis 安装和配置](markdown-file/Redis-Install-And-Settings.md) -- [MongoDB 安装和配置](markdown-file/MongoDB-Install-And-Settings.md) -- [Solr 安装和配置](markdown-file/Solr-Install-And-Settings.md) -- [Jira 安装和配置](markdown-file/Jira-Install-And-Settings.md) -- [Confluence 安装和配置](markdown-file/Confluence-Install-And-Settings.md) -- [TeamCity 安装和配置](markdown-file/TeamCity-Install-And-Settings.md) -- [Nginx 安装和配置](markdown-file/Nginx-Install-And-Settings.md) -- [wrk 安装和配置](markdown-file/wrk-Install-And-Settings.md) -- [FastDFS 安装和配置](markdown-file/FastDFS-Install-And-Settings.md) -- [FastDFS 结合 GraphicsMagick](markdown-file/FastDFS-Nginx-Lua-GraphicsMagick.md) -- [RabbitMQ 安装和配置](markdown-file/RabbitMQ-Install-And-Settings.md) -- [Openfire 安装和配置](markdown-file/Openfire-Install-And-Settings.md) -- [Rap 安装和配置](markdown-file/Rap-Install-And-Settings.md) -- [Nginx + Keepalived 高可用](markdown-file/Nginx-Keepalived-Install-And-Settings.md) -- [黑客入侵检查](markdown-file/Was-Hacked.md) -- [Shadowsocks 安装和配置](markdown-file/http://code.youmeek.com/2016/08/19/2016/08/VPS/) -- [Mycat 安装和配置](markdown-file/Mycat-Install-And-Settings.md) -- [Zookeeper 安装和配置](markdown-file/Zookeeper-Install.md) -- [Daemontools 工具介绍](markdown-file/Daemontools.md) -- [Tmux 安装和配置](markdown-file/Tmux-Install-And-Settings.md) -- [ELK 日志收集系统安装和配置](markdown-file/ELK-Install-And-Settings.md) -- [Dubbo 安装和配置](markdown-file/Dubbo-Install-And-Settings.md) -- [GitLab 安装和配置](markdown-file/Gitlab-Install-And-Settings.md) -- [JMeter 安装和配置](markdown-file/JMeter-Install-And-Settings.md) -- [Docker 安装和使用](markdown-file/Docker-Install-And-Usage.md) -- [Harbor 安装和配置](markdown-file/Harbor-Install-And-Usage.md) -- [LDAP 安装和使用](markdown-file/LDAP-Install-And-Settings.md) -- [Alfresco 安装和使用](markdown-file/Alfresco-Install-And-Usage.md) -- [Apache Thrift 安装和使用](markdown-file/Thrift-Install-And-Usage.md) -- [Node.js 安装和使用](markdown-file/Node-Install-And-Usage.md) -- [CI 整套服务安装和使用](markdown-file/CI-Install-And-Usage.md) -- [YApi 安装和配置](markdown-file/YApi-Install-And-Settings.md) -- [Kafka 安装和配置](markdown-file/Kafka-Install-And-Settings.md) -- [Hadoop 安装和配置](markdown-file/Hadoop-Install-And-Settings.md) -- [Showdoc 安装和配置](markdown-file/Showdoc-Install-And-Settings.md) -- [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md) -- [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) -- [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) -- [Influxdb 安装和配置](markdown-file/Influxdb-Install-And-Settings.md) -- [Prometheus 安装和配置](markdown-file/Prometheus-Install-And-Settings.md) -- [Grafana 
安装和配置](markdown-file/Grafana-Install-And-Settings.md)
-- [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md)
-- [Wormhole + Flink 安装和配置](markdown-file/Wormhole-Install-And-Settings.md)
-- [SkyWalking 安装和配置](markdown-file/SkyWalking-Install-And-Settings.md)
-
-## 联系(Contact)
-
-- Email:judas.n@qq.com(常用) or admin@youmeek.com(备用)
-- Blog:
-- QQ 群交流,入群请看:
-- 欢迎捐赠 ^_^:
-
-
-## Github 协同视频教程(Participate)
-
-- 如果您不会使用 Git 或是 Github 也没关系,请认真学习下面视频教程:
-- Judas.n 录制
-  - 视频格式:MP4
-  - 分辨率:1920 X 1080
-  - 片长:16 min
-  - 文件大小:62 M
-- 下载
-  - 百度云盘:
-  - 360 网盘(2fb5):
-
-## Github 常用按钮说明
-
-- Watch:关注该项目,作者有更新的时候,会在你的 Github 主页有通知消息。
-- Star:收藏该项目,在你的头像上有一个“Your stars”链接,可以看到你的收藏列表。
-- Fork:复制一份项目到的Github空间上,你可以自己开发自己的这个地址项目,然后 Pull Request 给项目原主人。
-
-## 参与作者汇总(Author)
-
-|作者(按参与时间排序)|地址|
-|:---------|:---------|
-|Judas.n||
-|mrdear||
-|fooofei||
-
-## AD
-
-- [推荐:程序员的个性化网址导航:GitNavi.com](http://www.gitnavi.com/u/judasn/)
-- [适合后端开发者的前端 React-Admin](https://github.com/satan31415/umi-admin)
+- [终端测速](markdown-file/

From aec2ad2693d79c6ae85d13f847b19ea20c04bd28 Mon Sep 17 00:00:00 2001
From: judasn
Date: Fri, 5 Jun 2020 16:09:18 +0800
Subject: [PATCH 112/124] monitor

---
 markdown-file/monitor.md | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md
index 50574c9f..9a825839 100644
--- a/markdown-file/monitor.md
+++ b/markdown-file/monitor.md
@@ -858,11 +858,12 @@ access_log /home/wwwlogs/hicrew.log special_main;
 #### 一次 JVM 引起的 CPU 高排查
 
 - 使用 `ps -ef | grep java`,查看进程 PID
-  - 根据高 CPU 的进程 PID,查看其线程 CPU 使用情况:`top -Hp PID`,找到占用 CPU 资源高的线程 PID
-- 保存堆栈情况:`jstack -l PID >> /opt/jstack-tomcat1-PID-20181017.log`
-- 把占用 CPU 资源高的线程十进制的 PID 转换成 16 进制:`printf "%x\n" PID`,比如:`printf "%x\n" 12401` 得到结果是:`3071`
+  - 根据高 CPU 的进程 PID,查看其线程 CPU 使用情况:`top -Hp PID`,找到占用 CPU 资源高的线程 TID
+  - 也可以用:`ps -mp PID -o THREAD,tid,time`
+- 保存堆栈情况(注意:jstack 的参数是进程 PID,不是线程 TID):`jstack -l PID >> /opt/jstack-tomcat1-PID-20181017.log`
+- 把占用 CPU 资源高的线程十进制的 TID 转换成 16 进制:`printf "%x\n" TID`,比如:`printf "%x\n" 12401` 得到结果是:`3071`
 - 在刚刚输出的那个 log 文件中搜索:`3071`,可以找到:`nid=0x3071`
-- 也可以在终端中直接看:`jstack PID |grep 十六进制线程 -A 30`,此时如果发现如下:
+- 也可以在终端中直接看:`jstack PID | grep 十六进制的线程号 -A 30`,此时如果发现如下:
 
 ```
 "GC task thread#0 (ParallelGC)" os_prio=0 tid=0x00007fd0ac01f000 nid=0x66f runnable
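The monitor.md steps above move between two different numbers: a process PID and a thread TID. `jstack` only ever takes the process PID; the hot thread is then located inside the dump by its hex `nid`. Below is a minimal bash sketch that chains the same steps together; the way the Java process is selected and the `top` column layout are illustrative assumptions of mine, not something taken from this repo.

```bash
#!/bin/bash
# Minimal sketch of the high-CPU triage flow described in the patch above.
# Assumptions (not from the original doc): exactly one target JVM is running,
# JDK tools are on PATH, and `top` prints %CPU in column 9 (procps default).

PID=$(ps -ef | grep '[j]ava' | awk '{print $2}' | head -n 1)    # process PID

# Busiest thread (decimal TID) of that process, from one batch pass of top.
TID=$(top -Hp "$PID" -b -n 1 | awk '$1 ~ /^[0-9]+$/' | sort -k 9 -n -r | head -n 1 | awk '{print $1}')

NID=$(printf '%x' "$TID")                                       # hex, e.g. 3071

# jstack dumps the whole process; the hot thread appears as nid=0x<hex TID>.
jstack -l "$PID" | grep -A 30 "nid=0x$NID"
```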
From 6c13ef6789caa3dbdabf3cce346af18a5dc01d4b Mon Sep 17 00:00:00 2001
From: judasn
Date: Thu, 6 Aug 2020 14:51:03 +0800
Subject: [PATCH 113/124] monitor

---
 markdown-file/Bash.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/markdown-file/Bash.md b/markdown-file/Bash.md
index b5eba96e..593b8ff6 100644
--- a/markdown-file/Bash.md
+++ b/markdown-file/Bash.md
@@ -171,6 +171,8 @@ drwxr-xr-x. 5 root root 4096 3月 26 10:57,其中最前面的 d 表示这是
 - linux 的权限分为 rwx。r 代表:可读,w 代表:可写,x 代表:可执行
 - 这三个权限都可以转换成数值表示,r = 4,w = 2,x = 1,- = 0,所以总和是 7,也就是最大权限。第一个 7 是所属主(user)的权限,第二个 7 是所属组(group)的权限,最后一位 7 是非本群组用户(others)的权限。
 - `chmod -R 777 目录` 表示递归目录下的所有文件夹,都赋予 777 权限
+  - `chown myUsername:myGroupName myFile` 表示修改文件所属用户、组
+  - `chown -R myUsername:myGroupName myFolder` 表示递归修改指定目录下的所有文件的所属用户、组
 - `su`:切换到 root 用户,终端目录还是原来的地方(常用)
 - `su -`:切换到 root 用户,其中 **-** 号另起一个终端并切换账号
 - `su 用户名`,切换指定用户帐号登陆,终端目录还是原来地方。

From ad52e9864a3408122c2feb78040256bdb890f5f1 Mon Sep 17 00:00:00 2001
From: judasn
Date: Mon, 10 Aug 2020 16:24:52 +0800
Subject: [PATCH 114/124] monitor

---
 markdown-file/Nginx-Install-And-Settings.md | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/markdown-file/Nginx-Install-And-Settings.md b/markdown-file/Nginx-Install-And-Settings.md
index f54cbdb0..6a5ffb0c 100644
--- a/markdown-file/Nginx-Install-And-Settings.md
+++ b/markdown-file/Nginx-Install-And-Settings.md
@@ -619,6 +619,20 @@ http {
 ```
 
+- 最新版本的 Nginx SSL 配置
+
+```
+listen 443 ssl;
+
+ssl_certificate /opt/jar/ssl/server.crt;
+ssl_certificate_key /opt/jar/ssl/server.key;
+
+ssl_session_timeout 5m;
+ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
+ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:HIGH:!aNULL:!MD5:!RC4:!DHE;
+ssl_prefer_server_ciphers on;
+```
+
 ----------------------------------------------------------------------
 
 ## Nginx 压力测试
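The SSL block in the Nginx patch above points at `/opt/jar/ssl/server.crt` and `server.key` without showing where those files come from. For a local test the pair can be self-signed as in the sketch below; the subject string is a placeholder of mine, and a real site should use a CA-issued certificate instead. Note also that `TLSv1` and `TLSv1.1` are deprecated, so on a current Nginx build the `ssl_protocols` line would normally list only `TLSv1.2` and, where OpenSSL supports it, `TLSv1.3`.

```bash
# Hedged sketch: self-signed certificate matching the paths used above.
# The subject values below are placeholders, not from the original document.
mkdir -p /opt/jar/ssl

openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
  -keyout /opt/jar/ssl/server.key \
  -out /opt/jar/ssl/server.crt \
  -subj '/C=CN/CN=example.com'

# Validate the config, then reload Nginx once the files are in place.
nginx -t && nginx -s reload
```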
From 730694b737d7eace9b31819d92d1f0906e229f35 Mon Sep 17 00:00:00 2001
From: judasn
Date: Fri, 25 Sep 2020 15:05:40 +0800
Subject: [PATCH 115/124] 2020-09-25

---
 markdown-file/JMeter-Install-And-Settings.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/markdown-file/JMeter-Install-And-Settings.md b/markdown-file/JMeter-Install-And-Settings.md
index 3fd087b9..31946f33 100644
--- a/markdown-file/JMeter-Install-And-Settings.md
+++ b/markdown-file/JMeter-Install-And-Settings.md
@@ -160,8 +160,8 @@ remote_hosts=192.168.0.1:1099,192.168.0.2:1099
 - [快速学习Jmeter性能测试工具](http://gitbook.cn/books/58de71a8be13fa66243873ef/index.html)
 - [jmeter:菜鸟入门到进阶系列](http://www.cnblogs.com/imyalost/p/7062784.html)
 - 国内视频教程:
-  - [JMeter 性能测试入门篇 - 慕课网](https://www.imooc.com/learn/735)
   - [JMeter 之 HTTP 协议接口性能测试 - 慕课网](https://www.imooc.com/learn/791)
+  - [接口测试基础之入门篇 - 慕课网](https://www.imooc.com/learn/738)
   - [JMeter 性能测试进阶案例实战 - 慕课网](https://coding.imooc.com/class/142.html)
   - [性能测试工具—Jmeter- 我要自学网](http://www.51zxw.net/list.aspx?page=2&cid=520)
   - [jmeter 视频教学课程 - 小强](https://www.youtube.com/watch?v=zIiXpCBaBgQ&list=PL3rfV4zNE8CD-rAwlXlGXilN5QpkqDWox)

From 955ff70778c388c807eaf51eb29ae5cfbb75eb60 Mon Sep 17 00:00:00 2001
From: judasn
Date: Tue, 27 Oct 2020 23:49:18 +0800
Subject: [PATCH 116/124] 2020-10-27

---
 centos-settings/Close-XWindow.md | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/centos-settings/Close-XWindow.md b/centos-settings/Close-XWindow.md
index 0a34275f..5b89f556 100644
--- a/centos-settings/Close-XWindow.md
+++ b/centos-settings/Close-XWindow.md
@@ -15,7 +15,14 @@
 
 - 在图形界面中如果你希望临时关闭图形界面可以输入:`init 3`
 
-## CentOS 7 设置方法
+## CentOS 7 设置方法 1
+
+- 开机以命令模式启动,执行:
+  - systemctl set-default multi-user.target
+- 开机以图形界面启动,执行:
+  - systemctl set-default graphical.target
+
+## CentOS 7 设置方法 2
 
 - 关闭图形
   - `mv /etc/systemd/system/default.target /etc/systemd/system/default.target.bak` (改名备份)
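The `systemctl set-default` lines in the Close-XWindow patch above have two stock companions worth keeping next to them: one to inspect the current default target, and one to switch immediately, which is the systemd counterpart of the old `init 3` / `init 5`. These are standard systemd commands on CentOS 7 and later.

```bash
systemctl get-default                     # show which target the machine boots into

systemctl set-default multi-user.target   # boot into text mode (runlevel 3 equivalent)
systemctl set-default graphical.target    # boot into the GUI (runlevel 5 equivalent)

systemctl isolate multi-user.target       # switch now, without rebooting
```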
From d236d0e636740c4c8671a2bc4a73edb8adaa1187 Mon Sep 17 00:00:00 2001
From: cdk8s-zelda <51701412+cdk8s-zelda@users.noreply.github.com>
Date: Fri, 2 Jul 2021 03:46:02 +0800
Subject: [PATCH 117/124] Update README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index e33ec9a0..2f461700 100644
--- a/README.md
+++ b/README.md
@@ -15,6 +15,7 @@
 
 ## 新的起点
 
+- [本系列的大部分文章都迁移到这里](https://github.com/cdk8s/cdk8s-team-style)
 - [Sculptor Boot:项目思维化的《代码生成器》体系(未来可期,做个朋友吧)](https://github.com/cdk8s/sculptor-boot-generator)
 - CDK8S:
 - TKey:
@@ -55,4 +56,3 @@
 - [Linux 下常用压缩文件的解压、压缩](markdown-file/File-Extract-Compress.md)
 - [Yum 下载安装包及对应依赖包](markdown-file/Off-line-Yum-Install.md)
 - [Zsh 入门](markdown-file/Zsh.md)
-- [终端测速](markdown-file/

From 007f8881f711a9bad1131dfd3e64d62e61a660e6 Mon Sep 17 00:00:00 2001
From: cdk8s-zelda <51701412+cdk8s-zelda@users.noreply.github.com>
Date: Fri, 2 Jul 2021 03:46:17 +0800
Subject: [PATCH 118/124] Update README.md

---
 README.md | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/README.md b/README.md
index 2f461700..2e2aa874 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,4 @@
 
-## 团队 DevOps 方案参考
-
-
-
-
-
 ## 只有上云才能撑住规模化后的发展
 
 - 初期技术选型上尽可能寻找云支持的

From 8b8dc1c05283a60c0f47af1da81af7bc5f55041f Mon Sep 17 00:00:00 2001
From: cdk8s-zelda <51701412+cdk8s-zelda@users.noreply.github.com>
Date: Fri, 2 Jul 2021 03:46:30 +0800
Subject: [PATCH 119/124] Update README.md

---
 README.md | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/README.md b/README.md
index 2e2aa874..c1b3fef1 100644
--- a/README.md
+++ b/README.md
@@ -1,12 +1,4 @@
 
-## 只有上云才能撑住规模化后的发展
-
-- 初期技术选型上尽可能寻找云支持的
-- 在公司规模小,自建服务基本都做不到 99.999% 高可用
-- 在公司规模发展变迅速时,如果云技术和已有技术契合,迁移成本会低很多很多
-- 目前暂定只选择:[阿里云服务](https://www.aliyun.com/minisite/goods?userCode=v2zozyxz)
-- 这里罗列了阿里云常用的一些:[产品](https://github.com/cdk8s/cdk8s-team-style/blob/master/ops/aliyun.md)
-
 ## 新的起点
 
 - [本系列的大部分文章都迁移到这里](https://github.com/cdk8s/cdk8s-team-style)

From add9e63649889e04004d6d55ba0852596c2ebe36 Mon Sep 17 00:00:00 2001
From: cdk8s-zelda <51701412+cdk8s-zelda@users.noreply.github.com>
Date: Fri, 2 Jul 2021 03:50:38 +0800
Subject: [PATCH 120/124] Update CentOS-7-Install.md

---
 markdown-file/CentOS-7-Install.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/markdown-file/CentOS-7-Install.md b/markdown-file/CentOS-7-Install.md
index 4b1a3e00..bbbec5d4 100644
--- a/markdown-file/CentOS-7-Install.md
+++ b/markdown-file/CentOS-7-Install.md
@@ -5,7 +5,8 @@
 
 - 本教程中主要演示了 VMware Workstation 下安装 `CentOS 7.3` 的过程。
 - VMware 的使用细节可以看这篇:[CentOS 6 安装](CentOS-Install.md)
-- 如果你是要安装到 PC 机中,你需要准备一个 U 盘,以及下载这个软件:[USBWriter(提取码:5aa2)](https://pan.baidu.com/s/1gg83h9T)
+- 如果你是要安装到 PC 机中,你需要准备一个 U 盘,以及 Windows 下载这个软件:[USBWriter(提取码:5aa2)](https://pan.baidu.com/s/1gg83h9T)
+- 如果你是要安装到 PC 机中,你需要准备一个 U 盘,以及 macOS 下载这个软件:[balenaEtcher(提取码:o9qn)](https://pan.baidu.com/s/1bjddfOcuhS3UUIOrFf5ehg)
 - USBWriter 的使用很简单,如下图即可制作一个 CentOS 系统盘
 
 ![VMware 下安装](../images/CentOS-7-Install-a-0.jpg)

From b198564df47639eea7c53005d295054edc802141 Mon Sep 17 00:00:00 2001
From: cdk8s-zelda <51701412+cdk8s-zelda@users.noreply.github.com>
Date: Fri, 2 Jul 2021 03:55:40 +0800
Subject: [PATCH 121/124] Update CentOS-7-Install.md

---
 markdown-file/CentOS-7-Install.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/markdown-file/CentOS-7-Install.md b/markdown-file/CentOS-7-Install.md
index bbbec5d4..c9428617 100644
--- a/markdown-file/CentOS-7-Install.md
+++ b/markdown-file/CentOS-7-Install.md
@@ -6,7 +6,7 @@
 - 本教程中主要演示了 VMware Workstation 下安装 `CentOS 7.3` 的过程。
 - VMware 的使用细节可以看这篇:[CentOS 6 安装](CentOS-Install.md)
 - 如果你是要安装到 PC 机中,你需要准备一个 U 盘,以及 Windows 下载这个软件:[USBWriter(提取码:5aa2)](https://pan.baidu.com/s/1gg83h9T)
-- 如果你是要安装到 PC 机中,你需要准备一个 U 盘,以及 macOS 下载这个软件:[balenaEtcher(提取码:o9qn)](https://pan.baidu.com/s/1bjddfOcuhS3UUIOrFf5ehg)
+- 如果你是要安装到 PC 机中,你需要准备一个 U 盘,以及 macOS 下载这个软件:[balenaEtcher(提取码:oqp9)](https://pan.baidu.com/s/1l5K48tfuCKdn0wR_62PjJA)
 - USBWriter 的使用很简单,如下图即可制作一个 CentOS 系统盘
 
 ![VMware 下安装](../images/CentOS-7-Install-a-0.jpg)

From 3cb8e187e69de094788cba5211226f8856485991 Mon Sep 17 00:00:00 2001
From: cdk8s-zelda <51701412+cdk8s-zelda@users.noreply.github.com>
Date: Mon, 5 Jul 2021 19:04:26 +0800
Subject: [PATCH 122/124] Update wrk-Install-And-Settings.md

---
 markdown-file/wrk-Install-And-Settings.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/markdown-file/wrk-Install-And-Settings.md b/markdown-file/wrk-Install-And-Settings.md
index f73c6330..f6b7c67f 100644
--- a/markdown-file/wrk-Install-And-Settings.md
+++ b/markdown-file/wrk-Install-And-Settings.md
@@ -26,7 +26,7 @@ sudo cp wrk /usr/local/bin
 
 ## 使用
 
-- 启用 10 个线程,每个线程发起 100 个连接,持续 15 秒:`wrk -t5 -c5 -d30s http://www.baidu.com`
+- 启用 10 个线程,共保持 100 个连接(-c 指的是总连接数,不是每个线程的连接数),持续 30 秒:`wrk -t10 -c100 -d30s http://www.baidu.com`
 - 最终报告:
 
 ```
@@ -62,4 +62,4 @@ wrk.headers["Content-Type"] = "application/x-www-form-urlencoded"
 ## 资料
 
 -
-- \ No newline at end of file
+-

From 6010165e96420c229479f41ffc86e7a7f857b88c Mon Sep 17 00:00:00 2001
From: cdk8s-zelda <51701412+cdk8s-zelda@users.noreply.github.com>
Date: Mon, 31 Jan 2022 09:32:59 +0800
Subject: [PATCH 123/124] Update README.md

---
 README.md | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index c1b3fef1..064e3835 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,8 @@
-## 新的起点
 
-- [本系列的大部分文章都迁移到这里](https://github.com/cdk8s/cdk8s-team-style)
-- [Sculptor Boot:项目思维化的《代码生成器》体系(未来可期,做个朋友吧)](https://github.com/cdk8s/sculptor-boot-generator)
-- CDK8S:
-- TKey:
+
+## 作者新方向,感谢支持
+
+- [UPUPMO-扶持个人从开公司到全平台产品上线](https://www.bilibili.com/video/BV1Bb4y1j7dy)
 
 ## 初衷(Original Intention)

From 80109268d5b773c22cb8700d953d5ff01e52abde Mon Sep 17 00:00:00 2001
From: cdk8s-zelda <51701412+cdk8s-zelda@users.noreply.github.com>
Date: Sat, 11 Jun 2022 15:40:03 +0800
Subject: [PATCH 124/124] Update README.md

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 064e3835..80bf6ad0 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,8 @@
-
 ## 作者新方向,感谢支持
 
-- [UPUPMO-扶持个人从开公司到全平台产品上线](https://www.bilibili.com/video/BV1Bb4y1j7dy)
+- [从开公司到开发全平台产品(文字版)](https://github.com/cdk8s/cdk8s-team-style/blob/master/full-stack/README.md)
+- [从开公司到开发全平台产品(视频版)](https://space.bilibili.com/1765486559/channel/seriesdetail?sid=2359281)
 
 ## 初衷(Original Intention)
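One last note on the wrk invocation adjusted in the patch above: `-t` sets the number of threads, `-c` sets the total number of HTTP connections held open across all threads (so `-t10 -c100` works out to roughly 10 connections per thread), and `-d` sets the duration. A hedged example against a placeholder local service:

```bash
# -c is the TOTAL connection count, shared by the -t threads (100 / 10 = 10 each).
# The URL below is a placeholder; point wrk at your own service.
wrk -t10 -c100 -d30s --latency http://127.0.0.1:8080/

# To push harder, raise -c rather than -t; threads beyond the CPU core count
# rarely help.
wrk -t4 -c400 -d60s --latency http://127.0.0.1:8080/
```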