From 5b53343003cf9c38f22ffd36e72e5b00916df540 Mon Sep 17 00:00:00 2001
From: zehua <523786283@qq.com>
Date: Fri, 13 Apr 2018 20:37:41 +0800
Subject: [PATCH 001/330] =?UTF-8?q?Node=20=E5=AE=89=E8=A3=85=E5=92=8C?=
 =?UTF-8?q?=E9=85=8D=E7=BD=AE?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 markdown-file/Node-Install-And-Usage.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/markdown-file/Node-Install-And-Usage.md b/markdown-file/Node-Install-And-Usage.md
index 3788d5b2..bd413346 100644
--- a/markdown-file/Node-Install-And-Usage.md
+++ b/markdown-file/Node-Install-And-Usage.md
@@ -21,5 +21,7 @@ curl --silent --location https://rpm.nodesource.com/setup_9.x | sudo bash -
 sudo yum -y install nodejs
 ```
 
+- Note: for network reasons it is best to download the setup script to the local machine first, and then run the installation through a proxy
+

From 25a18896e188bc0167a86e185f2e75f7bc545d33 Mon Sep 17 00:00:00 2001
From: zehua <523786283@qq.com>
Date: Mon, 7 May 2018 10:35:40 +0800
Subject: [PATCH 002/330] Update NTP.md

---
 markdown-file/NTP.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/markdown-file/NTP.md b/markdown-file/NTP.md
index 7dead7a9..a51f840e 100644
--- a/markdown-file/NTP.md
+++ b/markdown-file/NTP.md
@@ -28,7 +28,10 @@ server 3.asia.pool.ntp.org
 ```
 
 - Start the service:
+    - `sudo service ntpd stop` (after changing the configuration, stop first, then start again)
     - `sudo service ntpd start`
+- Update the time manually:
+    - `sudo ntpdate ntp1.aliyun.com`
 - Add the service to the startup items
     - On CentOS
         - `sudo chkconfig ntpd on`

From e443c5f9f5d8b1c217a7bc0d81d77afc2b5b91a1 Mon Sep 17 00:00:00 2001
From: zehua <523786283@qq.com>
Date: Mon, 7 May 2018 10:36:56 +0800
Subject: [PATCH 003/330] Update NTP.md

---
 markdown-file/NTP.md | 61 +++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 60 insertions(+), 1 deletion(-)

diff --git a/markdown-file/NTP.md b/markdown-file/NTP.md
index a51f840e..a3dd1f41 100644
--- a/markdown-file/NTP.md
+++ b/markdown-file/NTP.md
@@ -17,7 +17,66 @@
 - Public NTP servers worldwide:
 - NTP servers for China:
 - Configuration file overview (remember to back it up first): `sudo vim /etc/ntp.conf`
-    - ![Common parameters of the NTP server configuration file](../images/NTP-a-1.jpg)
+    ```
+driftfile /var/lib/ntp/drift
+pidfile /var/run/ntpd.pid
+logfile /var/log/ntp.log
+
+
+# Access Control Support
+restrict default kod nomodify notrap nopeer noquery
+restrict -6 default kod nomodify notrap nopeer noquery
+restrict 127.0.0.1
+restrict 192.168.0.0 mask 255.255.0.0 nomodify notrap nopeer noquery
+restrict 172.16.0.0 mask 255.240.0.0 nomodify notrap nopeer noquery
+restrict 100.64.0.0 mask 255.192.0.0 nomodify notrap nopeer noquery
+restrict 10.0.0.0 mask 255.0.0.0 nomodify notrap nopeer noquery
+
+
+# local clock
+server 127.127.1.0
+fudge 127.127.1.0 stratum 10
+
+
+restrict ntp1.aliyun.com nomodify notrap nopeer noquery
+restrict ntp1.cloud.aliyuncs.com nomodify notrap nopeer noquery
+restrict ntp10.cloud.aliyuncs.com nomodify notrap nopeer noquery
+restrict ntp11.cloud.aliyuncs.com nomodify notrap nopeer noquery
+restrict ntp12.cloud.aliyuncs.com nomodify notrap nopeer noquery
+restrict ntp2.aliyun.com nomodify notrap nopeer noquery
+restrict ntp2.cloud.aliyuncs.com nomodify notrap nopeer noquery
+restrict ntp3.aliyun.com nomodify notrap nopeer noquery
+restrict ntp3.cloud.aliyuncs.com nomodify notrap nopeer noquery
+restrict ntp4.aliyun.com nomodify notrap nopeer noquery
+restrict ntp4.cloud.aliyuncs.com nomodify notrap nopeer noquery
+restrict ntp5.aliyun.com nomodify notrap nopeer noquery
+restrict ntp5.cloud.aliyuncs.com nomodify notrap nopeer noquery
+restrict ntp6.aliyun.com nomodify notrap nopeer noquery
+restrict ntp6.cloud.aliyuncs.com nomodify notrap nopeer noquery
+restrict ntp7.cloud.aliyuncs.com nomodify notrap nopeer noquery
+restrict ntp8.cloud.aliyuncs.com nomodify notrap nopeer noquery
+restrict ntp9.cloud.aliyuncs.com nomodify notrap nopeer noquery
+
+
+server ntp1.aliyun.com iburst minpoll 4 maxpoll 10
+server ntp1.cloud.aliyuncs.com iburst minpoll 4 maxpoll 10
+server ntp10.cloud.aliyuncs.com iburst minpoll 4 maxpoll 10
+server ntp11.cloud.aliyuncs.com iburst minpoll 4 maxpoll 10
+server ntp12.cloud.aliyuncs.com iburst minpoll 4 maxpoll 10
+server ntp2.aliyun.com iburst minpoll 4 maxpoll 10
+server ntp2.cloud.aliyuncs.com iburst minpoll 4 maxpoll 10
+server ntp3.aliyun.com iburst minpoll 4 maxpoll 10
+server ntp3.cloud.aliyuncs.com iburst minpoll 4 maxpoll 10
+server ntp4.aliyun.com iburst minpoll 4 maxpoll 10
+server ntp4.cloud.aliyuncs.com iburst minpoll 4 maxpoll 10
+server ntp5.aliyun.com iburst minpoll 4 maxpoll 10
+server ntp5.cloud.aliyuncs.com iburst minpoll 4 maxpoll 10
+server ntp6.aliyun.com iburst minpoll 4 maxpoll 10
+server ntp6.cloud.aliyuncs.com iburst minpoll 4 maxpoll 10
+server ntp7.cloud.aliyuncs.com iburst minpoll 4 maxpoll 10
+server ntp8.cloud.aliyuncs.com iburst minpoll 4 maxpoll 10
+server ntp9.cloud.aliyuncs.com iburst minpoll 4 maxpoll 10
+```
 - Explanation of this configuration:
     - Item 1 is the default content, which is commented out here.
     - Item 2 is the newly added content, pointing at NTP servers in China.
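With the Chinese pool servers in place it is worth confirming that ntpd actually picks one of them up after a restart. A minimal check from the shell, assuming the stock `ntpq`/`ntpstat` client tools that ship with the ntp package:

```bash
# List the peers ntpd is tracking; the row starting with '*' is the current sync source
ntpq -p

# Summarise the sync state (exit code 0 means synchronised)
ntpstat

# One-off manual correction against Aliyun (stop ntpd first so UDP port 123 is free)
sudo service ntpd stop
sudo ntpdate ntp1.aliyun.com
sudo service ntpd start
```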
From dbdd90f2dc2d09b6850791f128521992dacfded9 Mon Sep 17 00:00:00 2001
From: zehua <523786283@qq.com>
Date: Wed, 6 Jun 2018 13:54:31 +0800
Subject: [PATCH 004/330] =?UTF-8?q?=E4=BF=AE=E6=94=B9sonar=E7=9A=84jdbc?=
 =?UTF-8?q?=E6=95=B0=E6=8D=AE=E6=BA=90=E4=BF=A1=E6=81=AF?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 markdown-file/CI-Install-And-Usage.md | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/markdown-file/CI-Install-And-Usage.md b/markdown-file/CI-Install-And-Usage.md
index 6cc6bc49..ea7bf381 100644
--- a/markdown-file/CI-Install-And-Usage.md
+++ b/markdown-file/CI-Install-And-Usage.md
@@ -228,8 +228,7 @@ services:
       - /data/docker/ci/sonarqube/extension:/opt/sonarqube/extensions
       - /data/docker/ci/sonarqube/bundled-plugins:/opt/sonarqube/lib/bundled-plugins
     environment:
-      #- SONARQUBE_JDBC_URL=jdbc:postgresql://sonardb:5433/sonar
-      - SONARQUBE_JDBC_URL=jdbc:postgresql://192.168.0.105:5433/sonar
+      - SONARQUBE_JDBC_URL=jdbc:postgresql://sonardb:5432/sonar
       - SONARQUBE_JDBC_USERNAME=sonar
       - SONARQUBE_JDBC_PASSWORD=sonar
   nexus:
@@ -309,4 +308,4 @@ services:
 
 ## References
 
-- 
\ No newline at end of file
+- 

From 5079a26af82b2b042c0982d4f89f2c25b35069d3 Mon Sep 17 00:00:00 2001
From: zehua <523786283@qq.com>
Date: Wed, 6 Jun 2018 14:15:50 +0800
Subject: [PATCH 005/330] =?UTF-8?q?=E5=8E=BB=E9=99=A4=E9=83=A8=E5=88=86?=
 =?UTF-8?q?=E6=B3=A8=E9=87=8A?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 markdown-file/CI-Install-And-Usage.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/markdown-file/CI-Install-And-Usage.md b/markdown-file/CI-Install-And-Usage.md
index ea7bf381..bab0e9a5 100644
--- a/markdown-file/CI-Install-And-Usage.md
+++ b/markdown-file/CI-Install-And-Usage.md
@@ -218,8 +218,8 @@ services:
     ports:
       - "19000:9000"
       - "19092:9092"
-    #networks:
-    #- prodnetwork
+    networks:
+      - prodnetwork
     depends_on:
       - sonardb
     volumes:
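The JDBC fix above works because containers on the shared compose network reach each other by service name and by the container-side port (5432), not by the host IP and the host-mapped port (5433). A quick way to watch that resolution from inside the SonarQube container; the container name `ci_sonarqube_1` is an assumption (it depends on the compose project name), and `getent` is only present if the image is glibc-based:

```bash
# Resolve the sonardb service name from inside the SonarQube container
docker exec ci_sonarqube_1 getent hosts sonardb

# Confirm PostgreSQL answers on the container-side port (bash's /dev/tcp trick)
docker exec ci_sonarqube_1 bash -c 'echo > /dev/tcp/sonardb/5432 && echo reachable'
```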
From 3f0c4fa9eaa7fa635f52a8dbaaa7623b874081b5 Mon Sep 17 00:00:00 2001
From: judasn
Date: Fri, 8 Jun 2018 00:06:08 +0800
Subject: [PATCH 006/330] 2018-06-08

---
 markdown-file/CI-Install-And-Usage.md         | 27 ++++++++-----------
 markdown-file/Jenkins-Install-And-Settings.md |  2 +-
 2 files changed, 12 insertions(+), 17 deletions(-)

diff --git a/markdown-file/CI-Install-And-Usage.md b/markdown-file/CI-Install-And-Usage.md
index bab0e9a5..88d48080 100644
--- a/markdown-file/CI-Install-And-Usage.md
+++ b/markdown-file/CI-Install-And-Usage.md
@@ -182,13 +182,14 @@ services:
 - Start it: `docker-compose up -d`; startup is slow, allow about 2 minutes.
 - Open Gitlab in the browser:
 - The default user is root; the password must be reset on the first visit, minimum length 8 characters. I usually set it to: aa123456
+    - Add an SSH key:
 - For details on using Gitlab see the separate article: [Using Gitlab](Gitlab-Install-And-Settings.md)
 
 ## Nexus + Jenkins + SonarQube
 
 - Expected memory usage: around 4 GB
-- Create the host mount directories: `mkdir -p /data/docker/ci/nexus /data/docker/ci/jenkins /data/docker/ci/jenkins/lib /data/docker/ci/jenkins/home /data/docker/ci/sonarqube /data/docker/ci/postgresql`
-- Grant permissions (some programs need specific in-container user permissions on the mounts): `chmod -R 777 /data/docker/ci/nexus /data/docker/ci/jenkins /data/docker/ci/jenkins/home /data/docker/ci/sonarqube /data/docker/ci/postgresql`
+- Create the host mount directories: `mkdir -p /data/docker/ci/nexus /data/docker/ci/jenkins /data/docker/ci/jenkins/lib /data/docker/ci/jenkins/home /data/docker/ci/sonarqube /data/docker/ci/postgresql /data/docker/ci/gatling/results`
+- Grant permissions (some programs need specific in-container user permissions on the mounts): `chmod -R 777 /data/docker/ci/nexus /data/docker/ci/jenkins/lib /data/docker/ci/jenkins/home /data/docker/ci/sonarqube /data/docker/ci/postgresql /data/docker/ci/gatling/results`
 - One detail deserves special attention: the yml must not contain Chinese characters. Also, the sonar volume must not be mounted directly on /opt/sonarqube, or it will fail to start.
 - docker-compose is used to start everything here, so a docker-compose.yml file is needed:
@@ -251,7 +252,7 @@ services:
       - /var/run/docker.sock:/var/run/docker.sock
       - /usr/bin/docker:/usr/bin/docker
       - /etc/localtime:/etc/localtime:ro
-      - /root/.ssh:/root/.ssh
+      - $HOME/.ssh:/root/.ssh
       - /data/docker/ci/jenkins/lib:/var/lib/jenkins/
      - /data/docker/ci/jenkins/home:/var/jenkins_home
     depends_on:
@@ -274,23 +275,14 @@ services:
 - The first visit to the Jenkins web UI shows an unlock page, Unlock Jenkins, asking for: Administrator password
     - The password is stored in `/var/jenkins_home/secrets/initialAdminPassword`; first run: `docker exec -it ci_jenkins_1 /bin/bash`
     - Then: `cat /var/jenkins_home/secrets/initialAdminPassword` to find the initial password
+
 
 ---------------------------------
 
 ## Configuring Jenkins permissions for pulling code
 
-- Create an Access Token in Gitlab:
-    - Fill in any Name string
-    - Tick: API `Access the authenticated user's API`
-    - Click: Create personal access token, which generates a string like `wt93jQzA8yu5a6pfsk3s`; the Jenkinsfile will use it
-- First open the Jenkins plugin page and install these three plugins:
-    - Gitlab: the direct install may fail; if so, the error details contain the hpi download URL, so fetch it through a proxy and install it offline
-    - Gitlab Hook: used so GitLab WebHooks can trigger project builds
-    - Gitlab Authentication: provides user authentication and authorization through GitLab
-- After installing the plugins, open this Jenkins path (Jenkins-->Credentials-->System-->Global credentials(unrestricted)-->Add Credentials)
-    - The path URL:
-    - In the kind dropdown choose: `GitLab API token`
-    - For token, fill in the access token generated above
-    - For ID, fill in the Gitlab account
+- Because the dockerfile above mounts the host's .ssh directory into the Jenkins container,
+- read the host's public key with `cat ~/.ssh/id_rsa.pub` and configure it in Gitlab:
+- then use the ssh protocol for the Git URL in the Jenkinsfile, for example: `ssh://git@192.168.0.105:10022/gitnavi/spring-boot-ci-demo.git`
 
 ## Special Jenkins settings (to reduce permission problems, for intranet use)
@@ -306,6 +298,9 @@ services:
 - Gitlab access:
     - In `URL` fill in: `http://192.168.0.105:18080/job/JOB_NAME/build?token=112233`
 
+
+
+
 ## References
 
 - 
diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md
index 03a941f3..6da80a5d 100644
--- a/markdown-file/Jenkins-Install-And-Settings.md
+++ b/markdown-file/Jenkins-Install-And-Settings.md
@@ -37,7 +37,7 @@ Can not write to /var/jenkins_home/copy_reference_file.log. Wrong volume permissions?
 - No screenshots here; if you need them, this blog post is close enough: [click me o(∩_∩)o ](https://blog.csdn.net/boling_cavalry/article/details/78942408)
 - The first visit to the Jenkins web UI shows an unlock page, Unlock Jenkins, asking for: Administrator password
-    - The password is stored in `/var/jenkins_home/secrets/initialAdminPassword`; first run: `docker exec -it eec22d513b5b /bin/bash`
+    - The password is stored in `/var/jenkins_home/secrets/initialAdminPassword`; first run: `docker exec -it ci_jenkins_1 /bin/bash`
     - Then: `cat /data/jenkins/jenkins_home/secrets/initialAdminPassword`
     - It may also be in this directory: `cat /var/jenkins_home/secrets/initialAdminPassword`
 - Next comes plugin installation; I recommend installing the recommended set first and adding custom ones later.
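The remote-trigger URL used above can be smoke-tested by hand before wiring it into Gitlab. A sketch only: the job name `spring-boot-ci-demo` and the token `112233` are assumptions taken from the examples in this section, and the call only succeeds if the job has the remote-trigger token configured and anonymous access is allowed (the relaxed intranet settings mentioned above):

```bash
# Fire the job's remote build endpoint
curl -X POST "http://192.168.0.105:18080/job/spring-boot-ci-demo/build?token=112233"

# A 201 response means the build was queued; inspect the queue to double-check
curl -s "http://192.168.0.105:18080/queue/api/json"
```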
From 53904af22365e42bd608e929cbaecdbce69ecd42 Mon Sep 17 00:00:00 2001
From: judasn
Date: Sat, 9 Jun 2018 17:37:44 +0800
Subject: [PATCH 007/330] 2018-06-09

---
 markdown-file/CI-Install-And-Usage.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/markdown-file/CI-Install-And-Usage.md b/markdown-file/CI-Install-And-Usage.md
index 88d48080..dd5bbd62 100644
--- a/markdown-file/CI-Install-And-Usage.md
+++ b/markdown-file/CI-Install-And-Usage.md
@@ -268,6 +268,10 @@ services:
 - Open SonarQube in the browser:
     - Default username: admin
     - Default password: admin
+    - Plugin install page:
+    - Code inspection rules are selected at:
+    - If no inspection plugin is installed there is nothing to configure yet; install FindBugs from the plugin marketplace first, which also creates the plugin directory `/extension/plugins`
+    - If you have other extra plugins, drop the jar into the `${SONAR_HOME}/extension/plugins` directory and restart sonar
 - Open Nexus in the browser:
     - Default username: admin
     - Default password: admin123

From ae791994ce51630bb712f6e487b69d05035e75da Mon Sep 17 00:00:00 2001
From: judasn
Date: Sun, 10 Jun 2018 22:59:52 +0800
Subject: [PATCH 008/330] =?UTF-8?q?2018-06-10=20=E8=A1=A5=E5=85=85=20Harbo?=
 =?UTF-8?q?r=20=E5=AE=89=E8=A3=85=E5=92=8C=E9=85=8D=E7=BD=AE?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 README.md                                 |   1 +
 SUMMARY.md                                |   1 +
 TOC.md                                    |   1 +
 markdown-file/Harbor-Install-And-Usage.md | 436 ++++++++++++++++++++++
 4 files changed, 439 insertions(+)
 create mode 100644 markdown-file/Harbor-Install-And-Usage.md

diff --git a/README.md b/README.md
index 7e805e0f..6ebc2af8 100644
--- a/README.md
+++ b/README.md
@@ -78,6 +78,7 @@
 - [GitLab Install and Settings](markdown-file/Gitlab-Install-And-Settings.md)
 - [JMeter Install and Settings](markdown-file/JMeter-Install-And-Settings.md)
 - [Docker Install and Usage](markdown-file/Docker-Install-And-Usage.md)
+- [Harbor Install and Settings](markdown-file/Harbor-Install-And-Usage.md)
 - [LDAP Install and Usage](markdown-file/LDAP-Install-And-Settings.md)
 - [Alfresco Install and Usage](markdown-file/Alfresco-Install-And-Usage.md)
 - [Apache Thrift Install and Usage](markdown-file/Thrift-Install-And-Usage.md)
diff --git a/SUMMARY.md b/SUMMARY.md
index 240a3327..d13cfbf6 100644
--- a/SUMMARY.md
+++ b/SUMMARY.md
@@ -67,6 +67,7 @@
 * [GitLab Install and Settings](markdown-file/Gitlab-Install-And-Settings.md)
 * [JMeter Install and Settings](markdown-file/JMeter-Install-And-Settings.md)
 * [Docker Install and Usage](markdown-file/Docker-Install-And-Usage.md)
+* [Harbor Install and Settings](markdown-file/Harbor-Install-And-Usage.md)
 * [LDAP Install and Usage](markdown-file/LDAP-Install-And-Settings.md)
 * [Alfresco Install and Usage](markdown-file/Alfresco-Install-And-Usage.md)
 * [Apache Thrift Install and Usage](markdown-file/Thrift-Install-And-Usage.md)
diff --git a/TOC.md b/TOC.md
index 7dcbae42..896c483d 100644
--- a/TOC.md
+++ b/TOC.md
@@ -65,6 +65,7 @@
 - [GitLab Install and Settings](markdown-file/Gitlab-Install-And-Settings.md)
 - [JMeter Install and Settings](markdown-file/JMeter-Install-And-Settings.md)
 - [Docker Install and Usage](markdown-file/Docker-Install-And-Usage.md)
+- [Harbor Install and Settings](markdown-file/Harbor-Install-And-Usage.md)
 - [LDAP Install and Usage](markdown-file/LDAP-Install-And-Settings.md)
 - [Alfresco Install and Usage](markdown-file/Alfresco-Install-And-Usage.md)
 - [Apache Thrift Install and Usage](markdown-file/Thrift-Install-And-Usage.md)
diff --git a/markdown-file/Harbor-Install-And-Usage.md b/markdown-file/Harbor-Install-And-Usage.md
new file mode 100644
index 00000000..9a1d41a8
--- /dev/null
+++ b/markdown-file/Harbor-Install-And-Usage.md
@@ -0,0 +1,436 @@
+# Harbor Install and Settings
+
+## Environment
+
+- CentOS 7.4
+- IP: `192.168.0.105`
+- Machines that need access must map the hostname in their hosts file (not needed if a real domain is bound): `192.168.0.105 harbor.gitnavi.com`
+
+## Official documentation
+
+- Installation guide:
+    - From it we know the prerequisites: [Docker and Docker Compose](./Docker-Install-And-Usage.md)
+- Minimum hardware: 2 cores + 4 GB RAM (8 GB recommended)
+- Download:
+    - Latest version as of 2018-06: **v1.5.1**
+    - There are offline and online variants; offline is recommended
+    - **v1.5.1** download address:
+
+## Installation
+
+- Change directory: `cd /opt/setups`
+- Download: `wget https://storage.googleapis.com/harbor-releases/release-1.5.0/harbor-offline-installer-v1.5.1.tgz`
+- Unpack: `tar xvf harbor-offline-installer-v1.5.1.tgz`
+- Move it under /usr: `mv /opt/setups/harbor /usr/local`
+- Edit the configuration file: `vim /usr/local/harbor/harbor.cfg`:
+
+```
+_version = 1.5.0
+
+# hostname can be an IP address or a domain name; it must not be 127.0.0.1 or localhost
+hostname = harbor.gitnavi.com
+
+ui_url_protocol = http
+max_job_workers = 50
+customize_crt = on
+ssl_cert = /data/cert/server.crt
+ssl_cert_key = /data/cert/server.key
+secretkey_path = /data
+admiral_url = NA
+log_rotate_count = 50
+log_rotate_size = 200M
+http_proxy =
+https_proxy =
+no_proxy = 127.0.0.1,localhost,ui
+email_identity =
+email_server = smtp.mydomain.com
+email_server_port = 25
+email_username = sample_admin@mydomain.com
+email_password = abc
+email_from = admin
+email_ssl = false
+email_insecure = false
+
+# The admin UI login password after Harbor starts; the default is Harbor12345, default username: admin
+harbor_admin_password = Harbor12345
+
+auth_mode = db_auth
+ldap_url = ldaps://ldap.mydomain.com
+ldap_basedn = ou=people,dc=mydomain,dc=com
+ldap_uid = uid
+ldap_scope = 2
+ldap_timeout = 5
+ldap_verify_cert = true
+ldap_group_basedn = ou=group,dc=mydomain,dc=com
+ldap_group_filter = objectclass=group
+ldap_group_gid = cn
+ldap_group_scope = 2
+self_registration = on
+token_expiration = 30
+project_creation_restriction = everyone
+db_host = mysql
+db_password = root123
+db_port = 3306
+db_user = root
+redis_url = redis:6379
+clair_db_host = postgres
+clair_db_password = password
+clair_db_port = 5432
+clair_db_username = postgres
+clair_db = postgres
+uaa_endpoint = uaa.mydomain.org
+uaa_clientid = id
+uaa_clientsecret = secret
+uaa_verify_cert = true
+uaa_ca_cert = /path/to/ca.pem
+registry_storage_provider_name = filesystem
+registry_storage_provider_config =
+```
+
+- A successful install occupies these ports, so prepare accordingly; to use different ports, edit: `vim /usr/local/harbor/docker-compose.yml`
+    - `80`
+    - `6379`
+    - `3306`
+    - `5000`
+    - `1514`
+- Restarting Harbor later also relies on this file: `docker-compose -f /usr/local/harbor/docker-compose.yml restart`
+- Start the installation: `sh /usr/local/harbor/install.sh`; the console output looks like this:
+
+```
+
+[Step 0]: checking installation environment ...
+
+Note: docker version: 17.12.0
+
+Note: docker-compose version: 1.18.0
+
+[Step 1]: loading Harbor images ...
+52ef9064d2e4: Loading layer [==================================================>] 135.9MB/135.9MB +4a6862dbadda: Loading layer [==================================================>] 23.25MB/23.25MB +58b7d0c522b2: Loading layer [==================================================>] 24.4MB/24.4MB +9cd4bb748634: Loading layer [==================================================>] 7.168kB/7.168kB +c81302a14908: Loading layer [==================================================>] 10.56MB/10.56MB +7848e9ba72a3: Loading layer [==================================================>] 24.39MB/24.39MB +Loaded image: vmware/harbor-ui:v1.5.1 +f1691b5a5198: Loading layer [==================================================>] 73.15MB/73.15MB +a529013c99e4: Loading layer [==================================================>] 3.584kB/3.584kB +d9b4853cff8b: Loading layer [==================================================>] 3.072kB/3.072kB +3d305073979e: Loading layer [==================================================>] 4.096kB/4.096kB +c9e17074f54a: Loading layer [==================================================>] 3.584kB/3.584kB +956055840e30: Loading layer [==================================================>] 9.728kB/9.728kB +Loaded image: vmware/harbor-log:v1.5.1 +185db06a02d0: Loading layer [==================================================>] 23.25MB/23.25MB +835213979c70: Loading layer [==================================================>] 20.9MB/20.9MB +f74eeb41c1c9: Loading layer [==================================================>] 20.9MB/20.9MB +Loaded image: vmware/harbor-jobservice:v1.5.1 +9bd5c7468774: Loading layer [==================================================>] 23.25MB/23.25MB +5fa6889b9a6d: Loading layer [==================================================>] 2.56kB/2.56kB +bd3ac235b209: Loading layer [==================================================>] 2.56kB/2.56kB +cb5d493833cc: Loading layer [==================================================>] 2.048kB/2.048kB +557669a074de: Loading layer [==================================================>] 22.8MB/22.8MB +f02b4f30a9ac: Loading layer [==================================================>] 22.8MB/22.8MB +Loaded image: vmware/registry-photon:v2.6.2-v1.5.1 +5d3b562db23e: Loading layer [==================================================>] 23.25MB/23.25MB +8edca1b0e3b0: Loading layer [==================================================>] 12.16MB/12.16MB +ce5f11ea46c0: Loading layer [==================================================>] 17.3MB/17.3MB +93750d7ec363: Loading layer [==================================================>] 15.87kB/15.87kB +36f81937e80d: Loading layer [==================================================>] 3.072kB/3.072kB +37e5df92b624: Loading layer [==================================================>] 29.46MB/29.46MB +Loaded image: vmware/notary-server-photon:v0.5.1-v1.5.1 +0a2f8f90bd3a: Loading layer [==================================================>] 401.3MB/401.3MB +41fca4deb6bf: Loading layer [==================================================>] 9.216kB/9.216kB +f2e28262e760: Loading layer [==================================================>] 9.216kB/9.216kB +68677196e356: Loading layer [==================================================>] 7.68kB/7.68kB +2b006714574e: Loading layer [==================================================>] 1.536kB/1.536kB +Loaded image: vmware/mariadb-photon:v1.5.1 +a8c4992c632e: Loading layer [==================================================>] 156.3MB/156.3MB +0f37bf842677: Loading 
layer [==================================================>] 10.75MB/10.75MB +9f34c0cd38bf: Loading layer [==================================================>] 2.048kB/2.048kB +91ca17ca7e16: Loading layer [==================================================>] 48.13kB/48.13kB +5a7e0da65127: Loading layer [==================================================>] 10.8MB/10.8MB +Loaded image: vmware/clair-photon:v2.0.1-v1.5.1 +0e782fe069e7: Loading layer [==================================================>] 23.25MB/23.25MB +67fc1e2f7009: Loading layer [==================================================>] 15.36MB/15.36MB +8db2141aa82c: Loading layer [==================================================>] 15.36MB/15.36MB +Loaded image: vmware/harbor-adminserver:v1.5.1 +3f87a34f553c: Loading layer [==================================================>] 4.772MB/4.772MB +Loaded image: vmware/nginx-photon:v1.5.1 +Loaded image: vmware/photon:1.0 +ad58f3ddcb1b: Loading layer [==================================================>] 10.95MB/10.95MB +9b50f12509bf: Loading layer [==================================================>] 17.3MB/17.3MB +2c21090fd212: Loading layer [==================================================>] 15.87kB/15.87kB +38bec864f23e: Loading layer [==================================================>] 3.072kB/3.072kB +6e81ea7b0fa6: Loading layer [==================================================>] 28.24MB/28.24MB +Loaded image: vmware/notary-signer-photon:v0.5.1-v1.5.1 +897a26fa09cb: Loading layer [==================================================>] 95.02MB/95.02MB +16e3a10a21ba: Loading layer [==================================================>] 6.656kB/6.656kB +85ecac164331: Loading layer [==================================================>] 2.048kB/2.048kB +37a2fb188706: Loading layer [==================================================>] 7.68kB/7.68kB +Loaded image: vmware/postgresql-photon:v1.5.1 +bed9f52be1d1: Loading layer [==================================================>] 11.78kB/11.78kB +d731f2986f6e: Loading layer [==================================================>] 2.56kB/2.56kB +c3fde9a69f96: Loading layer [==================================================>] 3.072kB/3.072kB +Loaded image: vmware/harbor-db:v1.5.1 +7844feb13ef3: Loading layer [==================================================>] 78.68MB/78.68MB +de0fd8aae388: Loading layer [==================================================>] 3.072kB/3.072kB +3f79efb720fd: Loading layer [==================================================>] 59.9kB/59.9kB +1c02f801c2e8: Loading layer [==================================================>] 61.95kB/61.95kB +Loaded image: vmware/redis-photon:v1.5.1 +454c81edbd3b: Loading layer [==================================================>] 135.2MB/135.2MB +e99db1275091: Loading layer [==================================================>] 395.4MB/395.4MB +051e4ee23882: Loading layer [==================================================>] 9.216kB/9.216kB +6cca4437b6f6: Loading layer [==================================================>] 9.216kB/9.216kB +1d48fc08c8bc: Loading layer [==================================================>] 7.68kB/7.68kB +0419724fd942: Loading layer [==================================================>] 1.536kB/1.536kB +543c0c1ee18d: Loading layer [==================================================>] 655.2MB/655.2MB +4190aa7e89b8: Loading layer [==================================================>] 103.9kB/103.9kB +Loaded image: vmware/harbor-migrator:v1.5.0 + + +[Step 2]: 
preparing environment ...
+Generated and saved secret to file: /data/secretkey
+Generated configuration file: ./common/config/nginx/nginx.conf
+Generated configuration file: ./common/config/adminserver/env
+Generated configuration file: ./common/config/ui/env
+Generated configuration file: ./common/config/registry/config.yml
+Generated configuration file: ./common/config/db/env
+Generated configuration file: ./common/config/jobservice/env
+Generated configuration file: ./common/config/jobservice/config.yml
+Generated configuration file: ./common/config/log/logrotate.conf
+Generated configuration file: ./common/config/jobservice/config.yml
+Generated configuration file: ./common/config/ui/app.conf
+Generated certificate, key file: ./common/config/ui/private_key.pem, cert file: ./common/config/registry/root.crt
+The configuration files are ready, please use docker-compose to start the service.
+Creating harbor-log ... done
+
+[Step 3]: checking existing instance of Harbor ...
+
+Creating registry ... done
+Creating harbor-ui ... done
+Creating network "harbor_harbor" with the default driver
+Creating nginx ... done
+Creating registry ...
+Creating harbor-adminserver ...
+Creating harbor-db ...
+Creating redis ...
+Creating harbor-ui ...
+Creating harbor-jobservice ...
+Creating nginx ...
+
+✔ ----Harbor has been installed and started successfully.----
+
+Now you should be able to visit the admin portal at http://harbor.gitnavi.com.
+For more details, please visit https://github.com/vmware/harbor .
+```
+
+- After a successful install you can open:
+    - Default username: `admin`
+    - Default password: `Harbor12345`
+- The docker client uses https to reach a docker registry by default, while the Harbor configuration above uses http, so the client needs a change
+    - `vim /lib/systemd/system/docker.service`
+    - Change the default value: `ExecStart=/usr/bin/dockerd`
+    - to: `ExecStart=/usr/bin/dockerd --insecure-registry harbor.gitnavi.com`
+    - `systemctl daemon-reload`
+    - `systemctl reload docker`
+    - `systemctl restart docker`
+    - `docker-compose -f /usr/local/harbor/docker-compose.yml restart`
+- Open the UI and create a project, for example `youmeek`, which is needed below.
+    - The admin user is used here rather than creating a new one, but in real use it is better to create a dedicated user.
+    - `docker login -u admin -p Harbor12345 harbor.gitnavi.com`
+- Tag a local maven image: `docker tag maven:3.3-jdk-8 harbor.gitnavi.com/youmeek/harbor-maven:3.3-jdk-8`
+- Push it to the registry: `docker push harbor.gitnavi.com/youmeek/harbor-maven:3.3-jdk-8`
+
+----------------------------------------------------------------------------
+
+## harbor.cfg defaults
+
+
+```
+## Configuration file of Harbor
+
+#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
+_version = 1.5.0
+#The IP address or hostname to access admin UI and registry service.
+#DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
+hostname = reg.mydomain.com
+
+#The protocol for accessing the UI and token/notification service, by default it is http.
+#It can be set to https if ssl is enabled on nginx.
+ui_url_protocol = http
+
+#Maximum number of job workers in job service
+max_job_workers = 50
+
+#Determine whether or not to generate certificate for the registry's token.
+#If the value is on, the prepare script creates new root cert and private key
+#for generating token to access the registry. If the value is off the default key/cert will be used.
+#This flag also controls the creation of the notary signer's cert.
+customize_crt = on + +#The path of cert and key files for nginx, they are applied only the protocol is set to https +ssl_cert = /data/cert/server.crt +ssl_cert_key = /data/cert/server.key + +#The path of secretkey storage +secretkey_path = /data + +#Admiral's url, comment this attribute, or set its value to NA when Harbor is standalone +admiral_url = NA + +#Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated. +log_rotate_count = 50 +#Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes. +#If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G +#are all valid. +log_rotate_size = 200M + +#Config http proxy for Clair, e.g. http://my.proxy.com:3128 +#Clair doesn't need to connect to harbor ui container via http proxy. +http_proxy = +https_proxy = +no_proxy = 127.0.0.1,localhost,ui + +#NOTES: The properties between BEGIN INITIAL PROPERTIES and END INITIAL PROPERTIES +#only take effect in the first boot, the subsequent changes of these properties +#should be performed on web ui + +#************************BEGIN INITIAL PROPERTIES************************ + +#Email account settings for sending out password resetting emails. + +#Email server uses the given username and password to authenticate on TLS connections to host and act as identity. +#Identity left blank to act as username. +email_identity = + +email_server = smtp.mydomain.com +email_server_port = 25 +email_username = sample_admin@mydomain.com +email_password = abc +email_from = admin +email_ssl = false +email_insecure = false + +##The initial password of Harbor admin, only works for the first time when Harbor starts. +#It has no effect after the first launch of Harbor. +#Change the admin password from UI after launching Harbor. +harbor_admin_password = Harbor12345 + +##By default the auth mode is db_auth, i.e. the credentials are stored in a local database. +#Set it to ldap_auth if you want to verify a user's credentials against an LDAP server. +auth_mode = db_auth + +#The url for an ldap endpoint. +ldap_url = ldaps://ldap.mydomain.com + +#A user's DN who has the permission to search the LDAP/AD server. +#If your LDAP/AD server does not support anonymous search, you should configure this DN and ldap_search_pwd. +#ldap_searchdn = uid=searchuser,ou=people,dc=mydomain,dc=com + +#the password of the ldap_searchdn +#ldap_search_pwd = password + +#The base DN from which to look up a user in LDAP/AD +ldap_basedn = ou=people,dc=mydomain,dc=com + +#Search filter for LDAP/AD, make sure the syntax of the filter is correct. +#ldap_filter = (objectClass=person) + +# The attribute used in a search to match a user, it could be uid, cn, email, sAMAccountName or other attributes depending on your LDAP/AD +ldap_uid = uid + +#the scope to search for users, 0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE +ldap_scope = 2 + +#Timeout (in seconds) when connecting to an LDAP Server. The default value (and most reasonable) is 5 seconds. 
+ldap_timeout = 5 + +#Verify certificate from LDAP server +ldap_verify_cert = true + +#The base dn from which to lookup a group in LDAP/AD +ldap_group_basedn = ou=group,dc=mydomain,dc=com + +#filter to search LDAP/AD group +ldap_group_filter = objectclass=group + +#The attribute used to name a LDAP/AD group, it could be cn, name +ldap_group_gid = cn + +#The scope to search for ldap groups. 0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE +ldap_group_scope = 2 + +#Turn on or off the self-registration feature +self_registration = on + +#The expiration time (in minute) of token created by token service, default is 30 minutes +token_expiration = 30 + +#The flag to control what users have permission to create projects +#The default value "everyone" allows everyone to creates a project. +#Set to "adminonly" so that only admin user can create project. +project_creation_restriction = everyone + +#************************END INITIAL PROPERTIES************************ + +#######Harbor DB configuration section####### + +#The address of the Harbor database. Only need to change when using external db. +db_host = mysql + +#The password for the root user of Harbor DB. Change this before any production use. +db_password = root123 + +#The port of Harbor database host +db_port = 3306 + +#The user name of Harbor database +db_user = root + +##### End of Harbor DB configuration####### + +#The redis server address. Only needed in HA installation. +#address:port[,weight,password,db_index] +redis_url = redis:6379 + +##########Clair DB configuration############ + +#Clair DB host address. Only change it when using an exteral DB. +clair_db_host = postgres + +#The password of the Clair's postgres database. Only effective when Harbor is deployed with Clair. +#Please update it before deployment. Subsequent update will cause Clair's API server and Harbor unable to access Clair's database. +clair_db_password = password + +#Clair DB connect port +clair_db_port = 5432 + +#Clair DB username +clair_db_username = postgres + +#Clair default database +clair_db = postgres + +##########End of Clair DB configuration############ + +#The following attributes only need to be set when auth mode is uaa_auth +uaa_endpoint = uaa.mydomain.org +uaa_clientid = id +uaa_clientsecret = secret +uaa_verify_cert = true +uaa_ca_cert = /path/to/ca.pem + + +### Docker Registry setting ### +#registry_storage_provider can be: filesystem, s3, gcs, azure, etc. +registry_storage_provider_name = filesystem +#registry_storage_provider_config is a comma separated "key: value" pairs, e.g. "key1: value, key2: value2". +#Refer to https://docs.docker.com/registry/configuration/#storage for all available configuration. 
+registry_storage_provider_config =
+```
+
+
+## References
+
+- 
+- 

From 685a317f36cf586b9f88a3a6814aa166bf538ad9 Mon Sep 17 00:00:00 2001
From: judasn
Date: Mon, 11 Jun 2018 22:47:04 +0800
Subject: [PATCH 009/330] 2018-06-11

---
 markdown-file/CentOS6-and-CentOS7.md | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/markdown-file/CentOS6-and-CentOS7.md b/markdown-file/CentOS6-and-CentOS7.md
index e09f9097..87afe8d8 100644
--- a/markdown-file/CentOS6-and-CentOS7.md
+++ b/markdown-file/CentOS6-and-CentOS7.md
@@ -36,7 +36,9 @@
 
 ### Opening ports
 
-- Add a port: `firewall-cmd --zone=public --add-port=8883/tcp --permanent`
+- Add a single port: `firewall-cmd --zone=public --add-port=8883/tcp --permanent`
+- Add a port range: `firewall-cmd --zone=public --add-port=8883-8885/tcp --permanent`
+- Remove a port: `firewall-cmd --zone=public --remove-port=8883/tcp --permanent`
 - Reload the firewall: `firewall-cmd --reload`
 - Option meanings:
     - `--zone` # scope

From 3fd144558da1a6f2134e82eaf19d8db33ac2d30a Mon Sep 17 00:00:00 2001
From: judasn
Date: Mon, 11 Jun 2018 23:38:54 +0800
Subject: [PATCH 010/330] 2018-06-11 pxc

---
 README.md                                 |  1 +
 SUMMARY.md                                |  1 +
 TOC.md                                    |  1 +
 markdown-file/PXC-Install-And-Settings.md | 45 +++++++++++++++++++++++
 4 files changed, 48 insertions(+)
 create mode 100644 markdown-file/PXC-Install-And-Settings.md

diff --git a/README.md b/README.md
index 6ebc2af8..b7395ef5 100644
--- a/README.md
+++ b/README.md
@@ -54,6 +54,7 @@
 - [MySQL Optimization](markdown-file/Mysql-Optimize.md)
 - [MySQL Testing](markdown-file/Mysql-Test.md)
 - [MySQL Tutorial](markdown-file/Mysql-Tutorial.md)
+- [Percona XtraDB Cluster (PXC) Install and Settings](markdown-file/PXC-Install-And-Settings.md)
 - [Redis Install and Settings](markdown-file/Redis-Install-And-Settings.md)
 - [MongoDB Install and Settings](markdown-file/MongoDB-Install-And-Settings.md)
 - [Solr Install and Settings](markdown-file/Solr-Install-And-Settings.md)
diff --git a/SUMMARY.md b/SUMMARY.md
index d13cfbf6..787d9e4a 100644
--- a/SUMMARY.md
+++ b/SUMMARY.md
@@ -43,6 +43,7 @@
 * [MySQL Optimization](markdown-file/Mysql-Optimize.md)
 * [MySQL Testing](markdown-file/Mysql-Test.md)
 * [MySQL Tutorial](markdown-file/Mysql-Tutorial.md)
+* [Percona XtraDB Cluster (PXC) Install and Settings](markdown-file/PXC-Install-And-Settings.md)
 * [Redis Install and Settings](markdown-file/Redis-Install-And-Settings.md)
 * [MongoDB Install and Settings](markdown-file/MongoDB-Install-And-Settings.md)
 * [Solr Install and Settings](markdown-file/Solr-Install-And-Settings.md)
diff --git a/TOC.md b/TOC.md
index 896c483d..3c93a7ef 100644
--- a/TOC.md
+++ b/TOC.md
@@ -41,6 +41,7 @@
 - [MySQL Optimization](markdown-file/Mysql-Optimize.md)
 - [MySQL Testing](markdown-file/Mysql-Test.md)
 - [MySQL Tutorial](markdown-file/Mysql-Tutorial.md)
+- [Percona XtraDB Cluster (PXC) Install and Settings](markdown-file/PXC-Install-And-Settings.md)
 - [Redis Install and Settings](markdown-file/Redis-Install-And-Settings.md)
 - [MongoDB Install and Settings](markdown-file/MongoDB-Install-And-Settings.md)
 - [Solr Install and Settings](markdown-file/Solr-Install-And-Settings.md)
diff --git a/markdown-file/PXC-Install-And-Settings.md b/markdown-file/PXC-Install-And-Settings.md
new file mode 100644
index 00000000..f90b2471
--- /dev/null
+++ b/markdown-file/PXC-Install-And-Settings.md
@@ -0,0 +1,45 @@
+# Percona XtraDB Cluster (PXC) Install and Settings
+
+## Key PXC characteristics
+
+- Main characteristic: strong consistency (well suited to transaction-heavy scenarios)
+    - Synchronous replication is used: a transaction either commits on all nodes at the same time or does not commit at all, failing the write
+    - The cluster's write throughput is therefore capped by the weakest node; one under-provisioned node drags down the whole cluster
+- Replication is bidirectional: every node is both replica and primary, and all of them accept writes
+- At least 3 nodes are generally recommended
+
+## Official resources
+
+- Official introduction:
+- Official download:
+
+## Installing with Docker
+
+- Official Docker repository:
+- Pull the image: `docker pull percona/percona-xtradb-cluster`
+- Create the mount directories: `mkdir -p /data/docker/pxc/node1 /data/docker/pxc/node2 /data/docker/pxc/node3`
+- Grant permissions: `chmod 777 -R /data/docker/pxc`
+- Create a dedicated Docker network: `docker network create --subnet=172.18.0.0/24 pxc-net`
+- Start the images:
+
+```
+# The first initialisation is slow, allow about 2 minutes
+docker run -d -p 3307:3306 -v /data/docker/pxc/node1:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=gitnavi123456 -e CLUSTER_NAME=pxc-cluster -e XTRABACKUP_PASSWORD=gitnavi123456 --privileged --name=pxc-node-1 --net=pxc-net --ip 172.18.0.2 percona/percona-xtradb-cluster
+```
+
+- Use SQLyog to test the connection; only continue creating the other nodes once it works.
+    - The address is the host IP, port: 3307
+
+```
+docker run -d -p 3308:3306 -v /data/docker/pxc/node2:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=gitnavi123456 -e CLUSTER_NAME=pxc-cluster -e XTRABACKUP_PASSWORD=gitnavi123456 -e CLUSTER_JOIN=pxc-node-1 --privileged --name=pxc-node-2 --net=pxc-net --ip 172.18.0.3 percona/percona-xtradb-cluster

+docker run -d -p 3309:3306 -v /data/docker/pxc/node3:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=gitnavi123456 -e CLUSTER_NAME=pxc-cluster -e XTRABACKUP_PASSWORD=gitnavi123456 -e CLUSTER_JOIN=pxc-node-1 --privileged --name=pxc-node-3 --net=pxc-net --ip 172.18.0.4 percona/percona-xtradb-cluster
+```
+
+- Testing the cluster
+    - Connect SQLyog to all 3 nodes; create a database on any node and the other nodes produce it at the same time. Likewise create tables and insert data, then check the other databases.
+
+
+## References
+
+- 
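The cluster test in the patch above can also be scripted rather than clicked through in SQLyog. A minimal sketch run from the host, assuming the mysql command-line client is installed there; the database name `pxc_smoke` is made up for the test:

```bash
# Write on node 1 (host port 3307)...
mysql -h 127.0.0.1 -P 3307 -u root -pgitnavi123456 -e "
  CREATE DATABASE IF NOT EXISTS pxc_smoke;
  CREATE TABLE IF NOT EXISTS pxc_smoke.t (id INT PRIMARY KEY);
  REPLACE INTO pxc_smoke.t VALUES (1);"

# ...then read the same row back from node 2 and node 3
mysql -h 127.0.0.1 -P 3308 -u root -pgitnavi123456 -e "SELECT * FROM pxc_smoke.t;"
mysql -h 127.0.0.1 -P 3309 -u root -pgitnavi123456 -e "SELECT * FROM pxc_smoke.t;"
```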
From efe8996826dea59194e6297201c70315479d5e5b Mon Sep 17 00:00:00 2001
From: judasn
Date: Tue, 12 Jun 2018 22:18:10 +0800
Subject: [PATCH 011/330] 2018-06-12 pxc + HAProxy

---
 markdown-file/PXC-Install-And-Settings.md | 88 +++++++++++++++++++++++
 1 file changed, 88 insertions(+)

diff --git a/markdown-file/PXC-Install-And-Settings.md b/markdown-file/PXC-Install-And-Settings.md
index f90b2471..a06c9848 100644
--- a/markdown-file/PXC-Install-And-Settings.md
+++ b/markdown-file/PXC-Install-And-Settings.md
@@ -39,7 +39,95 @@ docker run -d -p 3309:3306 -v /data/docker/pxc/node3:/var/lib/mysql -e MYSQL_ROO
 - Testing the cluster
     - Connect SQLyog to all 3 nodes; create a database on any node and the other nodes produce it at the same time. Likewise create tables and insert data, then check the other databases.
 
+## Load balancing
+
+- Because PXC replication is bidirectional and every node accepts both reads and writes, a load balancer can be used to spread the traffic
+- HAProxy is used here (it supports HTTP and TCP/IP, supports virtualisation, and can be installed directly with Docker)
+- Create the mount directory: `mkdir -p /data/docker/haproxy/conf`
+- Grant permissions: `chmod 777 -R /data/docker/haproxy`
+- Create a MySQL user for heartbeat checks:
+    - Connect to any PXC database and run: `CREATE USER 'haproxy'@'%' IDENTIFIED BY '';`
+- Create the configuration file: `vim /data/docker/haproxy/conf/haproxy.cfg`
+
+```
+global
+    #working directory
+    chroot /usr/local/etc/haproxy
+    #log to the rsyslog local5 facility (/var/log/local5), level info
+    log 127.0.0.1 local5 info
+    #run as a daemon
+    daemon
+
+defaults
+    log global
+    mode    http
+    #log format
+    option  httplog
+    #do not log load-balancer heartbeat checks
+    option  dontlognull
+    #connect timeout (ms)
+    timeout connect 5000
+    #client timeout (ms)
+    timeout client  50000
+    #server timeout (ms)
+    timeout server  50000

+#monitoring UI
+listen  admin_stats
+    #IP and port the monitoring UI listens on
+    bind    0.0.0.0:8118
+    #protocol
+    mode        http
+    #relative URI (the haproxy monitoring page: http://192.168.0.105:8118/dbs)
+    stats uri   /dbs
+    #report format
+    stats realm     Global\ statistics
+    #login credentials (username: admin, password: gitnavi123456)
+    stats auth  admin:gitnavi123456
+#database load balancing
+listen  proxy-mysql
+    #IP and port to listen on
+    bind    0.0.0.0:3316
+    #protocol
+    mode    tcp
+    #balancing algorithm (round robin)
+    #round robin: roundrobin
+    #weighted: static-rr
+    #least connections: leastconn
+    #source IP: source
+    balance roundrobin
+    #log format
+    option  tcplog
+    #haproxy uses the passwordless, privilege-less 'haproxy' MySQL user created above for heartbeat checks
+    option  mysql-check user haproxy
+    #the ports below are the docker container ports, not the host ports
+    server  MySQL_1 172.18.0.2:3306 check weight 1 maxconn 2000
+    server  MySQL_2 172.18.0.3:3306 check weight 1 maxconn 2000
+    server  MySQL_3 172.18.0.4:3306 check weight 1 maxconn 2000
+    #use keepalive to detect dead links
+    option  tcpka
+```
+
+- Official Docker image:
+- Run the container: `docker run -it -d -p 4001:8118 -p 4002:3316 -v /data/docker/haproxy/conf:/usr/local/etc/haproxy --name pxc-haproxy-1 --privileged --net=pxc-net haproxy -f /usr/local/etc/haproxy/haproxy.cfg`
+- Open in the browser:
+    - Enter: `admin`
+    - Enter: `gitnavi123456`
+    - The HAProxy monitoring page appears
+- Connect with SQLyog
+    - IP: `192.168.0.105`
+    - Port: `4002`
+    - User: `root`
+    - Password: `gitnavi123456`
+    - Then create some data through it; if all nodes end up with the data, the deployment works
 
 ## References
 
 - 
+- 
+- 
+- <>
+- <>
+- <>
+- <>
+- <>
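Round-robin balancing can be verified from the shell as well: every fresh connection through port 4002 should land on a different PXC node. A small loop, again assuming a local mysql client:

```bash
# Each iteration opens a new connection, so HAProxy rotates through the three nodes
for i in 1 2 3; do
  mysql -h 192.168.0.105 -P 4002 -u root -pgitnavi123456 \
    -e "SHOW VARIABLES LIKE 'wsrep_node_address';"
done
```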
From 711bea098229323e3941e81e619c120d40b83c24 Mon Sep 17 00:00:00 2001
From: judasn
Date: Tue, 12 Jun 2018 23:47:49 +0800
Subject: [PATCH 012/330] 2018-06-12 pxc + HAProxy

---
 markdown-file/PXC-Install-And-Settings.md | 33 +++++++++++++++++++++--
 1 file changed, 31 insertions(+), 2 deletions(-)

diff --git a/markdown-file/PXC-Install-And-Settings.md b/markdown-file/PXC-Install-And-Settings.md
index a06c9848..66d022c6 100644
--- a/markdown-file/PXC-Install-And-Settings.md
+++ b/markdown-file/PXC-Install-And-Settings.md
@@ -121,13 +121,42 @@ listen proxy-mysql
     - Password: `gitnavi123456`
     - Then create some data through it; if all nodes end up with the data, the deployment works
 
+## HAProxy high availability (Keepalived)
+
+- Configure a virtual IP
+    - The virtual IPs inside Docker cannot be used from outside, so the host's Keepalived is needed to map them to an externally reachable virtual IP
+- Enter the pxc-haproxy-1 container and install Keepalived: `docker exec -it pxc-haproxy-1 /bin/bash`
+    - `apt-get update`
+    - `apt-get install -y keepalived`
+    - `apt-get install -y vim`
+    - `vim /etc/keepalived/keepalived.conf`
+
+```
+vrrp_instance VI_1 {
+    state MASTER
+    interface eth0
+    virtual_router_id 51
+    priority 100
+    advert_int 1
+    authentication {
+        auth_type PASS
+        auth_pass 123456
+    }
+    virtual_ipaddress {
+        172.18.0.201
+    }
+}
+```
+
+- `service keepalived start`
+- Test from the host: `ping 172.18.0.201`; if it responds, everything is fine
+
 ## References
 
 - 
 - 
 - 
-- <>
-- <>
+- 
 - <>
 - <>
 - <>
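Besides the ping test, it helps to confirm that the container actually claimed the VIP. A sketch, assuming iproute2 is present in the Debian-based haproxy container (apt-get install it if the `ip` command is missing):

```bash
# The VIP should be attached to eth0 inside whichever container holds the MASTER state
docker exec pxc-haproxy-1 ip addr show eth0 | grep 172.18.0.201

# The stats page should answer on the VIP as well; a 401 still proves reachability,
# since the page asks for the admin login configured in haproxy.cfg
curl -s -o /dev/null -w "%{http_code}\n" http://172.18.0.201:8118/dbs
```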
From a82dc989e1c099cabdb5801237dd8ef3a267163b Mon Sep 17 00:00:00 2001
From: judasn
Date: Wed, 13 Jun 2018 23:52:51 +0800
Subject: [PATCH 013/330] 2018-06-12 pxc + HAProxy

---
 markdown-file/PXC-Install-And-Settings.md | 153 +++++++++++++++++++++-
 1 file changed, 146 insertions(+), 7 deletions(-)

diff --git a/markdown-file/PXC-Install-And-Settings.md b/markdown-file/PXC-Install-And-Settings.md
index 66d022c6..6ba045d5 100644
--- a/markdown-file/PXC-Install-And-Settings.md
+++ b/markdown-file/PXC-Install-And-Settings.md
@@ -39,7 +39,11 @@ docker run -d -p 3309:3306 -v /data/docker/pxc/node3:/var/lib/mysql -e MYSQL_ROO
 - Testing the cluster
     - Connect SQLyog to all 3 nodes; create a database on any node and the other nodes produce it at the same time. Likewise create tables and insert data, then check the other databases.
 
-## Load balancing
+## Load balancing (HAProxy + Keepalived)
+
+### HAProxy-1
+
+#### Creating HAProxy-1
 
 - Because PXC replication is bidirectional and every node accepts both reads and writes, a load balancer can be used to spread the traffic
 - HAProxy is used here (it supports HTTP and TCP/IP, supports virtualisation, and can be installed directly with Docker)
@@ -109,7 +113,7 @@ listen proxy-mysql
 ```
 
 - Official Docker image:
-- Run the container: `docker run -it -d -p 4001:8118 -p 4002:3316 -v /data/docker/haproxy/conf:/usr/local/etc/haproxy --name pxc-haproxy-1 --privileged --net=pxc-net haproxy -f /usr/local/etc/haproxy/haproxy.cfg`
+- Run the container: `docker run -it -d -p 4001:8118 -p 4002:3316 -v /data/docker/haproxy/conf:/usr/local/etc/haproxy --name pxc-haproxy-1 --privileged --net=pxc-net --ip 172.18.0.7 haproxy -f /usr/local/etc/haproxy/haproxy.cfg`
 - Open in the browser:
     - Enter: `admin`
     - Enter: `gitnavi123456`
     - The HAProxy monitoring page appears
@@ -121,15 +125,32 @@ listen proxy-mysql
     - Password: `gitnavi123456`
     - Then create some data through it; if all nodes end up with the data, the deployment works
 
-## HAProxy high availability (Keepalived)
+#### Configuring Keepalived for HAProxy-1
 
 - Configure a virtual IP
     - The virtual IPs inside Docker cannot be used from outside, so the host's Keepalived is needed to map them to an externally reachable virtual IP
 - Enter the pxc-haproxy-1 container and install Keepalived: `docker exec -it pxc-haproxy-1 /bin/bash`
-    - `apt-get update`
-    - `apt-get install -y keepalived`
-    - `apt-get install -y vim`
-    - `vim /etc/keepalived/keepalived.conf`
+- Switch the apt sources first, otherwise downloads are far too slow:
+
+```
+cat << EOF > /etc/apt/sources.list
+deb http://mirrors.163.com/ubuntu/ wily main restricted universe multiverse
+deb http://mirrors.163.com/ubuntu/ wily-security main restricted universe multiverse
+deb http://mirrors.163.com/ubuntu/ wily-updates main restricted universe multiverse
+deb http://mirrors.163.com/ubuntu/ wily-proposed main restricted universe multiverse
+deb http://mirrors.163.com/ubuntu/ wily-backports main restricted universe multiverse
+deb-src http://mirrors.163.com/ubuntu/ wily main restricted universe multiverse
+deb-src http://mirrors.163.com/ubuntu/ wily-security main restricted universe multiverse
+deb-src http://mirrors.163.com/ubuntu/ wily-updates main restricted universe multiverse
+deb-src http://mirrors.163.com/ubuntu/ wily-proposed main restricted universe multiverse
+deb-src http://mirrors.163.com/ubuntu/ wily-backports main restricted universe multiverse
+EOF
+```
+
+- `apt-get update`
+- `apt-get install -y vim`
+- `apt-get install -y keepalived`
+- `vim /etc/keepalived/keepalived.conf`
 
 ```
 vrrp_instance VI_1 {
     state MASTER
     interface eth0
     virtual_router_id 51
     priority 100
     advert_int 1
     authentication {
         auth_type PASS
         auth_pass 123456
     }
     virtual_ipaddress {
         172.18.0.201
     }
 }
 ```
 
 - `service keepalived start`
 - Test from the host: `ping 172.18.0.201`; if it responds, everything is fine
 
+### HAProxy-2
+
+#### Creating HAProxy-2
+
+- Run the container: `docker run -it -d -p 4003:8118 -p 4004:3316 -v /data/docker/haproxy/conf:/usr/local/etc/haproxy --name pxc-haproxy-2 --privileged --net=pxc-net --ip 172.18.0.8 haproxy -f /usr/local/etc/haproxy/haproxy.cfg`
+- Open in the browser:
+    - Enter: `admin`
+    - Enter: `gitnavi123456`
+    - The HAProxy monitoring page appears
+- Connect with SQLyog
+    - IP: `192.168.0.105`
+    - Port: `4004`
+    - User: `root`
+    - Password: `gitnavi123456`
+    - Then create some data through it; if all nodes end up with the data, the deployment works
+
+#### Configuring Keepalived for HAProxy-2
+
+- Configure a virtual IP
+    - The virtual IPs inside Docker cannot be used from outside, so the host's Keepalived is needed to map them to an externally reachable virtual IP
+- Enter the pxc-haproxy-2 container and install Keepalived: `docker exec -it pxc-haproxy-2 /bin/bash`
+- Switch the apt sources first, otherwise downloads are far too slow:
+
+```
+cat << EOF > /etc/apt/sources.list
+deb http://mirrors.163.com/ubuntu/ wily main restricted universe multiverse
+deb http://mirrors.163.com/ubuntu/ wily-security main restricted universe multiverse
+deb http://mirrors.163.com/ubuntu/ wily-updates main restricted universe multiverse
+deb http://mirrors.163.com/ubuntu/ wily-proposed main restricted universe multiverse
+deb http://mirrors.163.com/ubuntu/ wily-backports main restricted universe multiverse
+deb-src http://mirrors.163.com/ubuntu/ wily main restricted universe multiverse
+deb-src http://mirrors.163.com/ubuntu/ wily-security main restricted universe multiverse
+deb-src http://mirrors.163.com/ubuntu/ wily-updates main restricted universe multiverse
+deb-src http://mirrors.163.com/ubuntu/ wily-proposed main restricted universe multiverse
+deb-src http://mirrors.163.com/ubuntu/ wily-backports main restricted universe multiverse
+EOF
+```
+
+- `apt-get update`
+- `apt-get install -y vim`
+- `apt-get install -y keepalived`
+- `vim /etc/keepalived/keepalived.conf`
+
+```
+vrrp_instance VI_1 {
+    state MASTER
+    interface eth0
+    virtual_router_id 51
+    priority 100
+    advert_int 1
+    authentication {
+        auth_type PASS
+        auth_pass 123456
+    }
+    virtual_ipaddress {
+        172.18.0.201
+    }
+}
+```
+
+- `service keepalived start`
+- Test from the host: `ping 172.18.0.201`; if it responds, everything is fine
+
+
+## Installing Keepalived on the host
+
+```
+
+yum install -y keepalived
+
+vi /etc/keepalived/keepalived.conf
+
+```
+
+```
+vrrp_instance VI_1 {
+    state MASTER
+    interface ens33
+    virtual_router_id 51
+    priority 100
+    advert_int 1
+    authentication {
+        auth_type PASS
+        auth_pass 123456
+    }
+    virtual_ipaddress {
+        192.168.99.150
+    }
+}
+
+virtual_server 192.168.99.150 8118 {
+    delay_loop 3
+    lb_algo rr
+    lb_kind NAT
+    persistence_timeout 50
+    protocol TCP
+
+    real_server 172.18.0.201 8118 {
+        weight 1
+    }
+}
+
+virtual_server 192.168.99.150 3316 {
+    delay_loop 3
+    lb_algo rr
+    lb_kind NAT
+    persistence_timeout 50
+    protocol TCP
+
+    real_server 172.18.0.201 3316 {
+        weight 1
+    }
+}
+```
+
+- `systemctl start keepalived`
+
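With the host-level Keepalived from this patch, clients stop caring about container addresses entirely and go through the host VIP. A quick end-to-end check from another machine on the LAN, assuming the VIP and ports exactly as configured above and a local mysql client:

```bash
# MySQL traffic: host VIP port 3316 is NAT-ed through to the HAProxy VIP 172.18.0.201:3316
mysql -h 192.168.99.150 -P 3316 -u root -pgitnavi123456 -e "SELECT 1;"

# Monitoring UI: host VIP port 8118 forwards to the HAProxy stats page
curl -u admin:gitnavi123456 http://192.168.99.150:8118/dbs
```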
From 65399675e3e41e896eebf2586fada98327ee531b Mon Sep 17 00:00:00 2001
From: judasn
Date: Thu, 14 Jun 2018 21:36:32 +0800
Subject: [PATCH 014/330] 2018-06-14 pxc + HAProxy

---
 markdown-file/PXC-Install-And-Settings.md | 172 +---------------------
 1 file changed, 2 insertions(+), 170 deletions(-)

diff --git a/markdown-file/PXC-Install-And-Settings.md b/markdown-file/PXC-Install-And-Settings.md
index 6ba045d5..5db366bf 100644
--- a/markdown-file/PXC-Install-And-Settings.md
+++ b/markdown-file/PXC-Install-And-Settings.md
@@ -39,11 +39,7 @@ docker run -d -p 3309:3306 -v /data/docker/pxc/node3:/var/lib/mysql -e MYSQL_ROO
 - Testing the cluster
     - Connect SQLyog to all 3 nodes; create a database on any node and the other nodes produce it at the same time. Likewise create tables and insert data, then check the other databases.
 
-## Load balancing (HAProxy + Keepalived)
-
-### HAProxy-1
-
-#### Creating HAProxy-1
+## Load balancing
 
 - Because PXC replication is bidirectional and every node accepts both reads and writes, a load balancer can be used to spread the traffic
 - HAProxy is used here (it supports HTTP and TCP/IP, supports virtualisation, and can be installed directly with Docker)
@@ -109,7 +109,7 @@ listen proxy-mysql
 ```
 
 - Official Docker image:
-- Run the container: `docker run -it -d -p 4001:8118 -p 4002:3316 -v /data/docker/haproxy/conf:/usr/local/etc/haproxy --name pxc-haproxy-1 --privileged --net=pxc-net --ip 172.18.0.7 haproxy -f /usr/local/etc/haproxy/haproxy.cfg`
+- Run the container: `docker run -it -d -p 4001:8118 -p 4002:3316 -v /data/docker/haproxy/conf:/usr/local/etc/haproxy --name pxc-haproxy-1 --privileged --net=pxc-net haproxy -f /usr/local/etc/haproxy/haproxy.cfg`
 - Open in the browser:
     - Enter: `admin`
     - Enter: `gitnavi123456`
     - The HAProxy monitoring page appears
@@ -121,170 +121,6 @@ listen proxy-mysql
     - Password: `gitnavi123456`
     - Then create some data through it; if all nodes end up with the data, the deployment works
 
-#### Configuring Keepalived for HAProxy-1
-
-- Configure a virtual IP
-    - The virtual IPs inside Docker cannot be used from outside, so the host's Keepalived is needed to map them to an externally reachable virtual IP
-- Enter the pxc-haproxy-1 container and install Keepalived: `docker exec -it pxc-haproxy-1 /bin/bash`
-- Switch the apt sources first, otherwise downloads are far too slow:
-
-```
-cat << EOF > /etc/apt/sources.list
-deb http://mirrors.163.com/ubuntu/ wily main restricted universe multiverse
-deb http://mirrors.163.com/ubuntu/ wily-security main restricted universe multiverse
-deb http://mirrors.163.com/ubuntu/ wily-updates main restricted universe multiverse
-deb http://mirrors.163.com/ubuntu/ wily-proposed main restricted universe multiverse
-deb http://mirrors.163.com/ubuntu/ wily-backports main restricted universe multiverse
-deb-src http://mirrors.163.com/ubuntu/ wily main restricted universe multiverse
-deb-src http://mirrors.163.com/ubuntu/ wily-security main restricted universe multiverse
-deb-src http://mirrors.163.com/ubuntu/ wily-updates main restricted universe multiverse
-deb-src http://mirrors.163.com/ubuntu/ wily-proposed main restricted universe multiverse
-deb-src http://mirrors.163.com/ubuntu/ wily-backports main restricted universe multiverse
-EOF
-```
-
-- `apt-get update`
-- `apt-get install -y vim`
-- `apt-get install -y keepalived`
-- `vim /etc/keepalived/keepalived.conf`
-
-```
-vrrp_instance VI_1 {
-    state MASTER
-    interface eth0
-    virtual_router_id 51
-    priority 100
-    advert_int 1
-    authentication {
-        auth_type PASS
-        auth_pass 123456
-    }
-    virtual_ipaddress {
-        172.18.0.201
-    }
-}
-```
-
-- `service keepalived start`
-- Test from the host: `ping 172.18.0.201`; if it responds, everything is fine
-
-### HAProxy-2
-
-#### Creating HAProxy-2
-
-- Run the container: `docker run -it -d -p 4003:8118 -p 4004:3316 -v /data/docker/haproxy/conf:/usr/local/etc/haproxy --name pxc-haproxy-2 --privileged --net=pxc-net --ip 172.18.0.8 haproxy -f /usr/local/etc/haproxy/haproxy.cfg`
-- Open in the browser:
-    - Enter: `admin`
-    - Enter: `gitnavi123456`
-    - The HAProxy monitoring page appears
-- Connect with SQLyog
-    - IP: `192.168.0.105`
-    - Port: `4004`
-    - User: `root`
-    - Password: `gitnavi123456`
-    - Then create some data through it; if all nodes end up with the data, the deployment works
-
-#### Configuring Keepalived for HAProxy-2
-
-- Configure a virtual IP
-    - The virtual IPs inside Docker cannot be used from outside, so the host's Keepalived is needed to map them to an externally reachable virtual IP
-- Enter the pxc-haproxy-2 container and install Keepalived: `docker exec -it pxc-haproxy-2 /bin/bash`
-- Switch the apt sources first, otherwise downloads are far too slow:
-
-```
-cat << EOF > /etc/apt/sources.list
-deb http://mirrors.163.com/ubuntu/ wily main restricted universe multiverse
-deb http://mirrors.163.com/ubuntu/ wily-security main restricted universe multiverse
-deb http://mirrors.163.com/ubuntu/ wily-updates main restricted universe multiverse
-deb http://mirrors.163.com/ubuntu/ wily-proposed main restricted universe multiverse
-deb http://mirrors.163.com/ubuntu/ wily-backports main restricted universe multiverse
-deb-src http://mirrors.163.com/ubuntu/ wily main restricted universe multiverse
-deb-src http://mirrors.163.com/ubuntu/ wily-security main restricted universe multiverse
-deb-src http://mirrors.163.com/ubuntu/ wily-updates main restricted universe multiverse
-deb-src http://mirrors.163.com/ubuntu/ wily-proposed main restricted universe multiverse
-deb-src http://mirrors.163.com/ubuntu/ wily-backports main restricted universe multiverse
-EOF
-```
-
-- `apt-get update`
-- `apt-get install -y vim`
-- `apt-get install -y keepalived`
-- `vim /etc/keepalived/keepalived.conf`
-
-```
-vrrp_instance VI_1 {
-    state MASTER
-    interface eth0
-    virtual_router_id 51
-    priority 100
-    advert_int 1
-    authentication {
-        auth_type PASS
-        auth_pass 123456
-    }
-    virtual_ipaddress {
-        172.18.0.201
-    }
-}
-```
-
-- `service keepalived start`
-- Test from the host: `ping 172.18.0.201`; if it responds, everything is fine
-
-
-## Installing Keepalived on the host
-
-```
-
-yum install -y keepalived
-
-vi /etc/keepalived/keepalived.conf
-
-```
-
-```
-vrrp_instance VI_1 {
-    state MASTER
-    interface ens33
-    virtual_router_id 51
-    priority 100
-    advert_int 1
-    authentication {
-        auth_type PASS
-        auth_pass 123456
-    }
-    virtual_ipaddress {
-        192.168.99.150
-    }
-}
-
-virtual_server 192.168.99.150 8118 {
-    delay_loop 3
-    lb_algo rr
-    lb_kind NAT
-    persistence_timeout 50
-    protocol TCP
-
-    real_server 172.18.0.201 8118 {
-        weight 1
-    }
-}
-
-virtual_server 192.168.99.150 3316 {
-    delay_loop 3
-    lb_algo rr
-    lb_kind NAT
-    persistence_timeout 50
-    protocol TCP
-
-    real_server 172.18.0.201 3316 {
-        weight 1
-    }
-}
-```
-
-- `systemctl start keepalived`
-
 
 ## References
From 4230f72dca78025f47bf69c4aa9af79f852b2f04 Mon Sep 17 00:00:00 2001
From: judasn
Date: Thu, 14 Jun 2018 22:26:00 +0800
Subject: [PATCH 015/330] 2018-06-14 pxc + HAProxy

---
 markdown-file/PXC-Install-And-Settings.md | 40 ++++++++++++++++++++---
 1 file changed, 35 insertions(+), 5 deletions(-)

diff --git a/markdown-file/PXC-Install-And-Settings.md b/markdown-file/PXC-Install-And-Settings.md
index 5db366bf..37e0db1f 100644
--- a/markdown-file/PXC-Install-And-Settings.md
+++ b/markdown-file/PXC-Install-And-Settings.md
@@ -17,23 +17,24 @@
 
 - Official Docker repository:
 - Pull the image: `docker pull percona/percona-xtradb-cluster`
-- Create the mount directories: `mkdir -p /data/docker/pxc/node1 /data/docker/pxc/node2 /data/docker/pxc/node3`
+- Create the mount directories: `mkdir -p /data/docker/pxc/node1/mysql /data/docker/pxc/node2/mysql /data/docker/pxc/node3/mysql`
+- Create the backup mount directory: `mkdir -p /data/docker/pxc/node1/backup`
 - Grant permissions: `chmod 777 -R /data/docker/pxc`
 - Create a dedicated Docker network: `docker network create --subnet=172.18.0.0/24 pxc-net`
 - Start the images:
 
 ```
-# The first initialisation is slow, allow about 2 minutes
-docker run -d -p 3307:3306 -v /data/docker/pxc/node1:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=gitnavi123456 -e CLUSTER_NAME=pxc-cluster -e XTRABACKUP_PASSWORD=gitnavi123456 --privileged --name=pxc-node-1 --net=pxc-net --ip 172.18.0.2 percona/percona-xtradb-cluster
+# The first initialisation is slow, allow about 2 minutes; this node is also used for full backups
+docker run -d -p 3307:3306 -v /data/docker/pxc/node1/mysql:/var/lib/mysql -v /data/docker/pxc/node1/backup:/data/backup -e MYSQL_ROOT_PASSWORD=gitnavi123456 -e CLUSTER_NAME=pxc-cluster -e XTRABACKUP_PASSWORD=gitnavi123456 --privileged --name=pxc-node-1 --net=pxc-net --ip 172.18.0.2 percona/percona-xtradb-cluster
 ```
 
 - Use SQLyog to test the connection; only continue creating the other nodes once it works.
     - The address is the host IP, port: 3307
 
 ```
-docker run -d -p 3308:3306 -v /data/docker/pxc/node2:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=gitnavi123456 -e CLUSTER_NAME=pxc-cluster -e XTRABACKUP_PASSWORD=gitnavi123456 -e CLUSTER_JOIN=pxc-node-1 --privileged --name=pxc-node-2 --net=pxc-net --ip 172.18.0.3 percona/percona-xtradb-cluster
+docker run -d -p 3308:3306 -v /data/docker/pxc/node2/mysql:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=gitnavi123456 -e CLUSTER_NAME=pxc-cluster -e XTRABACKUP_PASSWORD=gitnavi123456 -e CLUSTER_JOIN=pxc-node-1 --privileged --name=pxc-node-2 --net=pxc-net --ip 172.18.0.3 percona/percona-xtradb-cluster
 
-docker run -d -p 3309:3306 -v /data/docker/pxc/node3:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=gitnavi123456 -e CLUSTER_NAME=pxc-cluster -e XTRABACKUP_PASSWORD=gitnavi123456 -e CLUSTER_JOIN=pxc-node-1 --privileged --name=pxc-node-3 --net=pxc-net --ip 172.18.0.4 percona/percona-xtradb-cluster
+docker run -d -p 3309:3306 -v /data/docker/pxc/node3/mysql:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=gitnavi123456 -e CLUSTER_NAME=pxc-cluster -e XTRABACKUP_PASSWORD=gitnavi123456 -e CLUSTER_JOIN=pxc-node-1 --privileged --name=pxc-node-3 --net=pxc-net --ip 172.18.0.4 percona/percona-xtradb-cluster
 ```
 
 - Testing the cluster
     - Connect SQLyog to all 3 nodes; create a database on any node and the other nodes produce it at the same time. Likewise create tables and insert data, then check the other databases.
 
+## XtraBackup hot backups
+
+- XtraBackup does not lock tables during the backup
+- XtraBackup does not interrupt transactions that are executing during the backup
+- XtraBackup compresses the backup data, so disk usage is low
+
+#### Full backup
+
+- Install XtraBackup inside the container and run the backup command
+
+```
+apt-get update
+apt-get install -y percona-xtrabackup-24
+
+# Full backup, into the container's /data directory:
+innobackupex --user=root --password=gitnavi123456 /data/backup/full/201806
+```
+
+#### Restoring a full backup
+
+
+- To restore data, PXC requires dissolving the cluster down to a single remaining node, and deleting that node's data
+    - Inside the container: `rm -rf /var/lib/mysql/*`
+- Roll back transactions that were uncommitted during the backup: `innobackupex --user=root --password=gitnavi123456 --apply-log /data/backup/full/2018-04-15_05-09-07/`
+- Restore the data: `innobackupex --user=root --password=gitnavi123456 --copy-back /data/backup/full/2018-04-15_05-09-07/`
+
+
+#### Incremental backups (not yet written up)
+
 ## Load balancing
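The restore procedure in the patch above is order-sensitive, so here it is condensed into one commented script, to be run inside the node-1 container. A sketch only: the timestamped directory name is whatever innobackupex generated on your machine:

```bash
# Full-backup restore on the single remaining node (the cluster is already dissolved)
BACKUP_DIR=/data/backup/full/2018-04-15_05-09-07   # substitute your own backup directory

# 1. The data directory must be empty before copy-back
rm -rf /var/lib/mysql/*

# 2. Roll back transactions that were uncommitted while the backup ran
innobackupex --user=root --password=gitnavi123456 --apply-log "$BACKUP_DIR"

# 3. Copy the prepared files back into the data directory
innobackupex --user=root --password=gitnavi123456 --copy-back "$BACKUP_DIR"

# 4. copy-back runs as root, so hand the files back to the mysql user
chown -R mysql:mysql /var/lib/mysql
```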
From 9e867b6064c68f19906f723a8250ce192165712c Mon Sep 17 00:00:00 2001
From: judasn
Date: Fri, 15 Jun 2018 11:34:20 +0800
Subject: [PATCH 016/330] =?UTF-8?q?2018-06-15=20=E8=A1=A5=E5=85=85=20Nginx?=
 =?UTF-8?q?=20=E8=87=AA=E5=8A=A8=E5=88=86=E5=89=B2=E6=97=A5=E5=BF=97?=
 =?UTF-8?q?=E6=96=87=E4=BB=B6?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 markdown-file/Nginx-Install-And-Settings.md | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/markdown-file/Nginx-Install-And-Settings.md b/markdown-file/Nginx-Install-And-Settings.md
index 63e35a2d..a4a9ca08 100644
--- a/markdown-file/Nginx-Install-And-Settings.md
+++ b/markdown-file/Nginx-Install-And-Settings.md
@@ -683,7 +683,12 @@ location ~ .*$ {
 
 ### Automatic Nginx log file splitting
 
+- The [Tomcat install, settings and tuning](Tomcat-Install-And-Settings.md) article already uses cronolog; it is borrowed here to do the splitting as well. See that article for how to install it.
+- Create the named pipe: `mkfifo /data/nginx/log/access_log.log`
+- Configure cronolog: `nohup cat /data/nginx/log/access_log.log | /usr/sbin/cronolog /data/nginx/log/logs/access-%Y-%m-%d.log &`
+- Edit the nginx configuration file and set the log location: `access_log /data/nginx/log/access_log.log;`
+Restart Nginx
 
 ### Handling cross-origin requests in Nginx
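The named-pipe trick above only works while a cronolog reader is attached, so the order of operations matters. The whole arrangement as one sketch, with the paths from the patch:

```bash
# 1. Create the FIFO that nginx will treat as its access log
mkfifo /data/nginx/log/access_log.log

# 2. Attach cronolog as the reader BEFORE nginx starts writing,
#    otherwise nginx blocks opening the pipe
nohup cat /data/nginx/log/access_log.log \
  | /usr/sbin/cronolog /data/nginx/log/logs/access-%Y-%m-%d.log &

# 3. Point nginx at the pipe in nginx.conf:
#        access_log /data/nginx/log/access_log.log;
#    then reload (or restart) nginx
nginx -s reload
```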
/data/nginx/log/access_log.log | /usr/sbin/cronolog /data/nginx/log/logs/access-%Y-%m-%d.log &` +- 编辑 nginx 配置文件,配置 log 位置:`access_log /data/nginx/log/access_log.log;` +重启Nginx ### Nginx 处理跨域请求 From fa39542f88255d7fd1286769cfb88d4b295a8fe7 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 15 Jun 2018 11:35:06 +0800 Subject: [PATCH 017/330] =?UTF-8?q?2018-06-15=20=E8=A1=A5=E5=85=85=20Nginx?= =?UTF-8?q?=20=E8=87=AA=E5=8A=A8=E5=88=86=E5=89=B2=E6=97=A5=E5=BF=97?= =?UTF-8?q?=E6=96=87=E4=BB=B6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/Nginx-Install-And-Settings.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/markdown-file/Nginx-Install-And-Settings.md b/markdown-file/Nginx-Install-And-Settings.md index a4a9ca08..e3c40f9b 100644 --- a/markdown-file/Nginx-Install-And-Settings.md +++ b/markdown-file/Nginx-Install-And-Settings.md @@ -685,7 +685,8 @@ location ~ .*$ { - 在 [Tomcat 安装和配置、优化](Tomcat-Install-And-Settings.md) 文章已经使用了 cronolog,这里也借用 cronolog 来实现分割。具体安装看文章。 - 创建命名管道:`mkfifo /data/nginx/log/access_log.log` -- 配置 cronolog:`nohup cat /data/nginx/log/access_log.log | /usr/sbin/cronolog /data/nginx/log/logs/access-%Y-%m-%d.log &` +- 配置 cronolog(按天):`nohup cat /data/nginx/log/access_log.log | /usr/sbin/cronolog /data/nginx/log/logs/access-%Y-%m-%d.log &` +- 配置 cronolog(按月):`nohup cat /data/nginx/log/access_log.log | /usr/sbin/cronolog /data/nginx/log/logs/access-%Y-%m.log &` - 编辑 nginx 配置文件,配置 log 位置:`access_log /data/nginx/log/access_log.log;` 重启Nginx From 1f6ac5624f363e24f2a89518e3cb9b75f39cb905 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 15 Jun 2018 11:47:13 +0800 Subject: [PATCH 018/330] =?UTF-8?q?2018-06-15=20=E8=A1=A5=E5=85=85=20Nginx?= =?UTF-8?q?=20=E8=87=AA=E5=8A=A8=E5=88=86=E5=89=B2=E6=97=A5=E5=BF=97?= =?UTF-8?q?=E6=96=87=E4=BB=B6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/Nginx-Install-And-Settings.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/markdown-file/Nginx-Install-And-Settings.md b/markdown-file/Nginx-Install-And-Settings.md index e3c40f9b..9fe3ebcf 100644 --- a/markdown-file/Nginx-Install-And-Settings.md +++ b/markdown-file/Nginx-Install-And-Settings.md @@ -688,8 +688,7 @@ location ~ .*$ { - 配置 cronolog(按天):`nohup cat /data/nginx/log/access_log.log | /usr/sbin/cronolog /data/nginx/log/logs/access-%Y-%m-%d.log &` - 配置 cronolog(按月):`nohup cat /data/nginx/log/access_log.log | /usr/sbin/cronolog /data/nginx/log/logs/access-%Y-%m.log &` - 编辑 nginx 配置文件,配置 log 位置:`access_log /data/nginx/log/access_log.log;` - -重启Nginx +- 重启 nginx ### Nginx 处理跨域请求 From 13fe4b1e87e4e77c9d5e9f8fff485365935454b2 Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 18 Jun 2018 09:24:51 +0800 Subject: [PATCH 019/330] 2018-06-18 --- markdown-file/WordPress-Install-And-Settings.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/markdown-file/WordPress-Install-And-Settings.md b/markdown-file/WordPress-Install-And-Settings.md index f616c7f7..3c0591f8 100644 --- a/markdown-file/WordPress-Install-And-Settings.md +++ b/markdown-file/WordPress-Install-And-Settings.md @@ -130,6 +130,8 @@ rm -rf wordpress-4.9.4-zh_CN.zip cd /var/www/html/wordpress && mv * ../ +rm -rf /var/www/html/wordpress/ + chmod -R 777 /var/www/html/ ``` From 2c427d13c7ab8e1dfd63821c6108e225a9c3df39 Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 19 Jun 2018 22:42:22 +0800 Subject: [PATCH 020/330] =?UTF-8?q?2018-06-19=20=E8=A1=A5=E5=85=85=20WordP?= 
=?UTF-8?q?ress=20=E7=BB=86=E8=8A=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../WordPress-Install-And-Settings.md | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/markdown-file/WordPress-Install-And-Settings.md b/markdown-file/WordPress-Install-And-Settings.md index 3c0591f8..3eae4909 100644 --- a/markdown-file/WordPress-Install-And-Settings.md +++ b/markdown-file/WordPress-Install-And-Settings.md @@ -135,6 +135,36 @@ rm -rf /var/www/html/wordpress/ chmod -R 777 /var/www/html/ ``` +- 修改 Apache 配置文件:`vim /etc/httpd/conf/httpd.conf` + +``` +旧值: +#ServerName www.example.com:80 + +改为: +ServerName www.youmeek.com:80 + +---------------------- + +旧值: +AllowOverride None + +改为: +AllowOverride All + +---------------------- + +旧值: + + DirectoryIndex index.html + + +改为: + + DirectoryIndex index.html index.htm Default.html Default.htm index.php Default.php index.html.var + +``` + ## 创建数据库 - SQL 语句:`CREATE DATABASE wordpress DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;` From 52553ffe59fe0cf2cb80bccda706c285f16a4387 Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 19 Jun 2018 22:59:16 +0800 Subject: [PATCH 021/330] =?UTF-8?q?2018-06-19=20=E8=A1=A5=E5=85=85=20Docke?= =?UTF-8?q?r=20=E7=BB=86=E8=8A=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/Docker-Install-And-Usage.md | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index db03392d..f0e3ce46 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -120,6 +120,7 @@ Docker CE has both stable and edge channels. - 推荐优先阿里云,然后是 USTC - 我下面的讲解也是基于阿里云加速 - 阿里云的服务需要注册账号,**首次使用需要设置 docker 登录密码(阿里云叫做:**修改Registry登录密码**),这个以后用私人仓库会用到。** + - 如果忘记了,后面可以在这里修改: - 注册后请访问:,你会看到专属的加速地址,比如我是:`https://ldhc17y9.mirror.aliyuncs.com`,所以下面文章你看到该地址都表示是这个专属地址,请记得自己更换自己的。 - 以及教你如何使用 Docker 加速器。如果你已经安装了最新版的 Docker 你就不需要用它的脚本进行安装了。 - 最新版本的 Docker 是新增配置文件:`vim /etc/docker/daemon.json`,增加如下内容: @@ -136,7 +137,7 @@ Docker CE has both stable and edge channels. 
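- 补充示例:上面提到的 `/etc/docker/daemon.json` 加速配置,内容大致如下(`registry-mirrors` 的地址是阿里云分配给每个账号的专属地址,这里沿用本文后面安装脚本中出现的 ldhc17y9,实际使用时需要换成你自己的):

```
{
  "registry-mirrors": ["https://ldhc17y9.mirror.aliyuncs.com"]
}
```

- 修改后执行 `systemctl daemon-reload` 和 `systemctl restart docker` 让配置生效。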
- 在 ` namespace管理` 中创建属于你自己的 namespace: - 创建镜像仓库: - 创建好仓库后,点击:`管理` 进入查看仓库的更多详细信息,这里面有很多有用的信息,包括一个详细的操作指南,**这份指南等下会用到。** - - 比如我自己创建的仓库,地址是阿里云给我们的:`registry.cn-shenzhen.aliyuncs.com/youmeek/open-hub` + - 比如我自己创建一个 redis-to-cluster 仓库,地址是阿里云给我们的:`registry.cn-shenzhen.aliyuncs.com/youmeek/redis-to-cluster` - 那我登录这个镜像地址的方式: ``` @@ -146,6 +147,18 @@ Username:阿里云邮箱 password:上文提到的--Registry登录密码 ``` +- 然后在我的仓库管理地址有教我如何推送和拉取镜像: +- 拉取:`docker pull registry.cn-shenzhen.aliyuncs.com/youmeek/redis-to-cluster:[镜像版本号]` +- 推送: + +``` +docker login + +docker tag [ImageId] registry.cn-shenzhen.aliyuncs.com/youmeek/redis-to-cluster:[镜像版本号] + +docker push registry.cn-shenzhen.aliyuncs.com/youmeek/redis-to-cluster:[镜像版本号] +``` + # Docker 命令,最终部署 Spring Boot 项目 - 建议:初期使用的时候尽量用容器 ID / 镜像 ID。如果使用 Tag/Name 在东西多的情况下很容易混乱 还不如就用记不住但是肯定唯一的容器 ID / 镜像 ID From 7bc9579f36ab094a713e15565d0047685984bc18 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 20 Jun 2018 00:29:16 +0800 Subject: [PATCH 022/330] =?UTF-8?q?2018-06-19=20=E8=A1=A5=E5=85=85=20Redis?= =?UTF-8?q?Cluster=20=E7=BB=86=E8=8A=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/Redis-Install-And-Settings.md | 104 +++++++++++++++++++- 1 file changed, 103 insertions(+), 1 deletion(-) diff --git a/markdown-file/Redis-Install-And-Settings.md b/markdown-file/Redis-Install-And-Settings.md index ca8b34b5..d671e3aa 100644 --- a/markdown-file/Redis-Install-And-Settings.md +++ b/markdown-file/Redis-Install-And-Settings.md @@ -83,7 +83,109 @@ aof-rewrite-incremental-fsync yes - 重新启动服务:`docker restart cloud-redis` -## Redis 安装 +## RedisCluster 集群(Docker 方式) + +#### Redis 容器准备 + +- 目标:3 主 3 从(一般都是推荐奇数个 master) +- 拉取镜像:`docker pull registry.cn-shenzhen.aliyuncs.com/youmeek/redis-to-cluster:3.2.3` +- 重新打个 tag(旧名字太长了):`docker tag registry.cn-shenzhen.aliyuncs.com/youmeek/redis-to-cluster:3.2.3 redis-to-cluster:3.2.3` +- 创建网段:`docker network create --subnet=172.19.0.0/16 net-redis-to-cluster` +- 宿主机创建配置文件:`mkdir -p /data/docker/redis-to-cluster/config && vim /data/docker/redis-to-cluster/config/redis.conf` + +``` +bind 127.0.0.1 +protected-mode yes +port 6379 +tcp-backlog 511 +timeout 0 +tcp-keepalive 300 +daemonize yes +supervised no +pidfile /var/run/redis_6379.pid +loglevel notice +logfile "" +databases 16 +save 900 1 +save 300 10 +save 60 10000 +stop-writes-on-bgsave-error yes +rdbcompression yes +rdbchecksum yes +dbfilename dump.rdb +dir ./ +slave-serve-stale-data yes +slave-read-only yes +repl-diskless-sync no +repl-diskless-sync-delay 5 +repl-disable-tcp-nodelay no +slave-priority 100 +appendonly yes +appendfilename "appendonly.aof" +appendfsync everysec +no-appendfsync-on-rewrite no +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb +aof-load-truncated yes +lua-time-limit 5000 +cluster-enabled yes +cluster-config-file nodes-6379.conf +cluster-node-timeout 15000 +slowlog-log-slower-than 10000 +slowlog-max-len 128 +latency-monitor-threshold 0 +notify-keyspace-events "" +hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 +list-max-ziplist-size -2 +list-compress-depth 0 +set-max-intset-entries 512 +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 +hll-sparse-max-bytes 3000 +activerehashing yes +client-output-buffer-limit normal 0 0 0 +client-output-buffer-limit slave 256mb 64mb 60 +client-output-buffer-limit pubsub 32mb 8mb 60 +hz 10 +aof-rewrite-incremental-fsync yes +``` + +- 赋权:`chmod 777 -R /data/docker/redis-to-cluster/` +- 运行 6 个节点: + - `docker run -it -d --name redis-to-cluster-1 -p 
5001:6379 -v /data/docker/redis-to-cluster/config/redis.conf:/usr/redis/redis.conf --net=net-redis-to-cluster --ip 172.19.0.2 redis-to-cluster:3.2.3 bash` + - `docker run -it -d --name redis-to-cluster-2 -p 5002:6379 -v /data/docker/redis-to-cluster/config/redis.conf:/usr/redis/redis.conf --net=net-redis-to-cluster --ip 172.19.0.3 redis-to-cluster:3.2.3 bash` + - `docker run -it -d --name redis-to-cluster-3 -p 5003:6379 -v /data/docker/redis-to-cluster/config/redis.conf:/usr/redis/redis.conf --net=net-redis-to-cluster --ip 172.19.0.4 redis-to-cluster:3.2.3 bash` + - `docker run -it -d --name redis-to-cluster-4 -p 5004:6379 -v /data/docker/redis-to-cluster/config/redis.conf:/usr/redis/redis.conf --net=net-redis-to-cluster --ip 172.19.0.5 redis-to-cluster:3.2.3 bash` + - `docker run -it -d --name redis-to-cluster-5 -p 5005:6379 -v /data/docker/redis-to-cluster/config/redis.conf:/usr/redis/redis.conf --net=net-redis-to-cluster --ip 172.19.0.6 redis-to-cluster:3.2.3 bash` + - `docker run -it -d --name redis-to-cluster-6 -p 5006:6379 -v /data/docker/redis-to-cluster/config/redis.conf:/usr/redis/redis.conf --net=net-redis-to-cluster --ip 172.19.0.7 redis-to-cluster:3.2.3 bash` +- 配置 redis-to-cluster-1 节点:`docker exec -it redis-to-cluster-1 bash` + - 启动容器的 redis:`/usr/redis/src/redis-server /usr/redis/redis.conf` +- 其他 5 个节点一样进行启动。 + +#### 创建 Cluster 集群(通过 redis-trib.rb) + +- 配置 redis-to-cluster-1 节点(或者选择其他任意一个节点):`docker exec -it redis-to-cluster-1 bash` +- `mkdir -p /usr/redis/cluster` +- `cp /usr/redis/src/redis-trib.rb /usr/redis/cluster/` +- `cd /usr/redis/cluster/` +- 创建 Cluster 集群(会有交互)(镜像中已经安装了 ruby 了):`./redis-trib.rb create --replicas 1 172.19.0.2:6379 172.19.0.3:6379 172.19.0.4:6379 172.19.0.5:6379 172.19.0.6:6379 172.19.0.7:6379` + - `--replicas 1` 表示为每个主节点创建一个从节点 +- 连接集群测试: + - 进入随便一个节点:`docker exec -it redis-to-cluster-1 bash` + - `/usr/redis/src/redis-cli -c` + - 查看集群情况:`cluster nodes` + - 写入数据:`set myKey myValue`,如果成功会返回:``,可以推断它是 redis-to-cluster-3 容器 + - 暂定掉 redis-to-cluster-3 容器:`docker pause redis-to-cluster-3` + - 重新连接:`/usr/redis/src/redis-cli -c` + - 查看集群情况:`cluster nodes` + - 获取值:`get myKey` + - 重新启动 redis-to-cluster-3:`docker unpause redis-to-cluster-3` + - 查看集群情况:`cluster nodes` + + + +## Redis 编译安装 - Redis 安装 - 官网: From 555b8fee69868a5de5c74061dcc9b73c0f3c4f68 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 20 Jun 2018 11:08:07 +0800 Subject: [PATCH 023/330] =?UTF-8?q?2018-06-15=20=E8=A1=A5=E5=85=85=20MySQL?= =?UTF-8?q?=20=E7=9B=B8=E5=85=B3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/Mysql-Install-And-Settings.md | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/markdown-file/Mysql-Install-And-Settings.md b/markdown-file/Mysql-Install-And-Settings.md index 44795953..4d443f50 100644 --- a/markdown-file/Mysql-Install-And-Settings.md +++ b/markdown-file/Mysql-Install-And-Settings.md @@ -164,7 +164,26 @@ max_allowed_packet = 50M ## 连接报错:"Host '192.168.1.133' is not allowed to connect to this MySQL server" - 不允许除了 localhost 之外去连接,解决办法,进入 MySQL 命令行,输入下面内容: -- `GRANT ALL PRIVILEGES ON *.* TO '数据库用户名'@'%' IDENTIFIED BY '数据库用户名的密码' WITH GRANT OPTION;` +- 开发机设置允许任何机子访问: + - `vim /etc/my.cnf` 中不能有:`bind-address = 127.0.0.1` + - 配置:`GRANT ALL PRIVILEGES ON *.* TO '数据库用户名'@'%' IDENTIFIED BY '数据库用户名的密码' WITH GRANT OPTION;` + - 更新配置:`flush privileges;` +- 生产机设置只运行本机访问: + - `vim /etc/my.cnf` 中必须有:`bind-address = 127.0.0.1` + - 配置:`GRANT ALL PRIVILEGES ON *.* TO '数据库用户名'@'127.0.0.1' 
IDENTIFIED BY '数据库用户名的密码' WITH GRANT OPTION;` + - 更新配置:`flush privileges;` + + +## 修改密码报错:Your password does not satisfy the current policy requirements + +- MySQL 5.7 安全性要求更高,需要这么做: + +``` +set global validate_password_policy=0; #密码强度设为最低等级 +set global validate_password_length=6; #密码允许最小长度为6 +set password = password('新密码'); +FLUSH PRIVILEGES; +``` ## MySQL 主从复制 From 7ab7e06ffac0129ce53a8d979078b08c46b5b8ee Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 20 Jun 2018 14:28:10 +0800 Subject: [PATCH 024/330] =?UTF-8?q?2018-06-20=20=E6=9B=B4=E6=8D=A2=20githu?= =?UTF-8?q?b=20=E7=9A=84=E4=B8=80=E4=BA=9B=E8=84=9A=E6=9C=AC=E5=9C=B0?= =?UTF-8?q?=E5=9D=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- favorite-file/shell/install_common_vim_zsh.sh | 2 +- markdown-file/Zsh.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/favorite-file/shell/install_common_vim_zsh.sh b/favorite-file/shell/install_common_vim_zsh.sh index a2ecbd8e..64e4f1c1 100644 --- a/favorite-file/shell/install_common_vim_zsh.sh +++ b/favorite-file/shell/install_common_vim_zsh.sh @@ -12,7 +12,7 @@ echo "-----------------------------------------开始安装 zsh" yum install -y zsh echo "-----------------------------------------开始安装 oh-my-zsh" -wget https://raw.github.com/robbyrussell/oh-my-zsh/master/tools/install.sh -O - | sh +wget https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh -O - | sh echo "-----------------------------------------设置默认终端为 oh-my-zsh" chsh -s /bin/zsh root diff --git a/markdown-file/Zsh.md b/markdown-file/Zsh.md index a0ad3a34..2186bc74 100644 --- a/markdown-file/Zsh.md +++ b/markdown-file/Zsh.md @@ -37,7 +37,7 @@ - oh-my-zsh 帮我们整理了一些常用的 Zsh 扩展功能和主题: - 我们无需自己去捣搞 Zsh,直接用 oh-my-zsh 就足够了,如果你想继续深造的话那再去弄。 - 先安装 git:`sudo yum install -y git` -- 安装 oh-my-zsh(这个过程可能会有点慢,或者需要重试几次):`wget https://raw.github.com/robbyrussell/oh-my-zsh/master/tools/install.sh -O - | sh` +- 安装 oh-my-zsh(这个过程可能会有点慢,或者需要重试几次):`wget https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh -O - | sh` - 整个过程效果如下图: - ![oh-my-zsh 安装](../images/Zsh-a-1.jpg) - 在以 root 用户为前提下,oh-my-zsh 的安装目录:**/root/.oh-my-zsh** From e2060568d44df262ff3624e48e88680321ba5275 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 20 Jun 2018 14:32:03 +0800 Subject: [PATCH 025/330] =?UTF-8?q?2018-06-20=20=E6=9B=B4=E6=8D=A2=20docke?= =?UTF-8?q?r=20=E7=9A=84=E6=96=B0=E5=AE=98=E7=BD=91=E5=9C=B0=E5=9D=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/Docker-Install-And-Usage.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index f0e3ce46..855ebf8b 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -61,8 +61,8 @@ Docker CE has both stable and edge channels. Edge builds are released once per month, and are supported for that month only. If you subscribe to the Edge channel on Linux distributions, you should also subscribe to the Stable channel. 
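- 补充:按上面官网手册安装完成后,可以用下面两条常规命令确认安装版本和服务状态(通用命令,供参考):

```
docker --version
systemctl status docker
```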
-- 官网总的安装手册: -- 官网 CentOS 安装手册: +- 官网总的安装手册: +- 官网 CentOS 安装手册: - 目前也支持 Windows,特别是 Windows 10,直接官网一个安装包即可搞定。 - Windows 10 的 Docker 安装说明: - 我这里选择 Docker CE 版本: From 35160a23a9c95a716db99f41c77e01cf5ef42a48 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 20 Jun 2018 14:45:06 +0800 Subject: [PATCH 026/330] =?UTF-8?q?2018-06-20=20=E8=A1=A5=E5=85=85=20delta?= =?UTF-8?q?rpm?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- favorite-file/shell/install_common_tool.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/favorite-file/shell/install_common_tool.sh b/favorite-file/shell/install_common_tool.sh index d4e6eb2f..541da34f 100644 --- a/favorite-file/shell/install_common_tool.sh +++ b/favorite-file/shell/install_common_tool.sh @@ -2,6 +2,6 @@ echo "开始常用工具安装" -yum install -y zip unzip lrzsz git epel-release wget htop +yum install -y zip unzip lrzsz git epel-release wget htop deltarpm echo "开始常用工具结束" \ No newline at end of file From 7b5a5e52667d25938a1876b5a50c86d132352dfb Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 20 Jun 2018 15:04:10 +0800 Subject: [PATCH 027/330] =?UTF-8?q?2018-06-20=20=E8=A1=A5=E5=85=85=20docke?= =?UTF-8?q?r=20=E9=98=BF=E9=87=8C=E4=BA=91=E9=95=9C=E5=83=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ...aliyun_docker_disable_firewalld_centos7.sh | 28 +++++++++++-------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/favorite-file/shell/install_aliyun_docker_disable_firewalld_centos7.sh b/favorite-file/shell/install_aliyun_docker_disable_firewalld_centos7.sh index 5120d329..35080fed 100644 --- a/favorite-file/shell/install_aliyun_docker_disable_firewalld_centos7.sh +++ b/favorite-file/shell/install_aliyun_docker_disable_firewalld_centos7.sh @@ -9,8 +9,10 @@ echo "-----------------------------------------安装 docker 所需环境" yum install -y yum-utils device-mapper-persistent-data lvm2 echo "-----------------------------------------添加 repo(可能网络会很慢,有时候会报:Timeout,所以要多试几次)" +echo "-----------------------------------------官网的地址 yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo" +echo "-----------------------------------------这里用阿里云进行加速,不然可能会出现无法安装,阿里云官网说明:https://help.aliyun.com/document_detail/60742.html" -yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo +yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo yum makecache fast echo "-----------------------------------------开始安装 docker" @@ -23,6 +25,19 @@ systemctl start docker.service echo "-----------------------------------------安装结束" +echo "-----------------------------------------docker 加速" + +touch /etc/docker/daemon.json + +cat << EOF >> /etc/docker/daemon.json +{ + "registry-mirrors": ["https://ldhc17y9.mirror.aliyuncs.com"] +} +EOF + +systemctl daemon-reload +systemctl restart docker + echo "-----------------------------------------运行 hello world 镜像" docker run hello-world @@ -38,17 +53,6 @@ echo "-----------------------------------------输出 docker compose 版本号" docker-compose --version -echo "-----------------------------------------docker 加速" - -touch /etc/docker/daemon.json - -cat << EOF >> /etc/docker/daemon.json -{ - "registry-mirrors": ["https://ldhc17y9.mirror.aliyuncs.com"] -} -EOF -systemctl daemon-reload -systemctl restart docker From 02417d666752963f111e4e22161a67e4e92d0846 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 20 Jun 2018 15:39:29 +0800 Subject: [PATCH 028/330] 
=?UTF-8?q?2018-06-20=20=E8=A1=A5=E5=85=85=20redis?= =?UTF-8?q?=20cluster?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/Redis-Install-And-Settings.md | 55 ++++++++++++++++++++- 1 file changed, 53 insertions(+), 2 deletions(-) diff --git a/markdown-file/Redis-Install-And-Settings.md b/markdown-file/Redis-Install-And-Settings.md index d671e3aa..6d54585e 100644 --- a/markdown-file/Redis-Install-And-Settings.md +++ b/markdown-file/Redis-Install-And-Settings.md @@ -94,7 +94,7 @@ aof-rewrite-incremental-fsync yes - 宿主机创建配置文件:`mkdir -p /data/docker/redis-to-cluster/config && vim /data/docker/redis-to-cluster/config/redis.conf` ``` -bind 127.0.0.1 +bind 0.0.0.0 protected-mode yes port 6379 tcp-backlog 511 @@ -171,11 +171,62 @@ aof-rewrite-incremental-fsync yes - `cd /usr/redis/cluster/` - 创建 Cluster 集群(会有交互)(镜像中已经安装了 ruby 了):`./redis-trib.rb create --replicas 1 172.19.0.2:6379 172.19.0.3:6379 172.19.0.4:6379 172.19.0.5:6379 172.19.0.6:6379 172.19.0.7:6379` - `--replicas 1` 表示为每个主节点创建一个从节点 + - 如果正常的话,会出现下面内容: + +``` +>>> Creating cluster +>>> Performing hash slots allocation on 6 nodes... +Using 3 masters: +172.19.0.2:6379 +172.19.0.3:6379 +172.19.0.4:6379 +Adding replica 172.19.0.5:6379 to 172.19.0.2:6379 +Adding replica 172.19.0.6:6379 to 172.19.0.3:6379 +Adding replica 172.19.0.7:6379 to 172.19.0.4:6379 +M: 9c1c64b18bfc2a0586be2089f13c330787c1f67b 172.19.0.2:6379 + slots:0-5460 (5461 slots) master +M: 35a633853329c9ff25bb93a7ce9192699c2ab6a8 172.19.0.3:6379 + slots:5461-10922 (5462 slots) master +M: 8ea2bfeeeda939abb43e96a95a990bcc55c10389 172.19.0.4:6379 + slots:10923-16383 (5461 slots) master +S: 9cb00acba065120ea96834f4352c72bb50aa37ac 172.19.0.5:6379 + replicates 9c1c64b18bfc2a0586be2089f13c330787c1f67b +S: 8e2a4bb11e97adf28427091a621dbbed66c61001 172.19.0.6:6379 + replicates 35a633853329c9ff25bb93a7ce9192699c2ab6a8 +S: 5d0fe968559af3035d8d64ab598f2841e5f3a059 172.19.0.7:6379 + replicates 8ea2bfeeeda939abb43e96a95a990bcc55c10389 +Can I set the above configuration? (type 'yes' to accept): yes +>>> Nodes configuration updated +>>> Assign a different config epoch to each node +>>> Sending CLUSTER MEET messages to join the cluster +Waiting for the cluster to join...... +>>> Performing Cluster Check (using node 172.19.0.2:6379) +M: 9c1c64b18bfc2a0586be2089f13c330787c1f67b 172.19.0.2:6379 + slots:0-5460 (5461 slots) master +M: 35a633853329c9ff25bb93a7ce9192699c2ab6a8 172.19.0.3:6379 + slots:5461-10922 (5462 slots) master +M: 8ea2bfeeeda939abb43e96a95a990bcc55c10389 172.19.0.4:6379 + slots:10923-16383 (5461 slots) master +M: 9cb00acba065120ea96834f4352c72bb50aa37ac 172.19.0.5:6379 + slots: (0 slots) master + replicates 9c1c64b18bfc2a0586be2089f13c330787c1f67b +M: 8e2a4bb11e97adf28427091a621dbbed66c61001 172.19.0.6:6379 + slots: (0 slots) master + replicates 35a633853329c9ff25bb93a7ce9192699c2ab6a8 +M: 5d0fe968559af3035d8d64ab598f2841e5f3a059 172.19.0.7:6379 + slots: (0 slots) master + replicates 8ea2bfeeeda939abb43e96a95a990bcc55c10389 +[OK] All nodes agree about slots configuration. +>>> Check for open slots... +>>> Check slots coverage... +[OK] All 16384 slots covered. 
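+# 注:以上为 redis-trib 的示意输出;16384 个槽位由 3 个 master 平分(每个约 5461 个),看到 All 16384 slots covered 即表示槽位分配完整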
+``` + - 连接集群测试: - 进入随便一个节点:`docker exec -it redis-to-cluster-1 bash` - `/usr/redis/src/redis-cli -c` - 查看集群情况:`cluster nodes` - - 写入数据:`set myKey myValue`,如果成功会返回:``,可以推断它是 redis-to-cluster-3 容器 + - 写入数据:`set myKey myValue`,如果成功会返回:`Redirected to slot [16281] located at 172.19.0.4:6379`,可以推断它是 redis-to-cluster-3 容器 - 暂定掉 redis-to-cluster-3 容器:`docker pause redis-to-cluster-3` - 重新连接:`/usr/redis/src/redis-cli -c` - 查看集群情况:`cluster nodes` From 3a947cedb78fb0a62b530ef7064376f4a3fcf4e3 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 20 Jun 2018 16:11:48 +0800 Subject: [PATCH 029/330] =?UTF-8?q?2018-06-20=20=E8=A1=A5=E5=85=85=20mycat?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/Mycat-Install-And-Settings.md | 36 +++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/markdown-file/Mycat-Install-And-Settings.md b/markdown-file/Mycat-Install-And-Settings.md index 575e190e..9a31f7cb 100644 --- a/markdown-file/Mycat-Install-And-Settings.md +++ b/markdown-file/Mycat-Install-And-Settings.md @@ -239,6 +239,42 @@ export PATH=$PATH:$MYCAT_HOME/bin ``` +#### 如果节点数据很多的情况,我们有一种简便写法 + +```xml + + + + + + + + + + +
+<!-- 原文此处为“简便写法”的 schema.xml 片段,schema、dataNode、dataHost 等 XML 标签在文档转换中丢失,仅存一处 dataHost 的心跳语句 select user() -->
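+<!-- 补充示意(假设性示例,原片段的 XML 标签已丢失):这里说的简便写法,指 dataNode 支持 $ 展开、一次声明多个同构节点,大致形如: -->
+<!-- <dataNode name="dn$1-3" dataHost="dataHost-1" database="db$1-3" /> 等价于声明 dn1、dn2、dn3 三个节点 -->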
+``` + #### rule.xml 配置详解 From fae9c07e8360c2e398a41ba5009bb41530aaf267 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 20 Jun 2018 20:28:58 +0800 Subject: [PATCH 030/330] =?UTF-8?q?2018-06-20=20=E8=A1=A5=E5=85=85=20docke?= =?UTF-8?q?r=20=E5=91=BD=E4=BB=A4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/Docker-Install-And-Usage.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index 855ebf8b..684f8556 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -340,6 +340,18 @@ CONTAINER ID NAME CPU % MEM USAGE / LI - `docker export`,将容器整个文件系统导出为一个tar包,不带layers、tag等信息 - `docker port`,显示容器的端口映射 - `docker inspect 容器ID`:查看容器的全面信息,用 JSON 格式输出 +- `docker system df`:类似于 Linux 上的 df 命令,用于查看 Docker 的磁盘使用情况 + - Images 镜像 + - Containers 容器 + - Local Volumes 数据卷 + +``` +TYPE TOTAL ACTIVE SIZE RECLAIMABLE +Images 6 6 1.049GB 0B (0%) +Containers 7 4 10.25kB 0B (0%) +Local Volumes 13 5 38.49GB 1.365MB (0%) +Build Cache 0B 0B +``` ``` 获取容器中的 IP:docker inspect -f {{.NetworkSettings.IPAddress}} 容器ID From c98d92052257dc06c3f16424d41210535702b426 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 20 Jun 2018 23:04:53 +0800 Subject: [PATCH 031/330] =?UTF-8?q?2018-06-20=20=E8=A1=A5=E5=85=85=20Redis?= =?UTF-8?q?Cluster=20=E7=BB=86=E8=8A=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/Redis-Install-And-Settings.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/markdown-file/Redis-Install-And-Settings.md b/markdown-file/Redis-Install-And-Settings.md index 6d54585e..fcf54dc3 100644 --- a/markdown-file/Redis-Install-And-Settings.md +++ b/markdown-file/Redis-Install-And-Settings.md @@ -233,7 +233,9 @@ M: 5d0fe968559af3035d8d64ab598f2841e5f3a059 172.19.0.7:6379 - 获取值:`get myKey` - 重新启动 redis-to-cluster-3:`docker unpause redis-to-cluster-3` - 查看集群情况:`cluster nodes` - +- Spring Boot 项目 Docker 容器访问 RedisCluster + - application.yml 配置的 IP 地址:172.19.0.2 等 + - docker 容器启动增加 `--net=host` 使用宿主机网络 ## Redis 编译安装 From fb19f3a03586d2a2b6e94b93060625dfaa1ed2f4 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 20 Jun 2018 23:13:26 +0800 Subject: [PATCH 032/330] =?UTF-8?q?2018-06-20=20=E8=A1=A5=E5=85=85=20Redis?= =?UTF-8?q?Cluster=20=E7=BB=86=E8=8A=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/Docker-Install-And-Usage.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index 684f8556..c64c184c 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -677,7 +677,7 @@ docker rmi $(docker images -f "dangling=true" -q) ``` bash FROM java:8-jre -MAINTAINER skb-user zch +MAINTAINER gitnavi ENV TZ=Asia/Shanghai RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone From 552920447bd5716d79f71dbedc2cbb9808f7f8bf Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 21 Jun 2018 21:59:36 +0800 Subject: [PATCH 033/330] 2018-06-21 --- markdown-file/monitor.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index 2308f60a..a5049b9b 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -374,9 +374,10 @@ tcp6 0 0 :::43107 :::* LISTEN - 
ping 命令查看丢包、域名解析地址 - `ping 116.196.110.69` - `ping www.GitNavi.com` -- telnet 测试端口的连通性 +- telnet 测试端口的连通性(验证服务的可用性) - `yum install -y telnet` - `telnet 116.196.110.68 3306` + - `telnet www.youmeek.com 80` - tracert(跟踪路由)查看网络请求节点访问情况,用于确定 IP 数据报访问目标所采取的路径。 - `yum install -y traceroute` - `traceroute gitnavi.com` From c161f715bee3bf1c5a7f6ed21f7caac52cb57f60 Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 21 Jun 2018 23:32:32 +0800 Subject: [PATCH 034/330] 2018-06-21 --- markdown-file/Docker-Install-And-Usage.md | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index c64c184c..b91e3ce1 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -306,12 +306,21 @@ CONTAINER ID NAME CPU % MEM USAGE / LI - `docker run -it 镜像ID --link redis-name:myredis /bin/bash` - `redis-name` 是容器名称 - `myredis` 是容器别名,其他容器连接它可以用这个别名来写入到自己的配置文件中 -- `--network` docker 网络模式: + +#### docker 网络模式 + +- 查看也有网络:`docker network ls` +- 创建网络:`docker network create --subnet=172.19.0.0/16 net-redis-to-cluster` +- 已有容器连接到某个网络(一个容器可以同时连上多个网络):`docker network connect net-redis-to-cluster my-redis-container` +- 如果是内网提供服务的,可以直接创建一个网络,其服务使用该网络。然后另外一个需要调用该服务的,并且是对外网提供服务的可以使用 host 模式 +- `--network XXXXXX` 常见几种模式 - bridge 默认模式,在 docker0 的网桥上创建新网络栈,确保独立的网络环境,实现网络隔离:`docker run -it 镜像ID --network=bridge /bin/bash` - - none 不适用网卡,无法联网:`docker run -it 镜像ID --network=none /bin/bash` - - host 使用宿主机网络 IP、端口联网:`docker run -it 镜像ID --network=host /bin/bash` + - none 不适用网卡,不会有 IP,无法联网:`docker run -it 镜像ID --network=none /bin/bash` + - host 使用宿主机网络 IP、端口联网(在容器里面输入:ip a,看到的结果和在宿主机看到的一样):`docker run -it 镜像ID --network=host /bin/bash` - 自定义-使用自己命名的网络栈,但是需要手动配置网卡、IP 信息:`docker run -it 镜像ID --network=自定义名称 /bin/bash` + + #### 容器管理操作 - `docker ps`:列出当前所有 **正在运行** 的容器 @@ -340,6 +349,7 @@ CONTAINER ID NAME CPU % MEM USAGE / LI - `docker export`,将容器整个文件系统导出为一个tar包,不带layers、tag等信息 - `docker port`,显示容器的端口映射 - `docker inspect 容器ID`:查看容器的全面信息,用 JSON 格式输出 +- `docker inspect network名称`:查看 network 信息,用 JSON 格式输出,包含使用该网络的容器有哪些 - `docker system df`:类似于 Linux 上的 df 命令,用于查看 Docker 的磁盘使用情况 - Images 镜像 - Containers 容器 From e6c52b087d7723969eb82823a1a914887f840ac6 Mon Sep 17 00:00:00 2001 From: judasn Date: Sat, 23 Jun 2018 11:52:58 +0800 Subject: [PATCH 035/330] =?UTF-8?q?2018-06-23=20=E8=A1=A5=E5=85=85?= =?UTF-8?q?=E7=9B=91=E6=8E=A7=20CPU?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/monitor.md | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index a5049b9b..20c8e360 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -236,6 +236,33 @@ Average: 0.50 0.00 0.50 0.00 8.94 - 在 `top` 命令状态下按 shfit + p 可以按照 **CPU 使用** 大小排序 - 展示数据上,%CPU 表示进程占用的 CPU 百分比,%MEM 表示进程占用的内存百分比 +#### 另外工具 + +- htop 综合工具:`yum install -y htop` + - 这几篇文章讲得很好,我没必要再贴过来了,大家自己看: + - [htop 命令完胜 top 命令](http://blog.51cto.com/215687833/1788493) + - [htop 命令详解](https://blog.csdn.net/freeking101/article/details/79173903) +- mpstat 实时监控 CPU 状态:`yum install -y sysstat` + - 可以具体到某个核心,比如我有 2 核的 CPU,因为 CPU 核心下标是从 0 开始,所以我要查看 0 的状况(间隔 3 秒获取一次指标,一共获取 5 次):`mpstat -P 0 3 5` + - 获取所有核心的平均值:`mpstat 3 5` + +``` +Linux 3.10.0-693.2.2.el7.x86_64 (iZwz998aag1ggy168n3wg2Z) 06/23/2018 _x86_64_ (2 CPU) + +11:44:52 AM CPU %usr %nice %sys %iowait %irq %soft %steal %guest %gnice %idle +11:44:53 AM 0 
0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 100.00 +11:44:54 AM 0 1.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 99.00 +11:44:55 AM 0 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 100.00 +11:44:56 AM 0 0.00 0.00 1.00 0.00 0.00 0.00 0.00 0.00 0.00 99.00 +11:44:57 AM 0 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 100.00 +Average: 0 0.20 0.00 0.20 0.00 0.00 0.00 0.00 0.00 0.00 99.60 +``` + +- %usr 用户进程消耗 CPU 情况 +- %sys 系统进程消耗 CPU 情况 +- %iowait 表示 CPU 等待 IO 时间占整个 CPU 周期的百分比 +- %idle 显示 CPU 空闲时间占用 CPU 总时间的百分比 + ## 内存监控 From ee9223a9dde65e6d71200fd08c66898b06e63eb6 Mon Sep 17 00:00:00 2001 From: judasn Date: Sat, 23 Jun 2018 12:24:19 +0800 Subject: [PATCH 036/330] =?UTF-8?q?2018-06-23=20=E8=A1=A5=E5=85=85?= =?UTF-8?q?=E7=9B=91=E6=8E=A7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/monitor.md | 46 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index 20c8e360..3d7526f1 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -354,6 +354,9 @@ Timing buffered disk reads: 806 MB in 3.00 seconds = 268.52 MB/sec ## 端口使用情况 + +#### lsof + - 安装 lsof:`yum install -y lsof` - 查看 3316 端口是否有被使用:`lsof -i:3316`,**有被使用会输出类似如下信息,如果没被使用会没有任何信息返回** @@ -372,7 +375,9 @@ java 12011 root 87u IPv6 4506851 0t0 TCP JDu4e00u53f7:58572->116.1 docker-pr 13551 root 4u IPv6 2116824 0t0 TCP *:aicc-cmi (LISTEN) ``` +#### netstat +- 更多用法可以看:[netstat 的10个基本用法](https://linux.cn/article-2434-1.html) - 查看所有在用的端口:`netstat -ntlp` ``` @@ -395,6 +400,47 @@ tcp6 0 0 :::43107 :::* LISTEN - 查看当前连接80端口的机子有多少:`netstat -an|grep 80|sort -r` - 查看已经连接的IP有多少连接数:`netstat -ntu | awk '{print $5}' | cut -d: -f1 | sort | uniq -c | sort -n` +- 查看已经连接的IP有多少连接数,只显示前 5 个:`netstat -ntu | awk '{print $5}' | cut -d: -f1 | sort | uniq -c | sort -n | head -5` +- 统计当前连接的一些状态情况:`netstat -nat |awk '{print $6}'|sort|uniq -c|sort -rn` + +``` +8 TIME_WAIT +8 ESTABLISHED +7 LISTEN +1 Foreign +1 established) +1 CLOSE_WAIT +``` + +- 查看网络接口接受、发送的数据包情况(每隔 3 秒统计一次):`netstat -i 3` + + +``` +Kernel Interface table +Iface MTU RX-OK RX-ERR RX-DRP RX-OVR TX-OK TX-ERR TX-DRP TX-OVR Flg +eth0 1500 10903298 0 0 0 10847741 0 0 0 BMRU +lo 65536 453650 0 0 0 453650 0 0 0 LRU +eth0 1500 10903335 0 0 0 10847777 0 0 0 BMRU +lo 65536 453650 0 0 0 453650 0 0 0 LRU +eth0 1500 10903363 0 0 0 10847798 0 0 0 BMRU +lo 65536 453650 0 0 0 453650 0 0 0 LRU +eth0 1500 10903393 0 0 0 10847836 0 0 0 BMRU +lo 65536 453650 0 0 0 453650 0 0 0 LRU +eth0 1500 10903437 0 0 0 10847867 0 0 0 BMRU +lo 65536 453650 0 0 0 453650 0 0 0 LRU +``` + +- 接收: + - RX-OK 已接收字节数 + - RX-ERR 已接收错误字节数(数据值大说明网络存在问题) + - RX-DRP 已丢失字节数(数据值大说明网络存在问题) + - RX-OVR 由于误差而遗失字节数(数据值大说明网络存在问题) +- 发送: + - TX-OK 已发送字节数 + - TX-ERR 已发送错误字节数(数据值大说明网络存在问题) + - TX-DRP 已丢失字节数(数据值大说明网络存在问题) + - TX-OVR 由于误差而遗失字节数(数据值大说明网络存在问题) + #### 网络排查 From dc29dac411481594c7a173c17e8b88a768c7e255 Mon Sep 17 00:00:00 2001 From: judasn Date: Sat, 23 Jun 2018 13:58:27 +0800 Subject: [PATCH 037/330] =?UTF-8?q?2018-06-23=20=E8=A1=A5=E5=85=85?= =?UTF-8?q?=E7=9B=91=E6=8E=A7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/Crontab.md | 8 +++ markdown-file/Nmon.md | 1 + markdown-file/monitor.md | 121 +++++++++++++++++++++++---------------- 3 files changed, 82 insertions(+), 48 deletions(-) diff --git a/markdown-file/Crontab.md b/markdown-file/Crontab.md index 7df0b914..23c734d4 100644 --- a/markdown-file/Crontab.md +++ 
b/markdown-file/Crontab.md @@ -35,6 +35,14 @@ - 更多例子可以看: - 执行记录日志:`tail -f /var/log/cron`(如果发现任务不执行,可以来这里盯着日志看) + +## Crontab 权限问题 + +- 一般默认只有 root 用户可以使用 +- 如果要指定某个用户可以使用,可以在 /etc/cron.allow 添加(不存在文件就创建一个) +- 如果要指定某个用户不可以使用,可以在 /etc/cron.deny 添加(不存在文件就创建一个) +- 如果一个用户同时在两个文件都存在,那则以 allow 为准 + ## Crontab 不执行 - Crontab 不执行原因有很多,可以 Google 搜索:`Crontab 不执行`,这里不多说。 diff --git a/markdown-file/Nmon.md b/markdown-file/Nmon.md index 11493cfe..383711dd 100644 --- a/markdown-file/Nmon.md +++ b/markdown-file/Nmon.md @@ -34,6 +34,7 @@ - 点击 Analyse nmon data 会弹出一个弹出框,选择刚刚转换的 csv 文件,然后就会自动再转化成 excel 文件 - 导出的综合报表的参数说明: + ## 资料 - [Nmon命令行:Linux系统性能的监测利器](http://os.51cto.com/art/201406/442795.htm) diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index 3d7526f1..da7cf05e 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -4,6 +4,17 @@ - 查看 CentOS 版本号:`cat /etc/redhat-release` +--------------------------------------------------------------------- + +## 综合监控 + +- [nmon](Nmon.md) + + + +--------------------------------------------------------------------- + + ## 系统负载 #### 命令:w(判断整体瓶颈) @@ -52,47 +63,6 @@ procs -----------memory---------- ---swap-- -----io---- -system-- ------cpu----- - **如果bi和bo两个数字比较高,则说明,磁盘IO压力大。** - `wa` 表示I/O等待所占用CPU的时间比 - -#### 命令:iostat(判断 I/0 瓶颈) - -- 命令:`iostat -x -k 3 3`,每 3 秒采样一次,共 3 次。 - -``` -avg-cpu: %user %nice %system %iowait %steal %idle - 0.55 0.00 0.52 0.00 0.00 98.93 - -Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util -vda 0.00 0.04 0.02 0.62 0.44 6.49 21.65 0.00 1.42 1.17 1.42 0.25 0.02 - -avg-cpu: %user %nice %system %iowait %steal %idle - 0.34 0.00 0.00 0.00 0.00 99.66 - -Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util -vda 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 - -avg-cpu: %user %nice %system %iowait %steal %idle - 2.02 0.00 0.34 0.00 0.00 97.64 - -Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util -vda 0.00 0.00 0.00 1.68 0.00 16.16 19.20 0.00 0.20 0.00 0.20 0.20 0.03 -``` - -- 列说明: - - `rrqm/s`: 每秒对该设备的读请求被合并次数,文件系统会对读取同块(block)的请求进行合并 - - `wrqm/s`: 每秒对该设备的写请求被合并次数 - - `r/s`: 每秒完成的读次数 - - `w/s`: 每秒完成的写次数 - - `rkB/s`: 每秒读数据量(kB为单位) - - `wkB/s`: 每秒写数据量(kB为单位) - - `avgrq-sz`:平均每次IO操作的数据量(扇区数为单位) - - `avgqu-sz`: 平均等待处理的IO请求队列长度 - - `await`: 平均每次IO请求等待时间(包括等待时间和处理时间,毫秒为单位) - - `svctm`: 平均每次IO请求的处理时间(毫秒为单位) - - `%util`: 采用周期内用于IO操作的时间比率,即IO队列非空的时间比率 -- **总结** - - `iowait%` 表示CPU等待IO时间占整个CPU周期的百分比,如果iowait值超过50%,或者明显大于%system、%user以及%idle,表示IO可能存在问题。 - - `%util` 表示磁盘忙碌情况,一般该值超过80%表示该磁盘可能处于繁忙状态 - #### 命令:sar(综合) - sar(system activity reporter 系统活动情况报告) @@ -208,8 +178,11 @@ Average: 0.50 0.00 0.50 0.00 8.94 - `txcmp/s`:每秒钟发送出去的压缩包数目 - `txmcst/s`:每秒钟接收到的多播包的包数目 +--------------------------------------------------------------------- + +## CPU 监控 -## CPU 的基本信息查看 +#### CPU 的基本信息查看 - Demo CPU 型号:[Intel® Xeon® Processor E5-2620 v2(15M Cache, 2.10 GHz)](http://ark.intel.com/products/75789/Intel-Xeon-Processor-E5-2620-v2-15M-Cache-2_10-GHz) - 该 CPU 显示的数据中有一项这个要注意:`Intel® Hyper-Threading Technology` 是 `Yes`。表示该 CPU 支持超线程 @@ -225,7 +198,7 @@ Average: 0.50 0.00 0.50 0.00 8.94 - 线程数:线程数是一种逻辑的概念,简单地说,就是模拟出的 CPU 核心数。比如,可以通过一个 CPU 核心数模拟出 2 线程的 CPU,也就是说,这个单核心的 CPU 被模拟成了一个类似双核心 CPU 的功能。 -## CPU 监控 +#### CPU 监控 - Linux 的 CPU 简单监控一般简单 - 常用命令就是 `top` @@ -236,7 +209,7 @@ Average: 0.50 0.00 0.50 0.00 8.94 - 在 `top` 命令状态下按 shfit + p 可以按照 **CPU 使用** 大小排序 - 展示数据上,%CPU 表示进程占用的 CPU 百分比,%MEM 表示进程占用的内存百分比 -#### 另外工具 
+#### CPU 其他工具 - htop 综合工具:`yum install -y htop` - 这几篇文章讲得很好,我没必要再贴过来了,大家自己看: @@ -263,6 +236,8 @@ Average: 0 0.20 0.00 0.20 0.00 0.00 0.00 0.00 0.00 - %iowait 表示 CPU 等待 IO 时间占整个 CPU 周期的百分比 - %idle 显示 CPU 空闲时间占用 CPU 总时间的百分比 +--------------------------------------------------------------------- + ## 内存监控 @@ -297,15 +272,60 @@ Total: 16080 15919 160 - 以上的结果重点关注是:`-/+ buffers/cache`,这一行代表实际使用情况。 +--------------------------------------------------------------------- + +## 硬盘监控 -## 硬盘查看 +#### 硬盘容量相关查看 - `df -h`:自动以合适的磁盘容量单位查看磁盘大小和使用空间 - `df -m`:以磁盘容量单位 M 为数值结果查看磁盘使用情况 - `du -sh /opt/tomcat6`:查看tomcat6这个文件夹大小 (h的意思human-readable用人类可读性较好方式显示,系统会自动调节单位,显示合适大小的单位) - `du /opt --max-depth=1 -h`:查看指定录入下包括子目录的各个文件大小情况 -## 硬盘 IO 监控 + +#### 命令:iostat(判断 I/0 瓶颈) + +- 命令:`iostat -x -k 3 3`,每 3 秒采样一次,共 3 次。 + +``` +avg-cpu: %user %nice %system %iowait %steal %idle + 0.55 0.00 0.52 0.00 0.00 98.93 + +Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util +vda 0.00 0.04 0.02 0.62 0.44 6.49 21.65 0.00 1.42 1.17 1.42 0.25 0.02 + +avg-cpu: %user %nice %system %iowait %steal %idle + 0.34 0.00 0.00 0.00 0.00 99.66 + +Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util +vda 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 + +avg-cpu: %user %nice %system %iowait %steal %idle + 2.02 0.00 0.34 0.00 0.00 97.64 + +Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util +vda 0.00 0.00 0.00 1.68 0.00 16.16 19.20 0.00 0.20 0.00 0.20 0.20 0.03 +``` + +- 列说明: + - `rrqm/s`: 每秒对该设备的读请求被合并次数,文件系统会对读取同块(block)的请求进行合并 + - `wrqm/s`: 每秒对该设备的写请求被合并次数 + - `r/s`: 每秒完成的读次数 + - `w/s`: 每秒完成的写次数 + - `rkB/s`: 每秒读数据量(kB为单位) + - `wkB/s`: 每秒写数据量(kB为单位) + - `avgrq-sz`:平均每次IO操作的数据量(扇区数为单位) + - `avgqu-sz`: 平均等待处理的IO请求队列长度 + - `await`: 平均每次IO请求等待时间(包括等待时间和处理时间,毫秒为单位) + - `svctm`: 平均每次IO请求的处理时间(毫秒为单位) + - `%util`: 采用周期内用于IO操作的时间比率,即IO队列非空的时间比率(就是繁忙程度,值越高表示越繁忙) +- **总结** + - `iowait%` 表示CPU等待IO时间占整个CPU周期的百分比,如果iowait值超过50%,或者明显大于%system、%user以及%idle,表示IO可能存在问题。 + - `%util` 表示磁盘忙碌情况,一般该值超过80%表示该磁盘可能处于繁忙状态 + + +#### 硬盘 IO 监控 - 安装 iotop:`yum install -y iotop` - 查看命令:`iotop` @@ -341,8 +361,13 @@ Timing cached reads: 3462 MB in 2.00 seconds = 1731.24 MB/sec Timing buffered disk reads: 806 MB in 3.00 seconds = 268.52 MB/sec ``` +--------------------------------------------------------------------- + + ## 网络监控 +#### 网络监控常用 + - 安装 iftop(需要有 EPEL 源):`yum install -y iftop` - 如果没有 EPEL 源:`yum install -y epel-release` - 常用命令: @@ -352,8 +377,7 @@ Timing buffered disk reads: 806 MB in 3.00 seconds = 268.52 MB/sec - `iftop -N`:直接显示连接埠编号, 不显示服务名称 - `iftop -F 192.168.1.0/24 or 192.168.1.0/255.255.255.0`:显示某个网段进出封包流量 -## 端口使用情况 - +### 端口使用情况 #### lsof @@ -487,6 +511,7 @@ Address: 180.97.33.107 - 以上表明,不同的 DNS 情况下,我们获取到的域名所属 IP 是不同的。 +--------------------------------------------------------------------- ## 参考资料 From 375095f1e96bcce352a787525411b51e0551702a Mon Sep 17 00:00:00 2001 From: judasn Date: Sun, 24 Jun 2018 02:02:02 +0800 Subject: [PATCH 038/330] =?UTF-8?q?2018-06-23=20=E8=A1=A5=E5=85=85=20mycat?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/Mycat-Install-And-Settings.md | 151 +++++++++++++++++++- 1 file changed, 150 insertions(+), 1 deletion(-) diff --git a/markdown-file/Mycat-Install-And-Settings.md b/markdown-file/Mycat-Install-And-Settings.md index 9a31f7cb..02fa114a 100644 --- a/markdown-file/Mycat-Install-And-Settings.md +++ 
b/markdown-file/Mycat-Install-And-Settings.md @@ -179,7 +179,9 @@ export PATH=$PATH:$MYCAT_HOME/bin - + + + @@ -594,6 +596,8 @@ INSERT INTO `adg_ads`(`ads_id`,`ads_set_id`,`ads_title`,`shop_id`,`channel_id`, ## mycat 正常启动的 log 内容 +- `tail -300f wrapper.log` + ```log 2018-02-05 14:15:41.432 INFO [WrapperSimpleAppMain] (io.mycat.backend.datasource.PhysicalDBPool.(PhysicalDBPool.java:100)) - total resouces of dataHost mysql_host_0 is :1 2018-02-05 14:15:41.435 INFO [WrapperSimpleAppMain] (io.mycat.backend.datasource.PhysicalDBPool.(PhysicalDBPool.java:100)) - total resouces of dataHost mysql_host_2 is :1 @@ -692,6 +696,151 @@ INSERT INTO `adg_ads`(`ads_id`,`ads_set_id`,`ads_title`,`shop_id`,`channel_id`, - 先编辑 /conf/schema.xml 文件,增加对应的表信息 - 把创建表 SQL 放在虚拟库上执行,则各个节点的物理库表会增加对应的表结构 +------------------------------------------------------------------------------ + +## 只垂直分库流程 + +- 垂直切分缺点 + - 如果不采用全局表那就只能通过 API 接口关联表数据(为了增加吞吐,可以考虑多线程并发执行 API 接口后整合) + - 对于访问频繁、数据大的表,性能瓶颈依旧会存在 +- 这里只是写个大体思路,基础知识上面已经说了。 +- 假设以电商系统为例,拆分出:商品库、用户库、订单库,有 3 个 MySQL 实例各自存储一个业务库 +- 1. 因为不进行水平切分,所以不需要修改 rule.xml +- 2. 修改 server.xml,增加用户和权限 +- 3. 修改 schema.xml,增加逻辑库配置 + - dataHost 配置 3 个(只有 3 个 MySQL 实例) + - dataNode 配置 3 个,分别对应:商品库(1 个)、用户库(1 个)、订单库(1 个) + - schema 配置: + +``` + + + +
+<!-- 原文此处为垂直分库的 schema.xml 片段(商品库、用户库、订单库对应的 schema、dataNode、dataHost 标签在文档转换中丢失) -->
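+<!-- 补充示意(假设性示例,原片段的 XML 标签已丢失;名称、IP、端口均为虚构,仅供理解垂直分库时 schema、dataNode、dataHost 三者的对应关系):
+<schema name="order" checkSQLschema="false" sqlMaxLimit="100" dataNode="dn-order" />
+<dataNode name="dn-order" dataHost="dataHost-order" database="order_db" />
+<dataHost name="dataHost-order" maxCon="1000" minCon="10" balance="0" writeType="0" dbType="mysql" dbDriver="native">
+    <heartbeat>select user()</heartbeat>
+    <writeHost host="hostM1" url="192.168.0.101:3306" user="root" password="密码" />
+</dataHost>
+商品库、用户库同理,各自对应一组 schema、dataNode、dataHost
+-->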
+ + + +``` + +------------------------------------------------------------------------------ + +## 垂直分库基础上进行水平切分 + +- 水平分片原则 + - 能不切分是最好的,能用归档方式分开存储,分开查询的尽可能通过产品思维层面解决 + - 一般只推荐那些数据量大,并且读写频繁的表进行切分 + - 选择合适的切分规则、分片键 + - 尽可能避免跨分片 JOIN 操作 +- 水平分片的步骤 + - 选择分片键和分片算法 + - 一般分片键推荐的是查询条件基本都会带上的那个字段,或者影响面很广的字段 + - 分片键是能尽可能均匀把数据分片到各个节点 + - 没有什么可以选择的时候,推荐就是主键 + - MyCAT 配置分片节点 + - 测试分片节点 + - 业务数据迁移 + +#### 对订单相关业务进行水平切分 + +- 一般选择订单号或者所属用户 ID 进行分片,这里推荐使用所属用户 ID,因为查询订单信息基本都是从用户角度发起的 +- 1. 前面垂直分库已经修改 server.xml,这里不需要 +- 2. 修改 rule.xml,修改分片规则 + +``` + + + user_id + by-user-id-to-order + + + + + + 3 + +``` + +- 3. 修改 schema.xml,增加逻辑库配置 + - dataHost 配置 3 个(只有 3 个 MySQL 实例) + - dataNode 配置 5 个,分别对应:商品库(1 个)、用户库(1 个)、订单库(3 个) + - schema 配置,这里使用取模分片算法: + +``` + + + +
+<!-- 原文此处为水平切分后的 schema.xml 片段(商品、用户各 1 个 dataNode,订单 3 个 dataNode,相应标签在文档转换中丢失) -->
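+<!-- 补充示意(假设性示例,原片段的 XML 标签已丢失;表名、节点名均为虚构):订单表按 user_id 经 by-user-id-to-order 规则取模分到 3 个 dataNode,商品库、用户库仍各占 1 个 dataNode:
+<schema name="order" checkSQLschema="false" sqlMaxLimit="100">
+    <table name="t_order" dataNode="dn-order-1,dn-order-2,dn-order-3" rule="by-user-id-to-order" />
+</schema>
+<dataNode name="dn-order-1" dataHost="dataHost-1" database="order_db_1" />
+<dataNode name="dn-order-2" dataHost="dataHost-2" database="order_db_2" />
+<dataNode name="dn-order-3" dataHost="dataHost-3" database="order_db_3" />
+-->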
+ + + +``` + +------------------------------------------------------------------------------ + +## 其他常用配置 + +#### SQL 拦截(做审计,不分该 SQL 是否执行成功与否) + +- 修改 server.xml(只拦截 UPDATE,DELETE,INSERT) + +``` +io.mycat.server.interceptor.impl.StatisticsSqlInterceptor +UPDATE,DELETE,INSERT +/opt/mycat-log.txt +``` + +#### SQL 防火墙 + +- 作用 + - 限制某些用户只能通过某些主机访问(whitehost 标签) + - 屏蔽一些 SQL 语句(blacklist 标签) + +``` + + + + + + + + true + + +``` + + +------------------------------------------------------------------------------ + + + + ## 资料 - 书:《分布式数据库架构及企业实践-基于 Mycat 中间件》 From a787ac6e77625214b0ba55c227ef729339e62f20 Mon Sep 17 00:00:00 2001 From: judasn Date: Sun, 24 Jun 2018 16:05:28 +0800 Subject: [PATCH 039/330] =?UTF-8?q?2018-06-23=20=E8=A1=A5=E5=85=85=20mycat?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/Mycat-Install-And-Settings.md | 379 ++++++++++++++++++++ 1 file changed, 379 insertions(+) diff --git a/markdown-file/Mycat-Install-And-Settings.md b/markdown-file/Mycat-Install-And-Settings.md index 02fa114a..3ea1ab01 100644 --- a/markdown-file/Mycat-Install-And-Settings.md +++ b/markdown-file/Mycat-Install-And-Settings.md @@ -838,7 +838,386 @@ INSERT INTO `adg_ads`(`ads_id`,`ads_set_id`,`ads_title`,`shop_id`,`channel_id`, ------------------------------------------------------------------------------ +## 高可用方案(MySQL + MyCAT + Zookeeper + HAProxy + Keepalived) +#### MySQL(3 节点) + +- 端口使用: + - 3406 + - 3407 + - 3408 + +``` +docker run -p 3406:3306 --name mycat-mysql-1 -e MYSQL_ROOT_PASSWORD=adgADG123456 -d mysql:5.7 + +docker run -p 3407:3306 --name mycat-mysql-2 -e MYSQL_ROOT_PASSWORD=adgADG123456 -d mysql:5.7 + +docker run -p 3408:3306 --name mycat-mysql-3 -e MYSQL_ROOT_PASSWORD=adgADG123456 -d mysql:5.7 +``` + + + +#### MyCAT + Zookeeper + +###### Zookeeper 单机多个实例(集群) + +- 端口使用: + - 2281 + - 2282 + - 2283 + +- 创建 docker compose 文件:`vim zookeeper.yml` +- 下面内容来自官网仓库: + +``` +version: '3.1' + +services: + zoo1: + image: zookeeper + restart: always + hostname: zoo1 + ports: + - 2281:2181 + environment: + ZOO_MY_ID: 1 + ZOO_SERVERS: server.1=0.0.0.0:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888 + + zoo2: + image: zookeeper + restart: always + hostname: zoo2 + ports: + - 2282:2181 + environment: + ZOO_MY_ID: 2 + ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=0.0.0.0:2888:3888 server.3=zoo3:2888:3888 + + zoo3: + image: zookeeper + restart: always + hostname: zoo3 + ports: + - 2283:2181 + environment: + ZOO_MY_ID: 3 + ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=0.0.0.0:2888:3888 +``` + +- 启动:`docker-compose -f zookeeper.yml -p zk_test up -d` + - 参数 -p zk_test 表示这个 compose project 的名字,等价于:`COMPOSE_PROJECT_NAME=zk_test docker-compose -f zookeeper.yml up -d` + - 不指定项目名称,Docker-Compose 默认以当前文件目录名作为应用的项目名 + - 报错是正常情况的。 +- 停止:`docker-compose -f zookeeper.yml -p zk_test stop` + + +###### MyCAT 单机多个实例 + +- 必须有 JDK 环境(我这里使用的是:1.8.0_171) + +``` +tar -zxvf Mycat-server-1.6.5-release-20180503154132-linux.tar.gz + +mv mycat mycat-1 +``` + +- `cd /usr/local/mycat-1/conf` +- `vim server.xml` + +``` + + + 123456 + + adg_system + + false + + 0 + + 0 + +``` + +- `cd /usr/local/mycat-1/conf` +- `vim schema.xml` + +``` + + + + + + + + + + +
+<!-- 原文此处为高可用方案的 schema.xml 片段(XML 标签在文档转换中丢失,仅存三个 dataHost 的心跳语句 select user()) -->
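+<!-- 补充示意(假设性示例,原片段的 XML 标签已丢失;结合上下文可知逻辑库为 adg_system、按 shop_id 分片到 adg_system_0000~0002,三个 dataHost 的心跳均为 select user(),其中 dataHost1 与后文 myid.properties 的 boosterDataHosts=dataHost1 对应):
+<schema name="adg_system" checkSQLschema="false" sqlMaxLimit="100">
+    <table name="adg_ads" dataNode="dn0,dn1,dn2" rule="by-shop-id" />
+</schema>
+<dataNode name="dn0" dataHost="dataHost1" database="adg_system_0000" />
+<dataNode name="dn1" dataHost="dataHost2" database="adg_system_0001" />
+<dataNode name="dn2" dataHost="dataHost3" database="adg_system_0002" />
+-->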
+``` + +- `cd /usr/local/mycat-1/conf` +- `vim rule.xml` + +``` + + + shop_id + by-shop-id + + + + + + sharding-by-shop-id.txt + 1 + 0 + +``` + + + +``` +还需要在 conf 新增文件 sharding-by-shop-id.txt 文件,内容是: +需要注意的是: +417454619141211000=0 +417454619141211001=1 +417454619141211002=2 +``` + +``` +CREATE DATABASE adg_system_0000 CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci; +CREATE DATABASE adg_system_0001 CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci; +CREATE DATABASE adg_system_0002 CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci; +``` + + +- 使用 Zookeeper 配置 + +- `vim /usr/local/mycat-1/conf/myid.properties` + +``` +loadZk=true +zkURL=192.168.0.105:2281,192.168.0.105:2282,192.168.0.105:2283 +clusterId=mycat-cluster +myid=mycat_fz_01 +clusterSize=2 +clusterNodes=mycat_fz_01,mycat_fz_02 +#server booster ; booster install on db same server,will reset all minCon to 2 +type=server +boosterDataHosts=dataHost1 +``` + + +- 同步节点配置到 Zookeeper + +``` +cd /usr/local/mycat-1/conf +cp -f schema.xml server.xml rule.xml sharding-by-shop-id.txt zkconf/ + +sh /usr/local/mycat-1/bin/init_zk_data.sh + +``` + +- 重要参数: + +``` +clusterSize=2 表示有几个 MyCAT 节点数量 +``` + +- 解压另外一个节点: + +``` +tar -zxvf Mycat-server-1.6.5-release-20180503154132-linux.tar.gz + +mv mycat mycat-2 + +因为是单机多节点,所以这里需要修改几个端口参数 +vim /usr/local/mycat-2/conf/server.xml + +旧值: +8066 +9066 + +新值: +8067 +9067 + + +vim /usr/local/mycat-2/conf/wrapper.conf + +旧值: +wrapper.java.additional.7=-Dcom.sun.management.jmxremote.port=1984 + +新值: +wrapper.java.additional.7=-Dcom.sun.management.jmxremote.port=1985 +``` + +- 修改另外一个节点配置: + +- `vim /usr/local/mycat-2/conf/myid.properties` + +``` +loadZk=true +zkURL=192.168.0.105:2281,192.168.0.105:2282,192.168.0.105:2283 +clusterId=mycat-cluster +myid=mycat_fz_02 +clusterSize=2 +clusterNodes=mycat_fz_01,mycat_fz_02 +#server booster ; booster install on db same server,will reset all minCon to 2 +type=server +boosterDataHosts=dataHost1 +``` + +- 启动节点: + +``` +cd /usr/local/mycat-1/bin +后台启动:./mycat start && tail -300f /usr/local/mycat-1/logs/mycat.log +控制台启动:./mycat console +控制台启动:cd /usr/local/mycat-1/bin && ./mycat console +重启:./mycat restart +停止:./mycat stop + +cd /usr/local/mycat-2/bin +后台启动:./mycat start && tail -300f /usr/local/mycat-2/logs/mycat.log +控制台启动:./mycat console +控制台启动:cd /usr/local/mycat-2/bin && ./mycat console +重启:./mycat restart +停止:./mycat stop +``` + +- 创建数据结构: + +``` +CREATE TABLE `adg_ads` ( + `ads_id` BIGINT(20) NOT NULL COMMENT '广告表ID', + `ads_set_id` BIGINT(20) NOT NULL COMMENT '广告组表ID', + `ads_title` VARCHAR(32) NOT NULL COMMENT '广告标题', + `shop_id` BIGINT(20) NOT NULL COMMENT '店铺ID', + `channel_id` BIGINT(20) NOT NULL COMMENT '渠道ID', + `shop_name` VARCHAR(32) NOT NULL COMMENT '店铺名称', + `channel_name` VARCHAR(32) NOT NULL COMMENT '渠道名称', + PRIMARY KEY (`ads_id`) +) ENGINE=INNODB DEFAULT CHARSET=utf8mb4 COMMENT='广告表'; + + +CREATE TABLE `adg_ads_set` ( + `ads_set_id` BIGINT(20) NOT NULL COMMENT '广告组表ID', + `ads_set_title` VARCHAR(32) NOT NULL COMMENT '广告组标题', + `ads_campaign_id` BIGINT(20) NOT NULL COMMENT '广告系列表ID', + `shop_id` BIGINT(20) NOT NULL COMMENT '店铺ID', + `channel_id` BIGINT(20) NOT NULL COMMENT '渠道ID', + `shop_name` VARCHAR(32) NOT NULL COMMENT '店铺名称', + `channel_name` VARCHAR(32) NOT NULL COMMENT '渠道名称', + PRIMARY KEY (`ads_set_id`) +) ENGINE=INNODB DEFAULT CHARSET=utf8mb4 COMMENT='广告组表'; + + +CREATE TABLE `adg_ads_campaign` ( + `ads_campaign_id` BIGINT(20) NOT NULL COMMENT '广告系列表ID', + `ads_campaign_title` VARCHAR(32) NOT NULL COMMENT '广告系列标题', + `shop_id` BIGINT(20) NOT NULL 
COMMENT '店铺ID', + `channel_id` BIGINT(20) NOT NULL COMMENT '渠道ID', + `shop_name` VARCHAR(32) NOT NULL COMMENT '店铺名称', + `channel_name` VARCHAR(32) NOT NULL COMMENT '渠道名称', + PRIMARY KEY (`ads_campaign_id`) +) ENGINE=INNODB DEFAULT CHARSET=utf8mb4 COMMENT='广告系列表'; + + +CREATE TABLE `adg_channel` ( + `channel_id` BIGINT(20) NOT NULL COMMENT '渠道ID', + `channel_name` VARCHAR(32) NOT NULL COMMENT '渠道名称', + PRIMARY KEY (`channel_id`) +) ENGINE=INNODB DEFAULT CHARSET=utf8mb4 COMMENT='渠道表'; + + +CREATE TABLE `adg_shop` ( + `shop_id` BIGINT(20) NOT NULL COMMENT '店铺ID', + `shop_name` VARCHAR(32) NOT NULL COMMENT '店铺名称', + PRIMARY KEY (`shop_id`) +) ENGINE=INNODB DEFAULT CHARSET=utf8mb4 COMMENT='商品表'; + + +CREATE TABLE `adg_shop_channel` ( + `shop_channel_id` BIGINT(20) NOT NULL COMMENT '店铺渠道中间表ID', + `shop_id` BIGINT(20) NOT NULL COMMENT '店铺ID', + `channel_id` BIGINT(20) NOT NULL COMMENT '渠道ID', + `shop_name` VARCHAR(32) NOT NULL COMMENT '店铺名称', + `channel_name` VARCHAR(32) NOT NULL COMMENT '渠道名称', + PRIMARY KEY (`shop_channel_id`) +) ENGINE=INNODB DEFAULT CHARSET=utf8mb4 COMMENT='店铺渠道中间表'; +``` + + + +``` +INSERT INTO `adg_shop`(`shop_id`,`shop_name`) VALUES (417454619141211000,'NC站'); +INSERT INTO `adg_shop`(`shop_id`,`shop_name`) VALUES (417454619141211001,'BG站'); + +INSERT INTO `adg_channel`(`channel_id`,`channel_name`) VALUES (1,'Facebook'); +INSERT INTO `adg_channel`(`channel_id`,`channel_name`) VALUES (2,'Google'); +INSERT INTO `adg_channel`(`channel_id`,`channel_name`) VALUES (3,'Twitter'); + +INSERT INTO `adg_shop_channel`(`shop_channel_id`,`shop_id`,`channel_id`,`shop_name`,`channel_name`) VALUES (1,417454619141211000,1,'NC站','Facebook'); +INSERT INTO `adg_shop_channel`(`shop_channel_id`,`shop_id`,`channel_id`,`shop_name`,`channel_name`) VALUES (2,417454619141211000,2,'NC站','Google'); +INSERT INTO `adg_shop_channel`(`shop_channel_id`,`shop_id`,`channel_id`,`shop_name`,`channel_name`) VALUES (3,417454619141211001,1,'BG站','Facebook'); +INSERT INTO `adg_shop_channel`(`shop_channel_id`,`shop_id`,`channel_id`,`shop_name`,`channel_name`) VALUES (4,417454619141211001,2,'BG站','Google'); + +INSERT INTO `adg_ads_campaign`(`ads_campaign_id`,`ads_campaign_title`,`shop_id`,`channel_id`,`shop_name`,`channel_name`) VALUES (1,'第1个广告系列',417454619141211000,1,'NC站','Facebook'); +INSERT INTO `adg_ads_campaign`(`ads_campaign_id`,`ads_campaign_title`,`shop_id`,`channel_id`,`shop_name`,`channel_name`) VALUES (2,'第2个广告系列',417454619141211001,2,'BG站','Google'); + +INSERT INTO `adg_ads_set`(`ads_set_id`,`ads_set_title`,`ads_campaign_id`,`shop_id`,`channel_id`,`shop_name`,`channel_name`) VALUES (1,'第1个广告集',1,417454619141211000,1,'NC站','Facebook'); +INSERT INTO `adg_ads_set`(`ads_set_id`,`ads_set_title`,`ads_campaign_id`,`shop_id`,`channel_id`,`shop_name`,`channel_name`) VALUES (2,'第2个广告集',2,417454619141211001,2,'BG站','Google'); + +INSERT INTO `adg_ads`(`ads_id`,`ads_set_id`,`ads_title`,`shop_id`,`channel_id`,`shop_name`,`channel_name`) VALUES (1,1,'第1个广告',417454619141211000,1,'NC站','Facebook'); +INSERT INTO `adg_ads`(`ads_id`,`ads_set_id`,`ads_title`,`shop_id`,`channel_id`,`shop_name`,`channel_name`) VALUES (2,2,'第2个广告',417454619141211001,2,'BG站','Google'); +``` + + +#### HAProxy + Keepalived + +``` + +``` + +------------------------------------------------------------------------------ ## 资料 From 0f273e3c2a511d31cd470bd81333e7f92e9b4139 Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 28 Jun 2018 15:51:32 +0800 Subject: [PATCH 040/330] 2018-06-28 --- .../shell/install_mysql5.6_offline_centos7.sh | 41 
++++++++++++------- markdown-file/Mysql-Install-And-Settings.md | 5 ++- .../WordPress-Install-And-Settings.md | 7 +--- 3 files changed, 32 insertions(+), 21 deletions(-) diff --git a/favorite-file/shell/install_mysql5.6_offline_centos7.sh b/favorite-file/shell/install_mysql5.6_offline_centos7.sh index 11d42b92..e030a3dc 100644 --- a/favorite-file/shell/install_mysql5.6_offline_centos7.sh +++ b/favorite-file/shell/install_mysql5.6_offline_centos7.sh @@ -2,49 +2,62 @@ echo "安装 mysql 开始" +echo "判断常见的文件夹是否存在" + +if [ ! -d "/opt/setups" ]; then + mkdir /opt/setups +fi + +echo "判断 JDK 压缩包是否存在" + +if [ ! -f "/opt/setups/mysql-5.6.35.tar.gz" ]; then + echo "mysql 压缩包不存在" + exit 1 +fi + cd /opt/setups tar zxvf mysql-5.6.35.tar.gz -mv /opt/setups/mysql-5.6.35 /usr/program/ +mv /opt/setups/mysql-5.6.35 /usr/local/ -yum install -y make gcc-c++ cmake bison-devel ncurses-devel +yum install -y make gcc-c++ cmake bison-devel ncurses-devel autoconf -cd /usr/program/mysql-5.6.35/ +cd /usr/local/mysql-5.6.35/ -mkdir -p /usr/program/mysql/data +mkdir -p /usr/local/mysql/data -cmake -DCMAKE_INSTALL_PREFIX=/usr/program/mysql -DMYSQL_DATADIR=/usr/program/mysql/data -DMYSQL_UNIX_ADDR=/tmp/mysql.sock -DDEFAULT_CHARSET=utf8 -DDEFAULT_COLLATION=utf8_general_ci -DWITH_EXTRA_CHARSETS:STRING=utf8 -DWITH_MYISAM_STORAGE_ENGINE=1 -DWITH_INNOBASE_STORAGE_ENGINE=1 -DENABLED_LOCAL_INFILE=1 +cmake -DCMAKE_INSTALL_PREFIX=/usr/local/mysql -DMYSQL_DATADIR=/usr/local/mysql/data -DMYSQL_UNIX_ADDR=/tmp/mysql.sock -DDEFAULT_CHARSET=utf8 -DDEFAULT_COLLATION=utf8_general_ci -DWITH_EXTRA_CHARSETS:STRING=utf8 -DWITH_MYISAM_STORAGE_ENGINE=1 -DWITH_INNOBASE_STORAGE_ENGINE=1 -DENABLED_LOCAL_INFILE=1 make make install -cp /usr/program/mysql-5.6.35/support-files/mysql.server /etc/init.d/mysql +cp /usr/local/mysql-5.6.35/support-files/mysql.server /etc/init.d/mysql chmod 755 /etc/init.d/mysql chkconfig mysql on -cp /usr/program/mysql-5.6.35/support-files/my-default.cnf /etc/my.cnf +cp /usr/local/mysql-5.6.35/support-files/my-default.cnf /etc/my.cnf -rm -rf /usr/program/mysql-5.6.35/ +rm -rf /usr/local/mysql-5.6.35/ groupadd mysql useradd -g mysql mysql -s /bin/false -chown -R mysql:mysql /usr/program/mysql/data +chown -R mysql:mysql /usr/local/mysql/data -/usr/program/mysql/scripts/mysql_install_db --basedir=/usr/program/mysql --datadir=/usr/program/mysql/data --skip-name-resolve --user=mysql +/usr/local/mysql/scripts/mysql_install_db --basedir=/usr/local/mysql --datadir=/usr/local/mysql/data --skip-name-resolve --user=mysql -ln -s /usr/program/mysql/bin/mysql /usr/bin +ln -s /usr/local/mysql/bin/mysql /usr/bin -ln -s /usr/program/mysql/bin/mysqladmin /usr/bin +ln -s /usr/local/mysql/bin/mysqladmin /usr/bin -ln -s /usr/program/mysql/bin/mysqldump /usr/bin +ln -s /usr/local/mysql/bin/mysqldump /usr/bin -ln -s /usr/program/mysql/bin/mysqlslap /usr/bin +ln -s /usr/local/mysql/bin/mysqlslap /usr/bin echo "防火墙放行 3306 端口" systemctl restart firewalld.service diff --git a/markdown-file/Mysql-Install-And-Settings.md b/markdown-file/Mysql-Install-And-Settings.md index 4d443f50..50174aac 100644 --- a/markdown-file/Mysql-Install-And-Settings.md +++ b/markdown-file/Mysql-Install-And-Settings.md @@ -92,7 +92,7 @@ max_allowed_packet = 50M - 进入下载目录:`cd /opt/setups` - 解压压缩包:`tar zxvf mysql-5.6.35.tar.gz` - 移到解压包:`mv /opt/setups/mysql-5.6.35 /usr/program/` - - 安装依赖包、编译包:`yum install -y make gcc-c++ cmake bison-devel ncurses-devel` + - 安装依赖包、编译包:`yum install -y make gcc-c++ cmake bison-devel ncurses-devel autoconf` - 进入解压目录:`cd /usr/program/mysql-5.6.35/` - 
 - 生成安装目录:`mkdir -p /usr/program/mysql/data`
 - 生成配置(使用 InnoDB):`cmake -DCMAKE_INSTALL_PREFIX=/usr/program/mysql -DMYSQL_DATADIR=/usr/program/mysql/data -DMYSQL_UNIX_ADDR=/tmp/mysql.sock -DDEFAULT_CHARSET=utf8mb4 -DDEFAULT_COLLATION=utf8mb4_unicode_ci -DWITH_EXTRA_CHARSETS:STRING=utf8mb4 -DWITH_MYISAM_STORAGE_ENGINE=1 -DWITH_INNOBASE_STORAGE_ENGINE=1 -DENABLED_LOCAL_INFILE=1`
@@ -145,7 +145,8 @@ max_allowed_packet = 50M
 
 ## 修改 root 账号密码
 
-- 启动 Mysql 服务器:`service mysql start`
+- 启动 Mysql 服务器(CentOS 6):`service mysql start`
+- 启动 Mysql 服务器(CentOS 7):`systemctl start mysql`
 - 查看是否已经启动了:`ps aux | grep mysql`
 - 默认安装情况下,root 的密码是空,所以为了方便我们可以设置一个密码,假设我设置为:123456
 - 终端下执行:`mysql -uroot`
diff --git a/markdown-file/WordPress-Install-And-Settings.md b/markdown-file/WordPress-Install-And-Settings.md
index 3eae4909..3075a7a7 100644
--- a/markdown-file/WordPress-Install-And-Settings.md
+++ b/markdown-file/WordPress-Install-And-Settings.md
@@ -28,12 +28,9 @@ systemctl enable httpd.service
 
 ## 安装 MySQL
 
-#### MySQL 5.6(尽可能不用旧版本)
+#### MySQL 5.6 安装和配置(如果就 1G 内存那就不要用 5.7)
 
-```
-sudo rpm -Uvh http://dev.mysql.com/get/mysql-community-release-el7-5.noarch.rpm
-yum install mysql mysql-server mysql-libs mysql-server
-```
+- [MySQL 5.6](Mysql-Install-And-Settings.md)
 
 #### MySQL 5.7(推荐)
 

From a290d08aa0c98b65cd785c542f06061516fe0d58 Mon Sep 17 00:00:00 2001
From: judasn
Date: Thu, 28 Jun 2018 16:15:07 +0800
Subject: [PATCH 041/330] 2018-06-28

---
 markdown-file/Mysql-Install-And-Settings.md | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/markdown-file/Mysql-Install-And-Settings.md b/markdown-file/Mysql-Install-And-Settings.md
index 50174aac..1f9e2a19 100644
--- a/markdown-file/Mysql-Install-And-Settings.md
+++ b/markdown-file/Mysql-Install-And-Settings.md
@@ -151,16 +151,19 @@ max_allowed_packet = 50M
 - 默认安装情况下,root 的密码是空,所以为了方便我们可以设置一个密码,假设我设置为:123456
 - 终端下执行:`mysql -uroot`
 - 现在进入了 mysql 命令行管理界面,输入:`SET PASSWORD = PASSWORD('123456');FLUSH PRIVILEGES;`
+  - 现在进入了 mysql 命令行管理界面,输入:`UPDATE user SET authentication_string=PASSWORD('123456') where USER='root';FLUSH PRIVILEGES;`
 - 修改密码后,终端下执行:`mysql -uroot -p`
 - 根据提示,输入密码进入 mysql 命令行状态。
 - 如果你在其他机子上连接该数据库机子报:**Access denied for user 'root'@'localhost' (using password: YES)**
   - 解决办法:
-    - 在终端中执行:`service mysql stop`
+    - 在终端中执行(CentOS 6):`service mysql stop`
+    - 在终端中执行(CentOS 7):`systemctl stop mysql`
     - 在终端中执行(前面添加的 Linux 用户 mysql 必须有存在):`/usr/program/mysql/bin/mysqld --skip-grant-tables --user=mysql`
     - 此时 MySQL 服务会一直处于监听状态,你需要另起一个终端窗口来执行接下来的操作
     - 在终端中执行:`mysql -u root mysql`
     - 把密码改为:123456,进入 MySQL 命令后执行:`UPDATE user SET Password=PASSWORD('123456') where USER='root';FLUSH PRIVILEGES;`
-    - 然后重启 MySQL 服务:`service mysql restart`
+    - 然后重启 MySQL 服务(CentOS 6):`service mysql restart`
+    - 然后重启 MySQL 服务(CentOS 7):`systemctl restart mysql`
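
- 把上面这套免授权表(--skip-grant-tables)重置 root 密码的步骤串成一个操作序列,方便对照(示意:按本文编译安装的 /usr/program 路径和 CentOS 7 命令假设,实际路径、服务名以你机器为准):

```
# 终端 A:先停掉 MySQL,再以跳过授权表的方式前台启动(会一直监听)
systemctl stop mysql
/usr/program/mysql/bin/mysqld --skip-grant-tables --user=mysql

# 终端 B:免密进入并重置密码,之后回到终端 A 结束 mysqld,再正常重启
mysql -u root mysql -e "UPDATE user SET Password=PASSWORD('123456') WHERE USER='root'; FLUSH PRIVILEGES;"
systemctl restart mysql
```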
From 7d2f2faa1a49f2718bf1c3bdacbdf8f6be375a4a Mon Sep 17 00:00:00 2001
From: judasn
Date: Thu, 28 Jun 2018 16:43:39 +0800
Subject: =?UTF-8?q?2018-06-28=20=E8=A1=A5=E5=85=85=20MySQL?=
 =?UTF-8?q?=20=E7=9B=B8=E5=85=B3?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 markdown-file/Mysql-Install-And-Settings.md     | 7 +++++++
 markdown-file/WordPress-Install-And-Settings.md | 8 ++++++++
 2 files changed, 15 insertions(+)

diff --git a/markdown-file/Mysql-Install-And-Settings.md b/markdown-file/Mysql-Install-And-Settings.md
index 1f9e2a19..9e5b91be 100644
--- a/markdown-file/Mysql-Install-And-Settings.md
+++ b/markdown-file/Mysql-Install-And-Settings.md
@@ -142,6 +142,13 @@ max_allowed_packet = 50M
 - 保留 **/etc/my.cnf** 和 **/usr/program/mysql/mysql-test/** 目录下配置文件,其他删除掉。
 - 我整理的一个单机版配置说明(MySQL 5.6,适用于 1G 内存的服务器):
   - [my.cnf](MySQL-Settings/MySQL-5.6/1G-Memory-Machine/my-for-comprehensive.cnf)
+- 其中我测试的结果,在不使用任何配置修改的情况下,1G 内存安装 MySQL 5.6 默认就会占用 400M 左右的内存,要降下来的核心配置要补上这几个参数:
+
+```
+performance_schema_max_table_instances=400
+table_definition_cache=400
+table_open_cache=256
+```
 
 ## 修改 root 账号密码
 
diff --git a/markdown-file/WordPress-Install-And-Settings.md b/markdown-file/WordPress-Install-And-Settings.md
index 3075a7a7..97a46d87 100644
--- a/markdown-file/WordPress-Install-And-Settings.md
+++ b/markdown-file/WordPress-Install-And-Settings.md
@@ -162,6 +162,14 @@ AllowOverride All
 ```
 
+- 重启 Apache
+
+```
+systemctl restart httpd.service
+systemctl enable httpd.service
+```
+
+
 ## 创建数据库
 
 - SQL 语句:`CREATE DATABASE wordpress DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;`

From 4127b725a8eadba323e6129ec6e43975183dff10 Mon Sep 17 00:00:00 2001
From: judasn
Date: Mon, 2 Jul 2018 11:36:44 +0800
Subject: =?UTF-8?q?2018-07-02=20=E8=A1=A5=E5=85=85?=
 =?UTF-8?q?=E7=94=A8=E6=88=B7=E7=9B=B8=E5=85=B3?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 markdown-file/Bash.md | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/markdown-file/Bash.md b/markdown-file/Bash.md
index 477101d2..7c3bb9e8 100644
--- a/markdown-file/Bash.md
+++ b/markdown-file/Bash.md
@@ -173,7 +173,10 @@ drwxr-xr-x. 5 root root 4096 3月 26 10:57,其中最前面的 d 表示这是
 - `exit`,注销当前用户(常用)
 - `sudo 某个命令`,使用管理员权限使用命令,使用 sudo 回车之后需要输入当前登录账号的密码。(常用)
 - `passwd`,修改当前用户密码(常用)
-
+- 添加临时账号,并指定用户根目录,且只给可读权限的方法
+  - 添加账号并指定根目录(用户名 tempuser):`useradd -d /data/logs -m tempuser`
+  - 设置密码:`passwd tempuser` 回车设置密码
+  - 删除用户(该用户必须退出 SSH 才能删除成功),也会同时删除组:`userdel tempuser`
 
 ## 磁盘管理
 

From 1061e2452f5eb36679fb1fe3693968f917dcc83c Mon Sep 17 00:00:00 2001
From: judasn
Date: Mon, 2 Jul 2018 19:32:40 +0800
Subject: =?UTF-8?q?2018-07-02=20=E8=A1=A5=E5=85=85=20GoAcc?=
 =?UTF-8?q?ess?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 README.md                                     |   1 +
 SUMMARY.md                                    |   3 +-
 TOC.md                                        |   3 +-
 .../GoAccess-Install-And-Settings.md          | 160 ++++++++++++++++++
 4 files changed, 165 insertions(+), 2 deletions(-)
 create mode 100644 markdown-file/GoAccess-Install-And-Settings.md

diff --git a/README.md b/README.md
index b7395ef5..13f23112 100644
--- a/README.md
+++ b/README.md
@@ -89,6 +89,7 @@
 - [Kafka 安装和配置](markdown-file/Kafka-Install-And-Settings.md)
 - [Showdoc 安装和配置](markdown-file/Showdoc-Install-And-Settings.md)
 - [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md)
+- [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md)
 
 ## 联系(Contact)
 
diff --git a/SUMMARY.md b/SUMMARY.md
index 787d9e4a..01117a82 100644
--- a/SUMMARY.md
+++ b/SUMMARY.md
@@ -77,4 +77,5 @@
 * [YApi 安装和配置](markdown-file/YApi-Install-And-Settings.md)
 * [Kafka 安装和配置](markdown-file/Kafka-Install-And-Settings.md)
 * [Showdoc 安装和配置](markdown-file/Showdoc-Install-And-Settings.md)
-* [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md)
\ No newline at end of file
+* [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md)
+* [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md)
\ No newline at end of file
diff --git a/TOC.md b/TOC.md
index 3c93a7ef..3031dca3 100644
--- a/TOC.md
+++ b/TOC.md
@@ -74,4 +74,5 @@
- [YApi
安装和配置](markdown-file/YApi-Install-And-Settings.md) - [Kafka 安装和配置](markdown-file/Kafka-Install-And-Settings.md) - [Showdoc 安装和配置](markdown-file/Showdoc-Install-And-Settings.md) -- [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md) \ No newline at end of file +- [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md) +- [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) \ No newline at end of file diff --git a/markdown-file/GoAccess-Install-And-Settings.md b/markdown-file/GoAccess-Install-And-Settings.md new file mode 100644 index 00000000..5804718e --- /dev/null +++ b/markdown-file/GoAccess-Install-And-Settings.md @@ -0,0 +1,160 @@ +# GoAccess 安装和配置 + +## 官网资料 + +- 一般用于 Apache, Nginx 的 Log 分析 +- 官网: +- 官网下载(201807 最新版本 1.2): +- 官网 Github: +- 国内中文站: + + +## 安装(CentOS 7.4) + +1. 安装依赖包 + +``` +yum install -y ncurses-devel +wget http://geolite.maxmind.com/download/geoip/api/c/GeoIP.tar.gz +tar -zxvf GeoIP.tar.gz +cd GeoIP-1.4.8/ +./configure +make && make install +``` + +2. 安装 GoAccess + +``` +wget http://tar.goaccess.io/goaccess-1.2.tar.gz +tar -xzvf goaccess-1.2.tar.gz +cd goaccess-1.2/ +./configure --enable-utf8 --enable-geoip=legacy +make && make install +``` + +## 配置 + +- 假设你 nginx 安装在:`/usr/local/nginx` +- 假设你 nginx 的 log 输出到:`/var/log/nginx` +- 修改 `vim /usr/local/nginx/conf/nginx.conf` 指定 nginx 的日志格式 + +``` + +http { + charset utf8; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for" "$request_time"'; + + access_log /var/log/nginx/access.log main; + error_log /var/log/nginx/error.log; +} +``` + +- 停止 nginx:`/usr/local/nginx/sbin/nginx -s stop` +- 备份旧的 nginx log 文件:`mv /var/log/nginx/access.log /var/log/nginx/access.log.20180702back` +- 启动 nginx:`/usr/local/nginx/sbin/nginx` +- 创建 GoAccess 配置文件:`vim /etc/goaccess_log_conf_nginx.conf` + +``` +time-format %T +date-format %d/%b/%Y +log_format %h - %^ [%d:%t %^] "%r" %s %b "%R" "%u" "%^" %^ %^ %^ %T +``` + + +## 使用 + +#### 手动生成当前统计页面 + +``` +goaccess -f /var/log/nginx/access.log -p /etc/goaccess_log_conf_nginx.conf -o /usr/local/nginx/report/index.html +``` + +- 更多参数用法: + +``` +时间分布图上:按小时展示数据: +goaccess -f /var/log/nginx/access.log -p /etc/goaccess_log_conf_nginx.conf -o /usr/local/nginx/report/index.html --ignore-crawlers --hour-spec=min + + +时间分布图上:按分钟展示数据: +goaccess -f /var/log/nginx/access.log -p /etc/goaccess_log_conf_nginx.conf -o /usr/local/nginx/report/index.html --ignore-crawlers --hour-spec=hour + + +不显示指定的面板 +goaccess -f /var/log/nginx/access.log -p /etc/goaccess_log_conf_nginx.conf -o /usr/local/nginx/report/index.html --ignore-crawlers --hour-spec=min \ + --ignore-panel=VISITORS \ + --ignore-panel=REQUESTS \ + --ignore-panel=REQUESTS_STATIC \ + --ignore-panel=NOT_FOUND \ + --ignore-panel=HOSTS \ + --ignore-panel=OS \ + --ignore-panel=BROWSERS \ + --ignore-panel=VIRTUAL_HOSTS \ + --ignore-panel=REFERRERS \ + --ignore-panel=REFERRING_SITES \ + --ignore-panel=KEYPHRASES \ + --ignore-panel=STATUS_CODES \ + --ignore-panel=REMOTE_USER \ + --ignore-panel=GEO_LOCATION + +我一般只留下几个面板(排除掉不想看的面板,因为使用 --enable-panel 参数无法达到这个目的) +goaccess -f /var/log/nginx/access.log -p /etc/goaccess_log_conf_nginx.conf -o /usr/local/nginx/report/index.html --ignore-crawlers --hour-spec=min \ + --ignore-panel=VISITORS \ + --ignore-panel=REQUESTS_STATIC \ + --ignore-panel=NOT_FOUND \ + --ignore-panel=OS \ + --ignore-panel=VIRTUAL_HOSTS \ + --ignore-panel=REFERRERS \ + --ignore-panel=KEYPHRASES \ + 
--ignore-panel=REMOTE_USER \ + --ignore-panel=GEO_LOCATION +``` + +#### 方便执行命令创建脚本 + +- `vim goaccess_report_by_min.sh` + +``` +goaccess -f /var/log/nginx/access.log -p /etc/goaccess_log_conf_nginx.conf -o /usr/local/nginx/report/index.html --ignore-crawlers --hour-spec=min \ + --ignore-panel=VISITORS \ + --ignore-panel=REQUESTS_STATIC \ + --ignore-panel=NOT_FOUND \ + --ignore-panel=OS \ + --ignore-panel=VIRTUAL_HOSTS \ + --ignore-panel=REFERRERS \ + --ignore-panel=KEYPHRASES \ + --ignore-panel=REMOTE_USER \ + --ignore-panel=GEO_LOCATION +``` + +- `vim goaccess_report_by_hour.sh` + +``` +goaccess -f /var/log/nginx/access.log -p /etc/goaccess_log_conf_nginx.conf -o /usr/local/nginx/report/index.html --ignore-crawlers --hour-spec=hour \ + --ignore-panel=VISITORS \ + --ignore-panel=REQUESTS_STATIC \ + --ignore-panel=NOT_FOUND \ + --ignore-panel=OS \ + --ignore-panel=VIRTUAL_HOSTS \ + --ignore-panel=REFERRERS \ + --ignore-panel=KEYPHRASES \ + --ignore-panel=REMOTE_USER \ + --ignore-panel=GEO_LOCATION +``` + +#### 实时生成统计页面 + +- 我个人看法是:一般没必要浪费这个性能,需要的时候执行下脚本就行了。 +- 官网文档:,查询关键字:**REAL TIME HTML OUTPUT** + +``` +goaccess -f /var/log/nginx/access.log -p /etc/goaccess_log_conf_nginx.conf -o /usr/local/nginx/report/index.html --real-time-html --daemonize +``` + +## 资料 + +- +- From f061bb7b342f52bef9bc8bda28d6019aa8f317c4 Mon Sep 17 00:00:00 2001 From: "Judas.n" Date: Mon, 2 Jul 2018 23:30:15 +0800 Subject: [PATCH 045/330] Update GoAccess-Install-And-Settings.md --- markdown-file/GoAccess-Install-And-Settings.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/markdown-file/GoAccess-Install-And-Settings.md b/markdown-file/GoAccess-Install-And-Settings.md index 5804718e..0d45fb8a 100644 --- a/markdown-file/GoAccess-Install-And-Settings.md +++ b/markdown-file/GoAccess-Install-And-Settings.md @@ -11,7 +11,9 @@ ## 安装(CentOS 7.4) -1. 安装依赖包 +- 注意,如果是在 CentOS 6 下安装会碰到一些问题,可以参考: + +- 1. 安装依赖包 ``` yum install -y ncurses-devel @@ -22,7 +24,7 @@ cd GeoIP-1.4.8/ make && make install ``` -2. 安装 GoAccess +- 2. 
安装 GoAccess ``` wget http://tar.goaccess.io/goaccess-1.2.tar.gz From 4f100c574e2fbbc4e4ad913d3c3910341b908532 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 4 Jul 2018 16:19:45 +0800 Subject: [PATCH 046/330] =?UTF-8?q?2018-07-04=20=E8=A1=A5=E5=85=85=20GoAcc?= =?UTF-8?q?ess?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/GoAccess-Install-And-Settings.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/markdown-file/GoAccess-Install-And-Settings.md b/markdown-file/GoAccess-Install-And-Settings.md index 0d45fb8a..26e0f920 100644 --- a/markdown-file/GoAccess-Install-And-Settings.md +++ b/markdown-file/GoAccess-Install-And-Settings.md @@ -68,6 +68,13 @@ log_format %h - %^ [%d:%t %^] "%r" %s %b "%R" "%u" "%^" %^ %^ %^ %T ## 使用 +#### 在终端上展示数据 + +``` +goaccess -a -d -f /var/log/nginx/access.log -p /etc/goaccess_log_conf_nginx.conf +``` + + #### 手动生成当前统计页面 ``` From 8ca6600cd37270cdedbab01300fb3dddf4c1a650 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 6 Jul 2018 16:55:07 +0800 Subject: [PATCH 047/330] =?UTF-8?q?2018-07-06=20=E8=A1=A5=E5=85=85=20Porta?= =?UTF-8?q?iner?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 1 + SUMMARY.md | 3 +- TOC.md | 3 +- .../Portainer-Install-And-Settings.md | 41 +++++++++++++++++++ 4 files changed, 46 insertions(+), 2 deletions(-) create mode 100644 markdown-file/Portainer-Install-And-Settings.md diff --git a/README.md b/README.md index 13f23112..cdb6e19d 100644 --- a/README.md +++ b/README.md @@ -90,6 +90,7 @@ - [Showdoc 安装和配置](markdown-file/Showdoc-Install-And-Settings.md) - [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md) - [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) +- [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) ## 联系(Contact) diff --git a/SUMMARY.md b/SUMMARY.md index 01117a82..1abff5bf 100644 --- a/SUMMARY.md +++ b/SUMMARY.md @@ -78,4 +78,5 @@ * [Kafka 安装和配置](markdown-file/Kafka-Install-And-Settings.md) * [Showdoc 安装和配置](markdown-file/Showdoc-Install-And-Settings.md) * [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md) -* [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) \ No newline at end of file +* [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) +* [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) \ No newline at end of file diff --git a/TOC.md b/TOC.md index 3031dca3..e97c0361 100644 --- a/TOC.md +++ b/TOC.md @@ -75,4 +75,5 @@ - [Kafka 安装和配置](markdown-file/Kafka-Install-And-Settings.md) - [Showdoc 安装和配置](markdown-file/Showdoc-Install-And-Settings.md) - [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md) -- [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) \ No newline at end of file +- [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) +- [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) \ No newline at end of file diff --git a/markdown-file/Portainer-Install-And-Settings.md b/markdown-file/Portainer-Install-And-Settings.md new file mode 100644 index 00000000..9aaa85a7 --- /dev/null +++ b/markdown-file/Portainer-Install-And-Settings.md @@ -0,0 +1,41 @@ +# Portainer 安装和配置、优化 + +## 介绍 + +- 官网: +- 官网 GitHub: +- 官网文档: + +## 安装 + +- 创建文件:`vim docker-compose.yml` + +``` +version: '3' +services: + portainer: + container_name: portainer + image: portainer/portainer + volumes: + - /data/portainer:/data + - /var/run/docker.sock:/var/run/docker.sock + 
ports:
+      - "9000:9000"
+```
+
+
+
+## 对本地监控配置
+
+- 因为 Portainer 镜像构建的时候已经配置了:`/var/run/docker.sock:/var/run/docker.sock`,所以对于跟 Portainer 同一台机子的其他容器都可以被直接监控
+- 浏览器访问:`http://192.168.1.2:9000`
+
+## 远程监控配置
+
+- 待完善
+
+## 资料
+
+- [Portainer 容器管理](https://blog.mallux.me/2017/04/13/portainer/)
+
+

From 172ef99e675e09b24ed1b1b34e57bb8925a868d8 Mon Sep 17 00:00:00 2001
From: judasn
Date: Fri, 6 Jul 2018 17:01:32 +0800
Subject: =?UTF-8?q?2018-07-06=20=E8=A1=A5=E5=85=85=20Porta?=
 =?UTF-8?q?iner?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 markdown-file/Portainer-Install-And-Settings.md | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/markdown-file/Portainer-Install-And-Settings.md b/markdown-file/Portainer-Install-And-Settings.md
index 9aaa85a7..2788b7a8 100644
--- a/markdown-file/Portainer-Install-And-Settings.md
+++ b/markdown-file/Portainer-Install-And-Settings.md
@@ -8,6 +8,8 @@
 
 ## 安装
 
+- 创建文件夹:`mkdir -p /data/docker/portainer`
+- 赋权:`chmod -R 777 /data/docker/portainer`
 - 创建文件:`vim docker-compose.yml`
 
 ```
@@ -17,18 +19,20 @@ services:
     container_name: portainer
     image: portainer/portainer
     volumes:
-      - /data/portainer:/data
+      - /data/docker/portainer:/data
       - /var/run/docker.sock:/var/run/docker.sock
     ports:
       - "9000:9000"
 ```
 
+- 启动:`docker-compose up -d`
 
 ## 对本地监控配置
 
 - 因为 Portainer 镜像构建的时候已经配置了:`/var/run/docker.sock:/var/run/docker.sock`,所以对于跟 Portainer 同一台机子的其他容器都可以被直接监控
 - 浏览器访问:`http://192.168.1.2:9000`
+- 第一次启动会让你创建用户名和密码。第二步就是配置管理哪里的 docker 容器,我这里选择:local
 
 ## 远程监控配置
 

From bccdbd123df8f90cdd833e99a12f6063d0562f66 Mon Sep 17 00:00:00 2001
From: judasn
Date: Fri, 6 Jul 2018 17:03:10 +0800
Subject: =?UTF-8?q?2018-07-06=20=E8=A1=A5=E5=85=85=20Porta?=
 =?UTF-8?q?iner?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 markdown-file/Portainer-Install-And-Settings.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/markdown-file/Portainer-Install-And-Settings.md b/markdown-file/Portainer-Install-And-Settings.md
index 2788b7a8..308efe4b 100644
--- a/markdown-file/Portainer-Install-And-Settings.md
+++ b/markdown-file/Portainer-Install-And-Settings.md
@@ -26,7 +26,7 @@ services:
 ```
 
 - 启动:`docker-compose up -d`
-
+- 该容器占用内存非常非常小,只有 5 M 左右。
 
 ## 对本地监控配置
 

From 18fc87e15af96600736acce8bb34da58df1dc85d Mon Sep 17 00:00:00 2001
From: judasn
Date: Mon, 9 Jul 2018 10:31:21 +0800
Subject: =?UTF-8?q?2018-07-09=20=E8=A1=A5=E5=85=85=20Cront?=
 =?UTF-8?q?ab?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 markdown-file/Crontab.md | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/markdown-file/Crontab.md b/markdown-file/Crontab.md
index 23c734d4..eed7f1c7 100644
--- a/markdown-file/Crontab.md
+++ b/markdown-file/Crontab.md
@@ -8,13 +8,20 @@
 
 - Ubuntu:`dpkg -l | grep cron`
 - 安装(一般系统是集成的):
-    - CentOS 6:`sudo yum install -y vixie-cron crontabs`
+    - CentOS 6 / 7:`sudo yum install -y vixie-cron crontabs`
     - Ubuntu:`sudo apt-get install -y cron`
 - 服务常用命令
-    - service crond start //启动服务
-    - service crond stop //关闭服务
-    - service crond restart //重启服务
+    - CentOS 6
+        - `service crond start` 启动服务
+        - `service crond stop` 关闭服务
+        - `service crond restart` 重启服务
+    - CentOS 7
+        - `systemctl start crond` 启动服务
+        - `systemctl restart crond` 重新启动服务
+        - `systemctl status crond` 查看服务状态
+        - `systemctl enable crond` 加入自启动
+        - `systemctl stop crond` 关闭服务
 
 
 ## Crontab 服务器配置文件常用参数
 
@@ -28,6 +35,7 @@
 
 - ![Crontab 服务器配置文件常用参数](../images/Crontab-a-1.jpg)
 - 常用例子介绍:
     - `30 21 * * * service httpd restart` #每晚的 21:30 重启 apache
     - `45 4 1,10,22 * * service httpd restart` #每月的 1、10、22 日的 4:45 重启 apache
     - `45 4 1-10 * * service httpd restart` #每月的 1 到 10 日的 4:45 重启 apache
     - `*/2 * * * * service httpd restart` #每隔两分钟重启 apache
     - `1-59/2 * * * * service httpd restart` #每隔两分钟重启 apache(这个比较特殊:1-59/2 这个表示过掉0分,从 1 分开始算,每隔两分执行,所以 1 分执行了,3 分执行了,5 分执行了....都是奇数进行执行。默认的 */2 都是偶数执行。)
+    - `0 */2 * * * service httpd restart` #每隔两小时重启 apache
     - `0 23-7/2 * * * service httpd restart` #晚上 11 点到早上 7 点之间,每隔 2 个小时重启 apache
     - `0-59/30 18-23 * * * service httpd restart` #每天 18:00 到 23:00 之间,每隔 30 分钟重启 apache(方法一)
     - `0,30 18-23 * * * service httpd restart` #每天 18:00 到 23:00 之间,每隔 30 分钟重启 apache(方法二)
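
- 顺带补一个写计划任务时的常见做法(示意,`/opt/backup.sh` 是假设的脚本路径):crontab 里命令的输出默认不会落盘,排查问题不方便,一般会把标准输出和错误输出一起重定向到日志文件。

```
# crontab -e 后新增一行:每天 02:30 执行备份脚本,输出追加到日志
30 2 * * * /opt/backup.sh >> /var/log/backup_cron.log 2>&1
```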
From 827e63116d3f8b62b51ebd2bb17cab04b590a555 Mon Sep 17 00:00:00 2001
From: judasn
Date: Mon, 9 Jul 2018 14:22:41 +0800
Subject: =?UTF-8?q?2018-07-09=20=E8=A1=A5=E5=85=85=20Cront?=
 =?UTF-8?q?ab?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 markdown-file/Crontab.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/markdown-file/Crontab.md b/markdown-file/Crontab.md
index eed7f1c7..2bb88205 100644
--- a/markdown-file/Crontab.md
+++ b/markdown-file/Crontab.md
@@ -31,6 +31,7 @@
 
 - ![Crontab 服务器配置文件常用参数](../images/Crontab-a-1.jpg)
 - 常用例子介绍:
     - `30 21 * * * service httpd restart` #每晚的 21:30 重启 apache
+    - `30 21 * * 6,0 service httpd restart` #每周六、周日的 21:30 重启 apache
     - `45 4 1,10,22 * * service httpd restart` #每月的 1、10、22 日的 4:45 重启 apache
     - `45 4 1-10 * * service httpd restart` #每月的 1 到 10 日的 4:45 重启 apache

From 1b0204d2ffe5f7182e661afcdf4e6f5c60023b76 Mon Sep 17 00:00:00 2001
From: judasn
Date: Tue, 10 Jul 2018 09:19:56 +0800
Subject: =?UTF-8?q?2018-07-10=20=E8=A1=A5=E5=85=85?=
 =?UTF-8?q?=E5=B0=8F=E5=86=85=E5=AD=98=E7=9A=84=20MySQL=20=E9=85=8D?=
 =?UTF-8?q?=E7=BD=AE?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 markdown-file/Mysql-Install-And-Settings.md | 33 +++++++++++++++++++++
 1 file changed, 33 insertions(+)

diff --git a/markdown-file/Mysql-Install-And-Settings.md b/markdown-file/Mysql-Install-And-Settings.md
index 9e5b91be..4ab03b28 100644
--- a/markdown-file/Mysql-Install-And-Settings.md
+++ b/markdown-file/Mysql-Install-And-Settings.md
@@ -196,6 +196,39 @@
 set password = password('新密码');
 FLUSH PRIVILEGES;
 ```
 
+## 小内存机子,MySQL 频繁挂掉解决办法(1G + CentOS 7.4)
+
+- 保存系统日志到本地进行查看:`cd /var/log/ && sz messages`
+- 其中可以看到这样的几句话(可以知道内存不够了):
+
+```
+Jul  6 21:49:14 VM_123_201_centos kernel: Out of memory: Kill process 19452 (httpd) score 36 or sacrifice child
+Jul  6 21:49:14 VM_123_201_centos kernel: Killed process 19452 (httpd) total-vm:516404kB, anon-rss:36088kB, file-rss:168kB, shmem-rss:12kB
+```
+
+- 对于 1G 内存的机子,MySQL(5.6.35)建议重点调整下面这些配置:
+
+```
+[mysqld]
+table_definition_cache=400
+table_open_cache=256
+innodb_buffer_pool_size = 64M
+max_connections = 100
+```
+
+- 增加 swap(云服务基本都是没 swap 的)
+- 分别执行下面 shell 命令:
+
+```
+dd if=/dev/zero of=/swapfile bs=1M count=1024
+mkswap /swapfile
+swapon /swapfile
+```
+
+- 修改配置文件:`vim /etc/fstab`
+    - 添加这句在文件最后一行:`/swapfile swap swap defaults 0 0`
+- 重启机子:`reboot`
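
- 做完上面几步后,可以用下面两个常用命令确认 swap 是否真的启用(示意,输出以各自机器为准):

```
free -m      # Swap 这一行的 total 不再是 0,说明已启用
swapon -s    # 列出当前启用的 swap 文件或分区
```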
From 63d5c23872c6e09708bb15828e22948a96f6c2a4 Mon Sep 17 00:00:00 2001
From: judasn
Date: Wed, 11 Jul 2018 14:07:07 +0800
Subject: =?UTF-8?q?2018-07-11=20=E8=A1=A5=E5=85=85=20nginx?=
 =?UTF-8?q?=20log=20=E5=88=87=E5=88=86?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 markdown-file/Nginx-Install-And-Settings.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/markdown-file/Nginx-Install-And-Settings.md b/markdown-file/Nginx-Install-And-Settings.md
index 9fe3ebcf..58a04b8c 100644
--- a/markdown-file/Nginx-Install-And-Settings.md
+++ b/markdown-file/Nginx-Install-And-Settings.md
@@ -684,11 +684,12 @@ location ~ .*$ {
 
 ### Nginx 自动分割日志文件
 
 - 在 [Tomcat 安装和配置、优化](Tomcat-Install-And-Settings.md) 文章已经使用了 cronolog,这里也借用 cronolog 来实现分割。具体安装看文章。
+- 创建目录:`mkdir -p /data/nginx/log/logs`
 - 创建命名管道:`mkfifo /data/nginx/log/access_log.log`
 - 配置 cronolog(按天):`nohup cat /data/nginx/log/access_log.log | /usr/sbin/cronolog /data/nginx/log/logs/access-%Y-%m-%d.log &`
 - 配置 cronolog(按月):`nohup cat /data/nginx/log/access_log.log | /usr/sbin/cronolog /data/nginx/log/logs/access-%Y-%m.log &`
 - 编辑 nginx 配置文件,配置 log 位置:`access_log /data/nginx/log/access_log.log;`
-- 重启 nginx
+- 重启 nginx,最终可以在 /data/nginx/log/logs 目录下看到生成的 log
 
 ### Nginx 处理跨域请求
 

From 1db8117b1e76c5ac922fbe35814e0b38616d02b0 Mon Sep 17 00:00:00 2001
From: judasn
Date: Wed, 11 Jul 2018 22:32:37 +0800
Subject: [PATCH 054/330] 2018-07-11

---
 markdown-file/Portainer-Install-And-Settings.md | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/markdown-file/Portainer-Install-And-Settings.md b/markdown-file/Portainer-Install-And-Settings.md
index 308efe4b..70eef2c8 100644
--- a/markdown-file/Portainer-Install-And-Settings.md
+++ b/markdown-file/Portainer-Install-And-Settings.md
@@ -36,7 +36,20 @@
 
 ## 远程监控配置
 
-- 待完善
+- **以下方法为了方便,没有做任何安全措施,请用于内网**
+- 关掉防火墙
+- 修改远程 Docker 配置:`vim /usr/lib/systemd/system/docker.service`
+
+```
+旧值:
+ExecStart=/usr/bin/dockerd
+
+新值:
+ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock
+```
+
+- 重启 Docker:`systemctl daemon-reload && systemctl reload docker && systemctl restart docker`
+- Portainer 启动选择 Remote,填写远程 IP 和端口,比如:`192.168.1.3:2375`
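
- 按上面改完远程 Docker 配置后,建议先在任意一台装有 docker 客户端的机子上确认 2375 远程端口已经生效(示意,IP 换成你自己的远程机器):

```
# 能正常返回服务端版本信息,说明远程 API 已放开,Portainer 就能连上
docker -H tcp://192.168.1.3:2375 version
```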
 ## 资料

From f209a901b5e4f969181d692f4548f79b4fb59709 Mon Sep 17 00:00:00 2001
From: judasn
Date: Fri, 13 Jul 2018 17:13:40 +0800
Subject: =?UTF-8?q?2018-07-13=20=E4=BF=AE=E6=94=B9?=
 =?UTF-8?q?=E8=84=9A=E6=9C=AC?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 favorite-file/shell/install_redis_centos7.sh  | 12 ++++--------
 favorite-file/shell/install_tomcat_centos7.sh | 12 ++++--------
 2 files changed, 8 insertions(+), 16 deletions(-)

diff --git a/favorite-file/shell/install_redis_centos7.sh b/favorite-file/shell/install_redis_centos7.sh
index c87bb527..ec7246dd 100644
--- a/favorite-file/shell/install_redis_centos7.sh
+++ b/favorite-file/shell/install_redis_centos7.sh
@@ -7,10 +7,6 @@ if [ ! -d "/opt/setups" ]; then
     mkdir /opt/setups
 fi
 
-if [ ! -d "/usr/program" ]; then
-    mkdir /usr/program
-fi
-
 echo "下载 redis"
 
 cd /opt/setups
@@ -37,15 +33,15 @@ if [ ! -d "/opt/setups/redis-4.0.6" ]; then
     exit 1
 fi
 
-mv redis-4.0.6/ /usr/program/
+mv redis-4.0.6/ /usr/local/
 
-cd /usr/program/redis-4.0.6
+cd /usr/local/redis-4.0.6
 
 make
 
 make install
 
-cp /usr/program/redis-4.0.6/redis.conf /etc/
+cp /usr/local/redis-4.0.6/redis.conf /etc/
 
 sed -i 's/daemonize no/daemonize yes/g' /etc/redis.conf
 
@@ -55,6 +51,6 @@ echo "防火墙放行 6379 端口"
 firewall-cmd --zone=public --add-port=6379/tcp --permanent
 firewall-cmd --reload
 
-rm -rf /usr/program/redis-4.0.6
+rm -rf /usr/local/redis-4.0.6
 
 echo "安装结束"
diff --git a/favorite-file/shell/install_tomcat_centos7.sh b/favorite-file/shell/install_tomcat_centos7.sh
index fa7b50b8..c85aad61 100644
--- a/favorite-file/shell/install_tomcat_centos7.sh
+++ b/favorite-file/shell/install_tomcat_centos7.sh
@@ -6,10 +6,6 @@ if [ ! -d "/opt/setups" ]; then
     mkdir /opt/setups
 fi
 
-if [ ! -d "/usr/program" ]; then
-    mkdir /usr/program
-fi
-
 echo "下载 Tomcat"
 
 cd /opt/setups
@@ -28,13 +24,13 @@ if [ ! -d "/opt/setups/apache-tomcat-8.0.46" ]; then
     exit 1
 fi
 
-echo "Tomcat 解压包移到 /usr/program/ 目录下"
-mv apache-tomcat-8.0.46/ /usr/program/
-mv /usr/program/apache-tomcat-8.0.46/ /usr/program/tomcat8/
+echo "Tomcat 解压包移到 /usr/local/ 目录下"
+mv apache-tomcat-8.0.46/ /usr/local/
+mv /usr/local/apache-tomcat-8.0.46/ /usr/local/tomcat8/
 
 echo "防火墙放行 8080 端口"
 firewall-cmd --zone=public --add-port=8080/tcp --permanent
 firewall-cmd --reload
 
 echo "运行 Tomcat"
-sh /usr/program/tomcat8/bin/startup.sh ; tail -200f /usr/program/tomcat8/logs/catalina.out
\ No newline at end of file
+sh /usr/local/tomcat8/bin/startup.sh ; tail -200f /usr/local/tomcat8/logs/catalina.out
\ No newline at end of file

From 9452923a88fae71d1631ae61dc120b77c415c50a Mon Sep 17 00:00:00 2001
From: judasn
Date: Fri, 13 Jul 2018 17:39:48 +0800
Subject: =?UTF-8?q?2018-07-13=20=E4=BF=AE=E6=94=B9?=
 =?UTF-8?q?=E8=84=9A=E6=9C=AC?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../shell/install_tomcat_offline_centos7.sh | 38 +++++++++++++++++++
 1 file changed, 38 insertions(+)
 create mode 100644 favorite-file/shell/install_tomcat_offline_centos7.sh

diff --git a/favorite-file/shell/install_tomcat_offline_centos7.sh b/favorite-file/shell/install_tomcat_offline_centos7.sh
new file mode 100644
index 00000000..563756b6
--- /dev/null
+++ b/favorite-file/shell/install_tomcat_offline_centos7.sh
@@ -0,0 +1,38 @@
+#!/bin/sh
+
+echo "判断常见的文件夹是否存在"
+
+if [ ! -d "/opt/setups" ]; then
+    mkdir /opt/setups
+fi
+
+
+echo "判断 tomcat 压缩包是否存在"
+
+if [ ! -f "/opt/setups/apache-tomcat-8.0.46.tar.gz" ]; then
+    echo "Tomcat 压缩包不存在"
+    exit 1
+fi
+
+
+cd /opt/setups
+
+echo "开始解压 Tomcat"
+
+tar -zxf apache-tomcat-8.0.46.tar.gz
+
+if [ ! -d "/opt/setups/apache-tomcat-8.0.46" ]; then
+    echo "Tomcat 解压失败,结束脚本"
+    exit 1
+fi
+
+echo "Tomcat 解压包移到 /usr/local/ 目录下"
+mv apache-tomcat-8.0.46/ /usr/local/
+mv /usr/local/apache-tomcat-8.0.46/ /usr/local/tomcat8/
+
+echo "防火墙放行 8080 端口"
+firewall-cmd --zone=public --add-port=8080/tcp --permanent
+firewall-cmd --reload
+
+echo "运行 Tomcat"
+sh /usr/local/tomcat8/bin/startup.sh ; tail -200f /usr/local/tomcat8/logs/catalina.out
\ No newline at end of file
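
- 脚本跑完、看到 catalina.out 开始滚动之后,可以另开一个终端简单验证 Tomcat 是否已经起来(示意,按脚本默认的 8080 端口):

```
# 返回 HTTP/1.1 200 即表示 Tomcat 默认页已可访问
curl -I http://127.0.0.1:8080
```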
From 0af47541f9181ecf453776ac22a2f626b63a5dcd Mon Sep 17 00:00:00 2001
From: judasn
Date: Tue, 17 Jul 2018 11:23:59 +0800
Subject: =?UTF-8?q?2018-07-17=20=E8=A1=A5=E5=85=85=20dmesg?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 markdown-file/monitor.md | 23 +++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md
index da7cf05e..c6c6a95d 100644
--- a/markdown-file/monitor.md
+++ b/markdown-file/monitor.md
@@ -33,6 +33,11 @@ root pts/0 116.21.24.85 11:57 4.00s 16:18 0.01s w
 - 查看 CPU 总的线程数:`grep 'processor' /proc/cpuinfo | sort -u | wc -l`
 - 第二行:
     - 开始表示各个登录用户的情况,当前登录者是 root,登录者 IP 116.21.24.85
+- 还有一个简化版本的命令:`uptime`
+
+```
+10:56:16 up 26 days, 20:05, 1 user, load average: 0.00, 0.01, 0.05
+```
 
 #### 命令:vmstat(判断 RAM 和 I/O 瓶颈)
 
@@ -513,6 +518,24 @@ Address: 180.97.33.107
 
 ---------------------------------------------------------------------
 
+## 查看 Linux 内核版本
+
+- 对于一些复杂的层面问题,一般都要先确认内核版本,好帮助分析:`uname -r`
+
+```
+3.10.0-693.2.2.el7.x86_64
+```
+
+
+## dmesg 打印内核信息
+
+- 开机信息存在:`tail -500f /var/log/dmesg`
+- 查看尾部信息:`dmesg -T | tail`
+    - 参数 `-T` 表示显示时间
+- 只显示 error 和 warning 信息:`dmesg --level=err,warn -T`
+
+---------------------------------------------------------------------
+
 ## 参考资料
 
 -

From 6d72a5cadf188a9384fdf632d4c74b368e703756 Mon Sep 17 00:00:00 2001
From: judasn
Date: Tue, 17 Jul 2018 12:09:23 +0800
Subject: =?UTF-8?q?2018-07-17=20=E8=A1=A5=E5=85=85=20dmesg?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 markdown-file/monitor.md | 178 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 176 insertions(+), 2 deletions(-)

diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md
index c6c6a95d..573d38cd 100644
--- a/markdown-file/monitor.md
+++ b/markdown-file/monitor.md
@@ -183,6 +183,59 @@
 - `txcmp/s`:每秒钟发送出去的压缩包数目
 - `txmcst/s`:每秒钟接收到的多播包的包数目
 
+- 查看 TCP 相关的一些数据(每隔 1 秒采样一次,一共 5 次):`sar -n TCP,ETCP 1 5`
+
+```
+Linux 3.10.0-693.2.2.el7.x86_64 (youmeek)  07/17/2018  _x86_64_  (2 CPU)
+
+12:05:47 PM  active/s passive/s    iseg/s    oseg/s
+12:05:48 PM      0.00      0.00      1.00      0.00
+
+12:05:47 PM  atmptf/s  estres/s retrans/s isegerr/s   orsts/s
+12:05:48 PM      0.00      0.00      0.00      0.00      0.00
+
+12:05:48 PM  active/s passive/s    iseg/s    oseg/s
+12:05:49 PM      0.00      0.00      1.00      1.00
+
+12:05:48 PM  atmptf/s  estres/s retrans/s isegerr/s   orsts/s
+12:05:49 PM      0.00      0.00      0.00      0.00      0.00
+
+12:05:49 PM  active/s passive/s    iseg/s    oseg/s
+12:05:50 PM      0.00      0.00      1.00      1.00
+
+12:05:49 PM  atmptf/s  estres/s retrans/s isegerr/s   orsts/s
+12:05:50 PM      0.00      0.00      0.00      0.00      0.00
+
+12:05:50 PM  active/s passive/s    iseg/s    oseg/s
+12:05:51 PM      0.00      0.00      3.00      3.00
+
+12:05:50 PM  atmptf/s  estres/s retrans/s isegerr/s   orsts/s
+12:05:51 PM      0.00      0.00      0.00      0.00      0.00
+
+12:05:51 PM  active/s passive/s    iseg/s    oseg/s
+12:05:52 PM      0.00      0.00      1.00      1.00
+
+12:05:51 PM  atmptf/s  estres/s retrans/s isegerr/s   orsts/s
+12:05:52 PM      0.00      0.00      0.00      0.00      0.00
+
+Average:     active/s passive/s    iseg/s    oseg/s
+Average: 0.00 0.00 1.40 1.20 + +Average: atmptf/s estres/s retrans/s isegerr/s orsts/s +Average: 0.00 0.00 0.00 0.00 0.00 +``` + + +``` +- active/s:每秒钟本地主动开启的 tcp 连接,也就是本地程序使用 connect() 系统调用 +- passive/s:每秒钟从源端发起的 tcp 连接,也就是本地程序使用 accept() 所接受的连接 +- retrans/s: 每秒钟的 tcp 重传次数 + +atctive 和 passive 的数目通常可以用来衡量服务器的负载:接受连接的个数(passive),下游连接的个数(active)。可以简单认为 active 为出主机的连接,passive 为入主机的连接;但这个不是很严格的说法,比如 loalhost 和 localhost 之间的连接。 + +来自:https://zhuanlan.zhihu.com/p/39893236 +``` + --------------------------------------------------------------------- ## CPU 监控 @@ -222,6 +275,7 @@ Average: 0.50 0.00 0.50 0.00 8.94 - [htop 命令详解](https://blog.csdn.net/freeking101/article/details/79173903) - mpstat 实时监控 CPU 状态:`yum install -y sysstat` - 可以具体到某个核心,比如我有 2 核的 CPU,因为 CPU 核心下标是从 0 开始,所以我要查看 0 的状况(间隔 3 秒获取一次指标,一共获取 5 次):`mpstat -P 0 3 5` + - 打印总 CPU 和各个核心指标:`mpstat -P ALL 1` - 获取所有核心的平均值:`mpstat 3 5` ``` @@ -241,6 +295,50 @@ Average: 0 0.20 0.00 0.20 0.00 0.00 0.00 0.00 0.00 - %iowait 表示 CPU 等待 IO 时间占整个 CPU 周期的百分比 - %idle 显示 CPU 空闲时间占用 CPU 总时间的百分比 +#### 类似 top 的 pidstat + +- 安装:`yum install -y sysstat` +- 每隔 2 秒采样一次,一共 5 次:`pidstat 2 5` + +``` +Linux 3.10.0-693.el7.x86_64 (youmeek) 07/17/2018 _x86_64_ (8 CPU) + +11:52:58 AM UID PID %usr %system %guest %CPU CPU Command +11:53:00 AM 0 16813 0.50 0.99 0.00 1.49 1 pidstat +11:53:00 AM 0 24757 50.99 12.87 0.00 63.86 0 java +11:53:00 AM 0 24799 60.40 3.47 0.00 63.86 5 java +11:53:00 AM 0 24841 99.50 7.43 0.00 100.00 0 java + +11:53:00 AM UID PID %usr %system %guest %CPU CPU Command +11:53:02 AM 0 24757 56.50 0.50 0.00 57.00 0 java +11:53:02 AM 0 24799 100.00 6.50 0.00 100.00 5 java +11:53:02 AM 0 24841 58.00 2.50 0.00 60.50 0 java + +11:53:02 AM UID PID %usr %system %guest %CPU CPU Command +11:53:04 AM 0 16813 0.00 1.00 0.00 1.00 2 pidstat +11:53:04 AM 0 24757 62.00 5.50 0.00 67.50 0 java +11:53:04 AM 0 24799 54.00 14.00 0.00 68.00 5 java +11:53:04 AM 0 24841 39.50 9.00 0.00 48.50 0 java + +11:53:04 AM UID PID %usr %system %guest %CPU CPU Command +11:53:06 AM 0 16813 0.50 0.50 0.00 1.00 2 pidstat +11:53:06 AM 0 24757 80.00 13.50 0.00 93.50 0 java +11:53:06 AM 0 24799 56.50 0.50 0.00 57.00 5 java +11:53:06 AM 0 24841 1.00 0.50 0.00 1.50 0 java + +11:53:06 AM UID PID %usr %system %guest %CPU CPU Command +11:53:08 AM 0 16813 0.00 0.50 0.00 0.50 2 pidstat +11:53:08 AM 0 24757 58.50 1.00 0.00 59.50 0 java +11:53:08 AM 0 24799 60.00 1.50 0.00 61.50 5 java +11:53:08 AM 0 24841 1.00 0.50 0.00 1.50 0 java + +Average: UID PID %usr %system %guest %CPU CPU Command +Average: 0 16813 0.20 0.60 0.00 0.80 - pidstat +Average: 0 24757 61.58 6.69 0.00 68.26 - java +Average: 0 24799 66.47 5.19 0.00 71.66 - java +Average: 0 24841 39.92 3.99 0.00 43.91 - java +``` + --------------------------------------------------------------------- @@ -277,6 +375,41 @@ Total: 16080 15919 160 - 以上的结果重点关注是:`-/+ buffers/cache`,这一行代表实际使用情况。 + +##### pidstat 采样内存使用情况 + +- 安装:`yum install -y sysstat` +- 每隔 2 秒采样一次,一共 3 次:`pidstat -r 2 3` + +``` +Linux 3.10.0-693.el7.x86_64 (youmeek) 07/17/2018 _x86_64_ (8 CPU) + +11:56:34 AM UID PID minflt/s majflt/s VSZ RSS %MEM Command +11:56:36 AM 0 23960 168.81 0.00 108312 1124 0.01 pidstat +11:56:36 AM 0 24757 8.42 0.00 9360696 3862788 23.75 java +11:56:36 AM 0 24799 8.91 0.00 10424088 4988468 30.67 java +11:56:36 AM 0 24841 11.39 0.00 10423576 4968428 30.54 java + +11:56:36 AM UID PID minflt/s majflt/s VSZ RSS %MEM Command +11:56:38 AM 0 23960 169.50 0.00 108312 1200 0.01 pidstat +11:56:38 AM 0 24757 6.00 0.00 9360696 3862788 23.75 java +11:56:38 AM 0 24799 5.50 
0.00 10424088 4988468 30.67 java +11:56:38 AM 0 24841 7.00 0.00 10423576 4968428 30.54 java + +11:56:38 AM UID PID minflt/s majflt/s VSZ RSS %MEM Command +11:56:40 AM 0 23960 160.00 0.00 108312 1200 0.01 pidstat +11:56:40 AM 0 24757 6.50 0.00 9360696 3862788 23.75 java +11:56:40 AM 0 24799 6.00 0.00 10424088 4988468 30.67 java +11:56:40 AM 0 24841 8.00 0.00 10423576 4968428 30.54 java + +Average: UID PID minflt/s majflt/s VSZ RSS %MEM Command +Average: 0 23960 166.11 0.00 108312 1175 0.01 pidstat +Average: 0 24757 6.98 0.00 9360696 3862788 23.75 java +Average: 0 24799 6.81 0.00 10424088 4988468 30.67 java +Average: 0 24841 8.80 0.00 10423576 4968428 30.54 java +``` + + --------------------------------------------------------------------- ## 硬盘监控 @@ -321,8 +454,8 @@ vda 0.00 0.00 0.00 1.68 0.00 16.16 19.20 0 - `rkB/s`: 每秒读数据量(kB为单位) - `wkB/s`: 每秒写数据量(kB为单位) - `avgrq-sz`:平均每次IO操作的数据量(扇区数为单位) - - `avgqu-sz`: 平均等待处理的IO请求队列长度 - - `await`: 平均每次IO请求等待时间(包括等待时间和处理时间,毫秒为单位) + - `avgqu-sz`: 平均等待处理的IO请求队列长度(队列长度大于 1 表示设备处于饱和状态。) + - `await`: 系统发往 IO 设备的请求的平均响应时间(毫秒为单位)。这包括请求排队的时间,以及请求处理的时间。超过经验值的平均响应时间表明设备处于饱和状态,或者设备有问题。 - `svctm`: 平均每次IO请求的处理时间(毫秒为单位) - `%util`: 采用周期内用于IO操作的时间比率,即IO队列非空的时间比率(就是繁忙程度,值越高表示越繁忙) - **总结** @@ -366,6 +499,39 @@ Timing cached reads: 3462 MB in 2.00 seconds = 1731.24 MB/sec Timing buffered disk reads: 806 MB in 3.00 seconds = 268.52 MB/sec ``` + +##### pidstat 采样硬盘使用情况 + +- 安装:`yum install -y sysstat` +- 每隔 2 秒采样一次,一共 3 次:`pidstat -d 2 3` + +``` +Linux 3.10.0-693.el7.x86_64 (youmeek) 07/17/2018 _x86_64_ (8 CPU) + +11:57:29 AM UID PID kB_rd/s kB_wr/s kB_ccwr/s Command + +11:57:31 AM UID PID kB_rd/s kB_wr/s kB_ccwr/s Command +11:57:33 AM 0 24757 0.00 2.00 0.00 java +11:57:33 AM 0 24799 0.00 14.00 0.00 java + +11:57:33 AM UID PID kB_rd/s kB_wr/s kB_ccwr/s Command +11:57:35 AM 0 24841 0.00 8.00 0.00 java + +Average: UID PID kB_rd/s kB_wr/s kB_ccwr/s Command +Average: 0 24757 0.00 0.66 0.00 java +Average: 0 24799 0.00 4.65 0.00 java +Average: 0 24841 0.00 2.66 0.00 java +``` + +- 输出指标含义: + +``` +kB_rd/s: 每秒进程从磁盘读取的数据量(以 kB 为单位) +kB_wr/s: 每秒进程向磁盘写的数据量(以 kB 为单位) +kB_ccwr/s:任务取消的写入磁盘的 KB。当任务截断脏的 pagecache 的时候会发生。 +``` + + --------------------------------------------------------------------- @@ -533,6 +699,13 @@ Address: 180.97.33.107 - 查看尾部信息:`dmesg -T | tail` - 参数 `-T` 表示显示时间 - 只显示 error 和 warning 信息:`dmesg --level=err,warn -T` +- 有些 OOM 的错误会在这里显示,比如: + +``` +[1880957.563400] Out of memory: Kill process 18694 (perl) score 246 or sacrifice child +[1880957.563408] Killed process 18694 (perl) total-vm:1972392kB, anon-rss:1953348kB, file-rss:0kB +``` + --------------------------------------------------------------------- @@ -544,6 +717,7 @@ Address: 180.97.33.107 - - - +- From f209a901b5e4f969181d692f4548f79b4fb59709 Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 23 Jul 2018 15:49:23 +0800 Subject: [PATCH 059/330] 2018-07-23 --- favorite-file/shell/install_common_tool_ubuntu.sh | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 favorite-file/shell/install_common_tool_ubuntu.sh diff --git a/favorite-file/shell/install_common_tool_ubuntu.sh b/favorite-file/shell/install_common_tool_ubuntu.sh new file mode 100644 index 00000000..3d808206 --- /dev/null +++ b/favorite-file/shell/install_common_tool_ubuntu.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +echo "开始常用工具安装" + +sudo apt-get install -y zip unzip unrar lrzsz git wget htop + +echo "开始常用工具结束" \ No newline at end of file From e61d4350466dee08cb54b5ab2496cce86f7ae179 Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 23 Jul 2018 16:41:54 
+0800 Subject: [PATCH 060/330] 2018-07-23 --- markdown-file/SSR-Client-Ubuntu.md | 90 ++++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) create mode 100644 markdown-file/SSR-Client-Ubuntu.md diff --git a/markdown-file/SSR-Client-Ubuntu.md b/markdown-file/SSR-Client-Ubuntu.md new file mode 100644 index 00000000..487805c1 --- /dev/null +++ b/markdown-file/SSR-Client-Ubuntu.md @@ -0,0 +1,90 @@ +# SSR Ubuntu 客户端 + +## 介绍 + +- 因为某些原因,这个东西不做过多解释 + +## 安装 + +- 需要 Git 环境: +- 需要 Python 2 环境: +- 官网脚本: +- 我们这里使用别人提供的文件,如果该文件被屏蔽,就自行用上面官网的文件。 + +``` +wget http://www.djangoz.com/ssr + +sudo mv ssr /usr/local/bin + +sudo chmod 766 /usr/local/bin/ssr + +ssr install +``` + +- 配置:`ssr config` + - 这是一个 vim 的配置界面,也可以直接编辑其源文件: +- 主要修改如下内容: + +``` +"server":"12.26.68.99", //服务器ip +"server_port":9191, //端口 +"password":"123456", //密码 +"protocol":"auth_sha1_v4", //协议插件 +"obfs":"http_simple", //混淆插件 +"method":"aes-256-cfb", //加密方式 +``` + + +- 启动:`ssr start` +- 其他常用命令: + - `ssr stop` + - `ssr help` +- 然后就可以用 Chrome 的 SwitchyOmega + +## 配置终端代理 polipo + +- 安装:`sudo apt-get install polipo` +- 修改配置(一般不要变动,直接复制上去即可):`sudo vim /etc/polipo/config` + +``` +# This file only needs to list configuration variables that deviate +# from the default values. See /usr/share/doc/polipo/examples/config.sample +# and "polipo -v" for variables you can tweak and further information. + +logSyslog = true +logFile = /var/log/polipo/polipo.log + +proxyAddress = "0.0.0.0" + +socksParentProxy = "127.0.0.1:1080" +socksProxyType = socks5 + +chunkHighMark = 50331648 +objectHighMark = 16384 + +serverMaxSlots = 64 +serverSlots = 16 +serverSlots1 = 32 +``` + +- 重启:`sudo service polipo restart` + +#### 开始测试 polipo + +- 获取自己当前 IP:`curl ip.gs` + - 这时候应该是国内 IP + +- **开始使用代理**:`export http_proxy=http://127.0.0.1:8123` + +- 获取自己当前 IP:`curl ip.gs` + - 这时候应该是国外 IP + +- **取消代理**:`unset http_proxy` + +- 获取自己当前 IP:`curl ip.gs` + - 这时候应该是国内 IP +- 另外:在浏览器中输入 便可以进入到 Polipo 的使用说明和配置界面。 + +## 材料 + +- \ No newline at end of file From d6503b9fdec26ab45447ab7a85583f8453b255c9 Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 23 Jul 2018 16:48:14 +0800 Subject: [PATCH 061/330] 2018-07-23 --- markdown-file/Python-Ubuntu.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 markdown-file/Python-Ubuntu.md diff --git a/markdown-file/Python-Ubuntu.md b/markdown-file/Python-Ubuntu.md new file mode 100644 index 00000000..c7b8161b --- /dev/null +++ b/markdown-file/Python-Ubuntu.md @@ -0,0 +1,18 @@ +# Python Ubuntu 安装 + +## 为 Python 开发环境准备 + +#### 给 root 用户安装 Python + +- 切换用户:`sudo su -` +- 安装 Python 2:`apt-get install python` +- 安装 Python 3:`apt-get install python3` +- 查看版本: + - `python --version` + - `python3 --version` + +#### 给非 root 用户安装 Anaconda(Python 3 版本) + +- 官网下载: +- 进入命令行交互安装:`sh Anaconda3-5.2.0-Linux-x86_64.sh` + From 0dc9767709cf45d674653e3665ac6c8125b4251d Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 23 Jul 2018 17:05:05 +0800 Subject: [PATCH 062/330] 2018-07-23 --- ubuntu-settings/Ubuntu-Create-Desktop.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/ubuntu-settings/Ubuntu-Create-Desktop.md b/ubuntu-settings/Ubuntu-Create-Desktop.md index fb5bdedf..475babec 100644 --- a/ubuntu-settings/Ubuntu-Create-Desktop.md +++ b/ubuntu-settings/Ubuntu-Create-Desktop.md @@ -2,16 +2,16 @@ ## 创建图标文件 -- 我们建设以创建 Eclipse 程序图标为例 +- 我们建设以创建 pycharm 程序图标为例 - 进入图标存放目录|:`cd /usr/share/applications` -- 创建文件并编辑:`sudo gedit eclipse.desktop` +- 创建文件并编辑:`sudo gedit pycharm.desktop` ``` ini [Desktop Entry] 
-Name=eclipse -Name[zh_CN]=eclipse -Comment=eclipse Client -Exec=/usr/programa/tools/eclipse/eclipse -Icon=/usr/programa/tools/eclipse/icon.xpm +Name=Pycharm +Name[zh_CN]=Pycharm +Comment=Pycharm3:The Python IDE +Exec=/pycharm-community-2017.1.1/bin/pycharm.sh +Icon=/pycharm-community-2017.1.1/bin/pycharm.png Terminal=false Type=Application Categories=Application; @@ -23,7 +23,7 @@ StartupNotify=true - Comment 为说明。 - Exec 为程序执行位置 - Icon 为图标所在路径 -- 最后,打开 Dash,在顶部搜索框搜索 **eclipse**,此时你应该能搜到它,先单击试一下看能不能打开,如果可以打开,拖到该图标启动器上,下次就可以直接从启动器打开了 +- 最后,打开 Dash,在顶部搜索框搜索 **pycharm**,此时你应该能搜到它,先单击试一下看能不能打开,如果可以打开,拖到该图标启动器上,下次就可以直接从启动器打开了 ## 资料 From d8310ffaa45d74b29a1d5d68b35e901f2900bf32 Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 23 Jul 2018 17:06:53 +0800 Subject: [PATCH 063/330] 2018-07-23 --- .../ubuntu-settings}/Network-Settings.md | 0 .../ubuntu-settings}/Ubuntu-Create-Desktop.md | 0 .../ubuntu-settings}/Ubuntu-Extra-Packages.md | 0 .../ubuntu-settings}/Ubuntu-Popular-Settings.md | 0 .../ubuntu-settings}/ubuntu-settings-toc.md | 0 5 files changed, 0 insertions(+), 0 deletions(-) rename {ubuntu-settings => markdown-file/ubuntu-settings}/Network-Settings.md (100%) rename {ubuntu-settings => markdown-file/ubuntu-settings}/Ubuntu-Create-Desktop.md (100%) rename {ubuntu-settings => markdown-file/ubuntu-settings}/Ubuntu-Extra-Packages.md (100%) rename {ubuntu-settings => markdown-file/ubuntu-settings}/Ubuntu-Popular-Settings.md (100%) rename {ubuntu-settings => markdown-file/ubuntu-settings}/ubuntu-settings-toc.md (100%) diff --git a/ubuntu-settings/Network-Settings.md b/markdown-file/ubuntu-settings/Network-Settings.md similarity index 100% rename from ubuntu-settings/Network-Settings.md rename to markdown-file/ubuntu-settings/Network-Settings.md diff --git a/ubuntu-settings/Ubuntu-Create-Desktop.md b/markdown-file/ubuntu-settings/Ubuntu-Create-Desktop.md similarity index 100% rename from ubuntu-settings/Ubuntu-Create-Desktop.md rename to markdown-file/ubuntu-settings/Ubuntu-Create-Desktop.md diff --git a/ubuntu-settings/Ubuntu-Extra-Packages.md b/markdown-file/ubuntu-settings/Ubuntu-Extra-Packages.md similarity index 100% rename from ubuntu-settings/Ubuntu-Extra-Packages.md rename to markdown-file/ubuntu-settings/Ubuntu-Extra-Packages.md diff --git a/ubuntu-settings/Ubuntu-Popular-Settings.md b/markdown-file/ubuntu-settings/Ubuntu-Popular-Settings.md similarity index 100% rename from ubuntu-settings/Ubuntu-Popular-Settings.md rename to markdown-file/ubuntu-settings/Ubuntu-Popular-Settings.md diff --git a/ubuntu-settings/ubuntu-settings-toc.md b/markdown-file/ubuntu-settings/ubuntu-settings-toc.md similarity index 100% rename from ubuntu-settings/ubuntu-settings-toc.md rename to markdown-file/ubuntu-settings/ubuntu-settings-toc.md From c25bf48e1cf25844dc15ae8ea556318de2cf3ea4 Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 24 Jul 2018 18:33:55 +0800 Subject: [PATCH 064/330] =?UTF-8?q?2018-07-24=20Linux=20=E5=B8=B8=E8=A7=81?= =?UTF-8?q?=E6=A1=8C=E9=9D=A2=E7=8E=AF=E5=A2=83=E6=96=B9=E6=A1=88?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/ubuntu-settings/Ubuntu-UI.md | 40 +++++++++++++++++++ .../ubuntu-settings/ubuntu-settings-toc.md | 8 +--- 2 files changed, 41 insertions(+), 7 deletions(-) create mode 100644 markdown-file/ubuntu-settings/Ubuntu-UI.md diff --git a/markdown-file/ubuntu-settings/Ubuntu-UI.md b/markdown-file/ubuntu-settings/Ubuntu-UI.md new file mode 100644 index 00000000..1691ec83 --- /dev/null +++ 
b/markdown-file/ubuntu-settings/Ubuntu-UI.md @@ -0,0 +1,40 @@ + + +## Linux 常见桌面环境方案 + +- [GNOME](https://www.gnome.org/) +- [KDE](https://www.kde.org/) +- [Unity8](https://ubports.com/zh_CN/blog/1/post/unity8-on-the-desktop-95) +- MATE +- Cinnamon +- Lxde +- Xfce + +## Ubuntu 18.04 桌面环境方案 + +- 使用 GNOME + +## Ubuntu 16.04 桌面环境方案 + +- 使用 Unity + +------------------------------------------------------------ + +## GUI 工具包 + +- [GTK](https://www.gtk.org/) +- [Qt](https://www.qt.io/) + +------------------------------------------------------------ + +## 常见桌面环境方案 与 GUI 工具包关系 + +- GNOME、LXDE、Xfce 使用 GTK 工具包 +- KDE、Unity8 使用 Qt 工具包 + + +## 资料 + +- [自由之选:七大顶尖 Linux 桌面环境比拼](https://linux.cn/article-6021-1.html) +- [自由奔放的 Linux (3)—— Gnome 与 KDE](http://blog.sciencenet.cn/blog-530833-540604.html) +- [自由奔放的 Linux (2) ——Ubuntu 与 Fedora](http://blog.sciencenet.cn/home.php?mod=space&uid=530833&do=blog&id=540366) diff --git a/markdown-file/ubuntu-settings/ubuntu-settings-toc.md b/markdown-file/ubuntu-settings/ubuntu-settings-toc.md index 1b320721..7361c57d 100644 --- a/markdown-file/ubuntu-settings/ubuntu-settings-toc.md +++ b/markdown-file/ubuntu-settings/ubuntu-settings-toc.md @@ -2,13 +2,7 @@ - [Ubuntu 源设置](Ubuntu-Extra-Packages.md) - [Ubuntu 给 Dash 添加程序图标](Ubuntu-Create-Desktop.md) - [Ubuntu 常用设置](Ubuntu-Popular-Settings.md) -- []() -- []() -- []() -- []() -- []() -- []() -- []() +- [Ubuntu UI 基础](Ubuntu-UI.md) From 402eb7f859f16cc24e7d67e08da336ba9b3533a4 Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 24 Jul 2018 18:34:36 +0800 Subject: [PATCH 065/330] =?UTF-8?q?2018-07-24=20Linux=20=E5=B8=B8=E8=A7=81?= =?UTF-8?q?=E6=A1=8C=E9=9D=A2=E7=8E=AF=E5=A2=83=E6=96=B9=E6=A1=88?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/ubuntu-settings/Ubuntu-UI.md | 2 +- markdown-file/ubuntu-settings/ubuntu-settings-toc.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/markdown-file/ubuntu-settings/Ubuntu-UI.md b/markdown-file/ubuntu-settings/Ubuntu-UI.md index 1691ec83..2471042e 100644 --- a/markdown-file/ubuntu-settings/Ubuntu-UI.md +++ b/markdown-file/ubuntu-settings/Ubuntu-UI.md @@ -1,6 +1,6 @@ -## Linux 常见桌面环境方案 +## Ubuntu 常见桌面环境方案 - [GNOME](https://www.gnome.org/) - [KDE](https://www.kde.org/) diff --git a/markdown-file/ubuntu-settings/ubuntu-settings-toc.md b/markdown-file/ubuntu-settings/ubuntu-settings-toc.md index 7361c57d..2c437314 100644 --- a/markdown-file/ubuntu-settings/ubuntu-settings-toc.md +++ b/markdown-file/ubuntu-settings/ubuntu-settings-toc.md @@ -2,7 +2,7 @@ - [Ubuntu 源设置](Ubuntu-Extra-Packages.md) - [Ubuntu 给 Dash 添加程序图标](Ubuntu-Create-Desktop.md) - [Ubuntu 常用设置](Ubuntu-Popular-Settings.md) -- [Ubuntu UI 基础](Ubuntu-UI.md) +- [Ubuntu 常见桌面环境方案](Ubuntu-UI.md) From 81b9fae78c03bdb44768af99a5215ee2b81457dc Mon Sep 17 00:00:00 2001 From: "Judas.n" Date: Wed, 25 Jul 2018 12:19:21 +0800 Subject: [PATCH 066/330] Update Tomcat-Install-And-Settings.md --- markdown-file/Tomcat-Install-And-Settings.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/markdown-file/Tomcat-Install-And-Settings.md b/markdown-file/Tomcat-Install-And-Settings.md index 8d188020..6176b3e5 100644 --- a/markdown-file/Tomcat-Install-And-Settings.md +++ b/markdown-file/Tomcat-Install-And-Settings.md @@ -144,6 +144,8 @@ - 模型资料来源: - 配比资料: +- JDK8 配比:[关键系统的JVM参数推荐(2018仲夏版)](https://mp.weixin.qq.com/s/FHY0MelBfmgdRpT4zWF9dQ) +- JDK8 常用配比总结 8G 内存:`CATALINA_OPTS="-Dfile.encoding=UTF-8 -Xms4g -Xmx4g -XX:NewRatio=1 -XX:MetaspaceSize=128m 
-XX:MaxMetaspaceSize=512m"` - Java 的内存模型分为: - Young,年轻代(易被 GC)。Young 区被划分为三部分,Eden 区和两个大小严格相同的 Survivor 区,其中 Survivor 区间中,某一时刻只有其中一个是被使用的,另外一个留做垃圾收集时复制对象用,在 Young 区间变满的时候,minor GC 就会将存活的对象移到空闲的Survivor 区间中,根据 JVM 的策略,在经过几次垃圾收集后,任然存活于 Survivor 的对象将被移动到 Tenured 区间。 - Tenured,终身代。Tenured 区主要保存生命周期长的对象,一般是一些老的对象,当一些对象在 Young 复制转移一定的次数以后,对象就会被转移到 Tenured 区,一般如果系统中用了 application 级别的缓存,缓存中的对象往往会被转移到这一区间。 From 7a8776af32b15722d4cfadf1ebd089b13638cf78 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 27 Jul 2018 14:30:37 +0800 Subject: [PATCH 067/330] 2018-07-27 --- markdown-file/Bash.md | 1 + 1 file changed, 1 insertion(+) diff --git a/markdown-file/Bash.md b/markdown-file/Bash.md index 7c3bb9e8..82e7cfeb 100644 --- a/markdown-file/Bash.md +++ b/markdown-file/Bash.md @@ -17,6 +17,7 @@ - `jobs`,查看后台运行的程序列表 - `ifconfig`,查看内网 IP 等信息(常用) - `curl ifconfig.me`,查看外网 IP 信息 +- `curl ip.cn`,查看外网 IP 信息 - `locate 搜索关键字`,快速搜索系统文件/文件夹(类似 Windows 上的 everything 索引式搜索)(常用) - `updatedb`,配合上面的 locate,给 locate 的索引更新(locate 默认是一天更新一次索引)(常用) - `date`,查看系统时间(常用) From b23e4cc1368b22ef5df3726506647322d40be3bb Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 27 Jul 2018 16:01:36 +0800 Subject: [PATCH 068/330] 2018-07-27 --- markdown-file/Sed.md | 21 ++------------------- 1 file changed, 2 insertions(+), 19 deletions(-) diff --git a/markdown-file/Sed.md b/markdown-file/Sed.md index ae5cdea6..fd5b73a6 100644 --- a/markdown-file/Sed.md +++ b/markdown-file/Sed.md @@ -1,6 +1,6 @@ # Sed 常用命令 -- 轻量级流编辑器,一般用来处理文本类文件 +- 轻量级流编辑器,一般用来处理文本类文件 - **sed 是非交互式的编辑器。它不会修改文件,除非使用 shell 重定向来保存结果。默认情况下,所有的输出行都被打印到屏幕上** - **用 sed -i 会实际写入**,下面为了演示,都没加该参数,有需要可以自行添加。 @@ -53,16 +53,6 @@ rootLogger.appenderRef.rolling.ref = rolling - `sed 's/^[0-9][0-9]*//g' /opt/log4j2.properties`:将文件中每一行以数字开头,都替换掉空字符并展示 - `sed '4,6s/^/#/g' /opt/log4j2.properties`:将文件中 4 ~ 6 行添加 # 开头 - `sed '4,6s/^#//g' /opt/log4j2.properties`:将文件中 4 ~ 6 行 # 开头去掉 -- ``: -- ``: -- ``: -- ``: -- ``: -- ``: -- ``: -- ``: -- ``: -- ``: ## 实用例子 @@ -70,14 +60,7 @@ rootLogger.appenderRef.rolling.ref = rolling - `ifconfig eth0 |grep 'inet addr' |sed 's/^.*addr://g' |sed 's/Bcast.*$//g'`:CentOS 6 只显示 IP - `ifconfig ens33 |grep 'inet' |sed 's/^.*inet//g' |sed 's/netmask.*$//g' |sed -n '1p'`:CentOS 7.3 只显示 IP。先用 grep 筛选中包含 inet 的数据。 - `s` 参数开头表示的是搜索替换,`/^.*inet` 表示从开头到 inet 之间,`//` 为空内容,`/g`,表示处理这一行所有匹配的内容。`/netmask.*$` 表示从 netmask 到这一行结束的内容 -- ``: -- ``: -- ``: -- ``: -- ``: -- ``: -- ``: -- ``: + From 08ac0dd1a7878baac1d3cde6aba50cbc715dee9d Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 30 Jul 2018 12:02:08 +0800 Subject: [PATCH 069/330] 2018-07-30 --- README.md | 4 ++++ markdown-file/Bash.md | 12 ++++++++++++ 2 files changed, 16 insertions(+) diff --git a/README.md b/README.md index cdb6e19d..69d0b4f7 100644 --- a/README.md +++ b/README.md @@ -9,6 +9,10 @@ - Gitbook 在线阅读地址: - **学得越多越是明白一个人的力量是不够的,我只是起了一个引子,希望你能一起参与,真心希望!!!(鞠躬)** +## 优秀同行推荐 + +- [Linux工具快速教程](http://linuxtools-rst.readthedocs.io/zh_CN/latest/base/index.html) + ## 目录(Contents) - [Linux 介绍](markdown-file/Linux.md) diff --git a/markdown-file/Bash.md b/markdown-file/Bash.md index 82e7cfeb..1017bb15 100644 --- a/markdown-file/Bash.md +++ b/markdown-file/Bash.md @@ -195,8 +195,20 @@ drwxr-xr-x. 
5 root root 4096 3月 26 10:57,其中最前面的 d 表示这是 - `umount /dev/sdb5`,卸载挂载,用分区名 +## wget 下载文件 + +- 常规下载:`wget http://www.gitnavi.com/index.html` +- 自动断点下载:`wget -c http://www.gitnavi.com/index.html` +- 后台下载:`wget -b http://www.gitnavi.com/index.html` +- 伪装代理名称下载:`wget --user-agent="Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.204 Safari/534.16" http://www.gitnavi.com/index.html` +- 限速下载:`wget --limit-rate=300k http://www.gitnavi.com/index.html` +- 批量下载:`wget -i /opt/download.txt`,一个下载地址一行 +- 后台批量下载:`wget -b -c -i /opt/download.txt`,一个下载地址一行 + + ## 资料 - - +- From 4a426b3297131b276b26eabb9eff2cc4f4918ecf Mon Sep 17 00:00:00 2001 From: judasn Date: Sun, 19 Aug 2018 22:53:32 +0800 Subject: [PATCH 070/330] 2018-08-19 --- markdown-file/Ubuntu-Popular-Software.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/markdown-file/Ubuntu-Popular-Software.md b/markdown-file/Ubuntu-Popular-Software.md index 51af8f8f..25bbac15 100644 --- a/markdown-file/Ubuntu-Popular-Software.md +++ b/markdown-file/Ubuntu-Popular-Software.md @@ -3,6 +3,8 @@ ## 安装软件基础 - 取回更新的软件包列表信息:`sudo apt-get update`,如果安装某个软件报:`Unable to locate package`,就得这样 update 下。 +- 安装本地 deb 文件:`sudo dpkg -i 文件名` + - 安装过程提示缺依赖:`sudo apt-get --fix-broken install` - 查看已经安装了哪些包:`sudo dpkg -l` - 查看已安装列表中是否有 Vim 软件,没有安装则没有数据显示:`sudo dpkg -l | grep vim` - 查看 Vim 软件安装位置:`sudo dpkg -L vim` From b42af936f8c402c7fea6519a24ed5d6505adc462 Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 20 Aug 2018 10:49:10 +0800 Subject: [PATCH 071/330] =?UTF-8?q?2018-08-20=20=E8=A1=A5=E5=85=85=20kali?= =?UTF-8?q?=20linux?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 1 + SUMMARY.md | 1 + TOC.md | 1 + .../kali-linux-basic-settings.md | 75 +++++++++++++++++++ .../kali-linux-settings/kali-linux-install.md | 6 ++ .../kali-linux-settings/kali-linux-toc.md | 25 +++++++ 6 files changed, 109 insertions(+) create mode 100644 markdown-file/kali-linux-settings/kali-linux-basic-settings.md create mode 100644 markdown-file/kali-linux-settings/kali-linux-install.md create mode 100644 markdown-file/kali-linux-settings/kali-linux-toc.md diff --git a/README.md b/README.md index 69d0b4f7..b1cd2fcf 100644 --- a/README.md +++ b/README.md @@ -19,6 +19,7 @@ - [Ubuntu 介绍](markdown-file/Ubuntu.md) - [Ubuntu 安装](markdown-file/Ubuntu-Install.md) - [Ubuntu 设置(目录)](markdown-file/ubuntu-settings/ubuntu-settings-toc.md) +- [Kali Linux 介绍和设置(目录)](markdown-file/kali-linux-settings/kali-linux-toc.md) - [CentOS 介绍](markdown-file/CentOS.md) - [CentOS 6 安装](markdown-file/CentOS-Install.md) - [CentOS 7 安装](markdown-file/CentOS-7-Install.md) diff --git a/SUMMARY.md b/SUMMARY.md index 1abff5bf..65d08a57 100644 --- a/SUMMARY.md +++ b/SUMMARY.md @@ -4,6 +4,7 @@ * [Ubuntu 介绍](markdown-file/Ubuntu.md) * [Ubuntu 安装](markdown-file/Ubuntu-Install.md) * [Ubuntu 设置(目录)](markdown-file/ubuntu-settings/ubuntu-settings-toc.md) +* [Kali Linux 介绍和设置(目录)](markdown-file/kali-linux-settings/kali-linux-toc.md) * [CentOS 介绍](markdown-file/CentOS.md) * [CentOS 6 安装](markdown-file/CentOS-Install.md) * [CentOS 7 安装](markdown-file/CentOS-7-Install.md) diff --git a/TOC.md b/TOC.md index e97c0361..9eea2731 100644 --- a/TOC.md +++ b/TOC.md @@ -2,6 +2,7 @@ - [Ubuntu 介绍](markdown-file/Ubuntu.md) - [Ubuntu 安装](markdown-file/Ubuntu-Install.md) - [Ubuntu 设置(目录)](markdown-file/ubuntu-settings/ubuntu-settings-toc.md) +- [Kali Linux 介绍和设置(目录)](markdown-file/kali-linux-settings/kali-linux-toc.md) - [CentOS 
介绍](markdown-file/CentOS.md) - [CentOS 6 安装](markdown-file/CentOS-Install.md) - [CentOS 7 安装](markdown-file/CentOS-7-Install.md) diff --git a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md new file mode 100644 index 00000000..944a6254 --- /dev/null +++ b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md @@ -0,0 +1,75 @@ +# 基础设置 + + +## 修改源 + +- 编辑配置文件:`vim /etc/apt/sources.list` +- 在文件 **最前面** 添加以下条目,官网文档: + +``` +#中科大的源 +deb https://mirrors.ustc.edu.cn/kali kali-rolling main non-free contrib +deb-src https://mirrors.ustc.edu.cn/kali kali-rolling main non-free contrib +``` + +- `apt-get update` 更新索引 + +## 安装公共工具 + +- `apt-get install -y zip unzip lrzsz git` + + +## 安装 Zsh、Vim + +``` +apt-get install -y zsh + +wget https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh -O - | sh + +chsh -s /bin/zsh root + +apt-get install -y vim + +curl https://raw.githubusercontent.com/wklken/vim-for-server/master/vimrc > ~/.vimrc +``` + +## 安装搜狗输入法 + +- 下载 deb 文件: +- 安装依赖:`apt-get install -y fcitx` +- 安装 deb 文件:`dpkg -i 文件名` + - 应该会提示有部分依赖不存在,则执行:`apt-get --fix-broken install` +- 安装完成后: + - 终端输入:`im-config`,一路确定,选择:`fcitx`,重启电脑,在设置中新增输入法 + +## 安装 Chrome 浏览器 + +- 下载 deb 文件: +- 安装 deb 文件:`dpkg -i 文件名` + - 应该会提示有部分依赖不存在,则执行:`apt-get --fix-broken install` + +## 安装 Visual Studio Code + +- 下载 deb 文件: +- 安装 deb 文件:`dpkg -i 文件名` + - 应该会提示有部分依赖不存在,则执行:`apt-get --fix-broken install` + +## 安装 Peek(Gif 录制) + +- 自己构建 deb 包安装 + +``` +sudo apt install cmake valac libgtk-3-dev libkeybinder-3.0-dev libxml2-utils gettext txt2man + +git clone https://github.com/phw/peek.git +mkdir peek/build +cd peek/build +cmake -DCMAKE_INSTALL_PREFIX=/usr -DGSETTINGS_COMPILE=OFF .. 
+make package + +sudo dpkg -i peek-*-Linux.deb +``` + +## 安装 shutter(截图软件) + +- `apt-get install shutter` \ No newline at end of file diff --git a/markdown-file/kali-linux-settings/kali-linux-install.md b/markdown-file/kali-linux-settings/kali-linux-install.md new file mode 100644 index 00000000..a8a6cb1e --- /dev/null +++ b/markdown-file/kali-linux-settings/kali-linux-install.md @@ -0,0 +1,6 @@ +# Kali Linux 安装 + +- 最新的几个版本安装过程都是类似的,大家可以参考这个视频教程: + - + - 其中,安装第一步选择中文,安装完成就会是中文的,不需要像上面这个视频那么麻烦。 +- 安装过程,比较差的机子差不多要 20 ~ 30 分钟。 diff --git a/markdown-file/kali-linux-settings/kali-linux-toc.md b/markdown-file/kali-linux-settings/kali-linux-toc.md new file mode 100644 index 00000000..d94655e5 --- /dev/null +++ b/markdown-file/kali-linux-settings/kali-linux-toc.md @@ -0,0 +1,25 @@ + +# Kali Linux + +## 介绍 + +- 官网: +- 基于 Debian +- 设计用于数字鉴识和渗透测试,预装了很多渗透测试程序 +- 支持 x86 和 ARM 架构 +- 官网下载: + - 镜像名:Kali Linux 64 Bit,默认是用 GNOME 桌面,比较华丽,相对较卡(好点电脑推荐,习惯 Ubuntu 的基本都会用) + - 镜像名:Kali Linux Kde 64 Bit,默认是用 Kde 桌面,比较华丽,相对较卡 + - 镜像名:Kali Linux Mate 64 Bit,默认是用 Mate 桌面,比较华丽,相对较卡 + - 镜像名:Kali Linux Lxde 64 Bit,默认是用 Lxde 桌面,比较简洁,相对不卡(类似 Windows 风格的桌面,不卡,只是比较难用) + - 镜像名:Kali Linux Xfce 64 Bit,默认是用 Xfce 桌面,比较简洁,相对不卡 + +## 学习过程 + +- [Kali Linux 系统安装](kali-linux-install.md) +- [Kali Linux 基础设置](kali-linux-basic-settings.md) + + +## 其他资料 + +- \ No newline at end of file From e5c98ebaba3d1c6e8e613335c6a1cb819a327c5d Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 20 Aug 2018 10:55:21 +0800 Subject: [PATCH 072/330] =?UTF-8?q?2018-08-20=20=E8=A1=A5=E5=85=85=20kali?= =?UTF-8?q?=20linux?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../kali-linux-settings/kali-linux-basic-settings.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md index 944a6254..2d719d7f 100644 --- a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md +++ b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md @@ -72,4 +72,8 @@ sudo dpkg -i peek-*-Linux.deb ## 安装 shutter(截图软件) -- `apt-get install shutter` \ No newline at end of file +- `apt-get install shutter` + +## 安装 SSR 客户端 + +- 查看: \ No newline at end of file From 1d84e378b0fa8a692e28904ae76cba9334640da6 Mon Sep 17 00:00:00 2001 From: satan31415 <471867900@qq.com> Date: Mon, 20 Aug 2018 22:04:30 +0800 Subject: [PATCH 073/330] Update kali-linux-basic-settings.md --- .../kali-linux-settings/kali-linux-basic-settings.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md index 2d719d7f..c8952d4b 100644 --- a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md +++ b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md @@ -40,7 +40,7 @@ curl https://raw.githubusercontent.com/wklken/vim-for-server/master/vimrc > ~/.v - 安装 deb 文件:`dpkg -i 文件名` - 应该会提示有部分依赖不存在,则执行:`apt-get --fix-broken install` - 安装完成后: - - 终端输入:`im-config`,一路确定,选择:`fcitx`,重启电脑,在设置中新增输入法 + - 终端输入:`im-config`,一路确定,在有一个提示选项中选择:`fcitx`,重启电脑,按 Ctrl + Space 就可以切换输入法 ## 安装 Chrome 浏览器 @@ -76,4 +76,4 @@ sudo dpkg -i peek-*-Linux.deb ## 安装 SSR 客户端 -- 查看: \ No newline at end of file +- 查看: From 1992691952086b2c1859a20d8f592a00aa23cbef Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 20 Aug 2018 23:12:33 +0800 Subject: [PATCH 074/330] 2018-08-20 --- 
.../kali-linux-basic-settings.md | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md index c8952d4b..8b41ed1c 100644 --- a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md +++ b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md @@ -74,6 +74,29 @@ sudo dpkg -i peek-*-Linux.deb - `apt-get install shutter` + +## 升级 firefox + +- 官网下载: +- 假设放在 /opt 目录下,进行解压:`tar xjf firefox-*.tar.bz2` +- 进入图标存放目录|:`cd /usr/share/applications` +- 创建文件并编辑:`sudo vim firefoxnew.desktop` + +``` ini +[Desktop Entry] +Name=firefoxnew +Name[zh_CN]=firefoxnew +Comment=firefox new +Exec=/opt/firefox/firefox +Icon=/opt/firefox/firefox/icons/updater.png +Terminal=false +Type=Application +Categories=Application; +Encoding=UTF-8 +StartupNotify=true +``` + + ## 安装 SSR 客户端 - 查看: From d6ad39dcb46161915245a1bf9897fe60c5a93ef7 Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 20 Aug 2018 23:15:48 +0800 Subject: [PATCH 075/330] 2018-08-20 --- markdown-file/kali-linux-settings/kali-linux-basic-settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md index 8b41ed1c..02810c50 100644 --- a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md +++ b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md @@ -88,7 +88,7 @@ Name=firefoxnew Name[zh_CN]=firefoxnew Comment=firefox new Exec=/opt/firefox/firefox -Icon=/opt/firefox/firefox/icons/updater.png +Icon=/opt/firefox/icons/updater.png Terminal=false Type=Application Categories=Application; From 12ee38558fdee8a5cf33a74ba9d7c161d2b4a5d8 Mon Sep 17 00:00:00 2001 From: "Judas.n" Date: Mon, 20 Aug 2018 23:21:38 +0800 Subject: [PATCH 076/330] Update SSR-Client-Ubuntu.md --- markdown-file/SSR-Client-Ubuntu.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/markdown-file/SSR-Client-Ubuntu.md b/markdown-file/SSR-Client-Ubuntu.md index 487805c1..21028f8c 100644 --- a/markdown-file/SSR-Client-Ubuntu.md +++ b/markdown-file/SSR-Client-Ubuntu.md @@ -40,6 +40,7 @@ ssr install - `ssr stop` - `ssr help` - 然后就可以用 Chrome 的 SwitchyOmega +- AutoProxy:https://raw.githubusercontent.com/gfwlist/gfwlist/master/gfwlist.txt ## 配置终端代理 polipo @@ -87,4 +88,4 @@ serverSlots1 = 32 ## 材料 -- \ No newline at end of file +- From ce51ef404268d7a79a947c286e69084c7acebc71 Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 20 Aug 2018 23:44:55 +0800 Subject: [PATCH 077/330] 2018-08-20 --- .../kali-linux-settings/kali-linux-basic-settings.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md index 02810c50..2ea65c36 100644 --- a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md +++ b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md @@ -72,7 +72,10 @@ sudo dpkg -i peek-*-Linux.deb ## 安装 shutter(截图软件) -- `apt-get install shutter` +``` +sudo add-apt-repository ppa:shutter/ppa +sudo apt-get update && sudo apt-get install shutter +``` ## 升级 firefox From b7ff66889207785f773ae02ed6fa8ae74a669018 Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 20 Aug 2018 23:57:06 +0800 Subject: [PATCH 078/330] 2018-08-20 --- .../kali-linux-settings/kali-linux-basic-settings.md | 12 +++++++++--- 1 file changed, 9 insertions(+), 
3 deletions(-) diff --git a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md index 2ea65c36..4bd9529d 100644 --- a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md +++ b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md @@ -70,11 +70,17 @@ make package sudo dpkg -i peek-*-Linux.deb ``` -## 安装 shutter(截图软件) +## 安装截图软 ``` -sudo add-apt-repository ppa:shutter/ppa -sudo apt-get update && sudo apt-get install shutter +wget http://packages.linuxdeepin.com/deepin/pool/main/d/deepin-scrot/deepin-scrot_2.0-0deepin_all.deb +sudo dpkg -i deepin-scrot_2.0-0deepin_all.deb + +//可能提示缺少python依赖 +sudo apt-get install python-xlib + +//终端下启动 +deepin scort ``` From 42152a33d6de30665678e623047279f4eac86064 Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 21 Aug 2018 00:03:28 +0800 Subject: [PATCH 079/330] 2018-08-20 --- .../kali-linux-settings/kali-linux-basic-settings.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md index 4bd9529d..b66e4724 100644 --- a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md +++ b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md @@ -76,13 +76,15 @@ sudo dpkg -i peek-*-Linux.deb wget http://packages.linuxdeepin.com/deepin/pool/main/d/deepin-scrot/deepin-scrot_2.0-0deepin_all.deb sudo dpkg -i deepin-scrot_2.0-0deepin_all.deb -//可能提示缺少python依赖 -sudo apt-get install python-xlib +apt-get --fix-broken install //终端下启动 deepin scort ``` +- 配置快捷键来截图方法(思路一样): + - Kali 的快捷键设置在:设置 - 设备 - Keyboard 下 + ## 升级 firefox From 72a9fd43f5b1d579b060f03a35f7cc44be893981 Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 21 Aug 2018 00:11:46 +0800 Subject: [PATCH 080/330] 2018-08-20 --- markdown-file/kali-linux-settings/kali-linux-toc.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/markdown-file/kali-linux-settings/kali-linux-toc.md b/markdown-file/kali-linux-settings/kali-linux-toc.md index d94655e5..358c81a4 100644 --- a/markdown-file/kali-linux-settings/kali-linux-toc.md +++ b/markdown-file/kali-linux-settings/kali-linux-toc.md @@ -9,10 +9,10 @@ - 支持 x86 和 ARM 架构 - 官网下载: - 镜像名:Kali Linux 64 Bit,默认是用 GNOME 桌面,比较华丽,相对较卡(好点电脑推荐,习惯 Ubuntu 的基本都会用) - - 镜像名:Kali Linux Kde 64 Bit,默认是用 Kde 桌面,比较华丽,相对较卡 - - 镜像名:Kali Linux Mate 64 Bit,默认是用 Mate 桌面,比较华丽,相对较卡 - - 镜像名:Kali Linux Lxde 64 Bit,默认是用 Lxde 桌面,比较简洁,相对不卡(类似 Windows 风格的桌面,不卡,只是比较难用) - - 镜像名:Kali Linux Xfce 64 Bit,默认是用 Xfce 桌面,比较简洁,相对不卡 + - 镜像名:Kali Linux Xfce 64 Bit,默认是用 Xfce 桌面,比较简洁,相对不卡(配置较差的推荐) + - 镜像名:Kali Linux Kde 64 Bit,默认是用 Kde 桌面,比较华丽,相对较卡(不推荐) + - 镜像名:Kali Linux Mate 64 Bit,默认是用 Mate 桌面,比较华丽,相对较卡(不推荐) + - 镜像名:Kali Linux Lxde 64 Bit,默认是用 Lxde 桌面,比较简洁,相对不卡(类似 Windows 风格的桌面,不推荐) ## 学习过程 From 249f01c7b2eeacbf6701f2ca8765774ba8bc4f40 Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 21 Aug 2018 00:23:39 +0800 Subject: [PATCH 081/330] 2018-08-20 --- markdown-file/kali-linux-settings/kali-linux-basic-settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md index b66e4724..aa45e065 100644 --- a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md +++ b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md @@ -61,7 +61,7 @@ curl 
https://raw.githubusercontent.com/wklken/vim-for-server/master/vimrc > ~/.v ``` sudo apt install cmake valac libgtk-3-dev libkeybinder-3.0-dev libxml2-utils gettext txt2man -git clone https://github.com/phw/peek.git +git clone https://github.com/phw/peek.git --depth=1 mkdir peek/build cd peek/build cmake -DCMAKE_INSTALL_PREFIX=/usr -DGSETTINGS_COMPILE=OFF .. From e93f4c82d4b097562a45dc7a34ef2f9507fa78c8 Mon Sep 17 00:00:00 2001 From: "Judas.n" Date: Tue, 21 Aug 2018 10:40:00 +0800 Subject: [PATCH 082/330] Update kali-linux-basic-settings.md --- .../kali-linux-settings/kali-linux-basic-settings.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md index aa45e065..962cdca3 100644 --- a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md +++ b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md @@ -38,7 +38,7 @@ curl https://raw.githubusercontent.com/wklken/vim-for-server/master/vimrc > ~/.v - 下载 deb 文件: - 安装依赖:`apt-get install -y fcitx` - 安装 deb 文件:`dpkg -i 文件名` - - 应该会提示有部分依赖不存在,则执行:`apt-get --fix-broken install` + - 应该会提示有部分依赖不存在,则执行:`apt-get --fix-broken install -y` - 安装完成后: - 终端输入:`im-config`,一路确定,在有一个提示选项中选择:`fcitx`,重启电脑,按 Ctrl + Space 就可以切换输入法 @@ -46,13 +46,13 @@ curl https://raw.githubusercontent.com/wklken/vim-for-server/master/vimrc > ~/.v - 下载 deb 文件: - 安装 deb 文件:`dpkg -i 文件名` - - 应该会提示有部分依赖不存在,则执行:`apt-get --fix-broken install` + - 应该会提示有部分依赖不存在,则执行:`apt-get --fix-broken install -y` ## 安装 Visual Studio Code - 下载 deb 文件: - 安装 deb 文件:`dpkg -i 文件名` - - 应该会提示有部分依赖不存在,则执行:`apt-get --fix-broken install` + - 应该会提示有部分依赖不存在,则执行:`apt-get --fix-broken install -y` ## 安装 Peek(Gif 录制) @@ -76,7 +76,7 @@ sudo dpkg -i peek-*-Linux.deb wget http://packages.linuxdeepin.com/deepin/pool/main/d/deepin-scrot/deepin-scrot_2.0-0deepin_all.deb sudo dpkg -i deepin-scrot_2.0-0deepin_all.deb -apt-get --fix-broken install +apt-get --fix-broken install -y //终端下启动 deepin scort From 6936e137e7593783adc673cea25a082eccd8fcad Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 21 Aug 2018 10:51:54 +0800 Subject: [PATCH 083/330] =?UTF-8?q?2018-08-20=20=E8=A1=A5=E5=85=85=20kali?= =?UTF-8?q?=20linux?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/Ubuntu-Popular-Software.md | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/markdown-file/Ubuntu-Popular-Software.md b/markdown-file/Ubuntu-Popular-Software.md index 25bbac15..5f22a8c9 100644 --- a/markdown-file/Ubuntu-Popular-Software.md +++ b/markdown-file/Ubuntu-Popular-Software.md @@ -4,14 +4,20 @@ - 取回更新的软件包列表信息:`sudo apt-get update`,如果安装某个软件报:`Unable to locate package`,就得这样 update 下。 - 安装本地 deb 文件:`sudo dpkg -i 文件名` - - 安装过程提示缺依赖:`sudo apt-get --fix-broken install` + - 安装过程提示缺依赖:`sudo apt-get --fix-broken install -y` - 查看已经安装了哪些包:`sudo dpkg -l` - 查看已安装列表中是否有 Vim 软件,没有安装则没有数据显示:`sudo dpkg -l | grep vim` - 查看 Vim 软件安装位置:`sudo dpkg -L vim` - 安装名为 Vim 的软件:`sudo apt-get install vim` -- 卸载名为 Vim 的软件(保留配置文档):`sudo apt-get remove vim` -- 卸载名为 Vim 的软件(删除配置文档):`sudo apt-get –purge remove vim` - 升级系统所有有新版本的软件:`sudo apt-get upgrade` + +## 卸载 + +- 卸载名为 Vim 的软件(保留配置文档):`sudo apt-get remove vim`(在输入软件的名的时候,可以输入部分,按 Tab 进行提示) +- 卸载名为 Vim 的软件(并删除配置文档):`sudo apt-get –purge remove vim` +- 卸载名为 Vim 的软件(并删除包及其依赖的软件包、配置文件):`sudo apt-get autoremove --purge vim` +- 卸载名为 Vim 的软件(dpkg 方式):`sudo dpkg -r vim`(在输入软件的名的时候,可以输入部分,按 Tab 进行提示) +- 
卸载名为 Vim 的软件(dpkg 方式,并删除配置文档):`sudo dpkg -P vim` - 删除已下载的旧包文件:`sudo apt-get autoclean` - 删除所有已下载的包文件:`sudo apt-get clean` - 卸载所有自动安装且不再使用的软件包:`sudo apt-get autoremove` From b07ccc692d8a28e5133596b3c9cbc8254b7234f2 Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 21 Aug 2018 11:13:39 +0800 Subject: [PATCH 084/330] =?UTF-8?q?2018-08-20=20=E8=A1=A5=E5=85=85=20kali?= =?UTF-8?q?=20linux?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/kali-linux-settings/kali-linux-basic-settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md index 962cdca3..1f17d986 100644 --- a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md +++ b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md @@ -79,7 +79,7 @@ sudo dpkg -i deepin-scrot_2.0-0deepin_all.deb apt-get --fix-broken install -y //终端下启动 -deepin scort +deepin-scrot ``` - 配置快捷键来截图方法(思路一样): From f3a61705576374919dd4098d687f8cb8b463b6db Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 21 Aug 2018 12:03:39 +0800 Subject: [PATCH 085/330] =?UTF-8?q?2018-08-20=20=E8=A1=A5=E5=85=85=20kali?= =?UTF-8?q?=20linux?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 1 + SUMMARY.md | 1 + TOC.md | 1 + markdown-file/speedtest.md | 29 +++++++++++++++++++++++++++++ 4 files changed, 32 insertions(+) create mode 100644 markdown-file/speedtest.md diff --git a/README.md b/README.md index b1cd2fcf..d93ea998 100644 --- a/README.md +++ b/README.md @@ -36,6 +36,7 @@ - [Linux 下常用压缩文件的解压、压缩](markdown-file/File-Extract-Compress.md) - [Yum 下载安装包及对应依赖包](markdown-file/Off-line-Yum-Install.md) - [Zsh 入门](markdown-file/Zsh.md) +- [终端测速](markdown-file/speedtest.md) - [日常维护](markdown-file/maintenance.md) - [日常监控](markdown-file/monitor.md) - [nmon 系统性能监控工具](markdown-file/Nmon.md) diff --git a/SUMMARY.md b/SUMMARY.md index 65d08a57..cc2438ae 100644 --- a/SUMMARY.md +++ b/SUMMARY.md @@ -21,6 +21,7 @@ * [Linux 下常用压缩文件的解压、压缩](markdown-file/File-Extract-Compress.md) * [Yum 下载安装包及对应依赖包](markdown-file/Off-line-Yum-Install.md) * [Zsh 入门](markdown-file/Zsh.md) +* [终端测速](markdown-file/speedtest.md) * [日常维护](markdown-file/maintenance.md) * [日常监控](markdown-file/monitor.md) * [nmon 系统性能监控工具](markdown-file/Nmon.md) diff --git a/TOC.md b/TOC.md index 9eea2731..bd148013 100644 --- a/TOC.md +++ b/TOC.md @@ -19,6 +19,7 @@ - [Linux 下常用压缩文件的解压、压缩](markdown-file/File-Extract-Compress.md) - [Yum 下载安装包及对应依赖包](markdown-file/Off-line-Yum-Install.md) - [Zsh 入门](markdown-file/Zsh.md) +- [终端测速](markdown-file/speedtest.md) - [日常维护](markdown-file/maintenance.md) - [日常监控](markdown-file/monitor.md) - [nmon 系统性能监控工具](markdown-file/Nmon.md) diff --git a/markdown-file/speedtest.md b/markdown-file/speedtest.md new file mode 100644 index 00000000..2bf84c18 --- /dev/null +++ b/markdown-file/speedtest.md @@ -0,0 +1,29 @@ +# 终端测速 + + +- 目前大家主推这个项目: +- 用起来也比较简单,Python 2 ~ 3 都支持。 + +## 简单安装方式 + +- 命令:`pip install speedtest-cli` +- 官网还介绍了其他很多安装使用方式,大家可以自行看下。 + +## 运行 + +- 命令:`speedtest-cli` +- 结果如下: + +``` +Retrieving speedtest.net configuration... +Testing from China Telecom Guangdong (113.67.181.234)... +Retrieving speedtest.net server list... +Selecting best server based on ping... +Hosted by CTM Internet Services (Macau) [106.48 km]: 64.783 ms +Testing download speed................................................................................ 
+Download: 1.05 Mbit/s +Testing upload speed................................................................................................ +Upload: 2.28 Mbit/s +``` + + From 66b901c3526063683358f630015d203def7a9298 Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 21 Aug 2018 15:58:33 +0800 Subject: [PATCH 086/330] =?UTF-8?q?2018-08-20=20=E8=A1=A5=E5=85=85=20kali?= =?UTF-8?q?=20linux?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../kali-linux-settings/kali-linux-basic-settings.md | 12 ++++++------ markdown-file/kali-linux-settings/kali-linux-toc.md | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md index 1f17d986..41a52ff5 100644 --- a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md +++ b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md @@ -42,17 +42,17 @@ curl https://raw.githubusercontent.com/wklken/vim-for-server/master/vimrc > ~/.v - 安装完成后: - 终端输入:`im-config`,一路确定,在有一个提示选项中选择:`fcitx`,重启电脑,按 Ctrl + Space 就可以切换输入法 -## 安装 Chrome 浏览器 - -- 下载 deb 文件: -- 安装 deb 文件:`dpkg -i 文件名` - - 应该会提示有部分依赖不存在,则执行:`apt-get --fix-broken install -y` - ## 安装 Visual Studio Code - 下载 deb 文件: - 安装 deb 文件:`dpkg -i 文件名` - 应该会提示有部分依赖不存在,则执行:`apt-get --fix-broken install -y` +- 安装 Markdown 扩展,中文语言包 + +## 安装剪切板 + +- 命令:`sudo apt-get install parcellite` + - 修改快捷键:`右键软件 | 首选项 | Hotkeys | 历史记录按键组合` ## 安装 Peek(Gif 录制) diff --git a/markdown-file/kali-linux-settings/kali-linux-toc.md b/markdown-file/kali-linux-settings/kali-linux-toc.md index 358c81a4..cc95f0ca 100644 --- a/markdown-file/kali-linux-settings/kali-linux-toc.md +++ b/markdown-file/kali-linux-settings/kali-linux-toc.md @@ -9,7 +9,7 @@ - 支持 x86 和 ARM 架构 - 官网下载: - 镜像名:Kali Linux 64 Bit,默认是用 GNOME 桌面,比较华丽,相对较卡(好点电脑推荐,习惯 Ubuntu 的基本都会用) - - 镜像名:Kali Linux Xfce 64 Bit,默认是用 Xfce 桌面,比较简洁,相对不卡(配置较差的推荐) + - 镜像名:Kali Linux Xfce 64 Bit,默认是用 Xfce 桌面,比较简洁,相对不卡(配置较差的推荐,我这里一台笔记本配置较差,用的就是这个) - 镜像名:Kali Linux Kde 64 Bit,默认是用 Kde 桌面,比较华丽,相对较卡(不推荐) - 镜像名:Kali Linux Mate 64 Bit,默认是用 Mate 桌面,比较华丽,相对较卡(不推荐) - 镜像名:Kali Linux Lxde 64 Bit,默认是用 Lxde 桌面,比较简洁,相对不卡(类似 Windows 风格的桌面,不推荐) From 3a0514017ee3d4f681b5ab401baf6d14812cb8ad Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 21 Aug 2018 16:49:54 +0800 Subject: [PATCH 087/330] =?UTF-8?q?2018-08-20=20=E8=A1=A5=E5=85=85=20kali?= =?UTF-8?q?=20linux?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../kali-linux-penetration-test.md | 59 +++++++++++++++++++ .../kali-linux-settings/kali-linux-toc.md | 1 + 2 files changed, 60 insertions(+) create mode 100644 markdown-file/kali-linux-settings/kali-linux-penetration-test.md diff --git a/markdown-file/kali-linux-settings/kali-linux-penetration-test.md b/markdown-file/kali-linux-settings/kali-linux-penetration-test.md new file mode 100644 index 00000000..48bbc328 --- /dev/null +++ b/markdown-file/kali-linux-settings/kali-linux-penetration-test.md @@ -0,0 +1,59 @@ +# 渗透测试思路 + +- 来源:《Kali Linux 渗透测试的艺术》 + +## 范围界定 + +- 收集需求 +- 筹划工作 +- 边界分析 +- 明确业务指标 +- 项目管理和统筹调度 + +## 信息收集 + +- 互联网上的公开信息 +- 域名注册信息(whois) +- DNS 记录分析 +- 路由信息 +- 利用搜索引擎搜索目标历史记录 + +## 目标识别 + +- 识别目标还在线的主机 +- 识别目标在线主机的系统 + +## 服务枚举 + +- 获取目标主机的情况 + - 开放的端口 + - 操作系统 + - 网络服务 + +## 漏洞扫描 + +- 根据种类划分 + - 本地漏洞 + - 远程漏洞 +- 根据类型划分 + - 设计类漏洞 + - 实施类漏洞 + - 运营类漏洞 + +## 漏洞利用 + + +## 社会工程学攻击 + +- 心理学建模 +- 社会关系 + +## 提升权限 + +## 密码攻击 + +## 无线网络渗透测试 + +## 访问维护 + +- 创建后门 diff --git 
a/markdown-file/kali-linux-settings/kali-linux-toc.md b/markdown-file/kali-linux-settings/kali-linux-toc.md index cc95f0ca..6c3bd1aa 100644 --- a/markdown-file/kali-linux-settings/kali-linux-toc.md +++ b/markdown-file/kali-linux-settings/kali-linux-toc.md @@ -18,6 +18,7 @@ - [Kali Linux 系统安装](kali-linux-install.md) - [Kali Linux 基础设置](kali-linux-basic-settings.md) +- [Kali Linux 渗透测试思路](kali-linux-penetration-test.md) ## 其他资料 From e089a99a91cbdfc5d19f762846987c58421122ab Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 21 Aug 2018 16:54:00 +0800 Subject: [PATCH 088/330] =?UTF-8?q?2018-08-20=20=E8=A1=A5=E5=85=85=20kali?= =?UTF-8?q?=20linux?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../kali-linux-settings/kali-linux-install.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/markdown-file/kali-linux-settings/kali-linux-install.md b/markdown-file/kali-linux-settings/kali-linux-install.md index a8a6cb1e..1ae207a5 100644 --- a/markdown-file/kali-linux-settings/kali-linux-install.md +++ b/markdown-file/kali-linux-settings/kali-linux-install.md @@ -1,5 +1,17 @@ # Kali Linux 安装 + +## 制作 U 盘 + + +- 准备一个 U 盘,下载这个软件:[USBWriter(提取码:5aa2)](https://pan.baidu.com/s/1gg83h9T) +- USBWriter 的使用很简单,如下图即可制作一个 CentOS 系统盘 + +![VMware 下安装](../../images/CentOS-7-Install-a-0.jpg) + + +## 安装过程 + - 最新的几个版本安装过程都是类似的,大家可以参考这个视频教程: - - 其中,安装第一步选择中文,安装完成就会是中文的,不需要像上面这个视频那么麻烦。 From ee5374aee8668546edd499615f63791a0145c557 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 22 Aug 2018 00:16:28 +0800 Subject: [PATCH 089/330] 2018-08-20 --- .../kali-linux-settings/kali-linux-install.md | 12 ++++++- .../kali-linux-penetration-test.md | 2 ++ .../kali-linux-sql-injection.md | 36 +++++++++++++++++++ 3 files changed, 49 insertions(+), 1 deletion(-) create mode 100644 markdown-file/kali-linux-settings/kali-linux-sql-injection.md diff --git a/markdown-file/kali-linux-settings/kali-linux-install.md b/markdown-file/kali-linux-settings/kali-linux-install.md index 1ae207a5..f8fa9f83 100644 --- a/markdown-file/kali-linux-settings/kali-linux-install.md +++ b/markdown-file/kali-linux-settings/kali-linux-install.md @@ -12,7 +12,17 @@ ## 安装过程 -- 最新的几个版本安装过程都是类似的,大家可以参考这个视频教程: +- 这几年的几个版本安装过程都是类似的,大家可以参考这个视频教程: - - 其中,安装第一步选择中文,安装完成就会是中文的,不需要像上面这个视频那么麻烦。 - 安装过程,比较差的机子差不多要 20 ~ 30 分钟。 +- 比较重点的几个步骤推荐: + - `使用整个磁盘` + - `将所有文件放在同一个分区中` + - `分区设定结束并修改写入磁盘` + - `将改动写入磁盘 -- 是` + - `使用网络镜像 -- 是` + +## 软件及系统升级 + +- `apt-get update && apt-get upgrade` \ No newline at end of file diff --git a/markdown-file/kali-linux-settings/kali-linux-penetration-test.md b/markdown-file/kali-linux-settings/kali-linux-penetration-test.md index 48bbc328..5e5c1d0b 100644 --- a/markdown-file/kali-linux-settings/kali-linux-penetration-test.md +++ b/markdown-file/kali-linux-settings/kali-linux-penetration-test.md @@ -17,6 +17,8 @@ - DNS 记录分析 - 路由信息 - 利用搜索引擎搜索目标历史记录 + - Google Hack,根据 Google 的 `inurl` 等高级用法查询一些系统可能存在风险 + - 比如查找链接是:`asp?id=` 的链接。asp 的系统基本都比较老,对于老系统,要嘛废弃,要嘛还是废弃,不然没完没了的。 ## 目标识别 diff --git a/markdown-file/kali-linux-settings/kali-linux-sql-injection.md b/markdown-file/kali-linux-settings/kali-linux-sql-injection.md new file mode 100644 index 00000000..a267754f --- /dev/null +++ b/markdown-file/kali-linux-settings/kali-linux-sql-injection.md @@ -0,0 +1,36 @@ +# SQL 注入 + +## 探测到底是不是通过拼接字符串方式使用 SQL + +- 常用字符: + +``` +' +" +' and 1=1 +' and 1=2 +1 or 1=1 +1' or '1'='1 +1" or "1"="1 +1' order by 1-- +union select 1,2-- +@@version +@@datadir +user() +database() +information_schema.tables + +``` + + 
+## sqlmap 分析数据库和表名、dump 表数据 + +- 获取数据库和服务器信息:`sqlmap -u 目标网址 --dbs --current-user` +- 获取有几张表:`sqlmap -u 目标网址 --tables` +- 获取指定表的字段有哪些:`sqlmap -u 目标网址 -T 表名 --columns` +- 获取指定表有哪些值:`sqlmap -u 目标网址 -T 表名 -C 字段名1,字段名2,字段名3 --dump` + + +## 分析登录后台入口 + +- nikto From 8d18909646f0660923e5ebccd4ac3e6b15faf4ed Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 22 Aug 2018 16:50:51 +0800 Subject: [PATCH 090/330] 2018-08-22 sqlmap --- .../kali-linux-penetration-test.md | 2 +- .../kali-linux-sql-injection.md | 73 ++++++++++++++++++- 2 files changed, 72 insertions(+), 3 deletions(-) diff --git a/markdown-file/kali-linux-settings/kali-linux-penetration-test.md b/markdown-file/kali-linux-settings/kali-linux-penetration-test.md index 5e5c1d0b..58751c06 100644 --- a/markdown-file/kali-linux-settings/kali-linux-penetration-test.md +++ b/markdown-file/kali-linux-settings/kali-linux-penetration-test.md @@ -18,7 +18,7 @@ - 路由信息 - 利用搜索引擎搜索目标历史记录 - Google Hack,根据 Google 的 `inurl` 等高级用法查询一些系统可能存在风险 - - 比如查找链接是:`asp?id=` 的链接。asp 的系统基本都比较老,对于老系统,要嘛废弃,要嘛还是废弃,不然没完没了的。 + - 比如查找链接是:`asp?id=`、`php?id=1` 的链接。asp 和 php 的系统相对比较薄弱。特别是对于 asp 的老系统,建议开发者还是放弃。 ## 目标识别 diff --git a/markdown-file/kali-linux-settings/kali-linux-sql-injection.md b/markdown-file/kali-linux-settings/kali-linux-sql-injection.md index a267754f..e8d7815a 100644 --- a/markdown-file/kali-linux-settings/kali-linux-sql-injection.md +++ b/markdown-file/kali-linux-settings/kali-linux-sql-injection.md @@ -25,12 +25,81 @@ information_schema.tables ## sqlmap 分析数据库和表名、dump 表数据 -- 获取数据库和服务器信息:`sqlmap -u 目标网址 --dbs --current-user` +#### sqlmap 介绍 + +- 目前做 SQL 注入的工具一般大家都是选择:[sqlmap](http://sqlmap.org/) + - 目前(2018年08月)只支持:2.6.x 和 2.7.x +- 支持的 5 种注入类型: + - 基于布尔的盲注,即可以根据返回页面判断条件真假的注入。 + - 基于时间的盲注,即不能根据页面返回内容判断任何信息,用条件语句查看时间延迟语句是否执行(即页面返回时间是否增加)来判断。 + - 基于报错注入,即页面会返回错误信息,或者把注入的语句的结果直接返回在页面中。 + - 联合查询注入,可以使用union的情况下的注入。 + - 堆查询注入,可以同时执行多条语句的执行时的注入。 + + +#### sqlmap 使用 + +- sqlmap 的输出信息按从简到繁共分为7个级别,依次为 0 ~ 6,级别越高,检测越全面。分别代表: + - 使用参数 `-v <级别>` 来指定某个等级,默认输出级别为 1 + +``` +0:只显示 Python 的 tracebacks 信息、错误信息 [ERROR] 和关键信息 [CRITICAL]; +1:同时显示普通信息 [INFO] 和警告信息[WARNING]; +2:同时显示调试信息[DEBUG]; +3:同时显示注入使用的攻击荷载; +4:同时显示 HTTP 请求; +5:同时显示 HTTP 响应头; +6:同时显示 HTTP 响应体。 +``` + +- 将 Google 搜索前一百个结果作为攻击目标:`sqlmap -g "inurl:\".asp?id=1\""` +- 检查注入点(GET):`sqlmap -u 目标网址` +- 检查注入点(POST 数据,多个数据用分号隔开):`sqlmap -u 目标网址 --data="id=0;name=werner" --param-del=";"` +- 检查注入点(Cookie,等级必须是 2 以上):`sqlmap -u 目标网址 --cookie –level 2 "JSESSIONID=123456;NAME=youmeek;" --cookie-del=";"` +- 获取所有数据库信息:`sqlmap -u 目标网址 --dbs` +- 获取所有数据库用户:`sqlmap -u 目标网址 --users` +- 获取当前数据库信息:`sqlmap -u 目标网址 --current-db` +- 获取当前用户:`sqlmap -u 目标网址 --current-user` +- 获取当前数据库和当前用户:`sqlmap -u 目标网址 --current-db --current-user` - 获取有几张表:`sqlmap -u 目标网址 --tables` - 获取指定表的字段有哪些:`sqlmap -u 目标网址 -T 表名 --columns` -- 获取指定表有哪些值:`sqlmap -u 目标网址 -T 表名 -C 字段名1,字段名2,字段名3 --dump` +- 获取指定表字段值:`sqlmap -u 目标网址 -T 表名 -C 字段名1,字段名2,字段名3 --dump` +- 获取指定表字段所有值:`sqlmap -u 目标网址 -T 表名 -C 字段名1,字段名2,字段名3 --dump-all` +- 让 HTTP 请求之间添加延迟,添加参数:`--delay 3`,单位是秒 +- 设置超时时间,默认是 30 秒,添加参数:`--timeout 50`,单位是秒 +- 设置超时后最大重试次数,默认是 3 次,添加参数:`--retries 5` +- 避免错误请求过多而被屏蔽: +``` +有时服务器检测到某个客户端错误请求过多会对其进行屏蔽,而 sqlmap 的测试往往会产生大量错误请求,为避免被屏蔽,可以时不时的产生几个正常请求以迷惑服务器。有以下四个参数与这一机制有关: + +--safe-url: 隔一会就访问一下的安全 URL +--safe-post: 访问安全 URL 时携带的 POST 数据 +--safe-req: 从文件中载入安全 HTTP 请求 +--safe-freq: 每次测试请求之后都会访问一下的安全 URL + +这里所谓的安全 URL 是指访问会返回 200、没有任何报错的 URL。相应地,Sqlmap 也不会对安全 URL 进行任何注入测试。 +``` + +- 其他常用参数: + - 构造随机 user-agent:`–random-agent` + - 指定 HTTP Referer头:`–referer=设定值` + - 
换行分开,加入其他的HTTP头:`–headers=设定值` + - 忽略响应的 Set–Cookie 头信息:`–drop-set-cookie` ## 分析登录后台入口 - nikto + +## 资料 + +- +- +- <> +- <> +- <> +- <> +- <> +- <> +- <> +- <> \ No newline at end of file From 7ccfee04fe764f60565a2cca87d456ac42a35318 Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 23 Aug 2018 09:44:57 +0800 Subject: [PATCH 091/330] 2018-08-23 redis info --- markdown-file/Redis-Install-And-Settings.md | 50 +++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/markdown-file/Redis-Install-And-Settings.md b/markdown-file/Redis-Install-And-Settings.md index fcf54dc3..e99fddfa 100644 --- a/markdown-file/Redis-Install-And-Settings.md +++ b/markdown-file/Redis-Install-And-Settings.md @@ -609,6 +609,56 @@ esac - 配置文件也跟原版本不一样,叫做:`redis.windows.conf` +## Redis Info + +- 客户端下命令行:`info` + - 参考: + +``` +server 部分记录了 Redis 服务器的信息,它包含以下域: + +redis_version : Redis 服务器版本 +redis_git_sha1 : Git SHA1 +redis_git_dirty : Git dirty flag +os : Redis 服务器的宿主操作系统 +arch_bits : 架构(32 或 64 位) +multiplexing_api : Redis 所使用的事件处理机制 +gcc_version : 编译 Redis 时所使用的 GCC 版本 +process_id : 服务器进程的 PID +run_id : Redis 服务器的随机标识符(用于 Sentinel 和集群) +tcp_port : TCP/IP 监听端口 +uptime_in_seconds : 自 Redis 服务器启动以来,经过的秒数 +uptime_in_days : 自 Redis 服务器启动以来,经过的天数 +lru_clock : 以分钟为单位进行自增的时钟,用于 LRU 管理 +clients 部分记录了已连接客户端的信息,它包含以下域: + +connected_clients : 已连接客户端的数量(不包括通过从属服务器连接的客户端) +client_longest_output_list : 当前连接的客户端当中,最长的输出列表 +client_longest_input_buf : 当前连接的客户端当中,最大输入缓存 +blocked_clients : 正在等待阻塞命令(BLPOP、BRPOP、BRPOPLPUSH)的客户端的数量 +memory 部分记录了服务器的内存信息,它包含以下域: + +used_memory : 由 Redis 分配器分配的内存总量,以字节(byte)为单位 +used_memory_human : 以人类可读的格式返回 Redis 分配的内存总量 +used_memory_rss : 从操作系统的角度,返回 Redis 已分配的内存总量(俗称常驻集大小)。这个值和 top 、 ps 等命令的输出一致。 +used_memory_peak : Redis 的内存消耗峰值(以字节为单位) +used_memory_peak_human : 以人类可读的格式返回 Redis 的内存消耗峰值 +used_memory_lua : Lua 引擎所使用的内存大小(以字节为单位) +mem_fragmentation_ratio : used_memory_rss 和 used_memory 之间的比率 +mem_allocator : 在编译时指定的, Redis 所使用的内存分配器。可以是 libc 、 jemalloc 或者 tcmalloc 。 +``` + +- 常关注信息: + +``` +used_memory_rss_human:系统给redis分配的内存(即常驻内存) +used_memory_peak_human : Redis 的内存消耗峰值 +used_memory_lua_human : 系统内存大小 +expired_keys : 过期的的键数量 +evicted_keys : 因为最大内存容量限制而被驱逐(evict)的键数量 +used_cpu_sys_children : Redis 后台进程在 内核态 消耗的 CPU +used_cpu_user_children : Redis 后台进程在 用户态 消耗的 CPU +``` ## 资料 From bed07d363eedaffbcf69a711f0a69673d9e2215d Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 24 Aug 2018 10:30:07 +0800 Subject: [PATCH 092/330] 2018-08-24 --- .../kali-linux-settings/kali-linux-basic-settings.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md index 41a52ff5..d3dc1404 100644 --- a/markdown-file/kali-linux-settings/kali-linux-basic-settings.md +++ b/markdown-file/kali-linux-settings/kali-linux-basic-settings.md @@ -47,7 +47,13 @@ curl https://raw.githubusercontent.com/wklken/vim-for-server/master/vimrc > ~/.v - 下载 deb 文件: - 安装 deb 文件:`dpkg -i 文件名` - 应该会提示有部分依赖不存在,则执行:`apt-get --fix-broken install -y` -- 安装 Markdown 扩展,中文语言包 +- 安装扩展 + - 扩展仓库: + - 中文语言包 + - Markdown + - GitLens + - REST Client + - Atom One Dark Theme ## 安装剪切板 From e14c184f3edd6a65f3a31be8e80ad1039d066a61 Mon Sep 17 00:00:00 2001 From: "Judas.n" Date: Fri, 24 Aug 2018 12:53:12 +0800 Subject: [PATCH 093/330] Update MongoDB-Install-And-Settings.md --- markdown-file/MongoDB-Install-And-Settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/markdown-file/MongoDB-Install-And-Settings.md b/markdown-file/MongoDB-Install-And-Settings.md index 8c4873cd..4dba5528 100644 --- a/markdown-file/MongoDB-Install-And-Settings.md +++ b/markdown-file/MongoDB-Install-And-Settings.md @@ -48,7 +48,7 @@ db.createUser( - 然后删除容器:`docker rm cloud-mongo` - 重新运行镜像,这次增加需要授权才能访问的配置:`docker run -d -p 27017:27017 -v /data/docker/mongo/db:/data/db --restart always --name cloud-mongo mongo:3.4 --auth` - 重新启动服务:`docker restart cloud-mongo` - +- 导入:`docker exec -it cloud-mongo mongoimport -h 127.0.0.1 -u 用户名 -p 密码 -d 库名 -c 集合名 --file /data/db/mongodb20180824.json --type json` ## 安装环境 From 75680526300c777212fc0008f26c08eb3974faa6 Mon Sep 17 00:00:00 2001 From: "Judas.n" Date: Fri, 24 Aug 2018 12:54:08 +0800 Subject: [PATCH 094/330] Update MongoDB-Install-And-Settings.md --- markdown-file/MongoDB-Install-And-Settings.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/markdown-file/MongoDB-Install-And-Settings.md b/markdown-file/MongoDB-Install-And-Settings.md index 4dba5528..71cddd81 100644 --- a/markdown-file/MongoDB-Install-And-Settings.md +++ b/markdown-file/MongoDB-Install-And-Settings.md @@ -48,7 +48,8 @@ db.createUser( - 然后删除容器:`docker rm cloud-mongo` - 重新运行镜像,这次增加需要授权才能访问的配置:`docker run -d -p 27017:27017 -v /data/docker/mongo/db:/data/db --restart always --name cloud-mongo mongo:3.4 --auth` - 重新启动服务:`docker restart cloud-mongo` -- 导入:`docker exec -it cloud-mongo mongoimport -h 127.0.0.1 -u 用户名 -p 密码 -d 库名 -c 集合名 --file /data/db/mongodb20180824.json --type json` +- 导出:`docker exec -it cloud-mongo mongoexport -h 127.0.0.1 -u 用户名 -p 密码 -d 库名 -c 集合名 -o /data/db/mongodb.json --type json` +- 导入:`docker exec -it cloud-mongo mongoimport -h 127.0.0.1 -u 用户名 -p 密码 -d 库名 -c 集合名 --file /data/db/mongodb.json --type json` ## 安装环境 From 912fa281c99ae6b67bdd0e33ae6f073472322763 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 24 Aug 2018 22:55:52 +0800 Subject: [PATCH 095/330] 2018-08-20 --- .../kali-linux-social-engineering.md | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 markdown-file/kali-linux-settings/kali-linux-social-engineering.md diff --git a/markdown-file/kali-linux-settings/kali-linux-social-engineering.md b/markdown-file/kali-linux-settings/kali-linux-social-engineering.md new file mode 100644 index 00000000..7432bd85 --- /dev/null +++ b/markdown-file/kali-linux-settings/kali-linux-social-engineering.md @@ -0,0 +1,23 @@ +# 社会工程学 + +## 简介 + + + +## SET 工具 + +- 命令:`setoolkit`(命令行向导式交互) +- 选择 1:`Social-Engineering Attacks`,结果如下: + +``` +1) Spear-Phishing Attack Vectors #鱼叉式钓鱼攻击向量(也称针对性钓鱼攻击) +2) Website Attack Vectors #网页攻击向量 +3) Infectious Media Generator #媒介感染生成器 +4) Create a Payload and Listener #生成一个payload和监听 +5) Mass Mailer Attack #大规模邮件钓鱼 +6) Arduino-Based Attack Vector #基于Arduino的攻击(类似于单片机) +7) Wireless Access Point Attack Vector #无线接入点攻击 +8) QRCode Generator Attack Vector #二维码攻击 +9) Powershell Attack Vectors #powershell攻击 +10) Third Party Modules #第三方模块 +``` From 942e580d944ea673df75c567e5aae5a9db0b4d01 Mon Sep 17 00:00:00 2001 From: "Judas.n" Date: Fri, 31 Aug 2018 16:42:37 +0800 Subject: [PATCH 096/330] Update Jenkins-Install-And-Settings.md --- markdown-file/Jenkins-Install-And-Settings.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index 6da80a5d..0f2205d4 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -9,6 +9,7 @@ 
## Docker 下安装 Jenkins +- 配置:至少需要 2G 内存 - 先禁用 selinux - 编辑配置文件:`vim /etc/selinux/config` - 把 `SELINUX=enforcing` 改为 `SELINUX=disabled` @@ -138,4 +139,4 @@ Can not write to /var/jenkins_home/copy_reference_file.log. Wrong volume permiss - - -- \ No newline at end of file +- From 1d74b70443496146ad2d5e56e4c3ced213425d29 Mon Sep 17 00:00:00 2001 From: "Judas.n" Date: Fri, 31 Aug 2018 16:43:46 +0800 Subject: [PATCH 097/330] Update README.md --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index d93ea998..94965ade 100644 --- a/README.md +++ b/README.md @@ -65,7 +65,6 @@ - [MongoDB 安装和配置](markdown-file/MongoDB-Install-And-Settings.md) - [Solr 安装和配置](markdown-file/Solr-Install-And-Settings.md) - [Jira 安装和配置](markdown-file/Jira-Install-And-Settings.md) -- [Jenkins 安装和配置](markdown-file/Jenkins-Install-And-Settings.md) - [TeamCity 安装和配置](markdown-file/TeamCity-Install-And-Settings.md) - [Nginx 安装和配置](markdown-file/Nginx-Install-And-Settings.md) - [FastDFS 安装和配置](markdown-file/FastDFS-Install-And-Settings.md) From f068e5ccdf71fbc91bcc19db4d26706b3f5724f6 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 31 Aug 2018 16:47:19 +0800 Subject: [PATCH 098/330] 2018-08-31 --- markdown-file/Daemontools.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/markdown-file/Daemontools.md b/markdown-file/Daemontools.md index 97cc341f..c6644ee6 100644 --- a/markdown-file/Daemontools.md +++ b/markdown-file/Daemontools.md @@ -49,8 +49,7 @@ python setup.py install ``` -- 生成配置文件: - - `echo_supervisord_conf > /etc/supervisord.conf` +- 生成配置文件:`echo_supervisord_conf > /etc/supervisord.conf` - 创建专门的程序配置文件目录、日志目录: - `mkdir -p /var/log/supervisor` - `mkdir -p /etc/supervisor/conf.d/` From 51e3caadd9bacee5f758bdbe9877fda7e270e3aa Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 31 Aug 2018 16:51:22 +0800 Subject: [PATCH 099/330] 2018-08-31 --- markdown-file/Jenkins-Install-And-Settings.md | 1 - 1 file changed, 1 deletion(-) diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index 0f2205d4..8fa67e08 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -89,7 +89,6 @@ Can not write to /var/jenkins_home/copy_reference_file.log. 
Wrong volume permiss - Pre SCM BuildStep Plugin 在拉代码之前插入一些步骤 - GitHub Pull Request Builder Github Pull Request时自动构建 - GitHub API Plugin Github API插件 - - SonarQube Scanner for Jenkins :代码质量管理插件。 - AnsiColor(可选):这个插件可以让Jenkins的控制台输出的log带有颜色 - NodeJS Plugin From c3da6df1ada3c9815288469f5a2d83155bc00681 Mon Sep 17 00:00:00 2001 From: judasn Date: Sat, 15 Sep 2018 16:38:38 +0800 Subject: [PATCH 100/330] 2018-09-15 --- markdown-file/monitor.md | 79 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 72 insertions(+), 7 deletions(-) diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index 573d38cd..b384a658 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -596,17 +596,39 @@ tcp6 0 0 :::43107 :::* LISTEN - 查看当前连接80端口的机子有多少:`netstat -an|grep 80|sort -r` - 查看已经连接的IP有多少连接数:`netstat -ntu | awk '{print $5}' | cut -d: -f1 | sort | uniq -c | sort -n` - 查看已经连接的IP有多少连接数,只显示前 5 个:`netstat -ntu | awk '{print $5}' | cut -d: -f1 | sort | uniq -c | sort -n | head -5` -- 统计当前连接的一些状态情况:`netstat -nat |awk '{print $6}'|sort|uniq -c|sort -rn` +- 查看每个 ip 跟服务器建立的连接数:`netstat -nat|awk '{print$5}'|awk -F : '{print$1}'|sort|uniq -c|sort -rn` ``` -8 TIME_WAIT -8 ESTABLISHED -7 LISTEN -1 Foreign -1 established) -1 CLOSE_WAIT +262 127.0.0.1 +118 +103 172.22.100.141 + 12 172.22.100.29 + 7 172.22.100.183 + 6 116.21.17.144 + 6 0.0.0.0 + 5 192.168.1.109 + 4 172.22.100.32 + 4 172.22.100.121 + 4 172.22.100.108 + 4 172.18.1.39 + 3 172.22.100.2 + 3 172.22.100.190 ``` + +- 统计当前连接的一些状态情况:`netstat -n | awk '/^tcp/ {++S[$NF]} END {for(a in S) print a, S[a]}'` 或者 `netstat -nat |awk '{print $6}'|sort|uniq -c|sort -rn` + +``` +TIME_WAIT 96(是表示系统在等待客户端响应,以便再次连接时候能快速响应,如果积压很多,要开始注意了,准备阻塞了。这篇文章可以看下:http://blog.51cto.com/jschu/1728001) +CLOSE_WAIT 11 +FIN_WAIT2 17 +ESTABLISHED 102(表示正常数据传输状态) +``` + +- Linux 系统下,TCP连接断开后,会以TIME_WAIT状态保留一定的时间,然后才会释放端口。当并发请求过多的时候,就会产生大量的TIME_WAIT状态 的连接,无法及时断开的话,会占用大量的端口资源和服务器资源。这个时候我们可以优化TCP的内核参数,来及时将TIME_WAIT状态的端口清理掉。[来源](http://zhangbin.junxilinux.com/?p=219) + + + - 查看网络接口接受、发送的数据包情况(每隔 3 秒统计一次):`netstat -i 3` @@ -706,9 +728,52 @@ Address: 180.97.33.107 [1880957.563408] Killed process 18694 (perl) total-vm:1972392kB, anon-rss:1953348kB, file-rss:0kB ``` +## 查看系统日志 + +- 查看系统日志:`tail -400f /var/log/messages` +- 可能会看到类似以下异常: + +``` +Out of memory: Kill process 19452 (java) score 264 or sacrifice child +``` + --------------------------------------------------------------------- +## 服务器故障排查顺序 + +#### 负载高,访问慢(没有数据库) + +- 系统层面 + - 查看负载、CPU 和内存使用、服务器上线时间:`htop` + - 查看系统日志:`tail -400f /var/log/messages` + - 查看简化线程树:`pstree -a >> /opt/pstree-20180915.txt` + - ping(多个地区 ping),看下解析 IP 与网络丢包 + - nslookup 命令查看 DNS 是否可用 + - 查看 TCP 和 UDP 应用 + - `netstat -ntlp` + - `netstat -nulp` + - 统计当前连接的一些状态情况:`netstat -nat |awk '{print $6}'|sort|uniq -c|sort -rn` + - 查看每个 ip 跟服务器建立的连接数:`netstat -nat|awk '{print$5}'|awk -F : '{print$1}'|sort|uniq -c|sort -rn` + - 看下谁在线:`w`,`last` + - 看下执行了哪些命令:`history` +- JVM 层面 + - 使用 `ps -ef | grep java`,查看 PID + - 使用 `jstat -gc PID 250 20`,查看gc情况,一般比较关注PERM区的情况,查看GC的增长情况。 + - 使用 `jstat -gccause`:额外输出上次GC原因 + - 使用 `jmap -dump:format=b,file=/opt/myHeapDumpFileName PID`,生成堆转储文件 + - 使用 jhat 或者可视化工具(Eclipse Memory Analyzer 、IBM HeapAnalyzer)分析堆情况。 + - 结合代码解决内存溢出或泄露问题。 + + +#### 访问不了 + +- ping(多个地区 ping),看下解析 IP 与网络丢包 +- nslookup 命令查看 DNS 是否可用 +- telnet 端口:`telnet 192.1.1.1 80` + + + ## 参考资料 - From 37b087a725a7cd0ea6408bb01e768ed2414adf16 Mon Sep 17 00:00:00 2001 From: judasn Date: Sat, 15 Sep 2018 22:13:05 +0800 Subject: [PATCH 101/330] 2018-09-15 --- 
markdown-file/monitor.md | 47 ++++++++++++++++++++++++++++++++++------ 1 file changed, 40 insertions(+), 7 deletions(-) diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index b384a658..e0a1d7a0 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -460,13 +460,17 @@ vda 0.00 0.00 0.00 1.68 0.00 16.16 19.20 0 - `%util`: 采用周期内用于IO操作的时间比率,即IO队列非空的时间比率(就是繁忙程度,值越高表示越繁忙) - **总结** - `iowait%` 表示CPU等待IO时间占整个CPU周期的百分比,如果iowait值超过50%,或者明显大于%system、%user以及%idle,表示IO可能存在问题。 - - `%util` 表示磁盘忙碌情况,一般该值超过80%表示该磁盘可能处于繁忙状态 + - `%util` (重点参数)表示磁盘忙碌情况,一般该值超过80%表示该磁盘可能处于繁忙状态 #### 硬盘 IO 监控 - 安装 iotop:`yum install -y iotop` -- 查看命令:`iotop` +- 查看所有进程 I/O 情况命令:`iotop` +- 只查看当前正在处理 I/O 的进程:`iotop -o` +- 只查看当前正在处理 I/O 的线程,每隔 5 秒刷新一次:`iotop -o -d 5` +- 只查看当前正在处理 I/O 的进程(-P 参数决定),每隔 5 秒刷新一次:`iotop -o -P -d 5` +- 只查看当前正在处理 I/O 的进程(-P 参数决定),每隔 5 秒刷新一次,使用 KB/s 单位(默认是 B/s):`iotop -o -P -k -d 5` - 使用 dd 命令测量服务器延迟:`dd if=/dev/zero of=/opt/ioTest2.txt bs=512 count=1000 oflag=dsync` - 使用 dd 命令来测量服务器的吞吐率(写速度):`dd if=/dev/zero of=/opt/ioTest1.txt bs=1G count=1 oflag=dsync` - 该命令创建了一个 10M 大小的文件 ioTest1.txt,其中参数解释: @@ -543,10 +547,27 @@ kB_ccwr/s:任务取消的写入磁盘的 KB。当任务截断脏的 pagecache - 如果没有 EPEL 源:`yum install -y epel-release` - 常用命令: - `iftop`:默认是监控第一块网卡的流量 - - `iftop -i eth1`:监控eth1 + - `iftop -i eth0`:监控 eth0 - `iftop -n`:直接显示IP, 不进行DNS反解析 - `iftop -N`:直接显示连接埠编号, 不显示服务名称 - `iftop -F 192.168.1.0/24 or 192.168.1.0/255.255.255.0`:显示某个网段进出封包流量 + - `iftop -nP`:显示端口与 IP 信息 + +``` nginx +中间部分:外部连接列表,即记录了哪些ip正在和本机的网络连接 + +右边部分:实时参数分别是该访问 ip 连接到本机 2 秒,10 秒和 40 秒的平均流量 + +=> 代表发送数据,<= 代表接收数据 + +底部会显示一些全局的统计数据,peek 是指峰值情况,cumm 是从 iftop 运行至今的累计情况,而 rates 表示最近 2 秒、10 秒、40 秒内总共接收或者发送的平均网络流量。 + +TX:(发送流量) cumm: 143MB peak: 10.5Mb rates: 1.03Mb 1.54Mb 2.10Mb +RX:(接收流量) 12.7GB 228Mb 189Mb 191Mb 183Mb +TOTAL:(总的流量) 12.9GB 229Mb 190Mb 193Mb 185MbW + +``` + ### 端口使用情况 @@ -742,13 +763,17 @@ Out of memory: Kill process 19452 (java) score 264 or sacrifice child ## 服务器故障排查顺序 -#### 负载高,访问慢(没有数据库) +#### CPU 负载高,访问慢(没有数据库) - 系统层面 - - 查看负载、CPU 和内存使用、服务器上线时间:`htop` + - 查看负载、CPU、内存、上线时间、高资源进程 PID:`htop` + - 查看磁盘使用情况:`df -h` + - 查看磁盘当前情况:`iostat -x -k 3 3`。如果发现当前磁盘忙碌,则查看是哪个 PID 在忙碌:`iotop -o -P -k -d 5` + - 查看 PID 具体在写什么东西:`lsof -p PID` - 查看系统日志:`tail -400f /var/log/messages` - 查看简化线程树:`pstree -a >> /opt/pstree-20180915.txt` - - ping(多个地区 ping),看下解析 IP 与网络丢包 + - 其他机子 ping(多个地区 ping),看下解析 IP 与网络丢包 + - `ifconfig` 查看 dropped 和 error 是否在不断增加,判断网卡是否出现问题 - nslookup 命令查看 DNS 是否可用 - 查看 TCP 和 UDP 应用 - `netstat -ntlp` @@ -757,7 +782,8 @@ Out of memory: Kill process 19452 (java) score 264 or sacrifice child - 查看每个 ip 跟服务器建立的连接数:`netstat -nat|awk '{print$5}'|awk -F : '{print$1}'|sort|uniq -c|sort -rn` - 看下谁在线:`w`,`last` - 看下执行了哪些命令:`history` -- JVM 层面 +- 程序、JVM 层面 + - 查看程序 log - 使用 `ps -ef | grep java`,查看 PID - 使用 `jstat -gc PID 250 20`,查看gc情况,一般比较关注PERM区的情况,查看GC的增长情况。 - 使用 `jstat -gccause`:额外输出上次GC原因 @@ -765,6 +791,13 @@ Out of memory: Kill process 19452 (java) score 264 or sacrifice child - 使用 jhat 或者可视化工具(Eclipse Memory Analyzer 、IBM HeapAnalyzer)分析堆情况。 - 结合代码解决内存溢出或泄露问题。 +#### CPU 负载高,访问慢(带数据库) + +- 基于上面 +- mysql 下查看当前的连接数与执行的sql 语句:`show full processlist;` +- 检查慢查询日志,可能是慢查询引起负载高,根据配置文件查看存放位置:`log_slow_queries` +- 查看 MySQL 设置的最大连接数:`show variables like 'max_connections';` + - 重新设置最大连接数:`set GLOBAL max_connections=300` #### 访问不了 From d09bb93a18ab1ce69f77481d55843c8864451e87 Mon Sep 17 00:00:00 2001 From: judasn Date: Sat, 15 Sep 2018 23:14:12 +0800 Subject: [PATCH 102/330] 2018-09-15 --- markdown-file/Java-bin.md | 5 ++++- 
markdown-file/monitor.md | 14 +++++++++----- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/markdown-file/Java-bin.md b/markdown-file/Java-bin.md index fdd3354b..71766508 100644 --- a/markdown-file/Java-bin.md +++ b/markdown-file/Java-bin.md @@ -202,7 +202,10 @@ tenured generation: - jstack命令主要用来查看Java线程的调用堆栈的,可以用来分析线程问题(如死锁) - jstack用于生成java虚拟机当前时刻的线程快照。 -- 线程快照是当前java虚拟机内每一条线程正在执行的方法堆栈的集合,生成线程快照的主要目的是定位线程出现长时间停顿的原因,如线程间死锁、死循环、请求外部资源导致的长时间等待等。 线程出现停顿的时候通过jstack来查看各个线程的调用堆栈,就可以知道没有响应的线程到底在后台做什么事情,或者等待什么资源。 如果java程序崩溃生成core文件,jstack工具可以用来获得core文件的java stack和native stack的信息,从而可以轻松地知道java程序是如何崩溃和在程序何处发生问题。另外,jstack工具还可以附属到正在运行的java程序中,看到当时运行的java程序的java stack和native stack的信息, 如果现在运行的java程序呈现hung的状态,jstack是非常有用的。 +- 线程快照是当前java虚拟机内每一条线程正在执行的方法堆栈的集合,生成线程快照的主要目的是定位线程出现长时间停顿的原因,如线程间死锁、死循环、请求外部资源导致的长时间等待等。 + - 线程出现停顿的时候通过jstack来查看各个线程的调用堆栈,就可以知道没有响应的线程到底在后台做什么事情,或者等待什么资源。 + - 如果java程序崩溃生成core文件,jstack工具可以用来获得core文件的java stack和native stack的信息,从而可以轻松地知道java程序是如何崩溃和在程序何处发生问题。 + - 另外,jstack工具还可以附属到正在运行的java程序中,看到当时运行的java程序的java stack和native stack的信息, 如果现在运行的java程序呈现hung的状态,jstack是非常有用的。 - `jstack 12011`,查看线程情况 - `jstack -l 12011`,除堆栈外,显示关于锁的附件信息 - 下面 demo 内容太多,所以选取其中一部分 diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index e0a1d7a0..276db65b 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -763,7 +763,7 @@ Out of memory: Kill process 19452 (java) score 264 or sacrifice child ## 服务器故障排查顺序 -#### CPU 负载高,访问慢(没有数据库) +#### CPU 高,负载高,访问慢(没有数据库) - 系统层面 - 查看负载、CPU、内存、上线时间、高资源进程 PID:`htop` @@ -771,8 +771,9 @@ Out of memory: Kill process 19452 (java) score 264 or sacrifice child - 查看磁盘当前情况:`iostat -x -k 3 3`。如果发现当前磁盘忙碌,则查看是哪个 PID 在忙碌:`iotop -o -P -k -d 5` - 查看 PID 具体在写什么东西:`lsof -p PID` - 查看系统日志:`tail -400f /var/log/messages` - - 查看简化线程树:`pstree -a >> /opt/pstree-20180915.txt` + - 查看简化线程树:`pstree -a >> /opt/pstree-20180915.log` - 其他机子 ping(多个地区 ping),看下解析 IP 与网络丢包 + - 查看网络节点情况:`traceroute www.youmeek.com` - `ifconfig` 查看 dropped 和 error 是否在不断增加,判断网卡是否出现问题 - nslookup 命令查看 DNS 是否可用 - 查看 TCP 和 UDP 应用 @@ -780,20 +781,23 @@ Out of memory: Kill process 19452 (java) score 264 or sacrifice child - `netstat -nulp` - 统计当前连接的一些状态情况:`netstat -nat |awk '{print $6}'|sort|uniq -c|sort -rn` - 查看每个 ip 跟服务器建立的连接数:`netstat -nat|awk '{print$5}'|awk -F : '{print$1}'|sort|uniq -c|sort -rn` + - 跟踪程序:`strace -tt -T -v -f -e trace=file -o /opt/strace-20180915.log -s 1024 -p PID` - 看下谁在线:`w`,`last` - 看下执行了哪些命令:`history` - 程序、JVM 层面 - - 查看程序 log + - 查看 Nginx 程序 log + - 查看 JAVA 程序 log - 使用 `ps -ef | grep java`,查看 PID + - 查看堆栈情况:`jstack -l PID >> /opt/jstack-20180915.log` - 使用 `jstat -gc PID 250 20`,查看gc情况,一般比较关注PERM区的情况,查看GC的增长情况。 - 使用 `jstat -gccause`:额外输出上次GC原因 - 使用 `jmap -dump:format=b,file=/opt/myHeapDumpFileName PID`,生成堆转储文件 - 使用 jhat 或者可视化工具(Eclipse Memory Analyzer 、IBM HeapAnalyzer)分析堆情况。 - 结合代码解决内存溢出或泄露问题。 -#### CPU 负载高,访问慢(带数据库) +#### CPU 低,负载高,访问慢(带数据库) -- 基于上面 +- 基于上面,但是侧重点在于 I/O 读写,以及是否有 MySQL 死锁,或者挂载了 NFS,而 NFS Server 出现问题 - mysql 下查看当前的连接数与执行的sql 语句:`show full processlist;` - 检查慢查询日志,可能是慢查询引起负载高,根据配置文件查看存放位置:`log_slow_queries` - 查看 MySQL 设置的最大连接数:`show variables like 'max_connections';` From b2ad27b6d8f7a8858918ece17eeeb9cd10ebf6a0 Mon Sep 17 00:00:00 2001 From: judasn Date: Sun, 16 Sep 2018 23:20:58 +0800 Subject: [PATCH 103/330] 2018-09-16 --- markdown-file/Tomcat-Install-And-Settings.md | 77 +++++++++++++++++--- markdown-file/monitor.md | 1 + 2 files changed, 67 insertions(+), 11 deletions(-) diff --git a/markdown-file/Tomcat-Install-And-Settings.md 
b/markdown-file/Tomcat-Install-And-Settings.md index 6176b3e5..f7419e49 100644 --- a/markdown-file/Tomcat-Install-And-Settings.md +++ b/markdown-file/Tomcat-Install-And-Settings.md @@ -110,16 +110,15 @@ port="8080" protocol="org.apache.coyote.http11.Http11Nio2Protocol" connectionTimeout="20000" - maxConnections="10000" + maxConnections="1000" redirectPort="8443" enableLookups="false" - acceptCount="100" + acceptCount="1000" maxPostSize="10485760" maxHttpHeaderSize="8192" compression="on" disableUploadTimeout="true" compressionMinSize="2048" - acceptorThreadCount="2" compressableMimeType="text/html,text/xml,text/plain,text/css,text/javascript,application/javascript" URIEncoding="utf-8" /> @@ -145,7 +144,7 @@ - 模型资料来源: - 配比资料: - JDK8 配比:[关键系统的JVM参数推荐(2018仲夏版)](https://mp.weixin.qq.com/s/FHY0MelBfmgdRpT4zWF9dQ) -- JDK8 常用配比总结 8G 内存:`CATALINA_OPTS="-Dfile.encoding=UTF-8 -Xms4g -Xmx4g -XX:NewRatio=1 -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=512m"` +- JDK8 常用配比总结 8G 内存:`CATALINA_OPTS="-Dfile.encoding=UTF-8 -Xms4g -Xmx4g"` - Java 的内存模型分为: - Young,年轻代(易被 GC)。Young 区被划分为三部分,Eden 区和两个大小严格相同的 Survivor 区,其中 Survivor 区间中,某一时刻只有其中一个是被使用的,另外一个留做垃圾收集时复制对象用,在 Young 区间变满的时候,minor GC 就会将存活的对象移到空闲的Survivor 区间中,根据 JVM 的策略,在经过几次垃圾收集后,任然存活于 Survivor 的对象将被移动到 Tenured 区间。 - Tenured,终身代。Tenured 区主要保存生命周期长的对象,一般是一些老的对象,当一些对象在 Young 复制转移一定的次数以后,对象就会被转移到 Tenured 区,一般如果系统中用了 application 级别的缓存,缓存中的对象往往会被转移到这一区间。 @@ -153,17 +152,17 @@ - Linux 修改 /usr/program/tomcat7/bin/catalina.sh 文件,把下面信息添加到文件第一行。 - 如果服务器只运行一个 Tomcat - 机子内存如果是 4G: - - `CATALINA_OPTS="-Dfile.encoding=UTF-8 -server -Xms2048m -Xmx2048m -Xmn1024m -XX:PermSize=256m -XX:MaxPermSize=512m -XX:SurvivorRatio=10 -XX:MaxTenuringThreshold=15 -XX:NewRatio=2 -XX:+DisableExplicitGC"` + - `CATALINA_OPTS="-Dfile.encoding=UTF-8 -server -Xms2g -Xmx2g"` - 机子内存如果是 8G: - - `CATALINA_OPTS="-Dfile.encoding=UTF-8 -server -Xms4096m -Xmx4096m -Xmn2048m -XX:PermSize=256m -XX:MaxPermSize=512m -XX:SurvivorRatio=10 -XX:MaxTenuringThreshold=15 -XX:NewRatio=2 -XX:+DisableExplicitGC"` + - `CATALINA_OPTS="-Dfile.encoding=UTF-8 -server -Xms4g -Xmx4g"` - 机子内存如果是 16G: - - `CATALINA_OPTS="-Dfile.encoding=UTF-8 -server -Xms8192m -Xmx8192m -Xmn4096m -XX:PermSize=256m -XX:MaxPermSize=512m -XX:SurvivorRatio=10 -XX:MaxTenuringThreshold=15 -XX:NewRatio=2 -XX:+DisableExplicitGC"` + - `CATALINA_OPTS="-Dfile.encoding=UTF-8 -server -Xms8g -Xmx8g"` - 机子内存如果是 32G: - - `CATALINA_OPTS="-Dfile.encoding=UTF-8 -server -Xms16384m -Xmx16384m -Xmn8192m -XX:PermSize=256m -XX:MaxPermSize=512m -XX:SurvivorRatio=10 -XX:MaxTenuringThreshold=15 -XX:NewRatio=2 -XX:+DisableExplicitGC"` + - `CATALINA_OPTS="-Dfile.encoding=UTF-8 -server -Xms16g -Xmx16g"` - 如果是 8G 开发机 - - `-Xms2048m -Xmx2048m -XX:NewSize=512m -XX:MaxNewSize=1024m -XX:PermSize=256m -XX:MaxPermSize=512m` + - `-Xms2g -Xmx2g` - 如果是 16G 开发机 - - `-Xms4096m -Xmx4096m -XX:NewSize=1024m -XX:MaxNewSize=2048m -XX:PermSize=256m -XX:MaxPermSize=512m` + - `-Xms4g -Xmx4g` - 参数说明: ``` nginx -Dfile.encoding:默认文件编码 @@ -182,9 +181,65 @@ - Windows 修改 /tomcat7/bin/catalina.bat 文件,找到这一行:`echo Using CATALINA_BASE: "%CATALINA_BASE%"`,然后在其上面添加如下内容,此方法只对解压版的 Tomcat 有效果,对于安装版本的需要点击安装后任务栏上的那个 Tomcat 图标,打开配置中有一个 `Java` Tab 的进行编辑。 ``` nginx set JAVA_OPTS=%JAVA_OPTS% -Dfile.encoding="UTF-8" -Dsun.jnu.encoding="UTF8" -Ddefault.client.encoding="UTF-8" -Duser.language=Zh -set JAVA_OPTS=%JAVA_OPTS% -server -Xms4096m -Xmx4096m -Xmn2048m -XX:PermSize=256m -XX:MaxPermSize=512m -XX:SurvivorRatio=10 -XX:MaxTenuringThreshold=15 -XX:NewRatio=2 -XX:+DisableExplicitGC +set JAVA_OPTS=%JAVA_OPTS% 
-server -Xms4g -Xmx4g ``` +## tomcat-manager 监控配置(tomcat 8.0.53) + +#### 开启步骤 + +- 不同的 Tomcat 版本会有差异。 +- 官网文档: +- **先确保解压的 tomcat/webapps 下有 manager 项目** +- 在配置文件里面添加可访问用户:`vim /usr/local/tomcat8/conf/tomcat-users.xml`,比如: + +``` + + + + + +``` + +- 正常情况下,manager ui 界面只运行内网:127.0.0.1 访问,这里我们要关闭这个限制。 +- 修改 webapps 下 manager 项目下的配置:`vim /usr/local/tomcat8/webapps/manager/META-INF/context.xml` +- 旧值: + +``` + + + + + + - - - + + + +``` +- 编辑配置文件:`vim /usr/program/tomcat8/conf/server.xml` + +#### 打开默认被注释的连接池配置 + +- 默认值: + +``` xml + +``` + +- 修改为: + +``` xml + +``` + +- 重点参数解释: + - maxThreads,最大并发数,默认设置 200,一般建议在 500 ~ 800,根据硬件设施和业务来判断 + - minSpareThreads,Tomcat 初始化时创建的线程数,默认设置 25 + - prestartminSpareThreads,在 Tomcat 初始化的时候就初始化 minSpareThreads 的参数值,如果不等于 true,minSpareThreads 的值就没啥效果了 + - maxQueueSize,最大的等待队列数,超过则拒绝请求 + - maxIdleTime,如果当前线程大于初始化线程,那空闲线程存活的时间,单位毫秒,默认60000=60秒=1分钟。 + +#### 修改默认的链接参数配置 + +- 默认值: + +``` xml + +``` + +- 修改为: + +``` xml + ``` -- 编辑配置文件:`vim /usr/program/tomcat7/conf/server.xml` - - 打开默认被注释的连接池配置: - - 默认值: - ``` xml - - ``` - - 修改为: - ``` xml - - ``` - - 重点参数解释: - - maxThreads,最大并发数,默认设置 200,一般建议在 500 ~ 800,根据硬件设施和业务来判断 - - minSpareThreads,Tomcat 初始化时创建的线程数,默认设置 25 - - prestartminSpareThreads,在 Tomcat 初始化的时候就初始化 minSpareThreads 的参数值,如果不等于 true,minSpareThreads 的值就没啥效果了 - - maxQueueSize,最大的等待队列数,超过则拒绝请求 - - maxIdleTime,如果当前线程大于初始化线程,那空闲线程存活的时间,单位毫秒,默认60000=60秒=1分钟。 - - 修改默认的链接参数配置: - - 默认值: - ``` xml - - ``` - - 修改为: - ``` xml - - ``` - - 重点参数解释: - - protocol,Tomcat 8 设置 nio2 更好:org.apache.coyote.http11.Http11Nio2Protocol(如果这个用不了,就用下面那个) - - protocol,Tomcat 6、7 设置 nio 更好:org.apache.coyote.http11.Http11NioProtocol - - enableLookups,禁用DNS查询 - - acceptCount,指定当所有可以使用的处理请求的线程数都被使用时,可以放到处理队列中的请求数,超过这个数的请求将不予处理,默认设置 100 - - maxPostSize,以 FORM URL 参数方式的 POST 提交方式,限制提交最大的大小,默认是 2097152(2兆),它使用的单位是字节。10485760 为 10M。如果要禁用限制,则可以设置为 -1。 - - acceptorThreadCount,用于接收连接的线程的数量,默认值是1。一般这个指需要改动的时候是因为该服务器是一个多核CPU,如果是多核 CPU 一般配置为 2. 
- - maxHttpHeaderSize,http请求头信息的最大程度,超过此长度的部分不予处理。一般8K。 - - 禁用 AJP(如果你服务器没有使用 Apache) - - 把下面这一行注释掉,默认 Tomcat 是开启的。 - ``` xml - - ``` - - - -## JVM 优化 + +- 重点参数解释: + - protocol,Tomcat 8 设置 nio2 更好:org.apache.coyote.http11.Http11Nio2Protocol(如果这个用不了,就用下面那个) + - protocol,Tomcat 6、7 设置 nio 更好:org.apache.coyote.http11.Http11NioProtocol + - enableLookups,禁用DNS查询,tomcat 8 默认已经是禁用了。 + - maxConnections,最大连接数,tomcat 8 默认设置 10000 + - acceptCount,指定当所有可以使用的处理请求的线程数都被使用时,可以放到处理队列中的请求数,超过这个数的请求将不予处理,默认设置 100 + - maxPostSize,以 FORM URL 参数方式的 POST 提交方式,限制提交最大的大小,默认是 2097152(2兆),它使用的单位是字节。10485760 为 10M。如果要禁用限制,则可以设置为 -1。 + - maxHttpHeaderSize,http请求头信息的最大程度,超过此长度的部分不予处理。一般8K。 +- 禁用 AJP(如果你服务器没有使用 Apache) + - 把下面这一行注释掉,默认 Tomcat 是开启的。 + +``` xml + +``` + +- 关闭自动部署功能: +- 旧值: +``` + +``` + +- 新值: +``` + +``` + +## JVM 优化(JDK 8) - 模型资料来源: - 配比资料: @@ -150,7 +173,7 @@ - Tenured,终身代。Tenured 区主要保存生命周期长的对象,一般是一些老的对象,当一些对象在 Young 复制转移一定的次数以后,对象就会被转移到 Tenured 区,一般如果系统中用了 application 级别的缓存,缓存中的对象往往会被转移到这一区间。 - Perm,永久代。主要保存 class,method,filed 对象,这部门的空间一般不会溢出,除非一次性加载了很多的类,不过在涉及到热部署的应用服务器的时候,有时候会遇到 java.lang.OutOfMemoryError : PermGen space 的错误,造成这个错误的很大原因就有可能是每次都重新部署,但是重新部署后,类的 class 没有被卸载掉,这样就造成了大量的 class 对象保存在了 perm 中,这种情况下,一般重新启动应用服务器可以解决问题。 - Linux 修改 /usr/program/tomcat7/bin/catalina.sh 文件,把下面信息添加到文件第一行。 - - 如果服务器只运行一个 Tomcat + - 如果服务器只运行一个 Tomcat,堆栈信息可以这样配置: - 机子内存如果是 4G: - `CATALINA_OPTS="-Dfile.encoding=UTF-8 -server -Xms2g -Xmx2g"` - 机子内存如果是 8G: @@ -163,21 +186,8 @@ - `-Xms2g -Xmx2g` - 如果是 16G 开发机 - `-Xms4g -Xmx4g` - - 参数说明: - ``` nginx - -Dfile.encoding:默认文件编码 - -server:表示这是应用于服务器的配置,JVM 内部会有特殊处理的 - -Xmx1024m:设置JVM最大可用内存为1024MB - -Xms1024m:设置JVM最小内存为1024m。此值可以设置与-Xmx相同,以避免每次垃圾回收完成后JVM重新分配内存。 - -Xmn1024m:设置JVM新生代大小(JDK1.4之后版本)。一般-Xmn的大小是-Xms的1/2左右,不要设置的过大或过小,过大导致老年代变小,频繁Full GC,过小导致minor GC频繁。如果不设置-Xmn,可以采用-XX:NewRatio=2来设置,也是一样的效果 - -XX:NewSize:设置新生代大小 - -XX:MaxNewSize:设置最大的新生代大小 - -XX:PermSize:设置永久代大小(在 Tomcat8 移出了该参数) - -XX:MaxPermSize:设置最大永久代大小(在 Tomcat8 移出了该参数) - -XX:NewRatio=4:设置年轻代(包括 Eden 和两个 Survivor 区)与终身代的比值(除去永久代)。设置为 4,则年轻代与终身代所占比值为 1:4,年轻代占整个堆栈的 1/5 - -XX:MaxTenuringThreshold=10:设置垃圾最大年龄,默认为:15。如果设置为 0 的话,则年轻代对象不经过 Survivor 区,直接进入年老代。对于年老代比较多的应用,可以提高效率。如果将此值设置为一个较大值,则年轻代对象会在 Survivor 区进行多次复制,这样可以增加对象再年轻代的存活时间,增加在年轻代即被回收的概论。需要注意的是,设置了 -XX:MaxTenuringThreshold,并不代表着,对象一定在年轻代存活15次才被晋升进入老年代,它只是一个最大值,事实上,存在一个动态计算机制,计算每次晋入老年代的阈值,取阈值和MaxTenuringThreshold中较小的一个为准。 - -XX:+DisableExplicitGC:这个将会忽略手动调用 GC 的代码使得 System.gc() 的调用就会变成一个空调用,完全不会触发任何 GC - ``` + - 还有一个参数:`-XX:MetaspaceSize=512M -XX:MaxMetaspaceSize=1024M` + - 这个调试来确认什么值合适。 - Windows 修改 /tomcat7/bin/catalina.bat 文件,找到这一行:`echo Using CATALINA_BASE: "%CATALINA_BASE%"`,然后在其上面添加如下内容,此方法只对解压版的 Tomcat 有效果,对于安装版本的需要点击安装后任务栏上的那个 Tomcat 图标,打开配置中有一个 `Java` Tab 的进行编辑。 ``` nginx set JAVA_OPTS=%JAVA_OPTS% -Dfile.encoding="UTF-8" -Dsun.jnu.encoding="UTF8" -Ddefault.client.encoding="UTF-8" -Duser.language=Zh diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index faaded04..aad26af3 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -787,7 +787,7 @@ Out of memory: Kill process 19452 (java) score 264 or sacrifice child - 程序、JVM 层面 - 查看 Nginx 程序 log - 查看 JAVA 程序 log - - 开启 tomcat-manager 监控配置 + - 使用内置 tomcat-manager 监控配置,或者使用:psi-probe - 使用 `ps -ef | grep java`,查看 PID - 查看堆栈情况:`jstack -l PID >> /opt/jstack-20180915.log` - 使用 `jstat -gc PID 250 20`,查看gc情况,一般比较关注PERM区的情况,查看GC的增长情况。 From 5a1a4127c2991661781511d4a7f4f0fc057f4526 Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 17 Sep 2018 12:29:57 +0800 Subject: [PATCH 105/330] 
2018-09-17 --- markdown-file/Java-bin.md | 126 ++++++++++++---- markdown-file/Nginx-Install-And-Settings.md | 148 +++++++++++++++---- markdown-file/Tomcat-Install-And-Settings.md | 3 - markdown-file/monitor.md | 24 ++- 4 files changed, 228 insertions(+), 73 deletions(-) diff --git a/markdown-file/Java-bin.md b/markdown-file/Java-bin.md index 71766508..dbd0ce70 100644 --- a/markdown-file/Java-bin.md +++ b/markdown-file/Java-bin.md @@ -1,10 +1,55 @@ # Java bin 目录下的工具 +## JVM 内存结构 + +- 参考资料:[JVM内存结构(基于JDK8)](https://blog.csdn.net/qq_34457118/article/details/81712293) + +#### 运行时数据区(JVM 规范) + +![image.png](https://upload-images.jianshu.io/upload_images/12159-f8cdb04243ea36e4.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) + +- VM 栈(JVM 虚拟机栈) + - 是线程私有的,它的生命周期和线程相同。它描述的是 Java 方法执行的内存模式。 +- Java 堆区(Heap) + - 是 Java 虚拟机所管理的内存中最大的一块。是被所有线程共享的一块内存区域,在虚拟机启动时候创建。用于存放对象实例。 +- 方法区(Method Area) + - 也是各个线程共享的内存区域,用于存储已被虚拟机加载的类信息、常量、静态变量、即时编译器编译后的代码等数据。 + - 虽然在 JVM 规范上是描述为堆的一个逻辑部分,但是它有一个别名:Non-Heap(非堆),独立于堆区之外的。JDK8 它是:Metaspace 区 + - Metaspace:主要存放:Class、Package、Method、Field、字节码、常量池、符号引用等等 + - 方法区里面有一个:运行时常量池(Run-Time Constant Pool),用于存放编译期生成的各种字面量和符号应用,在类加载后进入该池存放。 +- 本地方法栈(Native Method Stacks) + - 与虚拟机栈所发挥的作用类似,之间的区别: + - 虚拟机栈是为虚拟机执行 Java 方法(也就是字节码)服务 + - 本地方法栈是为了虚拟机使用到 Native 方法服务。 + +#### JDK8 真实内存结构(HotSpot) + +- HotSpot--Java HotSpot Performance Engine,是 Java 虚拟机的一个实现,目前是 Oracle 在维护和发布。 + +![image.png](https://upload-images.jianshu.io/upload_images/12159-045ea5a11000e8df.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) + +#### JDK8 HotSpot 的堆内存区域结构 + +![image.png](https://upload-images.jianshu.io/upload_images/12159-6a94044da388bb0e.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) + +- 对象生命周期:Eden > Surviver(S0 + S1) > Old +- Eden:该区域是最主要的刚创建的对象的内存分配区域,绝大多数对象都会被创建到这里(除了部分大对象通过内存担保机制创建到Old区域,默认大对象都是能够存活较长时间的),该区域的对象大部分都是短时间都会死亡的,故垃圾回收器针对该部分主要采用标记整理算法了回收该区域。 +- Surviver:该区域也是属于新生代的区域,该区域是将在Eden中未被清理的对象存放到该区域中,该区域分为两块区域,采用的是复制算法,每次只使用一块,Eden与Surviver区域的比例是8:1,是根据大量的业务运行总结出来的规律。 +- Old:该区域是属于老年代,一般能够在Surviver中没有被清除出去的对象才会进入到这块区域,该区域主要是采用标记清除算法。 +- 总结:java堆的垃圾回收是垃圾回收器最主要的光顾对象,整体采用分代收集的策略,对不同区域结合其特点采用不同的垃圾收集算法。我们在编程中也应该关注这一块区域,尽量不适用大对象,尽可能的创建局部对象,使用过后确定废弃不用的对象及时断开引用,尽量避免使用循环的对象引用(可达性分析也是比较消耗资源的)等等。 + +#### JVM内存区域的详解图 + +![image.png](https://upload-images.jianshu.io/upload_images/12159-deafd9588b74a2cf.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) + + +------------------------------------------------------------- + ## 频繁GC问题或内存溢出排查流程 - 使用 `jps`,查看线程ID,假设 PID 为 12011 -- 使用 `jstat -gc 12011 250 20`,查看gc情况,一般比较关注PERM区的情况,查看GC的增长情况。 -- 使用 `jstat -gccause`:额外输出上次GC原因 +- 使用 `jstat -gc PID 250 20`,查看gc情况,一般比较关注PERM区的情况,查看GC的增长情况。 +- 使用 `jstat -gccause PID`:额外输出上次GC原因 - 使用 `jmap -dump:format=b,file=/opt/myHeapDumpFileName 12011`,生成堆转储文件 - 使用 jhat 或者可视化工具(Eclipse Memory Analyzer 、IBM HeapAnalyzer)分析堆情况。 - 结合代码解决内存溢出或泄露问题。 @@ -14,6 +59,9 @@ - 使用 `jps`查看线程ID,假设 PID 为 12011 - 使用 `jstack 12011` 查看线程情况 +------------------------------------------------------------------- + + ## jps - 显示当前所有 java 进程 pid 的命令 @@ -26,10 +74,16 @@ - `jps -v` 跟:`ps -ef|grep java` 主要输出内容一样 - `12011` 是我这边的一个 java 应用的 pid,下面的其他命令都是自己与此应用进行分析的 -## jstat +------------------------------------------------------------------- + +## jstat(重要) - 显示进程中的类装载、内存、垃圾收集、JIT编译等运行数据。 -- `jstat -gc 12011 250 10`,查询进程 12011 的垃圾收集情况,每250毫秒查询一次,一共查询10次。 +- 查看类加载信息:`jstat -class PID` + +#### 垃圾回收统计 + +- `jstat -gc PID 250 10`,每250毫秒查询一次,一共查询10次。 ``` S0C S1C S0U S1U EC EU OC OU MC MU CCSC CCSU YGC YGCT FGC FGCT GCT @@ -46,21 +100,30 
@@ ``` - 列含义说明: - -S0C 年轻代中第一个survivor(幸存区)的容量 (字节) - -S1C 年轻代中第二个survivor(幸存区)的容量 (字节) - -S0U 年轻代中第一个survivor(幸存区)目前已使用空间 (字节) - -S1U 年轻代中第二个survivor(幸存区)目前已使用空间 (字节) - -EC 年轻代中Eden(伊甸园)的容量 (字节) - -EU 年轻代中Eden(伊甸园)目前已使用空间 (字节) - -OC Old代的容量 (字节) - -OU Old代目前已使用空间 (字节) - -PC Perm(持久代)的容量 (字节) - -PUPerm(持久代)目前已使用空间 (字节) - -YGC 从应用程序启动到采样时年轻代中gc次数 - -YGCT 从应用程序启动到采样时年轻代中gc所用时间(s) - -FGC 从应用程序启动到采样时old代(全gc)gc次数 - -FGCT 从应用程序启动到采样时old代(全gc)gc所用时间(s) - -GCT 从应用程序启动到采样时gc用的总时间(s) + - **34944.0 表示 34M 大小,235729.8 表示 235M ** + - **SO + S1 + Eden = young 区** + -S0C 年轻代中第一个survivor(幸存区)的容量 (字节) + -S1C 年轻代中第二个survivor(幸存区)的容量 (字节) + -S0U 年轻代中第一个survivor(幸存区)目前已使用空间 (字节) (字母 U 表示 used) + -S1U 年轻代中第二个survivor(幸存区)目前已使用空间 (字节) (字母 U 表示 used) + -EC 年轻代中Eden(伊甸园)的容量 (字节) + -EU 年轻代中Eden(伊甸园)目前已使用空间 (字节) + - **OC + OU = old 区** + -OC Old代的容量 (字节) + -OU Old代目前已使用空间 (字节) + - **MC + MU = Metaspace 区** + - MC 方法区大小 + - MU 方法区使用大小 + - 其他 + - CCSC 压缩类空间大小 + - CCSU 压缩类空间使用大小 + - YGC 年轻代垃圾回收次数 + - YGCT 年轻代垃圾回收消耗时间 + - FGC 老年代垃圾回收次数 + - FGCT 老年代垃圾回收消耗时间 + - GCT 垃圾回收消耗总时间 + +#### 堆内存统计 - `jstat -gccapacity 12011 250 10`,查询进程 12011 VM内存中三代(young,old,perm)对象的使用和占用大小,每250毫秒查询一次,一共查询10次。 @@ -89,13 +152,20 @@ - OGCMX old代的最大容量(字节) - OGC old代当前新生成的容量 (字节) - OC Old代的容量 (字节) - - PGCMN perm代中初始化(最小)的大小 (字节) - - PGCMX perm代的最大容量 (字节) - - PGC perm代当前新生成的容量 (字节) - - PC Perm(持久代)的容量 (字节) - - YGC 从应用程序启动到采样时年轻代中gc次数 - - FGC 从应用程序启动到采样时old代(全gc)gc次数 -- 更多其他参数的使用可以看:[Java命令学习系列(四)——jstat](https://mp.weixin.qq.com/s?__biz=MzI3NzE0NjcwMg==&mid=402330276&idx=2&sn=58117de92512f83090d0a9de738eeacd&scene=21#wechat_redirect) + - MCMN 最小元数据容量 + - MCMX 最大元数据容量 + - MC 当前元数据空间大小 + - CCSMN 最小压缩类空间大小 + - CCSMX 最大压缩类空间大小 + - CCSC 当前压缩类空间大小 + - YGC 年轻代gc次数,从应用程序启动到采样时年轻代中gc次数 + - FGC 老年代GC次数,从应用程序启动到采样时old代(全gc)gc次数 +- 更多其他参数的使用可以看: + - [Java命令学习系列(四)——jstat](https://mp.weixin.qq.com/s?__biz=MzI3NzE0NjcwMg==&mid=402330276&idx=2&sn=58117de92512f83090d0a9de738eeacd&scene=21#wechat_redirect) + - [java高分局之jstat命令使用](https://blog.csdn.net/maosijunzi/article/details/46049117) + +------------------------------------------------------------------- + ## jmap @@ -198,6 +268,9 @@ tenured generation: 535: 1 168 [[Ljava.math.BigInteger; ``` +------------------------------------------------------------------- + + ## jstack - jstack命令主要用来查看Java线程的调用堆栈的,可以用来分析线程问题(如死锁) @@ -275,6 +348,7 @@ JNI global references: 281 ``` + ## 资料 - diff --git a/markdown-file/Nginx-Install-And-Settings.md b/markdown-file/Nginx-Install-And-Settings.md index 58a04b8c..eeb083e6 100644 --- a/markdown-file/Nginx-Install-And-Settings.md +++ b/markdown-file/Nginx-Install-And-Settings.md @@ -81,7 +81,7 @@ http { - 重新启动服务:`docker restart youmeek-nginx` -## Nginx 源码编译安装 +## Nginx 源码编译安装(带监控模块) - 官网下载最新稳定版本 **1.8.1**,大小:814K - 官网安装说明: @@ -110,33 +110,35 @@ http { --http-fastcgi-temp-path=/var/temp/nginx/fastcgi \ --http-uwsgi-temp-path=/var/temp/nginx/uwsgi \ --with-http_ssl_module \ +--with-http_stub_status_module \ --http-scgi-temp-path=/var/temp/nginx/scgi ``` - - 编译:`make` - - 安装:`make install` +- 编译:`make` +- 安装:`make install` - 启动 Nginx - - 先检查是否在 /usr/local 目录下生成了 Nginx 等相关文件:`cd /usr/local/nginx;ll`,正常的效果应该是显示这样的: - - ``` nginx - drwxr-xr-x. 2 root root 4096 3月 22 16:21 conf - drwxr-xr-x. 2 root root 4096 3月 22 16:21 html - drwxr-xr-x. 
2 root root 4096 3月 22 16:21 sbin - ``` - - - 停止防火墙:`service iptables stop` - - 或是把 80 端口加入到的排除列表: - - `sudo iptables -A INPUT -p tcp -m tcp --dport 80 -j ACCEPT` - - `sudo service iptables save` - - `sudo service iptables restart` - - 启动:`/usr/local/nginx/sbin/nginx`,启动完成 shell 是不会有输出的 - - 检查 时候有 Nginx 进程:`ps aux | grep nginx`,正常是显示 3 个结果出来 - - 检查 Nginx 是否启动并监听了 80 端口:`netstat -ntulp | grep 80` - - 访问:`192.168.1.114`,如果能看到:`Welcome to nginx!`,即可表示安装成功 - - 检查 Nginx 启用的配置文件是哪个:`/usr/local/nginx/sbin/nginx -t` - - 刷新 Nginx 配置后重启:`/usr/local/nginx/sbin/nginx -s reload` - - 停止 Nginx:`/usr/local/nginx/sbin/nginx -s stop` - - 如果访问不了,或是出现其他信息看下错误立即:`vim /var/log/nginx/error.log` + - 先检查是否在 /usr/local 目录下生成了 Nginx 等相关文件:`cd /usr/local/nginx;ll`,正常的效果应该是显示这样的: + +``` nginx +drwxr-xr-x. 2 root root 4096 3月 22 16:21 conf +drwxr-xr-x. 2 root root 4096 3月 22 16:21 html +drwxr-xr-x. 2 root root 4096 3月 22 16:21 sbin +``` + +- 如果要检查刚刚编译的哪些模块,可以:`nginx -V` +- 停止防火墙:`service iptables stop` + - 或是把 80 端口加入到的排除列表: + - `sudo iptables -A INPUT -p tcp -m tcp --dport 80 -j ACCEPT` + - `sudo service iptables save` + - `sudo service iptables restart` +- 启动:`/usr/local/nginx/sbin/nginx`,启动完成 shell 是不会有输出的 +- 检查 时候有 Nginx 进程:`ps aux | grep nginx`,正常是显示 3 个结果出来 +- 检查 Nginx 是否启动并监听了 80 端口:`netstat -ntulp | grep 80` +- 访问:`192.168.1.114`,如果能看到:`Welcome to nginx!`,即可表示安装成功 +- 检查 Nginx 启用的配置文件是哪个:`/usr/local/nginx/sbin/nginx -t` +- 刷新 Nginx 配置后重启:`/usr/local/nginx/sbin/nginx -s reload` +- 停止 Nginx:`/usr/local/nginx/sbin/nginx -s stop` +- 如果访问不了,或是出现其他信息看下错误立即:`vim /var/log/nginx/error.log` ## 把 Nginx 添加到系统服务中 @@ -544,6 +546,89 @@ http { ``` +---------------------------------------------------------------------- + + +## Nginx 常规优化 + +#### 增加工作线程数和并发连接数 + +- 修改参数:`worker_processes 1;` +- 该参数是指:nginx 要开启的工作进程数(worker process),默认是 1,一把你不需要修改。(除了工作进程,还有一种 master process 的概念) +- 但是如果请求数比较多,一般推荐最大是修改成 CPU 的内核数等同的值,以增加能力。 +- 修改 events 参数 + +``` +events { + # 每一个进程可以打开的最大连接数(这个参数是受限制于系统参数的,默认是 1024)(进程数是上面 worker_processes 决定的) + worker_connections 1024; + # 可以一次建立多个连接 + multi_accept on; + # epoll 模式效率最高 + use epoll; +} +``` + +#### 启动长连接 + +``` +http { + sendfile on; # 减少文件在应用和内核之间的拷贝 + tcp_nopush on; # 当数据包达到一定大小再发送 + + keepalive_timeout 65; + + upstream tomcatCluster { + server 192.168.1.114:8080; + server 192.168.1.114:8081; + keepalive 300; # 300 个长连接 + } + +} +``` + +#### 启用缓存和压缩 + +``` +http { + gzip on; + gzip_buffers 8 16k; # 这个限制了nginx不能压缩大于128k的文件 + gzip_min_length 512; # 单位byte + gzip_disable "MSIE [1-6]\.(?!.*SV1)"; + gzip_http_version 1.1; # 1.0 的版本可能会有问题 + gzip_types text/plain text/css application/javascript application/x-javascript application/json application/xml; +} +``` + +#### 操作系统优化(机器好点的时候) + +###### 修改 sysctl 参数 + +- 修改配置文件:`vim /etc/sysctl.conf` + +``` +net.ipv4.tcp_fin_timeout = 10 #保持在FIN-WAIT-2状态的时间,使系统可以处理更多的连接。此参数值为整数,单位为秒。 +net.ipv4.tcp_tw_reuse = 1 #开启重用,允许将TIME_WAIT socket用于新的TCP连接。默认为0,表示关闭。 +net.ipv4.tcp_tw_recycle = 0 #开启TCP连接中TIME_WAIT socket的快速回收。默认值为0,表示关闭。 +net.ipv4.tcp_syncookies = 1 #开启SYN cookie,出现SYN等待队列溢出时启用cookie处理,防范少量的SYN攻击。默认为0,表示关闭。 +net.core.somaxconn = 1024 #定义了系统中每一个端口最大的监听队列的长度, 对于一个经常处理新连接的高负载 web服务环境来说,默认值为128,偏小。 +``` + +- 刷新 sysctl 配置:`sysctl -p` + +###### 修改 limits 参数 + +- ElasticSearch 一般也是要修改该参数 +- 修改配置文件:`vim /etc/security/limits.conf` + +``` +* soft nofile 262144 +* hard nofile 262144 +* soft core unlimited +* soft stack 262144 +``` + +---------------------------------------------------------------------- ## Nginx 监控模块 @@ -571,15 +656,16 @@ http { ```ini location 
/nginx_status { - #allow 192.168.1.100; - #deny all; + allow 127.0.0.1; + deny all; stub_status on; access_log off; } ``` - 当你访问:http://127.0.0.1/nginx_status,会得到类似下面的结果 -- 其中配置的 `allow 192.168.1.100;` 表示只允许客户端 IP 为这个才能访问这个地址 +- 其中配置的 `allow 127.0.0.1;` 表示只允许本机访问:http://127.0.0.1/nginx_status 才能看到 + - 所以我们也可以通过 curl 访问本机看到结果,不一定要对外开放。 - `deny all;` 除了被允许的,其他所有人都不可以访问 ``` @@ -589,12 +675,12 @@ server accepts handled requests Reading: 0 Writing: 5 Waiting: 0 ``` -- Active connections: 对后端发起的活动连接数(最常需要看的就是这个参数) +- Active connections: 当前活动连接数,包含 waiting 的连接(最常需要看的就是这个参数) - Server accepts handled requests: Nginx总共处理了 3 个连接,成功创建 6 次握手(证明中间没有失败的),总共处理了 9 个请求. -- Reading: Nginx 读取到客户端的 Header 信息数. -- Writing: Nginx 返回给客户端的 Header 信息数. +- Reading: Nginx 读取到客户端的 Header 信息数,如果很大,说明现在很多请求正在过来 +- Writing: Nginx 返回给客户端的 Header 信息数,如果很大,说明现在又很多请求正在响应 - Waiting: 开启keep-alive的情况下,这个值等于 active – (reading + writing),意思就是 Nginx 已经处理完成,正在等候下一次请求指令的驻留连接. -- 所以,在访问效率高,请求很快被处理完毕的情况下,Waiting数比较多是正常的.如果reading +writing数较多,则说明并发访问量非常大,正在处理过程中. +- 所以,在访问效率高,请求很快被处理完毕的情况下,Waiting数比较多是正常的。**如果reading + writing数较多,则说明并发访问量非常大,正在处理过程中** ## Nginx 配置文件常用配置积累 diff --git a/markdown-file/Tomcat-Install-And-Settings.md b/markdown-file/Tomcat-Install-And-Settings.md index 36bf7943..ee194427 100644 --- a/markdown-file/Tomcat-Install-And-Settings.md +++ b/markdown-file/Tomcat-Install-And-Settings.md @@ -128,10 +128,7 @@ acceptCount="100" maxPostSize="10485760" maxHttpHeaderSize="8192" - compression="on" disableUploadTimeout="true" - compressionMinSize="2048" - compressableMimeType="text/html,text/xml,text/plain,text/css,text/javascript,application/javascript" URIEncoding="utf-8" /> ``` diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index aad26af3..a562535a 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -765,6 +765,7 @@ Out of memory: Kill process 19452 (java) score 264 or sacrifice child #### CPU 高,负载高,访问慢(没有数据库) +- **记录负载开始升高的时间** - 系统层面 - 查看负载、CPU、内存、上线时间、高资源进程 PID:`htop` - 查看磁盘使用情况:`df -h` @@ -775,25 +776,27 @@ Out of memory: Kill process 19452 (java) score 264 or sacrifice child - 其他机子 ping(多个地区 ping),看下解析 IP 与网络丢包 - 查看网络节点情况:`traceroute www.youmeek.com` - `ifconfig` 查看 dropped 和 error 是否在不断增加,判断网卡是否出现问题 - - nslookup 命令查看 DNS 是否可用 + - `nslookup` 命令查看 DNS 是否可用 + - 如果 nginx 有安装:http_stub_status_module 模块,则查看当前统计 - 查看 TCP 和 UDP 应用 - `netstat -ntlp` - `netstat -nulp` - 统计当前连接的一些状态情况:`netstat -nat |awk '{print $6}'|sort|uniq -c|sort -rn` - 查看每个 ip 跟服务器建立的连接数:`netstat -nat|awk '{print$5}'|awk -F : '{print$1}'|sort|uniq -c|sort -rn` - - 跟踪程序:`strace -tt -T -v -f -e trace=file -o /opt/strace-20180915.log -s 1024 -p PID` + - 跟踪程序(按 `Ctrl + C` 停止跟踪):`strace -tt -T -v -f -e trace=file -o /opt/strace-20180915.log -s 1024 -p PID` - 看下谁在线:`w`,`last` - 看下执行了哪些命令:`history` - 程序、JVM 层面 - - 查看 Nginx 程序 log - - 查看 JAVA 程序 log - - 使用内置 tomcat-manager 监控配置,或者使用:psi-probe + - 保存、查看 Nginx 程序 log + - 通过 GoAccess 分析 log + - 保存、查看 Java 程序 log + - 使用内置 tomcat-manager 监控配置,或者使用类似工具:psi-probe - 使用 `ps -ef | grep java`,查看 PID - 查看堆栈情况:`jstack -l PID >> /opt/jstack-20180915.log` - - 使用 `jstat -gc PID 250 20`,查看gc情况,一般比较关注PERM区的情况,查看GC的增长情况。 - - 使用 `jstat -gccause`:额外输出上次GC原因 + - 使用 `jstat -gc PID 250 10`,查看gc情况(截图) + - 使用 `jstat -gccause PID`:额外输出上次GC原因(截图) - 使用 `jmap -dump:format=b,file=/opt/myHeapDumpFileName PID`,生成堆转储文件 - - 使用 jhat 或者可视化工具(Eclipse Memory Analyzer 、IBM HeapAnalyzer)分析堆情况。 + - 使用 jhat 或者可视化工具(Eclipse Memory Analyzer 、IBM HeapAnalyzer)分析堆情况。 - 结合代码解决内存溢出或泄露问题。 #### CPU 低,负载高,访问慢(带数据库) @@ -804,11 
+807,6 @@ Out of memory: Kill process 19452 (java) score 264 or sacrifice child - 查看 MySQL 设置的最大连接数:`show variables like 'max_connections';` - 重新设置最大连接数:`set GLOBAL max_connections=300` -#### 访问不了 - -- ping(多个地区 ping),看下解析 IP 与网络丢包 -- nslookup 命令查看 DNS 是否可用 -- telnet 端口:`telnet 192.1.1.1 80` From 457a7d010db1c7eaf065216cbe06bad5439fb64f Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 17 Sep 2018 17:04:51 +0800 Subject: [PATCH 106/330] 2018-09-17 --- markdown-file/monitor.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index a562535a..e64c970c 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -792,10 +792,10 @@ Out of memory: Kill process 19452 (java) score 264 or sacrifice child - 保存、查看 Java 程序 log - 使用内置 tomcat-manager 监控配置,或者使用类似工具:psi-probe - 使用 `ps -ef | grep java`,查看 PID - - 查看堆栈情况:`jstack -l PID >> /opt/jstack-20180915.log` + - 查看堆栈情况:`jstack -l PID >> /opt/jstack-tomcat1-20180917.log` - 使用 `jstat -gc PID 250 10`,查看gc情况(截图) - 使用 `jstat -gccause PID`:额外输出上次GC原因(截图) - - 使用 `jmap -dump:format=b,file=/opt/myHeapDumpFileName PID`,生成堆转储文件 + - 使用 `jmap -dump:format=b,file=/opt/dumpfile-tomcat1-20180917 PID`,生成堆转储文件 - 使用 jhat 或者可视化工具(Eclipse Memory Analyzer 、IBM HeapAnalyzer)分析堆情况。 - 结合代码解决内存溢出或泄露问题。 From 11b51124de044393215fc7f0ab7b0d9f1c157095 Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 17 Sep 2018 18:29:51 +0800 Subject: [PATCH 107/330] 2018-09-17 --- markdown-file/monitor.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index e64c970c..a02deacb 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -641,7 +641,7 @@ tcp6 0 0 :::43107 :::* LISTEN ``` TIME_WAIT 96(是表示系统在等待客户端响应,以便再次连接时候能快速响应,如果积压很多,要开始注意了,准备阻塞了。这篇文章可以看下:http://blog.51cto.com/jschu/1728001) -CLOSE_WAIT 11 +CLOSE_WAIT 11(如果积压很多,要开始注意了,准备阻塞了。可以看这篇文章:http://blog.51cto.com/net881004/2164020) FIN_WAIT2 17 ESTABLISHED 102(表示正常数据传输状态) ``` From 46248794cb7389a2e3e24ecc84850c4a21441df6 Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 17 Sep 2018 18:57:12 +0800 Subject: [PATCH 108/330] 20180917 --- markdown-file/monitor.md | 47 +++++++++++++++------------------------- 1 file changed, 17 insertions(+), 30 deletions(-) diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index a02deacb..71e823d4 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -568,7 +568,6 @@ TOTAL:(总的流量) 12.9GB 229Mb 190Mb 193Mb ``` - ### 端口使用情况 #### lsof @@ -614,7 +613,7 @@ tcp6 0 0 :::8066 :::* LISTEN tcp6 0 0 :::43107 :::* LISTEN 12011/java ``` -- 查看当前连接80端口的机子有多少:`netstat -an|grep 80|sort -r` +- 查看当前连接80端口的机子有多少,并且是属于什么状态:`netstat -an|grep 80|sort -r` - 查看已经连接的IP有多少连接数:`netstat -ntu | awk '{print $5}' | cut -d: -f1 | sort | uniq -c | sort -n` - 查看已经连接的IP有多少连接数,只显示前 5 个:`netstat -ntu | awk '{print $5}' | cut -d: -f1 | sort | uniq -c | sort -n | head -5` - 查看每个 ip 跟服务器建立的连接数:`netstat -nat|awk '{print$5}'|awk -F : '{print$1}'|sort|uniq -c|sort -rn` @@ -646,9 +645,24 @@ FIN_WAIT2 17 ESTABLISHED 102(表示正常数据传输状态) ``` -- Linux 系统下,TCP连接断开后,会以TIME_WAIT状态保留一定的时间,然后才会释放端口。当并发请求过多的时候,就会产生大量的TIME_WAIT状态 的连接,无法及时断开的话,会占用大量的端口资源和服务器资源。这个时候我们可以优化TCP的内核参数,来及时将TIME_WAIT状态的端口清理掉。[来源](http://zhangbin.junxilinux.com/?p=219) +- TIME_WAIT 和 CLOSE_WAIT 说明: + +``` +Linux 系统下,TCP连接断开后,会以TIME_WAIT状态保留一定的时间,然后才会释放端口。当并发请求过多的时候,就会产生大量的TIME_WAIT状态 的连接,无法及时断开的话,会占用大量的端口资源和服务器资源。这个时候我们可以优化TCP的内核参数,来及时将TIME_WAIT状态的端口清理掉。 + +来源:http://zhangbin.junxilinux.com/?p=219 + 
+================================= +出现大量close_wait的现象,主要原因是某种情况下对方关闭了socket链接,但是另一端由于正在读写,没有关闭连接。代码需要判断socket,一旦读到0,断开连接,read返回负,检查一下errno,如果不是AGAIN,就断开连接。 +Linux分配给一个用户的文件句柄是有限的,而TIME_WAIT和CLOSE_WAIT两种状态如果一直被保持,那么意味着对应数目的通道就一直被占着,一旦达到句柄数上限,新的请求就无法被处理了,接着就是大量Too Many Open Files异常,导致tomcat崩溃。关于TIME_WAIT过多的解决方案参见TIME_WAIT数量太多。 +常见错误原因: +1.代码层面上未对连接进行关闭,比如关闭代码未写在 finally 块关闭,如果程序中发生异常就会跳过关闭代码,自然未发出指令关闭,连接一直由程序托管,内核也无权处理,自然不会发出 FIN 请求,导致连接一直在 CLOSE_WAIT 。 +2.程序响应过慢,比如双方进行通讯,当客户端请求服务端迟迟得不到响应,就断开连接,重新发起请求,导致服务端一直忙于业务处理,没空去关闭连接。这种情况也会导致这个问题。一般如果有多个节点,nginx 进行负载,其中某个节点很高,其他节点不高,那可能就是负载算法不正常,都落在一台机子上了,以至于它忙不过来。 + +来源:https://juejin.im/post/5b59e61ae51d4519634fe257 +``` - 查看网络接口接受、发送的数据包情况(每隔 3 秒统计一次):`netstat -i 3` @@ -823,30 +837,3 @@ Out of memory: Kill process 19452 (java) score 264 or sacrifice child - - - - - - - - - - - - - - - - - - - - - - - - - - - From 0359adc5ad8a3c465bb7e3d4cf0755bc08c124bb Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 18 Sep 2018 13:38:54 +0800 Subject: [PATCH 109/330] 2018-09-18 --- favorite-file/shell/install_common_vim.sh | 12 ++++++++++++ favorite-file/shell/install_jdk_offline_to_bash.sh | 10 +++++----- 2 files changed, 17 insertions(+), 5 deletions(-) create mode 100644 favorite-file/shell/install_common_vim.sh diff --git a/favorite-file/shell/install_common_vim.sh b/favorite-file/shell/install_common_vim.sh new file mode 100644 index 00000000..3c8b1e1f --- /dev/null +++ b/favorite-file/shell/install_common_vim.sh @@ -0,0 +1,12 @@ +#!/bin/sh + + +echo "-----------------------------------------开始常用工具安装" +yum install -y zip unzip lrzsz git epel-release + +echo "-----------------------------------------开始安装 vim" +yum install -y vim + +echo "-----------------------------------------设置 vim 配置" +curl https://raw.githubusercontent.com/wklken/vim-for-server/master/vimrc > ~/.vimrc + diff --git a/favorite-file/shell/install_jdk_offline_to_bash.sh b/favorite-file/shell/install_jdk_offline_to_bash.sh index 3585d262..bcb76d67 100644 --- a/favorite-file/shell/install_jdk_offline_to_bash.sh +++ b/favorite-file/shell/install_jdk_offline_to_bash.sh @@ -8,28 +8,28 @@ fi echo "判断 JDK 压缩包是否存在" -if [ ! -f "/opt/setups/jdk-8u151-linux-x64.tar.gz" ]; then +if [ ! -f "/opt/setups/jdk-8u181-linux-x64.tar.gz" ]; then echo "JDK 压缩包不存在" exit 1 fi echo "开始解压 JDK" -cd /opt/setups ; tar -zxf jdk-8u151-linux-x64.tar.gz +cd /opt/setups ; tar -zxf jdk-8u181-linux-x64.tar.gz -if [ ! -d "/opt/setups/jdk1.8.0_151" ]; then +if [ ! 
-d "/opt/setups/jdk1.8.0_181" ]; then echo "JDK 解压失败,结束脚本" exit 1 fi echo "JDK 解压包移到 /usr/local/ 目录下" -mv jdk1.8.0_151/ /usr/local/ +mv jdk1.8.0_181/ /usr/local/ echo "JDK 写入系统变量到 bash_profile" cat << EOF >> ~/.bash_profile # JDK -JAVA_HOME=/usr/local/jdk1.8.0_151 +JAVA_HOME=/usr/local/jdk1.8.0_181 JRE_HOME=\$JAVA_HOME/jre PATH=\$PATH:\$JAVA_HOME/bin CLASSPATH=.:\$JAVA_HOME/lib/dt.jar:\$JAVA_HOME/lib/tools.jar From 4f6d4f460f14fecc2c065fa93b402d4dd731e19d Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 18 Sep 2018 18:34:50 +0800 Subject: [PATCH 110/330] 2018-09-18 --- markdown-file/Java-bin.md | 7 ++++++- markdown-file/Tomcat-Install-And-Settings.md | 13 ++++++------- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/markdown-file/Java-bin.md b/markdown-file/Java-bin.md index dbd0ce70..21731653 100644 --- a/markdown-file/Java-bin.md +++ b/markdown-file/Java-bin.md @@ -32,6 +32,7 @@ ![image.png](https://upload-images.jianshu.io/upload_images/12159-6a94044da388bb0e.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) +- 组成:Eden + Surviver(S0 + S1) + Old - 对象生命周期:Eden > Surviver(S0 + S1) > Old - Eden:该区域是最主要的刚创建的对象的内存分配区域,绝大多数对象都会被创建到这里(除了部分大对象通过内存担保机制创建到Old区域,默认大对象都是能够存活较长时间的),该区域的对象大部分都是短时间都会死亡的,故垃圾回收器针对该部分主要采用标记整理算法了回收该区域。 - Surviver:该区域也是属于新生代的区域,该区域是将在Eden中未被清理的对象存放到该区域中,该区域分为两块区域,采用的是复制算法,每次只使用一块,Eden与Surviver区域的比例是8:1,是根据大量的业务运行总结出来的规律。 @@ -42,6 +43,10 @@ ![image.png](https://upload-images.jianshu.io/upload_images/12159-deafd9588b74a2cf.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) +#### 更多这类文章 + +- [从实际案例聊聊Java应用的GC优化](https://tech.meituan.com/jvm_optimize.html) + ------------------------------------------------------------- @@ -271,7 +276,7 @@ tenured generation: ------------------------------------------------------------------- -## jstack +## jstack(CPU 负载高) - jstack命令主要用来查看Java线程的调用堆栈的,可以用来分析线程问题(如死锁) - jstack用于生成java虚拟机当前时刻的线程快照。 diff --git a/markdown-file/Tomcat-Install-And-Settings.md b/markdown-file/Tomcat-Install-And-Settings.md index ee194427..68476691 100644 --- a/markdown-file/Tomcat-Install-And-Settings.md +++ b/markdown-file/Tomcat-Install-And-Settings.md @@ -165,11 +165,8 @@ - 配比资料: - JDK8 配比:[关键系统的JVM参数推荐(2018仲夏版)](https://mp.weixin.qq.com/s/FHY0MelBfmgdRpT4zWF9dQ) - JDK8 常用配比总结 8G 内存:`CATALINA_OPTS="-Dfile.encoding=UTF-8 -Xms4g -Xmx4g"` -- Java 的内存模型分为: - - Young,年轻代(易被 GC)。Young 区被划分为三部分,Eden 区和两个大小严格相同的 Survivor 区,其中 Survivor 区间中,某一时刻只有其中一个是被使用的,另外一个留做垃圾收集时复制对象用,在 Young 区间变满的时候,minor GC 就会将存活的对象移到空闲的Survivor 区间中,根据 JVM 的策略,在经过几次垃圾收集后,任然存活于 Survivor 的对象将被移动到 Tenured 区间。 - - Tenured,终身代。Tenured 区主要保存生命周期长的对象,一般是一些老的对象,当一些对象在 Young 复制转移一定的次数以后,对象就会被转移到 Tenured 区,一般如果系统中用了 application 级别的缓存,缓存中的对象往往会被转移到这一区间。 - - Perm,永久代。主要保存 class,method,filed 对象,这部门的空间一般不会溢出,除非一次性加载了很多的类,不过在涉及到热部署的应用服务器的时候,有时候会遇到 java.lang.OutOfMemoryError : PermGen space 的错误,造成这个错误的很大原因就有可能是每次都重新部署,但是重新部署后,类的 class 没有被卸载掉,这样就造成了大量的 class 对象保存在了 perm 中,这种情况下,一般重新启动应用服务器可以解决问题。 -- Linux 修改 /usr/program/tomcat7/bin/catalina.sh 文件,把下面信息添加到文件第一行。 +- Java 的内存模型看:[这篇文章](Java-bin.md) +- Linux 修改 /usr/program/tomcat8/bin/catalina.sh 文件,把下面信息添加到文件第一行。 - 如果服务器只运行一个 Tomcat,堆栈信息可以这样配置: - 机子内存如果是 4G: - `CATALINA_OPTS="-Dfile.encoding=UTF-8 -server -Xms2g -Xmx2g"` @@ -183,8 +180,10 @@ - `-Xms2g -Xmx2g` - 如果是 16G 开发机 - `-Xms4g -Xmx4g` - - 还有一个参数:`-XX:MetaspaceSize=512M -XX:MaxMetaspaceSize=1024M` - - 这个调试来确认什么值合适。 + - 还有一个参数:`-XX:MetaspaceSize=128M -XX:MaxMetaspaceSize=512M` + - 这个可以通过调试来确认什么值合适,一般通过使用 `jstat -gc PID 250 20`,查看 gc 情况下的 MC、MU 情况。 + - 默认 MaxMetaspaceSize 是 
-1,无上限,所以如果硬件还行,不配置也没啥问题。 + - 自己也了解 JVM 实际情况,那就根据实际情况调整。一般项目可以推荐:`-XX:MetaspaceSize=128M -XX:MaxMetaspaceSize=512M` - Windows 修改 /tomcat7/bin/catalina.bat 文件,找到这一行:`echo Using CATALINA_BASE: "%CATALINA_BASE%"`,然后在其上面添加如下内容,此方法只对解压版的 Tomcat 有效果,对于安装版本的需要点击安装后任务栏上的那个 Tomcat 图标,打开配置中有一个 `Java` Tab 的进行编辑。 ``` nginx set JAVA_OPTS=%JAVA_OPTS% -Dfile.encoding="UTF-8" -Dsun.jnu.encoding="UTF8" -Ddefault.client.encoding="UTF-8" -Duser.language=Zh From beca75cc8cc5d0e3dfb40029bc6d5d63547c3084 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 19 Sep 2018 14:59:11 +0800 Subject: [PATCH 111/330] 2018-09-19 --- markdown-file/monitor.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index 71e823d4..14faa86e 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -780,6 +780,12 @@ Out of memory: Kill process 19452 (java) score 264 or sacrifice child #### CPU 高,负载高,访问慢(没有数据库) - **记录负载开始升高的时间** +- 常见场景 + - 虚拟机所在的宿主机资源瓶颈,多个虚拟机竞争资源 + - 定时任务大量的任务并发 + - 消息、请求堆积后恢复时的瞬时流量引起 + - 持久化任务引起 + - 更多可以看这篇:[线上异常排查总结](https://blog.csdn.net/freeiceflame/article/details/78006812) - 系统层面 - 查看负载、CPU、内存、上线时间、高资源进程 PID:`htop` - 查看磁盘使用情况:`df -h` From 6a09d7bbe8a3373862a3263d4364a9375de09f82 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 19 Sep 2018 21:04:51 +0800 Subject: [PATCH 112/330] 2018-09-19 --- markdown-file/Nginx-Install-And-Settings.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/markdown-file/Nginx-Install-And-Settings.md b/markdown-file/Nginx-Install-And-Settings.md index eeb083e6..93b61884 100644 --- a/markdown-file/Nginx-Install-And-Settings.md +++ b/markdown-file/Nginx-Install-And-Settings.md @@ -126,6 +126,16 @@ drwxr-xr-x. 2 root root 4096 3月 22 16:21 sbin ``` - 如果要检查刚刚编译的哪些模块,可以:`nginx -V` + +``` +nginx version: nginx/1.8.0 +built by gcc 4.4.7 20120313 (Red Hat 4.4.7-18) (GCC) +built with OpenSSL 1.0.1e-fips 11 Feb 2013 +TLS SNI support enabled +configure arguments: --user=nginx --group=nginx --prefix=/usr/local/nginx --pid-path=/usr/local/nginx/run/nginx.pid --lock-path=/usr/local/nginx/lock/nginx.lock --with-http_ssl_module --with-http_dav_module --with-http_flv_module --with-http_gzip_static_module --with-http_stub_status_module +``` + + - 停止防火墙:`service iptables stop` - 或是把 80 端口加入到的排除列表: - `sudo iptables -A INPUT -p tcp -m tcp --dport 80 -j ACCEPT` From 9faf97487d4c8e886fe85fbbc319df20a7e6286c Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 20 Sep 2018 10:23:38 +0800 Subject: [PATCH 113/330] 2018-09-20 --- markdown-file/File-Extract-Compress.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/markdown-file/File-Extract-Compress.md b/markdown-file/File-Extract-Compress.md index b0459b9a..fc820934 100644 --- a/markdown-file/File-Extract-Compress.md +++ b/markdown-file/File-Extract-Compress.md @@ -61,6 +61,20 @@ - 命令:`7za a test1.7z /opt/test1/` +## 分卷压缩 + +- 分卷压缩:`zip -s 100M myFile.zip --out newFile.zip` +- 最终效果: + +``` +newFile.z01 +newFile.z02 +newFile.z03 +newFile.z04 +newFile.zip +``` + + ## 特殊格式 - 7z From a14000d3412c3ac5260c8e2e9d494f1540abc6c6 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 21 Sep 2018 16:35:37 +0800 Subject: [PATCH 114/330] 2018-09-21 --- markdown-file/monitor.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index 14faa86e..80daaa3c 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -811,8 +811,11 @@ Out of memory: Kill process 19452 (java) score 264 or sacrifice child - 通过 
GoAccess 分析 log - 保存、查看 Java 程序 log - 使用内置 tomcat-manager 监控配置,或者使用类似工具:psi-probe - - 使用 `ps -ef | grep java`,查看 PID + - 使用 `ps -ef | grep java`,查看进程 PID + - 根据高 CPU 的进程 PID,查看其线程 CPU 使用情况:`top -Hp PID`,找到占用 CPU 资源高的线程 PID - 查看堆栈情况:`jstack -l PID >> /opt/jstack-tomcat1-20180917.log` + - 把占用 CPU 资源高的线程十进制的 PID 转换成 16 进制:`printf "%x\n" PID`,比如:`printf "%x\n" 12401` 得到结果是:`3071` + - 在刚刚输出的那个 log 文件中搜索:`3071`,可以找到:`nid=0x3071` - 使用 `jstat -gc PID 250 10`,查看gc情况(截图) - 使用 `jstat -gccause PID`:额外输出上次GC原因(截图) - 使用 `jmap -dump:format=b,file=/opt/dumpfile-tomcat1-20180917 PID`,生成堆转储文件 From 7a4a236c66c7b3654796d6c3fae1902fb3b114a6 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 21 Sep 2018 16:56:39 +0800 Subject: [PATCH 115/330] 2018-09-21 --- markdown-file/Java-bin.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/markdown-file/Java-bin.md b/markdown-file/Java-bin.md index 21731653..0eedc308 100644 --- a/markdown-file/Java-bin.md +++ b/markdown-file/Java-bin.md @@ -286,7 +286,11 @@ tenured generation: - 另外,jstack工具还可以附属到正在运行的java程序中,看到当时运行的java程序的java stack和native stack的信息, 如果现在运行的java程序呈现hung的状态,jstack是非常有用的。 - `jstack 12011`,查看线程情况 - `jstack -l 12011`,除堆栈外,显示关于锁的附件信息 -- 下面 demo 内容太多,所以选取其中一部分 +- 导出文件:`jstack -l PID >> /opt/jstack-tomcat1-20180917.log` + - 把占用 CPU 资源高的线程十进制的 PID 转换成 16 进制:`printf "%x\n" PID`,比如:`printf "%x\n" 12401` 得到结果是:`3071` + - 在刚刚输出的那个 log 文件中搜索:`3071`,可以找到:`nid=0x3071` +- 在线看某个线程 PID 的情况:`jstack 进程ID | grep 十六进制线程ID -A 10` +- 下面 demo 内容太多,所以选取其中一部分结构: ``` 2018-03-08 14:28:13 From fa093e060d33b8caf065c054b357d9704e74b442 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 21 Sep 2018 18:33:42 +0800 Subject: [PATCH 116/330] 2018-09-21 --- markdown-file/Java-bin.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/markdown-file/Java-bin.md b/markdown-file/Java-bin.md index 0eedc308..decc94a6 100644 --- a/markdown-file/Java-bin.md +++ b/markdown-file/Java-bin.md @@ -174,7 +174,8 @@ ## jmap -- 生成堆转储快照(heapdump) +- 生成堆转储快照(heap dump) + - heap dump 主要记录了在某一时刻JVM堆中对象使用的情况,即某个时刻JVM堆的快照,是一个二进制文件,主要用于分析哪些对象占用了太对的堆空间,从而发现导致内存泄漏的对象。 - 堆Dump是反应Java堆使用情况的内存镜像,其中主要包括系统信息、虚拟机属性、完整的线程Dump、所有类和对象的状态等。 一般,在内存不足、GC异常等情况下,我们就会怀疑有内存泄露。这个时候我们就可以制作堆Dump来查看具体情况,分析原因。 - 常见内存错误: - outOfMemoryError 年老代内存不足。 @@ -276,10 +277,10 @@ tenured generation: ------------------------------------------------------------------- -## jstack(CPU 负载高) +## jstack(线程快照 -- CPU 负载高) - jstack命令主要用来查看Java线程的调用堆栈的,可以用来分析线程问题(如死锁) -- jstack用于生成java虚拟机当前时刻的线程快照。 +- jstack用于生成java虚拟机当前时刻的 **线程快照(thread dump)**。主要记录JVM在某一时刻各个线程执行的情况,以栈的形式显示,是一个文本文件。 - 线程快照是当前java虚拟机内每一条线程正在执行的方法堆栈的集合,生成线程快照的主要目的是定位线程出现长时间停顿的原因,如线程间死锁、死循环、请求外部资源导致的长时间等待等。 - 线程出现停顿的时候通过jstack来查看各个线程的调用堆栈,就可以知道没有响应的线程到底在后台做什么事情,或者等待什么资源。 - 如果java程序崩溃生成core文件,jstack工具可以用来获得core文件的java stack和native stack的信息,从而可以轻松地知道java程序是如何崩溃和在程序何处发生问题。 @@ -290,6 +291,7 @@ tenured generation: - 把占用 CPU 资源高的线程十进制的 PID 转换成 16 进制:`printf "%x\n" PID`,比如:`printf "%x\n" 12401` 得到结果是:`3071` - 在刚刚输出的那个 log 文件中搜索:`3071`,可以找到:`nid=0x3071` - 在线看某个线程 PID 的情况:`jstack 进程ID | grep 十六进制线程ID -A 10` + - `-A 10` 参数用来指定显示行数,否则只会显示一行信息 - 下面 demo 内容太多,所以选取其中一部分结构: ``` @@ -361,3 +363,4 @@ JNI global references: 281 ## 资料 - +- \ No newline at end of file From 60fb2bf286b441294ad7b077ae8dc701b9b0e504 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 26 Sep 2018 19:18:04 +0800 Subject: [PATCH 117/330] 2018-09-26 --- markdown-file/Mysql-Install-And-Settings.md | 69 ++++++++++++------- .../WordPress-Install-And-Settings.md | 12 +++- 2 files changed, 56 
insertions(+), 25 deletions(-) diff --git a/markdown-file/Mysql-Install-And-Settings.md b/markdown-file/Mysql-Install-And-Settings.md index 4ab03b28..44b31c86 100644 --- a/markdown-file/Mysql-Install-And-Settings.md +++ b/markdown-file/Mysql-Install-And-Settings.md @@ -70,8 +70,27 @@ max_allowed_packet = 50M - 备份:`docker exec cloud-mysql /usr/bin/mysqldump -u root --password=123456 DATABASE_Name > /opt/backup.sql` - 还原:`docker exec -i cloud-mysql /usr/bin/mysql -u root --password=123456 DATABASE_Name < /opt/backup.sql` +------------------------------------------------------------------- -## MySQL 安装 + +## MySQL 5.5 安装 + +- [来源](https://blog.csdn.net/qingtian_1993/article/details/79692479) +- 设置仓库 + +``` +rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-6.noarch.rpm +rpm -Uvh https://mirror.webtatic.com/yum/el6/latest.rpm +``` + +- 安装:`yum install mysql55w mysql55w-server` +- 启动:`service mysqld start` +- 重置密码:`mysqladmin -u root password '123456'` + + +------------------------------------------------------------------- + +## MySQL 5.6 安装 - 假设当前用户为:root - Mysql 安装 @@ -91,25 +110,25 @@ max_allowed_packet = 50M - 我们这次安装以 5.6 为实例 - 进入下载目录:`cd /opt/setups` - 解压压缩包:`tar zxvf mysql-5.6.35.tar.gz` - - 移到解压包:`mv /opt/setups/mysql-5.6.35 /usr/program/` + - 移到解压包:`mv /opt/setups/mysql-5.6.35 /usr/local/` - 安装依赖包、编译包:`yum install -y make gcc-c++ cmake bison-devel ncurses-devel autoconf` - - 进入解压目录:`cd /usr/program/mysql-5.6.35/` - - 生成安装目录:`mkdir -p /usr/program/mysql/data` - - 生成配置(使用 InnoDB):`cmake -DCMAKE_INSTALL_PREFIX=/usr/program/mysql -DMYSQL_DATADIR=/usr/program/mysql/data -DMYSQL_UNIX_ADDR=/tmp/mysql.sock -DDEFAULT_CHARSET=utf8mb4 -DDEFAULT_COLLATION=utf8mb4_unicode_ci -DWITH_EXTRA_CHARSETS:STRING=utf8mb4 -DWITH_MYISAM_STORAGE_ENGINE=1 -DWITH_INNOBASE_STORAGE_ENGINE=1 -DENABLED_LOCAL_INFILE=1` + - 进入解压目录:`cd /usr/local/mysql-5.6.35/` + - 生成安装目录:`mkdir -p /usr/local/mysql/data` + - 生成配置(使用 InnoDB):`cmake -DCMAKE_INSTALL_PREFIX=/usr/local/mysql -DMYSQL_DATADIR=/usr/local/mysql/data -DMYSQL_UNIX_ADDR=/tmp/mysql.sock -DDEFAULT_CHARSET=utf8mb4 -DDEFAULT_COLLATION=utf8mb4_unicode_ci -DWITH_EXTRA_CHARSETS:STRING=utf8mb4 -DWITH_MYISAM_STORAGE_ENGINE=1 -DWITH_INNOBASE_STORAGE_ENGINE=1 -DENABLED_LOCAL_INFILE=1` - 更多参数说明可以查看: - 编译:`make`,这个过程比较漫长,一般都在 30 分钟左右,具体还得看机子配置,如果最后结果有 error,建议删除整个 mysql 目录后重新解压一个出来继续处理 - 安装:`make install` - 配置开机启动: - - `cp /usr/program/mysql-5.6.35/support-files/mysql.server /etc/init.d/mysql` + - `cp /usr/local/mysql-5.6.35/support-files/mysql.server /etc/init.d/mysql` - `chmod 755 /etc/init.d/mysql` - `chkconfig mysql on` - - 复制一份配置文件: `cp /usr/program/mysql-5.6.35/support-files/my-default.cnf /etc/my.cnf` - - 删除安装的目录:`rm -rf /usr/program/mysql-5.6.35/` + - 复制一份配置文件: `cp /usr/local/mysql-5.6.35/support-files/my-default.cnf /etc/my.cnf` + - 删除安装的目录:`rm -rf /usr/local/mysql-5.6.35/` - 添加组和用户及安装目录权限 - `groupadd mysql` #添加组 - `useradd -g mysql mysql -s /bin/false` #创建用户mysql并加入到mysql组,不允许mysql用户直接登录系统 - - `chown -R mysql:mysql /usr/program/mysql/data` #设置MySQL数据库目录权限 - - 初始化数据库:`/usr/program/mysql/scripts/mysql_install_db --basedir=/usr/program/mysql --datadir=/usr/program/mysql/data --skip-name-resolve --user=mysql` + - `chown -R mysql:mysql /usr/local/mysql/data` #设置MySQL数据库目录权限 + - 初始化数据库:`/usr/local/mysql/scripts/mysql_install_db --basedir=/usr/local/mysql --datadir=/usr/local/mysql/data --skip-name-resolve --user=mysql` - 开放防火墙端口: - `iptables -I INPUT -p tcp -m tcp --dport 3306 -j ACCEPT` - `service iptables save` @@ -118,10 +137,12 @@ max_allowed_packet = 
50M - 编辑配置文件:`vim /etc/selinux/config` - 把 `SELINUX=enforcing` 改为 `SELINUX=disabled` - 常用命令软连接,才可以在终端直接使用:mysql 和 mysqladmin 命令 - - `ln -s /usr/program/mysql/bin/mysql /usr/bin` - - `ln -s /usr/program/mysql/bin/mysqladmin /usr/bin` - - `ln -s /usr/program/mysql/bin/mysqldump /usr/bin` - - `ln -s /usr/program/mysql/bin/mysqlslap /usr/bin` + - `ln -s /usr/local/mysql/bin/mysql /usr/bin` + - `ln -s /usr/local/mysql/bin/mysqladmin /usr/bin` + - `ln -s /usr/local/mysql/bin/mysqldump /usr/bin` + - `ln -s /usr/local/mysql/bin/mysqlslap /usr/bin` + +------------------------------------------------------------------- ## MySQL 配置 @@ -131,15 +152,15 @@ max_allowed_packet = 50M ``` nginx /etc/my.cnf -/usr/program/mysql/my.cnf -/usr/program/mysql/mysql-test/suite/ndb/my.cnf -/usr/program/mysql/mysql-test/suite/ndb_big/my.cnf +/usr/local/mysql/my.cnf +/usr/local/mysql/mysql-test/suite/ndb/my.cnf +/usr/local/mysql/mysql-test/suite/ndb_big/my.cnf ............. -/usr/program/mysql/mysql-test/suite/ndb_rpl/my.cnf +/usr/local/mysql/mysql-test/suite/ndb_rpl/my.cnf ``` -- 保留 **/etc/my.cnf** 和 **/usr/program/mysql/mysql-test/** 目录下配置文件,其他删除掉。 +- 保留 **/etc/my.cnf** 和 **/usr/local/mysql/mysql-test/** 目录下配置文件,其他删除掉。 - 我整理的一个单机版配置说明(MySQL 5.6,适用于 1G 内存的服务器): - [my.cnf](MySQL-Settings/MySQL-5.6/1G-Memory-Machine/my-for-comprehensive.cnf) - 其中我测试的结果,在不适用任何配置修改的情况下,1G 内存安装 MySQL 5.6 默认就会占用 400M 左右的内存,要降下来的核心配置要补上这几个参数: @@ -165,7 +186,7 @@ table_open_cache=256 - 解决办法: - 在终端中执行(CentOS 6):`service mysql stop` - 在终端中执行(CentOS 7):`systemctl stop mysql` - - 在终端中执行(前面添加的 Linux 用户 mysql 必须有存在):`/usr/program/mysql/bin/mysqld --skip-grant-tables --user=mysql` + - 在终端中执行(前面添加的 Linux 用户 mysql 必须有存在):`/usr/local/mysql/bin/mysqld --skip-grant-tables --user=mysql` - 此时 MySQL 服务会一直处于监听状态,你需要另起一个终端窗口来执行接下来的操作 - 在终端中执行:`mysql -u root mysql` - 把密码改为:123456,进入 MySQL 命令后执行:`UPDATE user SET Password=PASSWORD('123456') where USER='root';FLUSH PRIVILEGES;` @@ -250,9 +271,9 @@ swapon /swapfile ### 主库机子操作 - 主库操作步骤 - - 创建一个目录:`mkdir -p /usr/program/mysql/data/mysql-bin` + - 创建一个目录:`mkdir -p /usr/local/mysql/data/mysql-bin` - 主 DB 开启二进制日志功能:`vim /etc/my.cnf`, - - 添加一行:`log-bin = /usr/program/mysql/data/mysql-bin` + - 添加一行:`log-bin = /usr/local/mysql/data/mysql-bin` - 指定同步的数据库,如果不指定则同步全部数据库,其中 ssm 是我的数据库名:`binlog-do-db=ssm` - 主库关掉慢查询记录,用 SQL 语句查看当前是否开启:`SHOW VARIABLES LIKE '%slow_query_log%';`,如果显示 OFF 则表示关闭,ON 表示开启 - 重启主库 MySQL 服务 @@ -296,8 +317,8 @@ swapon /swapfile - `Slave_IO_Running:Yes` - 如果不是 Yes 也不是 No,而是 Connecting,那就表示从机连不上主库,需要你进一步排查连接问题。 - `Slave_SQL_Running:Yes` -- 如果你的 Slave_IO_Running 是 No,一般如果你是在虚拟机上测试的话,从库的虚拟机是从主库的虚拟机上复制过来的,那一般都会这样的,因为两台的 MySQL 的 UUID 值一样。你可以检查从库下的错误日志:`cat /usr/program/mysql/data/mysql-error.log` - - 如果里面提示 uuid 错误,你可以编辑从库的这个配置文件:`vim /usr/program/mysql/data/auto.cnf`,把配置文件中的:server-uuid 值随便改一下,保证和主库是不一样即可。 +- 如果你的 Slave_IO_Running 是 No,一般如果你是在虚拟机上测试的话,从库的虚拟机是从主库的虚拟机上复制过来的,那一般都会这样的,因为两台的 MySQL 的 UUID 值一样。你可以检查从库下的错误日志:`cat /usr/local/mysql/data/mysql-error.log` + - 如果里面提示 uuid 错误,你可以编辑从库的这个配置文件:`vim /usr/local/mysql/data/auto.cnf`,把配置文件中的:server-uuid 值随便改一下,保证和主库是不一样即可。 diff --git a/markdown-file/WordPress-Install-And-Settings.md b/markdown-file/WordPress-Install-And-Settings.md index 97a46d87..9585771f 100644 --- a/markdown-file/WordPress-Install-And-Settings.md +++ b/markdown-file/WordPress-Install-And-Settings.md @@ -28,7 +28,16 @@ systemctl enable httpd.service ## 安装 MySQL -#### MySQL 5.6 安装和配置(如果就 1G 内存那就不要用 5.7) +#### 先检查是否已经安装了 Mariadb + +- 检查:`rpm -qa | grep mariadb` +- 卸载:`rpm -e --nodeps 
mariadb-libs-5.5.56-2.el7.x86_64`
+
+#### MySQL 5.5 安装和配置(内存 1G 推荐)
+
+- [MySQL 5.5](Mysql-Install-And-Settings.md)
+
+#### MySQL 5.6 安装和配置(如果内存没有大于 2G,请不要使用)
 
 - [MySQL 5.6](Mysql-Install-And-Settings.md)
 
@@ -173,6 +182,7 @@ systemctl enable httpd.service
 ## 创建数据库
 
 - SQL 语句:`CREATE DATABASE wordpress DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;`
+    - 如果有数据则直接导入:`/usr/bin/mysql -u root --password=123456 DATABASE_Name < /opt/backup.sql`
 
 ## WordPress 在线配置引导
 
From 767cdea8ef53d5f511dd4f3d426d563e2c277ce7 Mon Sep 17 00:00:00 2001
From: judasn
Date: Thu, 4 Oct 2018 11:56:19 +0800
Subject: [PATCH 118/330] =?UTF-8?q?2018-10-04=20=E8=A1=A5=E5=85=85=20Nginx?=
 =?UTF-8?q?=20YUM?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 markdown-file/Nginx-Install-And-Settings.md | 98 +++++++++++++++++++++
 1 file changed, 98 insertions(+)

diff --git a/markdown-file/Nginx-Install-And-Settings.md b/markdown-file/Nginx-Install-And-Settings.md
index 93b61884..d8d145c0 100644
--- a/markdown-file/Nginx-Install-And-Settings.md
+++ b/markdown-file/Nginx-Install-And-Settings.md
@@ -81,6 +81,27 @@ http {
 
 - 重新启动服务:`docker restart youmeek-nginx`
 
+-------------------------------------------------------------------
+
+
+## YUM 安装(版本一般滞后半年左右)
+
+- 安装:`yum install -y nginx`,同时增加了一个 nginx 用户组和用户
+- 默认配置文件位置:`vim /etc/nginx/nginx.conf`
+- 其他配置文件位置:`cd /etc/nginx/conf.d/`
+- 模块配置文件位置:`cd /usr/share/nginx/modules/`
+- 默认 HTML 静态文件位置:`cd /usr/share/nginx/html`
+- log 存放目录:`cd /var/log/nginx/`
+- 状态:`systemctl status nginx`
+- 启动:`systemctl start nginx`
+- 停止:`systemctl stop nginx`
+- 刷新配置:`nginx -s reload`
+- 查看版本和 YUM 自带的模块:`nginx -V`
+
+
+-------------------------------------------------------------------
+
+
 ## Nginx 源码编译安装(带监控模块)
 
 - 官网下载最新稳定版本 **1.8.1**,大小:814K
 - 官网安装说明:
----------------------------------------------------------------------- 
+ 
+## Nginx 压力测试
+
+- AB 测试工具安装:`yum install -y httpd-tools`
+- 使用:
+
+```
+ab -n 1000 -c 100 http://www.baidu.com/
+
+-n 总的请求数
+-c 单个时刻并发数
+```
+
+
+- 压测结果:
+
+
+```
+This is ApacheBench, Version 2.3 <$Revision: 1430300 $>
+Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
+Licensed to The Apache Software Foundation, http://www.apache.org/
+
+Benchmarking juejin.im (be patient)
+Completed 100 requests
+Completed 200 requests
+Completed 300 requests
+Completed 400 requests
+Completed 500 requests
+Completed 600 requests
+Completed 700 requests
+Completed 800 requests
+Completed 900 requests
+Completed 1000 requests
+Finished 1000 requests
+
+
+Server Software: nginx
+Server Hostname: juejin.im
+Server Port: 443
+SSL/TLS Protocol: TLSv1.2,ECDHE-RSA-AES256-GCM-SHA384,2048,256
+
+Document Path: /
+Document Length: 271405 bytes
+
+Concurrency Level: 100(并发数:100)
+Time taken for tests: 120.042 seconds(一共用了 120 秒)
+Complete requests: 1000(总的请求数:1000)
+Failed requests: 0(失败的请求次数)
+Write errors: 0
+Total transferred: 271948000 bytes
+HTML transferred: 271405000 bytes
+Requests per second: 8.33 [#/sec] (mean)(QPS 系统吞吐量,平均每秒请求数,计算公式 = 总请求数 / 总时间数)
+Time per request: 12004.215 [ms] (mean)(毫秒,平均每次并发 100 个请求的处理时间)
+Time per request: 120.042 [ms] (mean, across all concurrent requests)(毫秒,并发 100 下,平均每个请求处理时间)
+Transfer rate: 2212.34 [Kbytes/sec] received(平均每秒网络流量)
+
+Connection Times (ms)
+              min  mean[+/-sd] median   max
+Connect:       57  159 253.6     77    1002
+Processing:  1139 11570 2348.2  11199   36198
+Waiting:      156 1398 959.4   1279   22698
+Total:       1232 11730 2374.1  11300   36274
+
+Percentage of the requests served within a 
certain time (ms) + 50% 11300 + 66% 11562 + 75% 11863 + 80% 12159 + 90% 13148 + 95% 15814 + 98% 18882 + 99% 22255 + 100% 36274 (longest request) +``` + + ---------------------------------------------------------------------- From 22ed5256e75ac2e7b5586f98d78561edeb8b502f Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 4 Oct 2018 15:18:26 +0800 Subject: [PATCH 119/330] =?UTF-8?q?2018-10-04=20=E8=A1=A5=E5=85=85=20Nginx?= =?UTF-8?q?=20YUM?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/Nginx-Install-And-Settings.md | 22 +++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/markdown-file/Nginx-Install-And-Settings.md b/markdown-file/Nginx-Install-And-Settings.md index d8d145c0..3932e2bc 100644 --- a/markdown-file/Nginx-Install-And-Settings.md +++ b/markdown-file/Nginx-Install-And-Settings.md @@ -843,9 +843,31 @@ location ~ .*$ { } ``` +### 链接 aa 下,查询参数包含 bb +- 这里必须使用:IF,但是 IF 是不被推荐的:[If Is Evil](https://www.nginx.com/resources/wiki/start/topics/depth/ifisevil/) +``` +location /aa/ { + if ( $args ~* '(.*bb.*)' ) { + return 601; + } +} +``` + +``` +location /aa/ { + if ($args ~ tag=bb){ + return 601; + } +} +``` + + + +------------------------------------------------------------------- + ### HTTP 服务,绑定多个域名 From 880c4e28128afec7d8133a6e25e7e5eaf517cf82 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 5 Oct 2018 11:55:37 +0800 Subject: [PATCH 120/330] =?UTF-8?q?2018-10-05=20=E8=A1=A5=E5=85=85=20WordP?= =?UTF-8?q?ress?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/Mysql-Install-And-Settings.md | 8 ++++++-- markdown-file/WordPress-Install-And-Settings.md | 5 +++-- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/markdown-file/Mysql-Install-And-Settings.md b/markdown-file/Mysql-Install-And-Settings.md index 44b31c86..1af74312 100644 --- a/markdown-file/Mysql-Install-And-Settings.md +++ b/markdown-file/Mysql-Install-And-Settings.md @@ -83,10 +83,14 @@ rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-6.noarch.rpm rpm -Uvh https://mirror.webtatic.com/yum/el6/latest.rpm ``` -- 安装:`yum install mysql55w mysql55w-server` +- 安装:`yum install mysql55w mysql55w-server`,用同时生产 mysql 的组和用户 - 启动:`service mysqld start` - 重置密码:`mysqladmin -u root password '123456'` - +- 默认配置文件:`vim /etc/my.cnf` +- log 目录:`cd /var/log/mysqld.log` +- 查看服务 log:`tail -300 /var/log/mysqld.log` +- 给指定目录增加 mysql 用户组权限:`chown mysql.mysql /var/run/mysqld/` +- 官网 MySQL 启动失败,这篇文章经验值得推荐:[CentOS 7下MySQL服务启动失败的解决思路](https://www.cnblogs.com/ivictor/p/5146247.html) ------------------------------------------------------------------- diff --git a/markdown-file/WordPress-Install-And-Settings.md b/markdown-file/WordPress-Install-And-Settings.md index 9585771f..e8780619 100644 --- a/markdown-file/WordPress-Install-And-Settings.md +++ b/markdown-file/WordPress-Install-And-Settings.md @@ -107,10 +107,11 @@ pid-file=/var/run/mysqld/mysqld.pid ``` -## 安装 PHP +## 安装 PHP 7(默认是 PHP 5.4) -- 安装命令:`yum install php php-mysql php-gd php-imap php-ldap php-odbc php-pear php-xml php-xmlrpc` +- 安装过程: - 测试 PHP 安装结果,新建文件:`vim /var/www/html/info.php` +- 默认配置文件位置:`vim /etc/php.ini` ``` Date: Sun, 7 Oct 2018 10:02:27 +0800 Subject: [PATCH 121/330] 2018-10-07 Docker --- markdown-file/Docker-Install-And-Usage.md | 1 + 1 file changed, 1 insertion(+) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index b91e3ce1..392972b4 100644 --- 
a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -284,6 +284,7 @@ CONTAINER ID NAME CPU % MEM USAGE / LI - `docker exec -d 容器ID touch /opt/test.txt`,已守护式的方式进入 docker 容器,并创建一个文件 - `docker stop 容器ID`,停止容器 - `docker stop $(docker ps -a -q)`,停止所有容器 + - `docker stop $(docker ps -a -q) ; docker rm $(docker ps -a -q)`,停止所有容器,并删除所有容器 - `docker kill $(docker ps -q) ; docker rm $(docker ps -a -q)`,停止所有容器,并删除所有容器 - `docker start 容器ID`,重新启动已经停止的容器(重新启动,docker run 参数还是保留之前的) - `docker restart 容器ID`,重启容器 From ed76961adc15ff0f7d0cbde370cda9786b05d11f Mon Sep 17 00:00:00 2001 From: judasn Date: Sun, 7 Oct 2018 10:38:16 +0800 Subject: [PATCH 122/330] 2018-10-07 Jenkins --- markdown-file/Bash-Other-Bash.md | 2 + markdown-file/Jenkins-Install-And-Settings.md | 95 ++++++++++++------- 2 files changed, 65 insertions(+), 32 deletions(-) diff --git a/markdown-file/Bash-Other-Bash.md b/markdown-file/Bash-Other-Bash.md index 5f8782c5..2ce9fcd8 100644 --- a/markdown-file/Bash-Other-Bash.md +++ b/markdown-file/Bash-Other-Bash.md @@ -10,6 +10,7 @@ - `rpm -ivh example.rpm`,安装 example.rpm 包并在安装过程中显示正在安装的文件信息及安装进度 - 查询 - `rpm -qa | grep jdk`,查看 jdk 是否被安装 + - `rpm -ql jdk`,查看 jdk 是否被安装 - 卸载 - `rpm -e jdk`,卸载 jdk(一般卸载的时候都要先用 rpm -qa 看下整个软件的全名) - YUM 软件管理: @@ -26,4 +27,5 @@ ## 资料 +- diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index 8fa67e08..7e12e852 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -99,39 +99,70 @@ Can not write to /var/jenkins_home/copy_reference_file.log. Wrong volume permiss - 比上面多了一步:`-v /var/run/docker.sock:/var/run/docker.sock` - 这样,在 jenkins 里面写 shell 脚本调用 docker 程序,就可以直接调用宿主机的 docker 了。 +------------------------------------------------------------------- -## Jenkins 安装 - -- Jenkins 安装 - - 官网使用 Tomcat 部署方式指导: - - 此时(20160207) Jenkins 最新版本为:**1.647** - - JDK 最低要求是 JDK 7,官网推荐是 JDK 8 - - 我个人习惯 `/opt` 目录下创建一个目录 `setups` 用来存放各种软件安装包;在 `/usr` 目录下创建一个 `program` 用来存放各种解压后的软件包,下面的讲解也都是基于此习惯 - - 我个人已经使用了第三方源:`EPEL、RepoForge`,如果你出现 `yum install XXXXX` 安装不成功的话,很有可能就是你没有相关源,请查看我对源设置的文章 - - Jenkins 下载:`wget http://mirrors.jenkins-ci.org/war/latest/jenkins.war` (大小:61 M) - - 我们假设这个 Tomcat 就是为了 Jenkins 专用的 - - 把下载下来的 jenkins.war 移到 Tomcat 的 webapps 目录下,比如我的是:`/usr/program/tomcat8/webapps` - - 把 Jenkins.war 改名为 ROOT.war:`mv jenkins.war ROOT.war` - - 删除 Tomcat 下 webapps 目录下多余的一些目录 - - 首次启动 Tomcat,让 Tomcat 解压 war - - 设置 JENKINS_HOME: - - 寻找 jenkins home 目录地址:`find / -name .jenkins`,我这边得到的结果是:`/root/.jenkins` - - 对在 Tomcat 文章中讲解的系统变量 `CATALINA_OPTS` 进行设置: - - 旧值: - ``` - CATALINA_OPTS="-server -Xms528m -Xmx528m -XX:PermSize=256m -XX:MaxPermSize=358m" - export CATALINA_OPTS - ``` - - 改为: - ``` - CATALINA_OPTS="-server -DJENKINS_HOME=/root/.jenkins -Xms528m -Xmx528m -XX:PermSize=256m -XX:MaxPermSize=358m" - export CATALINA_OPTS - ``` - -- Jenkins 各个组件配置: - - 访问: -- 其他问题 - - 如果访问的时候报这个异常:`java.net.UnknownHostException`,可以查看这篇文章: +## Jenkins 安装(YUM) + +- **需要 JDK8 环境** +- 官网安装说明 RedHat Linux RPM packages: +- 官网在线安装(72M): + +``` +sudo wget -O /etc/yum.repos.d/jenkins.repo https://pkg.jenkins.io/redhat-stable/jenkins.repo +sudo rpm --import https://pkg.jenkins.io/redhat-stable/jenkins.io.key + +yum install jenkins +``` + +- 查看安装后的情况:`rpm -ql jenkins` + +``` +/etc/init.d/jenkins +/etc/logrotate.d/jenkins +/etc/sysconfig/jenkins +/usr/lib/jenkins +/usr/lib/jenkins/jenkins.war +/usr/sbin/rcjenkins +/var/cache/jenkins +/var/lib/jenkins +/var/log/jenkins +``` 
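+
+- 补充一个快速验证的示例(个人补充,仅供参考):上面 `rpm -ql jenkins` 的输出里已经有服务脚本 /etc/init.d/jenkins,所以装完后可以直接用系统服务方式做个验证,下面命令假设 JDK8 已经在 PATH 中:
+
+```
+# 先确认 JDK8 可用,否则 Jenkins 服务会启动失败
+java -version
+
+# 用系统服务方式启动并查看状态(和下文的 java -jar 方式二选一即可)
+service jenkins start
+service jenkins status
+
+# 如果需要开机启动
+chkconfig jenkins on
+```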
+ +- jenkins 相关目录释义: + +``` +/usr/lib/jenkins/:jenkins安装目录,war 包会放在这里。 +/etc/sysconfig/jenkins:jenkins配置文件,“端口”,“JENKINS_HOME” 等都可以在这里配置。 +/var/lib/jenkins/:默认的 JENKINS_HOME。 +/var/log/jenkins/jenkins.log:jenkins 日志文件。 +``` + +- 配置 jenkins 端口,默认是:8080 + +``` +vim /etc/sysconfig/jenkins + +56 行:JENKINS_PORT="8080" +``` + +- 控制台输出方式启动:`java -jar /usr/lib/jenkins/jenkins.war` +- 可以看到有一个这个重点内容,这是你的初始化密码,等下会用到的: + + +``` +Jenkins initial setup is required. An admin user has been created and a password generated. +Please use the following password to proceed to installation: + +daacc724767640a29ddc99d159a80cf8 + +This may also be found at: /root/.jenkins/secrets/initialAdminPassword +``` + +- 守护进程启动:`nohup java -jar /usr/lib/jenkins/jenkins.war > /opt/jenkins-nohup.log 2>&1 &` +- 浏览器访问 Jenkins 首页开始配置: + + +------------------------------------------------------------------- ## 资料 From 0106a0a6bc235c02870b0af2554b64c21cbc4157 Mon Sep 17 00:00:00 2001 From: judasn Date: Sun, 7 Oct 2018 10:40:31 +0800 Subject: [PATCH 123/330] 2018-10-07 Jenkins --- markdown-file/Jenkins-Install-And-Settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index 7e12e852..74c530b2 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -105,7 +105,7 @@ Can not write to /var/jenkins_home/copy_reference_file.log. Wrong volume permiss - **需要 JDK8 环境** - 官网安装说明 RedHat Linux RPM packages: -- 官网在线安装(72M): +- 官网在线安装(72M),该安装方式会自己生成一个 jenkins 用户组和用户: ``` sudo wget -O /etc/yum.repos.d/jenkins.repo https://pkg.jenkins.io/redhat-stable/jenkins.repo From 302ecb175fd5ad4183b5ce21091a06c852747a63 Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 8 Oct 2018 09:47:20 +0800 Subject: [PATCH 124/330] =?UTF-8?q?2018-10-08=20=E8=A1=A5=E5=85=85=20WordP?= =?UTF-8?q?ress?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/WordPress-Install-And-Settings.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/markdown-file/WordPress-Install-And-Settings.md b/markdown-file/WordPress-Install-And-Settings.md index e8780619..39fdc9c2 100644 --- a/markdown-file/WordPress-Install-And-Settings.md +++ b/markdown-file/WordPress-Install-And-Settings.md @@ -107,11 +107,12 @@ pid-file=/var/run/mysqld/mysqld.pid ``` -## 安装 PHP 7(默认是 PHP 5.4) +## 安装 PHP 7 +- CentOS 7 默认是 PHP 5.4,版本太低 - 安装过程: -- 测试 PHP 安装结果,新建文件:`vim /var/www/html/info.php` - 默认配置文件位置:`vim /etc/php.ini` +- 测试 PHP 安装结果,新建文件:`vim /var/www/html/info.php` ``` Date: Mon, 8 Oct 2018 10:01:42 +0800 Subject: [PATCH 125/330] 2018-10-08 --- markdown-file/Jenkins-Install-And-Settings.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index 74c530b2..9df5658e 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -104,6 +104,7 @@ Can not write to /var/jenkins_home/copy_reference_file.log. Wrong volume permiss ## Jenkins 安装(YUM) - **需要 JDK8 环境** +- 当前最新版本:`2.138.1-1.1`(201810) - 官网安装说明 RedHat Linux RPM packages: - 官网在线安装(72M),该安装方式会自己生成一个 jenkins 用户组和用户: @@ -111,7 +112,7 @@ Can not write to /var/jenkins_home/copy_reference_file.log. 
Wrong volume permiss sudo wget -O /etc/yum.repos.d/jenkins.repo https://pkg.jenkins.io/redhat-stable/jenkins.repo sudo rpm --import https://pkg.jenkins.io/redhat-stable/jenkins.io.key -yum install jenkins +yum install -y jenkins ``` - 查看安装后的情况:`rpm -ql jenkins` From ed5d9b066e52d63f6359e8bf3266adc39c4a0160 Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 8 Oct 2018 16:27:22 +0800 Subject: [PATCH 126/330] 2018-10-08 --- .../shell/install_jdk_offline_to_bash.sh | 6 +++--- markdown-file/Jenkins-Install-And-Settings.md | 17 ++++++++++++++++- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/favorite-file/shell/install_jdk_offline_to_bash.sh b/favorite-file/shell/install_jdk_offline_to_bash.sh index bcb76d67..f1b7edce 100644 --- a/favorite-file/shell/install_jdk_offline_to_bash.sh +++ b/favorite-file/shell/install_jdk_offline_to_bash.sh @@ -24,9 +24,9 @@ fi echo "JDK 解压包移到 /usr/local/ 目录下" mv jdk1.8.0_181/ /usr/local/ -echo "JDK 写入系统变量到 bash_profile" +echo "JDK 写入系统变量到 profile" -cat << EOF >> ~/.bash_profile +cat << EOF >> /etc/profile # JDK JAVA_HOME=/usr/local/jdk1.8.0_181 @@ -40,4 +40,4 @@ export CLASSPATH EOF -echo "JDK 设置完成,需要你手动设置:source ~/.bash_profile" \ No newline at end of file +echo "JDK 设置完成,需要你手动设置:source /etc/profile" \ No newline at end of file diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index 9df5658e..8d3dd23e 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -112,7 +112,7 @@ Can not write to /var/jenkins_home/copy_reference_file.log. Wrong volume permiss sudo wget -O /etc/yum.repos.d/jenkins.repo https://pkg.jenkins.io/redhat-stable/jenkins.repo sudo rpm --import https://pkg.jenkins.io/redhat-stable/jenkins.io.key -yum install -y jenkins +sudo yum install -y jenkins ``` - 查看安装后的情况:`rpm -ql jenkins` @@ -147,6 +147,7 @@ vim /etc/sysconfig/jenkins ``` - 控制台输出方式启动:`java -jar /usr/lib/jenkins/jenkins.war` +- 内置 Jetty - 可以看到有一个这个重点内容,这是你的初始化密码,等下会用到的: @@ -161,11 +162,25 @@ This may also be found at: /root/.jenkins/secrets/initialAdminPassword - 守护进程启动:`nohup java -jar /usr/lib/jenkins/jenkins.war > /opt/jenkins-nohup.log 2>&1 &` - 浏览器访问 Jenkins 首页开始配置: +- 特殊情况: + - 如果配置插件过程遇到这个错误:`No valid crumb was included in the request`,则多重试几次。 + - 登录后把: 下面的 `防止跨站点请求伪造` 勾选去掉。遇到问题多试几次。 + + + +------------------------------------------------------------------- + +## Jenkins 前端 React 项目构建 + + + + ------------------------------------------------------------------- + ## 资料 - From 90c375c509a1716e0ae152bd155822811ebeaedc Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 8 Oct 2018 18:53:30 +0800 Subject: [PATCH 127/330] 2018-10-08 --- markdown-file/Jenkins-Install-And-Settings.md | 101 ++++++++++++++++++ markdown-file/Node-Install-And-Usage.md | 5 + 2 files changed, 106 insertions(+) diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index 8d3dd23e..67da5e86 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -166,12 +166,113 @@ This may also be found at: /root/.jenkins/secrets/initialAdminPassword - 如果配置插件过程遇到这个错误:`No valid crumb was included in the request`,则多重试几次。 - 登录后把: 下面的 `防止跨站点请求伪造` 勾选去掉。遇到问题多试几次。 +------------------------------------------------------------------- + +## pipeline 语法 + +- 全局 pipeline 语法说明: + +``` +BUILD_NUMBER = ${env.BUILD_NUMBER}" +BUILD_ID = ${env.BUILD_ID}" +BUILD_DISPLAY_NAME = ${env.BUILD_DISPLAY_NAME}" +JOB_NAME = 
${env.JOB_NAME}" +JOB_BASE_NAME = ${env.JOB_BASE_NAME}" +WORKSPACE = ${env.WORKSPACE}" +JENKINS_HOME = ${env.JENKINS_HOME}" +JENKINS_URL = ${env.JENKINS_URL}" +BUILD_URL = ${env.BUILD_URL}" +JOB_URL = ${env.JOB_URL}" +``` + +- 输出结果: + +``` +BUILD_NUMBER = 21 +BUILD_ID = 21 +BUILD_DISPLAY_NAME = #21 +JOB_NAME = react +JOB_BASE_NAME = react +WORKSPACE = /root/.jenkins/workspace/react +JENKINS_HOME = /root/.jenkins +JENKINS_URL = http://192.168.0.105:8080/ +BUILD_URL = http://192.168.0.105:8080/job/react/21/ +JOB_URL = http://192.168.0.105:8080/job/react/ +``` ------------------------------------------------------------------- ## Jenkins 前端 React 项目构建 +#### 简单的 pipeline 写法 + +``` +pipeline { + agent any + + options { + timestamps() + disableConcurrentBuilds() + buildDiscarder(logRotator( + numToKeepStr: '20', + daysToKeepStr: '30', + )) + } + + /*=======================================常修改变量-start=======================================*/ + + environment { + gitUrl = "https://github.com/satan31415/heh_umi_template.git" + branchName = "master" + projectBuildPath = "${env.WORKSPACE}/dist/" + nginxHtmlRoot = "/usr/share/nginx/react/" + } + + /*=======================================常修改变量-end=======================================*/ + + stages { + + stage('Pre Env') { + steps { + echo "======================================项目名称 = ${env.JOB_NAME}" + echo "======================================项目 URL = ${gitUrl}" + echo "======================================项目分支 = ${branchName}" + echo "======================================当前编译版本号 = ${env.BUILD_NUMBER}" + echo "======================================项目 Build 文件夹路径 = ${projectBuildPath}" + echo "======================================项目 Nginx 的 ROOT 路径 = ${nginxHtmlRoot}" + } + } + + stage('Git Clone'){ + steps { + git branch: "${branchName}", url: "${gitUrl}" + } + } + + stage('NPM Install') { + steps { + sh "npm install" + } + } + + stage('NPM Build') { + steps { + sh "npm run build" + } + } + + stage('Nginx Deploy') { + steps { + sh "rm -rf ${nginxHtmlRoot}" + sh "cp -r ${projectBuildPath} ${nginxHtmlRoot}" + } + } + + + } +} +``` diff --git a/markdown-file/Node-Install-And-Usage.md b/markdown-file/Node-Install-And-Usage.md index bd413346..80bc010a 100644 --- a/markdown-file/Node-Install-And-Usage.md +++ b/markdown-file/Node-Install-And-Usage.md @@ -24,4 +24,9 @@ sudo yum -y install nodejs - 注意:因为网络原因,最好先把脚本下载到本地,再用代理进行安装 +## nrm 快速切换 NPM 源 +- 安装:`npm install -g nrm` +- 列表源:`nrm ls` +- 使用源:`nrm use taobao` +- 更多使用方法: From e4bc60b458fb8cd95aa29f9db8e05b3061171e87 Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 9 Oct 2018 16:00:14 +0800 Subject: [PATCH 128/330] 2018-10-09 --- markdown-file/Jenkins-Install-And-Settings.md | 83 ++++++++++++++++++- 1 file changed, 82 insertions(+), 1 deletion(-) diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index 67da5e86..9a2a0312 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -171,6 +171,8 @@ This may also be found at: /root/.jenkins/secrets/initialAdminPassword ## pipeline 语法 - 全局 pipeline 语法说明: +- 其他资料 + - ``` BUILD_NUMBER = ${env.BUILD_NUMBER}" @@ -205,7 +207,7 @@ JOB_URL = http://192.168.0.105:8080/job/react/ ## Jenkins 前端 React 项目构建 -#### 简单的 pipeline 写法 +#### 简单的 pipeline 写法(开源项目) ``` pipeline { @@ -275,6 +277,85 @@ pipeline { ``` +#### 简单的 pipeline 写法(闭源项目 -- 码云为例) + +- 新增一个全局凭据: +- 类型:`Username with password` +- 范围:`全局` +- Username:`你的 Gitee 账号` +- Password:`你的 Gitee 密码` +- 
**ID**:`只要是唯一值就行,后面要用到` +- 描述:`最好跟 ID 一致,方便认` + +``` +pipeline { + agent any + + options { + timestamps() + disableConcurrentBuilds() + buildDiscarder(logRotator( + numToKeepStr: '20', + daysToKeepStr: '30', + )) + } + + /*=======================================常修改变量-start=======================================*/ + + environment { + gitUrl = "https://gitee.com/youmeek/react-demo.git" + branchName = "master" + projectBuildPath = "${env.WORKSPACE}/dist/" + nginxHtmlRoot = "/usr/share/nginx/react/" + giteeCredentialsId = "上面全局凭据填写的 ID" + } + + /*=======================================常修改变量-end=======================================*/ + + stages { + + stage('Pre Env') { + steps { + echo "======================================项目名称 = ${env.JOB_NAME}" + echo "======================================项目 URL = ${gitUrl}" + echo "======================================项目分支 = ${branchName}" + echo "======================================当前编译版本号 = ${env.BUILD_NUMBER}" + echo "======================================项目 Build 文件夹路径 = ${projectBuildPath}" + echo "======================================项目 Nginx 的 ROOT 路径 = ${nginxHtmlRoot}" + } + } + + stage('Git Clone'){ + steps { + git branch: "${branchName}", + credentialsId: "${giteeCredentialsId}", + url: "${gitUrl}" + } + } + + stage('NPM Install') { + steps { + sh "npm install" + } + } + + stage('NPM Build') { + steps { + sh "npm run build" + } + } + + stage('Nginx Deploy') { + steps { + sh "rm -rf ${nginxHtmlRoot}" + sh "cp -r ${projectBuildPath} ${nginxHtmlRoot}" + } + } + + + } +} +``` From d48e83a97317d38cd82f4f697a78718ab8b873b9 Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 9 Oct 2018 19:28:33 +0800 Subject: [PATCH 129/330] 2018-10-09 --- .../shell/install_jdk_offline_to_bash.sh | 1 + .../shell/install_jdk_offline_to_zsh.sh | 1 + .../shell/install_maven_offline_to_bash.sh | 44 ++++ markdown-file/Jenkins-Install-And-Settings.md | 204 +++++++++++++++++- markdown-file/Maven-Install-And-Settings.md | 133 ++++-------- 5 files changed, 280 insertions(+), 103 deletions(-) create mode 100644 favorite-file/shell/install_maven_offline_to_bash.sh diff --git a/favorite-file/shell/install_jdk_offline_to_bash.sh b/favorite-file/shell/install_jdk_offline_to_bash.sh index f1b7edce..1b60aeb0 100644 --- a/favorite-file/shell/install_jdk_offline_to_bash.sh +++ b/favorite-file/shell/install_jdk_offline_to_bash.sh @@ -37,6 +37,7 @@ export JAVA_HOME export JRE_HOME export PATH export CLASSPATH + EOF diff --git a/favorite-file/shell/install_jdk_offline_to_zsh.sh b/favorite-file/shell/install_jdk_offline_to_zsh.sh index afa7eb98..7fa52b7c 100644 --- a/favorite-file/shell/install_jdk_offline_to_zsh.sh +++ b/favorite-file/shell/install_jdk_offline_to_zsh.sh @@ -37,6 +37,7 @@ export JAVA_HOME export JRE_HOME export PATH export CLASSPATH + EOF diff --git a/favorite-file/shell/install_maven_offline_to_bash.sh b/favorite-file/shell/install_maven_offline_to_bash.sh new file mode 100644 index 00000000..179cf3fa --- /dev/null +++ b/favorite-file/shell/install_maven_offline_to_bash.sh @@ -0,0 +1,44 @@ +#!/bin/sh + +echo "判断常见的文件夹是否存在" + +if [ ! -d "/opt/setups" ]; then + mkdir /opt/setups +fi + +echo "判断 Maven 压缩包是否存在" + +if [ ! -f "/opt/setups/apache-maven-3.5.4-bin.tar.gz" ]; then + echo "Maven 压缩包不存在" + exit 1 +fi + +echo "开始解压 Maven" +cd /opt/setups ; tar -zxf apache-maven-3.5.4-bin.tar.gz + +if [ ! 
-d "/opt/setups/apache-maven-3.5.4" ]; then + echo "Maven 解压失败,结束脚本" + exit 1 +fi + +echo "Maven 解压包移到 /usr/local/ 目录下" +mv apache-maven-3.5.4/ /usr/local/ + +echo "Maven 写入系统变量到 profile" + +cat << EOF >> /etc/profile + +# Maven +M3_HOME=/usr/local/apache-maven-3.5.4 +MAVEN_HOME=/usr/local/apache-maven-3.5.4 +PATH=\$PATH:\$M3_HOME/bin +MAVEN_OPTS="-Xms256m -Xmx356m" +export M3_HOME +export MAVEN_HOME +export PATH +export MAVEN_OPTS + +EOF + + +echo "Maven 设置完成,需要你手动设置:source /etc/profile" \ No newline at end of file diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index 9a2a0312..78d4d0e8 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -227,8 +227,8 @@ pipeline { environment { gitUrl = "https://github.com/satan31415/heh_umi_template.git" branchName = "master" - projectBuildPath = "${env.WORKSPACE}/dist/" - nginxHtmlRoot = "/usr/share/nginx/react/" + projectBuildPath = "${env.WORKSPACE}/dist" + nginxHtmlRoot = "/usr/share/nginx/react" } /*=======================================常修改变量-end=======================================*/ @@ -266,8 +266,8 @@ pipeline { stage('Nginx Deploy') { steps { - sh "rm -rf ${nginxHtmlRoot}" - sh "cp -r ${projectBuildPath} ${nginxHtmlRoot}" + sh "rm -rf ${nginxHtmlRoot}/" + sh "cp -r ${projectBuildPath}/ ${nginxHtmlRoot}/" } } @@ -305,9 +305,9 @@ pipeline { environment { gitUrl = "https://gitee.com/youmeek/react-demo.git" branchName = "master" - projectBuildPath = "${env.WORKSPACE}/dist/" - nginxHtmlRoot = "/usr/share/nginx/react/" giteeCredentialsId = "上面全局凭据填写的 ID" + projectBuildPath = "${env.WORKSPACE}/dist" + nginxHtmlRoot = "/usr/share/nginx/react" } /*=======================================常修改变量-end=======================================*/ @@ -347,8 +347,8 @@ pipeline { stage('Nginx Deploy') { steps { - sh "rm -rf ${nginxHtmlRoot}" - sh "cp -r ${projectBuildPath} ${nginxHtmlRoot}" + sh "rm -rf ${nginxHtmlRoot}/" + sh "cp -r ${projectBuildPath}/ ${nginxHtmlRoot}/" } } @@ -358,6 +358,194 @@ pipeline { ``` +------------------------------------------------------------------- + +## Jenkins 后端 Spring Boot 项目构建 + +#### 安装 Maven + +- [参考该文章](Maven-Install-And-Settings.md) + +#### 配置工具 + +- 访问: +- 我习惯自己安装,所以这里修改配置: + - **需要注意**:配置里面的 `别名` 不要随便取名字,后面 Pipeline 要用到的。在 tool 标签里面会用到。 + +![screencaptu](https://upload-images.jianshu.io/upload_images/12159-ef61595aebaa4244.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) + + +#### 简单的 pipeline 写法(Jar 方式运行)(闭源项目 -- 码云为例) + +###### 先写一个控制 jar 脚本 + +- 来源:[鹏磊](https://segmentfault.com/a/1190000011504208) +- 创建脚本:`vim /etc/rc.d/init.d/spring-boot.sh` +- 设置权限:`chmod 777 /etc/rc.d/init.d/spring-boot.sh` +- 脚本内容: + + +``` +#!/bin/bash + +SpringBoot=$2 + +if [ "$1" = "" ]; +then + echo -e "\033[0;31m 未输入操作名 \033[0m \033[0;34m {start|stop|restart|status} \033[0m" + exit 1 +fi + +if [ "$SpringBoot" = "" ]; +then + echo -e "\033[0;31m 未输入应用名 \033[0m" + exit 1 +fi + +function start() +{ + count=`ps -ef |grep java|grep $SpringBoot|grep -v grep|wc -l` + if [ $count != 0 ];then + echo "$SpringBoot is running..." + else + echo "Start $SpringBoot success..." 
+ BUILD_ID=dontKillMe nohup java -jar $SpringBoot > /dev/null 2>&1 & + fi +} + +function stop() +{ + echo "Stop $SpringBoot" + boot_id=`ps -ef |grep java|grep $SpringBoot|grep -v grep|awk '{print $2}'` + count=`ps -ef |grep java|grep $SpringBoot|grep -v grep|wc -l` + + if [ $count != 0 ];then + kill $boot_id + count=`ps -ef |grep java|grep $SpringBoot|grep -v grep|wc -l` + + boot_id=`ps -ef |grep java|grep $SpringBoot|grep -v grep|awk '{print $2}'` + kill -9 $boot_id + fi +} + +function restart() +{ + stop + sleep 2 + start +} + +function status() +{ + count=`ps -ef |grep java|grep $SpringBoot|grep -v grep|wc -l` + if [ $count != 0 ];then + echo "$SpringBoot is running..." + else + echo "$SpringBoot is not running..." + fi +} + +case $1 in + start) + start;; + stop) + stop;; + restart) + restart;; + status) + status;; + *) + + echo -e "\033[0;31m Usage: \033[0m \033[0;34m sh $0 {start|stop|restart|status} {SpringBootJarName} \033[0m\033[0;31m Example: \033[0m\033[0;33m sh $0 start esmart-test.jar \033[0m" +esac +``` + + +###### 配置 Jenkins + +- **必须**:新增一个全局凭据,方法参考前端部分 + +``` +pipeline { + agent any + + /*=======================================工具环境修改-start=======================================*/ + tools { + jdk 'JDK8' + maven 'MAVEN3' + } + /*=======================================工具环境修改-end=======================================*/ + + options { + timestamps() + disableConcurrentBuilds() + buildDiscarder(logRotator( + numToKeepStr: '20', + daysToKeepStr: '30', + )) + } + + /*=======================================常修改变量-start=======================================*/ + + environment { + gitUrl = "https://gitee.com/youmeek/springboot-jenkins-demo.git" + branchName = "master" + giteeCredentialsId = "Gitee" + projectWorkSpacePath = "${env.WORKSPACE}" + projectBuildTargetPath = "${env.WORKSPACE}/target" + projectJarNewName = "${env.JOB_NAME}.jar" + + } + + /*=======================================常修改变量-end=======================================*/ + + stages { + + stage('Pre Env') { + steps { + echo "======================================项目名称 = ${env.JOB_NAME}" + echo "======================================项目 URL = ${gitUrl}" + echo "======================================项目分支 = ${branchName}" + echo "======================================当前编译版本号 = ${env.BUILD_NUMBER}" + echo "======================================项目空间文件夹路径 = ${projectWorkSpacePath}" + echo "======================================项目 build 后 jar 路径 = ${projectBuildTargetPath}" + } + } + + stage('Git Clone'){ + steps { + git branch: "${branchName}", + credentialsId: "${giteeCredentialsId}", + url: "${gitUrl}" + } + } + + stage('Maven Clean') { + steps { + sh "mvn clean" + } + } + + stage('Maven Package') { + steps { + sh "mvn package -DskipTests" + } + } + + stage('Spring Boot Run') { + steps { + sh "mv ${projectBuildTargetPath}/*.jar ${projectBuildTargetPath}/${projectJarNewName}" + sh "cp ${projectBuildTargetPath}/${projectJarNewName} /opt/" + sh "cp /etc/rc.d/init.d/spring-boot.sh /opt/" + sh "chmod 777 /opt/spring-boot.sh" + sh "bash /opt/spring-boot.sh restart ${projectJarNewName}" + } + } + + } +} +``` + ------------------------------------------------------------------- diff --git a/markdown-file/Maven-Install-And-Settings.md b/markdown-file/Maven-Install-And-Settings.md index 6a432db7..fa342b94 100644 --- a/markdown-file/Maven-Install-And-Settings.md +++ b/markdown-file/Maven-Install-And-Settings.md @@ -1,42 +1,42 @@ # Maven 安装和配置 +## Maven 资料 + +- 官网: +- 官网下载: +- 历史版本下载: +- 此时(20160208) Maven 最新版本为:**3.3.9** + +## Maven 
安装(bash 环境) + +- Maven 3.3 的 JDK 最低要求是 JDK 7 +- 下载压缩包:`wget http://mirrors.cnnic.cn/apache/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz` +- 解压:`tar zxvf apache-maven-3.3.9-bin.tar.gz` +- 修改目录名,默认的太长了:`mv apache-maven-3.3.9/ maven3.3.9/` +- 移到我个人习惯的安装目录下:`mv maven3.3.9/ /usr/local` +- 环境变量设置:`vim /etc/profile` +- 在文件最尾巴添加下面内容: + +``` ini +# Maven +MAVEN_HOME=/usr/local/maven3.3.9 +M3_HOME=/usr/local/maven3.3.9 +PATH=$PATH:$M3_HOME/bin +MAVEN_OPTS="-Xms256m -Xmx356m" +export M3_HOME +export MAVEN_HOME +export PATH +export MAVEN_OPTS +``` - -## Maven 安装 - -- Maven 安装 - - 官网: - - 官网下载: - - 历史版本下载: - - 此时(20160208) Maven 最新版本为:**3.3.9** - - Maven 3.3 的 JDK 最低要求是 JDK 7 - - 我个人习惯 `/opt` 目录下创建一个目录 `setups` 用来存放各种软件安装包;在 `/usr` 目录下创建一个 `program` 用来存放各种解压后的软件包,下面的讲解也都是基于此习惯 - - 我个人已经使用了第三方源:`EPEL、RepoForge`,如果你出现 `yum install XXXXX` 安装不成功的话,很有可能就是你没有相关源,请查看我对源设置的文章 - - 下载压缩包:`wget http://mirrors.cnnic.cn/apache/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz` - - 解压:`tar zxvf apache-maven-3.3.9-bin.tar.gz` - - 修改目录名,默认的太长了:`mv apache-maven-3.3.9/ maven3.3.9/` - - 移到我个人习惯的安装目录下:`mv maven3.3.9/ /usr/program` - - 环境变量设置:`vim /etc/profile` - - 在文件最尾巴添加下面内容: - - ``` ini - # Maven - MAVEN_HOME=/usr/program/maven3.3.9 - PATH=$PATH:$MAVEN_HOME/bin - MAVEN_OPTS="-Xms256m -Xmx356m" - export MAVEN_HOME - export PATH - export MAVEN_OPTS - ``` - - - 刷新配置文件:`source /etc/profile` - - 测试是否安装成功:`mvn -version` - +- 刷新配置文件:`source /etc/profile` +- 测试是否安装成功:`mvn -version` ## Maven 配置 +- 创建本地参数:`mkdir -p /opt/maven-repository` - 配置项目连接上私服 -- 全局方式配置: +- 编辑配置文件:`vim /usr/local/maven3.3.9/conf/settings.xml` ``` xml @@ -44,7 +44,7 @@ - D:/maven/my_local_repository + /opt/maven-repository @@ -66,80 +66,23 @@ - + - nexus-releases + aliyun-releases * - http://localhost:8081/nexus/content/groups/public + http://maven.aliyun.com/nexus/content/groups/public/ - nexus-snapshots + aliyun-snapshots * - http://localhost:8081/nexus/content/groups/public-snapshots + http://maven.aliyun.com/nexus/content/groups/public/ - - - - nexus - - - nexus-releases - http://nexus-releases - - true - - - true - - - - nexus-snapshots - http://nexus-snapshots - - true - - - true - - - - - - nexus-releases - http://nexus-releases - - true - - - true - - - - nexus-snapshots - http://nexus-snapshots - - true - - - true - - - - - - - - nexus - - ``` -- 项目级别: - From 9077cd0fb1ac7a2f733efc26782357ed428d4748 Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 9 Oct 2018 19:50:51 +0800 Subject: [PATCH 130/330] 2018-10-09 --- markdown-file/Jenkins-Install-And-Settings.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index 78d4d0e8..465f8256 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -160,7 +160,7 @@ daacc724767640a29ddc99d159a80cf8 This may also be found at: /root/.jenkins/secrets/initialAdminPassword ``` -- 守护进程启动:`nohup java -jar /usr/lib/jenkins/jenkins.war > /opt/jenkins-nohup.log 2>&1 &` +- 守护进程启动:`nohup java -jar /usr/lib/jenkins/jenkins.war > /dev/null 2>&1 &` - 浏览器访问 Jenkins 首页开始配置: - 特殊情况: - 如果配置插件过程遇到这个错误:`No valid crumb was included in the request`,则多重试几次。 @@ -535,10 +535,8 @@ pipeline { stage('Spring Boot Run') { steps { sh "mv ${projectBuildTargetPath}/*.jar ${projectBuildTargetPath}/${projectJarNewName}" - sh "cp ${projectBuildTargetPath}/${projectJarNewName} /opt/" - sh "cp /etc/rc.d/init.d/spring-boot.sh /opt/" - sh "chmod 777 
/opt/spring-boot.sh" - sh "bash /opt/spring-boot.sh restart ${projectJarNewName}" + sh "cp /etc/rc.d/init.d/spring-boot.sh ${projectBuildTargetPath}" + sh "bash ${projectBuildTargetPath}/spring-boot.sh restart ${projectJarNewName}" } } @@ -547,8 +545,10 @@ pipeline { ``` -------------------------------------------------------------------- +#### 简单的 pipeline 写法(Docker 方式运行)(闭源项目 -- 码云为例) + +------------------------------------------------------------------- ## 资料 From 07c205b2bff027e759915bc3dc15f7f122531f89 Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 9 Oct 2018 19:53:39 +0800 Subject: [PATCH 131/330] 2018-10-09 --- markdown-file/Jenkins-Install-And-Settings.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index 465f8256..d99515d5 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -371,8 +371,7 @@ pipeline { - 访问: - 我习惯自己安装,所以这里修改配置: - **需要注意**:配置里面的 `别名` 不要随便取名字,后面 Pipeline 要用到的。在 tool 标签里面会用到。 - -![screencaptu](https://upload-images.jianshu.io/upload_images/12159-ef61595aebaa4244.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) + - 具体可以查看该图片说明:[点击查看](https://upload-images.jianshu.io/upload_images/12159-ef61595aebaa4244.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) #### 简单的 pipeline 写法(Jar 方式运行)(闭源项目 -- 码云为例) From 8f49a3cab21146aab0c98c0ed8ed759d63315efc Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 9 Oct 2018 19:55:25 +0800 Subject: [PATCH 132/330] 2018-10-09 --- markdown-file/Jenkins-Install-And-Settings.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index d99515d5..cb838a29 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -379,8 +379,8 @@ pipeline { ###### 先写一个控制 jar 脚本 - 来源:[鹏磊](https://segmentfault.com/a/1190000011504208) -- 创建脚本:`vim /etc/rc.d/init.d/spring-boot.sh` -- 设置权限:`chmod 777 /etc/rc.d/init.d/spring-boot.sh` +- 创建脚本:`vim /opt/spring-boot.sh` +- 设置权限:`chmod 777 /opt/spring-boot.sh` - 脚本内容: @@ -534,7 +534,7 @@ pipeline { stage('Spring Boot Run') { steps { sh "mv ${projectBuildTargetPath}/*.jar ${projectBuildTargetPath}/${projectJarNewName}" - sh "cp /etc/rc.d/init.d/spring-boot.sh ${projectBuildTargetPath}" + sh "cp /opt/spring-boot.sh ${projectBuildTargetPath}" sh "bash ${projectBuildTargetPath}/spring-boot.sh restart ${projectJarNewName}" } } From c5ab73c0d1fc80a8843d2b51f669090f51142dab Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 10 Oct 2018 12:21:11 +0800 Subject: [PATCH 133/330] nginx --- favorite-file/Nginx-Settings/nginx.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/favorite-file/Nginx-Settings/nginx.conf b/favorite-file/Nginx-Settings/nginx.conf index 490367d2..6460dd24 100644 --- a/favorite-file/Nginx-Settings/nginx.conf +++ b/favorite-file/Nginx-Settings/nginx.conf @@ -88,7 +88,7 @@ http { #静态资源转发 #由nginx处理静态页面 - location ~ .*\.(gif|jpg|jpeg|png|bmp|swf|ico|woff|woff2|ttf|eot)$ { + location ~ .*\.(gif|jpg|jpeg|png|bmp|swf|ico|woff|woff2|ttf|eot|txt|svg)$ { root /usr/program/tomcat8/webapps/ROOT;#这里直接写tomcat的程序里面的静态资源目录 expires 30d;#使用expires缓存模块,缓存到客户端30天(这个模块看下有没有安装) } From 2343267aa1ef30791da105570b0737630178aae2 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 10 Oct 2018 15:03:47 +0800 Subject: [PATCH 134/330] nginx --- markdown-file/monitor.md | 4 
++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index 80daaa3c..33ed429a 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -813,12 +813,12 @@ Out of memory: Kill process 19452 (java) score 264 or sacrifice child - 使用内置 tomcat-manager 监控配置,或者使用类似工具:psi-probe - 使用 `ps -ef | grep java`,查看进程 PID - 根据高 CPU 的进程 PID,查看其线程 CPU 使用情况:`top -Hp PID`,找到占用 CPU 资源高的线程 PID - - 查看堆栈情况:`jstack -l PID >> /opt/jstack-tomcat1-20180917.log` + - 查看堆栈情况:`jstack -l PID >> /opt/jstack-tomcat1-PID-20180917.log` - 把占用 CPU 资源高的线程十进制的 PID 转换成 16 进制:`printf "%x\n" PID`,比如:`printf "%x\n" 12401` 得到结果是:`3071` - 在刚刚输出的那个 log 文件中搜索:`3071`,可以找到:`nid=0x3071` - 使用 `jstat -gc PID 250 10`,查看gc情况(截图) - 使用 `jstat -gccause PID`:额外输出上次GC原因(截图) - - 使用 `jmap -dump:format=b,file=/opt/dumpfile-tomcat1-20180917 PID`,生成堆转储文件 + - 使用 `jmap -dump:format=b,file=/opt/dumpfile-tomcat1-PID-20180917 PID`,生成堆转储文件 - 使用 jhat 或者可视化工具(Eclipse Memory Analyzer 、IBM HeapAnalyzer)分析堆情况。 - 结合代码解决内存溢出或泄露问题。 From 953f4149bdf93e0537fcdcc22ea9889828076758 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 10 Oct 2018 16:39:14 +0800 Subject: [PATCH 135/330] nginx --- markdown-file/monitor.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index 33ed429a..bddeca51 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -818,9 +818,11 @@ Out of memory: Kill process 19452 (java) score 264 or sacrifice child - 在刚刚输出的那个 log 文件中搜索:`3071`,可以找到:`nid=0x3071` - 使用 `jstat -gc PID 250 10`,查看gc情况(截图) - 使用 `jstat -gccause PID`:额外输出上次GC原因(截图) - - 使用 `jmap -dump:format=b,file=/opt/dumpfile-tomcat1-PID-20180917 PID`,生成堆转储文件 + - 使用 `jstat -gccause PID 10000 10`:额外输出上次GC原因,收集 10 次,每隔 10 秒 + - 使用 `jmap -dump:format=b,file=/opt/dumpfile-tomcat1-PID-20180917.hprof PID`,生成堆转储文件 - 使用 jhat 或者可视化工具(Eclipse Memory Analyzer 、IBM HeapAnalyzer)分析堆情况。 - 结合代码解决内存溢出或泄露问题。 + - 给 VM 增加 dump 触发参数:`-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/opt/tomcat-1.hprof` #### CPU 低,负载高,访问慢(带数据库) From a57e463119ab514936f3d03cb3f908b122f0b7d1 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 10 Oct 2018 19:02:31 +0800 Subject: [PATCH 136/330] nginx --- markdown-file/Tomcat-Install-And-Settings.md | 15 +++++++++++++++ markdown-file/monitor.md | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/markdown-file/Tomcat-Install-And-Settings.md b/markdown-file/Tomcat-Install-And-Settings.md index 68476691..beeedd53 100644 --- a/markdown-file/Tomcat-Install-And-Settings.md +++ b/markdown-file/Tomcat-Install-And-Settings.md @@ -366,6 +366,21 @@ EXPOSE 8081 - **Tomcat 7.0.68**:`wget http://apache.fayea.com/tomcat/tomcat-7/v7.0.68/bin/apache-tomcat-7.0.68.tar.gz` - **Tomcat 6.0.45**:`wget http://mirrors.cnnic.cn/apache/tomcat/tomcat-6/v6.0.45/bin/apache-tomcat-6.0.45.tar.gz` +## 其他问题 + + +#### log4j2 输出的时间与北京时间相差 8 小时 + +- 原因是系统时区不对。 +- 设置时区: + +``` +timedatectl set-timezone Asia/Shanghai +timedatectl status +``` + + + ## 资料 - diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index bddeca51..e86f0257 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -816,7 +816,7 @@ Out of memory: Kill process 19452 (java) score 264 or sacrifice child - 查看堆栈情况:`jstack -l PID >> /opt/jstack-tomcat1-PID-20180917.log` - 把占用 CPU 资源高的线程十进制的 PID 转换成 16 进制:`printf "%x\n" PID`,比如:`printf "%x\n" 12401` 得到结果是:`3071` - 在刚刚输出的那个 log 文件中搜索:`3071`,可以找到:`nid=0x3071` - - 使用 `jstat -gc PID 250 10`,查看gc情况(截图) + - 使用 `jstat 
-gc PID 10000 10`,查看gc情况(截图) - 使用 `jstat -gccause PID`:额外输出上次GC原因(截图) - 使用 `jstat -gccause PID 10000 10`:额外输出上次GC原因,收集 10 次,每隔 10 秒 - 使用 `jmap -dump:format=b,file=/opt/dumpfile-tomcat1-PID-20180917.hprof PID`,生成堆转储文件 From 8357d982cd6787321e22d94c83e0c7628b9d45a6 Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 11 Oct 2018 18:55:09 +0800 Subject: [PATCH 137/330] jenkins --- markdown-file/Daemontools.md | 4 + markdown-file/Jenkins-Install-And-Settings.md | 116 +++++------------- 2 files changed, 35 insertions(+), 85 deletions(-) diff --git a/markdown-file/Daemontools.md b/markdown-file/Daemontools.md index c6644ee6..2a61c9b9 100644 --- a/markdown-file/Daemontools.md +++ b/markdown-file/Daemontools.md @@ -82,8 +82,12 @@ killasgroup=true - 启动程序(默认会启动所有子任务):`/usr/bin/supervisord -c /etc/supervisord.conf` - 管理子任务的命令: + - 子任务状态:`/usr/bin/supervisorctl status` - 启动所有子任务:`/usr/bin/supervisorctl start all` - 结束所有子任务:`/usr/bin/supervisorctl stop all` + - 启动指定子任务:`/usr/bin/supervisorctl start gitnavi-logstash` + - 结束指定子任务:`/usr/bin/supervisorctl stop gitnavi-logstash` + - 重启指定子任务:`/usr/bin/supervisorctl restart gitnavi-logstash` - 只载入最新的配置文件, 并不重启任何进程:`/usr/bin/supervisorctl reread` - 载入最新的配置文件,停止原来的所有进程并按新的配置启动管理所有进程:`/usr/bin/supervisorctl reload` - 根据最新的配置文件,启动新配置或有改动的进程,配置没有改动的进程不会受影响而重启:`/usr/bin/supervisorctl update` diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index cb838a29..4da4b588 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -376,89 +376,10 @@ pipeline { #### 简单的 pipeline 写法(Jar 方式运行)(闭源项目 -- 码云为例) -###### 先写一个控制 jar 脚本 - -- 来源:[鹏磊](https://segmentfault.com/a/1190000011504208) -- 创建脚本:`vim /opt/spring-boot.sh` -- 设置权限:`chmod 777 /opt/spring-boot.sh` -- 脚本内容: - - -``` -#!/bin/bash - -SpringBoot=$2 - -if [ "$1" = "" ]; -then - echo -e "\033[0;31m 未输入操作名 \033[0m \033[0;34m {start|stop|restart|status} \033[0m" - exit 1 -fi - -if [ "$SpringBoot" = "" ]; -then - echo -e "\033[0;31m 未输入应用名 \033[0m" - exit 1 -fi - -function start() -{ - count=`ps -ef |grep java|grep $SpringBoot|grep -v grep|wc -l` - if [ $count != 0 ];then - echo "$SpringBoot is running..." - else - echo "Start $SpringBoot success..." - BUILD_ID=dontKillMe nohup java -jar $SpringBoot > /dev/null 2>&1 & - fi -} - -function stop() -{ - echo "Stop $SpringBoot" - boot_id=`ps -ef |grep java|grep $SpringBoot|grep -v grep|awk '{print $2}'` - count=`ps -ef |grep java|grep $SpringBoot|grep -v grep|wc -l` - - if [ $count != 0 ];then - kill $boot_id - count=`ps -ef |grep java|grep $SpringBoot|grep -v grep|wc -l` - - boot_id=`ps -ef |grep java|grep $SpringBoot|grep -v grep|awk '{print $2}'` - kill -9 $boot_id - fi -} - -function restart() -{ - stop - sleep 2 - start -} - -function status() -{ - count=`ps -ef |grep java|grep $SpringBoot|grep -v grep|wc -l` - if [ $count != 0 ];then - echo "$SpringBoot is running..." - else - echo "$SpringBoot is not running..." 
- fi -} - -case $1 in - start) - start;; - stop) - stop;; - restart) - restart;; - status) - status;; - *) - - echo -e "\033[0;31m Usage: \033[0m \033[0;34m sh $0 {start|stop|restart|status} {SpringBootJarName} \033[0m\033[0;31m Example: \033[0m\033[0;33m sh $0 start esmart-test.jar \033[0m" -esac -``` +###### 用 supervisord 做进程控制 +- [supervisord 的使用](Daemontools.md) +- 生成 supervisord 的配置文件会写在 Pipeline,所以只要你保证服务器 supervisord 正常运行即可 ###### 配置 Jenkins @@ -533,9 +454,34 @@ pipeline { stage('Spring Boot Run') { steps { - sh "mv ${projectBuildTargetPath}/*.jar ${projectBuildTargetPath}/${projectJarNewName}" - sh "cp /opt/spring-boot.sh ${projectBuildTargetPath}" - sh "bash ${projectBuildTargetPath}/spring-boot.sh restart ${projectJarNewName}" + +sh """ +mv ${projectBuildTargetPath}/*.jar ${projectBuildTargetPath}/${projectJarNewName} + +if [ ! -f /etc/supervisor/conf.d/${env.JOB_NAME}.conf ]; then + +touch /etc/supervisor/conf.d/${env.JOB_NAME}.conf + +cat << EOF >> /etc/supervisor/conf.d/${env.JOB_NAME}.conf +[program:${env.JOB_NAME}] +command=java -jar ${projectBuildTargetPath}/${projectJarNewName} +stdout_logfile=/var/log/supervisor/${env.JOB_NAME}.log +stderr_logfile=/var/log/supervisor/${env.JOB_NAME}-err.log +user=root +autostart=true +autorestart=true +startsecs=5 +priority=1 +stopasgroup=true +killasgroup=true +EOF + +/usr/bin/supervisorctl update +fi + +/usr/bin/supervisorctl restart ${env.JOB_NAME} +""" + } } From 883370650cc330fdd9d76794a11422cd0bc5daa8 Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 11 Oct 2018 18:58:41 +0800 Subject: [PATCH 138/330] jenkins --- markdown-file/Daemontools.md | 3 ++- markdown-file/Jenkins-Install-And-Settings.md | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/markdown-file/Daemontools.md b/markdown-file/Daemontools.md index 2a61c9b9..130ef7dc 100644 --- a/markdown-file/Daemontools.md +++ b/markdown-file/Daemontools.md @@ -73,13 +73,14 @@ stdout_logfile=/var/log/supervisor/supervisord-logstash.log stderr_logfile=/var/log/supervisor/supervisord-logstash-err.log user=root autostart=true -autorestart=true +autorestart=false startsecs=5 priority=1 stopasgroup=true killasgroup=true ``` +- 该配置的具体说明可以参考:[使用 supervisor 管理进程](http://liyangliang.me/posts/2015/06/using-supervisor/) - 启动程序(默认会启动所有子任务):`/usr/bin/supervisord -c /etc/supervisord.conf` - 管理子任务的命令: - 子任务状态:`/usr/bin/supervisorctl status` diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index 4da4b588..913685a5 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -469,7 +469,7 @@ stdout_logfile=/var/log/supervisor/${env.JOB_NAME}.log stderr_logfile=/var/log/supervisor/${env.JOB_NAME}-err.log user=root autostart=true -autorestart=true +autorestart=false startsecs=5 priority=1 stopasgroup=true From 476815c5325304801fee7ae8392d834a8af82884 Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 11 Oct 2018 19:01:20 +0800 Subject: [PATCH 139/330] jenkins --- markdown-file/Jenkins-Install-And-Settings.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index 913685a5..7bd3f671 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -414,6 +414,8 @@ pipeline { projectWorkSpacePath = "${env.WORKSPACE}" projectBuildTargetPath = "${env.WORKSPACE}/target" projectJarNewName = "${env.JOB_NAME}.jar" + 
supervisorConfigFileFullPath = "/etc/supervisor/conf.d/${env.JOB_NAME}.conf" + supervisorLogPath = "/var/log/supervisor" } @@ -458,15 +460,15 @@ pipeline { sh """ mv ${projectBuildTargetPath}/*.jar ${projectBuildTargetPath}/${projectJarNewName} -if [ ! -f /etc/supervisor/conf.d/${env.JOB_NAME}.conf ]; then +if [ ! -f ${supervisorConfigFileFullPath} ]; then -touch /etc/supervisor/conf.d/${env.JOB_NAME}.conf +touch ${supervisorConfigFileFullPath} -cat << EOF >> /etc/supervisor/conf.d/${env.JOB_NAME}.conf +cat << EOF >> ${supervisorConfigFileFullPath} [program:${env.JOB_NAME}] command=java -jar ${projectBuildTargetPath}/${projectJarNewName} -stdout_logfile=/var/log/supervisor/${env.JOB_NAME}.log -stderr_logfile=/var/log/supervisor/${env.JOB_NAME}-err.log +stdout_logfile=${supervisorLogPath}/${env.JOB_NAME}.log +stderr_logfile=${supervisorLogPath}/${env.JOB_NAME}-err.log user=root autostart=true autorestart=false From 2a60c2efa9c49b2401284fa6ddeb2a6730da8b2e Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 12 Oct 2018 09:40:48 +0800 Subject: [PATCH 140/330] jenkins --- markdown-file/Jenkins-Install-And-Settings.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index 7bd3f671..78689c57 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -431,6 +431,9 @@ pipeline { echo "======================================当前编译版本号 = ${env.BUILD_NUMBER}" echo "======================================项目空间文件夹路径 = ${projectWorkSpacePath}" echo "======================================项目 build 后 jar 路径 = ${projectBuildTargetPath}" + echo "======================================项目 jar 新名称 = ${projectJarNewName}" + echo "======================================supervisor 配置文件路径 = ${supervisorConfigFileFullPath}" + echo "======================================supervisor 存放 log 路径 = ${supervisorLogPath}" } } @@ -495,6 +498,13 @@ fi #### 简单的 pipeline 写法(Docker 方式运行)(闭源项目 -- 码云为例) + + + + + + + ------------------------------------------------------------------- From 0ae220249799f8e9485ed41e5891894ed3cee904 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 12 Oct 2018 09:45:22 +0800 Subject: [PATCH 141/330] jenkins --- ...ocker_disable_firewalld_centos7-aliyun.sh} | 0 favorite-file/shell/with-param-demo.sh | 89 +++++++++++++++++++ 2 files changed, 89 insertions(+) rename favorite-file/shell/{install_aliyun_docker_disable_firewalld_centos7.sh => install_docker_disable_firewalld_centos7-aliyun.sh} (100%) create mode 100644 favorite-file/shell/with-param-demo.sh diff --git a/favorite-file/shell/install_aliyun_docker_disable_firewalld_centos7.sh b/favorite-file/shell/install_docker_disable_firewalld_centos7-aliyun.sh similarity index 100% rename from favorite-file/shell/install_aliyun_docker_disable_firewalld_centos7.sh rename to favorite-file/shell/install_docker_disable_firewalld_centos7-aliyun.sh diff --git a/favorite-file/shell/with-param-demo.sh b/favorite-file/shell/with-param-demo.sh new file mode 100644 index 00000000..f59e9a79 --- /dev/null +++ b/favorite-file/shell/with-param-demo.sh @@ -0,0 +1,89 @@ +#!/bin/bash + + +methodParam=$1 +SpringBoot=$2 +SpringBootPath=$3 + +if [ "$methodParam" = "" ]; +then + echo -e "\033[0;31m 未输入操作名 \033[0m \033[0;34m {start|stop|restart|status} \033[0m" + exit 1 +fi + +if [ "$SpringBoot" = "" ]; +then + echo -e "\033[0;31m 未输入应用名 \033[0m" + exit 1 +fi + +if [ "$SpringBootPath" = "" ]; +then + echo -e "\033[0;31m 未输入应用路径 
\033[0m" + exit 1 +fi + +echo "操作名 = $methodParam" +echo "应用名 = $2" +echo "应用路径 = $3" + +function start() +{ + count=`ps -ef |grep java|grep $SpringBoot|grep -v grep|wc -l` + if [ $count != 0 ];then + echo "$SpringBoot is running..." + else + echo "Start $SpringBoot success..." + cd $SpringBootPath + BUILD_ID=dontKillMe nohup java -jar $SpringBoot > /opt/run-log.log 2>&1 & + fi +} + +function stop() +{ + echo "Stop $SpringBoot" + boot_id=`ps -ef |grep java|grep $SpringBoot|grep -v grep|awk '{print $2}'` + count=`ps -ef |grep java|grep $SpringBoot|grep -v grep|wc -l` + + if [ $count != 0 ];then + kill $boot_id + count=`ps -ef |grep java|grep $SpringBoot|grep -v grep|wc -l` + + boot_id=`ps -ef |grep java|grep $SpringBoot|grep -v grep|awk '{print $2}'` + kill -9 $boot_id + fi +} + +function restart() +{ + stop + sleep 2 + start +} + +function status() +{ + count=`ps -ef |grep java|grep $SpringBoot|grep -v grep|wc -l` + if [ $count != 0 ];then + echo "$SpringBoot is running..." + else + echo "$SpringBoot is not running..." + fi +} + +case $methodParam in + start) + start;; + stop) + stop;; + restart) + restart;; + status) + status;; + *) + + echo -e "\033[0;31m Usage: \033[0m \033[0;34m sh $0 {start|stop|restart|status} {SpringBootJarName} \033[0m\033[0;31m Example: \033[0m\033[0;33m sh $0 start esmart-test.jar \033[0m" +esac + + + From 885b439d17cb9a77aeff9d512ca628aae17854b9 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 12 Oct 2018 10:26:34 +0800 Subject: [PATCH 142/330] jenkins --- markdown-file/Jenkins-Install-And-Settings.md | 125 ++++++++++++++++++ 1 file changed, 125 insertions(+) diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index 78689c57..722f3893 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -498,9 +498,134 @@ fi #### 简单的 pipeline 写法(Docker 方式运行)(闭源项目 -- 码云为例) +- **确保** 项目根目录有 Dockerfile 文件(部分内容自己修改),内容模板: +``` +FROM java:8 +VOLUME /tmp + +ENV TZ=Asia/Shanghai +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + +ADD ./target/buildApp.jar /app.jar + +RUN bash -c 'touch /app.jar' + +EXPOSE 8081 + +ENTRYPOINT ["java", "-jar", "-Xms512M", "-Xmx512M" , "-XX:MetaspaceSize=128M", "-XX:MaxMetaspaceSize=256M" ,"/app.jar"] +``` + +- Pipeline 写法 + +``` +pipeline { + agent any + + /*=======================================工具环境修改-start=======================================*/ + tools { + jdk 'JDK8' + maven 'MAVEN3' + } + /*=======================================工具环境修改-end=======================================*/ + + options { + timestamps() + disableConcurrentBuilds() + buildDiscarder(logRotator( + numToKeepStr: '20', + daysToKeepStr: '30', + )) + } + /*=======================================常修改变量-start=======================================*/ + environment { + gitUrl = "https://gitee.com/youmeek/springboot-jenkins-demo.git" + branchName = "master" + giteeCredentialsId = "Gitee" + projectWorkSpacePath = "${env.WORKSPACE}" + projectBuildTargetPath = "${env.WORKSPACE}/target" + projectJarNewName = "buildApp.jar" + + + dockerImageName = "docker.youmeek.com/demo/${env.JOB_NAME}:${env.BUILD_NUMBER}" + dockerContainerName = "${env.JOB_NAME}" + inHostPort = "8082" + inDockerAndJavaPort = "8081" + inHostLogPath = "/data/docker/logs/${dockerContainerName}" + inDockerLogPath = "/data/logs" + dockerRunParam = "--name ${dockerContainerName} -v /etc/hosts:/etc/hosts -v ${inHostLogPath}:${inDockerLogPath} --restart=always -p 
${inHostPort}:${inDockerAndJavaPort}" + } + + /*=======================================常修改变量-end=======================================*/ + + stages { + + stage('Pre Env') { + steps { + echo "======================================项目名称 = ${env.JOB_NAME}" + echo "======================================项目 URL = ${gitUrl}" + echo "======================================项目分支 = ${branchName}" + echo "======================================当前编译版本号 = ${env.BUILD_NUMBER}" + echo "======================================项目空间文件夹路径 = ${projectWorkSpacePath}" + echo "======================================项目 build 后 jar 路径 = ${projectBuildTargetPath}" + echo "======================================项目 jar 新名称 = ${projectJarNewName}" + echo "======================================Docker 镜像名称 = ${dockerImageName}" + echo "======================================Docker 容器名称 = ${dockerContainerName}" + } + } + + stage('Git Clone'){ + steps { + git branch: "${branchName}", + credentialsId: "${giteeCredentialsId}", + url: "${gitUrl}" + } + } + + stage('Maven Clean') { + steps { + sh "mvn clean" + } + } + + stage('Maven Package') { + steps { + sh "mvn package -DskipTests" + } + } + + stage('构建 Docker 镜像') { + steps { + sh """ + mv ${projectBuildTargetPath}/*.jar ${projectBuildTargetPath}/${projectJarNewName} + + cd ${projectWorkSpacePath} + + docker build -t ${dockerImageName} ./ + """ + } + } + + stage('运行 Docker 镜像') { + steps { + sh """ + docker rm -f ${dockerContainerName} | true + + docker run -d ${dockerRunParam} ${dockerImageName} + """ + } + } + + + + + + + } +} +``` From 7ff85dc30d37c4c6741c5e28ccc618660f61cc10 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 12 Oct 2018 13:29:53 +0800 Subject: [PATCH 143/330] jenkins --- markdown-file/Docker-Install-And-Usage.md | 15 ++ markdown-file/Harbor-Install-And-Usage.md | 15 +- markdown-file/Jenkins-Install-And-Settings.md | 150 ++++++++++++++++++ 3 files changed, 174 insertions(+), 6 deletions(-) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index 392972b4..1ed25e9c 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -72,6 +72,7 @@ Docker CE has both stable and edge channels. 
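A quick post-install sanity check to pair with the install steps below. This is a minimal sketch, assuming the `docker-ce` package and the stock `docker.service` unit used in this guide:

```
# confirm the client and daemon are installed and talking to each other
docker version

# locate the systemd unit file (handy before editing ExecStart further below)
systemctl show --property=FragmentPath docker

# confirm the daemon is up
systemctl is-active docker.service
```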
- `sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo` - `sudo yum makecache fast` - `sudo yum install -y docker-ce`,大小:19M,速度很慢。 +- 查看配置文件位置:`systemctl show --property=FragmentPath docker` - 启动 Docker:`systemctl start docker.service` - 停止 Docker:`systemctl stop docker.service` - 查看状态:`systemctl status docker.service` @@ -646,6 +647,20 @@ docker rmi $(docker images -f "dangling=true" -q) - +## Docker remote api 配置(保证在内网环境) + +- 假设要被远程操作的服务器 IP:`192.168.1.22` +- 修改其配置文件:`vim /lib/systemd/system/docker.service` +- 修改默认值为:`ExecStart=/usr/bin/dockerd` +- 改为:`ExecStart=/usr/bin/dockerd -H unix:///var/run/docker.sock -H tcp://0.0.0.0:2376` +- `systemctl daemon-reload` +- `systemctl reload docker` +- `systemctl restart docker` +- 验证: + - 在其他服务器上运行:`docker -H 192.168.1.22:2376 images ` + - 能拿到和它本身看到的一样的数据表示可以了 + + ## Dockerfile 解释 - 该文件名就叫 Dockerfile,注意大小写,没有后缀,否则会报错。 diff --git a/markdown-file/Harbor-Install-And-Usage.md b/markdown-file/Harbor-Install-And-Usage.md index 9a1d41a8..38a437e2 100644 --- a/markdown-file/Harbor-Install-And-Usage.md +++ b/markdown-file/Harbor-Install-And-Usage.md @@ -5,16 +5,20 @@ - CentOS 7.4 - IP:`192.168.0.105` - 需要访问的机子 hosts 需要映射(如果绑定真实域名就不需要这一步了):`192.168.0.105 harbor.gitnavi.com` + - 直接用 IP 也是可以的,只是不用起来不美观 ## 官方文档 -- 安装指导: +- 安装指导: - 从中我们可以知道需要:[Docker、Docker Compose 环境](./Docker-Install-And-Usage.md) - 硬件最低要求:2C + 4GB(推荐 8GB) -- 下载: + - 官网有推荐配置说明:[hardware](https://github.com/goharbor/harbor/blob/master/docs/installation_guide.md#hardware) +- 下载: - 当前(201806)最新版本:**v1.5.1** + - 当前(201810)最新版本:**v1.5.3 和 1.6.0** - 分 offline 和 online 版本,推荐使用 offline - **v1.5.1** 下载地址: + - **v1.5.3** 下载地址: ## 安装 @@ -95,7 +99,7 @@ registry_storage_provider_config = - `5000` - `1514` - 后面重新启动 Harbor 也靠这个文件了:`docker-compose -f /usr/local/harbor/docker-compose.yml restart` -- 开始安装:`sh /usr/local/harbor/install.sh`,控制台输出如下: +- 开始安装:`sh /usr/local/harbor/install.sh`,控制台输出如下(预计需要 5 ~ 10 分钟): ``` @@ -227,19 +231,18 @@ For more details, please visit https://github.com/vmware/harbor . 
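The client-side flow that the steps below walk through, consolidated into one runnable sketch. The hostname, project name, and admin credentials are this guide's example values:

```
# trust the plain-HTTP registry first (see the dockerd --insecure-registry note below),
# then log in, tag a local image into a Harbor project, and push it
docker login -u admin -p Harbor12345 harbor.gitnavi.com
docker tag maven:3.3-jdk-8 harbor.gitnavi.com/youmeek/harbor-maven:3.3-jdk-8
docker push harbor.gitnavi.com/youmeek/harbor-maven:3.3-jdk-8
```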
- 安装成功后,可以访问: - 默认用户名:`admin` - 默认密码:`Harbor12345` -- docker 客户端默认是使用 https 访问 docker registry,我们默认在安装 Harbor 的时候配置文件用的时候 http,所以这里需要修改 +- docker 客户端默认是使用 https 访问 docker registry,我们默认在安装 Harbor 的时候配置文件用的时候 http,所以其他 docker 客户端需要修改 - `vim /lib/systemd/system/docker.service` - 修改默认值为:`ExecStart=/usr/bin/dockerd` - 改为:`ExecStart=/usr/bin/dockerd --insecure-registry harbor.gitnavi.com` - `systemctl daemon-reload` - `systemctl reload docker` - `systemctl restart docker` - - `docker-compose -f /usr/local/harbor/docker-compose.yml restart` - 访问:,创建一个项目,比如:`youmeek`,等下需要用到。 - 这里用 admin 用户,不再另外创建用了,但是实际使用最好新建用户。 - `docker login -u admin -p Harbor12345 harbor.gitnavi.com` - 给本地的一个 maven 镜像打 tag:`docker tag maven:3.3-jdk-8 harbor.gitnavi.com/youmeek/harbor-maven:3.3-jdk-8` -- push 到仓库:`docker push harbor.gitnavi.com/youmeek/harbor-maven:3.3-jdk-8` +- push 到仓库:`docker push 182.61.19.178/demo/springboot-jenkins-docker:3` ---------------------------------------------------------------------------- diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index 722f3893..42587c90 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -628,6 +628,156 @@ pipeline { ``` +#### 简单的 pipeline 写法(Docker + Harbor 方式运行)(闭源项目 -- 码云为例) + +- 请先看懂上面 Docker 方式 +- 一共需要 3 台机子(要保证在内网环境,不然一定会有安全问题) + - 一台部署 [Harbor](Harbor-Install-And-Usage.md) + - 一台部署 Jenkins + - 一台运行项目 +- 确保 Jenkins 机子已经 Docker Login Harbor,这个就一次性的动作,所以自己在 Jenkins 服务器上操作即可 +- 确保 Spring Boot 项目运行的机子已经 Docker Login Harbor,这个就一次性的动作,所以自己在 Jenkins 服务器上操作即可 +- 确保 Spring Boot 项目运行的机子 docker remote api 开启(没有身份认证功能,所以才要保证内网) +- Pipeline 写法 + +``` +pipeline { + agent any + + /*=======================================工具环境修改-start=======================================*/ + tools { + jdk 'JDK8' + maven 'MAVEN3' + } + /*=======================================工具环境修改-end=======================================*/ + + options { + timestamps() + disableConcurrentBuilds() + buildDiscarder(logRotator( + numToKeepStr: '20', + daysToKeepStr: '30', + )) + } + + /*=======================================常修改变量-start=======================================*/ + + environment { + gitUrl = "https://gitee.com/youmeek/springboot-jenkins-demo.git" + branchName = "master" + giteeCredentialsId = "Gitee" + projectWorkSpacePath = "${env.WORKSPACE}" + projectBuildTargetPath = "${env.WORKSPACE}/target" + projectJarNewName = "buildApp.jar" + + projectDockerDaemon = "tcp://192.168.1.12:2376" + harborUrl = "192.168.1.13" + harborProjectName = "demo" + dockerImageName = "${harborUrl}/${harborProjectName}/${env.JOB_NAME}:${env.BUILD_NUMBER}" + dockerContainerName = "${env.JOB_NAME}" + inHostPort = "8082" + inDockerAndJavaPort = "8081" + inHostLogPath = "/data/docker/logs/${dockerContainerName}" + inDockerLogPath = "/data/logs" + dockerRunParam = "--name ${dockerContainerName} -v /etc/hosts:/etc/hosts -v ${inHostLogPath}:${inDockerLogPath} --restart=always -p ${inHostPort}:${inDockerAndJavaPort}" + } + + /*=======================================常修改变量-end=======================================*/ + + stages { + + stage('Pre Env') { + steps { + echo "======================================项目名称 = ${env.JOB_NAME}" + echo "======================================项目 URL = ${gitUrl}" + echo "======================================项目分支 = ${branchName}" + echo "======================================当前编译版本号 = ${env.BUILD_NUMBER}" + echo "======================================项目空间文件夹路径 = 
${projectWorkSpacePath}" + echo "======================================项目 build 后 jar 路径 = ${projectBuildTargetPath}" + echo "======================================项目 jar 新名称 = ${projectJarNewName}" + echo "======================================Docker 镜像名称 = ${dockerImageName}" + echo "======================================Docker 容器名称 = ${dockerContainerName}" + echo "======================================harbor 地址 = ${harborUrl}" + echo "======================================harbor 项目名称 = ${harborProjectName}" + echo "======================================项目在宿主机的端口 = ${inHostPort}" + echo "======================================项目在 Docker 容器中的端口 = ${inDockerAndJavaPort}" + echo "======================================项目在宿主机的 log 路径 = ${inHostLogPath}" + echo "======================================项目在 docker 容器的 log 路径 = ${inDockerLogPath}" + echo "======================================项目运行的 Docker remote ip 信息 = ${projectDockerDaemon}" + echo "======================================项目运行的参数 = ${dockerRunParam}" + } + } + + stage('Git Clone'){ + steps { + git branch: "${branchName}", + credentialsId: "${giteeCredentialsId}", + url: "${gitUrl}" + } + } + + stage('Maven Clean') { + steps { + sh "mvn clean" + } + } + + stage('Maven Package') { + steps { + sh "mvn package -DskipTests" + } + } + + stage('构建 Docker 镜像') { + steps { + sh """ + mv ${projectBuildTargetPath}/*.jar ${projectBuildTargetPath}/${projectJarNewName} + + cd ${projectWorkSpacePath} + + docker build -t ${dockerImageName} ./ + """ + } + } + + stage('Push Docker 镜像') { + options { + timeout(time: 5, unit: 'MINUTES') + } + steps { + sh """ + docker push ${dockerImageName} + docker rmi ${dockerImageName} + """ + } + } + + stage('运行远程 Docker 镜像') { + options { + timeout(time: 5, unit: 'MINUTES') + } + steps { + sh """ + docker -H ${projectDockerDaemon} pull ${dockerImageName} + + docker -H ${projectDockerDaemon} rm -f ${dockerContainerName} | true + + docker -H ${projectDockerDaemon} run -d ${dockerRunParam} ${dockerImageName} + """ + } + } + + + + + + + } +} +``` + + + ------------------------------------------------------------------- From 8e3185bc46a8d48f9f4b65b1a0c3efb04db003e9 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 12 Oct 2018 13:34:26 +0800 Subject: [PATCH 144/330] jenkins --- markdown-file/Docker-Install-And-Usage.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index 1ed25e9c..c965514b 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -647,12 +647,13 @@ docker rmi $(docker images -f "dangling=true" -q) - -## Docker remote api 配置(保证在内网环境) +## Docker remote api 远程操作配置(保证在内网环境) - 假设要被远程操作的服务器 IP:`192.168.1.22` - 修改其配置文件:`vim /lib/systemd/system/docker.service` - 修改默认值为:`ExecStart=/usr/bin/dockerd` - 改为:`ExecStart=/usr/bin/dockerd -H unix:///var/run/docker.sock -H tcp://0.0.0.0:2376` + - 如果还需要连自己的 harbor 这类,完整配置:`ExecStart=/usr/bin/dockerd --insecure-registry harbor.youmeek.com -H unix:///var/run/docker.sock -H tcp://0.0.0.0:2376` - `systemctl daemon-reload` - `systemctl reload docker` - `systemctl restart docker` From b4db61af990e8807353af1a30bb7ec60cff59a41 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 12 Oct 2018 13:57:47 +0800 Subject: [PATCH 145/330] jenkins --- ..._tool.sh => install_common_tool_CentOS.sh} | 0 .../shell/install_jdk_offline_to_zsh.sh | 10 ++--- .../shell/install_maven_offline_to_zsh.sh | 44 +++++++++++++++++++ 3 files changed, 49 
insertions(+), 5 deletions(-) rename favorite-file/shell/{install_common_tool.sh => install_common_tool_CentOS.sh} (100%) create mode 100644 favorite-file/shell/install_maven_offline_to_zsh.sh diff --git a/favorite-file/shell/install_common_tool.sh b/favorite-file/shell/install_common_tool_CentOS.sh similarity index 100% rename from favorite-file/shell/install_common_tool.sh rename to favorite-file/shell/install_common_tool_CentOS.sh diff --git a/favorite-file/shell/install_jdk_offline_to_zsh.sh b/favorite-file/shell/install_jdk_offline_to_zsh.sh index 7fa52b7c..b83adb33 100644 --- a/favorite-file/shell/install_jdk_offline_to_zsh.sh +++ b/favorite-file/shell/install_jdk_offline_to_zsh.sh @@ -8,28 +8,28 @@ fi echo "判断 JDK 压缩包是否存在" -if [ ! -f "/opt/setups/jdk-8u171-linux-x64.tar.gz" ]; then +if [ ! -f "/opt/setups/jdk-8u181-linux-x64.tar.gz" ]; then echo "JDK 压缩包不存在" exit 1 fi echo "开始解压 JDK" -cd /opt/setups ; tar -zxf jdk-8u171-linux-x64.tar.gz +cd /opt/setups ; tar -zxf jdk-8u181-linux-x64.tar.gz -if [ ! -d "/opt/setups/jdk1.8.0_171" ]; then +if [ ! -d "/opt/setups/jdk1.8.0_181" ]; then echo "JDK 解压失败,结束脚本" exit 1 fi echo "JDK 解压包移到 /usr/local/ 目录下" -mv jdk1.8.0_171/ /usr/local/ +mv jdk1.8.0_181/ /usr/local/ echo "JDK 写入系统变量到 zshrc" cat << EOF >> ~/.zshrc # JDK -JAVA_HOME=/usr/local/jdk1.8.0_171 +JAVA_HOME=/usr/local/jdk1.8.0_181 JRE_HOME=\$JAVA_HOME/jre PATH=\$PATH:\$JAVA_HOME/bin CLASSPATH=.:\$JAVA_HOME/lib/dt.jar:\$JAVA_HOME/lib/tools.jar diff --git a/favorite-file/shell/install_maven_offline_to_zsh.sh b/favorite-file/shell/install_maven_offline_to_zsh.sh new file mode 100644 index 00000000..2454bb59 --- /dev/null +++ b/favorite-file/shell/install_maven_offline_to_zsh.sh @@ -0,0 +1,44 @@ +#!/bin/sh + +echo "判断常见的文件夹是否存在" + +if [ ! -d "/opt/setups" ]; then + mkdir /opt/setups +fi + +echo "判断 Maven 压缩包是否存在" + +if [ ! -f "/opt/setups/apache-maven-3.5.4-bin.tar.gz" ]; then + echo "Maven 压缩包不存在" + exit 1 +fi + +echo "开始解压 Maven" +cd /opt/setups ; tar -zxf apache-maven-3.5.4-bin.tar.gz + +if [ ! 
-d "/opt/setups/apache-maven-3.5.4" ]; then + echo "Maven 解压失败,结束脚本" + exit 1 +fi + +echo "Maven 解压包移到 /usr/local/ 目录下" +mv apache-maven-3.5.4/ /usr/local/ + +echo "Maven 写入系统变量到 zshrc" + +cat << EOF >> ~/.zshrc + +# Maven +M3_HOME=/usr/local/apache-maven-3.5.4 +MAVEN_HOME=/usr/local/apache-maven-3.5.4 +PATH=\$PATH:\$M3_HOME/bin +MAVEN_OPTS="-Xms256m -Xmx356m" +export M3_HOME +export MAVEN_HOME +export PATH +export MAVEN_OPTS + +EOF + + +echo "Maven 设置完成,需要你手动设置:source ~/.zshrc" \ No newline at end of file From 247aa67f4b261d4dbdb77dfee3a0e90754086381 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 12 Oct 2018 14:27:01 +0800 Subject: [PATCH 146/330] jenkins --- markdown-file/Jenkins-Install-And-Settings.md | 34 +++++++------------ 1 file changed, 13 insertions(+), 21 deletions(-) diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index 42587c90..07adcbf1 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -20,7 +20,7 @@ - 先创建一个宿主机以后用来存放数据的目录:`mkdir -p /data/jenkins/jenkins_home && chmod 777 -R /data/jenkins/jenkins_home` - 安装镜像(813MB,有点大):`docker pull jenkins/jenkins:lts` - 查看下载下来的镜像:`docker images` -- 首次运行镜像:`docker run --name jenkins-master -p 8123:8080 -p 50000:50000 -v /etc/localtime:/etc/localtime -v /data/jenkins/jenkins_home:/var/jenkins_home -e JAVA_OPTS="-Duser.timezone=Asia/Shanghai" -d --restart always jenkins/jenkins:lts` +- 首次运行镜像:`docker run --name jenkins-master -p 8123:18080 -p 50000:50000 -v /etc/localtime:/etc/localtime -v /data/jenkins/jenkins_home:/var/jenkins_home -e JAVA_OPTS="-Duser.timezone=Asia/Shanghai" -d --restart always jenkins/jenkins:lts` - 这里的 8080 端口是 jenkins 运行程序的端口,必须要有映射的。50000 端口是非必须映射的,但是如果你要用 Jenkins 分布式构建这个就必须开放 - 如果报下面的错误: @@ -95,7 +95,7 @@ Can not write to /var/jenkins_home/copy_reference_file.log. 
Wrong volume permiss ## Docker 的 Jenkins 与 Docker 结合使用 -- 运行镜像命令:`docker run --name jenkins-master -p 8123:8080 -p 50000:50000 -v /etc/localtime:/etc/localtime -v /data/jenkins/jenkins_home:/var/jenkins_home -v /var/run/docker.sock:/var/run/docker.sock -e JAVA_OPTS="-Duser.timezone=Asia/Shanghai" -d --restart always jenkins/jenkins:lts` +- 运行镜像命令:`docker run --name jenkins-master -p 8123:18080 -p 50000:50000 -v /etc/localtime:/etc/localtime -v /data/jenkins/jenkins_home:/var/jenkins_home -v /var/run/docker.sock:/var/run/docker.sock -e JAVA_OPTS="-Duser.timezone=Asia/Shanghai" -d --restart always jenkins/jenkins:lts` - 比上面多了一步:`-v /var/run/docker.sock:/var/run/docker.sock` - 这样,在 jenkins 里面写 shell 脚本调用 docker 程序,就可以直接调用宿主机的 docker 了。 @@ -138,16 +138,8 @@ sudo yum install -y jenkins /var/log/jenkins/jenkins.log:jenkins 日志文件。 ``` -- 配置 jenkins 端口,默认是:8080 - -``` -vim /etc/sysconfig/jenkins - -56 行:JENKINS_PORT="8080" -``` - -- 控制台输出方式启动:`java -jar /usr/lib/jenkins/jenkins.war` -- 内置 Jetty +- 控制台输出方式启动:`java -jar /usr/lib/jenkins/jenkins.war --httpPort=18080` +- 内置 Jetty,默认是 18080 端口,你也可以改为其他(建议修改为其他) - 可以看到有一个这个重点内容,这是你的初始化密码,等下会用到的: @@ -160,17 +152,17 @@ daacc724767640a29ddc99d159a80cf8 This may also be found at: /root/.jenkins/secrets/initialAdminPassword ``` -- 守护进程启动:`nohup java -jar /usr/lib/jenkins/jenkins.war > /dev/null 2>&1 &` -- 浏览器访问 Jenkins 首页开始配置: +- 守护进程启动:`nohup java -jar /usr/lib/jenkins/jenkins.war --httpPort=18080 > /dev/null 2>&1 &` +- 浏览器访问 Jenkins 首页开始配置: - 特殊情况: - 如果配置插件过程遇到这个错误:`No valid crumb was included in the request`,则多重试几次。 - - 登录后把: 下面的 `防止跨站点请求伪造` 勾选去掉。遇到问题多试几次。 + - 登录后把: 下面的 `防止跨站点请求伪造` 勾选去掉。遇到问题多试几次。 ------------------------------------------------------------------- ## pipeline 语法 -- 全局 pipeline 语法说明: +- 全局 pipeline 语法说明: - 其他资料 - @@ -197,9 +189,9 @@ JOB_NAME = react JOB_BASE_NAME = react WORKSPACE = /root/.jenkins/workspace/react JENKINS_HOME = /root/.jenkins -JENKINS_URL = http://192.168.0.105:8080/ -BUILD_URL = http://192.168.0.105:8080/job/react/21/ -JOB_URL = http://192.168.0.105:8080/job/react/ +JENKINS_URL = http://192.168.0.105:18080/ +BUILD_URL = http://192.168.0.105:18080/job/react/21/ +JOB_URL = http://192.168.0.105:18080/job/react/ ``` @@ -279,7 +271,7 @@ pipeline { #### 简单的 pipeline 写法(闭源项目 -- 码云为例) -- 新增一个全局凭据: +- 新增一个全局凭据: - 类型:`Username with password` - 范围:`全局` - Username:`你的 Gitee 账号` @@ -368,7 +360,7 @@ pipeline { #### 配置工具 -- 访问: +- 访问: - 我习惯自己安装,所以这里修改配置: - **需要注意**:配置里面的 `别名` 不要随便取名字,后面 Pipeline 要用到的。在 tool 标签里面会用到。 - 具体可以查看该图片说明:[点击查看](https://upload-images.jianshu.io/upload_images/12159-ef61595aebaa4244.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) From a5b322f869770501918daf9358aa2e35b1be8d19 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 12 Oct 2018 16:08:08 +0800 Subject: [PATCH 147/330] jenkins --- markdown-file/Jenkins-Install-And-Settings.md | 2 ++ markdown-file/Node-Install-And-Usage.md | 1 + 2 files changed, 3 insertions(+) diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index 07adcbf1..d25cc32a 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -199,6 +199,8 @@ JOB_URL = http://192.168.0.105:18080/job/react/ ## Jenkins 前端 React 项目构建 +- **确保**:安装了 [Node.js](Node-Install-And-Usage.md) + #### 简单的 pipeline 写法(开源项目) ``` diff --git a/markdown-file/Node-Install-And-Usage.md b/markdown-file/Node-Install-And-Usage.md index 80bc010a..46aa0d6f 100644 --- a/markdown-file/Node-Install-And-Usage.md +++ 
b/markdown-file/Node-Install-And-Usage.md @@ -21,6 +21,7 @@ curl --silent --location https://rpm.nodesource.com/setup_9.x | sudo bash - sudo yum -y install nodejs ``` +- 验证:`node -v` - 注意:因为网络原因,最好先把脚本下载到本地,再用代理进行安装 From d0e0a15b5471e3c183238b8aa3cf7d22140f8ba4 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 12 Oct 2018 16:50:22 +0800 Subject: [PATCH 148/330] jenkins --- markdown-file/Jenkins-Install-And-Settings.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index d25cc32a..b26e220a 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -547,7 +547,7 @@ pipeline { dockerContainerName = "${env.JOB_NAME}" inHostPort = "8082" inDockerAndJavaPort = "8081" - inHostLogPath = "/data/docker/logs/${dockerContainerName}" + inHostLogPath = "/data/docker/logs/${dockerContainerName}/${env.BUILD_NUMBER}" inDockerLogPath = "/data/logs" dockerRunParam = "--name ${dockerContainerName} -v /etc/hosts:/etc/hosts -v ${inHostLogPath}:${inDockerLogPath} --restart=always -p ${inHostPort}:${inDockerAndJavaPort}" } @@ -671,7 +671,7 @@ pipeline { dockerContainerName = "${env.JOB_NAME}" inHostPort = "8082" inDockerAndJavaPort = "8081" - inHostLogPath = "/data/docker/logs/${dockerContainerName}" + inHostLogPath = "/data/docker/logs/${dockerContainerName}/${env.BUILD_NUMBER}" inDockerLogPath = "/data/logs" dockerRunParam = "--name ${dockerContainerName} -v /etc/hosts:/etc/hosts -v ${inHostLogPath}:${inDockerLogPath} --restart=always -p ${inHostPort}:${inDockerAndJavaPort}" } From 2651ccb1c7fd3471cf02238e285482d67f07b55a Mon Sep 17 00:00:00 2001 From: judasn Date: Sun, 14 Oct 2018 23:51:11 +0800 Subject: [PATCH 149/330] nginx --- favorite-file/Nginx-Settings/nginx-front.conf | 69 +++++++++ favorite-file/Nginx-Settings/nginx.conf | 134 ------------------ 2 files changed, 69 insertions(+), 134 deletions(-) create mode 100644 favorite-file/Nginx-Settings/nginx-front.conf delete mode 100644 favorite-file/Nginx-Settings/nginx.conf diff --git a/favorite-file/Nginx-Settings/nginx-front.conf b/favorite-file/Nginx-Settings/nginx-front.conf new file mode 100644 index 00000000..c6163fde --- /dev/null +++ b/favorite-file/Nginx-Settings/nginx-front.conf @@ -0,0 +1,69 @@ +user root; +worker_processes auto; +pid /run/nginx.pid; + +events { + use epoll; + multi_accept on; + worker_connections 1024; +} + +http { + + sendfile on; + tcp_nopush on; + tcp_nodelay on; + keepalive_timeout 65; + types_hash_max_size 2048; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for" "$request_time"'; + + access_log /var/log/nginx/access.log main; + error_log /var/log/nginx/error.log; + + gzip on; + gzip_buffers 8 16k; + gzip_min_length 512; + gzip_disable "MSIE [1-6]\.(?!.*SV1)"; + gzip_http_version 1.1; + gzip_types text/plain text/css application/javascript application/x-javascript application/json application/xml; + + server { + + listen 80; + server_name localhost 127.0.0.1 139.159.190.24 gitnavi.com; + + location / { + root /root/.jenkins/workspace/nestle-platform-front-test/dist; + index index.html index.htm; + try_files $uri /index.html; + } + + location ^~ /platform/ { + proxy_pass http://127.0.0.1:28081; + proxy_redirect off; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For 
$proxy_add_x_forwarded_for; + } + + location ~ .*\.(js|css)?$ { + root /root/.jenkins/workspace/nestle-platform-front-test/dist; + } + + location ~ .*\.(gif|jpg|jpeg|png|bmp|swf|ico|woff|woff2|ttf|eot|txt|svg)$ { + root /root/.jenkins/workspace/nestle-platform-front-test/dist; + } + + error_page 404 /404.html; + location = /usr/share/nginx/html/40x.html { + } + + error_page 500 502 503 504 /50x.html; + location = /usr/share/nginx/html/50x.html { + } + + } +} \ No newline at end of file diff --git a/favorite-file/Nginx-Settings/nginx.conf b/favorite-file/Nginx-Settings/nginx.conf deleted file mode 100644 index 6460dd24..00000000 --- a/favorite-file/Nginx-Settings/nginx.conf +++ /dev/null @@ -1,134 +0,0 @@ -user root; - -worker_processes 2;#Nginx进程数, 建议设置为等于CPU总核心数 - -events { - use epoll; #nginx工作模式,epoll是linux平台下的高效模式,配合nginx的异步非阻塞作用 - worker_connections 1024;#单个进程最大连接数 -} - -http { - include mime.types;#扩展名与文件类型映射表:#nginx通过服务端文件的后缀名来判断这个文件属于什么类型,再将该数据类型写入http头部的Content-Type字段中,发送给客户端。mime.types这个文件里面全是文件类型的定义。 - default_type application/octet-stream;#当用户请求的文件后缀在mime.types这个文件没有定义,便使用默认的type为二进制流 - sendfile on;#开启高效文件传输模式 - tcp_nopush on;#启用之后,数据包会累积到一定大小才会发送,减小了额外开销,防止网络阻塞,提高网络效率 - tcp_nodelay on;#启用之后,尽快发送数据。可以看到tcp_nopush是要等数据包累积到一定大小才发送,tcp_nodelay是要尽快发送,二者相互矛盾。实际上,它们可以一起用,最终的效果是先填满包,再尽快发送。 - keepalive_timeout 65; - charset utf8; - - #全局日志(也可以把这个配置到 server 中进行不同 server 的不同配置) - access_log /var/log/nginx/access.log; - error_log /var/log/nginx/error.log; - - # gzip 压缩设置 - gzip on; - gzip_min_length 1k; - gzip_buffers 4 16k; - gzip_http_version 1.0; - gzip_comp_level 2;#gzip的压缩程度,级别为1到9.级别越高,压缩程序越高,时间越多 - gzip_types text/plain application/x-javascript text/css text/xml application/xml text/javascript application/javascript application/json; - gzip_vary on;#告诉接受方数据经过gzip压缩 - gzip_disable "MSIE[1-6]\."; #因为IE6对gzip不支持,所以在IE6及更旧的版本不使用gzip压缩 - - server { - - listen 80; #监听80端口 - server_name localhost 127.0.0.1 120.77.84.121 gitnavi.com; - - location / { - root /usr/program/tomcat8/webapps/ROOT;#静态文件直接读取硬盘,所有这里直接写tomcat的程序里面的静态资源目录 - index index.html index.jsp; - } - - location = / { - root /usr/program/tomcat8/webapps/ROOT;#静态文件直接读取硬盘,所有这里直接写tomcat的程序里面的静态资源目录 - index index.html; - } - - # 匹配用户导航静态 html 目录路径 - location ^~ /u/ { - root /usr/program/tomcat8/webapps/ROOT;#静态文件直接读取硬盘,所有这里直接写tomcat的程序里面的静态资源目录 - } - - # Controller 中前台的请求标识 - location ^~ /front/ { - proxy_pass http://127.0.0.1:8080; #转发请求交给tomcat处理 - proxy_redirect off; - proxy_set_header Host $host; #后端的Web服务器可以通过X-Forwarded-For获取用户真实IP - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - client_max_body_size 10m; #允许客户端请求的最大单文件字节数 - client_body_buffer_size 128k; #缓冲区代理缓冲用户端请求的最大字节数 - proxy_connect_timeout 300; #nginx跟后端服务器连接超时时间 - proxy_send_timeout 300; #后端服务器数据回传时间(代理发送超时) - proxy_read_timeout 300; #连接成功后,后端服务器响应时间 - proxy_buffer_size 4k; #设置代理服务器(nginx)保存用户头信息的缓冲区大小 - proxy_buffers 6 32k; #proxy_buffers缓冲区,网页平均在32k以下的话,这样设置 - proxy_busy_buffers_size 64k; #高负荷下缓冲大小(proxy_buffers*2) - proxy_temp_file_write_size 64k; #设定缓存文件夹大小,大于这个值,将从upstream服务器传 - } - - # Controller 中后台的请求标识 - location ^~ /admin/ { - proxy_pass http://127.0.0.1:8080; #转发请求交给tomcat处理 - proxy_redirect off; - proxy_set_header Host $host; #后端的Web服务器可以通过X-Forwarded-For获取用户真实IP - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - client_max_body_size 10m; #允许客户端请求的最大单文件字节数 - client_body_buffer_size 128k; #缓冲区代理缓冲用户端请求的最大字节数 - proxy_connect_timeout 300; 
#nginx跟后端服务器连接超时时间 - proxy_send_timeout 300; #后端服务器数据回传时间(代理发送超时) - proxy_read_timeout 300; #连接成功后,后端服务器响应时间 - proxy_buffer_size 4k; #设置代理服务器(nginx)保存用户头信息的缓冲区大小 - proxy_buffers 6 32k; #proxy_buffers缓冲区,网页平均在32k以下的话,这样设置 - proxy_busy_buffers_size 64k; #高负荷下缓冲大小(proxy_buffers*2) - proxy_temp_file_write_size 64k; #设定缓存文件夹大小,大于这个值,将从upstream服务器传 - } - - #静态资源转发 - #由nginx处理静态页面 - location ~ .*\.(gif|jpg|jpeg|png|bmp|swf|ico|woff|woff2|ttf|eot|txt|svg)$ { - root /usr/program/tomcat8/webapps/ROOT;#这里直接写tomcat的程序里面的静态资源目录 - expires 30d;#使用expires缓存模块,缓存到客户端30天(这个模块看下有没有安装) - } - #因为我的 html 页面经常变动,所以 html 我这里就不缓存了 - location ~ .*\.(js|css)?$ { - root /usr/program/tomcat8/webapps/ROOT;#静态文件直接读取硬盘,所有这里直接写tomcat的程序里面的静态资源目录 - expires 1d;#也可以指定小时,比如:12h - } - # 读取一些静态页面,比如隐私政策等 - location ~ .*\.(html|htm)?$ { - root /usr/program/tomcat8/webapps/ROOT;#静态文件直接读取硬盘,所有这里直接写tomcat的程序里面的静态资源目录 - expires 1d;#也可以指定小时,比如:12h - } - - #其他请求都视为动态请求处理 - #location ~ .*$ { - # proxy_pass http://127.0.0.1:8080; #转发请求交给tomcat处理 - # proxy_redirect off; - # proxy_set_header Host $host; #后端的Web服务器可以通过X-Forwarded-For获取用户真实IP - # proxy_set_header X-Real-IP $remote_addr; - # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - # client_max_body_size 10m; #允许客户端请求的最大单文件字节数 - # client_body_buffer_size 128k; #缓冲区代理缓冲用户端请求的最大字节数 - # proxy_connect_timeout 300; #nginx跟后端服务器连接超时时间 - # proxy_send_timeout 300; #后端服务器数据回传时间(代理发送超时) - # proxy_read_timeout 300; #连接成功后,后端服务器响应时间 - # proxy_buffer_size 4k; #设置代理服务器(nginx)保存用户头信息的缓冲区大小 - # proxy_buffers 6 32k; #proxy_buffers缓冲区,网页平均在32k以下的话,这样设置 - # proxy_busy_buffers_size 64k; #高负荷下缓冲大小(proxy_buffers*2) - # proxy_temp_file_write_size 64k; #设定缓存文件夹大小,大于这个值,将从upstream服务器传 - #} - - #常见错误页面设置 - error_page 404 /404.html; - location = /404.html { - root /usr/program/tomcat8/webapps/ROOT; - } - error_page 500 502 503 504 /500.html; - location = /500.html { - root /usr/program/tomcat8/webapps/ROOT; - } - } -} From 7f9664b63408743421f0cbb5e1291ad74e9417de Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 15 Oct 2018 00:18:36 +0800 Subject: [PATCH 150/330] nginx --- favorite-file/Nginx-Settings/nginx-front.conf | 3 +++ 1 file changed, 3 insertions(+) diff --git a/favorite-file/Nginx-Settings/nginx-front.conf b/favorite-file/Nginx-Settings/nginx-front.conf index c6163fde..b47cfdad 100644 --- a/favorite-file/Nginx-Settings/nginx-front.conf +++ b/favorite-file/Nginx-Settings/nginx-front.conf @@ -9,6 +9,9 @@ events { } http { + # 必须加这两个,不然 CSS 无法正常加载 + include mime.types; + default_type application/octet-stream; sendfile on; tcp_nopush on; From 86545ab3ae1f461d97a2623b92fa7542848cc1d2 Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 15 Oct 2018 11:11:12 +0800 Subject: [PATCH 151/330] nginx --- favorite-file/Nginx-Settings/nginx-front.conf | 41 ++++++++++++++++++- 1 file changed, 39 insertions(+), 2 deletions(-) diff --git a/favorite-file/Nginx-Settings/nginx-front.conf b/favorite-file/Nginx-Settings/nginx-front.conf index b47cfdad..82894983 100644 --- a/favorite-file/Nginx-Settings/nginx-front.conf +++ b/favorite-file/Nginx-Settings/nginx-front.conf @@ -35,8 +35,8 @@ http { server { - listen 80; - server_name localhost 127.0.0.1 139.159.190.24 gitnavi.com; + listen 8001; + server_name localhost 127.0.0.1 139.159.190.24 platform.gitnavi.com; location / { root /root/.jenkins/workspace/nestle-platform-front-test/dist; @@ -69,4 +69,41 @@ http { } } + + server { + + listen 8002; + server_name localhost 127.0.0.1 139.159.190.24 store.gitnavi.com; + + location / { + root 
/root/.jenkins/workspace/nestle-store-front-test/dist; + index index.html index.htm; + try_files $uri /index.html; + } + + location ^~ /store/ { + proxy_pass http://127.0.0.1:28082; + proxy_redirect off; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + + location ~ .*\.(js|css)?$ { + root /root/.jenkins/workspace/nestle-store-front-test/dist; + } + + location ~ .*\.(gif|jpg|jpeg|png|bmp|swf|ico|woff|woff2|ttf|eot|txt|svg)$ { + root /root/.jenkins/workspace/nestle-store-front-test/dist; + } + + error_page 404 /404.html; + location = /usr/share/nginx/html/40x.html { + } + + error_page 500 502 503 504 /50x.html; + location = /usr/share/nginx/html/50x.html { + } + + } } \ No newline at end of file From eb0b2cee00a7541d6df1294c3ce9bbe70c0b195c Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 15 Oct 2018 17:23:20 +0800 Subject: [PATCH 152/330] war --- markdown-file/File-Extract-Compress.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/markdown-file/File-Extract-Compress.md b/markdown-file/File-Extract-Compress.md index fc820934..b6ac65ac 100644 --- a/markdown-file/File-Extract-Compress.md +++ b/markdown-file/File-Extract-Compress.md @@ -40,7 +40,8 @@ ## 常用文件进行--压缩--命令整理 - Linux 压缩文件夹为后缀 `.war` 格式的文件(最好不要对根目录进行压缩,不然会多出一级目录) -- 命令:`jar -cvfM0 cas.war /opt/cas/META-INF /opt/cas/WEB-INF` +- 命令:`jar -cvfM0 cas.war /opt/cas/META-INF /opt/cas/WEB-INF /opt/cas/index.jsp` +- 或者命令:`jar -cvfM0 cas.war /opt/cas/*` - Linux 压缩文件为后缀 `.tar` 格式的文件 - 命令:`tar -zcvf test11.tar test11` From 3f06c5e7658c549dec306b6ea0f0dd22115bcc67 Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 15 Oct 2018 18:05:34 +0800 Subject: [PATCH 153/330] war --- markdown-file/File-Extract-Compress.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/markdown-file/File-Extract-Compress.md b/markdown-file/File-Extract-Compress.md index b6ac65ac..fd03c1dd 100644 --- a/markdown-file/File-Extract-Compress.md +++ b/markdown-file/File-Extract-Compress.md @@ -41,7 +41,7 @@ - Linux 压缩文件夹为后缀 `.war` 格式的文件(最好不要对根目录进行压缩,不然会多出一级目录) - 命令:`jar -cvfM0 cas.war /opt/cas/META-INF /opt/cas/WEB-INF /opt/cas/index.jsp` -- 或者命令:`jar -cvfM0 cas.war /opt/cas/*` +- 或者命令:`cd 项目根目录 ; jar -cvfM0 cas.war ./*` - Linux 压缩文件为后缀 `.tar` 格式的文件 - 命令:`tar -zcvf test11.tar test11` From a163c70581ab2190fd707388113212721eb94caa Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 16 Oct 2018 10:40:41 +0800 Subject: [PATCH 154/330] tomcat --- markdown-file/Docker-Install-And-Usage.md | 33 +++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index c965514b..bc139548 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -734,6 +734,39 @@ ENV TZ=Asia/Shanghai RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone ``` +## Dockerfile 部署 Tomcat 应用 + +- 编写 Dockerfile + +``` +FROM tomcat:8.0.46-jre8 +MAINTAINER GitNavi + +ENV JAVA_OPTS="-Xms2g -Xmx2g -XX:MetaspaceSize=128M -XX:MaxMetaspaceSize=312M" +ENV CATALINA_HOME /usr/local/tomcat + +ENV TZ=Asia/Shanghai +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + +RUN rm -rf /usr/local/tomcat/webapps/* + +ADD qiyeweixin.war /usr/local/tomcat/webapps/ + +EXPOSE 8080 + +CMD ["catalina.sh", "run"] +``` + +- 打包镜像:`docker build -t harbor.gitnavi.com/demo/qiyeweixin:1.2.2 ./` +- 运行:`docker run -d -p 8888:8080 --name 
qiyeweixin -v /data/docker/logs/qiyeweixin:/data/logs/qiyeweixin harbor.gitnavi.com/demo/qiyeweixin:1.2.2` +- 带 JVM 参数运行:`docker run -d -p 8888:8080 -e JAVA_OPTS='-Xms7g -Xmx7g -XX:MetaspaceSize=128M -XX:MaxMetaspaceSize=512M' --name qiyeweixin -v /data/docker/logs/qiyeweixin:/data/logs/qiyeweixin harbor.gitnavi.com/demo/qiyeweixin:1.2.2` + - 虽然 Dockerfile 已经有 JVM 参数,并且也是有效的。但是如果 docker run 的时候又带了 JVM 参数,则会以 docker run 的参数为准 +- 测试 JVM 是否有效方法,在代码里面书写,该值要接近 xmx 值: + +``` +long maxMemory = Runtime.getRuntime().maxMemory(); +logger.warn("-------------maxMemory=" + ((double) maxMemory / (1024 * 1024))); +``` ## Docker Compose From a45e52e916f219d32e3026ca322c54829d509b96 Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 16 Oct 2018 14:21:52 +0800 Subject: [PATCH 155/330] tomcat --- markdown-file/Docker-Install-And-Usage.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index bc139548..2524afd4 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -199,7 +199,7 @@ java -jar /root/spring-boot-my-demo.jar - 我们看到了我们刚刚运行的容器 ID(CONTAINER ID)为:`a5d544d9b6f9`,这个下面要用到 - 基于刚刚运行的容器创建新镜像:`docker commit a5d544d9b6f9 youmeek/springboot:0.1` - 查看现在的镜像库:`docker images`,会发现多了一个 youmeek/springboot 新镜像,镜像 ID 为:`7024f230fef9` -- 运行新镜像,实例化为一个容器,并启动容器中刚刚写的脚本:`docker run -d -p 38080:8080 --name springBootJar 7024f230fef9 /root/spring-boot-run.sh` +- 运行新镜像,实例化为一个容器,并启动容器中刚刚写的脚本:`docker run -d -p 38080:8080 --name=springBootJar --hostname=springBootJar 7024f230fef9 /root/spring-boot-run.sh` - `-d`:表示以“守护模式”执行 spring-boot-run.sh 脚本,此时 jar 中的 log 日志不会出现在输出终端上。 - `-p`:表示宿主机与容器的端口映射,此时将容器内部的 8080 端口映射为宿主机的 38080 端口,这样就向外界暴露了 38080 端口,可通过 Docker 网桥来访问容器内部的 8080 端口了。 - `--name`:表示给新实例容器取的名称,用一个有意义的名称命名即可 @@ -271,13 +271,13 @@ CONTAINER ID NAME CPU % MEM USAGE / LI #### 容器生命周期管理 - `docker run`,运行镜像 - - `docker run -v /java_logs/:/opt/ -d -p 8080:80 --name myDockerNameIsGitNavi -i -t 镜像ID /bin/bash` + - `docker run -v /java_logs/:/opt/ -d -p 8080:80 --name=myDockerNameIsGitNavi --hostname=myDockerNameIsGitNavi -i -t 镜像ID /bin/bash` - `-i -t` 分别表示保证容器中的 STDIN 开启,并分配一个伪 tty 终端进行交互,这两个是合着用。 - `--name` 是给容器起了一个名字(如果没有主动给名字,docker 会自动给你生成一个)容器的名称规则:大小写字母、数字、下划线、圆点、中横线,用正则表达式来表达就是:[a-zA-Z0-9_*-] - `-d` 容器运行在后台。 - `-p 8080:80` 表示端口映射,将宿主机的8080端口转发到容器内的80端口。(如果是 -P 参数,则表示随机映射应该端口,一般用在测试的时候) - `-v /java_logs/:/opt/` 表示目录挂载,/java_logs/ 是宿主机的目录,/opt/ 是容器目录 - - `docker run --rm --name myDockerNameIsGitNavi -i -t centos /bin/bash`,--rm,表示退出即删除容器,一般用在做实验测试的时候 + - `docker run --rm --name=myDockerNameIsGitNavi --hostname=myDockerNameIsGitNavi -i -t centos /bin/bash`,--rm,表示退出即删除容器,一般用在做实验测试的时候 - `docker run --restart=always -i -t centos /bin/bash`,--restart=always 表示停止后会自动重启 - `docker run --restart=on-failure:5 -i -t centos /bin/bash`,--restart=on-failure:5 表示停止后会自动重启,最多重启 5 次 - `docker exec`:对守护式的容器里面执行命令,方便对正在运行的容器进行维护、监控、管理 @@ -720,7 +720,7 @@ EXPOSE 9096 - `cd /opt/zch` - `docker build . 
--tag="skb/user:v1.0.1"` - 因为 build 过程中会有多层镜像 step 过程,所以如果 build 过程中失败,那解决办法的思路是找到 step 失败的上一层,成功的 step 中镜像 ID。然后 docker run 该镜像 ID,手工操作,看报什么错误,然后就比较清晰得了解错误情况了。 - - `docker run -d -p 9096:9096 -v /usr/local/logs/:/opt/ --name="skbUser1.0.0" skb/user:v1.0.1` + - `docker run -d -p 9096:9096 -v /usr/local/logs/:/opt/ --name=skbUser --hostname=skbUser skb/user:v1.0.1` - 查看启动后容器列表:`docker ps` - jar 应用的日志是输出在容器的 /opt 目录下,因为我们上面用了挂载,所在在我们宿主机的 /usr/local/logs 目录下可以看到输出的日志 - 防火墙开放端口: @@ -758,8 +758,8 @@ CMD ["catalina.sh", "run"] ``` - 打包镜像:`docker build -t harbor.gitnavi.com/demo/qiyeweixin:1.2.2 ./` -- 运行:`docker run -d -p 8888:8080 --name qiyeweixin -v /data/docker/logs/qiyeweixin:/data/logs/qiyeweixin harbor.gitnavi.com/demo/qiyeweixin:1.2.2` -- 带 JVM 参数运行:`docker run -d -p 8888:8080 -e JAVA_OPTS='-Xms7g -Xmx7g -XX:MetaspaceSize=128M -XX:MaxMetaspaceSize=512M' --name qiyeweixin -v /data/docker/logs/qiyeweixin:/data/logs/qiyeweixin harbor.gitnavi.com/demo/qiyeweixin:1.2.2` +- 运行:`docker run -d -p 8888:8080 --name=qiyeweixin --hostname=qiyeweixin -v /data/docker/logs/qiyeweixin:/data/logs/qiyeweixin harbor.gitnavi.com/demo/qiyeweixin:1.2.2` +- 带 JVM 参数运行:`docker run -d -p 8888:8080 -e JAVA_OPTS='-Xms7g -Xmx7g -XX:MetaspaceSize=128M -XX:MaxMetaspaceSize=512M' --name=qiyeweixin --hostname=qiyeweixin -v /data/docker/logs/qiyeweixin:/data/logs/qiyeweixin harbor.gitnavi.com/demo/qiyeweixin:1.2.2` - 虽然 Dockerfile 已经有 JVM 参数,并且也是有效的。但是如果 docker run 的时候又带了 JVM 参数,则会以 docker run 的参数为准 - 测试 JVM 是否有效方法,在代码里面书写,该值要接近 xmx 值: From fc459e8e4cb3063e7e094bfabc0bd0e89a0d3a00 Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 16 Oct 2018 16:48:21 +0800 Subject: [PATCH 156/330] tomcat --- markdown-file/Jenkins-Install-And-Settings.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index b26e220a..ddd8e226 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -549,7 +549,7 @@ pipeline { inDockerAndJavaPort = "8081" inHostLogPath = "/data/docker/logs/${dockerContainerName}/${env.BUILD_NUMBER}" inDockerLogPath = "/data/logs" - dockerRunParam = "--name ${dockerContainerName} -v /etc/hosts:/etc/hosts -v ${inHostLogPath}:${inDockerLogPath} --restart=always -p ${inHostPort}:${inDockerAndJavaPort}" + dockerRunParam = "--name=${dockerContainerName} --hostname=${dockerContainerName} -v /etc/hosts:/etc/hosts -v ${inHostLogPath}:${inDockerLogPath} --restart=always -p ${inHostPort}:${inDockerAndJavaPort}" } /*=======================================常修改变量-end=======================================*/ @@ -673,7 +673,7 @@ pipeline { inDockerAndJavaPort = "8081" inHostLogPath = "/data/docker/logs/${dockerContainerName}/${env.BUILD_NUMBER}" inDockerLogPath = "/data/logs" - dockerRunParam = "--name ${dockerContainerName} -v /etc/hosts:/etc/hosts -v ${inHostLogPath}:${inDockerLogPath} --restart=always -p ${inHostPort}:${inDockerAndJavaPort}" + dockerRunParam = "--name=${dockerContainerName} --hostname=${dockerContainerName} -v /etc/hosts:/etc/hosts -v ${inHostLogPath}:${inDockerLogPath} --restart=always -p ${inHostPort}:${inDockerAndJavaPort}" } /*=======================================常修改变量-end=======================================*/ @@ -754,6 +754,8 @@ pipeline { sh """ docker -H ${projectDockerDaemon} pull ${dockerImageName} + docker -H ${projectDockerDaemon} stop ${dockerContainerName} | true + docker -H ${projectDockerDaemon} rm -f ${dockerContainerName} | 
true docker -H ${projectDockerDaemon} run -d ${dockerRunParam} ${dockerImageName} From 763c3eff06ffcb1f35424ab2ee128b162444abc2 Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 16 Oct 2018 16:52:13 +0800 Subject: [PATCH 157/330] tomcat --- markdown-file/Jenkins-Install-And-Settings.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index ddd8e226..0ceac000 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -605,6 +605,8 @@ pipeline { stage('运行 Docker 镜像') { steps { sh """ + docker stop ${dockerContainerName} | true + docker rm -f ${dockerContainerName} | true docker run -d ${dockerRunParam} ${dockerImageName} From a44429a0f314f3e27a6810f353497f9c4b6d5a85 Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 16 Oct 2018 18:04:50 +0800 Subject: [PATCH 158/330] tomcat --- markdown-file/Jenkins-Install-And-Settings.md | 124 ++++++++++++++++++ 1 file changed, 124 insertions(+) diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index 0ceac000..e837cd5f 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -165,6 +165,12 @@ This may also be found at: /root/.jenkins/secrets/initialAdminPassword - 全局 pipeline 语法说明: - 其他资料 - + - + - + - + - + +#### 内置的参数 ``` BUILD_NUMBER = ${env.BUILD_NUMBER}" @@ -194,6 +200,124 @@ BUILD_URL = http://192.168.0.105:18080/job/react/21/ JOB_URL = http://192.168.0.105:18080/job/react/ ``` +#### 构建时指定参数 + +- 如果要构建的时候明确输入参数值,可以用 `parameters`: + +``` +pipeline { + agent any + + parameters { + string(name: 'assignVersionValue', defaultValue: '1.1.3', description: '构建之前请先指定版本号') + } + + tools { + jdk 'JDK8' + maven 'MAVEN3' + } + + options { + timestamps() + disableConcurrentBuilds() + buildDiscarder(logRotator( + numToKeepStr: '20', + daysToKeepStr: '30', + )) + } + + + environment { + gitUrl = "https://gitee.com/youmeek/springboot-jenkins-demo.git" + branchName = "master" + giteeCredentialsId = "Gitee" + projectWorkSpacePath = "${env.WORKSPACE}" + } + + + stages { + + stage('Check Env') { + + /*当指定的参数版本号等于空字符的时候进入 steps。这里的 when 对 当前 stage 有效,对其他 stage 无效*/ + when { + environment name: 'assignVersionValue', value: '' + } + + /*结束整个任务。如果不想结束整个任务,就不要用:exit 1*/ + steps { + sh "exit 1" + } + } + + stage('Pre Env') { + + steps { + echo "======================================项目名称 = ${env.JOB_NAME}" + echo "======================================项目 URL = ${gitUrl}" + echo "======================================项目分支 = ${branchName}" + echo "======================================当前编译版本号 = ${env.BUILD_NUMBER}" + echo "======================================项目空间文件夹路径 = ${projectWorkSpacePath}" + echo "======================================构建时自己指定的版本号值 = ${params.assignVersionValue}" + } + } + + } +} +``` + + +#### 定时构建 + +``` +pipeline { + agent any + + /*采用 linux cron 语法即可*/ + triggers { + cron('*/1 * * * *') + } + + tools { + jdk 'JDK8' + maven 'MAVEN3' + } + + options { + timestamps() + disableConcurrentBuilds() + buildDiscarder(logRotator( + numToKeepStr: '20', + daysToKeepStr: '30', + )) + } + + + environment { + gitUrl = "https://gitee.com/youmeek/springboot-jenkins-demo.git" + branchName = "master" + giteeCredentialsId = "Gitee" + projectWorkSpacePath = "${env.WORKSPACE}" + } + + + stages { + + stage('Pre Env') { + steps { + echo "======================================项目名称 = ${env.JOB_NAME}" + echo 
"======================================项目 URL = ${gitUrl}" + echo "======================================项目分支 = ${branchName}" + echo "======================================当前编译版本号 = ${env.BUILD_NUMBER}" + echo "======================================项目空间文件夹路径 = ${projectWorkSpacePath}" + } + } + + } +} + +``` + ------------------------------------------------------------------- From 6d53986cc810b99501496e12b97bd33df53b13ed Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 17 Oct 2018 17:46:24 +0800 Subject: [PATCH 159/330] tomcat --- markdown-file/Java-bin.md | 3 ++- markdown-file/monitor.md | 56 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 57 insertions(+), 2 deletions(-) diff --git a/markdown-file/Java-bin.md b/markdown-file/Java-bin.md index decc94a6..028d7854 100644 --- a/markdown-file/Java-bin.md +++ b/markdown-file/Java-bin.md @@ -168,6 +168,8 @@ - 更多其他参数的使用可以看: - [Java命令学习系列(四)——jstat](https://mp.weixin.qq.com/s?__biz=MzI3NzE0NjcwMg==&mid=402330276&idx=2&sn=58117de92512f83090d0a9de738eeacd&scene=21#wechat_redirect) - [java高分局之jstat命令使用](https://blog.csdn.net/maosijunzi/article/details/46049117) + - [ jstat命令查看jvm的GC情况 (以Linux为例)](https://www.cnblogs.com/yjd_hycf_space/p/7755633.html) + ------------------------------------------------------------------- @@ -359,7 +361,6 @@ JNI global references: 281 ``` - ## 资料 - diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index e86f0257..ae50cd5e 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -813,7 +813,7 @@ Out of memory: Kill process 19452 (java) score 264 or sacrifice child - 使用内置 tomcat-manager 监控配置,或者使用类似工具:psi-probe - 使用 `ps -ef | grep java`,查看进程 PID - 根据高 CPU 的进程 PID,查看其线程 CPU 使用情况:`top -Hp PID`,找到占用 CPU 资源高的线程 PID - - 查看堆栈情况:`jstack -l PID >> /opt/jstack-tomcat1-PID-20180917.log` + - 保存堆栈情况:`jstack -l PID >> /opt/jstack-tomcat1-PID-20180917.log` - 把占用 CPU 资源高的线程十进制的 PID 转换成 16 进制:`printf "%x\n" PID`,比如:`printf "%x\n" 12401` 得到结果是:`3071` - 在刚刚输出的那个 log 文件中搜索:`3071`,可以找到:`nid=0x3071` - 使用 `jstat -gc PID 10000 10`,查看gc情况(截图) @@ -824,6 +824,58 @@ Out of memory: Kill process 19452 (java) score 264 or sacrifice child - 结合代码解决内存溢出或泄露问题。 - 给 VM 增加 dump 触发参数:`-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/opt/tomcat-1.hprof` +#### 一次 JVM 引起的 CPU 高排查 + +- 使用 `ps -ef | grep java`,查看进程 PID + - 根据高 CPU 的进程 PID,查看其线程 CPU 使用情况:`top -Hp PID`,找到占用 CPU 资源高的线程 PID +- 保存堆栈情况:`jstack -l PID >> /opt/jstack-tomcat1-PID-20181017.log` +- 把占用 CPU 资源高的线程十进制的 PID 转换成 16 进制:`printf "%x\n" PID`,比如:`printf "%x\n" 12401` 得到结果是:`3071` +- 在刚刚输出的那个 log 文件中搜索:`3071`,可以找到:`nid=0x3071` +- 也可以在终端中直接看:`jstack PID |grep 十六进制线程 -A 30`,此时如果发现如下: + +``` +"GC task thread#0 (ParallelGC)" os_prio=0 tid=0x00007fd0ac01f000 nid=0x66f runnable +``` + +- 这种情况一般是 heap 设置得过小,而又要频繁分配对象;二是内存泄露,对象一直不能被回收,导致 CPU 占用过高 +- 使用:`jstat -gcutil PID 3000 10`: +- 正常情况结果应该是这样的: + +``` +S0 S1 E O M CCS YGC YGCT FGC FGCT GCT +0.00 0.00 67.63 38.09 78.03 68.82 124 0.966 5 0.778 1.744 +0.00 0.00 67.68 38.09 78.03 68.82 124 0.966 5 0.778 1.744 +0.00 0.00 67.68 38.09 78.03 68.82 124 0.966 5 0.778 1.744 +0.00 0.00 67.68 38.09 78.03 68.82 124 0.966 5 0.778 1.744 +0.00 0.00 67.68 38.09 78.03 68.82 124 0.966 5 0.778 1.744 +0.00 0.00 67.68 38.09 78.03 68.82 124 0.966 5 0.778 1.744 +0.00 0.00 67.68 38.09 78.03 68.82 124 0.966 5 0.778 1.744 +0.00 0.00 67.68 38.09 78.03 68.82 124 0.966 5 0.778 1.744 +0.00 0.00 67.71 38.09 78.03 68.82 124 0.966 5 0.778 1.744 +0.00 0.00 67.71 38.09 78.03 68.82 124 0.966 5 0.778 1.744 + +``` + +- S0:SO 当前使用比例 +- S1:S1 当前使用比例 +- 
E:**Eden 区使用比例(百分比)(异常的时候,这里可能会接近 100%)** +- O:**old 区使用比例(百分比)(异常的时候,这里可能会接近 100%)** +- M:**Metaspace 区使用比例(百分比)(异常的时候,这里可能会接近 100%)** +- CCS:压缩使用比例 +- YGC:年轻代垃圾回收次数 +- FGC:老年代垃圾回收次数 +- FGCT:老年代垃圾回收消耗时间(单位秒) +- GCT:垃圾回收消耗总时间(单位秒) +- **异常的时候每次 Full GC 时间也可能非常长,每次时间计算公式=FGCT值/FGC指)** +- `jmap -heap PID`,查看具体占用量是多大 +- 使用 `jmap -dump:format=b,file=/opt/dumpfile-tomcat1-PID-20180917.hprof PID`,生成堆转储文件(如果设置的 heap 过大,dump 下来会也会非常大) + - 使用 jhat 或者可视化工具(Eclipse Memory Analyzer 、IBM HeapAnalyzer)分析堆情况。 + - 一般这时候就只能根据 jhat 的分析,看源码了 +- 这里有几篇类似经历的文章推荐给大家: + - [三个神奇bug之Java占满CPU](http://luofei.me/?p=197) + - [CPU 负载过高问题排查](http://zhouyun.me/2017/10/24/cpu_load_issue/) + + #### CPU 低,负载高,访问慢(带数据库) - 基于上面,但是侧重点在于 I/O 读写,以及是否有 MySQL 死锁,或者挂载了 NFS,而 NFS Server 出现问题 @@ -844,6 +896,8 @@ Out of memory: Kill process 19452 (java) score 264 or sacrifice child - - - +- +- From b94fedb465b6dab21015cdb102975cc08b8fd496 Mon Sep 17 00:00:00 2001 From: "Judas.n" Date: Fri, 19 Oct 2018 17:25:41 +0800 Subject: [PATCH 160/330] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 94965ade..5eef6776 100644 --- a/README.md +++ b/README.md @@ -134,4 +134,4 @@ ## AD -- [推荐:程序员的个性化网址导航:GitNavi.com](http://www.gitnavi.com) +- [推荐:程序员的个性化网址导航:GitNavi.com](http://www.gitnavi.com/u/judasn/) From e3342ee60efafa63f37d1f7e3784d6ce4ad4100e Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 22 Oct 2018 11:02:31 +0800 Subject: [PATCH 161/330] =?UTF-8?q?=E8=A1=A5=E5=85=85=20Glances?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 1 + SUMMARY.md | 1 + TOC.md | 1 + markdown-file/Glances-Install-And-Settings.md | 68 +++++++++++++++++++ 4 files changed, 71 insertions(+) create mode 100644 markdown-file/Glances-Install-And-Settings.md diff --git a/README.md b/README.md index 5eef6776..03208821 100644 --- a/README.md +++ b/README.md @@ -40,6 +40,7 @@ - [日常维护](markdown-file/maintenance.md) - [日常监控](markdown-file/monitor.md) - [nmon 系统性能监控工具](markdown-file/Nmon.md) +- [Glances 安装和配置](markdown-file/Glances-Install-And-Settings.md) - [SSH(Secure Shell)介绍](markdown-file/SSH.md) - [FTP(File Transfer Protocol)介绍](markdown-file/FTP.md) - [VPN(Virtual Private Network)介绍](markdown-file/VPN.md) diff --git a/SUMMARY.md b/SUMMARY.md index cc2438ae..b1a5323a 100644 --- a/SUMMARY.md +++ b/SUMMARY.md @@ -25,6 +25,7 @@ * [日常维护](markdown-file/maintenance.md) * [日常监控](markdown-file/monitor.md) * [nmon 系统性能监控工具](markdown-file/Nmon.md) +* [Glances 安装和配置](markdown-file/Glances-Install-And-Settings.md) * [SSH(Secure Shell)介绍](markdown-file/SSH.md) * [FTP(File Transfer Protocol)介绍](markdown-file/FTP.md) * [VPN(Virtual Private Network)介绍](markdown-file/VPN.md) diff --git a/TOC.md b/TOC.md index bd148013..e93b47ec 100644 --- a/TOC.md +++ b/TOC.md @@ -23,6 +23,7 @@ - [日常维护](markdown-file/maintenance.md) - [日常监控](markdown-file/monitor.md) - [nmon 系统性能监控工具](markdown-file/Nmon.md) +- [Glances 安装和配置](markdown-file/Glances-Install-And-Settings.md) - [SSH(Secure Shell)介绍](markdown-file/SSH.md) - [FTP(File Transfer Protocol)介绍](markdown-file/FTP.md) - [VPN(Virtual Private Network)介绍](markdown-file/VPN.md) diff --git a/markdown-file/Glances-Install-And-Settings.md b/markdown-file/Glances-Install-And-Settings.md new file mode 100644 index 00000000..9b4255b2 --- /dev/null +++ b/markdown-file/Glances-Install-And-Settings.md @@ -0,0 +1,68 @@ +# Glances 安装和配置 + +## Glances 介绍 + +- 相对 top、htop,它比较重,因此内容也比较多。小机子一般不建议安装。大机子一般也不建议一直开着。 +- 官网: +- 官网 Github: +- 
官网文档: +- 当前(201810)最新版本为 3.0.2 + + +## Glances Linux 安装 + +- `curl -L https://bit.ly/glances | /bin/bash` +- 需要 5 ~ 10 分钟左右。 + +## 用法 + +#### 本地监控 + +- 进入实时监控面板(默认 3 秒一次指标):`glances` +- 每间隔 5 秒获取一次指标:`glances -t 5` +- 在控制面板中可以按快捷键进行排序、筛选 + +``` +m : 按内存占用排序进程 +p : 按进程名称排序进程 +c : 按 CPU 占用率排序进程 +i : 按 I/O 频率排序进程 +a : 自动排序进程 +d : 显示/隐藏磁盘 I/O 统计信息 +f : 显示/隐藏文件系统统计信息 +s : 显示/隐藏传感器统计信息 +y : 显示/隐藏硬盘温度统计信息 +l : 显示/隐藏日志 +n : 显示/隐藏网络统计信息 +x : 删除警告和严重日志 +h : 显示/隐藏帮助界面 +q : 退出 +w : 删除警告记录 +``` + + +#### 监控远程机子 + +- 这里面的检控方和被监控的概念要弄清楚 +- 作为服务端的机子运行(也就是被监控方):`glances -s` + - 假设它的 IP 为:192.168.1.44 + - 必需打开 61209 端口 +- 作为客户端的机子运行(要查看被检控方的数据):`glances -c 192.168.1.44` + - 这时候控制台输出的内容是被监控机子的数据 + + +## 导出数据 + +- 个人测试没效果,后续再看下吧。 +- 官网文档: +- 导出 CSV:`glances --export-csv /tmp/glances.csv` +- 导出 JSON:`glances --export-json /tmp/glances.json` + +## 资料 + +- +- +- +- +- +- From b6895f0cab30ff081c7d76754713ad5246a44686 Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 22 Oct 2018 15:39:29 +0800 Subject: [PATCH 162/330] redis --- markdown-file/Redis-Install-And-Settings.md | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/markdown-file/Redis-Install-And-Settings.md b/markdown-file/Redis-Install-And-Settings.md index e99fddfa..bc5146b2 100644 --- a/markdown-file/Redis-Install-And-Settings.md +++ b/markdown-file/Redis-Install-And-Settings.md @@ -246,17 +246,14 @@ M: 5d0fe968559af3035d8d64ab598f2841e5f3a059 172.19.0.7:6379 - 官网 Github 地址: - 此时(20160212) Redis 最新稳定版本为:**3.0.7** - 官网帮助中心: - - 我个人习惯 `/opt` 目录下创建一个目录 `setups` 用来存放各种软件安装包;在 `/usr` 目录下创建一个 `program` 用来存放各种解压后的软件包,下面的讲解也都是基于此习惯 - - 我个人已经使用了第三方源:`EPEL、RepoForge`,如果你出现 `yum install XXXXX` 安装不成功的话,很有可能就是你没有相关源,请查看我对源设置的文章 - - Redis 下载:`wget http://download.redis.io/releases/redis-3.0.7.tar.gz` (大小:1.4 M) + - Redis 下载(/usr/local):`wget http://download.redis.io/releases/redis-3.0.7.tar.gz` (大小:1.4 M) - 安装依赖包:`yum install -y gcc-c++ tcl` - 解压:`tar zxvf redis-3.0.7.tar.gz` - - 移动到我个人安装目录:`mv redis-3.0.7/ /usr/program/` - - 进入解压后目录:`cd /usr/program/redis-3.0.7/` + - 进入解压后目录:`cd /usr/local/redis-3.0.7/` - 编译:`make` - 编译安装:`make install` - 安装完之后会在:`/usr/local/bin` 目录下生成好几个 redis 相关的文件 - - 复制配置文件:`cp /usr/program/redis-3.0.7/redis.conf /etc/` + - 复制配置文件:`cp /usr/local/redis-3.0.7/redis.conf /etc/` - 修改配置:`vim /etc/redis.conf` - 把旧值:`daemonize no` - 改为新值:`daemonize yes` From 2eaa03b85c6ff9d7ded219a18c06d47d11f7728b Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 23 Oct 2018 19:24:34 +0800 Subject: [PATCH 163/330] redis --- markdown-file/Java-bin.md | 38 ++++++++++++++++++- markdown-file/Jenkins-Install-And-Settings.md | 11 +++++- 2 files changed, 46 insertions(+), 3 deletions(-) diff --git a/markdown-file/Java-bin.md b/markdown-file/Java-bin.md index 028d7854..2c0c5cb1 100644 --- a/markdown-file/Java-bin.md +++ b/markdown-file/Java-bin.md @@ -170,6 +170,41 @@ - [java高分局之jstat命令使用](https://blog.csdn.net/maosijunzi/article/details/46049117) - [ jstat命令查看jvm的GC情况 (以Linux为例)](https://www.cnblogs.com/yjd_hycf_space/p/7755633.html) +#### gcutil + +- 使用:`jstat -gcutil PID 3000 10`: +- 正常情况结果应该是这样的: + +``` +S0 S1 E O M CCS YGC YGCT FGC FGCT GCT +0.00 0.00 67.63 38.09 78.03 68.82 124 0.966 5 0.778 1.744 +0.00 0.00 67.68 38.09 78.03 68.82 124 0.966 5 0.778 1.744 +0.00 0.00 67.68 38.09 78.03 68.82 124 0.966 5 0.778 1.744 +0.00 0.00 67.68 38.09 78.03 68.82 124 0.966 5 0.778 1.744 +0.00 0.00 67.68 38.09 78.03 68.82 124 0.966 5 0.778 1.744 +0.00 0.00 67.68 38.09 78.03 68.82 124 0.966 5 0.778 1.744 +0.00 0.00 67.68 38.09 78.03 68.82 124 0.966 5 0.778 1.744 
+0.00 0.00 67.68 38.09 78.03 68.82 124 0.966 5 0.778 1.744 +0.00 0.00 67.71 38.09 78.03 68.82 124 0.966 5 0.778 1.744 +0.00 0.00 67.71 38.09 78.03 68.82 124 0.966 5 0.778 1.744 + +``` + +- S0:SO 当前使用比例 +- S1:S1 当前使用比例 +- E:**Eden 区使用比例(百分比)(异常的时候,这里可能会接近 100%)** +- O:**old 区使用比例(百分比)(异常的时候,这里可能会接近 100%)** +- M:**Metaspace 区使用比例(百分比)(异常的时候,这里可能会接近 100%)** +- CCS:压缩使用比例 +- YGC:年轻代垃圾回收次数 +- FGC:老年代垃圾回收次数 +- FGCT:老年代垃圾回收消耗时间(单位秒) +- GCT:垃圾回收消耗总时间(单位秒) +- **异常的时候每次 Full GC 时间也可能非常长,每次时间计算公式=FGCT值/FGC指)** +- 在 YGC 之前 年轻代 = eden + S1;YGC 之后,年轻代 = eden + S0。 +- 如果看到 YGC 之后 old 区空间没变,表示此次 YGC,没有对象晋升到 old 区 + + ------------------------------------------------------------------- @@ -364,4 +399,5 @@ JNI global references: 281 ## 资料 - -- \ No newline at end of file +- +- \ No newline at end of file diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index e837cd5f..bf945f35 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ b/markdown-file/Jenkins-Install-And-Settings.md @@ -69,6 +69,8 @@ Can not write to /var/jenkins_home/copy_reference_file.log. Wrong volume permiss - Mailer Plugin - NotifyQQ([QQ 消息通知](https://github.com/ameizi/NotifyQQ)) - 钉钉通知([钉钉 消息通知](https://wiki.jenkins.io/display/JENKINS/Dingding+Notification+Plugin)) + - 360 FireLine:代码规范检查,已经集成了阿里巴巴的代码规约(P3C)检查 + - AnsiColor(可选):这个插件可以让Jenkins的控制台输出的log带有颜色 - oauth(具体名字要看下) - Build Failure Analyzer 分析构建错误日志并在构建页面显示错误 - SSH plugin 支持通过SSH执行脚本 @@ -82,14 +84,12 @@ Can not write to /var/jenkins_home/copy_reference_file.log. Wrong volume permiss - Gitlab Plugin:可能会直接安装不成功,如果不成功根据报错的详细信息可以看到 hpi 文件的下载地址,挂代理下载下来,然后离线安装即可 - Gitlab Hook:用于触发 GitLab 的一些 WebHooks 来构建项目 - Gitlab Authentication 这个插件提供了使用GitLab进行用户认证和授权的方案 - - FireLine Plugin:代码规范检查,已经集成了阿里巴巴的代码规约(P3C)检查 - Docker Commons Plugin - Docker plugin - Kubernetes - Pre SCM BuildStep Plugin 在拉代码之前插入一些步骤 - GitHub Pull Request Builder Github Pull Request时自动构建 - GitHub API Plugin Github API插件 - - AnsiColor(可选):这个插件可以让Jenkins的控制台输出的log带有颜色 - NodeJS Plugin @@ -902,6 +902,13 @@ pipeline { +------------------------------------------------------------------- + +## 多节点 master 与 slave + +- 可以参考这篇: + + ------------------------------------------------------------------- From 0bd8c26db7205551129c6c7364456fb5cd723098 Mon Sep 17 00:00:00 2001 From: "Judas.n" Date: Thu, 1 Nov 2018 15:53:42 +0800 Subject: [PATCH 164/330] Update Redis-Install-And-Settings.md --- markdown-file/Redis-Install-And-Settings.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/markdown-file/Redis-Install-And-Settings.md b/markdown-file/Redis-Install-And-Settings.md index bc5146b2..99567baf 100644 --- a/markdown-file/Redis-Install-And-Settings.md +++ b/markdown-file/Redis-Install-And-Settings.md @@ -22,9 +22,16 @@ - Redis 默认的配置文件内容: ``` ini -bind 0.0.0.0 +安全情况的几个特殊配置: +bind 127.0.0.1 requirepass adgredis123456 protected-mode yes + +免密情况: +bind 0.0.0.0 +protected-mode no + +其他: port 6379 tcp-backlog 511 timeout 0 From c64828e904992514fc807a41de70ddc4e08a7846 Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 1 Nov 2018 18:03:04 +0800 Subject: [PATCH 165/330] RabbitMQ --- markdown-file/RabbitMQ-Install-And-Settings.md | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/markdown-file/RabbitMQ-Install-And-Settings.md b/markdown-file/RabbitMQ-Install-And-Settings.md index 3ad9eb37..e224fae0 100644 --- a/markdown-file/RabbitMQ-Install-And-Settings.md +++ b/markdown-file/RabbitMQ-Install-And-Settings.md @@ -5,12 +5,25 @@ - 
官网镜像: - 官网镜像说明: -- 运行: +- 一般情况,运行: ``` docker run -d --name cloud-rabbitmq -p 5671:5671 -p 5672:5672 -p 4369:4369 -p 25672:25672 -p 15671:15671 -p 15672:15672 -e RABBITMQ_DEFAULT_USER=admin -e RABBITMQ_DEFAULT_PASS=adg123456 rabbitmq:3-management ``` +- 带有 websocket stomp 功能(不知道是什么就不用管它): + +``` +docker run -d --name cloud-rabbitmq -p 5671:5671 -p 5672:5672 -p 4369:4369 -p 25672:25672 -p 15671:15671 -p 15672:15672 -p 61613:61613 -e RABBITMQ_DEFAULT_USER=admin -e RABBITMQ_DEFAULT_PASS=adg123456 rabbitmq:3-management + + +进入 Docker 容器启动 stomp 插件: +docker exec -it cloud-rabbitmq /bin/bash +cd /plugins +rabbitmq-plugins enable rabbitmq_web_stomp +``` + + - 参数解释: - rabbitmq:3-management:只有带 management 后缀的才有 web 端管理入口 - 15672:表示 RabbitMQ 控制台端口号,可以在浏览器中通过控制台来执行 RabbitMQ 的相关操作。容器启动成功后,可以在浏览器输入地址:http://ip:15672/ 访问控制台 From baacc3df6441dd3cafd21ef130042de711f20504 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 2 Nov 2018 00:15:14 +0800 Subject: [PATCH 166/330] RabbitMQ --- .../RabbitMQ-Install-And-Settings.md | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/markdown-file/RabbitMQ-Install-And-Settings.md b/markdown-file/RabbitMQ-Install-And-Settings.md index e224fae0..9850edd8 100644 --- a/markdown-file/RabbitMQ-Install-And-Settings.md +++ b/markdown-file/RabbitMQ-Install-And-Settings.md @@ -111,3 +111,38 @@ yum install rabbitmq-server -y - ![RabbitMQ-Add-User](../images/RabbitMQ-Add-User-a-3.gif) - 交换机绑定队列(如下图所示): - ![RabbitMQ-Bindings-Queue](../images/RabbitMQ-Bindings-Queue-a-1.gif) + +## 集群环境(镜像队列) + +- TODO + +## 消息重复 + +- 消息重复无法避免,比如消费端异常重启就有可能,或者 MQ 应用挂了重启之后等场景,任何 MQ 应用没有保证消息不会重复发送。 +- 对于一定要保证幂等性的业务场景,在消费端做好标识。比如在 Redis 或 JVM 缓存中存有上一次消费的记录,业务操作之前下判断。 +- 如果是插入操作类的,也可以考虑用唯一约束的方式来保证插入不会重复等。 + +## 消息丢失 + +- 单节点,纯内存情况下一般有三种情况: + - 生产者提交消息到 MQ,但是网络抖动了,丢了。或是 MQ 拿到之后突然挂了,来不及登记 + - MQ 拿到消息,消费者还没消费,但是 MQ 挂了 + - 消费者拿到消息来不及处理,自己挂了,MQ 认为已经消费成功了。 +- 分别解决办法: + - 把 channel 设置为 confirm 模式 + - 持久化队列:创建 queue 的时候持久化 durable=true。持久化消息:生产者发送消息时候:deliveryMode = 2 + - 手动 ACK + +## 消息顺序 + +- 一般场景不需要消息顺序,要去做一般也开销很大,需要执行考虑。 +- 在能保证消息顺序的情况下,可以用来做数据同步 + + + + + + + + + From d6c52637087b85872be2a29580cbddd10a443fcd Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 2 Nov 2018 11:58:33 +0800 Subject: [PATCH 167/330] RabbitMQ --- markdown-file/RabbitMQ-Install-And-Settings.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/markdown-file/RabbitMQ-Install-And-Settings.md b/markdown-file/RabbitMQ-Install-And-Settings.md index 9850edd8..c90868c9 100644 --- a/markdown-file/RabbitMQ-Install-And-Settings.md +++ b/markdown-file/RabbitMQ-Install-And-Settings.md @@ -137,10 +137,14 @@ yum install rabbitmq-server -y - 一般场景不需要消息顺序,要去做一般也开销很大,需要执行考虑。 - 在能保证消息顺序的情况下,可以用来做数据同步 +- 解决: + - 消费者只有单个应用,并且内容不要使用异步或者多线程。在这种场景下绑定 queue,**基于消息队列本质是队列,消息是 FIFO(先进先出)的**,这样消息就能按顺序。但是缺点很明显:吞吐太差,效率太低,适合低效率的业务。 + - 基于上面方案并且对队列进行分片。假设我们原来 repay.queue下面有 10 个消费者线程,那么我们可以创建10个队列,每个队列下面只允许有一个消费者。一个比较简单的方式是,队列命名为 repay.queue-0,repay.queue-2…repay.queue-9,然后生产者推送信息的时候,基于用户的ID(Long类型)mod 10 取模 0…9(取余),再选择发送到相应的队列即可,这样就等保证同一个用户的顺序。 +## 资料 - +- From 7ec177dffb1db22818af9f56397848a89edd56ea Mon Sep 17 00:00:00 2001 From: judasn Date: Sat, 3 Nov 2018 10:09:31 +0800 Subject: [PATCH 168/330] jenkins --- markdown-file/Jenkins-Install-And-Settings.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md index bf945f35..82cc053c 100644 --- a/markdown-file/Jenkins-Install-And-Settings.md +++ 
b/markdown-file/Jenkins-Install-And-Settings.md @@ -318,6 +318,16 @@ pipeline { ``` +#### 同时构建其他 Job + +``` +stage('运行其他任务') { + steps { + build job: '任务名称' + } +} +``` + ------------------------------------------------------------------- From 927258c70f31358f924c0ede6d092ba9bee3a1ac Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 5 Nov 2018 19:38:17 +0800 Subject: [PATCH 169/330] rabbitmq --- markdown-file/RabbitMQ-Install-And-Settings.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/markdown-file/RabbitMQ-Install-And-Settings.md b/markdown-file/RabbitMQ-Install-And-Settings.md index c90868c9..21380e71 100644 --- a/markdown-file/RabbitMQ-Install-And-Settings.md +++ b/markdown-file/RabbitMQ-Install-And-Settings.md @@ -141,6 +141,16 @@ yum install rabbitmq-server -y - 消费者只有单个应用,并且内容不要使用异步或者多线程。在这种场景下绑定 queue,**基于消息队列本质是队列,消息是 FIFO(先进先出)的**,这样消息就能按顺序。但是缺点很明显:吞吐太差,效率太低,适合低效率的业务。 - 基于上面方案并且对队列进行分片。假设我们原来 repay.queue下面有 10 个消费者线程,那么我们可以创建10个队列,每个队列下面只允许有一个消费者。一个比较简单的方式是,队列命名为 repay.queue-0,repay.queue-2…repay.queue-9,然后生产者推送信息的时候,基于用户的ID(Long类型)mod 10 取模 0…9(取余),再选择发送到相应的队列即可,这样就等保证同一个用户的顺序。 +## 消息积压 + +- 如果消费者挂掉,消息会一直积压在 MQ 中 +- 解决办法 + - 如果原来有 3 个消费者应用,现在需要准备多倍的消费者应用,假设现在有 10 个新的消费者应用。 + - 创建一个临时的 topic,假设叫做 topic_abc + - 新的 10 个消费者应用绑定在新的 topic_abc 上 + - 修改原来 3 个消费者应用代码,改为接受到 MQ 消息后不操作入数据库了,而是直接发给 topic_abc + - 这样原来积压的消息就有 10 个消费者一起来分摊 + ## 资料 From 6c00892620774ba4a4d85a02d61f1191787d92db Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 5 Nov 2018 20:04:51 +0800 Subject: [PATCH 170/330] rabbitmq --- markdown-file/RabbitMQ-Install-And-Settings.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/markdown-file/RabbitMQ-Install-And-Settings.md b/markdown-file/RabbitMQ-Install-And-Settings.md index 21380e71..b3a84a9a 100644 --- a/markdown-file/RabbitMQ-Install-And-Settings.md +++ b/markdown-file/RabbitMQ-Install-And-Settings.md @@ -150,6 +150,8 @@ yum install rabbitmq-server -y - 新的 10 个消费者应用绑定在新的 topic_abc 上 - 修改原来 3 个消费者应用代码,改为接受到 MQ 消息后不操作入数据库了,而是直接发给 topic_abc - 这样原来积压的消息就有 10 个消费者一起来分摊 + - 如果此时这个 MQ 扛不住压力,那只能让旧的 3 个消费者应用写到其他 MQ,然后 10 个新消费者消费新的 MQ。 + - 然后深夜找个时间重新欢迎旧的 3 个消费者代码。 ## 资料 From ede832f3919ae80da23f83ba749d786924592754 Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 6 Nov 2018 23:12:44 +0800 Subject: [PATCH 171/330] redis --- markdown-file/Redis-Install-And-Settings.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/markdown-file/Redis-Install-And-Settings.md b/markdown-file/Redis-Install-And-Settings.md index 99567baf..2b7f77c4 100644 --- a/markdown-file/Redis-Install-And-Settings.md +++ b/markdown-file/Redis-Install-And-Settings.md @@ -95,6 +95,8 @@ aof-rewrite-incremental-fsync yes #### Redis 容器准备 - 目标:3 主 3 从(一般都是推荐奇数个 master) +- 最小集群数推荐是:3 +- 测试机的最低配置推荐是:2C4G - 拉取镜像:`docker pull registry.cn-shenzhen.aliyuncs.com/youmeek/redis-to-cluster:3.2.3` - 重新打个 tag(旧名字太长了):`docker tag registry.cn-shenzhen.aliyuncs.com/youmeek/redis-to-cluster:3.2.3 redis-to-cluster:3.2.3` - 创建网段:`docker network create --subnet=172.19.0.0/16 net-redis-to-cluster` From 751daf38922b1502ff44f7ac49eddcba94dae665 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 7 Nov 2018 13:56:46 +0800 Subject: [PATCH 172/330] redis --- markdown-file/Redis-Install-And-Settings.md | 32 +++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/markdown-file/Redis-Install-And-Settings.md b/markdown-file/Redis-Install-And-Settings.md index 99567baf..f33ab203 100644 --- a/markdown-file/Redis-Install-And-Settings.md +++ b/markdown-file/Redis-Install-And-Settings.md @@ -664,6 +664,38 @@ 
used_cpu_sys_children : Redis 后台进程在 内核态 消耗的 CPU used_cpu_user_children : Redis 后台进程在 用户态 消耗的 CPU ``` +## Redis 基准压力测试 + +- 默认安装包下就自带 +- 官网文档: +- 运行:`redis-benchmark -q -n 100000` + - `-q` 表示 quiet 安静执行,结束后直接输出结果即可 + - `-n 100000` 请求 10 万次 + +``` +PING_INLINE: 62189.05 requests per second +PING_BULK: 68634.18 requests per second +SET: 58241.12 requests per second +GET: 65445.03 requests per second +INCR: 57703.40 requests per second +LPUSH: 61199.51 requests per second +RPUSH: 68119.89 requests per second +LPOP: 58309.04 requests per second +RPOP: 63775.51 requests per second +SADD: 58479.53 requests per second +HSET: 61500.61 requests per second +SPOP: 58241.12 requests per second +LPUSH (needed to benchmark LRANGE): 59523.81 requests per second +LRANGE_100 (first 100 elements): 60350.03 requests per second +LRANGE_300 (first 300 elements): 57636.89 requests per second +LRANGE_500 (first 450 elements): 63251.11 requests per second +LRANGE_600 (first 600 elements): 58479.53 requests per second +MSET (10 keys): 56401.58 requests per second +``` + +- 只测试特定类型:`redis-benchmark -t set,lpush -n 100000 -q` + + ## 资料 - From b8b30380ab76894c131c6d4f6c9f519d7cb8d601 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 7 Nov 2018 17:41:09 +0800 Subject: [PATCH 173/330] :construction: monitor --- markdown-file/monitor.md | 1 + 1 file changed, 1 insertion(+) diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index ae50cd5e..17fb5a0d 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -803,6 +803,7 @@ Out of memory: Kill process 19452 (java) score 264 or sacrifice child - `netstat -nulp` - 统计当前连接的一些状态情况:`netstat -nat |awk '{print $6}'|sort|uniq -c|sort -rn` - 查看每个 ip 跟服务器建立的连接数:`netstat -nat|awk '{print$5}'|awk -F : '{print$1}'|sort|uniq -c|sort -rn` + - 查看与后端应用端口连接的有多少:`lsof -i:8080|grep 'TCP'|wc -l` - 跟踪程序(按 `Ctrl + C` 停止跟踪):`strace -tt -T -v -f -e trace=file -o /opt/strace-20180915.log -s 1024 -p PID` - 看下谁在线:`w`,`last` - 看下执行了哪些命令:`history` From 25685cc9ed2ca7e46f72f82a7422a1895a6207d5 Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 8 Nov 2018 14:59:31 +0800 Subject: [PATCH 174/330] :construction: hack --- markdown-file/Was-Hacked.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/markdown-file/Was-Hacked.md b/markdown-file/Was-Hacked.md index 6f666e73..822caccd 100644 --- a/markdown-file/Was-Hacked.md +++ b/markdown-file/Was-Hacked.md @@ -32,6 +32,10 @@ - 检查某个端口的具体信息:`lsof -i :18954` - 检查启动项:`chkconfig` - 检查定时器:`cat /etc/crontab` +- 检查定时器:`crontab -l` +- 定时器的几个配置文件: + - `cat /var/spool/cron/root` + - `cat /var/spool/cron/crontabs/root` - 检查其他系统重要文件: - `cat /etc/rc.local` - `cd /etc/init.d;ll` From 15abc303472a563d096706149c4b6381bb762e92 Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 8 Nov 2018 15:01:27 +0800 Subject: [PATCH 175/330] :construction: hack --- markdown-file/Was-Hacked.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/markdown-file/Was-Hacked.md b/markdown-file/Was-Hacked.md index 822caccd..3adb0e70 100644 --- a/markdown-file/Was-Hacked.md +++ b/markdown-file/Was-Hacked.md @@ -33,9 +33,6 @@ - 检查启动项:`chkconfig` - 检查定时器:`cat /etc/crontab` - 检查定时器:`crontab -l` -- 定时器的几个配置文件: - - `cat /var/spool/cron/root` - - `cat /var/spool/cron/crontabs/root` - 检查其他系统重要文件: - `cat /etc/rc.local` - `cd /etc/init.d;ll` From b63ca8b4300f11a9dbc9a51a7fcc2c508d75ee4a Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 21 Nov 2018 14:18:13 +0800 Subject: [PATCH 176/330] :construction: monitor --- markdown-file/monitor.md | 32 ++++++++++++++++++++++++++++++-- 1 file 
changed, 30 insertions(+), 2 deletions(-) diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index 17fb5a0d..5505e34e 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -682,12 +682,12 @@ eth0 1500 10903437 0 0 0 10847867 0 0 0 BMRU lo 65536 453650 0 0 0 453650 0 0 0 LRU ``` -- 接收: +- 接收(该值是历史累加数据,不是瞬间数据,要计算时间内的差值需要自己减): - RX-OK 已接收字节数 - RX-ERR 已接收错误字节数(数据值大说明网络存在问题) - RX-DRP 已丢失字节数(数据值大说明网络存在问题) - RX-OVR 由于误差而遗失字节数(数据值大说明网络存在问题) -- 发送: +- 发送(该值是历史累加数据,不是瞬间数据,要计算时间内的差值需要自己减): - TX-OK 已发送字节数 - TX-ERR 已发送错误字节数(数据值大说明网络存在问题) - TX-DRP 已丢失字节数(数据值大说明网络存在问题) @@ -777,6 +777,33 @@ Out of memory: Kill process 19452 (java) score 264 or sacrifice child ## 服务器故障排查顺序 +#### 请求时好时坏 + +- 系统层面 + - 查看负载、CPU、内存、上线时间、高资源进程 PID:`htop` + - 查看网络丢失情况:`netstat -i 3`,关注:RX-DRP、TX-DRP,如果两个任何一个有值,或者都有值,肯定是网络出了问题(该值是历史累加数据,不是瞬间数据)。 +- 应用层面 + - 临时修改 nginx log 输出格式,输出完整信息,包括请求头 + +``` +$request_body 请求体(含POST数据) +$http_XXX 指定某个请求头(XXX为字段名,全小写) +$cookie_XXX 指定某个cookie值(XXX为字段名,全小写) + + +类似用法: +log_format special_main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$request_body" "$http_referer" ' + '"$http_user_agent" $http_x_forwarded_for "appid=$http_appid,appver=$http_appver,vuser=$http_vuser" ' + '"phpsessid=$cookie_phpsessid,vuser_cookie=$cookie___vuser" '; + + +access_log /home/wwwlogs/hicrew.log special_main; + +``` + + + #### CPU 高,负载高,访问慢(没有数据库) - **记录负载开始升高的时间** @@ -899,6 +926,7 @@ S0 S1 E O M CCS YGC YGCT FGC FGCT GCT - - - +- From 252600b68f4f407896f609f1ea7d0603844440ea Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 22 Nov 2018 16:06:37 +0800 Subject: [PATCH 177/330] :construction: kafka --- markdown-file/Kafka-Install-And-Settings.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/markdown-file/Kafka-Install-And-Settings.md b/markdown-file/Kafka-Install-And-Settings.md index 4ff5826d..a9e891f1 100644 --- a/markdown-file/Kafka-Install-And-Settings.md +++ b/markdown-file/Kafka-Install-And-Settings.md @@ -123,6 +123,8 @@ services: - /data/docker/kafka/logs:/data/docker/kafka/logs ``` +- 启动:`docker-compose up -d` +- 停止:`docker-compose stop` - 测试: - 进入 kafka 容器:`docker exec -it kafkadocker_kafka_1 /bin/bash` - 根据官网 Dockerfile 说明,kafka home 应该是:`cd /opt/kafka` From c60af8fb7459bcfa19db4ced0b097db0cad48506 Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 22 Nov 2018 16:42:17 +0800 Subject: [PATCH 178/330] :construction: kafka --- markdown-file/Kafka-Install-And-Settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/markdown-file/Kafka-Install-And-Settings.md b/markdown-file/Kafka-Install-And-Settings.md index a9e891f1..ad8155e2 100644 --- a/markdown-file/Kafka-Install-And-Settings.md +++ b/markdown-file/Kafka-Install-And-Settings.md @@ -433,7 +433,7 @@ socket.request.max.bytes=104857600 - 查看 topic 命令:`bin/kafka-topics.sh --list --zookeeper youmeekhost:2181` - 删除 topic:`bin/kafka-topics.sh --delete --topic my-topic-test --zookeeper youmeekhost:2181` - 给 topic 发送消息命令:`bin/kafka-console-producer.sh --broker-list youmeekhost:9092 --topic my-topic-test`,然后在出现交互输入框的时候输入你要发送的内容 - - 再开一个终端,进入 kafka 容器,接受消息:`bin/kafka-console-consumer.sh --bootstrap-server youmeekhost:9092 --topic my-topic-test --from-beginning` + - 再开一个终端,进入 kafka 容器,接受消息:`cd /usr/local/kafka && bin/kafka-console-consumer.sh --bootstrap-server youmeekhost:9092 --topic my-topic-test --from-beginning` - 此时发送的终端输入一个内容回车,接受消息的终端就可以收到。 - Spring Boot 依赖: From e018b0b681bac2b01f30110fe0c95cd35f3d6e4f Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 22 Nov 2018 
17:00:24 +0800 Subject: [PATCH 179/330] :construction: kafka --- markdown-file/Kafka-Install-And-Settings.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/markdown-file/Kafka-Install-And-Settings.md b/markdown-file/Kafka-Install-And-Settings.md index ad8155e2..96c10ec6 100644 --- a/markdown-file/Kafka-Install-And-Settings.md +++ b/markdown-file/Kafka-Install-And-Settings.md @@ -367,8 +367,9 @@ wurstmeister/kafka:latest ---------------------------------------------------------------------------------------------- -## Kafka 1.0.1 源码安装 +## Kafka 1.0.1 源码安装(也支持 1.0.2) +- 测试环境:2G 内存足够 - 一台机子:CentOS 7.4,根据文章最开头,已经修改了 hosts - 确保本机安装有 JDK8(JDK 版本不能随便挑选) - 先用上面的 docker 方式部署一个 zookeeper,我这里的 zookeeper IP 地址为:`172.16.0.2` From 29c7bb5bd32431a119d1cb589c7f736eff1f9095 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 23 Nov 2018 11:29:55 +0800 Subject: [PATCH 180/330] :construction: kafka --- favorite-file/shell/mysql_backup.sh | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 favorite-file/shell/mysql_backup.sh diff --git a/favorite-file/shell/mysql_backup.sh b/favorite-file/shell/mysql_backup.sh new file mode 100644 index 00000000..0d0f29a3 --- /dev/null +++ b/favorite-file/shell/mysql_backup.sh @@ -0,0 +1,17 @@ +#!/bin/bash + + +backupDatetime=$1 + +if [ "$backupDatetime" = "" ]; +then + echo -e "\033[0;31m 请输入备份日期 \033[0m" + exit 1 +fi + +echo "备份日期 = $backupDatetime" + +/usr/bin/mysqldump -u root --password=adg123adg456adg wordpress > /opt/wordpress-"$backupDatetime".sql + + + From 1d8f8f265ace99a9319365d9b55a011ba9999739 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 23 Nov 2018 11:30:36 +0800 Subject: [PATCH 181/330] :construction: kafka --- favorite-file/shell/mysql_backup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/favorite-file/shell/mysql_backup.sh b/favorite-file/shell/mysql_backup.sh index 0d0f29a3..541c77cb 100644 --- a/favorite-file/shell/mysql_backup.sh +++ b/favorite-file/shell/mysql_backup.sh @@ -11,7 +11,7 @@ fi echo "备份日期 = $backupDatetime" -/usr/bin/mysqldump -u root --password=adg123adg456adg wordpress > /opt/wordpress-"$backupDatetime".sql +/usr/bin/mysqldump -u root --password=123456 数据库名 > /opt/mydb-"$backupDatetime".sql From 719d153f4bac856746bfdfafb1813c91368cc5db Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 23 Nov 2018 14:12:35 +0800 Subject: [PATCH 182/330] :construction: kafka --- markdown-file/Kafka-Install-And-Settings.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/markdown-file/Kafka-Install-And-Settings.md b/markdown-file/Kafka-Install-And-Settings.md index 96c10ec6..c411bd28 100644 --- a/markdown-file/Kafka-Install-And-Settings.md +++ b/markdown-file/Kafka-Install-And-Settings.md @@ -458,6 +458,8 @@ socket.request.max.bytes=104857600 ``` +- 项目配置文件:bootstrap-servers 地址:`instance-3v0pbt5d:9092`(这里端口是 9092 别弄错了) + ---------------------------------------------------------------------------------------------- ## kafka 1.0.1 默认配置文件内容 From 509343891ac92139fb9ec18b53a8918815388fb8 Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 26 Nov 2018 09:16:51 +0800 Subject: [PATCH 183/330] :construction: jdk --- markdown-file/Java-bin.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/markdown-file/Java-bin.md b/markdown-file/Java-bin.md index 2c0c5cb1..edee24b9 100644 --- a/markdown-file/Java-bin.md +++ b/markdown-file/Java-bin.md @@ -164,7 +164,7 @@ - CCSMX 最大压缩类空间大小 - CCSC 当前压缩类空间大小 - YGC 年轻代gc次数,从应用程序启动到采样时年轻代中gc次数 - - FGC 老年代GC次数,从应用程序启动到采样时old代(全gc)gc次数 + - FGC 
老年代GC次数,从应用程序启动到采样时old代(全gc = Full gc次数)gc次数 - 更多其他参数的使用可以看: - [Java命令学习系列(四)——jstat](https://mp.weixin.qq.com/s?__biz=MzI3NzE0NjcwMg==&mid=402330276&idx=2&sn=58117de92512f83090d0a9de738eeacd&scene=21#wechat_redirect) - [java高分局之jstat命令使用](https://blog.csdn.net/maosijunzi/article/details/46049117) @@ -198,7 +198,7 @@ S0 S1 E O M CCS YGC YGCT FGC FGCT GCT - CCS:压缩使用比例 - YGC:年轻代垃圾回收次数 - FGC:老年代垃圾回收次数 -- FGCT:老年代垃圾回收消耗时间(单位秒) +- FGCT:老年代垃圾回收消耗时间(Full gc耗时)(单位秒) - GCT:垃圾回收消耗总时间(单位秒) - **异常的时候每次 Full GC 时间也可能非常长,每次时间计算公式=FGCT值/FGC指)** - 在 YGC 之前 年轻代 = eden + S1;YGC 之后,年轻代 = eden + S0。 @@ -330,6 +330,10 @@ tenured generation: - 在线看某个线程 PID 的情况:`jstack 进程ID | grep 十六进制线程ID -A 10` - `-A 10` 参数用来指定显示行数,否则只会显示一行信息 - 下面 demo 内容太多,所以选取其中一部分结构: +- 常见线程状态 + - Runnable:正在运行的线程 + - Sleeping:休眠的线程 + - Waiting:等待的线程 ``` 2018-03-08 14:28:13 From f9f2f3fadcce270a0f6b959268db1ec9573550a0 Mon Sep 17 00:00:00 2001 From: "Judas.n" Date: Mon, 26 Nov 2018 16:51:27 +0800 Subject: [PATCH 184/330] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 03208821..ebeb2100 100644 --- a/README.md +++ b/README.md @@ -136,3 +136,4 @@ ## AD - [推荐:程序员的个性化网址导航:GitNavi.com](http://www.gitnavi.com/u/judasn/) +- [适合后端开发者的前端 React-Admin](https://github.com/satan31415/umi-admin) From f260924da556e7bd22ed59b42a50b8ef89115093 Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 29 Nov 2018 17:34:50 +0800 Subject: [PATCH 185/330] :construction: jdk --- favorite-file/shell/install_jdk_offline_to_zsh.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/favorite-file/shell/install_jdk_offline_to_zsh.sh b/favorite-file/shell/install_jdk_offline_to_zsh.sh index b83adb33..0c0c091b 100644 --- a/favorite-file/shell/install_jdk_offline_to_zsh.sh +++ b/favorite-file/shell/install_jdk_offline_to_zsh.sh @@ -8,28 +8,28 @@ fi echo "判断 JDK 压缩包是否存在" -if [ ! -f "/opt/setups/jdk-8u181-linux-x64.tar.gz" ]; then +if [ ! -f "/opt/setups/jdk-8u191-linux-x64.tar.gz" ]; then echo "JDK 压缩包不存在" exit 1 fi echo "开始解压 JDK" -cd /opt/setups ; tar -zxf jdk-8u181-linux-x64.tar.gz +cd /opt/setups ; tar -zxf jdk-8u191-linux-x64.tar.gz -if [ ! -d "/opt/setups/jdk1.8.0_181" ]; then +if [ ! 
-d "/opt/setups/jdk1.8.0_191" ]; then echo "JDK 解压失败,结束脚本" exit 1 fi echo "JDK 解压包移到 /usr/local/ 目录下" -mv jdk1.8.0_181/ /usr/local/ +mv jdk1.8.0_191/ /usr/local/ echo "JDK 写入系统变量到 zshrc" cat << EOF >> ~/.zshrc # JDK -JAVA_HOME=/usr/local/jdk1.8.0_181 +JAVA_HOME=/usr/local/jdk1.8.0_191 JRE_HOME=\$JAVA_HOME/jre PATH=\$PATH:\$JAVA_HOME/bin CLASSPATH=.:\$JAVA_HOME/lib/dt.jar:\$JAVA_HOME/lib/tools.jar From b802de1ef5edda5014b63ab10fc7e25b4f08aa8c Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 30 Nov 2018 11:20:22 +0800 Subject: [PATCH 186/330] :construction: kafka --- markdown-file/Kafka-Install-And-Settings.md | 36 ++++++++++++++++++--- 1 file changed, 31 insertions(+), 5 deletions(-) diff --git a/markdown-file/Kafka-Install-And-Settings.md b/markdown-file/Kafka-Install-And-Settings.md index c411bd28..bf4c362d 100644 --- a/markdown-file/Kafka-Install-And-Settings.md +++ b/markdown-file/Kafka-Install-And-Settings.md @@ -1,5 +1,14 @@ # Kafka 安装和配置 + +## 对于版本 + +- 由于 Kafka 经常会被连接到各个地方去,所以对于 Kafka 的版本,一般不能用太新的,要看你用在什么地方。 +- [Flink 的要求](https://ci.apache.org/projects/flink/flink-docs-release-1.6/dev/connectors/kafka.html) +- [Spark 的要求](https://spark.apache.org/docs/latest/streaming-kafka-integration.html) +- [Spring 的要求](http://projects.spring.io/spring-kafka/) + + ## 消息系统的好处 - 解耦(各个业务系统各自为政,有各自新需求,各自系统自行修改,只通过消息来通信) @@ -360,14 +369,30 @@ wurstmeister/kafka:latest - 如果 kafka1 输入的消息,kafka2 和 kafka3 能收到,则表示已经成功。 +#### Kafka 认证配置 + +- 可以参考:[Kafka的SASL/PLAIN认证配置说明](http://www.2bowl.info/kafka%e7%9a%84saslplain%e8%ae%a4%e8%af%81%e9%85%8d%e7%bd%ae%e8%af%b4%e6%98%8e/) + + +#### Kafka 单纯监控 KafkaOffsetMonitor + +- Github 官网: + - README 带了下载地址和运行命令 + - 只是已经很久不更新了 + +#### 部署 kafka-manager + +- Github 官网: + - 注意官网说明的版本支持 +- 节点 1(没成功):`docker run -d --name=kafka-manager1 --restart=always -p 9000:9000 -e ZK_HOSTS="youmeekhost1:2181,youmeekhost2:2181,youmeekhost3:2181" sheepkiller/kafka-manager:latest` +- 源码类安装可以看:[Kafka监控工具—Kafka Manager](http://www.2bowl.info/kafka%e7%9b%91%e6%8e%a7%e5%b7%a5%e5%85%b7-kafka-manager/) +- Kafka manager 是一款管理 + 监控的工具,比较重 -#### 部署 kafka-manager(未能访问成功) -- 节点 1:`docker run -d --name=kafka-manager1 --restart=always -p 9000:9000 -e ZK_HOSTS="youmeekhost1:2181,youmeekhost2:2181,youmeekhost3:2181" sheepkiller/kafka-manager:latest` ---------------------------------------------------------------------------------------------- -## Kafka 1.0.1 源码安装(也支持 1.0.2) +## Kafka 1.0.1 源码安装(也支持 1.0.2、0.11.0.3) - 测试环境:2G 内存足够 - 一台机子:CentOS 7.4,根据文章最开头,已经修改了 hosts @@ -391,6 +416,8 @@ broker.id=1 listeners=PLAINTEXT://0.0.0.0:9092 # 向 Zookeeper 注册的地址。这里可以直接填写外网IP地址,但是不建议这样做,而是通过配置 hosts 的方式来设置。不然填写外网 IP 地址会导致所有流量都走外网(单节点多 broker 的情况下该参数必改) advertised.listeners=PLAINTEXT://youmeekhost:9092 +# zookeeper,存储了 broker 的元信息 +zookeeper.connect=youmeekhost:2181 # 日志数据目录,可以通过逗号来指定多个目录(单节点多 broker 的情况下该参数必改) log.dirs=/data/kafka/logs # 创建新 topic 的时候默认 1 个分区。需要特别注意的是:已经创建好的 topic 的 partition 的个数只可以被增加,不能被减少。 @@ -405,8 +432,7 @@ auto.create.topics.enable=false #log.flush.interval.ms=1000 # kafka 数据保留时间 默认 168 小时 == 7 天 log.retention.hours=168 -# zookeeper,存储了 broker 的元信息 -zookeeper.connect=youmeekhost:2181 + # 其余都使用默认配置,但是顺便解释下: # borker 进行网络处理的线程数 From 4269f84d99625b843ff708d9055bbb29e1e5bc7e Mon Sep 17 00:00:00 2001 From: satan31415 <471867900@qq.com> Date: Sun, 2 Dec 2018 21:22:12 +0800 Subject: [PATCH 187/330] :construction: Docker --- markdown-file/Docker-Install-And-Usage.md | 1 + 1 file changed, 1 insertion(+) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md 
index 2524afd4..b5d5aa3e 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -254,6 +254,7 @@ CONTAINER ID NAME CPU % MEM USAGE / LI - `docker rmi 仓库:Tag`:删除具体某一个镜像 - `docker rmi $(docker images -q)`,删除所有镜像 - `docker rmi -f $(docker images -q)`,强制删除所有镜像 + - `docker rmi $(docker images | grep "vmware" | awk '{print $3}')`,批量删除带有 vmware 名称的镜像 - `docker tag`:为镜像打上标签 - `docker tag -f ubuntu:14.04 ubuntu:latest`,-f 意思是强制覆盖 - 同一个IMAGE ID可能会有多个TAG(可能还在不同的仓库),首先你要根据这些 image names 来删除标签,当删除最后一个tag的时候就会自动删除镜像; From 4d551dd9874fb30b9c3fb404b1946a8d2d31cc83 Mon Sep 17 00:00:00 2001 From: satan31415 <471867900@qq.com> Date: Sun, 2 Dec 2018 22:56:23 +0800 Subject: [PATCH 188/330] :construction: Flink --- markdown-file/Flink-Install-And-Settings.md | 56 +++++++++++++++++++++ 1 file changed, 56 insertions(+) create mode 100644 markdown-file/Flink-Install-And-Settings.md diff --git a/markdown-file/Flink-Install-And-Settings.md b/markdown-file/Flink-Install-And-Settings.md new file mode 100644 index 00000000..efe93946 --- /dev/null +++ b/markdown-file/Flink-Install-And-Settings.md @@ -0,0 +1,56 @@ +# Flink 安装和配置 + +## 介绍 + +- 2018-11-30 发布最新:1.7.0 版本 +- 官网: +- 官网 Github: + +## 本地模式安装 + +- CentOS 7.4 +- IP 地址:`192.168.0.105` +- 官网指导: +- 必须 JDK 8.x +- 下载: + - 选择 Binaries 类型 + - 如果没有 Hadoop 环境,只是本地开发,选择:Apache 1.7.0 Flink only + - Scala 2.11 和 Scala 2.12 都可以,但是我因为后面要用到 kafka,kafka 推荐 Scala 2.11,所以我这里也选择同样。 + - 最终我选择了:Apache 1.7.0 Flink only Scala 2.11,共:240M +- 解压:`tar zxf flink-*.tgz` +- 进入根目录:`cd flink-1.7.0` +- 启动:`./bin/start-cluster.sh` +- 停止:`./bin/stop-cluster.sh` +- 查看日志:`tail -300f log/flink-*-standalonesession-*.log` +- 浏览器访问 WEB 管理:`http://192.168.0.105:8081` + +## Demo + +- 官网: +- DataStream API: +- DataSet API: +- 访问该脚本可以得到如下内容: + +``` +mvn archetype:generate \ + -DarchetypeGroupId=org.apache.flink \ + -DarchetypeArtifactId=flink-quickstart-java \ + -DarchetypeVersion=${1:-1.7.0} \ + -DgroupId=org.myorg.quickstart \ + -DartifactId=$PACKAGE \ + -Dversion=0.1 \ + -Dpackage=org.myorg.quickstart \ + -DinteractiveMode=false +``` + +- 可以自己在本地执行该 mvn 命令,用 Maven 骨架快速创建一个 WordCount 项目 +- 注意,这里必须使用这个仓库(最好用穿越软件):`https://repository.apache.org/content/repositories/snapshots` +- 该骨架的所有版本: + - 根据实验,目前 1.7.0 和 1.6.x 都是没有 WordCount demo 代码的。但是 1.3.x 是有的。 + + + +## 资料 + +- []() + From cf9209a5f95b6c5f9872ca0ccccf567d71b06d1d Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 4 Dec 2018 17:58:46 +0800 Subject: [PATCH 189/330] :construction: Grafana --- README.md | 2 +- SUMMARY.md | 3 +- TOC.md | 3 +- markdown-file/Grafana-Install-And-Settings.md | 96 +++++++++++++++++++ 4 files changed, 101 insertions(+), 3 deletions(-) create mode 100644 markdown-file/Grafana-Install-And-Settings.md diff --git a/README.md b/README.md index ebeb2100..6b4b075c 100644 --- a/README.md +++ b/README.md @@ -97,7 +97,7 @@ - [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md) - [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) - [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) - +- [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md) ## 联系(Contact) diff --git a/SUMMARY.md b/SUMMARY.md index b1a5323a..a2bdcb4e 100644 --- a/SUMMARY.md +++ b/SUMMARY.md @@ -82,4 +82,5 @@ * [Showdoc 安装和配置](markdown-file/Showdoc-Install-And-Settings.md) * [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md) * [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) -* [Portainer 
+
+## Demo
+
+- Official docs:
+- DataStream API:
+- DataSet API:
+- Fetching that script yields the following:
+
+```
+mvn archetype:generate \
+    -DarchetypeGroupId=org.apache.flink \
+    -DarchetypeArtifactId=flink-quickstart-java \
+    -DarchetypeVersion=${1:-1.7.0} \
+    -DgroupId=org.myorg.quickstart \
+    -DartifactId=$PACKAGE \
+    -Dversion=0.1 \
+    -Dpackage=org.myorg.quickstart \
+    -DinteractiveMode=false
+```
+
+- You can run that mvn command locally to scaffold a WordCount project from the Maven archetype
+- Note: this repository must be used (ideally through a proxy): `https://repository.apache.org/content/repositories/snapshots`
+- All versions of the archetype:
+  - From what I tried, 1.7.0 and 1.6.x do not ship the WordCount demo code, but 1.3.x does.
+
+
+
+## References
+
+- []()
+

From cf9209a5f95b6c5f9872ca0ccccf567d71b06d1d Mon Sep 17 00:00:00 2001
From: judasn
Date: Tue, 4 Dec 2018 17:58:46 +0800
Subject: [PATCH 189/330] :construction: Grafana

---
 README.md                                     |  2 +-
 SUMMARY.md                                    |  3 +-
 TOC.md                                        |  3 +-
 markdown-file/Grafana-Install-And-Settings.md | 96 +++++++++++++++++++
 4 files changed, 101 insertions(+), 3 deletions(-)
 create mode 100644 markdown-file/Grafana-Install-And-Settings.md

diff --git a/README.md b/README.md
index ebeb2100..6b4b075c 100644
--- a/README.md
+++ b/README.md
@@ -97,7 +97,7 @@
 - [WordPress installation and configuration](markdown-file/WordPress-Install-And-Settings.md)
 - [GoAccess installation and configuration](markdown-file/GoAccess-Install-And-Settings.md)
 - [Portainer installation and configuration](markdown-file/Portainer-Install-And-Settings.md)
-
+- [Grafana installation and configuration](markdown-file/Grafana-Install-And-Settings.md)
 
 ## Contact

diff --git a/SUMMARY.md b/SUMMARY.md
index b1a5323a..a2bdcb4e 100644
--- a/SUMMARY.md
+++ b/SUMMARY.md
@@ -82,4 +82,5 @@
 * [Showdoc installation and configuration](markdown-file/Showdoc-Install-And-Settings.md)
 * [WordPress installation and configuration](markdown-file/WordPress-Install-And-Settings.md)
 * [GoAccess installation and configuration](markdown-file/GoAccess-Install-And-Settings.md)
-* [Portainer installation and configuration](markdown-file/Portainer-Install-And-Settings.md)
\ No newline at end of file
+* [Portainer installation and configuration](markdown-file/Portainer-Install-And-Settings.md)
+* [Grafana installation and configuration](markdown-file/Grafana-Install-And-Settings.md)
\ No newline at end of file

diff --git a/TOC.md b/TOC.md
index e93b47ec..527ae765 100644
--- a/TOC.md
+++ b/TOC.md
@@ -79,4 +79,5 @@
 - [Showdoc installation and configuration](markdown-file/Showdoc-Install-And-Settings.md)
 - [WordPress installation and configuration](markdown-file/WordPress-Install-And-Settings.md)
 - [GoAccess installation and configuration](markdown-file/GoAccess-Install-And-Settings.md)
-- [Portainer installation and configuration](markdown-file/Portainer-Install-And-Settings.md)
\ No newline at end of file
+- [Portainer installation and configuration](markdown-file/Portainer-Install-And-Settings.md)
+- [Grafana installation and configuration](markdown-file/Grafana-Install-And-Settings.md)
\ No newline at end of file

diff --git a/markdown-file/Grafana-Install-And-Settings.md b/markdown-file/Grafana-Install-And-Settings.md
new file mode 100644
index 00000000..095c2b27
--- /dev/null
+++ b/markdown-file/Grafana-Install-And-Settings.md
@@ -0,0 +1,96 @@
+# Grafana installation and configuration
+
+
+## About versions
+
+- [Supported Elasticsearch versions](http://docs.grafana.org/features/datasources/elasticsearch/#elasticsearch-version)
+
+
+----------------------------------------------------------------------------------------------
+
+## Installing Grafana
+
+- CentOS 7.4
+- The rpm package is about 53 MB
+- Memory needed: around 300 MB
+- Official download:
+- Official guide:
+
+```
+sudo yum install -y initscripts fontconfig urw-fonts
+wget https://dl.grafana.com/oss/release/grafana-5.4.0-1.x86_64.rpm
+sudo yum localinstall -y grafana-5.4.0-1.x86_64.rpm
+```
+
+
+- Start the Grafana service (it does not start by default)
+
+```
+sudo systemctl start grafana-server
+sudo systemctl status grafana-server
+```
+
+- Enable the Grafana service at boot: `sudo systemctl enable grafana-server`
+- Open the port: `firewall-cmd --add-port=3000/tcp --permanent`
+- Reload the firewall config: `firewall-cmd --reload`
+- Visit:
+- Default admin account: admin, password: admin; the password must be changed after the first login
+
+----------------------------------------------------------------------------------------------
+
+## Configuration
+
+- Official guide:
+- Default paths after the package install:
+  - Binary: `/usr/sbin/grafana-server`
+  - init.d script: `/etc/init.d/grafana-server`
+  - Config file: `/etc/grafana/grafana.ini`
+  - Log file: `/var/log/grafana/grafana.log`
+  - Plugin directory: `/var/lib/grafana/plugins`
+  - Default sqlite3 database: `/var/lib/grafana/grafana.db`
+- The most important config file: `vim /etc/grafana/grafana.ini`
+  - It lets you change the username and password
+  - The port
+  - Data paths
+  - Database settings
+  - Third-party authentication
+  - Session lifetime
+- Adding a data source:
+- Adding an organization:
+- Adding a user:
+- Adding plugins:
+- Personalization:
+- Variables:
+
+
+----------------------------------------------------------------------------------------------
+
+## Data sources
+
+#### Elasticsearch
+
+Usage:
+-
+-
+- <>
+- <>
+- <>
+- <>
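+
+- While those links are still placeholders, here is a rough sketch of wiring Elasticsearch up through Grafana's HTTP API instead of the UI. Field names follow the Grafana 5.x API docs; the index and time field follow the grafanadb example used in the Elasticsearch notes of this repo, so replace them with your own:
+
+```
+curl -X POST "http://admin:admin@192.168.0.105:3000/api/datasources" \
+  -H 'Content-Type: application/json' -d'
+{
+  "name": "es-grafanadb",
+  "type": "elasticsearch",
+  "access": "proxy",
+  "url": "http://127.0.0.1:9200",
+  "database": "grafanadb",
+  "jsonData": { "timeField": "post_date", "esVersion": 56 }
+}'
+```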
+
+
+----------------------------------------------------------------------------------------------
+
+
+## Other references
+
+-
+- <>
+- <>
+- <>
+- <>
+- <>
+- <>
+- <>
+- <>
+- <>
+

From 554d818c8ac440889bd3023f6a34e818f90fec85 Mon Sep 17 00:00:00 2001
From: judasn
Date: Tue, 4 Dec 2018 18:00:01 +0800
Subject: [PATCH 190/330] :construction: Flink

---
 markdown-file/Flink-Install-And-Settings.md | 162 +++++++++++++++++++-
 1 file changed, 159 insertions(+), 3 deletions(-)

diff --git a/markdown-file/Flink-Install-And-Settings.md b/markdown-file/Flink-Install-And-Settings.md
index efe93946..ac034381 100644
--- a/markdown-file/Flink-Install-And-Settings.md
+++ b/markdown-file/Flink-Install-And-Settings.md
@@ -18,14 +18,15 @@
   - Scala 2.11 and Scala 2.12 both work, but since Kafka comes later and Kafka recommends Scala 2.11, I pick the same here.
   - I ended up with: Apache 1.7.0 Flink only, Scala 2.11, about 240 MB
 - Unpack: `tar zxf flink-*.tgz`
-- Enter the root directory: `cd flink-1.7.0`
-- Start: `./bin/start-cluster.sh`
-- Stop: `./bin/stop-cluster.sh`
+- Enter the root directory: `cd flink-1.7.0`; full path: `cd /usr/local/flink-1.7.0`
+- Start: `cd /usr/local/flink-1.7.0 && ./bin/start-cluster.sh`
+- Stop: `cd /usr/local/flink-1.7.0 && ./bin/stop-cluster.sh`
 - Tail the logs: `tail -300f log/flink-*-standalonesession-*.log`
 - Open the web management UI in a browser: `http://192.168.0.105:8081`
 
 ## Demo
 
+- The unpacked distribution also ships some demo jars: `cd /usr/local/flink-1.7.0/examples`
 - Official docs:
 - DataStream API:
 - DataSet API:
@@ -48,9 +49,164 @@ mvn archetype:generate \
 - All versions of the archetype:
   - From what I tried, 1.7.0 and 1.6.x do not ship the WordCount demo code, but 1.3.x does.
 
+## Running jobs
+
+- You can run straight from IntelliJ IDEA
+- Or hand the jar to Flink on the server, in one of two ways:
+  - Upload the jar to the server yourself and run: `cd /usr/local/flink-1.7.0 && ./bin/flink run -c com.youmeek.WordCount /opt/flink-simple-demo-1.0-SNAPSHOT.jar`
+  - Or upload the jar through the web UI:
+    - There is an `Add New` button for uploading a jar; then fill in the class path: `com.youmeek.WordCount`
+    - `parallelism` is the degree of parallelism, given as a number; usually set to 2-3x the total CPU cores of the cluster (no need to set it in single-machine mode)
+
+-------------------------------------------------------------------
+
+
+## Flink core concepts
+
+- Four cornerstones: Checkpoint, State, Time, Window
+- Solves the exactly-once problem
+- State management
+- Implements the watermark mechanism, which handles out-of-order and late data when processing on event time
+- Ships a set of ready-to-use window operations: tumbling, sliding and session windows
+- Everything I wanted to say is already said in these articles:
+  -
+  -
+- A few extra notes here
+
+
+
+The Client submits jobs to the JobManager; the JobManager hands tasks to TaskManagers for execution, and the TaskManagers report task status back through heartbeats
+In a Flink cluster, compute resources are defined as Task Slots
+Each TaskManager owns one or more slots
+
+The JobManager schedules tasks at slot granularity.
+What Flink's JobManager schedules is a pipeline of tasks, not a single point.
+In Flink this pipeline task is scheduled as a whole; a TaskManager, depending on how many slots it owns, can hold several pipelines at the same time
+
+
+Task Slot
+The architecture overview described the TaskManager as a JVM process that executes a task, or several subtasks, on separate threads. To control how many tasks one TaskManager accepts, Flink introduces the notion of a Task Slot.
+
+Compute resources in Flink are defined through task slots. Each task slot stands for a fixed-size subset of the TaskManager's resources. For example, a TaskManager with 3 slots splits its managed memory evenly into three parts, one per slot. Slotting the resources means tasks from different jobs do not compete for memory; each task has a fixed memory budget. Note that no CPU isolation is involved; slots currently isolate only task memory.
+By adjusting the number of task slots, users decide how tasks are isolated from one another. One slot per TaskManager means each task runs in its own JVM. Several slots per TaskManager means several tasks share one JVM; tasks in the same JVM share TCP connections (through multiplexing) and heartbeat messages, which cuts network traffic, and they can share some data structures, lowering per-task overhead.
+
+Each TaskManager owns one or more task slots, and every slot can run a pipeline made of several consecutive tasks, for example the n-th parallel instance of a MapFunction together with the n-th parallel instance of a ReduceFunction.
+
+
+source (where the stream comes in)
+Transformations (where the stream is processed)
+sink (where the stream goes out)
+
+Flink programs are inherently parallel and distributed. Streams are split into stream partitions, and operators are split into operator subtasks. These subtasks run in different threads on different machines (containers), independently and without interfering with one another. The number of operator subtasks of an operation is called its parallelism. The parallelism of a stream always equals the parallelism of the operator that produced it. Different operators in one Flink program may have different parallelism.
+
+
+
+
+#### The Checkpoint mechanism, for fault tolerance
+
+- These articles explain it well:
+  - [Flink 增量式checkpoint 介绍](https://my.oschina.net/u/992559/blog/2873828)
+  - [A Deep Dive into Rescalable State in Apache Flink](https://flink.apache.org/features/2017/07/04/flink-rescalable-state.html)
+  - [Flink 小贴士 (5): Savepoint 和 Checkpoint 的 3 个不同点](http://wuchong.me/blog/2018/11/25/flink-tips-differences-between-savepoints-and-checkpoints/)
+  - [Flink 小贴士 (2):Flink 如何管理 Kafka 消费位点](http://wuchong.me/blog/2018/11/04/how-apache-flink-manages-kafka-consumer-offsets/)
+  - []()
+  - []()
+- Checkpoints let Flink recover the state and the position in the stream, giving the application the same semantics as a failure-free run
+- A checkpoint is Flink's mechanism for recovering from failures: it snapshots the state of the whole application, including the read positions of the input sources. On failure, Flink loads the application state from a checkpoint, resumes from the recovered read position, and carries on as if nothing had happened.
+
+
+```
+A checkpoint is a consistent snapshot in Flink; it includes:
+
+the current state of the program
+the position in the input stream
+Flink generates a checkpoint periodically, at a configurable interval, and writes it to storage such as S3 or HDFS. The write happens asynchronously, meaning the Flink program keeps processing data while the checkpoint runs.
+
+When a machine or the program fails and restarts, the Flink program recovers from the latest checkpoint: Flink restores the program state, rewinds the input stream to the position saved in the checkpoint, and starts running again. So Flink can compute results as if the failure never happened.
+
+Checkpoints are the internal mechanism that lets Apache Flink recover from failures. A checkpoint is a consistent copy of the Flink application's state, including the input read positions. When a failure occurs, Flink recovers by loading the application state from a checkpoint and resuming from the recovered read position, as if nothing had happened. Think of a checkpoint as a save point in a video game: if something goes wrong, you can reload and replay at any time.
+Checkpoints make Apache Flink fault tolerant and preserve the application's semantics even when failures happen. They are triggered at a fixed interval, configurable in the application.
+
+```
+
+- Checkpointing is off by default; to enable it, call the enableCheckpointing(n) method on the StreamExecutionEnvironment, where n is the checkpoint interval in milliseconds.
+- One core piece here: Facebook's RocksDB (an embeddable, persistent key-value store) is used as the state database
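+- A minimal sketch of switching this on in code, on the Flink 1.7 Java API; the interval and the HDFS path are just examples, and the RocksDB backend needs the flink-statebackend-rocksdb dependency on the classpath:
+
+```java
+import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
+import org.apache.flink.streaming.api.CheckpointingMode;
+import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
+
+public class CheckpointDemo {
+    public static void main(String[] args) throws Exception {
+        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
+
+        // take a checkpoint every 5000 ms with exactly-once semantics (the default mode)
+        env.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);
+
+        // keep state in RocksDB and snapshot it to external storage;
+        // the second argument turns on the incremental checkpoints described above
+        env.setStateBackend(new RocksDBStateBackend("hdfs:///flink/checkpoints", true));
+
+        // ... define sources, transformations and sinks here, then:
+        // env.execute("checkpointed job");
+    }
+}
+```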
+
+
+#### Exactly-Once
+
+- Exactly-once exists because checkpoints exist
+- [Apache Flink 端到端(end-to-end)Exactly-Once特性概览 (翻译)](https://my.oschina.net/u/992559/blog/1819948)
+
+#### Watermark
+
+- [Flink 小贴士 (3): 轻松理解 Watermark](http://wuchong.me/blog/2018/11/18/flink-tips-watermarks-in-apache-flink-made-easy/)
+- Know the time notions around event time: event-time (when the message was produced), processing-time (when the message is processed), ingestion-time (when the message enters the Flink framework)
+- What watermarks do: they define when to stop waiting for earlier data
+- Watermarks only take effect with the EventTime and IngestionTime time characteristics, and under IngestionTime the time equals the message's ingestion time
+
+#### Windows
+
+- Tumbling window (Tumble)
+- [Flink 原理与实现:Window 机制](http://wuchong.me/blog/2016/05/25/flink-internals-window-mechanism/)
+- [Flink 原理与实现:Session Window](http://wuchong.me/blog/2016/06/06/flink-internals-session-window/)
+
+
+#### Production environment
+
+- [Flink 小贴士 (7): 4个步骤,让 Flink 应用达到生产状态](http://wuchong.me/blog/2018/12/03/flink-tips-4-steps-flink-application-production-ready/)
+
+
+
+
+#### Runtime environment
+
+
+Flink deployment
+Flink has three deployment modes: Local, Standalone Cluster, and Yarn Cluster. In Local mode the JobManager and the TaskManager share one JVM for the whole workload. Local mode is the most convenient way to validate a simple application; real deployments mostly use Standalone or Yarn Cluster, and those two modes are introduced below.
+
+
+#### Flink HA
+
+
+#### Monitoring REST API
+
+https://ci.apache.org/projects/flink/flink-docs-stable/monitoring/rest_api.html#monitoring-rest-api
+
+
+#### Main core APIs
+
+- Official API docs:
+- DataStream API -- stream processing
+- DataSet API -- batch processing
+- Kafka source
+  - Kafka Connectors
+- Elasticsearch sink
+
+
+#### Table & SQL API (the relational APIs)
+
+Table API: a LINQ-style (language-integrated query) API for the Java & Scala SDKs (since 0.9.0)
+SQL API: standard SQL support (since 1.1.0)
+
+
+The relational APIs act as a unified layer: a query on a table in batch mode runs to completion and produces a finite result set, while the same query on a table in streaming mode keeps running and produces a result stream, with identical syntax and semantics in both modes. The central concept is the Table, which integrates tightly with DataSet and DataStream: both convert to a Table easily, and convert back just as easily.
+
+The relational APIs are built on top of the basic DataStream and DataSet APIs; the overall layering is shown below:
+
+![table-sql-level](http://7xkaaz.com1.z0.glb.clouddn.com/table-sql-level.png)
+
+-------------------------------------------------------------------
+
 ## References
 
+- [新一代大数据处理引擎 Apache Flink](https://www.ibm.com/developerworks/cn/opensource/os-cn-apache-flink/index.html)
+- [Flink-关系型API简介](http://vinoyang.com/2017/07/06/flink-relation-api-introduction/)
+- [Flink学习笔记(4):基本概念](https://www.jianshu.com/p/0cd1db4282be)
+- [Apache Flink:特性、概念、组件栈、架构及原理分析](http://shiyanjun.cn/archives/1508.html)
+- [Flink 原理与实现:理解 Flink 中的计算资源](http://wuchong.me/blog/2016/05/09/flink-internals-understanding-execution-resources/)
+- []()
 - []()

From 70f76402434d7c4ff67f2453a94b7b5e4b8fd184 Mon Sep 17 00:00:00 2001
From: judasn
Date: Tue, 4 Dec 2018 20:41:00 +0800
Subject: [PATCH 191/330] :construction: Elasticsearch

---
 markdown-file/Elasticsearch-Base.md | 43 +++++++++++++++++------------
 1 file changed, 25 insertions(+), 18 deletions(-)

diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md
index 97c03042..69933e9b 100644
--- a/markdown-file/Elasticsearch-Base.md
+++ b/markdown-file/Elasticsearch-Base.md
@@ -22,6 +22,8 @@ services:
     - /data/docker/elasticsearch/data:/usr/share/elasticsearch/data
 ```
 
+-------------------------------------------------------------------
+
 ## Environment
 
 - CentOS 7.3
@@ -31,10 +33,11 @@
 - `systemctl stop firewalld.service` # stop firewalld
 - `systemctl disable firewalld.service` # keep firewalld from starting at boot
 
-## Elasticsearch 5.5.0 install
+## Elasticsearch 6.5.x install (also covers 5.5.x)
 
 ### Configure some system settings first
 
+- More system-level configuration is covered in the official docs:
 - Maximum open file descriptors: `vim /etc/sysctl.conf`
 
 ```
@@ -53,15 +56,15 @@
 elasticsearch hard memlock unlimited
 * hard nofile 262144
 ```
 
 ### Installing
 
-- Official RPM install procedure (important; the notes below summarize the official docs):
-`rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch`
+- Official RPM install procedure (important; the notes below summarize the official docs):
+- Import the key: `rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch`
 - Create the file: `vim /etc/yum.repos.d/elasticsearch.repo`
 - Content:
 
 ```
-[elasticsearch-5.x]
-name=Elasticsearch repository for 5.x packages
-baseurl=https://artifacts.elastic.co/packages/5.x/yum
+[elasticsearch-6.x]
+name=Elasticsearch repository for 6.x packages
+baseurl=https://artifacts.elastic.co/packages/6.x/yum
 gpgcheck=1
 gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
 enabled=1
@@ -70,23 +73,27 @@
 type=rpm-md
 ```
 
-- Install: `yum install -y elasticsearch`; on a Chinese network connection this is slow, so be patient
+- Install: `yum install -y elasticsearch`; on a Chinese network connection this is slow, so be patient
+- Start and stop the service (it does not start by default):
+  - Start: `systemctl start elasticsearch.service`
+  - Status: `systemctl status elasticsearch.service`
+  - Stop: `systemctl stop elasticsearch.service`
 - After installing, enable it at boot:
-`/bin/systemctl daemon-reload`
-`/bin/systemctl enable elasticsearch.service`
-- Start and stop the service:
-`systemctl start elasticsearch.service`
-`systemctl stop elasticsearch.service`
+  - `/bin/systemctl daemon-reload`
+  - `/bin/systemctl enable elasticsearch.service`
+- Check: `curl -X GET "localhost:9200/"`
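+  - If the service is up, the reply is a small JSON document along these lines (an illustrative sketch, not captured output; names and version numbers differ per install):
+
+```
+{
+  "name" : "node-1",
+  "cluster_name" : "elasticsearch",
+  "version" : { "number" : "6.5.1", "lucene_version" : "7.5.0" },
+  "tagline" : "You Know, for Search"
+}
+```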
 
 ### Where things live after the RPM install
 
+- More notes in the official docs:
+- More detailed configuration:
 - The system gets an elasticsearch user by default, and the directories below belong to that user
-- Elasticsearch install location: /usr/share/elasticsearch
-- Elasticsearch runtime and heap settings: /etc/sysconfig/elasticsearch
-- Elasticsearch cluster settings: /etc/elasticsearch/elasticsearch.yml
-- Logs: /var/log/elasticsearch/
-- Index data: /var/lib/elasticsearch
-- Plugins: /usr/share/elasticsearch/plugins
-- Scripts: /etc/elasticsearch/scripts
+- Elasticsearch install location: `/usr/share/elasticsearch`
+- Elasticsearch runtime and heap settings: `/etc/sysconfig/elasticsearch`
+- Elasticsearch cluster settings: `/etc/elasticsearch/elasticsearch.yml`
+- Logs: `/var/log/elasticsearch/`
+- Index data: `/var/lib/elasticsearch`
+- Plugins: `/usr/share/elasticsearch/plugins`
+- Scripts: `/etc/elasticsearch/scripts`
 
 -------------------------------------------------------------------------------------------------------------------

From b2865948da211baf9e2e0fb081fc2faefead4e1a Mon Sep 17 00:00:00 2001
From: judasn
Date: Wed, 5 Dec 2018 10:48:07 +0800
Subject: [PATCH 192/330] :construction: Elasticsearch

---
 markdown-file/Elasticsearch-Base.md | 84 ++++++++++++++++++++++++++---
 1 file changed, 76 insertions(+), 8 deletions(-)

diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md
index 69933e9b..4a3a8290 100644
--- a/markdown-file/Elasticsearch-Base.md
+++ b/markdown-file/Elasticsearch-Base.md
@@ -24,18 +24,20 @@ services:
 
 -------------------------------------------------------------------
 
-## Environment
+## Elasticsearch 6.5.x install (also covers 5.5.x)
+
+#### Environment
 
-- CentOS 7.3
+- CentOS 7.x
+- At least 2 GB of RAM
 - root user
 - JDK version: 1.8 (minimum requirement); recommended: JDK 1.8.0_121 or later
 - Disable the firewall
   - `systemctl stop firewalld.service` # stop firewalld
   - `systemctl disable firewalld.service` # keep firewalld from starting at boot
 
-## Elasticsearch 6.5.x install (also covers 5.5.x)
-
-### Configure some system settings first
+#### Configure some system settings first
 
 - More system-level configuration is covered in the official docs:
 - Maximum open file descriptors: `vim /etc/sysctl.conf`
 
 ```
@@ -54,7 +56,7 @@ elasticsearch hard memlock unlimited
 * hard nofile 262144
 ```
 
-### Installing
+#### Installing
 
 - Official RPM install procedure (important; the notes below summarize the official docs):
 - Import the key: `rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch`
@@ -72,17 +74,20 @@ autorefresh=1
 type=rpm-md
 ```
 
-- Install: `yum install -y elasticsearch`; on a Chinese network connection this is slow, so be patient
+- Install: `yum install -y elasticsearch`; roughly 108 MB of files; on a Chinese network connection it can be slow, so be patient
+  - The install adds an elasticsearch user and group
+- Set a java symlink: `ln -s /usr/local/jdk1.8.0_181/jre/bin/java /usr/local/sbin/java`
 - Start and stop the service (it does not start by default):
   - Start: `systemctl start elasticsearch.service`
   - Status: `systemctl status elasticsearch.service`
   - Stop: `systemctl stop elasticsearch.service`
+  - Restart: `systemctl restart elasticsearch.service`
 - After installing, enable it at boot:
   - `/bin/systemctl daemon-reload`
   - `/bin/systemctl enable elasticsearch.service`
 - Check: `curl -X GET "localhost:9200/"`
 
-### Where things live after the RPM install
+#### Where things live after the RPM install
 
 - More notes in the official docs:
 - More detailed configuration:
@@ -95,6 +100,69 @@ type=rpm-md
 - Plugins: `/usr/share/elasticsearch/plugins`
 - Scripts: `/etc/elasticsearch/scripts`
 
+#### Configuration
+
+- Edit the config file: `vim /etc/elasticsearch/elasticsearch.yml`
+- By default only localhost can connect; change it to allow external access
+
+```
+uncomment this line: #network.host: 192.168.0.1
+and change it to: network.host: 0.0.0.0
+```
+
+#### Installing X-Pack (6.5.x ships with x-pack by default)
+
+- `cd /usr/share/elasticsearch && bin/elasticsearch-plugin install x-pack`
+
+#### Installing the Head extension for Chrome
+
+- Download:
+
+#### Other details
+
+- If you only test with a single node, remember to set the replica count to 0 when creating an index.
+
+#### Bulk-adding / bulk-deleting test data
+
+- Official docs:
+- Bulk add, endpoint: `POST /_bulk`
+
+```
+{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100001" } }
+{ "post_date" : "2018-12-01 10:00:00", "request_num" : 1 }
+{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100002" } }
+{ "post_date" : "2018-12-01 10:00:05", "request_num" : 2 }
+```
+
+- cURL format:
+
+```
+curl -X POST "http://127.0.0.1:9200/_bulk" -H 'Content-Type: application/json' -d'
+{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100001" } }
+{ "post_date" : "2018-12-01 10:00:00", "request_num" : 1 }
+{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100002" } }
+{ "post_date" : "2018-12-01 10:00:05", "request_num" : 2 }
+'
+```
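+
+- A quick way to confirm those documents landed (same index name as above; the count appears once the index has refreshed):
+
+```
+curl -X GET "http://127.0.0.1:9200/grafanadb/_count?pretty"
+# expected for the two documents above: { "count" : 2, ... }
+```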
+
+- Bulk delete, endpoint: `POST /_bulk`
+
+```
+{ "delete": { "_index": "grafanadb", "_type": "radar", "_id": "100001" } }
+{ "delete": { "_index": "grafanadb", "_type": "radar", "_id": "100002" } }
+```
+
+- cURL format:
+
+```
+curl -X POST "http://127.0.0.1:9200/_bulk" -H 'Content-Type: application/json' -d'
+{ "delete": { "_index": "grafanadb", "_type": "radar", "_id": "100001" } }
+{ "delete": { "_index": "grafanadb", "_type": "radar", "_id": "100002" } }
+'
+```
+
+

From b1d96215277705b2bf1b7591b4003010156b1360 Mon Sep 17 00:00:00 2001
From: judasn
Date: Wed, 5 Dec 2018 10:59:34 +0800
Subject: [PATCH 193/330] :construction: Elasticsearch

---
 markdown-file/Elasticsearch-Base.md | 152 ++++++++++++++++++++++++++++
 1 file changed, 152 insertions(+)

diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md
index 4a3a8290..fa4d9d4b 100644
--- a/markdown-file/Elasticsearch-Base.md
+++ b/markdown-file/Elasticsearch-Base.md
@@ -132,6 +132,82 @@ type=rpm-md
 { "post_date" : "2018-12-01 10:00:00", "request_num" : 1 }
 { "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100002" } }
 { "post_date" : "2018-12-01 10:00:05", "request_num" : 2 }
+{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100003" } }
+{ "post_date" : "2018-12-01 10:00:10", "request_num" : 3 }
+{ 
"index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100004" } } +{ "post_date" : "2018-12-01 10:00:15", "request_num" : 4 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100005" } } +{ "post_date" : "2018-12-01 10:00:20", "request_num" : 5 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100006" } } +{ "post_date" : "2018-12-01 10:00:25", "request_num" : 6 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100007" } } +{ "post_date" : "2018-12-01 10:00:30", "request_num" : 7 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100008" } } +{ "post_date" : "2018-12-01 10:00:35", "request_num" : 8 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100009" } } +{ "post_date" : "2018-12-01 10:00:40", "request_num" : 9 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100010" } } +{ "post_date" : "2018-12-01 10:00:45", "request_num" : 10 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100011" } } +{ "post_date" : "2018-12-01 10:00:50", "request_num" : 1 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100012" } } +{ "post_date" : "2018-12-01 10:00:55", "request_num" : 2 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100013" } } +{ "post_date" : "2018-12-01 10:01:00", "request_num" : 3 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100014" } } +{ "post_date" : "2018-12-01 10:01:05", "request_num" : 4 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100015" } } +{ "post_date" : "2018-12-01 10:01:10", "request_num" : 5 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100016" } } +{ "post_date" : "2018-12-01 10:01:15", "request_num" : 6 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100017" } } +{ "post_date" : "2018-12-01 10:01:20", "request_num" : 7 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100018" } } +{ "post_date" : "2018-12-01 10:01:25", "request_num" : 8 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100019" } } +{ "post_date" : "2018-12-01 10:01:30", "request_num" : 9 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100020" } } +{ "post_date" : "2018-12-01 10:01:35", "request_num" : 10 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100021" } } +{ "post_date" : "2018-12-01 10:01:40", "request_num" : 1 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100022" } } +{ "post_date" : "2018-12-01 10:01:45", "request_num" : 2 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100023" } } +{ "post_date" : "2018-12-01 10:01:50", "request_num" : 3 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100024" } } +{ "post_date" : "2018-12-01 10:01:55", "request_num" : 4 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100025" } } +{ "post_date" : "2018-12-01 10:02:00", "request_num" : 5 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100026" } } +{ "post_date" : "2018-12-01 10:02:05", "request_num" : 6 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100027" } } +{ "post_date" : "2018-12-01 10:02:10", "request_num" : 7 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100028" } } +{ "post_date" : "2018-12-01 10:02:15", "request_num" : 8 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100029" } } 
+{ "post_date" : "2018-12-01 10:02:20", "request_num" : 9 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100030" } } +{ "post_date" : "2018-12-01 10:02:25", "request_num" : 10 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100031" } } +{ "post_date" : "2018-12-01 10:02:30", "request_num" : 1 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100032" } } +{ "post_date" : "2018-12-01 10:02:35", "request_num" : 2 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100033" } } +{ "post_date" : "2018-12-01 10:02:40", "request_num" : 3 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100034" } } +{ "post_date" : "2018-12-01 10:02:45", "request_num" : 4 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100035" } } +{ "post_date" : "2018-12-01 10:02:50", "request_num" : 5 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100036" } } +{ "post_date" : "2018-12-01 10:02:55", "request_num" : 6 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100037" } } +{ "post_date" : "2018-12-01 10:03:00", "request_num" : 7 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100038" } } +{ "post_date" : "2018-12-01 10:03:05", "request_num" : 8 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100039" } } +{ "post_date" : "2018-12-01 10:03:10", "request_num" : 9 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100040" } } +{ "post_date" : "2018-12-01 10:03:15", "request_num" : 10 } ``` - cURL 格式: @@ -142,6 +218,82 @@ curl -X POST "http://127.0.0.1:9200/_bulk" -H 'Content-Type: application/json' - { "post_date" : "2018-12-01 10:00:00", "request_num" : 1 } { "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100002" } } { "post_date" : "2018-12-01 10:00:05", "request_num" : 2 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100003" } } +{ "post_date" : "2018-12-01 10:00:10", "request_num" : 3 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100004" } } +{ "post_date" : "2018-12-01 10:00:15", "request_num" : 4 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100005" } } +{ "post_date" : "2018-12-01 10:00:20", "request_num" : 5 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100006" } } +{ "post_date" : "2018-12-01 10:00:25", "request_num" : 6 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100007" } } +{ "post_date" : "2018-12-01 10:00:30", "request_num" : 7 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100008" } } +{ "post_date" : "2018-12-01 10:00:35", "request_num" : 8 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100009" } } +{ "post_date" : "2018-12-01 10:00:40", "request_num" : 9 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100010" } } +{ "post_date" : "2018-12-01 10:00:45", "request_num" : 10 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100011" } } +{ "post_date" : "2018-12-01 10:00:50", "request_num" : 1 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100012" } } +{ "post_date" : "2018-12-01 10:00:55", "request_num" : 2 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100013" } } +{ "post_date" : "2018-12-01 10:01:00", "request_num" : 3 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100014" } } +{ "post_date" : "2018-12-01 10:01:05", 
"request_num" : 4 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100015" } } +{ "post_date" : "2018-12-01 10:01:10", "request_num" : 5 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100016" } } +{ "post_date" : "2018-12-01 10:01:15", "request_num" : 6 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100017" } } +{ "post_date" : "2018-12-01 10:01:20", "request_num" : 7 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100018" } } +{ "post_date" : "2018-12-01 10:01:25", "request_num" : 8 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100019" } } +{ "post_date" : "2018-12-01 10:01:30", "request_num" : 9 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100020" } } +{ "post_date" : "2018-12-01 10:01:35", "request_num" : 10 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100021" } } +{ "post_date" : "2018-12-01 10:01:40", "request_num" : 1 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100022" } } +{ "post_date" : "2018-12-01 10:01:45", "request_num" : 2 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100023" } } +{ "post_date" : "2018-12-01 10:01:50", "request_num" : 3 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100024" } } +{ "post_date" : "2018-12-01 10:01:55", "request_num" : 4 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100025" } } +{ "post_date" : "2018-12-01 10:02:00", "request_num" : 5 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100026" } } +{ "post_date" : "2018-12-01 10:02:05", "request_num" : 6 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100027" } } +{ "post_date" : "2018-12-01 10:02:10", "request_num" : 7 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100028" } } +{ "post_date" : "2018-12-01 10:02:15", "request_num" : 8 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100029" } } +{ "post_date" : "2018-12-01 10:02:20", "request_num" : 9 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100030" } } +{ "post_date" : "2018-12-01 10:02:25", "request_num" : 10 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100031" } } +{ "post_date" : "2018-12-01 10:02:30", "request_num" : 1 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100032" } } +{ "post_date" : "2018-12-01 10:02:35", "request_num" : 2 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100033" } } +{ "post_date" : "2018-12-01 10:02:40", "request_num" : 3 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100034" } } +{ "post_date" : "2018-12-01 10:02:45", "request_num" : 4 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100035" } } +{ "post_date" : "2018-12-01 10:02:50", "request_num" : 5 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100036" } } +{ "post_date" : "2018-12-01 10:02:55", "request_num" : 6 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100037" } } +{ "post_date" : "2018-12-01 10:03:00", "request_num" : 7 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100038" } } +{ "post_date" : "2018-12-01 10:03:05", "request_num" : 8 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100039" } } +{ "post_date" : "2018-12-01 10:03:10", "request_num" : 9 } +{ "index" : { "_index" : "grafanadb", "_type" : "radar", 
"_id" : "100040" } } +{ "post_date" : "2018-12-01 10:03:15", "request_num" : 10 } ' ``` From b1d96215277705b2bf1b7591b4003010156b1360 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 5 Dec 2018 16:24:30 +0800 Subject: [PATCH 194/330] :construction: Elasticsearch --- markdown-file/ELK-Install-And-Settings.md | 24 ----- markdown-file/Elasticsearch-Base.md | 112 ++++++---------------- markdown-file/Kibana-Base.md | 56 ++++++++++- 3 files changed, 83 insertions(+), 109 deletions(-) diff --git a/markdown-file/ELK-Install-And-Settings.md b/markdown-file/ELK-Install-And-Settings.md index d5e0c216..26fbda70 100644 --- a/markdown-file/ELK-Install-And-Settings.md +++ b/markdown-file/ELK-Install-And-Settings.md @@ -26,30 +26,6 @@ - 请看 logstash 专题文:[logstash 相关知识](Logstash-Base.md) -### 安装 Kibana - -- 选择一台机子安装即可,我选择:192.168.1.127 这台 -- 切换到存放目录:`cd /usr/program/elk` -- 解压:`tar zxvf kibana-4.6.1-linux-x86_64.tar.gz` -- 修改配置文件:`vim /usr/program/elk/kibana-4.6.1-linux-x86_64/config/kibana.yml`,打开下面注释并配置: - -``` nginx -server.port: 5601 #端口 -server.host: "192.168.1.127" #访问ip地址 -elasticsearch.url: "http://192.168.1.127:9200" #连接elastic -kibana.index: ".kibana" #在elastic中添加.kibana索引 -``` - -- 记得先切换到 elasticsearch 用户下,然后先启动 elasticsearch。先确保 elasticsearch 集群是启动的。 -- 再切换到 root 用户下,启动 kibana -- 带控制台的启动(比较慢):`/usr/program/elk/kibana-4.6.1-linux-x86_64/bin/kibana` -- 守护进程方式启动:`/usr/program/elk/kibana-4.6.1-linux-x86_64/bin/kibana -d` -- 守护进程方式停止:`ps -ef|grep kibana`,只能通过 kill pid 来结束 -- 然后你可以访问:`http://192.168.1.127:5601`,可以看到 kibana 的相关界面。 -- 在 logstash 安装这一步,如果你刚刚有按着我说的去做一个 elasticsearch 索引,那你此时不会看到这样的提示:`Unable to fetch mapping. Do you have indices matching the pattern?` - - 此时你可以直接点击 `create` 统计 `logstash-*` 格式的索引结果,看到相关内容 - - 如果你知道你的索引名称的规则,比如我现在要统计 Tomcat 的相关索引,我的索引名称是:`tomcat-log-*`,则我输入这个,点击:create 即可。 -- kibana 的高级用法请看我单独的一篇文章:[kibana 相关知识](Kibana-Base.md) ## 资料 diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md index fa4d9d4b..72b79523 100644 --- a/markdown-file/Elasticsearch-Base.md +++ b/markdown-file/Elasticsearch-Base.md @@ -122,94 +122,40 @@ type=rpm-md - 如果就单个节点测试,新建索引的时候副本数记得填 0。 -#### 批量增加 / 删除测试数据 +#### 创建索引并设置 mapping -- 官网文档: -- 批量增加,接口地址:`POST /_bulk` +- 官网类型说明: ``` -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100001" } } -{ "post_date" : "2018-12-01 10:00:00", "request_num" : 1 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100002" } } -{ "post_date" : "2018-12-01 10:00:05", "request_num" : 2 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100003" } } -{ "post_date" : "2018-12-01 10:00:10", "request_num" : 3 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100004" } } -{ "post_date" : "2018-12-01 10:00:15", "request_num" : 4 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100005" } } -{ "post_date" : "2018-12-01 10:00:20", "request_num" : 5 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100006" } } -{ "post_date" : "2018-12-01 10:00:25", "request_num" : 6 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100007" } } -{ "post_date" : "2018-12-01 10:00:30", "request_num" : 7 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100008" } } -{ "post_date" : "2018-12-01 10:00:35", "request_num" : 8 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100009" } } -{ "post_date" : "2018-12-01 10:00:40", "request_num" : 9 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" 
: "100010" } } -{ "post_date" : "2018-12-01 10:00:45", "request_num" : 10 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100011" } } -{ "post_date" : "2018-12-01 10:00:50", "request_num" : 1 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100012" } } -{ "post_date" : "2018-12-01 10:00:55", "request_num" : 2 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100013" } } -{ "post_date" : "2018-12-01 10:01:00", "request_num" : 3 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100014" } } -{ "post_date" : "2018-12-01 10:01:05", "request_num" : 4 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100015" } } -{ "post_date" : "2018-12-01 10:01:10", "request_num" : 5 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100016" } } -{ "post_date" : "2018-12-01 10:01:15", "request_num" : 6 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100017" } } -{ "post_date" : "2018-12-01 10:01:20", "request_num" : 7 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100018" } } -{ "post_date" : "2018-12-01 10:01:25", "request_num" : 8 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100019" } } -{ "post_date" : "2018-12-01 10:01:30", "request_num" : 9 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100020" } } -{ "post_date" : "2018-12-01 10:01:35", "request_num" : 10 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100021" } } -{ "post_date" : "2018-12-01 10:01:40", "request_num" : 1 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100022" } } -{ "post_date" : "2018-12-01 10:01:45", "request_num" : 2 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100023" } } -{ "post_date" : "2018-12-01 10:01:50", "request_num" : 3 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100024" } } -{ "post_date" : "2018-12-01 10:01:55", "request_num" : 4 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100025" } } -{ "post_date" : "2018-12-01 10:02:00", "request_num" : 5 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100026" } } -{ "post_date" : "2018-12-01 10:02:05", "request_num" : 6 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100027" } } -{ "post_date" : "2018-12-01 10:02:10", "request_num" : 7 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100028" } } -{ "post_date" : "2018-12-01 10:02:15", "request_num" : 8 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100029" } } -{ "post_date" : "2018-12-01 10:02:20", "request_num" : 9 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100030" } } -{ "post_date" : "2018-12-01 10:02:25", "request_num" : 10 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100031" } } -{ "post_date" : "2018-12-01 10:02:30", "request_num" : 1 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100032" } } -{ "post_date" : "2018-12-01 10:02:35", "request_num" : 2 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100033" } } -{ "post_date" : "2018-12-01 10:02:40", "request_num" : 3 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100034" } } -{ "post_date" : "2018-12-01 10:02:45", "request_num" : 4 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100035" } } -{ "post_date" : "2018-12-01 10:02:50", "request_num" : 5 } 
-{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100036" } } -{ "post_date" : "2018-12-01 10:02:55", "request_num" : 6 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100037" } } -{ "post_date" : "2018-12-01 10:03:00", "request_num" : 7 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100038" } } -{ "post_date" : "2018-12-01 10:03:05", "request_num" : 8 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100039" } } -{ "post_date" : "2018-12-01 10:03:10", "request_num" : 9 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100040" } } -{ "post_date" : "2018-12-01 10:03:15", "request_num" : 10 } +curl -XPUT 'http://127.0.0.1:9200/grafanadb' -H 'Content-Type: application/json' -d' +{ + "settings": { + "refresh_interval": "5s", + "number_of_shards": 5, + "number_of_replicas": 0 + }, + "mappings": { + "radar": { + "properties": { + "request_num": { + "type": "long" + }, + "post_date": { + "type": "date", + "format": "yyyy-MM-dd HH:mm:ss||epoch_millis" + } + } + } + } +} +' ``` + +#### 批量增加 / 删除测试数据 + +- 官网文档: +- 批量增加,接口地址:`POST /_bulk` - cURL 格式: ``` diff --git a/markdown-file/Kibana-Base.md b/markdown-file/Kibana-Base.md index 22febb5a..dec0b227 100644 --- a/markdown-file/Kibana-Base.md +++ b/markdown-file/Kibana-Base.md @@ -2,12 +2,64 @@ ## 基础知识 -- 官网文档: +- 官网文档: +### 安装 Kibana -## 案例 +- CentOS 7.4 +- 至少需要 500M 内存 +- 官网文档: +- 官网文档 CentOS: +- 添加 KEY:`rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch` +- 添加源:`vim /etc/yum.repos.d/kibana.repo` +``` +[kibana-6.x] +name=Kibana repository for 6.x packages +baseurl=https://artifacts.elastic.co/packages/6.x/yum +gpgcheck=1 +gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch +enabled=1 +autorefresh=1 +type=rpm-md +``` +- 开始安装:`yum install -y kibana`,预计文件有 200M 左右,国内网络安装可能会很慢,慢慢等 + - 安装完后会多了一个:kibana 用户和组 +- 启动和停止软件(默认是不启动的): + - 启动:`systemctl start kibana.service` + - 状态:`systemctl status kibana.service` + - 停止:`systemctl stop kibana.service` + - 重新启动:`systemctl restart kibana.service` +- 安装完成后,增加系统自启动: + - `/bin/systemctl daemon-reload` + - `/bin/systemctl enable kibana.service` + +#### RPM 安装后的一些配置位置说明 + +- 官网文档 CentOS: +- 配置文件的参数说明: +- kibana 安装后位置:`/usr/share/kibana` +- kibana 的配置文件:`/etc/kibana/kibana.yml` +- Log 位置:`/var/log/kibana/` +- 数据位置:`/var/lib/kibana` +- 插件位置:`/usr/share/kibana/plugins` + + +#### 配置 + +- 编辑配置文件:`vim /etc/kibana/kibana.yml` +- 默认只能 localhost 访问,修改成支持外网访问 + +``` +打开这个注释:#server.host: "localhost" +改为:server.host: "0.0.0.0" +``` + +- 然后你可以访问:`http://192.168.0.105:5601`,可以看到 kibana 的相关界面。 +- 在 logstash 安装这一步,如果你刚刚有按着我说的去做一个 elasticsearch 索引,那你此时不会看到这样的提示:`Unable to fetch mapping. 
Do you have indices matching the pattern?` + - 此时你可以直接点击 `create` 统计 `logstash-*` 格式的索引结果,看到相关内容 + - 如果你知道你的索引名称的规则,比如我现在要统计 Tomcat 的相关索引,我的索引名称是:`tomcat-log-*`,则我输入这个,点击:create 即可。 ## 资料 From 4be689db585189e05b2736c29ae9b986098e4b0b Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 5 Dec 2018 16:50:09 +0800 Subject: [PATCH 195/330] :construction: Elasticsearch --- markdown-file/Elasticsearch-Base.md | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md index 72b79523..6884f51e 100644 --- a/markdown-file/Elasticsearch-Base.md +++ b/markdown-file/Elasticsearch-Base.md @@ -155,8 +155,7 @@ curl -XPUT 'http://127.0.0.1:9200/grafanadb' -H 'Content-Type: application/json' #### 批量增加 / 删除测试数据 - 官网文档: -- 批量增加,接口地址:`POST /_bulk` -- cURL 格式: +- 批量增加,cURL 格式: ``` curl -X POST "http://127.0.0.1:9200/_bulk" -H 'Content-Type: application/json' -d' @@ -243,14 +242,7 @@ curl -X POST "http://127.0.0.1:9200/_bulk" -H 'Content-Type: application/json' - ' ``` -- 批量删除,接口地址:`POST /_bulk` - -``` -{ "delete": { "_index": "grafanadb", "_type": "radar", "_id": "100001" } } -{ "delete": { "_index": "grafanadb", "_type": "radar", "_id": "100002" } } -``` - -- cURL 格式: +- 批量删除,cURL 格式: ``` curl -X POST "http://127.0.0.1:9200/_bulk" -H 'Content-Type: application/json' -d' From 8de1559065e44594525753819cac95fced105e03 Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 6 Dec 2018 11:16:28 +0800 Subject: [PATCH 196/330] :construction: Nginx --- markdown-file/Nginx-Install-And-Settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/markdown-file/Nginx-Install-And-Settings.md b/markdown-file/Nginx-Install-And-Settings.md index 3932e2bc..bd0f3b24 100644 --- a/markdown-file/Nginx-Install-And-Settings.md +++ b/markdown-file/Nginx-Install-And-Settings.md @@ -45,7 +45,7 @@ ## Nginx 的 Docker 部署 - 预设好目录,在宿主机上创建下面目录:`mkdir -p /data/docker/nginx/html /data/docker/nginx/conf.d /data/docker/nginx/logs /data/docker/nginx/conf` -- **重点**:先准备好你的 nginx.conf 文件,存放在宿主机的:/data/docker/nginx/conf 目录下,等下需要映射。 +- **重点**:先准备好你的 nginx.conf 文件,存放在宿主机的:`vim /data/docker/nginx/conf/nginx.conf` 目录下,等下需要映射。 ``` worker_processes 1; From f056c1a5cb6d575c9baff415909ef1103a582bb6 Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 6 Dec 2018 11:33:09 +0800 Subject: [PATCH 197/330] :construction: Nginx --- markdown-file/Nginx-Install-And-Settings.md | 1 + 1 file changed, 1 insertion(+) diff --git a/markdown-file/Nginx-Install-And-Settings.md b/markdown-file/Nginx-Install-And-Settings.md index bd0f3b24..2a971a46 100644 --- a/markdown-file/Nginx-Install-And-Settings.md +++ b/markdown-file/Nginx-Install-And-Settings.md @@ -74,6 +74,7 @@ http { } ``` +- 官网镜像: - 下载镜像:`docker pull nginx:1.12.2` - 运行容器:`docker run --name youmeek-nginx -p 80:80 -v /data/docker/nginx/html:/usr/share/nginx/html:ro -v /data/docker/nginx/conf.d:/etc/nginx/conf.d -v /data/docker/nginx/logs:/var/log/nginx -v /data/docker/nginx/conf/nginx.conf:/etc/nginx/nginx.conf:ro -d nginx:1.12.2` - 重新加载配置(目前测试无效,只能重启服务):`docker exec -it youmeek-nginx nginx -s reload` From f3906188b9d541f94860d9ba28e626144aaeeb3e Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 6 Dec 2018 11:42:17 +0800 Subject: [PATCH 198/330] :construction: Nginx --- markdown-file/Nginx-Install-And-Settings.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/markdown-file/Nginx-Install-And-Settings.md b/markdown-file/Nginx-Install-And-Settings.md index 2a971a46..71cb04a4 100644 --- 
a/markdown-file/Nginx-Install-And-Settings.md
+++ b/markdown-file/Nginx-Install-And-Settings.md
@@ -44,7 +44,7 @@
 
 ## Deploying Nginx with Docker
 
-- Prepare the directories; on the host, create: `mkdir -p /data/docker/nginx/html /data/docker/nginx/conf.d /data/docker/nginx/logs /data/docker/nginx/conf`
+- Prepare the directories; on the host, create: `mkdir -p /data/docker/nginx/logs /data/docker/nginx/conf`
 - **Key point**: prepare your nginx.conf first and put it on the host at `/data/docker/nginx/conf/nginx.conf` (edit with `vim /data/docker/nginx/conf/nginx.conf`); it will be bind-mounted in a moment.
 
 ```
@@ -76,7 +76,7 @@ http {
 
 - Official image:
 - Pull the image: `docker pull nginx:1.12.2`
-- Run the container: `docker run --name youmeek-nginx -p 80:80 -v /data/docker/nginx/html:/usr/share/nginx/html:ro -v /data/docker/nginx/conf.d:/etc/nginx/conf.d -v /data/docker/nginx/logs:/var/log/nginx -v /data/docker/nginx/conf/nginx.conf:/etc/nginx/nginx.conf:ro -d nginx:1.12.2`
+- Run the container: `docker run --name youmeek-nginx -p 80:80 -v /data/docker/nginx/logs:/var/log/nginx -v /data/docker/nginx/conf/nginx.conf:/etc/nginx/nginx.conf:ro -d nginx:1.12.2`
 - Reload the config (currently does not take effect in my tests; only a restart works): `docker exec -it youmeek-nginx nginx -s reload`
 - Stop the service: `docker exec -it youmeek-nginx nginx -s stop`, or: `docker stop youmeek-nginx`
 - Restart the service: `docker restart youmeek-nginx`

From 588629bd32007d3120ec89d90d94584c19a7eacd Mon Sep 17 00:00:00 2001
From: judasn
Date: Thu, 6 Dec 2018 14:04:19 +0800
Subject: [PATCH 199/330] :construction: wrk

---
 README.md                                 |  1 +
 SUMMARY.md                                |  1 +
 TOC.md                                    |  1 +
 markdown-file/wrk-Install-And-Settings.md | 51 +++++++++++++++++++++++
 4 files changed, 54 insertions(+)
 create mode 100644 markdown-file/wrk-Install-And-Settings.md

diff --git a/README.md b/README.md
index 6b4b075c..b5a6a443 100644
--- a/README.md
+++ b/README.md
@@ -68,6 +68,7 @@
 - [Jira installation and configuration](markdown-file/Jira-Install-And-Settings.md)
 - [TeamCity installation and configuration](markdown-file/TeamCity-Install-And-Settings.md)
 - [Nginx installation and configuration](markdown-file/Nginx-Install-And-Settings.md)
+- [wrk installation and configuration](markdown-file/wrk-Install-And-Settings.md)
 - [FastDFS installation and configuration](markdown-file/FastDFS-Install-And-Settings.md)
 - [FastDFS with GraphicsMagick](markdown-file/FastDFS-Nginx-Lua-GraphicsMagick.md)
 - [RabbitMQ installation and configuration](markdown-file/RabbitMQ-Install-And-Settings.md)

diff --git a/SUMMARY.md b/SUMMARY.md
index a2bdcb4e..9402b78b 100644
--- a/SUMMARY.md
+++ b/SUMMARY.md
@@ -54,6 +54,7 @@
 * [Jenkins installation and configuration](markdown-file/Jenkins-Install-And-Settings.md)
 * [TeamCity installation and configuration](markdown-file/TeamCity-Install-And-Settings.md)
 * [Nginx installation and configuration](markdown-file/Nginx-Install-And-Settings.md)
+* [wrk installation and configuration](markdown-file/wrk-Install-And-Settings.md)
 * [FastDFS installation and configuration](markdown-file/FastDFS-Install-And-Settings.md)
 * [FastDFS with GraphicsMagick](markdown-file/FastDFS-Nginx-Lua-GraphicsMagick.md)
 * [RabbitMQ installation and configuration](markdown-file/RabbitMQ-Install-And-Settings.md)

diff --git a/TOC.md b/TOC.md
index 527ae765..8cb10242 100644
--- a/TOC.md
+++ b/TOC.md
@@ -52,6 +52,7 @@
 - [Jenkins installation and configuration](markdown-file/Jenkins-Install-And-Settings.md)
 - [TeamCity installation and configuration](markdown-file/TeamCity-Install-And-Settings.md)
 - [Nginx installation and configuration](markdown-file/Nginx-Install-And-Settings.md)
+- [wrk installation and configuration](markdown-file/wrk-Install-And-Settings.md)
 - [FastDFS installation and configuration](markdown-file/FastDFS-Install-And-Settings.md)
 - [FastDFS with GraphicsMagick](markdown-file/FastDFS-Nginx-Lua-GraphicsMagick.md)
 - [RabbitMQ installation and configuration](markdown-file/RabbitMQ-Install-And-Settings.md)

diff --git a/markdown-file/wrk-Install-And-Settings.md b/markdown-file/wrk-Install-And-Settings.md
new file mode 100644
index 00000000..26d9722f
--- /dev/null
+++ b/markdown-file/wrk-Install-And-Settings.md
@@ -0,0 +1,51 @@
+# wrk installation and configuration
+
+
+## About wrk
+
+- wrk's biggest advantage over ab is multithreading, which allows much higher concurrency
+
+
+## Install
+
+- CentOS 7.4
+- Official notes:
+
+```
+# the Development Tools group installs roughly 90 sub-tools
+sudo yum groupinstall 'Development Tools'
+sudo yum install -y openssl-devel git
+git clone --depth=1 https://github.com/wg/wrk.git wrk
+cd wrk
+make
+# move the executable to somewhere in your PATH
+sudo cp wrk /usr/local/bin
+```
+
+- Show the help: `wrk --help`
+
+## Usage
+
+- Launch 10 threads, each issuing 100 requests, for 15 seconds: `wrk -t10 -c100 -d15s http://www.baidu.com`
+- The final report:
+
+```
+Running 15s test @ http://www.baidu.com
+  10 threads and 100 connections
+  Thread Stats   Avg      Stdev     Max   +/- Stdev
+    Latency   208.39ms  324.00ms   1.91s    87.70%
+    Req/Sec    82.68     64.81    414.00     70.60%
+  11345 requests in 15.02s, 166.51MB read
+  Socket errors: connect 0, read 20, write 0, timeout 59
+Requests/sec:    755.26
+Transfer/sec:     11.08MB
+```
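+
+- wrk can also exercise POST endpoints through a small Lua script passed with `-s`; a sketch, with URL, body and header as placeholders:
+
+```
+-- post.lua
+wrk.method = "POST"
+wrk.body   = '{"hello": "world"}'
+wrk.headers["Content-Type"] = "application/json"
+```
+
+- Run it like: `wrk -t4 -c50 -d15s -s post.lua http://127.0.0.1:8080/api`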
+
+## Other notes
+
+- wrk speaks HTTP/1.1, and persistent connections are on by default
+- To test short-lived connections: `wrk -H "Connection: Close" -c 100 -d 10 http://domain/path`
+
+## References
+
+- 
\ No newline at end of file

From ed3469a70cec74a99cf0ee8a1a822e70e3cba57b Mon Sep 17 00:00:00 2001
From: judasn
Date: Thu, 6 Dec 2018 14:06:11 +0800
Subject: [PATCH 200/330] :construction: wrk

---
 markdown-file/wrk-Install-And-Settings.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/markdown-file/wrk-Install-And-Settings.md b/markdown-file/wrk-Install-And-Settings.md
index 26d9722f..6e3dbe5f 100644
--- a/markdown-file/wrk-Install-And-Settings.md
+++ b/markdown-file/wrk-Install-And-Settings.md
@@ -26,7 +26,7 @@
 
 ## Usage
 
-- Launch 10 threads, each issuing 100 requests, for 15 seconds: `wrk -t10 -c100 -d15s http://www.baidu.com`
+- Launch 10 threads holding 100 connections in total, for 15 seconds: `wrk -t10 -c100 -d15s http://www.baidu.com`
 - The final report:

From 12865df84bf0583a41208d6c79d342bcdccd9d38 Mon Sep 17 00:00:00 2001
From: judasn
Date: Thu, 6 Dec 2018 14:44:16 +0800
Subject: [PATCH 201/330] :construction: wrk

---
 favorite-file/shell/shell-for.sh               | 18 ++++++++++++++++++
 ...-param-demo.sh => shell-with-param-demo.sh} |  0
 2 files changed, 18 insertions(+)
 create mode 100644 favorite-file/shell/shell-for.sh
 rename favorite-file/shell/{with-param-demo.sh => shell-with-param-demo.sh} (100%)

diff --git a/favorite-file/shell/shell-for.sh b/favorite-file/shell/shell-for.sh
new file mode 100644
index 00000000..9b92b372
--- /dev/null
+++ b/favorite-file/shell/shell-for.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+# total number of loop iterations
+totalDegree=$1
+
+# default to 10 when no argument is given
+if [ "$totalDegree" = "" ];
+then
+    totalDegree=10
+fi
+
+for((timeTemp = 0; timeTemp <= $totalDegree; timeTemp = timeTemp + 5))
+do
+    echo "timeTemp=$timeTemp"
+done
+
+
+

diff --git a/favorite-file/shell/with-param-demo.sh b/favorite-file/shell/shell-with-param-demo.sh
similarity index 100%
rename from favorite-file/shell/with-param-demo.sh
rename to favorite-file/shell/shell-with-param-demo.sh

From d3d5d37bb3fafc429a3ec20b257d8516b8d725f7 Mon Sep 17 00:00:00 2001
From: judasn
Date: Thu, 6 Dec 2018 23:17:39 +0800
Subject: [PATCH 202/330] :construction: Kibana

---
 markdown-file/Kibana-Base.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/markdown-file/Kibana-Base.md b/markdown-file/Kibana-Base.md
index dec0b227..45f85adc 100644
--- a/markdown-file/Kibana-Base.md
+++ b/markdown-file/Kibana-Base.md
@@ -57,6 +57,10 @@ type=rpm-md
 ```
 
 - Then visit: `http://192.168.0.105:5601` to reach the Kibana UI.
+  - 1. Create index pattern
+    - When you create a new index in Elasticsearch, Kibana does not pick it up automatically; every new index you want matched goes through this step
+  - 2. Discover | the time-range filter in the upper right corner
+    - This step matters a lot: the filters, charts and so on are all scoped to this time range
 - Back at the Logstash install step: if you built an Elasticsearch index the way I described, you will not see the message `Unable to fetch mapping. Do you have indices matching the pattern?`
   - In that case you can click `create` directly to report on indices matching `logstash-*` and see the results
   - If you know your index naming scheme, say I want to report on Tomcat indices named `tomcat-log-*`, type that pattern and click create.

From cc162e24fd3c4e4cc7b805e01eca4636aa38c195 Mon Sep 17 00:00:00 2001
From: judasn
Date: Fri, 7 Dec 2018 14:17:37 +0800
Subject: [PATCH 203/330] :construction: ncat

---
 markdown-file/Flink-Install-And-Settings.md | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/markdown-file/Flink-Install-And-Settings.md b/markdown-file/Flink-Install-And-Settings.md
index ac034381..e75382d2 100644
--- a/markdown-file/Flink-Install-And-Settings.md
+++ b/markdown-file/Flink-Install-And-Settings.md
@@ -58,6 +58,17 @@
     - There is an `Add New` button for uploading a jar; then fill in the class path: `com.youmeek.WordCount`
     - `parallelism` is the degree of parallelism, given as a number; usually set to 2-3x the total CPU cores of the cluster (no need to set it in single-machine mode)
 
+## Installing ncat, handy for sending test data
+
+- Environment: CentOS 7.4
+- Official download: , look for the rpm package
+- Latest version as of 2018-03: `wget https://nmap.org/dist/ncat-7.60-1.x86_64.rpm`
+- Latest version as of 2018-12: `wget https://nmap.org/dist/ncat-7.70-1.x86_64.rpm`
+- Install (match the file you actually downloaded): `sudo rpm -i ncat-7.60-1.x86_64.rpm`
+- Symlink it: `sudo ln -s /usr/bin/ncat /usr/bin/nc`
+- Verify: `nc --version`
+- Listen on port 9011: `nc -lk 9011`, then type some lines and check whether the Flink demo receives them (a sketch of such a demo follows below)
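+- In case you have no demo at hand, a minimal sketch of the receiving side: a socket WordCount on the Flink 1.7 Java API, with host and port matching the nc command above:
+
+```java
+import org.apache.flink.api.common.typeinfo.Types;
+import org.apache.flink.api.java.tuple.Tuple2;
+import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
+import org.apache.flink.streaming.api.windowing.time.Time;
+import org.apache.flink.util.Collector;
+
+public class SocketWordCount {
+    public static void main(String[] args) throws Exception {
+        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
+
+        env.socketTextStream("127.0.0.1", 9011)                 // the nc -lk 9011 feed
+            .flatMap((String line, Collector<Tuple2<String, Integer>> out) -> {
+                for (String word : line.split("\\s+")) {
+                    out.collect(new Tuple2<>(word, 1));
+                }
+            })
+            .returns(Types.TUPLE(Types.STRING, Types.INT))      // lambdas lose generics, so declare the type
+            .keyBy(0)                                           // key by the word field of the tuple
+            .timeWindow(Time.seconds(5))                        // tumbling 5-second windows
+            .sum(1)                                             // sum the per-word counts
+            .print();
+
+        env.execute("Socket WordCount");
+    }
+}
+```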
+
 -------------------------------------------------------------------

From 59ff1bbce471cd491a3c83783aa3c30461712a1e Mon Sep 17 00:00:00 2001
From: judasn
Date: Fri, 7 Dec 2018 16:24:46 +0800
Subject: [PATCH 204/330] :construction: Elasticsearch

---
 markdown-file/Elasticsearch-Base.md | 82 +++++------------------------
 1 file changed, 12 insertions(+), 70 deletions(-)

diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md
index 72b79523..0176b74e 100644
--- a/markdown-file/Elasticsearch-Base.md
+++ b/markdown-file/Elasticsearch-Base.md
@@ -169,76 +169,6 @@ curl -X POST "http://127.0.0.1:9200/_bulk" -H 'Content-Type: application/json' -
 { "post_date" : "2018-12-01 10:00:15", "request_num" : 4 }
 { "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100005" } }
 { "post_date" : "2018-12-01 10:00:20", "request_num" : 5 }
-{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100006" } }
-{ "post_date" : "2018-12-01 10:00:25", "request_num" : 6 }
-{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100007" } }
-{ "post_date" : "2018-12-01 10:00:30", "request_num" : 7 }
-{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100008" } }
-{ "post_date" : "2018-12-01 10:00:35", "request_num" : 8 }
-{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100009" } }
-{ "post_date" : "2018-12-01 10:00:40", "request_num" : 9 }
-{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100010" } }
-{ "post_date" : "2018-12-01 10:00:45", "request_num" : 10 }
-{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100011" } }
-{ "post_date" : "2018-12-01 10:00:50", "request_num" : 1 }
-{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100012" } }
-{ "post_date" : "2018-12-01 10:00:55", "request_num" : 2 }
-{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100013" } }
-{ "post_date" : "2018-12-01 10:01:00", "request_num" : 3 }
-{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100014" } }
-{ "post_date" : "2018-12-01 10:01:05", "request_num" : 4 }
-{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100015" } }
-{ "post_date" : "2018-12-01 10:01:10", "request_num" : 5 }
-{ "index" : { "_index" : "grafanadb", "_type" : "radar", 
"_id" : "100016" } } -{ "post_date" : "2018-12-01 10:01:15", "request_num" : 6 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100017" } } -{ "post_date" : "2018-12-01 10:01:20", "request_num" : 7 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100018" } } -{ "post_date" : "2018-12-01 10:01:25", "request_num" : 8 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100019" } } -{ "post_date" : "2018-12-01 10:01:30", "request_num" : 9 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100020" } } -{ "post_date" : "2018-12-01 10:01:35", "request_num" : 10 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100021" } } -{ "post_date" : "2018-12-01 10:01:40", "request_num" : 1 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100022" } } -{ "post_date" : "2018-12-01 10:01:45", "request_num" : 2 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100023" } } -{ "post_date" : "2018-12-01 10:01:50", "request_num" : 3 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100024" } } -{ "post_date" : "2018-12-01 10:01:55", "request_num" : 4 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100025" } } -{ "post_date" : "2018-12-01 10:02:00", "request_num" : 5 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100026" } } -{ "post_date" : "2018-12-01 10:02:05", "request_num" : 6 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100027" } } -{ "post_date" : "2018-12-01 10:02:10", "request_num" : 7 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100028" } } -{ "post_date" : "2018-12-01 10:02:15", "request_num" : 8 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100029" } } -{ "post_date" : "2018-12-01 10:02:20", "request_num" : 9 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100030" } } -{ "post_date" : "2018-12-01 10:02:25", "request_num" : 10 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100031" } } -{ "post_date" : "2018-12-01 10:02:30", "request_num" : 1 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100032" } } -{ "post_date" : "2018-12-01 10:02:35", "request_num" : 2 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100033" } } -{ "post_date" : "2018-12-01 10:02:40", "request_num" : 3 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100034" } } -{ "post_date" : "2018-12-01 10:02:45", "request_num" : 4 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100035" } } -{ "post_date" : "2018-12-01 10:02:50", "request_num" : 5 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100036" } } -{ "post_date" : "2018-12-01 10:02:55", "request_num" : 6 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100037" } } -{ "post_date" : "2018-12-01 10:03:00", "request_num" : 7 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100038" } } -{ "post_date" : "2018-12-01 10:03:05", "request_num" : 8 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100039" } } -{ "post_date" : "2018-12-01 10:03:10", "request_num" : 9 } -{ "index" : { "_index" : "grafanadb", "_type" : "radar", "_id" : "100040" } } -{ "post_date" : "2018-12-01 10:03:15", "request_num" : 10 } ' ``` @@ -251,6 +181,18 @@ curl -X POST "http://127.0.0.1:9200/_bulk" -H 'Content-Type: application/json' - ' ``` +- 
Wipe all documents from an index, splitting the delete across 5 slices; cURL format:
+
+```
+curl -X POST "http://127.0.0.1:9200/index_name/type_name/_delete_by_query?refresh&slices=5&pretty" -H 'Content-Type: application/json' -d'
+{
+  "query": {
+    "match_all": {}
+  }
+}
+'
+```
+

From b1afd6299430990daed3b901243dd94d271a5ac6 Mon Sep 17 00:00:00 2001
From: judasn
Date: Fri, 7 Dec 2018 19:37:07 +0800
Subject: [PATCH 205/330] :construction: Elasticsearch

---
 markdown-file/Kafka-Install-And-Settings.md | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/markdown-file/Kafka-Install-And-Settings.md b/markdown-file/Kafka-Install-And-Settings.md
index bf4c362d..c23626e6 100644
--- a/markdown-file/Kafka-Install-And-Settings.md
+++ b/markdown-file/Kafka-Install-And-Settings.md
@@ -455,11 +455,10 @@ socket.request.max.bytes=104857600
 - Run the Kafka service in the background: `cd /usr/local/kafka && bin/kafka-server-start.sh -daemon config/server.properties`
 - Stop the Kafka service: `cd /usr/local/kafka && bin/kafka-server-stop.sh`
 - Open another terminal to test:
-  - Enter the directory: `cd /usr/local/kafka`
-  - Create a topic: `bin/kafka-topics.sh --create --zookeeper youmeekhost:2181 --replication-factor 1 --partitions 1 --topic my-topic-test`
-  - List topics: `bin/kafka-topics.sh --list --zookeeper youmeekhost:2181`
-  - Delete a topic: `bin/kafka-topics.sh --delete --topic my-topic-test --zookeeper youmeekhost:2181`
-  - Produce messages to a topic: `bin/kafka-console-producer.sh --broker-list youmeekhost:9092 --topic my-topic-test`, then type the content you want to send at the interactive prompt
+  - Create a topic: `cd /usr/local/kafka && bin/kafka-topics.sh --create --zookeeper youmeekhost:2181 --replication-factor 1 --partitions 1 --topic my-topic-test`
+  - List topics: `cd /usr/local/kafka && bin/kafka-topics.sh --list --zookeeper youmeekhost:2181`
+  - Delete a topic: `cd /usr/local/kafka && bin/kafka-topics.sh --delete --topic my-topic-test --zookeeper youmeekhost:2181`
+  - Produce messages to a topic: `cd /usr/local/kafka && bin/kafka-console-producer.sh --broker-list youmeekhost:9092 --topic my-topic-test`, then type the content you want to send at the interactive prompt
 - Open yet another terminal and consume the messages: `cd /usr/local/kafka && bin/kafka-console-consumer.sh --bootstrap-server youmeekhost:9092 --topic my-topic-test --from-beginning`
 - Now type a line in the producing terminal and press Enter; the consuming terminal receives it.
 - Spring Boot dependency:

From 0502fea2ec88529b3e1d520bd9a264891f19b71f Mon Sep 17 00:00:00 2001
From: judasn
Date: Sat, 8 Dec 2018 11:03:23 +0800
Subject: [PATCH 206/330] :construction: Flink

---
 markdown-file/Flink-Install-And-Settings.md | 165 ++++++++++++++++++--
 1 file changed, 152 insertions(+), 13 deletions(-)

diff --git a/markdown-file/Flink-Install-And-Settings.md b/markdown-file/Flink-Install-And-Settings.md
index e75382d2..c2ea84d3 100644
--- a/markdown-file/Flink-Install-And-Settings.md
+++ b/markdown-file/Flink-Install-And-Settings.md
@@ -76,8 +76,8 @@
 
 - Four cornerstones: Checkpoint, State, Time, Window
 - Solves the exactly-once problem
-- State management
 - Implements the watermark mechanism, which handles out-of-order and late data when processing on event time
+- State management
 - Ships a set of ready-to-use window operations: tumbling, sliding and session windows
 - Everything I wanted to say is already said in these articles:
   -
@@ -85,7 +85,7 @@
 - A few extra notes here
 
-
+```
 The Client submits jobs to the JobManager; the JobManager hands tasks to TaskManagers for execution, and the TaskManagers report task status back through heartbeats
 In a Flink cluster, compute resources are defined as Task Slots
 Each TaskManager owns one or more slots
@@ -103,14 +103,14 @@
 Each TaskManager owns one or more task slots, and every slot can run a pipeline made of several consecutive tasks, for example the n-th parallel instance of a MapFunction together with the n-th parallel instance of a ReduceFunction.
 
-
 source (where the stream comes in)
 Transformations (where the stream is processed)
 sink (where the stream goes out)
 
 Flink programs are inherently parallel and distributed. Streams are split into stream partitions, and operators are split into operator subtasks. These subtasks run in different threads on different machines (containers), independently and without interfering with one another. The number of operator subtasks of an operation is called its parallelism. The parallelism of a stream always equals the parallelism of the operator that produced it. Different operators in one Flink program may have different parallelism.
+```
 
-
+-------------------------------------------------------------------
 
 #### The Checkpoint mechanism, for fault tolerance
@@ -120,8 +120,6 @@
   - [Flink 小贴士 (5): Savepoint 和 Checkpoint 的 3 个不同点](http://wuchong.me/blog/2018/11/25/flink-tips-differences-between-savepoints-and-checkpoints/)
   - [Flink 小贴士 (2):Flink 如何管理 Kafka 消费位点](http://wuchong.me/blog/2018/11/04/how-apache-flink-manages-kafka-consumer-offsets/)
-  - []()
-  - []()
 - Checkpoints let Flink recover the state and the position in the stream, giving the application the same semantics as a failure-free run
 - A checkpoint is Flink's mechanism for recovering from failures: it snapshots the state of the whole application, including the read positions of the input sources. On failure, Flink loads the application state from a checkpoint, resumes from the recovered read position, and carries on as if nothing had happened.
@@ -144,11 +142,23 @@
 - Checkpointing is off by default; to enable it, call the enableCheckpointing(n) method on the StreamExecutionEnvironment, where n is the checkpoint interval in milliseconds.
 - One core piece here: Facebook's RocksDB (an embeddable, persistent key-value store) is used as the state database
 
+-------------------------------------------------------------------
 
 #### Exactly-Once
 
 - Exactly-once exists because checkpoints exist
 - [Apache Flink 端到端(end-to-end)Exactly-Once特性概览 (翻译)](https://my.oschina.net/u/992559/blog/1819948)
+- The usual delivery semantics:
+
+```
+at most once : messages may be lost.
+at least once : messages may be duplicated.
+exactly once : neither lost nor duplicated.
+```
+
+
+-------------------------------------------------------------------
+
 
 #### Watermark
 
 - [Flink 小贴士 (3): 轻松理解 Watermark](http://wuchong.me/blog/2018/11/18/flink-tips-watermarks-in-apache-flink-made-easy/)
 - Know the time notions around event time: event-time (when the message was produced), processing-time (when the message is processed), ingestion-time (when the message enters the Flink framework)
 - What watermarks do: they define when to stop waiting for earlier data
 - Watermarks only take effect with the EventTime and IngestionTime time characteristics, and under IngestionTime the time equals the message's ingestion time
 
+-------------------------------------------------------------------
+
 #### Windows
 
-- Tumbling window (Tumble)
+- 
 - [Flink 原理与实现:Window 机制](http://wuchong.me/blog/2016/05/25/flink-internals-window-mechanism/)
 - [Flink 原理与实现:Session Window](http://wuchong.me/blog/2016/06/06/flink-internals-session-window/)
 
+##### Tumbling windows
+
+- A tumbling window has a fixed size and never overlaps
+
+###### Tumbling event-time window
+
+```
+input
+    .keyBy(<key selector>)
+    .window(TumblingEventTimeWindows.of(Time.seconds(5)))
+    .<windowed transformation>(<window function>);
+```
+
+- A daily tumbling event-time window, offset by -8 hours
+
+```
+input
+    .keyBy(<key selector>)
+    .window(TumblingEventTimeWindows.of(Time.days(1), Time.hours(-8)))
+    .<windowed transformation>(<window function>);
+```
+
+###### Tumbling processing-time window
+
+```
+input
+    .keyBy(<key selector>)
+    .window(TumblingProcessingTimeWindows.of(Time.seconds(5)))
+    .<windowed transformation>(<window function>);
+```
+
+---------------------------------
+
+##### Sliding windows
+
+- The sliding window assigner puts elements into fixed-length windows. As with tumbling windows, the window size is set by the size parameter; an additional slide parameter controls how often a new sliding window starts. So when the slide is smaller than the size, sliding windows overlap, and an element is then assigned to several windows.
+- For example, with 10-minute windows sliding every 5 minutes, a new window opens every 5 minutes and each one covers the data of the last 10 minutes.
+
+###### Sliding event-time window
+
+```
+input
+    .keyBy(<key selector>)
+    .window(SlidingEventTimeWindows.of(Time.seconds(10), Time.seconds(5)))
+    .<windowed transformation>(<window function>);
+```
+
+###### Sliding processing-time window
+
+```
+input
+    .keyBy(<key selector>)
+    .window(SlidingProcessingTimeWindows.of(Time.seconds(10), Time.seconds(5)))
+    .<windowed transformation>(<window function>);
+```
+
+- A sliding processing-time window offset by -8 hours
+
+```
+input
+    .keyBy(<key selector>)
+    .window(SlidingProcessingTimeWindows.of(Time.hours(12), Time.hours(1), Time.hours(-8)))
+    .<windowed transformation>(<window function>);
+```
+
+---------------------------------
+
+##### Count windows
+
+- Groups the data stream by element count instead of time
+
+###### Tumbling count window
+
+- Say we want a purchase total for every 100 user purchase events: the window is evaluated whenever it has filled up with 100 elements. This kind of window is called a tumbling count window
+
+```
+input
+    .keyBy(<key selector>)
+    .countWindow(100)
+    .<windowed transformation>(<window function>);
+```
+
+---------------------------------
+
+
+##### Session windows
+
+- The session window assigner groups elements by periods of session activity. Compared with tumbling and sliding windows, session windows do not overlap and have no fixed start and end time. Instead, a window closes once it stops receiving elements for a fixed period, i.e. when a gap of inactivity occurs. A session window is configured with a session gap, which defines that period of inactivity; when such a gap occurs, the current session closes and subsequent elements go to a new session window.
+
+###### Event-time session window
+
+```
+input
+    .keyBy(<key selector>)
+    .window(EventTimeSessionWindows.withGap(Time.minutes(10)))
+    .<windowed transformation>(<window function>);
+```
+
+###### Processing-time session window
+
+```
+input
+    .keyBy(<key selector>)
+    .window(ProcessingTimeSessionWindows.withGap(Time.minutes(10)))
+    .<windowed transformation>(<window function>);
+```
+
+---------------------------------
+
+##### Global windows
+
+- The global window assigner puts all elements with the same key into one single global window. This pattern only makes sense when you also define a custom trigger; otherwise no computation ever runs, because a global window has no natural end at which the elements could be aggregated.
+
+```
+input
+    .keyBy(<key selector>)
+    .window(GlobalWindows.create())
+    .<windowed transformation>(<window function>);
+```
+
+-------------------------------------------------------------------
+
+
+#### Preparing for production
 
-#### Production environment
-
 - [Flink 小贴士 (7): 4个步骤,让 Flink 应用达到生产状态](http://wuchong.me/blog/2018/12/03/flink-tips-4-steps-flink-application-production-ready/)
 
+-------------------------------------------------------------------
 
 #### Runtime environment
 
 
-Flink deployment
-Flink has three deployment modes: Local, Standalone Cluster, and Yarn Cluster. In Local mode the JobManager and the TaskManager share one JVM for the whole workload. Local mode is the most convenient way to validate a simple application; real deployments mostly use Standalone or Yarn Cluster, and those two modes are introduced below.
+- Flink deployment:
+- Flink has three deployment modes: Local, Standalone Cluster, and Yarn Cluster.
+- In Local mode, the JobManager and the TaskManager share one JVM for the whole workload.
+- Local mode is the most convenient way to validate a simple application; real deployments mostly use Standalone or Yarn Cluster
 
+-------------------------------------------------------------------
 
 #### Flink HA
 
+-------------------------------------------------------------------
 
 #### Monitoring REST API
 
 https://ci.apache.org/projects/flink/flink-docs-stable/monitoring/rest_api.html#monitoring-rest-api
 
+-------------------------------------------------------------------
 
 #### Main core APIs
 
@@ -195,6 +334,7 @@
   - Kafka Connectors
 - Elasticsearch sink
 
+-------------------------------------------------------------------
 
 #### Table & SQL API (the relational APIs)
 
@@ -218,6 +358,5 @@
 - [Flink学习笔记(4):基本概念](https://www.jianshu.com/p/0cd1db4282be)
 - [Apache Flink:特性、概念、组件栈、架构及原理分析](http://shiyanjun.cn/archives/1508.html)
 - [Flink 原理与实现:理解 Flink 中的计算资源](http://wuchong.me/blog/2016/05/09/flink-internals-understanding-execution-resources/)
-- []()
-- []()
+- [Flink实战教程](https://liguohua-bigdata.gitbooks.io/simple-flink/content/)

From 36b4d38adc13b54503d7ae25a65e66d5eda2e933 Mon Sep 17 00:00:00 2001
From: judasn
Date: Tue, 11 Dec 2018 17:32:04 +0800
Subject: [PATCH 207/330] :construction: MySQL

---
 markdown-file/Mysql-Install-And-Settings.md | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git a/markdown-file/Mysql-Install-And-Settings.md b/markdown-file/Mysql-Install-And-Settings.md
index 1af74312..98fdbf34 100644
--- a/markdown-file/Mysql-Install-And-Settings.md
+++ b/markdown-file/Mysql-Install-And-Settings.md
@@ -192,7 +192,7 @@ table_open_cache=256
 - Run in a terminal (CentOS 7): `systemctl stop mysql`
 - Run in a terminal (the Linux user mysql added earlier must exist): `/usr/local/mysql/bin/mysqld --skip-grant-tables --user=mysql`
   - MySQL now stays in the foreground listening; open another terminal window for the next steps
-  - Run in a terminal: `mysql -u root mysql`
+  - Run in a terminal: `mysql -u root mysql`, or: `mysql -h 127.0.0.1 -u root -P 3306 -p`
   - To set the password to 123456, run inside the MySQL prompt: `UPDATE user SET Password=PASSWORD('123456') where USER='root';FLUSH PRIVILEGES;`
 - Then restart the MySQL service (CentOS 6): `service mysql restart`
 - Then restart the MySQL service (CentOS 7): `systemctl restart mysql`
@@ -221,6 +221,24 @@
 set password = password('NEW_PASSWORD');
 FLUSH PRIVILEGES;
 ```
 
+## MySQL 5.7
+
+- Error message:
+
+```
+Expression #1 of ORDER BY clause is not in GROUP BY clause and
一个操作的 operator subtask 的数目,被称为 parallelism(并行度)。一个 stream 的并行度,总是等于生成它的(operator)操作的并行度。一个 Flink 程序中,不同的 operator 可能具有不同的并行度。
+```
-
+-------------------------------------------------------------------

 #### 为了容错的 Checkpoint 机制

@@ -120,8 +120,6 @@
 - [A Deep Dive into Rescalable State in Apache Flink](https://flink.apache.org/features/2017/07/04/flink-rescalable-state.html)
 - [Flink 小贴士 (5): Savepoint 和 Checkpoint 的 3 个不同点](http://wuchong.me/blog/2018/11/25/flink-tips-differences-between-savepoints-and-checkpoints/)
 - [Flink 小贴士 (2):Flink 如何管理 Kafka 消费位点](http://wuchong.me/blog/2018/11/04/how-apache-flink-manages-kafka-consumer-offsets/)
-- []()
-- []()
 - Checkpoint 允许 Flink 恢复流中的状态和位置,使应用程序具有与无故障执行相同的语义
 - Checkpoint 是 Flink 用来从故障中恢复的机制,快照下了整个应用程序的状态,当然也包括输入源读取到的位点。如果发生故障,Flink 将通过从 Checkpoint 加载应用程序状态并从恢复的读取位点继续应用程序的处理,就像什么事情都没发生一样。

@@ -144,11 +142,23 @@ Flink通过一个可配置的时间,周期性的生成checkpoint,将它写
 
 - 这里有一个核心:用到 Facebook 的 RocksDB 数据库(可嵌入式的支持持久化的 key-value 存储系统)
 
+-------------------------------------------------------------------

 #### Exactly-Once
 
 - 因为有了 Checkpoint,才有了 Exactly-Once
 - [Apache Flink 端到端(end-to-end)Exactly-Once特性概览 (翻译)](https://my.oschina.net/u/992559/blog/1819948)
+- 常见有这几种语义:
+
+```
+at most once : 至多一次。可能导致消息丢失。
+at least once : 至少一次。可能导致消息重复。
+exactly once : 刚好一次。不丢失也不重复。
+```
+
+
+-------------------------------------------------------------------
+

 #### Watermark

@@ -157,34 +167,163 @@
 - watermark 的作用,他们定义了何时不再等待更早的数据
 - WaterMark 只在时间特性 EventTime 和 IngestionTime 起作用,并且 IngestionTime 的时间等同于消息的 ingestion 时间

+-------------------------------------------------------------------
+
 #### 窗口
 
-- 翻滚窗口(Tumble)
+
 - [Flink 原理与实现:Window 机制](http://wuchong.me/blog/2016/05/25/flink-internals-window-mechanism/)
 - [Flink 原理与实现:Session Window](http://wuchong.me/blog/2016/06/06/flink-internals-session-window/)
 
+##### 滚动窗口(Tumbling Windows)
-#### 生产环境
+
+- 滚动窗口有一个固定的大小,并且不会出现重叠
-- [Flink 小贴士 (7): 4个步骤,让 Flink 应用达到生产状态](http://wuchong.me/blog/2018/12/03/flink-tips-4-steps-flink-application-production-ready/)
+###### 滚动事件时间窗口
+
+```
+input
+    .keyBy()
+    .window(TumblingEventTimeWindows.of(Time.seconds(5)))
+    .();
+```
+
+- 每日偏移 8 小时的滚动事件时间窗口
+
+```
+input
+    .keyBy()
+    .window(TumblingEventTimeWindows.of(Time.days(1), Time.hours(-8)))
+    .();
+```
+
+###### 滚动处理时间窗口
+
+```
+input
+    .keyBy()
+    .window(TumblingProcessingTimeWindows.of(Time.seconds(5)))
+    .();
+```
+
+---------------------------------
+
+##### 滑动窗口(Sliding Windows)
+
+- 滑动窗口分配器将元素分配到固定长度的窗口中,与滚动窗口类似,窗口的大小由窗口大小参数来配置,另一个窗口滑动参数控制滑动窗口开始的频率。因此,滑动窗口如果滑动参数小于窗口大小参数的话,窗口是可以重叠的,在这种情况下元素会被分配到多个窗口中。
+- 例如,你有一个大小 10 分钟、每 5 分钟滑动一次的窗口,那么每隔 5 分钟就会产生一个窗口,每个窗口里包含着最近 10 分钟内产生的数据
+
+###### 滑动事件时间窗口
+
+```
+input
+    .keyBy()
+    .window(SlidingEventTimeWindows.of(Time.seconds(10), Time.seconds(5)))
+    .();
+```
+
+###### 滑动处理时间窗口
+
+```
+input
+    .keyBy()
+    .window(SlidingProcessingTimeWindows.of(Time.seconds(10), Time.seconds(5)))
+    .();
+```
+
+- 偏移 8 小时的滑动处理时间窗口
+
+```
+input
+    .keyBy()
+    .window(SlidingProcessingTimeWindows.of(Time.hours(12), Time.hours(1), Time.hours(-8)))
+    .();
+```
+
+---------------------------------
+
+##### 计数窗口(Count Window)
+
+- 根据元素个数对数据流进行分组的
+
+###### 翻滚计数窗口
+
+- 当我们想要每 100 个用户购买行为事件统计购买总数,那么每当窗口中填满 100 个元素了,就会对窗口进行计算,这种窗口我们称之为翻滚计数窗口(Tumbling Count Window)
+
+```
+input
+    .keyBy()
+    .countWindow(100)
+    .();
+```
+
+---------------------------------
+
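+- 在继续看会话窗口之前,先补充一个把上面的 Watermark 和滚动事件时间窗口串起来的示意代码(基于 Flink 1.7 的 Java DataStream API;数据源 MyDemoSource 和字段纯属假设,仅供参考):
+
+```
+StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
+env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
+
+// 假设数据是 Tuple3<用户ID, 事件时间戳(毫秒), 次数>,MyDemoSource 为假设的数据源
+DataStream<Tuple3<String, Long, Integer>> input = env.addSource(new MyDemoSource());
+
+input
+    // Watermark:最多容忍 5 秒的乱序,事件时间取自 f1 字段
+    .assignTimestampsAndWatermarks(new BoundedOutOfOrdernessTimestampExtractor<Tuple3<String, Long, Integer>>(Time.seconds(5)) {
+        @Override
+        public long extractTimestamp(Tuple3<String, Long, Integer> element) {
+            return element.f1;
+        }
+    })
+    .keyBy(0)
+    // 5 秒一个的滚动事件时间窗口,统计每个用户在窗口内的次数合计
+    .window(TumblingEventTimeWindows.of(Time.seconds(5)))
+    .sum(2)
+    .print();
+
+env.execute("window-watermark-demo");
+```
+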
+##### 会话窗口(Session Windows)
+
+- session 窗口分配器通过 session 活动来对元素进行分组,session 窗口跟滚动窗口和滑动窗口相比,不会有重叠和固定的开始时间和结束时间的情况。相反,当它在一个固定的时间周期内不再收到元素,即非活动间隔产生,那么这个窗口就会关闭。一个 session 窗口通过一个 session 间隔来配置,这个 session 间隔定义了非活跃周期的长度。当这个非活跃周期产生,那么当前的 session 将关闭并且后续的元素将被分配到新的 session 窗口中去。
+
+###### 事件时间会话窗口
+
+```
+input
+    .keyBy()
+    .window(EventTimeSessionWindows.withGap(Time.minutes(10)))
+    .();
+```
+
+###### 处理时间会话窗口
+
+```
+input
+    .keyBy()
+    .window(ProcessingTimeSessionWindows.withGap(Time.minutes(10)))
+    .();
+```
+
+---------------------------------
+
+##### 全局窗口(Global Windows)
+
+- 全局窗口分配器将所有具有相同 key 的元素分配到同一个全局窗口中,这个窗口模式仅适用于用户还需自定义触发器的情况。否则,由于全局窗口没有一个自然的结尾,无法执行元素的聚合,将不会有计算被执行。
+
+```
+input
+    .keyBy()
+    .window(GlobalWindows.create())
+    .();
+```
+
+-------------------------------------------------------------------
+
+
+#### 生产环境准备
+
+- [Flink 小贴士 (7): 4个步骤,让 Flink 应用达到生产状态](http://wuchong.me/blog/2018/12/03/flink-tips-4-steps-flink-application-production-ready/)
+
+-------------------------------------------------------------------

 #### 运行环境
 
-Flink 的部署
-Flink 有三种部署模式,分别是 Local、Standalone Cluster 和 Yarn Cluster。对于 Local 模式来说,JobManager 和 TaskManager 会共用一个 JVM 来完成 Workload。如果要验证一个简单的应用,Local 模式是最方便的。实际应用中大多使用 Standalone 或者 Yarn Cluster。下面我主要介绍下这两种模式。
+- Flink 的部署
+- Flink 有三种部署模式,分别是 Local、Standalone Cluster 和 Yarn Cluster。
+- 对于 Local 模式来说,JobManager 和 TaskManager 会共用一个 JVM 来完成 Workload。
+- 如果要验证一个简单的应用,Local 模式是最方便的。实际应用中大多使用 Standalone 或者 Yarn Cluster。

+-------------------------------------------------------------------

 #### Flink 的 HA

+-------------------------------------------------------------------

 #### Monitoring REST API

 https://ci.apache.org/projects/flink/flink-docs-stable/monitoring/rest_api.html#monitoring-rest-api

+-------------------------------------------------------------------

 #### 主要核心 API
 
 - 
 - 
 - 
 - Kafka Connectors
 - Elasticsearch sink

+-------------------------------------------------------------------

 #### Table & SQL API(关系型 API)

@@ -218,6 +358,5 @@ SQL API:支持标准SQL(自1.1.0版本开始)
 - [Flink学习笔记(4):基本概念](https://www.jianshu.com/p/0cd1db4282be)
 - [Apache Flink:特性、概念、组件栈、架构及原理分析](http://shiyanjun.cn/archives/1508.html)
 - [Flink 原理与实现:理解 Flink 中的计算资源](http://wuchong.me/blog/2016/05/09/flink-internals-understanding-execution-resources/)
-- []()
-- []()
+- [Flink实战教程](https://liguohua-bigdata.gitbooks.io/simple-flink/content/)

From 36b4d38adc13b54503d7ae25a65e66d5eda2e933 Mon Sep 17 00:00:00 2001
From: judasn
Date: Tue, 11 Dec 2018 17:32:04 +0800
Subject: [PATCH 207/330] :construction: MySQL

---
 markdown-file/Mysql-Install-And-Settings.md | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)

diff --git a/markdown-file/Mysql-Install-And-Settings.md b/markdown-file/Mysql-Install-And-Settings.md
index 1af74312..98fdbf34 100644
--- a/markdown-file/Mysql-Install-And-Settings.md
+++ b/markdown-file/Mysql-Install-And-Settings.md
@@ -192,7 +192,7 @@ table_open_cache=256
 - 在终端中执行(CentOS 7):`systemctl stop mysql`
 - 在终端中执行(前面添加的 Linux 用户 mysql 必须有存在):`/usr/local/mysql/bin/mysqld --skip-grant-tables --user=mysql`
     - 此时 MySQL 服务会一直处于监听状态,你需要另起一个终端窗口来执行接下来的操作
-    - 在终端中执行:`mysql -u root mysql`
+    - 在终端中执行:`mysql -u root mysql` 或者:`mysql -h 127.0.0.1 -u root -P 3306 -p`
 - 把密码改为:123456,进入 MySQL 命令后执行:`UPDATE user SET Password=PASSWORD('123456') where USER='root';FLUSH PRIVILEGES;`
 - 然后重启 MySQL 服务(CentOS 6):`service mysql restart`
 - 然后重启 MySQL 服务(CentOS 7):`systemctl restart mysql`
@@ -221,6 +221,24 @@ set password = password('新密码');
 FLUSH PRIVILEGES;
 ```
 
+## MySQL 5.7 的 only_full_group_by 报错
+
+- 报错内容:
+
+```
+Expression #1 of ORDER BY clause is not in GROUP BY clause and 
contains nonaggregated column 'youmeek.nm.id'
+which is not functionally dependent on columns in GROUP BY clause;
+this is incompatible with sql_mode=only_full_group_by
+```
+
+- 查下自己当前的模式:`select version(), @@sql_mode;`
+- 解决办法,修改 my.cnf,增加这一行(注意:配置文件里行尾不要带分号):
+
+```
+sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION
+```
+
+
 ## 小内存机子,MySQL 频繁挂掉解决办法(1G + CentOS 7.4)
 
 - 保存系统日志到本地进行查看:`cd /var/log/ && sz messages`

From 435aa451da88e6ccf9690af4ea27ea4ffc7fef7d Mon Sep 17 00:00:00 2001
From: judasn
Date: Wed, 12 Dec 2018 11:01:22 +0800
Subject: [PATCH 208/330] :construction: wrk

---
 markdown-file/wrk-Install-And-Settings.md | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/markdown-file/wrk-Install-And-Settings.md b/markdown-file/wrk-Install-And-Settings.md
index 6e3dbe5f..0b96c3c7 100644
--- a/markdown-file/wrk-Install-And-Settings.md
+++ b/markdown-file/wrk-Install-And-Settings.md
@@ -41,6 +41,19 @@ Requests/sec:    755.26
 Transfer/sec:     11.08MB
 ```
 
+#### 使用 lua 脚本(发送一个 post 请求)
+
+- 创建:`vim /opt/post-wrk.lua`
+
+```
+wrk.method = "POST"
+wrk.body = "hms_user_id=222222&routing_key=ad.sys_user.add"
+wrk.headers["Content-Type"] = "application/x-www-form-urlencoded"
+```
+
+- 测试:`wrk -t10 -c100 -d15s --script=/opt/post-wrk.lua --latency http://127.0.0.1:9090/websocket/api/send-by-user-id`
+
+
 ## 其他说明
 
 - wrk 使用的是 HTTP/1.1,缺省开启的是长连接
@@ -48,4 +61,5 @@ Transfer/sec:     11.08MB
 
 ## 资料
 
-- 
\ No newline at end of file
+- 
+- 
\ No newline at end of file

From 43f1a2db31f3c23130eaeed2cddce8d317b5aedb Mon Sep 17 00:00:00 2001
From: "Judas.n"
Date: Fri, 14 Dec 2018 12:08:19 +0800
Subject: [PATCH 209/330] Update README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index b5a6a443..af84c871 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
 
 ## 初衷(Original Intention)
 
-- 整理下自己所学
+- 整理下自己所学。**但是比较随意,所以很多地方不够严谨,所以请带着批评的思维阅读。**
 - 带动更多的人进入 Linux 世界,特别是做 Java 开发的人
 - Github 项目地址,欢迎 `Fork`、`Star`:
 - 文章中用到的一些安装包归纳整理:

From a4249b2b54cb9b03c8621bde9fb39c5a638ec12f Mon Sep 17 00:00:00 2001
From: judasn
Date: Mon, 17 Dec 2018 18:37:09 +0800
Subject: [PATCH 210/330] :construction: bash

---
 markdown-file/CentOS6-and-CentOS7.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/markdown-file/CentOS6-and-CentOS7.md b/markdown-file/CentOS6-and-CentOS7.md
index 87afe8d8..7dd26640 100644
--- a/markdown-file/CentOS6-and-CentOS7.md
+++ b/markdown-file/CentOS6-and-CentOS7.md
@@ -36,6 +36,9 @@
 
 ### 开放端口
 
+- 一般设置软件端口有一个原则:
+    - 0 ~ 1024 系统保留,一般不要用到
+    - 1024 ~ 65535(2^16) 可以随意用
 - 添加单个端口:`firewall-cmd --zone=public --add-port=8883/tcp --permanent`
 - 添加范围端口:`firewall-cmd --zone=public --add-port=8883-8885/tcp --permanent`
 - 删除端口:`firewall-cmd --zone=public --remove-port=8883/tcp --permanent`

From 98e46c7b574364cef38ad155b3a23d6ff15c1fad Mon Sep 17 00:00:00 2001
From: judasn
Date: Mon, 17 Dec 2018 19:00:21 +0800
Subject: [PATCH 211/330] :construction: hadoop

---
 markdown-file/Hadoop-Install-And-Settings.md | 415 +++++++++++++++++++
 1 file changed, 415 insertions(+)
 create mode 100644 markdown-file/Hadoop-Install-And-Settings.md

diff --git a/markdown-file/Hadoop-Install-And-Settings.md b/markdown-file/Hadoop-Install-And-Settings.md
new file mode 100644
index 00000000..0f8fafc6
--- /dev/null
+++ b/markdown-file/Hadoop-Install-And-Settings.md
@@ -0,0 +1,415 @@
+# Hadoop 安装和配置
+
+
+## Hadoop 说明
+
+- Hadoop 官网:
+- Hadoop 官网下载:
+
+## 基础环境
+
+- 学习机器 2C4G(生产最少 8G):
+    - 172.16.0.17
+    - 172.16.0.43
+    - 
172.16.0.180
+- 操作系统:CentOS 7.5
+    - root 用户
+- 所有机子必备:Java:1.8
+    - 确保:`echo $JAVA_HOME` 能查看到路径,并记下来路径
+- Hadoop:2.6.5
+- 关闭所有机子的防火墙:`systemctl stop firewalld.service`
+
+## 集群环境设置
+
+- Hadoop 集群具体来说包含两个集群:HDFS 集群和 YARN 集群,两者逻辑上分离,但物理上常在一起
+    - HDFS 集群:负责海量数据的存储,集群中的角色主要有 NameNode / DataNode
+    - YARN 集群:负责海量数据运算时的资源调度,集群中的角色主要有 ResourceManager /NodeManager
+    - HDFS 采用 master/worker 架构。一个 HDFS 集群是由一个 Namenode 和一定数目的 Datanodes 组成。Namenode 是一个中心服务器,负责管理文件系统的命名空间 (namespace) 以及客户端对文件的访问。集群中的 Datanode 一般是一个节点一个,负责管理它所在节点上的存储。
+- 分别给三台机子设置 hostname
+
+```
+hostnamectl --static set-hostname hadoop-master
+hostnamectl --static set-hostname hadoop-node1
+hostnamectl --static set-hostname hadoop-node2
+```
+
+
+- 修改 hosts
+
+```
+就按这个来,其他多余的别加,不然可能也会有影响
+vim /etc/hosts
+172.16.0.17 hadoop-master
+172.16.0.43 hadoop-node1
+172.16.0.180 hadoop-node2
+```
+
+
+- 对 hadoop-master 设置免密:
+
+```
+生成密钥对
+ssh-keygen -t rsa
+
+
+公钥内容写入 authorized_keys
+cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
+
+测试:
+ssh localhost
+
+```
+
+- 将公钥复制到两台 slave
+
+```
+ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@172.16.0.43,根据提示输入 hadoop-node1 机器的 root 密码,成功会有相应提示
+ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@172.16.0.180,根据提示输入 hadoop-node2 机器的 root 密码,成功会有相应提示
+
+
+在 hadoop-master 上测试:
+ssh hadoop-node1
+ssh hadoop-node2
+
+```
+
+
+
+## Hadoop 安装
+
+- 关于版本这件事,主要看你的技术生态圈。如果你的其他技术,比如 Spark,Flink 等不支持最新版,则就只能向下考虑。
+- 我这里技术栈,目前只能到:2.6.5,所以下面的内容都是基于 2.6.5 版本
+- 官网说明:
+- 分别在三台机子上都创建目录:
+
+```
+mkdir -p /data/hadoop/hdfs/name /data/hadoop/hdfs/data /data/hadoop/hdfs/tmp
+```
+
+- 下载 Hadoop:
+- 先在 hadoop-master 机子上安装
+
+```
+cd /usr/local && wget http://apache.claz.org/hadoop/common/hadoop-2.6.5/hadoop-2.6.5.tar.gz
+tar zxvf hadoop-2.6.5.tar.gz,有 191M 左右
+```
+
+- **给三台机子都先设置 HADOOP_HOME**
+
+```
+vim /etc/profile
+
+export HADOOP_HOME=/usr/local/hadoop-2.6.5
+export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
+
+source /etc/profile
+```
+
+
+## 修改 hadoop-master 配置
+
+
+```
+修改 JAVA_HOME
+vim $HADOOP_HOME/etc/hadoop/hadoop-env.sh
+
+把 25 行的
+export JAVA_HOME=${JAVA_HOME}
+都改为
+export JAVA_HOME=/usr/local/jdk1.8.0_181
+
+
+vim $HADOOP_HOME/etc/hadoop/yarn-env.sh
+
+加一行 export JAVA_HOME=/usr/local/jdk1.8.0_181
+
+```
+
+- hadoop.tmp.dir == 指定hadoop运行时产生文件的存储目录
+
+```
+vim $HADOOP_HOME/etc/hadoop/core-site.xml,改为:
+
+<configuration>
+    <property>
+        <name>hadoop.tmp.dir</name>
+        <value>file:/data/hadoop/hdfs/tmp</value>
+    </property>
+    <property>
+        <name>io.file.buffer.size</name>
+        <value>131072</value>
+    </property>
+    <property>
+        <name>fs.defaultFS</name>
+        <value>hdfs://hadoop-master:9000</value>
+    </property>
+    <property>
+        <name>hadoop.proxyuser.root.hosts</name>
+        <value>*</value>
+    </property>
+    <property>
+        <name>hadoop.proxyuser.root.groups</name>
+        <value>*</value>
+    </property>
+</configuration>
+```
+
+
+- 配置包括副本数量
+    - 最大值是 datanode 的个数
+- 数据存放目录
+
+```
+vim $HADOOP_HOME/etc/hadoop/hdfs-site.xml
+
+<configuration>
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+    </property>
+    <property>
+        <name>dfs.namenode.name.dir</name>
+        <value>file:/data/hadoop/hdfs/name</value>
+        <final>true</final>
+    </property>
+    <property>
+        <name>dfs.datanode.data.dir</name>
+        <value>file:/data/hadoop/hdfs/data</value>
+        <final>true</final>
+    </property>
+    <property>
+        <name>dfs.webhdfs.enabled</name>
+        <value>true</value>
+    </property>
+    <property>
+        <name>dfs.permissions</name>
+        <value>false</value>
+    </property>
+</configuration>
+```
+
+
+
+- 设置 YARN
+
+```
+新创建:vim $HADOOP_HOME/etc/hadoop/mapred-site.xml
+
+<configuration>
+    <property>
+        <name>mapreduce.framework.name</name>
+        <value>yarn</value>
+    </property>
+</configuration>
+```
+
+
+- yarn.resourcemanager.hostname == 指定YARN的老大(ResourceManager)的地址
+- yarn.nodemanager.aux-services == NodeManager上运行的附属服务。需配置成mapreduce_shuffle,才可运行MapReduce程序,默认值:""
+
+```
+vim $HADOOP_HOME/etc/hadoop/yarn-site.xml
+
+<configuration>
+    <property>
+        <name>yarn.resourcemanager.hostname</name>
+        <value>hadoop-master</value>
+    </property>
+    <property>
+        <name>yarn.nodemanager.aux-services</name>
+        <value>mapreduce_shuffle</value>
+    </property>
+</configuration>
+```
+
+
+- 配置 slave 相关信息
+
+
+```
+vim $HADOOP_HOME/etc/hadoop/slaves
+
+把默认的配置里面的 localhost 删除,换成:
+hadoop-node1
+hadoop-node2
+
+```
+
+
+```
+scp -r /usr/local/hadoop-2.6.5 root@hadoop-node1:/usr/local/
+
+scp -r 
/usr/local/hadoop-2.6.5 root@hadoop-node2:/usr/local/ + +``` + + +## hadoop-master 机子运行 + +``` +格式化 HDFS +hdfs namenode -format + +``` + +- 输出结果: + +``` +[root@hadoop-master hadoop-2.6.5]# hdfs namenode -format +18/12/17 17:47:17 INFO namenode.NameNode: STARTUP_MSG: +/************************************************************ +STARTUP_MSG: Starting NameNode +STARTUP_MSG: host = localhost/127.0.0.1 +STARTUP_MSG: args = [-format] +STARTUP_MSG: version = 2.6.5 +STARTUP_MSG: classpath = /usr/local/hadoop-2.6.5/etc/hadoop:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/apacheds-kerberos-codec-2.0.0-M15.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-io-2.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/activation-1.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/netty-3.6.2.Final.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jackson-mapper-asl-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/slf4j-api-1.7.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/junit-4.11.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/curator-recipes-2.6.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jasper-compiler-5.5.23.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jets3t-0.9.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-lang-2.6.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-digester-1.8.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jackson-core-asl-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/apacheds-i18n-2.0.0-M15.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/guava-11.0.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/gson-2.2.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jackson-jaxrs-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jettison-1.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jetty-6.1.26.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/api-util-1.0.0-M20.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/log4j-1.2.17.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-beanutils-core-1.8.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-httpclient-3.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-el-1.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/paranamer-2.3.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/slf4j-log4j12-1.7.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-collections-3.2.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jersey-server-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-net-3.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/hadoop-auth-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jasper-runtime-5.5.23.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jaxb-impl-2.2.3-1.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/hamcrest-core-1.3.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/stax-api-1.0-2.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-beanutils-1.7.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/protobuf-java-2.5.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/curator-framework-2.6.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/xz-1.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jsr305-1.3.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jsp-api-2.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-compress-1.4.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/asm-3.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jsch-0.1.42.jar:/usr/local/hadoop
-2.6.5/share/hadoop/common/lib/commons-configuration-1.6.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-cli-1.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jackson-xc-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-logging-1.1.3.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/htrace-core-3.0.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jetty-util-6.1.26.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-math3-3.1.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/mockito-all-1.8.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jersey-json-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/zookeeper-3.4.6.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/httpclient-4.2.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/servlet-api-2.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/xmlenc-0.52.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/httpcore-4.2.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/api-asn1-api-1.0.0-M20.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/java-xmlbuilder-0.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/avro-1.7.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jaxb-api-2.2.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-codec-1.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jersey-core-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/snappy-java-1.0.4.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/curator-client-2.6.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/hadoop-annotations-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/hadoop-common-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/hadoop-common-2.6.5-tests.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/hadoop-nfs-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/commons-io-2.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/netty-3.6.2.Final.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/jackson-mapper-asl-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/commons-lang-2.6.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/commons-daemon-1.0.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/jackson-core-asl-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/guava-11.0.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/jetty-6.1.26.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/log4j-1.2.17.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/commons-el-1.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/xercesImpl-2.9.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/jersey-server-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/jasper-runtime-5.5.23.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/protobuf-java-2.5.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/jsr305-1.3.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/xml-apis-1.3.04.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/jsp-api-2.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/asm-3.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/commons-cli-1.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/commons-logging-1.1.3.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/htrace-core-3.0.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/jetty-util-6.1.26.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/servlet-api-2.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/xmlenc-0.52.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/commons-codec-1.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/h
dfs/lib/jersey-core-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/hadoop-hdfs-2.6.5-tests.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/hadoop-hdfs-nfs-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/hadoop-hdfs-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/commons-io-2.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/activation-1.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/aopalliance-1.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/netty-3.6.2.Final.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jackson-mapper-asl-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/commons-lang-2.6.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jackson-core-asl-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/guice-3.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/guava-11.0.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jackson-jaxrs-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jettison-1.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jetty-6.1.26.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/log4j-1.2.17.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/commons-httpclient-3.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/commons-collections-3.2.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jersey-server-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jaxb-impl-2.2.3-1.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/stax-api-1.0-2.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/protobuf-java-2.5.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/xz-1.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jsr305-1.3.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jersey-client-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/guice-servlet-3.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/commons-compress-1.4.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/asm-3.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/commons-cli-1.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jersey-guice-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jackson-xc-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/commons-logging-1.1.3.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jetty-util-6.1.26.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/leveldbjni-all-1.8.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jersey-json-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/javax.inject-1.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/zookeeper-3.4.6.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/servlet-api-2.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jaxb-api-2.2.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jline-0.9.94.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/commons-codec-1.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jersey-core-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-server-web-proxy-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-api-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-server-common-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-registry-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-server-nodemanager-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-client-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-common-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-applications-unmanaged-am-launcher-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-server-tests-2.6.5.jar:/usr/local/hadoop-
2.6.5/share/hadoop/yarn/hadoop-yarn-server-resourcemanager-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-server-applicationhistoryservice-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-applications-distributedshell-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/commons-io-2.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/aopalliance-1.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/netty-3.6.2.Final.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/jackson-mapper-asl-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/junit-4.11.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/jackson-core-asl-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/guice-3.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/log4j-1.2.17.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/paranamer-2.3.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/jersey-server-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/hamcrest-core-1.3.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/protobuf-java-2.5.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/xz-1.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/guice-servlet-3.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/commons-compress-1.4.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/asm-3.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/jersey-guice-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/leveldbjni-all-1.8.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/javax.inject-1.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/avro-1.7.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/jersey-core-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/snappy-java-1.0.4.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/hadoop-annotations-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-app-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-common-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-shuffle-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-plugins-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.6.5-tests.jar:/usr/local/hadoop-2.6.5/contrib/capacity-scheduler/*.jar +STARTUP_MSG: build = https://github.com/apache/hadoop.git -r e8c9fe0b4c252caf2ebf1464220599650f119997; compiled by 'sjlee' on 2016-10-02T23:43Z +STARTUP_MSG: java = 1.8.0_181 +************************************************************/ +18/12/17 17:47:17 INFO namenode.NameNode: registered UNIX signal handlers for [TERM, HUP, INT] +18/12/17 17:47:17 INFO namenode.NameNode: createNameNode [-format] +Formatting using clusterid: CID-beba43b4-0881-48b4-8eda-5c3bca046398 +18/12/17 17:47:17 INFO namenode.FSNamesystem: No KeyProvider found. 
+18/12/17 17:47:17 INFO namenode.FSNamesystem: fsLock is fair:true +18/12/17 17:47:17 INFO blockmanagement.DatanodeManager: dfs.block.invalidate.limit=1000 +18/12/17 17:47:17 INFO blockmanagement.DatanodeManager: dfs.namenode.datanode.registration.ip-hostname-check=true +18/12/17 17:47:17 INFO blockmanagement.BlockManager: dfs.namenode.startup.delay.block.deletion.sec is set to 000:00:00:00.000 +18/12/17 17:47:17 INFO blockmanagement.BlockManager: The block deletion will start around 2018 Dec 17 17:47:17 +18/12/17 17:47:17 INFO util.GSet: Computing capacity for map BlocksMap +18/12/17 17:47:17 INFO util.GSet: VM type = 64-bit +18/12/17 17:47:17 INFO util.GSet: 2.0% max memory 889 MB = 17.8 MB +18/12/17 17:47:17 INFO util.GSet: capacity = 2^21 = 2097152 entries +18/12/17 17:47:17 INFO blockmanagement.BlockManager: dfs.block.access.token.enable=false +18/12/17 17:47:17 INFO blockmanagement.BlockManager: defaultReplication = 2 +18/12/17 17:47:17 INFO blockmanagement.BlockManager: maxReplication = 512 +18/12/17 17:47:17 INFO blockmanagement.BlockManager: minReplication = 1 +18/12/17 17:47:17 INFO blockmanagement.BlockManager: maxReplicationStreams = 2 +18/12/17 17:47:17 INFO blockmanagement.BlockManager: replicationRecheckInterval = 3000 +18/12/17 17:47:17 INFO blockmanagement.BlockManager: encryptDataTransfer = false +18/12/17 17:47:17 INFO blockmanagement.BlockManager: maxNumBlocksToLog = 1000 +18/12/17 17:47:17 INFO namenode.FSNamesystem: fsOwner = root (auth:SIMPLE) +18/12/17 17:47:17 INFO namenode.FSNamesystem: supergroup = supergroup +18/12/17 17:47:17 INFO namenode.FSNamesystem: isPermissionEnabled = false +18/12/17 17:47:17 INFO namenode.FSNamesystem: HA Enabled: false +18/12/17 17:47:17 INFO namenode.FSNamesystem: Append Enabled: true +18/12/17 17:47:17 INFO util.GSet: Computing capacity for map INodeMap +18/12/17 17:47:17 INFO util.GSet: VM type = 64-bit +18/12/17 17:47:17 INFO util.GSet: 1.0% max memory 889 MB = 8.9 MB +18/12/17 17:47:17 INFO util.GSet: capacity = 2^20 = 1048576 entries +18/12/17 17:47:17 INFO namenode.NameNode: Caching file names occuring more than 10 times +18/12/17 17:47:17 INFO util.GSet: Computing capacity for map cachedBlocks +18/12/17 17:47:17 INFO util.GSet: VM type = 64-bit +18/12/17 17:47:17 INFO util.GSet: 0.25% max memory 889 MB = 2.2 MB +18/12/17 17:47:17 INFO util.GSet: capacity = 2^18 = 262144 entries +18/12/17 17:47:17 INFO namenode.FSNamesystem: dfs.namenode.safemode.threshold-pct = 0.9990000128746033 +18/12/17 17:47:17 INFO namenode.FSNamesystem: dfs.namenode.safemode.min.datanodes = 0 +18/12/17 17:47:17 INFO namenode.FSNamesystem: dfs.namenode.safemode.extension = 30000 +18/12/17 17:47:17 INFO namenode.FSNamesystem: Retry cache on namenode is enabled +18/12/17 17:47:17 INFO namenode.FSNamesystem: Retry cache will use 0.03 of total heap and retry cache entry expiry time is 600000 millis +18/12/17 17:47:17 INFO util.GSet: Computing capacity for map NameNodeRetryCache +18/12/17 17:47:17 INFO util.GSet: VM type = 64-bit +18/12/17 17:47:17 INFO util.GSet: 0.029999999329447746% max memory 889 MB = 273.1 KB +18/12/17 17:47:17 INFO util.GSet: capacity = 2^15 = 32768 entries +18/12/17 17:47:17 INFO namenode.NNConf: ACLs enabled? false +18/12/17 17:47:17 INFO namenode.NNConf: XAttrs enabled? 
true +18/12/17 17:47:17 INFO namenode.NNConf: Maximum size of an xattr: 16384 +18/12/17 17:47:17 INFO namenode.FSImage: Allocated new BlockPoolId: BP-233285725-127.0.0.1-1545040037972 +18/12/17 17:47:18 INFO common.Storage: Storage directory /data/hadoop/hdfs/name has been successfully formatted. +18/12/17 17:47:18 INFO namenode.FSImageFormatProtobuf: Saving image file /data/hadoop/hdfs/name/current/fsimage.ckpt_0000000000000000000 using no compression +18/12/17 17:47:18 INFO namenode.FSImageFormatProtobuf: Image file /data/hadoop/hdfs/name/current/fsimage.ckpt_0000000000000000000 of size 321 bytes saved in 0 seconds. +18/12/17 17:47:18 INFO namenode.NNStorageRetentionManager: Going to retain 1 images with txid >= 0 +18/12/17 17:47:18 INFO util.ExitUtil: Exiting with status 0 +18/12/17 17:47:18 INFO namenode.NameNode: SHUTDOWN_MSG: +/************************************************************ +SHUTDOWN_MSG: Shutting down NameNode at localhost/127.0.0.1 +************************************************************/ + +``` + +- 启动 + +``` +启动:start-dfs.sh,根据提示一路 yes +hadoop-master 会启动:NameNode 和 SecondaryNameNode +从节点启动:DataNode + +查看:jps,可以看到: +21922 Jps +21603 NameNode +21787 SecondaryNameNode + + +然后再从节点可以 jps 可以看到: +19728 DataNode +19819 Jps + +``` + +``` + +查看运行更多情况:hdfs dfsadmin -report + +Configured Capacity: 0 (0 B) +Present Capacity: 0 (0 B) +DFS Remaining: 0 (0 B) +DFS Used: 0 (0 B) +DFS Used%: NaN% +Under replicated blocks: 0 +Blocks with corrupt replicas: 0 +Missing blocks: 0 +``` + + +``` + +如果需要停止:stop-dfs.sh + +查看 log:cd $HADOOP_HOME/logs + + +``` + +## YARN 运行 + +``` +start-yarn.sh +然后 jps 你会看到一个:ResourceManager + +从节点你会看到:NodeManager + +停止:stop-yarn.sh + + +``` + +- 可以看到当前运行的所有端口:`netstat -tpnl | grep java` + + + +查看HDFS管理界面:http://hadoop-master:50070 +访问YARN管理界面:http://hadoop-master:8088 + + + +搭建完成之后,我们运行一个Mapreduce作业感受一下: +hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.5.jar pi 5 10 +hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.5.jar wordcount /data/input /data/output/result + + +## 资料 + +- +- +- \ No newline at end of file From 48bafd15e091f37d676b3bdf1a451752ed931e5a Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 18 Dec 2018 09:46:38 +0800 Subject: [PATCH 212/330] :construction: bash --- markdown-file/Bash.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/markdown-file/Bash.md b/markdown-file/Bash.md index 1017bb15..874d390e 100644 --- a/markdown-file/Bash.md +++ b/markdown-file/Bash.md @@ -130,7 +130,8 @@ drwxr-xr-x. 5 root root 4096 3月 26 10:57,其中最前面的 d 表示这是 - `kill` - `kill 1234`,结束 pid 为 1234 的进程 - `kill -9 1234`,强制结束 pid 为 1234 的进程(慎重) - - `killall java`,杀死同一进程组内的所有为 java 进程 + - `killall java`,结束同一进程组内的所有为 java 进程 + - `ps -ef|grep hadoop|grep -v grep|cut -c 9-15|xargs kill -9`,结束包含关键字 hadoop 的所有进程 - `head` - `head -n 10 spring.ini`,查看当前文件的前 10 行内容 - `tail` From f98180771c59db6277d60f1af2c553bfa0baff34 Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 18 Dec 2018 11:21:29 +0800 Subject: [PATCH 213/330] :construction: bash --- markdown-file/Bash.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/markdown-file/Bash.md b/markdown-file/Bash.md index 874d390e..396526d4 100644 --- a/markdown-file/Bash.md +++ b/markdown-file/Bash.md @@ -140,6 +140,8 @@ drwxr-xr-x. 
5 root root 4096 3月 26 10:57,其中最前面的 d 表示这是
 - `kill`
     - `kill 1234`,结束 pid 为 1234 的进程
     - `kill -9 1234`,强制结束 pid 为 1234 的进程(慎重)
-    - `killall java`,杀死同一进程组内的所有为 java 进程
+    - `killall java`,结束同一进程组内的所有 java 进程
+    - `ps -ef|grep hadoop|grep -v grep|cut -c 9-15|xargs kill -9`,结束包含关键字 hadoop 的所有进程
 - `head`
     - `head -n 10 spring.ini`,查看当前文件的前 10 行内容
 - `tail`

From f98180771c59db6277d60f1af2c553bfa0baff34 Mon Sep 17 00:00:00 2001
From: judasn
Date: Tue, 18 Dec 2018 11:21:29 +0800
Subject: [PATCH 213/330] :construction: bash

---
 markdown-file/Bash.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/markdown-file/Bash.md b/markdown-file/Bash.md
index 874d390e..396526d4 100644
--- a/markdown-file/Bash.md
+++ b/markdown-file/Bash.md
@@ -140,6 +140,8 @@
 
 ## 用户、权限-相关命令
 
+- 使用 pem 证书登录:`ssh -i /opt/mykey.pem root@192.168.0.70`
+    - 证书权限不能太大,不然无法使用:`chmod 600 mykey.pem`
 - `hostname`,查看当前登陆用户全名
 - `cat /etc/group`,查看所有组
 - `cat /etc/passwd`,查看所有用户

From 0a01ab9035be00fb4f71e495570ed7a2bc81a44d Mon Sep 17 00:00:00 2001
From: judasn
Date: Tue, 18 Dec 2018 11:35:16 +0800
Subject: [PATCH 214/330] :construction: SSH

---
 markdown-file/SSH-login-without-password.md | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/markdown-file/SSH-login-without-password.md b/markdown-file/SSH-login-without-password.md
index ec33a561..b13e35cd 100644
--- a/markdown-file/SSH-login-without-password.md
+++ b/markdown-file/SSH-login-without-password.md
@@ -14,6 +14,8 @@
 - 在 A 机器上输入命令:`ssh-keygen`
 - 根据提示回车,共有三次交互提示,都回车即可。
 - 生成的密钥目录在:**/root/.ssh**
+- 写入:`cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys`
+- 测试:`ssh localhost`
 
 ## 把 A 的公钥发给 B
 
@@ -23,4 +25,17 @@
 
 ## 测试 A 免密登录到 B
 
-- 在 A 机器上输入命令:`ssh -p 22 root@192.168.1.105`,则会相应登录成功的提示
\ No newline at end of file
+- 在 A 机器上输入命令:`ssh -p 22 root@192.168.1.105`,则会有相应登录成功的提示
+
+-------------------------------------------------------------------
+
+## 如果是用 pem 登录的话,ssh-copy-id 是无法使用的
+
+- 先保存 A 的 pub 到本地:`sz /root/.ssh/id_rsa.pub`
+- 登录 B 机子:`cd /root/.ssh/`
+- 如果 B 机子没有 authorized_keys 文件则创建:`touch /root/.ssh/authorized_keys`
+    - 设置权限:`chmod 600 /root/.ssh/authorized_keys`
+- 上传 pub 文件到 B 机子,并在 B 机子上执行:`cd /root/.ssh/ && cat id_rsa.pub >> authorized_keys`
+
+
+

From a252c14cde61fd88194393097407c89e0c51358a Mon Sep 17 00:00:00 2001
From: judasn
Date: Tue, 18 Dec 2018 15:06:47 +0800
Subject: [PATCH 215/330] :construction: Ansible

---
 README.md                                     |   1 +
 SUMMARY.md                                    |   3 +-
 TOC.md                                        |   3 +-
 markdown-file/Ansible-Install-And-Settings.md | 214 ++++++++++++++++++
 4 files changed, 219 insertions(+), 2 deletions(-)
 create mode 100644 markdown-file/Ansible-Install-And-Settings.md

diff --git a/README.md b/README.md
index af84c871..154c9bf3 100644
--- a/README.md
+++ b/README.md
@@ -99,6 +99,7 @@
 - [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md)
 - [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md)
 - [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md)
+- [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md)
 
 ## 联系(Contact)
 
diff --git a/SUMMARY.md b/SUMMARY.md
index 9402b78b..ebe606dc 100644
--- a/SUMMARY.md
+++ b/SUMMARY.md
@@ -84,4 +84,5 @@
 * [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md)
 * [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md)
 * [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md)
-* [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md)
\ No newline at end of file
+* [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md)
+* [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md)
\ No newline at end of file
diff --git a/TOC.md b/TOC.md
index 8cb10242..d87f0ab8 100644
--- a/TOC.md
+++ b/TOC.md
@@ -81,4 +81,5 @@
 - [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md)
 - [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md)
 - [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md)
-- [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md)
\ No newline at end of file
+- [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md)
+- [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md)
\ No newline at end of file
diff --git a/markdown-file/Ansible-Install-And-Settings.md 
b/markdown-file/Ansible-Install-And-Settings.md new file mode 100644 index 00000000..91011077 --- /dev/null +++ b/markdown-file/Ansible-Install-And-Settings.md @@ -0,0 +1,214 @@ +# Ansible 安装和配置 + + +## Ansible 说明 + +- Ansible 官网: +- Ansible 官网 Github: +- Ansible 官网文档: +- 简单讲:它的作用就是把写 shell 这件事变成标准化、模块化。方便更好的自动化运维 + +## 安装 + +- 官网说明: +- CentOS:`sudo yum install -y ansible` + - 查看版本:`ansible --version` + +------------------------------------------------------------------- + +## 配置基本概念 + +#### Ansible 基本配置文件顺序 + +- Ansible 执行的时候会按照以下顺序查找配置项,所以修改的时候要特别注意改的是哪个文件 + +``` +ANSIBLE_CONFIG (环境变量) +ansible.cfg (脚本所在当前目录下) +~/.ansible.cfg (用户家目录下,默认没有) +/etc/ansible/ansible.cfg(安装后会自动生成) +``` + + +#### 配置远程主机地址 (Ansible 称这些地址为 Inventory) + +- 假设我有 3 台机子: + - 192.168.0.223 + - 192.168.0.70 + - 192.168.0.103 +- 官网对此的配置说明: + +###### 给这三台机子设置免密登录的情况(一般推荐方式) + +- 编辑 Ansible 配置文件:`vim /etc/ansible/hosts` +- 添加如下内容 + +``` +[hadoop-host] +192.168.0.223 +192.168.0.70 +192.168.0.103 +``` + +- 其中 `[hadoop-host]` 表示这些主机代表的一个组名 + + +###### 如果不设置免密,直接采用账号密码(容易泄露信息) + + +- 编辑 Ansible 配置文件:`vim /etc/ansible/hosts` +- 添加如下内容 + +``` +[hadoop-host] +hadoop-master ansible_host=192.168.0.223 ansible_user=root ansible_ssh_pass=123456 +hadoop-node1 ansible_host=192.168.0.70 ansible_user=root ansible_ssh_pass=123456 +hadoop-node2 ansible_host=192.168.0.103 ansible_user=root ansible_ssh_pass=123456 +``` + + + +## 简单使用(`ad hoc`方式) + +- ad hoc 官网: + +##### 运行 Ansible + +- 运行 Ansible 的 `ping` 命令,看看配置正确时输出如下: + +``` +sudo ansible --private-key ~/.ssh/id_rsa all -m ping +``` + +- 让远程所有主机都执行 `ps` 命令,输出如下 + +``` +ansible all -a 'ps' +``` + +- 让远程所有 hadoop-host 组的主机都执行 `ps` 命令,输出如下 + +``` +ansible hadoop-host -a 'ps' +``` + +------------------------------------------------------------------- + +## Playbook 脚本方式 + +- 官网: +- 一些语法: +- playbook(剧本),顾名思义,就是需要定义一个脚本或者说配置文件,然后定义好要做什么。之后 ansible 就会根据 playbook 脚本对远程主机进行操作 + +#### 简单脚本 + +- 下面脚本让所有远程主机执行 `whoami` 命令,并把结果(当前用户名)输出到 `/opt/whoami.txt` 文件 +- 创建脚本文件:`vim /opt/simple-playbook.yml` + +``` +- hosts: all + tasks: + - name: whoami + shell: 'whoami > /opt/whoami.txt' +``` + +- 执行命令:`ansible-playbook /opt/simple-playbook.yml`,结果如下,并且 opt 下也有文件生成 + +``` +PLAY [all] ************************************************************************************************************************** + +TASK [Gathering Facts] ************************************************************************************************************** +ok: [192.168.0.223] +ok: [192.168.0.103] +ok: [192.168.0.70] + +TASK [whoami] *********************************************************************************************************************** +changed: [192.168.0.103] +changed: [192.168.0.223] +changed: [192.168.0.70] + +PLAY RECAP ************************************************************************************************************************** +192.168.0.103 : ok=2 changed=1 unreachable=0 failed=0 +192.168.0.223 : ok=2 changed=1 unreachable=0 failed=0 +192.168.0.70 : ok=2 changed=1 unreachable=0 failed=0 +``` + +------------------------------------------------------------------- + +## 平时用来测试 + +- 创建脚本文件:`vim /opt/test-playbook.yml` + +``` +- hosts: hadoop-test + remote_user: root + vars: + java_install_folder: /usr/local + tasks: + # 按行的方式写入 + - name: Set JAVA_HOME 1 + lineinfile: + dest=/etc/profile + line="JAVA_HOME={{ java_install_folder }}/jdk1.8.0_181" + # 按块的方式写入,#{mark} 会被自动替换成:begin 和 end 字符来包裹整块内容(我这里自己定义了词语) + - name: Set JAVA_HOME 2 + blockinfile: + path: /etc/profile + marker: "#{mark} JDK ENV" 
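+      # 注:marker_begin / marker_end 是自定义的块起止标记词(blockinfile 默认用 BEGIN / END),会代入上面 marker 模板里的 {mark}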
+ marker_begin: "开始" + marker_end: "结束" + block: | + export JAVA_HOME={{ java_install_folder }}/jdk1.8.0_181 + export PATH=$PATH:$JAVA_HOME/bin +``` + +- 执行命令:`ansible-playbook /opt/test-playbook.yml` + +------------------------------------------------------------------- + +## 更多 playbook 实战 + +#### 部署 JDK + +- 创建脚本文件:`vim /opt/jdk8-playbook.yml` + +``` +- hosts: hadoop-host + remote_user: root + vars: + java_install_folder: /usr/local + tasks: + - name: copy jdk + copy: src=/opt/jdk-8u181-linux-x64.tar.gz dest={{ java_install_folder }} + + - name: tar jdk + shell: chdir={{ java_install_folder }} tar zxf jdk-8u181-linux-x64.tar.gz + + - name: Set JAVA_HOME + blockinfile: + path: /etc/profile + marker: "#{mark} JDK ENV" + block: | + JAVA_HOME={{ java_install_folder }}/jdk1.8.0_181 + JRE_HOME=$JAVA_HOME/jre + PATH=$PATH:$JAVA_HOME/bin + CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar + export JAVA_HOME + export JRE_HOME + export PATH + export CLASSPATH + + - name: source profile + shell: source /etc/profile +``` + + +- 执行命令:`ansible-playbook /opt/jdk8-playbook.yml` + + +## 资料 + + +- +- +- \ No newline at end of file From 16c09a80ec4b53716597e87fe7b727e6d129d771 Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 18 Dec 2018 15:48:58 +0800 Subject: [PATCH 216/330] :construction: hadoop --- README.md | 1 + SUMMARY.md | 1 + TOC.md | 1 + markdown-file/Hadoop-Install-And-Settings.md | 75 ++++++++++++++------ 4 files changed, 55 insertions(+), 23 deletions(-) diff --git a/README.md b/README.md index 154c9bf3..4aca2d77 100644 --- a/README.md +++ b/README.md @@ -94,6 +94,7 @@ - [CI 整套服务安装和使用](markdown-file/CI-Install-And-Usage.md) - [YApi 安装和配置](markdown-file/YApi-Install-And-Settings.md) - [Kafka 安装和配置](markdown-file/Kafka-Install-And-Settings.md) +- [Hadoop 安装和配置](markdown-file/Hadoop-Install-And-Settings.md) - [Showdoc 安装和配置](markdown-file/Showdoc-Install-And-Settings.md) - [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md) - [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) diff --git a/SUMMARY.md b/SUMMARY.md index ebe606dc..91dfcd0a 100644 --- a/SUMMARY.md +++ b/SUMMARY.md @@ -80,6 +80,7 @@ * [CI 整套服务安装和使用](markdown-file/CI-Install-And-Usage.md) * [YApi 安装和配置](markdown-file/YApi-Install-And-Settings.md) * [Kafka 安装和配置](markdown-file/Kafka-Install-And-Settings.md) +* [Hadoop 安装和配置](markdown-file/Hadoop-Install-And-Settings.md) * [Showdoc 安装和配置](markdown-file/Showdoc-Install-And-Settings.md) * [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md) * [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) diff --git a/TOC.md b/TOC.md index d87f0ab8..1b0ff377 100644 --- a/TOC.md +++ b/TOC.md @@ -77,6 +77,7 @@ - [Node.js 安装和使用](markdown-file/Node-Install-And-Usage.md) - [YApi 安装和配置](markdown-file/YApi-Install-And-Settings.md) - [Kafka 安装和配置](markdown-file/Kafka-Install-And-Settings.md) +- [Hadoop 安装和配置](markdown-file/Hadoop-Install-And-Settings.md) - [Showdoc 安装和配置](markdown-file/Showdoc-Install-And-Settings.md) - [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md) - [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) diff --git a/markdown-file/Hadoop-Install-And-Settings.md b/markdown-file/Hadoop-Install-And-Settings.md index 0f8fafc6..8f9542e0 100644 --- a/markdown-file/Hadoop-Install-And-Settings.md +++ b/markdown-file/Hadoop-Install-And-Settings.md @@ -61,6 +61,7 @@ ssh localhost ``` - 将公钥复制到两台 slave + - 如果你是采用 pem 登录的,可以看这个:[SSH 免密登录](SSH-login-without-password.md) ``` ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 
root@172.16.0.43,根据提示输入 hadoop-node1 机器的 root 密码,成功会有相应提示 @@ -95,6 +96,7 @@ tar zxvf hadoop-2.6.5.tar.gz,有 191M 左右 ``` - **给三台机子都先设置 HADOOP_HOME** + - 会 ansible playbook 会方便点:[Ansible 安装和配置](Ansible-Install-And-Settings.md) ``` vim /etc/profile @@ -338,29 +340,31 @@ SHUTDOWN_MSG: Shutting down NameNode at localhost/127.0.0.1 ``` -- 启动 +## HDFS 启动 + +- 启动:start-dfs.sh,根据提示一路 yes ``` -启动:start-dfs.sh,根据提示一路 yes -hadoop-master 会启动:NameNode 和 SecondaryNameNode -从节点启动:DataNode +这个命令效果: +主节点会启动任务:NameNode 和 SecondaryNameNode +从节点会启动任务:DataNode + -查看:jps,可以看到: +主节点查看:jps,可以看到: 21922 Jps 21603 NameNode 21787 SecondaryNameNode -然后再从节点可以 jps 可以看到: +从节点查看:jps 可以看到: 19728 DataNode 19819 Jps - ``` -``` -查看运行更多情况:hdfs dfsadmin -report +- 查看运行更多情况:`hdfs dfsadmin -report` +``` Configured Capacity: 0 (0 B) Present Capacity: 0 (0 B) DFS Remaining: 0 (0 B) @@ -371,15 +375,9 @@ Blocks with corrupt replicas: 0 Missing blocks: 0 ``` +- 如果需要停止:`stop-dfs.sh` +- 查看 log:`cd $HADOOP_HOME/logs` -``` - -如果需要停止:stop-dfs.sh - -查看 log:cd $HADOOP_HOME/logs - - -``` ## YARN 运行 @@ -391,22 +389,53 @@ start-yarn.sh 停止:stop-yarn.sh +``` + +## 端口情况 +- 主节点当前运行的所有端口:`netstat -tpnl | grep java` +- 会用到端口(为了方便展示,整理下顺序): + +``` +tcp 0 0 172.16.0.17:9000 0.0.0.0:* LISTEN 22932/java >> NameNode +tcp 0 0 0.0.0.0:50070 0.0.0.0:* LISTEN 22932/java >> NameNode +tcp 0 0 0.0.0.0:50090 0.0.0.0:* LISTEN 23125/java >> SecondaryNameNode +tcp6 0 0 172.16.0.17:8030 :::* LISTEN 23462/java >> ResourceManager +tcp6 0 0 172.16.0.17:8031 :::* LISTEN 23462/java >> ResourceManager +tcp6 0 0 172.16.0.17:8032 :::* LISTEN 23462/java >> ResourceManager +tcp6 0 0 172.16.0.17:8033 :::* LISTEN 23462/java >> ResourceManager +tcp6 0 0 172.16.0.17:8088 :::* LISTEN 23462/java >> ResourceManager +``` + +- 从节点当前运行的所有端口:`netstat -tpnl | grep java` +- 会用到端口(为了方便展示,整理下顺序): + +``` +tcp 0 0 0.0.0.0:50010 0.0.0.0:* LISTEN 14545/java >> DataNode +tcp 0 0 0.0.0.0:50020 0.0.0.0:* LISTEN 14545/java >> DataNode +tcp 0 0 0.0.0.0:50075 0.0.0.0:* LISTEN 14545/java >> DataNode +tcp6 0 0 :::8040 :::* LISTEN 14698/java >> NodeManager +tcp6 0 0 :::8042 :::* LISTEN 14698/java >> NodeManager +tcp6 0 0 :::13562 :::* LISTEN 14698/java >> NodeManager +tcp6 0 0 :::37481 :::* LISTEN 14698/java >> NodeManager ``` -- 可以看到当前运行的所有端口:`netstat -tpnl | grep java` +------------------------------------------------------------------- +## 管理界面 +- 查看 HDFS 管理界面: +- 访问 YARN 管理界面: -查看HDFS管理界面:http://hadoop-master:50070 -访问YARN管理界面:http://hadoop-master:8088 +------------------------------------------------------------------- +## 运行作业 -搭建完成之后,我们运行一个Mapreduce作业感受一下: -hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.5.jar pi 5 10 -hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.5.jar wordcount /data/input /data/output/result +- 运行一个 Mapreduce 作业试试: + - `hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.5.jar pi 5 10` +------------------------------------------------------------------- ## 资料 From c9033c181cedff12ce3c88b4eb093e447692e98b Mon Sep 17 00:00:00 2001 From: judasn Date: Tue, 18 Dec 2018 18:48:42 +0800 Subject: [PATCH 217/330] :construction: hadoop --- markdown-file/Hadoop-Install-And-Settings.md | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/markdown-file/Hadoop-Install-And-Settings.md b/markdown-file/Hadoop-Install-And-Settings.md index 8f9542e0..d39a2e42 100644 --- a/markdown-file/Hadoop-Install-And-Settings.md +++ b/markdown-file/Hadoop-Install-And-Settings.md @@ -432,8 +432,21 
@@ tcp6 0 0 :::37481 :::* LISTEN ## 运行作业 +- 在主节点上操作 - 运行一个 Mapreduce 作业试试: - - `hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.5.jar pi 5 10` + - 计算 π:`hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.5.jar pi 5 10` +- 运行一个文件相关作业: + - 由于运行 hadoop 时指定的输入文件只能是 HDFS 文件系统中的文件,所以我们必须将要进行 wordcount 的文件从本地文件系统拷贝到 HDFS 文件系统中。 + - 查看目前根目录结构:`hadoop fs -ls /` + - 创建目录:`hadoop fs -mkdir -p /tmp/zch/wordcount_input_dir` + - 上传文件:`hadoop fs -put /opt/input.txt /tmp/zch/wordcount_input_dir` + - 查看上传的目录下是否有文件:`hadoop fs -ls /tmp/zch/wordcount_input_dir` + - 向 yarn 提交作业,计算单词个数:`hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.5.jar wordcount /tmp/zch/wordcount_input_dir /tmp/zch/wordcount_output_dir` + - 查看计算结果输出的目录:`hadoop fs -ls /tmp/zch/wordcount_output_dir` + - 查看计算结果输出内容:`hadoop fs -cat /tmp/zch/wordcount_output_dir/part-r-00000` +- 查看正在运行的 Hadoop 任务:`yarn application -list` +- 关闭 Hadoop 任务进程:`yarn application -kill 你的ApplicationId` + ------------------------------------------------------------------- @@ -441,4 +454,5 @@ tcp6 0 0 :::37481 :::* LISTEN - - -- \ No newline at end of file +- +- \ No newline at end of file From 2ee0154670983f8e42c165bcb8cd4ce01b427f77 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 19 Dec 2018 11:01:33 +0800 Subject: [PATCH 218/330] :construction: docker --- .../shell/install_docker_disable_firewalld_centos7-aliyun.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/favorite-file/shell/install_docker_disable_firewalld_centos7-aliyun.sh b/favorite-file/shell/install_docker_disable_firewalld_centos7-aliyun.sh index 35080fed..587ca77b 100644 --- a/favorite-file/shell/install_docker_disable_firewalld_centos7-aliyun.sh +++ b/favorite-file/shell/install_docker_disable_firewalld_centos7-aliyun.sh @@ -45,7 +45,7 @@ docker run hello-world echo "-----------------------------------------安装 docker compose" echo "docker compose 的版本检查:https://docs.docker.com/compose/install/#install-compose" -curl -L https://github.com/docker/compose/releases/download/1.21.2/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose +curl -L "https://github.com/docker/compose/releases/download/1.23.1/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose chmod +x /usr/local/bin/docker-compose From 9d28869cc887408ea362e85ce7f22f3c41919984 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 19 Dec 2018 11:17:21 +0800 Subject: [PATCH 219/330] :construction: kafka --- markdown-file/Kafka-Install-And-Settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/markdown-file/Kafka-Install-And-Settings.md b/markdown-file/Kafka-Install-And-Settings.md index c23626e6..ecb40028 100644 --- a/markdown-file/Kafka-Install-And-Settings.md +++ b/markdown-file/Kafka-Install-And-Settings.md @@ -392,7 +392,7 @@ wurstmeister/kafka:latest ---------------------------------------------------------------------------------------------- -## Kafka 1.0.1 源码安装(也支持 1.0.2、0.11.0.3) +## Kafka 1.0.1 源码安装(也支持 1.0.2、0.11.0.3、0.10.2.2) - 测试环境:2G 内存足够 - 一台机子:CentOS 7.4,根据文章最开头,已经修改了 hosts From a366bc4944c95112d26a9c6f708d95f1209ae5f3 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 19 Dec 2018 11:19:25 +0800 Subject: [PATCH 220/330] :construction: Flink --- markdown-file/Flink-Install-And-Settings.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/markdown-file/Flink-Install-And-Settings.md b/markdown-file/Flink-Install-And-Settings.md index c2ea84d3..a50c9a27 
100644 --- a/markdown-file/Flink-Install-And-Settings.md +++ b/markdown-file/Flink-Install-And-Settings.md @@ -19,14 +19,15 @@ - 最终我选择了:Apache 1.7.0 Flink only Scala 2.11,共:240M - 解压:`tar zxf flink-*.tgz` - 进入根目录:`cd flink-1.7.0`,完整路径:`cd /usr/local/flink-1.7.0` -- 启动:`cd /usr/local/flink-1.7.0 && ./bin/start-cluster.sh` -- 停止:`cd /usr/local/flink-1.7.0 && ./bin/stop-cluster.sh` +- 改下目录名方便后面书写:`mv /usr/local/flink-1.7.0 /usr/local/flink` +- 启动:`cd /usr/local/flink && ./bin/start-cluster.sh` +- 停止:`cd /usr/local/flink && ./bin/stop-cluster.sh` - 查看日志:`tail -300f log/flink-*-standalonesession-*.log` - 浏览器访问 WEB 管理:`http://192.168.0.105:8081` ## Demo -- 运行程序解压包下也有一些 jar demo:`cd /usr/local/flink-1.7.0/examples` +- 运行程序解压包下也有一些 jar demo:`cd /usr/local/flink/examples` - 官网: - DataStream API: - DataSet API: @@ -53,7 +54,7 @@ mvn archetype:generate \ - 可以直接在 IntelliJ IDEA 上 run - 也可以交给服务器上 flink 执行,也有两种方式: - - 把 jar 自己上传 Flink 服务器运行:`cd /usr/local/flink-1.7.0 && ./bin/flink run -c com.youmeek.WordCount /opt/flink-simple-demo-1.0-SNAPSHOT.jar` + - 把 jar 自己上传 Flink 服务器运行:`cd /usr/local/flink && ./bin/flink run -c com.youmeek.WordCount /opt/flink-simple-demo-1.0-SNAPSHOT.jar` - 也可以通过 WEB UI 上传 jar: - 有一个 `Add New` 按钮可以上传 jar 包,然后填写 Class 路径:`com.youmeek.WordCount` - `parallelism` 表示并行度,填写数字,一般并行度设置为集群 CPU 核数总和的 2-3 倍(如果是单机模式不需要设置并行度) From aef0b8f1176c451449bc7f97020554385aeb386a Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 19 Dec 2018 11:40:53 +0800 Subject: [PATCH 221/330] :construction: MySQL --- markdown-file/Mysql-Install-And-Settings.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/markdown-file/Mysql-Install-And-Settings.md b/markdown-file/Mysql-Install-And-Settings.md index 98fdbf34..cc79011c 100644 --- a/markdown-file/Mysql-Install-And-Settings.md +++ b/markdown-file/Mysql-Install-And-Settings.md @@ -5,7 +5,7 @@ - 关掉:SELinux - 创建本地数据存储 + 配置文件目录:`mkdir -p /data/docker/mysql/datadir /data/docker/mysql/conf /data/docker/mysql/log` -- 在宿主机上创建一个配置文件:`vim /data/docker/mysql/conf/mycat-mysql-1.cnf`,内容如下: +- 在宿主机上创建一个配置文件:`vim /data/docker/mysql/conf/mysql-1.cnf`,内容如下: ``` # 该编码设置是我自己配置的 @@ -36,10 +36,11 @@ max_allowed_packet = 50M - 赋权(避免挂载的时候,一些程序需要容器中的用户的特定权限使用):`chmod -R 777 /data/docker/mysql/datadir /data/docker/mysql/log` - 赋权:`chown -R 0:0 /data/docker/mysql/conf` - - 配置文件的赋权比较特殊,如果是给 777 权限会报:[Warning] World-writable config file '/etc/mysql/conf.d/mycat-mysql-1.cnf' is ignored,所以这里要特殊对待。容器内是用 root 的 uid,所以这里与之相匹配赋权即可。 + - 配置文件的赋权比较特殊,如果是给 777 权限会报:[Warning] World-writable config file '/etc/mysql/conf.d/mysql-1.cnf' is ignored,所以这里要特殊对待。容器内是用 root 的 uid,所以这里与之相匹配赋权即可。 - 我是进入容器 bash 内,输入:`whoami && id`,看到默认用户的 uid 是 0,所以这里才 chown 0 -- `docker run -p 3306:3306 --name cloud-mysql -v /data/docker/mysql/datadir:/var/lib/mysql -v /data/docker/mysql/log:/var/log/mysql -v /data/docker/mysql/conf:/etc/mysql/conf.d -e MYSQL_ROOT_PASSWORD=adg123456 -d mysql:5.7` -- 连上容器:`docker exec -it 09747cd7d0bd /bin/bash` +- `docker run -p 3306:3306 --name cloud-mysql -v /data/docker/mysql/datadir:/var/lib/mysql -v /data/docker/mysql/log:/var/log/mysql -v /data/docker/mysql/conf:/etc/mysql/conf.d -e MYSQL_ROOT_PASSWORD=123456 -d mysql:5.7` +- 连上容器:`docker exec -it cloud-mysql /bin/bash` + - 连上 MySQL:`mysql -u root -p` - 关于容器的 MySQL 配置,官网是这样说的: >> The MySQL startup configuration is specified in the file /etc/mysql/my.cnf, and that file in turn includes any files found in the /etc/mysql/conf.d directory that end with .cnf.Settings in files in this directory will augment and/or 
override settings in /etc/mysql/my.cnf. If you want to use a customized MySQL configuration,you can create your alternative configuration file in a directory on the host machine and then mount that directory location as /etc/mysql/conf.d inside the mysql container.

From 90467b586aa2093201cec92b63c5f098be614b3d Mon Sep 17 00:00:00 2001
From: judasn
Date: Wed, 19 Dec 2018 11:53:24 +0800
Subject: [PATCH 222/330] :construction: MySQL

---
 markdown-file/Mysql-Install-And-Settings.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/markdown-file/Mysql-Install-And-Settings.md b/markdown-file/Mysql-Install-And-Settings.md
index cc79011c..3ab9a80a 100644
--- a/markdown-file/Mysql-Install-And-Settings.md
+++ b/markdown-file/Mysql-Install-And-Settings.md
@@ -41,6 +41,7 @@ max_allowed_packet = 50M
 - `docker run -p 3306:3306 --name cloud-mysql -v /data/docker/mysql/datadir:/var/lib/mysql -v /data/docker/mysql/log:/var/log/mysql -v /data/docker/mysql/conf:/etc/mysql/conf.d -e MYSQL_ROOT_PASSWORD=123456 -d mysql:5.7`
 - 连上容器:`docker exec -it cloud-mysql /bin/bash`
     - 连上 MySQL:`mysql -u root -p`
+    - 创建数据库:`CREATE DATABASE wormhole DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;`
 - 关于容器的 MySQL 配置,官网是这样说的:

From 83475e3ad554f95516c5ce35df7b8238a422ccb2 Mon Sep 17 00:00:00 2001
From: judasn
Date: Wed, 19 Dec 2018 12:12:39 +0800
Subject: [PATCH 223/330] :construction: Hadoop

---
 markdown-file/Hadoop-Install-And-Settings.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/markdown-file/Hadoop-Install-And-Settings.md b/markdown-file/Hadoop-Install-And-Settings.md
index d39a2e42..bead4ba5 100644
--- a/markdown-file/Hadoop-Install-And-Settings.md
+++ b/markdown-file/Hadoop-Install-And-Settings.md
@@ -438,6 +438,9 @@
 - 运行一个文件相关作业:
     - 由于运行 hadoop 时指定的输入文件只能是 HDFS 文件系统中的文件,所以我们必须将要进行 wordcount 的文件从本地文件系统拷贝到 HDFS 文件系统中。
     - 查看目前根目录结构:`hadoop fs -ls /`
+    - 查看目前根目录结构,另外写法:`hadoop fs -ls hdfs://linux-05:9000/`
+    - 或者列出目录以及下面的文件:`hadoop fs -ls -R /`
+    - 更多命令可以看:[hadoop HDFS常用文件操作命令](https://segmentfault.com/a/1190000002672666)
     - 创建目录:`hadoop fs -mkdir -p /tmp/zch/wordcount_input_dir`
     - 上传文件:`hadoop fs -put /opt/input.txt /tmp/zch/wordcount_input_dir`
     - 查看上传的目录下是否有文件:`hadoop fs -ls /tmp/zch/wordcount_input_dir`

From aa1854321b9d1e2f5a2e5cb5174c82a38f62dc73 Mon Sep 17 00:00:00 2001
From: judasn
Date: Wed, 19 Dec 2018 14:04:09 +0800
Subject: [PATCH 224/330] :construction: Hadoop

---
 markdown-file/Hadoop-Install-And-Settings.md | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/markdown-file/Hadoop-Install-And-Settings.md b/markdown-file/Hadoop-Install-And-Settings.md
index bead4ba5..142cc70f 100644
--- a/markdown-file/Hadoop-Install-And-Settings.md
+++ b/markdown-file/Hadoop-Install-And-Settings.md
@@ -424,8 +424,10 @@ tcp6 0 0 :::37481 :::* LISTEN
 
 ## 管理界面
 
-- 查看 HDFS 管理界面:
-- 访问 YARN 管理界面:
+- 查看 HDFS NameNode 管理界面:
+- 访问 YARN ResourceManager 管理界面:
+- 访问 NodeManager-1 管理界面:
+- 访问 NodeManager-2 管理界面:

From 
b0d3119db866e88d0ac5df7ee50e4b44f1a59729 Mon Sep 17 00:00:00 2001
From: judasn
Date: Wed, 19 Dec 2018 14:50:10 +0800
Subject: [PATCH 225/330] :construction: Spark

---
 markdown-file/Spark-Install-And-Settings.md | 46 +++++++++++++++++++++
 markdown-file/monitor.md                    |  2 +-
 2 files changed, 47 insertions(+), 1 deletion(-)
 create mode 100644 markdown-file/Spark-Install-And-Settings.md

diff --git a/markdown-file/Spark-Install-And-Settings.md b/markdown-file/Spark-Install-And-Settings.md
new file mode 100644
index 00000000..916a5ef1
--- /dev/null
+++ b/markdown-file/Spark-Install-And-Settings.md
@@ -0,0 +1,46 @@
+# Spark 安装和配置
+
+## 介绍
+
+- 2018-12 发布最新:2.4.0 版本
+- 官网:
+- 官网文档:
+- 官网下载:
+- 官网 Github:
+
+## 本地模式安装
+
+- CentOS 7.4
+- IP 地址:`192.168.0.105`
+- 必须 JDK 8.x
+- 因为个人原因,我这里 Hadoop 还是 2.6.5 版本,Spark 要用的是 2.2.0
+- Spark 2.2.0 官网文档:
+    - 192M,下载速度有点慢
+    - `cd /usr/local && wget https://archive.apache.org/dist/spark/spark-2.2.0/spark-2.2.0-bin-hadoop2.6.tgz`
+- 解压:`tar zxvf spark-2.2.0-bin-hadoop2.6.tgz`
+- 重命名:`mv /usr/local/spark-2.2.0-bin-hadoop2.6 /usr/local/spark`
+- 增加环境变量:
+
+```
+vim /etc/profile
+
+SPARK_HOME=/usr/local/spark
+PATH=$PATH:${SPARK_HOME}/bin:${SPARK_HOME}/sbin
+export SPARK_HOME
+export PATH
+
+source /etc/profile
+```
+
+- 复制配置模板:`cp $SPARK_HOME/conf/spark-env.sh.template $SPARK_HOME/conf/spark-env.sh`
+- 修改配置:`vim $SPARK_HOME/conf/spark-env.sh`
+- 假设我的 hadoop 路径是:/usr/local/hadoop-2.6.5,则在文件末尾增加:
+
+```
+export HADOOP_CONF_DIR=/usr/local/hadoop-2.6.5/etc/hadoop
+```
+
+
+## 资料
+
+- 
diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md
index 5505e34e..38ae729c 100644
--- a/markdown-file/monitor.md
+++ b/markdown-file/monitor.md
@@ -568,7 +568,7 @@ TOTAL:(总的流量) 12.9GB 229Mb 190Mb 193Mb
 
 ```
 
-### 端口使用情况
+### 端口使用情况(也可以用来查看端口占用)
 
 #### lsof

From 08f48891c0e64c79bfdeb913597707666e92fd1f Mon Sep 17 00:00:00 2001
From: judasn
Date: Wed, 19 Dec 2018 14:59:54 +0800
Subject: [PATCH 226/330] :construction: Spark

---
 markdown-file/Spark-Install-And-Settings.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/markdown-file/Spark-Install-And-Settings.md b/markdown-file/Spark-Install-And-Settings.md
index 916a5ef1..004ce09c 100644
--- a/markdown-file/Spark-Install-And-Settings.md
+++ b/markdown-file/Spark-Install-And-Settings.md
@@ -13,6 +13,7 @@
 - CentOS 7.4
 - IP 地址:`192.168.0.105`
 - 必须 JDK 8.x
+- 已经安装了 hadoop-2.6.5 集群(**这个细节注意**)
 - 因为个人原因,我这里 Hadoop 还是 2.6.5 版本,Spark 要用的是 2.2.0
 - Spark 2.2.0 官网文档:
     - 192M,下载速度有点慢
@@ -40,6 +41,8 @@ source /etc/profile
 export HADOOP_CONF_DIR=/usr/local/hadoop-2.6.5/etc/hadoop
 ```
 
+- 因为作业要交给 YARN 执行,所以本地配置到这里就可以了。
+
 
 ## 资料
 
 - 

From 0c8cae6d1366524c9dd5be4de391e499c832f014 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E9=9D=B3=E9=98=B3?= <260893248@qq.com>
Date: Tue, 25 Dec 2018 14:47:28 +0800
Subject: [PATCH 227/330] =?UTF-8?q?=E7=96=91=E4=BC=BC=E5=BA=94=E8=AF=A5?=
 =?UTF-8?q?=E6=98=AF=20'.'=20(=E8=A1=A8=E7=A4=BA=E9=9A=90=E8=97=8F?=
 =?UTF-8?q?=E6=96=87=E4=BB=B6)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

疑似应该是 '.'
(表示隐藏文件) --- markdown-file/Bash.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/markdown-file/Bash.md b/markdown-file/Bash.md index 396526d4..58dfcd55 100644 --- a/markdown-file/Bash.md +++ b/markdown-file/Bash.md @@ -48,7 +48,7 @@ - `last`,显示最近登录的帐户及时间 - `lastlog`,显示系统所有用户各自在最近登录的记录,如果没有登录过的用户会显示 **从未登陆过** - `ls`,列出当前目录下的所有没有隐藏的文件 / 文件夹。 - - `ls -a`,列出包括以.号开头的隐藏文件 / 文件夹(也就是所有文件) + - `ls -a`,列出包括以.号开头的隐藏文件 / 文件夹(也就是所有文件) - `ls -R`,显示出目录下以及其所有子目录的文件 / 文件夹(递归地方式,不显示隐藏的文件) - `ls -a -R`,显示出目录下以及其所有子目录的文件 / 文件夹(递归地方式,显示隐藏的文件) - `ls -al`,列出目录下所有文件(包含隐藏)的权限、所有者、文件大小、修改时间及名称(也就是显示详细信息) From 97e93c69862835abdf39b6f877f54283f30e3141 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=9D=B3=E9=98=B3?= <260893248@qq.com> Date: Tue, 25 Dec 2018 18:56:37 +0800 Subject: [PATCH 228/330] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E9=94=99=E5=88=AB?= =?UTF-8?q?=E5=AD=97=E4=B9=8B=E7=B1=BB?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 修改错别字之类 --- markdown-file/Vim-Install-And-Settings.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/markdown-file/Vim-Install-And-Settings.md b/markdown-file/Vim-Install-And-Settings.md index a2a3049d..77560192 100644 --- a/markdown-file/Vim-Install-And-Settings.md +++ b/markdown-file/Vim-Install-And-Settings.md @@ -30,7 +30,7 @@ - `l`,右 - `v`,按 v 之后按方向键可以选中你要选中的文字 - `gg`,跳到第 1 行 - - `G`,跳到第最后行 + - `G`,跳到最后一行 - `16G` 或 `:16`,跳到第 16 行 - `$`,到本行 **行尾** - `0`,到本行 **行头** @@ -94,7 +94,7 @@ - `:s/YouMeek/Judasn/`,把光标当前行第一个 YouMeek 替换为 Judasn - `:s/YouMeek/Judasn/g`,把光标当前行所有 YouMeek 替换为 Judasn - `:s#YouMeek/#Judasn/#`,除了使用斜杠作为分隔符之外,还可以使用 # 作为分隔符,此时中间出现的 / 不会作为分隔符,该命令表示:把光标当前行第一个 YouMeek/ 替换为 Judasn/ - - `:10,31s/YouMeek/Judasng`,把第 10 行到 31 行之间所有 YouMeek 替换为 Judasn + - `:10,31s/YouMeek/Judasn/g`,把第 10 行到 31 行之间所有 YouMeek 替换为 Judasn ## Vim 的特殊复制、黏贴 @@ -114,7 +114,7 @@ - 效果如下: - ![vim-for-server](https://raw.githubusercontent.com/wklken/gallery/master/vim/vim-for-server.png) - 需要特别注意的是,如果你平时粘贴内容到终端 Vim 出现缩进错乱,一般需要这样做: - - 进入 vim 后,按 `F5`,然后 `shirt + insert` 进行粘贴。这种事就不会错乱了。 + - 进入 vim 后,按 `F5`,然后 `shift + insert` 进行粘贴。这种事就不会错乱了。 - 原因是:`vim ~/.vimrc` 中有一行这样的设置:`set pastetoggle=` ## 资料 From 7e182686f3dc7e103831e023ef38db9c5367ccb8 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 26 Dec 2018 14:38:22 +0800 Subject: [PATCH 229/330] :construction: Kafka --- markdown-file/Kafka-Install-And-Settings.md | 1 + 1 file changed, 1 insertion(+) diff --git a/markdown-file/Kafka-Install-And-Settings.md b/markdown-file/Kafka-Install-And-Settings.md index ecb40028..61993859 100644 --- a/markdown-file/Kafka-Install-And-Settings.md +++ b/markdown-file/Kafka-Install-And-Settings.md @@ -42,6 +42,7 @@ - Partition:是物理上的概念,每个 Topic 包含一个或多个 Partition。一般有几个 Broker,填写分区最好是等于大于节点值。分区目的主要是数据分片,解决水平扩展、高吞吐量。当 Producer 生产消息的时候,消息会被算法计算后分配到对应的分区,Consumer 读取的时候算法也会帮我们找到消息所在分区,这是内部实现的,应用层面不用管。 - Replication-factor:副本。假设有 3 个 Broker 的情况下,当副本为 3 的时候每个 Partition 会在每个 Broker 都会存有一份,目的主要是容错。 - 其中有一个 Leader。 + - 如果你只有一个 Broker,但是创建 Topic 的时候指定 Replication-factor 为 3,则会报错 - Consumer Group:每个 Consumer 属于一个特定的 Consumer Group(可为每个 Consumer 指定 group name,若不指定 group name 则属于默认的 group)一般一个业务系统集群指定同一个一个 group id,然后一个业务系统集群只能一个节点来消费同一个消息。 - Consumer Group 信息存储在 zookeeper 中,需要通过 zookeeper 的客户端来查看和设置 - 如果某 Consumer Group 中 consumer 数量少于 partition 数量,则至少有一个 consumer 会消费多个 partition 的数据 From 67f74ec519d684daa37963d339d893316f5c33cc Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 26 Dec 2018 16:35:29 +0800 Subject: [PATCH 230/330] :construction: Kafka --- 
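上面的 Kafka 补丁提到:只有一个 Broker 却把 Replication-factor 指定为 3,创建 Topic 时会报错。下面补一段演示命令帮助理解这个限制;其中 Zookeeper 地址 `localhost:2181` 和 topic 名 `demo-topic` 只是示意值,报错文案随 Kafka 版本略有差异:

```
cd /usr/local/kafka

# 单 Broker 场景下指定 3 副本,创建会失败(示意)
bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 3 --partitions 1 --topic demo-topic
# 预期报错类似:Replication factor: 3 larger than available brokers: 1

# 把副本数降到不超过 Broker 数(这里是 1)即可创建成功
bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic demo-topic
```

演示完可以删除该 topic:`bin/kafka-topics.sh --delete --topic demo-topic --zookeeper localhost:2181`。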
.../Wormhole-Install-And-Settings.md | 303 ++++++++++++++++++ 1 file changed, 303 insertions(+) create mode 100644 markdown-file/Wormhole-Install-And-Settings.md diff --git a/markdown-file/Wormhole-Install-And-Settings.md b/markdown-file/Wormhole-Install-And-Settings.md new file mode 100644 index 00000000..47ca8a28 --- /dev/null +++ b/markdown-file/Wormhole-Install-And-Settings.md @@ -0,0 +1,303 @@ +# Wormhole Flink 最佳实践 + +## 前置声明 + +- 需要对流计算的一些基础概念有基础了解,比如:Source、Sink、YARN、Kafka 等 + + +## 基础环境 + +- 参考官网: +- 三台 4C8G 服务器 CentOS 7.4 + - hostname:`linux-05` + - hostname:`linux-06` + - hostname:`linux-07` +- 必须(版本请不要随便用,而是按照如下说明来): + - 一般情况下,我组件都是放在:`/usr/local` + - JDK 1.8(三台) + - Hadoop 集群(HDFS,YARN)(三台):2.6.5 + - Spark 单点(linux-05):2.2.0 + - Flink 单点(linux-05):1.5.1 + - Zookeeper(linux-05):3.4.13 + - Kafka(linux-05):0.10.2.2 + - MySQL(linux-05):5.7 + - 以上组件安装教程可以查看该教程:[点击我](https://github.com/judasn/Linux-Tutorial) +- 非必须: + - Elasticsearch(支持版本 5.x)(非必须,若无则无法查看 wormhole 处理数据的吞吐和延时) + - Grafana (支持版本 4.x)(非必须,若无则无法查看 wormhole 处理数据的吞吐和延时的图形化展示) + +------------------------------------------------------------------- + +## Wormhole 安装 + 配置 + +- 参考官网: +- 最终环境 application.conf 配置文件参考 + +``` + +akka.http.server.request-timeout = 120s + +wormholeServer { + cluster.id = "" #optional global uuid + host = "linux-05" + port = 8989 + ui.default.language = "Chinese" + token.timeout = 1 + token.secret.key = "iytr174395lclkb?lgj~8u;[=L:ljg" + admin.username = "admin" #default admin user name + admin.password = "admin" #default admin user password +} + +mysql = { + driver = "slick.driver.MySQLDriver$" + db = { + driver = "com.mysql.jdbc.Driver" + user = "root" + password = "123456" + url = "jdbc:mysql://localhost:3306/wormhole?useUnicode=true&characterEncoding=UTF-8&useSSL=false" + numThreads = 4 + minConnections = 4 + maxConnections = 10 + connectionTimeout = 3000 + } +} + +ldap = { + enabled = false + user = "" + pwd = "" + url = "" + dc = "" + read.timeout = 3000 + read.timeout = 5000 + connect = { + timeout = 5000 + pool = true + } +} + +spark = { + wormholeServer.user = "root" #WormholeServer linux user + wormholeServer.ssh.port = 22 #ssh port, please set WormholeServer linux user can password-less login itself remote + spark.home = "/usr/local/spark" + yarn.queue.name = "default" #WormholeServer submit spark streaming/job queue + wormhole.hdfs.root.path = "hdfs://linux-05/wormhole" #WormholeServer hdfslog data default hdfs root path + yarn.rm1.http.url = "linux-05:8088" #Yarn ActiveResourceManager address + yarn.rm2.http.url = "linux-05:8088" #Yarn StandbyResourceManager address +} + +flink = { + home = "/usr/local/flink" + yarn.queue.name = "default" + feedback.state.count=100 + checkpoint.enable=false + checkpoint.interval=60000 + stateBackend="hdfs://linux-05/flink-checkpoints" + feedback.interval=30 +} + +zookeeper = { + connection.url = "localhost:2181" #WormholeServer stream and flow interaction channel + wormhole.root.path = "/wormhole" #zookeeper +} + +kafka = { + #brokers.url = "localhost:6667" #WormholeServer feedback data store + brokers.url = "linux-05:9092" + zookeeper.url = "localhost:2181" + #topic.refactor = 3 + topic.refactor = 1 + using.cluster.suffix = false #if true, _${cluster.id} will be concatenated to consumer.feedback.topic + consumer = { + feedback.topic = "wormhole_feedback" + poll-interval = 20ms + poll-timeout = 1s + stop-timeout = 30s + close-timeout = 20s + commit-timeout = 70s + wakeup-timeout = 60s + max-wakeups = 10 + session.timeout.ms = 60000 + 
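+    # 注:下一行的 heartbeat.interval.ms 必须小于上面的 session.timeout.ms(Kafka 消费端要求,此处 50000 < 60000),这一组超时参数均为 Wormhole 模板默认值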
heartbeat.interval.ms = 50000 + max.poll.records = 1000 + request.timeout.ms = 80000 + max.partition.fetch.bytes = 10485760 + } +} + +#kerberos = { +# keyTab="" #the keyTab will be used on yarn +# spark.principal="" #the principal of spark +# spark.keyTab="" #the keyTab of spark +# server.config="" #the path of krb5.conf +# jaas.startShell.config="" #the path of jaas config file which should be used by start.sh +# jaas.yarn.config="" #the path of jaas config file which will be uploaded to yarn +# server.enabled=false #enable wormhole connect to Kerberized cluster +#} + +# choose monitor method among ES、MYSQL +monitor ={ + database.type="ES" +} + +#Wormhole feedback data store, if doesn't want to config, you will not see wormhole processing delay and throughput +#if not set, please comment it + +#elasticSearch.http = { +# url = "http://localhost:9200" +# user = "" +# password = "" +#} + +#display wormhole processing delay and throughput data, get admin user token from grafana +#garfana should set to be anonymous login, so you can access the dashboard through wormhole directly +#if not set, please comment it + +#grafana = { +# url = "http://localhost:3000" +# admin.token = "jihefouglokoj" +#} + +#delete feedback history data on time +maintenance = { + mysql.feedback.remain.maxDays = 7 + elasticSearch.feedback.remain.maxDays = 7 +} + + +#Dbus integration, support serveral DBus services, if not set, please comment it + +#dbus = { +# api = [ +# { +# login = { +# url = "http://localhost:8080/keeper/login" +# email = "" +# password = "" +# } +# synchronization.namespace.url = "http://localhost:8080/keeper/tables/riderSearch" +# } +# ] +#} +``` + +- 初始化数据库: + - 创建表:`create database wormhole character set utf8;` +- 初始化表结构脚本路径: + - 该脚本存在一个问题:初始化脚本和补丁脚本混在一起,所以直接复制执行会有报错,但是报错的部分是不影响 + - 我是直接把基础 sql 和补丁 sql 分开执行,方便判断。 +- 部署完成,浏览器访问: + +------------------------------------------------------------------- + +## 创建用户 + +- **参考官网,必须先了解下**: +- 必须创建用户,后面才能进入 Project 里面创建 Stream / Flow +- 创建的用户类型必须是:`user` + + +------------------------------------------------------------------- + +## 创建 Source 需要涉及的概念 + +#### 创建 Instance + +- Instance 用于绑定各个组件的所在服务连接 +- 一般我们都会选择 Kafka 作为 source,后面的基础也是基于 Kafka 作为 Source 的场景 +- 假设填写实例名:`source_kafka` + +#### 创建 Database + +- 各个组件的具体数据库、Topic 等信息 +- 假设填写 topic:`source` + + +#### 创建 Namespace + +- wormhole 抽象出来的概念 +- 用于数据分类 +- 假设填写 Tables:`ums_extension id` +- 配置 schema,记得配置上 ums_ts + +``` +{ + "id": 1, + "name": "test", + "phone": "18074546423", + "city": "Beijing", + "time": "2017-12-22 10:00:00" +} +``` + + +------------------------------------------------------------------- + +## 创建 Sink 需要涉及的概念 + +#### 创建 Instance + +- 假设填写实例名:`sink_mysql` + +#### 创建 Database + +- 假设填写 Database Name:`sink` +- config 参数:`useUnicode=true&characterEncoding=UTF-8&useSSL=false&rewriteBatchedStatements=true` + +#### 创建 Namespace + +- 假设填写 Tables: `user id` + + +------------------------------------------------------------------- + +## 创建 Project + +- 项目标识:`demo` + +------------------------------------------------------------------- + + +## Flink Stream + +- Stream 是在 Project 内容页下才能创建 +- 一个 Stream 可以有多个 Flow +- 并且是 Project 下面的用户才能创建,admin 用户没有权限 +- 要删除 Project 必须先进入 Project 内容页删除所有 Stream 之后 admin 才能删除 Project +- 新建 Stream + - Stream type 类型选择:`Flink` + - 假设填写 Name:`wormhole_stream_test` + +## Flink Flow(流式作业) + +- Flow 是在 Project 内容页下才能创建 +- 并且是 Project 下面的用户才能创建,admin 用户没有权限 +- Flow 会关联 source 和 sink +- 要删除 Project 必须先进入 Project 内容页删除所有 Stream 之后 admin 才能删除 Project +- 基于 Stream 新建 Flow + - 
Pipeline + - Transformation + - + - NO_SKIP 滑动窗口 + - SKIP_PAST_LAST_EVENT 滚动窗口 + - KeyBy 分组字段 + - Output + - Agg:将匹配的多条数据做聚合,生成一条数据输出,例:field1:avg,field2:max(目前支持 max/min/avg/sum) + - Detail:将匹配的多条数据逐一输出 + - FilteredRow:按条件选择指定的一条数据输出,例:head/last/ field1:min/max + - Confirmation +- 注意:Stream 处于 running 状态时,才可以启动 Flow + + +------------------------------------------------------------------- + +## Kafka 发送测试数据 + +- `cd /usr/local/kafka/bin` +- `./kafka-console-producer.sh --broker-list linux-05:9092 --topic source --property "parse.key=true" --property "key.separator=@@@"` +- 发送 UMS 流消息协议规范格式: + +``` +data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 1, "name": "test", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:00:00"} +``` + From a1e73315d70e6632dafb62b81cda364cea600fec Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 26 Dec 2018 16:37:23 +0800 Subject: [PATCH 231/330] :construction: Kafka --- markdown-file/Wormhole-Install-And-Settings.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/markdown-file/Wormhole-Install-And-Settings.md b/markdown-file/Wormhole-Install-And-Settings.md index 47ca8a28..d3bdad27 100644 --- a/markdown-file/Wormhole-Install-And-Settings.md +++ b/markdown-file/Wormhole-Install-And-Settings.md @@ -14,13 +14,13 @@ - hostname:`linux-07` - 必须(版本请不要随便用,而是按照如下说明来): - 一般情况下,我组件都是放在:`/usr/local` - - JDK 1.8(三台) - - Hadoop 集群(HDFS,YARN)(三台):2.6.5 - - Spark 单点(linux-05):2.2.0 - - Flink 单点(linux-05):1.5.1 - - Zookeeper(linux-05):3.4.13 - - Kafka(linux-05):0.10.2.2 - - MySQL(linux-05):5.7 + - JDK(三台):`1.8.0_181` + - Hadoop 集群(HDFS,YARN)(三台):`2.6.5` + - Spark 单点(linux-05):`2.2.0` + - Flink 单点(linux-05):`1.5.1` + - Zookeeper 单点(linux-05):`3.4.13` + - Kafka 单点(linux-05):`0.10.2.2` + - MySQL 单点(linux-05):`5.7` - 以上组件安装教程可以查看该教程:[点击我](https://github.com/judasn/Linux-Tutorial) - 非必须: - Elasticsearch(支持版本 5.x)(非必须,若无则无法查看 wormhole 处理数据的吞吐和延时) From 8388a7121a61b1f29fc6feb034056c0ae73fb410 Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 26 Dec 2018 16:41:47 +0800 Subject: [PATCH 232/330] :construction: Kafka --- markdown-file/Wormhole-Install-And-Settings.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/markdown-file/Wormhole-Install-And-Settings.md b/markdown-file/Wormhole-Install-And-Settings.md index d3bdad27..3c44fcee 100644 --- a/markdown-file/Wormhole-Install-And-Settings.md +++ b/markdown-file/Wormhole-Install-And-Settings.md @@ -4,6 +4,24 @@ - 需要对流计算的一些基础概念有基础了解,比如:Source、Sink、YARN、Kafka 等 +------------------------------------------------------------------- + +## 本文目标 + +- 统计 **滑动窗口** 下的流过的数据量(count) +- 数据格式: + +``` +{ + "id": 1, + "name": "test", + "phone": "18074546423", + "city": "Beijing", + "time": "2017-12-22 10:00:00" +} +``` + +------------------------------------------------------------------- ## 基础环境 From c70186bcbb0f21d6f8897ea8256434028d00bc9a Mon Sep 17 00:00:00 2001 From: judasn Date: Wed, 26 Dec 2018 16:42:51 +0800 Subject: [PATCH 233/330] :construction: Kafka --- markdown-file/Wormhole-Install-And-Settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/markdown-file/Wormhole-Install-And-Settings.md b/markdown-file/Wormhole-Install-And-Settings.md index 3c44fcee..fba90057 100644 --- a/markdown-file/Wormhole-Install-And-Settings.md +++ b/markdown-file/Wormhole-Install-And-Settings.md @@ -9,7 +9,7 @@ ## 本文目标 - 统计 **滑动窗口** 下的流过的数据量(count) -- 数据格式: +- 业务数据格式: ``` { From 501691cf4a196abc91cb36dad4149ae6235c6ed5 Mon Sep 17 00:00:00 2001 From: 
judasn
Date: Wed, 26 Dec 2018 17:23:42 +0800
Subject: [PATCH 234/330] :construction: Kafka

---
 markdown-file/Wormhole-Install-And-Settings.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/markdown-file/Wormhole-Install-And-Settings.md b/markdown-file/Wormhole-Install-And-Settings.md
index fba90057..96e96161 100644
--- a/markdown-file/Wormhole-Install-And-Settings.md
+++ b/markdown-file/Wormhole-Install-And-Settings.md
@@ -39,6 +39,7 @@
   - Zookeeper 单点(linux-05):`3.4.13`
   - Kafka 单点(linux-05):`0.10.2.2`
   - MySQL 单点(linux-05):`5.7`
+  - wormhole 单点(linux-05):`0.6.0-beta`,2018-12-06 版本
   - 以上组件安装教程可以查看该教程:[点击我](https://github.com/judasn/Linux-Tutorial)
 - 非必须:
   - Elasticsearch(支持版本 5.x)(非必须,若无则无法查看 wormhole 处理数据的吞吐和延时)

From 046d5cd089d2edaba4301515764423b67579ec6f Mon Sep 17 00:00:00 2001
From: judasn
Date: Thu, 27 Dec 2018 10:52:42 +0800
Subject: [PATCH 235/330] :construction: Hacked

---
 markdown-file/Was-Hacked.md | 25 +++++++++++++++++++++++--
 1 file changed, 23 insertions(+), 2 deletions(-)

diff --git a/markdown-file/Was-Hacked.md b/markdown-file/Was-Hacked.md
index 3adb0e70..a803a05a 100644
--- a/markdown-file/Was-Hacked.md
+++ b/markdown-file/Was-Hacked.md
@@ -31,8 +31,10 @@
 - 查看开放的端口,比如常用的80,22,8009,后面的箭头表示端口对应占用的程序:`netstat -lnp`
 - 检查某个端口的具体信息:`lsof -i :18954`
 - 检查启动项:`chkconfig`
-- 检查定时器:`cat /etc/crontab`
-- 检查定时器:`crontab -l`
+- 检查定时器(重要):`cat /etc/crontab`
+- 检查定时器(重要):`crontab -l`
+    - `vim /var/spool/cron/crontabs/root`
+    - `vim /var/spool/cron/root`
 - 检查其他系统重要文件:
     - `cat /etc/rc.local`
     - `cd /etc/init.d;ll`
@@ -89,6 +91,25 @@ TOTAL:(总的流量) 12.9GB 229Mb 190Mb 193Mb
 
 - yum update openssh-server
 
+## 实战
+
+#### 挖矿程序
+
+- 先查看调度任务是否有新增内容
+    - `vim /var/spool/cron/root`
+    - `vim /var/spool/cron/crontabs/root`
+- 如果有,先停止定时任务:`systemctl stop crond`
+- 如果对方有用 wget/curl 去拉取指定网站,则先在 hosts 里面映射为 127.0.0.1,比如:`127.0.0.1 prax0zma.ru`
+    - 查看当前最占用 CPU 的进程 PID,假如发现是 22935,则:`cd /proc/22935 && ll`,发现程序目录是:`/root/.tmp00/bash64`
+    - 我们就去掉该程序的可执行权限:`chmod -R -x /root/.tmp00/`,然后再 kill 掉该程序
+- 打开别人的脚本,看下是如何书写的,发现有写入几个目录,这里进行删除:
+
+```
+rm -rf /tmp/.ha /boot/.b /boot/.0 /root/.tmp00
+```
+
+- 最后检查下是否有免密内容被修改:`cd ~/.ssh/ && cat authorized_keys`
+
 ## 资料
 
 - 

From 7f5687ecfa8c57ce380ae548c8e4083841b8440b Mon Sep 17 00:00:00 2001
From: judasn
Date: Thu, 27 Dec 2018 16:00:44 +0800
Subject: [PATCH 236/330] :construction: Flink

---
 markdown-file/Flink-Install-And-Settings.md | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/markdown-file/Flink-Install-And-Settings.md b/markdown-file/Flink-Install-And-Settings.md
index a50c9a27..a0f93ccd 100644
--- a/markdown-file/Flink-Install-And-Settings.md
+++ b/markdown-file/Flink-Install-And-Settings.md
@@ -25,6 +25,14 @@
 - 查看日志:`tail -300f log/flink-*-standalonesession-*.log`
 - 浏览器访问 WEB 管理:`http://192.168.0.105:8081`
 
+## yarn 启动
+
+- 安装方式跟上面一样,但是必须保证有 hadoop、yarn 集群
+- 控制台启动:`cd /usr/local/flink && ./bin/yarn-session.sh -n 2 -jm 1024 -tm 1024`
+- 守护进程启动:`cd /usr/local/flink && ./bin/yarn-session.sh -n 2 -jm 1024 -tm 1024 -d`
+- 有可能会报:`The Flink Yarn cluster has failed`,可能是资源不够
+
+
 ## Demo
 
 - 运行程序解压包下也有一些 jar demo:`cd /usr/local/flink/examples`

From 6b01f97c3160120c488ba837e1a12c2c000eb4c7 Mon Sep 17 00:00:00 2001
From: judasn
Date: Thu, 27 Dec 2018 17:12:33 +0800
Subject: [PATCH 237/330] :construction: Flink

---
 markdown-file/Ansible-Install-And-Settings.md |  26 ++++++
 markdown-file/Hadoop-Install-And-Settings.md  |  54 +++++------
 .../Wormhole-Install-And-Settings.md          |  92 ++++++++++---------
 3 files changed, 104 insertions(+), 68 deletions(-)

diff --git
a/markdown-file/Ansible-Install-And-Settings.md b/markdown-file/Ansible-Install-And-Settings.md index 91011077..6c22a870 100644 --- a/markdown-file/Ansible-Install-And-Settings.md +++ b/markdown-file/Ansible-Install-And-Settings.md @@ -206,6 +206,32 @@ PLAY RECAP ********************************************************************* - 执行命令:`ansible-playbook /opt/jdk8-playbook.yml` +#### 修改 hosts + + +- 创建脚本文件:`vim /opt/hosts-playbook.yml` + +``` +- hosts: all + remote_user: root + tasks: + - name: update hosts + blockinfile: + path: /etc/hosts + block: | + 192.168.0.223 linux01 + 192.168.0.223 linux02 + 192.168.0.223 linux03 + 192.168.0.223 linux04 + 192.168.0.223 linux05 +``` + + +- 执行命令:`ansible-playbook /opt/hosts-playbook.yml` + +------------------------------------------------------------------- + + ## 资料 diff --git a/markdown-file/Hadoop-Install-And-Settings.md b/markdown-file/Hadoop-Install-And-Settings.md index 142cc70f..f75adc34 100644 --- a/markdown-file/Hadoop-Install-And-Settings.md +++ b/markdown-file/Hadoop-Install-And-Settings.md @@ -28,9 +28,9 @@ - 分别给三台机子设置 hostname ``` -hostnamectl --static set-hostname hadoop-master -hostnamectl --static set-hostname hadoop-node1 -hostnamectl --static set-hostname hadoop-node2 +hostnamectl --static set-hostname linux01 +hostnamectl --static set-hostname linux02 +hostnamectl --static set-hostname linux03 ``` @@ -39,13 +39,13 @@ hostnamectl --static set-hostname hadoop-node2 ``` 就按这个来,其他多余的别加,不然可能也会有影响 vim /etc/hosts -172.16.0.17 hadoop-master -172.16.0.43 hadoop-node1 -172.16.0.180 hadoop-node2 +172.16.0.17 linux01 +172.16.0.43 linux02 +172.16.0.180 linux03 ``` -- 对 hadoop-master 设置免密: +- 对 linux01 设置免密: ``` 生产密钥对 @@ -64,13 +64,13 @@ ssh localhost - 如果你是采用 pem 登录的,可以看这个:[SSH 免密登录](SSH-login-without-password.md) ``` -ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@172.16.0.43,根据提示输入 hadoop-node1 机器的 root 密码,成功会有相应提示 -ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@172.16.0.180,根据提示输入 hadoop-node2 机器的 root 密码,成功会有相应提示 +ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@172.16.0.43,根据提示输入 linux02 机器的 root 密码,成功会有相应提示 +ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@172.16.0.180,根据提示输入 linux03 机器的 root 密码,成功会有相应提示 -在 hadoop-master 上测试: -ssh hadoop-node1 -ssh hadoop-node2 +在 linux01 上测试: +ssh linux02 +ssh linux03 ``` @@ -88,7 +88,7 @@ mkdir -p /data/hadoop/hdfs/name /data/hadoop/hdfs/data /data/hadoop/hdfs/tmp ``` - 下载 Hadoop: -- 现在 hadoop-master 机子上安装 +- 现在 linux01 机子上安装 ``` cd /usr/local && wget http://apache.claz.org/hadoop/common/hadoop-2.6.5/hadoop-2.6.5.tar.gz @@ -108,7 +108,7 @@ source /etc/profile ``` -## 修改 hadoop-master 配置 +## 修改 linux01 配置 ``` @@ -145,12 +145,12 @@ vim $HADOOP_HOME/etc/hadoop/core-site.xml,改为: fs.defaultFS - hdfs://hadoop-master:9000 + hdfs://linux01:9000 hadoop.proxyuser.root.hosts @@ -225,7 +225,7 @@ vim $HADOOP_HOME/etc/hadoop/yarn-site.xml yarn.resourcemanager.hostname - hadoop-master + linux01 @@ -244,21 +244,21 @@ vim $HADOOP_HOME/etc/hadoop/yarn-site.xml vim $HADOOP_HOME/etc/hadoop/slaves 把默认的配置里面的 localhost 删除,换成: -hadoop-node1 -hadoop-node2 +linux02 +linux03 ``` ``` -scp -r /usr/local/hadoop-2.6.5 root@hadoop-node1:/usr/local/ +scp -r /usr/local/hadoop-2.6.5 root@linux02:/usr/local/ -scp -r /usr/local/hadoop-2.6.5 root@hadoop-node2:/usr/local/ +scp -r /usr/local/hadoop-2.6.5 root@linux03:/usr/local/ ``` -## hadoop-master 机子运行 +## linux01 机子运行 ``` 格式化 HDFS @@ -269,7 +269,7 @@ hdfs namenode -format - 输出结果: ``` -[root@hadoop-master hadoop-2.6.5]# hdfs namenode -format +[root@linux01 hadoop-2.6.5]# hdfs namenode -format 18/12/17 
17:47:17 INFO namenode.NameNode: STARTUP_MSG: /************************************************************ STARTUP_MSG: Starting NameNode @@ -424,10 +424,10 @@ tcp6 0 0 :::37481 :::* LISTEN ## 管理界面 -- 查看 HDFS NameNode 管理界面: -- 访问 YARN ResourceManager 管理界面: -- 访问 NodeManager-1 管理界面: -- 访问 NodeManager-2 管理界面: +- 查看 HDFS NameNode 管理界面: +- 访问 YARN ResourceManager 管理界面: +- 访问 NodeManager-1 管理界面: +- 访问 NodeManager-2 管理界面: ------------------------------------------------------------------- diff --git a/markdown-file/Wormhole-Install-And-Settings.md b/markdown-file/Wormhole-Install-And-Settings.md index 96e96161..2a55aa81 100644 --- a/markdown-file/Wormhole-Install-And-Settings.md +++ b/markdown-file/Wormhole-Install-And-Settings.md @@ -26,21 +26,32 @@ ## 基础环境 - 参考官网: -- 三台 4C8G 服务器 CentOS 7.4 - - hostname:`linux-05` - - hostname:`linux-06` - - hostname:`linux-07` +- 4 台 8C32G 服务器 CentOS 7.5 + - **为了方便测试,服务器都已经关闭防火墙,并且对外开通所有端口** + - **都做了免密登录** + - hostname:`linux01` + - hostname:`linux02` + - hostname:`linux03` + - hostname:`linux04` + - hostname:`linux05` + - Ansible 批量添加 hosts 请看:[点击我](Ansible-Install-And-Settings.md) - 必须(版本请不要随便用,而是按照如下说明来): - 一般情况下,我组件都是放在:`/usr/local` - - JDK(三台):`1.8.0_181` - - Hadoop 集群(HDFS,YARN)(三台):`2.6.5` - - Spark 单点(linux-05):`2.2.0` - - Flink 单点(linux-05):`1.5.1` - - Zookeeper 单点(linux-05):`3.4.13` - - Kafka 单点(linux-05):`0.10.2.2` - - MySQL 单点(linux-05):`5.7` - - wormhole 单点(linux-05):`0.6.0-beta`,2018-12-06 版本 - - 以上组件安装教程可以查看该教程:[点击我](https://github.com/judasn/Linux-Tutorial) + - JDK(所有服务器):`1.8.0_181` + - 批量添加 JDK 请看:[点击我](Ansible-Install-And-Settings.md) + - Hadoop 集群(HDFS,YARN)(linux01、linux02、linux03):`2.6.5` + - 安装请看:[点击我](Hadoop-Install-And-Settings.md) + - Zookeeper 单点(linux04):`3.4.13` + - 安装请看:[点击我](Zookeeper-Install.md) + - Kafka 单点(linux04):`0.10.2.2` + - 安装请看:[点击我](Kafka-Install-And-Settings.md) + - MySQL 单点(linux04):`5.7` + - 安装请看:[点击我](Mysql-Install-And-Settings.md) + - Spark 单点(linux05):`2.2.0` + - 安装请看:[点击我](Spark-Install-And-Settings.md) + - Flink 单点(linux05):`1.5.1` + - 安装请看:[点击我](Flink-Install-And-Settings.md) + - wormhole 单点(linux05):`0.6.0-beta`,2018-12-06 版本 - 非必须: - Elasticsearch(支持版本 5.x)(非必须,若无则无法查看 wormhole 处理数据的吞吐和延时) - Grafana (支持版本 4.x)(非必须,若无则无法查看 wormhole 处理数据的吞吐和延时的图形化展示) @@ -50,7 +61,8 @@ ## Wormhole 安装 + 配置 - 参考官网: -- 最终环境 application.conf 配置文件参考 +- 解压:`cd /usr/local && tar -xvf wormhole-0.6.0-beta.tar.gz` +- 修改配置文件:`vim /usr/local/wormhole-0.6.0-beta/conf/application.conf` ``` @@ -58,7 +70,7 @@ akka.http.server.request-timeout = 120s wormholeServer { cluster.id = "" #optional global uuid - host = "linux-05" + host = "linux05" port = 8989 ui.default.language = "Chinese" token.timeout = 1 @@ -73,7 +85,7 @@ mysql = { driver = "com.mysql.jdbc.Driver" user = "root" password = "123456" - url = "jdbc:mysql://localhost:3306/wormhole?useUnicode=true&characterEncoding=UTF-8&useSSL=false" + url = "jdbc:mysql://linux04:3306/wormhole?useUnicode=true&characterEncoding=UTF-8&useSSL=false" numThreads = 4 minConnections = 4 maxConnections = 10 @@ -81,28 +93,28 @@ mysql = { } } -ldap = { - enabled = false - user = "" - pwd = "" - url = "" - dc = "" - read.timeout = 3000 - read.timeout = 5000 - connect = { - timeout = 5000 - pool = true - } -} +#ldap = { +# enabled = false +# user = "" +# pwd = "" +# url = "" +# dc = "" +# read.timeout = 3000 +# read.timeout = 5000 +# connect = { +# timeout = 5000 +# pool = true +# } +#} spark = { wormholeServer.user = "root" #WormholeServer linux user wormholeServer.ssh.port = 22 #ssh port, please set 
WormholeServer linux user can password-less login itself remote spark.home = "/usr/local/spark" yarn.queue.name = "default" #WormholeServer submit spark streaming/job queue - wormhole.hdfs.root.path = "hdfs://linux-05/wormhole" #WormholeServer hdfslog data default hdfs root path - yarn.rm1.http.url = "linux-05:8088" #Yarn ActiveResourceManager address - yarn.rm2.http.url = "linux-05:8088" #Yarn StandbyResourceManager address + wormhole.hdfs.root.path = "hdfs://linux01/wormhole" #WormholeServer hdfslog data default hdfs root path + yarn.rm1.http.url = "linux01:8088" #Yarn ActiveResourceManager address + yarn.rm2.http.url = "linux01:8088" #Yarn StandbyResourceManager address } flink = { @@ -111,20 +123,18 @@ flink = { feedback.state.count=100 checkpoint.enable=false checkpoint.interval=60000 - stateBackend="hdfs://linux-05/flink-checkpoints" + stateBackend="hdfs://linux01/flink-checkpoints" feedback.interval=30 } zookeeper = { - connection.url = "localhost:2181" #WormholeServer stream and flow interaction channel + connection.url = "linux04:2181" #WormholeServer stream and flow interaction channel wormhole.root.path = "/wormhole" #zookeeper } kafka = { - #brokers.url = "localhost:6667" #WormholeServer feedback data store - brokers.url = "linux-05:9092" - zookeeper.url = "localhost:2181" - #topic.refactor = 3 + brokers.url = "linux04:9092" + zookeeper.url = "linux04:2181" topic.refactor = 1 using.cluster.suffix = false #if true, _${cluster.id} will be concatenated to consumer.feedback.topic consumer = { @@ -156,7 +166,7 @@ kafka = { # choose monitor method among ES、MYSQL monitor ={ - database.type="ES" + database.type="MYSQL" } #Wormhole feedback data store, if doesn't want to config, you will not see wormhole processing delay and throughput @@ -205,7 +215,7 @@ maintenance = { - 初始化表结构脚本路径: - 该脚本存在一个问题:初始化脚本和补丁脚本混在一起,所以直接复制执行会有报错,但是报错的部分是不影响 - 我是直接把基础 sql 和补丁 sql 分开执行,方便判断。 -- 部署完成,浏览器访问: +- 部署完成,浏览器访问: ------------------------------------------------------------------- @@ -313,7 +323,7 @@ maintenance = { ## Kafka 发送测试数据 - `cd /usr/local/kafka/bin` -- `./kafka-console-producer.sh --broker-list linux-05:9092 --topic source --property "parse.key=true" --property "key.separator=@@@"` +- `./kafka-console-producer.sh --broker-list linux01:9092 --topic source --property "parse.key=true" --property "key.separator=@@@"` - 发送 UMS 流消息协议规范格式: ``` From ec40f6d8fad47c06b03ab18baa45769fde9b0fc5 Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 27 Dec 2018 18:06:25 +0800 Subject: [PATCH 238/330] :construction: Ansible --- markdown-file/Ansible-Install-And-Settings.md | 107 ++++++++++++++++-- .../Wormhole-Install-And-Settings.md | 1 + 2 files changed, 96 insertions(+), 12 deletions(-) diff --git a/markdown-file/Ansible-Install-And-Settings.md b/markdown-file/Ansible-Install-And-Settings.md index 6c22a870..6188d29c 100644 --- a/markdown-file/Ansible-Install-And-Settings.md +++ b/markdown-file/Ansible-Install-And-Settings.md @@ -168,6 +168,55 @@ PLAY RECAP ********************************************************************* ## 更多 playbook 实战 + +#### 禁用防火墙(CentOS 7.x) + + +- 创建脚本文件:`vim /opt/disable-firewalld-playbook.yml` + +``` +- hosts: all + remote_user: root + tasks: + - name: Disable SELinux at next reboot + selinux: + state: disabled + - name: disable firewalld + command: "{{ item }}" + with_items: + - systemctl stop firewalld + - systemctl disable firewalld + - setenforce 0 +``` + + +- 执行命令:`ansible-playbook /opt/disable-firewalld-playbook.yml` + +#### 修改 hosts + + +- 创建脚本文件:`vim 
/opt/hosts-playbook.yml` + +``` +- hosts: all + remote_user: root + tasks: + - name: update hosts + blockinfile: + path: /etc/hosts + block: | + 192.168.0.223 linux01 + 192.168.0.223 linux02 + 192.168.0.223 linux03 + 192.168.0.223 linux04 + 192.168.0.223 linux05 +``` + + +- 执行命令:`ansible-playbook /opt/hosts-playbook.yml` + + + #### 部署 JDK - 创建脚本文件:`vim /opt/jdk8-playbook.yml` @@ -184,7 +233,7 @@ PLAY RECAP ********************************************************************* - name: tar jdk shell: chdir={{ java_install_folder }} tar zxf jdk-8u181-linux-x64.tar.gz - - name: Set JAVA_HOME + - name: set JAVA_HOME blockinfile: path: /etc/profile marker: "#{mark} JDK ENV" @@ -206,28 +255,62 @@ PLAY RECAP ********************************************************************* - 执行命令:`ansible-playbook /opt/jdk8-playbook.yml` -#### 修改 hosts +#### 部署 Hadoop 集群 -- 创建脚本文件:`vim /opt/hosts-playbook.yml` +- 创建脚本文件:`vim /opt/hadoop-playbook.yml` ``` -- hosts: all +- hosts: hadoop-host remote_user: root tasks: - - name: update hosts + - name: Creates directory + file: + path: /data/hadoop/hdfs/name + state: directory + - name: Creates directory + file: + path: /data/hadoop/hdfs/data + state: directory + - name: Creates directory + file: + path: /data/hadoop/hdfs/tmp + state: directory + + - name: copy gz file + copy: src=/opt/hadoop-2.6.5.tar.gz dest=/usr/local + + - name: tar gz file + command: cd /usr/local && tar zxf hadoop-2.6.5.tar.gz + + - name: check folder existed + stat: path=/usr/local/hadoop-2.6.5 + register: folder_existed + + - name: rename folder + command: mv /usr/local/hadoop-2.6.5 /usr/local/hadoop + when: folder_existed.stat.exists == true + + - name: set HADOOP_HOME blockinfile: - path: /etc/hosts + path: /etc/profile + marker: "#{mark} HADOOP ENV" block: | - 192.168.0.223 linux01 - 192.168.0.223 linux02 - 192.168.0.223 linux03 - 192.168.0.223 linux04 - 192.168.0.223 linux05 + HADOOP_HOME=/usr/local/hadoop + PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin + export HADOOP_HOME + export PATH + + - name: source profile + shell: source /etc/profile ``` -- 执行命令:`ansible-playbook /opt/hosts-playbook.yml` +- 执行命令:`ansible-playbook /opt/jdk8-playbook.yml` + + + + ------------------------------------------------------------------- diff --git a/markdown-file/Wormhole-Install-And-Settings.md b/markdown-file/Wormhole-Install-And-Settings.md index 2a55aa81..41f60410 100644 --- a/markdown-file/Wormhole-Install-And-Settings.md +++ b/markdown-file/Wormhole-Install-And-Settings.md @@ -37,6 +37,7 @@ - Ansible 批量添加 hosts 请看:[点击我](Ansible-Install-And-Settings.md) - 必须(版本请不要随便用,而是按照如下说明来): - 一般情况下,我组件都是放在:`/usr/local` + - 批量部署用的是:Ansible(linux01) - JDK(所有服务器):`1.8.0_181` - 批量添加 JDK 请看:[点击我](Ansible-Install-And-Settings.md) - Hadoop 集群(HDFS,YARN)(linux01、linux02、linux03):`2.6.5` From 2d1498024c29598fd427f3d21717c31ff2f0854a Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 28 Dec 2018 13:47:01 +0800 Subject: [PATCH 239/330] :construction: Wormhole --- markdown-file/Ansible-Install-And-Settings.md | 75 +++- markdown-file/Kafka-Install-And-Settings.md | 2 +- .../Wormhole-Install-And-Settings.md | 367 ++++++++++++++++-- 3 files changed, 392 insertions(+), 52 deletions(-) diff --git a/markdown-file/Ansible-Install-And-Settings.md b/markdown-file/Ansible-Install-And-Settings.md index 6188d29c..aed43c64 100644 --- a/markdown-file/Ansible-Install-And-Settings.md +++ b/markdown-file/Ansible-Install-And-Settings.md @@ -190,7 +190,61 @@ PLAY RECAP ********************************************************************* ``` -- 
执行命令:`ansible-playbook /opt/disable-firewalld-playbook.yml` + +#### 基础环境(CentOS 7.x) + + +- 创建脚本文件:`vim /opt/install-basic-playbook.yml` + +``` +- hosts: all + remote_user: root + tasks: + - name: Disable SELinux at next reboot + selinux: + state: disabled + + - name: disable firewalld + command: "{{ item }}" + with_items: + - systemctl stop firewalld + - systemctl disable firewalld + - setenforce 0 + + - name: install-basic + command: "{{ item }}" + with_items: + - yum install -y zip unzip lrzsz git epel-release wget htop deltarpm + + - name: install-vim + shell: "{{ item }}" + with_items: + - yum install -y vim + - curl https://raw.githubusercontent.com/wklken/vim-for-server/master/vimrc > ~/.vimrc + + - name: install-docker + shell: "{{ item }}" + with_items: + - yum install -y yum-utils device-mapper-persistent-data lvm2 + - yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo + - yum makecache fast + - yum install -y docker-ce + - systemctl start docker.service + - docker run hello-world + + - name: install-docker-compose + shell: "{{ item }}" + with_items: + - curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose + - chmod +x /usr/local/bin/docker-compose + - docker-compose --version + - systemctl restart docker.service + - systemctl enable docker.service + +``` + + +- 执行命令:`ansible-playbook /opt/install-basic-playbook.yml` #### 修改 hosts @@ -259,6 +313,7 @@ PLAY RECAP ********************************************************************* #### 部署 Hadoop 集群 - 创建脚本文件:`vim /opt/hadoop-playbook.yml` +- 刚学 Ansible,不好动配置文件,所以就只保留环境部分的设置,其他部分自行手工~ ``` - hosts: hadoop-host @@ -277,20 +332,6 @@ PLAY RECAP ********************************************************************* path: /data/hadoop/hdfs/tmp state: directory - - name: copy gz file - copy: src=/opt/hadoop-2.6.5.tar.gz dest=/usr/local - - - name: tar gz file - command: cd /usr/local && tar zxf hadoop-2.6.5.tar.gz - - - name: check folder existed - stat: path=/usr/local/hadoop-2.6.5 - register: folder_existed - - - name: rename folder - command: mv /usr/local/hadoop-2.6.5 /usr/local/hadoop - when: folder_existed.stat.exists == true - - name: set HADOOP_HOME blockinfile: path: /etc/profile @@ -306,7 +347,7 @@ PLAY RECAP ********************************************************************* ``` -- 执行命令:`ansible-playbook /opt/jdk8-playbook.yml` +- 执行命令:`ansible-playbook /opt/hadoop-playbook.yml` @@ -317,7 +358,7 @@ PLAY RECAP ********************************************************************* ## 资料 - +- [ANSIBLE模块 - shell和command区别](https://www.jianshu.com/p/081139f73613) - - - \ No newline at end of file diff --git a/markdown-file/Kafka-Install-And-Settings.md b/markdown-file/Kafka-Install-And-Settings.md index 61993859..ff67a925 100644 --- a/markdown-file/Kafka-Install-And-Settings.md +++ b/markdown-file/Kafka-Install-And-Settings.md @@ -427,7 +427,7 @@ num.partitions=1 # 允许删除topic delete.topic.enable=false # 允许自动创建topic(默认是 true) -auto.create.topics.enable=false +auto.create.topics.enable=true # 磁盘IO不足的时候,可以适当调大该值 ( 当内存足够时 ) #log.flush.interval.messages=10000 #log.flush.interval.ms=1000 diff --git a/markdown-file/Wormhole-Install-And-Settings.md b/markdown-file/Wormhole-Install-And-Settings.md index 41f60410..5c92ac10 100644 --- a/markdown-file/Wormhole-Install-And-Settings.md +++ b/markdown-file/Wormhole-Install-And-Settings.md @@ -1,8 +1,18 @@ -# Wormhole Flink 最佳实践 +# Wormhole + Flink 最佳实践 + + +## 本文声明 + +- **感谢 
Wormhole 的官方帮助!官方微信群很友好,这让我很意外,只能感谢了!** +- 本人大数据和 Ansible 刚看,只会皮毛的皮毛。但是也因为这样的契机促使了我写这篇文章。 +- 希望对你们有帮助。 + +------------------------------------------------------------------- ## 前置声明 -- 需要对流计算的一些基础概念有基础了解,比如:Source、Sink、YARN、Kafka 等 +- 需要对 Linux 环境,流计算的一些基础概念有基础了解,比如:Source、Sink、YARN、Zookeeper、Kafka、Ansible 等 +- 如果有欠缺,可以查看本系列文章:[点击我](../README.md) ------------------------------------------------------------------- @@ -23,45 +33,334 @@ ------------------------------------------------------------------- -## 基础环境 +## 服务器基础环境设置 + +#### 特别说明 + +- **5 台 8C32G 服务器 CentOS 7.5,内存推荐 16G 或以上。** + - **为了方便,所有服务器都已经关闭防火墙,并且在云服务上设置安全组对外开通所有端口** +- 整体部署结构图: + +![未命名文件.png](https://upload-images.jianshu.io/upload_images/12159-dc29079158e1e59e.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) + +#### 服务器基础配置 + +- 给对应服务器设置 hostname,方便后面使用: + +``` +hostnamectl --static set-hostname linux01 +hostnamectl --static set-hostname linux02 +hostnamectl --static set-hostname linux03 +hostnamectl --static set-hostname linux04 +hostnamectl --static set-hostname linux05 +``` + +- 给所有服务器设置 hosts:`vim /etc/hosts` + +``` +172.16.0.17 linux01 +172.16.0.43 linux02 +172.16.0.180 linux03 +172.16.0.180 linux04 +172.16.0.180 linux05 +``` + +- 在 linux01 生成密钥对,设置 SSH 免密登录 + +``` +生产密钥对 +ssh-keygen -t rsa + + +公钥内容写入 authorized_keys +cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys + +测试: +ssh localhost + +将公钥复制到其他机子 +ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@linux02(根据提示输入 linux02 密码) +ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@linux03(根据提示输入 linux03 密码) +ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@linux04(根据提示输入 linux04 密码) +ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@linux05(根据提示输入 linux05 密码) + +在 linux01 上测试 +ssh linux02 +ssh linux03 +ssh linux04 +ssh linux05 +``` + +- 安装 Ansible:`yum install -y ansible` +- 测试 Ansible:`ansible all -a 'ps'` +- 配置 Inventory 编辑配置文件:`vim /etc/ansible/hosts` +- 添加如下内容 + +``` +[hadoop-host] +linux01 +linux02 +linux03 +``` + +#### 服务器基础组件(CentOS 7.x) + + +- 创建脚本文件:`vim /opt/install-basic-playbook.yml` + +``` +- hosts: all + remote_user: root + tasks: + - name: Disable SELinux at next reboot + selinux: + state: disabled + + - name: disable firewalld + command: "{{ item }}" + with_items: + - systemctl stop firewalld + - systemctl disable firewalld + - setenforce 0 + + - name: install-basic + command: "{{ item }}" + with_items: + - yum install -y zip unzip lrzsz git epel-release wget htop deltarpm + + - name: install-vim + shell: "{{ item }}" + with_items: + - yum install -y vim + - curl https://raw.githubusercontent.com/wklken/vim-for-server/master/vimrc > ~/.vimrc + + - name: install-docker + shell: "{{ item }}" + with_items: + - yum install -y yum-utils device-mapper-persistent-data lvm2 + - yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo + - yum makecache fast + - yum install -y docker-ce + - systemctl start docker.service + - docker run hello-world + + - name: install-docker-compose + shell: "{{ item }}" + with_items: + - curl -L https://github.com/docker/compose/releases/download/1.18.0/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose + - chmod +x /usr/local/bin/docker-compose + - docker-compose --version + - systemctl restart docker.service + - systemctl enable docker.service + +``` + +- 执行命令:`ansible-playbook /opt/install-basic-playbook.yml` + + +------------------------------------------------------------------- + +## Wormhole 所需组件安装 - 参考官网: -- 4 台 8C32G 服务器 CentOS 7.5 - - **为了方便测试,服务器都已经关闭防火墙,并且对外开通所有端口** - - **都做了免密登录** 
- - hostname:`linux01` - - hostname:`linux02` - - hostname:`linux03` - - hostname:`linux04` - - hostname:`linux05` - - Ansible 批量添加 hosts 请看:[点击我](Ansible-Install-And-Settings.md) -- 必须(版本请不要随便用,而是按照如下说明来): - - 一般情况下,我组件都是放在:`/usr/local` - - 批量部署用的是:Ansible(linux01) - - JDK(所有服务器):`1.8.0_181` - - 批量添加 JDK 请看:[点击我](Ansible-Install-And-Settings.md) - - Hadoop 集群(HDFS,YARN)(linux01、linux02、linux03):`2.6.5` - - 安装请看:[点击我](Hadoop-Install-And-Settings.md) - - Zookeeper 单点(linux04):`3.4.13` - - 安装请看:[点击我](Zookeeper-Install.md) - - Kafka 单点(linux04):`0.10.2.2` - - 安装请看:[点击我](Kafka-Install-And-Settings.md) - - MySQL 单点(linux04):`5.7` - - 安装请看:[点击我](Mysql-Install-And-Settings.md) - - Spark 单点(linux05):`2.2.0` - - 安装请看:[点击我](Spark-Install-And-Settings.md) - - Flink 单点(linux05):`1.5.1` - - 安装请看:[点击我](Flink-Install-And-Settings.md) - - wormhole 单点(linux05):`0.6.0-beta`,2018-12-06 版本 -- 非必须: - - Elasticsearch(支持版本 5.x)(非必须,若无则无法查看 wormhole 处理数据的吞吐和延时) - - Grafana (支持版本 4.x)(非必须,若无则无法查看 wormhole 处理数据的吞吐和延时的图形化展示) +- 必须组件(版本请不要随便用,而是按照如下说明来): +- 我个人习惯软件都是放在:`/usr/local`,压缩包放在:`/opt` + +#### 关于版本号和端口问题 + +- 百度云打包下载(提取码:8tm3): +- 版本: + - **jdk-8u191-linux-x64.tar.gz** + - **zookeeper-3.4.13(Docker)** + - **kafka_2.11-0.10.2.2.tgz** + - **hadoop-2.6.5.tar.gz** + - **flink-1.5.1-bin-hadoop26-scala_2.11.tgz** + - **spark-2.2.0-bin-hadoop2.6.tgz** + - **mysql-3.7(Docker)** + - **wormhole-0.6.0-beta.tar.gz** +- 端口 + - 都采用组件默认端口 + +#### JDK 安装 + +- JDK(所有服务器):`1.8.0_191` +- 复制压缩包到所有机子的 /opt 目录下: + +``` +scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux02:/opt + +scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux03:/opt + +scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux04:/opt + +scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux05:/opt +``` + +- 创建脚本文件:`vim /opt/jdk8-playbook.yml` + +``` +- hosts: all + remote_user: root + tasks: + - name: copy jdk + copy: src=/opt/jdk-8u191-linux-x64.tar.gz dest=/usr/local + + - name: tar jdk + shell: cd /usr/local && tar zxf jdk-8u191-linux-x64.tar.gz + + - name: set JAVA_HOME + blockinfile: + path: /etc/profile + marker: "#{mark} JDK ENV" + block: | + JAVA_HOME={{ java_install_folder }}/jdk1.8.0_191 + JRE_HOME=$JAVA_HOME/jre + PATH=$PATH:$JAVA_HOME/bin + CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar + export JAVA_HOME + export JRE_HOME + export PATH + export CLASSPATH + + - name: source profile + shell: source /etc/profile +``` + + +- 执行命令:`ansible-playbook /opt/jdk8-playbook.yml` + + + +#### Hadoop 集群(HDFS,YARN) + +- Hadoop 集群(HDFS,YARN)(linux01、linux02、linux03):`2.6.5` +- 内容较多,具体参考:[点击我](Hadoop-Install-And-Settings.md) +- Hadoop 环境可以用脚本文件,剩余部分内容请参考上文手工操作:`vim /opt/hadoop-playbook.yml` + +``` +- hosts: hadoop-host + remote_user: root + tasks: + - name: Creates directory + file: + path: /data/hadoop/hdfs/name + state: directory + - name: Creates directory + file: + path: /data/hadoop/hdfs/data + state: directory + - name: Creates directory + file: + path: /data/hadoop/hdfs/tmp + state: directory + + - name: set HADOOP_HOME + blockinfile: + path: /etc/profile + marker: "#{mark} HADOOP ENV" + block: | + HADOOP_HOME=/usr/local/hadoop + PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin + export HADOOP_HOME + export PATH + + - name: source profile + shell: source /etc/profile +``` + + +- 执行命令:`ansible-playbook /opt/hadoop-playbook.yml` + + +#### Zookeeper + +- Zookeeper 单点(linux04):`3.4.13` +- 单个实例:`docker run -d --restart always --name one-zookeeper -p 2181:2181 -v /etc/localtime:/etc/localtime zookeeper:3.4.13` + +#### Kafka + +- Kafka 单点(linux04):`0.10.2.2` +- 上传压缩包到 
/opt 目录下 +- 解压:`tar zxvf kafka_2.11-0.10.2.2.tgz` +- 删除压缩包并重命名目录:`rm -rf kafka_2.11-0.10.2.2.tgz && mv /usr/local/kafka_2.11-0.10.2.2 /usr/local/kafka` +- 修改 kafka-server 的配置文件:`vim /usr/local/kafka/config/server.properties` + +``` +listeners=PLAINTEXT://0.0.0.0:9092 +advertised.listeners=PLAINTEXT://linux04:9092 +zookeeper.connect=linux04:2181 +auto.create.topics.enable=true +``` + +- 启动 kafka 服务(必须制定配置文件):`cd /usr/local/kafka && bin/kafka-server-start.sh config/server.properties` + - 后台方式运行 kafka 服务:`cd /usr/local/kafka && bin/kafka-server-start.sh -daemon config/server.properties` + - 停止 kafka 服务:`cd /usr/local/kafka && bin/kafka-server-stop.sh` +- 再开一个终端测试: + - 创建 topic 命令:`cd /usr/local/kafka && bin/kafka-topics.sh --create --zookeeper youmeekhost:2181 --replication-factor 1 --partitions 1 --topic my-topic-test` + - 查看 topic 命令:`cd /usr/local/kafka && bin/kafka-topics.sh --list --zookeeper youmeekhost:2181` + - 删除 topic:`cd /usr/local/kafka && bin/kafka-topics.sh --delete --topic my-topic-test --zookeeper youmeekhost:2181` + - 给 topic 发送消息命令:`cd /usr/local/kafka && bin/kafka-console-producer.sh --broker-list youmeekhost:9092 --topic my-topic-test`,然后在出现交互输入框的时候输入你要发送的内容 + - 再开一个终端,进入 kafka 容器,接受消息:`cd /usr/local/kafka && bin/kafka-console-consumer.sh --bootstrap-server youmeekhost:9092 --topic my-topic-test --from-beginning` + - 此时发送的终端输入一个内容回车,接受消息的终端就可以收到。 + +#### MySQL + +- MySQL 单点(linux04):`5.7` +- 创建本地数据存储 + 配置文件目录:`mkdir -p /data/docker/mysql/datadir /data/docker/mysql/conf /data/docker/mysql/log` +- 在宿主机上创建一个配置文件:`vim /data/docker/mysql/conf/mysql-1.cnf`,内容如下: + +``` +[mysql] +default-character-set = utf8 + +[mysqld] +pid-file = /var/run/mysqld/mysqld.pid +socket = /var/run/mysqld/mysqld.sock +datadir = /var/lib/mysql +symbolic-links=0 + +log-error=/var/log/mysql/error.log +default-storage-engine = InnoDB +collation-server = utf8_unicode_ci +init_connect = 'SET NAMES utf8' +character-set-server = utf8 +lower_case_table_names = 1 +max_allowed_packet = 50M +``` + +- 赋权(避免挂载的时候,一些程序需要容器中的用户的特定权限使用):`chmod -R 777 /data/docker/mysql/datadir /data/docker/mysql/log` +- 赋权:`chown -R 0:0 /data/docker/mysql/conf` +- `docker run -p 3306:3306 --name one-mysql -v /data/docker/mysql/datadir:/var/lib/mysql -v /data/docker/mysql/log:/var/log/mysql -v /data/docker/mysql/conf:/etc/mysql/conf.d -e MYSQL_ROOT_PASSWORD=aaabbb123456 -d mysql:5.7` +- 连上容器:`docker exec -it one-mysql /bin/bash` + - 连上 MySQL:`mysql -u root -p` + - 创建表:`CREATE DATABASE wormhole DEFAULT CHARACTER SET utf8 COLLATE utf8_unicode_ci;` +- **确保用 sqlyog 能直接在外网连上,方便后面调试** + + +#### Spark + +- Spark 单点(linux05):`2.2.0` + + + + +#### Flink + + +- Flink 单点(linux05):`1.5.1` + + +#### 非必须组件 + +- Elasticsearch(支持版本 5.x)(非必须,若无则无法查看 wormhole 处理数据的吞吐和延时) +- Grafana (支持版本 4.x)(非必须,若无则无法查看 wormhole 处理数据的吞吐和延时的图形化展示) + ------------------------------------------------------------------- ## Wormhole 安装 + 配置 -- 参考官网: +- wormhole 单点(linux05):`0.6.0-beta`,2018-12-06 版本 - 解压:`cd /usr/local && tar -xvf wormhole-0.6.0-beta.tar.gz` - 修改配置文件:`vim /usr/local/wormhole-0.6.0-beta/conf/application.conf` From 4f32322777c1d7903993d0165c9c9a131ccae085 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 28 Dec 2018 17:30:40 +0800 Subject: [PATCH 240/330] :construction: Wormhole --- README.md | 1 + markdown-file/Flink-Install-And-Settings.md | 2 +- markdown-file/Hadoop-Install-And-Settings.md | 43 +++- .../Wormhole-Install-And-Settings.md | 208 ++++++++++++++---- 4 files changed, 210 insertions(+), 44 deletions(-) diff --git a/README.md b/README.md index 
4aca2d77..946d100e 100644 --- a/README.md +++ b/README.md @@ -101,6 +101,7 @@ - [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) - [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md) - [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md) +- [Wormhole + Flink 最佳实践](markdown-file/Wormhole-Install-And-Settings.md) ## 联系(Contact) diff --git a/markdown-file/Flink-Install-And-Settings.md b/markdown-file/Flink-Install-And-Settings.md index a0f93ccd..97d813c0 100644 --- a/markdown-file/Flink-Install-And-Settings.md +++ b/markdown-file/Flink-Install-And-Settings.md @@ -31,7 +31,7 @@ - 控制台启动:`cd /usr/local/flink && ./bin/yarn-session.sh -n 2 -jm 1024 -tm 1024` - 守护进程启动:`cd /usr/local/flink && ./bin/yarn-session.sh -n 2 -jm 1024 -tm 1024 -d` - 有可能会报:`The Flink Yarn cluster has failed`,可能是资源不够 - +- YARN 参数配置可以参考:[点击我](https://sustcoder.github.io/2018/09/27/YARN%20%E5%86%85%E5%AD%98%E5%8F%82%E6%95%B0%E8%AF%A6%E8%A7%A3/) ## Demo diff --git a/markdown-file/Hadoop-Install-And-Settings.md b/markdown-file/Hadoop-Install-And-Settings.md index f75adc34..ad398971 100644 --- a/markdown-file/Hadoop-Install-And-Settings.md +++ b/markdown-file/Hadoop-Install-And-Settings.md @@ -118,12 +118,12 @@ vim $HADOOP_HOME/etc/hadoop/hadoop-env.sh 把 25 行的 export JAVA_HOME=${JAVA_HOME} 都改为 -export JAVA_HOME=/usr/local/jdk1.8.0_181 +export JAVA_HOME=/usr/local/jdk1.8.0_191 vim $HADOOP_HOME/etc/hadoop/yarn-env.sh -加一行 export JAVA_HOME=/usr/local/jdk1.8.0_181 +文件开头加一行 export JAVA_HOME=/usr/local/jdk1.8.0_191 ``` @@ -211,12 +211,34 @@ vim $HADOOP_HOME/etc/hadoop/hdfs-site.xml mapreduce.framework.name yarn + + + mapreduce.map.memory.mb + 4096 + + + + mapreduce.reduce.memory.mb + 8192 + + + + mapreduce.map.java.opts + -Xmx3072m + + + + mapreduce.reduce.java.opts + -Xmx6144m + + ``` - yarn.resourcemanager.hostname == 指定YARN的老大(ResourceManager)的地址 - yarn.nodemanager.aux-services == NodeManager上运行的附属服务。需配置成mapreduce_shuffle,才可运行MapReduce程序默认值:"" +- 32G 内存的情况下配置: ``` vim $HADOOP_HOME/etc/hadoop/yarn-site.xml @@ -233,6 +255,21 @@ vim $HADOOP_HOME/etc/hadoop/yarn-site.xml mapreduce_shuffle + + yarn.nodemanager.vmem-pmem-ratio + 2.1 + + + + yarn.nodemanager.resource.memory-mb + 20480 + + + + yarn.scheduler.minimum-allocation-mb + 2048 + + ``` @@ -278,7 +315,7 @@ STARTUP_MSG: args = [-format] STARTUP_MSG: version = 2.6.5 STARTUP_MSG: classpath = 
/usr/local/hadoop-2.6.5/etc/hadoop:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/apacheds-kerberos-codec-2.0.0-M15.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-io-2.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/activation-1.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/netty-3.6.2.Final.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jackson-mapper-asl-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/slf4j-api-1.7.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/junit-4.11.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/curator-recipes-2.6.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jasper-compiler-5.5.23.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jets3t-0.9.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-lang-2.6.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-digester-1.8.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jackson-core-asl-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/apacheds-i18n-2.0.0-M15.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/guava-11.0.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/gson-2.2.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jackson-jaxrs-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jettison-1.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jetty-6.1.26.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/api-util-1.0.0-M20.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/log4j-1.2.17.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-beanutils-core-1.8.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-httpclient-3.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-el-1.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/paranamer-2.3.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/slf4j-log4j12-1.7.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-collections-3.2.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jersey-server-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-net-3.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/hadoop-auth-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jasper-runtime-5.5.23.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jaxb-impl-2.2.3-1.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/hamcrest-core-1.3.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/stax-api-1.0-2.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-beanutils-1.7.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/protobuf-java-2.5.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/curator-framework-2.6.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/xz-1.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jsr305-1.3.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jsp-api-2.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-compress-1.4.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/asm-3.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jsch-0.1.42.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-configuration-1.6.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-cli-1.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jackson-xc-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-logging-1.1.3.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/htrace-core-3.0.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jetty-util-6.1.26.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-math3-3.1.1.jar:/usr/
local/hadoop-2.6.5/share/hadoop/common/lib/mockito-all-1.8.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jersey-json-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/zookeeper-3.4.6.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/httpclient-4.2.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/servlet-api-2.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/xmlenc-0.52.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/httpcore-4.2.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/api-asn1-api-1.0.0-M20.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/java-xmlbuilder-0.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/avro-1.7.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jaxb-api-2.2.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/commons-codec-1.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/jersey-core-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/snappy-java-1.0.4.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/curator-client-2.6.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/lib/hadoop-annotations-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/hadoop-common-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/hadoop-common-2.6.5-tests.jar:/usr/local/hadoop-2.6.5/share/hadoop/common/hadoop-nfs-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/commons-io-2.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/netty-3.6.2.Final.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/jackson-mapper-asl-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/commons-lang-2.6.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/commons-daemon-1.0.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/jackson-core-asl-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/guava-11.0.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/jetty-6.1.26.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/log4j-1.2.17.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/commons-el-1.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/xercesImpl-2.9.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/jersey-server-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/jasper-runtime-5.5.23.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/protobuf-java-2.5.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/jsr305-1.3.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/xml-apis-1.3.04.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/jsp-api-2.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/asm-3.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/commons-cli-1.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/commons-logging-1.1.3.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/htrace-core-3.0.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/jetty-util-6.1.26.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/servlet-api-2.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/xmlenc-0.52.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/commons-codec-1.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/lib/jersey-core-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/hadoop-hdfs-2.6.5-tests.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/hadoop-hdfs-nfs-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/hdfs/hadoop-hdfs-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/commons-io-2.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/activation-1.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/aopalliance-1.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/netty-3.6.2.Final.
jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jackson-mapper-asl-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/commons-lang-2.6.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jackson-core-asl-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/guice-3.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/guava-11.0.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jackson-jaxrs-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jettison-1.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jetty-6.1.26.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/log4j-1.2.17.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/commons-httpclient-3.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/commons-collections-3.2.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jersey-server-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jaxb-impl-2.2.3-1.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/stax-api-1.0-2.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/protobuf-java-2.5.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/xz-1.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jsr305-1.3.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jersey-client-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/guice-servlet-3.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/commons-compress-1.4.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/asm-3.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/commons-cli-1.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jersey-guice-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jackson-xc-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/commons-logging-1.1.3.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jetty-util-6.1.26.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/leveldbjni-all-1.8.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jersey-json-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/javax.inject-1.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/zookeeper-3.4.6.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/servlet-api-2.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jaxb-api-2.2.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jline-0.9.94.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/commons-codec-1.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/lib/jersey-core-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-server-web-proxy-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-api-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-server-common-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-registry-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-server-nodemanager-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-client-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-common-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-applications-unmanaged-am-launcher-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-server-tests-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-server-resourcemanager-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-server-applicationhistoryservice-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/yarn/hadoop-yarn-applications-distributedshell-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/commons-io-2.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/aopalliance-1.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/netty-3.6.2.Final.jar:/usr/local/hadoo
p-2.6.5/share/hadoop/mapreduce/lib/jackson-mapper-asl-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/junit-4.11.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/jackson-core-asl-1.9.13.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/guice-3.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/log4j-1.2.17.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/paranamer-2.3.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/jersey-server-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/hamcrest-core-1.3.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/protobuf-java-2.5.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/xz-1.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/guice-servlet-3.0.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/commons-compress-1.4.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/asm-3.2.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/jersey-guice-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/leveldbjni-all-1.8.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/javax.inject-1.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/avro-1.7.4.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/jersey-core-1.9.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/snappy-java-1.0.4.1.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/lib/hadoop-annotations-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-app-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-common-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-shuffle-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-hs-plugins-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.6.5.jar:/usr/local/hadoop-2.6.5/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.6.5-tests.jar:/usr/local/hadoop-2.6.5/contrib/capacity-scheduler/*.jar STARTUP_MSG: build = https://github.com/apache/hadoop.git -r e8c9fe0b4c252caf2ebf1464220599650f119997; compiled by 'sjlee' on 2016-10-02T23:43Z -STARTUP_MSG: java = 1.8.0_181 +STARTUP_MSG: java = 1.8.0_191 ************************************************************/ 18/12/17 17:47:17 INFO namenode.NameNode: registered UNIX signal handlers for [TERM, HUP, INT] 18/12/17 17:47:17 INFO namenode.NameNode: createNameNode [-format] diff --git a/markdown-file/Wormhole-Install-And-Settings.md b/markdown-file/Wormhole-Install-And-Settings.md index 5c92ac10..172691f8 100644 --- a/markdown-file/Wormhole-Install-And-Settings.md +++ b/markdown-file/Wormhole-Install-And-Settings.md @@ -39,9 +39,12 @@ - **5 台 8C32G 服务器 CentOS 7.5,内存推荐 16G 或以上。** - **为了方便,所有服务器都已经关闭防火墙,并且在云服务上设置安全组对外开通所有端口** + - **全程 root 用户** - 整体部署结构图: -![未命名文件.png](https://upload-images.jianshu.io/upload_images/12159-dc29079158e1e59e.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) + +![未命名文件(1).png](https://upload-images.jianshu.io/upload_images/12159-7a94673ea075873c.png?imageMogr2/auto-orient/strip%7CimageView2/2/w/1240) + #### 服务器基础配置 @@ -58,11 +61,11 @@ hostnamectl --static set-hostname linux05 - 给所有服务器设置 hosts:`vim /etc/hosts` ``` -172.16.0.17 linux01 -172.16.0.43 linux02 -172.16.0.180 linux03 
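-172.16.0.180 linux04
-172.16.0.180 linux05
+172.16.0.55 linux01
+172.16.0.92 linux02
+172.16.0.133 linux03
+172.16.0.159 linux04
+172.16.0.184 linux05
```
+
+- 补充一个分发示例(非原文步骤,仅供参考):hosts 在 linux01 上改好后,可以直接批量覆盖到其余机子,免得逐台编辑(此时还没配免密,会逐台提示输入 root 密码):
+
+```
+# 依赖 linux01 本机已更新的 /etc/hosts 来解析这些主机名
+for h in linux02 linux03 linux04 linux05; do
+    scp /etc/hosts root@${h}:/etc/hosts
+done
+```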
 
- 在 linux01 生成密钥对,设置 SSH 免密登录
@@ -80,29 +83,78 @@ ssh localhost
 
 将公钥复制到其他机子
 ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@linux02(根据提示输入 linux02 密码)
+
 ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@linux03(根据提示输入 linux03 密码)
+
 ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@linux04(根据提示输入 linux04 密码)
+
 ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@linux05(根据提示输入 linux05 密码)
 
 在 linux01 上测试
+ssh linux01
+
 ssh linux02
+
 ssh linux03
+
 ssh linux04
+
+ssh linux05
 ```
 
+- 安装基础软件:`yum install -y zip unzip lrzsz git epel-release wget htop deltarpm`
- 安装 Ansible:`yum install -y ansible`
-- 测试 Ansible:`ansible all -a 'ps'`
- 配置 Inventory 编辑配置文件:`vim /etc/ansible/hosts`
-- 添加如下内容
+- 在文件尾部补上如下内容
 
 ```
 [hadoop-host]
 linux01
 linux02
 linux03
+
+[kafka-host]
+linux04
+
+[wh-host]
+linux05
 ```
 
+- 测试 Ansible:`ansible all -a 'ps'`,必须保证能得到如下结果:
+
+```
+linux01 | CHANGED | rc=0 >>
+  PID TTY          TIME CMD
+11088 pts/7    00:00:00 sh
+11101 pts/7    00:00:00 python
+11102 pts/7    00:00:00 ps
+
+linux02 | CHANGED | rc=0 >>
+  PID TTY          TIME CMD
+10590 pts/1    00:00:00 sh
+10603 pts/1    00:00:00 python
+10604 pts/1    00:00:00 ps
+
+linux05 | CHANGED | rc=0 >>
+  PID TTY          TIME CMD
+10573 pts/0    00:00:00 sh
+10586 pts/0    00:00:00 python
+10587 pts/0    00:00:00 ps
+
+linux03 | CHANGED | rc=0 >>
+  PID TTY          TIME CMD
+10586 pts/1    00:00:00 sh
+10599 pts/1    00:00:00 python
+10600 pts/1    00:00:00 ps
+
+linux04 | CHANGED | rc=0 >>
+  PID TTY          TIME CMD
+10574 pts/1    00:00:00 sh
+10587 pts/1    00:00:00 python
+10588 pts/1    00:00:00 ps
+```
+
+
 #### 服务器基础组件(CentOS 7.x)
 
@@ -121,7 +173,6 @@ linux03
       with_items:
         - systemctl stop firewalld
         - systemctl disable firewalld
-        - setenforce 0
 
     - name: install-basic
       command: "{{ item }}"
@@ -183,8 +234,7 @@ linux03
 
 #### JDK 安装
 
-- JDK(所有服务器):`1.8.0_191`
-- 复制压缩包到所有机子的 /opt 目录下:
+- 将 linux01 下的 JDK 压缩包复制到所有机子的 /opt 目录下:
 
 ```
 scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux02:/opt
 
 scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux03:/opt
 
 scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux04:/opt
 
 scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux05:/opt
 ```
 
-- 创建脚本文件:`vim /opt/jdk8-playbook.yml`
+- 在 linux01 创建脚本文件:`vim /opt/jdk8-playbook.yml`
 
 ```
 - hosts: all
@@ -213,7 +263,7 @@ scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux05:/opt
         path: /etc/profile
         marker: "#{mark} JDK ENV"
         block: |
-          JAVA_HOME={{ java_install_folder }}/jdk1.8.0_191
+          JAVA_HOME=/usr/local/jdk1.8.0_191
           JRE_HOME=$JAVA_HOME/jre
           PATH=$PATH:$JAVA_HOME/bin
           CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
@@ -228,13 +278,12 @@ scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux05:/opt
 
 - 执行命令:`ansible-playbook /opt/jdk8-playbook.yml`
 
-
+- 经过试验,发现还是要自己再手动:`source /etc/profile`。原因其实是:/etc/profile 只在新的登录 shell 里才会加载,playbook 改完文件并不会影响当前已登录的会话。
 
#### Hadoop 集群(HDFS,YARN)
 
 - Hadoop 集群(HDFS,YARN)(linux01、linux02、linux03):`2.6.5`
-- 内容较多,具体参考:[点击我](Hadoop-Install-And-Settings.md)
- Hadoop 环境可以用脚本文件,剩余部分内容请参考上文手工操作:`vim /opt/hadoop-playbook.yml`
 
 ```
 - hosts: all
@@ -260,7 +309,11 @@ scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux05:/opt
         marker: "#{mark} HADOOP ENV"
         block: |
           HADOOP_HOME=/usr/local/hadoop
+          HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
+          YARN_CONF_DIR=$HADOOP_HOME/etc/hadoop
           PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
+          export HADOOP_CONF_DIR
+          export YARN_CONF_DIR
           export HADOOP_HOME
           export PATH
 
@@ -270,6 +323,29 @@ scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux05:/opt
 
 - 执行命令:`ansible-playbook /opt/hadoop-playbook.yml`
 
+- 剩余内容较多,具体参考:[点击我](Hadoop-Install-And-Settings.md)
+  - 解压压缩包:`tar zxvf hadoop-2.6.5.tar.gz`
+  - 这里最好把目录重命名下:`mv /usr/local/hadoop-2.6.5 /usr/local/hadoop`
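+  - 剩下内容从:修改 linux01 配置,开始阅读(也可以先跑一下下面补充的检查示例)
+
+- 补充一个检查示例(非原文步骤,仅供参考):playbook 跑完后,用 Ansible 批量确认各机子的 JDK 和 Hadoop 环境变量是否生效:
+
+```
+# /etc/profile 只在新登录的 shell 里生效,所以这里显式 source 一次再检查
+ansible all -m shell -a 'source /etc/profile && java -version'
+ansible hadoop-host -m shell -a 'source /etc/profile && echo $HADOOP_HOME'
+```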
+
+
+#### Flink
+
+- 须安装在 linux01
+- Flink 单点(linux01):`1.5.1`
+- 拷贝:`cd /usr/local/ && cp /opt/flink-1.5.1-bin-hadoop26-scala_2.11.tgz .`
+- 解压:`tar zxf flink-*.tgz`
+- 修改目录名:`mv /usr/local/flink-1.5.1 /usr/local/flink`
+- 修改配置文件:`vim /usr/local/flink/conf/flink-conf.yaml`
+  - 在文件最前加上:`env.java.home: /usr/local/jdk1.8.0_191`
+- 启动:`cd /usr/local/flink && ./bin/start-cluster.sh`
+- 停止:`cd /usr/local/flink && ./bin/stop-cluster.sh`
+- 查看日志:`tail -300f log/flink-*-standalonesession-*.log`
+- 浏览器访问 WEB 管理:`http://linux01:8081/`
+- yarn 启动
+  - 先停止下本地模式
+  - 测试控制台启动:`cd /usr/local/flink && ./bin/yarn-session.sh -n 2 -jm 2024 -tm 2024`
+  - 有可能会报:`The Flink Yarn cluster has failed`,可能是资源不够,需要调优内存相关参数
 
 
#### Zookeeper
 
@@ -281,26 +357,27 @@ scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux05:/opt
 
 - Kafka 单点(linux04):`0.10.2.2`
 - 上传压缩包到 /opt 目录下
+- 拷贝压缩包:`cd /usr/local && cp /opt/kafka_2.11-0.10.2.2.tgz .`
 - 解压:`tar zxvf kafka_2.11-0.10.2.2.tgz`
 - 删除压缩包并重命名目录:`rm -rf kafka_2.11-0.10.2.2.tgz && mv /usr/local/kafka_2.11-0.10.2.2 /usr/local/kafka`
 - 修改 kafka-server 的配置文件:`vim /usr/local/kafka/config/server.properties`
 
 ```
-listeners=PLAINTEXT://0.0.0.0:9092
-advertised.listeners=PLAINTEXT://linux04:9092
-zookeeper.connect=linux04:2181
-auto.create.topics.enable=true
+034 行:listeners=PLAINTEXT://0.0.0.0:9092
+039 行:advertised.listeners=PLAINTEXT://linux04:9092
+119 行:zookeeper.connect=linux04:2181
+补充  :auto.create.topics.enable=true
 ```
 
- 启动 kafka 服务(必须指定配置文件):`cd /usr/local/kafka && bin/kafka-server-start.sh config/server.properties`
- 后台方式运行 kafka 服务:`cd /usr/local/kafka && bin/kafka-server-start.sh -daemon config/server.properties`
- 停止 kafka 服务:`cd /usr/local/kafka && bin/kafka-server-stop.sh`
- 再开一个终端测试:
-  - 创建 topic 命令:`cd /usr/local/kafka && bin/kafka-topics.sh --create --zookeeper youmeekhost:2181 --replication-factor 1 --partitions 1 --topic my-topic-test`
-  - 查看 topic 命令:`cd /usr/local/kafka && bin/kafka-topics.sh --list --zookeeper youmeekhost:2181`
-  - 删除 topic:`cd /usr/local/kafka && bin/kafka-topics.sh --delete --topic my-topic-test --zookeeper youmeekhost:2181`
-  - 给 topic 发送消息命令:`cd /usr/local/kafka && bin/kafka-console-producer.sh --broker-list youmeekhost:9092 --topic my-topic-test`,然后在出现交互输入框的时候输入你要发送的内容
-  - 再开一个终端,进入 kafka 容器,接受消息:`cd /usr/local/kafka && bin/kafka-console-consumer.sh --bootstrap-server youmeekhost:9092 --topic my-topic-test --from-beginning`
+  - 创建 topic 命令:`cd /usr/local/kafka && bin/kafka-topics.sh --create --zookeeper linux04:2181 --replication-factor 1 --partitions 1 --topic my-topic-test`
+  - 查看 topic 命令:`cd /usr/local/kafka && bin/kafka-topics.sh --list --zookeeper linux04:2181`
+  - 删除 topic:`cd /usr/local/kafka && bin/kafka-topics.sh --delete --topic my-topic-test --zookeeper linux04:2181`
+  - 给 topic 发送消息命令:`cd /usr/local/kafka && bin/kafka-console-producer.sh --broker-list linux04:9092 --topic my-topic-test`,然后在出现交互输入框的时候输入你要发送的内容
+  - 再开一个终端,接收消息:`cd /usr/local/kafka && bin/kafka-console-consumer.sh --bootstrap-server linux04:9092 --topic my-topic-test --from-beginning`
 - 此时发送的终端输入一个内容回车,接收消息的终端就可以收到。
 
#### MySQL
 
@@ -339,15 +416,32 @@ max_allowed_packet = 50M
 
 #### Spark
 
-- Spark 单点(linux05):`2.2.0`
-
+- 须安装在 linux01
+- Spark 单点(linux01):`2.2.0`
+- 上传压缩包到 /opt 目录下
+- 拷贝压缩包:`cd /usr/local && cp /opt/spark-2.2.0-bin-hadoop2.6.tgz .`
+- 解压:`tar zxvf spark-2.2.0-bin-hadoop2.6.tgz`
+- 重命名:`mv /usr/local/spark-2.2.0-bin-hadoop2.6 /usr/local/spark`
+- 增加环境变量:
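+```
+vim /etc/profile
+SPARK_HOME=/usr/local/spark
+PATH=$PATH:${SPARK_HOME}/bin:${SPARK_HOME}/sbin
+export SPARK_HOME
+export PATH
 
-#### Flink
+source /etc/profile
+```
+- 修改配置:`cp $SPARK_HOME/conf/spark-env.sh.template $SPARK_HOME/conf/spark-env.sh`
+- 修改配置:`vim $SPARK_HOME/conf/spark-env.sh`
+- 假设我的 hadoop 路径是:/usr/local/hadoop,则最尾巴增加:
 
-- Flink 单点(linux05):`1.5.1`
+```
+export HADOOP_CONF_DIR=/usr/local/hadoop/etc/hadoop
+```
+- 补充一个验证示例(非原文步骤,仅供参考):用自带的 SparkPi 例子在 YARN 上跑一下,确认 Spark 和 Hadoop 的配置没问题(examples jar 的文件名以实际解压出来的为准):
+
+```
+cd /usr/local/spark && ./bin/spark-submit \
+  --master yarn \
+  --deploy-mode client \
+  --class org.apache.spark.examples.SparkPi \
+  examples/jars/spark-examples_2.11-2.2.0.jar 10
+# 正常的话,输出里能看到类似 Pi is roughly 3.14 的字样
+```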
 
 
 #### 非必须组件
 
@@ -360,7 +454,19 @@ max_allowed_packet = 50M
 
 ## Wormhole 安装 + 配置
 
-- wormhole 单点(linux05):`0.6.0-beta`,2018-12-06 版本
+- 须安装在 linux01
+- wormhole 单点(linux01):`0.6.0-beta`,2018-12-06 版本
+- 先在 linux04 机子的 kafka 创建 topic:
+
+```
+cd /usr/local/kafka && bin/kafka-topics.sh --list --zookeeper linux04:2181
+cd /usr/local/kafka && bin/kafka-topics.sh --create --zookeeper linux04:2181 --replication-factor 1 --partitions 1 --topic source
+cd /usr/local/kafka && bin/kafka-topics.sh --create --zookeeper linux04:2181 --replication-factor 1 --partitions 1 --topic wormhole_feedback
+cd /usr/local/kafka && bin/kafka-topics.sh --create --zookeeper linux04:2181 --replication-factor 1 --partitions 1 --topic wormhole_heartbeat
+```
+
+- 上传压缩包到 /opt 目录下
+- 拷贝压缩包:`cd /usr/local && cp /opt/wormhole-0.6.0-beta.tar.gz .`
 - 解压:`cd /usr/local && tar -xvf wormhole-0.6.0-beta.tar.gz`
 - 修改配置文件:`vim /usr/local/wormhole-0.6.0-beta/conf/application.conf`
 
@@ -370,7 +476,7 @@ akka.http.server.request-timeout = 120s
 
 wormholeServer {
   cluster.id = "" #optional global uuid
-  host = "linux05"
+  host = "linux01"
   port = 8989
   ui.default.language = "Chinese"
   token.timeout = 1
@@ -384,7 +490,7 @@ mysql = {
   db = {
     driver = "com.mysql.jdbc.Driver"
     user = "root"
-    password = "123456"
+    password = "aaabbb123456"
     url = "jdbc:mysql://linux04:3306/wormhole?useUnicode=true&characterEncoding=UTF-8&useSSL=false"
     numThreads = 4
     minConnections = 4
@@ -510,12 +616,13 @@ maintenance = {
 #}
 ```
 
-- 初始化数据库:
 - 创建数据库:`create database wormhole character set utf8;`
 - 初始化表结构脚本路径:
 - 该脚本存在一个问题:初始化脚本和补丁脚本混在一起,所以直接复制执行会有报错,但是报错的部分不影响使用
 - 我是直接把基础 sql 和补丁 sql 分开执行,方便判断。
+- 启动:`sh /usr/local/wormhole-0.6.0-beta/bin/start.sh`
+- 查看 log:`tail -200f /usr/local/wormhole-0.6.0-beta/logs/application.log`
 - 部署完成,浏览器访问:
+- 默认管理员用户名:admin,密码:admin
 
 -------------------------------------------------------------------
 
@@ -524,7 +631,7 @@ maintenance = {
 
 - **参考官网,必须先了解下**:
 - 必须创建用户,后面才能进入 Project 里面创建 Stream / Flow
 - 创建的用户类型必须是:`user`
-
+- 假设这里创建的用户叫做:`user1@bg.com`
 
 -------------------------------------------------------------------
 
@@ -535,12 +642,13 @@ maintenance = {
 
 - Instance 用于绑定各个组件的所在服务连接
 - 一般我们都会选择 Kafka 作为 source,后面的基础也是基于 Kafka 作为 Source 的场景
 - 假设填写实例名:`source_kafka`
+- URL:`linux04:9092`
 
 #### 创建 Database
 
 - 各个组件的具体数据库、Topic 等信息
-- 假设填写 topic:`source`
-
+- 假设填写 Topic Name:`source`
+- Partition:1
 
 #### 创建 Namespace
 
@@ -567,12 +675,15 @@ maintenance = {
 
 #### 创建 Instance
 
 - 假设填写实例名:`sink_mysql`
+- URL:`linux04:3306`
 
 #### 创建 Database
 
 - 假设填写 Database Name:`sink`
 - config 参数:`useUnicode=true&characterEncoding=UTF-8&useSSL=false&rewriteBatchedStatements=true`
+
+
 #### 创建 Namespace
 
 - 假设填写 Tables: `user id`
@@ -597,8 +708,9 @@ maintenance = {
 
 - Stream type 类型选择:`Flink`
 - 假设填写 Name:`wormhole_stream_test`
 
-## Flink Flow(流式作业)
+## Flink Flow
 
+- 假设 Flow name 为:`wormhole_flow_test`
 - Flow 是在 Project 内容页下才能创建
 - 并且是 Project 下面的用户才能创建,admin 用户没有权限
 - Flow 会关联 source 和 sink
 
@@ -622,11 +734,27 @@ maintenance = {
 
 ## Kafka 发送测试数据
 
-- `cd /usr/local/kafka/bin`
-- `./kafka-console-producer.sh --broker-list linux01:9092 --topic source --property "parse.key=true" --property "key.separator=@@@"`
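+- 在 linux04 机子上
+- `cd /usr/local/kafka/bin && ./kafka-console-producer.sh --broker-list linux04:9092 --topic source --property "parse.key=true" --property "key.separator=@@@"`
- 发送 UMS 流消息协议规范格式:
 
```
-data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 1, "name": "test", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:00:00"}
+data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 1, "name": "test1", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:01:00"}
+
+data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 2, "name": "test2", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:02:00"}
+
+data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 3, "name": "test3", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:03:00"}
+
+data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 4, "name": "test4", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:04:00"}
+
+data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 5, "name": "test5", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:05:00"}
+
+data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 6, "name": "test6", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:06:00"}
+
+data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 7, "name": "test7", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:07:00"}
+
+data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 8, "name": "test8", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:08:00"}
+
+data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 9, "name": "test9", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:09:00"}
```
+
+- 补充一个核对示例(非原文步骤,仅供参考):Flow 正常跑起来后,可以到 linux04 的 MySQL 里确认数据确实写进了 sink 库(密码、表名以你实际配置和 Wormhole 实际生成的为准):
+
+```
+mysql -uroot -paaabbb123456 -h linux04 -e "use sink; show tables; select * from user;"
+```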
--property "key.separator=@@@"` +- 在 linux04 机子上 +- `cd /usr/local/kafka/bin && ./kafka-console-producer.sh --broker-list linux04:9092 --topic source --property "parse.key=true" --property "key.separator=@@@"` - 发送 UMS 流消息协议规范格式: ``` -data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 1, "name": "test", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:00:00"} +data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 1, "name": "test1", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:01:00"} + +data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 2, "name": "test2", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:02:00"} + +data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 3, "name": "test3", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:03:00"} + +data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 4, "name": "test4", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:04:00"} + +data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 5, "name": "test5", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:05:00"} + +data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 6, "name": "test6", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:06:00"} + +data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 7, "name": "test7", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:07:00"} + +data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 8, "name": "test8", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:08:00"} + +data_increment_data.kafka.source_kafka.source.ums_extension.*.*.*@@@{"id": 9, "name": "test9", "phone":"18074546423", "city": "Beijing", "time": "2017-12-22 10:09:00"} ``` From 656f0cfd9c03d208d9b4fa896c1a7ca81b800023 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 28 Dec 2018 17:32:09 +0800 Subject: [PATCH 241/330] :construction: Wormhole --- markdown-file/Wormhole-Install-And-Settings.md | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/markdown-file/Wormhole-Install-And-Settings.md b/markdown-file/Wormhole-Install-And-Settings.md index 172691f8..9d464355 100644 --- a/markdown-file/Wormhole-Install-And-Settings.md +++ b/markdown-file/Wormhole-Install-And-Settings.md @@ -37,7 +37,7 @@ #### 特别说明 -- **5 台 8C32G 服务器 CentOS 7.5,内存推荐 16G 或以上。** +- **4 台 8C32G 服务器 CentOS 7.5,内存推荐 16G 或以上。** - **为了方便,所有服务器都已经关闭防火墙,并且在云服务上设置安全组对外开通所有端口** - **全程 root 用户** - 整体部署结构图: @@ -55,7 +55,6 @@ hostnamectl --static set-hostname linux01 hostnamectl --static set-hostname linux02 hostnamectl --static set-hostname linux03 hostnamectl --static set-hostname linux04 -hostnamectl --static set-hostname linux05 ``` - 给所有服务器设置 hosts:`vim /etc/hosts` @@ -65,7 +64,6 @@ hostnamectl --static set-hostname linux05 172.16.0.92 linux02 172.16.0.133 linux03 172.16.0.159 linux04 -172.16.0.184 linux05 ``` - 在 linux01 生成密钥对,设置 SSH 免密登录 @@ -88,7 +86,6 @@ ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@linux03(根据提示输入 linux03 ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@linux04(根据提示输入 linux04 密码) -ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@linux05(根据提示输入 linux05 密码) 在 linux01 上测试 ssh linux01 @@ -98,8 +95,6 @@ ssh linux02 ssh linux03 ssh linux04 - -ssh linux05 ``` - 安装基础软件:`yum install -y zip unzip lrzsz git epel-release wget htop deltarpm` @@ -116,8 +111,6 @@ linux03 
[kafka-host]
linux04
 
-[wh-host]
-linux05
 ```
 
- 测试 Ansible:`ansible all -a 'ps'`,必须保证能得到如下结果:
@@ -135,12 +128,6 @@ linux02 | CHANGED | rc=0 >>
 10603 pts/1    00:00:00 python
 10604 pts/1    00:00:00 ps
 
-linux05 | CHANGED | rc=0 >>
-  PID TTY          TIME CMD
-10573 pts/0    00:00:00 sh
-10586 pts/0    00:00:00 python
-10587 pts/0    00:00:00 ps
-
 linux03 | CHANGED | rc=0 >>
   PID TTY          TIME CMD
 10586 pts/1    00:00:00 sh
@@ -242,8 +229,6 @@ scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux02:/opt
 
 scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux03:/opt
 
 scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux04:/opt
-
-scp -r /opt/jdk-8u191-linux-x64.tar.gz root@linux05:/opt

From 02747b9e72a3ad2e6ef7b129a89acbe157d7ae60 Mon Sep 17 00:00:00 2001
From: judasn Date: Fri, 28 Dec 2018 17:47:45 +0800
Subject: [PATCH 242/330] :construction: Wormhole

---
 markdown-file/Wormhole-Install-And-Settings.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/markdown-file/Wormhole-Install-And-Settings.md b/markdown-file/Wormhole-Install-And-Settings.md
index 9d464355..1a7150bb 100644
--- a/markdown-file/Wormhole-Install-And-Settings.md
+++ b/markdown-file/Wormhole-Install-And-Settings.md
@@ -5,6 +5,7 @@
 
 - **感谢 Wormhole 的官方帮助!官方微信群很友好,这让我很意外,只能感谢了!**
 - 本人大数据和 Ansible 刚看,只会皮毛的皮毛。但是也因为这样的契机促使了我写这篇文章。
+- 因为刚入门,需要了解细节,所以没用 Ambari 这类工具,已经熟悉的可以考虑使用。
 - 希望对你们有帮助。
 
 -------------------------------------------------------------------

From 8f202036f2c159834cceca9afba910d395ce1781 Mon Sep 17 00:00:00 2001
From: judasn Date: Fri, 28 Dec 2018 17:52:20 +0800
Subject: [PATCH 243/330] :construction: Wormhole

---
 markdown-file/Hadoop-Install-And-Settings.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/markdown-file/Hadoop-Install-And-Settings.md b/markdown-file/Hadoop-Install-And-Settings.md
index ad398971..7f42810f 100644
--- a/markdown-file/Hadoop-Install-And-Settings.md
+++ b/markdown-file/Hadoop-Install-And-Settings.md
@@ -494,6 +494,7 @@ tcp6       0      0 :::37481                :::*                    LISTEN
 
 ## 资料
 
+- [如何正确的为 MapReduce 配置内存分配](http://loupipalien.com/2018/03/how-to-properly-configure-the-memory-allocations-for-mapreduce/)
- 
- 
- 

From 5d8534bfddae266f257dca8df0c12071a6940bbc Mon Sep 17 00:00:00 2001
From: judasn Date: Mon, 14 Jan 2019 22:28:12 +0800
Subject: [PATCH 244/330] 2019-01-14

---
 centos-settings/CentOS-Network-Settings.md | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/centos-settings/CentOS-Network-Settings.md b/centos-settings/CentOS-Network-Settings.md
index d94b28ef..ba95ede9 100644
--- a/centos-settings/CentOS-Network-Settings.md
+++ b/centos-settings/CentOS-Network-Settings.md
@@ -44,19 +44,22 @@
 ```
 - 重启网络配置:`service network restart`
 
-## CentOS 7
+## CentOS 7.x
 
### 命令行下设置网络
 
- 查看系统下有哪些网卡:`ls /etc/sysconfig/network-scripts/`,新版本不叫 eth0 这类格式了,比如我当前这个叫做:ifcfg-ens33(你的肯定跟我不一样,但是格式类似)
+- 先备份:`cp /etc/sysconfig/network-scripts/ifcfg-ens33 /etc/sysconfig/network-scripts/ifcfg-ens33.bak`
- 编辑该文件:`vim /etc/sysconfig/network-scripts/ifcfg-ens33`,改为如下信息:(IP 段自己改为自己的网络情况)
 
``` ini
TYPE=Ethernet
+PROXY_METHOD=none
+BROWSER_ONLY=no
BOOTPROTO=static
-IPADDR=192.168.1.126
+IPADDR=192.168.0.127
NETMASK=255.255.255.0
-GATEWAY=192.168.1.1
+GATEWAY=192.168.0.1
DNS1=8.8.8.8
DNS2=114.114.114.114
DEFROUTE=yes
@@ -71,7 +74,7 @@ IPV6_PEERROUTES=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
-UUID=15a16b51-0369-44d7-87b4-667f715a68df
+UUID=b9f01b7d-4ebf-4d3a-a4ec-ae203425bb11
DEVICE=ens33
ONBOOT=yes
```

From 24076b9ee0c0f4051b62aecdd53ba2496a927154 Mon Sep 17 00:00:00 2001
From: judasn Date: Mon, 14 Jan 2019 22:38:46 +0800
Subject: [PATCH 245/330] 2019-01-14

---
 markdown-file/CentOS-Virtual-Machine-Copy-Settings.md | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/markdown-file/CentOS-Virtual-Machine-Copy-Settings.md b/markdown-file/CentOS-Virtual-Machine-Copy-Settings.md
index 7c415bde..3cdbd8f2 100644
--- a/markdown-file/CentOS-Virtual-Machine-Copy-Settings.md
+++ b/markdown-file/CentOS-Virtual-Machine-Copy-Settings.md
@@ -8,8 +8,9 @@
 
-## 修改方法
+## CentOS 6 修改方法
 
+- 设置 hostname:`vim /etc/sysconfig/network`,把 HOSTNAME 的值改为 linux02(CentOS 6 下还没有 hostnamectl 命令)
- 命令:`sudo vim /etc/udev/rules.d/70-persistent-net.rules`
- 该文件中正常此时应该有两行信息
- 在文件中把 NAME="eth0″ 的这一行注释掉
@@ -23,4 +24,10 @@
 - 如果显示两行 UUID 的信息的话,复制不是 System eth0 的那个 UUID 值,下面有用。
 - 编辑:`sudo vim /etc/sysconfig/network-scripts/ifcfg-eth0`
 - 把文件中的 UUID 值 改为上面要求复制的 UUID 值。
-  - 保存配置文件,重启系统,正常应该是可以了。
\ No newline at end of file
+  - 保存配置文件,重启系统,正常应该是可以了。
+
+## CentOS 7 修改方法
+
+- 在 VMware 15 Pro 的情况下,直接 copy 之后,修改网卡配置即可
+- 编辑该文件:`vim /etc/sysconfig/network-scripts/ifcfg-ens33`
+  - 把 ip 地址修改即可
\ No newline at end of file

From aa91f4622fb12f0373940384c34a865d621a8ee2 Mon Sep 17 00:00:00 2001
From: judasn Date: Thu, 31 Jan 2019 13:43:01 +0800
Subject: [PATCH 246/330] :construction: K8S

---
 markdown-file/Docker-Install-And-Usage.md | 53 ++++++++++++++++++++++-
 1 file changed, 52 insertions(+), 1 deletion(-)

diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md
index b5d5aa3e..947c59d7 100644
--- a/markdown-file/Docker-Install-And-Usage.md
+++ b/markdown-file/Docker-Install-And-Usage.md
@@ -810,8 +810,53 @@
 
 - `容器聚合`
- 主要角色:Master、Node
 
+
+#### 安装准备 - Kubernetes 1.13 版本
+
+- 推荐最低 2C2G,优先:2C4G 或以上
+- 特别说明:1.13 之前的版本,由于网络问题,需要各种设置,这里就不再多说了。1.13 之后相对就简单了点。
+- 优先官网软件包:kubeadm
+- 官网资料:
+  - issues 入口:
+  - 源码入口:
+  - 安装指导:
+  - 按官网要求做下检查:
+    - 网络环境:
+    - 端口检查:
+  - 对 Docker 版本的支持,这里官网推荐的是 18.06:
+- 三大核心工具包,都需要各自安装,并且注意版本关系:
+  - `kubeadm`: the command to bootstrap the cluster.
+    - 集群部署、管理工具
+  - `kubelet`: the component that runs on all of the machines in your cluster and does things like starting pods and containers.
+    - 具体执行层面的管理 Pod 和 Docker 工具
+  - `kubectl`: the command line util to talk to your cluster.
+ - 操作 k8s 的命令行入口工具 +- 官网插件安装过程的故障排查: +- 其他部署方案: + - + - + - + +#### 开始安装 - Kubernetes 1.13.2 版本 + +- 官网最新版本: +- 官网 1.13 版本的 changelog: +- 所有节点安装 Docker 18.06,并设置阿里云源 +- 所有节点设置 kubernetes repo 源,并安装 Kubeadm、Kubelet、Kubectl 都设置阿里云的源 +- Kubeadm 初始化集群过程当中,它会下载很多的镜像,默认也是去 Google 家里下载。但是 1.13 新增了一个配置:`--image-repository` 算是救了命。 + #### 主要概念 +- Master 节点,负责集群的调度、集群的管理 + - 常见组件: + - kube-apiserver:API服务 + - kube-scheduler:调度 + - Kube-Controller-Manager:容器编排 + - Etcd:保存了整个集群的状态 + - Kube-proxy:负责为 Service 提供 cluster 内部的服务发现和负载均衡 + - Kube-DNS:负责为整个集群提供 DNS 服务 +- Workers 节点,负责容器相关的处理 + - `Pods` ``` @@ -946,7 +991,13 @@ Master选举确保kube-scheduler和kube-controller-manager高可用 ## 资料 - 书籍:《第一本 Docker 书》 - +- [如何更“优雅”地部署Kubernetes集群](https://juejin.im/entry/5a03f98d6fb9a04524054516) +- []() +- []() +- []() +- []() +- []() +- []() From fb7c53d797d05be41b1bf6954317e105169159b7 Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 31 Jan 2019 23:05:25 +0800 Subject: [PATCH 247/330] 2019-01-31 --- ...er_k8s_disable_firewalld_centos7-aliyun.sh | 49 +++++ markdown-file/Docker-Install-And-Usage.md | 190 +++++++++++++++++- 2 files changed, 237 insertions(+), 2 deletions(-) create mode 100644 favorite-file/shell/install_docker_k8s_disable_firewalld_centos7-aliyun.sh diff --git a/favorite-file/shell/install_docker_k8s_disable_firewalld_centos7-aliyun.sh b/favorite-file/shell/install_docker_k8s_disable_firewalld_centos7-aliyun.sh new file mode 100644 index 00000000..66adafa2 --- /dev/null +++ b/favorite-file/shell/install_docker_k8s_disable_firewalld_centos7-aliyun.sh @@ -0,0 +1,49 @@ +#!/bin/sh + +echo "-----------------------------------------禁用防火墙" +systemctl stop firewalld.service +systemctl disable firewalld.service + +echo "-----------------------------------------安装 docker 所需环境" + +yum install -y yum-utils device-mapper-persistent-data lvm2 + +echo "-----------------------------------------添加 repo(可能网络会很慢,有时候会报:Timeout,所以要多试几次)" +echo "-----------------------------------------官网的地址 yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo" +echo "-----------------------------------------这里用阿里云进行加速,不然可能会出现无法安装,阿里云官网说明:https://help.aliyun.com/document_detail/60742.html" + +yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo +yum makecache fast + +echo "-----------------------------------------开始安装 docker" + +yum install -y docker-ce-18.06.1.ce-3.el7 + +echo "-----------------------------------------启动 Docker" + +systemctl start docker.service +systemctl enable docker.service + +echo "-----------------------------------------安装结束" + +echo "-----------------------------------------docker 加速" + +touch /etc/docker/daemon.json + +cat << EOF >> /etc/docker/daemon.json +{ + "registry-mirrors": ["https://ldhc17y9.mirror.aliyuncs.com"] +} +EOF + +systemctl daemon-reload +systemctl restart docker + +echo "-----------------------------------------运行 hello world 镜像" + +docker run hello-world + + + + + diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index 947c59d7..29cd34d7 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -839,23 +839,209 @@ sudo chmod +x /usr/local/bin/docker-compose #### 开始安装 - Kubernetes 1.13.2 版本 +- 三台机子: + - master-1:`192.168.0.127` + - node-1:`192.168.0.128` + - node-2:`192.168.0.129` - 官网最新版本: - 官网 1.13 版本的 changelog: - 所有节点安装 Docker 18.06,并设置阿里云源 + - 可以参考:[点击我o(∩_∩)o 
](https://github.com/judasn/Linux-Tutorial/blob/master/favorite-file/shell/install_docker_k8s_disable_firewalld_centos7-aliyun.sh) + - 核心,查看可以安装的 Docker 列表:`yum list docker-ce --showduplicates` - 所有节点设置 kubernetes repo 源,并安装 Kubeadm、Kubelet、Kubectl 都设置阿里云的源 - Kubeadm 初始化集群过程当中,它会下载很多的镜像,默认也是去 Google 家里下载。但是 1.13 新增了一个配置:`--image-repository` 算是救了命。 +- 具体流程: + +``` +主机时间同步 +systemctl start chronyd.service +systemctl enable chronyd.service + +systemctl stop firewalld.service +systemctl disable firewalld.service +systemctl disable iptables.service + + +setenforce 0 + +sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config + +swapoff -a && sysctl -w vm.swappiness=0 + + +hostnamectl --static set-hostname k8s-master-1 +hostnamectl --static set-hostname k8s-node-1 +hostnamectl --static set-hostname k8s-node-2 + + +vim /etc/hosts +192.168.0.127 k8s-master-1 +192.168.0.128 k8s-node-1 +192.168.0.129 k8s-node-2 + +master 免密 +生产密钥对 +ssh-keygen -t rsa + + +公钥内容写入 authorized_keys +cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys + +测试: +ssh localhost + +将公钥复制到其他机子 +ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@k8s-node-1(根据提示输入 k8s-node-1 密码) +ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@k8s-node-2(根据提示输入 k8s-node-2 密码) + + +在 linux01 上测试 +ssh k8s-master-1 +ssh k8s-node-1 +ssh k8s-node-2 + + + +vim /etc/yum.repos.d/kubernetes.repo + +[kubernetes] +name=Kubernetes +baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/ +enabled=1 +gpgcheck=1 +repo_gpgcheck=1 +gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg + + +scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-1:/etc/yum.repos.d/ +scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-2:/etc/yum.repos.d/ + + +所有机子 +yum install -y kubelet-1.13.2 kubeadm-1.13.2 kubectl-1.13.2 --disableexcludes=kubernetes + + +vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +最后一行添加:Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs" + + +systemctl enable kubelet && systemctl start kubelet + +kubeadm version +kubectl version + + +vim /etc/sysctl.d/k8s.conf +net.bridge.bridge-nf-call-ip6tables = 1 +net.bridge.bridge-nf-call-iptables = 1 + +sysctl --system + +``` + +- 初始化 master 节点: + +``` + +推荐: +kubeadm init \ +--image-repository registry.aliyuncs.com/google_containers \ +--pod-network-cidr 10.244.0.0/16 \ +--kubernetes-version 1.13.2 \ +--service-cidr 10.96.0.0/12 \ +--apiserver-advertise-address=0.0.0.0 \ +--ignore-preflight-errors=Swap + +10.244.0.0/16是 flannel 插件固定使用的ip段,它的值取决于你准备安装哪个网络插件 + +终端会输出核心内容: +Your Kubernetes master has initialized successfully! + +To start using your cluster, you need to run the following as a regular user: + + mkdir -p $HOME/.kube + sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + +You should now deploy a pod network to the cluster. 
+Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: + https://kubernetes.io/docs/concepts/cluster-administration/addons/ + +You can now join any number of machines by running the following on each node +as root: + + kubeadm join 192.168.0.127:6443 --token 53mly1.yf9llsghle20p2uq --discovery-token-ca-cert-hash sha256:a9f26eef42c30d9f4b20c52058a2eaa696edc3f63ba20be477fe1494ec0146f7 + + + + +也可以使用另外一个流行网络插件 calico: +kubeadm init --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr=192.168.0.0/16 --kubernetes-version v1.13.2 + + + +mkdir -p $HOME/.kube +sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config +sudo chown $(id -u):$(id -g) $HOME/.kube/config + +export KUBECONFIG=/etc/kubernetes/admin.conf +echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.zshrc +source ~/.zshrc + + +查询我们的 token +kubeadm token list + + +kubectl cluster-info +``` + +- 到 node 节点进行加入: + +``` + +kubeadm join 192.168.0.127:6443 --token 53mly1.yf9llsghle20p2uq --discovery-token-ca-cert-hash sha256:a9f26eef42c30d9f4b20c52058a2eaa696edc3f63ba20be477fe1494ec0146f7 + + +在 master 节点上:kubectl get cs +NAME STATUS MESSAGE ERROR +controller-manager Healthy ok +scheduler Healthy ok +etcd-0 Healthy {"health": "true"} +结果都是 Healthy 则表示可以了,不然就得检查。必要时可以用:`kubeadm reset` 重置,重新进行集群初始化 + + + +master 安装 Flannel +cd /opt && wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml + +kubectl apply -f /opt/kube-flannel.yml + + +验证: +kubectl get pods --all-namespaces +kubectl get nodes +如果还是 NotReady,则查看错误信息: +kubectl describe pod kube-scheduler-master.hanli.com -n kube-system +kubectl logs kube-scheduler-master.hanli.com -n kube-system +tail -f /var/log/messages + +``` + + + #### 主要概念 - Master 节点,负责集群的调度、集群的管理 - - 常见组件: + - 常见组件: - kube-apiserver:API服务 - kube-scheduler:调度 - Kube-Controller-Manager:容器编排 - Etcd:保存了整个集群的状态 - Kube-proxy:负责为 Service 提供 cluster 内部的服务发现和负载均衡 - Kube-DNS:负责为整个集群提供 DNS 服务 -- Workers 节点,负责容器相关的处理 +- node 节点,负责容器相关的处理 - `Pods` From 83581195d6bf13d6f338484113ba495af376b9ed Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 31 Jan 2019 23:23:42 +0800 Subject: [PATCH 248/330] 2019-01-31 --- markdown-file/Docker-Install-And-Usage.md | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index 29cd34d7..1081b11f 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -854,21 +854,21 @@ sudo chmod +x /usr/local/bin/docker-compose ``` 主机时间同步 -systemctl start chronyd.service -systemctl enable chronyd.service +systemctl start chronyd.service && systemctl enable chronyd.service systemctl stop firewalld.service systemctl disable firewalld.service systemctl disable iptables.service -setenforce 0 - -sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config +setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config swapoff -a && sysctl -w vm.swappiness=0 + + + hostnamectl --static set-hostname k8s-master-1 hostnamectl --static set-hostname k8s-node-1 hostnamectl --static set-hostname k8s-node-2 @@ -942,6 +942,7 @@ sysctl --system - 初始化 master 节点: ``` +echo 1 > /proc/sys/net/ipv4/ip_forward 推荐: kubeadm init \ @@ -970,7 +971,7 @@ Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: You can now join any number of machines by running the following on each node as root: - kubeadm join 192.168.0.127:6443 --token 
53mly1.yf9llsghle20p2uq --discovery-token-ca-cert-hash sha256:a9f26eef42c30d9f4b20c52058a2eaa696edc3f63ba20be477fe1494ec0146f7 + kubeadm join 192.168.0.127:6443 --token 84kj2n.1kdj36xcsvyzx29i --discovery-token-ca-cert-hash sha256:bcd2edf9878e82db6f73f1253e8d6b6e7b91160db706f7ee59b9a9e32c6099e3 @@ -999,8 +1000,9 @@ kubectl cluster-info - 到 node 节点进行加入: ``` +echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables -kubeadm join 192.168.0.127:6443 --token 53mly1.yf9llsghle20p2uq --discovery-token-ca-cert-hash sha256:a9f26eef42c30d9f4b20c52058a2eaa696edc3f63ba20be477fe1494ec0146f7 +kubeadm join 192.168.0.127:6443 --token 84kj2n.1kdj36xcsvyzx29i --discovery-token-ca-cert-hash sha256:bcd2edf9878e82db6f73f1253e8d6b6e7b91160db706f7ee59b9a9e32c6099e3 在 master 节点上:kubectl get cs From 8f01db3a8ce86f08caa1685c35b68e7fb5791e46 Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 31 Jan 2019 23:44:39 +0800 Subject: [PATCH 249/330] 2019-01-31 --- markdown-file/Docker-Install-And-Usage.md | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index 1081b11f..8a804f76 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -934,6 +934,7 @@ kubectl version vim /etc/sysctl.d/k8s.conf net.bridge.bridge-nf-call-ip6tables = 1 net.bridge.bridge-nf-call-iptables = 1 +net.ipv4.ip_forward=1 sysctl --system @@ -955,6 +956,7 @@ kubeadm init \ 10.244.0.0/16是 flannel 插件固定使用的ip段,它的值取决于你准备安装哪个网络插件 +这个过程会下载一些 docker 镜像,时间可能会比较久,看你网络情况。 终端会输出核心内容: Your Kubernetes master has initialized successfully! @@ -971,7 +973,7 @@ Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: You can now join any number of machines by running the following on each node as root: - kubeadm join 192.168.0.127:6443 --token 84kj2n.1kdj36xcsvyzx29i --discovery-token-ca-cert-hash sha256:bcd2edf9878e82db6f73f1253e8d6b6e7b91160db706f7ee59b9a9e32c6099e3 + kubeadm join 192.168.0.127:6443 --token r7qkjb.2g1zikg6yvrmwlp6 --discovery-token-ca-cert-hash sha256:14601b9269829cd86756ad30aaeb3199158cbc2c150ef8b9dc2ce00f1fa5c2d0 @@ -980,11 +982,6 @@ as root: kubeadm init --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr=192.168.0.0/16 --kubernetes-version v1.13.2 - -mkdir -p $HOME/.kube -sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config -sudo chown $(id -u):$(id -g) $HOME/.kube/config - export KUBECONFIG=/etc/kubernetes/admin.conf echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.zshrc source ~/.zshrc @@ -1002,7 +999,7 @@ kubectl cluster-info ``` echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables -kubeadm join 192.168.0.127:6443 --token 84kj2n.1kdj36xcsvyzx29i --discovery-token-ca-cert-hash sha256:bcd2edf9878e82db6f73f1253e8d6b6e7b91160db706f7ee59b9a9e32c6099e3 +kubeadm join 192.168.0.127:6443 --token r7qkjb.2g1zikg6yvrmwlp6 --discovery-token-ca-cert-hash sha256:14601b9269829cd86756ad30aaeb3199158cbc2c150ef8b9dc2ce00f1fa5c2d0 在 master 节点上:kubectl get cs From 06e4e0acb4b9bf0031beb29dbb7fff30a3165134 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 1 Feb 2019 10:54:57 +0800 Subject: [PATCH 250/330] :construction: K8S --- markdown-file/Docker-Install-And-Usage.md | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index 8a804f76..1616f42e 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ 
b/markdown-file/Docker-Install-And-Usage.md @@ -947,7 +947,7 @@ echo 1 > /proc/sys/net/ipv4/ip_forward 推荐: kubeadm init \ ---image-repository registry.aliyuncs.com/google_containers \ +--image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \ --pod-network-cidr 10.244.0.0/16 \ --kubernetes-version 1.13.2 \ --service-cidr 10.96.0.0/12 \ @@ -977,14 +977,9 @@ as root: - -也可以使用另外一个流行网络插件 calico: -kubeadm init --image-repository registry.aliyuncs.com/google_containers --pod-network-cidr=192.168.0.0/16 --kubernetes-version v1.13.2 - - -export KUBECONFIG=/etc/kubernetes/admin.conf -echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.zshrc -source ~/.zshrc +mkdir -p $HOME/.kube +sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config +sudo chown $(id -u):$(id -g) $HOME/.kube/config 查询我们的 token From 4bfe839f1bc3e0ea2c90782961dfe40e16e1bbc7 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 1 Feb 2019 22:49:59 +0800 Subject: [PATCH 251/330] 2019-02-01 --- markdown-file/Docker-Install-And-Usage.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index 1616f42e..b97aadcb 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -920,23 +920,25 @@ scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-2:/etc/yum.repos.d/ 所有机子 yum install -y kubelet-1.13.2 kubeadm-1.13.2 kubectl-1.13.2 --disableexcludes=kubernetes - +所有机子 vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 最后一行添加:Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs" - +所有机子 systemctl enable kubelet && systemctl start kubelet kubeadm version kubectl version - +必须配置: vim /etc/sysctl.d/k8s.conf net.bridge.bridge-nf-call-ip6tables = 1 net.bridge.bridge-nf-call-iptables = 1 net.ipv4.ip_forward=1 +vm.swappiness=0 -sysctl --system +modprobe br_netfilter +sysctl -p /etc/sysctl.d/k8s.conf ``` @@ -973,7 +975,7 @@ Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: You can now join any number of machines by running the following on each node as root: - kubeadm join 192.168.0.127:6443 --token r7qkjb.2g1zikg6yvrmwlp6 --discovery-token-ca-cert-hash sha256:14601b9269829cd86756ad30aaeb3199158cbc2c150ef8b9dc2ce00f1fa5c2d0 + kubeadm join 192.168.0.127:6443 --token 6y18dy.oy5bt6d5y4nvop28 --discovery-token-ca-cert-hash sha256:a4e8aed696bc0481bb3f6e0af4256d41a1779141241e2684fdc6aa8bcca4d58b From a8cbffae29d9c4521805335a860049f822c4d63e Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 1 Feb 2019 23:01:53 +0800 Subject: [PATCH 252/330] 2019-02-01 --- markdown-file/Docker-Install-And-Usage.md | 42 +++++++++++++++++++---- 1 file changed, 35 insertions(+), 7 deletions(-) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index b97aadcb..d3770373 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -917,9 +917,37 @@ scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-1:/etc/yum.repos.d/ scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-2:/etc/yum.repos.d/ + +所有机子 +iptables -P FORWARD ACCEPT + 所有机子 yum install -y kubelet-1.13.2 kubeadm-1.13.2 kubectl-1.13.2 --disableexcludes=kubernetes + +所有机子 +vim /etc/cni/net.d/10-flannel.conflist,内容 +{ + "name": "cbr0", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } 
+ ] +} + + + 所有机子 vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 最后一行添加:Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs" @@ -987,8 +1015,14 @@ sudo chown $(id -u):$(id -g) $HOME/.kube/config 查询我们的 token kubeadm token list - kubectl cluster-info + + +master 安装 Flannel +cd /opt && wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml + +kubectl apply -f /opt/kube-flannel.yml + ``` - 到 node 节点进行加入: @@ -1008,12 +1042,6 @@ etcd-0 Healthy {"health": "true"} -master 安装 Flannel -cd /opt && wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml - -kubectl apply -f /opt/kube-flannel.yml - - 验证: kubectl get pods --all-namespaces kubectl get nodes From 90ea22b3296a936f38f8d757008021e574c99562 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 1 Feb 2019 23:10:40 +0800 Subject: [PATCH 253/330] 2019-02-01 --- markdown-file/Docker-Install-And-Usage.md | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index d3770373..30d899b5 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -965,8 +965,11 @@ net.bridge.bridge-nf-call-iptables = 1 net.ipv4.ip_forward=1 vm.swappiness=0 -modprobe br_netfilter -sysctl -p /etc/sysctl.d/k8s.conf + +scp -r /etc/sysctl.d/k8s.conf root@k8s-node-1:/etc/sysctl.d/ +scp -r /etc/sysctl.d/k8s.conf root@k8s-node-2:/etc/sysctl.d/ + +modprobe br_netfilter && sysctl -p /etc/sysctl.d/k8s.conf ``` @@ -1003,10 +1006,11 @@ Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: You can now join any number of machines by running the following on each node as root: - kubeadm join 192.168.0.127:6443 --token 6y18dy.oy5bt6d5y4nvop28 --discovery-token-ca-cert-hash sha256:a4e8aed696bc0481bb3f6e0af4256d41a1779141241e2684fdc6aa8bcca4d58b + kubeadm join 192.168.0.127:6443 --token 6m0emc.x9uim283uzevn3pm --discovery-token-ca-cert-hash sha256:c8b1b72de1eabc71df5490afa7cd8dd1c1952234e65b30262ed5084c9d1f10c2 +master 机子: mkdir -p $HOME/.kube sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config sudo chown $(id -u):$(id -g) $HOME/.kube/config @@ -1030,7 +1034,7 @@ kubectl apply -f /opt/kube-flannel.yml ``` echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables -kubeadm join 192.168.0.127:6443 --token r7qkjb.2g1zikg6yvrmwlp6 --discovery-token-ca-cert-hash sha256:14601b9269829cd86756ad30aaeb3199158cbc2c150ef8b9dc2ce00f1fa5c2d0 +kubeadm join 192.168.0.127:6443 --token 6m0emc.x9uim283uzevn3pm --discovery-token-ca-cert-hash sha256:c8b1b72de1eabc71df5490afa7cd8dd1c1952234e65b30262ed5084c9d1f10c2 在 master 节点上:kubectl get cs From 6e4226de921c1f3991783577040c704998429708 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 1 Feb 2019 23:29:36 +0800 Subject: [PATCH 254/330] 2019-02-01 --- markdown-file/Docker-Install-And-Usage.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index 30d899b5..228d8b1b 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -1047,11 +1047,11 @@ etcd-0 Healthy {"health": "true"} 验证: -kubectl get pods --all-namespaces kubectl get nodes 如果还是 NotReady,则查看错误信息: -kubectl describe pod kube-scheduler-master.hanli.com -n kube-system -kubectl logs kube-scheduler-master.hanli.com -n kube-system +kubectl get pods 
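--all-namespaces,其中:Pending/ContainerCreating/ImagePullBackOff 都是 Pod 没有就绪,我们可以这样查看对应 Pod 遇到了什么问题
+kubectl describe pod <pod-name> --namespace=kube-system
+或者:kubectl logs <pod-name> -n kube-system
 tail -f /var/log/messages
+
+# 补充一个冒烟测试示例(非原文内容,仅供参考):节点都 Ready 之后,
+# 可以跑一个 nginx Deployment 验证调度和网络是否正常(假设机子能从公网拉取 nginx 镜像)
+kubectl create deployment nginx --image=nginx
+kubectl get pods -o wide
 
 ```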
From 1849cccfbcdeba8d6c8889e3e3bebd4d0554d92f Mon Sep 17 00:00:00 2001
From: judasn Date: Sun, 10 Feb 2019 22:55:24 +0800
Subject: [PATCH 255/330] =?UTF-8?q?2019-02-10=20=E5=AE=8C=E5=96=84=20K8S?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 markdown-file/Docker-Install-And-Usage.md | 402 --------------------
 markdown-file/K8S-Install-And-Usage.md    | 442 ++++++++++++++++++++++
 2 files changed, 442 insertions(+), 402 deletions(-)
 create mode 100644 markdown-file/K8S-Install-And-Usage.md

diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md
index 228d8b1b..20db7079 100644
--- a/markdown-file/Docker-Install-And-Usage.md
+++ b/markdown-file/Docker-Install-And-Usage.md
@@ -796,407 +796,6 @@ sudo chmod +x /usr/local/bin/docker-compose
 
 - Docker Swarm 是一个 Docker 集群管理工具
 
-## Kubernetes
-
-- 目前流行的容器编排系统
-- 简称:K8S
-- 官网:
-- 主要解决几个问题:
-  - `调度`
-  - `生命周期及健康状况`
-  - `服务发现`
-  - `监控`
-  - `认证`
-  - `容器聚合`
-- 主要角色:Master、Node
-
-
-#### 安装准备 - Kubernetes 1.13 版本
-
-- 推荐最低 2C2G,优先:2C4G 或以上
-- 特别说明:1.13 之前的版本,由于网络问题,需要各种设置,这里就不再多说了。1.13 之后相对就简单了点。
-- 优先官网软件包:kubeadm
-- 官网资料:
-  - issues 入口:
-  - 源码入口:
-  - 安装指导:
-  - 按官网要求做下检查:
-    - 网络环境:
-    - 端口检查:
-  - 对 Docker 版本的支持,这里官网推荐的是 18.06:
-- 三大核心工具包,都需要各自安装,并且注意版本关系:
-  - `kubeadm`: the command to bootstrap the cluster.
-    - 集群部署、管理工具
-  - `kubelet`: the component that runs on all of the machines in your cluster and does things like starting pods and containers.
-    - 具体执行层面的管理 Pod 和 Docker 工具
-  - `kubectl`: the command line util to talk to your cluster.
- - 操作 k8s 的命令行入口工具 -- 官网插件安装过程的故障排查: -- 其他部署方案: - - - - - - - -#### 开始安装 - Kubernetes 1.13.2 版本 - -- 三台机子: - - master-1:`192.168.0.127` - - node-1:`192.168.0.128` - - node-2:`192.168.0.129` -- 官网最新版本: -- 官网 1.13 版本的 changelog: -- 所有节点安装 Docker 18.06,并设置阿里云源 - - 可以参考:[点击我o(∩_∩)o ](https://github.com/judasn/Linux-Tutorial/blob/master/favorite-file/shell/install_docker_k8s_disable_firewalld_centos7-aliyun.sh) - - 核心,查看可以安装的 Docker 列表:`yum list docker-ce --showduplicates` -- 所有节点设置 kubernetes repo 源,并安装 Kubeadm、Kubelet、Kubectl 都设置阿里云的源 -- Kubeadm 初始化集群过程当中,它会下载很多的镜像,默认也是去 Google 家里下载。但是 1.13 新增了一个配置:`--image-repository` 算是救了命。 -- 具体流程: - -``` -主机时间同步 -systemctl start chronyd.service && systemctl enable chronyd.service - -systemctl stop firewalld.service -systemctl disable firewalld.service -systemctl disable iptables.service - - -setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config - -swapoff -a && sysctl -w vm.swappiness=0 - - - - - -hostnamectl --static set-hostname k8s-master-1 -hostnamectl --static set-hostname k8s-node-1 -hostnamectl --static set-hostname k8s-node-2 - - -vim /etc/hosts -192.168.0.127 k8s-master-1 -192.168.0.128 k8s-node-1 -192.168.0.129 k8s-node-2 - -master 免密 -生产密钥对 -ssh-keygen -t rsa - - -公钥内容写入 authorized_keys -cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys - -测试: -ssh localhost - -将公钥复制到其他机子 -ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@k8s-node-1(根据提示输入 k8s-node-1 密码) -ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@k8s-node-2(根据提示输入 k8s-node-2 密码) - - -在 linux01 上测试 -ssh k8s-master-1 -ssh k8s-node-1 -ssh k8s-node-2 - - - -vim /etc/yum.repos.d/kubernetes.repo - -[kubernetes] -name=Kubernetes -baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/ -enabled=1 -gpgcheck=1 -repo_gpgcheck=1 -gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg - - -scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-1:/etc/yum.repos.d/ -scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-2:/etc/yum.repos.d/ - - - -所有机子 -iptables -P FORWARD ACCEPT - -所有机子 -yum install -y kubelet-1.13.2 kubeadm-1.13.2 kubectl-1.13.2 --disableexcludes=kubernetes - - -所有机子 -vim /etc/cni/net.d/10-flannel.conflist,内容 -{ - "name": "cbr0", - "plugins": [ - { - "type": "flannel", - "delegate": { - "hairpinMode": true, - "isDefaultGateway": true - } - }, - { - "type": "portmap", - "capabilities": { - "portMappings": true - } - } - ] -} - - - -所有机子 -vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf -最后一行添加:Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs" - -所有机子 -systemctl enable kubelet && systemctl start kubelet - -kubeadm version -kubectl version - -必须配置: -vim /etc/sysctl.d/k8s.conf -net.bridge.bridge-nf-call-ip6tables = 1 -net.bridge.bridge-nf-call-iptables = 1 -net.ipv4.ip_forward=1 -vm.swappiness=0 - - -scp -r /etc/sysctl.d/k8s.conf root@k8s-node-1:/etc/sysctl.d/ -scp -r /etc/sysctl.d/k8s.conf root@k8s-node-2:/etc/sysctl.d/ - -modprobe br_netfilter && sysctl -p /etc/sysctl.d/k8s.conf - -``` - -- 初始化 master 节点: - -``` -echo 1 > /proc/sys/net/ipv4/ip_forward - -推荐: -kubeadm init \ ---image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \ ---pod-network-cidr 10.244.0.0/16 \ ---kubernetes-version 1.13.2 \ ---service-cidr 10.96.0.0/12 \ ---apiserver-advertise-address=0.0.0.0 \ ---ignore-preflight-errors=Swap - -10.244.0.0/16是 flannel 插件固定使用的ip段,它的值取决于你准备安装哪个网络插件 - -这个过程会下载一些 docker 镜像,时间可能会比较久,看你网络情况。 -终端会输出核心内容: -Your Kubernetes master has 
initialized successfully! - -To start using your cluster, you need to run the following as a regular user: - - mkdir -p $HOME/.kube - sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config - sudo chown $(id -u):$(id -g) $HOME/.kube/config - -You should now deploy a pod network to the cluster. -Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: - https://kubernetes.io/docs/concepts/cluster-administration/addons/ - -You can now join any number of machines by running the following on each node -as root: - - kubeadm join 192.168.0.127:6443 --token 6m0emc.x9uim283uzevn3pm --discovery-token-ca-cert-hash sha256:c8b1b72de1eabc71df5490afa7cd8dd1c1952234e65b30262ed5084c9d1f10c2 - - - -master 机子: -mkdir -p $HOME/.kube -sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config -sudo chown $(id -u):$(id -g) $HOME/.kube/config - - -查询我们的 token -kubeadm token list - -kubectl cluster-info - - -master 安装 Flannel -cd /opt && wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml - -kubectl apply -f /opt/kube-flannel.yml - -``` - -- 到 node 节点进行加入: - -``` -echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables - -kubeadm join 192.168.0.127:6443 --token 6m0emc.x9uim283uzevn3pm --discovery-token-ca-cert-hash sha256:c8b1b72de1eabc71df5490afa7cd8dd1c1952234e65b30262ed5084c9d1f10c2 - - -在 master 节点上:kubectl get cs -NAME STATUS MESSAGE ERROR -controller-manager Healthy ok -scheduler Healthy ok -etcd-0 Healthy {"health": "true"} -结果都是 Healthy 则表示可以了,不然就得检查。必要时可以用:`kubeadm reset` 重置,重新进行集群初始化 - - - -验证: -kubectl get nodes -如果还是 NotReady,则查看错误信息: -kubectl get pods --all-namespaces,其中:Pending/ContainerCreating/ImagePullBackOff 都是 Pod 没有就绪,我们可以这样查看对应 Pod 遇到了什么问题 -kubectl describe pod --namespace=kube-system -或者:kubectl logs -n kube-system -tail -f /var/log/messages - -``` - - - - -#### 主要概念 - -- Master 节点,负责集群的调度、集群的管理 - - 常见组件: - - kube-apiserver:API服务 - - kube-scheduler:调度 - - Kube-Controller-Manager:容器编排 - - Etcd:保存了整个集群的状态 - - Kube-proxy:负责为 Service 提供 cluster 内部的服务发现和负载均衡 - - Kube-DNS:负责为整个集群提供 DNS 服务 -- node 节点,负责容器相关的处理 - -- `Pods` - -``` -创建,调度以及管理的最小单元 -共存的一组容器的集合 -容器共享PID,网络,IPC以及UTS命名空间 -容器共享存储卷 -短暂存在 -``` - -- `Volumes` - -``` -数据持久化 -Pod中容器共享数据 -生命周期 -支持多种类型的数据卷 – emptyDir, hostpath, gcePersistentDisk, awsElasticBlockStore, nfs, iscsi, glusterfs, secrets -``` - -- `Labels` - -``` -用以标示对象(如Pod)的key/value对 -组织并选择对象子集 -``` - -- `Replication Controllers` - -``` -确保在任一时刻运行指定数目的Pod -容器重新调度 -规模调整 -在线升级 -多发布版本跟踪 -``` - -- `Services` - -``` -抽象一系列Pod并定义其访问规则 -固定IP地址和DNS域名 -通过环境变量和DNS发现服务 -负载均衡 -外部服务 – ClusterIP, NodePort, LoadBalancer -``` - - -#### 主要组成模块 - -- `etcd` - -``` -高可用的Key/Value存储 -只有apiserver有读写权限 -使用etcd集群确保数据可靠性 -``` - -- `apiserver` - -``` -Kubernetes系统入口, REST -认证 -授权 -访问控制 -服务帐号 -资源限制 -``` - -- `kube-scheduler` - -``` -资源需求 -服务需求 -硬件/软件/策略限制 -关联性和非关联性 -数据本地化 -``` - -- `kube-controller-manager` - -``` -Replication controller -Endpoint controller -Namespace controller -Serviceaccount controller -``` - -- `kubelet` - -``` -节点管理器 -确保调度到本节点的Pod的运行和健康 -``` - -- `kube-proxy` - -``` -Pod网络代理 -TCP/UDP请求转发 -负载均衡(Round Robin) -``` - -- `服务发现` - -``` -环境变量 -DNS – kube2sky, etcd,skydns -``` - -- `网络` - -``` -容器间互相通信 -节点和容器间互相通信 -每个Pod使用一个全局唯一的IP -``` - -- `高可用` - -``` -kubelet保证每一个master节点的服务正常运行 -系统监控程序确保kubelet正常运行 -Etcd集群 -多个apiserver进行负载均衡 -Master选举确保kube-scheduler和kube-controller-manager高可用 -``` ## Harbor 镜像私有仓库 @@ -1205,7 +804,6 @@ Master选举确保kube-scheduler和kube-controller-manager高可用 ## 资料 - 书籍:《第一本 Docker 书》 -- 
[如何更“优雅”地部署Kubernetes集群](https://juejin.im/entry/5a03f98d6fb9a04524054516) - []() - []() - []() diff --git a/markdown-file/K8S-Install-And-Usage.md b/markdown-file/K8S-Install-And-Usage.md new file mode 100644 index 00000000..263cfe32 --- /dev/null +++ b/markdown-file/K8S-Install-And-Usage.md @@ -0,0 +1,442 @@ + + +# Kubernets(K8S) 使用 + +## 环境说明 + +- CentOS 7.5(不准确地说:要求必须是 CentOS 7 64位) +- Docker + +## Kubernetes + +- 目前流行的容器编排系统 +- 简称:K8S +- 官网: +- 主要解决几个问题: + - `调度` + - `生命周期及健康状况` + - `服务发现` + - `监控` + - `认证` + - `容器聚合` +- 主要角色:Master、Node + + +#### 安装准备 - Kubernetes 1.13 版本 + +- 推荐最低 2C2G,优先:2C4G 或以上 +- 特别说明:1.13 之前的版本,由于网络问题,需要各种设置,这里就不再多说了。1.13 之后相对就简单了点。 +- 优先官网软件包:kubeadm +- 官网资料: + - issues 入口: + - 源码入口: + - 安装指导: + - 按官网要求做下检查: + - 网络环境: + - 端口检查: + - **对 Docker 版本的支持,这里官网推荐的是 18.06**: +- 三大核心工具包,都需要各自安装,并且注意版本关系: + - `kubeadm`: the command to bootstrap the cluster. + - 集群部署、管理工具 + - `kubelet`: the component that runs on all of the machines in your cluster and does things like starting pods and containers. + - 具体执行层面的管理 Pod 和 Docker 工具 + - `kubectl`: the command line util to talk to your cluster. + - 操作 k8s 的命令行入口工具 +- 官网插件安装过程的故障排查: +- 其他部署方案: + - + - + - + +#### 开始安装 - Kubernetes 1.13.2 版本 + +- 三台机子: + - master-1:`192.168.0.127` + - node-1:`192.168.0.128` + - node-2:`192.168.0.129` +- 官网最新版本: +- 官网 1.13 版本的 changelog: +- **所有节点安装 Docker 18.06,并设置阿里云源** + - 可以参考:[点击我o(∩_∩)o ](https://github.com/judasn/Linux-Tutorial/blob/master/favorite-file/shell/install_docker_k8s_disable_firewalld_centos7-aliyun.sh) + - 核心,查看可以安装的 Docker 列表:`yum list docker-ce --showduplicates` +- 所有节点设置 kubernetes repo 源,并安装 Kubeadm、Kubelet、Kubectl 都设置阿里云的源 +- Kubeadm 初始化集群过程当中,它会下载很多的镜像,默认也是去 Google 家里下载。但是 1.13 新增了一个配置:`--image-repository` 算是救了命。 +- 具体流程: + +``` +主机时间同步 +systemctl start chronyd.service && systemctl enable chronyd.service + +systemctl stop firewalld.service +systemctl disable firewalld.service +systemctl disable iptables.service + + +setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config + +swapoff -a && sysctl -w vm.swappiness=0 + + + + + +hostnamectl --static set-hostname k8s-master-1 +hostnamectl --static set-hostname k8s-node-1 +hostnamectl --static set-hostname k8s-node-2 + + +vim /etc/hosts +192.168.0.127 k8s-master-1 +192.168.0.128 k8s-node-1 +192.168.0.129 k8s-node-2 + +master 免密 +生产密钥对 +ssh-keygen -t rsa + + +公钥内容写入 authorized_keys +cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys + +测试: +ssh localhost + +将公钥复制到其他机子 +ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@k8s-node-1(根据提示输入 k8s-node-1 密码) +ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@k8s-node-2(根据提示输入 k8s-node-2 密码) + + +在 linux01 上测试 +ssh k8s-master-1 +ssh k8s-node-1 +ssh k8s-node-2 + + + +vim /etc/yum.repos.d/kubernetes.repo + +[kubernetes] +name=Kubernetes +baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/ +enabled=1 +gpgcheck=1 +repo_gpgcheck=1 +gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg + + +scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-1:/etc/yum.repos.d/ +scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-2:/etc/yum.repos.d/ + + + +所有机子 +iptables -P FORWARD ACCEPT + +所有机子 +yum install -y kubelet-1.13.2 kubeadm-1.13.2 kubectl-1.13.2 --disableexcludes=kubernetes + + +所有机子 +vim /etc/cni/net.d/10-flannel.conflist,内容 +{ + "name": "cbr0", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": 
"portmap", + "capabilities": { + "portMappings": true + } + } + ] +} + + + +所有机子 +vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +最后一行添加:Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs" + +所有机子 +systemctl enable kubelet && systemctl start kubelet + +kubeadm version +kubectl version + +必须配置: +vim /etc/sysctl.d/k8s.conf +net.bridge.bridge-nf-call-ip6tables = 1 +net.bridge.bridge-nf-call-iptables = 1 +net.ipv4.ip_forward=1 +vm.swappiness=0 + + +scp -r /etc/sysctl.d/k8s.conf root@k8s-node-1:/etc/sysctl.d/ +scp -r /etc/sysctl.d/k8s.conf root@k8s-node-2:/etc/sysctl.d/ + +modprobe br_netfilter && sysctl -p /etc/sysctl.d/k8s.conf + +``` + +- 初始化 master 节点: + +``` +echo 1 > /proc/sys/net/ipv4/ip_forward + +推荐: +kubeadm init \ +--image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \ +--pod-network-cidr 10.244.0.0/16 \ +--kubernetes-version 1.13.2 \ +--service-cidr 10.96.0.0/12 \ +--apiserver-advertise-address=0.0.0.0 \ +--ignore-preflight-errors=Swap + +10.244.0.0/16是 flannel 插件固定使用的ip段,它的值取决于你准备安装哪个网络插件 + +这个过程会下载一些 docker 镜像,时间可能会比较久,看你网络情况。 +终端会输出核心内容: +Your Kubernetes master has initialized successfully! + +To start using your cluster, you need to run the following as a regular user: + + mkdir -p $HOME/.kube + sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + +You should now deploy a pod network to the cluster. +Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: + https://kubernetes.io/docs/concepts/cluster-administration/addons/ + +You can now join any number of machines by running the following on each node +as root: + + kubeadm join 192.168.0.127:6443 --token 6m0emc.x9uim283uzevn3pm --discovery-token-ca-cert-hash sha256:c8b1b72de1eabc71df5490afa7cd8dd1c1952234e65b30262ed5084c9d1f10c2 + + + +master 机子: +mkdir -p $HOME/.kube +sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config +sudo chown $(id -u):$(id -g) $HOME/.kube/config + + +查询我们的 token +kubeadm token list + +kubectl cluster-info + + +master 安装 Flannel +cd /opt && wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml + +kubectl apply -f /opt/kube-flannel.yml + +``` + +- 到 node 节点进行加入: + +``` +echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables + +kubeadm join 192.168.0.127:6443 --token 6m0emc.x9uim283uzevn3pm --discovery-token-ca-cert-hash sha256:c8b1b72de1eabc71df5490afa7cd8dd1c1952234e65b30262ed5084c9d1f10c2 + + +在 master 节点上:kubectl get cs +NAME STATUS MESSAGE ERROR +controller-manager Healthy ok +scheduler Healthy ok +etcd-0 Healthy {"health": "true"} +结果都是 Healthy 则表示可以了,不然就得检查。必要时可以用:`kubeadm reset` 重置,重新进行集群初始化 + + + +验证: +kubectl get nodes +如果还是 NotReady,则查看错误信息: +kubectl get pods --all-namespaces,其中:Pending/ContainerCreating/ImagePullBackOff 都是 Pod 没有就绪,我们可以这样查看对应 Pod 遇到了什么问题 +kubectl describe pod --namespace=kube-system +或者:kubectl logs -n kube-system +tail -f /var/log/messages + +``` + + + + +#### 主要概念 + +- Master 节点,负责集群的调度、集群的管理 + - 常见组件: + - kube-apiserver:API服务 + - kube-scheduler:调度 + - Kube-Controller-Manager:容器编排 + - Etcd:保存了整个集群的状态 + - Kube-proxy:负责为 Service 提供 cluster 内部的服务发现和负载均衡 + - Kube-DNS:负责为整个集群提供 DNS 服务 +- node 节点,负责容器相关的处理 + +- `Pods` + +``` +创建,调度以及管理的最小单元 +共存的一组容器的集合 +容器共享PID,网络,IPC以及UTS命名空间 +容器共享存储卷 +短暂存在 +``` + +- `Volumes` + +``` +数据持久化 +Pod中容器共享数据 +生命周期 +支持多种类型的数据卷 – emptyDir, hostpath, gcePersistentDisk, awsElasticBlockStore, nfs, iscsi, glusterfs, secrets +``` + +- `Labels` + +``` +用以标示对象(如Pod)的key/value对 +组织并选择对象子集 +``` + +- `Replication 
Controllers` + +``` +确保在任一时刻运行指定数目的Pod +容器重新调度 +规模调整 +在线升级 +多发布版本跟踪 +``` + +- `Services` + +``` +抽象一系列Pod并定义其访问规则 +固定IP地址和DNS域名 +通过环境变量和DNS发现服务 +负载均衡 +外部服务 – ClusterIP, NodePort, LoadBalancer +``` + + +#### 主要组成模块 + +- `etcd` + +``` +高可用的Key/Value存储 +只有apiserver有读写权限 +使用etcd集群确保数据可靠性 +``` + +- `apiserver` + +``` +Kubernetes系统入口, REST +认证 +授权 +访问控制 +服务帐号 +资源限制 +``` + +- `kube-scheduler` + +``` +资源需求 +服务需求 +硬件/软件/策略限制 +关联性和非关联性 +数据本地化 +``` + +- `kube-controller-manager` + +``` +Replication controller +Endpoint controller +Namespace controller +Serviceaccount controller +``` + +- `kubelet` + +``` +节点管理器 +确保调度到本节点的Pod的运行和健康 +``` + +- `kube-proxy` + +``` +Pod网络代理 +TCP/UDP请求转发 +负载均衡(Round Robin) +``` + +- `服务发现` + +``` +环境变量 +DNS – kube2sky, etcd,skydns +``` + +- `网络` + +``` +容器间互相通信 +节点和容器间互相通信 +每个Pod使用一个全局唯一的IP +``` + +- `高可用` + +``` +kubelet保证每一个master节点的服务正常运行 +系统监控程序确保kubelet正常运行 +Etcd集群 +多个apiserver进行负载均衡 +Master选举确保kube-scheduler和kube-controller-manager高可用 +``` + + +## 资料 + +- [如何更“优雅”地部署Kubernetes集群](https://juejin.im/entry/5a03f98d6fb9a04524054516) +- []() +- []() +- []() +- []() +- []() +- []() + + + + + + + + + + + + + + + + + + + + + From 4273bbe9fdb46933c0d1f1ef6b267209c76227ea Mon Sep 17 00:00:00 2001 From: judasn Date: Sun, 10 Feb 2019 23:12:37 +0800 Subject: [PATCH 256/330] =?UTF-8?q?2019-02-10=20=E5=AE=8C=E5=96=84=20K8S?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/K8S-Install-And-Usage.md | 88 ++++++++++++++++++++++++-- 1 file changed, 82 insertions(+), 6 deletions(-) diff --git a/markdown-file/K8S-Install-And-Usage.md b/markdown-file/K8S-Install-And-Usage.md index 263cfe32..5deab113 100644 --- a/markdown-file/K8S-Install-And-Usage.md +++ b/markdown-file/K8S-Install-And-Usage.md @@ -48,7 +48,7 @@ - - -#### 开始安装 - Kubernetes 1.13.2 版本 +#### 开始安装 - Kubernetes 1.13.3 版本 - 三台机子: - master-1:`192.168.0.127` @@ -133,11 +133,11 @@ scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-2:/etc/yum.repos.d/ iptables -P FORWARD ACCEPT 所有机子 -yum install -y kubelet-1.13.2 kubeadm-1.13.2 kubectl-1.13.2 --disableexcludes=kubernetes +yum install -y kubelet-1.13.3 kubeadm-1.13.3 kubectl-1.13.3 --disableexcludes=kubernetes 所有机子 -vim /etc/cni/net.d/10-flannel.conflist,内容 +mkdir -p /etc/cni/net.d && vim /etc/cni/net.d/10-flannel.conflist,内容 { "name": "cbr0", "plugins": [ @@ -193,7 +193,7 @@ echo 1 > /proc/sys/net/ipv4/ip_forward kubeadm init \ --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \ --pod-network-cidr 10.244.0.0/16 \ ---kubernetes-version 1.13.2 \ +--kubernetes-version 1.13.3 \ --service-cidr 10.96.0.0/12 \ --apiserver-advertise-address=0.0.0.0 \ --ignore-preflight-errors=Swap @@ -202,6 +202,56 @@ kubeadm init \ 这个过程会下载一些 docker 镜像,时间可能会比较久,看你网络情况。 终端会输出核心内容: +[init] Using Kubernetes version: v1.13.3 +[preflight] Running pre-flight checks +[preflight] Pulling images required for setting up a Kubernetes cluster +[preflight] This might take a minute or two, depending on the speed of your internet connection +[preflight] You can also perform this action in beforehand using 'kubeadm config images pull' +[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" +[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" +[kubelet-start] Activating the kubelet service +[certs] Using certificateDir folder "/etc/kubernetes/pki" +[certs] Generating "ca" certificate and key +[certs] Generating "apiserver" certificate and key +[certs] 
apiserver serving cert is signed for DNS names [k8s-master-1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.0.127] +[certs] Generating "apiserver-kubelet-client" certificate and key +[certs] Generating "front-proxy-ca" certificate and key +[certs] Generating "front-proxy-client" certificate and key +[certs] Generating "etcd/ca" certificate and key +[certs] Generating "etcd/healthcheck-client" certificate and key +[certs] Generating "apiserver-etcd-client" certificate and key +[certs] Generating "etcd/server" certificate and key +[certs] etcd/server serving cert is signed for DNS names [k8s-master-1 localhost] and IPs [192.168.0.127 127.0.0.1 ::1] +[certs] Generating "etcd/peer" certificate and key +[certs] etcd/peer serving cert is signed for DNS names [k8s-master-1 localhost] and IPs [192.168.0.127 127.0.0.1 ::1] +[certs] Generating "sa" key and public key +[kubeconfig] Using kubeconfig folder "/etc/kubernetes" +[kubeconfig] Writing "admin.conf" kubeconfig file +[kubeconfig] Writing "kubelet.conf" kubeconfig file +[kubeconfig] Writing "controller-manager.conf" kubeconfig file +[kubeconfig] Writing "scheduler.conf" kubeconfig file +[control-plane] Using manifest folder "/etc/kubernetes/manifests" +[control-plane] Creating static Pod manifest for "kube-apiserver" +[control-plane] Creating static Pod manifest for "kube-controller-manager" +[control-plane] Creating static Pod manifest for "kube-scheduler" +[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" +[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s +[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s +[apiclient] All control plane components are healthy after 32.002189 seconds +[uploadconfig] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace +[kubelet] Creating a ConfigMap "kubelet-config-1.13" in namespace kube-system with the configuration for the kubelets in the cluster +[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "k8s-master-1" as an annotation +[mark-control-plane] Marking the node k8s-master-1 as control-plane by adding the label "node-role.kubernetes.io/master=''" +[mark-control-plane] Marking the node k8s-master-1 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule] +[bootstrap-token] Using token: 3ag6sz.y8rmcz5xec50xkz1 +[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles +[bootstraptoken] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials +[bootstraptoken] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token +[bootstraptoken] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster +[bootstraptoken] creating the "cluster-info" ConfigMap in the "kube-public" namespace +[addons] Applied essential addon: CoreDNS +[addons] Applied essential addon: kube-proxy + Your Kubernetes master has initialized successfully! 
To start using your cluster, you need to run the following as a regular user: @@ -217,7 +267,9 @@ Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: You can now join any number of machines by running the following on each node as root: - kubeadm join 192.168.0.127:6443 --token 6m0emc.x9uim283uzevn3pm --discovery-token-ca-cert-hash sha256:c8b1b72de1eabc71df5490afa7cd8dd1c1952234e65b30262ed5084c9d1f10c2 + kubeadm join 192.168.0.127:6443 --token 3ag6sz.y8rmcz5xec50xkz1 --discovery-token-ca-cert-hash sha256:912c325aee8dc7c583c36a1c15d7ef1d7a1abeea5dc8a19e96d24536c13373e6 + + @@ -245,7 +297,31 @@ kubectl apply -f /opt/kube-flannel.yml ``` echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables -kubeadm join 192.168.0.127:6443 --token 6m0emc.x9uim283uzevn3pm --discovery-token-ca-cert-hash sha256:c8b1b72de1eabc71df5490afa7cd8dd1c1952234e65b30262ed5084c9d1f10c2 +kubeadm join 192.168.0.127:6443 --token 3ag6sz.y8rmcz5xec50xkz1 --discovery-token-ca-cert-hash sha256:912c325aee8dc7c583c36a1c15d7ef1d7a1abeea5dc8a19e96d24536c13373e6 +这时候终端会输出: + +[preflight] Running pre-flight checks +[discovery] Trying to connect to API Server "192.168.0.127:6443" +[discovery] Created cluster-info discovery client, requesting info from "https://192.168.0.127:6443" +[discovery] Requesting info from "https://192.168.0.127:6443" again to validate TLS against the pinned public key +[discovery] Cluster info signature and contents are valid and TLS certificate validates against pinned roots, will use API Server "192.168.0.127:6443" +[discovery] Successfully established connection with API Server "192.168.0.127:6443" +[join] Reading configuration from the cluster... +[join] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml' +[kubelet] Downloading configuration for the kubelet from the "kubelet-config-1.13" ConfigMap in the kube-system namespace +[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" +[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" +[kubelet-start] Activating the kubelet service +[tlsbootstrap] Waiting for the kubelet to perform the TLS Bootstrap... +[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "k8s-node-1" as an annotation + +This node has joined the cluster: +* Certificate signing request was sent to apiserver and a response was received. +* The Kubelet was informed of the new secure connection details. + +Run 'kubectl get nodes' on the master to see this node join the cluster. 
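+
+# 补充示例(假设节点主机名按前文设置为 k8s-node-1):回到 master 验证新节点是否加入
+kubectl get nodes -o wide
+# 刚加入的节点一般先显示 NotReady,等网络插件相关 Pod 就绪后才会变为 Ready
+kubectl describe node k8s-node-1 | grep -A 4 'Conditions:'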
+ + 在 master 节点上:kubectl get cs From 1c77c2a0d2068f1e4ac92ec9885487d010103cd0 Mon Sep 17 00:00:00 2001 From: judasn Date: Sun, 10 Feb 2019 23:33:16 +0800 Subject: [PATCH 257/330] =?UTF-8?q?2019-02-10=20=E5=AE=8C=E5=96=84=20K8S?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/K8S-Install-And-Usage.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/markdown-file/K8S-Install-And-Usage.md b/markdown-file/K8S-Install-And-Usage.md index 5deab113..a43b673f 100644 --- a/markdown-file/K8S-Install-And-Usage.md +++ b/markdown-file/K8S-Install-And-Usage.md @@ -75,7 +75,7 @@ systemctl disable iptables.service setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config swapoff -a && sysctl -w vm.swappiness=0 - +echo "vm.swappiness = 0" >> /etc/sysctl.conf @@ -277,10 +277,10 @@ master 机子: mkdir -p $HOME/.kube sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config sudo chown $(id -u):$(id -g) $HOME/.kube/config - +export KUBECONFIG=$HOME/.kube/config 查询我们的 token -kubeadm token list +kubectl cluster-info kubectl cluster-info @@ -322,6 +322,9 @@ This node has joined the cluster: Run 'kubectl get nodes' on the master to see this node join the cluster. +如果 node 节点加入失败,可以:kubeadm reset,再来重新 join + + 在 master 节点上:kubectl get cs From ea85f55bbae2d0f6a995dcd240aa62021ac424c4 Mon Sep 17 00:00:00 2001 From: judasn Date: Sun, 10 Feb 2019 23:43:09 +0800 Subject: [PATCH 258/330] =?UTF-8?q?2019-02-10=20=E5=AE=8C=E5=96=84=20K8S?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/K8S-Install-And-Usage.md | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/markdown-file/K8S-Install-And-Usage.md b/markdown-file/K8S-Install-And-Usage.md index a43b673f..46f6f622 100644 --- a/markdown-file/K8S-Install-And-Usage.md +++ b/markdown-file/K8S-Install-And-Usage.md @@ -163,13 +163,9 @@ mkdir -p /etc/cni/net.d && vim /etc/cni/net.d/10-flannel.conflist,内容 vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 最后一行添加:Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs" -所有机子 -systemctl enable kubelet && systemctl start kubelet -kubeadm version -kubectl version -必须配置: +所有机子必须配置: vim /etc/sysctl.d/k8s.conf net.bridge.bridge-nf-call-ip6tables = 1 net.bridge.bridge-nf-call-iptables = 1 @@ -182,6 +178,15 @@ scp -r /etc/sysctl.d/k8s.conf root@k8s-node-2:/etc/sysctl.d/ modprobe br_netfilter && sysctl -p /etc/sysctl.d/k8s.conf + + +所有机子 +systemctl enable kubelet && systemctl start kubelet + +kubeadm version +kubectl version + + ``` - 初始化 master 节点: @@ -267,8 +272,7 @@ Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: You can now join any number of machines by running the following on each node as root: - kubeadm join 192.168.0.127:6443 --token 3ag6sz.y8rmcz5xec50xkz1 --discovery-token-ca-cert-hash sha256:912c325aee8dc7c583c36a1c15d7ef1d7a1abeea5dc8a19e96d24536c13373e6 - +kubeadm join 192.168.0.127:6443 --token cdo0z3.msyp89yp8zk6lhmf --discovery-token-ca-cert-hash sha256:c0d8942e801962232f0e02b757d13ed0034fa07ab3953764e6a6c67b6688963c @@ -297,7 +301,8 @@ kubectl apply -f /opt/kube-flannel.yml ``` echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables -kubeadm join 192.168.0.127:6443 --token 3ag6sz.y8rmcz5xec50xkz1 --discovery-token-ca-cert-hash sha256:912c325aee8dc7c583c36a1c15d7ef1d7a1abeea5dc8a19e96d24536c13373e6 +kubeadm join 192.168.0.127:6443 --token cdo0z3.msyp89yp8zk6lhmf 
--discovery-token-ca-cert-hash sha256:c0d8942e801962232f0e02b757d13ed0034fa07ab3953764e6a6c67b6688963c + 这时候终端会输出: [preflight] Running pre-flight checks From 5ffb78e64d2a6cad612eae6d7f21c3292943b264 Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 11 Feb 2019 00:03:57 +0800 Subject: [PATCH 259/330] =?UTF-8?q?2019-02-10=20=E5=AE=8C=E5=96=84=20K8S?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/K8S-Install-And-Usage.md | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/markdown-file/K8S-Install-And-Usage.md b/markdown-file/K8S-Install-And-Usage.md index 46f6f622..b844a9c1 100644 --- a/markdown-file/K8S-Install-And-Usage.md +++ b/markdown-file/K8S-Install-And-Usage.md @@ -48,7 +48,7 @@ - - -#### 开始安装 - Kubernetes 1.13.3 版本 +#### 开始安装 - Kubernetes 1.13.2 版本 - 三台机子: - master-1:`192.168.0.127` @@ -74,8 +74,8 @@ systemctl disable iptables.service setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config -swapoff -a && sysctl -w vm.swappiness=0 echo "vm.swappiness = 0" >> /etc/sysctl.conf +swapoff -a && sysctl -w vm.swappiness=0 @@ -133,7 +133,7 @@ scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-2:/etc/yum.repos.d/ iptables -P FORWARD ACCEPT 所有机子 -yum install -y kubelet-1.13.3 kubeadm-1.13.3 kubectl-1.13.3 --disableexcludes=kubernetes +yum install -y kubelet-1.13.2 kubeadm-1.13.2 kubectl-1.13.2 --disableexcludes=kubernetes 所有机子 @@ -198,7 +198,7 @@ echo 1 > /proc/sys/net/ipv4/ip_forward kubeadm init \ --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \ --pod-network-cidr 10.244.0.0/16 \ ---kubernetes-version 1.13.3 \ +--kubernetes-version 1.13.2 \ --service-cidr 10.96.0.0/12 \ --apiserver-advertise-address=0.0.0.0 \ --ignore-preflight-errors=Swap @@ -207,7 +207,7 @@ kubeadm init \ 这个过程会下载一些 docker 镜像,时间可能会比较久,看你网络情况。 终端会输出核心内容: -[init] Using Kubernetes version: v1.13.3 +[init] Using Kubernetes version: v1.13.2 [preflight] Running pre-flight checks [preflight] Pulling images required for setting up a Kubernetes cluster [preflight] This might take a minute or two, depending on the speed of your internet connection @@ -223,12 +223,12 @@ kubeadm init \ [certs] Generating "front-proxy-ca" certificate and key [certs] Generating "front-proxy-client" certificate and key [certs] Generating "etcd/ca" certificate and key -[certs] Generating "etcd/healthcheck-client" certificate and key -[certs] Generating "apiserver-etcd-client" certificate and key [certs] Generating "etcd/server" certificate and key [certs] etcd/server serving cert is signed for DNS names [k8s-master-1 localhost] and IPs [192.168.0.127 127.0.0.1 ::1] +[certs] Generating "apiserver-etcd-client" certificate and key [certs] Generating "etcd/peer" certificate and key [certs] etcd/peer serving cert is signed for DNS names [k8s-master-1 localhost] and IPs [192.168.0.127 127.0.0.1 ::1] +[certs] Generating "etcd/healthcheck-client" certificate and key [certs] Generating "sa" key and public key [kubeconfig] Using kubeconfig folder "/etc/kubernetes" [kubeconfig] Writing "admin.conf" kubeconfig file @@ -241,14 +241,13 @@ kubeadm init \ [control-plane] Creating static Pod manifest for "kube-scheduler" [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". 
This can take up to 4m0s -[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s -[apiclient] All control plane components are healthy after 32.002189 seconds +[apiclient] All control plane components are healthy after 18.002437 seconds [uploadconfig] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace [kubelet] Creating a ConfigMap "kubelet-config-1.13" in namespace kube-system with the configuration for the kubelets in the cluster [patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "k8s-master-1" as an annotation [mark-control-plane] Marking the node k8s-master-1 as control-plane by adding the label "node-role.kubernetes.io/master=''" [mark-control-plane] Marking the node k8s-master-1 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule] -[bootstrap-token] Using token: 3ag6sz.y8rmcz5xec50xkz1 +[bootstrap-token] Using token: yes6xf.5huewerdtfxafde5 [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles [bootstraptoken] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials [bootstraptoken] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token @@ -272,8 +271,7 @@ Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: You can now join any number of machines by running the following on each node as root: -kubeadm join 192.168.0.127:6443 --token cdo0z3.msyp89yp8zk6lhmf --discovery-token-ca-cert-hash sha256:c0d8942e801962232f0e02b757d13ed0034fa07ab3953764e6a6c67b6688963c - + kubeadm join 192.168.0.127:6443 --token yes6xf.5huewerdtfxafde5 --discovery-token-ca-cert-hash sha256:98dd48ac4298e23f9c275309bfd8b69c5b3166752ccf7a36c2affcb7c1988781 @@ -284,7 +282,7 @@ sudo chown $(id -u):$(id -g) $HOME/.kube/config export KUBECONFIG=$HOME/.kube/config 查询我们的 token -kubectl cluster-info +kubeadm token list kubectl cluster-info @@ -301,7 +299,7 @@ kubectl apply -f /opt/kube-flannel.yml ``` echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables -kubeadm join 192.168.0.127:6443 --token cdo0z3.msyp89yp8zk6lhmf --discovery-token-ca-cert-hash sha256:c0d8942e801962232f0e02b757d13ed0034fa07ab3953764e6a6c67b6688963c +kubeadm join 192.168.0.127:6443 --token yes6xf.5huewerdtfxafde5 --discovery-token-ca-cert-hash sha256:98dd48ac4298e23f9c275309bfd8b69c5b3166752ccf7a36c2affcb7c1988781 这时候终端会输出: From b29d7be1afba72b4d072f39e1c8cea94572ac0c4 Mon Sep 17 00:00:00 2001 From: judasn Date: Mon, 11 Feb 2019 22:29:42 +0800 Subject: [PATCH 260/330] =?UTF-8?q?2019-02-11=20=E5=AE=8C=E5=96=84=20K8S?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/K8S-Install-And-Usage.md | 140 ++++++++++++++----------- 1 file changed, 76 insertions(+), 64 deletions(-) diff --git a/markdown-file/K8S-Install-And-Usage.md b/markdown-file/K8S-Install-And-Usage.md index b844a9c1..bd14fb1d 100644 --- a/markdown-file/K8S-Install-And-Usage.md +++ b/markdown-file/K8S-Install-And-Usage.md @@ -48,7 +48,7 @@ - - -#### 开始安装 - Kubernetes 1.13.2 版本 +#### 开始安装 - Kubernetes 1.13.3 版本 - 三台机子: - master-1:`192.168.0.127` @@ -61,25 +61,29 @@ - 核心,查看可以安装的 Docker 列表:`yum list docker-ce --showduplicates` - 所有节点设置 kubernetes repo 源,并安装 Kubeadm、Kubelet、Kubectl 都设置阿里云的源 - Kubeadm 初始化集群过程当中,它会下载很多的镜像,默认也是去 Google 家里下载。但是 1.13 
新增了一个配置:`--image-repository` 算是救了命。 -- 具体流程: -``` -主机时间同步 -systemctl start chronyd.service && systemctl enable chronyd.service +#### 安装具体流程 + +- 同步所有机子时间:`systemctl start chronyd.service && systemctl enable chronyd.service` +- 所有机子禁用防火墙、selinux、swap +``` systemctl stop firewalld.service systemctl disable firewalld.service systemctl disable iptables.service +iptables -P FORWARD ACCEPT setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config echo "vm.swappiness = 0" >> /etc/sysctl.conf swapoff -a && sysctl -w vm.swappiness=0 +``` +- 给各自机子设置 hostname 和 hosts - +``` hostnamectl --static set-hostname k8s-master-1 hostnamectl --static set-hostname k8s-node-1 hostnamectl --static set-hostname k8s-node-2 @@ -89,30 +93,30 @@ vim /etc/hosts 192.168.0.127 k8s-master-1 192.168.0.128 k8s-node-1 192.168.0.129 k8s-node-2 +``` -master 免密 -生产密钥对 -ssh-keygen -t rsa +- 给 master 设置免密 +``` +ssh-keygen -t rsa -公钥内容写入 authorized_keys cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys -测试: + ssh localhost -将公钥复制到其他机子 ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@k8s-node-1(根据提示输入 k8s-node-1 密码) ssh-copy-id -i ~/.ssh/id_rsa.pub -p 22 root@k8s-node-2(根据提示输入 k8s-node-2 密码) - -在 linux01 上测试 ssh k8s-master-1 ssh k8s-node-1 ssh k8s-node-2 +``` +- 给所有机子设置 yum 源 +``` vim /etc/yum.repos.d/kubernetes.repo [kubernetes] @@ -126,18 +130,13 @@ gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-1:/etc/yum.repos.d/ scp -r /etc/yum.repos.d/kubernetes.repo root@k8s-node-2:/etc/yum.repos.d/ +``` +- 给 master 机子创建 flannel 配置文件 +``` +mkdir -p /etc/cni/net.d && vim /etc/cni/net.d/10-flannel.conflist -所有机子 -iptables -P FORWARD ACCEPT - -所有机子 -yum install -y kubelet-1.13.2 kubeadm-1.13.2 kubectl-1.13.2 --disableexcludes=kubernetes - - -所有机子 -mkdir -p /etc/cni/net.d && vim /etc/cni/net.d/10-flannel.conflist,内容 { "name": "cbr0", "plugins": [ @@ -156,17 +155,15 @@ mkdir -p /etc/cni/net.d && vim /etc/cni/net.d/10-flannel.conflist,内容 } ] } +``` -所有机子 -vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf -最后一行添加:Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs" - - +- 给所有机子创建配置 -所有机子必须配置: +``` vim /etc/sysctl.d/k8s.conf + net.bridge.bridge-nf-call-ip6tables = 1 net.bridge.bridge-nf-call-iptables = 1 net.ipv4.ip_forward=1 @@ -177,16 +174,31 @@ scp -r /etc/sysctl.d/k8s.conf root@k8s-node-1:/etc/sysctl.d/ scp -r /etc/sysctl.d/k8s.conf root@k8s-node-2:/etc/sysctl.d/ modprobe br_netfilter && sysctl -p /etc/sysctl.d/k8s.conf +``` + +- 给所有机子安装组件 + +``` +yum install -y kubelet-1.13.3 kubeadm-1.13.3 kubectl-1.13.3 --disableexcludes=kubernetes +``` + +- 给所有机子添加一个变量 + +``` +vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +最后一行添加:Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs" +``` -所有机子 +- 启动所有机子 + +``` systemctl enable kubelet && systemctl start kubelet kubeadm version kubectl version - ``` - 初始化 master 节点: @@ -194,20 +206,18 @@ kubectl version ``` echo 1 > /proc/sys/net/ipv4/ip_forward -推荐: + kubeadm init \ --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \ --pod-network-cidr 10.244.0.0/16 \ ---kubernetes-version 1.13.2 \ ---service-cidr 10.96.0.0/12 \ ---apiserver-advertise-address=0.0.0.0 \ +--kubernetes-version 1.13.3 \ --ignore-preflight-errors=Swap -10.244.0.0/16是 flannel 插件固定使用的ip段,它的值取决于你准备安装哪个网络插件 +其中 10.244.0.0/16 是 flannel 插件固定使用的ip段,它的值取决于你准备安装哪个网络插件 这个过程会下载一些 docker 镜像,时间可能会比较久,看你网络情况。 终端会输出核心内容: -[init] Using Kubernetes version: v1.13.2 +[init] Using Kubernetes version: 
v1.13.3 [preflight] Running pre-flight checks [preflight] Pulling images required for setting up a Kubernetes cluster [preflight] This might take a minute or two, depending on the speed of your internet connection @@ -216,19 +226,19 @@ kubeadm init \ [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" [kubelet-start] Activating the kubelet service [certs] Using certificateDir folder "/etc/kubernetes/pki" -[certs] Generating "ca" certificate and key -[certs] Generating "apiserver" certificate and key -[certs] apiserver serving cert is signed for DNS names [k8s-master-1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.0.127] -[certs] Generating "apiserver-kubelet-client" certificate and key [certs] Generating "front-proxy-ca" certificate and key [certs] Generating "front-proxy-client" certificate and key [certs] Generating "etcd/ca" certificate and key [certs] Generating "etcd/server" certificate and key [certs] etcd/server serving cert is signed for DNS names [k8s-master-1 localhost] and IPs [192.168.0.127 127.0.0.1 ::1] -[certs] Generating "apiserver-etcd-client" certificate and key [certs] Generating "etcd/peer" certificate and key [certs] etcd/peer serving cert is signed for DNS names [k8s-master-1 localhost] and IPs [192.168.0.127 127.0.0.1 ::1] [certs] Generating "etcd/healthcheck-client" certificate and key +[certs] Generating "apiserver-etcd-client" certificate and key +[certs] Generating "ca" certificate and key +[certs] Generating "apiserver-kubelet-client" certificate and key +[certs] Generating "apiserver" certificate and key +[certs] apiserver serving cert is signed for DNS names [k8s-master-1 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.0.127] [certs] Generating "sa" key and public key [kubeconfig] Using kubeconfig folder "/etc/kubernetes" [kubeconfig] Writing "admin.conf" kubeconfig file @@ -241,13 +251,13 @@ kubeadm init \ [control-plane] Creating static Pod manifest for "kube-scheduler" [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". 
This can take up to 4m0s -[apiclient] All control plane components are healthy after 18.002437 seconds +[apiclient] All control plane components are healthy after 19.001686 seconds [uploadconfig] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace [kubelet] Creating a ConfigMap "kubelet-config-1.13" in namespace kube-system with the configuration for the kubelets in the cluster [patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "k8s-master-1" as an annotation [mark-control-plane] Marking the node k8s-master-1 as control-plane by adding the label "node-role.kubernetes.io/master=''" [mark-control-plane] Marking the node k8s-master-1 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule] -[bootstrap-token] Using token: yes6xf.5huewerdtfxafde5 +[bootstrap-token] Using token: 8tpo9l.jlw135r8559kaad4 [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles [bootstraptoken] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials [bootstraptoken] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token @@ -271,35 +281,40 @@ Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: You can now join any number of machines by running the following on each node as root: - kubeadm join 192.168.0.127:6443 --token yes6xf.5huewerdtfxafde5 --discovery-token-ca-cert-hash sha256:98dd48ac4298e23f9c275309bfd8b69c5b3166752ccf7a36c2affcb7c1988781 - + kubeadm join 192.168.0.127:6443 --token 8tpo9l.jlw135r8559kaad4 --discovery-token-ca-cert-hash sha256:d6594ccc1310a45cbebc45f1c93f5ac113873786365ed63efcf667c952d7d197 +``` +- 给 master 机子设置配置 -master 机子: +``` mkdir -p $HOME/.kube sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config sudo chown $(id -u):$(id -g) $HOME/.kube/config export KUBECONFIG=$HOME/.kube/config +``` -查询我们的 token +- 在 master 上查看一些环境 + +``` kubeadm token list kubectl cluster-info +``` +- 给 master 安装 Flannel -master 安装 Flannel +``` cd /opt && wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml kubectl apply -f /opt/kube-flannel.yml - ``` -- 到 node 节点进行加入: +- 到 node 节点加入集群: ``` echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables -kubeadm join 192.168.0.127:6443 --token yes6xf.5huewerdtfxafde5 --discovery-token-ca-cert-hash sha256:98dd48ac4298e23f9c275309bfd8b69c5b3166752ccf7a36c2affcb7c1988781 +kubeadm join 192.168.0.127:6443 --token 8tpo9l.jlw135r8559kaad4 --discovery-token-ca-cert-hash sha256:d6594ccc1310a45cbebc45f1c93f5ac113873786365ed63efcf667c952d7d197 这时候终端会输出: @@ -323,35 +338,32 @@ This node has joined the cluster: * The Kubelet was informed of the new secure connection details. Run 'kubectl get nodes' on the master to see this node join the cluster. 
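+
+# 补充示例(假设性的排查命令):若节点长时间 NotReady,可先确认网络插件相关 Pod 是否就绪
+kubectl get pods -n kube-system -o wide | grep -E 'flannel|coredns'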
+``` +- 如果 node 节点加入失败,可以:`kubeadm reset`,再来重新 join +- 在 master 节点上:`kubectl get cs` -如果 node 节点加入失败,可以:kubeadm reset,再来重新 join - - - - -在 master 节点上:kubectl get cs +``` NAME STATUS MESSAGE ERROR controller-manager Healthy ok scheduler Healthy ok etcd-0 Healthy {"health": "true"} 结果都是 Healthy 则表示可以了,不然就得检查。必要时可以用:`kubeadm reset` 重置,重新进行集群初始化 +``` +- 在 master 节点上:`kubectl get nodes` -验证: -kubectl get nodes -如果还是 NotReady,则查看错误信息: -kubectl get pods --all-namespaces,其中:Pending/ContainerCreating/ImagePullBackOff 都是 Pod 没有就绪,我们可以这样查看对应 Pod 遇到了什么问题 +``` +如果还是 NotReady,则查看错误信息:kubectl get pods --all-namespaces +其中:Pending/ContainerCreating/ImagePullBackOff 都是 Pod 没有就绪,我们可以这样查看对应 Pod 遇到了什么问题 kubectl describe pod --namespace=kube-system 或者:kubectl logs -n kube-system tail -f /var/log/messages - ``` - #### 主要概念 - Master 节点,负责集群的调度、集群的管理 From 2c3218cd7712b062fab1dfd3461a4e99d543b8b6 Mon Sep 17 00:00:00 2001 From: "Judas.n" Date: Sun, 24 Feb 2019 15:45:51 +0800 Subject: [PATCH 261/330] Update Vim-Install-And-Settings.md --- markdown-file/Vim-Install-And-Settings.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/markdown-file/Vim-Install-And-Settings.md b/markdown-file/Vim-Install-And-Settings.md index 77560192..dfdd3653 100644 --- a/markdown-file/Vim-Install-And-Settings.md +++ b/markdown-file/Vim-Install-And-Settings.md @@ -117,6 +117,10 @@ - 进入 vim 后,按 `F5`,然后 `shift + insert` 进行粘贴。这种事就不会错乱了。 - 原因是:`vim ~/.vimrc` 中有一行这样的设置:`set pastetoggle=` +## 其他常用命令 + +- 对两个文件进行对比:`vimdiff /opt/1.txt /opt/2.txt` + ## 资料 - [vim几个小技巧(批量替换,列编辑)](http://blogread.cn/it/article/1010?f=sa) From f45dfcc237e20196036bc89ceb05583f35d6d312 Mon Sep 17 00:00:00 2001 From: judasn Date: Sat, 9 Mar 2019 13:15:31 +0800 Subject: [PATCH 262/330] =?UTF-8?q?2019-03-09=20=E8=A1=A5=E5=85=85=20ES=20?= =?UTF-8?q?GUI=20=E5=AE=A2=E6=88=B7=E7=AB=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/Elasticsearch-Base.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md index 0176b74e..a7e98bba 100644 --- a/markdown-file/Elasticsearch-Base.md +++ b/markdown-file/Elasticsearch-Base.md @@ -25,7 +25,7 @@ services: ------------------------------------------------------------------- -## Elasticsearch 6.5.x 安装(适配与 5.5.x) +## Elasticsearch 6.5.x 安装(适配与 5.5.x,6.6.x) #### 环境 @@ -114,6 +114,12 @@ type=rpm-md - `cd /usr/share/elasticsearch && bin/elasticsearch-plugin install x-pack` +#### GUI 客户端工具 + +- 优先推荐: +- + + #### 安装 Chrome 扩展的 Head - 下载地址: From bbde6a08dcdee5169910a594e44453e748cc6c76 Mon Sep 17 00:00:00 2001 From: "Judas.n" Date: Wed, 13 Mar 2019 16:16:26 +0800 Subject: [PATCH 263/330] Update README.md --- README.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 946d100e..477aa85c 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,10 @@ -# Java 程序员眼中的 Linux + +## 团队 DevOps 方案参考 + + + + + ## 初衷(Original Intention) From 1072550e98ca6142ddf679c71f950bbe578a9948 Mon Sep 17 00:00:00 2001 From: judasn Date: Sun, 24 Mar 2019 23:17:07 +0800 Subject: [PATCH 264/330] =?UTF-8?q?2019-03-24=20=E8=A1=A5=E5=85=85=20TPPC-?= =?UTF-8?q?MySQL?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/Mysql-Install-And-Settings.md | 21 +++ markdown-file/Mysql-Test.md | 156 ++++++++++++++++++++ 2 files changed, 177 insertions(+) diff --git a/markdown-file/Mysql-Install-And-Settings.md 
b/markdown-file/Mysql-Install-And-Settings.md index 3ab9a80a..acb54084 100644 --- a/markdown-file/Mysql-Install-And-Settings.md +++ b/markdown-file/Mysql-Install-And-Settings.md @@ -148,6 +148,27 @@ rpm -Uvh https://mirror.webtatic.com/yum/el6/latest.rpm - `ln -s /usr/local/mysql/bin/mysqldump /usr/bin` - `ln -s /usr/local/mysql/bin/mysqlslap /usr/bin` +## MySQL 5.7 YUM 安装 + +- 官网: + +``` + +禁用 selinux:setenforce 0 + +wget https://repo.mysql.com//mysql57-community-release-el7-11.noarch.rpm +yum localinstall mysql57-community-release-el7-11.noarch.rpm +yum install mysql-community-server +一共 194M + +配置文件:/etc/my.cnf +systemctl start mysqld +systemctl status mysqld + +查看初次使用的临时密码:grep 'temporary password' /var/log/mysqld.log + +``` + ------------------------------------------------------------------- diff --git a/markdown-file/Mysql-Test.md b/markdown-file/Mysql-Test.md index 4a6edfb9..1b03d472 100644 --- a/markdown-file/Mysql-Test.md +++ b/markdown-file/Mysql-Test.md @@ -47,6 +47,8 @@ - `--debug-info` 代表要额外输出 CPU 以及内存的相关信息。 - `--only-print` 打印压力测试的时候 mysqlslap 到底做了什么事,通过 sql 语句方式告诉我们。 +------------------------------------------------------------------- + ## sysbench 工具 @@ -148,6 +150,8 @@ Threads fairness: events (avg/stddev): 2748.6000/132.71 --总处理事件数/标准偏差 execution time (avg/stddev): 119.9907/0.00 --总执行时间/标准偏差 +------------------------------------------------------------------- + ## QPS 和 TPS 和说明 ### 基本概念 @@ -171,6 +175,157 @@ Threads fairness: - 每天300wPV的在单台机器上,这台机器需要多少QPS?对于这样的问题,假设每天80%的访问集中在20%的时间里,这20%时间叫做峰值时间。( 3000000 * 0.8 ) / (3600 * 24 * 0.2 ) = 139 (QPS). - 如果一台机器的QPS是58,需要几台机器来支持?答:139 / 58 = 3 +------------------------------------------------------------------- + +## Percona TPCC-MySQL 测试工具(优先推荐) + +- 可以较好地模拟真实测试结果数据 +- 官网主页: + +``` +TPC-C 是专门针对联机交易处理系统(OLTP系统)的规范,一般情况下我们也把这类系统称为业务处理系统。 +TPC-C是TPC(Transaction Processing Performance Council)组织发布的一个测试规范,用于模拟测试复杂的在线事务处理系统。其测试结果包括每分钟事务数(tpmC),以及每事务的成本(Price/tpmC)。 +在进行大压力下MySQL的一些行为时经常使用。 +``` + +### 安装 + +- 先确定本机安装过 MySQL +- 并且安装过:`yum install mysql-devel` + +``` +git clone https://github.com/Percona-Lab/tpcc-mysql +cd tpcc-mysql/src +make + +如果make没报错,就会在tpcc-mysql 根目录文件夹下生成tpcc二进制命令行工具tpcc_load、tpcc_start +``` + +### 测试的几个表介绍 + +``` +tpcc-mysql的业务逻辑及其相关的几个表作用如下: +New-Order:新订单,主要对应 new_orders 表 +Payment:支付,主要对应 orders、history 表 +Order-Status:订单状态,主要对应 orders、order_line 表 +Delivery:发货,主要对应 order_line 表 +Stock-Level:库存,主要对应 stock 表 + +其他相关表: +客户:主要对应customer表 +地区:主要对应district表 +商品:主要对应item表 +仓库:主要对应warehouse表 +``` + +### 准备 + +- 测试阿里云 ECS 与 RDS 是否相通: +- 记得在 RDS 添加账号和给账号配置权限,包括:配置权限、数据权限(默认添加账号后都是没有开启的,还要自己手动开启) +- 还要添加内网 ECS 到 RDS 的白名单 IP 里面 +- 或者在 RDS 上开启外网访问设置,但是也设置 IP 白名单(访问 ip.cn 查看自己的外网 IP 地址,比如:120.85.112.97) + +``` +ping rm-wz9066qo44wn500t55o.mysql.rds.aliyuncs.com + +mysql -h rm-wz9066qo44wn500t55o.mysql.rds.aliyuncs.com -P 3306 -u myaccount -p + +输入密码:Aa123456 +``` + + + +``` +创库,名字为:TPCC: +CREATE DATABASE TPCC DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci; + + +导入项目中的出初始化数据脚本: +创建表:create_table.sql +创建索引和外键:add_fkey_idx.sql +``` + + +### 测试 + +- 数据库:阿里云 RDS-MySQL-5.7-2C4G +- 测试机:阿里云 ECS-4C8G-CentOS7.6 + +- 需要注意的是 tpcc 默认会读取 /var/lib/mysql/mysql.sock 这个 socket 文件。因此,如果你的socket文件不在相应路径的话,可以做个软连接,或者通过TCP/IP的方式连接测试服务器 +- 准备数据: + +``` +cd /opt/tpcc-mysql +./tpcc_load -h rm-wz9066qo44wn500t55o.mysql.rds.aliyuncs.com -P 3306 -d TPCC -u myaccount -p Aa123456 -w 100 +-w 100 表示创建 100 个仓库数据 +这个过程花费时间还是挺长的,我这台 ECS 结果是这样: +差不多 9s == 5000 个数据。 +也就是: +10W 个数据需要 == 20 X 9s == 180s == 3min +1000W == 5h +一共差不多花了 10h 左右。 + +插入过程 
RDS-2C4G 的监控情况: +CPU利用率 4% +内存 18% ~ 40% (随着数据增加而增大) +连接数:1% +IOPS:4% +已使用存储空间:5.5G ~ 10G + +要模拟出够真实的数据,仓库不要太少,一般要大于 100, +当然你也可以 select count(*) from 上面的各个表,看下 100 个库生成的数据,是不是跟你预期数据差不多,是的话就够了。 + +select count(*) from customer; +10s X 10 X 100 = 10000s + +select count(*) from district; +select count(*) from history; +select count(*) from item; + 100 个仓库 == 1000 X 100 == 100000 == 10W +select count(*) from new_orders; +select count(*) from order_line; +select count(*) from orders; +select count(*) from stock; + 100 个仓库 == 100000 X 100 == 10000000 = 1000W +select count(*) from warehouse; +``` + +- 开始测试: + +``` + +tpcc_start -h rm-wz9066qo44wn500t55o.mysql.rds.aliyuncs.com -P 3306 -d TPCC -u myaccount -p Aa123456 -w 100 -c 200 -r 300 -l 2400 -f /opt/mysql_tpcc_100_20190324 + +-w 100 表示 100 个仓库数据 +-c 200 表示并发 200 个线程 +-r 300 表示预热 300 秒 +-l 2400 表示持续压测 2400 秒 + +``` + + +### 报表 + + +``` +行数据表示:10, 1187(0):1.682|2.175, 1187(0):0.336|0.473, 118(0):0.172|0.226, 118(0):1.864|2.122, 119(0):6.953|8.107 + +10:时间戳,每十秒产生一条数据。 +1187(0):1.682|2.175:表示10秒内完成1187笔新订单业务。 +1187(0):0.336|0.473: 支付业务, +118(0):1.864|2.122:查询业务, +118(0):0.172|0.226: 发货业务, +119(0):6.953|8.107: 库存查询业务 + + + + +188.000 TpmC +TpmC结果值(每分钟事务数,该值是第一次统计结果中的新订单事务数除以总耗时分钟数,例如本例中是:372/2=186) +tpmC值在国内外被广泛用于衡量计算机系统的事务处理能力 +``` + + ## 资料 @@ -181,3 +336,4 @@ Threads fairness: - - - +- \ No newline at end of file From 5be37d37b1067de2b93b87c33741b5172e6df457 Mon Sep 17 00:00:00 2001 From: zhang Date: Mon, 25 Mar 2019 17:06:29 +0800 Subject: [PATCH 265/330] =?UTF-8?q?2019-03-25=20=E8=A1=A5=E5=85=85=20TPCC-?= =?UTF-8?q?MySQL?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- markdown-file/Mysql-Test.md | 253 +++++++++++++++++++++++++++++++----- 1 file changed, 221 insertions(+), 32 deletions(-) diff --git a/markdown-file/Mysql-Test.md b/markdown-file/Mysql-Test.md index 1b03d472..2a5799e0 100644 --- a/markdown-file/Mysql-Test.md +++ b/markdown-file/Mysql-Test.md @@ -224,11 +224,12 @@ Stock-Level:库存,主要对应 stock 表 - 记得在 RDS 添加账号和给账号配置权限,包括:配置权限、数据权限(默认添加账号后都是没有开启的,还要自己手动开启) - 还要添加内网 ECS 到 RDS 的白名单 IP 里面 - 或者在 RDS 上开启外网访问设置,但是也设置 IP 白名单(访问 ip.cn 查看自己的外网 IP 地址,比如:120.85.112.97) +- RDS 的内网地址和外网地址不一样,要认真看。 ``` -ping rm-wz9066qo44wn500t55o.mysql.rds.aliyuncs.com +ping rm-wz9v0vej02ys79jbj.mysql.rds.aliyuncs.com -mysql -h rm-wz9066qo44wn500t55o.mysql.rds.aliyuncs.com -P 3306 -u myaccount -p +mysql -h rm-wz9v0vej02ys79jbj.mysql.rds.aliyuncs.com -P 3306 -u myaccount -p 输入密码:Aa123456 ``` @@ -242,65 +243,71 @@ CREATE DATABASE TPCC DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci; 导入项目中的出初始化数据脚本: 创建表:create_table.sql +/usr/bin/mysql -h rm-wz9v0vej02ys79jbj.mysql.rds.aliyuncs.com -u myaccount -p tpcc < /root/tpcc-mysql/create_table.sql + 创建索引和外键:add_fkey_idx.sql +/usr/bin/mysql -h rm-wz9v0vej02ys79jbj.mysql.rds.aliyuncs.com -u myaccount -p tpcc < /root/tpcc-mysql/add_fkey_idx.sql ``` ### 测试 - 数据库:阿里云 RDS-MySQL-5.7-2C4G -- 测试机:阿里云 ECS-4C8G-CentOS7.6 +- 测试机:阿里云 ECS-4C4G-CentOS7.6 +- 根据测试,不同的 ECS 测试机,不同的 RDS 测试结果有时候差距挺大的,这个很蛋疼。 - 需要注意的是 tpcc 默认会读取 /var/lib/mysql/mysql.sock 这个 socket 文件。因此,如果你的socket文件不在相应路径的话,可以做个软连接,或者通过TCP/IP的方式连接测试服务器 - 准备数据: ``` cd /opt/tpcc-mysql -./tpcc_load -h rm-wz9066qo44wn500t55o.mysql.rds.aliyuncs.com -P 3306 -d TPCC -u myaccount -p Aa123456 -w 100 --w 100 表示创建 100 个仓库数据 -这个过程花费时间还是挺长的,我这台 ECS 结果是这样: -差不多 9s == 5000 个数据。 -也就是: -10W 个数据需要 == 20 X 9s == 180s == 3min -1000W == 5h -一共差不多花了 10h 左右。 +./tpcc_load -h rm-wz9v0vej02ys79jbj.mysql.rds.aliyuncs.com -P 3306 -d TPCC -u myaccount -p 
Aa123456 -w 80 +-w 80 表示创建 80 个仓库数据 +这个过程花费时间还是挺长的,建议测试机是高性能计算型。2CPU 差不多要 8h,你自己估量下。 +我这边 RDS 监控中,曲线上每秒 insert 差不多在 2W 差不多,如果你没有这个数,速度可能就很慢了。 +我这边差不多用了 2.5h 完成数据准备。 + 插入过程 RDS-2C4G 的监控情况: -CPU利用率 4% -内存 18% ~ 40% (随着数据增加而增大) +CPU利用率 24% +内存 30% ~ 40% (随着数据增加而增大) 连接数:1% -IOPS:4% +IOPS:9% 已使用存储空间:5.5G ~ 10G 要模拟出够真实的数据,仓库不要太少,一般要大于 100, -当然你也可以 select count(*) from 上面的各个表,看下 100 个库生成的数据,是不是跟你预期数据差不多,是的话就够了。 +下面是基于 80 个库的最终数据: select count(*) from customer; -10s X 10 X 100 = 10000s - + 2400000 select count(*) from district; + 800 select count(*) from history; + 2400000 select count(*) from item; - 100 个仓库 == 1000 X 100 == 100000 == 10W + 100000 select count(*) from new_orders; + 720000 select count(*) from order_line; + 23996450 select count(*) from orders; + 2400000 select count(*) from stock; - 100 个仓库 == 100000 X 100 == 10000000 = 1000W + 8000000 select count(*) from warehouse; + 80 ``` - 开始测试: ``` -tpcc_start -h rm-wz9066qo44wn500t55o.mysql.rds.aliyuncs.com -P 3306 -d TPCC -u myaccount -p Aa123456 -w 100 -c 200 -r 300 -l 2400 -f /opt/mysql_tpcc_100_20190324 +./tpcc_start -h rm-wz9v0vej02ys79jbj.mysql.rds.aliyuncs.com -P 3306 -d TPCC -u myaccount -p Aa123456 -w 80 -c 200 -r 300 -l 1800 -f /opt/mysql_tpcc_100_20190325 -w 100 表示 100 个仓库数据 -c 200 表示并发 200 个线程 -r 300 表示预热 300 秒 --l 2400 表示持续压测 2400 秒 - +-l 1800 表示持续压测 1800 秒 ``` @@ -308,25 +315,207 @@ tpcc_start -h rm-wz9066qo44wn500t55o.mysql.rds.aliyuncs.com -P 3306 -d TPCC -u m ``` -行数据表示:10, 1187(0):1.682|2.175, 1187(0):0.336|0.473, 118(0):0.172|0.226, 118(0):1.864|2.122, 119(0):6.953|8.107 + +188.000 TpmC +TpmC结果值(每分钟事务数,该值是第一次统计结果中的新订单事务数除以总耗时分钟数,例如本例中是:372/2=186) +tpmC值在国内外被广泛用于衡量计算机系统的事务处理能力 +``` -10:时间戳,每十秒产生一条数据。 -1187(0):1.682|2.175:表示10秒内完成1187笔新订单业务。 -1187(0):0.336|0.473: 支付业务, -118(0):1.864|2.122:查询业务, -118(0):0.172|0.226: 发货业务, -119(0):6.953|8.107: 库存查询业务 +- RDS-2C4G-80个仓库结果: +- CPU:100%,内存:34%,连接数:17%,IOPS:62%,磁盘空间:20G +``` +1780, trx: 979, 95%: 1849.535, 99%: 2402.613, max_rt: 3401.947, 986|3248.772, 98|698.821, 103|4202.110, 101|4547.416 +1790, trx: 1021, 95%: 1898.903, 99%: 2700.936, max_rt: 3848.142, 999|3150.117, 100|500.740, 102|3600.104, 100|5551.834 +1800, trx: 989, 95%: 1899.472, 99%: 2847.899, max_rt: 4455.064, 989|3049.921, 101|699.144, 97|3599.021, 102|5151.141 + +STOPPING THREADS........................................................................................................................................................................................................ + + + [0] sc:2 lt:174378 rt:0 fl:0 avg_rt: 1192.8 (5) + [1] sc:253 lt:173935 rt:0 fl:0 avg_rt: 542.7 (5) + [2] sc:4726 lt:12712 rt:0 fl:0 avg_rt: 144.7 (5) + [3] sc:0 lt:17435 rt:0 fl:0 avg_rt: 3029.8 (80) + [4] sc:0 lt:17435 rt:0 fl:0 avg_rt: 3550.7 (20) + in 1800 sec. 
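+
+# 补充注释(按 tpcc-mysql 输出的通用含义):[0]~[4] 依次对应 New-Order/Payment/Order-Status/Delivery/Stock-Level
+# sc=在响应时间约束内完成的事务数,lt=超时完成的事务数,rt=重试数,fl=失败数;lt 远大于 sc 说明该规格已明显过载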
+ + + [0] sc:2 lt:174378 rt:0 fl:0 + [1] sc:254 lt:174096 rt:0 fl:0 + [2] sc:4726 lt:12712 rt:0 fl:0 + [3] sc:0 lt:17437 rt:0 fl:0 + [4] sc:0 lt:17435 rt:0 fl:0 + + (all must be [OK]) + [transaction percentage] + Payment: 43.45% (>=43.0%) [OK] + Order-Status: 4.35% (>= 4.0%) [OK] + Delivery: 4.35% (>= 4.0%) [OK] + Stock-Level: 4.35% (>= 4.0%) [OK] + [response time (at least 90% passed)] + New-Order: 0.00% [NG] * + Payment: 0.15% [NG] * + Order-Status: 27.10% [NG] * + Delivery: 0.00% [NG] * + Stock-Level: 0.00% [NG] * -188.000 TpmC -TpmC结果值(每分钟事务数,该值是第一次统计结果中的新订单事务数除以总耗时分钟数,例如本例中是:372/2=186) -tpmC值在国内外被广泛用于衡量计算机系统的事务处理能力 + 5812.667 TpmC +``` + +- 升级:RDS-4C8G-80个仓库结果 +- CPU:100%,内存:55%,连接数:10%,IOPS:20%,磁盘空间:25G + ``` +1780, trx: 2303, 95%: 796.121, 99%: 1099.640, max_rt: 1596.883, 2293|2249.288, 232|256.393, 230|1694.050, 235|2550.775 +1790, trx: 2336, 95%: 798.030, 99%: 1093.403, max_rt: 1547.840, 2338|2803.739, 234|305.185, 232|1799.869, 228|2453.748 +1800, trx: 2305, 95%: 801.381, 99%: 1048.528, max_rt: 1297.465, 2306|1798.565, 229|304.329, 227|1649.609, 233|2549.599 + +STOPPING THREADS........................................................................................................................................................................................................ + + + [0] sc:7 lt:406567 rt:0 fl:0 avg_rt: 493.7 (5) + [1] sc:10485 lt:395860 rt:0 fl:0 avg_rt: 240.1 (5) + [2] sc:24615 lt:16045 rt:0 fl:0 avg_rt: 49.4 (5) + [3] sc:0 lt:40651 rt:0 fl:0 avg_rt: 1273.6 (80) + [4] sc:0 lt:40656 rt:0 fl:0 avg_rt: 1665.3 (20) + in 1800 sec. + + + [0] sc:7 lt:406569 rt:0 fl:0 + [1] sc:10487 lt:396098 rt:0 fl:0 + [2] sc:24615 lt:16045 rt:0 fl:0 + [3] sc:0 lt:40655 rt:0 fl:0 + [4] sc:0 lt:40659 rt:0 fl:0 + + (all must be [OK]) + [transaction percentage] + Payment: 43.46% (>=43.0%) [OK] + Order-Status: 4.35% (>= 4.0%) [OK] + Delivery: 4.35% (>= 4.0%) [OK] + Stock-Level: 4.35% (>= 4.0%) [OK] + [response time (at least 90% passed)] + New-Order: 0.00% [NG] * + Payment: 2.58% [NG] * + Order-Status: 60.54% [NG] * + Delivery: 0.00% [NG] * + Stock-Level: 0.00% [NG] * + + 13552.467 TpmC +``` +- 升级:RDS-8C16G-80个仓库结果 +- CPU:100%,内存:35%,连接数:5%,IOPS:18%,磁盘空间:30G + +``` +1780, trx: 4502, 95%: 398.131, 99%: 501.634, max_rt: 772.128, 4473|740.073, 446|183.361, 448|1042.264, 442|1302.569 +1790, trx: 4465, 95%: 398.489, 99%: 541.424, max_rt: 803.659, 4476|845.313, 448|152.917, 450|997.319, 454|1250.160 +1800, trx: 4506, 95%: 397.774, 99%: 501.334, max_rt: 747.074, 4508|701.625, 453|108.619, 450|1052.293, 451|1107.277 + +STOPPING THREADS........................................................................................................................................................................................................ + + + [0] sc:20 lt:803738 rt:0 fl:0 avg_rt: 240.5 (5) + [1] sc:13844 lt:789535 rt:0 fl:0 avg_rt: 128.5 (5) + [2] sc:54560 lt:25817 rt:0 fl:0 avg_rt: 22.1 (5) + [3] sc:0 lt:80372 rt:0 fl:0 avg_rt: 739.8 (80) + [4] sc:0 lt:80378 rt:0 fl:0 avg_rt: 771.1 (20) + in 1800 sec. 
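+
+# 补充观察(基于上文结果的推断):2C4G 约 5812 TpmC,4C8G 约 13552 TpmC,且压测期间 CPU 都是 100%,
+# 可见此场景瓶颈主要在 CPU,TpmC 随实例规格翻倍大致成倍增长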
+ + + [0] sc:20 lt:803747 rt:0 fl:0 + [1] sc:13845 lt:789916 rt:0 fl:0 + [2] sc:54561 lt:25817 rt:0 fl:0 + [3] sc:0 lt:80377 rt:0 fl:0 + [4] sc:0 lt:80381 rt:0 fl:0 + + (all must be [OK]) + [transaction percentage] + Payment: 43.47% (>=43.0%) [OK] + Order-Status: 4.35% (>= 4.0%) [OK] + Delivery: 4.35% (>= 4.0%) [OK] + Stock-Level: 4.35% (>= 4.0%) [OK] + [response time (at least 90% passed)] + New-Order: 0.00% [NG] * + Payment: 1.72% [NG] * + Order-Status: 67.88% [NG] * + Delivery: 0.00% [NG] * + Stock-Level: 0.00% [NG] * + + + 26791.934 TpmC +``` + + +- 升级:RDS-16C64G-80个仓库结果 +- CPU:100%,内存:18%,连接数:2%,IOPS:10%,磁盘空间:40G + +``` +1780, trx: 8413, 95%: 203.560, 99%: 279.322, max_rt: 451.010, 8414|441.849, 841|92.900, 839|583.340, 843|644.276 +1790, trx: 8269, 95%: 204.599, 99%: 282.602, max_rt: 444.075, 8262|412.414, 827|91.551, 831|665.421, 824|616.396 +1800, trx: 8395, 95%: 202.285, 99%: 255.026, max_rt: 436.136, 8404|446.292, 839|87.081, 839|609.221, 842|697.509 + +STOPPING THREADS........................................................................................................................................................................................................ + + + [0] sc:37 lt:1532893 rt:0 fl:0 avg_rt: 124.8 (5) + [1] sc:36091 lt:1496111 rt:0 fl:0 avg_rt: 68.5 (5) + [2] sc:105738 lt:47555 rt:0 fl:0 avg_rt: 11.4 (5) + [3] sc:0 lt:153285 rt:0 fl:0 avg_rt: 404.6 (80) + [4] sc:0 lt:153293 rt:0 fl:0 avg_rt: 389.5 (20) + in 1800 sec. + + + [0] sc:37 lt:1532918 rt:0 fl:0 + [1] sc:36093 lt:1496868 rt:0 fl:0 + [2] sc:105739 lt:47556 rt:0 fl:0 + [3] sc:0 lt:153297 rt:0 fl:0 + [4] sc:0 lt:153298 rt:0 fl:0 + + (all must be [OK]) + [transaction percentage] + Payment: 43.47% (>=43.0%) [OK] + Order-Status: 4.35% (>= 4.0%) [OK] + Delivery: 4.35% (>= 4.0%) [OK] + Stock-Level: 4.35% (>= 4.0%) [OK] + [response time (at least 90% passed)] + New-Order: 0.00% [NG] * + Payment: 2.36% [NG] * + Order-Status: 68.98% [NG] * + Delivery: 0.00% [NG] * + Stock-Level: 0.00% [NG] * + + + 51097.668 TpmC +``` + + +- 几轮下来,最终数据量: + +``` +select count(*) from customer; + 2400000 +select count(*) from district; + 800 +select count(*) from history; + 5779395 +select count(*) from item; + 100000 +select count(*) from new_orders; + 764970 +select count(*) from order_line; + 57453708 +select count(*) from orders; + 5745589 +select count(*) from stock; + 8000000 +select count(*) from warehouse; + 80 +``` + ## 资料 From 667b481dbe45a9d46981811fe5133711ee92cbeb Mon Sep 17 00:00:00 2001 From: zhang Date: Thu, 11 Apr 2019 13:49:25 +0800 Subject: [PATCH 266/330] 2019-04-11 mongo --- markdown-file/MongoDB-Install-And-Settings.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/markdown-file/MongoDB-Install-And-Settings.md b/markdown-file/MongoDB-Install-And-Settings.md index 71cddd81..e25c6ffe 100644 --- a/markdown-file/MongoDB-Install-And-Settings.md +++ b/markdown-file/MongoDB-Install-And-Settings.md @@ -362,6 +362,15 @@ $ne ->not equal 不等于 - Robomongo: +## 基准测试 + +- + +## 随机生成测试数据 + +- + + ## 资料 - From d8f97bb2bd80c13db5799d455fc1eb3620a2e88b Mon Sep 17 00:00:00 2001 From: zhang Date: Thu, 11 Apr 2019 14:12:38 +0800 Subject: [PATCH 267/330] 2019-04-11 mongo --- markdown-file/MongoDB-Install-And-Settings.md | 56 +++++++++++++++++-- 1 file changed, 51 insertions(+), 5 deletions(-) diff --git a/markdown-file/MongoDB-Install-And-Settings.md b/markdown-file/MongoDB-Install-And-Settings.md index e25c6ffe..0a29fdae 100644 --- a/markdown-file/MongoDB-Install-And-Settings.md +++ 
b/markdown-file/MongoDB-Install-And-Settings.md @@ -22,8 +22,9 @@ - 请查看介绍中支持哪个版本: - 目前 201712 支持 MongoDB 3.4 +------------------------------------------------------------------- -## Docker 下安装 MongoDB +## Docker 下安装 MongoDB(方式一) - 先创建一个宿主机以后用来存放数据的目录:`mkdir -p /data/docker/mongo/db` - 赋权:`chmod 777 -R /data/docker/mongo/db` @@ -51,16 +52,61 @@ db.createUser( - 导出:`docker exec -it cloud-mongo mongoexport -h 127.0.0.1 -u 用户名 -p 密码 -d 库名 -c 集合名 -o /data/db/mongodb.json --type json` - 导入:`docker exec -it cloud-mongo mongoimport -h 127.0.0.1 -u 用户名 -p 密码 -d 库名 -c 集合名 --file /data/db/mongodb.json --type json` -## 安装环境 -- CentOS 6 -## MongoDB 安装 +## Docker 下安装 MongoDB(方式二) + +- 先创建一个宿主机以后用来存放数据的目录:`mkdir -p /data/docker/mongo/db` +- 赋权:`chmod 777 -R /data/docker/mongo/db` +- 运行镜像:`docker run --name cloud-mongo2 -p 37017:27017 -v /data/docker/mongo/db:/data/db -d mongo:3.4 --auth` +- 进入容器中 mongo shell 交互界面:`docker exec -it cloud-mongo2 mongo` + - 进入 admin:`use admin` +- 创建一个超级用户: + +``` +db.createUser( + { + user: "mongo-admin", + pwd: "123456", + roles: [ + { role: "root", db: "admin" } + ] + } +) +``` + +- 验证账号:`db.auth("mongo-admin","123456")` + - 使用 db.auth() 可以对数据库中的用户进行验证,如果验证成功则返回 1,否则返回 0 +- 接着创建一个普通数据库和用户: + +``` + +use my_test_db + + +db.createUser( + { + user: "mytestuser", + pwd: "123456", + roles: [ + { role: "dbAdmin", db: "my_test_db" }, + { role: "readWrite", db: "my_test_db" } + ] + } +) + + +db.auth("mytestuser","123456") +``` + +------------------------------------------------------------------- + +## MongoDB 传统方式安装 - 关闭 SELinux - 编辑配置文件:`vim /etc/selinux/config` - 把 `SELINUX=enforcing` 改为 `SELINUX=disabled` -- MongoDB 安装 +- MongoDB 资料 - 官网: - 官网文档: - 此时(20170228) 最新稳定版本为:**3.4.2** From 2a6c1aa8c9418681a7e6a14fd15d39560a7f8037 Mon Sep 17 00:00:00 2001 From: zhang Date: Thu, 11 Apr 2019 14:13:46 +0800 Subject: [PATCH 268/330] 2019-04-11 mongo --- markdown-file/MongoDB-Install-And-Settings.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/markdown-file/MongoDB-Install-And-Settings.md b/markdown-file/MongoDB-Install-And-Settings.md index 0a29fdae..f05e7b74 100644 --- a/markdown-file/MongoDB-Install-And-Settings.md +++ b/markdown-file/MongoDB-Install-And-Settings.md @@ -60,10 +60,11 @@ db.createUser( - 赋权:`chmod 777 -R /data/docker/mongo/db` - 运行镜像:`docker run --name cloud-mongo2 -p 37017:27017 -v /data/docker/mongo/db:/data/db -d mongo:3.4 --auth` - 进入容器中 mongo shell 交互界面:`docker exec -it cloud-mongo2 mongo` - - 进入 admin:`use admin` - 创建一个超级用户: ``` +use admin + db.createUser( { user: "mongo-admin", @@ -73,10 +74,11 @@ db.createUser( ] } ) + +db.auth("mongo-admin","123456") ``` -- 验证账号:`db.auth("mongo-admin","123456")` - - 使用 db.auth() 可以对数据库中的用户进行验证,如果验证成功则返回 1,否则返回 0 +- 使用 db.auth() 可以对数据库中的用户进行验证,如果验证成功则返回 1,否则返回 0 - 接着创建一个普通数据库和用户: ``` From 86ae11355f0b52e1199ce748e2febf50ae840ff8 Mon Sep 17 00:00:00 2001 From: zhang Date: Fri, 12 Apr 2019 16:31:44 +0800 Subject: [PATCH 269/330] 2019-04-12 --- .editorconfig | 25 +++++++ markdown-file/Mysql-Optimize.md | 114 ++++++++++++++++++++++++++++---- 2 files changed, 125 insertions(+), 14 deletions(-) create mode 100644 .editorconfig diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 00000000..bc36a8e5 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,25 @@ +# http://editorconfig.org +# 官网首页有介绍:IntelliJ IDEA,VS Code 默认就支持,无需额外安装插件 +root = true + +# 空格替代Tab缩进在各种编辑工具下效果一致 +[*] +indent_style = space +indent_size = 4 +charset = utf-8 +end_of_line = lf +trim_trailing_whitespace = true 
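+# 补充说明:Markdown 行尾两个空格表示强制换行,所以下方 [*.md] 一节会把该项改回 false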
+insert_final_newline = true +max_line_length = 200 + + + +[*.java] +indent_style = tab + +[*.{json,yml}] +indent_size = 2 + +[*.md] +insert_final_newline = false +trim_trailing_whitespace = false diff --git a/markdown-file/Mysql-Optimize.md b/markdown-file/Mysql-Optimize.md index 1cbad473..583bc9c8 100644 --- a/markdown-file/Mysql-Optimize.md +++ b/markdown-file/Mysql-Optimize.md @@ -3,6 +3,15 @@ - 下面说的优化基于 MySQL 5.6,理论上 5.5 之后的都算适用,具体还是要看官网 +## 优秀材料 + +- +- <> +- <> +- <> +- <> + + ## 服务状态查询 - 查看当前数据库的状态,常用的有: @@ -10,14 +19,23 @@ - 查看刚刚执行 SQL 是否有警告信息:`SHOW WARNINGS;` - 查看刚刚执行 SQL 是否有错误信息:`SHOW ERRORS;` - 查看已经连接的所有线程状况:`SHOW FULL PROCESSLIST;` + - 输出参数说明: + - 可以结束某些连接:`kill id值` - 查看当前连接数量:`SHOW STATUS LIKE 'max_used_connections';` - 查看变量,在 my.cnf 中配置的变量会在这里显示:`SHOW VARIABLES;` + - 查询慢 SQL 配置:`show variables like 'slow%';` + - 开启慢 SQL:`set global slow_query_log='ON'` + - 查询慢 SQL 秒数值:` show variables like 'long%';` + - 调整秒速值:`set long_query_time=1;` - 查看当前MySQL 中已经记录了多少条慢查询,前提是配置文件中开启慢查询记录了. - `SHOW STATUS LIKE '%slow_queries%';` - 查询当前MySQL中查询、更新、删除执行多少条了,可以通过这个来判断系统是侧重于读还是侧重于写,如果是写要考虑使用读写分离。 - `SHOW STATUS LIKE '%Com_select%';` + - `SHOW STATUS LIKE '%Com_insert%';` - `SHOW STATUS LIKE '%Com_update%';` - `SHOW STATUS LIKE '%Com_delete%';` + - 如果 rollback 过多,说明程序肯定哪里存在问题 + - `SHOW STATUS LIKE '%Com_rollback%';` - 显示MySQL服务启动运行了多少时间,如果MySQL服务重启,该时间重新计算,单位秒 - `SHOW STATUS LIKE 'uptime';` - 显示查询缓存的状态情况 @@ -35,23 +53,22 @@ - 6. Qcache_not_cached # 没有进行缓存的查询的数量,通常是这些查询未被缓存或其类型不允许被缓存 - 7. Qcache_queries_in_cache # 在当前缓存的查询(和响应)的数量。 - 8. Qcache_total_blocks #缓存中块的数量。 +- 查询哪些表在被使用,是否有锁表:`SHOW OPEN TABLES WHERE In_use > 0;` +- 查询 innodb 状态(输出内容很多):`SHOW ENGINE INNODB STATUS;` +- 锁性能状态:`SHOW STATUS LIKE 'innodb_row_lock_%';` + - Innodb_row_lock_current_waits:当前等待锁的数量 + - Innodb_row_lock_time:系统启动到现在、锁定的总时间长度 + - Innodb_row_lock_time_avg:每次平均锁定的时间 + - Innodb_row_lock_time_max:最长一次锁定时间 + - Innodb_row_lock_waits:系统启动到现在、总共锁定次数 +- 帮我们分析表,并提出建议:`select * from my_table procedure analyse();` +## 系统表 - -## my.cnf 常配置项 - -- `key_buffer_size`,索引缓冲区大小。 -- `query_cache_size`,查询缓存。 -- `max_connections = 1000`,MySQL 的最大并发连接数 -- ``, -- ``, -- ``, -- ``, -- ``, -- ``, -- ``, -- ``, +- 当前运行的所有事务:`select * from information_schema.INNODB_TRX;` +- 当前事务出现的锁:`select * from information_schema.INNODB_LOCKS;` +- 锁等待的对应关系:`select * from information_schema.INNODB_LOCK_WAITS;` ## 查询优化 @@ -109,6 +126,46 @@ - 优化: - 可以使用连接查询(JOIN)代替子查询,连接查询时不需要建立临时表,其速度比子查询快。 +## 查询不走索引优化 + +- WHERE字句的查询条件里有不等于号(WHERE column!=…),MYSQL将无法使用索引 +- 类似地,如果WHERE字句的查询条件里使用了函数(如:WHERE DAY(column)=…),MYSQL将无法使用索引 +- 在JOIN操作中(需要从多个数据表提取数据时),MYSQL只有在主键和外键的数据类型相同时才能使用索引,否则即使建立了索引也不会使用 +- 如果WHERE子句的查询条件里使用了比较操作符LIKE和REGEXP,MYSQL只有在搜索模板的第一个字符不是通配符的情况下才能使用索引。比如说,如果查询条件是LIKE 'abc%',MYSQL将使用索引;如果条件是LIKE '%abc',MYSQL将不使用索引。 +- 在ORDER BY操作中,MYSQL只有在排序条件不是一个查询条件表达式的情况下才使用索引。尽管如此,在涉及多个数据表的查询里,即使有索引可用,那些索引在加快ORDER BY操作方面也没什么作用。 +- 如果某个数据列里包含着许多重复的值,就算为它建立了索引也不会有很好的效果。比如说,如果某个数据列里包含了净是些诸如“0/1”或“Y/N”等值,就没有必要为它创建一个索引。 +- 索引有用的情况下就太多了。基本只要建立了索引,除了上面提到的索引不会使用的情况下之外,其他情况只要是使用在WHERE条件里,ORDER BY 字段,联表字段,一般都是有效的。 建立索引要的就是有效果。 不然还用它干吗? 
如果不能确定在某个字段上建立的索引是否有效果,只要实际进行测试下比较下执行时间就知道。 +- 如果条件中有or(并且其中有or的条件是不带索引的),即使其中有条件带索引也不会使用(这也是为什么尽量少用or的原因)。注意:要想使用or,又想让索引生效,只能将or条件中的每个列都加上索引 +- 如果列类型是字符串,那一定要在条件中将数据使用引号引用起来,否则不使用索引 +- 如果mysql估计使用全表扫描要比使用索引快,则不使用索引 + + +## 其他查询优化 + +- 关联查询过程 + - 确保 ON 或者 using子句中的列上有索引 + - 确保任何的 groupby 和 orderby 中的表达式只涉及到一个表中的列。 +- count()函数优化 + - count()函数有一点需要特别注意:它是不统计值为NULL的字段的!所以:不能指定查询结果的某一列,来统计结果行数。即 count(xx column) 不太好。 + - 如果想要统计结果集,就使用 count(*),性能也会很好。 +- 分页查询(数据偏移量大的场景) + - 不允许跳页,只能上一页或者下一页 + - 使用 where 加上上一页 ID 作为条件(具体要看 explain 分析效果):`select xxx,xxx from test_table where id < '上页id分界值' order by id desc limit 20;` + +## 创表原则 + +- 所有字段均定义为 NOT NULL ,除非你真的想存 Null。因为表内默认值 Null 过多会影响优化器选择执行计划 + + +## 建立索引原则 + +- 使用区分度高的列作为索引,字段不重复的比例,区分度越高,索引树的分叉也就越多,一次性找到的概率也就越高。 +- 尽量使用字段长度小的列作为索引 +- 使用数据类型简单的列(int 型,固定长度) +- 选用 NOT NULL 的列。在MySQL中,含有空值的列很难进行查询优化,因为它们使得索引、索引的统计信息以及比较运算更加复杂。你应该用0、一个特殊的值或者一个空串代替空值。 +- 尽量的扩展索引,不要新建索引。比如表中已经有a的索引,现在要加(a,b)的索引,那么只需要修改原来的索引即可。这样也可避免索引重复。 + + ## 数据库结构优化 @@ -152,7 +209,36 @@ - 可以看我整理的这篇文章: - 由于 binlog 日志的读写频繁,可以考虑在 my.cnf 中配置,指定这个 binlog 日志到一个 SSD 硬盘上。 + +## 锁相关 + +InnoDB支持事务;InnoDB 采用了行级锁。也就是你需要修改哪行,就可以只锁定哪行。 +在 Mysql 中,行级锁并不是直接锁记录,而是锁索引。索引分为主键索引和非主键索引两种,如果一条sql 语句操作了主键索引,Mysql 就会锁定这条主键索引;如果一条语句操作了非主键索引,MySQL会先锁定该非主键索引,再锁定相关的主键索引。 +InnoDB 行锁是通过给索引项加锁实现的,如果没有索引,InnoDB 会通过隐藏的聚簇索引来对记录加锁。也就是说:如果不通过索引条件检索数据,那么InnoDB将对表中所有数据加锁,实际效果跟表锁一样。因为没有了索引,找到某一条记录就得扫描全表,要扫描全表,就得锁定表。 + + +数据库的增删改操作默认都会加排他锁,而查询不会加任何锁。 + +排他锁:对某一资源加排他锁,自身可以进行增删改查,其他人无法进行任何操作。语法为: +select * from table for update; + +共享锁:对某一资源加共享锁,自身可以读该资源,其他人也可以读该资源(也可以再继续加共享锁,即 共享锁可多个共存),但无法修改。 +要想修改就必须等所有共享锁都释放完之后。语法为: +select * from table lock in share mode; + + + ## 资料 - - +- +- +- +- +- <> +- <> +- <> +- <> +- <> +- <> \ No newline at end of file From 53c4e660f4e8243b94adb45c1acff793a2beb270 Mon Sep 17 00:00:00 2001 From: judasn Date: Sat, 13 Apr 2019 13:15:07 +0800 Subject: [PATCH 270/330] 2019-04-13 --- markdown-file/Mysql-Optimize.md | 154 ++++++++++++++++++++++---------- 1 file changed, 105 insertions(+), 49 deletions(-) diff --git a/markdown-file/Mysql-Optimize.md b/markdown-file/Mysql-Optimize.md index 583bc9c8..51b960aa 100644 --- a/markdown-file/Mysql-Optimize.md +++ b/markdown-file/Mysql-Optimize.md @@ -71,60 +71,106 @@ - 锁等待的对应关系:`select * from information_schema.INNODB_LOCK_WAITS;` -## 查询优化 - -- 使用 EXPLAIN 进行 SQL 语句分析:`EXPLAIN SELECT * FROM sys_user;` -- 得到的结果有下面几列: - - **id**,该列表示当前结果序号,无特殊意义,不重要 +## otpimizer trace + +- 作用:输入我们想要查看优化过程的查询语句,当该查询语句执行完成后,就可以到 information_schema 数据库下的OPTIMIZER_TRACE表中查看 mysql 自己帮我们的完整优化过程 +- 是否打开(默认都是关闭):`SHOW VARIABLES LIKE 'optimizer_trace';` + - one_line的值是控制输出格式的,如果为on那么所有输出都将在一行中展示,不适合人阅读,所以我们就保持其默认值为off吧。 +- 打开配置:`SET optimizer_trace="enabled=on";` +- 关闭配置:`SET optimizer_trace="enabled=off";` +- 查询优化结果:`SELECT * FROM information_schema.OPTIMIZER_TRACE;` + +``` +我们所说的基于成本的优化主要集中在optimize阶段,对于单表查询来说,我们主要关注optimize阶段的"rows_estimation"这个过程,这个过程深入分析了对单表查询的各种执行方案的成本; +对于多表连接查询来说,我们更多需要关注"considered_execution_plans"这个过程,这个过程里会写明各种不同的连接方式所对应的成本。 +反正优化器最终会选择成本最低的那种方案来作为最终的执行计划,也就是我们使用EXPLAIN语句所展现出的那种方案。 +如果有小伙伴对使用EXPLAIN语句展示出的对某个查询的执行计划很不理解,大家可以尝试使用optimizer trace功能来详细了解每一种执行方案对应的成本,相信这个功能能让大家更深入的了解MySQL查询优化器。 +``` + + + +## 查询优化(EXPLAIN 查看执行计划) + +- 使用 EXPLAIN 进行 SQL 语句分析:`EXPLAIN SELECT * FROM sys_user;`,效果如下: + +``` +id|select_type|table |partitions|type|possible_keys|key|key_len|ref|rows|filtered|Extra| +--|-----------|--------|----------|----|-------------|---|-------|---|----|--------|-----| + 1|SIMPLE |sys_user| |ALL | | | | | 2| 100| | +``` + +- 简单描述 + - 
`id`:在一个大的查询语句中每个 SELECT 关键字都对应一个唯一的id + - `select_type`:SELECT 关键字对应的那个查询的类型 + - `table`:表名 + - `partitions`:匹配的分区信息 + - `type`:针对单表的访问方法 + - `possible_keys`:可能用到的索引 + - `key`:实际上使用的索引 + - `key_len`:实际使用到的索引长度 + - `ref`:当使用索引列等值查询时,与索引列进行等值匹配的对象信息 + - `rows`:预估的需要读取的记录条数 + - `filtered`:某个表经过搜索条件过滤后剩余记录条数的百分比 + - `Extra`:一些额外的信息 +- 有多个结果的场景分析 + - 有子查询的一般都会有多个结果,id 是递增值。但是,有些场景查询优化器可能对子查询进行重写,转换为连接查询。所以有时候 id 就不是自增值。 + - 对于连接查询一般也会有多个接口,id 可能是相同值,相同值情况下,排在前面的记录表示驱动表,后面的表示被驱动表 + - UNION 场景会有 id 为 NULL 的情况,这是一个去重后临时表,合并多个结果集的临时表。但是,UNION ALL 不会有这种情况,因为这个不需要去重。 +- 根据具体的描述: + - **id**,该列表示当前结果序号 - **select_type**,表示 SELECT 语句的类型,有下面几种 - - SIMPLE,表示简单查询,其中不包括连接查询和子查询 - - PRIMARY,表示主查询,或者是最外面的查询语句。比如你使用一个子查询语句,比如这条 SQL:`EXPLAIN SELECT * FROM (SELECT sys_user_id FROM sys_user WHERE sys_user_id = 1) AS temp_table;` - - 这条 SQL 有两个结果,其中有一个结果的类型就是 PRIMARY - - UNION,使用 UNION 的 SQL 是这个类型 - - DERIVED,在 SQL 中 From 后面子查询 - - SUBQUERY,子查询 + - `SIMPLE`:表示简单查询,其中不包括 UNION 查询和子查询 + - `PRIMARY`:对于包含UNION、UNION ALL或者子查询的大查询来说,它是由几个小查询组成的,其中最左边的那个查询的select_type值就是PRIMARY + - `UNION`:对于包含UNION或者UNION ALL的大查询来说,它是由几个小查询组成的,其中除了最左边的那个小查询以外,其余的小查询的select_type值就是UNION + - `UNION RESULT`:MySQL选择使用临时表来完成UNION查询的去重工作,针对该临时表的查询的select_type就是UNION RESULT + - `SUBQUERY`:如果包含子查询的查询语句不能够转为对应的semi-join的形式,并且该子查询是不相关子查询,并且查询优化器决定采用将该子查询物化的方案来执行该子查询时,该子查询的第一个SELECT关键字代表的那个查询的select_type就是SUBQUERY + - `DEPENDENT SUBQUERY`:如果包含子查询的查询语句不能够转为对应的semi-join的形式,并且该子查询是相关子查询,则该子查询的第一个SELECT关键字代表的那个查询的select_type就是DEPENDENT SUBQUERY + - `DEPENDENT UNION`:在包含UNION或者UNION ALL的大查询中,如果各个小查询都依赖于外层查询的话,那除了最左边的那个小查询之外,其余的小查询的select_type的值就是DEPENDENT UNION + - `DERIVED`:对于采用物化的方式执行的包含派生表的查询,该派生表对应的子查询的select_type就是DERIVED + - `MATERIALIZED`:当查询优化器在执行包含子查询的语句时,选择将子查询物化之后与外层查询进行连接查询时,该子查询对应的select_type属性就是MATERIALIZED - 还有其他一些 - **table**,表名或者是子查询的一个结果集 - **type**,表示表的链接类型,分别有(以下的连接类型的顺序是从最佳类型到最差类型)**(这个属性重要)**: - 性能好: - - system,表仅有一行,这是 const 类型的特列,平时不会出现,这个也可以忽略不计。 - - const,数据表最多只有一个匹配行,因为只匹配一行数据,所以很快,常用于 PRIMARY KEY 或者 UNIQUE 索引的查询,可理解为 const 是最优化的。 - - eq_ref,mysql 手册是这样说的:"对于每个来自于前面的表的行组合,从该表中读取一行。这可能是最好的联接类型,除了 const 类型。它用在一个索引的所有部分被联接使用并且索引是 UNIQUE(唯一键) 也不是 PRIMARY KEY(主键)"。eq_ref 可以用于使用 = 比较带索引的列。 - - ref,查询条件索引既不是 UNIQUE(唯一键) 也不是 PRIMARY KEY(主键) 的情况。ref 可用于 = 或 < 或 > 操作符的带索引的列。 - - ref_or_null,该联接类型如同 ref,但是添加了 MySQL 可以专门搜索包含 NULL 值的行。在解决子查询中经常使用该联接类型的优化。 + - `system`:当表中只有一条记录并且该表使用的存储引擎的统计数据是精确的,比如MyISAM、Memory,那么对该表的访问方法就是system,平时不会出现,这个也可以忽略不计。 + - `const`:当我们根据主键或者唯一二级索引列与常数进行等值匹配时,对单表的访问方法就是const,常用于 PRIMARY KEY 或者 UNIQUE 索引的查询,可理解为 const 是最优化的。 + - `eq_ref`:在连接查询时,如果被驱动表是通过主键或者唯一二级索引列等值匹配的方式进行访问的(如果该主键或者唯一二级索引是联合索引的话,所有的索引列都必须进行等值比较),则对该被驱动表的访问方法就是eq_ref + - `ref`:当通过普通的二级索引列与常量进行等值匹配时来查询某个表,那么对该表的访问方法就可能是ref。ref 可用于 = 或 < 或 > 操作符的带索引的列。 + - `ref_or_null`:当对普通二级索引进行等值匹配查询,该索引列的值也可以是NULL值时,那么对该表的访问方法就可能是ref_or_null - 性能较差: - - index_merge,该联接类型表示使用了索引合并优化方法。在这种情况下,key 列包含了使用的索引的清单,key_len 包含了使用的索引的最长的关键元素。 - - unique_subquery,该类型替换了下面形式的IN子查询的ref: `value IN (SELECT primary_key FROM single_table WHERE some_expr)`。unique_subquery 是一个索引查找函数,可以完全替换子查询,效率更高。 - - index_subquery,该联接类型类似于 unique_subquery。可以替换 IN 子查询, 但只适合下列形式的子查询中的非唯一索引: `value IN (SELECT key_column FROM single_table WHERE some_expr)` - - range,只检索给定范围的行, 使用一个索引来选择行。 - - index,该联接类型与 ALL 相同, 除了只有索引树被扫描。这通常比 ALL 快, 因为索引文件通常比数据文件小。 + - `index_merge`:该联接类型表示使用了索引合并优化方法。在这种情况下,key 列包含了使用的索引的清单,key_len 包含了使用的索引的最长的关键元素。 + - 
`unique_subquery`:类似于两表连接中被驱动表的eq_ref访问方法,unique_subquery是针对在一些包含IN子查询的查询语句中,如果查询优化器决定将IN子查询转换为EXISTS子查询,而且子查询可以使用到主键进行等值匹配的话,那么该子查询执行计划的type列的值就是unique_subquery + - `index_subquery`:index_subquery与unique_subquery类似,只不过访问子查询中的表时使用的是普通的索引 + - `range`:只检索给定范围的行, 使用一个索引来选择行。 + - `index`:该联接类型与 ALL 相同, 除了只有索引树被扫描。这通常比 ALL 快, 因为索引文件通常比数据文件小。 + - 再一次强调,对于使用InnoDB存储引擎的表来说,二级索引的记录只包含索引列和主键列的值,而聚簇索引中包含用户定义的全部列以及一些隐藏列,所以扫描二级索引的代价比直接全表扫描,也就是扫描聚簇索引的代价更低一些 - 性能最差: - - ALL,对于每个来自于先前的表的行组合, 进行完整的表扫描。(性能最差) - - **possible_keys**,指出 MySQL 能使用哪个索引在该表中找到行。如果该列为 NULL,说明没有使用索引,可以对该列创建索引来提供性能。**(这个属性重要)** - - **key**,显示 MySQL 实际决定使用的键 (索引)。如果没有选择索引, 键是 NULL。**(这个属性重要)** - - **key**_len,显示 MySQL 决定使用的键长度。如果键是 NULL, 则长度为 NULL。注意:key_len 是确定了 MySQL 将实际使用的索引长度。 - - **ref**,显示使用哪个列或常数与 key 一起从表中选择行。 - - **rows**,显示 MySQL 认为它执行查询时必须检查的行数。**(这个属性重要)** - - **Extra**,该列包含 MySQL 解决查询的详细信息: - - Distinct:MySQL 发现第 1 个匹配行后, 停止为当前的行组合搜索更多的行。 - - Not exists:MySQL 能够对查询进行 LEFT JOIN 优化, 发现 1 个匹配 LEFT JOIN 标准的行后, 不再为前面的的行组合在该表内检查更多的行。 - - range checked for each record (index map: #):MySQL 没有发现好的可以使用的索引, 但发现如果来自前面的表的列值已知, 可能部分索引可以使用。 - - Using filesort:MySQL 需要额外的一次传递, 以找出如何按排序顺序检索行。 - - Using index: 从只使用索引树中的信息而不需要进一步搜索读取实际的行来检索表中的列信息。 - - Using temporary: 为了解决查询,MySQL 需要创建一个临时表来容纳结果。 - - Using where:WHERE 子句用于限制哪一个行匹配下一个表或发送到客户。 - - Using sort_union(...), Using union(...), Using intersect(...): 这些函数说明如何为 index_merge 联接类型合并索引扫描。 - - Using index for group-by: 类似于访问表的 Using index 方式,Using index for group-by 表示 MySQL 发现了一个索引, 可以用来查 询 GROUP BY 或 DISTINCT 查询的所有列, 而不要额外搜索硬盘访问实际的表。 -- **了解对索引不生效的查询情况 (这个属性重要)** - - 使用 LIKE 关键字的查询,在使用 LIKE 关键字进行查询的查询语句中,如果匹配字符串的第一个字符为“%”,索引不起作用。只有“%”不在第一个位置,索引才会生效。 - - 使用联合索引的查询,MySQL 可以为多个字段创建索引,一个索引可以包括 16 个字段。对于联合索引,只有查询条件中使用了这些字段中第一个字段时,索引才会生效。 - - 使用 OR 关键字的查询,查询语句的查询条件中只有 OR 关键字,且 OR 前后的两个条件中的列都是索引列时,索引才会生效,否则,索引不生效。 -- 子查询优化 - - MySQL 从 4.1 版本开始支持子查询,使用子查询进行 SELECT 语句嵌套查询,可以一次完成很多逻辑上需要多个步骤才能完成的 SQL 操作。 - - 子查询虽然很灵活,但是执行效率并不高。 - - 执行子查询时,MYSQL 需要创建临时表,查询完毕后再删除这些临时表,所以,子查询的速度会受到一定的影响。 - - 优化: - - 可以使用连接查询(JOIN)代替子查询,连接查询时不需要建立临时表,其速度比子查询快。 + - `ALL`:对于每个来自于先前的表的行组合, 进行完整的表扫描。(性能最差) + - `possible_keys`,指出 MySQL 能使用哪个索引在该表中找到行。如果该列为 NULL,说明没有使用索引,可以对该列创建索引来提供性能。**(这个属性重要)** + - possible_keys列中的值并不是越多越好,可能使用的索引越多,查询优化器计算查询成本时就得花费更长时间,所以如果可以的话,尽量删除那些用不到的索引。 + - `key`,显示 MySQL 实际决定使用的键 (索引)。如果没有选择索引, 键是 NULL。**(这个属性重要)** + - 不过有一点比较特别,就是在使用index访问方法来查询某个表时,possible_keys列是空的,而key列展示的是实际使用到的索引 + - `key_len`,表示当优化器决定使用某个索引执行查询时,该索引记录的最大长度。如果键是可以为 NULL, 则长度多 1。 + - `ref`,显示使用哪个列或常数与 key 一起从表中选择行。 + - `rows`,显示 MySQL 认为它执行查询时必须检查的行数。**(这个属性重要)** + - `Extra`,该列包含 MySQL 解决查询的详细信息: + - `Distinct` MySQL 发现第 1 个匹配行后, 停止为当前的行组合搜索更多的行。 + - `Not exists` 当我们使用左(外)连接时,如果WHERE子句中包含要求被驱动表的某个列等于NULL值的搜索条件,而且那个列又是不允许存储NULL值的,那么在该表的执行计划的Extra列就会提示Not exists额外信息 + - `range checked for each record (index map: #)` MySQL 没有发现好的可以使用的索引, 但发现如果来自前面的表的列值已知, 可能部分索引可以使用。 + - `Using filesort` 有一些情况下对结果集中的记录进行排序是可以使用到索引的 + - 需要注意的是,如果查询中需要使用filesort的方式进行排序的记录非常多,那么这个过程是很耗费性能的,我们最好想办法将使用文件排序的执行方式改为使用索引进行排序。 + - `Using temporary` 在许多查询的执行过程中,MySQL可能会借助临时表来完成一些功能,比如去重、排序之类的,比如我们在执行许多包含DISTINCT、GROUP BY、UNION等子句的查询过程中,如果不能有效利用索引来完成查询,MySQL很有可能寻求通过建立内部的临时表来执行查询。如果查询中使用到了内部的临时表,在执行计划的Extra列将会显示Using temporary提示 + - 如果我们并不想为包含GROUP BY子句的查询进行排序,需要我们显式的写上:ORDER BY NULL + - 执行计划中出现Using temporary并不是一个好的征兆,因为建立与维护临时表要付出很大成本的,所以我们最好能使用索引来替代掉使用临时表 + - `Using join buffer (Block Nested Loop)` 在连接查询执行过程过,当被驱动表不能有效的利用索引加快访问速度,MySQL一般会为其分配一块名叫join buffer的内存块来加快查询速度,也就是我们所讲的基于块的嵌套循环算法 + - `Using where` + - 
当我们使用全表扫描来执行对某个表的查询,并且该语句的WHERE子句中有针对该表的搜索条件时,在Extra列中会提示上述额外信息 + - 当使用索引访问来执行对某个表的查询,并且该语句的WHERE子句中有除了该索引包含的列之外的其他搜索条件时,在Extra列中也会提示上述额外信息 + - `Using sort_union(...), Using union(...), Using intersect(...)` 如果执行计划的Extra列出现了Using intersect(...)提示,说明准备使用Intersect索引合并的方式执行查询,括号中的...表示需要进行索引合并的索引名称;如果出现了Using union(...)提示,说明准备使用Union索引合并的方式执行查询;出现了Using sort_union(...)提示,说明准备使用Sort-Union索引合并的方式执行查询。 + - `Using index condition` 有些搜索条件中虽然出现了索引列,但却不能使用到索引 + - `Using index` 当我们的查询列表以及搜索条件中只包含属于某个索引的列,也就是在可以使用索引覆盖的情况下,在Extra列将会提示该额外信息 + - `Using index for group-by` 类似于访问表的 Using index 方式,Using index for group-by 表示 MySQL 发现了一个索引, 可以用来查 询 GROUP BY 或 DISTINCT 查询的所有列, 而不要额外搜索硬盘访问实际的表。 + ## 查询不走索引优化 @@ -140,6 +186,14 @@ - 如果mysql估计使用全表扫描要比使用索引快,则不使用索引 +## 子查询优化 + +- MySQL 从 4.1 版本开始支持子查询,使用子查询进行 SELECT 语句嵌套查询,可以一次完成很多逻辑上需要多个步骤才能完成的 SQL 操作。 +- 子查询虽然很灵活,但是执行效率并不高。 +- 执行子查询时,MYSQL 需要创建临时表,查询完毕后再删除这些临时表,所以,子查询的速度会受到一定的影响。 +- 优化: + - 可以使用连接查询(JOIN)代替子查询,连接查询时不需要建立临时表,其速度比子查询快。 + ## 其他查询优化 - 关联查询过程 @@ -199,7 +253,9 @@ - 插入数据之前执行禁止事务的自动提交,数据插入完成后再恢复,可以提供插入速度。 - 禁用:`SET autocommit = 0;` - 开启:`SET autocommit = 1;` - + - 插入数据之前执行禁止对外键的检查,数据插入完成后再恢复 + - 禁用:`SET foreign_key_checks = 0;` + - 开启:`SET foreign_key_checks = 1;` ## 服务器优化 @@ -236,7 +292,7 @@ select * from table lock in share mode; - - - -- <> +- - <> - <> - <> From f2cf8a5a83d65204a0e49266ffbaf830f904e4bb Mon Sep 17 00:00:00 2001 From: judasn Date: Sat, 13 Apr 2019 23:22:02 +0800 Subject: [PATCH 271/330] 2019-04-13 --- markdown-file/Mysql-Optimize.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/markdown-file/Mysql-Optimize.md b/markdown-file/Mysql-Optimize.md index 51b960aa..cd0c58d5 100644 --- a/markdown-file/Mysql-Optimize.md +++ b/markdown-file/Mysql-Optimize.md @@ -6,7 +6,7 @@ ## 优秀材料 - -- <> +- - <> - <> - <> From f106cfb4cc4fc1cc4d955e126586f5f56f01c5cb Mon Sep 17 00:00:00 2001 From: "Judas.n" Date: Tue, 16 Apr 2019 14:06:58 +0800 Subject: [PATCH 272/330] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 477aa85c..30c6e32f 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ ## 团队 DevOps 方案参考 - + From 2c318df253789736bfaa24e4a0713714efd37cc4 Mon Sep 17 00:00:00 2001 From: judasn Date: Sat, 20 Apr 2019 13:54:30 +0800 Subject: [PATCH 273/330] 2019-04-20 --- markdown-file/Jira-Install-And-Settings.md | 118 +++++++++++++------- markdown-file/Mysql-Install-And-Settings.md | 29 ++++- 2 files changed, 107 insertions(+), 40 deletions(-) diff --git a/markdown-file/Jira-Install-And-Settings.md b/markdown-file/Jira-Install-And-Settings.md index c3457adc..b831ae36 100644 --- a/markdown-file/Jira-Install-And-Settings.md +++ b/markdown-file/Jira-Install-And-Settings.md @@ -1,42 +1,82 @@ # Jira 安装和配置 +## Jira 7.13.3 -## Jira 安装 - -- Jira 安装 - - 官网: - - 官网下载: - - 中文在线试用: - - 官网帮助说明: - - 官网中文语言包: - - Jira 6.3.6 网盘下载: - - Jira 6.3.6 中文语言包网盘下载: - - 环境要求: - - JDK 7 或更新版本; - - Mysql - - 我们要使用的版本:**atlassian-jira-6.3.6.tar.gz** - - 我个人习惯 `/opt` 目录下创建一个目录 `setups` 用来存放各种软件安装包;在 `/usr` 目录下创建一个 `program` 用来存放各种解压后的软件包,下面的讲解也都是基于此习惯 - - 我个人已经使用了第三方源:`EPEL、RepoForge`,如果你出现 `yum install XXXXX` 安装不成功的话,很有可能就是你没有相关源,请查看我对源设置的文章 - - 解压:`tar zxvf atlassian-jira-6.3.6.tar.gz` - - 修改目录名:`mv atlassian-jira-6.3.6/ jira6.3.6/` - - 移到我个人的安装目录下:`mv jira6.3.6/ /usr/program/` - - 创建存放数据目录:`mkdir -p /usr/program/jira6.3.6/data/` - - 设置环境变量: - - 编辑:`vim /etc/profile` - - 在文件尾部添加: - ``` ini - JIRA_HOME=/usr/program/jira6.3.6/data/ - export JIRA_HOME - ``` - - 刷新配置:`source /etc/profile` - - 
运行:`/usr/program/jira6.3.6/bin/start-jira.sh` - - 访问:`http://192.168.0.111:8080/` - - 汉化:`cp JIRA-6.3.6-language-pack-zh_CN.jar /usr/program/jira6.3.6/atlassian-jira/WEB-INF/lib/` - - 配置过程: - - ![Jira 安装和配置](../images/Jira-Install-And-Settings-a-1.jpg) - - ![Jira 安装和配置](../images/Jira-Install-And-Settings-a-2.jpg) - - ![Jira 安装和配置](../images/Jira-Install-And-Settings-a-3.jpg) - - ![Jira 安装和配置](../images/Jira-Install-And-Settings-a-4.jpg) - - ![Jira 安装和配置](../images/Jira-Install-And-Settings-a-5.jpg) - - ![Jira 安装和配置](../images/Jira-Install-And-Settings-a-6.jpg) - - 重新激活页面地址:`http://192.168.0.111:8090/secure/admin/ViewLicense.jspa` \ No newline at end of file +- 最新 7.13.3 版本时间:2019-04 + +#### 数据库 + +``` +docker run \ + --name mysql-jira \ + --restart always \ + -p 3306:3306 \ + -e MYSQL_ROOT_PASSWORD=adg123456 \ + -e MYSQL_DATABASE=jira_db \ + -e MYSQL_USER=jira_user \ + -e MYSQL_PASSWORD=jira_123456 \ + -d \ + mysql:5.7 +``` + +- 连上容器:`docker exec -it mysql-jira /bin/bash` + - 连上 MySQL:`mysql -u root -p` +- 设置编码:**必须做这一步,不然配置过程会报错,JIRA 的 DB 要求是 utf8mb4** + +``` +SET NAMES 'utf8mb4'; +alter database jira_db character set utf8mb4; +``` + + +#### 安装 + +- 下载: + - 选择:tar.gz 类型下载 +- 解压:`tar zxvf atlassian-jira-software-7.13.3.tar.gz` +- 创建 home 目录:`mkdir /usr/local/atlassian-jira-software-7.13.3-standalone/data` +- 配置 home 变量: + +``` +编辑:vim ~/.zshrc + +在文件尾部添加: + +JIRA_HOME=/usr/local/atlassian-jira-software-7.13.3-standalone/data +export JIRA_HOME + + +刷新配置:`source ~/.zshrc` +``` + +- 设置 MySQL 连接: +- 把 mysql-connector-java-5.1.47.jar 放在目录 `/usr/local/atlassian-jira-software-7.13.3-standalone/atlassian-jira/WEB-INF/lib` + + +#### License 过程 + +- 参考自己的为知笔记 + +#### 运行 + +- 启动:`sh /usr/local/atlassian-jira-software-7.13.3-standalone/bin/start-jira.sh` +- 停止:`sh /usr/local/atlassian-jira-software-7.13.3-standalone/bin/stop-jira.sh` + - `ps -ef | grep java` +- 查看 log:`tail -300f /usr/local/atlassian-jira-software-7.13.3-standalone/logs/catalina.out` +- 访问: + - 注意防火墙配置 +- 如果需要更换端口号可以修改:`/usr/local/atlassian-jira-software-7.13.3-standalone/conf/server.xml` 文件中的内容。 + + +#### 中文化 + +- 从 7.x 版本默认已经有中文支持,不需要再汉化了 +- 在安装后首次进入的时候就可以配置,选择中文了 + + +#### 首次配置 + +- 参考文章: +- 因为步骤一样,所以我就不再截图了。 + + diff --git a/markdown-file/Mysql-Install-And-Settings.md b/markdown-file/Mysql-Install-And-Settings.md index acb54084..5eb73d43 100644 --- a/markdown-file/Mysql-Install-And-Settings.md +++ b/markdown-file/Mysql-Install-And-Settings.md @@ -1,7 +1,34 @@ # MySQL 安装和配置 -## Docker 安装 MySQL +## Docker 安装 MySQL(不带挂载) + +``` +docker run \ + --name mysql-jira \ + --restart always \ + -p 3306:3306 \ + -e MYSQL_ROOT_PASSWORD=adg_123456 \ + -e MYSQL_DATABASE=jira_db \ + -e MYSQL_USER=jira_user \ + -e MYSQL_PASSWORD=jira_123456 \ + -d \ + mysql:5.7 +``` + + +- 连上容器:`docker exec -it mysql-jira /bin/bash` + - 连上 MySQL:`mysql -u root -p` +- 设置编码: + +``` +SET NAMES 'utf8mb4'; +alter database jira_db character set utf8mb4; +``` + + + +## Docker 安装 MySQL(带挂载) - 关掉:SELinux - 创建本地数据存储 + 配置文件目录:`mkdir -p /data/docker/mysql/datadir /data/docker/mysql/conf /data/docker/mysql/log` From 7e0391488b55f74bbdb3df58f5ba3af801449920 Mon Sep 17 00:00:00 2001 From: judasn Date: Sun, 21 Apr 2019 16:04:16 +0800 Subject: [PATCH 274/330] 2019-04-21 --- markdown-file/Bash.md | 1 + 1 file changed, 1 insertion(+) diff --git a/markdown-file/Bash.md b/markdown-file/Bash.md index 58dfcd55..b5eba96e 100644 --- a/markdown-file/Bash.md +++ b/markdown-file/Bash.md @@ -107,6 +107,7 @@ drwxr-xr-x. 5 root root 4096 3月 26 10:57,其中最前面的 d 表示这是 - `find . 
-name "lin*" -exec ls -l {} \;`,当前目录搜索lin开头的文件,然后用其搜索后的结果集,再执行ls -l的命令(这个命令可变,其他命令也可以),其中 -exec 和 {} \; 都是固定格式 - `find /opt -type f -size +800M -print0 | xargs -0 du -h | sort -nr`,找出 /opt 目录下大于 800 M 的文件 - `find / -name "*tower*" -exec rm {} \;`,找到文件并删除 + - `find / -name "*tower*" -exec mv {} /opt \;`,找到文件并移到 opt 目录 - `find . -name "*" |xargs grep "youmeek"`,递归查找当前文件夹下所有文件内容中包含 youmeek 的文件 - `find . -size 0 | xargs rm -f &`,删除当前目录下文件大小为0的文件 - `du -hm --max-depth=2 | sort -nr | head -12`,找出系统中占用容量最大的前 12 个目录 From c457bd24c3a6e39fd5cf4650c03b224d4ebde2a6 Mon Sep 17 00:00:00 2001 From: zhang Date: Fri, 26 Apr 2019 12:57:34 +0800 Subject: [PATCH 275/330] 2019-04-26 --- markdown-file/monitor.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index 38ae729c..66a065a4 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -66,6 +66,8 @@ procs -----------memory---------- ---swap-- -----io---- -system-- ------cpu----- - `bi` 表示从块设备读取数据的量(读磁盘) - `bo` 表示从块设备写入数据的量(写磁盘) - **如果bi和bo两个数字比较高,则说明,磁盘IO压力大。** + - `in` 每秒 CPU 的中断次数,包括时间中断 + - `cs` 每秒上下文切换次数,例如我们调用系统函数,就要进行上下文切换,线程的切换,也要进程上下文切换,这个值要越小越好,太大了,要考虑调低线程或者进程的数目 - `wa` 表示I/O等待所占用CPU的时间比 #### 命令:sar(综合) @@ -266,6 +268,7 @@ atctive 和 passive 的数目通常可以用来衡量服务器的负载:接受 - 在 `top` 命令状态下按 shfit + m 可以按照 **内存使用** 大小排序 - 在 `top` 命令状态下按 shfit + p 可以按照 **CPU 使用** 大小排序 - 展示数据上,%CPU 表示进程占用的 CPU 百分比,%MEM 表示进程占用的内存百分比 +- mac 下不一样:要先输入 o,然后输入 cpu 则按 cpu 使用量排序,输入 rsize 则按内存使用量排序。 #### CPU 其他工具 From f5aa85e220dbb2c61cc9c56ac905137c8e625172 Mon Sep 17 00:00:00 2001 From: zhang Date: Tue, 30 Apr 2019 15:55:28 +0800 Subject: [PATCH 276/330] 2019-04-30 --- markdown-file/monitor.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md index 66a065a4..50574c9f 100644 --- a/markdown-file/monitor.md +++ b/markdown-file/monitor.md @@ -576,7 +576,7 @@ TOTAL:(总的流量) 12.9GB 229Mb 190Mb 193Mb #### lsof - 安装 lsof:`yum install -y lsof` -- 查看 3316 端口是否有被使用:`lsof -i:3316`,**有被使用会输出类似如下信息,如果没被使用会没有任何信息返回** +- 查看 3316 端口是否有被使用(macOS 也适用):`lsof -i:3316`,**有被使用会输出类似如下信息,如果没被使用会没有任何信息返回** ``` COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME @@ -596,7 +596,7 @@ docker-pr 13551 root 4u IPv6 2116824 0t0 TCP *:aicc-cmi (LISTEN) #### netstat - 更多用法可以看:[netstat 的10个基本用法](https://linux.cn/article-2434-1.html) -- 查看所有在用的端口:`netstat -ntlp` +- 查看所有在用的端口(macOS 也适用):`netstat -ntlp` ``` Active Internet connections (only servers) From 223b9c610e91c7f48a360018b1fd5428e576df2b Mon Sep 17 00:00:00 2001 From: zhang Date: Tue, 7 May 2019 11:30:01 +0800 Subject: [PATCH 277/330] 2019-05-07 --- markdown-file/Docker-Install-And-Usage.md | 3 ++- markdown-file/Elasticsearch-Base.md | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md index 20db7079..3fe4361b 100644 --- a/markdown-file/Docker-Install-And-Usage.md +++ b/markdown-file/Docker-Install-And-Usage.md @@ -774,7 +774,8 @@ logger.warn("-------------maxMemory=" + ((double) maxMemory / (1024 * 1024))); - Docker Compose 主要用于定义和运行多个 Docker 容器的工具,这样可以快速运行一套分布式系统 - 容器之间是有依赖关系,比如我一个 Java web 系统依赖 DB 容器、Redis 容器,必须这些依赖容器先运行起来。 - 一个文件:docker-compose.yml -- 一个命令:docker-compose up +- 一个命令:`docker-compose up` + - 指定文件:`docker-compose -f zookeeper.yml -p zk_test up -d` - 官网安装说明: - 安装方法: diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md index a7e98bba..599a742e 100644 --- 
a/markdown-file/Elasticsearch-Base.md +++ b/markdown-file/Elasticsearch-Base.md @@ -2,6 +2,9 @@ ## Docker 部署 +- `vim ~/elasticsearch-5.6.8-docker.yml` +- 启动:`docker-compose -f ~/elasticsearch-5.6.8-docker.yml -p elasticsearch_5.6.8 up -d` + ``` version: "3" From 79e82dba3168792650906a9e97daa1a1a4a36d6a Mon Sep 17 00:00:00 2001 From: zhang Date: Mon, 13 May 2019 12:27:11 +0800 Subject: [PATCH 278/330] 2019-05-13 --- centos-settings/CentOS-Extra-Packages.md | 16 ++++++++++++++++ markdown-file/Elasticsearch-Base.md | 17 ++++++++++++++++- 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/centos-settings/CentOS-Extra-Packages.md b/centos-settings/CentOS-Extra-Packages.md index def938fe..c00901e3 100644 --- a/centos-settings/CentOS-Extra-Packages.md +++ b/centos-settings/CentOS-Extra-Packages.md @@ -83,6 +83,22 @@ - `sudo yum install -y htop`(htop 官方源是没有的,所以如果能下载下来就表示已经使用了第三方源) +### 禁用源 + +- 编辑:`vim /etc/yum.repos.d/elasticsearch.repo` +- 把 enabled=1 改为 enabled=0 + +``` +[elasticsearch-6.x] +name=Elasticsearch repository for 6.x packages +baseurl=https://artifacts.elastic.co/packages/6.x/yum +gpgcheck=1 +gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch +enabled=1 +autorefresh=1 +type=rpm-md +``` + > 资料: - diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md index 599a742e..1c02a750 100644 --- a/markdown-file/Elasticsearch-Base.md +++ b/markdown-file/Elasticsearch-Base.md @@ -61,10 +61,12 @@ elasticsearch hard memlock unlimited #### 开始安装 +- 检查:`rpm -qa | grep elastic` +- 卸载:`rpm -e --nodeps elasticsearch` - 官网 RPM 安装流程(重要,以下资料都是对官网的总结): - 导入 KEY:`rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch` - 新建文件:`vim /etc/yum.repos.d/elasticsearch.repo` -- 内容如下: +- 内容如下(6.x): ``` [elasticsearch-6.x] @@ -77,6 +79,19 @@ autorefresh=1 type=rpm-md ``` +- 内容如下(5.x): + +``` +[elasticsearch-5.x] +name=Elasticsearch repository for 5.x packages +baseurl=https://artifacts.elastic.co/packages/5.x/yum +gpgcheck=1 +gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch +enabled=1 +autorefresh=1 +type=rpm-md +``` + - 开始安装:`yum install -y elasticsearch`,预计文件有 108M 左右,国内网络安装可能会很慢,慢慢等 - 安装完后会多了一个:elasticsearch 用户和组 - 设置 java 软链接:`ln -s /usr/local/jdk1.8.0_181/jre/bin/java /usr/local/sbin/java` From d206847075699dc8e8f05d86995a236211d701b5 Mon Sep 17 00:00:00 2001 From: zhang Date: Mon, 13 May 2019 14:32:14 +0800 Subject: [PATCH 279/330] 2019-05-13 --- markdown-file/Elasticsearch-Base.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md index 1c02a750..e5f8c287 100644 --- a/markdown-file/Elasticsearch-Base.md +++ b/markdown-file/Elasticsearch-Base.md @@ -124,6 +124,9 @@ type=rpm-md - 默认只能 localhost 访问,修改成支持外网访问 ``` +打开这个注释:#cluster.name: my-application +集群名称最好是自己给定,不然有些 client 端会连不上,或者要求填写 + 打开这个注释:#network.host: 192.168.0.1 改为:network.host: 0.0.0.0 ``` From 6b4f01aa480513dcff499666c63268b5374b66aa Mon Sep 17 00:00:00 2001 From: zhang Date: Mon, 13 May 2019 16:22:28 +0800 Subject: [PATCH 280/330] 2019-05-13 --- markdown-file/Elasticsearch-Base.md | 33 ++++++++++++++++++----------- 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md index e5f8c287..1b1cfa3d 100644 --- a/markdown-file/Elasticsearch-Base.md +++ b/markdown-file/Elasticsearch-Base.md @@ -1,30 +1,39 @@ # Elasticsearch 知识 -## Docker 部署 +## Docker 单节点部署 +- 注意:docker 版本下 client.transport.sniff = true 是无效的。 - `vim 
~/elasticsearch-5.6.8-docker.yml` - 启动:`docker-compose -f ~/elasticsearch-5.6.8-docker.yml -p elasticsearch_5.6.8 up -d` ``` -version: "3" - +version: '3' services: - elasticsearch: - image: elasticsearch:5.6.8 - restart: always - container_name: elasticsearch - hostname: elasticsearch + elasticsearch1: + image: docker.elastic.co/elasticsearch/elasticsearch:5.6.8 + container_name: elasticsearch1 environment: - - 'http.host=0.0.0.0' - - 'transport.host=127.0.0.1' - "ES_JAVA_OPTS=-Xms512m -Xmx512m" + - "cluster.name=elasticsearch" + - "network.host=0.0.0.0" + - "http.host=0.0.0.0" + - "xpack.security.enabled=false" + ulimits: + memlock: + soft: -1 + hard: -1 + nofile: + soft: 65536 + hard: 65536 ports: - - "9200:9200" - - "9300:9300" + - 9200:9200 + - 9300:9300 volumes: - /data/docker/elasticsearch/data:/usr/share/elasticsearch/data + ``` + ------------------------------------------------------------------- From 4e7a01882aa04e6848ffe7d87c35e8df472d14de Mon Sep 17 00:00:00 2001 From: zhang Date: Wed, 29 May 2019 17:21:27 +0800 Subject: [PATCH 281/330] 2019-05-29 --- README.md | 1 + SUMMARY.md | 1 + TOC.md | 1 + .../Confluence-Install-And-Settings.md | 134 ++++++++++++++++++ 4 files changed, 137 insertions(+) create mode 100644 markdown-file/Confluence-Install-And-Settings.md diff --git a/README.md b/README.md index 30c6e32f..b79e908f 100644 --- a/README.md +++ b/README.md @@ -72,6 +72,7 @@ - [MongoDB 安装和配置](markdown-file/MongoDB-Install-And-Settings.md) - [Solr 安装和配置](markdown-file/Solr-Install-And-Settings.md) - [Jira 安装和配置](markdown-file/Jira-Install-And-Settings.md) +- [Confluence 安装和配置](markdown-file/Confluence-Install-And-Settings.md) - [TeamCity 安装和配置](markdown-file/TeamCity-Install-And-Settings.md) - [Nginx 安装和配置](markdown-file/Nginx-Install-And-Settings.md) - [wrk 安装和配置](markdown-file/wrk-Install-And-Settings.md) diff --git a/SUMMARY.md b/SUMMARY.md index 91dfcd0a..91ff9f92 100644 --- a/SUMMARY.md +++ b/SUMMARY.md @@ -51,6 +51,7 @@ * [MongoDB 安装和配置](markdown-file/MongoDB-Install-And-Settings.md) * [Solr 安装和配置](markdown-file/Solr-Install-And-Settings.md) * [Jira 安装和配置](markdown-file/Jira-Install-And-Settings.md) +* [Confluence 安装和配置](markdown-file/Confluence-Install-And-Settings.md) * [Jenkins 安装和配置](markdown-file/Jenkins-Install-And-Settings.md) * [TeamCity 安装和配置](markdown-file/TeamCity-Install-And-Settings.md) * [Nginx 安装和配置](markdown-file/Nginx-Install-And-Settings.md) diff --git a/TOC.md b/TOC.md index 1b0ff377..44bf8f76 100644 --- a/TOC.md +++ b/TOC.md @@ -49,6 +49,7 @@ - [MongoDB 安装和配置](markdown-file/MongoDB-Install-And-Settings.md) - [Solr 安装和配置](markdown-file/Solr-Install-And-Settings.md) - [Jira 安装和配置](markdown-file/Jira-Install-And-Settings.md) +- [Confluence 安装和配置](markdown-file/Confluence-Install-And-Settings.md) - [Jenkins 安装和配置](markdown-file/Jenkins-Install-And-Settings.md) - [TeamCity 安装和配置](markdown-file/TeamCity-Install-And-Settings.md) - [Nginx 安装和配置](markdown-file/Nginx-Install-And-Settings.md) diff --git a/markdown-file/Confluence-Install-And-Settings.md b/markdown-file/Confluence-Install-And-Settings.md new file mode 100644 index 00000000..5a1a6fe1 --- /dev/null +++ b/markdown-file/Confluence-Install-And-Settings.md @@ -0,0 +1,134 @@ +# Confluence 安装和配置 + +## Confluence 6.15.4 + +- 最新 6.15.4 版本时间:2019-05 + +#### 数据库 + +``` +docker run \ + --name mysql-confluence \ + --restart always \ + -p 3316:3306 \ + -e MYSQL_ROOT_PASSWORD=adg123456 \ + -e MYSQL_DATABASE=confluence_db \ + -e MYSQL_USER=confluence_user \ + -e MYSQL_PASSWORD=confluence_123456 \ + -d \ + mysql:5.7 
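+
+# 补充示例(假设容器名与端口映射未改动):启动后可先确认端口映射与容器状态
+# docker port mysql-confluence
+# docker logs --tail 50 mysql-confluence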
+``` + +- 连上容器:`docker exec -it mysql-confluence /bin/bash` + - 连上 MySQL:`mysql -u root -p` +- 设置编码: + - **必须做这一步,不然配置过程会报错,confluence 的 DB 要求是 utf8,还不能是 utf8mb4** + - **并且排序规则还必须是:utf8_bin** + - **数据库必须使用'READ-COMMITTED'作为默认隔离级别** + +``` +SET NAMES 'utf8'; +alter database confluence_db character set utf8 collate utf8_bin; +SET GLOBAL tx_isolation='READ-COMMITTED'; +``` + +#### 安装 + +- 下载: + - 选择:linux64 类型下载 +- 授权:`chmod +x atlassian-confluence-6.15.4-x64.bin` + + +``` +./atlassian-confluence-6.15.4-x64.bin + +开始提示: + +Unpacking JRE ... +Starting Installer ... + +This will install Confluence 6.9.0 on your computer. +OK [o, Enter], Cancel [c] + +>> 输入o或直接回车 + +Click Next to continue, or Cancel to exit Setup. + +Choose the appropriate installation or upgrade option. +Please choose one of the following: +Express Install (uses default settings) [1], +Custom Install (recommended for advanced users) [2, Enter], +Upgrade an existing Confluence installation [3] +1 +>> 这里输入数字1 + +See where Confluence will be installed and the settings that will be used. +Installation Directory: /opt/atlassian/confluence +Home Directory: /var/atlassian/application-data/confluence +HTTP Port: 8090 +RMI Port: 8000 +Install as service: Yes +Install [i, Enter], Exit [e] +i + +>> 输入i或者直接回车 + +Extracting files ... + +Please wait a few moments while we configure Confluence. + +Installation of Confluence 6.9.0 is complete +Start Confluence now? +Yes [y, Enter], No [n] + +>> 输入y或者直接回车 + +Please wait a few moments while Confluence starts up. +Launching Confluence ... + +Installation of Confluence 6.9.0 is complete +Your installation of Confluence 6.9.0 is now ready and can be accessed via +your browser. +Confluence 6.9.0 can be accessed at http://localhost:8090 +Finishing installation ... 
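+
+# 补充示例(假设端口未修改):此时可先确认 8090、8000 已在监听
+# ss -lntp | grep -E '8090|8000'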
+ +# 安装完成,访问本机的8090端口进行web端安装 +# 开放防火墙端口 +firewall-cmd --add-port=8090/tcp --permanent +firewall-cmd --add-port=8000/tcp --permanent +firewall-cmd --reload +``` + +- 默认是安装在 /opt 目录下:`/opt/atlassian/confluence/confluence/WEB-INF/lib` +- 启动:`sh /opt/atlassian/confluence/bin/start-confluence.sh` +- 停止:`sh /opt/atlassian/confluence/bin/stop-confluence.sh` +- 查看 log:`tail -300f /opt/atlassian/confluence/logs/catalina.out` +- 卸载:`sh /opt/atlassian/confluence/uninstall` +- 设置 MySQL 连接驱动,把 mysql-connector-java-5.1.47.jar 放在目录 `/opt/atlassian/confluence/confluence/WEB-INF/lib` + +#### 首次配置 + +- 访问: +- 参考文章: +- 参考文章: +- 因为步骤一样,所以我就不再截图了。 + +#### License 过程 + +- 参考自己的为知笔记 + + +## 反向代理的配置可以参考 + +- + + +## 使用 markdown + +- 点击右上角小齿轮 > 管理应用 > 搜索市场应用 > 输入 markdown > 安装 + + +## 其他资料 + +- +- From 8d6879d3d9fdc30bb636160014836fff6115fb6a Mon Sep 17 00:00:00 2001 From: judasn Date: Thu, 30 May 2019 23:30:21 +0800 Subject: [PATCH 282/330] 2019-05-30 --- markdown-file/Elasticsearch-Base.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md index 1b1cfa3d..05cc86de 100644 --- a/markdown-file/Elasticsearch-Base.md +++ b/markdown-file/Elasticsearch-Base.md @@ -2,6 +2,10 @@ ## Docker 单节点部署 +- 官网: + - 7.x:7.1.0 + - 6.x:6.8.0 + - 5.x:5.6.8 - 注意:docker 版本下 client.transport.sniff = true 是无效的。 - `vim ~/elasticsearch-5.6.8-docker.yml` - 启动:`docker-compose -f ~/elasticsearch-5.6.8-docker.yml -p elasticsearch_5.6.8 up -d` From 4000f6646c77cffb57e37dc2787de950aaf19b85 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 31 May 2019 00:20:26 +0800 Subject: [PATCH 283/330] 2019-05-31 --- .../SkyWalking-Install-And-Settings.md | 192 ++++++++++++++++++ 1 file changed, 192 insertions(+) create mode 100644 markdown-file/SkyWalking-Install-And-Settings.md diff --git a/markdown-file/SkyWalking-Install-And-Settings.md b/markdown-file/SkyWalking-Install-And-Settings.md new file mode 100644 index 00000000..f47d66b9 --- /dev/null +++ b/markdown-file/SkyWalking-Install-And-Settings.md @@ -0,0 +1,192 @@ +# SkyWalking 安装和配置 + + +## OpenAPM 相关 + +- 目前市场工具一览: +- 目前最活跃的标准:[OpenTracing](https://opentracing.io/) +- 现在比较活跃的应该是: + - [Jaeger](https://www.jaegertracing.io/) + - [SkyWalking](https://skywalking.apache.org/) + + +## 官网资料 + +- 当前时间:2019-05,最新版本:6.1 +- 官网: +- 官网 Github: +- 官网文档: +- 官网下载: + - 该网页显示:官网目前推荐的是通过源码构建出包,docker 镜像推荐 + - 源码构建方法: +- 这里简单抽取下核心内容: +- 至少需要 jdk8 + maven3 +- 需要 Elasticsearch + - Elasticsearch 和 SkyWalking 的所在服务器的时间必须一致 + - 看了下源码依赖的 Elasticsearch 依赖包,目前支持 5.x 和 6.x + +## 基于 IntelliJ IDEA 直接运行、Debug + +- 这里选择 IntelliJ IDEA 运行服务,方便我们 debug 了解 SkyWalking: + +``` +cd skywalking/ + +git submodule init + +git submodule update + +mvn clean package -DskipTests + +因为需要设置 gRPC 的自动生成的代码目录,为源码目录,所以: +手工将下面提到的目录下的 grpc-java 和 java 目录设置为 IntelliJ IDEA 的源码目录(Sources Root) +/skywalking/apm-protocol/apm-network/target/generated-sources/protobuf +/skywalking/oap-server/server-core/target/generated-sources/protobuf +/skywalking/oap-server/server-receiver-plugin/receiver-proto/target/generated-sources/protobuf +/skywalking/oap-server/exporter/target/generated-sources/protobuf + + +手工将下面提到的目录下的 antlr4 目录设置为 IntelliJ IDEA 的源码目录(Sources Root) +/skywalking/oap-server/generate-tool-grammar/target/generated-sources + +手工将下面提到的目录下的 oal 目录设置为 IntelliJ IDEA 的源码目录(Sources Root) +/skywalking/oap-server/generated-analysis/target/generated-sources + +``` + +#### 启动 Server 项目 + +- 现在可以通过 IntelliJ IDEA 启动服务: +- 编辑 server 
配置:`/skywalking/oap-server/server-starter/src/main/resources/application.yml` + - 里面有关 Elasticsearch 连接信息的配置,你可以根据自己的情况进行配置。 +- 启动类:`/skywalking/oap-server/server-starter/src/main/java/org/apache/skywalking/oap/server/starter/OAPServerStartUp.java` + - 第一次启动会创建 540 个左右的 Elasticsearch 索引库,会花点时间。 + + +#### 启动 UI 项目 + + +- 现在启动 UI 项目,找到:`/skywalking/apm-webapp/src/main/java/org/apache/skywalking/apm/webapp/ApplicationStartUp.java` +- 访问 UI 地址: + - 用户名:admin + - 密码:admin + + +## Java Agent(探针) + + +#### IntelliJ IDEA 项目调试 + +- 前面构建服务的时候记得构建出 jar 包出来,这里要用到 +- 自己的 Spring Boot 项目 +- 引包: + +``` + + + + org.apache.skywalking + apm-toolkit-trace + 6.1.0 + + +``` + +- 常用注解: + + +``` +@Trace +@ApiOperation(tags = {"用户系统管理->用户管理->用户列表"}, value = "查询所有用户列表", notes = "查询所有用户列表") +@RequestMapping(value = "/list", method = RequestMethod.GET) +@ResponseBody +public List list() { + List sysUserList = sysUserService.findAll(); + ActiveSpan.tag("一共有数据:", sysUserList.size() + "条"); + log.info("当前 traceId={}", TraceContext.traceId()); + return sysUserList; +} + +``` + +- 更多注解的使用: + +- 你的 demo 项目在 IntelliJ IDEA 启动的时候加上 VM 参数上设置: + +``` +-javaagent:/你自己的路径/skywalking-agent.jar -Dskywalking.agent.application_code=my_app_001 -Dskywalking.collector.backend_service=localhost:11800 +``` + +- 默认 11800 是 gRPC 的接收接口 +- 你自己构建出来的 jar 路径一般是:`/skywalking/apm-sniffer/apm-agent/target/skywalking-agent.jar` +- 然后请求你带有 Trace 的 Controller,然后去 UI 界面看统计情况 + +#### jar 包方式 + +- 你的 Spring Boot jar 包 run 之前加上 VM 参数: + +``` +java -javaagent:/你自己的路径/skywalking-agent.jar -Dskywalking.collector.backend_service=localhost:11800 -Dskywalking.agent.application_code=my_app_002 -jar my-project-1.0-SNAPSHOT.jar +``` + + +#### Docker 方式 + +- Dockerfile + +``` +FROM openjdk:8-jre-alpine + +LABEL maintainer="tanjian20150101@gmail.com" + +ENV SW_APPLICATION_CODE=java-agent-demo \ + SW_COLLECTOR_SERVERS=localhost:11800 + +COPY skywalking-agent /apache-skywalking-apm-incubating/agent + +COPY target/sky-demo-1.0-SNAPSHOT.jar /demo.jar + +ENTRYPOINT java -javaagent:/apache-skywalking-apm-incubating/agent/skywalking-agent.jar -Dskywalking.collector.backend_service=${SW_COLLECTOR_SERVERS} \ +-Dskywalking.agent.application_code=${SW_APPLICATION_CODE} -jar /demo.jar +``` + +- 构建镜像: + +``` +docker build -t hello-demo . 
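+# 构建完成后可先确认镜像已生成(补充示例,镜像名以上文 hello-demo 为准):
+docker images hello-demo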
+docker run -p 10101:10101 -e SW_APPLICATION_CODE=hello-world-demo-005 -e SW_COLLECTOR_SERVERS=127.10.0.2:11800 hello-demo +``` + + + +## 构建 jar 部署在服务器 + +- 如果想直接打包出 jar 部署与服务器,只需要这样: + +``` +cd skywalking/ + +git submodule init + +git submodule update + +mvn clean package -DskipTests +``` + + +## 资料 + +- +- +- +- <> +- <> +- <> +- <> +- <> + + + + + + From 6447cef047197c473c3c49a575800c933658ad87 Mon Sep 17 00:00:00 2001 From: zhang Date: Fri, 31 May 2019 10:32:25 +0800 Subject: [PATCH 284/330] 2019-05-31 --- README.md | 3 ++- SUMMARY.md | 4 +++- TOC.md | 4 +++- markdown-file/SkyWalking-Install-And-Settings.md | 13 ++++++++++++- 4 files changed, 20 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index b79e908f..e646a463 100644 --- a/README.md +++ b/README.md @@ -108,7 +108,8 @@ - [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) - [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md) - [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md) -- [Wormhole + Flink 最佳实践](markdown-file/Wormhole-Install-And-Settings.md) +- [Wormhole + Flink 安装和配置](markdown-file/Wormhole-Install-And-Settings.md) +- [SkyWalking 安装和配置](markdown-file/SkyWalking-Install-And-Settings.md) ## 联系(Contact) diff --git a/SUMMARY.md b/SUMMARY.md index 91ff9f92..c62a5b09 100644 --- a/SUMMARY.md +++ b/SUMMARY.md @@ -87,4 +87,6 @@ * [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) * [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) * [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md) -* [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md) \ No newline at end of file +* [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md) +* [Wormhole + Flink 安装和配置](markdown-file/Wormhole-Install-And-Settings.md) +* [SkyWalking 安装和配置](markdown-file/SkyWalking-Install-And-Settings.md) \ No newline at end of file diff --git a/TOC.md b/TOC.md index 44bf8f76..96ab0f35 100644 --- a/TOC.md +++ b/TOC.md @@ -84,4 +84,6 @@ - [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) - [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) - [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md) -- [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md) \ No newline at end of file +- [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md) +- [Wormhole + Flink 安装和配置](markdown-file/Wormhole-Install-And-Settings.md) +- [SkyWalking 安装和配置](markdown-file/SkyWalking-Install-And-Settings.md) \ No newline at end of file diff --git a/markdown-file/SkyWalking-Install-And-Settings.md b/markdown-file/SkyWalking-Install-And-Settings.md index f47d66b9..ad6cd074 100644 --- a/markdown-file/SkyWalking-Install-And-Settings.md +++ b/markdown-file/SkyWalking-Install-And-Settings.md @@ -25,6 +25,13 @@ - Elasticsearch 和 SkyWalking 的所在服务器的时间必须一致 - 看了下源码依赖的 Elasticsearch 依赖包,目前支持 5.x 和 6.x + +## 支持收集的组件列表 + +- 国内常用的组件目前看来都支持了 +- + + ## 基于 IntelliJ IDEA 直接运行、Debug - 这里选择 IntelliJ IDEA 运行服务,方便我们 debug 了解 SkyWalking: @@ -173,13 +180,17 @@ git submodule update mvn clean package -DskipTests ``` +## 告警配置 + +- + ## 资料 - - - -- <> +- - <> - <> - <> From 5cb4b2a51df6d0ab050fdca1f647215029d49d26 Mon Sep 17 00:00:00 2001 From: judasn Date: Fri, 31 May 2019 22:36:35 +0800 Subject: [PATCH 285/330] 2019-05-31 --- markdown-file/Zsh.md | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/markdown-file/Zsh.md b/markdown-file/Zsh.md index 2186bc74..e0e294a3 100644 --- a/markdown-file/Zsh.md +++ 
b/markdown-file/Zsh.md @@ -64,6 +64,18 @@ - 编辑配置文件:`vim /root/.zshrc`,找到下图的地方,怎么安装,原作者注释写得很清楚了,别装太多了,默认 git 是安装的。 - ![oh-my-zsh 安装](../images/Zsh-c-1.jpg) - 插件推荐: + - `zsh-autosuggestions` + - 这个插件会对历史命令一些补全,类似 fish 终端 + - 插件官网: + - 安装,复制该命令:`git clone https://github.com/zsh-users/zsh-autosuggestions ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-autosuggestions` + - 编辑:`vim ~/.zshrc`,找到这一行,后括号里面的后面添加:`plugins=( 前面的一些插件名称,换行,加上:zsh-autosuggestions)` + - 刷新下配置:`source ~/.zshrc` + - `zsh-syntax-highlighting` + - 这个插件会对终端命令高亮显示,比如正确的拼写会是绿色标识,否则是红色,另外对于一些shell输出语句也会有高亮显示,算是不错的辅助插件 + - 插件官网: + - 安装,复制该命令:`git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-syntax-highlighting` + - 编辑:`vim ~/.zshrc`,找到这一行,后括号里面的后面添加:`plugins=( 前面的一些插件名称,换行,加上:zsh-syntax-highlighting)` + - 刷新下配置:`source ~/.zshrc` - `wd` - 简单地讲就是给指定目录映射一个全局的名字,以后方便直接跳转到这个目录,比如: - 编辑配置文件,添加上 wd 的名字:`vim /root/.zshrc` @@ -80,12 +92,6 @@ - 进入解压后目录并安装:`cd autojump_v21.1.2/ ; ./install.sh` - 再执行下这个:`source /etc/profile.d/autojump.sh` - 编辑配置文件,添加上 autojump 的名字:`vim /root/.zshrc` - - `zsh-syntax-highlighting` - - 这个插件会对终端命令高亮显示,比如正确的拼写会是绿色标识,否则是红色,另外对于一些shell输出语句也会有高亮显示,算是不错的辅助插件 - - 插件官网: - - 安装,复制该命令:'git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-syntax-highlighting' - - 编辑:`vim ~/.zshrc`,找到这一行,后括号里面的后面添加:`plugins=( 前面的一些插件名称 zsh-syntax-highlighting)` - - 刷新下配置:`source ~/.zshrc` ### 主题 From 5f7743f651160d2f0c41e0ca5824ba9ae04a6180 Mon Sep 17 00:00:00 2001 From: zhang Date: Wed, 12 Jun 2019 16:09:30 +0800 Subject: [PATCH 286/330] 2019-06-12 --- README.md | 1 + SUMMARY.md | 1 + TOC.md | 1 + .../PostgreSQL-Install-And-Settings.md | 32 +++++++++++++++++++ 4 files changed, 35 insertions(+) create mode 100644 markdown-file/PostgreSQL-Install-And-Settings.md diff --git a/README.md b/README.md index e646a463..9301dbb6 100644 --- a/README.md +++ b/README.md @@ -63,6 +63,7 @@ - [Jenkins 安装和配置](markdown-file/Jenkins-Install-And-Settings.md) - [Maven 安装和配置](markdown-file/Maven-Install-And-Settings.md) - [Nexus 安装和配置](markdown-file/Nexus-Install-And-Settings.md) +- [PostgreSQL 安装和配置](markdown-file/PostgreSQL-Install-And-Settings.md) - [MySQL 安装和配置](markdown-file/Mysql-Install-And-Settings.md) - [MySQL 优化](markdown-file/Mysql-Optimize.md) - [MySQL 测试](markdown-file/Mysql-Test.md) diff --git a/SUMMARY.md b/SUMMARY.md index c62a5b09..ef4a9f48 100644 --- a/SUMMARY.md +++ b/SUMMARY.md @@ -42,6 +42,7 @@ * [Jenkins 安装和配置](markdown-file/Jenkins-Install-And-Settings.md) * [Maven 安装和配置](markdown-file/Maven-Install-And-Settings.md) * [Nexus 安装和配置](markdown-file/Nexus-Install-And-Settings.md) +* [PostgreSQL 安装和配置](markdown-file/PostgreSQL-Install-And-Settings.md) * [MySQL 安装和配置](markdown-file/Mysql-Install-And-Settings.md) * [MySQL 优化](markdown-file/Mysql-Optimize.md) * [MySQL 测试](markdown-file/Mysql-Test.md) diff --git a/TOC.md b/TOC.md index 96ab0f35..0b6b3709 100644 --- a/TOC.md +++ b/TOC.md @@ -40,6 +40,7 @@ - [Jenkins 安装和配置](markdown-file/Jenkins-Install-And-Settings.md) - [Maven 安装和配置](markdown-file/Maven-Install-And-Settings.md) - [Nexus 安装和配置](markdown-file/Nexus-Install-And-Settings.md) +- [PostgreSQL 安装和配置](markdown-file/PostgreSQL-Install-And-Settings.md) - [MySQL 安装和配置](markdown-file/Mysql-Install-And-Settings.md) - [MySQL 优化](markdown-file/Mysql-Optimize.md) - [MySQL 测试](markdown-file/Mysql-Test.md) diff --git a/markdown-file/PostgreSQL-Install-And-Settings.md b/markdown-file/PostgreSQL-Install-And-Settings.md new file 
mode 100644
index 00000000..b2f3182e
--- /dev/null
+++ b/markdown-file/PostgreSQL-Install-And-Settings.md
@@ -0,0 +1,32 @@
+# PostgreSQL 安装和配置
+
+
+## 官网
+
+- 官网:
+  - 201906 最新版本
+    - 12 beta
+    - 11 release
+- 官网 Docker hub:
+
+
+## Docker 安装 PostgreSQL(带挂载)
+
+```
+docker run \
+  -d \
+  --name pgsql \
+  -p 5432:5432 \
+  -e POSTGRES_USER=adg_user \
+  -e POSTGRES_PASSWORD=adg123456 \
+  -v ~/docker_data/pgsql/data:/var/lib/postgresql/data \
+  postgres:11
+```
+
+- 连上容器:`docker exec -it pgsql /bin/bash`
+  - 连上 PostgreSQL:`psql -h 127.0.0.1 -p 5432 -U adg_user`
+
+
+## 资料
+
+- 

From cc754b7d8f1ba1fdeb3487d42d545a623528e063 Mon Sep 17 00:00:00 2001
From: zhang
Date: Thu, 13 Jun 2019 17:33:36 +0800
Subject: [PATCH 287/330] 2019-06-13

---
 markdown-file/Docker-Install-And-Usage.md | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/markdown-file/Docker-Install-And-Usage.md b/markdown-file/Docker-Install-And-Usage.md
index 3fe4361b..734c11de 100644
--- a/markdown-file/Docker-Install-And-Usage.md
+++ b/markdown-file/Docker-Install-And-Usage.md
@@ -309,6 +309,11 @@ CONTAINER ID NAME CPU % MEM USAGE / LI
 - `docker run -it 镜像ID --link redis-name:myredis /bin/bash`
   - `redis-name` 是容器名称
   - `myredis` 是容器别名,其他容器连接它可以用这个别名来写入到自己的配置文件中
+- 容器与宿主机之间文件的拷贝
+  - `docker cp /www/runoob 96f7f14e99ab:/www/` 将主机 /www/runoob 目录拷贝到容器 96f7f14e99ab 的 /www 目录下
+  - `docker cp /www/runoob 96f7f14e99ab:/www` 将主机 /www/runoob 目录拷贝到容器 96f7f14e99ab 中,目录重命名为 www。
+  - `docker cp 96f7f14e99ab:/www /tmp/` 将容器 96f7f14e99ab 的 /www 目录拷贝到主机的 /tmp 目录中。
+

 #### docker 网络模式

From cd315c62729c3859b7cea78b07962dd12758ccf8 Mon Sep 17 00:00:00 2001
From: judasn
Date: Sun, 23 Jun 2019 10:45:48 +0800
Subject: [PATCH 288/330] 2019-06-23

---
 markdown-file/Jenkins-Install-And-Settings.md | 32 +++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/markdown-file/Jenkins-Install-And-Settings.md b/markdown-file/Jenkins-Install-And-Settings.md
index 82cc053c..a4101dea 100644
--- a/markdown-file/Jenkins-Install-And-Settings.md
+++ b/markdown-file/Jenkins-Install-And-Settings.md
@@ -158,6 +158,37 @@ This may also be found at: /root/.jenkins/secrets/initialAdminPassword
 - 如果配置插件过程遇到这个错误:`No valid crumb was included in the request`,则多重试几次。
 - 登录后把: 下面的 `防止跨站点请求伪造` 勾选去掉。遇到问题多试几次。
 
+
+## 忘记 admin 密码进行重置
+
+- 备份配置文件:`cp /root/.jenkins/config.xml /root/.jenkins/config.xml.back`
+- 编辑:`vim /root/.jenkins/config.xml`,删除 config.xml 文件中的这部分内容,在 10 行左右位置:
+
+```
+<useSecurity>true</useSecurity>
+<authorizationStrategy class="hudson.security.FullControlOnceLoggedInAuthorizationStrategy">
+  <denyAnonymousReadAccess>true</denyAnonymousReadAccess>
+</authorizationStrategy>
+<securityRealm class="hudson.security.HudsonPrivateSecurityRealm">
+  <disableSignup>true</disableSignup>
+  <enableCaptcha>false</enableCaptcha>
+</securityRealm>
+```
+
+- 重启服务,进入首页此时系统是免密状态
+- 选择左侧的 `系统管理`,系统会提示你需要配置安全设置:`全局安全配置`
+  - 勾选 `启用安全`
+  - 安全域 > 勾选 `Jenkins专有用户数据库`
+  - 点击保存
+- 重新点击首页:`系统管理`
+  - 点击 `管理用户`
+  - 在用户列表中点击 admin 右侧齿轮
+  - 修改密码,修改后即可重新登录。
+- 选择左侧的 `系统管理`,系统会提示你需要配置安全设置:`全局安全配置`
+  - 勾选 `启用安全`
+  - 授权策略 > 勾选 `登录用户可以做任何事` 或 `安全矩阵`
+  - 点击保存
+
 -------------------------------------------------------------------
 
 ## pipeline 语法
@@ -927,3 +958,4 @@ pipeline {
 - 
 - 
 - 
+- 
\ No newline at end of file

From b6973a021761adeef7baa287e295073901dcc639 Mon Sep 17 00:00:00 2001
From: judasn
Date: Sun, 23 Jun 2019 14:43:43 +0800
Subject: [PATCH 289/330] 2019-06-23

---
 markdown-file/Gitlab-Install-And-Settings.md | 107 ++++++++++++++-----
 1 file changed, 80 insertions(+), 27 deletions(-)

diff --git a/markdown-file/Gitlab-Install-And-Settings.md b/markdown-file/Gitlab-Install-And-Settings.md
index f8f3eaab..c9a4c1e3 100644
--- a/markdown-file/Gitlab-Install-And-Settings.md
+++ b/markdown-file/Gitlab-Install-And-Settings.md
@@ -51,40 +51,27 @@ gitlab-postgresql:
 
 - 本质就是把文件、缓存、数据库抽离出来,然后部署多个 Gitlab 用 nginx 前面做负载。
 
-## 原始安装方式
+## 原始安装方式(推荐)
 
- 
环境: - - CPU:1 core - - 内存:2G -- 我习惯使用 root 用户 +- 推荐至少内存 4G,它有大量组件 - 有开源版本和收费版本,各版本比较: - 官网: - 中文网: - 官网下载: -- 安装的系统环境要求: - - 从文章看目前要求 ruby 2.3,用 yum 版本过低,那就源码安装 ruby 吧,官网当前最新是:2.4.1(大小:14M) - 官网安装说明: -- 安装 ruby - - 下载: - - 解压:`tar zxvf ruby-2.4.1.tar.gz` - - 编译安装: - - `cd ruby-2.4.1` - - `./configure` - - `make`,过程有点慢 - - `make install` - - 默认安装到这个目录:`/usr/local` - - 查看当前版本号:`ruby -v` -- CentOS 6 安装流程: - - 当前(201703)的版本是:`GitLab Community Edition 9.0.0` - - `sudo yum install -y curl openssh-server openssh-clients postfix cronie` - - `sudo service postfix start` - - `sudo chkconfig postfix on` - - `sudo lokkit -s http -s ssh` - - `curl -sS https://packages.gitlab.com/install/repositories/gitlab/gitlab-ce/script.rpm.sh | sudo bash` - - `sudo yum install gitlab-ce`,软件大小:272M,下载速度不稳定 - - `sudo gitlab-ctl reconfigure`,这个过程比较慢 - 如果上面的下载比较慢,也有国内的镜像: - 清华: +- 参考: + +``` +sudo yum install -y curl policycoreutils-python openssh-server + +sudo systemctl enable sshd +sudo systemctl start sshd + +curl https://packages.gitlab.com/install/repositories/gitlab/gitlab-ce/script.rpm.sh | sudo bash +sudo EXTERNAL_URL="http://192.168.1.123:8181" yum install -y gitlab-ce +``` ## 配置 @@ -92,7 +79,10 @@ gitlab-postgresql: - 配置域名 / IP - 编辑配置文件:`sudo vim /etc/gitlab/gitlab.rb` - 找到 13 行左右:`external_url 'http://gitlab.example.com'`,改为你的域名 / IP - - 重启服务:`sudo gitlab-ctl reconfigure` + - 刷新配置:`sudo gitlab-ctl reconfigure`,第一次这个时间会比较久,我花了好几分钟 + - 启动服务:`sudo gitlab-ctl start` + - 停止服务:`sudo gitlab-ctl stop` + - 重启服务:`sudo gitlab-ctl restart` - 前面的初始化配置完成之后,访问当前机子 IP:`http://192.168.1.111:80` - 默认用户是 `root`,并且没有密码,所以第一次访问是让你设置你的 root 密码,我设置为:gitlab123456(至少 8 位数) - 设置会初始化密码之后,你就需要登录了。输入设置的密码。 @@ -234,6 +224,69 @@ gitlab-postgresql: - +## 接入第三方登录 + +- 官网文档: + - + - + - + +- gitlab 自己本身维护一套用户系统,第三方认证服务一套用户系统,gitlab 可以将两者关联起来,然后用户可以选择其中一种方式进行登录而已。 +- 所以,gitlab 第三方认证只能用于网页登录,clone 时仍然使用用户在 gitlab 的账户密码,推荐使用 ssh-key 来操作仓库,不再使用账户密码。 +- 重要参数:block_auto_created_users=true 的时候则自动注册的账户是被锁定的,需要管理员账户手动的为这些账户解锁,可以改为 false +- 编辑配置文件引入第三方:`sudo vim /etc/gitlab/gitlab.rb`,在 309 行有默认的一些注释配置 + - 其中 oauth2_generic 模块默认是没有,需要自己 gem,其他主流的那些都自带,配置即可使用。 + +``` +gitlab_rails['omniauth_enabled'] = true +gitlab_rails['omniauth_allow_single_sign_on'] = ['google_oauth2', 'facebook', 'twitter', 'oauth2_generic'] +gitlab_rails['omniauth_block_auto_created_users'] = false +gitlab_rails['omniauth_sync_profile_attributes'] = ['email','username'] +gitlab_rails['omniauth_external_providers'] = ['google_oauth2', 'facebook', 'twitter', 'oauth2_generic'] +gitlab_rails['omniauth_providers'] = [ + { + "name"=> "google_oauth2", + "label"=> "Google", + "app_id"=> "123456", + "app_secret"=> "123456", + "args"=> { + "access_type"=> 'offline', + "approval_prompt"=> '123456' + } + }, + { + "name"=> "facebook", + "label"=> "facebook", + "app_id"=> "123456", + "app_secret"=> "123456" + }, + { + "name"=> "twitter", + "label"=> "twitter", + "app_id"=> "123456", + "app_secret"=> "123456" + }, + { + "name" => "oauth2_generic", + "app_id" => "123456", + "app_secret" => "123456", + "args" => { + client_options: { + "site" => "http://sso.cdk8s.com:9090/sso", + "user_info_url" => "/oauth/userinfo" + }, + user_response_structure: { + root_path: ["user_attribute"], + attributes: { + "nickname": "username" + } + } + } + } +] + +``` + ## 资料 From aa7bcfaaad03ef73092e564712d6e01bd3d163e1 Mon Sep 17 00:00:00 2001 From: Jared Tan Date: Thu, 27 Jun 2019 22:57:04 +0800 Subject: [PATCH 290/330] update demo dockerfile. 
according https://github.com/apache/skywalking/blob/master/apm-sniffer/config/agent.config#L18. Env vars has changed. --- markdown-file/SkyWalking-Install-And-Settings.md | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/markdown-file/SkyWalking-Install-And-Settings.md b/markdown-file/SkyWalking-Install-And-Settings.md index ad6cd074..db9cf77c 100644 --- a/markdown-file/SkyWalking-Install-And-Settings.md +++ b/markdown-file/SkyWalking-Install-And-Settings.md @@ -146,22 +146,21 @@ FROM openjdk:8-jre-alpine LABEL maintainer="tanjian20150101@gmail.com" -ENV SW_APPLICATION_CODE=java-agent-demo \ - SW_COLLECTOR_SERVERS=localhost:11800 +ENV SW_AGENT_NAMESPACE=java-agent-demo \ + SW_AGENT_COLLECTOR_BACKEND_SERVICES=localhost:11800 -COPY skywalking-agent /apache-skywalking-apm-incubating/agent +COPY skywalking-agent /apache-skywalking-apm-bin/agent COPY target/sky-demo-1.0-SNAPSHOT.jar /demo.jar -ENTRYPOINT java -javaagent:/apache-skywalking-apm-incubating/agent/skywalking-agent.jar -Dskywalking.collector.backend_service=${SW_COLLECTOR_SERVERS} \ --Dskywalking.agent.application_code=${SW_APPLICATION_CODE} -jar /demo.jar +ENTRYPOINT java -javaagent:/apache-skywalking-apm-bin/agent/skywalking-agent.jar -jar /demo.jar ``` - 构建镜像: ``` docker build -t hello-demo . -docker run -p 10101:10101 -e SW_APPLICATION_CODE=hello-world-demo-005 -e SW_COLLECTOR_SERVERS=127.10.0.2:11800 hello-demo +docker run -p 10101:10101 -e SW_AGENT_NAMESPACE=hello-world-demo-005 -e SW_AGENT_COLLECTOR_BACKEND_SERVICES=127.10.0.2:11800 hello-demo ``` From 70a343c10eac6186d5247e59e70fdafaf32fc669 Mon Sep 17 00:00:00 2001 From: zhang Date: Wed, 3 Jul 2019 11:45:13 +0800 Subject: [PATCH 291/330] 2019-06-13 --- markdown-file/Mysql-Test.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/markdown-file/Mysql-Test.md b/markdown-file/Mysql-Test.md index 2a5799e0..f18d3f0f 100644 --- a/markdown-file/Mysql-Test.md +++ b/markdown-file/Mysql-Test.md @@ -199,6 +199,8 @@ cd tpcc-mysql/src make 如果make没报错,就会在tpcc-mysql 根目录文件夹下生成tpcc二进制命令行工具tpcc_load、tpcc_start + +如果要同时支持 PgSQL 可以考虑:https://github.com/Percona-Lab/sysbench-tpcc ``` ### 测试的几个表介绍 From 3cb44898266debef177522ff5799e9cdec5355a7 Mon Sep 17 00:00:00 2001 From: zhang Date: Fri, 5 Jul 2019 10:58:31 +0800 Subject: [PATCH 292/330] Influxdb --- README.md | 1 + SUMMARY.md | 1 + TOC.md | 1 + .../Influxdb-Install-And-Settings.md | 62 +++++++++++++++++++ 4 files changed, 65 insertions(+) create mode 100644 markdown-file/Influxdb-Install-And-Settings.md diff --git a/README.md b/README.md index 9301dbb6..eaed7384 100644 --- a/README.md +++ b/README.md @@ -107,6 +107,7 @@ - [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md) - [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) - [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) +- [Influxdb 安装和配置](markdown-file/Influxdb-Install-And-Settings.md) - [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md) - [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md) - [Wormhole + Flink 安装和配置](markdown-file/Wormhole-Install-And-Settings.md) diff --git a/SUMMARY.md b/SUMMARY.md index ef4a9f48..c72d20ab 100644 --- a/SUMMARY.md +++ b/SUMMARY.md @@ -87,6 +87,7 @@ * [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md) * [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) * [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) +* [Influxdb 安装和配置](markdown-file/Influxdb-Install-And-Settings.md) * [Grafana 
安装和配置](markdown-file/Grafana-Install-And-Settings.md) * [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md) * [Wormhole + Flink 安装和配置](markdown-file/Wormhole-Install-And-Settings.md) diff --git a/TOC.md b/TOC.md index 0b6b3709..078416b0 100644 --- a/TOC.md +++ b/TOC.md @@ -84,6 +84,7 @@ - [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md) - [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) - [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) +- [Influxdb 安装和配置](markdown-file/Influxdb-Install-And-Settings.md) - [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md) - [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md) - [Wormhole + Flink 安装和配置](markdown-file/Wormhole-Install-And-Settings.md) diff --git a/markdown-file/Influxdb-Install-And-Settings.md b/markdown-file/Influxdb-Install-And-Settings.md new file mode 100644 index 00000000..ea39cc70 --- /dev/null +++ b/markdown-file/Influxdb-Install-And-Settings.md @@ -0,0 +1,62 @@ +# Influxdb 安装和配置 + + + +## Influxdb Docker 安装 + +- 官网库: + + +``` +docker run -d --name influxdb \ +-p 8086:8086 -p 8083:8083 \ +-e INFLUXDB_HTTP_AUTH_ENABLED=true \ +-e INFLUXDB_ADMIN_ENABLED=true -e INFLUXDB_ADMIN_USER=admin -e INFLUXDB_ADMIN_PASSWORD=123456 \ +-e INFLUXDB_DB=mydb1 \ +-v /Users/gitnavi/docker_data/influxdb/data:/var/lib/influxdb influxdb +``` + + +- 进入终端交互: + +``` +docker exec -it influxdb /bin/bash + +输入:influx,开始终端交互 + +auth admin 123456 +show databases; + +如果你要再额外创建数据库: +create database demo + +如果你要再创建用户: +create user "myuser" with password '123456' with all privileges +``` + + +---------------------------------------------------------------------------------------------- + +## 配置 + + + +---------------------------------------------------------------------------------------------- + + + +---------------------------------------------------------------------------------------------- + + +## 其他资料 + +- +- <> +- <> +- <> +- <> +- <> +- <> +- <> +- <> + From 9179150a6522b71aa3ae42f781e0d60eed06dc09 Mon Sep 17 00:00:00 2001 From: zhang Date: Fri, 5 Jul 2019 11:16:30 +0800 Subject: [PATCH 293/330] Influxdb --- markdown-file/Grafana-Install-And-Settings.md | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/markdown-file/Grafana-Install-And-Settings.md b/markdown-file/Grafana-Install-And-Settings.md index 095c2b27..ad5a4416 100644 --- a/markdown-file/Grafana-Install-And-Settings.md +++ b/markdown-file/Grafana-Install-And-Settings.md @@ -6,6 +6,28 @@ - [支持的 Elasticsearch 版本](http://docs.grafana.org/features/datasources/elasticsearch/#elasticsearch-version) +## Grafana Docker 安装 + +- 官网: + +``` +docker run -d --name grafana -p 3000:3000 -v /Users/gitnavi/docker_data/grafana/data grafana/grafana + +docker exec -it grafana /bin/bash + +容器中默认的配置文件位置:/etc/grafana/grafana.ini + +复制出配置文件到宿主机:docker cp grafana:/etc/grafana/grafana.ini /Users/gitnavi/ +``` + +- +- 默认管理账号;admin,密码:admin,第一次登录后需要修改密码,也可以通过配置文件修改 + +``` +[security] +admin_user = admin +admin_password = admin +``` ---------------------------------------------------------------------------------------------- ## Grafana 安装 From 7d28eea60e1d6418178e50d794d29b30c39a87fd Mon Sep 17 00:00:00 2001 From: zhang Date: Fri, 5 Jul 2019 14:15:09 +0800 Subject: [PATCH 294/330] Influxdb --- markdown-file/Influxdb-Install-And-Settings.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/markdown-file/Influxdb-Install-And-Settings.md b/markdown-file/Influxdb-Install-And-Settings.md index ea39cc70..82fe262d 100644 --- 
From 7d28eea60e1d6418178e50d794d29b30c39a87fd Mon Sep 17 00:00:00 2001
From: zhang
Date: Fri, 5 Jul 2019 14:15:09 +0800
Subject: [PATCH 294/330] Influxdb

---
 markdown-file/Influxdb-Install-And-Settings.md | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/markdown-file/Influxdb-Install-And-Settings.md b/markdown-file/Influxdb-Install-And-Settings.md
index ea39cc70..82fe262d 100644
--- a/markdown-file/Influxdb-Install-And-Settings.md
+++ b/markdown-file/Influxdb-Install-And-Settings.md
@@ -27,6 +27,14 @@ docker exec -it influxdb /bin/bash
 auth admin 123456
 show databases;
 
+use springboot
+show measurements
+
+show series from "jvm_buffer_total_capacity"
+
+select * from "jvm_buffer_total_capacity"
+
+
 如果你要再额外创建数据库:
 create database demo
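上面补丁里是在 influx 终端中交互查询;同样的语句也可以走 InfluxDB 1.x 的 HTTP API,方便写成脚本。一个简单示意(假设沿用前面补丁中的 admin/123456 账号和 8086 端口,数据库名 springboot 也同上):

```
curl -G 'http://localhost:8086/query' -u admin:123456 \
--data-urlencode 'db=springboot' \
--data-urlencode 'q=SHOW MEASUREMENTS'
```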
From 9e9de520bc8529c6d29f9a81ac84677b4aee703c Mon Sep 17 00:00:00 2001
From: zhang
Date: Fri, 5 Jul 2019 14:21:31 +0800
Subject: [PATCH 295/330] Influxdb

---
 markdown-file/Grafana-Install-And-Settings.md | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/markdown-file/Grafana-Install-And-Settings.md b/markdown-file/Grafana-Install-And-Settings.md
index ad5a4416..ed963acd 100644
--- a/markdown-file/Grafana-Install-And-Settings.md
+++ b/markdown-file/Grafana-Install-And-Settings.md
@@ -84,6 +84,11 @@ sudo systemctl status grafana-server
 - 个性化设置:
 - 软件变量:
 
+## 官网 dashboard
+
+- dashboard 仓库地址:
+- 本地可以通过输入 dashboard id 导入别人模板
+
 ----------------------------------------------------------------------------------------------
 

From a1dac80519573e12a364013d0a60bbd06c1325ec Mon Sep 17 00:00:00 2001
From: zhang
Date: Fri, 5 Jul 2019 15:34:17 +0800
Subject: [PATCH 296/330] Prometheus

---
 README.md                                     |  1 +
 SUMMARY.md                                    |  1 +
 TOC.md                                        |  1 +
 markdown-file/Grafana-Install-And-Settings.md |  3 +-
 .../Prometheus-Install-And-Settings.md        | 81 +++++++++++++++++++
 5 files changed, 86 insertions(+), 1 deletion(-)
 create mode 100644 markdown-file/Prometheus-Install-And-Settings.md

diff --git a/README.md b/README.md
index eaed7384..e99786b4 100644
--- a/README.md
+++ b/README.md
@@ -108,6 +108,7 @@
 - [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md)
 - [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md)
 - [Influxdb 安装和配置](markdown-file/Influxdb-Install-And-Settings.md)
+- [Prometheus 安装和配置](markdown-file/Prometheus-Install-And-Settings.md)
 - [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md)
 - [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md)
 - [Wormhole + Flink 安装和配置](markdown-file/Wormhole-Install-And-Settings.md)
diff --git a/SUMMARY.md b/SUMMARY.md
index c72d20ab..d034445b 100644
--- a/SUMMARY.md
+++ b/SUMMARY.md
@@ -88,6 +88,7 @@
 * [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md)
 * [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md)
 * [Influxdb 安装和配置](markdown-file/Influxdb-Install-And-Settings.md)
+* [Prometheus 安装和配置](markdown-file/Prometheus-Install-And-Settings.md)
 * [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md)
 * [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md)
 * [Wormhole + Flink 安装和配置](markdown-file/Wormhole-Install-And-Settings.md)
diff --git a/TOC.md b/TOC.md
index 078416b0..434c20f7 100644
--- a/TOC.md
+++ b/TOC.md
@@ -85,6 +85,7 @@
 - [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md)
 - [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md)
 - [Influxdb 安装和配置](markdown-file/Influxdb-Install-And-Settings.md)
+- [Prometheus 安装和配置](markdown-file/Prometheus-Install-And-Settings.md)
 - [Grafana 安装和配置](markdown-file/Grafana-Install-And-Settings.md)
 - [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md)
 - [Wormhole + Flink 安装和配置](markdown-file/Wormhole-Install-And-Settings.md)
diff --git a/markdown-file/Grafana-Install-And-Settings.md b/markdown-file/Grafana-Install-And-Settings.md
index ed963acd..3370630a 100644
--- a/markdown-file/Grafana-Install-And-Settings.md
+++ b/markdown-file/Grafana-Install-And-Settings.md
@@ -88,7 +88,8 @@ sudo systemctl status grafana-server
 
 - dashboard 仓库地址:
 - 本地可以通过输入 dashboard id 导入别人模板
-
+- 打开:
+  - 输入对应的 id,点击 Load 即可
 
 ----------------------------------------------------------------------------------------------
 
diff --git a/markdown-file/Prometheus-Install-And-Settings.md b/markdown-file/Prometheus-Install-And-Settings.md
new file mode 100644
index 00000000..a5d36f4c
--- /dev/null
+++ b/markdown-file/Prometheus-Install-And-Settings.md
@@ -0,0 +1,81 @@
+# Prometheus 安装和配置
+
+## Prometheus Docker 安装
+
+- 官网:
+- 这里以 Spring Boot Metrics 为收集信息
+- 创建配置文件:/Users/gitnavi/docker_data/prometheus/config/prometheus.yml
+- 在 scrape_configs 位置下增加我们自己应用的路径信息
+
+```
+# my global config
+global:
+  scrape_interval:     15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
+  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
+  # scrape_timeout is set to the global default (10s).
+
+# Alertmanager configuration
+alerting:
+  alertmanagers:
+  - static_configs:
+    - targets:
+      # - alertmanager:9093
+
+# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
+rule_files:
+  # - "first_rules.yml"
+  # - "second_rules.yml"
+
+# A scrape configuration containing exactly one endpoint to scrape:
+# Here it's Prometheus itself.
+scrape_configs:
+  - job_name: 'springboot'
+    metrics_path: '/tkey-actuator/actuator/prometheus'
+    static_configs:
+      - targets: ['192.168.2.225:8811']
+```
+
+- 启动
+
+```
+docker run -d --name prometheus -p 9091:9090 \
+-v /Users/gitnavi/docker_data/prometheus/config/prometheus.yml:/etc/prometheus/prometheus.yml \
+prom/prometheus
+```
+
+- 然后配置 Grafana,使用这个 dashboard:
+
+
+----------------------------------------------------------------------------------------------
+
+## 配置
+
+
+### 微服务下的多服务收集
+
+-
+
+
+### 告警
+
+-
+-
+
+----------------------------------------------------------------------------------------------
+
+
+
+----------------------------------------------------------------------------------------------
+
+
+## 其他资料
+
+- <>
+- <>
+- <>
+- <>
+- <>
+- <>
+- <>
+- <>

From b5c52a9ac736fe40f46e1d3145d8bb17b72aa830 Mon Sep 17 00:00:00 2001
From: judasn
Date: Sat, 6 Jul 2019 00:26:08 +0800
Subject: [PATCH 297/330] 2019-07-06

---
 markdown-file/Grafana-Install-And-Settings.md |   6 +-
 .../Prometheus-Install-And-Settings.md        | 207 +++++++++++++++++-
 markdown-file/wrk-Install-And-Settings.md     |  18 +-
 3 files changed, 215 insertions(+), 16 deletions(-)

diff --git a/markdown-file/Grafana-Install-And-Settings.md b/markdown-file/Grafana-Install-And-Settings.md
index 3370630a..c0c12ae3 100644
--- a/markdown-file/Grafana-Install-And-Settings.md
+++ b/markdown-file/Grafana-Install-And-Settings.md
@@ -11,12 +11,14 @@
 - 官网:
 
 ```
-docker run -d --name grafana -p 3000:3000 -v /Users/gitnavi/docker_data/grafana/data grafana/grafana
+mkdir -p /data/docker/grafana/data
+chmod 777 -R /data/docker/grafana/data
+
+docker run -d --name grafana -p 3000:3000 -v /data/docker/grafana/data:/var/lib/grafana grafana/grafana
 
 docker exec -it grafana /bin/bash
 
 容器中默认的配置文件位置:/etc/grafana/grafana.ini
-
 复制出配置文件到宿主机:docker cp grafana:/etc/grafana/grafana.ini /Users/gitnavi/
 ```
 
diff --git a/markdown-file/Prometheus-Install-And-Settings.md b/markdown-file/Prometheus-Install-And-Settings.md
index a5d36f4c..ea5633f9 100644
--- a/markdown-file/Prometheus-Install-And-Settings.md
+++ b/markdown-file/Prometheus-Install-And-Settings.md
@@ -1,10 +1,12 @@
 # Prometheus 安装和配置
 
+- 特别说明:一般这类环境要尽可能保证所有服务器时间一致
+
 ## Prometheus Docker
安装 - 官网: - 这里以 Spring Boot Metrics 为收集信息 -- 创建配置文件:/Users/gitnavi/docker_data/prometheus/config/prometheus.yml +- 创建配置文件:`vim /data/docker/prometheus/config/prometheus.yml` - 在 scrape_configs 位置下增加我们自己应用的路径信息 ``` @@ -39,7 +41,7 @@ scrape_configs: ``` docker run -d --name prometheus -p 9091:9090 \ --v /Users/gitnavi/docker_data/prometheus/config/prometheus.yml:/etc/prometheus/prometheus.yml \ +-v /data/docker/prometheus/config/prometheus.yml:/etc/prometheus/prometheus.yml \ prom/prometheus ``` @@ -50,18 +52,212 @@ prom/prometheus ## 配置 +- 官网 exporter 列表: +- 官网 exporter 暴露的端口列表: + + +### CentOS7 服务器 + +- 当前最新版本:node_exporter 0.18.1(201907) + +``` +mkdir -p /usr/local/prometheus/node_exporter + +cd /usr/local/prometheus/node_exporter + +wget https://github.com/prometheus/node_exporter/releases/download/v0.18.1/node_exporter-0.18.1.linux-amd64.tar.gz + +tar -zxvf node_exporter-0.18.1.linux-amd64.tar.gz + +``` + + +``` +创建Systemd服务 +vim /etc/systemd/system/node_exporter.service + + + +[Unit] +Description=node_exporter +After=network.target + +[Service] +Type=simple +User=root +ExecStart=/usr/local/prometheus/node_exporter/node_exporter-0.18.1.linux-amd64/node_exporter +Restart=on-failure + +[Install] +WantedBy=multi-user.target +``` + +- 关于 ExecStart 参数,可以再附带一些启动监控的参数,官网介绍: + - 格式:`ExecStart=/usr/local/prometheus/node_exporter/node_exporter-0.18.1.linux-amd64/node_exporter --collectors.enabled meminfo,hwmon,entropy` + + +``` +启动 Node exporter +systemctl start node_exporter + +systemctl daemon-reload + +systemctl status node_exporter + +``` + + +``` +修改prometheus.yml,加入下面的监控目标: + +vim /usr/local/prometheus/prometheus.yml + +scrape_configs: + - job_name: 'centos7' + static_configs: + - targets: ['127.0.0.1:9100'] + labels: + instance: centos7_node1 + +``` + +- 重启 prometheus:`docker restart prometheus` +- Grafana 有现成的 dashboard: + - + - + +---------------------------------------------------------------------------------------------- + + +### Nginx 指标 + +- 这里使用 Nginx VTS exporter: + +- 安装 nginx 模块: + +``` +git clone --depth=1 https://github.com/vozlt/nginx-module-vts.git + + +编译 nginx 的时候加上: +./configure --prefix=/usr/local/nginx --with-http_ssl_module --add-module=/opt/nginx-module-vts + +make(已经安装过了,就不要再 make install) + +``` + +``` +修改Nginx配置 + + +http { + vhost_traffic_status_zone; + vhost_traffic_status_filter_by_host on; + + ... + + server { + + ... + + location /status { + vhost_traffic_status_display; + vhost_traffic_status_display_format html; + } + } +} + + +验证nginx-module-vts模块:http://IP/status + +``` + +``` +如果不想统计流量的server,可以禁用vhost_traffic_status,配置示例: +server { + ... + vhost_traffic_status off; + ... 
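+    # 说明:前提是 http 级别已开启 vhost_traffic_status_zone(如上文配置),
+    # 这里的 off 只关闭当前 server 的统计,其余 server 的指标不受影响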
+} +``` + + +- 安装 nginx-vts-exporter + +``` +wget -O nginx-vts-exporter-0.5.zip https://github.com/hnlq715/nginx-vts-exporter/archive/v0.5.zip +unzip nginx-vts-exporter-0.5.zip +mv nginx-vts-exporter-0.5 /usr/local/prometheus/nginx-vts-exporter +chmod +x /usr/local/prometheus/nginx-vts-exporter/bin/nginx-vts-exporter + +``` + +``` +创建Systemd服务 +vim /etc/systemd/system/nginx_vts_exporter.service + + +[Unit] +Description=nginx_exporter +After=network.target + +[Service] +Type=simple +User=root +ExecStart=/usr/local/prometheus/nginx-vts-exporter/bin/nginx-vts-exporter -nginx.scrape_uri=http://localhost/status/format/json +Restart=on-failure + +[Install] +WantedBy=multi-user.target +``` + + +``` +启动nginx-vts-exporter +systemctl start nginx_vts_exporter.service +systemctl status nginx_vts_exporter.service +``` + + +``` +修改prometheus.yml,加入下面的监控目标: + +- job_name: nginx + static_configs: + - targets: ['127.0.0.1:9913'] + labels: + instance: web1 + +``` + +- 重启 prometheus:`docker restart prometheus` +- Grafana 有现成的 dashboard: + - + - + +---------------------------------------------------------------------------------------------- + + ### 微服务下的多服务收集 - +---------------------------------------------------------------------------------------------- + ### 告警 - - ----------------------------------------------------------------------------------------------- +- 告警配置 + +- 告警检测 + +- [Grafana+Prometheus系统监控之邮件报警功能](https://blog.52itstyle.vip/archives/2014/) +- [Grafana+Prometheus系统监控之钉钉报警功能](https://blog.52itstyle.vip/archives/2029/) +- [Grafana+Prometheus系统监控之webhook](https://blog.52itstyle.vip/archives/2068/) @@ -70,8 +266,9 @@ prom/prometheus ## 其他资料 -- <> -- <> +- + - 写得非常非常非常好 +- - <> - <> - <> diff --git a/markdown-file/wrk-Install-And-Settings.md b/markdown-file/wrk-Install-And-Settings.md index 0b96c3c7..f73c6330 100644 --- a/markdown-file/wrk-Install-And-Settings.md +++ b/markdown-file/wrk-Install-And-Settings.md @@ -26,19 +26,19 @@ sudo cp wrk /usr/local/bin ## 使用 -- 启用 10 个线程,每个线程发起 100 个连接,持续 15 秒:`wrk -t10 -c100 -d15s http://www.baidu.com` +- 启用 10 个线程,每个线程发起 100 个连接,持续 15 秒:`wrk -t5 -c5 -d30s http://www.baidu.com` - 最终报告: ``` -Running 15s test @ http://www.baidu.com - 10 threads and 100 connections +Running 30s test @ http://www.baidu.com + 5 threads and 5 connections Thread Stats Avg Stdev Max +/- Stdev - Latency 208.39ms 324.00ms 1.91s 87.70% - Req/Sec 82.68 64.81 414.00 70.60% - 11345 requests in 15.02s, 166.51MB read - Socket errors: connect 0, read 20, write 0, timeout 59 -Requests/sec: 755.26 -Transfer/sec: 11.08MB + Latency 44.59ms 17.41ms 331.91ms 95.66% + Req/Sec 23.11 5.77 30.00 57.04% + 3439 requests in 30.03s, 50.47MB read + Socket errors: connect 0, read 10, write 0, timeout 0 +Requests/sec: 114.52 +Transfer/sec: 1.68MB ``` #### 使用 lua 脚本(发送一个 post 请求) From bdce133e17b61969d4a6b37bc45a250a1804fde3 Mon Sep 17 00:00:00 2001 From: zhang Date: Sat, 6 Jul 2019 17:07:05 +0800 Subject: [PATCH 298/330] Prometheus --- markdown-file/Nginx-Install-And-Settings.md | 20 ++++++++++ .../Prometheus-Install-And-Settings.md | 40 +++++++++++++------ 2 files changed, 47 insertions(+), 13 deletions(-) diff --git a/markdown-file/Nginx-Install-And-Settings.md b/markdown-file/Nginx-Install-And-Settings.md index 71cb04a4..1fdf60cf 100644 --- a/markdown-file/Nginx-Install-And-Settings.md +++ b/markdown-file/Nginx-Install-And-Settings.md @@ -102,6 +102,26 @@ http { ------------------------------------------------------------------- +## Nginx 源码编译安装(带 Prometheus 模块) + +``` +./configure \ 
+--prefix=/usr/local/nginx \ +--pid-path=/var/local/nginx/nginx.pid \ +--lock-path=/var/lock/nginx/nginx.lock \ +--error-log-path=/var/log/nginx/error.log \ +--http-log-path=/var/log/nginx/access.log \ +--with-http_gzip_static_module \ +--http-client-body-temp-path=/var/temp/nginx/client \ +--http-proxy-temp-path=/var/temp/nginx/proxy \ +--http-fastcgi-temp-path=/var/temp/nginx/fastcgi \ +--http-uwsgi-temp-path=/var/temp/nginx/uwsgi \ +--with-http_ssl_module \ +--with-http_stub_status_module \ +--http-scgi-temp-path=/var/temp/nginx/scgi \ +--add-module=/usr/local/nginx-module-vts +``` + ## Nginx 源码编译安装(带监控模块) diff --git a/markdown-file/Prometheus-Install-And-Settings.md b/markdown-file/Prometheus-Install-And-Settings.md index ea5633f9..d7f91e88 100644 --- a/markdown-file/Prometheus-Install-And-Settings.md +++ b/markdown-file/Prometheus-Install-And-Settings.md @@ -110,12 +110,12 @@ systemctl status node_exporter ``` 修改prometheus.yml,加入下面的监控目标: -vim /usr/local/prometheus/prometheus.yml +vim /data/docker/prometheus/config/prometheus.yml scrape_configs: - job_name: 'centos7' static_configs: - - targets: ['127.0.0.1:9100'] + - targets: ['192.168.1.3:9100'] labels: instance: centos7_node1 @@ -143,9 +143,17 @@ git clone --depth=1 https://github.com/vozlt/nginx-module-vts.git ./configure --prefix=/usr/local/nginx --with-http_ssl_module --add-module=/opt/nginx-module-vts make(已经安装过了,就不要再 make install) +``` + + +``` +也有人做好了 docker 镜像: +https://hub.docker.com/r/xcgd/nginx-vts +docker run --name nginx-vts -p 80:80 -v /data/docker/nginx/conf/nginx.conf:/etc/nginx/nginx.conf:ro -d xcgd/nginx-vts ``` + ``` 修改Nginx配置 @@ -168,7 +176,8 @@ http { } -验证nginx-module-vts模块:http://IP/status +验证nginx-module-vts模块:http://192.168.1.3/status,会展示: +Nginx Vhost Traffic Status 统计表 ``` @@ -185,11 +194,13 @@ server { - 安装 nginx-vts-exporter ``` -wget -O nginx-vts-exporter-0.5.zip https://github.com/hnlq715/nginx-vts-exporter/archive/v0.5.zip -unzip nginx-vts-exporter-0.5.zip -mv nginx-vts-exporter-0.5 /usr/local/prometheus/nginx-vts-exporter -chmod +x /usr/local/prometheus/nginx-vts-exporter/bin/nginx-vts-exporter +官网版本:https://github.com/hnlq715/nginx-vts-exporter/releases + +wget https://github.com/hnlq715/nginx-vts-exporter/releases/download/v0.10.3/nginx-vts-exporter-0.10.3.linux-amd64.tar.gz +tar zxvf nginx-vts-exporter-0.10.3.linux-amd64.tar.gz + +chmod +x /usr/local/nginx-vts-exporter-0.10.3.linux-amd64/nginx-vts-exporter ``` ``` @@ -204,7 +215,7 @@ After=network.target [Service] Type=simple User=root -ExecStart=/usr/local/prometheus/nginx-vts-exporter/bin/nginx-vts-exporter -nginx.scrape_uri=http://localhost/status/format/json +ExecStart=/usr/local/nginx-vts-exporter-0.10.3.linux-amd64/nginx-vts-exporter -nginx.scrape_uri=http://192.168.1.3/status/format/json Restart=on-failure [Install] @@ -215,18 +226,21 @@ WantedBy=multi-user.target ``` 启动nginx-vts-exporter systemctl start nginx_vts_exporter.service +systemctl daemon-reload systemctl status nginx_vts_exporter.service ``` ``` -修改prometheus.yml,加入下面的监控目标: +修改 prometheus.yml,加入下面的监控目标: +vim /data/docker/prometheus/config/prometheus.yml -- job_name: nginx +scrape_configs: + - job_name: 'nginx' static_configs: - - targets: ['127.0.0.1:9913'] - labels: - instance: web1 + - targets: ['192.168.1.3:9913'] + labels: + instance: nginx1 ``` From dcf0a257a334135c6cc99bae4d005a2eee3dbd00 Mon Sep 17 00:00:00 2001 From: zhang Date: Sat, 6 Jul 2019 17:42:09 +0800 Subject: [PATCH 299/330] Prometheus --- markdown-file/Nginx-Install-And-Settings.md | 21 +++++++++++++++++++++ 1 
file changed, 21 insertions(+) diff --git a/markdown-file/Nginx-Install-And-Settings.md b/markdown-file/Nginx-Install-And-Settings.md index 1fdf60cf..f54cbdb0 100644 --- a/markdown-file/Nginx-Install-And-Settings.md +++ b/markdown-file/Nginx-Install-And-Settings.md @@ -310,6 +310,27 @@ upgrade: - 更新 `make upgrade` +## 为 Nginx 添加 basic_auth + +``` +yum install httpd-tools + +htpasswd -c /opt/nginx-auth/passwd.db myusername,回车之后输入两次密码 + + +server { + ... + + location / { + auth_basic "please input you user name and password"; + auth_basic_user_file /opt/nginx-auth/passwd.db; + .... + } +} + +``` + + ## Nginx 全局变量 - $arg_PARAMETER #这个变量包含GET请求中,如果有变量PARAMETER时的值。 From 36058aafaf66b2eb9acfa6ba3a11410e40642432 Mon Sep 17 00:00:00 2001 From: zhang Date: Sat, 6 Jul 2019 17:45:22 +0800 Subject: [PATCH 300/330] Prometheus --- markdown-file/Prometheus-Install-And-Settings.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/markdown-file/Prometheus-Install-And-Settings.md b/markdown-file/Prometheus-Install-And-Settings.md index d7f91e88..36d2e642 100644 --- a/markdown-file/Prometheus-Install-And-Settings.md +++ b/markdown-file/Prometheus-Install-And-Settings.md @@ -242,6 +242,19 @@ scrape_configs: labels: instance: nginx1 + +如果nginx 有加 basic auth,则需要这样: +scrape_configs: + - job_name: "nginx" + metrics_path: /status/format/prometheus + basic_auth: + username: youmeek + password: '123456' + static_configs: + - targets: ['192.168.1.3:9913'] + labels: + instance: 'nginx1' + ``` - 重启 prometheus:`docker restart prometheus` From 8a5b8aa26b7ce60dbb97eff6b8724c92304a4458 Mon Sep 17 00:00:00 2001 From: zhang Date: Sat, 6 Jul 2019 18:15:27 +0800 Subject: [PATCH 301/330] Prometheus --- markdown-file/Prometheus-Install-And-Settings.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/markdown-file/Prometheus-Install-And-Settings.md b/markdown-file/Prometheus-Install-And-Settings.md index 36d2e642..9ccc8c6f 100644 --- a/markdown-file/Prometheus-Install-And-Settings.md +++ b/markdown-file/Prometheus-Install-And-Settings.md @@ -296,7 +296,7 @@ scrape_configs: - - 写得非常非常非常好 - -- <> +- - <> - <> - <> From 8c97b20723e931d68567015a6bb91e16169ebfb9 Mon Sep 17 00:00:00 2001 From: judasn Date: Sat, 6 Jul 2019 22:44:26 +0800 Subject: [PATCH 302/330] Prometheus --- markdown-file/Prometheus-Install-And-Settings.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/markdown-file/Prometheus-Install-And-Settings.md b/markdown-file/Prometheus-Install-And-Settings.md index 9ccc8c6f..93b0b9b3 100644 --- a/markdown-file/Prometheus-Install-And-Settings.md +++ b/markdown-file/Prometheus-Install-And-Settings.md @@ -1,6 +1,9 @@ # Prometheus 安装和配置 +- 不错的发展史说明: - 特别说明:一般这类环境要尽可能保证所有服务器时间一致 +- Prometheus 本地存储不适合存长久数据,一般存储一个月就够了。要永久存储需要用到远端存储,远端存储可以用 OpenTSDB +- Prometheus 也不适合做日志存储,日志存储还是推荐 ELK 方案 ## Prometheus Docker 安装 @@ -287,6 +290,10 @@ scrape_configs: - [Grafana+Prometheus系统监控之webhook](https://blog.52itstyle.vip/archives/2068/) +## 远端存储方案 + +- + ---------------------------------------------------------------------------------------------- From e924e95351adc3f7eb6a43ccc8ba96ca69e61d41 Mon Sep 17 00:00:00 2001 From: zhang Date: Wed, 17 Jul 2019 10:20:26 +0800 Subject: [PATCH 303/330] Elasticsearch --- markdown-file/Elasticsearch-Base.md | 39 +++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md index 05cc86de..b9ef2d45 100644 --- a/markdown-file/Elasticsearch-Base.md +++ 
b/markdown-file/Elasticsearch-Base.md @@ -3,10 +3,15 @@ ## Docker 单节点部署 - 官网: +- 官网列表: +- 阿里云支持版本: - 7.x:7.1.0 - 6.x:6.8.0 - 5.x:5.6.8 - 注意:docker 版本下 client.transport.sniff = true 是无效的。 + +#### 5.6.x + - `vim ~/elasticsearch-5.6.8-docker.yml` - 启动:`docker-compose -f ~/elasticsearch-5.6.8-docker.yml -p elasticsearch_5.6.8 up -d` @@ -38,6 +43,40 @@ services: ``` +#### 6.7.x + +- `vim ~/elasticsearch-6.7.2-docker.yml` +- 启动:`docker-compose -f ~/elasticsearch-6.7.2-docker.yml -p elasticsearch_6.7.2 up -d` +- `mkdir -p /data/docker/elasticsearch-6.7.2/data` + +``` +version: '3' +services: + elasticsearch1: + image: docker pull docker.elastic.co/elasticsearch/elasticsearch:6.7.2 + container_name: elasticsearch1 + environment: + - "ES_JAVA_OPTS=-Xms512m -Xmx512m" + - "cluster.name=elasticsearch" + - "network.host=0.0.0.0" + - "http.host=0.0.0.0" + - "xpack.security.enabled=false" + ulimits: + memlock: + soft: -1 + hard: -1 + nofile: + soft: 65536 + hard: 65536 + ports: + - 9200:9200 + - 9300:9300 + volumes: + - /data/docker/elasticsearch-6.7.2/data:/usr/share/elasticsearch/data + +``` + + ------------------------------------------------------------------- From 5694f0a85641e9cfeaf923aff88304e70c83b777 Mon Sep 17 00:00:00 2001 From: zhang Date: Wed, 17 Jul 2019 10:23:07 +0800 Subject: [PATCH 304/330] Elasticsearch --- markdown-file/Elasticsearch-Base.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md index b9ef2d45..36c86f1a 100644 --- a/markdown-file/Elasticsearch-Base.md +++ b/markdown-file/Elasticsearch-Base.md @@ -20,7 +20,7 @@ version: '3' services: elasticsearch1: image: docker.elastic.co/elasticsearch/elasticsearch:5.6.8 - container_name: elasticsearch1 + container_name: elasticsearch-5.6.8 environment: - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - "cluster.name=elasticsearch" @@ -54,7 +54,7 @@ version: '3' services: elasticsearch1: image: docker pull docker.elastic.co/elasticsearch/elasticsearch:6.7.2 - container_name: elasticsearch1 + container_name: elasticsearch-6.7.2 environment: - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - "cluster.name=elasticsearch" From 91a1876f352f68745d1e380836d8dab32c883219 Mon Sep 17 00:00:00 2001 From: zhang Date: Wed, 17 Jul 2019 10:23:55 +0800 Subject: [PATCH 305/330] Elasticsearch --- markdown-file/Elasticsearch-Base.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md index 36c86f1a..bb0f52c6 100644 --- a/markdown-file/Elasticsearch-Base.md +++ b/markdown-file/Elasticsearch-Base.md @@ -53,7 +53,7 @@ services: version: '3' services: elasticsearch1: - image: docker pull docker.elastic.co/elasticsearch/elasticsearch:6.7.2 + image: docker.elastic.co/elasticsearch/elasticsearch:6.7.2 container_name: elasticsearch-6.7.2 environment: - "ES_JAVA_OPTS=-Xms512m -Xmx512m" From c13cd7fe4830e9231fa31ba0220c9dcd690fecd3 Mon Sep 17 00:00:00 2001 From: zhang Date: Wed, 17 Jul 2019 10:38:41 +0800 Subject: [PATCH 306/330] Elasticsearch --- markdown-file/Elasticsearch-Base.md | 1 + 1 file changed, 1 insertion(+) diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md index bb0f52c6..ae1bf8f9 100644 --- a/markdown-file/Elasticsearch-Base.md +++ b/markdown-file/Elasticsearch-Base.md @@ -48,6 +48,7 @@ services: - `vim ~/elasticsearch-6.7.2-docker.yml` - 启动:`docker-compose -f ~/elasticsearch-6.7.2-docker.yml -p elasticsearch_6.7.2 up -d` - `mkdir -p 
/data/docker/elasticsearch-6.7.2/data`
+- 如果官网镜像比较慢可以换成阿里云:`registry.cn-hangzhou.aliyuncs.com/elasticsearch/elasticsearch:6.7.2`
 
 ```
 version: '3'

From 15086fcde61f2252ca06edf782c2200cdbe14335 Mon Sep 17 00:00:00 2001
From: zhang
Date: Wed, 17 Jul 2019 16:01:18 +0800
Subject: [PATCH 307/330] Elasticsearch

---
 markdown-file/Elasticsearch-Base.md | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md
index ae1bf8f9..f66d8d28 100644
--- a/markdown-file/Elasticsearch-Base.md
+++ b/markdown-file/Elasticsearch-Base.md
@@ -5,9 +5,7 @@
 - 官网:
 - 官网列表:
 - 阿里云支持版本:
-  - 7.x:7.1.0
-  - 6.x:6.8.0
-  - 5.x:5.6.8
+  - 阿里云有一个 `插件配置` 功能,常用的 Elasticsearch 插件都带了,勾选下即可安装。也支持上传安装。
 - 注意:docker 版本下 client.transport.sniff = true 是无效的。
 
 #### 5.6.x

From 7431a20d35067f52b2c55d7b05227d59c3ecd102 Mon Sep 17 00:00:00 2001
From: judasn
Date: Sun, 21 Jul 2019 19:45:05 +0800
Subject: [PATCH 308/330] Elasticsearch

---
 markdown-file/Elasticsearch-Base.md | 22 +++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/markdown-file/Elasticsearch-Base.md b/markdown-file/Elasticsearch-Base.md
index f66d8d28..959a7a93 100644
--- a/markdown-file/Elasticsearch-Base.md
+++ b/markdown-file/Elasticsearch-Base.md
@@ -41,12 +41,13 @@ services:
 
 ```
 
-#### 6.7.x
+#### 6.7.x(带 ik 分词)
 
 - `vim ~/elasticsearch-6.7.2-docker.yml`
 - 启动:`docker-compose -f ~/elasticsearch-6.7.2-docker.yml -p elasticsearch_6.7.2 up -d`
 - `mkdir -p /data/docker/elasticsearch-6.7.2/data`
 - 如果官网镜像比较慢可以换成阿里云:`registry.cn-hangzhou.aliyuncs.com/elasticsearch/elasticsearch:6.7.2`
+- 下载 ik 分词(版本必须和 Elasticsearch 版本对应,包括小版本号):
 
 ```
 version: '3'
@@ -72,7 +73,26 @@ services:
       - 9300:9300
     volumes:
       - /data/docker/elasticsearch-6.7.2/data:/usr/share/elasticsearch/data
+      - /data/docker/ik:/usr/share/elasticsearch/plugins/ik
 
 ```
 
+- Elasticsearch Head 插件地址:
+- 测试:
+
+
+```
+POST http://localhost:9200/_analyze?pretty
+
+{"analyzer":"ik_smart","text":"安徽省长江流域"}
+```
+
+- ik_max_word 和 ik_smart 什么区别?
+
+```
+ik_max_word: 会将文本做最细粒度的拆分,比如会将“中华人民共和国国歌”拆分为“中华人民共和国,中华人民,中华,华人,人民共和国,人民,人,民,共和国,共和,和,国国,国歌”,会穷尽各种可能的组合,适合 Term Query;
+ik_smart: 会做最粗粒度的拆分,比如会将“中华人民共和国国歌”拆分为“中华人民共和国,国歌”,适合 Phrase 查询。
+```

From e6559ace64791755c5871b9db88273c8d9008df6 Mon Sep 17 00:00:00 2001
From: judasn
Date: Sat, 27 Jul 2019 08:59:52 +0800
Subject: [PATCH 309/330] Elasticsearch

---
 markdown-file/Prometheus-Install-And-Settings.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/markdown-file/Prometheus-Install-And-Settings.md b/markdown-file/Prometheus-Install-And-Settings.md
index 93b0b9b3..7780838c 100644
--- a/markdown-file/Prometheus-Install-And-Settings.md
+++ b/markdown-file/Prometheus-Install-And-Settings.md
@@ -8,6 +8,7 @@
 ## Prometheus Docker 安装
 
 - 官网:
+- Docker 官方镜像:
 - 这里以 Spring Boot Metrics 为收集信息
 - 创建配置文件:`vim /data/docker/prometheus/config/prometheus.yml`
 - 在 scrape_configs 位置下增加我们自己应用的路径信息

From 31a876dfae112ba1de1ae997c54894db7732b55c Mon Sep 17 00:00:00 2001
From: "Judas.n"
Date: Tue, 6 Aug 2019 10:12:57 +0800
Subject: [PATCH 310/330] Update README.md

---
 README.md | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/README.md b/README.md
index e99786b4..ed57b20f 100644
--- a/README.md
+++ b/README.md
@@ -6,6 +6,11 @@
 
+## 声明
+
+- 2019-08-19 周一
+- 我将发布这两年来第一个新的 **大专题**,还是跟 IT 行业相关,请关注!!!
+ ## 初衷(Original Intention) - 整理下自己所学。**但是比较随意,所以很多地方不够严谨,所以请带着批评的思维阅读。** From f227d7a07b751d3308d7dc3d62270a43716bfab5 Mon Sep 17 00:00:00 2001 From: judasn Date: Sun, 11 Aug 2019 22:00:48 +0800 Subject: [PATCH 311/330] WordPress --- markdown-file/WordPress-Install-And-Settings.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/markdown-file/WordPress-Install-And-Settings.md b/markdown-file/WordPress-Install-And-Settings.md index 39fdc9c2..c11835da 100644 --- a/markdown-file/WordPress-Install-And-Settings.md +++ b/markdown-file/WordPress-Install-And-Settings.md @@ -194,6 +194,19 @@ systemctl enable httpd.service - 我是托管到 DNSPOD,重新指向到新 IP 地址即可 +## 常用插件 + +- JP Markdown +- WP Code Highlight.js +- FooBox Image Lightbox +- WP Super Cache + +## 常见问题 + +- 安装插件出现:`WordPress需要访问您网页服务器的权限。 请输入您的FTP登录凭据以继续` +- 解决办法:`chown -R apache:apache /var/www/html` + + ## 资料 - From 0b3e9155204cd9425df4e2ebeaef76a0f05aef66 Mon Sep 17 00:00:00 2001 From: zhang Date: Mon, 12 Aug 2019 18:01:03 +0800 Subject: [PATCH 312/330] Gravitee --- .../gravitee-docker-compose/README.md | 32 +++++ .../environments/ci/docker-compose.yml | 62 ++++++++ .../environments/demo/common.yml | 62 ++++++++ .../demo/docker-compose-local.yml | 76 ++++++++++ .../demo/docker-compose-traefik-latest.yml | 76 ++++++++++ .../demo/docker-compose-traefik-nightly.yml | 79 ++++++++++ .../environments/demo/launch.sh | 91 ++++++++++++ .../docker-compose-sample-apis.yml | 47 ++++++ .../platform/docker-compose.yml | 135 ++++++++++++++++++ .../create-index.js | 92 ++++++++++++ .../platform/nginx/nginx.conf | 133 +++++++++++++++++ .../platform/nginx/ssl/gio-selfsigned.crt | 27 ++++ .../platform/nginx/ssl/gio-selfsigned.key | 27 ++++ .../platform/nginx/ssl/gio.pem | 8 ++ .../platform/prometheus.yml | 8 ++ 15 files changed, 955 insertions(+) create mode 100644 favorite-file/gravitee-docker-compose/README.md create mode 100644 favorite-file/gravitee-docker-compose/environments/ci/docker-compose.yml create mode 100644 favorite-file/gravitee-docker-compose/environments/demo/common.yml create mode 100644 favorite-file/gravitee-docker-compose/environments/demo/docker-compose-local.yml create mode 100644 favorite-file/gravitee-docker-compose/environments/demo/docker-compose-traefik-latest.yml create mode 100644 favorite-file/gravitee-docker-compose/environments/demo/docker-compose-traefik-nightly.yml create mode 100755 favorite-file/gravitee-docker-compose/environments/demo/launch.sh create mode 100644 favorite-file/gravitee-docker-compose/environments/sample-apis/docker-compose-sample-apis.yml create mode 100644 favorite-file/gravitee-docker-compose/platform/docker-compose.yml create mode 100644 favorite-file/gravitee-docker-compose/platform/mongo/docker-entrypoint-initdb.d/create-index.js create mode 100644 favorite-file/gravitee-docker-compose/platform/nginx/nginx.conf create mode 100644 favorite-file/gravitee-docker-compose/platform/nginx/ssl/gio-selfsigned.crt create mode 100644 favorite-file/gravitee-docker-compose/platform/nginx/ssl/gio-selfsigned.key create mode 100644 favorite-file/gravitee-docker-compose/platform/nginx/ssl/gio.pem create mode 100644 favorite-file/gravitee-docker-compose/platform/prometheus.yml diff --git a/favorite-file/gravitee-docker-compose/README.md b/favorite-file/gravitee-docker-compose/README.md new file mode 100644 index 00000000..e4983ec9 --- /dev/null +++ b/favorite-file/gravitee-docker-compose/README.md @@ -0,0 +1,32 @@ +# graviteeio api gateway docker-compose running + +fork from graviteeio project && change 
some dependency images

- 
- 

## How to run

```code
cd platform && docker-compose up -d
```

## Manager UI

* API portal

```code
open https://localhost/apim/portal
```

* Access Management UI

```code
open https://localhost/am/ui/
```

## Notes

- the environments directory has some demos with CI && a traefik gateway
- portal account: admin / admin
- Access Management UI account: admin / adminadmin
\ No newline at end of file
diff --git a/favorite-file/gravitee-docker-compose/environments/ci/docker-compose.yml b/favorite-file/gravitee-docker-compose/environments/ci/docker-compose.yml
new file mode 100644
index 00000000..37c9c07c
--- /dev/null
+++ b/favorite-file/gravitee-docker-compose/environments/ci/docker-compose.yml
@@ -0,0 +1,62 @@
+#-------------------------------------------------------------------------------
+# Copyright (C) 2015 The Gravitee team (http://gravitee.io)
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#         http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#-------------------------------------------------------------------------------
+version: '2'
+
+services:
+  ci:
+    image: graviteeio/jenkins:latest
+    container_name: ci
+    network_mode: "bridge"
+    expose:
+      - 50022
+    ports:
+      - "50022:50022"
+    labels:
+      - "traefik.frontend.rule=Host:ci.gravitee.io"
+      - "traefik.port=8080"
+    volumes:
+      - /var/jenkins_home:/var/jenkins_home
+      - /var/run/docker.sock:/var/run/docker.sock
+    links:
+      - redis-test
+      - qa
+
+  redis-test:
+    image: redis:3
+    container_name: redis-test
+    network_mode: "bridge"
+
+  qa:
+    image: sonarqube:alpine
+    container_name: qa
+    network_mode: "bridge"
+    environment:
+      - SONARQUBE_JDBC_URL=jdbc:postgresql://sonarqube-db:5432/sonar
+    labels:
+      - "traefik.frontend.rule=Host:qa.gravitee.io"
+    volumes:
+      - /opt/sonarqube/conf:/opt/sonarqube/conf
+      - /opt/sonarqube/data:/opt/sonarqube/data
+      - /opt/sonarqube/extensions:/opt/sonarqube/extensions
+      - /opt/sonarqube/bundled-plugins:/opt/sonarqube/lib/bundled-plugins
+    links:
+      - sonarqube-db
+
+  sonarqube-db:
+    image: postgres:alpine
+    network_mode: "bridge"
+    environment:
+      - POSTGRES_USER=sonar
+      - POSTGRES_PASSWORD=sonar
+    volumes:
+      - /opt/sonarqube/postgresql/data:/var/lib/postgresql/data
diff --git a/favorite-file/gravitee-docker-compose/environments/demo/common.yml b/favorite-file/gravitee-docker-compose/environments/demo/common.yml
new file mode 100644
index 00000000..a1d7c696
--- /dev/null
+++ b/favorite-file/gravitee-docker-compose/environments/demo/common.yml
@@ -0,0 +1,62 @@
+#-------------------------------------------------------------------------------
+# Copyright (C) 2015 The Gravitee team (http://gravitee.io)
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#         http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------- +version: '2' + +volumes: + datamongo: {} + dataelasticsearch: {} + +services: + elasticsearch: + hostname: demo-elasticsearch + image: docker.elastic.co/elasticsearch/elasticsearch:5.4.3 + volumes: + - dataelasticsearch:/usr/share/elasticsearch/data + environment: + - http.host=0.0.0.0 + - transport.host=0.0.0.0 + - xpack.security.enabled=false + - xpack.monitoring.enabled=false + - cluster.name=elasticsearch + - bootstrap.memory_lock=true + - "ES_JAVA_OPTS=-Xms512m -Xmx512m" + ulimits: + memlock: + soft: -1 + hard: -1 + nofile: 65536 + + mongodb: + hostname: demo-mongodb + image: mongo:3.4 + volumes: + - datamongo:/data/db + + gateway: + hostname: demo-gateway + image: graviteeio/gateway:latest + environment: + - gravitee_management_mongodb_uri=mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_ratelimit_mongodb_uri=mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_reporters_elasticsearch_endpoints_0=http://elasticsearch:9200 + + managementui: + hostname: demo-managementui + image: graviteeio/management-ui:latest + + managementapi: + hostname: demo-managementapi + image: graviteeio/management-api:latest + environment: + - gravitee_management_mongodb_uri=mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_analytics_elasticsearch_endpoints_0=http://elasticsearch:9200 diff --git a/favorite-file/gravitee-docker-compose/environments/demo/docker-compose-local.yml b/favorite-file/gravitee-docker-compose/environments/demo/docker-compose-local.yml new file mode 100644 index 00000000..38c34e4c --- /dev/null +++ b/favorite-file/gravitee-docker-compose/environments/demo/docker-compose-local.yml @@ -0,0 +1,76 @@ +#------------------------------------------------------------------------------- +# Copyright (C) 2015 The Gravitee team (http://gravitee.io) +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#------------------------------------------------------------------------------- +version: '2' + +volumes: + local_datamongo: {} + local_dataelasticsearch: {} + +services: + local_elasticsearch: + extends: + file: common.yml + service: elasticsearch + volumes: + - local_dataelasticsearch:/usr/share/elasticsearch/data + - ./logs/elasticsearch:/var/log/elasticsearch + + local_mongodb: + extends: + file: common.yml + service: mongodb + volumes: + - local_datamongo:/data/db + - ./logs/mongodb:/var/log/mongodb + + local_gateway: + extends: + file: common.yml + service: gateway + links: + - "local_mongodb:demo-mongodb" + - "local_elasticsearch:demo-elasticsearch" + ports: + - "8000:8082" + volumes: + - ./logs/gateway:/etc/gravitee.io/log + environment: + - gravitee_management_mongodb_uri=mongodb://demo-mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_ratelimit_mongodb_uri=mongodb://demo-mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_reporters_elasticsearch_endpoints_0=http://demo-elasticsearch:9200 + + local_managementui: + extends: + file: common.yml + service: managementui + ports: + - "8002:80" + volumes: + - ./logs/management-ui:/var/log/httpd + environment: + - MGMT_API_URL=http:\/\/localhost:8005\/management\/ + + local_managementapi: + extends: + file: common.yml + service: managementapi + ports: + - "8005:8083" + volumes: + - ./logs/management-api:/home/gravitee/logs + links: + - "local_mongodb:demo-mongodb" + - "local_elasticsearch:demo-elasticsearch" + environment: + - gravitee_management_mongodb_uri=mongodb://demo-mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_analytics_elasticsearch_endpoints_0=http://demo-elasticsearch:9200 diff --git a/favorite-file/gravitee-docker-compose/environments/demo/docker-compose-traefik-latest.yml b/favorite-file/gravitee-docker-compose/environments/demo/docker-compose-traefik-latest.yml new file mode 100644 index 00000000..e3ea6bce --- /dev/null +++ b/favorite-file/gravitee-docker-compose/environments/demo/docker-compose-traefik-latest.yml @@ -0,0 +1,76 @@ +#------------------------------------------------------------------------------- +# Copyright (C) 2015 The Gravitee team (http://gravitee.io) +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#------------------------------------------------------------------------------- +version: '2' + +volumes: + latest_datamongo: {} + latest_dataelasticsearch: {} + +services: + latest_elasticsearch: + network_mode: "bridge" + extends: + file: common.yml + service: elasticsearch + volumes: + - latest_dataelasticsearch:/usr/share/elasticsearch/data + + latest_mongodb: + network_mode: "bridge" + extends: + file: common.yml + service: mongodb + volumes: + - latest_datamongo:/data/db + + latest_gateway: + network_mode: "bridge" + extends: + file: common.yml + service: gateway + links: + - latest_mongodb + - latest_elasticsearch + labels: + - "traefik.backend=graviteeio-gateway" + - "traefik.frontend.rule=Host:demo.gravitee.io;PathPrefixStrip:/gateway" + environment: + - gravitee_management_mongodb_uri=mongodb://demo-mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_ratelimit_mongodb_uri=mongodb://demo-mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_reporters_elasticsearch_endpoints_0=http://demo-elasticsearch:9200 + + latest_managementui: + network_mode: "bridge" + extends: + file: common.yml + service: managementui + labels: + - "traefik.backend=graviteeio-managementui" + - "traefik.frontend.rule=Host:demo.gravitee.io" + environment: + - MGMT_API_URL=https:\/\/demo.gravitee.io\/management\/ + + latest_managementapi: + network_mode: "bridge" + extends: + file: common.yml + service: managementapi + labels: + - "traefik.backend=graviteeio-managementapi" + - "traefik.frontend.rule=Host:demo.gravitee.io;PathPrefix:/management" + links: + - latest_mongodb + - latest_elasticsearch + environment: + - gravitee_management_mongodb_uri=mongodb://demo-mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_analytics_elasticsearch_endpoints_0=http://demo-elasticsearch:9200 \ No newline at end of file diff --git a/favorite-file/gravitee-docker-compose/environments/demo/docker-compose-traefik-nightly.yml b/favorite-file/gravitee-docker-compose/environments/demo/docker-compose-traefik-nightly.yml new file mode 100644 index 00000000..2369851c --- /dev/null +++ b/favorite-file/gravitee-docker-compose/environments/demo/docker-compose-traefik-nightly.yml @@ -0,0 +1,79 @@ +#------------------------------------------------------------------------------- +# Copyright (C) 2015 The Gravitee team (http://gravitee.io) +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#------------------------------------------------------------------------------- +version: '2' + +volumes: + nightly_datamongo: {} + nightly_dataelasticsearch: {} + +services: + nightly_elasticsearch: + network_mode: "bridge" + extends: + file: common.yml + service: elasticsearch + volumes: + - nightly_dataelasticsearch:/usr/share/elasticsearch/data + + nightly_mongodb: + network_mode: "bridge" + extends: + file: common.yml + service: mongodb + volumes: + - nightly_datamongo:/data/db + + nightly_gateway: + image: graviteeio/gateway:nightly + network_mode: "bridge" + extends: + file: common.yml + service: gateway + links: + - nightly_mongodb + - nightly_elasticsearch + labels: + - "traefik.backend=nightly-graviteeio-gateway" + - "traefik.frontend.rule=Host:nightly.gravitee.io;PathPrefixStrip:/gateway" + environment: + - gravitee_management_mongodb_uri=mongodb://demo-mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_ratelimit_mongodb_uri=mongodb://demo-mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_reporters_elasticsearch_endpoints_0=http://demo-elasticsearch:9200 + + nightly_managementui: + image: graviteeio/management-ui:nightly + network_mode: "bridge" + extends: + file: common.yml + service: managementui + labels: + - "traefik.backend=nightly-graviteeio-managementui" + - "traefik.frontend.rule=Host:nightly.gravitee.io" + environment: + - MGMT_API_URL=https:\/\/nightly.gravitee.io\/management\/ + + nightly_managementapi: + image: graviteeio/management-api:nightly + network_mode: "bridge" + extends: + file: common.yml + service: managementapi + labels: + - "traefik.backend=nightly-graviteeio-managementapi" + - "traefik.frontend.rule=Host:nightly.gravitee.io;PathPrefix:/management" + links: + - nightly_mongodb + - nightly_elasticsearch + environment: + - gravitee_management_mongodb_uri=mongodb://demo-mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_analytics_elasticsearch_endpoints_0=http://demo-elasticsearch:9200 \ No newline at end of file diff --git a/favorite-file/gravitee-docker-compose/environments/demo/launch.sh b/favorite-file/gravitee-docker-compose/environments/demo/launch.sh new file mode 100755 index 00000000..ff51ff04 --- /dev/null +++ b/favorite-file/gravitee-docker-compose/environments/demo/launch.sh @@ -0,0 +1,91 @@ +#!/bin/bash +#------------------------------------------------------------------------------- +# Copyright (C) 2015 The Gravitee team (http://gravitee.io) +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------- + +readonly WORKDIR="$HOME/graviteeio-demo" +readonly DIRNAME=`dirname $0` +readonly PROGNAME=`basename $0` +readonly color_title='\033[32m' +readonly color_text='\033[1;36m' + +# OS specific support (must be 'true' or 'false'). 
+declare cygwin=false +declare darwin=false +declare linux=false +declare dc_exec="docker-compose -f docker-compose-local.yml up" + +welcome() { + echo + echo -e " ${color_title} _____ _____ __ _______ _______ ______ ______ _____ ____ \033[0m" + echo -e " ${color_title} / ____| __ \ /\ \ / /_ _|__ __| ____| ____| |_ _/ __ \ \033[0m" + echo -e " ${color_title}| | __| |__) | / \ \ / / | | | | | |__ | |__ | || | | | \033[0m" + echo -e " ${color_title}| | |_ | _ / / /\ \ \/ / | | | | | __| | __| | || | | | \033[0m" + echo -e " ${color_title}| |__| | | \ \ / ____ \ / _| |_ | | | |____| |____ _ _| || |__| | \033[0m" + echo -e " ${color_title} \_____|_| \_\/_/ \_\/ |_____| |_| |______|______(_)_____\____/ \033[0m" + echo -e " ${color_title} | | \033[0m${color_text}http://gravitee.io\033[0m" + echo -e " ${color_title} __| | ___ _ __ ___ ___ \033[0m" + echo -e " ${color_title} / _\` |/ _ \ '_ \` _ \ / _ \ \033[0m" + echo -e " ${color_title}| (_| | __/ | | | | | (_) | \033[0m" + echo -e " ${color_title} \__,_|\___|_| |_| |_|\___/ \033[0m" + echo +} + +init_env() { + local dockergrp + # define env + case "`uname`" in + CYGWIN*) + cygwin=true + ;; + + Darwin*) + darwin=true + ;; + + Linux) + linux=true + ;; + esac + + # test if docker must be run with sudo + dockergrp=$(groups | grep -c docker) + if [[ $darwin == false && $dockergrp == 0 ]]; then + dc_exec="sudo $dc_exec"; + fi +} + +init_dirs() { + echo "Init log directory in $WORKDIR ..." + mkdir -p "$WORKDIR/logs/" + echo +} + +main() { + welcome + init_env + if [[ $? != 0 ]]; then + exit 1 + fi + set -e + init_dirs + pushd $WORKDIR > /dev/null + echo "Download docker compose files ..." + curl -L https://raw.githubusercontent.com/gravitee-io/gravitee-docker/master/environments/demo/common.yml -o "common.yml" + curl -L https://raw.githubusercontent.com/gravitee-io/gravitee-docker/master/environments/demo/docker-compose-local.yml -o "docker-compose-local.yml" + echo + echo "Launch GraviteeIO demo ..." + $dc_exec + popd > /dev/null +} + +main diff --git a/favorite-file/gravitee-docker-compose/environments/sample-apis/docker-compose-sample-apis.yml b/favorite-file/gravitee-docker-compose/environments/sample-apis/docker-compose-sample-apis.yml new file mode 100644 index 00000000..10c1a074 --- /dev/null +++ b/favorite-file/gravitee-docker-compose/environments/sample-apis/docker-compose-sample-apis.yml @@ -0,0 +1,47 @@ +#------------------------------------------------------------------------------- +# Copyright (C) 2015 The Gravitee team (http://gravitee.io) +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#------------------------------------------------------------------------------- +version: '2' + +services: + + sample-api-index: + image: graviteeio/gravitee-sample-index:latest + network_mode: "bridge" + labels: + - "traefik.backend=gravitee-sample-index" + - "traefik.frontend.rule=Host:api.gravitee.io" + - "traefik.frontend.entryPoints=https" + + sample-api-echo: + image: graviteeio/gravitee-echo-api:nightly + network_mode: "bridge" + labels: + - "traefik.backend=gravitee-echo-api" + - "traefik.frontend.rule=Host:api.gravitee.io;PathPrefix:/echo" + - "traefik.frontend.entryPoints=https" + + sample-api-whoami: + image: graviteeio/gravitee-whoami-api:latest + network_mode: "bridge" + labels: + - "traefik.backend=gravitee-whoami-api" + - "traefik.frontend.rule=Host:api.gravitee.io;PathPrefix:/whoami" + - "traefik.frontend.entryPoints=https" + + sample-api-whattimeisit: + image: graviteeio/gravitee-whattimeisit-api:latest + network_mode: "bridge" + labels: + - "traefik.backend=gravitee-whattimeisit-api" + - "traefik.frontend.rule=Host:api.gravitee.io;PathPrefix:/whattimeisit" + - "traefik.frontend.entryPoints=https" diff --git a/favorite-file/gravitee-docker-compose/platform/docker-compose.yml b/favorite-file/gravitee-docker-compose/platform/docker-compose.yml new file mode 100644 index 00000000..3771102c --- /dev/null +++ b/favorite-file/gravitee-docker-compose/platform/docker-compose.yml @@ -0,0 +1,135 @@ +version: '3' + +networks: + default: + +services: + nginx: + image: nginx:1.15-alpine + container_name: gio_platform_nginx + volumes: + - ./nginx/nginx.conf:/etc/nginx/nginx.conf + - ./nginx/ssl/gio-selfsigned.crt:/etc/ssl/certs/gio-selfsigned.crt + - ./nginx/ssl/gio-selfsigned.key:/etc/ssl/private/gio-selfsigned.key + - ./nginx/ssl/gio.pem:/etc/ssl/certs/gio.pem + ports: + - "80:80" + - "443:443" + depends_on: + - apim_gateway + - apim_portal + - apim_management + - am_gateway + - am_management + - am_webui + + mongodb: + image: mongo:3.4 + container_name: gio_platform_mongo + ports: + - 27017:27017 + environment: + - MONGO_INITDB_DATABASE=gravitee + volumes: + - ./mongo/docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d + - ./data/mongo:/data/db + - ./logs/mongodb:/var/log/mongodb + + elasticsearch: + image: elasticsearch:6.4.0 + container_name: gio_platform_elasticsearch + ports: + - 9200:9200 + environment: + - http.host=0.0.0.0 + - transport.host=0.0.0.0 + - xpack.security.enabled=false + - xpack.monitoring.enabled=false + - cluster.name=elasticsearch + ulimits: + nofile: 65536 + volumes: + - ./data/elasticsearch:/usr/share/elasticsearch/data + - ./logs/elasticsearch:/var/log/elasticsearch + + apim_gateway: + image: graviteeio/gateway:latest + container_name: gio_platform_apim_gateway + volumes: + - ./logs/apim-gateway:/opt/graviteeio-gateway/logs + environment: + - gravitee_management_mongodb_uri=mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_ratelimit_mongodb_uri=mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_reporters_elasticsearch_endpoints_0=http://elasticsearch:9200 + - gravitee_services_metrics_enabled=true + - gravitee_services_metrics_prometheus.enabled=true + depends_on: + - mongodb + - elasticsearch + + apim_portal: + image: graviteeio/management-ui:latest + container_name: gio_platform_apim_portal + environment: + - MGMT_API_URL=https:\/\/localhost\/apim\/management\/ + depends_on: + - apim_management + + apim_management: + 
image: graviteeio/management-api:latest + container_name: gio_platform_apim_mgmt_api + volumes: + - ./logs/apim-management-api:/opt/graviteeio-management-api/logs + environment: + - gravitee_management_mongodb_uri=mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_ratelimit_mongodb_uri=mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_analytics_elasticsearch_endpoints_0=http://elasticsearch:9200 + - gravitee_jwt_cookiepath=/apim/management + - gravitee_jwt_cookiesecure=true + depends_on: + - mongodb + - elasticsearch + + am_gateway: + image: graviteeio/am-gateway:2 + container_name: gio_platform_am_gateway + volumes: + - ./logs/am-gateway:/opt/graviteeio-am-gateway/logs + environment: + - gravitee_management_mongodb_uri=mongodb://mongodb:27017/gravitee-am?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_oauth2_mongodb_uri=mongodb://mongodb:27017/gravitee-am?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + depends_on: + - mongodb + + am_management: + image: graviteeio/am-management-api:2 + container_name: gio_platform_am_management + volumes: + - ./logs/am-management-api:/opt/graviteeio-am-management-api/logs + environment: + - gravitee_management_mongodb_uri=mongodb://mongodb:27017/gravitee-am?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_oauth2_mongodb_uri=mongodb://mongodb:27017/gravitee-am?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000 + - gravitee_jwt_cookiepath=/am/management + - gravitee_jwt_cookiesecure=true + depends_on: + - mongodb + grafana: + image: grafana/grafana + ports: + - "3000:3000" + prometheus: + image: prom/prometheus + volumes: + - "./prometheus.yml:/etc/prometheus/prometheus.yml" + ports: + - "9090:9090" + am_webui: + image: graviteeio/am-management-ui:2 + container_name: gio_platform_am_webui + environment: + - MGMT_API_URL=https:\/\/localhost\/am\/ + - MGMT_UI_URL=https:\/\/localhost\/am\/ui\/ + volumes: + - ./logs/am-webui:/var/log/nginx + depends_on: + - am_management diff --git a/favorite-file/gravitee-docker-compose/platform/mongo/docker-entrypoint-initdb.d/create-index.js b/favorite-file/gravitee-docker-compose/platform/mongo/docker-entrypoint-initdb.d/create-index.js new file mode 100644 index 00000000..b6f2d379 --- /dev/null +++ b/favorite-file/gravitee-docker-compose/platform/mongo/docker-entrypoint-initdb.d/create-index.js @@ -0,0 +1,92 @@ + +// "apis" collection +db.apis.dropIndexes(); +db.apis.createIndex( { "visibility" : 1 } ); +db.apis.createIndex( { "group" : 1 } ); +db.apis.reIndex(); + +// "applications" collection +db.applications.dropIndexes(); +db.applications.createIndex( { "group" : 1 } ); +db.applications.createIndex( { "name" : 1 } ); +db.applications.createIndex( { "status" : 1 } ); +db.applications.reIndex(); + +// "events" collection +db.events.dropIndexes(); +db.events.createIndex( { "type" : 1 } ); +db.events.createIndex( { "updatedAt" : 1 } ); +db.events.createIndex( { "properties.api_id" : 1 } ); +db.events.createIndex( { "properties.api_id":1, "type":1} ); +db.events.reIndex(); + +// "plans" collection +db.plans.dropIndexes(); +db.plans.createIndex( { "apis" : 1 } ); +db.plans.reIndex(); + +// "subscriptions" collection +db.subscriptions.dropIndexes(); +db.subscriptions.createIndex( { "plan" : 1 } ); +db.subscriptions.createIndex( { "application" : 1 } ); 
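+// note: reIndex() rebuilds every index on the collection; acceptable in this
+// one-off init script for mongo:3.4, though newer MongoDB releases restrict reIndex()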
+db.subscriptions.reIndex(); + +// "keys" collection +db.keys.dropIndexes(); +db.keys.createIndex( { "plan" : 1 } ); +db.keys.createIndex( { "application" : 1 } ); +db.keys.createIndex( { "updatedAt" : 1 } ); +db.keys.createIndex( { "revoked" : 1 } ); +db.keys.createIndex( { "plan" : 1 , "revoked" : 1, "updatedAt" : 1 } ); +db.keys.reIndex(); + +// "pages" collection +db.pages.dropIndexes(); +db.pages.createIndex( { "api" : 1 } ); +db.pages.reIndex(); + +// "memberships" collection +db.memberships.dropIndexes(); +db.memberships.createIndex( {"_id.userId":1, "_id.referenceId":1, "_id.referenceType":1}, { unique: true } ); +db.memberships.createIndex( {"_id.referenceId":1, "_id.referenceType":1} ); +db.memberships.createIndex( {"_id.referenceId":1, "_id.referenceType":1, "roles":1} ); +db.memberships.createIndex( {"_id.userId":1, "_id.referenceType":1} ); +db.memberships.createIndex( {"_id.userId":1, "_id.referenceType":1, "roles":1} ); +db.memberships.reIndex(); + +// "roles" collection +db.roles.dropIndexes(); +db.roles.createIndex( {"_id.scope": 1 } ); +db.roles.reIndex(); + +// "audits" collection +db.audits.dropIndexes(); +db.audits.createIndex( { "referenceType": 1, "referenceId": 1 } ); +db.audits.createIndex( { "createdAt": 1 } ); +db.audits.reIndex(); + +// "rating" collection +db.rating.dropIndexes(); +db.rating.createIndex( { "api" : 1 } ); +db.rating.reIndex(); + +// "ratingAnswers" collection +db.ratingAnswers.dropIndexes(); +db.ratingAnswers.createIndex( { "rating" : 1 } ); + +// "portalnotifications" collection +db.portalnotifications.dropIndexes(); +db.portalnotifications.createIndex( { "user" : 1 } ); +db.portalnotifications.reIndex(); + +// "portalnotificationconfigs" collection +db.portalnotificationconfigs.dropIndexes(); +db.portalnotificationconfigs.createIndex( {"_id.user":1, "_id.referenceId":1, "_id.referenceType":1}, { unique: true } ); +db.portalnotificationconfigs.createIndex( {"_id.referenceId":1, "_id.referenceType":1, "hooks":1}); +db.portalnotificationconfigs.reIndex(); + +// "genericnotificationconfigs" collection +db.genericnotificationconfigs.dropIndexes(); +db.genericnotificationconfigs.createIndex( {"referenceId":1, "referenceType":1, "hooks":1}); +db.genericnotificationconfigs.createIndex( {"referenceId":1, "referenceType":1}); +db.genericnotificationconfigs.reIndex(); diff --git a/favorite-file/gravitee-docker-compose/platform/nginx/nginx.conf b/favorite-file/gravitee-docker-compose/platform/nginx/nginx.conf new file mode 100644 index 00000000..d08fc26c --- /dev/null +++ b/favorite-file/gravitee-docker-compose/platform/nginx/nginx.conf @@ -0,0 +1,133 @@ +worker_processes 4; + +events { worker_connections 1024; } + +http { + include /etc/nginx/mime.types; + resolver 127.0.0.11 ipv6=off; + + upstream apim_management { + server apim_management:8083; + } + + upstream apim_gateway { + server apim_gateway:8082; + } + + upstream apim_portal { + server apim_portal:80; + } + + upstream am_management { + server am_management:8093; + } + + upstream am_gateway { + server am_gateway:8092; + } + + upstream am_webui { + server am_webui:80; + } + + server { + listen 80; + server_name localhost; + return 301 https://$server_name$request_uri; #Redirection + } + + server { + listen 443 ssl; + listen [::]:443 ssl; + + server_name localhost; + + ssl_certificate /etc/ssl/certs/gio-selfsigned.crt; + ssl_certificate_key /etc/ssl/private/gio-selfsigned.key; + ssl_dhparam /etc/ssl/certs/gio.pem; + + error_page 500 502 503 504 /50x.html; + + location /apim/portal/ { + proxy_pass 
http://apim_portal/; + proxy_redirect $scheme://$host:$server_port/ $scheme://$http_host/apim/portal/; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Host $server_name; + } + + location /apim/management/ { + proxy_pass http://apim_management/management/; + proxy_redirect $scheme://$host:$server_port/management/ /apim/management/; + sub_filter "/management/" "/apim/management/"; + sub_filter_types application/json; + sub_filter_once off; + proxy_cookie_path /management /apim/management; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Host $server_name; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location /apim/ { + proxy_pass http://apim_gateway/; + proxy_cookie_path / /apim; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Host $server_name; + } + + location /am/ui/ { + proxy_pass http://am_webui/; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Host $server_name; + sub_filter ' Date: Mon, 19 Aug 2019 18:30:38 +0800 Subject: [PATCH 313/330] 2019-08-19 --- favorite-file/Nginx-Settings/nginx-front.conf | 45 +++++++++++-------- 1 file changed, 26 insertions(+), 19 deletions(-) diff --git a/favorite-file/Nginx-Settings/nginx-front.conf b/favorite-file/Nginx-Settings/nginx-front.conf index 82894983..8b49d8f9 100644 --- a/favorite-file/Nginx-Settings/nginx-front.conf +++ b/favorite-file/Nginx-Settings/nginx-front.conf @@ -22,28 +22,35 @@ http { log_format main '$remote_addr - $remote_user [$time_local] "$request" ' '$status $body_bytes_sent "$http_referer" ' '"$http_user_agent" "$http_x_forwarded_for" "$request_time"'; - + access_log /var/log/nginx/access.log main; error_log /var/log/nginx/error.log; - + gzip on; gzip_buffers 8 16k; gzip_min_length 512; gzip_disable "MSIE [1-6]\.(?!.*SV1)"; gzip_http_version 1.1; gzip_types text/plain text/css application/javascript application/x-javascript application/json application/xml; - + server { - + listen 8001; server_name localhost 127.0.0.1 139.159.190.24 platform.gitnavi.com; - + location / { root /root/.jenkins/workspace/nestle-platform-front-test/dist; index index.html index.htm; try_files $uri /index.html; } - + + ## 二级目录方式,记得 package.json 添加:"homepage": "cdk8s-markdown", + location ^~ /cdk8s-markdown { + root /root/.jenkins/workspace; + index index.html; + try_files $uri /cdk8s-markdown/index.html; + } + location ^~ /platform/ { proxy_pass http://127.0.0.1:28081; proxy_redirect off; @@ -51,36 +58,36 @@ http { proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; } - + location ~ .*\.(js|css)?$ { root /root/.jenkins/workspace/nestle-platform-front-test/dist; } - + location ~ .*\.(gif|jpg|jpeg|png|bmp|swf|ico|woff|woff2|ttf|eot|txt|svg)$ { root /root/.jenkins/workspace/nestle-platform-front-test/dist; } - + error_page 404 /404.html; location = /usr/share/nginx/html/40x.html { } - + error_page 500 502 503 504 /50x.html; location = /usr/share/nginx/html/50x.html { } } - + server { - + listen 8002; server_name localhost 127.0.0.1 139.159.190.24 store.gitnavi.com; - + location / { root 
/root/.jenkins/workspace/nestle-store-front-test/dist; index index.html index.htm; try_files $uri /index.html; } - + location ^~ /store/ { proxy_pass http://127.0.0.1:28082; proxy_redirect off; @@ -88,22 +95,22 @@ http { proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; } - + location ~ .*\.(js|css)?$ { root /root/.jenkins/workspace/nestle-store-front-test/dist; } - + location ~ .*\.(gif|jpg|jpeg|png|bmp|swf|ico|woff|woff2|ttf|eot|txt|svg)$ { root /root/.jenkins/workspace/nestle-store-front-test/dist; } - + error_page 404 /404.html; location = /usr/share/nginx/html/40x.html { } - + error_page 500 502 503 504 /50x.html; location = /usr/share/nginx/html/50x.html { } } -} \ No newline at end of file +} From 5ca8e8b6e72398eb3764736ae991e36706a1955c Mon Sep 17 00:00:00 2001 From: "Judas.n" Date: Mon, 19 Aug 2019 22:36:14 +0800 Subject: [PATCH 314/330] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index ed57b20f..06409a8d 100644 --- a/README.md +++ b/README.md @@ -8,8 +8,8 @@ ## 声明 -- 2019-08-19 周一 -- 我将发布这两年来第一个新的 **大专题**,还是跟 IT 行业相关,请关注!!! +- 后续我将在新的地方,以新的方式重新开始,感谢一直以来的信任 ! +- CDK8S: ## 初衷(Original Intention) From 5737ad495ebb96b8b3cb1313972b155effd71be2 Mon Sep 17 00:00:00 2001 From: "Judas.n" Date: Sat, 5 Oct 2019 23:18:46 +0800 Subject: [PATCH 315/330] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 06409a8d..dcc2662c 100644 --- a/README.md +++ b/README.md @@ -6,10 +6,10 @@ -## 声明 +## 新的起点 -- 后续我将在新的地方,以新的方式重新开始,感谢一直以来的信任 ! - CDK8S: +- TKey: ## 初衷(Original Intention) From dd7514316f27e78df6c6c690af60bd92dc06eb68 Mon Sep 17 00:00:00 2001 From: "Judas.n" Date: Sat, 21 Dec 2019 15:20:28 +0800 Subject: [PATCH 316/330] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index dcc2662c..3f311512 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,7 @@ ## 新的起点 +- [Sculptor Boot:项目思维化的《代码生成器》体系(未来可期,做个朋友吧)](https://github.com/cdk8s/sculptor-boot-generator) - CDK8S: - TKey: From e95b2b7a1d6594103583076733b3ac6e6f905415 Mon Sep 17 00:00:00 2001 From: cdk8s-zelda <51701412+cdk8s-zelda@users.noreply.github.com> Date: Thu, 9 Jan 2020 16:11:46 +0800 Subject: [PATCH 317/330] Update README.md --- README.md | 118 ++++-------------------------------------------------- 1 file changed, 8 insertions(+), 110 deletions(-) diff --git a/README.md b/README.md index 3f311512..e33ec9a0 100644 --- a/README.md +++ b/README.md @@ -5,6 +5,13 @@ +## 只有上云才能撑住规模化后的发展 + +- 初期技术选型上尽可能寻找云支持的 +- 在公司规模小,自建服务基本都做不到 99.999% 高可用 +- 在公司规模发展变迅速时,如果云技术和已有技术契合,迁移成本会低很多很多 +- 目前暂定只选择:[阿里云服务](https://www.aliyun.com/minisite/goods?userCode=v2zozyxz) +- 这里罗列了阿里云常用的一些:[产品](https://github.com/cdk8s/cdk8s-team-style/blob/master/ops/aliyun.md) ## 新的起点 @@ -48,113 +55,4 @@ - [Linux 下常用压缩文件的解压、压缩](markdown-file/File-Extract-Compress.md) - [Yum 下载安装包及对应依赖包](markdown-file/Off-line-Yum-Install.md) - [Zsh 入门](markdown-file/Zsh.md) -- [终端测速](markdown-file/speedtest.md) -- [日常维护](markdown-file/maintenance.md) -- [日常监控](markdown-file/monitor.md) -- [nmon 系统性能监控工具](markdown-file/Nmon.md) -- [Glances 安装和配置](markdown-file/Glances-Install-And-Settings.md) -- [SSH(Secure Shell)介绍](markdown-file/SSH.md) -- [FTP(File Transfer Protocol)介绍](markdown-file/FTP.md) -- [VPN(Virtual Private Network)介绍](markdown-file/VPN.md) -- [NFS(Network FileSystem)介绍](markdown-file/NFS.md) -- [NTP(Network Time 
Protocol)介绍](markdown-file/NTP.md) -- [Samba 介绍](markdown-file/Samba.md) -- [Crontab 介绍](markdown-file/Crontab.md) -- [Iptables 介绍](markdown-file/Iptables.md) -- [花生壳-安装介绍](markdown-file/Hsk-Install.md) -- [JDK 安装](markdown-file/JDK-Install.md) -- [Java bin 目录下的工具](markdown-file/Java-bin.md) -- [SVN 安装和配置](markdown-file/SVN-Install-And-Settings.md) -- [Tomcat 安装和配置、优化](markdown-file/Tomcat-Install-And-Settings.md) -- [Jenkins 安装和配置](markdown-file/Jenkins-Install-And-Settings.md) -- [Maven 安装和配置](markdown-file/Maven-Install-And-Settings.md) -- [Nexus 安装和配置](markdown-file/Nexus-Install-And-Settings.md) -- [PostgreSQL 安装和配置](markdown-file/PostgreSQL-Install-And-Settings.md) -- [MySQL 安装和配置](markdown-file/Mysql-Install-And-Settings.md) -- [MySQL 优化](markdown-file/Mysql-Optimize.md) -- [MySQL 测试](markdown-file/Mysql-Test.md) -- [MySQL 教程](markdown-file/Mysql-Tutorial.md) -- [Percona XtraDB Cluster(PXC)安装和配置](markdown-file/PXC-Install-And-Settings.md) -- [Redis 安装和配置](markdown-file/Redis-Install-And-Settings.md) -- [MongoDB 安装和配置](markdown-file/MongoDB-Install-And-Settings.md) -- [Solr 安装和配置](markdown-file/Solr-Install-And-Settings.md) -- [Jira 安装和配置](markdown-file/Jira-Install-And-Settings.md) -- [Confluence 安装和配置](markdown-file/Confluence-Install-And-Settings.md) -- [TeamCity 安装和配置](markdown-file/TeamCity-Install-And-Settings.md) -- [Nginx 安装和配置](markdown-file/Nginx-Install-And-Settings.md) -- [wrk 安装和配置](markdown-file/wrk-Install-And-Settings.md) -- [FastDFS 安装和配置](markdown-file/FastDFS-Install-And-Settings.md) -- [FastDFS 结合 GraphicsMagick](markdown-file/FastDFS-Nginx-Lua-GraphicsMagick.md) -- [RabbitMQ 安装和配置](markdown-file/RabbitMQ-Install-And-Settings.md) -- [Openfire 安装和配置](markdown-file/Openfire-Install-And-Settings.md) -- [Rap 安装和配置](markdown-file/Rap-Install-And-Settings.md) -- [Nginx + Keepalived 高可用](markdown-file/Nginx-Keepalived-Install-And-Settings.md) -- [黑客入侵检查](markdown-file/Was-Hacked.md) -- [Shadowsocks 安装和配置](markdown-file/http://code.youmeek.com/2016/08/19/2016/08/VPS/) -- [Mycat 安装和配置](markdown-file/Mycat-Install-And-Settings.md) -- [Zookeeper 安装和配置](markdown-file/Zookeeper-Install.md) -- [Daemontools 工具介绍](markdown-file/Daemontools.md) -- [Tmux 安装和配置](markdown-file/Tmux-Install-And-Settings.md) -- [ELK 日志收集系统安装和配置](markdown-file/ELK-Install-And-Settings.md) -- [Dubbo 安装和配置](markdown-file/Dubbo-Install-And-Settings.md) -- [GitLab 安装和配置](markdown-file/Gitlab-Install-And-Settings.md) -- [JMeter 安装和配置](markdown-file/JMeter-Install-And-Settings.md) -- [Docker 安装和使用](markdown-file/Docker-Install-And-Usage.md) -- [Harbor 安装和配置](markdown-file/Harbor-Install-And-Usage.md) -- [LDAP 安装和使用](markdown-file/LDAP-Install-And-Settings.md) -- [Alfresco 安装和使用](markdown-file/Alfresco-Install-And-Usage.md) -- [Apache Thrift 安装和使用](markdown-file/Thrift-Install-And-Usage.md) -- [Node.js 安装和使用](markdown-file/Node-Install-And-Usage.md) -- [CI 整套服务安装和使用](markdown-file/CI-Install-And-Usage.md) -- [YApi 安装和配置](markdown-file/YApi-Install-And-Settings.md) -- [Kafka 安装和配置](markdown-file/Kafka-Install-And-Settings.md) -- [Hadoop 安装和配置](markdown-file/Hadoop-Install-And-Settings.md) -- [Showdoc 安装和配置](markdown-file/Showdoc-Install-And-Settings.md) -- [WordPress 安装和配置](markdown-file/WordPress-Install-And-Settings.md) -- [GoAccess 安装和配置](markdown-file/GoAccess-Install-And-Settings.md) -- [Portainer 安装和配置](markdown-file/Portainer-Install-And-Settings.md) -- [Influxdb 安装和配置](markdown-file/Influxdb-Install-And-Settings.md) -- [Prometheus 安装和配置](markdown-file/Prometheus-Install-And-Settings.md) -- [Grafana 
安装和配置](markdown-file/Grafana-Install-And-Settings.md)
-- [Ansible 安装和配置](markdown-file/Ansible-Install-And-Settings.md)
-- [Wormhole + Flink 安装和配置](markdown-file/Wormhole-Install-And-Settings.md)
-- [SkyWalking 安装和配置](markdown-file/SkyWalking-Install-And-Settings.md)
-
-## 联系(Contact)
-
-- Email:judas.n@qq.com(常用) or admin@youmeek.com(备用)
-- Blog:
-- QQ 群交流,入群请看:
-- 欢迎捐赠 ^_^:
-
-
-## Github 协同视频教程(Participate)
-
-- 如果您不会使用 Git 或是 Github 也没关系,请认真学习下面视频教程:
-- Judas.n 录制
-  - 视频格式:MP4
-  - 分辨率:1920 X 1080
-  - 片长:16 min
-  - 文件大小:62 M
-- 下载
-  - 百度云盘:
-  - 360 网盘(2fb5):
-
-## Github 常用按钮说明
-
-- Watch:关注该项目,作者有更新的时候,会在你的 Github 主页有通知消息。
-- Star:收藏该项目,在你的头像上有一个“Your stars”链接,可以看到你的收藏列表。
-- Fork:复制一份项目到的Github空间上,你可以自己开发自己的这个地址项目,然后 Pull Request 给项目原主人。
-
-## 参与作者汇总(Author)
-
-|作者(按参与时间排序)|地址|
-|:---------|:---------|
-|Judas.n||
-|mrdear||
-|fooofei||
-
-## AD
-
-- [推荐:程序员的个性化网址导航:GitNavi.com](http://www.gitnavi.com/u/judasn/)
-- [适合后端开发者的前端 React-Admin](https://github.com/satan31415/umi-admin)
+- [终端测速](markdown-file/

From aec2ad2693d79c6ae85d13f847b19ea20c04bd28 Mon Sep 17 00:00:00 2001
From: judasn
Date: Fri, 5 Jun 2020 16:09:18 +0800
Subject: [PATCH 318/330] monitor

---
 markdown-file/monitor.md | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/markdown-file/monitor.md b/markdown-file/monitor.md
index 50574c9f..9a825839 100644
--- a/markdown-file/monitor.md
+++ b/markdown-file/monitor.md
@@ -858,11 +858,12 @@ access_log /home/wwwlogs/hicrew.log special_main;
 #### 一次 JVM 引起的 CPU 高排查
 
 - 使用 `ps -ef | grep java`,查看进程 PID
-  - 根据高 CPU 的进程 PID,查看其线程 CPU 使用情况:`top -Hp PID`,找到占用 CPU 资源高的线程 PID
-- 保存堆栈情况:`jstack -l PID >> /opt/jstack-tomcat1-PID-20181017.log`
-- 把占用 CPU 资源高的线程十进制的 PID 转换成 16 进制:`printf "%x\n" PID`,比如:`printf "%x\n" 12401` 得到结果是:`3071`
+  - 根据高 CPU 的进程 PID,查看其线程 CPU 使用情况:`top -Hp PID`,找到占用 CPU 资源高的线程 TID
+  - 也可以用:`ps -mp PID -o THREAD,tid,time`
+- 保存堆栈情况(注意:jstack 的参数是进程 PID,不是线程 TID):`jstack -l PID >> /opt/jstack-tomcat1-PID-20181017.log`
+- 把占用 CPU 资源高的线程十进制的 TID 转换成 16 进制:`printf "%x\n" TID`,比如:`printf "%x\n" 12401` 得到结果是:`3071`
 - 在刚刚输出的那个 log 文件中搜索:`3071`,可以找到:`nid=0x3071`
-- 也可以在终端中直接看:`jstack PID |grep 十六进制线程 -A 30`,此时如果发现如下:
+- 也可以在终端中直接看:`jstack PID | grep 十六进制线程 -A 30`,此时如果发现如下:
 
 ```
 "GC task thread#0 (ParallelGC)" os_prio=0 tid=0x00007fd0ac01f000 nid=0x66f runnable
 ```
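(Editor's aside, not part of any commit: the troubleshooting recipe in the monitor.md hunk above, run end to end. The IDs are invented for illustration: 12345 as the JVM process PID and 12401 as the hot thread's TID, which converts to the hex 3071 used in the diff's own example.)

```bash
# hypothetical walk-through; PID 12345 and TID 12401 are made-up example values
ps -ef | grep java                    # locate the JVM process, say PID 12345
top -Hp 12345                         # per-thread CPU inside it, say TID 12401 is hottest
printf "%x\n" 12401                   # -> 3071, the nid=0x3071 form used in thread dumps
jstack -l 12345 > /tmp/jstack-12345.log
grep -A 30 'nid=0x3071' /tmp/jstack-12345.log   # stack trace of the hot thread
```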
From 6c13ef6789caa3dbdabf3cce346af18a5dc01d4b Mon Sep 17 00:00:00 2001
From: judasn
Date: Thu, 6 Aug 2020 14:51:03 +0800
Subject: [PATCH 319/330] monitor

---
 markdown-file/Bash.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/markdown-file/Bash.md b/markdown-file/Bash.md
index b5eba96e..593b8ff6 100644
--- a/markdown-file/Bash.md
+++ b/markdown-file/Bash.md
@@ -171,6 +171,8 @@ drwxr-xr-x. 5 root root 4096 3月 26 10:57,其中最前面的 d 表示这是
 - linux 的权限分为 rwx。r 代表:可读,w 代表:可写,x 代表:可执行
 - 这三个权限都可以转换成数值表示,r = 4,w = 2,x = 1,- = 0,所以总和是 7,也就是最大权限。第一个 7 是所属主(user)的权限,第二个 7 是所属组(group)的权限,最后一位 7 是非本群组用户(others)的权限。
   - `chmod -R 777 目录` 表示递归目录下的所有文件夹,都赋予 777 权限
+  - `chown myUsername:myGroupName myFile` 表示修改文件所属用户、组
+  - `chown -R myUsername:myGroupName myFolder` 表示递归修改指定目录下所有文件的所属用户、组
 - `su`:切换到 root 用户,终端目录还是原来的地方(常用)
 - `su -`:切换到 root 用户,其中 **-** 号另起一个终端并切换账号
 - `su 用户名`,切换指定用户帐号登陆,终端目录还是原来地方。

From ad52e9864a3408122c2feb78040256bdb890f5f1 Mon Sep 17 00:00:00 2001
From: judasn
Date: Mon, 10 Aug 2020 16:24:52 +0800
Subject: [PATCH 320/330] monitor

---
 markdown-file/Nginx-Install-And-Settings.md | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/markdown-file/Nginx-Install-And-Settings.md b/markdown-file/Nginx-Install-And-Settings.md
index f54cbdb0..6a5ffb0c 100644
--- a/markdown-file/Nginx-Install-And-Settings.md
+++ b/markdown-file/Nginx-Install-And-Settings.md
@@ -619,6 +619,20 @@
 
 ```
 
+- 最新版本的 Nginx SSL 配置
+
+```
+listen 443 ssl;
+
+ssl_certificate /opt/jar/ssl/server.crt;
+ssl_certificate_key /opt/jar/ssl/server.key;
+
+ssl_session_timeout 5m;
+ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
+ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:HIGH:!aNULL:!MD5:!RC4:!DHE;
+ssl_prefer_server_ciphers on;
+```
+
 ----------------------------------------------------------------------
 
 ## Nginx 压力测试

From 730694b737d7eace9b31819d92d1f0906e229f35 Mon Sep 17 00:00:00 2001
From: judasn
Date: Fri, 25 Sep 2020 15:05:40 +0800
Subject: [PATCH 321/330] 2020-09-25

---
 markdown-file/JMeter-Install-And-Settings.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/markdown-file/JMeter-Install-And-Settings.md b/markdown-file/JMeter-Install-And-Settings.md
index 3fd087b9..31946f33 100644
--- a/markdown-file/JMeter-Install-And-Settings.md
+++ b/markdown-file/JMeter-Install-And-Settings.md
@@ -160,8 +160,8 @@ remote_hosts=192.168.0.1:1099,192.168.0.2:1099
   - [快速学习Jmeter性能测试工具](http://gitbook.cn/books/58de71a8be13fa66243873ef/index.html)
   - [jmeter:菜鸟入门到进阶系列](http://www.cnblogs.com/imyalost/p/7062784.html)
 - 国内视频教程:
-  - [JMeter 性能测试入门篇 - 慕课网](https://www.imooc.com/learn/735)
   - [JMeter 之 HTTP 协议接口性能测试 - 慕课网](https://www.imooc.com/learn/791)
+  - [接口测试基础之入门篇 - 慕课网](https://www.imooc.com/learn/738)
   - [JMeter 性能测试进阶案例实战 - 慕课网](https://coding.imooc.com/class/142.html)
   - [性能测试工具—Jmeter- 我要自学网](http://www.51zxw.net/list.aspx?page=2&cid=520)
   - [jmeter 视频教学课程 - 小强](https://www.youtube.com/watch?v=zIiXpCBaBgQ&list=PL3rfV4zNE8CD-rAwlXlGXilN5QpkqDWox)

From 955ff70778c388c807eaf51eb29ae5cfbb75eb60 Mon Sep 17 00:00:00 2001
From: judasn
Date: Tue, 27 Oct 2020 23:49:18 +0800
Subject: [PATCH 322/330] 2020-10-27

---
 centos-settings/Close-XWindow.md | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/centos-settings/Close-XWindow.md b/centos-settings/Close-XWindow.md
index 0a34275f..5b89f556 100644
--- a/centos-settings/Close-XWindow.md
+++ b/centos-settings/Close-XWindow.md
@@ -15,7 +15,14 @@
 
 - 在图形界面中如果你希望临时关闭图形界面可以输入:`init 3`
 
-## CentOS 7 设置方法
+## CentOS 7 设置方法 1
+
+- 开机以命令模式启动,执行:
+  - `systemctl set-default multi-user.target`
+- 开机以图形界面启动,执行:
+  - `systemctl set-default graphical.target`
+
+## CentOS 7 设置方法 2
 
 - 关闭图形
   - `mv /etc/systemd/system/default.target /etc/systemd/system/default.target.bak` (改名备份)
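(Editor's aside, not part of any commit: the two `systemctl set-default` calls in the Close-XWindow.md hunk above only take effect on the next boot. A small hedged sketch of checking the current target and applying the change immediately:)

```bash
systemctl get-default                          # print the current default target
sudo systemctl set-default multi-user.target   # boot into text mode from now on
sudo systemctl isolate multi-user.target       # switch right now, without rebooting
```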
From d236d0e636740c4c8671a2bc4a73edb8adaa1187 Mon Sep 17 00:00:00 2001
From: cdk8s-zelda <51701412+cdk8s-zelda@users.noreply.github.com>
Date: Fri, 2 Jul 2021 03:46:02 +0800
Subject: [PATCH 323/330] Update README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index e33ec9a0..2f461700 100644
--- a/README.md
+++ b/README.md
@@ -15,6 +15,7 @@
 
 ## 新的起点
 
+- [本系列的大部分文章都迁移到这里](https://github.com/cdk8s/cdk8s-team-style)
 - [Sculptor Boot:项目思维化的《代码生成器》体系(未来可期,做个朋友吧)](https://github.com/cdk8s/sculptor-boot-generator)
 - CDK8S:
 - TKey:
@@ -55,4 +56,3 @@
 - [Linux 下常用压缩文件的解压、压缩](markdown-file/File-Extract-Compress.md)
 - [Yum 下载安装包及对应依赖包](markdown-file/Off-line-Yum-Install.md)
 - [Zsh 入门](markdown-file/Zsh.md)
-- [终端测速](markdown-file/

From 007f8881f711a9bad1131dfd3e64d62e61a660e6 Mon Sep 17 00:00:00 2001
From: cdk8s-zelda <51701412+cdk8s-zelda@users.noreply.github.com>
Date: Fri, 2 Jul 2021 03:46:17 +0800
Subject: [PATCH 324/330] Update README.md

---
 README.md | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/README.md b/README.md
index 2f461700..2e2aa874 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,4 @@
-## 团队 DevOps 方案参考
-
-
-
-
-
 ## 只有上云才能撑住规模化后的发展
 
 - 初期技术选型上尽可能寻找云支持的

From 8b8dc1c05283a60c0f47af1da81af7bc5f55041f Mon Sep 17 00:00:00 2001
From: cdk8s-zelda <51701412+cdk8s-zelda@users.noreply.github.com>
Date: Fri, 2 Jul 2021 03:46:30 +0800
Subject: [PATCH 325/330] Update README.md

---
 README.md | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/README.md b/README.md
index 2e2aa874..c1b3fef1 100644
--- a/README.md
+++ b/README.md
@@ -1,12 +1,4 @@
-## 只有上云才能撑住规模化后的发展
-
-- 初期技术选型上尽可能寻找云支持的
-- 在公司规模小,自建服务基本都做不到 99.999% 高可用
-- 在公司规模发展变迅速时,如果云技术和已有技术契合,迁移成本会低很多很多
-- 目前暂定只选择:[阿里云服务](https://www.aliyun.com/minisite/goods?userCode=v2zozyxz)
-- 这里罗列了阿里云常用的一些:[产品](https://github.com/cdk8s/cdk8s-team-style/blob/master/ops/aliyun.md)
-
 ## 新的起点

From add9e63649889e04004d6d55ba0852596c2ebe36 Mon Sep 17 00:00:00 2001
From: cdk8s-zelda <51701412+cdk8s-zelda@users.noreply.github.com>
Date: Fri, 2 Jul 2021 03:50:38 +0800
Subject: [PATCH 326/330] Update CentOS-7-Install.md

---
 markdown-file/CentOS-7-Install.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/markdown-file/CentOS-7-Install.md b/markdown-file/CentOS-7-Install.md
index 4b1a3e00..bbbec5d4 100644
--- a/markdown-file/CentOS-7-Install.md
+++ b/markdown-file/CentOS-7-Install.md
@@ -5,7 +5,8 @@
 
 - 本教程中主要演示了 VMware Workstation 下安装 `CentOS 7.3` 的过程。
 - VMware 的使用细节可以看这篇:[CentOS 6 安装](CentOS-Install.md)
-- 如果你是要安装到 PC 机中,你需要准备一个 U 盘,以及下载这个软件:[USBWriter(提取码:5aa2)](https://pan.baidu.com/s/1gg83h9T)
+- 如果你是要安装到 PC 机中,你需要准备一个 U 盘,以及 Windows 下载这个软件:[USBWriter(提取码:5aa2)](https://pan.baidu.com/s/1gg83h9T)
+- 如果你是要安装到 PC 机中,你需要准备一个 U 盘,以及 macOS 下载这个软件:[balenaEtcher(提取码:o9qn)](https://pan.baidu.com/s/1bjddfOcuhS3UUIOrFf5ehg)
 - USBWriter 的使用很简单,如下图即可制作一个 CentOS 系统盘
 
 ![VMware 下安装](../images/CentOS-7-Install-a-0.jpg)

From b198564df47639eea7c53005d295054edc802141 Mon Sep 17 00:00:00 2001
From: cdk8s-zelda <51701412+cdk8s-zelda@users.noreply.github.com>
Date: Fri, 2 Jul 2021 03:55:40 +0800
Subject: [PATCH 327/330] Update CentOS-7-Install.md

---
 markdown-file/CentOS-7-Install.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/markdown-file/CentOS-7-Install.md b/markdown-file/CentOS-7-Install.md
index bbbec5d4..c9428617 100644
--- a/markdown-file/CentOS-7-Install.md
+++ b/markdown-file/CentOS-7-Install.md
@@ -6,7 +6,7 @@
 - 本教程中主要演示了 VMware Workstation 下安装 `CentOS 7.3` 的过程。
 - VMware 的使用细节可以看这篇:[CentOS 6 安装](CentOS-Install.md)
 - 如果你是要安装到 PC 机中,你需要准备一个 U 盘,以及 Windows 下载这个软件:[USBWriter(提取码:5aa2)](https://pan.baidu.com/s/1gg83h9T)
-- 如果你是要安装到 PC 机中,你需要准备一个 U 盘,以及 macOS 下载这个软件:[balenaEtcher(提取码:o9qn)](https://pan.baidu.com/s/1bjddfOcuhS3UUIOrFf5ehg)
+- 如果你是要安装到 PC 机中,你需要准备一个 U 盘,以及 macOS 下载这个软件:[balenaEtcher(提取码:oqp9)](https://pan.baidu.com/s/1l5K48tfuCKdn0wR_62PjJA)
 - USBWriter 的使用很简单,如下图即可制作一个 CentOS 系统盘
 
 ![VMware 下安装](../images/CentOS-7-Install-a-0.jpg)

From 3cb8e187e69de094788cba5211226f8856485991 Mon Sep 17 00:00:00 2001
From: cdk8s-zelda <51701412+cdk8s-zelda@users.noreply.github.com>
Date: Mon, 5 Jul 2021 19:04:26 +0800
Subject: [PATCH 328/330] Update wrk-Install-And-Settings.md

---
 markdown-file/wrk-Install-And-Settings.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/markdown-file/wrk-Install-And-Settings.md b/markdown-file/wrk-Install-And-Settings.md
index f73c6330..f6b7c67f 100644
--- a/markdown-file/wrk-Install-And-Settings.md
+++ b/markdown-file/wrk-Install-And-Settings.md
@@ -26,7 +26,7 @@ sudo cp wrk /usr/local/bin
 
 ## 使用
 
-- 启用 10 个线程,每个线程发起 100 个连接,持续 15 秒:`wrk -t5 -c5 -d30s http://www.baidu.com`
+- 启用 10 个线程,共维持 100 个连接(`-c` 是连接总数,不是每线程连接数),持续 30 秒:`wrk -t10 -c100 -d30s http://www.baidu.com`
 - 最终报告:
 
 ```
@@ -62,4 +62,4 @@ wrk.headers["Content-Type"] = "application/x-www-form-urlencoded"
 
 ## 资料
 
 -
-- 
\ No newline at end of file
+- 

From 6010165e96420c229479f41ffc86e7a7f857b88c Mon Sep 17 00:00:00 2001
From: cdk8s-zelda <51701412+cdk8s-zelda@users.noreply.github.com>
Date: Mon, 31 Jan 2022 09:32:59 +0800
Subject: [PATCH 329/330] Update README.md

---
 README.md | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index c1b3fef1..064e3835 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,8 @@
-## 新的起点
-
-- [本系列的大部分文章都迁移到这里](https://github.com/cdk8s/cdk8s-team-style)
-- [Sculptor Boot:项目思维化的《代码生成器》体系(未来可期,做个朋友吧)](https://github.com/cdk8s/sculptor-boot-generator)
-- CDK8S:
-- TKey:
+## 作者新方向,感谢支持
+
+- [UPUPMO-扶持个人从开公司到全平台产品上线](https://www.bilibili.com/video/BV1Bb4y1j7dy)
 
 ## 初衷(Original Intention)

From 80109268d5b773c22cb8700d953d5ff01e52abde Mon Sep 17 00:00:00 2001
From: cdk8s-zelda <51701412+cdk8s-zelda@users.noreply.github.com>
Date: Sat, 11 Jun 2022 15:40:03 +0800
Subject: [PATCH 330/330] Update README.md

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 064e3835..80bf6ad0 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,8 @@
-
 ## 作者新方向,感谢支持
 
-- [UPUPMO-扶持个人从开公司到全平台产品上线](https://www.bilibili.com/video/BV1Bb4y1j7dy)
+- [从开公司到开发全平台产品(文字版)](https://github.com/cdk8s/cdk8s-team-style/blob/master/full-stack/README.md)
+- [从开公司到开发全平台产品(视频版)](https://space.bilibili.com/1765486559/channel/seriesdetail?sid=2359281)
 
 ## 初衷(Original Intention)
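(Editor's closing aside, not part of any commit: the wrk patch above, [PATCH 328/330], shows `wrk.headers["Content-Type"]` in its context lines, which comes from wrk's Lua scripting hook. A minimal hypothetical POST script using that API; the URL and form fields below are invented for illustration:)

```lua
-- hypothetical post.lua; run with: wrk -t10 -c100 -d30s -s post.lua http://example.com/login
-- wrk reads these globals before each request when no request() function is defined
wrk.method = "POST"
wrk.body   = "username=demo&password=demo"
wrk.headers["Content-Type"] = "application/x-www-form-urlencoded"
```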