diff --git a/.github/workflows/gh-pages.yml b/.github/workflows/gh-pages.yml deleted file mode 100644 index 9233057..0000000 --- a/.github/workflows/gh-pages.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: github pages - -on: - push: - branches: - - master # Set a branch to deploy - -jobs: - deploy: - runs-on: ubuntu-22.04 - steps: - - uses: actions/checkout@v4 - with: - submodules: true # Fetch Hugo themes (true OR recursive) - fetch-depth: 0 # Fetch all history for .GitInfo and .Lastmod - - - name: Setup Hugo - uses: peaceiris/actions-hugo@v2 - with: - hugo-version: 'latest' - - - name: Build - run: hugo --minify - - - name: Deploy - uses: peaceiris/actions-gh-pages@v3 - if: github.ref == 'refs/heads/master' - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - publish_dir: ./public - cname: virtualzone.de diff --git a/.gitignore b/.gitignore deleted file mode 100644 index efb895e..0000000 --- a/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -.DS_Store -.hugo_build.lock -public/ \ No newline at end of file diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index 89af1b0..0000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "themes/PaperMod"] - path = themes/PaperMod - url = https://github.com/adityatelange/hugo-PaperMod.git diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/2012/08/determining-a-locations-federal-state-using-google-maps-api/index.html b/2012/08/determining-a-locations-federal-state-using-google-maps-api/index.html new file mode 100644 index 0000000..d779fc8 --- /dev/null +++ b/2012/08/determining-a-locations-federal-state-using-google-maps-api/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/posts/determining-a-locations-federal-state-using-google-maps-api/ + \ No newline at end of file diff --git a/2012/11/how-to-reduce-pdf-file-size-in-linux/index.html b/2012/11/how-to-reduce-pdf-file-size-in-linux/index.html new file mode 100644 index 0000000..66c582d --- /dev/null +++ b/2012/11/how-to-reduce-pdf-file-size-in-linux/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/posts/reduce-pdf-file-size/ + \ No newline at end of file diff --git a/2014/11/how-to-enable-ipv6-on-a-sonicwall-sonicos-5-9-using-nat/index.html b/2014/11/how-to-enable-ipv6-on-a-sonicwall-sonicos-5-9-using-nat/index.html new file mode 100644 index 0000000..56ccd65 --- /dev/null +++ b/2014/11/how-to-enable-ipv6-on-a-sonicwall-sonicos-5-9-using-nat/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/posts/ipv6-on-a-sonicwall/ + \ No newline at end of file diff --git a/2015/08/how-to-reduce-pdf-file-size-part-2/index.html b/2015/08/how-to-reduce-pdf-file-size-part-2/index.html new file mode 100644 index 0000000..5c87acb --- /dev/null +++ b/2015/08/how-to-reduce-pdf-file-size-part-2/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/posts/reduce-pdf-file-size-2/ + \ No newline at end of file diff --git a/2016/08/fix-docker-not-using-etc-hosts-on-macos/index.html b/2016/08/fix-docker-not-using-etc-hosts-on-macos/index.html new file mode 100644 index 0000000..57df040 --- /dev/null +++ b/2016/08/fix-docker-not-using-etc-hosts-on-macos/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/posts/fix-docker-not-using-etc-hosts-on-macos/ + \ No newline at end of file diff --git a/2016/08/from-fhem-to-openhab-with-homegear-installation-docker-container/index.html b/2016/08/from-fhem-to-openhab-with-homegear-installation-docker-container/index.html new file mode 100644 index 0000000..e0775ff --- /dev/null +++ 
b/2016/08/from-fhem-to-openhab-with-homegear-installation-docker-container/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/posts/from-fhem-to-openhab-with-homegear-installation-docker-container/ + \ No newline at end of file diff --git a/2016/08/how-to-set-up-https-ssl-in-wordpress-behind-proxy-nginx-haproxy-apache-lighttpd/index.html b/2016/08/how-to-set-up-https-ssl-in-wordpress-behind-proxy-nginx-haproxy-apache-lighttpd/index.html new file mode 100644 index 0000000..e825bd3 --- /dev/null +++ b/2016/08/how-to-set-up-https-ssl-in-wordpress-behind-proxy-nginx-haproxy-apache-lighttpd/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/posts/https-ssl-in-wordpress-behind-proxy/ + \ No newline at end of file diff --git a/2016/09/uptimerobot-a-nice-free-website-monitoring-service/index.html b/2016/09/uptimerobot-a-nice-free-website-monitoring-service/index.html new file mode 100644 index 0000000..6519586 --- /dev/null +++ b/2016/09/uptimerobot-a-nice-free-website-monitoring-service/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/posts/uptime-robot-website-monitoring/ + \ No newline at end of file diff --git a/2016/12/creating-an-encrypted-file-container-on-macos/index.html b/2016/12/creating-an-encrypted-file-container-on-macos/index.html new file mode 100644 index 0000000..26b154d --- /dev/null +++ b/2016/12/creating-an-encrypted-file-container-on-macos/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/posts/encrypted-file-container-macos/ + \ No newline at end of file diff --git a/2017/02/using-lets-encrypt-effs-certbot-with-nginx-in-docker/index.html b/2017/02/using-lets-encrypt-effs-certbot-with-nginx-in-docker/index.html new file mode 100644 index 0000000..b0c3643 --- /dev/null +++ b/2017/02/using-lets-encrypt-effs-certbot-with-nginx-in-docker/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/posts/lets-encrypt-effs-certbot-with-nginx-in-docker/ + \ No newline at end of file diff --git a/2017/06/how-to-let-jenkins-build-docker-images/index.html b/2017/06/how-to-let-jenkins-build-docker-images/index.html new file mode 100644 index 0000000..7803fcb --- /dev/null +++ b/2017/06/how-to-let-jenkins-build-docker-images/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/posts/jenkins-build-docker-images/ + \ No newline at end of file diff --git a/2020/05/multi-arch-images-mit-docker-hub-bauen-teil-1/index.html b/2020/05/multi-arch-images-mit-docker-hub-bauen-teil-1/index.html new file mode 100644 index 0000000..803fd35 --- /dev/null +++ b/2020/05/multi-arch-images-mit-docker-hub-bauen-teil-1/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/posts/multi-arch-docker-images-1/ + \ No newline at end of file diff --git a/2020/05/multi-arch-images-mit-docker-hub-bauen-teil-2/index.html b/2020/05/multi-arch-images-mit-docker-hub-bauen-teil-2/index.html new file mode 100644 index 0000000..7bcae50 --- /dev/null +++ b/2020/05/multi-arch-images-mit-docker-hub-bauen-teil-2/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/posts/multi-arch-docker-images-2/ + \ No newline at end of file diff --git a/2020/05/nativer-usb-boot-raspberry-pi-4/index.html b/2020/05/nativer-usb-boot-raspberry-pi-4/index.html new file mode 100644 index 0000000..8054ced --- /dev/null +++ b/2020/05/nativer-usb-boot-raspberry-pi-4/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/posts/usb-boot-raspberry-pi/ + \ No newline at end of file diff --git a/2020/06/raspberry-pi-os-64-bit-lite-desktop-pakete-entfernen/index.html b/2020/06/raspberry-pi-os-64-bit-lite-desktop-pakete-entfernen/index.html new file mode 100644 index 
0000000..b238769 --- /dev/null +++ b/2020/06/raspberry-pi-os-64-bit-lite-desktop-pakete-entfernen/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/posts/raspberry-pi-os-remove-packages/ + \ No newline at end of file diff --git a/2020/06/traefik-access-log-influxdb-grafana-telegraf/index.html b/2020/06/traefik-access-log-influxdb-grafana-telegraf/index.html new file mode 100644 index 0000000..8428365 --- /dev/null +++ b/2020/06/traefik-access-log-influxdb-grafana-telegraf/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/posts/traefik-access-log-influxdb-grafana-telegraf/ + \ No newline at end of file diff --git a/2020/06/trainings-gpx-datei-endomondo-exportieren/index.html b/2020/06/trainings-gpx-datei-endomondo-exportieren/index.html new file mode 100644 index 0000000..c26ac4b --- /dev/null +++ b/2020/06/trainings-gpx-datei-endomondo-exportieren/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/posts/endomono-export-gpx/ + \ No newline at end of file diff --git a/2021/08/unifi-usg-multiple-ip-addresses-on-pppoe/index.html b/2021/08/unifi-usg-multiple-ip-addresses-on-pppoe/index.html new file mode 100644 index 0000000..a5606f3 --- /dev/null +++ b/2021/08/unifi-usg-multiple-ip-addresses-on-pppoe/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/posts/unifi-usg-multiple-ip-addresses-on-pppoe/ + \ No newline at end of file diff --git a/2021/09/back-up-server-to-onedrives-special-app-folder/index.html b/2021/09/back-up-server-to-onedrives-special-app-folder/index.html new file mode 100644 index 0000000..086f573 --- /dev/null +++ b/2021/09/back-up-server-to-onedrives-special-app-folder/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/posts/onedrive-upload-backup/ + \ No newline at end of file diff --git a/2021/09/setting-up-a-kubernetes-cluster-with-k3s-glusterfs-and-load-balancing/index.html b/2021/09/setting-up-a-kubernetes-cluster-with-k3s-glusterfs-and-load-balancing/index.html new file mode 100644 index 0000000..f86a2a8 --- /dev/null +++ b/2021/09/setting-up-a-kubernetes-cluster-with-k3s-glusterfs-and-load-balancing/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/posts/k3s-glusterfs/ + \ No newline at end of file diff --git a/404.html b/404.html new file mode 100644 index 0000000..eeffd09 --- /dev/null +++ b/404.html @@ -0,0 +1,6 @@ +404 Page not found | Virtualzone Blog +
404
+ \ No newline at end of file diff --git a/CNAME b/CNAME index 828de51..28bf5bf 100644 --- a/CNAME +++ b/CNAME @@ -1 +1 @@ -virtualzone.de \ No newline at end of file +virtualzone.de diff --git a/archetypes/default.md b/archetypes/default.md deleted file mode 100644 index 00e77bd..0000000 --- a/archetypes/default.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: "{{ replace .Name "-" " " | title }}" -date: {{ .Date }} -draft: true ---- - diff --git a/assets/css/extended/custom.css b/assets/css/extended/custom.css deleted file mode 100644 index f4cbad5..0000000 --- a/assets/css/extended/custom.css +++ /dev/null @@ -1,45 +0,0 @@ -.post-entry-multi-row { - display: flex; - flex-direction: row; - justify-content: space-between; - align-items: stretch; - gap: 15px; -} - - -.post-entry-multi-row > article.post-entry { - flex-grow: 1; - flex-basis: 33%; - /* .post-entry is a flex container itself */ - display: flex; - flex-direction: column; - justify-content: space-between; -} - -.entry-footer > svg { - width: 12px; - margin-right: 3px; -} - -article.post-entry > .cover-img { - float: right; - margin-left: 10px; - margin-bottom: 10px; -} - -article.post-entry > .cover-img img.seatsurfing { - object-fit: cover; - width: 300px; - height: 200px; - object-position: left top; -} - -@media (max-width: 720px) { - .post-entry-multi-row { - display: block; - } - - article.post-entry > .cover-img { - display: none; - } -} \ No newline at end of file diff --git a/assets/css/stylesheet.c1721e45d1e5db247de8596056f5101a1dffdd888f59d5001629341f526dc65d.css b/assets/css/stylesheet.c1721e45d1e5db247de8596056f5101a1dffdd888f59d5001629341f526dc65d.css new file mode 100644 index 0000000..e207fef --- /dev/null +++ b/assets/css/stylesheet.c1721e45d1e5db247de8596056f5101a1dffdd888f59d5001629341f526dc65d.css @@ -0,0 +1,7 @@ +/* + PaperMod v7 + License: MIT https://github.com/adityatelange/hugo-PaperMod/blob/master/LICENSE + Copyright (c) 2020 nanxiaobei and adityatelange + Copyright (c) 2021-2024 adityatelange +*/ +:root{--gap:24px;--content-gap:20px;--nav-width:1024px;--main-width:720px;--header-height:60px;--footer-height:60px;--radius:8px;--theme:rgb(255, 255, 255);--entry:rgb(255, 255, 255);--primary:rgb(30, 30, 30);--secondary:rgb(108, 108, 108);--tertiary:rgb(214, 214, 214);--content:rgb(31, 31, 31);--code-block-bg:rgb(28, 29, 33);--code-bg:rgb(245, 245, 245);--border:rgb(238, 238, 238)}.dark{--theme:rgb(29, 30, 32);--entry:rgb(46, 46, 51);--primary:rgb(218, 218, 219);--secondary:rgb(155, 156, 157);--tertiary:rgb(65, 66, 68);--content:rgb(196, 196, 197);--code-block-bg:rgb(46, 46, 51);--code-bg:rgb(55, 56, 62);--border:rgb(51, 51, 51)}.list{background:var(--code-bg)}.dark.list{background:var(--theme)}*,::after,::before{box-sizing:border-box}html{-webkit-tap-highlight-color:transparent;overflow-y:scroll;-webkit-text-size-adjust:100%;text-size-adjust:100%}a,button,body,h1,h2,h3,h4,h5,h6{color:var(--primary)}body{font-family:-apple-system,BlinkMacSystemFont,segoe ui,Roboto,Oxygen,Ubuntu,Cantarell,open sans,helvetica neue,sans-serif;font-size:18px;line-height:1.6;word-break:break-word;background:var(--theme)}article,aside,figcaption,figure,footer,header,hgroup,main,nav,section,table{display:block}h1,h2,h3,h4,h5,h6{line-height:1.2}h1,h2,h3,h4,h5,h6,p{margin-top:0;margin-bottom:0}ul{padding:0}a{text-decoration:none}body,figure,ul{margin:0}table{width:100%;border-collapse:collapse;border-spacing:0;overflow-x:auto;word-break:keep-all}button,input,textarea{padding:0;font:inherit;background:0 
0;border:0}input,textarea{outline:0}button,input[type=button],input[type=submit]{cursor:pointer}input:-webkit-autofill,textarea:-webkit-autofill{box-shadow:0 0 0 50px var(--theme)inset}img{display:block;max-width:100%}.not-found{position:absolute;left:0;right:0;display:flex;align-items:center;justify-content:center;height:80%;font-size:160px;font-weight:700}.archive-posts{width:100%;font-size:16px}.archive-year{margin-top:40px}.archive-year:not(:last-of-type){border-bottom:2px solid var(--border)}.archive-month{display:flex;align-items:flex-start;padding:10px 0}.archive-month-header{margin:25px 0;width:200px}.archive-month:not(:last-of-type){border-bottom:1px solid var(--border)}.archive-entry{position:relative;padding:5px;margin:10px 0}.archive-entry-title{margin:5px 0;font-weight:400}.archive-count,.archive-meta{color:var(--secondary);font-size:14px}.footer,.top-link{font-size:12px;color:var(--secondary)}.footer{max-width:calc(var(--main-width) + var(--gap) * 2);margin:auto;padding:calc((var(--footer-height) - var(--gap))/2)var(--gap);text-align:center;line-height:24px}.footer span{margin-inline-start:1px;margin-inline-end:1px}.footer span:last-child{white-space:nowrap}.footer a{color:inherit;border-bottom:1px solid var(--secondary)}.footer a:hover{border-bottom:1px solid var(--primary)}.top-link{visibility:hidden;position:fixed;bottom:60px;right:30px;z-index:99;background:var(--tertiary);width:42px;height:42px;padding:12px;border-radius:64px;transition:visibility .5s,opacity .8s linear}.top-link,.top-link svg{filter:drop-shadow(0 0 0 var(--theme))}.footer a:hover,.top-link:hover{color:var(--primary)}.top-link:focus,#theme-toggle:focus{outline:0}.nav{display:flex;flex-wrap:wrap;justify-content:space-between;max-width:calc(var(--nav-width) + var(--gap) * 2);margin-inline-start:auto;margin-inline-end:auto;line-height:var(--header-height)}.nav a{display:block}.logo,#menu{display:flex;margin:auto var(--gap)}.logo{flex-wrap:inherit}.logo a{font-size:24px;font-weight:700}.logo a img,.logo a svg{display:inline;vertical-align:middle;pointer-events:none;transform:translate(0,-10%);border-radius:6px;margin-inline-end:8px}button#theme-toggle{font-size:26px;margin:auto 4px}body.dark #moon{vertical-align:middle;display:none}body:not(.dark) #sun{display:none}#menu{list-style:none;word-break:keep-all;overflow-x:auto;white-space:nowrap}#menu li+li{margin-inline-start:var(--gap)}#menu a{font-size:16px}#menu .active{font-weight:500;border-bottom:2px solid}.lang-switch li,.lang-switch ul,.logo-switches{display:inline-flex;margin:auto 4px}.lang-switch{display:flex;flex-wrap:inherit}.lang-switch a{margin:auto 3px;font-size:16px;font-weight:500}.logo-switches{flex-wrap:inherit}.main{position:relative;min-height:calc(100vh - var(--header-height) - var(--footer-height));max-width:calc(var(--main-width) + var(--gap) * 2);margin:auto;padding:var(--gap)}.page-header h1{font-size:40px}.pagination{display:flex}.pagination a{color:var(--theme);font-size:13px;line-height:36px;background:var(--primary);border-radius:calc(36px/2);padding:0 16px}.pagination .next{margin-inline-start:auto}.social-icons a{display:inline-flex;padding:10px}.social-icons a svg{height:26px;width:26px}code{direction:ltr}div.highlight,pre{position:relative}.copy-code{display:none;position:absolute;top:4px;right:4px;color:rgba(255,255,255,.8);background:rgba(78,78,78,.8);border-radius:var(--radius);padding:0 5px;font-size:14px;user-select:none}div.highlight:hover .copy-code,pre:hover 
.copy-code{display:block}.first-entry{position:relative;display:flex;flex-direction:column;justify-content:center;min-height:320px;margin:var(--gap)0 calc(var(--gap) * 2)}.first-entry .entry-header{overflow:hidden;display:-webkit-box;-webkit-box-orient:vertical;-webkit-line-clamp:3}.first-entry .entry-header h1{font-size:34px;line-height:1.3}.first-entry .entry-content{margin:14px 0;font-size:16px;-webkit-line-clamp:3}.first-entry .entry-footer{font-size:14px}.home-info .entry-content{-webkit-line-clamp:unset}.post-entry{position:relative;margin-bottom:var(--gap);padding:var(--gap);background:var(--entry);border-radius:var(--radius);transition:transform .1s;border:1px solid var(--border)}.post-entry:active{transform:scale(.96)}.tag-entry .entry-cover{display:none}.entry-header h2{font-size:24px;line-height:1.3}.entry-content{margin:8px 0;color:var(--secondary);font-size:14px;line-height:1.6;overflow:hidden;display:-webkit-box;-webkit-box-orient:vertical;-webkit-line-clamp:2}.entry-footer{color:var(--secondary);font-size:13px}.entry-link{position:absolute;left:0;right:0;top:0;bottom:0}.entry-hint{color:var(--secondary)}.entry-hint-parent{display:flex;justify-content:space-between}.entry-cover{font-size:14px;margin-bottom:var(--gap);text-align:center}.entry-cover img{border-radius:var(--radius);pointer-events:none;width:100%;height:auto}.entry-cover a{color:var(--secondary);box-shadow:0 1px 0 var(--primary)}.page-header,.post-header{margin:24px auto var(--content-gap)}.post-title{margin-bottom:2px;font-size:40px}.post-description{margin-top:10px;margin-bottom:5px}.post-meta,.breadcrumbs{color:var(--secondary);font-size:14px;display:flex;flex-wrap:wrap}.post-meta .i18n_list li{display:inline-flex;list-style:none;margin:auto 3px;box-shadow:0 1px 0 var(--secondary)}.breadcrumbs a{font-size:16px}.post-content{color:var(--content)}.post-content h3,.post-content h4,.post-content h5,.post-content h6{margin:24px 0 16px}.post-content h1{margin:40px auto 32px;font-size:40px}.post-content h2{margin:32px auto 24px;font-size:32px}.post-content h3{font-size:24px}.post-content h4{font-size:16px}.post-content h5{font-size:14px}.post-content h6{font-size:12px}.post-content a,.toc a:hover{box-shadow:0 1px;box-decoration-break:clone;-webkit-box-decoration-break:clone}.post-content a code{margin:auto 0;border-radius:0;box-shadow:0 -1px 0 var(--primary)inset}.post-content del{text-decoration:line-through}.post-content dl,.post-content ol,.post-content p,.post-content figure,.post-content ul{margin-bottom:var(--content-gap)}.post-content ol,.post-content ul{padding-inline-start:20px}.post-content li{margin-top:5px}.post-content li p{margin-bottom:0}.post-content dl{display:flex;flex-wrap:wrap;margin:0}.post-content dt{width:25%;font-weight:700}.post-content dd{width:75%;margin-inline-start:0;padding-inline-start:10px}.post-content dd~dd,.post-content dt~dt{margin-top:10px}.post-content table{margin-bottom:var(--content-gap)}.post-content table th,.post-content table:not(.highlighttable,.highlight table,.gist .highlight) td{min-width:80px;padding:8px 5px;line-height:1.5;border-bottom:1px solid var(--border)}.post-content table th{text-align:start}.post-content table:not(.highlighttable) td code:only-child{margin:auto 0}.post-content .highlight table{border-radius:var(--radius)}.post-content .highlight:not(table){margin:10px auto;background:var(--code-block-bg)!important;border-radius:var(--radius);direction:ltr}.post-content li>.highlight{margin-inline-end:0}.post-content ul pre{margin-inline-start:calc(var(--gap) 
* -2)}.post-content .highlight pre{margin:0}.post-content .highlighttable{table-layout:fixed}.post-content .highlighttable td:first-child{width:40px}.post-content .highlighttable td .linenodiv{padding-inline-end:0!important}.post-content .highlighttable td .highlight,.post-content .highlighttable td .linenodiv pre{margin-bottom:0}.post-content code{margin:auto 4px;padding:4px 6px;font-size:.78em;line-height:1.5;background:var(--code-bg);border-radius:2px}.post-content pre code{display:grid;margin:auto 0;padding:10px;color:#d5d5d6;background:var(--code-block-bg)!important;border-radius:var(--radius);overflow-x:auto;word-break:break-all}.post-content blockquote{margin:20px 0;padding:0 14px;border-inline-start:3px solid var(--primary)}.post-content hr{margin:30px 0;height:2px;background:var(--tertiary);border:0}.post-content iframe{max-width:100%}.post-content img{border-radius:4px;margin:1rem 0}.post-content img[src*="#center"]{margin:1rem auto}.post-content figure.align-center{text-align:center}.post-content figure>figcaption{color:var(--primary);font-size:16px;font-weight:700;margin:8px 0 16px}.post-content figure>figcaption>p{color:var(--secondary);font-size:14px;font-weight:400}.toc{margin:0 2px 40px;border:1px solid var(--border);background:var(--code-bg);border-radius:var(--radius);padding:.4em}.dark .toc{background:var(--entry)}.toc details summary{cursor:zoom-in;margin-inline-start:20px}.toc details[open] summary{cursor:zoom-out}.toc .details{display:inline;font-weight:500}.toc .inner{margin:0 20px;padding:10px 20px}.toc li ul{margin-inline-start:var(--gap)}.toc summary:focus{outline:0}.post-footer{margin-top:56px}.post-footer>*{margin-bottom:10px}.post-tags{display:flex;flex-wrap:wrap;gap:10px}.post-tags li{display:inline-block}.post-tags a,.share-buttons,.paginav{border-radius:var(--radius);background:var(--code-bg);border:1px solid var(--border)}.post-tags a{display:block;padding:0 14px;color:var(--secondary);font-size:14px;line-height:34px;background:var(--code-bg)}.post-tags a:hover,.paginav a:hover{background:var(--border)}.share-buttons{padding:10px;display:flex;justify-content:center;overflow-x:auto;gap:10px}.share-buttons li,.share-buttons a{display:inline-flex}.share-buttons a:not(:last-of-type){margin-inline-end:12px}h1:hover .anchor,h2:hover .anchor,h3:hover .anchor,h4:hover .anchor,h5:hover .anchor,h6:hover .anchor{display:inline-flex;color:var(--secondary);margin-inline-start:8px;font-weight:500;user-select:none}.paginav{display:flex;line-height:30px}.paginav a{padding-inline-start:14px;padding-inline-end:14px;border-radius:var(--radius)}.paginav .title{letter-spacing:1px;text-transform:uppercase;font-size:small;color:var(--secondary)}.paginav .prev,.paginav .next{width:50%}.paginav span:hover:not(.title){box-shadow:0 1px}.paginav .next{margin-inline-start:auto;text-align:right}[dir=rtl] .paginav .next{text-align:left}h1>a>svg{display:inline}img.in-text{display:inline;margin:auto}.buttons,.main .profile{display:flex;justify-content:center}.main .profile{align-items:center;min-height:calc(100vh - var(--header-height) - var(--footer-height) - (var(--gap) * 2));text-align:center}.profile .profile_inner{display:flex;flex-direction:column;align-items:center;gap:10px}.profile img{border-radius:50%}.buttons{flex-wrap:wrap;max-width:400px}.button{background:var(--tertiary);border-radius:var(--radius);margin:8px;padding:6px;transition:transform .1s}.button-inner{padding:0 8px}.button:active{transform:scale(.96)}#searchbox input{padding:4px 
10px;width:100%;color:var(--primary);font-weight:700;border:2px solid var(--tertiary);border-radius:var(--radius)}#searchbox input:focus{border-color:var(--secondary)}#searchResults li{list-style:none;border-radius:var(--radius);padding:10px;margin:10px 0;position:relative;font-weight:500}#searchResults{margin:10px 0;width:100%}#searchResults li:active{transition:transform .1s;transform:scale(.98)}#searchResults a{position:absolute;width:100%;height:100%;top:0;left:0;outline:none}#searchResults .focus{transform:scale(.98);border:2px solid var(--tertiary)}.terms-tags li{display:inline-block;margin:10px;font-weight:500}.terms-tags a{display:block;padding:3px 10px;background:var(--tertiary);border-radius:6px;transition:transform .1s}.terms-tags a:active{background:var(--tertiary);transform:scale(.96)}.bg{color:#cad3f5;background-color:#24273a}.chroma{color:#cad3f5;background-color:#24273a}.chroma .x{}.chroma .err{color:#ed8796}.chroma .cl{}.chroma .lnlinks{outline:none;text-decoration:none;color:inherit}.chroma .lntd{vertical-align:top;padding:0;margin:0;border:0}.chroma .lntable{border-spacing:0;padding:0;margin:0;border:0}.chroma .hl{background-color:#474733}.chroma .lnt{white-space:pre;-webkit-user-select:none;user-select:none;margin-right:.4em;padding:0 .4em;color:#8087a2}.chroma .ln{white-space:pre;-webkit-user-select:none;user-select:none;margin-right:.4em;padding:0 .4em;color:#8087a2}.chroma .line{display:flex}.chroma .k{color:#c6a0f6}.chroma .kc{color:#f5a97f}.chroma .kd{color:#ed8796}.chroma .kn{color:#8bd5ca}.chroma .kp{color:#c6a0f6}.chroma .kr{color:#c6a0f6}.chroma .kt{color:#ed8796}.chroma .n{}.chroma .na{color:#8aadf4}.chroma .nb{color:#91d7e3}.chroma .bp{color:#91d7e3}.chroma .nc{color:#eed49f}.chroma .no{color:#eed49f}.chroma .nd{color:#8aadf4;font-weight:700}.chroma .ni{color:#8bd5ca}.chroma .ne{color:#f5a97f}.chroma .nf{color:#8aadf4}.chroma .fm{color:#8aadf4}.chroma .nl{color:#91d7e3}.chroma .nn{color:#f5a97f}.chroma .nx{}.chroma .py{color:#f5a97f}.chroma .nt{color:#c6a0f6}.chroma .nv{color:#f4dbd6}.chroma .vc{color:#f4dbd6}.chroma .vg{color:#f4dbd6}.chroma .vi{color:#f4dbd6}.chroma .vm{color:#f4dbd6}.chroma .l{}.chroma .ld{}.chroma .s{color:#a6da95}.chroma .sa{color:#ed8796}.chroma .sb{color:#a6da95}.chroma .sc{color:#a6da95}.chroma .dl{color:#8aadf4}.chroma .sd{color:#6e738d}.chroma .s2{color:#a6da95}.chroma .se{color:#8aadf4}.chroma .sh{color:#6e738d}.chroma .si{color:#a6da95}.chroma .sx{color:#a6da95}.chroma .sr{color:#8bd5ca}.chroma .s1{color:#a6da95}.chroma .ss{color:#a6da95}.chroma .m{color:#f5a97f}.chroma .mb{color:#f5a97f}.chroma .mf{color:#f5a97f}.chroma .mh{color:#f5a97f}.chroma .mi{color:#f5a97f}.chroma .il{color:#f5a97f}.chroma .mo{color:#f5a97f}.chroma .o{color:#91d7e3;font-weight:700}.chroma .ow{color:#91d7e3;font-weight:700}.chroma .p{}.chroma .c{color:#6e738d;font-style:italic}.chroma .ch{color:#6e738d;font-style:italic}.chroma .cm{color:#6e738d;font-style:italic}.chroma .c1{color:#6e738d;font-style:italic}.chroma .cs{color:#6e738d;font-style:italic}.chroma .cp{color:#6e738d;font-style:italic}.chroma .cpf{color:#6e738d;font-weight:700;font-style:italic}.chroma .g{}.chroma .gd{color:#ed8796;background-color:#363a4f}.chroma .ge{font-style:italic}.chroma .gr{color:#ed8796}.chroma .gh{color:#f5a97f;font-weight:700}.chroma .gi{color:#a6da95;background-color:#363a4f}.chroma .go{}.chroma .gp{}.chroma .gs{font-weight:700}.chroma .gu{color:#f5a97f;font-weight:700}.chroma .gt{color:#ed8796}.chroma .gl{text-decoration:underline}.chroma 
.w{}.chroma{background-color:unset!important}.chroma .hl{display:flex}.chroma .lnt{padding:0 0 0 12px}.highlight pre.chroma code{padding:8px 0}.highlight pre.chroma .line .cl,.chroma .ln{padding:0 10px}.chroma .lntd:last-of-type{width:100%}::-webkit-scrollbar-track{background:0 0}.list:not(.dark)::-webkit-scrollbar-track{background:var(--code-bg)}::-webkit-scrollbar-thumb{background:var(--tertiary);border:5px solid var(--theme);border-radius:var(--radius)}.list:not(.dark)::-webkit-scrollbar-thumb{border:5px solid var(--code-bg)}::-webkit-scrollbar-thumb:hover{background:var(--secondary)}::-webkit-scrollbar:not(.highlighttable,.highlight table,.gist .highlight){background:var(--theme)}.post-content .highlighttable td .highlight pre code::-webkit-scrollbar{display:none}.post-content :not(table) ::-webkit-scrollbar-thumb{border:2px solid var(--code-block-bg);background:#717175}.post-content :not(table) ::-webkit-scrollbar-thumb:hover{background:#a3a3a5}.gist table::-webkit-scrollbar-thumb{border:2px solid #fff;background:#adadad}.gist table::-webkit-scrollbar-thumb:hover{background:#707070}.post-content table::-webkit-scrollbar-thumb{border-width:2px}@media screen and (min-width:768px){::-webkit-scrollbar{width:19px;height:11px}}@media screen and (max-width:768px){:root{--gap:14px}.profile img{transform:scale(.85)}.first-entry{min-height:260px}.archive-month{flex-direction:column}.archive-year{margin-top:20px}.footer{padding:calc((var(--footer-height) - var(--gap) - 10px)/2)var(--gap)}}@media screen and (max-width:900px){.list .top-link{transform:translateY(-5rem)}}@media screen and (max-width:340px){.share-buttons{justify-content:unset}}@media(prefers-reduced-motion){.terms-tags a:active,.button:active,.post-entry:active,.top-link,#searchResults .focus,#searchResults li:active{transform:none}}.post-entry-multi-row{display:flex;flex-direction:row;justify-content:space-between;align-items:stretch;gap:15px}.post-entry-multi-row>article.post-entry{flex-grow:1;flex-basis:33%;display:flex;flex-direction:column;justify-content:space-between}.entry-footer>svg{width:12px;margin-right:3px}article.post-entry>.cover-img{float:right;margin-left:10px;margin-bottom:10px}article.post-entry>.cover-img img.seatsurfing{object-fit:cover;width:300px;height:200px;object-position:left top}@media(max-width:720px){.post-entry-multi-row{display:block}article.post-entry>.cover-img{display:none}} \ No newline at end of file diff --git a/categories/index.html b/categories/index.html new file mode 100644 index 0000000..3b6e4f9 --- /dev/null +++ b/categories/index.html @@ -0,0 +1,6 @@ +Categories | Virtualzone Blog +
+ \ No newline at end of file diff --git a/categories/index.xml b/categories/index.xml new file mode 100644 index 0000000..955eaf0 --- /dev/null +++ b/categories/index.xml @@ -0,0 +1,12 @@ + + + + Categories on Virtualzone Blog + https://virtualzone.de/categories/ + Recent content in Categories on Virtualzone Blog + Hugo -- gohugo.io + en-us + © 2024 Heiner Beck. + + + diff --git a/config.yml b/config.yml deleted file mode 100644 index 0ac6ca6..0000000 --- a/config.yml +++ /dev/null @@ -1,64 +0,0 @@ -baseURL: "https://virtualzone.de/" -languageCode: "en-us" -title: "Virtualzone Blog" -theme: "PaperMod" -Copyright: "© 2024 Heiner Beck." - -enableRobotsTXT: true -buildDrafts: false -buildFuture: false -buildExpired: false - -minify: - disableXML: true - minifyOutput: true - -params: - env: production # to enable google analytics, opengraph, twitter-cards and schema. - title: "Virtualzone Blog" - description: "Software development, Docker, Linux." - author: "Heiner" - DateFormat: "January 2, 2006" - defaultTheme: auto # dark, light - disableThemeToggle: true - ShowReadingTime: true - ShowShareButtons: true - ShowPostNavLinks: true - ShowBreadCrumbs: true - ShowCodeCopyButtons: false - ShowWordCount: true - ShowRssButtonInSectionTermList: true - UseHugoToc: true - disableSpecial1stPost: true - disableScrollToTop: false - comments: false - hidemeta: false - hideSummary: false - showtoc: false - tocopen: false - homeInfoParams: - Title: "Hi there \U0001F44B" - Content: Welcome to my blog - socialIcons: - - name: github - url: "https://github.com/virtualzone/" - - name: linkedin - url: "https://www.linkedin.com/in/heinerbeck/" -menu: - main: - - identifier: posts - name: Posts - url: /posts/ - weight: 10 - - identifier: tags - name: Tags - url: /tags/ - weight: 20 - - identifier: contact - name: Imprint - url: /contact/ - weight: 30 - - identifier: privacy-policy - name: Privacy Policy - url: /privacy-policy/ - weight: 40 diff --git a/contact/index.html b/contact/index.html new file mode 100644 index 0000000..31d75a2 --- /dev/null +++ b/contact/index.html @@ -0,0 +1,21 @@ +Contact | Virtualzone Blog +

Contact

Heiner Beck
Karl-Herbert-Scheer-Str. 6
61381 Friedrichsdorf
Germany

Email: mail@virtualzone.de

Limitation of liability for internal content

The content of our website has been compiled with meticulous care and to the best of our knowledge. However, we cannot assume any liability for the up-to-dateness, completeness or accuracy of any of the pages. Pursuant to section 7, para. 1 of the TMG (Telemediengesetz – Tele Media Act by German law), we as service providers are liable for our own content on these pages in accordance with general laws. However, pursuant to sections 8 to 10 of the TMG, we as service providers are not under obligation to monitor external information provided or stored on our website. Once we have become aware of a specific infringement of the law, we will immediately remove the content in question. Any liability concerning this matter can only be assumed from the point in time at which the infringement becomes known to us.

Limitation of liability for external links

Our website contains links to the websites of third parties (“external links”). As the content of these websites is not under our control, we cannot assume any liability for such external content. In all cases, the provider of information of the linked websites is liable for the content and accuracy of the information provided. At the point in time when the links were placed, no infringements of the law were recognisable to us. As soon as an infringement of the law becomes known to us, we will immediately remove the link in question.

Copyright

The content and works published on this website are governed by the copyright laws of Germany. Any duplication, processing, distribution or any form of utilisation beyond the scope of copyright law shall require the prior written consent of the author or authors in question.

Data protection

Using our website is possible without entering any personal data in most cases. Where personal information is required (such as your name, address or email address), it is provided on a voluntary basis to the extent possible. This information will not be transferred to any third parties without your approval. +Please note that communication via the internet (such as communication by email) may be subject to security flaws. Complete protection of data against access by third parties is not possible. +We object to the use of the contact information published on this website for promotional purposes. +Please read our privacy policy for information about how we protect your personal information.

Website imprint (Impressum) created with impressum-generator.de by the law firm Kanzlei Hasselbach.

+ \ No newline at end of file diff --git a/content/contact.md b/content/contact.md deleted file mode 100644 index 35b3c5f..0000000 --- a/content/contact.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: "Contact" -hideSummary: true -disableShare: true -ShowReadingTime: false -ShowPostNavLinks: false -ShowWordCount: false ---- - -Heiner Beck -Karl-Herbert-Scheer-Str. 6 -61381 Friedrichsdorf -Germany - -Email: mail@virtualzone.de - -## Limitation of liability for internal content -The content of our website has been compiled with meticulous care and to the best of our knowledge. However, we cannot assume any liability for the up-to-dateness, completeness or accuracy of any of the pages. Pursuant to section 7, para. 1 of the TMG (Telemediengesetz – Tele Media Act by German law), we as service providers are liable for our own content on these pages in accordance with general laws. However, pursuant to sections 8 to 10 of the TMG, we as service providers are not under obligation to monitor external information provided or stored on our website. Once we have become aware of a specific infringement of the law, we will immediately remove the content in question. Any liability concerning this matter can only be assumed from the point in time at which the infringement becomes known to us. - -## Limitation of liability for external links -Our website contains links to the websites of third parties (“external links”). As the content of these websites is not under our control, we cannot assume any liability for such external content. In all cases, the provider of information of the linked websites is liable for the content and accuracy of the information provided. At the point in time when the links were placed, no infringements of the law were recognisable to us. As soon as an infringement of the law becomes known to us, we will immediately remove the link in question. - -## Copyright -The content and works published on this website are governed by the copyright laws of Germany. Any duplication, processing, distribution or any form of utilisation beyond the scope of copyright law shall require the prior written consent of the author or authors in question. - -## Data protection -Using our website is possible without entering any personal data in most cases. As far as your personal information are required (such as your name, address or email addresses), this is on a voluntary basis to the extend possible. These information will not be transferred to any third parties without your approval. -Please note that communicating via the internet (such as communication by email) may be harmed by security flaws. A complete protection of data from the access through third parties is not possible. -We contradict the usage of the contact information published on this website for promotional purposes. -Please read our privacy policy for information about how we protect your personal information. - -Website Impressum erstellt durch [impressum-generator.de](https://www.impressum-generator.de/) von der [Kanzlei Hasselbach](https://www.kanzlei-hasselbach.de/). 
\ No newline at end of file diff --git a/content/posts/alpine-docker-rootless.md b/content/posts/alpine-docker-rootless.md deleted file mode 100644 index 72aafe9..0000000 --- a/content/posts/alpine-docker-rootless.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -title: "Setting up Alpine Linux with Rootless Docker" -date: 2022-06-19T15:00:00+00:00 -tags: - - linux - - docker -author: "Heiner" ---- - -As of Docker Engine v20.10, it's possible to run the Docker daemon as a non-root user (Rooless mode). This is especially valuable in view of security aspects. Rootless mode mitigates potential vulnerabilities in the Docker daemon. - -However, at the time of writing, setting up Docker in rootless mode is not straightforward if you're using Alpine Linux as your host system. This is why I summarized the steps to get Docket Rootless up and running on Alpine Linux. - -## Download and install Alpine -First, we'll download the Alpine Linux ISO image and install the OS. We'll then enable the community repository as it contains packages we'll need to set up Docker in non-root mode. - -1. Get Alpine Linux ISO from: https://www.alpinelinux.org/downloads/ -1. Boot system from ISO and run: - ``` - # setup-alpine - ``` -1. Reboot and install the nano edit: - ``` - # apk add nano - ``` -1. Enable community repository in the following file: - ``` - # nano /etc/apk/repositories - ``` -1. Update the index of available package: - ``` - # apk update - ``` - -## Add a user and allow her to use doas -If you did not create a regular user account during the installation, it's time to do it now: - -1. Install doas: - ``` - # apk add doas - ``` -1. Create user and add it to the `wheel` group in order to use root privileges: - ``` - # adduser wheel - ``` -1. Allow users in group `wheel` to use doas by editing the file `/etc/doas.d/doas.conf` and adding the following line: - ``` - permit persist :wheel - ``` -1. Log out and log in to the new account. - -## Install Docker Rootless -1. Install `newuidmap`, `newgidmap`, `fuse-overlayfs` and `iproute2` tools, all required by Rootless Docker: - ``` - # apk add shadow-uidmap fuse-overlayfs iproute2 - ```` -1. Enable cgroups v2 by editing `/etc/rc.conf` and setting `rc_cgroup_mode` to `unified`. -1. Enable the cgroups service: - ``` - # rc-update add cgroups && rc-service cgroups start - ```` -1. Allow your user to access Podman in rootless mode: - ``` - # modprobe tun - # echo tun >>/etc/modules - # echo :100000:65536 >/etc/subuid - # echo :100000:65536 >/etc/subgid - ``` -1. Install Docker and Docker Compose v2: - ``` - # apk add docker docker-cli-compose - ``` -1. Allow Docker access for your user: - ``` - # addgroup docker - ``` -1. Enable the iptables module: - ``` - # echo "ip_tables" >> /etc/modules - # modprobe ip_tables - ``` -1. Install Docker rootless: - ``` - $ curl -fsSL https://get.docker.com/rootless | sh - ``` -1. Create an init script in `/etc/init.d/docker-rootless`: - ``` - #!/sbin/openrc-run - - name=$RC_SVCNAME - description="Docker Application Container Engine (Rootless)" - supervisor="supervise-daemon" - command="/home//bin/dockerd-rootless.sh" - command_args="" - command_user="" - supervise_daemon_args=" -e PATH=\"/home//bin:/sbin:/usr/sbin:$PATH\" -e HOME=\"/home/\" -e XDG_RUNTIME_DIR=\"/home//.docker/run\"" - - reload() { - ebegin "Reloading $RC_SVCNAME" - /bin/kill -s HUP \$MAINPID - eend $? - } - - ``` -1. 
Make the created init script executable, add it to the default runlevel and start it: - ``` - # chmod +x /etc/init.d/docker-rootless - # rc-update add docker-rootless - # rc-service docker-rootless start - ```` -1. Create a `.profile` file in your home directory with the following contents: - ``` - export XDG_RUNTIME_DIR="$HOME/.docker/run" - export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/docker.sock - export PATH="/home//bin:/sbin:/usr/sbin:$PATH" - ``` -1. Log out and log in again. -1. Check if Docker Rootless works: - ``` - $ docker ps - $ docker run --rm hello-world - ``` - -## Allow ports < 1024 (optional) -By default, only ports >= 1024 can be exposed by non-root users. To change this, change the minimum unprivileged port in `/etc/sysctl.conf`: -``` -# echo "net.ipv4.ip_unprivileged_port_start=80" >> /etc/sysctl.conf -``` \ No newline at end of file diff --git a/content/posts/alpine-podman.md b/content/posts/alpine-podman.md deleted file mode 100644 index bb7466e..0000000 --- a/content/posts/alpine-podman.md +++ /dev/null @@ -1,178 +0,0 @@ ---- -title: "Setting up Alpine Linux with Podman" -date: 2022-06-25T18:00:00+00:00 -tags: - - linux - - docker -author: "Heiner" ---- - -Recently, I've written a blog post on [how to set up Rootless Docker on Alpine Linux](/posts/alpine-docker-rootless/). Today I'm showing you how to set up Podman. Podman has a rootless architecture built in. It's an alternative to Docker, providing an almost identical command line interface. Thus, if you're used to Docker CLI, you won't have any issues working with Podman. - -Podman was initially developed by RedHat and is available as an open source project. You can run your well known Docker images from Docker Hub and other registries without any changes. This is due to the fact that both Docker and Podman are compatible with Open Container Initiative (OCI) images. - -In my tests, Podman had a signicantly smaller memory footprint. From my point of view, it seems perfectly suitable for low power machines. However, it comes without a daemon, so you'll have to set up some init scripts in order to restart your containers when your system reboots. I'll cover this at the end of this article. - -## Download and install Alpine -First, we'll download the Alpine Linux ISO image and install the OS. We'll then enable the community repository as it contains packages we'll need to set up Docker in non-root mode. - -1. Get Alpine Linux ISO from: https://www.alpinelinux.org/downloads/ -1. Boot system from ISO and run: - ``` - # setup-alpine - ``` -1. Reboot and install the nano edit: - ``` - # apk add nano - ``` -1. Enable community repository in the following file: - ``` - # nano /etc/apk/repositories - ``` -1. Update the index of available package: - ``` - # apk update - ``` - -## Add a user and allow her to use doas -If you did not create a regular user account during the installation, it's time to do it now: - -1. Install doas: - ``` - # apk add doas - ``` -1. Create user and add it to the `wheel` group in order to use root privileges: - ``` - # adduser wheel - ``` -1. Allow users in group `wheel` to use doas by editing the file `/etc/doas.d/doas.conf` and adding the following line: - ``` - permit persist :wheel - ``` -1. Log out and log in to the new account. - -## Install Podman -Now comes the important part: Setting up Podman. - -1. Enable cgroups v2 by editing `/etc/rc.conf` and setting `rc_cgroup_mode` to `unified`. -1. Enable the cgroups service: - ``` - # rc-update add cgroups && rc-service cgroups start - ```` -1. 
Install podman: - ``` - # apk add podman - ``` -1. Allow your user to access Podman in rootless mode: - ``` - # modprobe tun - # echo tun >>/etc/modules - # echo :100000:65536 >/etc/subuid - # echo :100000:65536 >/etc/subgid - ``` -1. Enable the iptables module: - ``` - # echo "ip_tables" >> /etc/modules - # modprobe ip_tables - ``` -1. Check if Podman works by running a Hello World container using your user account: - ``` - $ podman run --rm hello-world - ``` - -## Allow ports < 1024 (optional) -By default, only ports >= 1024 can be exposed by non-root users. To change this, change the minimum unprivileged port in `/etc/sysctl.conf`: -``` -$ sudo echo "net.ipv4.ip_unprivileged_port_start=80" >> /etc/sysctl.conf -``` - -## Using Podman and Pods -If you are used to Docker, you can use Podman just the way to used to control Docker. One difference is that Podman can group multiple containers into Pods (that's where the name comes from: Pod Manager). You may know Pods from Kubernetes. Containers in a Pod share a namespace, a network and a security context. - -List running containers: -``` -podman ps -``` - -List existing pods: -``` -podman pod ps -``` - -Create a new pod: -``` -podman pod create pod-web -``` - -Create a container inside the previously created Pod: -``` -podman run --rm -d \ - --pod pod-web \ - docker.io/library/nginx:alpine -``` - -## Starting containers on system start -Because Podman follows a daemonless concept, containers are not started along with the non-existing Daemon on system boot. Instead, Podman recommends using systemd to start, stop and restart containers when the system starts. - -On Alpine, we're using OpenRC instead of systemd by default. I'm using Podman's built-in functionity for exporting and importing Kubernetes YAML definitions together with a small OpenRC init script. - -1. Install runuser so your init script can create Pods in the name of your rootless user: - ``` - # apk add runuser - ``` -1. Create a folder to store your init scripts, such as `/home//pods/init.d/`. -1. Generate a Kubernetes YAML for an existing Pod by issuing the following command and saving the YAML file in your previously created directory: - ``` - podman generate kube - ``` - Alternatively, you can write the YAML file manually. Please refer to [Podman's documention](https://docs.podman.io/en/latest/markdown/podman-generate-kube.1.html) for more information on supported (and unsupported) Kubernetes YAML syntax. -1. Create a file named `pod` in this folder with the following contents and make it executable (`chmod +x pod`): - ```bash - #!/sbin/openrc-run - - depend() { - after network-online - use net - } - - cleanup() { - /sbin/runuser -u ${command_user} ${command} pod exists ${pod_name} - result=$? - if [ $result -eq 0 ]; then - /sbin/runuser -u ${command_user} ${command} pod stop ${pod_name} - /sbin/runuser -u ${command_user} ${command} pod rm ${pod_name} - fi - } - - start_pre() { - cleanup - } - - stop() { - ebegin "Stopping $RC_SVCNAME" - cleanup - eend $? - } - ``` -1. Create one init script per Pod you want to control with the following contents (adjust as needed). Name it appropriately and make it executable (i.e. `chmod +x pod-traefik`): - ```bash - #!/sbin/openrc-run - - name=$RC_SVCNAME - pod_name=traefik - command_user="" - command="/usr/bin/podman" - command_args="play kube --network traefik /home/${command_user}/pods/${pod_name}/pod.yaml" - - source "/home/${command_user}/pods/init.d/pod" - ``` -1. 
Create a symlink in `/etc/init.d/`: - ``` - # cd /etc/init.d && ln -s /home//pods/pod-traefik - ``` -1. Use rc-update to the add your OpenRC Pod init script to the default runlevel: - ``` - # rc-update add pod-traefik - ``` - -**Update:** I've improved the OpenRC scripts. Please read the corresponding [blog post](/posts/openrc-podman-kube-play/). \ No newline at end of file diff --git a/content/posts/determining-a-locations-federal-state-using-google-maps-api.md b/content/posts/determining-a-locations-federal-state-using-google-maps-api.md deleted file mode 100644 index 1d04013..0000000 --- a/content/posts/determining-a-locations-federal-state-using-google-maps-api.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: "Determining a location’s federal state using Google Maps API" -date: 2012-08-10T11:30:03+00:00 -tags: - - google - - api -author: "Heiner" -aliases: - - /2012/08/determining-a-locations-federal-state-using-google-maps-api/ ---- - -If you have to find out which federal state a city belongs to, you can use the Google Maps API v3. Here is a straightforward JavaScript code snippet: - -```javascript -function log(s) { - $('#sysout').append(document.createTextNode(s + 'n')); -} - -function getResult(results) { - for (var i=0; i -1) { - return result['address_components'][j]['short_name']; - } - } - return ''; -} - -function getCountry(result) { - return extractFirst(result, 'country'); -} - -function getFederalState(result) { - return extractFirst(result, 'administrative_area_level_1'); -} - -function searchLocation() { - $('#sysout').empty(); - - var location = $('#location').val(); - var geocoder; - - log('Looking up "' + location + '"'); - - geocoder = new google.maps.Geocoder(); - geocoder.geocode({'address': location}, function(results, status) { - if (status != google.maps.GeocoderStatus.OK) { - log('error: ' + status); - return; - } - if (results.length == 0) { - log('no result'); - return; - } - - log('Resolved to ' + results[0]['formatted_address']); - - var latlng = results[0]['geometry']['location']; - geocoder.geocode({'latLng': latlng}, function(results, status) { - if (status != google.maps.GeocoderStatus.OK) { - log('error: ' + status); - return; - } - var desiredResult = getResult(results); - if (desiredResult) { - log('Federal State: ' + getFederalState(desiredResult)); - } - }); - }); - - return false; -} - -$(document).bind('ready', function() { - new google.maps.places.Autocomplete(document.getElementById('location'), {}); - $('#form').submit(searchLocation); -}); -``` \ No newline at end of file diff --git a/content/posts/dns-proxy-forwarder-blackhole.md b/content/posts/dns-proxy-forwarder-blackhole.md deleted file mode 100644 index 90552bc..0000000 --- a/content/posts/dns-proxy-forwarder-blackhole.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: "Go-hole: A minimalistic DNS proxy and and blocker" -date: 2023-02-05T06:00:00+00:00 -tags: - - linux - - docker -author: "Heiner" ---- - -You'll probably know Pi-hole. It's a popular "DNS sinkhole" – a DNS proxy server which blocks certain requests, such a as those for well-known ad serving domains. The effect is a much less ad-cluttered web experience in your home network. - -I've been using Pi-hole for several years as a Docker container on a Raspberry Pi. The Raspi is serving as a small home server on my home network. - -However, as much as I like Pi-hole, I felt it got loaded with new features over the years and performed slower over the time. DNS queries took longer and longer until they were answered. 
With this experience in mind and out of pure interest (how complicated would it be to create a DNS proxy on my own?) I've created [Go-hole](https://github.com/virtualzone/go-hole). - -## What is Go-hole? -[Go-hole](https://github.com/virtualzone/go-hole) is written in Go and very minimalistic with an eye to the primary requirements. However, it has all the features I personally need on my home network: - -* Act as a network-wide central DNS server, handling all DNS queries from all queries -* Forward incoming queries to one or more upstream DNS servers -* Cache upstream query results for extremely fast recurring lookup handling -* Block queries for well-known ad-serving and malicious domains by using definable block list URLs -* Regularly update the black list source files -* Whitelist certain domains which would be blocked in view of the set up black lists -* Resolve local names - -## How does it work? -Go-hole serves as DNS server on your (home) network. Instead of having your clients sending DNS queries directly to the internet or to your router, they are resolved by your local Go-hole instance. Go-hole sends these queries to one or more upstream DNS servers and caches the upstream query results for maximum performance. - -Incoming queries from your clients are checked against a list of unwanted domain names ("blacklist"), such as well-known ad serving domains and trackers. If a requested name matches a name on the blacklist, Go-hole responds with error code NXDOMAIN (non-existing domain). This leads to clients not being able to load ads and tracker codes. In case you want to access a blacklisted domain, you can easily add it to a whitelist. - -As an additional feature, you can set a list of custom host names/domain names to be resolved to specific IP addresses. This is useful for accessing services on your local network by name instead of their IP addresses. - -## How to use Go-hole? -The simplest way of getting Go-hole up and running is by using the [pre-built Docker images](https://github.com/virtualzone/go-hole/pkgs/container/go-hole). - -First, create a configuration file named ```config.yaml```. You can take a list at the [example config file](https://github.com/virtualzone/go-hole/blob/main/config.yaml) in the GitHub repository. On my home network, my ```config.yaml``` looks like this: - -```yaml -listen: 0.0.0.0:53 -upstream: - - 8.8.8.8:53 - - 8.8.4.4:53 -blacklist: - - https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts -blacklistRenewal: 1440 -whitelist: - - raw.githubusercontent.com - - www.googletagservices.com -local: - - name: ha - target: - - address: 192.168.40.31 - type: A - - address: 2a01:170:1172:40:40::31 - type: AAAA -``` - -This config sets the following: - -* ```listen``` sets the listing address to 0.0.0.0 (any address) and the listing port to 53 (default DNS). -* ```upstream``` sets the upstream DNS servers to Google's DNS. -* ```blacklist``` sets the black list source URL. -* ```blacklistRenewal``` sets the automatic blacklist updating to a 1 day interval (1440 minutes). -* ```whitelist``` whitelists two domains which would be blacklisted otherwise. -* ```local``` sets an IPv4 (A record) and IPv6 (AAAA record) for the local name "ha". 
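Once the container from the `docker run` command below is up, the behavior configured above can be spot-checked with `dig` from the Docker host. This is only a quick sanity-check sketch, assuming the container is reachable on 127.0.0.1:53 and that `doubleclick.net` happens to be covered by the configured block list:

```bash
# Should resolve normally via the upstream servers (8.8.8.8 / 8.8.4.4)
dig @127.0.0.1 example.com +short

# Expected to be blocked: Go-hole should answer with NXDOMAIN
dig @127.0.0.1 doubleclick.net

# Locally defined name from the "local" section (A and AAAA records)
dig @127.0.0.1 ha A +short
dig @127.0.0.1 ha AAAA +short
```

If the blocked query comes back with status NXDOMAIN while the whitelisted and local names still resolve, the blacklist, whitelist and local records are all in effect.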
- -After you've prepared your configuration file, you can start the Docker container like this: - -```bash -docker run \ - --rm \ - --mount type=bind,source=${PWD}/config.yaml,target=/app/config.yaml \ - -p 53:53/udp \ - ghcr.io/virtualzone/go-hole:latest -``` - -If you don't want to run Go-hole with Docker (or [Podman, like I do](/posts/alpine-podman/)), you can use the [pre-built binaries](https://github.com/virtualzone/go-hole/releases) or build Go-hole from source. - -## Conclusion -I'm using Go-hole for several weeks now as my home network's DNS server. It has completely replaced Pi-hole for my use cases. I've not observed any crashes or instabilities yet. My home network's DNS resolving times have greatly improved, making web browsing much faster than it has been before. Of course, Pi-hole has a lot more features than Go-hole. My implementation doesn't feature a web interface and for sure lacks other things you might like. However, none of these features are relevant to me. - -I'd be happy to hear about your experience with this Pi-hole alternative. \ No newline at end of file diff --git a/content/posts/encrypted-file-container-macos.md b/content/posts/encrypted-file-container-macos.md deleted file mode 100644 index 1bb19c2..0000000 --- a/content/posts/encrypted-file-container-macos.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: "Creating an encrypted file container on macOS" -date: 2016-12-06T11:30:03+00:00 -tags: - - macos -author: "Heiner" -aliases: - - /2016/12/creating-an-encrypted-file-container-on-macos/ ---- - -Some years ago, I’ve used TrueCrypt to create encrypted containers for storing sensitive files. However, TrueCrypt is nowadays considered insecure and I’m on macOS Sierra 10.12 now – time for another solution. Luckily, macOS has integrated means for creating encrypted containers and saving sensitive information in it. You don’t need any additional software for this. As far as I know, this solution also works for previous versions of Mac OS X, like Mac OS X 10.11 (El Capitan) and Mac OS X 10.10 (Yosemite). - -These containers are saved as DMG files. You probably know this file extension from installing downloaded software on your Mac. DMG files are Apple Disk Images, bundling a set of folders and files into a single file. Unlike installation images downloaded from the web, these DMG files can optionally be encrypted using an AES 128 bit or AES 256 bit encryption key. - -To create an encrypted file container, open the Disk Utility using the Spotlight Search (press Cmd + Space). - -Using the menu bar, navigate to “File” > “New Image” > “Blank Image…”. - -Choose an appropriate name for your image and select the following settings: - -* Save as: The filename of your encrypted DMG file. -* Name: A name shown when your DMG file is mounted. -* Size: The size of your container. The DMG file will take exactly the specified size and the amount of data you can store in the container is limited to this specified size. However, you can shrink and grow your DMG at a later time. -* Format: Choose “Mac OS Extended (Journaled)”. -* Encryption: Choose between 128 bit AES and 256 bit AES encryption (for sensitive information, I’d go for 256 bit, just in case…). You’ll be prompted to enter an encryption key. Be sure to remember this one really good. There will be no way to recover a lost encryption key! -* Partitions: Choose “Single Partition – Apple Partition Map”. -* Image Format: Choose “read/write disk image”. - -Next, click “Create” to create your image. 
This may take a few minutes, depending on the size of your DMG and the speed of the device you're creating the container on (i.e. a network share).
\ No newline at end of file diff --git a/content/posts/endomono-export-gpx.md deleted file mode 100644 index 3859143..0000000 --- a/content/posts/endomono-export-gpx.md +++ /dev/null @@ -1,48 +0,0 @@
---
title: "Export trainings from Endomondo as GPX files"
date: 2020-06-01T11:30:03+00:00
tags:
  - endomondo
  - api
author: "Heiner"
aliases:
  - /2020/06/trainings-gpx-datei-endomondo-exportieren/
---

I've been using Endomondo for years to track my trainings. However, I've been experiencing a lot of issues with Endomondo over the last months: Sometimes it's not possible to log in. Other times, my trainings won't get synced. So it's time for a new app. I've decided to give Strava a try. With a few lines of code, I've managed to export all my training data as GPX files. These can be imported into Strava, so my training history won't get lost.

There's an [article on Strava's website](https://support.strava.com/hc/en-us/articles/216917747-Moving-your-activity-history-from-Endomondo-to-Strava) on how to move from Endomondo to Strava. But the answer is a bit too easy: Using Endomondo's website, you can only export a single training at a time in GPX file format.

The good: GPX (GPS Exchange Format) is a standard file format used to exchange GPS coordinates. Using the GPS waypoints and some metadata (i.e. date, type of training), each of your trainings can be reconstructed.

The bad: I've done more than 1,000 trainings in Endomondo and I'm not willing to export each of them one by one.

In Node.JS' module repository, npmjs.com, there's a [module named endomondo-api-handler](https://www.npmjs.com/package/endomondo-api-handler). Using this, it's easy to search, select and download trainings from Endomondo's servers:

```javascript
await api.processWorkouts(filter, async (workout) => {
    if (workout.hasGPSData()) {
        let filename = getFilename(workout);
        let gpx = await api.getWorkoutGpx(workout.getId());
        fs.writeFileSync(filename, gpx, 'utf8');
    }
});
```

I've used this module to create a [little Node.JS tool](https://github.com/virtualzone/endomondo-exporter) which can be found on my GitHub account. You can use it to export *all* of a year's trainings from Endomondo:

```bash
./index.js --username=... --password=... --year=2019 --month=11 --dir=/home/john/trainings
```

In order to use this tool, [Node.JS](https://nodejs.org/) must be installed on your computer. You can then check out the tool's source code from my GitHub repository and run the following commands to make the tool ready to run:

```bash
git clone https://github.com/virtualzone/endomondo-exporter.git
cd endomondo-exporter
npm install
```

Importing GPX files to Strava is quite easy: You can upload 25 training files at once. There seems to be some rate limiting. I've received server errors after several imports. Waiting a few minutes solved that.
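Because of the 25-file limit, it can help to sort the exported GPX files into batches of 25 before uploading them. A small sketch using only standard shell tools; the export directory matches the example above and is otherwise an assumption:

```bash
# Move the exported GPX files into sub-folders of 25 files each (batch_0, batch_1, ...)
cd /home/john/trainings
i=0
for f in *.gpx; do
  d="batch_$((i / 25))"
  mkdir -p "$d" && mv "$f" "$d/"
  i=$((i + 1))
done
```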
- -![](/img/strava-import.png) \ No newline at end of file diff --git a/content/posts/fix-docker-not-using-etc-hosts-on-macos.md b/content/posts/fix-docker-not-using-etc-hosts-on-macos.md deleted file mode 100644 index 1ce143e..0000000 --- a/content/posts/fix-docker-not-using-etc-hosts-on-macos.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: "Fix Docker not using /etc/hosts on MacOS" -date: 2016-08-28T11:30:03+00:00 -tags: - - macos - - docker -author: "Heiner" -aliases: - - /2016/08/fix-docker-not-using-etc-hosts-on-macos/ ---- - -On my MacBook with Mac OS X 10.11 (El Capitan) and Docker 1.12.0, Docker did not read manually set DNS entries from the /etc/hosts file. - -When I executed “docker push” for example, this resulted in “no such hosts” errors: - -```log -Put http://shuttle:5000/v1/repositories/webfrontend/: dial tcp: lookup shuttle on 192.168.65.1:53: no such host -``` - -On Mac OS, Docker is running in a host container itself. Thus, you’ll have to add DNS entries to the container’s /etc/hosts file. To fix it, get into the running Docker Host: - -```bash -screen ~/Library/Containers/com.docker.docker/Data/com.docker.driver.amd64-linux/tty -``` - -This took a while on my machine, I needed to press Ctrl+C for the login prompt to show up. Log in with “root” (no password required). - -Edit the /etc/hosts file in the Docker Host using vi: - -```bash -vi /etc/hosts -``` - -Note: Insert after pressing “i”, save by pressing Escape and then type “:wq” . - -Restart the Docker Daemon with: - -```bash -service docker restart -``` - -Detach from the screen session by pressing Ctrl+A, then press D. - -Docker should now use the correct /etc/hosts entries. \ No newline at end of file diff --git a/content/posts/from-fhem-to-openhab-with-homegear-installation-docker-container.md b/content/posts/from-fhem-to-openhab-with-homegear-installation-docker-container.md deleted file mode 100644 index 1143fd7..0000000 --- a/content/posts/from-fhem-to-openhab-with-homegear-installation-docker-container.md +++ /dev/null @@ -1,157 +0,0 @@ ---- -title: "From FHEM to OpenHAB with Homegear: Installation/Docker container" -date: 2016-08-28T11:30:03+00:00 -tags: - - fhem - - openhab - - homeautomation - - docker -author: "Heiner" -aliases: - - /2016/08/from-fhem-to-openhab-with-homegear-installation-docker-container/ ---- - -For more than 2.5 years, I’ve now been running FHEM with several HomeMatic sensors and actors. Using the HM-CFG-LAN Configuration Tool as an I/O interface between [FHEM](http://fhem.de/) and the [HomeMatic](http://www.homematic.com/) devices, this setup has been running smoothly most of the time. The configuration was a bit tricky now and then, but it worked. However, [OpenHAB](http://www.openhab.org/) seems to become a really good choice. Version 2 is currently available as Beta 3. It features a modern web interface and an easy-to-use extension manager. More than a good reason to have a look at it. In this post, I’m going to show how to get started. - -If you don’t know OpenHAB yet, here’s a short summary: OpenHAB is a vendor and technology agnostic open source automation software for smart homes. The software is developed in Java, has an extensible OSGI architecture and an actively growing community. It comes with a responsive web interface, allowing for being used on desktops and mobile devices equally. Last but not least, OpenHAB features a catchy programming syntax for rules, triggers, scripts and notifications. - -OpenHAB has an integrated HomeMatic binding. 
If you’re using a CCU2, you can start with OpenHAB right out of the box. If you’re using another I/O interface like the HM-CFG-LAN Configuration Tool, you’ll need [Homegear](https://www.homegear.eu/) as an additional piece of software. Homegear communicates with your HomeMatic devices through the I/O interface. OpenHAB then connects to Homegear, which allows you to control all your HomeMatic sensors and actors using the OpenHAB software. - -To get started, you should first choose if you’re going with Docker Containers (my preferred way of running server applications) or if you want to install OpenHAB and Homegear directly on your Linux System. - -## Option 1: Using Docker Compose -There are official [Docker Images for OpenHAB](https://hub.docker.com/r/openhab/openhab/). However, there was no working image for Homegear. So I created my own: You can use this [Docker Image for Homegear](https://hub.docker.com/r/virtualzone/homegear/) if you want to. - -1. Make sure that Docker is set up correctly and that the Docker Daemon is running. Read [Docker’s official guide](https://docs.docker.com/) for your operating system if you’re unsure. -1. Make sure that [Docker Compose](https://docs.docker.com/compose/overview/) is installed. I’m using Docker Compose instead of manually scoring the two containers because it’s much more convenient. -1. Create a directory for your OpenHAB setup, such as: - -```bash -mkdir -p /docker/containers/openhab -``` - -4. Create a docker-compose.yml file in this directory with the following content: - -```yaml -version: '2' -services: - openhab: - image: openhab/openhab:amd64-online - volumes: - - "/etc/localtime:/etc/localtime:ro" - - "/etc/timezone:/etc/timezone:ro" - - "/docker/storage/openhab/conf:/openhab/conf" - - "/docker/storage/openhab/userdata:/openhab/userdata" - ports: - - "8080:8080" - depends_on: - - homegear - links: - - homegear - homegear: - image: virtualzone/homegear - volumes: - - "/etc/localtime:/etc/localtime:ro" - - "/etc/timezone:/etc/timezone:ro" - - "/docker/storage/homegear/homematicbidcos.conf:/etc/homegear/families/homematicbidcos.conf" - - "/docker/storage/homegear/sql.db:/var/lib/homegear/db.sql" -``` - -This defines two containers: One for OpenHAB and one for Homegear. The OpenHAB container depends on Homegear (“depends_on”), so Docker Compose makes sure that Homegear is started before OpenHAB. Check the paths of the volumes. They’re probably different on your system. - -5. Start up this composition by executing this command from the directory created above: - -```bash -docker-compose up -d -``` - -The -d flag means “detached”, which makes the two docker containers run in the background. Skip this option if you want to see what’s going on. - -6. Check if everything is fine: - -```bash -docker-compose logs -``` - -## Option 2: Docker without Compose -This option is similar to option 1. However, you’ll have to start the two Docker Containers separately and manually, making sure that Homegear if started before OpenHAB. - -1. Make sure that Docker is set up correctly and that the Docker Daemon is running. Read [Docker’s official guide](https://docs.docker.com/) for your operating system if you’re unsure. -1. Launch Homegear with the following command. 
You may want to copy the command to an executable shell file, so it’s handier to re-execute it later: - -```bash -docker run \ - --name homegear \ - -v /etc/localtime:/etc/localtime:ro \ - -v /etc/timezone:/etc/timezone:ro \ - -v /docker/storage/homegear/homematicbidcos.conf:/etc/homegear/families/homematicbidcos.conf \ - -v /docker/storage/homegear/sql.db:/var/lib/homegear/db.sql \ - -d \ - --restart=always \ - virtualzone/homegear -``` - -3. Launch OpenHAB with the following command: - -```bash -docker run \ - --name openhab \ - -v /etc/localtime:/etc/localtime:ro \ - -v /etc/timezone:/etc/timezone:ro \ - -v /docker/storage/openhab/conf:/openhab/conf \ - -v /docker/storage/openhab/userdata:/openhab/userdata \ - -p 8080:8080 \ - --link homegear:homegear \ - -d \ - --restart=always \ - openhab/openhab:amd64-online -``` - -4. Check if both containers are running: - -```bash -docker ps -docker logs homegear -docker exec homegear tail -n 100 /var/log/homegear/homegear.err -docker exec homegear tail -n 100 /var/log/homegear/homegear.log -docker logs openhab -``` - -## Option 3: Installation without Docker -If you’re not comfortable with Docker, please refer to the [download page of Homegear](https://www.homegear.eu/index.php/Downloads) and the [install guides for OpenHAB](https://www.openhab.org/docs/). - -## Configuring Homegear -Please note that if you’re running FHEM, you’ll have to stop it first. You can’t make two applications connect to the same HomeMatic I/O device (such as the HM-CFG-LAN). As of version 0.6, the HomeMatic configuration of Homegear is not in /etc/homegear/physicalinterfaces.conf anymore. Instead it’s in: /etc/homegear/families/homematicbidcos.conf If you’re using Docker, you’ll have to edit the file in the corresponding path of your host system (such as /docker/storage/homegear/homematicbidcos.conf). My homematicbidcos.conf looks like this: - -```ini -[HomeMaticBidCoS] -id = KEQ.... -## Options: cul, cc1100, coc, cuno, hmcfglan, hmlgw -deviceType = hmcfglan -host = 192.168.xxx.xxx -port = 1000 -# lanKey = xxxxxxx -rfKey = xxxx -currentRFKeyIndex = 1 -responseDelay = 60 -``` - -Some explanations: - -* id: The ID printed on the back side of your BidCoS I/O device. -* deviceType: The device type of your BidCoS device (cul, cc1100, coc, cuno, hmcfglan, hmlgw). -* host: The IP address of your I/O interface. -* port: Usually 1000, you probably don’t need to change this. -* lanKey: The AES key used for the communication between Homegear and your I/O interface (for securing the LAN connection). If you’ve been using FHEM before, you’ve probably disabled AES encryption using HomeMatic’s configuration utility, as FHEM doesn’t support encryption. You should add AES encryption later. For a quick start, comment out this line. -* rfKey: A random key used for securing the connection between Homegear and the HomeMatic devices (sensors, actors, etc.). You should note it down somewhere, because if you lose it, you’ll have to re-pair all your devices. - -After saving the configuration file, you’ll have to restart the Homegear daemon or the Docker Container running Homegear. Take a look at the logs in /var/log/homegear/homegear.log to find out if Homegear successfully connects to the BidCoS device. - -## Connecting OpenHAB to Homegear -* Browse to OpenHAB’s web interface at port 8080 (such as http://localhost:8080). -* Select the Paper UI (this one is new in OpenHAB 2). -* Go to “Extensions” and install “HomeMatic Binding”. -* Go to “Configuration” -> “Things”. 
Two new things should be detected automatically: "Homegear" and "GATEWAY-EXTRAS". Add both of them. They should be indicated as "ONLINE" afterwards.

## That's it – for now...
Congratulations: You've mastered the essential steps of setting up OpenHAB for your HomeMatic based smart home! Next time, I'll write about adding HomeMatic devices to OpenHAB using Homegear.
\ No newline at end of file diff --git a/content/posts/https-ssl-in-wordpress-behind-proxy.md deleted file mode 100644 index 13e4bad..0000000 --- a/content/posts/https-ssl-in-wordpress-behind-proxy.md +++ /dev/null @@ -1,33 +0,0 @@
---
title: "How to set up HTTPS/SSL in WordPress behind Proxy (nginx, HAProxy, Apache, lighttpd)"
date: 2016-08-27T11:30:03+00:00
tags:
  - wordpress
  - proxy
author: "Heiner"
aliases:
  - /2016/08/how-to-set-up-https-ssl-in-wordpress-behind-proxy-nginx-haproxy-apache-lighttpd/
---

Today I changed the accessibility of my blog from HTTP (unencrypted) to HTTPS/SSL. My blog is running WordPress behind an nginx proxy server. However, while the pages themselves loaded successfully via HTTPS, the embedded static resources like JavaScript, image and CSS files did not. Here's how I fixed it.

The cause of this issue is that WordPress doesn't seem to detect the original protocol scheme (HTTPS) correctly when running behind a proxy. Thus, if the connection between your user's browser and your proxy/load balancer is HTTPS, but the connection between your proxy server and WordPress is HTTP only, WordPress thinks that it's running on HTTP instead of HTTPS. Therefore it sets the absolute URLs incorrectly to HTTP.

This results in mixed content warnings. Modern browsers prevent loading resources from HTTP when the embedding page had been loaded from HTTPS. To fix this, taking the following steps worked for me:

Make sure that your proxy or load balancer adds the "X-Forwarded-*" HTTP request headers when proxying incoming requests to your WordPress backend server. My nginx configuration contains these lines:

```
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $host;
```

* Install and activate the [SSL Insecure Content Fixer](https://de.wordpress.org/plugins/ssl-insecure-content-fixer/) plugin in your WordPress installation's admin panel.
* Navigate to Settings -> SSL Insecure Content.
* Set "HTTPS detection" to "HTTP_X_FORWARDED_PROTO (e.g. load balancer, reverse proxy, NginX)".
* Navigate to Settings -> General.
* Set the "WordPress Address (URL)" and "Site Address (URL)" to your new HTTPS address.
* Check if everything is working as expected.
\ No newline at end of file diff --git a/content/posts/ipv6-on-a-sonicwall.md deleted file mode 100644 index 262a622..0000000 --- a/content/posts/ipv6-on-a-sonicwall.md +++ /dev/null @@ -1,33 +0,0 @@
---
title: "How to enable IPv6 on a SonicWall (SonicOS 5.9) using NAT"
date: 2014-11-20T11:30:03+00:00
tags:
  - sonicwall
  - firewall
  - ipv6
author: "Heiner"
aliases:
  - /2014/11/how-to-enable-ipv6-on-a-sonicwall-sonicos-5-9-using-nat/
---

IPv6 aimed to make Network Address Translation (NAT) obsolete as there are so many addresses available that every single device can have its own worldwide unique IPv6 address.
However, even with IPv6, using NAT is a very simple way to get your devices behind a Dell SonicWall connected to IPv6 services on the internet. In contrast to going without NAT, all the devices behind your SonicWall will appear on the internet under the SonicWall's IPv6 address.

The following guide applies to Dell SonicWalls with SonicOS 5.9.0 (IPv6 is not supported in SonicOS 5.8 or below). A SonicWall TZ-215 is connected to an IPv6 capable router via the X1/WAN interface. There are devices connected to the SonicWall on the X0/LAN and W0/WLAN interfaces. There is also a virtual W0:V1 interface used for WLAN guests.

1. Log in to SonicWall's administrative web interface (the default IP address on LAN is https://192.168.168.168).

2. Go to Network -> Interfaces and select to view IPv6.

* Determine the SonicWall's auto-configured IPv6 address for the X1/WAN interface and note it down. You'll need it later.
* Configure your X0/LAN interface: Check if it has a static IPv6 address starting with fd80::. Check "Enable Router Advertisement" and add a prefix fd80::, Lifetime = 1440 min.
* Configure your W0/WLAN interface: Check if it has a static IPv6 address starting with fd81::. Check "Enable Router Advertisement" and add a prefix fd81::, Lifetime = 1440 min.
* Do the same with other interfaces you want to enable for IPv6, such as W0:V1, X2, etc. Use fd82::, fd83::, etc. as prefixes.

3. Go to Network -> Address Objects and select to view IPv6.
Create/update the entry "WAN Primary IPv6" with the previously determined X1 IPv6 address. Set Zone = WAN, Type = Host.

4. Go to Network -> NAT Policies and select to view IPv6.
* Create a new NAT policy with the following settings:
  * Original Source = Any
  * Translated Source = WAN Primary IPv6
  * Original Destination = Any
  * Translated Destination = Original
  * Original Service = Any
  * Translated Service = Original
  * Inbound Interface = X0/LAN
  * Outbound Interface = X1/WAN
* Create another new NAT policy with the same settings as before, but this time, select W0/WLAN as "Inbound Interface".

5. On a client connected to the SonicWall, go to http://test-ipv6.com to check if your IPv6 configuration works.
\ No newline at end of file diff --git a/content/posts/jenkins-build-docker-images.md deleted file mode 100644 index 43705e0..0000000 --- a/content/posts/jenkins-build-docker-images.md +++ /dev/null @@ -1,64 +0,0 @@
---
title: "How to let Jenkins build Docker images"
date: 2017-06-11T11:30:03+00:00
tags:
  - docker
author: "Heiner"
aliases:
  - /2017/06/how-to-let-jenkins-build-docker-images/
---

If you're using Jenkins as your Continuous Integration (CI) tool and Docker to build self-contained images of your application, you may ask yourself how to automatically build Docker images during a Jenkins build job. Here's how I did it – with Jenkins running in a Docker container itself.

So far, I've used the official [Jenkins Docker image](https://hub.docker.com/r/jenkins/jenkins) (the one based on Alpine). I've tried some of the Docker plugins for Jenkins available out there. None of them really convinced me as the setup was quite complicated. I've been looking for a simpler method.
- -To achieve this, I’ve created a custom Dockerfile which derives from the official jenkins:alpine image: - -```dockerfile -FROM jenkins:alpine -USER root -RUN apk update && \ - apk add docker sudo -RUN echo "jenkins ALL=NOPASSWD: ALL" >> /etc/sudoers -USER jenkins -``` - -The user-switching is necessary to make sure that the package installation is performed as root (not as jenkins). Next, we update Alpine’s package repository and then install docker and sudo from Alpine’s official repository. sudo is required if your Docker host is configured to restrict Docker usage to specific users. After installing the packages, we allow the jenkins user to run sudo commands without password. - -I’m using docker-compose to start my Jenkins container: - -```yaml -version: '2' -services: - jenkins: - build: /docker/git/docker-jenkins - volumes: - - "/docker/storage/jenkins:/var/jenkins_home" - - "/var/run/docker.sock:/var/run/docker.sock" -``` - -The build line specifies the folder to your recently created Dockerfile. I mount two volumes here: - -* The first one specifies where Jenkins stores its files. -* The second mounts the docker.sock file. This is the key here. It allows the Docker executable in the Jenkins container to communicate with the Docker daemon running on the host. - -After starting your Jenkins docker container (using “docker-compose up -d”), browse to your Jenkins URL and configure the job that’s to build a Docker image automatically. - -Add “Execute Shell” to your “Build Steps”. Mine looks like: - -```bash -sudo docker build -t docker_hub_username/image_name:latest . && \ -sudo docker login -u docker_hub_username -p docker_hub_password && \ -sudo docker push docker_hub_username/image_name:latest -``` - -These lines build the Docker image, log in to Docker Hub and push the recently built image. - -## Update: - -If you want to use docker-compose from your Jenkins Docker container as well, add these lines to your Dockerfile: - -```dockerfile -RUN apk add py-pip -RUN pip install docker-compose -``` \ No newline at end of file diff --git a/content/posts/k3s-glusterfs.md b/content/posts/k3s-glusterfs.md deleted file mode 100644 index b27d532..0000000 --- a/content/posts/k3s-glusterfs.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: "Setting up a Kubernetes cluster with K3S, GlusterFS and Load Balancing" -date: 2021-09-03T11:30:03+00:00 -tags: - - kubernetes -author: "Heiner" -aliases: - - /2021/09/setting-up-a-kubernetes-cluster-with-k3s-glusterfs-and-load-balancing/ ---- - -I’ve recently written a tutorial which will guide you through setting up a Kubernetes cluster using K3S with virtual machines hosted at [Hetzner](https://www.hetzner.com/cloud), a German (Cloud) hosting provider. The tutorial uses [K3S](https://k3s.io/), a lightweight Kubernetes distribution which is perfectly suited for small VMs like Hetzner’s CX11. Additionally, the tutorial will show you how to set up Hetzner’s cloud load balancer which performs SSL offloading and forwards traffic to your Kubernetes system. Optionally, you will learn how to set up a distributed, replicated file system using [Kadalu](https://kadalu.io/), an opinionated storage system based on [GlusterFS](https://www.gluster.org/). This allows you to move pods between the nodes while still having access to the pods’ persistent data. 
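To give you an idea of how lightweight K3S is: a single server node can be bootstrapped with the official installer in one line. This is only a teaser, assuming a fresh Linux VM with curl installed; the tutorial linked below covers the cluster-specific options (tokens, additional agents, Hetzner specifics):

```bash
# Install a single K3S server node using the official installer
curl -sfL https://get.k3s.io | sh -

# Verify the node has come up
sudo k3s kubectl get nodes
```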
- -[Read the tutorial in Hetzner’s Online Community.](https://community.hetzner.com/tutorials/k3s-glusterfs-loadbalancer) \ No newline at end of file diff --git a/content/posts/lets-encrypt-effs-certbot-with-nginx-in-docker.md b/content/posts/lets-encrypt-effs-certbot-with-nginx-in-docker.md deleted file mode 100644 index 4425b4a..0000000 --- a/content/posts/lets-encrypt-effs-certbot-with-nginx-in-docker.md +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: "Using Let’s Encrypt / EFF’s CertBot with NGINX in Docker" -date: 2017-02-11T11:30:03+00:00 -tags: - - docker - - letsencrypt - - nginx -author: "Heiner" -aliases: - - /2017/02/using-lets-encrypt-effs-certbot-with-nginx-in-docker/ ---- - -I’m using [NGINX](https://nginx.org/) in a [Docker](https://www.docker.com/) Container as a front-end HTTP(s) Webserver, performing SSL termination and proxying incoming requests to various other Docker Containers and VMs. Now that I’ve switched my certificates to [Let’s Encrypt](https://letsencrypt.org/), I wondered how to integrate [EFF’s CertBot](https://certbot.eff.org/) (which is recommended by Let’s Encrypt) with my setup. Here’s how I did it. - -First, I’ve added two new volumes to my web-front-end’s Docker Compose File: - -```yaml -version: '2' -services: - webfrontend: - container_name: webfrontend - [...] - volumes: - - "/etc/localtime:/etc/localtime:ro" - - "/etc/timezone:/etc/timezone:ro" - - "/docker/storage/webfrontend/letsencrypt/www:/var/www/letsencrypt" - - "/docker/storage/webfrontend/letsencrypt/etc:/etc/letsencrypt" -``` - -Next, I’ve added the following location block to each of my virtual hosts: - -``` -location /.well-known/ { - alias /var/www/letsencrypt/; -} -``` - -I’m using the [palobo/certbot Docker Image](https://hub.docker.com/r/palobo/certbot/) to create the certificates, using this shell script: - -```sh -#!/bin/sh - -docker pull palobo/certbot - -GetCert() { - docker run -it \ - --rm \ - -v /docker/storage/webfrontend/letsencrypt/etc:/etc/letsencrypt \ - -v /docker/storage/webfrontend/letsencrypt/lib:/var/lib/letsencrypt \ - -v /docker/storage/webfrontend/letsencrypt/www:/var/www/.well-known \ - palobo/certbot -t certonly --webroot -w /var/www \ - --keep-until-expiring \ - $@ -} - -echo "Getting certificates..." -GetCert -d www.mydomain.com -d mydomain.com -GetCert -d somedomain.net - -echo "Restarting Web Frontend..." -cd /docker/containers/webfrontend -docker-compose down -docker-compose up -d -cd - - -echo "Done" -``` - -The script starts CertBot in a Docker Container for each requested certificate. Because the /etc/letsencrypt and the /var/www/.well-known directory is also used by my NGINX front-end Container (see above), these steps can be performed by the script: - -1. Using the [webroot plugin](https://certbot.eff.org/docs/using.html#webroot), a random file is created under the /.well-known/acme-challenge/ directory. -1. Let’s Encrypt can access and verify this file as the folder is aliased using the Location blocks in the NGINX config. -1. The generated private key and public certificate is placed in /etc/letsencrypt/, which is in turn a volume for the NGINX web-frontend. 
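Since Let's Encrypt certificates expire after 90 days, I'd run the script periodically. Here is a sketch of a cron entry; the script path and schedule are assumptions, and certbot's ```--keep-until-expiring``` flag used in the script keeps the runs cheap while the certificates are still valid:

```bash
# Example crontab entry (crontab -e): run the renewal script every Monday at 3 am
0 3 * * 1 /docker/containers/webfrontend/renew-certs.sh >> /var/log/renew-certs.log 2>&1
```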
You can use the generated certificates by adding these two lines to your NGINX vhost config:

```
ssl_certificate /etc/letsencrypt/live/www.mydomain.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/www.mydomain.com/privkey.pem;
```
\ No newline at end of file diff --git a/content/posts/multi-arch-docker-images-1.md deleted file mode 100644 index dd6b09d..0000000 --- a/content/posts/multi-arch-docker-images-1.md +++ /dev/null @@ -1,113 +0,0 @@
---
title: "Build Multi-Arch images on Docker Hub (Part 1)"
date: 2020-05-15T11:30:03+00:00
tags:
  - docker
author: "Heiner"
aliases:
  - /2020/05/multi-arch-images-mit-docker-hub-bauen-teil-1/
---

Multi-arch Docker images are a great thing: users of your images automatically pull the version matching their architecture, whether AMD64, ARM64 or ARM32. Normally, Docker images have to be built on the architecture they will later run on. By using the QEMU emulator, however, you can build for all other target platforms on an AMD64 machine. Combined with Docker Hub's auto-build feature, this saves a lot of work. In this post, I'd like to show you how it's done.

First, create a Dockerfile for the AMD64 architecture as usual, here based on an Alpine base image:

```dockerfile
FROM amd64/alpine:3.11
...
```

Next comes one Dockerfile per target architecture. In each of them, the matching QEMU binary is downloaded first and then copied into the target image.

Dockerfile.arm32v6 for ARM32V6:

```dockerfile
FROM alpine:3.11 AS qemu
RUN apk --update add --no-cache curl
RUN cd /tmp && \
curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static .

FROM arm32v6/alpine:3.11
COPY --from=qemu /tmp/qemu-arm-static /usr/bin/
...
```

Dockerfile.arm32v7 for ARM32V7:

```dockerfile
FROM alpine:3.11 AS qemu
RUN apk --update add --no-cache curl
RUN cd /tmp && \
curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static .

FROM arm32v7/alpine:3.11
COPY --from=qemu /tmp/qemu-arm-static /usr/bin/
...
```

Dockerfile.arm64v8 for ARM64V8:

```dockerfile
FROM alpine:3.11 AS qemu
RUN apk --update add --no-cache curl
RUN cd /tmp && \
curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-aarch64.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-aarch64/qemu-aarch64-static .

FROM arm64v8/alpine:3.11
COPY --from=qemu /tmp/qemu-aarch64-static /usr/bin/
...
```

Additionally, you create a file named "multi-arch-manifest.yaml". It declares which image belongs to which architecture: images built with QEMU according to the scheme above are initially listed as AMD64 images, which of course isn't correct. The Docker manifest is used to fix this assignment.
Here's an example based on my virtualzone/compose-updater image; you'll of course need to adjust the name to match your own image:

```yaml
image: virtualzone/compose-updater:latest
manifests:
  - image: virtualzone/compose-updater:amd64
    platform:
      architecture: amd64
      os: linux
  - image: virtualzone/compose-updater:arm32v6
    platform:
      architecture: arm
      os: linux
      variant: v6
  - image: virtualzone/compose-updater:arm32v7
    platform:
      architecture: arm
      os: linux
      variant: v7
  - image: virtualzone/compose-updater:arm64v8
    platform:
      architecture: arm64
      os: linux
      variant: v8
```

What's still missing are the hooks. They are called by the Docker registry before and after the corresponding build steps. We need a pre-build and a post-push hook.

The pre-build hook is called by the registry before an image is built. This is where we need to load and run QEMU. The file must be named "pre_build" and have chmod 755:

```bash
#!/bin/bash

BUILD_ARCH=$(echo "${DOCKERFILE_PATH}" | cut -d '.' -f 2)

[ "${BUILD_ARCH}" == "Dockerfile" ] && \
{ echo 'qemu-user-static: Registration not required for current arch'; exit 0; }

docker run --rm --privileged multiarch/qemu-user-static:register --reset
```

The post-push hook is called by the registry as soon as an image has been built and pushed to the repository. Here, the manifest tool has to be downloaded and then executed. The file must be named "post_push" and have chmod 755:

```bash
#!/bin/bash
curl -Lo manifest-tool https://github.com/estesp/manifest-tool/releases/download/v1.0.0/manifest-tool-linux-amd64
chmod +x manifest-tool
./manifest-tool push from-spec multi-arch-manifest.yaml
```

With that, your project is prepared and ready for multi-arch builds.

In the [next part, I'll show you](/posts/multi-arch-docker-images-2/) how to configure the "Automated Builds" on Docker Hub to actually run the multi-arch build.
\ No newline at end of file diff --git a/content/posts/multi-arch-docker-images-2.md deleted file mode 100644 index 042483a..0000000 --- a/content/posts/multi-arch-docker-images-2.md +++ /dev/null @@ -1,42 +0,0 @@
---
title: "Build Multi-Arch images on Docker Hub (Part 2)"
date: 2020-05-16T11:30:03+00:00
tags:
  - docker
author: "Heiner"
aliases:
  - /2020/05/multi-arch-images-mit-docker-hub-bauen-teil-2/
---

In the [first part of this article](/posts/multi-arch-docker-images-1/), I showed you how to set up a multi-arch Docker project that can also build for other target architectures such as ARM on an AMD64 platform. In this part, I'll show you how to get it all working on the official Docker Hub.

First, create a project on Docker Hub and link it to your source code repository. In my case, I use GitHub as the source code repository and Docker Hub's build infrastructure. You'll find the corresponding settings on the "Builds" tab:

![](/img/multiarch-dockerhub-1.png)

Configuring an automated build on Docker Hub.
This is where you set up the build configuration. First, specify which source code repository to build from:

![](/img/multiarch-dockerhub-2.png)

When configuring the build, the source code repository has to be selected first.
Next, create five build rules: one without an architecture tag (in my case "latest") and four more, one per target architecture. Four, because in this example we build for AMD64, ARM32V6, ARM32V7 and ARM64V8. If you want to build for other target architectures, you'll of course need more or fewer build rules:

![](/img/multiarch-dockerhub-3.png)

The matching build rules for the four target architectures.
The trick is that the "untagged" image gets all the other architecture images assigned to it. This way, a user running "docker run" or "docker pull" on your image automatically gets the image matching their architecture, without having to specify the platform explicitly. A Mac therefore pulls the AMD64 image, while Raspbian loads the ARM32V7 image and a Raspberry Pi 4 with 64 bit Ubuntu loads the ARM64V8 image. All without any further effort.

That's already it for the configuration. A click on "Save and Build" queues the pending builds (five in this case). In my experience, it can take a few hours on the Docker Hub infrastructure until all images have been built, even for simple images. You can track what's already done and what's still pending under "Recent Builds".

![](/img/multiarch-dockerhub-4.png)

The Recent Builds show which automated builds are still pending and which have already completed.
You will notice that the first builds are marked as failed. That's completely normal! A look at the build logs shows the understandable reason: after each build, the multi-arch-manifest.yaml Docker manifest is applied. Until the last target architecture image has been built, not all architecture images can be added to the "untagged" image, so the build appears to fail.

![](/img/multiarch-dockerhub-5.png)

No reason to worry: the error "failed with error: manifest unknown: manifest unknown".
In fact, the respective image has (hopefully) been built and pushed successfully. Only after the last multi-arch image can the manifest tool finally do its job and link the architectures. So don't let this unsettle you, and keep a close eye on the build logs.

Have fun with multi-arch images on Docker Hub!
\ No newline at end of file diff --git a/content/posts/onedrive-upload-backup.md deleted file mode 100644 index 74be9ba..0000000 --- a/content/posts/onedrive-upload-backup.md +++ /dev/null @@ -1,86 +0,0 @@
---
title: "Back up server to OneDrive's special App Folder"
date: 2021-09-02T11:30:03+00:00
tags:
  - github
  - onedrive
  - tool
author: "Heiner"
aliases:
  - /2021/09/back-up-server-to-onedrives-special-app-folder/
---

I'm a committed OneDrive Personal user. Bundled with M365, it's a cheap option to get 1 TB of cloud storage. Having plenty of cloud storage at hand, I'm also using my OneDrive to run automated backups of my servers. There are various solutions capable of uploading files to OneDrive, including [rclone](https://rclone.org/). However, I was looking for a solution which enables me to grant my backup script access to only one specific folder instead of my entire cloud drive – better safe than sorry. I couldn't find any. This is why I developed [OneDrive Uploader](https://github.com/virtualzone/onedrive-uploader).
Here is what it can do for you and how to use it.

Microsoft OneDrive supports so-called "special folders", which include the "App Folder" (App Root). This is a directory intended for applications to store their own files, without being able to access other files in your OneDrive folder. OneDrive Uploader supports these special folders, restricting the access of your backup script to its own files. However, you can also use OneDrive Uploader to upload and download files from other locations as long as you grant it access.

I've written OneDrive Uploader in Go, which is a great programming language that compiles natively to various operating systems and platforms. As a result, OneDrive Uploader is available for Linux, MacOS and Windows and supports AMD64, ARM and ARM64.

To get started with OneDrive Uploader, you'll need to create an access token in Microsoft's Azure Portal. To do this, follow these steps:

1. Log in to the Microsoft [Azure Portal](https://portal.azure.com/).
1. Navigate to "App registrations".
1. Create a new application with supported account type "Accounts in any organizational directory (Any Azure AD directory – Multitenant) and personal Microsoft accounts (e.g. Skype, Xbox)" and the following Web redirect URL: http://localhost:53682/
1. Copy the Application (client) ID.
1. Navigate to "Certificates & secrets", create a new Client secret and copy the Secret Value (not the ID).
1. Navigate to "API permissions", click "Add permission", choose "Microsoft Graph", select "Delegated". Then search and add the required permissions:
    - Access to App Folder only: Files.ReadWrite.AppFolder, offline_access, User.Read
    - Access to entire OneDrive: Files.Read, Files.ReadWrite, Files.Read.All, Files.ReadWrite.All, offline_access, User.Read

Great! You've now created an Azure App which you can use to grant OneDrive Uploader access to your OneDrive. Don't worry, the App is not visible anywhere, nor can anyone access your OneDrive.

You can now download the OneDrive Uploader executable for your operating system and platform. You can either choose the matching binary from the GitHub releases page, or simply execute this command:

```curl -s -L https://git.io/JRie0 | bash```

Now create a configuration file named config.json. Replace the empty client_id and client_secret values with the Application (client) ID and the client secret you created above:

```json
{
    "client_id": "",
    "client_secret": "",
    "scopes": [
        "Files.ReadWrite.AppFolder",
        "offline_access"
    ],
    "redirect_uri": "http://localhost:53682/",
    "secret_store": "./secret.json",
    "root": "/drive/special/approot"
}
```

As you can see in the config.json above, we specify the special app folder as OneDrive Uploader's root directory. The two scopes grant access to this app folder and allow automatically renewing the necessary access token without user interaction (which is essential for unattended backups).

Log in using this command and follow the instructions printed on your console:

```onedrive-uploader login```
You can now use OneDrive Uploader. To view the available commands, refer to the project's GitHub page or type:

```onedrive-uploader help```
To use OneDrive Uploader in your backup script, you can be guided by this shell script snippet:

```bash
#!/bin/bash
DIR_FORMAT="%Y-%m-%d" # YYYY-MM-DD format
TODAY=`date +"${DIR_FORMAT}"`
TARGET=/mnt/backup/$TODAY
UPLOADER="/usr/local/bin/onedrive-uploader -c /home/username/backup-script/config.json"

# Perform your local backup and store it in ${TARGET}

echo "Uploading..."
-cd ${TARGET} -${UPLOADER} mkdir ${TODAY} -for i in `ls`; do - ${UPLOADER} upload $i ${TODAY}; - HASH_REMOTE=`${UPLOADER} sha256 $TODAY/$i | tr '[A-Z]' '[a-z]'` - HASH_LOCAL=`sha256sum $i | tr '[A-Z]' '[a-z]' | awk '{ print $1 }'` - if [[ "$HASH_REMOTE" != "$HASH_LOCAL" ]]; then - echo "Hashes for '$i' do not match! Remote = $HASH_REMOTE vs. Local = $HASH_LOCAL" - fi -done -``` - -This bash script uploads all files from the local directory $TARGET to its app folder in your OneDrive. It creates a sub-folder named ```YYYY-MM-DD``` (i.e. 2021-08-30). For each file, after having finished the upload, it checks she SHA256 hash so that you can be sure the upload is intact. \ No newline at end of file diff --git a/content/posts/openrc-podman-kube-play.md b/content/posts/openrc-podman-kube-play.md deleted file mode 100644 index 7bccd2f..0000000 --- a/content/posts/openrc-podman-kube-play.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -title: "OpenRC Script for 'podman kube play'" -date: 2022-10-26T15:00:00+00:00 -tags: - - linux - - docker -author: "Heiner" ---- - -In June, I've [written about](/posts/alpine-podman/) my approach to starting and stopping Podman Pods using OpenRC scripts on Alpine Linux. However, that approach had two major drawbacks: First, the pods were started in the foreground, causing OpenRC to wait for all pod initialization tasks to complete. If an image needed to be pulled first, this could lead to longer delays, significantly increasing system startup times. Secondly, requesting the status of a previously started pod always stated "crashed". This is due to the fact that OpenRC is not able to identify the exact process spawned by Podman. - -I've therefore improved my OpenRC startup script to be used with ```podman kube play``` YAML files. In this post, I'm presenting my results. If you have further improvements, please let me know. - -## What does *not* work -The ```podman pod create``` command features the ```--infra-conmon-pidfile=file``` option. This option writes the PID of the infra container's conmon process to a file. - -Using this option, it was easy to enable OpenRC identifying the status of a Pod and start the Pod in background: - -```ini -pidfile="/run/${RC_SVCNAME}.pid" -command_background=true -``` - -Unfortunately, the ```--infra-conmon-pidfile=file``` option is not (yet?) available when using the ```podman kube play``` command. - -I've tried to discover the infra container's PID file using the ```podman inspect``` command and using this value dynamically in my OpenRC scripts: - -```bash -podman inspect --format '{{ .PidFile }}' somecontainer-infra -``` - -However, OpenRC doesn't seem happy with PID files appearing and disapperaring dynamically. - -## What *does* work - -I've created a ```pod``` script which is sourced by multiple ```pod-*``` scripts. - -The ```pod``` script includes functions for getting the status of a Pod and stopping a Pod. The script assumes that your Pod's Kubernetes YAML is located at ```/home/${command_user}/pods/${pod_name}/pod.yaml```. - -**/home/your-user/pods/init.d/pod** -```bash -#!/sbin/openrc-run - -name=$RC_SVCNAME -command="/usr/bin/podman" -networks_='' -for n in ${pod_networks}; do - networks_="${networks_} --network $n"; -done -command_args="play kube ${networks_} /home/${command_user}/pods/${pod_name}/pod.yaml >/dev/null 2>&1 &" - -depend() { - after network-online - use net -} - -cleanup() { - /sbin/runuser -u ${command_user} -- ${command} pod exists ${pod_name} - result=$? 
- if [ $result -eq 0 ]; then - /sbin/runuser -u ${command_user} -- ${command} pod stop ${pod_name} > /dev/null - /sbin/runuser -u ${command_user} -- ${command} pod rm ${pod_name} > /dev/null - fi -} - -start_pre() { - cleanup -} - -stop() { - ebegin "Stopping $RC_SVCNAME" - cleanup - eend $? -} - -status() { - /sbin/runuser -u ${command_user} -- ${command} pod exists ${pod_name} 2> /dev/null - result=$? - if [ $result -eq 0 ]; then - einfo "status: started" - return 0 - else - einfo "status: stopped" - return 3 - fi -} -``` - -The script for controlling a Pod "xyz" can look like this. - -* ```command_user``` specifies the user running the Pod -* ```pod_name``` sets the Pod's name -* ```pod_networks``` sets a space-separated list of networks the Pod should be connected to - -**/home/your-user/pods/init.d/pod-xyz** -```bash -#!/sbin/openrc-run - -command_user="your-user" -pod_name=xyz -pod_networks='network1 network2 ...' - -source "/home/${command_user}/pods/init.d/pod" -``` - -Using root (i.e. using ```doas``` or ```sudo```), you can then create a symlink in ```/etc/init.d``` and add the pod to the default run level at boot time: - -```bash -cd /etc/init.d -ln -s /home//pods/pod-xyz -rc-update add pod-xyz -``` - -Use ```rc-service``` to start and stop your Pod: - -```bash -doas rc-service pod-xyz start -``` \ No newline at end of file diff --git a/content/posts/podman-multiple-networks.md b/content/posts/podman-multiple-networks.md deleted file mode 100644 index 1f084f9..0000000 --- a/content/posts/podman-multiple-networks.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -title: "Connecting multiple networks to a Podman container" -date: 2022-10-16T17:00:00+00:00 -tags: - - linux - - docker -author: "Heiner" ---- - -I'm running my containers with [Podman in Rootless Mode](/posts/alpine-podman/) on Alpine for about four months now. However, an annoying problem has haunted me ever since: - -When a container was connected to more than one network, outgoing connections were not working correctly. - -Consider a container connected to two bridge networks: - -``` -$ podman run --rm -it \ - --network net1 \ - --network net2 \ - alpine /bin/ash -``` - -Inside the container, the two networks are connected correctly: - -``` -# ip a -1: lo: mtu 65536 qdisc noqueue state UNKNOWN qlen 1000 - inet 127.0.0.1/8 scope host lo - valid_lft forever preferred_lft forever -2: eth1@if17: mtu 1500 qdisc noqueue state UP qlen 1000 - inet 10.89.0.7/24 brd 10.89.0.255 scope global eth1 -4: eth0@if18: mtu 1500 qdisc noqueue state UP qlen 1000 - inet 10.89.2.6/24 brd 10.89.2.255 scope global eth0 -``` - -However, pinging a host on the internet only works using one of the two network interfaces: - -``` -# ping -I eth0 8.8.8.8 -PING 8.8.8.8 (8.8.8.8): 56 data bytes -64 bytes from 8.8.8.8: seq=0 ttl=42 time=4.075 ms -``` - -``` -# ping -I eth1 8.8.8.8 -PING 8.8.8.8 (8.8.8.8): 56 data bytes -... -2 packets transmitted, 0 packets received, 100% packet loss -``` - -## The solution -The solution is quite simple: You will need to set ```net.ipv4.conf.all.rp_filter``` to 2. - -On my Alpine system, rp_filter was set to 1 by default. The setting controls the source path validation within the kernel's IPv4 network stack. 1 means "strict", whereas 2 means "loose". 
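Before changing anything, you can check which mode is currently active; both the global value and the per-interface values matter here. A quick check using the standard sysctl tool:

```bash
# Show the current reverse path filter setting for all interfaces
sysctl -a 2>/dev/null | grep '\.rp_filter'
```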
- -You can try the solution temporarily by running: - -``` -# sysctl -w net.ipv4.conf.all.rp_filter=2 -``` - -To survive the next reboot, persist the setting by adding it to ```/etc/sysctl.conf```: - -``` -# echo "net.ipv4.conf.all.rp_filter=2" >> /etc/sysctl.conf -``` - -For more information, you can take a look at [this article](https://sysctl-explorer.net/net/ipv4/rp_filter/). \ No newline at end of file diff --git a/content/posts/raspberry-pi-os-remove-packages.md b/content/posts/raspberry-pi-os-remove-packages.md deleted file mode 100644 index cf1ce43..0000000 --- a/content/posts/raspberry-pi-os-remove-packages.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: "Raspberry Pi OS: Remove unnecessary packages" -date: 2020-06-07T11:30:03+00:00 -tags: - - raspberrypi -author: "Heiner" -aliases: - - /2020/06/raspberry-pi-os-64-bit-lite-desktop-pakete-entfernen/ ---- - -Recently, [I wrote about](/posts/usb-boot-raspberry-pi/) the availability of the 64 bit beta version of Raspberry Pi OS (formerly known as Raspbian). Unfortunately, the new 64 bit beta is only available in the Desktop variant, containing lots of packages most lightweight server systems won't need. There's no lite variant of the 64 bit beta version available at the time of writing. However, you can easily remove the Desktop packages from a running installation with two easy commands. - -You can download Raspberry Pi OS' 64 bit beta version [from the download directory on Raspberry Pi's website](https://downloads.raspberrypi.org/raspios_arm64/images/). The [Raspberry Pi Imager](https://www.raspberrypi.org/downloads/) makes it easy to burn the image to an SD card or external USB drive. - -![](/img/raspberry-usb.png) - -Enter the following commands (at your own risk!) to remove the Desktop packages after your Pi has started from the newly written card: - -```bash -sudo apt-get remove --purge \ - x11-* \ - gnome-* \ - desktop-base \ - *-theme \ - dconf-gsettings-backend \ - gsettings-desktop-schemas \ - gtk- \ - gtk2-* \ - xdg-* -sudo apt-get autoremove --purge -``` \ No newline at end of file diff --git a/content/posts/reduce-pdf-file-size-2.md b/content/posts/reduce-pdf-file-size-2.md deleted file mode 100644 index 44585a6..0000000 --- a/content/posts/reduce-pdf-file-size-2.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: "How to reduce PDF file size in Linux - Part 2" -date: 2015-08-15T11:30:03+00:00 -tags: - - linux - - macos - - tool -author: "Heiner" -aliases: - - /2015/08/how-to-reduce-pdf-file-size-part-2/ ---- - -Several months ago, I wrote a [blog post about reducing a PDF file’s size](/posts/reduce-pdf-file-size/). Since then, I’ve used that technique many times. However, you may want to control the DPI (dots per inch) even more specific. Here’s how to do it: - -```bash -gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.7 \ --dDownsampleColorImages=true \ --dDownsampleGrayImages=true \ --dDownsampleMonoImages=true \ --dColorImageResolution=120 \ --dGrayImageResolution=120 \ --dMonoImageResolution=120 \ --sOutputFile=output.pdf input.pdf -``` - -Hint: This also works on MacOS. 
Just install GhostScript using [Homebrew](https://brew.sh/):

```bash
brew install ghostscript
```
\ No newline at end of file diff --git a/content/posts/reduce-pdf-file-size.md deleted file mode 100644 index ab38827..0000000 --- a/content/posts/reduce-pdf-file-size.md +++ /dev/null @@ -1,32 +0,0 @@
---
title: "How to reduce PDF file size in Linux"
date: 2012-11-21T11:30:03+00:00
tags:
  - linux
  - macos
  - tool
author: "Heiner"
aliases:
  - /2012/11/how-to-reduce-pdf-file-size-in-linux/
---

Using a single line of GhostScript command on my Ubuntu's terminal, I was able to reduce the size of a PDF file from 6 MB to approximately 1 MB:

```bash
gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/screen -sOutputFile=output.pdf input.pdf
```

You can also use the following parameters for -dPDFSETTINGS instead of /screen:

* /screen – Lowest quality, lowest size
* /ebook – Moderate quality
* /printer – Good quality
* /prepress – Best quality, highest size

**Update:** Read [Part 2 of this blog post](/posts/reduce-pdf-file-size-2/) for more detailed file size reduction settings.

**Hint:** This also works on MacOS. Just install GhostScript using [Homebrew](https://brew.sh/):

```bash
brew install ghostscript
```
\ No newline at end of file diff --git a/content/posts/traefik-access-log-influxdb-grafana-telegraf.md deleted file mode 100644 index 878c490..0000000 --- a/content/posts/traefik-access-log-influxdb-grafana-telegraf.md +++ /dev/null @@ -1,85 +0,0 @@
---
title: "Analyze Traefik access log using InfluxDB and Grafana"
date: 2020-06-03T11:30:03+00:00
tags:
  - docker
author: "Heiner"
aliases:
  - /2020/06/traefik-access-log-influxdb-grafana-telegraf/
---

[Traefik](https://containo.us/traefik/) is a Cloud Native Edge Router, often deployed in Docker and Kubernetes environments. With little effort, you can use Telegraf to transport Traefik's access logs to an InfluxDB, where they can be analyzed using Grafana.

This setup contains the following elements:

* Traefik v2 runs as a Docker container on a Linux host.
* Traefik outputs access logs in JSON format to STDOUT.
* Telegraf fetches the Traefik container's JSON output using the docker_log input plugin.
* To work with the JSON output in InfluxDB and Grafana, we need to convert it into distinct fields using Telegraf's parser processor plugin. Otherwise, only numeric fields are kept as metric values; string values are discarded by default.
* We're using Telegraf's output plugin "influxdb" to write the results to InfluxDB.

## Configure Traefik
traefik.yml contains the following settings:

```yaml
accessLog:
  format: json
  fields:
    headers:
      defaultMode: drop
      names:
        User-Agent: keep
        Content-Type: keep
```

This makes Traefik output access logs in JSON format. JSON can easily be processed by machines – so we don't have to deal with GROK patterns or such workarounds. Furthermore, request headers get dropped, but "User-Agent" and "Content-Type" are kept.
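Before wiring up Telegraf, it's worth checking that Traefik really writes JSON access log entries to STDOUT. A quick sanity check; the container name is an assumption (it has to match your deployment, see the Telegraf config below), and ```jq``` is optional:

```bash
# Show the latest log line and pick a few of the JSON fields
docker logs --tail 1 traefik_traefik_1 | jq '{ClientHost, RequestHost, RequestMethod, RequestPath}'
```

If the last line is not an access log entry yet, simply issue a request against one of your routed services first.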
- -## Configure Telegraf -My telegraf.conf looks like this: - -```ini -[[inputs.docker_log]] - endpoint = "unix:///var/run/docker.sock" - from_beginning = false - container_name_include = ["traefik_traefik_1"] - - -[[processors.parser]] - namepass = ["docker_log"] - parse_fields = ["message"] - merge = "override" - data_format = "json" - json_string_fields = [ - "ClientHost", - "RequestAddr", - "RequestCount", - "RequestHost", - "RequestMethod", - "RequestPath", - "RequestProtocol", - "RequestScheme", - "downstream_Content-Type", - "request_User-Agent", - "time" - ] - json_time_key = "time" - json_time_format = "2006-01-02T15:04:05Z" - json_timezone = "UTC" - - -[[outputs.influxdb]] - urls = ["http://influxdb:8086"] - database = "telegraf" - username = "telegraf" - password = "..." -``` - -Important settings are: - -* container_name_include specifies from which container instance the logs are collected. It's our Traefik instance. -* parse_fields specifies which input field is to be processed. It's the field "message". -* json_string_fields specifies which values from the read JSON object are to be written to InfluxDB as string fields. If not specified, all non-numeric fields are dropped. -* json_time_key and the other json_time settings specify in which JSON keys and in which date-time format the timestamps for our log entries are contained. -* The output plugin needs to be configured so that Telegraf can connect to the InfluxDB. - -This is just meant to be an example. Please mind applicable law when storing, processing and using the access logs – such as GDPR in the European Union. \ No newline at end of file diff --git a/content/posts/unifi-usg-multiple-ip-addresses-on-pppoe.md b/content/posts/unifi-usg-multiple-ip-addresses-on-pppoe.md deleted file mode 100644 index 5121300..0000000 --- a/content/posts/unifi-usg-multiple-ip-addresses-on-pppoe.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: "Unifi USG: Multiple IP addresses on PPPoE" -date: 2021-08-16T11:30:03+00:00 -tags: - - github - - onedrive - - tool -author: "Heiner" -aliases: - - /2021/08/unifi-usg-multiple-ip-addresses-on-pppoe/ ---- - -My DSL provider TAL.de offers to assign a static and a dynamic IP address on PPPoE dial in. The dynamic IP address is the primary one, used for accessing the internet. Packets to the static IP address are routed to the router as well. Here’s how to set up things up on a Unifi Security Gateway (USG). - -By default, USG only allows for one IP address when dialing in via PPPoE. If you want to forward packets received on an additional IP address, you can’t use the Port Forwarding functionality provided in the Unifi Network Controller. If you do, such packets will still be dropped. - -Instead, you have to set up SNAT and DNAT firewall rules using a ```config.gateway.json``` file. Here’s how to set up SNAT and DNAT firewall rules for your USG to get your second (third, fourth …) IP address working: - -## 1. Create (or extend) a config.gateway.json file -Place a file named config.gateway.json in the following path of your Unifi Network controller: - -```/unifi/data/sites/default/``` - -You might need to replace “default” with the correct label of the affected site. - -## 2. Add DNAT and SNAT rules to the config.gateway.json file -In the following example, TCP packets received on port 443 of IP address ```public.static.ip.address``` will be forwarded to port 443 of IP address ```private.internal.ip.address```. Replace the values to match your demands. 
- -```json -{ - "service": { - "nat": { - "rule": { - "3000": { - "description": "DNAT public.static.ip.address TCP/443 to private.internal.ip.address", - "destination": { - "address": "public.static.ip.address", - "port": "443" - }, - "inbound-interface": "pppoe2", - "inside-address": { - "address": "private.internal.ip.address", - "port": "443" - }, - "log": "disable", - "protocol": "tcp", - "type": "destination" - }, - "5000": { - "description": "SNAT private.internal.ip.address TCP/443 to public.static.ip.address", - "log": "disable", - "outbound-interface": "ppoe2", - "outside-address": { - "address": "public.static.ip.address", - "port": "443" - }, - "protocol": "tcp", - "source": { - "address": "private.internal.ip.address", - "port": "443" - }, - "type": "source" - } - } - } - } -} -``` - -## 3. Trigger a provision of your new config to your USG -Log in to your Unifi Network Controller. Navigate to “Devices” and choose your Unifi Security Gateway. Go to “Device”, select “Manage” and click “Trigger Provision”. - -![img](/img/usg-provision.png) - -## 4. Test your configuration -From a system outside your network, try to reach the configured port by using nmap, curl or a web browser. \ No newline at end of file diff --git a/content/posts/uptime-robot-website-monitoring.md b/content/posts/uptime-robot-website-monitoring.md deleted file mode 100644 index 1f536df..0000000 --- a/content/posts/uptime-robot-website-monitoring.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: "UptimeRobot: A nice free website monitoring service" -date: 2016-09-05T11:30:03+00:00 -tags: - - tool -author: "Heiner" -aliases: - - /2016/09/uptimerobot-a-nice-free-website-monitoring-service/ ---- - -Over the weekend I’ve been looking around for a free service which monitors my websites. My requirement was that I want to be able to monitor both HTTP and HTTPS sites, I need support for authentication and the monitoring service should be able to check if a specific keyword exists within the watched site (instead of just assuming that a HTTP Status Code 200 is okay). Furthermore, I needed notifications in case of downtimes (Email and [Pushbullet](https://www.pushbullet.com/) is fine for me). - -I discovered [UptimeRobot](https://uptimerobot.com/). The service fulfils all of my requirements and allows for checks every 5 minutes – for free. Not a bad offer. As far as I can tell, everything works fine and I’m quite happy with it. \ No newline at end of file diff --git a/content/posts/usb-boot-raspberry-pi.md b/content/posts/usb-boot-raspberry-pi.md deleted file mode 100644 index f3a3e85..0000000 --- a/content/posts/usb-boot-raspberry-pi.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: "Native USB boot for Raspberry Pi 4" -date: 2020-05-28T11:30:03+00:00 -tags: - - raspberrypi -author: "Heiner" -aliases: - - /2020/05/nativer-usb-boot-raspberry-pi-4/ ---- - -Here's something that's probably been eagerly-awaited not only by me: Finally, Raspberry Pi 4 can boot directly from USB devices. Without any of the widespread workarounds which require an SD card a primrary boot medium. This is made possible by a new firmware, the so-called EEPROM. Furthermore, a new 64 bit beta version of Raspberry OS is available, too (formerly known as Raspbian). - -To get started, boot your Raspberry Pi with a Raspbian or Raspberry OS installation. This is required to upgrade the new beta firmware. 
- -## Download Raspberry OS 64 bit -You can find the new 64 bit beta version of Raspberry OS [in a forum post](https://www.raspberrypi.org/forums/viewtopic.php?t=275370). Download the ZIP file. Install [Raspberry Pi Imager](https://www.raspberrypi.org/downloads/). I've installed the imager using Homebrew: - -```bash -brew cask install raspberry-pi-imager -``` - -## Prepare an SD card with Raspberry OS -**Note:** This step is only required if your Raspberry Pi is not running Raspbian or Raspberry OS yet! We need Raspberry OS to flash the new firmware. - -Open Raspberry Pi Imager and flash the downloaded image to an SD card. - -![](/img/raspberry-usb.png) - -Afterwards, boot your Pi from this new SD card. - -## Flash EEPROM -EEPROM (electrically erasable programmable read-only memory) is your Raspberry Pi's firmware – sort of a basic system. - -You can find the [changelog for the Raspberry Pi EEPROM on GitHub](https://github.com/raspberrypi/rpi-eeprom/blob/master/firmware/release-notes.md). The beta versions as of May 15th 2020 contain the required functionalities to boot from a USB drive – e.g. an SSD. - -Install the required update tool on your Pi: - -```bash -sudo apt update -sudo apt upgrade -sudo apt install rpi-eeprom -``` - -To flash the beta firmware (at your own risk!), switch to the beta channel by modifying the following file: - -```bash -sudo nano /etc/default/rpi-eeprom-update -``` - -Change the line FIRMWARE_RELEASE_STATUS=”critical” to: - -```bash -FIRMWARE_RELEASE_STATUS="beta" -``` - -Upgrade the firmware and reboot: - -```bash -sudo rpi-eeprom-update -a -``` - -After the reboot, the following command should state that the new beta firmware has been installed: - -```bash -sudo rpi-eeprom-update -``` - -Alternatively, you can flash the new EEPROM version by downloading it from [the GitHub repository](https://github.com/raspberrypi/rpi-eeprom/tree/master/firmware/beta) and running the following command: - -```bash -sudo rpi-eeprom-update -d -f /tmp/pieeprom-2020-05-27.bin -``` - -## Prepare an SSD for USB boot -To make your Raspberry Pi boot from a USB drive (such as an SSD, an external hard drive or a USB thumb drive), use the Raspberry Pi Imager to write Raspberry Pi OS to your USB drive. - -Finally, connect the USB drive to your Raspberry Pi 4, remove the SD card, and connect the power cord. Watch your Pi boot from USB - without any SD Card workaround. \ No newline at end of file diff --git a/content/privacy-policy.md b/content/privacy-policy.md deleted file mode 100644 index 9b108c1..0000000 --- a/content/privacy-policy.md +++ /dev/null @@ -1,62 +0,0 @@ ---- -title: "Privacy Policy" -hideSummary: true -disableShare: true -ShowReadingTime: false -ShowPostNavLinks: false -ShowWordCount: false ---- - -We created this privacy policy in order to inform you about the information we collect, how we use your data and which choices you as a visitor of this website have. - -Unfortunately, it’s in the nature of things that this policy sounds quite technical. We tried to keep things as simple and clear as possible. - -## Personal data stored -The personal information you provide us (such as your name, email address, address or other personal information required in some form) are processed by us together with a timestamp and your IP address only for the stated purpose, stored securely and are not passed on to third parties. - -Thus, we only use your personal information for the communication with visitors who express this and for providing the offered services and products.
We will not pass on your personal data without your consent. This should however not preclude that national authorities can gain access to this data in case of unlawful conduct. - -If you send us personal data by email, we cannot guarantee its secure transmission. We strongly recommend not to send personal data via email without encryption. - -The legislative basis according to article 6 (1) of the DSGVO (lawfulness of processing of personal data) consists of your consent to processing your provided information. You can revoke your consent at any time. An informal email is all it needs. You’ll find out contact information in this website’s imprint. - -## Which personal data we store -You can use this website without providing any personal information. If you optionally choose to use functionalities that require the input of personal information, we will only use these for the purpose stated. - -## Where we store your data -Our servers are located in Germany. - -## Your rights according to General Data Protection Regulation (GDPR) -According to the regulations of the General Data Protection Regulation (GDPR) you have the following rights: - -* Right to have your data corrected (article 16 DSGVO) -* Right to have your data deleted (article 17 DSGVO) -* Right to limit the processing of your data (article 18 DSGVO) -* Right to be notified – Duty regarding the correction, deletion or limitation of your data and its processing (article 19 DSGVO) -* Right to data portability (article 20 DSGVO) -* Right to refuse (article 21 DSGVO) -* Right to be not subject to sole automatic decision making, including profiling (article 22 DSGVO) - -If you think the processing of your data violates the terms of the General Data Protection Regulation (GDPR) or your claims for data protection are violated in any way, you can contact the Federal Commissioner for Data Protection and Freedom of Information in Germany. - -## Where we send your data -We will not share your data with third parties. - -## TLS encryption using HTTPS -In both our website and our app, we use HTTPS to transport data securely. (data protection by technical means article 25 (1) DSGVO). By using TLS (Transport Layer Security), an encryption protocol to securely transport data on the internet, we can protect sensitive data. Most browsers show a lock symbol in your browser when HTTPS is active. - -## Cloudflare -We use the “Cloudflare” service provided by Cloudflare Inc., 101 Townsend St., San Francisco, CA 94107, USA. (hereinafter referred to as “Cloudflare”). - -Cloudflare offers a content delivery network with DNS that is available worldwide. As a result, the information transfer that occurs between your browser and our website is technically routed via Cloudflare’s network. This enables Cloudflare to analyze data transactions between your browser and our website and to work as a filter between our servers and potentially malicious data traffic from the Internet. In this context, Cloudflare may also use cookies or other technologies deployed to recognize Internet users, which shall, however, only be used for the herein described purpose. - -The use of Cloudflare is based on our legitimate interest in a provision of our website offerings that is as error free and secure as possible (Art. 6(1)(f) GDPR). - -Data transmission to the US is based on the Standard Contractual Clauses (SCC) of the European Commission. 
Details can be found here: https://www.cloudflare.com/privacypolicy/ - -For more information on Cloudflare's security precautions and data privacy policies, please follow this link: https://www.cloudflare.com/privacypolicy/ - -## Web Analytics -For statistical purposes, this website uses Matomo, an open source web analysis tool. Matomo does not transfer any data to servers outside our control. All data is processed and stored anonymised. Matomo is provided by InnoCraft Ltd, 7 Waterloo Quay PO625, 6140 Wellington, New Zealand. You can find out more about the data being processed by Matomo in its privacy policy at https://matomo.org/privacy-policy/. If you have any questions regarding the protection of your web analytics data, please contact privacy@matomo.org. - -Quelle: Erstellt mit dem [Datenschutz-Generator](https://www.adsimple.de/datenschutz-generator/) von AdSimple \ No newline at end of file diff --git a/static/img/multiarch-dockerhub-1.png b/img/multiarch-dockerhub-1.png similarity index 100% rename from static/img/multiarch-dockerhub-1.png rename to img/multiarch-dockerhub-1.png diff --git a/static/img/multiarch-dockerhub-2.png b/img/multiarch-dockerhub-2.png similarity index 100% rename from static/img/multiarch-dockerhub-2.png rename to img/multiarch-dockerhub-2.png diff --git a/static/img/multiarch-dockerhub-3.png b/img/multiarch-dockerhub-3.png similarity index 100% rename from static/img/multiarch-dockerhub-3.png rename to img/multiarch-dockerhub-3.png diff --git a/static/img/multiarch-dockerhub-4.png b/img/multiarch-dockerhub-4.png similarity index 100% rename from static/img/multiarch-dockerhub-4.png rename to img/multiarch-dockerhub-4.png diff --git a/static/img/multiarch-dockerhub-5.png b/img/multiarch-dockerhub-5.png similarity index 100% rename from static/img/multiarch-dockerhub-5.png rename to img/multiarch-dockerhub-5.png diff --git a/static/img/raspberry-usb.png b/img/raspberry-usb.png similarity index 100% rename from static/img/raspberry-usb.png rename to img/raspberry-usb.png diff --git a/static/img/seatsurfing-screenshot.png b/img/seatsurfing-screenshot.png similarity index 100% rename from static/img/seatsurfing-screenshot.png rename to img/seatsurfing-screenshot.png diff --git a/static/img/strava-import.png b/img/strava-import.png similarity index 100% rename from static/img/strava-import.png rename to img/strava-import.png diff --git a/static/img/usg-provision.png b/img/usg-provision.png similarity index 100% rename from static/img/usg-provision.png rename to img/usg-provision.png diff --git a/index.html b/index.html new file mode 100644 index 0000000..8264566 --- /dev/null +++ b/index.html @@ -0,0 +1,21 @@ +Virtualzone Blog +
Seatsurfing

Seatsurfing

Seatsurfing is an open source solution for free seating and co-working in your organisation. It features mobile apps for iOS and Android, an easy-to-use web booking interface and an App for Atlassian Confluence.

Visit seatsurfing.app +

Compose Updater

Automatically check for image updates and restart Docker containers when using Docker Compose.

GitHub Project +

OneDrive Uploader

Command line interface (CLI) and SDK for uploading files to OneDrive. Supports "special folders" (such as App Folder / App Root).

GitHub Project +

chargebot.io

Charge your Tesla from solar power or when dynamic grid prices are low. It works with any wallbox and with any solar power inverter.

Visit chargebot.io +

Go-hole: A minimalistic DNS proxy and blocker

You’ll probably know Pi-hole. It’s a popular “DNS sinkhole” – a DNS proxy server which blocks certain requests, such as those for well-known ad serving domains. The effect is a much less ad-cluttered web experience in your home network. +I’ve been using Pi-hole for several years as a Docker container on a Raspberry Pi. The Raspi is serving as a small home server on my home network. +However, as much as I like Pi-hole, I felt it got loaded with new features over the years and performed slower over time....

February 5, 2023 · 4 min · 703 words · Heiner

OpenRC Script for 'podman kube play'

In June, I’ve written about my approach to starting and stopping Podman Pods using OpenRC scripts on Alpine Linux. However, that approach had two major drawbacks: First, the pods were started in the foreground, causing OpenRC to wait for all pod initialization tasks to complete. If an image needed to be pulled first, this could lead to longer delays, significantly increasing system startup times. Secondly, requesting the status of a previously started pod always stated “crashed”....

October 26, 2022 · 3 min · 483 words · Heiner

Connecting multiple networks to a Podman container

I’ve been running my containers with Podman in Rootless Mode on Alpine for about four months now. However, an annoying problem has haunted me ever since: +When a container was connected to more than one network, outgoing connections were not working correctly. +Consider a container connected to two bridge networks: +$ podman run --rm -it \ --network net1 \ --network net2 \ alpine /bin/ash Inside the container, the two networks are connected correctly:...

October 16, 2022 · 2 min · 274 words · Heiner

Setting up Alpine Linux with Podman

Recently, I’ve written a blog post on how to set up Rootless Docker on Alpine Linux. Today I’m showing you how to set up Podman. Podman has a rootless architecture built in. It’s an alternative to Docker, providing an almost identical command line interface. Thus, if you’re used to Docker CLI, you won’t have any issues working with Podman. +Podman was initially developed by RedHat and is available as an open source project....

June 25, 2022 · 4 min · 852 words · Heiner

Setting up Alpine Linux with Rootless Docker

As of Docker Engine v20.10, it’s possible to run the Docker daemon as a non-root user (Rootless mode). This is especially valuable from a security point of view. Rootless mode mitigates potential vulnerabilities in the Docker daemon. +However, at the time of writing, setting up Docker in rootless mode is not straightforward if you’re using Alpine Linux as your host system. This is why I summarized the steps to get Docker Rootless up and running on Alpine Linux....

June 19, 2022 · 3 min · 479 words · Heiner

Setting up a Kubernetes cluster with K3S, GlusterFS and Load Balancing

I’ve recently written a tutorial which will guide you through setting up a Kubernetes cluster using K3S with virtual machines hosted at Hetzner, a German (Cloud) hosting provider. The tutorial uses K3S, a lightweight Kubernetes distribution which is perfectly suited for small VMs like Hetzner’s CX11. Additionally, the tutorial will show you how to set up Hetzner’s cloud load balancer which performs SSL offloading and forwards traffic to your Kubernetes system....

September 3, 2021 · 1 min · 118 words · Heiner

Back up server to OneDrive’s special App Folder

I’m a convinced user of OneDrive Personal. Bundled with M365, it’s a cheap option to get 1 TB of cloud storage. Having plenty of cloud storage at hand, I’m also using my OneDrive to run automated backups of my servers. There are various solutions capable of uploading files to OneDrive, including rclone. However, I was looking for a solution which enables me to grant my backup script only access to one specific folder instead of my entire cloud drive – better safe than sorry....

September 2, 2021 · 4 min · 682 words · Heiner

Unifi USG: Multiple IP addresses on PPPoE

My DSL provider TAL.de offers to assign a static and a dynamic IP address on PPPoE dial in. The dynamic IP address is the primary one, used for accessing the internet. Packets to the static IP address are routed to the router as well. Here’s how to set things up on a Unifi Security Gateway (USG). +By default, USG only allows for one IP address when dialing in via PPPoE....

August 16, 2021 · 2 min · 353 words · Heiner

Raspberry Pi OS: Remove unnecessary packages

Recently, I wrote about the availability of the 64 bit beta version of Raspberry Pi OS (formerly known as Raspbian). Unfortunately, the new 64 bit beta is only available in the Desktop variant, containing lots of packages most lightweight server systems won’t need. There’s no lite variant of the 64 bit beta version available at the time of writing. However, you can easily remove the Desktop packages from a running installation with two easy commands....

June 7, 2020 · 1 min · 161 words · Heiner

Analyze Traefik access log using InfluxDB and Grafana

Traefik is a Cloud Native Edge Router, often deployed in Docker and Kubernetes environments. With little effort, you can use Telegraf to transport Traefik’s access logs to an InfluxDB, where they can be analyzed using Grafana. +This setup contains the following elements: +Traefik v2 runs as a Docker container on a Linux host. Traefik outputs access logs in JSON format to STDOUT. Telegraf fetches the Traefik container’s JSON output using the docker_log input plugin....

June 3, 2020 · 2 min · 373 words · Heiner
+ \ No newline at end of file diff --git a/index.xml b/index.xml new file mode 100644 index 0000000..f7c1cf0 --- /dev/null +++ b/index.xml @@ -0,0 +1,230 @@ + + + + Virtualzone Blog + https://virtualzone.de/ + Recent content on Virtualzone Blog + Hugo -- gohugo.io + en-us + &copy; 2024 Heiner Beck. + Sun, 05 Feb 2023 06:00:00 +0000 + + + Go-hole: A minimalistic DNS proxy and and blocker + https://virtualzone.de/posts/dns-proxy-forwarder-blackhole/ + Sun, 05 Feb 2023 06:00:00 +0000 + https://virtualzone.de/posts/dns-proxy-forwarder-blackhole/ + You&rsquo;ll probably know Pi-hole. It&rsquo;s a popular &ldquo;DNS sinkhole&rdquo; – a DNS proxy server which blocks certain requests, such a as those for well-known ad serving domains. The effect is a much less ad-cluttered web experience in your home network. +I&rsquo;ve been using Pi-hole for several years as a Docker container on a Raspberry Pi. The Raspi is serving as a small home server on my home network. +However, as much as I like Pi-hole, I felt it got loaded with new features over the years and performed slower over the time. + + + OpenRC Script for 'podman kube play' + https://virtualzone.de/posts/openrc-podman-kube-play/ + Wed, 26 Oct 2022 15:00:00 +0000 + https://virtualzone.de/posts/openrc-podman-kube-play/ + In June, I&rsquo;ve written about my approach to starting and stopping Podman Pods using OpenRC scripts on Alpine Linux. However, that approach had two major drawbacks: First, the pods were started in the foreground, causing OpenRC to wait for all pod initialization tasks to complete. If an image needed to be pulled first, this could lead to longer delays, significantly increasing system startup times. Secondly, requesting the status of a previously started pod always stated &ldquo;crashed&rdquo;. + + + Connecting multiple networks to a Podman container + https://virtualzone.de/posts/podman-multiple-networks/ + Sun, 16 Oct 2022 17:00:00 +0000 + https://virtualzone.de/posts/podman-multiple-networks/ + I&rsquo;m running my containers with Podman in Rootless Mode on Alpine for about four months now. However, an annoying problem has haunted me ever since: +When a container was connected to more than one network, outgoing connections were not working correctly. +Consider a container connected to two bridge networks: +$ podman run --rm -it \ --network net1 \ --network net2 \ alpine /bin/ash Inside the container, the two networks are connected correctly: + + + Setting up Alpine Linux with Podman + https://virtualzone.de/posts/alpine-podman/ + Sat, 25 Jun 2022 18:00:00 +0000 + https://virtualzone.de/posts/alpine-podman/ + Recently, I&rsquo;ve written a blog post on how to set up Rootless Docker on Alpine Linux. Today I&rsquo;m showing you how to set up Podman. Podman has a rootless architecture built in. It&rsquo;s an alternative to Docker, providing an almost identical command line interface. Thus, if you&rsquo;re used to Docker CLI, you won&rsquo;t have any issues working with Podman. +Podman was initially developed by RedHat and is available as an open source project. + + + Setting up Alpine Linux with Rootless Docker + https://virtualzone.de/posts/alpine-docker-rootless/ + Sun, 19 Jun 2022 15:00:00 +0000 + https://virtualzone.de/posts/alpine-docker-rootless/ + As of Docker Engine v20.10, it&rsquo;s possible to run the Docker daemon as a non-root user (Rooless mode). This is especially valuable in view of security aspects. Rootless mode mitigates potential vulnerabilities in the Docker daemon. 
+However, at the time of writing, setting up Docker in rootless mode is not straightforward if you&rsquo;re using Alpine Linux as your host system. This is why I summarized the steps to get Docket Rootless up and running on Alpine Linux. + + + Setting up a Kubernetes cluster with K3S, GlusterFS and Load Balancing + https://virtualzone.de/posts/k3s-glusterfs/ + Fri, 03 Sep 2021 11:30:03 +0000 + https://virtualzone.de/posts/k3s-glusterfs/ + I’ve recently written a tutorial which will guide you through setting up a Kubernetes cluster using K3S with virtual machines hosted at Hetzner, a German (Cloud) hosting provider. The tutorial uses K3S, a lightweight Kubernetes distribution which is perfectly suited for small VMs like Hetzner’s CX11. Additionally, the tutorial will show you how to set up Hetzner’s cloud load balancer which performs SSL offloading and forwards traffic to your Kubernetes system. + + + Back up server to OneDrive’s special App Folder + https://virtualzone.de/posts/onedrive-upload-backup/ + Thu, 02 Sep 2021 11:30:03 +0000 + https://virtualzone.de/posts/onedrive-upload-backup/ + I’m a convinced user of OneDrive Personal. Bundled with M365, it’s a cheap option to get 1 TB of cloud storage. Having plenty of cloud storage at hand, I’m also using my OneDrive to run automated backups of my servers. There are various solutions capable of uploading files to OneDrive, including rclone. However, I was looking for a solution which enables me to grant my backup script only access to one specific folder instead of my entire cloud drive – better safe than sorry. + + + Unifi USG: Multiple IP addresses on PPPoE + https://virtualzone.de/posts/unifi-usg-multiple-ip-addresses-on-pppoe/ + Mon, 16 Aug 2021 11:30:03 +0000 + https://virtualzone.de/posts/unifi-usg-multiple-ip-addresses-on-pppoe/ + My DSL provider TAL.de offers to assign a static and a dynamic IP address on PPPoE dial in. The dynamic IP address is the primary one, used for accessing the internet. Packets to the static IP address are routed to the router as well. Here’s how to set up things up on a Unifi Security Gateway (USG). +By default, USG only allows for one IP address when dialing in via PPPoE. + + + Raspberry Pi OS: Remove unnecessary packages + https://virtualzone.de/posts/raspberry-pi-os-remove-packages/ + Sun, 07 Jun 2020 11:30:03 +0000 + https://virtualzone.de/posts/raspberry-pi-os-remove-packages/ + Recently, I wrote about the availability of the 64 bit beta version of Raspberry Pi OS (formerly known as Raspbian). Unfortunately, the new 64 bit beta is only available in the Desktop variant, containing lots of packages most lightweight server systems won&rsquo;t need. There&rsquo;s no lite variant of the 64 bit beta version available at the time of writing. However, you can easily remove the Desktop packages from a running installation with two easy commands. + + + Analyze Traefik access log using InfluxDB and Grafana + https://virtualzone.de/posts/traefik-access-log-influxdb-grafana-telegraf/ + Wed, 03 Jun 2020 11:30:03 +0000 + https://virtualzone.de/posts/traefik-access-log-influxdb-grafana-telegraf/ + Traefik is a Cloud Native Edge Router, often deployed in Docker and Kubernetes environments. With little effort, you can use Telegraf to transport Traefik&rsquo;s access logs to an InfluxDB, where it can be analyzed using Grafana. +This setup contains the following elements: +Traefik v2 runs as a Docker container on a Linux host. Traefik outputs access logs in JSON format to STDOUT. 
Telegraf fetched the Traefik container&rsquo;s JSON output using the docker_log input plugin. + + + Export trainings from Endomondo as GPX files + https://virtualzone.de/posts/endomono-export-gpx/ + Mon, 01 Jun 2020 11:30:03 +0000 + https://virtualzone.de/posts/endomono-export-gpx/ + I&rsquo;ve been using Endomondo for years to track my trainings. However, I&rsquo;ve been experiencing a lot of issues with Endomondo over the last months: Sometimes it&rsquo;s not possible to log in. Other times, my trainings won&rsquo;t get synced. So it&rsquo;s time a new app. I&rsquo;ve decided to give Strava a try. With a few lines of code, I&rsquo;ve managed to export all my training data as GPX files. These can be imported to Strava, so my training history won&rsquo;t get lost. + + + Native USB boot for Raspberry Pi 4 + https://virtualzone.de/posts/usb-boot-raspberry-pi/ + Thu, 28 May 2020 11:30:03 +0000 + https://virtualzone.de/posts/usb-boot-raspberry-pi/ + Here&rsquo;s something that&rsquo;s probably been eagerly-awaited not only by me: Finally, Raspberry Pi 4 can boot directly from USB devices. Without any of the widespread workarounds which require an SD card a primrary boot medium. This is made possible by a new firmware, the so-called EEPROM. Furthermore, a new 64 bit beta version of Raspberry OS is available, too (formerly known as Raspbian). +To get started, boot your Raspberry Pi with a Raspbian or Raspberry OS installation. + + + Build Multi-Arch images on Docker Hub (Part 2) + https://virtualzone.de/posts/multi-arch-docker-images-2/ + Sat, 16 May 2020 11:30:03 +0000 + https://virtualzone.de/posts/multi-arch-docker-images-2/ + Im ersten Teil dieses Artikels habe ich Euch gezeigt, wie Ihr ein Multi-Arch-Docker-Projekt anlegt, das auf einer AMD64-Plattform auch für andere Zielarchitekturen wie bspw. ARM bauen kann. In diesem Teil zeige ich Euch, wie Ihr das Ganze im offiziellen Docker Hub zum Laufen bekommt. +Zunächst solltet Ihr ein Projekt im Docker Hub anlegen und dieses mit Eurem Quellcode-Repository verknüpfen. In meinem Fall nutze ich GitHub als Sourcecode-Repository und nutze die Build-Infrastruktur von Docker Hub. + + + Build Multi-Arch images on Docker Hub (Part 1) + https://virtualzone.de/posts/multi-arch-docker-images-1/ + Fri, 15 May 2020 11:30:03 +0000 + https://virtualzone.de/posts/multi-arch-docker-images-1/ + Multi-Arch Docker Images sind eine tolle Sache: Benutzer Eurer Images ziehen automatisch die für Ihre Architektur passende Version Eures Image – ob AMD64, ARM64 oder ARM32. Normalerweise muss man Docker Images auf der Architektur bauen, auf der sie später auch verwendet werden. Durch die Verwendung des Emulators QEMU ist es jedoch möglich, auf einer AMD64-Architektur für alle anderen Zielplattformen mitzubauen. Kombiniert mit der Auto-Build-Funktion des Docker Hub ist das eine prima Arbeitserleichterung. + + + How to let Jenkins build Docker images + https://virtualzone.de/posts/jenkins-build-docker-images/ + Sun, 11 Jun 2017 11:30:03 +0000 + https://virtualzone.de/posts/jenkins-build-docker-images/ + If you’re using Jenkins as your Continuous Integration (CI) tool and Docker to build self-contained images of your application, you may ask yourself how to automatically build Docker images during Jenkins’ build job. Here’s how I did it – with Jenkins running in a Docker container itself. +So far, I’ve used the official Jenkins Docker image (the one based on Alpine). I’ve tried some of the Docker plugins for Jenkins available out there. 
+ + + Using Let’s Encrypt / EFF’s CertBot with NGINX in Docker + https://virtualzone.de/posts/lets-encrypt-effs-certbot-with-nginx-in-docker/ + Sat, 11 Feb 2017 11:30:03 +0000 + https://virtualzone.de/posts/lets-encrypt-effs-certbot-with-nginx-in-docker/ + I’m using NGINX in a Docker Container as a front-end HTTP(s) Webserver, performing SSL termination and proxying incoming requests to various other Docker Containers and VMs. Now that I’ve switched my certificates to Let’s Encrypt, I wondered how to integrate EFF’s CertBot (which is recommended by Let’s Encrypt) with my setup. Here’s how I did it. +First, I’ve added two new volumes to my web-front-end’s Docker Compose File: +version: &#39;2&#39; services: webfrontend: container_name: webfrontend [. + + + Creating an encrypted file container on macOS + https://virtualzone.de/posts/encrypted-file-container-macos/ + Tue, 06 Dec 2016 11:30:03 +0000 + https://virtualzone.de/posts/encrypted-file-container-macos/ + Some years ago, I’ve used TrueCrypt to create encrypted containers for storing sensitive files. However, TrueCrypt is nowadays considered insecure and I’m on macOS Sierra 10.12 now – time for another solution. Luckily, macOS has integrated means for creating encrypted containers and saving sensitive information in it. You don’t need any additional software for this. As far as I know, this solution also works for previous versions of Mac OS X, like Mac OS X 10. + + + UptimeRobot: A nice free website monitoring service + https://virtualzone.de/posts/uptime-robot-website-monitoring/ + Mon, 05 Sep 2016 11:30:03 +0000 + https://virtualzone.de/posts/uptime-robot-website-monitoring/ + Over the weekend I’ve been looking around for a free service which monitors my websites. My requirement was that I want to be able to monitor both HTTP and HTTPS sites, I need support for authentication and the monitoring service should be able to check if a specific keyword exists within the watched site (instead of just assuming that a HTTP Status Code 200 is okay). Furthermore, I needed notifications in case of downtimes (Email and Pushbullet is fine for me). + + + Fix Docker not using /etc/hosts on MacOS + https://virtualzone.de/posts/fix-docker-not-using-etc-hosts-on-macos/ + Sun, 28 Aug 2016 11:30:03 +0000 + https://virtualzone.de/posts/fix-docker-not-using-etc-hosts-on-macos/ + On my MacBook with Mac OS X 10.11 (El Capitan) and Docker 1.12.0, Docker did not read manually set DNS entries from the /etc/hosts file. +When I executed “docker push” for example, this resulted in “no such hosts” errors: +Put http://shuttle:5000/v1/repositories/webfrontend/: dial tcp: lookup shuttle on 192.168.65.1:53: no such host On Mac OS, Docker is running in a host container itself. Thus, you’ll have to add DNS entries to the container’s /etc/hosts file. + + + From FHEM to OpenHAB with Homegear: Installation/Docker container + https://virtualzone.de/posts/from-fhem-to-openhab-with-homegear-installation-docker-container/ + Sun, 28 Aug 2016 11:30:03 +0000 + https://virtualzone.de/posts/from-fhem-to-openhab-with-homegear-installation-docker-container/ + For more than 2.5 years, I’ve now been running FHEM with several HomeMatic sensors and actors. Using the HM-CFG-LAN Configuration Tool as an I/O interface between FHEM and the HomeMatic devices, this setup has been running smoothly most of the time. The configuration was a bit tricky now and then, but it worked. However, OpenHAB seems to become a really good choice. Version 2 is currently available as Beta 3. 
It features a modern web interface and an easy-to-use extension manager. + + + How to set up HTTPS/SSL in WordPress behind Proxy (nginx, HAProxy, Apache, lighttpd) + https://virtualzone.de/posts/https-ssl-in-wordpress-behind-proxy/ + Sat, 27 Aug 2016 11:30:03 +0000 + https://virtualzone.de/posts/https-ssl-in-wordpress-behind-proxy/ + Today I changed the accessibility of my blog from HTTP (unencrypted) to HTTPS/SSL. My blog is running WordPress behind an nginx proxy server. However, while the pages themselves loaded successfully from HTTPS, the embedded static resources like JavaScripts, Images, CSS files etc. did not. Here’s how I fixed it. +The cause of this issue is that WordPress doesn’t seem to detect the original protocol scheme (HTTPS) correctly when running behind a proxy. + + + How to reduce PDF file size in Linux - Part 2 + https://virtualzone.de/posts/reduce-pdf-file-size-2/ + Sat, 15 Aug 2015 11:30:03 +0000 + https://virtualzone.de/posts/reduce-pdf-file-size-2/ + Several months ago, I wrote a blog post about reducing a PDF file’s size. Since then, I’ve used that technique many times. However, you may want to control the DPI (dots per inch) even more specific. Here’s how to do it: +gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.7 \ -dDownsampleColorImages=true \ -dDownsampleGrayImages=true \ -dDownsampleMonoImages=true \ -dColorImageResolution=120 \ -dGrayImageResolution=120 \ -dMonoImageResolution=120 \ -sOutputFile=output.pdf input.pdf Hint: This also works on MacOS. Just install GhostScript using Homebrew: + + + How to enable IPv6 on a SonicWall (SonicOS 5.9) using NAT + https://virtualzone.de/posts/ipv6-on-a-sonicwall/ + Thu, 20 Nov 2014 11:30:03 +0000 + https://virtualzone.de/posts/ipv6-on-a-sonicwall/ + IPv6 aimed to make Network Address Translation (NAT) obselete as there are so many addresses available that every single device can have its own worldwide unique IPv6 address. However, even with IPv6, using NAT is a very simple way to get your devices behind a Dell SonicWall connected to IPv6 services on the internet. In contrast to going without NAT, all the devices behind your SonicWall will emerge under the SonicWall’s IPv6 address. + + + How to reduce PDF file size in Linux + https://virtualzone.de/posts/reduce-pdf-file-size/ + Wed, 21 Nov 2012 11:30:03 +0000 + https://virtualzone.de/posts/reduce-pdf-file-size/ + Using a single line of GhostScript command on my Ubuntu’s terminal, I was able to reduce the size of a PDF file from 6 MB to approximately 1 MB: +gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/screen -sOutputFile=output.pdf input.pdf You can also use the following parameters for -dPDFSETTINGS instead of /screen: +/screen – Lowest quality, lowest size /ebook – Moderate quality /printer – Good quality /prepress – Best quality, highest size Update: Read Part 2 of this blog post for more detailled file size reduction settings. + + + Determining a location’s federal state using Google Maps API + https://virtualzone.de/posts/determining-a-locations-federal-state-using-google-maps-api/ + Fri, 10 Aug 2012 11:30:03 +0000 + https://virtualzone.de/posts/determining-a-locations-federal-state-using-google-maps-api/ + If you have to find out which federal state a city belongs to, you can use the Google Maps API v3. 
Here is a straightforward JavaScript code snippet: +function log(s) { $(&#39;#sysout&#39;).append(document.createTextNode(s + &#39;n&#39;)); } function getResult(results) { for (var i=0; i -1) { return result[&#39;address_components&#39;][j][&#39;short_name&#39;]; } } return &#39;&#39;; } function getCountry(result) { return extractFirst(result, &#39;country&#39;); } function getFederalState(result) { return extractFirst(result, &#39;administrative_area_level_1&#39;); } function searchLocation() { $(&#39;#sysout&#39;).empty(); var location = $(&#39;#location&#39;). + + + Contact + https://virtualzone.de/contact/ + Mon, 01 Jan 0001 00:00:00 +0000 + https://virtualzone.de/contact/ + Heiner Beck +Karl-Herbert-Scheer-Str. 6 +61381 Friedrichsdorf Germany +Email: mail@virtualzone.de +Limitation of liability for internal content The content of our website has been compiled with meticulous care and to the best of our knowledge. However, we cannot assume any liability for the up-to-dateness, completeness or accuracy of any of the pages. Pursuant to section 7, para. 1 of the TMG (Telemediengesetz – Tele Media Act by German law), we as service providers are liable for our own content on these pages in accordance with general laws. + + + Privacy Policy + https://virtualzone.de/privacy-policy/ + Mon, 01 Jan 0001 00:00:00 +0000 + https://virtualzone.de/privacy-policy/ + We created this privacy policy in order to inform you about the information we collect, how we use your data and which choices you as a visitor of this website have. +Unfortunately, it’s in the nature of things that this policy sounds quite technically. We tried to keep things as simple and clear as possible. +Personal data stored The personal information you provide us (such as your name, email address, address or other personal information required in some form) are processed by us together with a timestamp and your IP address only for the stated purpose, stored securely and are not passed on to third parties. + + + diff --git a/layouts/partials/extend_footer.html b/layouts/partials/extend_footer.html deleted file mode 100644 index 1942577..0000000 --- a/layouts/partials/extend_footer.html +++ /dev/null @@ -1,15 +0,0 @@ - - - \ No newline at end of file diff --git a/layouts/partials/home_info.html b/layouts/partials/home_info.html deleted file mode 100644 index 01054cb..0000000 --- a/layouts/partials/home_info.html +++ /dev/null @@ -1,60 +0,0 @@ -{{- with site.Params.homeInfoParams }} -
-
-
-
Seatsurfing
-
-

Seatsurfing

-
-
-

Seatsurfing is an open source solution for free seating and co-working in your organisation. It features mobile apps for iOS and Android, an easy-to-use web booking interface and an App for Atlassian Confluence.

-
-
- {{ partial "svg.html" (dict "context" . "name" "link") }} Visit seatsurfing.app - -
-
-
-
-

Compose Updater

-
-
-

Automatically check for image updates and restart Docker containers when using Docker Compose.

-
-
- {{ partial "svg.html" (dict "context" . "name" "github") }} GitHub Project - -
-
-
-

OneDrive Uploader

-
-
-

Command line interface (CLI) and SDK for uploading files to OneDrive. Supports "special folders" (such as App Folder / App Root).

-
-
- {{ partial "svg.html" (dict "context" . "name" "github") }} GitHub Project - -
-
-
-

chargebot.io

-
-
-

Charge your Tesla from solar power or when dynamic grid prices are low. It works with any wallbox and with any solar power inverter.

-
-
- {{ partial "svg.html" (dict "context" . "name" "link") }} Visit chargebot.io - -
-
-
-
- {{ partial "social_icons.html" (dict "align" site.Params.homeInfoParams.AlignSocialIconsTo) }} -
-
-{{- end -}} \ No newline at end of file diff --git a/page/1/index.html b/page/1/index.html new file mode 100644 index 0000000..e1dd332 --- /dev/null +++ b/page/1/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/ + \ No newline at end of file diff --git a/page/2/index.html b/page/2/index.html new file mode 100644 index 0000000..8e005df --- /dev/null +++ b/page/2/index.html @@ -0,0 +1,14 @@ +Virtualzone Blog +

Export trainings from Endomondo as GPX files

I’ve been using Endomondo for years to track my trainings. However, I’ve been experiencing a lot of issues with Endomondo over the last months: Sometimes it’s not possible to log in. Other times, my trainings won’t get synced. So it’s time for a new app. I’ve decided to give Strava a try. With a few lines of code, I’ve managed to export all my training data as GPX files. These can be imported to Strava, so my training history won’t get lost....

June 1, 2020 · 2 min · 341 words · Heiner

Native USB boot for Raspberry Pi 4

Here’s something that’s probably been eagerly-awaited not only by me: Finally, Raspberry Pi 4 can boot directly from USB devices. Without any of the widespread workarounds which require an SD card as a primary boot medium. This is made possible by a new firmware, the so-called EEPROM. Furthermore, a new 64 bit beta version of Raspberry OS is available, too (formerly known as Raspbian). +To get started, boot your Raspberry Pi with a Raspbian or Raspberry OS installation....

May 28, 2020 · 2 min · 404 words · Heiner

Build Multi-Arch images on Docker Hub (Part 2)

In the first part of this article, I showed you how to create a multi-arch Docker project that can also build for other target architectures such as ARM on an AMD64 platform. In this part, I’ll show you how to get all of this running on the official Docker Hub. +First, you should create a project on Docker Hub and link it to your source code repository. In my case, I use GitHub as the source code repository and rely on Docker Hub’s build infrastructure....

May 16, 2020 · 3 min · 443 words · Heiner

Build Multi-Arch images on Docker Hub (Part 1)

Multi-arch Docker images are a great thing: users of your images automatically pull the version of your image that matches their architecture – whether AMD64, ARM64 or ARM32. Normally, you have to build Docker images on the architecture they will later run on. Using the QEMU emulator, however, it is possible to build for all other target platforms on an AMD64 architecture as well. Combined with Docker Hub’s auto-build feature, this makes life a lot easier....

May 15, 2020 · 3 min · 502 words · Heiner

How to let Jenkins build Docker images

If you’re using Jenkins as your Continuous Integration (CI) tool and Docker to build self-contained images of your application, you may ask yourself how to automatically build Docker images during Jenkins’ build job. Here’s how I did it – with Jenkins running in a Docker container itself. +So far, I’ve used the official Jenkins Docker image (the one based on Alpine). I’ve tried some of the Docker plugins for Jenkins available out there....

June 11, 2017 · 2 min · 370 words · Heiner

Using Let’s Encrypt / EFF’s CertBot with NGINX in Docker

I’m using NGINX in a Docker Container as a front-end HTTP(s) Webserver, performing SSL termination and proxying incoming requests to various other Docker Containers and VMs. Now that I’ve switched my certificates to Let’s Encrypt, I wondered how to integrate EFF’s CertBot (which is recommended by Let’s Encrypt) with my setup. Here’s how I did it. +First, I’ve added two new volumes to my web-front-end’s Docker Compose File: +version: '2' services: webfrontend: container_name: webfrontend [....

February 11, 2017 · 2 min · 287 words · Heiner

Creating an encrypted file container on macOS

Some years ago, I’ve used TrueCrypt to create encrypted containers for storing sensitive files. However, TrueCrypt is nowadays considered insecure and I’m on macOS Sierra 10.12 now – time for another solution. Luckily, macOS has integrated means for creating encrypted containers and saving sensitive information in it. You don’t need any additional software for this. As far as I know, this solution also works for previous versions of Mac OS X, like Mac OS X 10....

December 6, 2016 · 2 min · 356 words · Heiner

UptimeRobot: A nice free website monitoring service

Over the weekend I’ve been looking around for a free service which monitors my websites. My requirement was that I want to be able to monitor both HTTP and HTTPS sites, I need support for authentication and the monitoring service should be able to check if a specific keyword exists within the watched site (instead of just assuming that a HTTP Status Code 200 is okay). Furthermore, I needed notifications in case of downtimes (Email and Pushbullet is fine for me)....

September 5, 2016 · 1 min · 120 words · Heiner

Fix Docker not using /etc/hosts on MacOS

On my MacBook with Mac OS X 10.11 (El Capitan) and Docker 1.12.0, Docker did not read manually set DNS entries from the /etc/hosts file. +When I executed “docker push” for example, this resulted in “no such hosts” errors: +Put http://shuttle:5000/v1/repositories/webfrontend/: dial tcp: lookup shuttle on 192.168.65.1:53: no such host On Mac OS, Docker is running in a host container itself. Thus, you’ll have to add DNS entries to the container’s /etc/hosts file....

August 28, 2016 · 1 min · 163 words · Heiner

From FHEM to OpenHAB with Homegear: Installation/Docker container

For more than 2.5 years, I’ve now been running FHEM with several HomeMatic sensors and actors. Using the HM-CFG-LAN Configuration Tool as an I/O interface between FHEM and the HomeMatic devices, this setup has been running smoothly most of the time. The configuration was a bit tricky now and then, but it worked. However, OpenHAB seems to become a really good choice. Version 2 is currently available as Beta 3. It features a modern web interface and an easy-to-use extension manager....

August 28, 2016 · 6 min · 1084 words · Heiner
+ \ No newline at end of file diff --git a/page/3/index.html b/page/3/index.html new file mode 100644 index 0000000..e3c1b6b --- /dev/null +++ b/page/3/index.html @@ -0,0 +1,11 @@ +Virtualzone Blog +

How to set up HTTPS/SSL in WordPress behind Proxy (nginx, HAProxy, Apache, lighttpd)

Today I changed the accessibility of my blog from HTTP (unencrypted) to HTTPS/SSL. My blog is running WordPress behind an nginx proxy server. However, while the pages themselves loaded successfully from HTTPS, the embedded static resources like JavaScripts, Images, CSS files etc. did not. Here’s how I fixed it. +The cause of this issue is that WordPress doesn’t seem to detect the original protocol scheme (HTTPS) correctly when running behind a proxy....

August 27, 2016 · 2 min · 255 words · Heiner

How to reduce PDF file size in Linux - Part 2

Several months ago, I wrote a blog post about reducing a PDF file’s size. Since then, I’ve used that technique many times. However, you may want to control the DPI (dots per inch) even more specifically. Here’s how to do it: +gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.7 \ -dDownsampleColorImages=true \ -dDownsampleGrayImages=true \ -dDownsampleMonoImages=true \ -dColorImageResolution=120 \ -dGrayImageResolution=120 \ -dMonoImageResolution=120 \ -sOutputFile=output.pdf input.pdf Hint: This also works on MacOS. Just install GhostScript using Homebrew:...

August 15, 2015 · 1 min · 75 words · Heiner

How to enable IPv6 on a SonicWall (SonicOS 5.9) using NAT

IPv6 aimed to make Network Address Translation (NAT) obsolete as there are so many addresses available that every single device can have its own worldwide unique IPv6 address. However, even with IPv6, using NAT is a very simple way to get your devices behind a Dell SonicWall connected to IPv6 services on the internet. In contrast to going without NAT, all the devices behind your SonicWall will emerge under the SonicWall’s IPv6 address....

November 20, 2014 · 2 min · 372 words · Heiner

How to reduce PDF file size in Linux

Using a single line of GhostScript command on my Ubuntu’s terminal, I was able to reduce the size of a PDF file from 6 MB to approximately 1 MB: +gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/screen -sOutputFile=output.pdf input.pdf You can also use the following parameters for -dPDFSETTINGS instead of /screen: +/screen – Lowest quality, lowest size /ebook – Moderate quality /printer – Good quality /prepress – Best quality, highest size Update: Read Part 2 of this blog post for more detailed file size reduction settings....

November 21, 2012 · 1 min · 98 words · Heiner

Determining a location’s federal state using Google Maps API

If you have to find out which federal state a city belongs to, you can use the Google Maps API v3. Here is a straightforward JavaScript code snippet: +function log(s) { $('#sysout').append(document.createTextNode(s + 'n')); } function getResult(results) { for (var i=0; i -1) { return result['address_components'][j]['short_name']; } } return ''; } function getCountry(result) { return extractFirst(result, 'country'); } function getFederalState(result) { return extractFirst(result, 'administrative_area_level_1'); } function searchLocation() { $('#sysout').empty(); var location = $('#location')....

August 10, 2012 · 1 min · 162 words · Heiner
+ \ No newline at end of file diff --git a/posts/alpine-docker-rootless/index.html b/posts/alpine-docker-rootless/index.html new file mode 100644 index 0000000..89a58b5 --- /dev/null +++ b/posts/alpine-docker-rootless/index.html @@ -0,0 +1,52 @@ +Setting up Alpine Linux with Rootless Docker | Virtualzone Blog +

Setting up Alpine Linux with Rootless Docker

As of Docker Engine v20.10, it’s possible to run the Docker daemon as a non-root user (Rootless mode). This is especially valuable from a security point of view. Rootless mode mitigates potential vulnerabilities in the Docker daemon.

However, at the time of writing, setting up Docker in rootless mode is not straightforward if you’re using Alpine Linux as your host system. This is why I summarized the steps to get Docker Rootless up and running on Alpine Linux.

Download and install Alpine

First, we’ll download the Alpine Linux ISO image and install the OS. We’ll then enable the community repository as it contains packages we’ll need to set up Docker in non-root mode.

  1. Get Alpine Linux ISO from: https://www.alpinelinux.org/downloads/
  2. Boot system from ISO and run:
    # setup-alpine
    +
  3. Reboot and install the nano editor:
    # apk add nano
    +
  4. Enable the community repository in the following file (see the example after this list):
    # nano /etc/apk/repositories
    +
  5. Update the index of available packages:
    # apk update
    +
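If you prefer a one-liner over opening an editor, the community entry can be uncommented with sed. This is only a sketch and assumes the default /etc/apk/repositories layout written by setup-alpine, where the community line is present but commented out:

```bash
# Uncomment the "community" repository line (pattern assumes the default
# layout created by setup-alpine) and refresh the package index.
sed -i 's|^#\(.*/community\)$|\1|' /etc/apk/repositories
apk update
```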

Add a user and allow her to use doas

If you did not create a regular user account during the installation, it’s time to do it now:

  1. Install doas:
    # apk add doas
    +
  2. Create a user and add it to the wheel group in order to use root privileges:
    # adduser <USER> wheel
    +
  3. Allow users in group wheel to use doas by editing the file /etc/doas.d/doas.conf and adding the following line:
    permit persist :wheel
    +
  4. Log out and log in to the new account.

Install Docker Rootless

  1. Install newuidmap, newgidmap, fuse-overlayfs and iproute2 tools, all required by Rootless Docker:
    # apk add shadow-uidmap fuse-overlayfs iproute2
    +
  2. Enable cgroups v2 by editing /etc/rc.conf and setting rc_cgroup_mode to unified.
  3. Enable the cgroups service:
    # rc-update add cgroups && rc-service cgroups start
    +
  4. Allow your user to run Docker in rootless mode:
    # modprobe tun
    +# echo tun >>/etc/modules
    +# echo <USER>:100000:65536 >/etc/subuid
    +# echo <USER>:100000:65536 >/etc/subgid
    +
  5. Install Docker and Docker Compose v2:
    # apk add docker docker-cli-compose
    +
  6. Allow Docker access for your user:
    # addgroup <USER> docker
    +
  7. Enable the iptables module:
    # echo "ip_tables" >> /etc/modules
    +# modprobe ip_tables
    +
  8. Install Docker rootless:
    $ curl -fsSL https://get.docker.com/rootless | sh
    +
  9. Create an init script in /etc/init.d/docker-rootless:
    #!/sbin/openrc-run
    +
    +name=$RC_SVCNAME
    +description="Docker Application Container Engine (Rootless)"
    +supervisor="supervise-daemon"
    +command="/home/<USER>/bin/dockerd-rootless.sh"
    +command_args=""
    +command_user="<USER>"
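    +# PATH must include the user's ~/bin (where dockerd-rootless.sh and rootlesskit live); XDG_RUNTIME_DIR is where the rootless daemon places its docker.sock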
    +supervise_daemon_args=" -e PATH=\"/home/<USER>/bin:/sbin:/usr/sbin:$PATH\" -e HOME=\"/home/<USER>\" -e XDG_RUNTIME_DIR=\"/home/<USER>/.docker/run\""
    +
    +reload() {
    +    ebegin "Reloading $RC_SVCNAME"
    +    /bin/kill -s HUP \$MAINPID
    +    eend $?
    +}
    +
  10. Make the created init script executable, add it to the default runlevel and start it:
    # chmod +x /etc/init.d/docker-rootless
    +# rc-update add docker-rootless
    +# rc-service docker-rootless start
    +
  11. Create a .profile file in your home directory with the following contents:
    export XDG_RUNTIME_DIR="$HOME/.docker/run"
    +export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/docker.sock
    +export PATH="/home/<USER>/bin:/sbin:/usr/sbin:$PATH"
    +
  12. Log out and log in again.
  13. Check if Docker Rootless works (a further check is sketched after this list):
    $ docker ps
    +$ docker run --rm hello-world
    +
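A further check, in addition to the hello-world run above (my addition, not part of the original steps): the CLI should report that it is talking to a rootless daemon.

```bash
# The security options of a rootless daemon include "name=rootless",
# e.g. [name=seccomp,profile=default name=rootless]
docker info --format '{{.SecurityOptions}}'
```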

Allow ports < 1024 (optional)

By default, only ports >= 1024 can be exposed by non-root users. To change this, lower the minimum unprivileged port in /etc/sysctl.conf:

# echo "net.ipv4.ip_unprivileged_port_start=80" >> /etc/sysctl.conf
+
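The entry in /etc/sysctl.conf only takes effect once the sysctl settings are (re)loaded; a quick way to apply and verify the change immediately (as root) might look like this:

```bash
sysctl -p /etc/sysctl.conf                    # load the new setting now
sysctl net.ipv4.ip_unprivileged_port_start    # should report 80
```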
+ \ No newline at end of file diff --git a/posts/alpine-podman/index.html b/posts/alpine-podman/index.html new file mode 100644 index 0000000..0a845f5 --- /dev/null +++ b/posts/alpine-podman/index.html @@ -0,0 +1,71 @@ +Setting up Alpine Linux with Podman | Virtualzone Blog +

Setting up Alpine Linux with Podman

Recently, I’ve written a blog post on how to set up Rootless Docker on Alpine Linux. Today I’m showing you how to set up Podman. Podman has a rootless architecture built in. It’s an alternative to Docker, providing an almost identical command line interface. Thus, if you’re used to Docker CLI, you won’t have any issues working with Podman.

Podman was initially developed by RedHat and is available as an open source project. You can run your well known Docker images from Docker Hub and other registries without any changes. This is due to the fact that both Docker and Podman are compatible with Open Container Initiative (OCI) images.

In my tests, Podman had a significantly smaller memory footprint. From my point of view, it seems perfectly suitable for low power machines. However, it comes without a daemon, so you’ll have to set up some init scripts in order to restart your containers when your system reboots. I’ll cover this at the end of this article.

Download and install Alpine

First, we’ll download the Alpine Linux ISO image and install the OS. We’ll then enable the community repository as it contains packages we’ll need to set up Podman in non-root mode.

  1. Get Alpine Linux ISO from: https://www.alpinelinux.org/downloads/
  2. Boot system from ISO and run:
    # setup-alpine
    +
  3. Reboot and install the nano editor:
    # apk add nano
    +
  4. Enable the community repository in the following file:
    # nano /etc/apk/repositories
    +
  5. Update the index of available packages:
    # apk update
    +

Add a user and allow her to use doas

If you did not create a regular user account during the installation, it’s time to do it now:

  1. Install doas:
    # apk add doas
    +
  2. Create a user and add it to the wheel group in order to use root privileges:
    # adduser <USER> wheel
    +
  3. Allow users in group wheel to use doas by editing the file /etc/doas.d/doas.conf and adding the following line:
    permit persist :wheel
    +
  4. Log out and log in to the new account.

Install Podman

Now comes the important part: Setting up Podman.

  1. Enable cgroups v2 by editing /etc/rc.conf and setting rc_cgroup_mode to unified.
  2. Enable the cgroups service:
    # rc-update add cgroups && rc-service cgroups start
    +
  3. Install podman:
    # apk add podman
    +
  4. Allow your user to access Podman in rootless mode:
    # modprobe tun
    +# echo tun >>/etc/modules
    +# echo <USER>:100000:65536 >/etc/subuid
    +# echo <USER>:100000:65536 >/etc/subgid
    +
  5. Enable the iptables module:
    # echo "ip_tables" >> /etc/modules
    +# modprobe ip_tables
    +
  6. Check if Podman works by running a Hello World container using your user account (a further check is sketched after this list):
    $ podman run --rm hello-world
    +
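Another quick check (my addition, not part of the original list): the subordinate ID range configured in step 4 should be visible inside the user namespace Podman sets up.

```bash
# The first line maps your own UID to root inside the namespace,
# the second line should show the 100000:65536 range from /etc/subuid.
podman unshare cat /proc/self/uid_map
```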

Allow ports < 1024 (optional)

By default, only ports >= 1024 can be exposed by non-root users. To change this, lower the minimum unprivileged port in /etc/sysctl.conf:

$ doas sh -c 'echo "net.ipv4.ip_unprivileged_port_start=80" >> /etc/sysctl.conf'
+

Using Podman and Pods

If you are used to Docker, you can use Podman just the way you used to control Docker. One difference is that Podman can group multiple containers into Pods (that’s where the name comes from: Pod Manager). You may know Pods from Kubernetes. Containers in a Pod share a namespace, a network and a security context.

List running containers:

podman ps
+

List existing pods:

podman pod ps
+

Create a new pod:

podman pod create pod-web
+

Create a container inside the previously created Pod:

podman run --rm -d \
+    --pod pod-web \
+    docker.io/library/nginx:alpine
+
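To verify that the container actually joined the pod, the pod assignment can be shown per container (this check is my addition):

```bash
# Adds pod ID and pod name columns to the container listing
podman ps --pod
```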

Starting containers on system start

Because Podman follows a daemonless concept, there is no daemon that could start your containers on system boot. Instead, Podman recommends using systemd to start, stop and restart containers when the system starts.

On Alpine, we’re using OpenRC instead of systemd by default. I’m using Podman’s built-in functionality for exporting and importing Kubernetes YAML definitions together with a small OpenRC init script.

  1. Install runuser so your init script can create Pods on behalf of your rootless user:
    # apk add runuser
    +
  2. Create a folder to store your init scripts, such as /home/<user>/pods/init.d/.
  3. Generate a Kubernetes YAML for an existing Pod by issuing the following command and saving the YAML file in your previously created directory:
    podman generate kube <pod-name>
    +
    Alternatively, you can write the YAML file manually. Please refer to Podman’s documentation for more information on supported (and unsupported) Kubernetes YAML syntax.
  4. Create a file named pod in this folder with the following contents and make it executable (chmod +x pod):
    #!/sbin/openrc-run
    +
    +depend() {
    +    after network-online 
    +    use net 
    +}
    +
    +cleanup() {
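    +    # "podman pod exists" exits with 0 when a pod with this name is present; stop and remove it so "play kube" can recreate it cleanly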
    +    /sbin/runuser -u ${command_user} ${command} pod exists ${pod_name}
    +    result=$?
    +    if [ $result -eq 0 ]; then
    +            /sbin/runuser -u ${command_user} ${command} pod stop ${pod_name}
    +            /sbin/runuser -u ${command_user} ${command} pod rm ${pod_name}
    +    fi
    +}
    +
    +start_pre() {
    +    cleanup
    +}
    +
    +stop() {
    +    ebegin "Stopping $RC_SVCNAME"
    +    cleanup
    +    eend $?
    +}
    +
  5. Create one init script per Pod you want to control with the following contents (adjust as needed). Name it appropriately and make it executable (e.g. chmod +x pod-traefik):
    #!/sbin/openrc-run
    +
    +name=$RC_SVCNAME
    +pod_name=traefik
    +command_user="<user>"
    +command="/usr/bin/podman"
    +command_args="play kube --network traefik /home/${command_user}/pods/${pod_name}/pod.yaml"
    +
    +source "/home/${command_user}/pods/init.d/pod"
    +
  6. Create a symlink in /etc/init.d/:
    # cd /etc/init.d && ln -s /home/<user>/pods/pod-traefik
    +
  7. Use rc-update to add your OpenRC Pod init script to the default runlevel (you can then start the service right away, as shown after this list):
    # rc-update add pod-traefik
    +
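
With the symlink in place and the service added to the default runlevel, a quick sanity check is to start the example pod-traefik service manually and query its status:

# rc-service pod-traefik start
+# rc-service pod-traefik status
+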

Update: I’ve improved the OpenRC scripts. Please read the corresponding blog post.

+ \ No newline at end of file diff --git a/posts/determining-a-locations-federal-state-using-google-maps-api/index.html b/posts/determining-a-locations-federal-state-using-google-maps-api/index.html new file mode 100644 index 0000000..8edae20 --- /dev/null +++ b/posts/determining-a-locations-federal-state-using-google-maps-api/index.html @@ -0,0 +1,70 @@ +Determining a location’s federal state using Google Maps API | Virtualzone Blog +

Determining a location’s federal state using Google Maps API

If you have to find out which federal state a city belongs to, you can use the Google Maps API v3. Here is a straightforward JavaScript code snippet:

function log(s) {
+    $('#sysout').append(document.createTextNode(s + '\n'));
+}
+
+function getResult(results) {
+    // Use the first reverse-geocoding result.
+    return (results && results.length > 0) ? results[0] : null;
+}
+
+function extractFirst(result, type) {
+    for (var j = 0; j < result['address_components'].length; j++) {
+        if (result['address_components'][j]['types'].indexOf(type) > -1) {
+            return result['address_components'][j]['short_name'];
+        }
+    }
+    return '';
+}
+
+function getCountry(result) {
+    return extractFirst(result, 'country');
+}
+
+function getFederalState(result) {
+    return extractFirst(result, 'administrative_area_level_1');
+}
+
+function searchLocation() {
+    $('#sysout').empty();
+
+    var location = $('#location').val();
+    var geocoder;
+
+    log('Looking up "' + location + '"');
+
+    geocoder = new google.maps.Geocoder();
+    geocoder.geocode({'address': location}, function(results, status) {
+        if (status != google.maps.GeocoderStatus.OK) {
+            log('error: ' + status);
+            return;
+        }
+        if (results.length == 0) {
+            log('no result');
+            return;
+        }
+
+        log('Resolved to ' + results[0]['formatted_address']);
+
+        var latlng = results[0]['geometry']['location'];
+        geocoder.geocode({'latLng': latlng}, function(results, status) {
+            if (status != google.maps.GeocoderStatus.OK) {
+                log('error: ' + status);
+                return;
+            }
+            var desiredResult = getResult(results);
+            if (desiredResult) {
+                log('Federal State: ' + getFederalState(desiredResult));
+            }
+        });
+    });
+
+    return false;
+}
+
+$(document).bind('ready', function() {
+    new google.maps.places.Autocomplete(document.getElementById('location'), {});
+    $('#form').submit(searchLocation);
+});
+
+ \ No newline at end of file diff --git a/posts/dns-proxy-forwarder-blackhole/index.html b/posts/dns-proxy-forwarder-blackhole/index.html new file mode 100644 index 0000000..2f7106a --- /dev/null +++ b/posts/dns-proxy-forwarder-blackhole/index.html @@ -0,0 +1,34 @@ +Go-hole: A minimalistic DNS proxy and and blocker | Virtualzone Blog +

Go-hole: A minimalistic DNS proxy and blocker

You’ll probably know Pi-hole. It’s a popular “DNS sinkhole” – a DNS proxy server which blocks certain requests, such as those for well-known ad serving domains. The effect is a much less ad-cluttered web experience in your home network.

I’ve been using Pi-hole for several years as a Docker container on a Raspberry Pi. The Raspi is serving as a small home server on my home network.

However, as much as I like Pi-hole, I felt it got loaded with new features over the years and became slower over time. DNS queries took longer and longer until they were answered. With this experience in mind, and out of pure interest (how complicated would it be to create a DNS proxy on my own?), I’ve created Go-hole.

What is Go-hole?

Go-hole is written in Go and deliberately minimalistic, focused on the primary requirements. However, it has all the features I personally need on my home network:

  • Act as a network-wide central DNS server, handling the DNS queries of all clients
  • Forward incoming queries to one or more upstream DNS servers
  • Cache upstream query results for extremely fast recurring lookup handling
  • Block queries for well-known ad-serving and malicious domains by using definable block list URLs
  • Regularly update the blacklist source files
  • Whitelist certain domains which would otherwise be blocked by the configured blacklists
  • Resolve local names

How does it work?

Go-hole serves as DNS server on your (home) network. Instead of having your clients sending DNS queries directly to the internet or to your router, they are resolved by your local Go-hole instance. Go-hole sends these queries to one or more upstream DNS servers and caches the upstream query results for maximum performance.

Incoming queries from your clients are checked against a list of unwanted domain names (“blacklist”), such as well-known ad serving domains and trackers. If a requested name matches a name on the blacklist, Go-hole responds with error code NXDOMAIN (non-existing domain). This leads to clients not being able to load ads and tracker codes. In case you want to access a blacklisted domain, you can easily add it to a whitelist.

As an additional feature, you can set a list of custom host names/domain names to be resolved to specific IP addresses. This is useful for accessing services on your local network by name instead of their IP addresses.

How to use Go-hole?

The simplest way of getting Go-hole up and running is by using the pre-built Docker images.

First, create a configuration file named config.yaml. You can take a look at the example config file in the GitHub repository. On my home network, my config.yaml looks like this:

listen: 0.0.0.0:53
+upstream:
+  - 8.8.8.8:53
+  - 8.8.4.4:53
+blacklist:
+  - https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts
+blacklistRenewal: 1440
+whitelist:
+  - raw.githubusercontent.com
+  - www.googletagservices.com
+local:
+  - name: ha
+    target:
+    - address: 192.168.40.31
+      type: A
+    - address: 2a01:170:1172:40:40::31
+      type: AAAA
+

This config sets the following:

  • listen sets the listening address to 0.0.0.0 (any address) and the listening port to 53 (default DNS).
  • upstream sets the upstream DNS servers to Google’s DNS.
  • blacklist sets the blacklist source URL.
  • blacklistRenewal sets the automatic blacklist updating to a one-day interval (1440 minutes).
  • whitelist whitelists two domains which would be blacklisted otherwise.
  • local sets an IPv4 address (A record) and an IPv6 address (AAAA record) for the local name “ha”.

After you’ve prepared your configuration file, you can start the Docker container like this:

docker run \
+    --rm \
+    --mount type=bind,source=${PWD}/config.yaml,target=/app/config.yaml \
+    -p 53:53/udp \
+    ghcr.io/virtualzone/go-hole:latest
+
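
Once the container is up, you can check the blocking and the local names with any DNS lookup tool, for example dig; doubleclick.net is just an example of a domain that is typically covered by the configured blacklist:

dig @127.0.0.1 ha +short
+dig @127.0.0.1 doubleclick.net +short
+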

If you don’t want to run Go-hole with Docker (or Podman, like I do), you can use the pre-built binaries or build Go-hole from source.
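
A minimal sketch of building from source, assuming the repository lives at github.com/virtualzone/go-hole (derived from the image name above) and a recent Go toolchain is installed:

git clone https://github.com/virtualzone/go-hole.git    # repository URL assumed from the image name
+cd go-hole
+go build -o go-hole .
+./go-hole    # expects config.yaml in the working directory
+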

Conclusion

I’ve been using Go-hole as my home network’s DNS server for several weeks now. It has completely replaced Pi-hole for my use cases. I’ve not observed any crashes or instabilities yet. My home network’s DNS resolution times have greatly improved, making web browsing much faster than before. Of course, Pi-hole has a lot more features than Go-hole. My implementation doesn’t feature a web interface and for sure lacks other things you might like. However, none of these features are relevant to me.

I’d be happy to hear about your experience with this Pi-hole alternative.

+ \ No newline at end of file diff --git a/posts/encrypted-file-container-macos/index.html b/posts/encrypted-file-container-macos/index.html new file mode 100644 index 0000000..ce72a62 --- /dev/null +++ b/posts/encrypted-file-container-macos/index.html @@ -0,0 +1,7 @@ +Creating an encrypted file container on macOS | Virtualzone Blog +

Creating an encrypted file container on macOS

Some years ago, I used TrueCrypt to create encrypted containers for storing sensitive files. However, TrueCrypt is nowadays considered insecure and I’m on macOS Sierra 10.12 now – time for another solution. Luckily, macOS has integrated means for creating encrypted containers and saving sensitive information in them. You don’t need any additional software for this. As far as I know, this solution also works for previous versions of Mac OS X, like Mac OS X 10.11 (El Capitan) and Mac OS X 10.10 (Yosemite).

These containers are saved as DMG files. You probably know this file extension from installing downloaded software on your Mac. DMG files are Apple Disk Images, bundling a set of folders and files into a single file. Unlike installation images downloaded from the web, these DMG files can optionally be encrypted using an AES 128 bit or AES 256 bit encryption key.

To create an encrypted file container, open the Disk Utility using the Spotlight Search (press Cmd + Space).

Using the menu bar, navigate to “File” > “New Image” > “Blank Image…”.

Choose an appropriate name for your image and select the following settings:

  • Save as: The filename of your encrypted DMG file.
  • Name: A name shown when your DMG file is mounted.
  • Size: The size of your container. The DMG file will take exactly the specified size and the amount of data you can store in the container is limited to this specified size. However, you can shrink and grow your DMG at a later time.
  • Format: Choose “Mac OS Extended (Journaled)”.
  • Encryption: Choose between 128 bit AES and 256 bit AES encryption (for sensitive information, I’d go for 256 bit, just in case…). You’ll be prompted to enter an encryption key. Be sure to remember it really well. There will be no way to recover a lost encryption key!
  • Partitions: Choose “Single Partition – Apple Partition Map”.
  • Image Format: Choose “read/write disk image”.

Next, click “Create” to create your image. This may take a few minutes, depending on the size of your DMG and the speed of the device you’re creating the container on (e.g. a network share).
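
If you prefer the Terminal over the Disk Utility GUI, hdiutil should be able to create a comparable encrypted image; a minimal sketch, where size, volume name and filename are just examples:

hdiutil create -size 500m -fs "HFS+J" -encryption AES-256 -volname "Secret" secret.dmg
+hdiutil attach secret.dmg
+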

+ \ No newline at end of file diff --git a/posts/endomono-export-gpx/index.html b/posts/endomono-export-gpx/index.html new file mode 100644 index 0000000..8b05f61 --- /dev/null +++ b/posts/endomono-export-gpx/index.html @@ -0,0 +1,18 @@ +Export trainings from Endomondo as GPX files | Virtualzone Blog +

Export trainings from Endomondo as GPX files

I’ve been using Endomondo for years to track my trainings. However, I’ve been experiencing a lot of issues with Endomondo over the last months: Sometimes it’s not possible to log in. Other times, my trainings won’t get synced. So it’s time for a new app. I’ve decided to give Strava a try. With a few lines of code, I’ve managed to export all my training data as GPX files. These can be imported to Strava, so my training history won’t get lost.

There’s an article on Strava’s website on how to move from Endomondo to Strava. But the suggested approach is a bit too simplistic: Using Endomondo’s website, you can only export a single training at a time in GPX file format.

The good: GPX (GPS Exchange Format) is a standard file format used to exchange GPS coordinates. Using the GPS waypoints and some metadata (e.g. date, type of training), each of your trainings can be reconstructed.

The bad: I’ve done more than 1,000 trainings in Endomondo and I’m not willing to export each of them one by one.

In Node.JS’ module repository, npmjs.com, there’s a module named endomondo-api-handler. Using this, it’s easy to search, select and download trainings from Endomondo’s servers:

await api.processWorkouts(filter, async (workout) => {
+  if (workout.hasGPSData()) {
+    let filename = getFilename(workout);
+    let gpx = await api.getWorkoutGpx(workout.getId());
+    fs.writeFileSync(filename, gpx, 'utf8');
+  }
+});
+

I’ve used this module to create a little Node.JS tool which can be found on my GitHub account. You can use it to export all of a year’s trainings from Endomondo:

./index.js --username=... --password=... --year=2019 --month=11 --dir=/home/john/trainings
+

In order to use this tool, Node.JS must be installed on your computer. You can then check out the tool’s source code from my GitHub repository and run the following commands to make the tool ready to run:

git clone https://github.com/virtualzone/endomondo-exporter.git
+cd endomondo-exporter
+npm install
+

Importing GPX files to Strava is quite easy: You can upload 25 training files at once. There seems to be some rate limiting. I’ve received server errors after several imports. Waiting a few minutes solved that.

+ \ No newline at end of file diff --git a/posts/fix-docker-not-using-etc-hosts-on-macos/index.html b/posts/fix-docker-not-using-etc-hosts-on-macos/index.html new file mode 100644 index 0000000..88bf338 --- /dev/null +++ b/posts/fix-docker-not-using-etc-hosts-on-macos/index.html @@ -0,0 +1,17 @@ +Fix Docker not using /etc/hosts on MacOS | Virtualzone Blog +

Fix Docker not using /etc/hosts on MacOS

On my MacBook with Mac OS X 10.11 (El Capitan) and Docker 1.12.0, Docker did not read manually set DNS entries from the /etc/hosts file.

When I executed “docker push” for example, this resulted in “no such host” errors:

Put http://shuttle:5000/v1/repositories/webfrontend/: dial tcp: lookup shuttle on 192.168.65.1:53: no such host
+

On Mac OS, Docker itself runs inside a small Linux VM (the Docker host). Thus, you’ll have to add the DNS entries to that host’s /etc/hosts file. To fix it, get into the running Docker host:

screen ~/Library/Containers/com.docker.docker/Data/com.docker.driver.amd64-linux/tty
+

This took a while on my machine; I needed to press Ctrl+C for the login prompt to show up. Log in with “root” (no password required).

Edit the /etc/hosts file in the Docker Host using vi:

vi /etc/hosts
+

Note: In vi, press “i” to enter insert mode; to save and quit, press Escape and type “:wq”.
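
For example, to make the registry host from the error message above resolvable, append a line like the following (the IP address is just a placeholder for wherever your registry actually runs):

echo "192.168.1.100  shuttle" >> /etc/hosts
+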

Restart the Docker Daemon with:

service docker restart
+

Detach from the screen session by pressing Ctrl+A, then press D.

Docker should now use the correct /etc/hosts entries.

+ \ No newline at end of file diff --git a/posts/from-fhem-to-openhab-with-homegear-installation-docker-container/index.html b/posts/from-fhem-to-openhab-with-homegear-installation-docker-container/index.html new file mode 100644 index 0000000..596900d --- /dev/null +++ b/posts/from-fhem-to-openhab-with-homegear-installation-docker-container/index.html @@ -0,0 +1,67 @@ +From FHEM to OpenHAB with Homegear: Installation/Docker container | Virtualzone Blog +

From FHEM to OpenHAB with Homegear: Installation/Docker container

For more than 2.5 years, I’ve now been running FHEM with several HomeMatic sensors and actors. Using the HM-CFG-LAN Configuration Tool as an I/O interface between FHEM and the HomeMatic devices, this setup has been running smoothly most of the time. The configuration was a bit tricky now and then, but it worked. However, OpenHAB is shaping up to be a really good alternative. Version 2 is currently available as Beta 3. It features a modern web interface and an easy-to-use extension manager. More than a good reason to have a look at it. In this post, I’m going to show how to get started.

If you don’t know OpenHAB yet, here’s a short summary: OpenHAB is a vendor and technology agnostic open source automation software for smart homes. The software is developed in Java, has an extensible OSGi architecture and an actively growing community. It comes with a responsive web interface, making it equally usable on desktops and mobile devices. Last but not least, OpenHAB features a catchy programming syntax for rules, triggers, scripts and notifications.

OpenHAB has an integrated HomeMatic binding. If you’re using a CCU2, you can start with OpenHAB right out of the box. If you’re using another I/O interface like the HM-CFG-LAN Configuration Tool, you’ll need Homegear as an additional piece of software. Homegear communicates with your HomeMatic devices through the I/O interface. OpenHAB then connects to Homegear, which allows you to control all your HomeMatic sensors and actors using the OpenHAB software.

To get started, you should first choose if you’re going with Docker Containers (my preferred way of running server applications) or if you want to install OpenHAB and Homegear directly on your Linux System.

Option 1: Using Docker Compose

There are official Docker Images for OpenHAB. However, there was no working image for Homegear. So I created my own: You can use this Docker Image for Homegear if you want to.

  1. Make sure that Docker is set up correctly and that the Docker Daemon is running. Read Docker’s official guide for your operating system if you’re unsure.
  2. Make sure that Docker Compose is installed. I’m using Docker Compose instead of manually starting the two containers because it’s much more convenient.
  3. Create a directory for your OpenHAB setup, such as:
mkdir -p /docker/containers/openhab
+
  4. Create a docker-compose.yml file in this directory with the following content:
version: '2'
+services:
+  openhab:
+    image: openhab/openhab:amd64-online
+    volumes:
+      - "/etc/localtime:/etc/localtime:ro"
+      - "/etc/timezone:/etc/timezone:ro"
+      - "/docker/storage/openhab/conf:/openhab/conf"
+      - "/docker/storage/openhab/userdata:/openhab/userdata"
+    ports:
+      - "8080:8080"
+    depends_on:
+      - homegear
+    links:
+      - homegear
+  homegear:
+    image: virtualzone/homegear
+    volumes:
+      - "/etc/localtime:/etc/localtime:ro"
+      - "/etc/timezone:/etc/timezone:ro"
+      - "/docker/storage/homegear/homematicbidcos.conf:/etc/homegear/families/homematicbidcos.conf"
+      - "/docker/storage/homegear/sql.db:/var/lib/homegear/db.sql"
+

This defines two containers: One for OpenHAB and one for Homegear. The OpenHAB container depends on Homegear (“depends_on”), so Docker Compose makes sure that Homegear is started before OpenHAB. Check the paths of the volumes. They’re probably different on your system.

  5. Start up this composition by executing this command from the directory created above:
docker-compose up -d
+

The -d flag means “detached”, which makes the two docker containers run in the background. Skip this option if you want to see what’s going on.

  6. Check if everything is fine:
docker-compose logs
+

Option 2: Docker without Compose

This option is similar to option 1. However, you’ll have to start the two Docker Containers separately and manually, making sure that Homegear is started before OpenHAB.

  1. Make sure that Docker is set up correctly and that the Docker Daemon is running. Read Docker’s official guide for your operating system if you’re unsure.
  2. Launch Homegear with the following command. You may want to copy the command to an executable shell file, so it’s handier to re-execute it later:
docker run \
+        --name homegear \
+        -v /etc/localtime:/etc/localtime:ro \
+        -v /etc/timezone:/etc/timezone:ro \
+        -v /docker/storage/homegear/homematicbidcos.conf:/etc/homegear/families/homematicbidcos.conf \
+        -v /docker/storage/homegear/sql.db:/var/lib/homegear/db.sql \
+        -d \
+        --restart=always \
+        virtualzone/homegear
+
  3. Launch OpenHAB with the following command:
docker run \
+        --name openhab \
+        -v /etc/localtime:/etc/localtime:ro \
+        -v /etc/timezone:/etc/timezone:ro \
+        -v /docker/storage/openhab/conf:/openhab/conf \
+        -v /docker/storage/openhab/userdata:/openhab/userdata \
+        -p 8080:8080 \
+        --link homegear:homegear \
+        -d \
+        --restart=always \
+        openhab/openhab:amd64-online
+
  4. Check if both containers are running:
docker ps
+docker logs homegear
+docker exec homegear tail -n 100 /var/log/homegear/homegear.err
+docker exec homegear tail -n 100 /var/log/homegear/homegear.log
+docker logs openhab
+

Option 3: Installation without Docker

If you’re not comfortable with Docker, please refer to the download page of Homegear and the install guides for OpenHAB.

Configuring Homegear

Please note that if you’re running FHEM, you’ll have to stop it first. You can’t have two applications connect to the same HomeMatic I/O device (such as the HM-CFG-LAN). As of version 0.6, the HomeMatic configuration of Homegear is no longer in /etc/homegear/physicalinterfaces.conf. Instead, it’s in /etc/homegear/families/homematicbidcos.conf. If you’re using Docker, you’ll have to edit the file in the corresponding path on your host system (such as /docker/storage/homegear/homematicbidcos.conf). My homematicbidcos.conf looks like this:

[HomeMaticBidCoS]
+id = KEQ....
+## Options: cul, cc1100, coc, cuno, hmcfglan, hmlgw
+deviceType = hmcfglan
+host = 192.168.xxx.xxx
+port = 1000
+# lanKey = xxxxxxx
+rfKey = xxxx
+currentRFKeyIndex = 1
+responseDelay = 60
+

Some explanations:

  • id: The ID printed on the back side of your BidCoS I/O device.
  • deviceType: The device type of your BidCoS device (cul, cc1100, coc, cuno, hmcfglan, hmlgw).
  • host: The IP address of your I/O interface.
  • port: Usually 1000, you probably don’t need to change this.
  • lanKey: The AES key used for the communication between Homegear and your I/O interface (for securing the LAN connection). If you’ve been using FHEM before, you’ve probably disabled AES encryption using HomeMatic’s configuration utility, as FHEM doesn’t support encryption. You should add AES encryption later. For a quick start, comment out this line.
  • rfKey: A random key used for securing the connection between Homegear and the HomeMatic devices (sensors, actors, etc.). You should note it down somewhere, because if you lose it, you’ll have to re-pair all your devices.

After saving the configuration file, you’ll have to restart the Homegear daemon or the Docker Container running Homegear. Take a look at the logs in /var/log/homegear/homegear.log to find out if Homegear successfully connects to the BidCoS device.
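
Depending on the option you chose above, restarting Homegear could look like this (container and service names match the examples in this post):

docker-compose restart homegear    # Option 1, run from the directory containing docker-compose.yml
+docker restart homegear           # Option 2, plain Docker
+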

Connecting OpenHAB to Homegear

  • Browse to OpenHAB’s web interface at port 8080 (such as http://localhost:8080).
  • Select the Paper UI (this one is new in OpenHAB 2).
  • Go to “Extensions” and install “HomeMatic Binding”.
  • Go to “Configuration” -> “Things”. Two new things should be detected automatically: “Homegear” and “GATEWAY-EXTRAS”. Add both of them. They should be indicated as “ONLINE” afterwards.

That’s it – for now…

Congratulations: You’ve mastered the essential steps of setting up OpenHAB for your HomeMatic based smart home! Next time, I’ll write about adding HomeMatic devices to OpenHAB using Homegear.

+ \ No newline at end of file diff --git a/posts/https-ssl-in-wordpress-behind-proxy/index.html b/posts/https-ssl-in-wordpress-behind-proxy/index.html new file mode 100644 index 0000000..395266e --- /dev/null +++ b/posts/https-ssl-in-wordpress-behind-proxy/index.html @@ -0,0 +1,15 @@ +How to set up HTTPS/SSL in WordPress behind Proxy (nginx, HAProxy, Apache, lighttpd) | Virtualzone Blog +

How to set up HTTPS/SSL in WordPress behind Proxy (nginx, HAProxy, Apache, lighttpd)

Today I changed the accessibility of my blog from HTTP (unencrypted) to HTTPS/SSL. My blog is running WordPress behind an nginx proxy server. However, while the pages themselves loaded successfully from HTTPS, the embedded static resources like JavaScripts, Images, CSS files etc. did not. Here’s how I fixed it.

The cause of this issue is that WordPress doesn’t seem to detect the original protocol scheme (HTTPS) correctly when running behind a proxy. Thus, if the connection between your user’s browser and your proxy/loadbalancer is HTTPS, but the connection between your proxy server and WordPress is HTTP only, WordPress thinks that it’s running on HTTP instead of HTTPS. Therefore it sets the absolute URLs incorrectly to HTTP.

This results in mixed content warnings: Modern browsers prevent loading resources via HTTP when the embedding page has been loaded via HTTPS. The following steps fixed this for me:

Make sure that your proxy or load balancer adds the “X-Forwarded-*” HTTP request headers when proxying incoming requests to your WordPress backend server. My nginx configuration contains these lines:

proxy_set_header X-Forwarded-Host $host;
+proxy_set_header X-Forwarded-Server $host;
+proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+proxy_set_header X-Forwarded-Proto $scheme;
+proxy_set_header X-Real-IP $remote_addr;
+proxy_set_header Host $host;
+
  • Install and activate the SSL Insecure Content Fixer plugin in your WordPress installation’s admin panel.
  • Navigate to Settings -> SSL Insecure Content.
  • Set “HTTPS detection” to “HTTP_X_FORWARDED_PROTO (e.g. load balancer, reverse proxy, NginX)”.
  • Navigate to Settings -> General.
  • Set the “WordPress Address (URL)” and “Site Address (URL)” to your new HTTPS address.
  • Check if everything is working as expected.
+ \ No newline at end of file diff --git a/posts/index.html b/posts/index.html new file mode 100644 index 0000000..e76c6fd --- /dev/null +++ b/posts/index.html @@ -0,0 +1,17 @@ +Posts | Virtualzone Blog +

Go-hole: A minimalistic DNS proxy and blocker

You’ll probably know Pi-hole. It’s a popular “DNS sinkhole” – a DNS proxy server which blocks certain requests, such a as those for well-known ad serving domains. The effect is a much less ad-cluttered web experience in your home network. +I’ve been using Pi-hole for several years as a Docker container on a Raspberry Pi. The Raspi is serving as a small home server on my home network. +However, as much as I like Pi-hole, I felt it got loaded with new features over the years and performed slower over the time....

February 5, 2023 · 4 min · 703 words · Heiner

OpenRC Script for 'podman kube play'

In June, I’ve written about my approach to starting and stopping Podman Pods using OpenRC scripts on Alpine Linux. However, that approach had two major drawbacks: First, the pods were started in the foreground, causing OpenRC to wait for all pod initialization tasks to complete. If an image needed to be pulled first, this could lead to longer delays, significantly increasing system startup times. Secondly, requesting the status of a previously started pod always stated “crashed”....

October 26, 2022 · 3 min · 483 words · Heiner

Connecting multiple networks to a Podman container

I’m running my containers with Podman in Rootless Mode on Alpine for about four months now. However, an annoying problem has haunted me ever since: +When a container was connected to more than one network, outgoing connections were not working correctly. +Consider a container connected to two bridge networks: +$ podman run --rm -it \ --network net1 \ --network net2 \ alpine /bin/ash Inside the container, the two networks are connected correctly:...

October 16, 2022 · 2 min · 274 words · Heiner

Setting up Alpine Linux with Podman

Recently, I’ve written a blog post on how to set up Rootless Docker on Alpine Linux. Today I’m showing you how to set up Podman. Podman has a rootless architecture built in. It’s an alternative to Docker, providing an almost identical command line interface. Thus, if you’re used to Docker CLI, you won’t have any issues working with Podman. +Podman was initially developed by RedHat and is available as an open source project....

June 25, 2022 · 4 min · 852 words · Heiner

Setting up Alpine Linux with Rootless Docker

As of Docker Engine v20.10, it’s possible to run the Docker daemon as a non-root user (Rooless mode). This is especially valuable in view of security aspects. Rootless mode mitigates potential vulnerabilities in the Docker daemon. +However, at the time of writing, setting up Docker in rootless mode is not straightforward if you’re using Alpine Linux as your host system. This is why I summarized the steps to get Docket Rootless up and running on Alpine Linux....

June 19, 2022 · 3 min · 479 words · Heiner

Setting up a Kubernetes cluster with K3S, GlusterFS and Load Balancing

I’ve recently written a tutorial which will guide you through setting up a Kubernetes cluster using K3S with virtual machines hosted at Hetzner, a German (Cloud) hosting provider. The tutorial uses K3S, a lightweight Kubernetes distribution which is perfectly suited for small VMs like Hetzner’s CX11. Additionally, the tutorial will show you how to set up Hetzner’s cloud load balancer which performs SSL offloading and forwards traffic to your Kubernetes system....

September 3, 2021 · 1 min · 118 words · Heiner

Back up server to OneDrive’s special App Folder

I’m a convinced user of OneDrive Personal. Bundled with M365, it’s a cheap option to get 1 TB of cloud storage. Having plenty of cloud storage at hand, I’m also using my OneDrive to run automated backups of my servers. There are various solutions capable of uploading files to OneDrive, including rclone. However, I was looking for a solution which enables me to grant my backup script only access to one specific folder instead of my entire cloud drive – better safe than sorry....

September 2, 2021 · 4 min · 682 words · Heiner

Unifi USG: Multiple IP addresses on PPPoE

My DSL provider TAL.de offers to assign a static and a dynamic IP address on PPPoE dial in. The dynamic IP address is the primary one, used for accessing the internet. Packets to the static IP address are routed to the router as well. Here’s how to set up things up on a Unifi Security Gateway (USG). +By default, USG only allows for one IP address when dialing in via PPPoE....

August 16, 2021 · 2 min · 353 words · Heiner

Raspberry Pi OS: Remove unnecessary packages

Recently, I wrote about the availability of the 64 bit beta version of Raspberry Pi OS (formerly known as Raspbian). Unfortunately, the new 64 bit beta is only available in the Desktop variant, containing lots of packages most lightweight server systems won’t need. There’s no lite variant of the 64 bit beta version available at the time of writing. However, you can easily remove the Desktop packages from a running installation with two easy commands....

June 7, 2020 · 1 min · 161 words · Heiner

Analyze Traefik access log using InfluxDB and Grafana

Traefik is a Cloud Native Edge Router, often deployed in Docker and Kubernetes environments. With little effort, you can use Telegraf to transport Traefik’s access logs to an InfluxDB, where it can be analyzed using Grafana. +This setup contains the following elements: +Traefik v2 runs as a Docker container on a Linux host. Traefik outputs access logs in JSON format to STDOUT. Telegraf fetched the Traefik container’s JSON output using the docker_log input plugin....

June 3, 2020 · 2 min · 373 words · Heiner
+ \ No newline at end of file diff --git a/posts/index.xml b/posts/index.xml new file mode 100644 index 0000000..6955beb --- /dev/null +++ b/posts/index.xml @@ -0,0 +1,210 @@ + + + + Posts on Virtualzone Blog + https://virtualzone.de/posts/ + Recent content in Posts on Virtualzone Blog + Hugo -- gohugo.io + en-us + &copy; 2024 Heiner Beck. + Sun, 05 Feb 2023 06:00:00 +0000 + + + Go-hole: A minimalistic DNS proxy and and blocker + https://virtualzone.de/posts/dns-proxy-forwarder-blackhole/ + Sun, 05 Feb 2023 06:00:00 +0000 + https://virtualzone.de/posts/dns-proxy-forwarder-blackhole/ + You&rsquo;ll probably know Pi-hole. It&rsquo;s a popular &ldquo;DNS sinkhole&rdquo; – a DNS proxy server which blocks certain requests, such a as those for well-known ad serving domains. The effect is a much less ad-cluttered web experience in your home network. +I&rsquo;ve been using Pi-hole for several years as a Docker container on a Raspberry Pi. The Raspi is serving as a small home server on my home network. +However, as much as I like Pi-hole, I felt it got loaded with new features over the years and performed slower over the time. + + + OpenRC Script for 'podman kube play' + https://virtualzone.de/posts/openrc-podman-kube-play/ + Wed, 26 Oct 2022 15:00:00 +0000 + https://virtualzone.de/posts/openrc-podman-kube-play/ + In June, I&rsquo;ve written about my approach to starting and stopping Podman Pods using OpenRC scripts on Alpine Linux. However, that approach had two major drawbacks: First, the pods were started in the foreground, causing OpenRC to wait for all pod initialization tasks to complete. If an image needed to be pulled first, this could lead to longer delays, significantly increasing system startup times. Secondly, requesting the status of a previously started pod always stated &ldquo;crashed&rdquo;. + + + Connecting multiple networks to a Podman container + https://virtualzone.de/posts/podman-multiple-networks/ + Sun, 16 Oct 2022 17:00:00 +0000 + https://virtualzone.de/posts/podman-multiple-networks/ + I&rsquo;m running my containers with Podman in Rootless Mode on Alpine for about four months now. However, an annoying problem has haunted me ever since: +When a container was connected to more than one network, outgoing connections were not working correctly. +Consider a container connected to two bridge networks: +$ podman run --rm -it \ --network net1 \ --network net2 \ alpine /bin/ash Inside the container, the two networks are connected correctly: + + + Setting up Alpine Linux with Podman + https://virtualzone.de/posts/alpine-podman/ + Sat, 25 Jun 2022 18:00:00 +0000 + https://virtualzone.de/posts/alpine-podman/ + Recently, I&rsquo;ve written a blog post on how to set up Rootless Docker on Alpine Linux. Today I&rsquo;m showing you how to set up Podman. Podman has a rootless architecture built in. It&rsquo;s an alternative to Docker, providing an almost identical command line interface. Thus, if you&rsquo;re used to Docker CLI, you won&rsquo;t have any issues working with Podman. +Podman was initially developed by RedHat and is available as an open source project. + + + Setting up Alpine Linux with Rootless Docker + https://virtualzone.de/posts/alpine-docker-rootless/ + Sun, 19 Jun 2022 15:00:00 +0000 + https://virtualzone.de/posts/alpine-docker-rootless/ + As of Docker Engine v20.10, it&rsquo;s possible to run the Docker daemon as a non-root user (Rooless mode). This is especially valuable in view of security aspects. Rootless mode mitigates potential vulnerabilities in the Docker daemon. 
+However, at the time of writing, setting up Docker in rootless mode is not straightforward if you&rsquo;re using Alpine Linux as your host system. This is why I summarized the steps to get Docket Rootless up and running on Alpine Linux. + + + Setting up a Kubernetes cluster with K3S, GlusterFS and Load Balancing + https://virtualzone.de/posts/k3s-glusterfs/ + Fri, 03 Sep 2021 11:30:03 +0000 + https://virtualzone.de/posts/k3s-glusterfs/ + I’ve recently written a tutorial which will guide you through setting up a Kubernetes cluster using K3S with virtual machines hosted at Hetzner, a German (Cloud) hosting provider. The tutorial uses K3S, a lightweight Kubernetes distribution which is perfectly suited for small VMs like Hetzner’s CX11. Additionally, the tutorial will show you how to set up Hetzner’s cloud load balancer which performs SSL offloading and forwards traffic to your Kubernetes system. + + + Back up server to OneDrive’s special App Folder + https://virtualzone.de/posts/onedrive-upload-backup/ + Thu, 02 Sep 2021 11:30:03 +0000 + https://virtualzone.de/posts/onedrive-upload-backup/ + I’m a convinced user of OneDrive Personal. Bundled with M365, it’s a cheap option to get 1 TB of cloud storage. Having plenty of cloud storage at hand, I’m also using my OneDrive to run automated backups of my servers. There are various solutions capable of uploading files to OneDrive, including rclone. However, I was looking for a solution which enables me to grant my backup script only access to one specific folder instead of my entire cloud drive – better safe than sorry. + + + Unifi USG: Multiple IP addresses on PPPoE + https://virtualzone.de/posts/unifi-usg-multiple-ip-addresses-on-pppoe/ + Mon, 16 Aug 2021 11:30:03 +0000 + https://virtualzone.de/posts/unifi-usg-multiple-ip-addresses-on-pppoe/ + My DSL provider TAL.de offers to assign a static and a dynamic IP address on PPPoE dial in. The dynamic IP address is the primary one, used for accessing the internet. Packets to the static IP address are routed to the router as well. Here’s how to set up things up on a Unifi Security Gateway (USG). +By default, USG only allows for one IP address when dialing in via PPPoE. + + + Raspberry Pi OS: Remove unnecessary packages + https://virtualzone.de/posts/raspberry-pi-os-remove-packages/ + Sun, 07 Jun 2020 11:30:03 +0000 + https://virtualzone.de/posts/raspberry-pi-os-remove-packages/ + Recently, I wrote about the availability of the 64 bit beta version of Raspberry Pi OS (formerly known as Raspbian). Unfortunately, the new 64 bit beta is only available in the Desktop variant, containing lots of packages most lightweight server systems won&rsquo;t need. There&rsquo;s no lite variant of the 64 bit beta version available at the time of writing. However, you can easily remove the Desktop packages from a running installation with two easy commands. + + + Analyze Traefik access log using InfluxDB and Grafana + https://virtualzone.de/posts/traefik-access-log-influxdb-grafana-telegraf/ + Wed, 03 Jun 2020 11:30:03 +0000 + https://virtualzone.de/posts/traefik-access-log-influxdb-grafana-telegraf/ + Traefik is a Cloud Native Edge Router, often deployed in Docker and Kubernetes environments. With little effort, you can use Telegraf to transport Traefik&rsquo;s access logs to an InfluxDB, where it can be analyzed using Grafana. +This setup contains the following elements: +Traefik v2 runs as a Docker container on a Linux host. Traefik outputs access logs in JSON format to STDOUT. 
Telegraf fetched the Traefik container&rsquo;s JSON output using the docker_log input plugin. + + + Export trainings from Endomondo as GPX files + https://virtualzone.de/posts/endomono-export-gpx/ + Mon, 01 Jun 2020 11:30:03 +0000 + https://virtualzone.de/posts/endomono-export-gpx/ + I&rsquo;ve been using Endomondo for years to track my trainings. However, I&rsquo;ve been experiencing a lot of issues with Endomondo over the last months: Sometimes it&rsquo;s not possible to log in. Other times, my trainings won&rsquo;t get synced. So it&rsquo;s time a new app. I&rsquo;ve decided to give Strava a try. With a few lines of code, I&rsquo;ve managed to export all my training data as GPX files. These can be imported to Strava, so my training history won&rsquo;t get lost. + + + Native USB boot for Raspberry Pi 4 + https://virtualzone.de/posts/usb-boot-raspberry-pi/ + Thu, 28 May 2020 11:30:03 +0000 + https://virtualzone.de/posts/usb-boot-raspberry-pi/ + Here&rsquo;s something that&rsquo;s probably been eagerly-awaited not only by me: Finally, Raspberry Pi 4 can boot directly from USB devices. Without any of the widespread workarounds which require an SD card a primrary boot medium. This is made possible by a new firmware, the so-called EEPROM. Furthermore, a new 64 bit beta version of Raspberry OS is available, too (formerly known as Raspbian). +To get started, boot your Raspberry Pi with a Raspbian or Raspberry OS installation. + + + Build Multi-Arch images on Docker Hub (Part 2) + https://virtualzone.de/posts/multi-arch-docker-images-2/ + Sat, 16 May 2020 11:30:03 +0000 + https://virtualzone.de/posts/multi-arch-docker-images-2/ + Im ersten Teil dieses Artikels habe ich Euch gezeigt, wie Ihr ein Multi-Arch-Docker-Projekt anlegt, das auf einer AMD64-Plattform auch für andere Zielarchitekturen wie bspw. ARM bauen kann. In diesem Teil zeige ich Euch, wie Ihr das Ganze im offiziellen Docker Hub zum Laufen bekommt. +Zunächst solltet Ihr ein Projekt im Docker Hub anlegen und dieses mit Eurem Quellcode-Repository verknüpfen. In meinem Fall nutze ich GitHub als Sourcecode-Repository und nutze die Build-Infrastruktur von Docker Hub. + + + Build Multi-Arch images on Docker Hub (Part 1) + https://virtualzone.de/posts/multi-arch-docker-images-1/ + Fri, 15 May 2020 11:30:03 +0000 + https://virtualzone.de/posts/multi-arch-docker-images-1/ + Multi-Arch Docker Images sind eine tolle Sache: Benutzer Eurer Images ziehen automatisch die für Ihre Architektur passende Version Eures Image – ob AMD64, ARM64 oder ARM32. Normalerweise muss man Docker Images auf der Architektur bauen, auf der sie später auch verwendet werden. Durch die Verwendung des Emulators QEMU ist es jedoch möglich, auf einer AMD64-Architektur für alle anderen Zielplattformen mitzubauen. Kombiniert mit der Auto-Build-Funktion des Docker Hub ist das eine prima Arbeitserleichterung. + + + How to let Jenkins build Docker images + https://virtualzone.de/posts/jenkins-build-docker-images/ + Sun, 11 Jun 2017 11:30:03 +0000 + https://virtualzone.de/posts/jenkins-build-docker-images/ + If you’re using Jenkins as your Continuous Integration (CI) tool and Docker to build self-contained images of your application, you may ask yourself how to automatically build Docker images during Jenkins’ build job. Here’s how I did it – with Jenkins running in a Docker container itself. +So far, I’ve used the official Jenkins Docker image (the one based on Alpine). I’ve tried some of the Docker plugins for Jenkins available out there. 
+ + + Using Let’s Encrypt / EFF’s CertBot with NGINX in Docker + https://virtualzone.de/posts/lets-encrypt-effs-certbot-with-nginx-in-docker/ + Sat, 11 Feb 2017 11:30:03 +0000 + https://virtualzone.de/posts/lets-encrypt-effs-certbot-with-nginx-in-docker/ + I’m using NGINX in a Docker Container as a front-end HTTP(s) Webserver, performing SSL termination and proxying incoming requests to various other Docker Containers and VMs. Now that I’ve switched my certificates to Let’s Encrypt, I wondered how to integrate EFF’s CertBot (which is recommended by Let’s Encrypt) with my setup. Here’s how I did it. +First, I’ve added two new volumes to my web-front-end’s Docker Compose File: +version: &#39;2&#39; services: webfrontend: container_name: webfrontend [. + + + Creating an encrypted file container on macOS + https://virtualzone.de/posts/encrypted-file-container-macos/ + Tue, 06 Dec 2016 11:30:03 +0000 + https://virtualzone.de/posts/encrypted-file-container-macos/ + Some years ago, I’ve used TrueCrypt to create encrypted containers for storing sensitive files. However, TrueCrypt is nowadays considered insecure and I’m on macOS Sierra 10.12 now – time for another solution. Luckily, macOS has integrated means for creating encrypted containers and saving sensitive information in it. You don’t need any additional software for this. As far as I know, this solution also works for previous versions of Mac OS X, like Mac OS X 10. + + + UptimeRobot: A nice free website monitoring service + https://virtualzone.de/posts/uptime-robot-website-monitoring/ + Mon, 05 Sep 2016 11:30:03 +0000 + https://virtualzone.de/posts/uptime-robot-website-monitoring/ + Over the weekend I’ve been looking around for a free service which monitors my websites. My requirement was that I want to be able to monitor both HTTP and HTTPS sites, I need support for authentication and the monitoring service should be able to check if a specific keyword exists within the watched site (instead of just assuming that a HTTP Status Code 200 is okay). Furthermore, I needed notifications in case of downtimes (Email and Pushbullet is fine for me). + + + Fix Docker not using /etc/hosts on MacOS + https://virtualzone.de/posts/fix-docker-not-using-etc-hosts-on-macos/ + Sun, 28 Aug 2016 11:30:03 +0000 + https://virtualzone.de/posts/fix-docker-not-using-etc-hosts-on-macos/ + On my MacBook with Mac OS X 10.11 (El Capitan) and Docker 1.12.0, Docker did not read manually set DNS entries from the /etc/hosts file. +When I executed “docker push” for example, this resulted in “no such hosts” errors: +Put http://shuttle:5000/v1/repositories/webfrontend/: dial tcp: lookup shuttle on 192.168.65.1:53: no such host On Mac OS, Docker is running in a host container itself. Thus, you’ll have to add DNS entries to the container’s /etc/hosts file. + + + From FHEM to OpenHAB with Homegear: Installation/Docker container + https://virtualzone.de/posts/from-fhem-to-openhab-with-homegear-installation-docker-container/ + Sun, 28 Aug 2016 11:30:03 +0000 + https://virtualzone.de/posts/from-fhem-to-openhab-with-homegear-installation-docker-container/ + For more than 2.5 years, I’ve now been running FHEM with several HomeMatic sensors and actors. Using the HM-CFG-LAN Configuration Tool as an I/O interface between FHEM and the HomeMatic devices, this setup has been running smoothly most of the time. The configuration was a bit tricky now and then, but it worked. However, OpenHAB seems to become a really good choice. Version 2 is currently available as Beta 3. 
It features a modern web interface and an easy-to-use extension manager. + + + How to set up HTTPS/SSL in WordPress behind Proxy (nginx, HAProxy, Apache, lighttpd) + https://virtualzone.de/posts/https-ssl-in-wordpress-behind-proxy/ + Sat, 27 Aug 2016 11:30:03 +0000 + https://virtualzone.de/posts/https-ssl-in-wordpress-behind-proxy/ + Today I changed the accessibility of my blog from HTTP (unencrypted) to HTTPS/SSL. My blog is running WordPress behind an nginx proxy server. However, while the pages themselves loaded successfully from HTTPS, the embedded static resources like JavaScripts, Images, CSS files etc. did not. Here’s how I fixed it. +The cause of this issue is that WordPress doesn’t seem to detect the original protocol scheme (HTTPS) correctly when running behind a proxy. + + + How to reduce PDF file size in Linux - Part 2 + https://virtualzone.de/posts/reduce-pdf-file-size-2/ + Sat, 15 Aug 2015 11:30:03 +0000 + https://virtualzone.de/posts/reduce-pdf-file-size-2/ + Several months ago, I wrote a blog post about reducing a PDF file’s size. Since then, I’ve used that technique many times. However, you may want to control the DPI (dots per inch) even more specific. Here’s how to do it: +gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.7 \ -dDownsampleColorImages=true \ -dDownsampleGrayImages=true \ -dDownsampleMonoImages=true \ -dColorImageResolution=120 \ -dGrayImageResolution=120 \ -dMonoImageResolution=120 \ -sOutputFile=output.pdf input.pdf Hint: This also works on MacOS. Just install GhostScript using Homebrew: + + + How to enable IPv6 on a SonicWall (SonicOS 5.9) using NAT + https://virtualzone.de/posts/ipv6-on-a-sonicwall/ + Thu, 20 Nov 2014 11:30:03 +0000 + https://virtualzone.de/posts/ipv6-on-a-sonicwall/ + IPv6 aimed to make Network Address Translation (NAT) obselete as there are so many addresses available that every single device can have its own worldwide unique IPv6 address. However, even with IPv6, using NAT is a very simple way to get your devices behind a Dell SonicWall connected to IPv6 services on the internet. In contrast to going without NAT, all the devices behind your SonicWall will emerge under the SonicWall’s IPv6 address. + + + How to reduce PDF file size in Linux + https://virtualzone.de/posts/reduce-pdf-file-size/ + Wed, 21 Nov 2012 11:30:03 +0000 + https://virtualzone.de/posts/reduce-pdf-file-size/ + Using a single line of GhostScript command on my Ubuntu’s terminal, I was able to reduce the size of a PDF file from 6 MB to approximately 1 MB: +gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/screen -sOutputFile=output.pdf input.pdf You can also use the following parameters for -dPDFSETTINGS instead of /screen: +/screen – Lowest quality, lowest size /ebook – Moderate quality /printer – Good quality /prepress – Best quality, highest size Update: Read Part 2 of this blog post for more detailled file size reduction settings. + + + Determining a location’s federal state using Google Maps API + https://virtualzone.de/posts/determining-a-locations-federal-state-using-google-maps-api/ + Fri, 10 Aug 2012 11:30:03 +0000 + https://virtualzone.de/posts/determining-a-locations-federal-state-using-google-maps-api/ + If you have to find out which federal state a city belongs to, you can use the Google Maps API v3. 
Here is a straightforward JavaScript code snippet: +function log(s) { $(&#39;#sysout&#39;).append(document.createTextNode(s + &#39;n&#39;)); } function getResult(results) { for (var i=0; i -1) { return result[&#39;address_components&#39;][j][&#39;short_name&#39;]; } } return &#39;&#39;; } function getCountry(result) { return extractFirst(result, &#39;country&#39;); } function getFederalState(result) { return extractFirst(result, &#39;administrative_area_level_1&#39;); } function searchLocation() { $(&#39;#sysout&#39;).empty(); var location = $(&#39;#location&#39;). + + + diff --git a/posts/ipv6-on-a-sonicwall/index.html b/posts/ipv6-on-a-sonicwall/index.html new file mode 100644 index 0000000..729a8ec --- /dev/null +++ b/posts/ipv6-on-a-sonicwall/index.html @@ -0,0 +1,8 @@ +How to enable IPv6 on a SonicWall (SonicOS 5.9) using NAT | Virtualzone Blog +

How to enable IPv6 on a SonicWall (SonicOS 5.9) using NAT

IPv6 aimed to make Network Address Translation (NAT) obsolete, as there are so many addresses available that every single device can have its own worldwide unique IPv6 address. However, even with IPv6, using NAT is a very simple way to get your devices behind a Dell SonicWall connected to IPv6 services on the internet. In contrast to going without NAT, all the devices behind your SonicWall will emerge under the SonicWall’s IPv6 address.

The following guide applies to Dell SonicWalls with SonicOS 5.9.0 (IPv6 is not supported in SonicOS 5.8 or below). A SonicWall TZ-215 is connected to an IPv6 capable router via the X1/WAN interface. There are devices connected to the SonicWall on the X0/LAN and W0/WLAN interfaces. There is also a virtual W0:V1 interface used for WLAN guests.

  1. Log in to SonicWall’s administrative web interface (the default IP address on LAN is https://192.168.168.168).

  2. Go to Network -> Interfaces and select to view IPv6.

  • Determine SonicWall’s autonomous IPv6 address for the X1/WAN interface and note it down. You’ll need it later.
  • Configure your X0/LAN interface: Check if it has a static IPv6 address starting with fd80::. Check “Enable Router Advertisement” and add a prefix fd80::, Lifetime = 1440 min.
  • Configure your W0/WLAN interface: Check if it has a static IPv6 address starting with fd81::. Check “Enable Router Advertisement” and add a prefix fd81::, Lifetime = 1440 min.
  • Do the same with other interfaces you want to enable for IPv6, such as W0:V1, X2, etc. Use fd82::, fd83::, etc. as prefixes.
  3. Go to Network -> Address Objects and select to view IPv6. Create/update the entry “WAN Primary IPv6” with the previously determined X1 IPv6 address. Set Zone = WAN, Type = Host.

  4. Go to Network -> NAT Policies and select to view IPv6.

  • Create a new NAT policy with the following settings: Original Source = Any, Translated Source = WAN Primary IPv6, Original Destination = Any, Translated Destination = Original, Original Service = Any, Translated Service = Original, Inbound Interface = X0/LAN, Outbound Interface = X1/WAN
  • Create another new NAT policy with the same settings as before, but this time, select W0/WLAN as “Inbound Interface”.
  5. On a client connected to the SonicWall, go to http://test-ipv6.com to check if your IPv6 configuration works.
+ \ No newline at end of file diff --git a/posts/jenkins-build-docker-images/index.html b/posts/jenkins-build-docker-images/index.html new file mode 100644 index 0000000..3c7fe12 --- /dev/null +++ b/posts/jenkins-build-docker-images/index.html @@ -0,0 +1,28 @@ +How to let Jenkins build Docker images | Virtualzone Blog +

How to let Jenkins build Docker images

If you’re using Jenkins as your Continuous Integration (CI) tool and Docker to build self-contained images of your application, you may ask yourself how to automatically build Docker images during Jenkins’ build job. Here’s how I did it – with Jenkins running in a Docker container itself.

So far, I’ve used the official Jenkins Docker image (the one based on Alpine). I’ve tried some of the Docker plugins for Jenkins available out there. None of them really convinced me as the setup was quite complicated. I’ve been looking for a simpler method.

To achieve this, I’ve created a custom Dockerfile which derives from the official jenkins:alpine image:

FROM jenkins:alpine
+USER root
+RUN apk update && \
+    apk add docker sudo
+RUN echo "jenkins ALL=NOPASSWD: ALL" >> /etc/sudoers
+USER jenkins
+

The user-switching is necessary to make sure that the package installation is performed as root (not as jenkins). Next, we update Alpine’s package repository and then install docker and sudo from Alpine’s official repository. sudo is required if your Docker host is configured to restrict Docker usage to specific users. After installing the packages, we allow the jenkins user to run sudo commands without password.

I’m using docker-compose to start my Jenkins container:

version: '2'
+services:
+  jenkins:
+    build: /docker/git/docker-jenkins
+    volumes:
+      - "/docker/storage/jenkins:/var/jenkins_home"
+      - "/var/run/docker.sock:/var/run/docker.sock"
+

The build line specifies the folder containing your recently created Dockerfile. I mount two volumes here:

  • The first one specifies where Jenkins stores its files.
  • The second mounts the docker.sock file. This is the key here. It allows the Docker executable in the Jenkins container to communicate with the Docker daemon running on the host.

After starting your Jenkins docker container (using “docker-compose up -d”), browse to your Jenkins URL and configure the job that should build a Docker image automatically.
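
Before configuring the build job, you can optionally confirm that the mounted Docker socket works from inside the Jenkins container, for example via Docker Compose (the service name matches the compose file above):

docker-compose exec jenkins sudo docker version
+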

Add “Execute Shell” to your “Build Steps”. Mine looks like:

sudo docker build -t docker_hub_username/image_name:latest . && \
+sudo docker login -u docker_hub_username -p docker_hub_password && \
+sudo docker push docker_hub_username/image_name:latest
+

These lines build the Docker image, log in to Docker Hub and push the recently built image.

Update:

If you want to use docker-compose from your Jenkins Docker container as well, add these lines to your Dockerfile:

RUN apk add py-pip
+RUN pip install docker-compose
+
+ \ No newline at end of file diff --git a/posts/k3s-glusterfs/index.html b/posts/k3s-glusterfs/index.html new file mode 100644 index 0000000..9f37aeb --- /dev/null +++ b/posts/k3s-glusterfs/index.html @@ -0,0 +1,7 @@ +Setting up a Kubernetes cluster with K3S, GlusterFS and Load Balancing | Virtualzone Blog +

Setting up a Kubernetes cluster with K3S, GlusterFS and Load Balancing

I’ve recently written a tutorial which will guide you through setting up a Kubernetes cluster using K3S with virtual machines hosted at Hetzner, a German (Cloud) hosting provider. The tutorial uses K3S, a lightweight Kubernetes distribution which is perfectly suited for small VMs like Hetzner’s CX11. Additionally, the tutorial will show you how to set up Hetzner’s cloud load balancer which performs SSL offloading and forwards traffic to your Kubernetes system. Optionally, you will learn how to set up a distributed, replicated file system using Kadalu, an opinionated storage system based on GlusterFS. This allows you to move pods between the nodes while still having access to the pods’ persistent data.

Read the tutorial in Hetzner’s Online Community.

+ \ No newline at end of file diff --git a/posts/lets-encrypt-effs-certbot-with-nginx-in-docker/index.html b/posts/lets-encrypt-effs-certbot-with-nginx-in-docker/index.html new file mode 100644 index 0000000..0206ba9 --- /dev/null +++ b/posts/lets-encrypt-effs-certbot-with-nginx-in-docker/index.html @@ -0,0 +1,54 @@ +Using Let’s Encrypt / EFF’s CertBot with NGINX in Docker | Virtualzone Blog +

Using Let’s Encrypt / EFF’s CertBot with NGINX in Docker

I’m using NGINX in a Docker Container as a front-end HTTP(s) Webserver, performing SSL termination and proxying incoming requests to various other Docker Containers and VMs. Now that I’ve switched my certificates to Let’s Encrypt, I wondered how to integrate EFF’s CertBot (which is recommended by Let’s Encrypt) with my setup. Here’s how I did it.

First, I’ve added two new volumes to my web-front-end’s Docker Compose File:

version: '2'
+services:
+  webfrontend:
+    container_name: webfrontend
+    [...]
+    volumes:
+      - "/etc/localtime:/etc/localtime:ro"
+      - "/etc/timezone:/etc/timezone:ro"
+      - "/docker/storage/webfrontend/letsencrypt/www:/var/www/letsencrypt"
+      - "/docker/storage/webfrontend/letsencrypt/etc:/etc/letsencrypt"
+

Next, I’ve added the following location block to each of my virtual hosts:

location /.well-known/ {
+    alias /var/www/letsencrypt/;
+}
+

I’m using the palobo/certbot Docker Image to create the certificates, using this shell script:

#!/bin/sh
+
+docker pull palobo/certbot
+
+GetCert() {
+        docker run -it \
+                --rm \
+                -v /docker/storage/webfrontend/letsencrypt/etc:/etc/letsencrypt \
+                -v /docker/storage/webfrontend/letsencrypt/lib:/var/lib/letsencrypt \
+                -v /docker/storage/webfrontend/letsencrypt/www:/var/www/.well-known \
+                palobo/certbot -t certonly --webroot -w /var/www \
+                --keep-until-expiring \
+                $@
+}
+
+echo "Getting certificates..."
+GetCert -d www.mydomain.com -d mydomain.com
+GetCert -d somedomain.net
+
+echo "Restarting Web Frontend..."
+cd /docker/containers/webfrontend
+docker-compose down
+docker-compose up -d
+cd -
+
+echo "Done"
+

The script starts CertBot in a Docker Container for each requested certificate. Because the /etc/letsencrypt and /var/www/.well-known directories are also mounted into my NGINX front-end Container (see above), the script can perform these steps:

  1. Using the webroot plugin, a random file is created under the /.well-known/acme-challenge/ directory.
  2. Let’s Encrypt can access and verify this file as the folder is aliased using the Location blocks in the NGINX config.
  3. The generated private key and public certificate are placed in /etc/letsencrypt/, which is in turn a volume of the NGINX web-frontend.

You can use the generated certificates by adding these two lines to your NGINX vhost config:

ssl_certificate     /etc/letsencrypt/live/www.mydomain.com/fullchain.pem;
+ssl_certificate_key /etc/letsencrypt/live/www.mydomain.com/privkey.pem;
+
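Since Let’s Encrypt certificates expire after 90 days, it makes sense to run the script above periodically. A cron entry could look roughly like this (a sketch; the script path and log file are hypothetical):

# Renew certificates on the 1st of every month at 03:00
0 3 1 * * /docker/containers/webfrontend/renew-certs.sh >> /var/log/letsencrypt-renew.log 2>&1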
+ \ No newline at end of file diff --git a/posts/multi-arch-docker-images-1/index.html b/posts/multi-arch-docker-images-1/index.html new file mode 100644 index 0000000..4b68410 --- /dev/null +++ b/posts/multi-arch-docker-images-1/index.html @@ -0,0 +1,66 @@ +Build Multi-Arch images on Docker Hub (Part 1) | Virtualzone Blog +

Build Multi-Arch images on Docker Hub (Part 1)

Multi-arch Docker images are a great thing: users of your images automatically pull the version that matches their architecture, whether AMD64, ARM64 or ARM32. Normally, Docker images have to be built on the architecture they will later run on. By using the QEMU emulator, however, it is possible to build for all other target platforms on an AMD64 machine. Combined with Docker Hub’s auto-build feature, this saves a lot of work. In this post, I’ll show you how it’s done.

First, create a Dockerfile for the AMD64 architecture as usual, here using an Alpine base image as an example:

FROM amd64/alpine:3.11
+...
+

Next, add one Dockerfile per target architecture. Each of them first downloads the matching QEMU binary and then copies it into the target image.

Dockerfile.arm32v6 for ARM32V6:

FROM alpine:3.11 AS qemu
+RUN apk --update add --no-cache curl
+RUN cd /tmp && \
+curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static .
+
+FROM arm32v6/alpine:3.11
+COPY --from=qemu /tmp/qemu-arm-static /usr/bin/
+...
+

Dockerfile.arm32v7 for ARM32V7:

FROM alpine:3.11 AS qemu
+RUN apk --update add --no-cache curl
+RUN cd /tmp && \
+curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static .
+
+FROM arm32v7/alpine:3.11
+COPY --from=qemu /tmp/qemu-arm-static /usr/bin/
+...
+

Dockerfile.arm64v8 for ARM64V8:

FROM alpine:3.11 AS qemu
+RUN apk --update add --no-cache curl
+RUN cd /tmp && \
+curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-aarch64.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-aarch64/qemu-aarch64-static .
+
+FROM arm64v8/alpine:3.11
+COPY --from=qemu /tmp/qemu-aarch64-static /usr/bin/
+...
+

In addition, create a file named “multi-arch-manifest.yaml”. It defines which image belongs to which architecture. This is necessary because the images built with QEMU according to the scheme above are initially listed as AMD64, which is of course not correct. The Docker manifest lets you fix that. Here is an example based on my virtualzone/compose-updater image; adjust the name to match your own:

image: virtualzone/compose-updater:latest
+manifests:
+  - image: virtualzone/compose-updater:amd64
+    platform:
+      architecture: amd64
+      os: linux
+  - image: virtualzone/compose-updater:arm32v6
+    platform:
+      architecture: arm
+      os: linux
+      variant: v6
+  - image: virtualzone/compose-updater:arm32v7
+    platform:
+      architecture: arm
+      os: linux
+      variant: v7
+  - image: virtualzone/compose-updater:arm64v8
+    platform:
+      architecture: arm64
+      os: linux
+      variant: v8
+
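At this point, the repository could be laid out roughly like this (a sketch; the hooks/ directory is added in the next step and is, as far as I know, where Docker Hub expects custom build hooks, right next to the Dockerfile):

.
├── Dockerfile
├── Dockerfile.arm32v6
├── Dockerfile.arm32v7
├── Dockerfile.arm64v8
├── multi-arch-manifest.yaml
└── hooks/
    ├── pre_build
    └── post_push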

Now only the hooks are missing. These are called by Docker Hub’s build infrastructure before and after the corresponding build steps. We need a pre-build and a post-push hook.

The pre-build hook is called before an image is built. This is where we have to download and register QEMU. The file must be named “pre_build” and have chmod 755:

#!/bin/bash
+
+BUILD_ARCH=$(echo "${DOCKERFILE_PATH}" | cut -d '.' -f 2)
+
+[ "${BUILD_ARCH}" == "Dockerfile" ] && \
+{ echo 'qemu-user-static: Registration not required for current arch'; exit 0; }
+
+docker run --rm --privileged multiarch/qemu-user-static:register --reset
+
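To illustrate what the cut does (this snippet is just for clarification, not part of the hook):

# DOCKERFILE_PATH=Dockerfile         -> BUILD_ARCH=Dockerfile -> registration is skipped
# DOCKERFILE_PATH=Dockerfile.arm32v6 -> BUILD_ARCH=arm32v6    -> QEMU gets registered
echo "Dockerfile.arm32v6" | cut -d '.' -f 2   # prints: arm32v6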

The post-push hook is called as soon as an image has been built and pushed to the repository. Here, the manifest-tool needs to be downloaded and then executed. The file must be named “post_push” and have chmod 755:

#!/bin/bash
+curl -Lo manifest-tool https://github.com/estesp/manifest-tool/releases/download/v1.0.0/manifest-tool-linux-amd64
+chmod +x manifest-tool
+./manifest-tool push from-spec multi-arch-manifest.yaml
+
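Once the post-push hook has run for the last architecture, you can verify that all architectures are linked to the tag, for example with docker manifest inspect (depending on your Docker version, the manifest subcommand may require the experimental CLI features to be enabled):

docker manifest inspect virtualzone/compose-updater:latest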

With that, your project is prepared and ready for multi-arch builds.

In the next part, I’ll show you how to configure “Automated Builds” on Docker Hub to actually run the multi-arch build.

+ \ No newline at end of file diff --git a/posts/multi-arch-docker-images-2/index.html b/posts/multi-arch-docker-images-2/index.html new file mode 100644 index 0000000..60cadae --- /dev/null +++ b/posts/multi-arch-docker-images-2/index.html @@ -0,0 +1,15 @@ +Build Multi-Arch images on Docker Hub (Part 2) | Virtualzone Blog +

Build Multi-Arch images on Docker Hub (Part 2)

In the first part of this article, I showed you how to set up a multi-arch Docker project that can build for other target architectures such as ARM on an AMD64 platform. In this part, I’ll show you how to get it all running on the official Docker Hub.

First, create a project on Docker Hub and link it to your source code repository. In my case, I use GitHub as the source code repository and rely on Docker Hub’s build infrastructure. You’ll find the corresponding settings in the “Builds” tab:

Configuring an automated build on Docker Hub.

There you can set up the build configuration. First, specify which source repository to build from:

During the configuration, the source code repository has to be specified first.

Next, create five build rules: one without an architecture tag (in my case “latest”) and four more, one per target architecture. Four because in this example we build for AMD64, ARM32V6, ARM32V7 and ARM64V8. If you want to build for other target architectures, you will of course need more or fewer build rules:
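The screenshots are not reproduced here, so as a rough sketch the five build rules could look like this (the branch name master and the exact column labels are assumptions based on Docker Hub’s build rule form):

Source Type  Source  Docker Tag  Dockerfile location  Build Context
Branch       master  latest      Dockerfile           /
Branch       master  amd64       Dockerfile           /
Branch       master  arm32v6     Dockerfile.arm32v6   /
Branch       master  arm32v7     Dockerfile.arm32v7   /
Branch       master  arm64v8     Dockerfile.arm64v8   /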

The matching build rules for the four target architectures.

The trick is that all the other architecture images get assigned to the “untagged” image. That way, a user who runs “docker run” or “docker pull” on your image automatically gets the image that matches their architecture, without having to name the platform explicitly. A Mac thus pulls the AMD64 image, while Raspbian loads the ARM32V7 image and a Raspberry Pi 4 running 64-bit Ubuntu gets the ARM64V8 image. All without any further effort.

That’s already it for the configuration. A click on “Save and Build” queues the pending builds (five in this case). In my experience, even for simple images it can easily take a few hours on Docker Hub’s infrastructure until all images have been built. You can track what is already done and what is still pending under “Recent Builds”.

The Recent Builds view shows which automated builds are still pending and which have already completed.

You will see that the first builds are marked as failed. That’s perfectly normal! A look into the build logs reveals the understandable reason: after each build, the multi-arch-manifest.yaml Docker manifest is applied. Until the last target architecture image has been built, not all architecture images can be added to the “untagged” image, so the build appears to fail.

No reason to worry: the error “failed with error: manifest unknown: manifest unknown”.

In fact, the respective image has (hopefully) been built and pushed successfully. Only with the last multi-arch image can the manifest-tool successfully do its job and link the architectures. So don’t let this unsettle you and keep an eye on the build logs.

Have fun with your multi-arch images on Docker Hub!

+ \ No newline at end of file diff --git a/posts/onedrive-upload-backup/index.html b/posts/onedrive-upload-backup/index.html new file mode 100644 index 0000000..2c34ed0 --- /dev/null +++ b/posts/onedrive-upload-backup/index.html @@ -0,0 +1,36 @@ +Back up server to OneDrive’s special App Folder | Virtualzone Blog +

Back up server to OneDrive’s special App Folder

I’m a happy user of OneDrive Personal. Bundled with M365, it’s a cheap option to get 1 TB of cloud storage. Having plenty of cloud storage at hand, I’m also using my OneDrive to run automated backups of my servers. There are various solutions capable of uploading files to OneDrive, including rclone. However, I was looking for a solution which lets me grant my backup script access to only one specific folder instead of my entire cloud drive – better safe than sorry. I couldn’t find any. This is why I developed OneDrive Uploader. Here is what it can do for you and how to use it.

Microsoft OneDrive supports so-called “special folders”, which include the “App Folder” (App Root). This is a directory intended for applications to store their own files, without being able to access other files in your OneDrive. OneDrive Uploader supports these special folders, restricting the access of your backup script to its own files. However, you can also use OneDrive Uploader to upload and download files from other locations as long as you grant it access.

I’ve written OneDrive Uploader in Go, which is a great programming language that compiles natively to various operating systems and platforms. As a result, OneDrive Uploader is available for Linux, MacOS and Windows and supports AMD64, ARM and ARM64.

To get started with OneDrive Uploader, you’ll need to create an access token in Microsoft’s Azure Portal. To do this, follow these steps:

  1. Log in to the Microsoft Azure Portal.
  2. Navigate to “App registrations”.
  3. Create a new application with supported account type “Accounts in any organizational directory (Any Azure AD directory – Multitenant) and personal Microsoft accounts (e.g. Skype, Xbox)” and the following Web redirect URL: http://localhost:53682/
  4. Copy the Application (client) ID.
  5. Navigate to “Certificates & secrets”, create a new Client secret and copy the Secret Value (not the ID).
  6. Navigate to “API permissions”, click “Add permission”, choose “Microsoft Graph”, select “Delegated”. Then search and add the required permissions:
  • Access to App Folder only: Files.ReadWrite.AppFolder, offline_access, User.Read
  • Access to entire OneDrive: Files.Read, Files.ReadWrite, Files.Read.All, Files.ReadWrite.All, offline_access, User.Read

Great! You’ve now created an Azure App which you can use to grant OneDrive Uploader access to your OneDrive. Don’t worry, the App is not visible anywhere, nor can anyone access your OneDrive.

You can now download the OneDrive Uploader executable for your operating system and platform. You can either choose the matching binary from the GitHub releases page, or simply execute this command:

curl -s -L https://git.io/JRie0 | bash

Now create a configuration file named config.json. Replace the client ID and the client secret with the values from your Azure app:

{
+    "client_id": "<client id from azure app>",
+    "client_secret": "<client secret from azure app>",
+    "scopes": [
+        "Files.ReadWrite.AppFolder",
+        "offline_access"
+    ],
+    "redirect_uri": "http://localhost:53682/",
+    "secret_store": "./secret.json",
+    "root": "/drive/special/approot"
+}
+

As you can see in the config.json above, we specify the special app folder as OneDrive Uploader’s root directory. The two scopes grant access to this app folder and allow the necessary access token to be renewed automatically without user interaction (which is essential for unattended backups).

Perform the log in using this command and follow the instructions printed on your console:

onedrive-uploader login

You can now use OneDrive Uploader. To view the available commands, refer to the project’s GitHub page or type:

onedrive-uploader help

To use OneDrive Uploader in your backup script, you can be guided by this shell script snippet:

#!/bin/bash
+DIR_FORMAT="%Y-%m-%d" # YYYY-MM-DD format
+TODAY=`date +"${DIR_FORMAT}"`
+TARGET=/mnt/backup/$TODAY
+UPLOADER="/usr/local/bin/onedrive-uploader -c /home/username/backup-script/config.json"
+

Perform your local backup and store it in ${TARGET}

echo "Uploading..."
+cd ${TARGET}
+${UPLOADER} mkdir ${TODAY}
+for i in `ls`; do
+    ${UPLOADER} upload $i ${TODAY};
+    HASH_REMOTE=`${UPLOADER} sha256 $TODAY/$i | tr '[A-Z]' '[a-z]'`
+    HASH_LOCAL=`sha256sum $i | tr '[A-Z]' '[a-z]' | awk '{ print $1 }'`
+    if [[ "$HASH_REMOTE" != "$HASH_LOCAL" ]]; then
+        echo "Hashes for '$i' do not match! Remote = $HASH_REMOTE vs. Local = $HASH_LOCAL"
+    fi
+done
+

This bash script uploads all files from the local directory $TARGET to its app folder in your OneDrive. It creates a sub-folder named after the current date (e.g. 2021-08-30). For each file, after the upload has finished, it checks the SHA256 hash so that you can be sure the upload is intact.
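To run such a backup unattended, you could schedule the script with cron (a sketch; the script path and log file are hypothetical):

# Run the backup script every night at 02:30
30 2 * * * /home/username/backup-script/backup.sh >> /var/log/onedrive-backup.log 2>&1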

+ \ No newline at end of file diff --git a/posts/openrc-podman-kube-play/index.html b/posts/openrc-podman-kube-play/index.html new file mode 100644 index 0000000..25b2098 --- /dev/null +++ b/posts/openrc-podman-kube-play/index.html @@ -0,0 +1,66 @@ +OpenRC Script for 'podman kube play' | Virtualzone Blog +

OpenRC Script for 'podman kube play'

In June, I wrote about my approach to starting and stopping Podman Pods using OpenRC scripts on Alpine Linux. However, that approach had two major drawbacks: First, the pods were started in the foreground, causing OpenRC to wait for all pod initialization tasks to complete. If an image needed to be pulled first, this could lead to long delays, significantly increasing system startup times. Secondly, requesting the status of a previously started pod always reported “crashed”. This is due to the fact that OpenRC is not able to identify the exact process spawned by Podman.

I’ve therefore improved my OpenRC startup script to be used with podman kube play YAML files. In this post, I’m presenting my results. If you have further improvements, please let me know.

What does not work

The podman pod create command features the --infra-conmon-pidfile=file option. This option writes the PID of the infra container’s conmon process to a file.

Using this option, it would be easy to let OpenRC identify the status of a Pod and start the Pod in the background:

pidfile="/run/${RC_SVCNAME}.pid"
+command_background=true
+

Unfortunately, the --infra-conmon-pidfile=file option is not (yet?) available when using the podman kube play command.

I’ve tried to discover the infra container’s PID file using the podman inspect command and using this value dynamically in my OpenRC scripts:

podman inspect --format '{{ .PidFile }}' somecontainer-infra
+

However, OpenRC doesn’t seem happy with PID files appearing and disappearing dynamically.

What does work

I’ve created a pod script which is sourced by multiple pod-* scripts.

The pod script includes functions for getting the status of a Pod and stopping a Pod. The script assumes that your Pod’s Kubernetes YAML is located at /home/${command_user}/pods/${pod_name}/pod.yaml.
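For reference, a minimal pod.yaml for such a Pod could look like this (a sketch; the Pod name xyz and the nginx image are just illustrative examples, not from my actual setup):

apiVersion: v1
kind: Pod
metadata:
  name: xyz
spec:
  containers:
    - name: web
      image: docker.io/library/nginx:alpine
      ports:
        - containerPort: 80
          hostPort: 8080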

/home/your-user/pods/init.d/pod

#!/sbin/openrc-run
+
+name=$RC_SVCNAME
+command="/usr/bin/podman"
+networks_=''
+for n in ${pod_networks}; do
+	networks_="${networks_} --network $n";
+done
+command_args="play kube ${networks_} /home/${command_user}/pods/${pod_name}/pod.yaml >/dev/null 2>&1 &"
+
+depend() {
+	after network-online 
+	use net 
+}
+
+cleanup() {
+	/sbin/runuser -u ${command_user} -- ${command} pod exists ${pod_name}
+	result=$?
+	if [ $result -eq 0 ]; then
+	        /sbin/runuser -u ${command_user} -- ${command} pod stop ${pod_name} > /dev/null
+        	/sbin/runuser -u ${command_user} -- ${command} pod rm ${pod_name} > /dev/null
+	fi
+}
+
+start_pre() {
+	cleanup
+}
+
+stop() {
+	ebegin "Stopping $RC_SVCNAME"
+	cleanup
+	eend $?
+}
+
+status() {
+	/sbin/runuser -u ${command_user} -- ${command} pod exists ${pod_name} 2> /dev/null
+	result=$?
+	if [ $result -eq 0 ]; then
+		einfo "status: started"
+		return 0
+	else
+		einfo "status: stopped"
+		return 3
+	fi
+}
+

The script for controlling a Pod “xyz” can look like this.

  • command_user specifies the user running the Pod
  • pod_name sets the Pod’s name
  • pod_networks sets a space-separated list of networks the Pod should be connected to

/home/your-user/pods/init.d/pod-xyz

#!/sbin/openrc-run
+
+command_user="your-user"
+pod_name=xyz
+pod_networks='network1 network2 ...'
+
+source "/home/${command_user}/pods/init.d/pod"
+

Using root (e.g. via doas or sudo), you can then create a symlink in /etc/init.d and add the Pod to the default runlevel so it starts at boot:

cd /etc/init.d
+ln -s /home/<user>/pods/init.d/pod-xyz
+rc-update add pod-xyz
+

Use rc-service to start and stop your Pod:

doas rc-service pod-xyz start
+
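Checking the status and stopping the Pod work the same way (the status output comes from the status() function in the pod script above):

doas rc-service pod-xyz status
doas rc-service pod-xyz stop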
+ \ No newline at end of file diff --git a/posts/page/1/index.html b/posts/page/1/index.html new file mode 100644 index 0000000..27c4113 --- /dev/null +++ b/posts/page/1/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/posts/ + \ No newline at end of file diff --git a/posts/page/2/index.html b/posts/page/2/index.html new file mode 100644 index 0000000..23a2be1 --- /dev/null +++ b/posts/page/2/index.html @@ -0,0 +1,15 @@ +Posts | Virtualzone Blog +

Export trainings from Endomondo as GPX files

I’ve been using Endomondo for years to track my trainings. However, I’ve been experiencing a lot of issues with Endomondo over the last months: Sometimes it’s not possible to log in. Other times, my trainings won’t get synced. So it’s time for a new app. I’ve decided to give Strava a try. With a few lines of code, I’ve managed to export all my training data as GPX files. These can be imported to Strava, so my training history won’t get lost....

June 1, 2020 · 2 min · 341 words · Heiner

Native USB boot for Raspberry Pi 4

Here’s something that’s probably been eagerly awaited not only by me: Finally, Raspberry Pi 4 can boot directly from USB devices, without any of the widespread workarounds which require an SD card as a primary boot medium. This is made possible by a new firmware, the so-called EEPROM. Furthermore, a new 64 bit beta version of Raspberry OS is available, too (formerly known as Raspbian). To get started, boot your Raspberry Pi with a Raspbian or Raspberry OS installation....

May 28, 2020 · 2 min · 404 words · Heiner

Build Multi-Arch images on Docker Hub (Part 2)

In the first part of this article, I showed you how to set up a multi-arch Docker project that can build for other target architectures such as ARM on an AMD64 platform. In this part, I’ll show you how to get it all running on the official Docker Hub. First, create a project on Docker Hub and link it to your source code repository. In my case, I use GitHub as the source code repository and rely on Docker Hub’s build infrastructure....

May 16, 2020 · 3 min · 443 words · Heiner

Build Multi-Arch images on Docker Hub (Part 1)

Multi-arch Docker images are a great thing: users of your images automatically pull the version that matches their architecture, whether AMD64, ARM64 or ARM32. Normally, Docker images have to be built on the architecture they will later run on. By using the QEMU emulator, however, it is possible to build for all other target platforms on an AMD64 machine. Combined with Docker Hub’s auto-build feature, this saves a lot of work....

May 15, 2020 · 3 min · 502 words · Heiner

How to let Jenkins build Docker images

If you’re using Jenkins as your Continuous Integration (CI) tool and Docker to build self-contained images of your application, you may ask yourself how to automatically build Docker images during Jenkins’ build job. Here’s how I did it – with Jenkins running in a Docker container itself. So far, I’ve used the official Jenkins Docker image (the one based on Alpine). I’ve tried some of the Docker plugins for Jenkins available out there....

June 11, 2017 · 2 min · 370 words · Heiner

Using Let’s Encrypt / EFF’s CertBot with NGINX in Docker

I’m using NGINX in a Docker Container as a front-end HTTP(s) Webserver, performing SSL termination and proxying incoming requests to various other Docker Containers and VMs. Now that I’ve switched my certificates to Let’s Encrypt, I wondered how to integrate EFF’s CertBot (which is recommended by Let’s Encrypt) with my setup. Here’s how I did it. First, I’ve added two new volumes to my web-front-end’s Docker Compose File: version: '2' services: webfrontend: container_name: webfrontend [....

February 11, 2017 · 2 min · 287 words · Heiner

Creating an encrypted file container on macOS

Some years ago, I’ve used TrueCrypt to create encrypted containers for storing sensitive files. However, TrueCrypt is nowadays considered insecure and I’m on macOS Sierra 10.12 now – time for another solution. Luckily, macOS has integrated means for creating encrypted containers and saving sensitive information in it. You don’t need any additional software for this. As far as I know, this solution also works for previous versions of Mac OS X, like Mac OS X 10....

December 6, 2016 · 2 min · 356 words · Heiner

UptimeRobot: A nice free website monitoring service

Over the weekend I’ve been looking around for a free service which monitors my websites. My requirement was that I want to be able to monitor both HTTP and HTTPS sites, I need support for authentication and the monitoring service should be able to check if a specific keyword exists within the watched site (instead of just assuming that a HTTP Status Code 200 is okay). Furthermore, I needed notifications in case of downtimes (Email and Pushbullet is fine for me)....

September 5, 2016 · 1 min · 120 words · Heiner

Fix Docker not using /etc/hosts on MacOS

On my MacBook with Mac OS X 10.11 (El Capitan) and Docker 1.12.0, Docker did not read manually set DNS entries from the /etc/hosts file. When I executed “docker push” for example, this resulted in “no such hosts” errors: Put http://shuttle:5000/v1/repositories/webfrontend/: dial tcp: lookup shuttle on 192.168.65.1:53: no such host On Mac OS, Docker is running in a host container itself. Thus, you’ll have to add DNS entries to the container’s /etc/hosts file....

August 28, 2016 · 1 min · 163 words · Heiner

From FHEM to OpenHAB with Homegear: Installation/Docker container

For more than 2.5 years, I’ve now been running FHEM with several HomeMatic sensors and actors. Using the HM-CFG-LAN Configuration Tool as an I/O interface between FHEM and the HomeMatic devices, this setup has been running smoothly most of the time. The configuration was a bit tricky now and then, but it worked. However, OpenHAB seems to become a really good choice. Version 2 is currently available as Beta 3. It features a modern web interface and an easy-to-use extension manager....

August 28, 2016 · 6 min · 1084 words · Heiner
+ \ No newline at end of file diff --git a/posts/page/3/index.html b/posts/page/3/index.html new file mode 100644 index 0000000..668109a --- /dev/null +++ b/posts/page/3/index.html @@ -0,0 +1,12 @@ +Posts | Virtualzone Blog +

How to set up HTTPS/SSL in WordPress behind Proxy (nginx, HAProxy, Apache, lighttpd)

Today I changed the accessibility of my blog from HTTP (unencrypted) to HTTPS/SSL. My blog is running WordPress behind an nginx proxy server. However, while the pages themselves loaded successfully from HTTPS, the embedded static resources like JavaScripts, Images, CSS files etc. did not. Here’s how I fixed it. The cause of this issue is that WordPress doesn’t seem to detect the original protocol scheme (HTTPS) correctly when running behind a proxy....

August 27, 2016 · 2 min · 255 words · Heiner

How to reduce PDF file size in Linux - Part 2

Several months ago, I wrote a blog post about reducing a PDF file’s size. Since then, I’ve used that technique many times. However, you may want to control the DPI (dots per inch) even more specifically. Here’s how to do it: gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.7 \ -dDownsampleColorImages=true \ -dDownsampleGrayImages=true \ -dDownsampleMonoImages=true \ -dColorImageResolution=120 \ -dGrayImageResolution=120 \ -dMonoImageResolution=120 \ -sOutputFile=output.pdf input.pdf Hint: This also works on MacOS. Just install GhostScript using Homebrew:...

August 15, 2015 · 1 min · 75 words · Heiner

How to enable IPv6 on a SonicWall (SonicOS 5.9) using NAT

IPv6 aimed to make Network Address Translation (NAT) obsolete as there are so many addresses available that every single device can have its own worldwide unique IPv6 address. However, even with IPv6, using NAT is a very simple way to get your devices behind a Dell SonicWall connected to IPv6 services on the internet. In contrast to going without NAT, all the devices behind your SonicWall will emerge under the SonicWall’s IPv6 address....

November 20, 2014 · 2 min · 372 words · Heiner

How to reduce PDF file size in Linux

Using a single GhostScript command in my Ubuntu terminal, I was able to reduce the size of a PDF file from 6 MB to approximately 1 MB: gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/screen -sOutputFile=output.pdf input.pdf You can also use the following parameters for -dPDFSETTINGS instead of /screen: /screen – Lowest quality, lowest size /ebook – Moderate quality /printer – Good quality /prepress – Best quality, highest size Update: Read Part 2 of this blog post for more detailed file size reduction settings....

November 21, 2012 · 1 min · 98 words · Heiner

Determining a location’s federal state using Google Maps API

If you have to find out which federal state a city belongs to, you can use the Google Maps API v3. Here is a straightforward JavaScript code snippet: +function log(s) { $('#sysout').append(document.createTextNode(s + 'n')); } function getResult(results) { for (var i=0; i -1) { return result['address_components'][j]['short_name']; } } return ''; } function getCountry(result) { return extractFirst(result, 'country'); } function getFederalState(result) { return extractFirst(result, 'administrative_area_level_1'); } function searchLocation() { $('#sysout').empty(); var location = $('#location')....

August 10, 2012 · 1 min · 162 words · Heiner
+ \ No newline at end of file diff --git a/posts/podman-multiple-networks/index.html b/posts/podman-multiple-networks/index.html new file mode 100644 index 0000000..575e546 --- /dev/null +++ b/posts/podman-multiple-networks/index.html @@ -0,0 +1,37 @@ +Connecting multiple networks to a Podman container | Virtualzone Blog +

Connecting multiple networks to a Podman container

I’ve been running my containers with Podman in rootless mode on Alpine for about four months now. However, an annoying problem has haunted me ever since:

When a container was connected to more than one network, outgoing connections were not working correctly.

Consider a container connected to two bridge networks:

$ podman run --rm -it \
+      --network net1 \
+      --network net2 \
+      alpine /bin/ash
+

Inside the container, the two networks are connected correctly:

# ip a
+1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1000
+    inet 127.0.0.1/8 scope host lo
+       valid_lft forever preferred_lft forever
+2: eth1@if17: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue state UP qlen 1000
+    inet 10.89.0.7/24 brd 10.89.0.255 scope global eth1
+4: eth0@if18: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue state UP qlen 1000
+    inet 10.89.2.6/24 brd 10.89.2.255 scope global eth0
+

However, pinging a host on the internet only works using one of the two network interfaces:

# ping -I eth0 8.8.8.8
+PING 8.8.8.8 (8.8.8.8): 56 data bytes
+64 bytes from 8.8.8.8: seq=0 ttl=42 time=4.075 ms
+
# ping -I eth1 8.8.8.8
+PING 8.8.8.8 (8.8.8.8): 56 data bytes
+...
+2 packets transmitted, 0 packets received, 100% packet loss
+

The solution

The solution is quite simple: You will need to set net.ipv4.conf.all.rp_filter to 2.

On my Alpine system, rp_filter was set to 1 by default. This setting controls reverse path (source address) validation in the kernel’s IPv4 network stack. 1 means “strict”, whereas 2 means “loose”.

You can try the solution temporarily by running:

# sysctl -w net.ipv4.conf.all.rp_filter=2
+

To survive the next reboot, persist the setting by adding it to /etc/sysctl.conf:

# echo "net.ipv4.conf.all.rp_filter=2" >> /etc/sysctl.conf
+
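To double-check the active value afterwards (a quick verification step, not part of the original instructions):

# sysctl net.ipv4.conf.all.rp_filter
net.ipv4.conf.all.rp_filter = 2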

For more information, you can take a look at this article.

+ \ No newline at end of file diff --git a/posts/raspberry-pi-os-remove-packages/index.html b/posts/raspberry-pi-os-remove-packages/index.html new file mode 100644 index 0000000..eaf87d6 --- /dev/null +++ b/posts/raspberry-pi-os-remove-packages/index.html @@ -0,0 +1,18 @@ +Raspberry Pi OS: Remove unnecessary packages | Virtualzone Blog +

Raspberry Pi OS: Remove unnecessary packages

Recently, I wrote about the availability of the 64 bit beta version of Raspberry Pi OS (formerly known as Raspbian). Unfortunately, the new 64 bit beta is only available in the Desktop variant, containing lots of packages most lightweight server systems won’t need. There’s no lite variant of the 64 bit beta version available at the time of writing. However, you can easily remove the Desktop packages from a running installation with two easy commands.

You can download Raspberry Pi OS’ 64 bit beta version from the download directory on Raspberry Pi’s website. The Raspberry Pi Imager makes it easy to burn the image to an SD card or external USB drive.

Enter the following commands (at your own risk!) to remove the Desktop packages after your Pi has started from the newly written card:

sudo apt-get remove --purge \
+    x11-* \
+    gnome-* \
+    desktop-base \
+    *-theme \
+    dconf-gsettings-backend \
+    gsettings-desktop-schemas \
+    gtk- \
+    gtk2-* \
+    xdg-*
+sudo apt-get autoremove --purge
+
+ \ No newline at end of file diff --git a/posts/reduce-pdf-file-size-2/index.html b/posts/reduce-pdf-file-size-2/index.html new file mode 100644 index 0000000..feb0c75 --- /dev/null +++ b/posts/reduce-pdf-file-size-2/index.html @@ -0,0 +1,19 @@ +How to reduce PDF file size in Linux - Part 2 | Virtualzone Blog +

How to reduce PDF file size in Linux - Part 2

Several months ago, I wrote a blog post about reducing a PDF file’s size. Since then, I’ve used that technique many times. However, you may want to control the DPI (dots per inch) even more specifically. Here’s how to do it:

gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.7 \
+-dDownsampleColorImages=true \
+-dDownsampleGrayImages=true \
+-dDownsampleMonoImages=true \
+-dColorImageResolution=120 \
+-dGrayImageResolution=120 \
+-dMonoImageResolution=120 \
+-sOutputFile=output.pdf input.pdf
+

Hint: This also works on MacOS. Just install GhostScript using Homebrew:

brew install ghostscript
+
+ \ No newline at end of file diff --git a/posts/reduce-pdf-file-size/index.html b/posts/reduce-pdf-file-size/index.html new file mode 100644 index 0000000..d6e0c00 --- /dev/null +++ b/posts/reduce-pdf-file-size/index.html @@ -0,0 +1,15 @@ +How to reduce PDF file size in Linux | Virtualzone Blog +

How to reduce PDF file size in Linux

Using a single GhostScript command in my Ubuntu terminal, I was able to reduce the size of a PDF file from 6 MB to approximately 1 MB:

gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/screen -sOutputFile=output.pdf input.pdf
+

You can also use the following parameters for -dPDFSETTINGS instead of /screen:

  • /screen – Lowest quality, lowest size
  • /ebook – Moderate quality
  • /printer – Good quality
  • /prepress – Best quality, highest size

Update: Read Part 2 of this blog post for more detailed file size reduction settings.

Hint: This also works on MacOS. Just install GhostScript using Homebrew:

brew install ghostscript
+
+ \ No newline at end of file diff --git a/posts/traefik-access-log-influxdb-grafana-telegraf/index.html b/posts/traefik-access-log-influxdb-grafana-telegraf/index.html new file mode 100644 index 0000000..6c899bb --- /dev/null +++ b/posts/traefik-access-log-influxdb-grafana-telegraf/index.html @@ -0,0 +1,55 @@ +Analyze Traefik access log using InfluxDB and Grafana | Virtualzone Blog +

Analyze Traefik access log using InfluxDB and Grafana

Traefik is a Cloud Native Edge Router, often deployed in Docker and Kubernetes environments. With little effort, you can use Telegraf to transport Traefik’s access logs to an InfluxDB, where they can be analyzed using Grafana.

This setup contains the following elements:

  • Traefik v2 runs as a Docker container on a Linux host.
  • Traefik outputs access logs in JSON format to STDOUT.
  • Telegraf fetches the Traefik container’s JSON output using the docker_log input plugin.
  • To work with the JSON output in InfluxDB and Grafana, we need to convert it into distinct fields using Telegraf’s parser processor plugin. Otherwise, only numeric fields are kept as metric values; string values are discarded by default.
  • We’re using Telegraf’s “influxdb” output plugin to write the metrics to InfluxDB.

Configure Traefik

traefik.yml contains the following settings:

accessLog:
+  format: json
+  fields:
+    headers:
+      defaultMode: drop
+      names:
+          User-Agent: keep
+          Content-Type: keep
+

This makes Traefik output access logs in JSON format. JSON can easily be processed by machines, so we don’t have to deal with GROK patterns or similar workarounds. Furthermore, request headers get dropped, but “User-Agent” and “Content-Type” are kept.

Configure Telegraf

My telegraf.conf looks like this:

[[inputs.docker_log]]
+    endpoint = "unix:///var/run/docker.sock"
+    from_beginning = false
+    container_name_include = ["traefik_traefik_1"]
+
+
+[[processors.parser]]
+    namepass = ["docker_log"]
+    parse_fields = ["message"]
+    merge = "override"
+    data_format = "json"
+    json_string_fields = [
+        "ClientHost",
+        "RequestAddr",
+        "RequestCount",
+        "RequestHost",
+        "RequestMethod",
+        "RequestPath",
+        "RequestProtocol",
+        "RequestScheme",
+        "downstream_Content-Type",
+        "request_User-Agent",
+        "time"
+    ]
+    json_time_key = "time"
+    json_time_format = "2006-01-02T15:04:05Z"
+    json_timezone = "UTC"
+
+
+[[outputs.influxdb]]
+    urls = ["http://influxdb:8086"]
+    database = "telegraf"
+    username = "telegraf"
+    password = "..."
+

Important settings are:

  • container_name_include specifies from which container instance the logs are collected. It’s our Traefik instance.
  • parse_fields specifies which input field is to be processed. It’s the field “message”.
  • json_string_fields specifies which values from the read JSON object are to be written to InfluxDB as string fields. If not specified, all non-numeric fields are dropped.
  • json_time_key and the other json_time settings specify in which JSON keys and in which date-time format the timestamps for our log entries are contained.
  • The output plugin needs to be configured so that Telegraf can connect to the InfluxDB.
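Once data is flowing, a Grafana panel backed by InfluxDB could use a query along these lines to count requests per hour (a sketch; the measurement name docker_log follows from the input plugin together with merge = "override", but treat the exact names as assumptions for your own setup):

SELECT count("RequestPath") FROM "docker_log" WHERE $timeFilter GROUP BY time(1h)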

This is just meant to be an example. Please mind applicable law when storing, processing and using the access logs – such as GDPR in the European Union.

+ \ No newline at end of file diff --git a/posts/unifi-usg-multiple-ip-addresses-on-pppoe/index.html b/posts/unifi-usg-multiple-ip-addresses-on-pppoe/index.html new file mode 100644 index 0000000..2ec3f15 --- /dev/null +++ b/posts/unifi-usg-multiple-ip-addresses-on-pppoe/index.html @@ -0,0 +1,48 @@ +Unifi USG: Multiple IP addresses on PPPoE | Virtualzone Blog +

Unifi USG: Multiple IP addresses on PPPoE

My DSL provider TAL.de offers to assign a static and a dynamic IP address on PPPoE dial-in. The dynamic IP address is the primary one, used for accessing the internet. Packets to the static IP address are routed to the router as well. Here’s how to set things up on a Unifi Security Gateway (USG).

By default, USG only allows for one IP address when dialing in via PPPoE. If you want to forward packets received on an additional IP address, you can’t use the Port Forwarding functionality provided in the Unifi Network Controller. If you do, such packets will still be dropped.

Instead, you have to set up SNAT and DNAT firewall rules using a config.gateway.json file. Here’s how to do that on your USG to get your second (third, fourth, …) IP address working:

1. Create (or extend) a config.gateway.json file

Place a file named config.gateway.json in the following path of your Unifi Network controller:

/unifi/data/sites/default/

You might need to replace “default” with the correct label of the affected site.

2. Add DNAT and SNAT rules to the config.gateway.json file

In the following example, TCP packets received on port 443 of IP address public.static.ip.address will be forwarded to port 443 of IP address private.internal.ip.address. Replace the values to match your demands.

{
+    "service": {
+        "nat": {
+            "rule": {
+                "3000": {
+                    "description": "DNAT public.static.ip.address TCP/443 to private.internal.ip.address",
+                    "destination": {
+                        "address": "public.static.ip.address",
+                        "port": "443"
+                    },
+                    "inbound-interface": "pppoe2",
+                    "inside-address": {
+                        "address": "private.internal.ip.address",
+                        "port": "443"
+                    },
+                    "log": "disable",
+                    "protocol": "tcp",
+                    "type": "destination"
+                },
+                "5000": {
+                    "description": "SNAT private.internal.ip.address TCP/443 to public.static.ip.address",
+                    "log": "disable",
+                    "outbound-interface": "pppoe2",
+                    "outside-address": {
+                        "address": "public.static.ip.address",
+                        "port": "443"
+                    },
+                    "protocol": "tcp",
+                    "source": {
+                        "address": "private.internal.ip.address",
+                        "port": "443"
+                    },
+                    "type": "source"
+                }
+            }
+        }
+    }
+}
+

3. Trigger a provision of your new config to your USG

Log in to your Unifi Network Controller. Navigate to “Devices” and choose your Unifi Security Gateway. Go to “Device”, select “Manage” and click “Trigger Provision”.


4. Test your configuration

From a system outside your network, try to reach the configured port by using nmap, curl or a web browser.
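For example (a sketch; replace the placeholder with your actual public static IP address):

nmap -p 443 public.static.ip.address
curl -vk https://public.static.ip.address/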

+ \ No newline at end of file diff --git a/posts/uptime-robot-website-monitoring/index.html b/posts/uptime-robot-website-monitoring/index.html new file mode 100644 index 0000000..2c4efd4 --- /dev/null +++ b/posts/uptime-robot-website-monitoring/index.html @@ -0,0 +1,7 @@ +UptimeRobot: A nice free website monitoring service | Virtualzone Blog +

UptimeRobot: A nice free website monitoring service

Over the weekend I’ve been looking around for a free service which monitors my websites. My requirements: I want to be able to monitor both HTTP and HTTPS sites, I need support for authentication, and the monitoring service should be able to check whether a specific keyword exists within the watched site (instead of just assuming that an HTTP status code 200 is okay). Furthermore, I needed notifications in case of downtimes (email and Pushbullet are fine for me).

I discovered UptimeRobot. The service fulfils all of my requirements and allows for checks every 5 minutes – for free. Not a bad offer. As far as I can tell, everything works fine and I’m quite happy with it.

+ \ No newline at end of file diff --git a/posts/usb-boot-raspberry-pi/index.html b/posts/usb-boot-raspberry-pi/index.html new file mode 100644 index 0000000..4aaa950 --- /dev/null +++ b/posts/usb-boot-raspberry-pi/index.html @@ -0,0 +1,19 @@ +Native USB boot for Raspberry Pi 4 | Virtualzone Blog +

Native USB boot for Raspberry Pi 4

Here’s something that’s probably been eagerly awaited not only by me: Finally, Raspberry Pi 4 can boot directly from USB devices, without any of the widespread workarounds which require an SD card as a primary boot medium. This is made possible by a new firmware, the so-called EEPROM. Furthermore, a new 64 bit beta version of Raspberry Pi OS (formerly known as Raspbian) is available, too.

To get started, boot your Raspberry Pi with a Raspbian or Raspberry Pi OS installation. This is required to flash the new beta firmware.

Download Raspberry OS 64 bit

You can find the new 64 bit beta version of Raspberry OS in a forum post. Download the ZIP file. Install Raspberry Pi Imager. I’ve installed the imager using Homebrew:

brew cask install raspberry-pi-imager
+

Prepare an SD card with Raspberry OS

Note: This step is only required if your Raspberry Pi is not already running Raspbian or Raspberry Pi OS! We need Raspberry Pi OS to flash the new firmware.

Open Raspberry Pi Imager and flash the downloaded image to an SD card.

Afterwards, boot your Pi from this new SD card.

Flash EEPROM

EEPROM (electrically erasable programmable read-only memory) is your Raspberry Pi’s firmware – sort of a basic system.

You can find the changelog for the Raspberry Pi EEPROM on GitHub. The beta versions as of May 15th 2020 contain the required functionalities to boot from a USB drive – i.e. an SSD.

Install the required update tool on your Pi:

sudo apt update
+sudo apt upgrade
+sudo apt install rpi-eeprom
+

To flash the beta firmware (at your own risk!), switch to the beta channel by modifying the following file:

sudo nano /etc/default/rpi-eeprom-update
+

Change the line FIRMWARE_RELEASE_STATUS=”critical” to:

FIRMWARE_RELEASE_STATUS="beta"
+

Upgrade the firmware and reboot:

sudo rpi-eeprom-update -a
+

After the reboot, the following command should state that the new beta firmware has been installed:

sudo rpi-eeprom-update
+

Alternatively, you can flash the new EEPROM version by downloading it from the GitHub repository and running the following command:

sudo rpi-eeprom-update -d -f /tmp/pieeprom-2020-05-27.bin
+

Prepare an SSD for USB boot

To make your Raspberry Pi boot from a USB drive (such as an SSD, an external hard drive or a USB thumb drive), use the Raspberry Pi Imager to write Raspberry Pi OS to your USB drive.

Finally, connect the USB drive to your Raspberry Pi 4, remove the SD card, and connect the power cord. Watch your Pi boot from USB - without any SD Card workaround.

+ \ No newline at end of file diff --git a/privacy-policy/index.html b/privacy-policy/index.html new file mode 100644 index 0000000..e6660de --- /dev/null +++ b/privacy-policy/index.html @@ -0,0 +1,12 @@ +Privacy Policy | Virtualzone Blog +

Privacy Policy

We created this privacy policy in order to inform you about the information we collect, how we use your data and which choices you as a visitor of this website have.

Unfortunately, it’s in the nature of things that this policy sounds quite technical. We tried to keep things as simple and clear as possible.

Personal data stored

The personal information you provide us (such as your name, email address, postal address or other personal information required by some form) is processed by us together with a timestamp and your IP address only for the stated purpose, stored securely and not passed on to third parties.

Thus, we use your personal information only for communicating with visitors who expressly request this and for providing the offered services and products. We will not pass on your personal data without your consent. This should however not preclude national authorities from gaining access to this data in case of unlawful conduct.

If you send us personal data by email, we cannot guarantee its secure transmission. We strongly recommend not to send personal data via email without encryption.

The legal basis according to article 6 (1) of the DSGVO (lawfulness of processing of personal data) is your consent to the processing of the information you provide. You can revoke your consent at any time. An informal email is all it takes. You’ll find our contact information in this website’s imprint.

Which personal data we store

You can use this website without providing any personal information. If you optionally choose to use functionalities that require the input of personal information, we will only use these for the purpose stated.

Where we store your data

Our servers are located in Germany.

Your rights according to General Data Protection Regulation (GDPR)

According to the regulations of the General Data Protection Regulation (GDPR) you have the following rights:

  • Right to have your data corrected (article 16 DSGVO)
  • Right to have your data deleted (article 17 DSGVO)
  • Right to limit the processing of your data (article 18 DSGVO)
  • Right to be notified – Duty regarding the correction, deletion or limitation of your data and its processing (article 19 DSGVO)
  • Right to data portability (article 20 DSGVO)
  • Right to refuse (article 21 DSGVO)
  • Right to be not subject to sole automatic decision making, including profiling (article 22 DSGVO)

If you think the processing of your data violates the terms of the General Data Protection Regulation (GDPR) or your claims for data protection are violated in any way, you can contact the Federal Commissioner for Data Protection and Freedom of Information in Germany.

Where we send your data

We will not share your data with third parties.

TLS encryption using HTTPS

On both our website and in our app, we use HTTPS to transport data securely (data protection by technical means, article 25 (1) DSGVO). By using TLS (Transport Layer Security), an encryption protocol to securely transport data on the internet, we can protect sensitive data. Most browsers show a lock symbol when HTTPS is active.

Cloudflare

We use the “Cloudflare” service provided by Cloudflare Inc., 101 Townsend St., San Francisco, CA 94107, USA. (hereinafter referred to as “Cloudflare”).

Cloudflare offers a content delivery network with DNS that is available worldwide. As a result, the information transfer that occurs between your browser and our website is technically routed via Cloudflare’s network. This enables Cloudflare to analyze data transactions between your browser and our website and to work as a filter between our servers and potentially malicious data traffic from the Internet. In this context, Cloudflare may also use cookies or other technologies deployed to recognize Internet users, which shall, however, only be used for the herein described purpose.

The use of Cloudflare is based on our legitimate interest in a provision of our website offerings that is as error free and secure as possible (Art. 6(1)(f) GDPR).

Data transmission to the US is based on the Standard Contractual Clauses (SCC) of the European Commission. Details can be found here: https://www.cloudflare.com/privacypolicy/

For more information on Cloudflare’s security precautions and data privacy policies, please follow this link: https://www.cloudflare.com/privacypolicy/

Web Analytics

For statistical purposes, this website uses Matomo, an open source web analysis tool. Matomo does not transfer any data to servers outside our control. All data is processed and stored anonymised. Matomo is provided by InnoCraft Ltd, 7 Waterloo Quay PO625, 6140 Wellington, New Zealand. You can find out more about the data being processed by Matomo in its privacy policy at https://matomo.org/privacy-policy/. If you have any questions regarding the protection of your web analytics data, please contact privacy@matomo.org.

Source: Created with the privacy policy generator by AdSimple.

+ \ No newline at end of file diff --git a/robots.txt b/robots.txt new file mode 100644 index 0000000..c66c943 --- /dev/null +++ b/robots.txt @@ -0,0 +1,3 @@ +User-agent: * +Disallow: +Sitemap: https://virtualzone.de/sitemap.xml diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 0000000..e2e45a0 --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,158 @@ + + + + https://virtualzone.de/tags/docker/ + 2023-02-05T06:00:00+00:00 + + https://virtualzone.de/posts/dns-proxy-forwarder-blackhole/ + 2023-02-05T06:00:00+00:00 + + https://virtualzone.de/tags/linux/ + 2023-02-05T06:00:00+00:00 + + https://virtualzone.de/posts/ + 2023-02-05T06:00:00+00:00 + + https://virtualzone.de/tags/ + 2023-02-05T06:00:00+00:00 + + https://virtualzone.de/ + 2023-02-05T06:00:00+00:00 + + https://virtualzone.de/posts/openrc-podman-kube-play/ + 2022-10-26T15:00:00+00:00 + + https://virtualzone.de/posts/podman-multiple-networks/ + 2022-10-16T17:00:00+00:00 + + https://virtualzone.de/posts/alpine-podman/ + 2022-06-25T18:00:00+00:00 + + https://virtualzone.de/posts/alpine-docker-rootless/ + 2022-06-19T15:00:00+00:00 + + https://virtualzone.de/tags/kubernetes/ + 2021-09-03T11:30:03+00:00 + + https://virtualzone.de/posts/k3s-glusterfs/ + 2021-09-03T11:30:03+00:00 + + https://virtualzone.de/posts/onedrive-upload-backup/ + 2021-09-02T11:30:03+00:00 + + https://virtualzone.de/tags/github/ + 2021-09-02T11:30:03+00:00 + + https://virtualzone.de/tags/onedrive/ + 2021-09-02T11:30:03+00:00 + + https://virtualzone.de/tags/tool/ + 2021-09-02T11:30:03+00:00 + + https://virtualzone.de/posts/unifi-usg-multiple-ip-addresses-on-pppoe/ + 2021-08-16T11:30:03+00:00 + + https://virtualzone.de/posts/raspberry-pi-os-remove-packages/ + 2020-06-07T11:30:03+00:00 + + https://virtualzone.de/tags/raspberrypi/ + 2020-06-07T11:30:03+00:00 + + https://virtualzone.de/posts/traefik-access-log-influxdb-grafana-telegraf/ + 2020-06-03T11:30:03+00:00 + + https://virtualzone.de/tags/api/ + 2020-06-01T11:30:03+00:00 + + https://virtualzone.de/tags/endonomdo/ + 2020-06-01T11:30:03+00:00 + + https://virtualzone.de/posts/endomono-export-gpx/ + 2020-06-01T11:30:03+00:00 + + https://virtualzone.de/posts/usb-boot-raspberry-pi/ + 2020-05-28T11:30:03+00:00 + + https://virtualzone.de/posts/multi-arch-docker-images-2/ + 2020-05-16T11:30:03+00:00 + + https://virtualzone.de/posts/multi-arch-docker-images-1/ + 2020-05-15T11:30:03+00:00 + + https://virtualzone.de/posts/jenkins-build-docker-images/ + 2017-06-11T11:30:03+00:00 + + https://virtualzone.de/tags/letsencrypt/ + 2017-02-11T11:30:03+00:00 + + https://virtualzone.de/tags/nginx/ + 2017-02-11T11:30:03+00:00 + + https://virtualzone.de/posts/lets-encrypt-effs-certbot-with-nginx-in-docker/ + 2017-02-11T11:30:03+00:00 + + https://virtualzone.de/posts/encrypted-file-container-macos/ + 2016-12-06T11:30:03+00:00 + + https://virtualzone.de/tags/macos/ + 2016-12-06T11:30:03+00:00 + + https://virtualzone.de/posts/uptime-robot-website-monitoring/ + 2016-09-05T11:30:03+00:00 + + https://virtualzone.de/tags/fhem/ + 2016-08-28T11:30:03+00:00 + + https://virtualzone.de/posts/fix-docker-not-using-etc-hosts-on-macos/ + 2016-08-28T11:30:03+00:00 + + https://virtualzone.de/posts/from-fhem-to-openhab-with-homegear-installation-docker-container/ + 2016-08-28T11:30:03+00:00 + + https://virtualzone.de/tags/homeautomation/ + 2016-08-28T11:30:03+00:00 + + https://virtualzone.de/tags/openhab/ + 2016-08-28T11:30:03+00:00 + + https://virtualzone.de/posts/https-ssl-in-wordpress-behind-proxy/ + 2016-08-27T11:30:03+00:00 + + 
https://virtualzone.de/tags/proxy/ + 2016-08-27T11:30:03+00:00 + + https://virtualzone.de/tags/wordpress/ + 2016-08-27T11:30:03+00:00 + + https://virtualzone.de/posts/reduce-pdf-file-size-2/ + 2015-08-15T11:30:03+00:00 + + https://virtualzone.de/tags/firewall/ + 2014-11-20T11:30:03+00:00 + + https://virtualzone.de/posts/ipv6-on-a-sonicwall/ + 2014-11-20T11:30:03+00:00 + + https://virtualzone.de/tags/ipv6/ + 2014-11-20T11:30:03+00:00 + + https://virtualzone.de/tags/sonicwall/ + 2014-11-20T11:30:03+00:00 + + https://virtualzone.de/posts/reduce-pdf-file-size/ + 2012-11-21T11:30:03+00:00 + + https://virtualzone.de/posts/determining-a-locations-federal-state-using-google-maps-api/ + 2012-08-10T11:30:03+00:00 + + https://virtualzone.de/tags/google/ + 2012-08-10T11:30:03+00:00 + + https://virtualzone.de/categories/ + + https://virtualzone.de/contact/ + + https://virtualzone.de/privacy-policy/ + + diff --git a/tags/api/index.html b/tags/api/index.html new file mode 100644 index 0000000..66728d1 --- /dev/null +++ b/tags/api/index.html @@ -0,0 +1,8 @@ +Api | Virtualzone Blog +

Export trainings from Endomondo as GPX files

I’ve been using Endomondo for years to track my trainings. However, I’ve been experiencing a lot of issues with Endomondo over the last months: Sometimes it’s not possible to log in. Other times, my trainings won’t get synced. So it’s time for a new app. I’ve decided to give Strava a try. With a few lines of code, I’ve managed to export all my training data as GPX files. These can be imported to Strava, so my training history won’t get lost....

June 1, 2020 · 2 min · 341 words · Heiner

Determining a location’s federal state using Google Maps API

If you have to find out which federal state a city belongs to, you can use the Google Maps API v3. Here is a straightforward JavaScript code snippet: +function log(s) { $('#sysout').append(document.createTextNode(s + 'n')); } function getResult(results) { for (var i=0; i -1) { return result['address_components'][j]['short_name']; } } return ''; } function getCountry(result) { return extractFirst(result, 'country'); } function getFederalState(result) { return extractFirst(result, 'administrative_area_level_1'); } function searchLocation() { $('#sysout').empty(); var location = $('#location')....

August 10, 2012 · 1 min · 162 words · Heiner
+ \ No newline at end of file diff --git a/tags/api/index.xml b/tags/api/index.xml new file mode 100644 index 0000000..1c6b934 --- /dev/null +++ b/tags/api/index.xml @@ -0,0 +1,28 @@ + + + + Api on Virtualzone Blog + https://virtualzone.de/tags/api/ + Recent content in Api on Virtualzone Blog + Hugo -- gohugo.io + en-us + &copy; 2024 Heiner Beck. + Mon, 01 Jun 2020 11:30:03 +0000 + + + Export trainings from Endomondo as GPX files + https://virtualzone.de/posts/endomono-export-gpx/ + Mon, 01 Jun 2020 11:30:03 +0000 + https://virtualzone.de/posts/endomono-export-gpx/ + I&rsquo;ve been using Endomondo for years to track my trainings. However, I&rsquo;ve been experiencing a lot of issues with Endomondo over the last months: Sometimes it&rsquo;s not possible to log in. Other times, my trainings won&rsquo;t get synced. So it&rsquo;s time a new app. I&rsquo;ve decided to give Strava a try. With a few lines of code, I&rsquo;ve managed to export all my training data as GPX files. These can be imported to Strava, so my training history won&rsquo;t get lost. + + + Determining a location’s federal state using Google Maps API + https://virtualzone.de/posts/determining-a-locations-federal-state-using-google-maps-api/ + Fri, 10 Aug 2012 11:30:03 +0000 + https://virtualzone.de/posts/determining-a-locations-federal-state-using-google-maps-api/ + If you have to find out which federal state a city belongs to, you can use the Google Maps API v3. Here is a straightforward JavaScript code snippet: +function log(s) { $(&#39;#sysout&#39;).append(document.createTextNode(s + &#39;n&#39;)); } function getResult(results) { for (var i=0; i -1) { return result[&#39;address_components&#39;][j][&#39;short_name&#39;]; } } return &#39;&#39;; } function getCountry(result) { return extractFirst(result, &#39;country&#39;); } function getFederalState(result) { return extractFirst(result, &#39;administrative_area_level_1&#39;); } function searchLocation() { $(&#39;#sysout&#39;).empty(); var location = $(&#39;#location&#39;). + + + diff --git a/tags/api/page/1/index.html b/tags/api/page/1/index.html new file mode 100644 index 0000000..dac95f6 --- /dev/null +++ b/tags/api/page/1/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/tags/api/ + \ No newline at end of file diff --git a/tags/docker/index.html b/tags/docker/index.html new file mode 100644 index 0000000..7ab9fdb --- /dev/null +++ b/tags/docker/index.html @@ -0,0 +1,20 @@ +Docker | Virtualzone Blog +

Go-hole: A minimalistic DNS proxy and blocker

You’ll probably know Pi-hole. It’s a popular “DNS sinkhole” – a DNS proxy server which blocks certain requests, such as those for well-known ad-serving domains. The effect is a much less ad-cluttered web experience in your home network. +I’ve been using Pi-hole for several years as a Docker container on a Raspberry Pi. The Raspi serves as a small home server on my home network. +However, as much as I like Pi-hole, I felt it got loaded with new features over the years and became slower over time....

February 5, 2023 · 4 min · 703 words · Heiner

OpenRC Script for 'podman kube play'

In June, I wrote about my approach to starting and stopping Podman Pods using OpenRC scripts on Alpine Linux. However, that approach had two major drawbacks: First, the pods were started in the foreground, causing OpenRC to wait for all pod initialization tasks to complete. If an image needed to be pulled first, this could lead to longer delays, significantly increasing system startup times. Second, requesting the status of a previously started pod always reported “crashed”....

October 26, 2022 · 3 min · 483 words · Heiner
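For orientation, the skeleton of such an OpenRC service is a run script with custom start/stop functions calling podman kube play and podman kube down. The following is only a rough sketch under assumptions (the manifest path /etc/podman/mypod.yaml is made up), and it deliberately omits the backgrounding and status fixes the post is actually about:

  #!/sbin/openrc-run
  # Sketch of an OpenRC run script that starts/stops a pod from a Kubernetes YAML manifest
  description="Example pod via podman kube play"
  manifest="/etc/podman/mypod.yaml"

  depend() {
      need net
  }

  start() {
      ebegin "Starting pod from $manifest"
      podman kube play --replace "$manifest"
      eend $?
  }

  stop() {
      ebegin "Stopping pod from $manifest"
      podman kube down "$manifest"
      eend $?
  }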

Connecting multiple networks to a Podman container

I’ve been running my containers with Podman in Rootless Mode on Alpine for about four months now. However, an annoying problem has haunted me ever since: +When a container was connected to more than one network, outgoing connections did not work correctly. +Consider a container connected to two bridge networks: +$ podman run --rm -it \ --network net1 \ --network net2 \ alpine /bin/ash Inside the container, the two networks are connected correctly:...

October 16, 2022 · 2 min · 274 words · Heiner
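To reproduce that situation, the two bridge networks have to exist first; a quick sketch (the network names net1/net2 are just examples matching the excerpt above):

  # create two rootless bridge networks and attach a test container to both
  podman network create net1
  podman network create net2
  podman run --rm -it --network net1 --network net2 alpine ip addr
  # both container interfaces should show up with addresses from the two networks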

Setting up Alpine Linux with Podman

Recently, I wrote a blog post on how to set up Rootless Docker on Alpine Linux. Today I’m showing you how to set up Podman. Podman has a rootless architecture built in. It’s an alternative to Docker, providing an almost identical command line interface. Thus, if you’re used to the Docker CLI, you won’t have any issues working with Podman. +Podman was initially developed by Red Hat and is available as an open source project....

June 25, 2022 · 4 min · 852 words · Heiner
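As a rough outline (not the full instructions from the post, and the user name is a placeholder), the Alpine setup boils down to installing Podman, enabling cgroups and assigning subordinate UID/GID ranges to the unprivileged user:

  apk add podman
  rc-update add cgroups
  rc-service cgroups start
  # allow the user to map additional UIDs/GIDs for rootless containers
  echo "heiner:100000:65536" >> /etc/subuid
  echo "heiner:100000:65536" >> /etc/subgid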

Setting up Alpine Linux with Rootless Docker

As of Docker Engine v20.10, it’s possible to run the Docker daemon as a non-root user (Rootless mode). This is especially valuable from a security perspective. Rootless mode mitigates potential vulnerabilities in the Docker daemon. +However, at the time of writing, setting up Docker in rootless mode is not straightforward if you’re using Alpine Linux as your host system. This is why I summarized the steps to get Docker Rootless up and running on Alpine Linux....

June 19, 2022 · 3 min · 479 words · Heiner

Analyze Traefik access log using InfluxDB and Grafana

Traefik is a Cloud Native Edge Router, often deployed in Docker and Kubernetes environments. With little effort, you can use Telegraf to transport Traefik’s access logs to an InfluxDB, where they can be analyzed using Grafana. +This setup contains the following elements: +Traefik v2 runs as a Docker container on a Linux host. Traefik outputs access logs in JSON format to STDOUT. Telegraf fetches the Traefik container’s JSON output using the docker_log input plugin....

June 3, 2020 · 2 min · 373 words · Heiner
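The Traefik end of this pipeline is just a matter of enabling the JSON access log; a minimal sketch of running Traefik v2 with access logs written to STDOUT (image tag, ports and the Docker provider flag are illustrative, not the post’s exact setup):

  docker run -d --name traefik \
    -p 80:80 -p 443:443 \
    -v /var/run/docker.sock:/var/run/docker.sock:ro \
    traefik:v2.10 \
    --providers.docker=true \
    --accesslog=true \
    --accesslog.format=json

Telegraf’s docker_log input can then pick up this container’s output and forward it to InfluxDB.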

Build Multi-Arch images on Docker Hub (Part 2)

In the first part of this article, I showed you how to set up a multi-arch Docker project that can also build for other target architectures, such as ARM, on an AMD64 platform. In this part, I’ll show you how to get all of this working on the official Docker Hub. +First, you should create a project on Docker Hub and link it to your source code repository. In my case, I use GitHub as the source code repository and rely on Docker Hub’s build infrastructure....

May 16, 2020 · 3 min · 443 words · Heiner

Build Multi-Arch images on Docker Hub (Part 1)

Multi-arch Docker images are a great thing: users of your images automatically pull the version that matches their architecture – whether AMD64, ARM64 or ARM32. Normally, Docker images have to be built on the architecture they will later run on. By using the QEMU emulator, however, it is possible to build for all other target platforms on an AMD64 architecture as well. Combined with Docker Hub’s auto-build feature, this is a great time-saver....

May 15, 2020 · 3 min · 502 words · Heiner
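As a present-day shortcut, the same QEMU-based cross-building can be done locally with docker buildx; this is a sketch rather than the Docker Hub auto-build setup described in the article, and the image name is a placeholder:

  # register QEMU handlers so foreign architectures can be emulated
  docker run --privileged --rm tonistiigi/binfmt --install all
  # create a builder instance and build/push a multi-arch image in one go
  docker buildx create --use
  docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 \
    -t example/myimage:latest --push .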

How to let Jenkins build Docker images

If you’re using Jenkins as your Continuous Integration (CI) tool and Docker to build self-contained images of your application, you may ask yourself how to automatically build Docker images as part of a Jenkins build job. Here’s how I did it – with Jenkins itself running in a Docker container. +So far, I’ve used the official Jenkins Docker image (the one based on Alpine). I’ve tried some of the Docker plugins for Jenkins available out there....

June 11, 2017 · 2 min · 370 words · Heiner
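One common pattern for this, not necessarily the author’s exact setup, is to mount the host’s Docker socket into the Jenkins container so that build jobs can talk to the host’s Docker daemon; a sketch:

  docker run -d --name jenkins \
    -p 8080:8080 -p 50000:50000 \
    -v jenkins_home:/var/jenkins_home \
    -v /var/run/docker.sock:/var/run/docker.sock \
    jenkins/jenkins:lts
  # a docker CLI binary still needs to be available inside the container,
  # e.g. via a derived image that installs the docker client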

Using Let’s Encrypt / EFF’s CertBot with NGINX in Docker

I’m using NGINX in a Docker Container as a front-end HTTP(s) Webserver, performing SSL termination and proxying incoming requests to various other Docker Containers and VMs. Now that I’ve switched my certificates to Let’s Encrypt, I wondered how to integrate EFF’s CertBot (which is recommended by Let’s Encrypt) with my setup. Here’s how I did it. +First, I’ve added two new volumes to my web-front-end’s Docker Compose File: +version: '2' services: webfrontend: container_name: webfrontend [....

February 11, 2017 · 2 min · 287 words · Heiner
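For reference, requesting a certificate with the official CertBot image and the webroot method looks roughly like this; the domain, e-mail address, volume names and webroot path are placeholders, not the commands from the post:

  docker run --rm \
    -v letsencrypt:/etc/letsencrypt \
    -v certbot-webroot:/var/www/certbot \
    certbot/certbot certonly --webroot -w /var/www/certbot \
    -d example.com --email admin@example.com --agree-tos --non-interactive
  # NGINX has to serve the same webroot under /.well-known/acme-challenge/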
+ \ No newline at end of file diff --git a/tags/docker/index.xml b/tags/docker/index.xml new file mode 100644 index 0000000..e267931 --- /dev/null +++ b/tags/docker/index.xml @@ -0,0 +1,112 @@ + + + + Docker on Virtualzone Blog + https://virtualzone.de/tags/docker/ + Recent content in Docker on Virtualzone Blog + Hugo -- gohugo.io + en-us + &copy; 2024 Heiner Beck. + Sun, 05 Feb 2023 06:00:00 +0000 + + + Go-hole: A minimalistic DNS proxy and and blocker + https://virtualzone.de/posts/dns-proxy-forwarder-blackhole/ + Sun, 05 Feb 2023 06:00:00 +0000 + https://virtualzone.de/posts/dns-proxy-forwarder-blackhole/ + You&rsquo;ll probably know Pi-hole. It&rsquo;s a popular &ldquo;DNS sinkhole&rdquo; – a DNS proxy server which blocks certain requests, such a as those for well-known ad serving domains. The effect is a much less ad-cluttered web experience in your home network. +I&rsquo;ve been using Pi-hole for several years as a Docker container on a Raspberry Pi. The Raspi is serving as a small home server on my home network. +However, as much as I like Pi-hole, I felt it got loaded with new features over the years and performed slower over the time. + + + OpenRC Script for 'podman kube play' + https://virtualzone.de/posts/openrc-podman-kube-play/ + Wed, 26 Oct 2022 15:00:00 +0000 + https://virtualzone.de/posts/openrc-podman-kube-play/ + In June, I&rsquo;ve written about my approach to starting and stopping Podman Pods using OpenRC scripts on Alpine Linux. However, that approach had two major drawbacks: First, the pods were started in the foreground, causing OpenRC to wait for all pod initialization tasks to complete. If an image needed to be pulled first, this could lead to longer delays, significantly increasing system startup times. Secondly, requesting the status of a previously started pod always stated &ldquo;crashed&rdquo;. + + + Connecting multiple networks to a Podman container + https://virtualzone.de/posts/podman-multiple-networks/ + Sun, 16 Oct 2022 17:00:00 +0000 + https://virtualzone.de/posts/podman-multiple-networks/ + I&rsquo;m running my containers with Podman in Rootless Mode on Alpine for about four months now. However, an annoying problem has haunted me ever since: +When a container was connected to more than one network, outgoing connections were not working correctly. +Consider a container connected to two bridge networks: +$ podman run --rm -it \ --network net1 \ --network net2 \ alpine /bin/ash Inside the container, the two networks are connected correctly: + + + Setting up Alpine Linux with Podman + https://virtualzone.de/posts/alpine-podman/ + Sat, 25 Jun 2022 18:00:00 +0000 + https://virtualzone.de/posts/alpine-podman/ + Recently, I&rsquo;ve written a blog post on how to set up Rootless Docker on Alpine Linux. Today I&rsquo;m showing you how to set up Podman. Podman has a rootless architecture built in. It&rsquo;s an alternative to Docker, providing an almost identical command line interface. Thus, if you&rsquo;re used to Docker CLI, you won&rsquo;t have any issues working with Podman. +Podman was initially developed by RedHat and is available as an open source project. + + + Setting up Alpine Linux with Rootless Docker + https://virtualzone.de/posts/alpine-docker-rootless/ + Sun, 19 Jun 2022 15:00:00 +0000 + https://virtualzone.de/posts/alpine-docker-rootless/ + As of Docker Engine v20.10, it&rsquo;s possible to run the Docker daemon as a non-root user (Rooless mode). This is especially valuable in view of security aspects. 
Rootless mode mitigates potential vulnerabilities in the Docker daemon. +However, at the time of writing, setting up Docker in rootless mode is not straightforward if you&rsquo;re using Alpine Linux as your host system. This is why I summarized the steps to get Docket Rootless up and running on Alpine Linux. + + + Analyze Traefik access log using InfluxDB and Grafana + https://virtualzone.de/posts/traefik-access-log-influxdb-grafana-telegraf/ + Wed, 03 Jun 2020 11:30:03 +0000 + https://virtualzone.de/posts/traefik-access-log-influxdb-grafana-telegraf/ + Traefik is a Cloud Native Edge Router, often deployed in Docker and Kubernetes environments. With little effort, you can use Telegraf to transport Traefik&rsquo;s access logs to an InfluxDB, where it can be analyzed using Grafana. +This setup contains the following elements: +Traefik v2 runs as a Docker container on a Linux host. Traefik outputs access logs in JSON format to STDOUT. Telegraf fetched the Traefik container&rsquo;s JSON output using the docker_log input plugin. + + + Build Multi-Arch images on Docker Hub (Part 2) + https://virtualzone.de/posts/multi-arch-docker-images-2/ + Sat, 16 May 2020 11:30:03 +0000 + https://virtualzone.de/posts/multi-arch-docker-images-2/ + Im ersten Teil dieses Artikels habe ich Euch gezeigt, wie Ihr ein Multi-Arch-Docker-Projekt anlegt, das auf einer AMD64-Plattform auch für andere Zielarchitekturen wie bspw. ARM bauen kann. In diesem Teil zeige ich Euch, wie Ihr das Ganze im offiziellen Docker Hub zum Laufen bekommt. +Zunächst solltet Ihr ein Projekt im Docker Hub anlegen und dieses mit Eurem Quellcode-Repository verknüpfen. In meinem Fall nutze ich GitHub als Sourcecode-Repository und nutze die Build-Infrastruktur von Docker Hub. + + + Build Multi-Arch images on Docker Hub (Part 1) + https://virtualzone.de/posts/multi-arch-docker-images-1/ + Fri, 15 May 2020 11:30:03 +0000 + https://virtualzone.de/posts/multi-arch-docker-images-1/ + Multi-Arch Docker Images sind eine tolle Sache: Benutzer Eurer Images ziehen automatisch die für Ihre Architektur passende Version Eures Image – ob AMD64, ARM64 oder ARM32. Normalerweise muss man Docker Images auf der Architektur bauen, auf der sie später auch verwendet werden. Durch die Verwendung des Emulators QEMU ist es jedoch möglich, auf einer AMD64-Architektur für alle anderen Zielplattformen mitzubauen. Kombiniert mit der Auto-Build-Funktion des Docker Hub ist das eine prima Arbeitserleichterung. + + + How to let Jenkins build Docker images + https://virtualzone.de/posts/jenkins-build-docker-images/ + Sun, 11 Jun 2017 11:30:03 +0000 + https://virtualzone.de/posts/jenkins-build-docker-images/ + If you’re using Jenkins as your Continuous Integration (CI) tool and Docker to build self-contained images of your application, you may ask yourself how to automatically build Docker images during Jenkins’ build job. Here’s how I did it – with Jenkins running in a Docker container itself. +So far, I’ve used the official Jenkins Docker image (the one based on Alpine). I’ve tried some of the Docker plugins for Jenkins available out there. + + + Using Let’s Encrypt / EFF’s CertBot with NGINX in Docker + https://virtualzone.de/posts/lets-encrypt-effs-certbot-with-nginx-in-docker/ + Sat, 11 Feb 2017 11:30:03 +0000 + https://virtualzone.de/posts/lets-encrypt-effs-certbot-with-nginx-in-docker/ + I’m using NGINX in a Docker Container as a front-end HTTP(s) Webserver, performing SSL termination and proxying incoming requests to various other Docker Containers and VMs. 
Now that I’ve switched my certificates to Let’s Encrypt, I wondered how to integrate EFF’s CertBot (which is recommended by Let’s Encrypt) with my setup. Here’s how I did it. +First, I’ve added two new volumes to my web-front-end’s Docker Compose File: +version: &#39;2&#39; services: webfrontend: container_name: webfrontend [. + + + Fix Docker not using /etc/hosts on MacOS + https://virtualzone.de/posts/fix-docker-not-using-etc-hosts-on-macos/ + Sun, 28 Aug 2016 11:30:03 +0000 + https://virtualzone.de/posts/fix-docker-not-using-etc-hosts-on-macos/ + On my MacBook with Mac OS X 10.11 (El Capitan) and Docker 1.12.0, Docker did not read manually set DNS entries from the /etc/hosts file. +When I executed “docker push” for example, this resulted in “no such hosts” errors: +Put http://shuttle:5000/v1/repositories/webfrontend/: dial tcp: lookup shuttle on 192.168.65.1:53: no such host On Mac OS, Docker is running in a host container itself. Thus, you’ll have to add DNS entries to the container’s /etc/hosts file. + + + From FHEM to OpenHAB with Homegear: Installation/Docker container + https://virtualzone.de/posts/from-fhem-to-openhab-with-homegear-installation-docker-container/ + Sun, 28 Aug 2016 11:30:03 +0000 + https://virtualzone.de/posts/from-fhem-to-openhab-with-homegear-installation-docker-container/ + For more than 2.5 years, I’ve now been running FHEM with several HomeMatic sensors and actors. Using the HM-CFG-LAN Configuration Tool as an I/O interface between FHEM and the HomeMatic devices, this setup has been running smoothly most of the time. The configuration was a bit tricky now and then, but it worked. However, OpenHAB seems to become a really good choice. Version 2 is currently available as Beta 3. It features a modern web interface and an easy-to-use extension manager. + + + diff --git a/tags/docker/page/1/index.html b/tags/docker/page/1/index.html new file mode 100644 index 0000000..57b1622 --- /dev/null +++ b/tags/docker/page/1/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/tags/docker/ + \ No newline at end of file diff --git a/tags/docker/page/2/index.html b/tags/docker/page/2/index.html new file mode 100644 index 0000000..94e3331 --- /dev/null +++ b/tags/docker/page/2/index.html @@ -0,0 +1,9 @@ +Docker | Virtualzone Blog +

Fix Docker not using /etc/hosts on MacOS

On my MacBook with Mac OS X 10.11 (El Capitan) and Docker 1.12.0, Docker did not read manually set DNS entries from the /etc/hosts file. +When I executed “docker push” for example, this resulted in “no such hosts” errors: +Put http://shuttle:5000/v1/repositories/webfrontend/: dial tcp: lookup shuttle on 192.168.65.1:53: no such host On Mac OS, Docker is running in a host container itself. Thus, you’ll have to add DNS entries to the container’s /etc/hosts file....

August 28, 2016 · 1 min · 163 words · Heiner

From FHEM to OpenHAB with Homegear: Installation/Docker container

For more than 2.5 years now, I’ve been running FHEM with several HomeMatic sensors and actuators. Using the HM-CFG-LAN Configuration Tool as an I/O interface between FHEM and the HomeMatic devices, this setup has been running smoothly most of the time. The configuration was a bit tricky now and then, but it worked. However, OpenHAB is shaping up to be a really good choice. Version 2 is currently available as Beta 3. It features a modern web interface and an easy-to-use extension manager....

August 28, 2016 · 6 min · 1084 words · Heiner
+ \ No newline at end of file diff --git a/tags/endonomdo/index.html b/tags/endonomdo/index.html new file mode 100644 index 0000000..15767c6 --- /dev/null +++ b/tags/endonomdo/index.html @@ -0,0 +1,7 @@ +Endonomdo | Virtualzone Blog +

Export trainings from Endomondo as GPX files

I’ve been using Endomondo for years to track my trainings. However, I’ve been experiencing a lot of issues with Endomondo over the last few months: Sometimes it’s not possible to log in. Other times, my trainings won’t get synced. So it’s time for a new app. I’ve decided to give Strava a try. With a few lines of code, I’ve managed to export all my training data as GPX files. These can be imported to Strava, so my training history won’t get lost....

June 1, 2020 · 2 min · 341 words · Heiner
+ \ No newline at end of file diff --git a/tags/endonomdo/index.xml b/tags/endonomdo/index.xml new file mode 100644 index 0000000..544f509 --- /dev/null +++ b/tags/endonomdo/index.xml @@ -0,0 +1,20 @@ + + + + Endonomdo on Virtualzone Blog + https://virtualzone.de/tags/endonomdo/ + Recent content in Endonomdo on Virtualzone Blog + Hugo -- gohugo.io + en-us + &copy; 2024 Heiner Beck. + Mon, 01 Jun 2020 11:30:03 +0000 + + + Export trainings from Endomondo as GPX files + https://virtualzone.de/posts/endomono-export-gpx/ + Mon, 01 Jun 2020 11:30:03 +0000 + https://virtualzone.de/posts/endomono-export-gpx/ + I&rsquo;ve been using Endomondo for years to track my trainings. However, I&rsquo;ve been experiencing a lot of issues with Endomondo over the last months: Sometimes it&rsquo;s not possible to log in. Other times, my trainings won&rsquo;t get synced. So it&rsquo;s time a new app. I&rsquo;ve decided to give Strava a try. With a few lines of code, I&rsquo;ve managed to export all my training data as GPX files. These can be imported to Strava, so my training history won&rsquo;t get lost. + + + diff --git a/tags/endonomdo/page/1/index.html b/tags/endonomdo/page/1/index.html new file mode 100644 index 0000000..71e06f6 --- /dev/null +++ b/tags/endonomdo/page/1/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/tags/endonomdo/ + \ No newline at end of file diff --git a/tags/fhem/index.html b/tags/fhem/index.html new file mode 100644 index 0000000..feff389 --- /dev/null +++ b/tags/fhem/index.html @@ -0,0 +1,7 @@ +Fhem | Virtualzone Blog +

From FHEM to OpenHAB with Homegear: Installation/Docker container

For more than 2.5 years now, I’ve been running FHEM with several HomeMatic sensors and actuators. Using the HM-CFG-LAN Configuration Tool as an I/O interface between FHEM and the HomeMatic devices, this setup has been running smoothly most of the time. The configuration was a bit tricky now and then, but it worked. However, OpenHAB is shaping up to be a really good choice. Version 2 is currently available as Beta 3. It features a modern web interface and an easy-to-use extension manager....

August 28, 2016 · 6 min · 1084 words · Heiner
+ \ No newline at end of file diff --git a/tags/fhem/index.xml b/tags/fhem/index.xml new file mode 100644 index 0000000..eac0111 --- /dev/null +++ b/tags/fhem/index.xml @@ -0,0 +1,20 @@ + + + + Fhem on Virtualzone Blog + https://virtualzone.de/tags/fhem/ + Recent content in Fhem on Virtualzone Blog + Hugo -- gohugo.io + en-us + &copy; 2024 Heiner Beck. + Sun, 28 Aug 2016 11:30:03 +0000 + + + From FHEM to OpenHAB with Homegear: Installation/Docker container + https://virtualzone.de/posts/from-fhem-to-openhab-with-homegear-installation-docker-container/ + Sun, 28 Aug 2016 11:30:03 +0000 + https://virtualzone.de/posts/from-fhem-to-openhab-with-homegear-installation-docker-container/ + For more than 2.5 years, I’ve now been running FHEM with several HomeMatic sensors and actors. Using the HM-CFG-LAN Configuration Tool as an I/O interface between FHEM and the HomeMatic devices, this setup has been running smoothly most of the time. The configuration was a bit tricky now and then, but it worked. However, OpenHAB seems to become a really good choice. Version 2 is currently available as Beta 3. It features a modern web interface and an easy-to-use extension manager. + + + diff --git a/tags/fhem/page/1/index.html b/tags/fhem/page/1/index.html new file mode 100644 index 0000000..b1bf4ce --- /dev/null +++ b/tags/fhem/page/1/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/tags/fhem/ + \ No newline at end of file diff --git a/tags/firewall/index.html b/tags/firewall/index.html new file mode 100644 index 0000000..4d0dea6 --- /dev/null +++ b/tags/firewall/index.html @@ -0,0 +1,7 @@ +Firewall | Virtualzone Blog +

How to enable IPv6 on a SonicWall (SonicOS 5.9) using NAT

IPv6 aimed to make Network Address Translation (NAT) obsolete, as there are so many addresses available that every single device can have its own globally unique IPv6 address. However, even with IPv6, using NAT is a very simple way to get your devices behind a Dell SonicWall connected to IPv6 services on the internet. In contrast to going without NAT, all the devices behind your SonicWall will appear under the SonicWall’s IPv6 address....

November 20, 2014 · 2 min · 372 words · Heiner
+ \ No newline at end of file diff --git a/tags/firewall/index.xml b/tags/firewall/index.xml new file mode 100644 index 0000000..e1f9cac --- /dev/null +++ b/tags/firewall/index.xml @@ -0,0 +1,20 @@ + + + + Firewall on Virtualzone Blog + https://virtualzone.de/tags/firewall/ + Recent content in Firewall on Virtualzone Blog + Hugo -- gohugo.io + en-us + &copy; 2024 Heiner Beck. + Thu, 20 Nov 2014 11:30:03 +0000 + + + How to enable IPv6 on a SonicWall (SonicOS 5.9) using NAT + https://virtualzone.de/posts/ipv6-on-a-sonicwall/ + Thu, 20 Nov 2014 11:30:03 +0000 + https://virtualzone.de/posts/ipv6-on-a-sonicwall/ + IPv6 aimed to make Network Address Translation (NAT) obselete as there are so many addresses available that every single device can have its own worldwide unique IPv6 address. However, even with IPv6, using NAT is a very simple way to get your devices behind a Dell SonicWall connected to IPv6 services on the internet. In contrast to going without NAT, all the devices behind your SonicWall will emerge under the SonicWall’s IPv6 address. + + + diff --git a/tags/firewall/page/1/index.html b/tags/firewall/page/1/index.html new file mode 100644 index 0000000..8ce74a8 --- /dev/null +++ b/tags/firewall/page/1/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/tags/firewall/ + \ No newline at end of file diff --git a/tags/github/index.html b/tags/github/index.html new file mode 100644 index 0000000..fa044a6 --- /dev/null +++ b/tags/github/index.html @@ -0,0 +1,8 @@ +Github | Virtualzone Blog +

Back up server to OneDrive’s special App Folder

I’m a dedicated user of OneDrive Personal. Bundled with M365, it’s a cheap option to get 1 TB of cloud storage. Having plenty of cloud storage at hand, I’m also using my OneDrive to run automated backups of my servers. There are various solutions capable of uploading files to OneDrive, including rclone. However, I was looking for a solution which lets me grant my backup script access to only one specific folder instead of my entire cloud drive – better safe than sorry....

September 2, 2021 · 4 min · 682 words · Heiner
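For illustration: once an app registration with the Files.ReadWrite.AppFolder permission has produced an access token, a small backup file can be pushed into the app’s own folder via Microsoft Graph. A sketch only; $TOKEN and the file name are placeholders, and files larger than a few MB require an upload session instead of this simple PUT:

  curl -X PUT \
    -H "Authorization: Bearer $TOKEN" \
    -H "Content-Type: application/octet-stream" \
    --data-binary @backup.tar.gz \
    "https://graph.microsoft.com/v1.0/me/drive/special/approot:/backup.tar.gz:/content"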

Unifi USG: Multiple IP addresses on PPPoE

My DSL provider TAL.de offers to assign a static and a dynamic IP address on PPPoE dial-in. The dynamic IP address is the primary one, used for accessing the internet. Packets to the static IP address are routed to the router as well. Here’s how to set things up on a Unifi Security Gateway (USG). +By default, the USG only allows for one IP address when dialing in via PPPoE....

August 16, 2021 · 2 min · 353 words · Heiner
+ \ No newline at end of file diff --git a/tags/github/index.xml b/tags/github/index.xml new file mode 100644 index 0000000..f5eb131 --- /dev/null +++ b/tags/github/index.xml @@ -0,0 +1,28 @@ + + + + Github on Virtualzone Blog + https://virtualzone.de/tags/github/ + Recent content in Github on Virtualzone Blog + Hugo -- gohugo.io + en-us + &copy; 2024 Heiner Beck. + Thu, 02 Sep 2021 11:30:03 +0000 + + + Back up server to OneDrive’s special App Folder + https://virtualzone.de/posts/onedrive-upload-backup/ + Thu, 02 Sep 2021 11:30:03 +0000 + https://virtualzone.de/posts/onedrive-upload-backup/ + I’m a convinced user of OneDrive Personal. Bundled with M365, it’s a cheap option to get 1 TB of cloud storage. Having plenty of cloud storage at hand, I’m also using my OneDrive to run automated backups of my servers. There are various solutions capable of uploading files to OneDrive, including rclone. However, I was looking for a solution which enables me to grant my backup script only access to one specific folder instead of my entire cloud drive – better safe than sorry. + + + Unifi USG: Multiple IP addresses on PPPoE + https://virtualzone.de/posts/unifi-usg-multiple-ip-addresses-on-pppoe/ + Mon, 16 Aug 2021 11:30:03 +0000 + https://virtualzone.de/posts/unifi-usg-multiple-ip-addresses-on-pppoe/ + My DSL provider TAL.de offers to assign a static and a dynamic IP address on PPPoE dial in. The dynamic IP address is the primary one, used for accessing the internet. Packets to the static IP address are routed to the router as well. Here’s how to set up things up on a Unifi Security Gateway (USG). +By default, USG only allows for one IP address when dialing in via PPPoE. + + + diff --git a/tags/github/page/1/index.html b/tags/github/page/1/index.html new file mode 100644 index 0000000..cba6907 --- /dev/null +++ b/tags/github/page/1/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/tags/github/ + \ No newline at end of file diff --git a/tags/google/index.html b/tags/google/index.html new file mode 100644 index 0000000..06ed971 --- /dev/null +++ b/tags/google/index.html @@ -0,0 +1,8 @@ +Google | Virtualzone Blog +

Determining a location’s federal state using Google Maps API

If you have to find out which federal state a city belongs to, you can use the Google Maps API v3. Here is a straightforward JavaScript code snippet: +function log(s) { $('#sysout').append(document.createTextNode(s + 'n')); } function getResult(results) { for (var i=0; i -1) { return result['address_components'][j]['short_name']; } } return ''; } function getCountry(result) { return extractFirst(result, 'country'); } function getFederalState(result) { return extractFirst(result, 'administrative_area_level_1'); } function searchLocation() { $('#sysout').empty(); var location = $('#location')....

August 10, 2012 · 1 min · 162 words · Heiner
+ \ No newline at end of file diff --git a/tags/google/index.xml b/tags/google/index.xml new file mode 100644 index 0000000..2d4c076 --- /dev/null +++ b/tags/google/index.xml @@ -0,0 +1,21 @@ + + + + Google on Virtualzone Blog + https://virtualzone.de/tags/google/ + Recent content in Google on Virtualzone Blog + Hugo -- gohugo.io + en-us + &copy; 2024 Heiner Beck. + Fri, 10 Aug 2012 11:30:03 +0000 + + + Determining a location’s federal state using Google Maps API + https://virtualzone.de/posts/determining-a-locations-federal-state-using-google-maps-api/ + Fri, 10 Aug 2012 11:30:03 +0000 + https://virtualzone.de/posts/determining-a-locations-federal-state-using-google-maps-api/ + If you have to find out which federal state a city belongs to, you can use the Google Maps API v3. Here is a straightforward JavaScript code snippet: +function log(s) { $(&#39;#sysout&#39;).append(document.createTextNode(s + &#39;n&#39;)); } function getResult(results) { for (var i=0; i -1) { return result[&#39;address_components&#39;][j][&#39;short_name&#39;]; } } return &#39;&#39;; } function getCountry(result) { return extractFirst(result, &#39;country&#39;); } function getFederalState(result) { return extractFirst(result, &#39;administrative_area_level_1&#39;); } function searchLocation() { $(&#39;#sysout&#39;).empty(); var location = $(&#39;#location&#39;). + + + diff --git a/tags/google/page/1/index.html b/tags/google/page/1/index.html new file mode 100644 index 0000000..e0589e6 --- /dev/null +++ b/tags/google/page/1/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/tags/google/ + \ No newline at end of file diff --git a/tags/homeautomation/index.html b/tags/homeautomation/index.html new file mode 100644 index 0000000..5a736f6 --- /dev/null +++ b/tags/homeautomation/index.html @@ -0,0 +1,7 @@ +Homeautomation | Virtualzone Blog +

From FHEM to OpenHAB with Homegear: Installation/Docker container

For more than 2.5 years now, I’ve been running FHEM with several HomeMatic sensors and actuators. Using the HM-CFG-LAN Configuration Tool as an I/O interface between FHEM and the HomeMatic devices, this setup has been running smoothly most of the time. The configuration was a bit tricky now and then, but it worked. However, OpenHAB is shaping up to be a really good choice. Version 2 is currently available as Beta 3. It features a modern web interface and an easy-to-use extension manager....

August 28, 2016 · 6 min · 1084 words · Heiner
+ \ No newline at end of file diff --git a/tags/homeautomation/index.xml b/tags/homeautomation/index.xml new file mode 100644 index 0000000..e1bc4a2 --- /dev/null +++ b/tags/homeautomation/index.xml @@ -0,0 +1,20 @@ + + + + Homeautomation on Virtualzone Blog + https://virtualzone.de/tags/homeautomation/ + Recent content in Homeautomation on Virtualzone Blog + Hugo -- gohugo.io + en-us + &copy; 2024 Heiner Beck. + Sun, 28 Aug 2016 11:30:03 +0000 + + + From FHEM to OpenHAB with Homegear: Installation/Docker container + https://virtualzone.de/posts/from-fhem-to-openhab-with-homegear-installation-docker-container/ + Sun, 28 Aug 2016 11:30:03 +0000 + https://virtualzone.de/posts/from-fhem-to-openhab-with-homegear-installation-docker-container/ + For more than 2.5 years, I’ve now been running FHEM with several HomeMatic sensors and actors. Using the HM-CFG-LAN Configuration Tool as an I/O interface between FHEM and the HomeMatic devices, this setup has been running smoothly most of the time. The configuration was a bit tricky now and then, but it worked. However, OpenHAB seems to become a really good choice. Version 2 is currently available as Beta 3. It features a modern web interface and an easy-to-use extension manager. + + + diff --git a/tags/homeautomation/page/1/index.html b/tags/homeautomation/page/1/index.html new file mode 100644 index 0000000..232bbc9 --- /dev/null +++ b/tags/homeautomation/page/1/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/tags/homeautomation/ + \ No newline at end of file diff --git a/tags/index.html b/tags/index.html new file mode 100644 index 0000000..d385fb0 --- /dev/null +++ b/tags/index.html @@ -0,0 +1,6 @@ +Tags | Virtualzone Blog +
+ \ No newline at end of file diff --git a/tags/index.xml b/tags/index.xml new file mode 100644 index 0000000..fff5002 --- /dev/null +++ b/tags/index.xml @@ -0,0 +1,160 @@ + + + + Tags on Virtualzone Blog + https://virtualzone.de/tags/ + Recent content in Tags on Virtualzone Blog + Hugo -- gohugo.io + en-us + &copy; 2024 Heiner Beck. + Sun, 05 Feb 2023 06:00:00 +0000 + + + Docker + https://virtualzone.de/tags/docker/ + Sun, 05 Feb 2023 06:00:00 +0000 + https://virtualzone.de/tags/docker/ + + + + Linux + https://virtualzone.de/tags/linux/ + Sun, 05 Feb 2023 06:00:00 +0000 + https://virtualzone.de/tags/linux/ + + + + Kubernetes + https://virtualzone.de/tags/kubernetes/ + Fri, 03 Sep 2021 11:30:03 +0000 + https://virtualzone.de/tags/kubernetes/ + + + + Github + https://virtualzone.de/tags/github/ + Thu, 02 Sep 2021 11:30:03 +0000 + https://virtualzone.de/tags/github/ + + + + Onedrive + https://virtualzone.de/tags/onedrive/ + Thu, 02 Sep 2021 11:30:03 +0000 + https://virtualzone.de/tags/onedrive/ + + + + Tool + https://virtualzone.de/tags/tool/ + Thu, 02 Sep 2021 11:30:03 +0000 + https://virtualzone.de/tags/tool/ + + + + Raspberrypi + https://virtualzone.de/tags/raspberrypi/ + Sun, 07 Jun 2020 11:30:03 +0000 + https://virtualzone.de/tags/raspberrypi/ + + + + Api + https://virtualzone.de/tags/api/ + Mon, 01 Jun 2020 11:30:03 +0000 + https://virtualzone.de/tags/api/ + + + + Endonomdo + https://virtualzone.de/tags/endonomdo/ + Mon, 01 Jun 2020 11:30:03 +0000 + https://virtualzone.de/tags/endonomdo/ + + + + Letsencrypt + https://virtualzone.de/tags/letsencrypt/ + Sat, 11 Feb 2017 11:30:03 +0000 + https://virtualzone.de/tags/letsencrypt/ + + + + Nginx + https://virtualzone.de/tags/nginx/ + Sat, 11 Feb 2017 11:30:03 +0000 + https://virtualzone.de/tags/nginx/ + + + + Macos + https://virtualzone.de/tags/macos/ + Tue, 06 Dec 2016 11:30:03 +0000 + https://virtualzone.de/tags/macos/ + + + + Fhem + https://virtualzone.de/tags/fhem/ + Sun, 28 Aug 2016 11:30:03 +0000 + https://virtualzone.de/tags/fhem/ + + + + Homeautomation + https://virtualzone.de/tags/homeautomation/ + Sun, 28 Aug 2016 11:30:03 +0000 + https://virtualzone.de/tags/homeautomation/ + + + + Openhab + https://virtualzone.de/tags/openhab/ + Sun, 28 Aug 2016 11:30:03 +0000 + https://virtualzone.de/tags/openhab/ + + + + Proxy + https://virtualzone.de/tags/proxy/ + Sat, 27 Aug 2016 11:30:03 +0000 + https://virtualzone.de/tags/proxy/ + + + + Wordpress + https://virtualzone.de/tags/wordpress/ + Sat, 27 Aug 2016 11:30:03 +0000 + https://virtualzone.de/tags/wordpress/ + + + + Firewall + https://virtualzone.de/tags/firewall/ + Thu, 20 Nov 2014 11:30:03 +0000 + https://virtualzone.de/tags/firewall/ + + + + Ipv6 + https://virtualzone.de/tags/ipv6/ + Thu, 20 Nov 2014 11:30:03 +0000 + https://virtualzone.de/tags/ipv6/ + + + + Sonicwall + https://virtualzone.de/tags/sonicwall/ + Thu, 20 Nov 2014 11:30:03 +0000 + https://virtualzone.de/tags/sonicwall/ + + + + Google + https://virtualzone.de/tags/google/ + Fri, 10 Aug 2012 11:30:03 +0000 + https://virtualzone.de/tags/google/ + + + + diff --git a/tags/ipv6/index.html b/tags/ipv6/index.html new file mode 100644 index 0000000..4b34e5f --- /dev/null +++ b/tags/ipv6/index.html @@ -0,0 +1,7 @@ +Ipv6 | Virtualzone Blog +

How to enable IPv6 on a SonicWall (SonicOS 5.9) using NAT

IPv6 aimed to make Network Address Translation (NAT) obsolete, as there are so many addresses available that every single device can have its own globally unique IPv6 address. However, even with IPv6, using NAT is a very simple way to get your devices behind a Dell SonicWall connected to IPv6 services on the internet. In contrast to going without NAT, all the devices behind your SonicWall will appear under the SonicWall’s IPv6 address....

November 20, 2014 · 2 min · 372 words · Heiner
+ \ No newline at end of file diff --git a/tags/ipv6/index.xml b/tags/ipv6/index.xml new file mode 100644 index 0000000..ad3485d --- /dev/null +++ b/tags/ipv6/index.xml @@ -0,0 +1,20 @@ + + + + Ipv6 on Virtualzone Blog + https://virtualzone.de/tags/ipv6/ + Recent content in Ipv6 on Virtualzone Blog + Hugo -- gohugo.io + en-us + &copy; 2024 Heiner Beck. + Thu, 20 Nov 2014 11:30:03 +0000 + + + How to enable IPv6 on a SonicWall (SonicOS 5.9) using NAT + https://virtualzone.de/posts/ipv6-on-a-sonicwall/ + Thu, 20 Nov 2014 11:30:03 +0000 + https://virtualzone.de/posts/ipv6-on-a-sonicwall/ + IPv6 aimed to make Network Address Translation (NAT) obselete as there are so many addresses available that every single device can have its own worldwide unique IPv6 address. However, even with IPv6, using NAT is a very simple way to get your devices behind a Dell SonicWall connected to IPv6 services on the internet. In contrast to going without NAT, all the devices behind your SonicWall will emerge under the SonicWall’s IPv6 address. + + + diff --git a/tags/ipv6/page/1/index.html b/tags/ipv6/page/1/index.html new file mode 100644 index 0000000..ac28fad --- /dev/null +++ b/tags/ipv6/page/1/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/tags/ipv6/ + \ No newline at end of file diff --git a/tags/kubernetes/index.html b/tags/kubernetes/index.html new file mode 100644 index 0000000..2a9903d --- /dev/null +++ b/tags/kubernetes/index.html @@ -0,0 +1,7 @@ +Kubernetes | Virtualzone Blog +

Setting up a Kubernetes cluster with K3S, GlusterFS and Load Balancing

I’ve recently written a tutorial which will guide you through setting up a Kubernetes cluster using K3S with virtual machines hosted at Hetzner, a German (Cloud) hosting provider. The tutorial uses K3S, a lightweight Kubernetes distribution which is perfectly suited for small VMs like Hetzner’s CX11. Additionally, the tutorial will show you how to set up Hetzner’s cloud load balancer which performs SSL offloading and forwards traffic to your Kubernetes system....

September 3, 2021 · 1 min · 118 words · Heiner
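For a quick sense of scale (the linked tutorial covers the Hetzner and GlusterFS parts in detail): installing the K3S server and joining an agent are each essentially a one-liner; the server IP and token below are placeholders:

  # on the first VM: install the K3S server
  curl -sfL https://get.k3s.io | sh -
  # read the join token on the server
  cat /var/lib/rancher/k3s/server/node-token
  # on every additional VM: join the cluster as an agent
  curl -sfL https://get.k3s.io | K3S_URL=https://10.0.0.2:6443 K3S_TOKEN=<token> sh -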
+ \ No newline at end of file diff --git a/tags/kubernetes/index.xml b/tags/kubernetes/index.xml new file mode 100644 index 0000000..bcaf5ff --- /dev/null +++ b/tags/kubernetes/index.xml @@ -0,0 +1,20 @@ + + + + Kubernetes on Virtualzone Blog + https://virtualzone.de/tags/kubernetes/ + Recent content in Kubernetes on Virtualzone Blog + Hugo -- gohugo.io + en-us + &copy; 2024 Heiner Beck. + Fri, 03 Sep 2021 11:30:03 +0000 + + + Setting up a Kubernetes cluster with K3S, GlusterFS and Load Balancing + https://virtualzone.de/posts/k3s-glusterfs/ + Fri, 03 Sep 2021 11:30:03 +0000 + https://virtualzone.de/posts/k3s-glusterfs/ + I’ve recently written a tutorial which will guide you through setting up a Kubernetes cluster using K3S with virtual machines hosted at Hetzner, a German (Cloud) hosting provider. The tutorial uses K3S, a lightweight Kubernetes distribution which is perfectly suited for small VMs like Hetzner’s CX11. Additionally, the tutorial will show you how to set up Hetzner’s cloud load balancer which performs SSL offloading and forwards traffic to your Kubernetes system. + + + diff --git a/tags/kubernetes/page/1/index.html b/tags/kubernetes/page/1/index.html new file mode 100644 index 0000000..529f586 --- /dev/null +++ b/tags/kubernetes/page/1/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/tags/kubernetes/ + \ No newline at end of file diff --git a/tags/letsencrypt/index.html b/tags/letsencrypt/index.html new file mode 100644 index 0000000..3f85df9 --- /dev/null +++ b/tags/letsencrypt/index.html @@ -0,0 +1,9 @@ +Letsencrypt | Virtualzone Blog +

Using Let’s Encrypt / EFF’s CertBot with NGINX in Docker

I’m using NGINX in a Docker Container as a front-end HTTP(s) Webserver, performing SSL termination and proxying incoming requests to various other Docker Containers and VMs. Now that I’ve switched my certificates to Let’s Encrypt, I wondered how to integrate EFF’s CertBot (which is recommended by Let’s Encrypt) with my setup. Here’s how I did it. +First, I’ve added two new volumes to my web-front-end’s Docker Compose File: +version: '2' services: webfrontend: container_name: webfrontend [....

February 11, 2017 · 2 min · 287 words · Heiner
+ \ No newline at end of file diff --git a/tags/letsencrypt/index.xml b/tags/letsencrypt/index.xml new file mode 100644 index 0000000..9bc6db4 --- /dev/null +++ b/tags/letsencrypt/index.xml @@ -0,0 +1,22 @@ + + + + Letsencrypt on Virtualzone Blog + https://virtualzone.de/tags/letsencrypt/ + Recent content in Letsencrypt on Virtualzone Blog + Hugo -- gohugo.io + en-us + &copy; 2024 Heiner Beck. + Sat, 11 Feb 2017 11:30:03 +0000 + + + Using Let’s Encrypt / EFF’s CertBot with NGINX in Docker + https://virtualzone.de/posts/lets-encrypt-effs-certbot-with-nginx-in-docker/ + Sat, 11 Feb 2017 11:30:03 +0000 + https://virtualzone.de/posts/lets-encrypt-effs-certbot-with-nginx-in-docker/ + I’m using NGINX in a Docker Container as a front-end HTTP(s) Webserver, performing SSL termination and proxying incoming requests to various other Docker Containers and VMs. Now that I’ve switched my certificates to Let’s Encrypt, I wondered how to integrate EFF’s CertBot (which is recommended by Let’s Encrypt) with my setup. Here’s how I did it. +First, I’ve added two new volumes to my web-front-end’s Docker Compose File: +version: &#39;2&#39; services: webfrontend: container_name: webfrontend [. + + + diff --git a/tags/letsencrypt/page/1/index.html b/tags/letsencrypt/page/1/index.html new file mode 100644 index 0000000..cf37852 --- /dev/null +++ b/tags/letsencrypt/page/1/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/tags/letsencrypt/ + \ No newline at end of file diff --git a/tags/linux/index.html b/tags/linux/index.html new file mode 100644 index 0000000..8c29355 --- /dev/null +++ b/tags/linux/index.html @@ -0,0 +1,17 @@ +Linux | Virtualzone Blog +

Go-hole: A minimalistic DNS proxy and blocker

You’ll probably know Pi-hole. It’s a popular “DNS sinkhole” – a DNS proxy server which blocks certain requests, such as those for well-known ad-serving domains. The effect is a much less ad-cluttered web experience in your home network. +I’ve been using Pi-hole for several years as a Docker container on a Raspberry Pi. The Raspi serves as a small home server on my home network. +However, as much as I like Pi-hole, I felt it got loaded with new features over the years and became slower over time....

February 5, 2023 · 4 min · 703 words · Heiner

OpenRC Script for 'podman kube play'

In June, I wrote about my approach to starting and stopping Podman Pods using OpenRC scripts on Alpine Linux. However, that approach had two major drawbacks: First, the pods were started in the foreground, causing OpenRC to wait for all pod initialization tasks to complete. If an image needed to be pulled first, this could lead to longer delays, significantly increasing system startup times. Second, requesting the status of a previously started pod always reported “crashed”....

October 26, 2022 · 3 min · 483 words · Heiner

Connecting multiple networks to a Podman container

I’ve been running my containers with Podman in Rootless Mode on Alpine for about four months now. However, an annoying problem has haunted me ever since: +When a container was connected to more than one network, outgoing connections did not work correctly. +Consider a container connected to two bridge networks: +$ podman run --rm -it \ --network net1 \ --network net2 \ alpine /bin/ash Inside the container, the two networks are connected correctly:...

October 16, 2022 · 2 min · 274 words · Heiner

Setting up Alpine Linux with Podman

Recently, I wrote a blog post on how to set up Rootless Docker on Alpine Linux. Today I’m showing you how to set up Podman. Podman has a rootless architecture built in. It’s an alternative to Docker, providing an almost identical command line interface. Thus, if you’re used to the Docker CLI, you won’t have any issues working with Podman. +Podman was initially developed by Red Hat and is available as an open source project....

June 25, 2022 · 4 min · 852 words · Heiner

Setting up Alpine Linux with Rootless Docker

As of Docker Engine v20.10, it’s possible to run the Docker daemon as a non-root user (Rootless mode). This is especially valuable from a security perspective. Rootless mode mitigates potential vulnerabilities in the Docker daemon. +However, at the time of writing, setting up Docker in rootless mode is not straightforward if you’re using Alpine Linux as your host system. This is why I summarized the steps to get Docker Rootless up and running on Alpine Linux....

June 19, 2022 · 3 min · 479 words · Heiner

How to reduce PDF file size in Linux - Part 2

Several months ago, I wrote a blog post about reducing a PDF file’s size. Since then, I’ve used that technique many times. However, you may want to control the DPI (dots per inch) even more specifically. Here’s how to do it: +gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.7 \ -dDownsampleColorImages=true \ -dDownsampleGrayImages=true \ -dDownsampleMonoImages=true \ -dColorImageResolution=120 \ -dGrayImageResolution=120 \ -dMonoImageResolution=120 \ -sOutputFile=output.pdf input.pdf Hint: This also works on MacOS. Just install GhostScript using Homebrew:...

August 15, 2015 · 1 min · 75 words · Heiner
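The truncated hint presumably refers to installing GhostScript via Homebrew, which would be:

  brew install ghostscript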

How to reduce PDF file size in Linux

Using a single GhostScript command in my Ubuntu terminal, I was able to reduce the size of a PDF file from 6 MB to approximately 1 MB: +gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/screen -sOutputFile=output.pdf input.pdf You can also use the following parameters for -dPDFSETTINGS instead of /screen: +/screen – Lowest quality, lowest size /ebook – Moderate quality /printer – Good quality /prepress – Best quality, highest size Update: Read Part 2 of this blog post for more detailed file size reduction settings....

November 21, 2012 · 1 min · 98 words · Heiner
+ \ No newline at end of file diff --git a/tags/linux/index.xml b/tags/linux/index.xml new file mode 100644 index 0000000..538060f --- /dev/null +++ b/tags/linux/index.xml @@ -0,0 +1,72 @@ + + + + Linux on Virtualzone Blog + https://virtualzone.de/tags/linux/ + Recent content in Linux on Virtualzone Blog + Hugo -- gohugo.io + en-us + &copy; 2024 Heiner Beck. + Sun, 05 Feb 2023 06:00:00 +0000 + + + Go-hole: A minimalistic DNS proxy and and blocker + https://virtualzone.de/posts/dns-proxy-forwarder-blackhole/ + Sun, 05 Feb 2023 06:00:00 +0000 + https://virtualzone.de/posts/dns-proxy-forwarder-blackhole/ + You&rsquo;ll probably know Pi-hole. It&rsquo;s a popular &ldquo;DNS sinkhole&rdquo; – a DNS proxy server which blocks certain requests, such a as those for well-known ad serving domains. The effect is a much less ad-cluttered web experience in your home network. +I&rsquo;ve been using Pi-hole for several years as a Docker container on a Raspberry Pi. The Raspi is serving as a small home server on my home network. +However, as much as I like Pi-hole, I felt it got loaded with new features over the years and performed slower over the time. + + + OpenRC Script for 'podman kube play' + https://virtualzone.de/posts/openrc-podman-kube-play/ + Wed, 26 Oct 2022 15:00:00 +0000 + https://virtualzone.de/posts/openrc-podman-kube-play/ + In June, I&rsquo;ve written about my approach to starting and stopping Podman Pods using OpenRC scripts on Alpine Linux. However, that approach had two major drawbacks: First, the pods were started in the foreground, causing OpenRC to wait for all pod initialization tasks to complete. If an image needed to be pulled first, this could lead to longer delays, significantly increasing system startup times. Secondly, requesting the status of a previously started pod always stated &ldquo;crashed&rdquo;. + + + Connecting multiple networks to a Podman container + https://virtualzone.de/posts/podman-multiple-networks/ + Sun, 16 Oct 2022 17:00:00 +0000 + https://virtualzone.de/posts/podman-multiple-networks/ + I&rsquo;m running my containers with Podman in Rootless Mode on Alpine for about four months now. However, an annoying problem has haunted me ever since: +When a container was connected to more than one network, outgoing connections were not working correctly. +Consider a container connected to two bridge networks: +$ podman run --rm -it \ --network net1 \ --network net2 \ alpine /bin/ash Inside the container, the two networks are connected correctly: + + + Setting up Alpine Linux with Podman + https://virtualzone.de/posts/alpine-podman/ + Sat, 25 Jun 2022 18:00:00 +0000 + https://virtualzone.de/posts/alpine-podman/ + Recently, I&rsquo;ve written a blog post on how to set up Rootless Docker on Alpine Linux. Today I&rsquo;m showing you how to set up Podman. Podman has a rootless architecture built in. It&rsquo;s an alternative to Docker, providing an almost identical command line interface. Thus, if you&rsquo;re used to Docker CLI, you won&rsquo;t have any issues working with Podman. +Podman was initially developed by RedHat and is available as an open source project. + + + Setting up Alpine Linux with Rootless Docker + https://virtualzone.de/posts/alpine-docker-rootless/ + Sun, 19 Jun 2022 15:00:00 +0000 + https://virtualzone.de/posts/alpine-docker-rootless/ + As of Docker Engine v20.10, it&rsquo;s possible to run the Docker daemon as a non-root user (Rooless mode). This is especially valuable in view of security aspects. 
Rootless mode mitigates potential vulnerabilities in the Docker daemon. +However, at the time of writing, setting up Docker in rootless mode is not straightforward if you&rsquo;re using Alpine Linux as your host system. This is why I summarized the steps to get Docket Rootless up and running on Alpine Linux. + + + How to reduce PDF file size in Linux - Part 2 + https://virtualzone.de/posts/reduce-pdf-file-size-2/ + Sat, 15 Aug 2015 11:30:03 +0000 + https://virtualzone.de/posts/reduce-pdf-file-size-2/ + Several months ago, I wrote a blog post about reducing a PDF file’s size. Since then, I’ve used that technique many times. However, you may want to control the DPI (dots per inch) even more specific. Here’s how to do it: +gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.7 \ -dDownsampleColorImages=true \ -dDownsampleGrayImages=true \ -dDownsampleMonoImages=true \ -dColorImageResolution=120 \ -dGrayImageResolution=120 \ -dMonoImageResolution=120 \ -sOutputFile=output.pdf input.pdf Hint: This also works on MacOS. Just install GhostScript using Homebrew: + + + How to reduce PDF file size in Linux + https://virtualzone.de/posts/reduce-pdf-file-size/ + Wed, 21 Nov 2012 11:30:03 +0000 + https://virtualzone.de/posts/reduce-pdf-file-size/ + Using a single line of GhostScript command on my Ubuntu’s terminal, I was able to reduce the size of a PDF file from 6 MB to approximately 1 MB: +gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/screen -sOutputFile=output.pdf input.pdf You can also use the following parameters for -dPDFSETTINGS instead of /screen: +/screen – Lowest quality, lowest size /ebook – Moderate quality /printer – Good quality /prepress – Best quality, highest size Update: Read Part 2 of this blog post for more detailled file size reduction settings. + + + diff --git a/tags/linux/page/1/index.html b/tags/linux/page/1/index.html new file mode 100644 index 0000000..2a59ef6 --- /dev/null +++ b/tags/linux/page/1/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/tags/linux/ + \ No newline at end of file diff --git a/tags/macos/index.html b/tags/macos/index.html new file mode 100644 index 0000000..a20d745 --- /dev/null +++ b/tags/macos/index.html @@ -0,0 +1,12 @@ +Macos | Virtualzone Blog +

Creating an encrypted file container on macOS

Some years ago, I used TrueCrypt to create encrypted containers for storing sensitive files. However, TrueCrypt is nowadays considered insecure, and I’m on macOS Sierra 10.12 now – time for another solution. Luckily, macOS has built-in means for creating encrypted containers and storing sensitive information in them. You don’t need any additional software for this. As far as I know, this solution also works on previous versions of Mac OS X, like Mac OS X 10....

December 6, 2016 · 2 min · 356 words · Heiner
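The post most likely walks through Disk Utility, but the same kind of encrypted, growable container can also be created from the terminal with hdiutil; a sketch with arbitrary example values for size, file system and names:

  # create an AES-256 encrypted sparse bundle (prompts for a passphrase)
  hdiutil create -size 1g -type SPARSEBUNDLE -fs HFS+J \
    -encryption AES-256 -volname SecureVault ~/SecureVault.sparsebundle
  # mount it to work with the files, then eject when done
  hdiutil attach ~/SecureVault.sparsebundle
  hdiutil detach /Volumes/SecureVault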

Fix Docker not using /etc/hosts on MacOS

On my MacBook with Mac OS X 10.11 (El Capitan) and Docker 1.12.0, Docker did not read manually set DNS entries from the /etc/hosts file. +When I executed “docker push” for example, this resulted in “no such hosts” errors: +Put http://shuttle:5000/v1/repositories/webfrontend/: dial tcp: lookup shuttle on 192.168.65.1:53: no such host On Mac OS, Docker is running in a host container itself. Thus, you’ll have to add DNS entries to the container’s /etc/hosts file....

August 28, 2016 · 1 min · 163 words · Heiner

How to reduce PDF file size in Linux - Part 2

Several months ago, I wrote a blog post about reducing a PDF file’s size. Since then, I’ve used that technique many times. However, you may want to control the DPI (dots per inch) even more specifically. Here’s how to do it: +gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.7 \ -dDownsampleColorImages=true \ -dDownsampleGrayImages=true \ -dDownsampleMonoImages=true \ -dColorImageResolution=120 \ -dGrayImageResolution=120 \ -dMonoImageResolution=120 \ -sOutputFile=output.pdf input.pdf Hint: This also works on MacOS. Just install GhostScript using Homebrew:...

August 15, 2015 · 1 min · 75 words · Heiner

How to reduce PDF file size in Linux

Using a single GhostScript command in my Ubuntu terminal, I was able to reduce the size of a PDF file from 6 MB to approximately 1 MB: +gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/screen -sOutputFile=output.pdf input.pdf You can also use the following parameters for -dPDFSETTINGS instead of /screen: +/screen – Lowest quality, lowest size /ebook – Moderate quality /printer – Good quality /prepress – Best quality, highest size Update: Read Part 2 of this blog post for more detailed file size reduction settings....

November 21, 2012 · 1 min · 98 words · Heiner
+ \ No newline at end of file diff --git a/tags/macos/index.xml b/tags/macos/index.xml new file mode 100644 index 0000000..e37f594 --- /dev/null +++ b/tags/macos/index.xml @@ -0,0 +1,46 @@ + + + + Macos on Virtualzone Blog + https://virtualzone.de/tags/macos/ + Recent content in Macos on Virtualzone Blog + Hugo -- gohugo.io + en-us + &copy; 2024 Heiner Beck. + Tue, 06 Dec 2016 11:30:03 +0000 + + + Creating an encrypted file container on macOS + https://virtualzone.de/posts/encrypted-file-container-macos/ + Tue, 06 Dec 2016 11:30:03 +0000 + https://virtualzone.de/posts/encrypted-file-container-macos/ + Some years ago, I’ve used TrueCrypt to create encrypted containers for storing sensitive files. However, TrueCrypt is nowadays considered insecure and I’m on macOS Sierra 10.12 now – time for another solution. Luckily, macOS has integrated means for creating encrypted containers and saving sensitive information in it. You don’t need any additional software for this. As far as I know, this solution also works for previous versions of Mac OS X, like Mac OS X 10. + + + Fix Docker not using /etc/hosts on MacOS + https://virtualzone.de/posts/fix-docker-not-using-etc-hosts-on-macos/ + Sun, 28 Aug 2016 11:30:03 +0000 + https://virtualzone.de/posts/fix-docker-not-using-etc-hosts-on-macos/ + On my MacBook with Mac OS X 10.11 (El Capitan) and Docker 1.12.0, Docker did not read manually set DNS entries from the /etc/hosts file. +When I executed “docker push” for example, this resulted in “no such hosts” errors: +Put http://shuttle:5000/v1/repositories/webfrontend/: dial tcp: lookup shuttle on 192.168.65.1:53: no such host On Mac OS, Docker is running in a host container itself. Thus, you’ll have to add DNS entries to the container’s /etc/hosts file. + + + How to reduce PDF file size in Linux - Part 2 + https://virtualzone.de/posts/reduce-pdf-file-size-2/ + Sat, 15 Aug 2015 11:30:03 +0000 + https://virtualzone.de/posts/reduce-pdf-file-size-2/ + Several months ago, I wrote a blog post about reducing a PDF file’s size. Since then, I’ve used that technique many times. However, you may want to control the DPI (dots per inch) even more specific. Here’s how to do it: +gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.7 \ -dDownsampleColorImages=true \ -dDownsampleGrayImages=true \ -dDownsampleMonoImages=true \ -dColorImageResolution=120 \ -dGrayImageResolution=120 \ -dMonoImageResolution=120 \ -sOutputFile=output.pdf input.pdf Hint: This also works on MacOS. Just install GhostScript using Homebrew: + + + How to reduce PDF file size in Linux + https://virtualzone.de/posts/reduce-pdf-file-size/ + Wed, 21 Nov 2012 11:30:03 +0000 + https://virtualzone.de/posts/reduce-pdf-file-size/ + Using a single line of GhostScript command on my Ubuntu’s terminal, I was able to reduce the size of a PDF file from 6 MB to approximately 1 MB: +gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/screen -sOutputFile=output.pdf input.pdf You can also use the following parameters for -dPDFSETTINGS instead of /screen: +/screen – Lowest quality, lowest size /ebook – Moderate quality /printer – Good quality /prepress – Best quality, highest size Update: Read Part 2 of this blog post for more detailled file size reduction settings. 
+ + + diff --git a/tags/macos/page/1/index.html b/tags/macos/page/1/index.html new file mode 100644 index 0000000..e025d43 --- /dev/null +++ b/tags/macos/page/1/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/tags/macos/ + \ No newline at end of file diff --git a/tags/nginx/index.html b/tags/nginx/index.html new file mode 100644 index 0000000..4e427b8 --- /dev/null +++ b/tags/nginx/index.html @@ -0,0 +1,9 @@ +Nginx | Virtualzone Blog +

Using Let’s Encrypt / EFF’s CertBot with NGINX in Docker

I’m using NGINX in a Docker Container as a front-end HTTP(s) Webserver, performing SSL termination and proxying incoming requests to various other Docker Containers and VMs. Now that I’ve switched my certificates to Let’s Encrypt, I wondered how to integrate EFF’s CertBot (which is recommended by Let’s Encrypt) with my setup. Here’s how I did it. +First, I’ve added two new volumes to my web-front-end’s Docker Compose File: +version: '2' services: webfrontend: container_name: webfrontend [....

February 11, 2017 · 2 min · 287 words · Heiner
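The excerpt above only hints at the setup (two extra volumes in the Compose file). As a rough sketch of the webroot-style flow the post describes – the container name webfrontend comes from the excerpt, while the host paths and the domain example.com are placeholder assumptions, not details from the post – issuing or renewing a certificate could look roughly like this:

# Sketch only – host paths and domain are assumptions, not from the original post.
docker run --rm \
  -v "$(pwd)/letsencrypt:/etc/letsencrypt" \
  -v "$(pwd)/certbot-webroot:/var/www/certbot" \
  certbot/certbot certonly --webroot -w /var/www/certbot -d example.com
# Reload NGINX in the front-end container so it picks up the renewed certificate.
docker exec webfrontend nginx -s reload

The idea behind the webroot method is that CertBot writes the HTTP-01 challenge files into a directory that NGINX also serves, so certificates can be issued and renewed without stopping the proxy.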
+ \ No newline at end of file diff --git a/tags/nginx/index.xml b/tags/nginx/index.xml new file mode 100644 index 0000000..8d2e639 --- /dev/null +++ b/tags/nginx/index.xml @@ -0,0 +1,22 @@ + + + + Nginx on Virtualzone Blog + https://virtualzone.de/tags/nginx/ + Recent content in Nginx on Virtualzone Blog + Hugo -- gohugo.io + en-us + &copy; 2024 Heiner Beck. + Sat, 11 Feb 2017 11:30:03 +0000 + + + Using Let’s Encrypt / EFF’s CertBot with NGINX in Docker + https://virtualzone.de/posts/lets-encrypt-effs-certbot-with-nginx-in-docker/ + Sat, 11 Feb 2017 11:30:03 +0000 + https://virtualzone.de/posts/lets-encrypt-effs-certbot-with-nginx-in-docker/ + I’m using NGINX in a Docker Container as a front-end HTTP(s) Webserver, performing SSL termination and proxying incoming requests to various other Docker Containers and VMs. Now that I’ve switched my certificates to Let’s Encrypt, I wondered how to integrate EFF’s CertBot (which is recommended by Let’s Encrypt) with my setup. Here’s how I did it. +First, I’ve added two new volumes to my web-front-end’s Docker Compose File: +version: &#39;2&#39; services: webfrontend: container_name: webfrontend [. + + + diff --git a/tags/nginx/page/1/index.html b/tags/nginx/page/1/index.html new file mode 100644 index 0000000..d3a2b46 --- /dev/null +++ b/tags/nginx/page/1/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/tags/nginx/ + \ No newline at end of file diff --git a/tags/onedrive/index.html b/tags/onedrive/index.html new file mode 100644 index 0000000..1e6b725 --- /dev/null +++ b/tags/onedrive/index.html @@ -0,0 +1,8 @@ +Onedrive | Virtualzone Blog +

Back up server to OneDrive’s special App Folder

I’m a committed user of OneDrive Personal. Bundled with M365, it’s a cheap option to get 1 TB of cloud storage. Having plenty of cloud storage at hand, I’m also using my OneDrive to run automated backups of my servers. There are various solutions capable of uploading files to OneDrive, including rclone. However, I was looking for a solution which enables me to grant my backup script access to only one specific folder instead of my entire cloud drive – better safe than sorry....

September 2, 2021 · 4 min · 682 words · Heiner

Unifi USG: Multiple IP addresses on PPPoE

My DSL provider TAL.de offers to assign a static and a dynamic IP address on PPPoE dial-in. The dynamic IP address is the primary one, used for accessing the internet. Packets to the static IP address are routed to the router as well. Here’s how to set things up on a Unifi Security Gateway (USG). +By default, the USG only allows for one IP address when dialing in via PPPoE....

August 16, 2021 · 2 min · 353 words · Heiner
+ \ No newline at end of file diff --git a/tags/onedrive/index.xml b/tags/onedrive/index.xml new file mode 100644 index 0000000..8fa08b6 --- /dev/null +++ b/tags/onedrive/index.xml @@ -0,0 +1,28 @@ + + + + Onedrive on Virtualzone Blog + https://virtualzone.de/tags/onedrive/ + Recent content in Onedrive on Virtualzone Blog + Hugo -- gohugo.io + en-us + &copy; 2024 Heiner Beck. + Thu, 02 Sep 2021 11:30:03 +0000 + + + Back up server to OneDrive’s special App Folder + https://virtualzone.de/posts/onedrive-upload-backup/ + Thu, 02 Sep 2021 11:30:03 +0000 + https://virtualzone.de/posts/onedrive-upload-backup/ + I’m a committed user of OneDrive Personal. Bundled with M365, it’s a cheap option to get 1 TB of cloud storage. Having plenty of cloud storage at hand, I’m also using my OneDrive to run automated backups of my servers. There are various solutions capable of uploading files to OneDrive, including rclone. However, I was looking for a solution which enables me to grant my backup script access to only one specific folder instead of my entire cloud drive – better safe than sorry. + + + Unifi USG: Multiple IP addresses on PPPoE + https://virtualzone.de/posts/unifi-usg-multiple-ip-addresses-on-pppoe/ + Mon, 16 Aug 2021 11:30:03 +0000 + https://virtualzone.de/posts/unifi-usg-multiple-ip-addresses-on-pppoe/ + My DSL provider TAL.de offers to assign a static and a dynamic IP address on PPPoE dial-in. The dynamic IP address is the primary one, used for accessing the internet. Packets to the static IP address are routed to the router as well. Here’s how to set things up on a Unifi Security Gateway (USG). +By default, the USG only allows for one IP address when dialing in via PPPoE. + + + diff --git a/tags/onedrive/page/1/index.html b/tags/onedrive/page/1/index.html new file mode 100644 index 0000000..32c28fe --- /dev/null +++ b/tags/onedrive/page/1/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/tags/onedrive/ + \ No newline at end of file diff --git a/tags/openhab/index.html b/tags/openhab/index.html new file mode 100644 index 0000000..199c846 --- /dev/null +++ b/tags/openhab/index.html @@ -0,0 +1,7 @@ +Openhab | Virtualzone Blog +

From FHEM to OpenHAB with Homegear: Installation/Docker container

For more than 2.5 years, I’ve now been running FHEM with several HomeMatic sensors and actuators. Using the HM-CFG-LAN Configuration Tool as an I/O interface between FHEM and the HomeMatic devices, this setup has been running smoothly most of the time. The configuration was a bit tricky now and then, but it worked. However, OpenHAB seems to be becoming a really good choice. Version 2 is currently available as Beta 3. It features a modern web interface and an easy-to-use extension manager....

August 28, 2016 · 6 min · 1084 words · Heiner
+ \ No newline at end of file diff --git a/tags/openhab/index.xml b/tags/openhab/index.xml new file mode 100644 index 0000000..761519f --- /dev/null +++ b/tags/openhab/index.xml @@ -0,0 +1,20 @@ + + + + Openhab on Virtualzone Blog + https://virtualzone.de/tags/openhab/ + Recent content in Openhab on Virtualzone Blog + Hugo -- gohugo.io + en-us + &copy; 2024 Heiner Beck. + Sun, 28 Aug 2016 11:30:03 +0000 + + + From FHEM to OpenHAB with Homegear: Installation/Docker container + https://virtualzone.de/posts/from-fhem-to-openhab-with-homegear-installation-docker-container/ + Sun, 28 Aug 2016 11:30:03 +0000 + https://virtualzone.de/posts/from-fhem-to-openhab-with-homegear-installation-docker-container/ + For more than 2.5 years, I’ve now been running FHEM with several HomeMatic sensors and actuators. Using the HM-CFG-LAN Configuration Tool as an I/O interface between FHEM and the HomeMatic devices, this setup has been running smoothly most of the time. The configuration was a bit tricky now and then, but it worked. However, OpenHAB seems to be becoming a really good choice. Version 2 is currently available as Beta 3. It features a modern web interface and an easy-to-use extension manager. + + + diff --git a/tags/openhab/page/1/index.html b/tags/openhab/page/1/index.html new file mode 100644 index 0000000..b1bfb74 --- /dev/null +++ b/tags/openhab/page/1/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/tags/openhab/ + \ No newline at end of file diff --git a/tags/proxy/index.html b/tags/proxy/index.html new file mode 100644 index 0000000..12baef0 --- /dev/null +++ b/tags/proxy/index.html @@ -0,0 +1,8 @@ +Proxy | Virtualzone Blog +

How to set up HTTPS/SSL in WordPress behind Proxy (nginx, HAProxy, Apache, lighttpd)

Today I changed my blog’s accessibility from HTTP (unencrypted) to HTTPS/SSL. My blog is running WordPress behind an nginx proxy server. However, while the pages themselves loaded successfully over HTTPS, the embedded static resources like JavaScript, image, and CSS files did not. Here’s how I fixed it. +The cause of this issue is that WordPress doesn’t seem to detect the original protocol scheme (HTTPS) correctly when running behind a proxy....

August 27, 2016 · 2 min · 255 words · Heiner
+ \ No newline at end of file diff --git a/tags/proxy/index.xml b/tags/proxy/index.xml new file mode 100644 index 0000000..7940389 --- /dev/null +++ b/tags/proxy/index.xml @@ -0,0 +1,21 @@ + + + + Proxy on Virtualzone Blog + https://virtualzone.de/tags/proxy/ + Recent content in Proxy on Virtualzone Blog + Hugo -- gohugo.io + en-us + &copy; 2024 Heiner Beck. + Sat, 27 Aug 2016 11:30:03 +0000 + + + How to set up HTTPS/SSL in WordPress behind Proxy (nginx, HAProxy, Apache, lighttpd) + https://virtualzone.de/posts/https-ssl-in-wordpress-behind-proxy/ + Sat, 27 Aug 2016 11:30:03 +0000 + https://virtualzone.de/posts/https-ssl-in-wordpress-behind-proxy/ + Today I changed the accessibility of my blog from HTTP (unencrypted) to HTTPS/SSL. My blog is running WordPress behind an nginx proxy server. However, while the pages themselves loaded successfully from HTTPS, the embedded static resources like JavaScripts, Images, CSS files etc. did not. Here’s how I fixed it. +The cause of this issue is that WordPress doesn’t seem to detect the original protocol scheme (HTTPS) correctly when running behind a proxy. + + + diff --git a/tags/proxy/page/1/index.html b/tags/proxy/page/1/index.html new file mode 100644 index 0000000..2a78b5a --- /dev/null +++ b/tags/proxy/page/1/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/tags/proxy/ + \ No newline at end of file diff --git a/tags/raspberrypi/index.html b/tags/raspberrypi/index.html new file mode 100644 index 0000000..c05ea3f --- /dev/null +++ b/tags/raspberrypi/index.html @@ -0,0 +1,8 @@ +Raspberrypi | Virtualzone Blog +

Raspberry Pi OS: Remove unnecessary packages

Recently, I wrote about the availability of the 64 bit beta version of Raspberry Pi OS (formerly known as Raspbian). Unfortunately, the new 64 bit beta is only available in the Desktop variant, containing lots of packages most lightweight server systems won’t need. There’s no lite variant of the 64 bit beta version available at the time of writing. However, you can easily remove the Desktop packages from a running installation with two easy commands....

June 7, 2020 · 1 min · 161 words · Heiner
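The excerpt cuts off before the two actual commands. As a hedged sketch of the general apt-based approach only – the package selections below are assumptions about typical desktop components, not the exact names from the post:

# Assumption: purging the X server and the Raspberry Pi desktop packages removes
# most of the GUI stack; autoremove then clears the orphaned dependencies.
sudo apt-get purge -y xserver-common raspberrypi-ui-mods lightdm
sudo apt-get autoremove --purge -y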

Native USB boot for Raspberry Pi 4

Here’s something that’s probably been eagerly awaited not only by me: Finally, Raspberry Pi 4 can boot directly from USB devices, without any of the widespread workarounds which require an SD card as a primary boot medium. This is made possible by a new bootloader firmware stored in the EEPROM. Furthermore, a new 64 bit beta version of Raspberry Pi OS is available, too (formerly known as Raspbian). +To get started, boot your Raspberry Pi with a Raspbian or Raspberry Pi OS installation....

May 28, 2020 · 2 min · 404 words · Heiner
+ \ No newline at end of file diff --git a/tags/raspberrypi/index.xml b/tags/raspberrypi/index.xml new file mode 100644 index 0000000..1aa3a23 --- /dev/null +++ b/tags/raspberrypi/index.xml @@ -0,0 +1,28 @@ + + + + Raspberrypi on Virtualzone Blog + https://virtualzone.de/tags/raspberrypi/ + Recent content in Raspberrypi on Virtualzone Blog + Hugo -- gohugo.io + en-us + &copy; 2024 Heiner Beck. + Sun, 07 Jun 2020 11:30:03 +0000 + + + Raspberry Pi OS: Remove unnecessary packages + https://virtualzone.de/posts/raspberry-pi-os-remove-packages/ + Sun, 07 Jun 2020 11:30:03 +0000 + https://virtualzone.de/posts/raspberry-pi-os-remove-packages/ + Recently, I wrote about the availability of the 64 bit beta version of Raspberry Pi OS (formerly known as Raspbian). Unfortunately, the new 64 bit beta is only available in the Desktop variant, containing lots of packages most lightweight server systems won&rsquo;t need. There&rsquo;s no lite variant of the 64 bit beta version available at the time of writing. However, you can easily remove the Desktop packages from a running installation with two easy commands. + + + Native USB boot for Raspberry Pi 4 + https://virtualzone.de/posts/usb-boot-raspberry-pi/ + Thu, 28 May 2020 11:30:03 +0000 + https://virtualzone.de/posts/usb-boot-raspberry-pi/ + Here&rsquo;s something that&rsquo;s probably been eagerly awaited not only by me: Finally, Raspberry Pi 4 can boot directly from USB devices, without any of the widespread workarounds which require an SD card as a primary boot medium. This is made possible by a new bootloader firmware stored in the EEPROM. Furthermore, a new 64 bit beta version of Raspberry Pi OS is available, too (formerly known as Raspbian). +To get started, boot your Raspberry Pi with a Raspbian or Raspberry Pi OS installation. + + + diff --git a/tags/raspberrypi/page/1/index.html b/tags/raspberrypi/page/1/index.html new file mode 100644 index 0000000..b538057 --- /dev/null +++ b/tags/raspberrypi/page/1/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/tags/raspberrypi/ + \ No newline at end of file diff --git a/tags/sonicwall/index.html b/tags/sonicwall/index.html new file mode 100644 index 0000000..5c95e4c --- /dev/null +++ b/tags/sonicwall/index.html @@ -0,0 +1,7 @@ +Sonicwall | Virtualzone Blog +

How to enable IPv6 on a SonicWall (SonicOS 5.9) using NAT

IPv6 aimed to make Network Address Translation (NAT) obsolete, as there are so many addresses available that every single device can have its own globally unique IPv6 address. However, even with IPv6, using NAT is a very simple way to get your devices behind a Dell SonicWall connected to IPv6 services on the internet. In contrast to going without NAT, all the devices behind your SonicWall will appear under the SonicWall’s IPv6 address....

November 20, 2014 · 2 min · 372 words · Heiner
+ \ No newline at end of file diff --git a/tags/sonicwall/index.xml b/tags/sonicwall/index.xml new file mode 100644 index 0000000..548d123 --- /dev/null +++ b/tags/sonicwall/index.xml @@ -0,0 +1,20 @@ + + + + Sonicwall on Virtualzone Blog + https://virtualzone.de/tags/sonicwall/ + Recent content in Sonicwall on Virtualzone Blog + Hugo -- gohugo.io + en-us + &copy; 2024 Heiner Beck. + Thu, 20 Nov 2014 11:30:03 +0000 + + + How to enable IPv6 on a SonicWall (SonicOS 5.9) using NAT + https://virtualzone.de/posts/ipv6-on-a-sonicwall/ + Thu, 20 Nov 2014 11:30:03 +0000 + https://virtualzone.de/posts/ipv6-on-a-sonicwall/ + IPv6 aimed to make Network Address Translation (NAT) obsolete, as there are so many addresses available that every single device can have its own globally unique IPv6 address. However, even with IPv6, using NAT is a very simple way to get your devices behind a Dell SonicWall connected to IPv6 services on the internet. In contrast to going without NAT, all the devices behind your SonicWall will appear under the SonicWall’s IPv6 address. + + + diff --git a/tags/sonicwall/page/1/index.html b/tags/sonicwall/page/1/index.html new file mode 100644 index 0000000..48a4544 --- /dev/null +++ b/tags/sonicwall/page/1/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/tags/sonicwall/ + \ No newline at end of file diff --git a/tags/tool/index.html b/tags/tool/index.html new file mode 100644 index 0000000..d319da7 --- /dev/null +++ b/tags/tool/index.html @@ -0,0 +1,11 @@ +Tool | Virtualzone Blog +

Back up server to OneDrive’s special App Folder

I’m a committed user of OneDrive Personal. Bundled with M365, it’s a cheap option to get 1 TB of cloud storage. Having plenty of cloud storage at hand, I’m also using my OneDrive to run automated backups of my servers. There are various solutions capable of uploading files to OneDrive, including rclone. However, I was looking for a solution which enables me to grant my backup script access to only one specific folder instead of my entire cloud drive – better safe than sorry....

September 2, 2021 · 4 min · 682 words · Heiner

Unifi USG: Multiple IP addresses on PPPoE

My DSL provider TAL.de offers to assign a static and a dynamic IP address on PPPoE dial-in. The dynamic IP address is the primary one, used for accessing the internet. Packets to the static IP address are routed to the router as well. Here’s how to set things up on a Unifi Security Gateway (USG). +By default, the USG only allows for one IP address when dialing in via PPPoE....

August 16, 2021 · 2 min · 353 words · Heiner

UptimeRobot: A nice free website monitoring service

Over the weekend I’ve been looking around for a free service which monitors my websites. My requirements were to be able to monitor both HTTP and HTTPS sites, to have support for authentication, and for the monitoring service to be able to check whether a specific keyword exists within the watched site (instead of just assuming that an HTTP Status Code 200 is okay). Furthermore, I needed notifications in case of downtime (Email and Pushbullet are fine for me)....

September 5, 2016 · 1 min · 120 words · Heiner

How to reduce PDF file size in Linux - Part 2

Several months ago, I wrote a blog post about reducing a PDF file’s size. Since then, I’ve used that technique many times. However, you may want to control the DPI (dots per inch) even more specifically. Here’s how to do it: +gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.7 \ -dDownsampleColorImages=true \ -dDownsampleGrayImages=true \ -dDownsampleMonoImages=true \ -dColorImageResolution=120 \ -dGrayImageResolution=120 \ -dMonoImageResolution=120 \ -sOutputFile=output.pdf input.pdf Hint: This also works on MacOS. Just install GhostScript using Homebrew:...

August 15, 2015 · 1 min · 75 words · Heiner
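For completeness – the Homebrew hint in the excerpt above is truncated. Installing GhostScript on macOS (the Homebrew formula is assumed to be the standard ghostscript package) and then running the same downsampling command quoted in the excerpt would look roughly like this:

# Install GhostScript via Homebrew, then downsample images to 120 DPI.
brew install ghostscript
gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.7 \
  -dDownsampleColorImages=true -dDownsampleGrayImages=true -dDownsampleMonoImages=true \
  -dColorImageResolution=120 -dGrayImageResolution=120 -dMonoImageResolution=120 \
  -sOutputFile=output.pdf input.pdf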

How to reduce PDF file size in Linux

Using a single GhostScript command in my Ubuntu terminal, I was able to reduce the size of a PDF file from 6 MB to approximately 1 MB: +gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/screen -sOutputFile=output.pdf input.pdf You can also use the following parameters for -dPDFSETTINGS instead of /screen: +/screen – Lowest quality, lowest size /ebook – Moderate quality /printer – Good quality /prepress – Best quality, highest size Update: Read Part 2 of this blog post for more detailed file size reduction settings....

November 21, 2012 · 1 min · 98 words · Heiner
+ \ No newline at end of file diff --git a/tags/tool/index.xml b/tags/tool/index.xml new file mode 100644 index 0000000..dbc1fff --- /dev/null +++ b/tags/tool/index.xml @@ -0,0 +1,52 @@ + + + + Tool on Virtualzone Blog + https://virtualzone.de/tags/tool/ + Recent content in Tool on Virtualzone Blog + Hugo -- gohugo.io + en-us + &copy; 2024 Heiner Beck. + Thu, 02 Sep 2021 11:30:03 +0000 + + + Back up server to OneDrive’s special App Folder + https://virtualzone.de/posts/onedrive-upload-backup/ + Thu, 02 Sep 2021 11:30:03 +0000 + https://virtualzone.de/posts/onedrive-upload-backup/ + I’m a committed user of OneDrive Personal. Bundled with M365, it’s a cheap option to get 1 TB of cloud storage. Having plenty of cloud storage at hand, I’m also using my OneDrive to run automated backups of my servers. There are various solutions capable of uploading files to OneDrive, including rclone. However, I was looking for a solution which enables me to grant my backup script access to only one specific folder instead of my entire cloud drive – better safe than sorry. + + + Unifi USG: Multiple IP addresses on PPPoE + https://virtualzone.de/posts/unifi-usg-multiple-ip-addresses-on-pppoe/ + Mon, 16 Aug 2021 11:30:03 +0000 + https://virtualzone.de/posts/unifi-usg-multiple-ip-addresses-on-pppoe/ + My DSL provider TAL.de offers to assign a static and a dynamic IP address on PPPoE dial-in. The dynamic IP address is the primary one, used for accessing the internet. Packets to the static IP address are routed to the router as well. Here’s how to set things up on a Unifi Security Gateway (USG). +By default, the USG only allows for one IP address when dialing in via PPPoE. + + + UptimeRobot: A nice free website monitoring service + https://virtualzone.de/posts/uptime-robot-website-monitoring/ + Mon, 05 Sep 2016 11:30:03 +0000 + https://virtualzone.de/posts/uptime-robot-website-monitoring/ + Over the weekend I’ve been looking around for a free service which monitors my websites. My requirements were to be able to monitor both HTTP and HTTPS sites, to have support for authentication, and for the monitoring service to be able to check whether a specific keyword exists within the watched site (instead of just assuming that an HTTP Status Code 200 is okay). Furthermore, I needed notifications in case of downtime (Email and Pushbullet are fine for me). + + + How to reduce PDF file size in Linux - Part 2 + https://virtualzone.de/posts/reduce-pdf-file-size-2/ + Sat, 15 Aug 2015 11:30:03 +0000 + https://virtualzone.de/posts/reduce-pdf-file-size-2/ + Several months ago, I wrote a blog post about reducing a PDF file’s size. Since then, I’ve used that technique many times. However, you may want to control the DPI (dots per inch) even more specifically. Here’s how to do it: +gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.7 \ -dDownsampleColorImages=true \ -dDownsampleGrayImages=true \ -dDownsampleMonoImages=true \ -dColorImageResolution=120 \ -dGrayImageResolution=120 \ -dMonoImageResolution=120 \ -sOutputFile=output.pdf input.pdf Hint: This also works on MacOS. 
Just install GhostScript using Homebrew: + + + How to reduce PDF file size in Linux + https://virtualzone.de/posts/reduce-pdf-file-size/ + Wed, 21 Nov 2012 11:30:03 +0000 + https://virtualzone.de/posts/reduce-pdf-file-size/ + Using a single GhostScript command in my Ubuntu terminal, I was able to reduce the size of a PDF file from 6 MB to approximately 1 MB: +gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/screen -sOutputFile=output.pdf input.pdf You can also use the following parameters for -dPDFSETTINGS instead of /screen: +/screen – Lowest quality, lowest size /ebook – Moderate quality /printer – Good quality /prepress – Best quality, highest size Update: Read Part 2 of this blog post for more detailed file size reduction settings. + + + diff --git a/tags/tool/page/1/index.html b/tags/tool/page/1/index.html new file mode 100644 index 0000000..2fc44ce --- /dev/null +++ b/tags/tool/page/1/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/tags/tool/ + \ No newline at end of file diff --git a/tags/wordpress/index.html b/tags/wordpress/index.html new file mode 100644 index 0000000..2abe2dd --- /dev/null +++ b/tags/wordpress/index.html @@ -0,0 +1,8 @@ +Wordpress | Virtualzone Blog +

How to set up HTTPS/SSL in WordPress behind Proxy (nginx, HAProxy, Apache, lighttpd)

Today I changed my blog’s accessibility from HTTP (unencrypted) to HTTPS/SSL. My blog is running WordPress behind an nginx proxy server. However, while the pages themselves loaded successfully over HTTPS, the embedded static resources like JavaScript, image, and CSS files did not. Here’s how I fixed it. +The cause of this issue is that WordPress doesn’t seem to detect the original protocol scheme (HTTPS) correctly when running behind a proxy....

August 27, 2016 · 2 min · 255 words · Heiner
+ \ No newline at end of file diff --git a/tags/wordpress/index.xml b/tags/wordpress/index.xml new file mode 100644 index 0000000..72df9bf --- /dev/null +++ b/tags/wordpress/index.xml @@ -0,0 +1,21 @@ + + + + Wordpress on Virtualzone Blog + https://virtualzone.de/tags/wordpress/ + Recent content in Wordpress on Virtualzone Blog + Hugo -- gohugo.io + en-us + &copy; 2024 Heiner Beck. + Sat, 27 Aug 2016 11:30:03 +0000 + + + How to set up HTTPS/SSL in WordPress behind Proxy (nginx, HAProxy, Apache, lighttpd) + https://virtualzone.de/posts/https-ssl-in-wordpress-behind-proxy/ + Sat, 27 Aug 2016 11:30:03 +0000 + https://virtualzone.de/posts/https-ssl-in-wordpress-behind-proxy/ + Today I changed the accessibility of my blog from HTTP (unencrypted) to HTTPS/SSL. My blog is running WordPress behind an nginx proxy server. However, while the pages themselves loaded successfully from HTTPS, the embedded static resources like JavaScripts, Images, CSS files etc. did not. Here’s how I fixed it. +The cause of this issue is that WordPress doesn’t seem to detect the original protocol scheme (HTTPS) correctly when running behind a proxy. + + + diff --git a/tags/wordpress/page/1/index.html b/tags/wordpress/page/1/index.html new file mode 100644 index 0000000..3a87be4 --- /dev/null +++ b/tags/wordpress/page/1/index.html @@ -0,0 +1,2 @@ +https://virtualzone.de/tags/wordpress/ + \ No newline at end of file diff --git a/themes/PaperMod b/themes/PaperMod deleted file mode 160000 index dad94ab..0000000 --- a/themes/PaperMod +++ /dev/null @@ -1 +0,0 @@ -Subproject commit dad94ab4b7c55eea0b63f7b81419d027fe9a8d81