mirror of
https://github.com/davegallant/davegallant.github.io.git
synced 2025-08-06 08:43:40 +00:00
252 lines
87 KiB
JavaScript
252 lines
87 KiB
JavaScript
(()=>{var ie=Object.create;var te=Object.defineProperty;var se=Object.getOwnPropertyDescriptor;var ae=Object.getOwnPropertyNames;var re=Object.getPrototypeOf,ue=Object.prototype.hasOwnProperty;var le=(e,n)=>()=>(n||e((n={exports:{}}).exports,n),n.exports);var he=(e,n,o,i)=>{if(n&&typeof n=="object"||typeof n=="function")for(let s of ae(n))!ue.call(e,s)&&s!==o&&te(e,s,{get:()=>n[s],enumerable:!(i=se(n,s))||i.enumerable});return e};var ce=(e,n,o)=>(o=e!=null?ie(re(e)):{},he(n||!e||!e.__esModule?te(o,"default",{value:e,enumerable:!0}):o,e));var ne=le((exports,module)=>{(function _f(self){"use strict";try{module&&(self=module)}catch(e){}self._factory=_f;var t;function u(e){return typeof e!="undefined"?e:!0}function aa(e){let n=Array(e);for(let o=0;o<e;o++)n[o]=v();return n}function v(){return Object.create(null)}function ba(e,n){return n.length-e.length}function x(e){return typeof e=="string"}function C(e){return typeof e=="object"}function D(e){return typeof e=="function"}function ca(e,n){var o=da;if(e&&(n&&(e=E(e,n)),this.H&&(e=E(e,this.H)),this.J&&1<e.length&&(e=E(e,this.J)),o||o==="")){if(e=e.split(o),this.filter){n=this.filter,o=e.length;let i=[];for(let s=0,r=0;s<o;s++){let l=e[s];l&&!n[l]&&(i[r++]=l)}e=i}return e}return e}let da=/[\p{Z}\p{S}\p{P}\p{C}]+/u,ea=/[\u0300-\u036f]/g;function fa(e,n){let o=Object.keys(e),i=o.length,s=[],r="",l=0;for(let h=0,p,m;h<i;h++)p=o[h],(m=e[p])?(s[l++]=F(n?"(?!\\b)"+p+"(\\b|_)":p),s[l++]=m):r+=(r?"|":"")+p;return r&&(s[l++]=F(n?"(?!\\b)("+r+")(\\b|_)":"("+r+")"),s[l]=""),s}function E(e,n){for(let o=0,i=n.length;o<i&&(e=e.replace(n[o],n[o+1]),e);o+=2);return e}function F(e){return new RegExp(e,"g")}function ha(e){let n="",o="";for(let i=0,s=e.length,r;i<s;i++)(r=e[i])!==o&&(n+=o=r);return n}var ja={encode:ia,F:!1,G:""};function ia(e){return ca.call(this,(""+e).toLowerCase(),!1)}let ka={},G={};function la(e){I(e,"add"),I(e,"append"),I(e,"search"),I(e,"update"),I(e,"remove")}function I(e,n){e[n+"Async"]=function(){let o=this,i=arguments;var s=i[i.length-1];let r;return D(s)&&(r=s,delete i[i.length-1]),s=new Promise(function(l){setTimeout(function(){o.async=!0;let h=o[n].apply(o,i);o.async=!1,l(h)})}),r?(s.then(r),this):s}}function ma(e,n,o,i){let s=e.length,r=[],l,h,p=0;i&&(i=[]);for(let m=s-1;0<=m;m--){let f=e[m],A=f.length,w=v(),k=!l;for(let g=0;g<A;g++){let y=f[g],q=y.length;if(q)for(let B=0,R,_;B<q;B++)if(_=y[B],l){if(l[_]){if(!m){if(o)o--;else if(r[p++]=_,p===n)return r}(m||i)&&(w[_]=1),k=!0}if(i&&(R=(h[_]||0)+1,h[_]=R,R<s)){let $=i[R-2]||(i[R-2]=[]);$[$.length]=_}}else w[_]=1}if(i)l||(h=w);else if(!k)return[];l=w}if(i)for(let m=i.length-1,f,A;0<=m;m--){f=i[m],A=f.length;for(let w=0,k;w<A;w++)if(k=f[w],!l[k]){if(o)o--;else if(r[p++]=k,p===n)return r;l[k]=1}}return r}function na(e,n){let o=v(),i=v(),s=[];for(let r=0;r<e.length;r++)o[e[r]]=1;for(let r=0,l;r<n.length;r++){l=n[r];for(let h=0,p;h<l.length;h++)p=l[h],o[p]&&!i[p]&&(i[p]=1,s[s.length]=p)}return s}function J(e){this.l=e!==!0&&e,this.cache=v(),this.h=[]}function oa(e,n,o){C(e)&&(e=e.query);let i=this.cache.get(e);return i||(i=this.search(e,n,o),this.cache.set(e,i)),i}J.prototype.set=function(e,n){if(!this.cache[e]){var o=this.h.length;for(o===this.l?delete this.cache[this.h[o-1]]:o++,--o;0<o;o--)this.h[o]=this.h[o-1];this.h[0]=e}this.cache[e]=n},J.prototype.get=function(e){let n=this.cache[e];if(this.l&&n&&(e=this.h.indexOf(e))){let o=this.h[e-1];this.h[e-1]=this.h[e],this.h[e]=o}return n};let qa={memory:{charset:"latin:extra",D:3,B:4,m:!1},performance:{D:3,B:3,s:!1,context:{depth:2,D:1}},match:{charset:"latin:extra",G:"reverse"},score:{charset:"latin:advanced",D:20,B:3,context:{depth:3,D:9}},default:{}};function ra(e,n,o,i,s,r,l){setTimeout(function(){let h=e(o?o+"."+i:i,JSON.stringify(l));h&&h.then?h.then(function(){n.export(e,n,o,s,r+1)}):n.export(e,n,o,s,r+1)})}function K(e,n){if(!(this instanceof K))return new K(e);var o;if(e){x(e)?e=qa[e]:(o=e.preset)&&(e=Object.assign({},o[o],e)),o=e.charset;var i=e.lang;x(o)&&(o.indexOf(":")===-1&&(o+=":default"),o=G[o]),x(i)&&(i=ka[i])}else e={};let s,r,l=e.context||{};if(this.encode=e.encode||o&&o.encode||ia,this.register=n||v(),this.D=s=e.resolution||9,this.G=n=o&&o.G||e.tokenize||"strict",this.depth=n==="strict"&&l.depth,this.l=u(l.bidirectional),this.s=r=u(e.optimize),this.m=u(e.fastupdate),this.B=e.minlength||1,this.C=e.boost,this.map=r?aa(s):v(),this.A=s=l.resolution||1,this.h=r?aa(s):v(),this.F=o&&o.F||e.rtl,this.H=(n=e.matcher||i&&i.H)&&fa(n,!1),this.J=(n=e.stemmer||i&&i.J)&&fa(n,!0),o=n=e.filter||i&&i.filter){o=n,i=v();for(let h=0,p=o.length;h<p;h++)i[o[h]]=1;o=i}this.filter=o,this.cache=(n=e.cache)&&new J(n)}t=K.prototype,t.append=function(e,n){return this.add(e,n,!0)},t.add=function(e,n,o,i){if(n&&(e||e===0)){if(!i&&!o&&this.register[e])return this.update(e,n);if(n=this.encode(n),i=n.length){let m=v(),f=v(),A=this.depth,w=this.D;for(let k=0;k<i;k++){let g=n[this.F?i-1-k:k];var s=g.length;if(g&&s>=this.B&&(A||!f[g])){var r=L(w,i,k),l="";switch(this.G){case"full":if(2<s){for(r=0;r<s;r++)for(var h=s;h>r;h--)if(h-r>=this.B){var p=L(w,i,k,s,r);l=g.substring(r,h),M(this,f,l,p,e,o)}break}case"reverse":if(1<s){for(h=s-1;0<h;h--)l=g[h]+l,l.length>=this.B&&M(this,f,l,L(w,i,k,s,h),e,o);l=""}case"forward":if(1<s){for(h=0;h<s;h++)l+=g[h],l.length>=this.B&&M(this,f,l,r,e,o);break}default:if(this.C&&(r=Math.min(r/this.C(n,g,k)|0,w-1)),M(this,f,g,r,e,o),A&&1<i&&k<i-1){for(s=v(),l=this.A,r=g,h=Math.min(A+1,i-k),s[r]=1,p=1;p<h;p++)if((g=n[this.F?i-1-k-p:k+p])&&g.length>=this.B&&!s[g]){s[g]=1;let y=this.l&&g>r;M(this,m,y?r:g,L(l+(i/2>l?0:1),i,k,h-1,p-1),e,o,y?g:r)}}}}}this.m||(this.register[e]=1)}}return this};function L(e,n,o,i,s){return o&&1<e?n+(i||0)<=e?o+(s||0):(e-1)/(n+(i||0))*(o+(s||0))+1|0:0}function M(e,n,o,i,s,r,l){let h=l?e.h:e.map;(!n[o]||l&&!n[o][l])&&(e.s&&(h=h[i]),l?(n=n[o]||(n[o]=v()),n[l]=1,h=h[l]||(h[l]=v())):n[o]=1,h=h[o]||(h[o]=[]),e.s||(h=h[i]||(h[i]=[])),r&&h.includes(s)||(h[h.length]=s,e.m&&(e=e.register[s]||(e.register[s]=[]),e[e.length]=h)))}t.search=function(e,n,o){o||(!n&&C(e)?(o=e,e=o.query):C(n)&&(o=n));let i=[],s,r,l=0;if(o){e=o.query||e,n=o.limit,l=o.offset||0;var h=o.context;r=o.suggest}if(e&&(e=this.encode(""+e),s=e.length,1<s)){o=v();var p=[];for(let f=0,A=0,w;f<s;f++)if((w=e[f])&&w.length>=this.B&&!o[w])if(this.s||r||this.map[w])p[A++]=w,o[w]=1;else return i;e=p,s=e.length}if(!s)return i;n||(n=100),h=this.depth&&1<s&&h!==!1,o=0;let m;h?(m=e[0],o=1):1<s&&e.sort(ba);for(let f,A;o<s;o++){if(A=e[o],h?(f=sa(this,i,r,n,l,s===2,A,m),r&&f===!1&&i.length||(m=A)):f=sa(this,i,r,n,l,s===1,A),f)return f;if(r&&o===s-1){if(p=i.length,!p){if(h){h=0,o=-1;continue}return i}if(p===1)return ta(i[0],n,l)}}return ma(i,n,l,r)};function sa(e,n,o,i,s,r,l,h){let p=[],m=h?e.h:e.map;if(e.s||(m=ua(m,l,h,e.l)),m){let f=0,A=Math.min(m.length,h?e.A:e.D);for(let w=0,k=0,g,y;w<A&&!((g=m[w])&&(e.s&&(g=ua(g,l,h,e.l)),s&&g&&r&&(y=g.length,y<=s?(s-=y,g=null):(g=g.slice(s),s=0)),g&&(p[f++]=g,r&&(k+=g.length,k>=i))));w++);if(f){if(r)return ta(p,i,0);n[n.length]=p;return}}return!o&&p}function ta(e,n,o){return e=e.length===1?e[0]:[].concat.apply([],e),o||e.length>n?e.slice(o,o+n):e}function ua(e,n,o,i){return o?(i=i&&n>o,e=(e=e[i?n:o])&&e[i?o:n]):e=e[n],e}t.contain=function(e){return!!this.register[e]},t.update=function(e,n){return this.remove(e).add(e,n)},t.remove=function(e,n){let o=this.register[e];if(o){if(this.m)for(let i=0,s;i<o.length;i++)s=o[i],s.splice(s.indexOf(e),1);else N(this.map,e,this.D,this.s),this.depth&&N(this.h,e,this.A,this.s);if(n||delete this.register[e],this.cache){n=this.cache;for(let i=0,s,r;i<n.h.length;i++)r=n.h[i],s=n.cache[r],s.includes(e)&&(n.h.splice(i--,1),delete n.cache[r])}}return this};function N(e,n,o,i,s){let r=0;if(e.constructor===Array)if(s)n=e.indexOf(n),n!==-1?1<e.length&&(e.splice(n,1),r++):r++;else{s=Math.min(e.length,o);for(let l=0,h;l<s;l++)(h=e[l])&&(r=N(h,n,o,i,s),i||r||delete e[l])}else for(let l in e)(r=N(e[l],n,o,i,s))||delete e[l];return r}t.searchCache=oa,t.export=function(e,n,o,i,s){let r,l;switch(s||(s=0)){case 0:if(r="reg",this.m){l=v();for(let h in this.register)l[h]=1}else l=this.register;break;case 1:r="cfg",l={doc:0,opt:this.s?1:0};break;case 2:r="map",l=this.map;break;case 3:r="ctx",l=this.h;break;default:return}return ra(e,n||this,o,r,i,s,l),!0},t.import=function(e,n){if(n)switch(x(n)&&(n=JSON.parse(n)),e){case"cfg":this.s=!!n.opt;break;case"reg":this.m=!1,this.register=n;break;case"map":this.map=n;break;case"ctx":this.h=n}},la(K.prototype);function va(e){e=e.data;var n=self._index;let o=e.args;var i=e.task;switch(i){case"init":i=e.options||{},e=e.factory,n=i.encode,i.cache=!1,n&&n.indexOf("function")===0&&(i.encode=Function("return "+n)()),e?(Function("return "+e)()(self),self._index=new self.FlexSearch.Index(i),delete self.FlexSearch):self._index=new K(i);break;default:e=e.id,n=n[i].apply(n,o),postMessage(i==="search"?{id:e,msg:n}:{id:e})}}let wa=0;function O(e){if(!(this instanceof O))return new O(e);var n;e?D(n=e.encode)&&(e.encode=n.toString()):e={},(n=(self||window)._factory)&&(n=n.toString());let o=typeof window=="undefined"&&self.exports,i=this;this.o=xa(n,o,e.worker),this.h=v(),this.o&&(o?this.o.on("message",function(s){i.h[s.id](s.msg),delete i.h[s.id]}):this.o.onmessage=function(s){s=s.data,i.h[s.id](s.msg),delete i.h[s.id]},this.o.postMessage({task:"init",factory:n,options:e}))}P("add"),P("append"),P("search"),P("update"),P("remove");function P(e){O.prototype[e]=O.prototype[e+"Async"]=function(){let n=this,o=[].slice.call(arguments);var i=o[o.length-1];let s;return D(i)&&(s=i,o.splice(o.length-1,1)),i=new Promise(function(r){setTimeout(function(){n.h[++wa]=r,n.o.postMessage({task:e,id:wa,args:o})})}),s?(i.then(s),this):i}}function xa(a,b,c){let d;try{d=b?eval('new (require("worker_threads")["Worker"])("../dist/node/node.js")'):a?new Worker(URL.createObjectURL(new Blob(["onmessage="+va.toString()],{type:"text/javascript"}))):new Worker(x(c)?c:"worker/worker.js",{type:"module"})}catch(e){}return d}function Q(e){if(!(this instanceof Q))return new Q(e);var n=e.document||e.doc||e,o;this.K=[],this.h=[],this.A=[],this.register=v(),this.key=(o=n.key||n.id)&&S(o,this.A)||"id",this.m=u(e.fastupdate),this.C=(o=n.store)&&o!==!0&&[],this.store=o&&v(),this.I=(o=n.tag)&&S(o,this.A),this.l=o&&v(),this.cache=(o=e.cache)&&new J(o),e.cache=!1,this.o=e.worker,this.async=!1,o=v();let i=n.index||n.field||n;x(i)&&(i=[i]);for(let s=0,r,l;s<i.length;s++)r=i[s],x(r)||(l=r,r=r.field),l=C(l)?Object.assign({},e,l):e,this.o&&(o[r]=new O(l),o[r].o||(this.o=!1)),this.o||(o[r]=new K(l,this.register)),this.K[s]=S(r,this.A),this.h[s]=r;if(this.C)for(e=n.store,x(e)&&(e=[e]),n=0;n<e.length;n++)this.C[n]=S(e[n],this.A);this.index=o}function S(e,n){let o=e.split(":"),i=0;for(let s=0;s<o.length;s++)e=o[s],0<=e.indexOf("[]")&&(e=e.substring(0,e.length-2))&&(n[i]=!0),e&&(o[i++]=e);return i<o.length&&(o.length=i),1<i?o:o[0]}function T(e,n){if(x(n))e=e[n];else for(let o=0;e&&o<n.length;o++)e=e[n[o]];return e}function U(e,n,o,i,s){if(e=e[s],i===o.length-1)n[s]=e;else if(e)if(e.constructor===Array)for(n=n[s]=Array(e.length),s=0;s<e.length;s++)U(e,n,o,i,s);else n=n[s]||(n[s]=v()),s=o[++i],U(e,n,o,i,s)}function V(e,n,o,i,s,r,l,h){if(e=e[l])if(i===n.length-1){if(e.constructor===Array){if(o[i]){for(n=0;n<e.length;n++)s.add(r,e[n],!0,!0);return}e=e.join(" ")}s.add(r,e,h,!0)}else if(e.constructor===Array)for(l=0;l<e.length;l++)V(e,n,o,i,s,r,l,h);else l=n[++i],V(e,n,o,i,s,r,l,h)}t=Q.prototype,t.add=function(e,n,o){if(C(e)&&(n=e,e=T(n,this.key)),n&&(e||e===0)){if(!o&&this.register[e])return this.update(e,n);for(let i=0,s,r;i<this.h.length;i++)r=this.h[i],s=this.K[i],x(s)&&(s=[s]),V(n,s,this.A,0,this.index[r],e,s[0],o);if(this.I){let i=T(n,this.I),s=v();x(i)&&(i=[i]);for(let r=0,l,h;r<i.length;r++)if(l=i[r],!s[l]&&(s[l]=1,h=this.l[l]||(this.l[l]=[]),!o||!h.includes(e))&&(h[h.length]=e,this.m)){let p=this.register[e]||(this.register[e]=[]);p[p.length]=h}}if(this.store&&(!o||!this.store[e])){let i;if(this.C){i=v();for(let s=0,r;s<this.C.length;s++)r=this.C[s],x(r)?i[r]=n[r]:U(n,i,r,0,r[0])}this.store[e]=i||n}}return this},t.append=function(e,n){return this.add(e,n,!0)},t.update=function(e,n){return this.remove(e).add(e,n)},t.remove=function(e){if(C(e)&&(e=T(e,this.key)),this.register[e]){for(var n=0;n<this.h.length&&(this.index[this.h[n]].remove(e,!this.o),!this.m);n++);if(this.I&&!this.m)for(let o in this.l){n=this.l[o];let i=n.indexOf(e);i!==-1&&(1<n.length?n.splice(i,1):delete this.l[o])}this.store&&delete this.store[e],delete this.register[e]}return this},t.search=function(e,n,o,i){o||(!n&&C(e)?(o=e,e=""):C(n)&&(o=n,n=0));let s=[],r=[],l,h,p,m,f,A,w=0;if(o)if(o.constructor===Array)p=o,o=null;else{if(e=o.query||e,p=(l=o.pluck)||o.index||o.field,m=o.tag,h=this.store&&o.enrich,f=o.bool==="and",n=o.limit||n||100,A=o.offset||0,m&&(x(m)&&(m=[m]),!e)){for(let g=0,y;g<m.length;g++)(y=ya.call(this,m[g],n,A,h))&&(s[s.length]=y,w++);return w?s:[]}x(p)&&(p=[p])}p||(p=this.h),f=f&&(1<p.length||m&&1<m.length);let k=!i&&(this.o||this.async)&&[];for(let g=0,y,q,B;g<p.length;g++){let R;if(q=p[g],x(q)||(R=q,q=R.field,e=R.query||e,n=R.limit||n),k)k[g]=this.index[q].searchAsync(e,n,R||o);else{if(i?y=i[g]:y=this.index[q].search(e,n,R||o),B=y&&y.length,m&&B){let _=[],$=0;f&&(_[0]=[y]);for(let X=0,ee,H;X<m.length;X++)ee=m[X],(B=(H=this.l[ee])&&H.length)&&($++,_[_.length]=f?[H]:H);$&&(y=f?ma(_,n||100,A||0):na(y,_),B=y.length)}if(B)r[w]=q,s[w++]=y;else if(f)return[]}}if(k){let g=this;return new Promise(function(y){Promise.all(k).then(function(q){y(g.search(e,n,o,q))})})}if(!w)return[];if(l&&(!h||!this.store))return s[0];for(let g=0,y;g<r.length;g++){if(y=s[g],y.length&&h&&(y=za.call(this,y)),l)return y;s[g]={field:r[g],result:y}}return s};function ya(e,n,o,i){let s=this.l[e],r=s&&s.length-o;if(r&&0<r)return(r>n||o)&&(s=s.slice(o,o+n)),i&&(s=za.call(this,s)),{tag:e,result:s}}function za(e){let n=Array(e.length);for(let o=0,i;o<e.length;o++)i=e[o],n[o]={id:i,doc:this.store[i]};return n}t.contain=function(e){return!!this.register[e]},t.get=function(e){return this.store[e]},t.set=function(e,n){return this.store[e]=n,this},t.searchCache=oa,t.export=function(e,n,o,i,s){if(s||(s=0),i||(i=0),i<this.h.length){let r=this.h[i],l=this.index[r];n=this,setTimeout(function(){l.export(e,n,s?r:"",i,s++)||(i++,s=1,n.export(e,n,r,i,s))})}else{let r,l;switch(s){case 1:r="tag",l=this.l;break;case 2:r="store",l=this.store;break;default:return}ra(e,this,o,r,i,s,l)}},t.import=function(e,n){if(n)switch(x(n)&&(n=JSON.parse(n)),e){case"tag":this.l=n;break;case"reg":this.m=!1,this.register=n;for(let i=0,s;i<this.h.length;i++)s=this.index[this.h[i]],s.register=n,s.m=!1;break;case"store":this.store=n;break;default:e=e.split(".");let o=e[0];e=e[1],o&&e&&this.index[o].import(e,n)}},la(Q.prototype);var Ba={encode:Aa,F:!1,G:""};let Ca=[F("[\xE0\xE1\xE2\xE3\xE4\xE5]"),"a",F("[\xE8\xE9\xEA\xEB]"),"e",F("[\xEC\xED\xEE\xEF]"),"i",F("[\xF2\xF3\xF4\xF5\xF6\u0151]"),"o",F("[\xF9\xFA\xFB\xFC\u0171]"),"u",F("[\xFD\u0177\xFF]"),"y",F("\xF1"),"n",F("[\xE7c]"),"k",F("\xDF"),"s",F(" & ")," and "];function Aa(e){var n=e=""+e;return n.normalize&&(n=n.normalize("NFD").replace(ea,"")),ca.call(this,n.toLowerCase(),!e.normalize&&Ca)}var Ea={encode:Da,F:!1,G:"strict"};let Fa=/[^a-z0-9]+/,Ga={b:"p",v:"f",w:"f",z:"s",x:"s",\u00DF:"s",d:"t",n:"m",c:"k",g:"k",j:"k",q:"k",i:"e",y:"e",u:"o"};function Da(e){e=Aa.call(this,e).join(" ");let n=[];if(e){let o=e.split(Fa),i=o.length;for(let s=0,r,l=0;s<i;s++)if((e=o[s])&&(!this.filter||!this.filter[e])){r=e[0];let h=Ga[r]||r,p=h;for(let m=1;m<e.length;m++){r=e[m];let f=Ga[r]||r;f&&f!==p&&(h+=f,p=f)}n[l++]=h}}return n}var Ia={encode:Ha,F:!1,G:""};let Ja=[F("ae"),"a",F("oe"),"o",F("sh"),"s",F("th"),"t",F("ph"),"f",F("pf"),"f",F("(?![aeo])h(?![aeo])"),"",F("(?!^[aeo])h(?!^[aeo])"),""];function Ha(e,n){return e&&(e=Da.call(this,e).join(" "),2<e.length&&(e=E(e,Ja)),n||(1<e.length&&(e=ha(e)),e&&(e=e.split(" ")))),e||[]}var La={encode:Ka,F:!1,G:""};let Ma=F("(?!\\b)[aeo]");function Ka(e){return e&&(e=Ha.call(this,e,!0),1<e.length&&(e=e.replace(Ma,"")),1<e.length&&(e=ha(e)),e&&(e=e.split(" "))),e||[]}G["latin:default"]=ja,G["latin:simple"]=Ba,G["latin:balance"]=Ea,G["latin:advanced"]=Ia,G["latin:extra"]=La;let W=self,Y,Z={Index:K,Document:Q,Worker:O,registerCharset:function(e,n){G[e]=n},registerLanguage:function(e,n){ka[e]=n}};(Y=W.define)&&Y.amd?Y([],function(){return Z}):W.exports?W.exports=Z:W.FlexSearch=Z})(exports)});var oe=ce(ne());var j=document.getElementById("search__text"),z=document.getElementById("search__suggestions");j!==null&&document.addEventListener("keydown",e=>{e.ctrlKey&&e.key==="/"?(e.preventDefault(),j.focus()):e.key==="Escape"&&(j.blur(),z.classList.add("search__suggestions--hidden"))});document.addEventListener("click",e=>{z.contains(e.target)||z.classList.add("search__suggestions--hidden")});document.addEventListener("keydown",e=>{if(z.classList.contains("search__suggestions--hidden"))return;let o=[...z.querySelectorAll("a")];if(o.length===0)return;let i=o.indexOf(document.activeElement);if(e.key==="ArrowDown"){e.preventDefault();let s=i+1<o.length?i+1:i;o[s].focus()}else e.key==="ArrowUp"&&(e.preventDefault(),nextIndex=i>0?i-1:0,o[nextIndex].focus())});(function(){let e=new oe.Document({tokenize:"forward",cache:100,document:{id:"id",store:["href","title","description"],index:["title","description","content"]}});e.add({id:0,href:"/blog/using-a-realtek-nic-with-opnsense/",title:"Using a Realtek NIC with OPNsense",description:`For the past few years, I’ve been running pfSense (and more recently OPNsense) in a virtual machine within Proxmox. This has been running fine with a single onboard Intel NIC. A few months ago, I upgraded to a machine that has a CPU that supports hardware-accelerated transcoding, has more SATA ports, and has more PCI slots for future expansion. With the goal of having a dedicated NIC for WAN, I bought an inexpensive 1Gbps PCIe NIC (TG-3468) despite reading about some of the concerns around Realtek NICs (sluggish performance, driver instability, and in some cases system crashes).
|
|
I’ve been running a Realtek NICs reliably on Linux and Windows desktops, so I figured I could make it work without too much effort, but it turns out Realtek NICs really can be problematic when it comes to FreeBSD-based routers, and commonly documented workarounds did not solve my problems.
|
|
`,content:`For the past few years, I’ve been running pfSense (and more recently OPNsense) in a virtual machine within Proxmox. This has been running fine with a single onboard Intel NIC. A few months ago, I upgraded to a machine that has a CPU that supports hardware-accelerated transcoding, has more SATA ports, and has more PCI slots for future expansion. With the goal of having a dedicated NIC for WAN, I bought an inexpensive 1Gbps PCIe NIC (TG-3468) despite reading about some of the concerns around Realtek NICs (sluggish performance, driver instability, and in some cases system crashes).
|
|
I’ve been running a Realtek NICs reliably on Linux and Windows desktops, so I figured I could make it work without too much effort, but it turns out Realtek NICs really can be problematic when it comes to FreeBSD-based routers, and commonly documented workarounds did not solve my problems.
|
|
Environment# My environment consists of:
|
|
Proxmox 8.4 OPNsense 25.1 (QEMU VM) Ethernet controller: Intel Corporation Ethernet Connection (5) I219-LM Ethernet controller: Realtek Semiconductor Co., Ltd. RTL8111/8168/8411 PCI Express Gigabit Ethernet Controller (rev 15) Goal# The goal is to upgrade the OPNsense router from a single NIC to two NICs. The NICs are responsible for:
|
|
LAN: the internal network for computers, phones, cameras, printers, etc (NIC 1) WAN: the connection from the ISP (NIC 2) Having two separate physical interfaces for LAN and WAN creates clear, physical separation between the trusted internal network and the untrusted external network at the hardware level. This also should improve performance and throughput since the same physical connection is no longer shared between LAN and WAN.
|
|
Device Passthrough# For maximum performance and reduced hypervisor overhead, passing through a physical NIC for WAN directly to the VM seemed to make the most sense, so I passed it through to the OPNsense VM.
|
|
I added the PCI device and restarted the OPNsense VM and re-configured the WAN in OPNsense to use this device.
|
|
I received the WAN IP and everything appeared to be working. I ran a few speed tests and noticed that the download speeds were much lower than normal from all of my devices. I checked my instance of speedtest-tracker noticed that the download speeds were significantly slower than historical records:
|
|
These speeds tests were going through Mullvad, which occasionally is inconsistent, but the results remained consistently lower than the previous configuration.
|
|
I reverted the WAN back to the original NIC, and the download speeds returned to more average results immediately so it became obvious that something was not right with this setup.
|
|
Realtek drivers# I did some web searching / LLM prompting and discovered that some people have had improved results after installing the OPNsense plugin os-realtek-re.
|
|
After installing the plugin and ensuring the kernel module was loaded at boot by following the post-install instructions, the throughput was still signicantly slower than before adding a second NIC.
|
|
I was starting to think that there might be a problem with the hardware and began the process to return it to the vendor.
|
|
Virtualized NIC with a Linux bridge# As one last shot, I created Linux Bridge in the Proxmox GUI with the Realtek NIC and passed it through to the OPNsense VM:
|
|
I re-configured the WAN interface in OPNsense to use the newly added network device, and the download and upload speeds returned to the typical speeds. Another added benefit to this setup is that it bypasses the need for installing Realtek FreeBSD drivers on the OPNsense VM, since the network device is virtual and managed on the Proxmox host (debian-based).
|
|
Conclusion# Although I am not sure why passing through a Realtek NIC to an OPNsense VM causes so much degradation in throughput, I am glad that there is a workaround. If I get ahold of another NIC, I would be interested in trying to reproduce the issue.
|
|
`}).add({id:1,href:"/blog/replicating-truenas-datasets-to-sftpgo-over-tailscale/",title:"Replicating TrueNAS datasets to sftpgo over Tailscale",description:`I’ve recently spun up an instance of TrueNAS SCALE after salvaging a couple hard drives from a past computer build and decided I could use additional network storage for various backups such as Proxmox VMs and home directory backups.
|
|
`,content:`I’ve recently spun up an instance of TrueNAS SCALE after salvaging a couple hard drives from a past computer build and decided I could use additional network storage for various backups such as Proxmox VMs and home directory backups.
|
|
The only app I’ve needed to install has been Tailscale which has enabled me to access the TrueNAS Web UI from anywhere. I’ve setup a few datasets and NFS shares to store various backups and the rest of the periodic backups have routinely been working without a hitch. Since my homelab is becoming more of a vital piece of infrastructure for my daily needs, I wanted to ensure that these datasets had Cloud Sync Tasks setup for offsite backups. These encrypted backups are mostly being stored in places such as Google Drive and other blob storage providers.
|
|
More recently, to reduce cloud costs, I’ve setup some a small node at another physical location and installed both Tailscale and sftpgo on it to facilitate offsite backups. After setting up the infrastructure and adding a Cloud Sync Task in TrueNAS SCALE to replicate these backups offsite to sftpgo, I noticed that Tailscale’s Magic DNS was not working, nor was the Tailscale IPv4 address.
|
|
After reading the Tailscale docs , it became clear that the Userspace box had to be unchecked in the Tailscale app settings. This is because the Tailscale app is running within a docker container on the TrueNAS SCALE VM. After unchecking the Userspace box, I was able to verify that the Backup Credentials created for sftpgo worked when specifying the host as a Tailscale IPv4 address. This was probably good enough since the IP won’t change unless the node is re-registered.
|
|
To get MagicDNS working, I went to Network > Global Configuration and set “Nameserver 1” to 100.100.100.100. After this, I was able to specify the FQDN in the Backup Credentials and the Cloud Sync Tasks started.
|
|
This method of adding MagicDNS can lead to issues with DNS when updating the tailscale application in TrueNAS, so I ended using the Tailscale IP directly.
|
|
`}).add({id:2,href:"/blog/opting-out-of-haveibeenpwned/",title:"Opting out of haveibeenpwned",description:`Data breaches are a concern for anyone trying to live a life of relative privacy. Last month, PowerSchool informed its customers that hackers stole data of 62 million students. This may not have impacted you, but unless you have been practicing Extreme Privacy techniques for decades, you likely have been impacted by a data breach in the past.
|
|
`,content:`Data breaches are a concern for anyone trying to live a life of relative privacy. Last month, PowerSchool informed its customers that hackers stole data of 62 million students. This may not have impacted you, but unless you have been practicing Extreme Privacy techniques for decades, you likely have been impacted by a data breach in the past.
|
|
Understanding Data Breaches# Data breaches occur when unauthorized individuals gain access to sensitive information (names, addresses, emails, phone numbers among other details). If the breach is substantial enough, the raw data is likely to make it into the hands of data brokers that will collect, aggregate, and sell the information on the dark web.
|
|
Check if you have been impacted# There are a number of services that can be used to check if you have been impacted by a data breach, including Mozilla monitor, Google Dark web report, and haveibeenpwned.com. Some password managers offer features that compare your credentials against known breaches. These services can also be configured to send you notifications when a breach occurs. It is a good idea to become aware of these breaches as soon as you can, so that you can protect yourself from malicious behaviour such as phishing.
|
|
If you have been an email or phone number for any length of time, there is a high probability that some of your data has been exposed. You can easily check by querying haveibeenpwned.com. Many of the tools that offer breach detection, query the haveibeenpwned database. Although I believe this is service is a public good, it also opens the door for anyone who may be looking to gain more information about your present and past usages of various websites and services.
|
|
Opting out# If you have an identity that you’d like to protect, I’d suggest opting out of public searchability. This of course does not undo the data breach that happened, but does it make it more challenging for someone to quickly search for an impacted email address. Even after opting out, you can still subscribe to breach notifications, as long as you can validate that you have access to the email in question.
|
|
There are other websites that offer similar style lookups, but many of them are either paywalled or require account registration.
|
|
Email aliases# A more proactive method of reducing the likelihood of future exposures is to use an email aliasing service such as Firefox Relay, DuckDuckGo Email Protection, or if you use Proton Mail, hide-my-email aliases. This will allow you sign up for services using an alias instead of revealing your email address. The service then forwards all emails to your real address that you configure when setting up the alias.
|
|
`}).add({id:3,href:"/blog/amazon-ebs-csi-driver-terraform/",title:"Amazon EBS CSI driver with terraform",description:`I recently configured the Amazon EBS CSI driver and found the setup with terraform to be more effort than expected. I wanted to avoid third-party modules and keep it as simple as possible, while remaining least privilege.
|
|
UPDATE: This approach can also be used for the aws-efs-csi-driver
|
|
`,content:`I recently configured the Amazon EBS CSI driver and found the setup with terraform to be more effort than expected. I wanted to avoid third-party modules and keep it as simple as possible, while remaining least privilege.
|
|
UPDATE: This approach can also be used for the aws-efs-csi-driver
|
|
The Amazon EBS CSI driver docs mention that the following are needed:
|
|
an existing EKS cluster IAM role (that allows communication to the EC2 API) EKS add-on (aws-ebs-csi-driver) OIDC provider This sounded simple enough but I was unable to find a “grab-and-go” terraform example that followed the recommendations in the docs. I saw some suggestions about attaching an AmazonEBSCSIDriverPolicy policy to the node groups but did not think this was the best idea since this would allow many pods to potentially have access to the EC2 API.
|
|
After a few minutes of LLM prompting, I was unimpressed with the results. I began to piece together the config myself, and after some trial and error, this is the terraform that I came up with:
|
|
# TLS needed for the thumbprint provider "tls" {} data "tls_certificate" "oidc" { url = aws_eks_cluster.main.identity[0].oidc[0].issuer } # EKS addon resource "aws_eks_addon" "ebs_csi_driver" { cluster_name = aws_eks_cluster.main.name addon_name = "aws-ebs-csi-driver" addon_version = "v1.29.1-eksbuild.1" service_account_role_arn = aws_iam_role.ebs_csi_driver.arn } # AWS Identity and Access Management (IAM) OpenID Connect (OIDC) provider resource "aws_iam_openid_connect_provider" "eks" { url = aws_eks_cluster.main.identity.0.oidc.0.issuer client_id_list = ["sts.amazonaws.com"] thumbprint_list = [data.tls_certificate.oidc.certificates[0].sha1_fingerprint] } # IAM resource "aws_iam_role" "ebs_csi_driver" { name = "ebs-csi-driver" assume_role_policy = data.aws_iam_policy_document.ebs_csi_driver_assume_role.json } data "aws_iam_policy_document" "ebs_csi_driver_assume_role" { statement { effect = "Allow" principals { type = "Federated" identifiers = [aws_iam_openid_connect_provider.eks.arn] } actions = [ "sts:AssumeRoleWithWebIdentity", ] condition { test = "StringEquals" variable = "\${aws_iam_openid_connect_provider.eks.url}:aud" values = ["sts.amazonaws.com"] } condition { test = "StringEquals" variable = "\${aws_iam_openid_connect_provider.eks.url}:sub" values = ["system:serviceaccount:kube-system:ebs-csi-controller-sa"] } } } resource "aws_iam_role_policy_attachment" "AmazonEBSCSIDriverPolicy" { policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy" role = aws_iam_role.ebs_csi_driver.name } The above configuration follows the docs, binding an IAM role to the service account kube-system/ebs-csi-controller-sa using an OpenID connect provider.
|
|
After applying the changes above, I deployed the sample application and noticed that the persistent volume claims were bound to EBS volumes.
|
|
`}).add({id:4,href:"/blog/setting-up-gitea-actions-with-tailscale/",title:"Setting up Gitea Actions with Tailscale",description:`In this post I’ll go through the process of setting up Gitea Actions and Tailscale, unlocking a simple and secure way to automate workflows.
|
|
`,content:`In this post I’ll go through the process of setting up Gitea Actions and Tailscale, unlocking a simple and secure way to automate workflows.
|
|
What is Gitea?# Gitea is a lightweight and fast git server that has much of the same look and feel as github. I have been using it in my homelab to mirror repositories hosted on other platforms such as github and gitlab. These mirrors take advantage of the decentralized nature of git by serving as “backups”. One of the main reasons I hadn’t been using it more often was due to the lack of integrated CI/CD. This is no longer the case.
|
|
Gitea Actions# Gitea Actions have made it into the 1.19.0 release. This feature had been in an experimental state up until 1.21.0 and is now enabled by default \u{1F389}.
|
|
So what are they? If you’ve ever used GitHub Actions (and if you’re reading this, I imagine you have), these will look familiar. Gitea Actions essentially enable the ability to run github workflows on gitea. Workflows between gitea and github are not completely interopable, but a lot of the same workflow syntax is already compatible on gitea. You can find a documented list of unsupported workflows syntax.
|
|
Actions work by using a custom fork of nekos/act. Workflows run in a new container for every job. If you specify an action such as actions/checkout@v4, it defaults to downloading the scripts from github.com. To avoid internet egress, you could always clone the required actions to your local gitea instance.
|
|
Actions (gitea’s implementation) has me excited because it makes spinning up a network-isolated environment for workflow automation incredibly simple.
|
|
Integration with Tailscale# 2024-02-10: I had originally written this post to include Tailscale-Traefik Proxy Integration, but have since removed it in favour of Tailscale Serve after learning from this example. This simplifies the setup and reduces the number of moving parts.
|
|
So how does Tailscale help here? Well, more recently I’ve been exposing my self-hosted services using Tailscale Serve. This allows for a nice looking dns name (i.e. gitea.my-tailnet-name.ts.net), automatic tls certificate management, and optionally allowing the address to be publically accessible (by using Funnel).
|
|
Deploying Gitea, Traefik, and Tailscale# In my case, the following is already set up:
|
|
docker-compose is installed tailscale magic dns is enabled My preferred approach to deploying code in a homelab environment is with docker compose. I have deployed this in a LXC on Proxmox. You could run this on a virtual machine or a physical host as well.
|
|
The docker-compose.yaml file looks like:
|
|
version: "3.7" services: gitea: image: gitea/gitea:1.21.1 container_name: gitea network_mode: service:ts-gitea environment: - USER_UID=1000 - USER_GID=1000 - GITEA__server__DOMAIN=gitea.my-tailnet-name.ts.net - GITEA__server__ROOT_URL=https://gitea.my-tailnet-name.ts.net - GITEA__server__HTTP_ADDR=0.0.0.0 - GITEA__server__LFS_JWT_SECRET=my-secret-jwt restart: always volumes: - ./data:/data - /etc/timezone:/etc/timezone:ro - /etc/localtime:/etc/localtime:ro ts-gitea: image: tailscale/tailscale:v1.58 container_name: ts-gitea hostname: gitea environment: - TS_AUTHKEY=<FILL THIS IN> - TS_SERVE_CONFIG=/config/gitea.json - TS_STATE_DIR=/var/lib/tailscale volumes: - \${PWD}/state:/var/lib/tailscale - \${PWD}/config:/config - /dev/net/tun:/dev/net/tun cap_add: - net_admin - sys_module restart: unless-stopped Note that you must specify a TS_AUTHKEY in the ts-gitea service. You can generate an auth key here.
|
|
config/gitea.json:
|
|
{ "TCP": { "443": { "HTTPS": true } }, "Web": { "\${TS_CERT_DOMAIN}:443": { "Handlers": { "/": { "Proxy": "http://127.0.0.1:3000" } } }, }, "AllowFunnel": { "\${TS_CERT_DOMAIN}:443": false } } After adding the above configuration, running docker compose up -d should be enough to get an instance up and running. It will be accessible at https://gitea.my-tailnet-name.ts.net from within the tailnet.
|
|
Something to consider is whether or not you want to use ssh with git. One method to get this to work with containers is to use ssh container passthrough. I decided to keep it simple and not use ssh, since communicating over https is perfectly fine for my use case.
|
|
Theming# I discovered some themes for gitea here.
|
|
I added the theme by copying theme-palenight.css into ./data/gitea/public/assets/css. I then added the following to environment in docker-compose.yml:
|
|
- GITEA__ui__DEFAULT_THEME=palenight - GITEA__ui__THEMES=palenight After restarting the gitea instance, the default theme was applied.
|
|
Connecting runners# I installed the runner by following the docs. I opted for installing it on a separate host as recommended in the docs. I used the systemd unit file to ensure that the runner comes back online after system reboots. I installed tailscale on the gitea runner as well, so that it can be part of the same tailnet as the main instance.
|
|
After registering this runner and starting the daemon, the runner appeared in /admin/actions/runners. I added two other runners to help with parallelization.
|
|
Running a workflow# Now it’s time start running some automation. I used the demo workflow as a starting point to verify that the runner is executing workflows.
|
|
After this, I wanted to make sure that some of my existing workflows could be migrated over.
|
|
The following workflow uses a matrix to run a job for several of my hosts using ansible playbooks that will do various tasks such as patching os updates and updating container images.
|
|
name: Run ansible on: push: schedule: - cron: "0 */12 * * *" jobs: run-ansible-playbook: runs-on: ubuntu-latest steps: - name: Check out repository code uses: actions/checkout@v4 - name: Install ansible run: | apt update && apt install ansible -y - name: Run playbook uses: dawidd6/action-ansible-playbook@v2 with: playbook: playbooks/main.yml requirements: requirements.yml options: | --inventory inventory - name: Send failure notification uses: dawidd6/action-send-mail@v3 if: always() && failure() with: server_address: smtp.gmail.com server_port: 465 secure: true username: myuser password: \${{ secrets.MAIL_PASSWORD }} subject: ansible runbook failed to: me@davegallant.ca from: RFD Notify body: | \${{ github.server_url }}/\${{ github.repository }}/actions/runs/\${{ github.run_number }} And voil\xE0:
|
|
You may be wondering how the gitea runner is allowed to connect to the other hosts using ansible? Well, the nodes are in the same tailnet and have tailscale ssh enabled.
|
|
Areas for improvement# One enhancement that I would like to see is the ability to send notifications on workflow failures. Currently, this doesn’t seem possible without adding logic to each workflow.
|
|
Conclusion# Gitea Actions are fast and the resource footprint is minimal. My gitea instance is currently using around 250mb of memory and a small fraction of a single cpu core (and the runner is using a similar amount of resources). This is impressive since many alternatives tend to require substantially more resources. It likely helps that the codebase is largely written in go.
|
|
By combining gitea with the networking marvel that is tailscale, running workflows becomes simple and fun. Whether you are working on a team or working alone, this setup ensures that your workflows are securely accessible from anywhere with an internet connection.
|
|
Check out my gitea instance exposed via Funnel here.
|
|
`}).add({id:5,href:"/blog/using-aks-and-socks-to-connect-to-a-private-azure-db/",title:"Using AKS and SOCKS to connect to a private Azure DB",description:`I ran into a roadblock recently where I wanted to conveniently connect to a managed postgres database within Azure that was not running on public subnets. And by conveniently, I mean that I’d rather not have to spin up an ephemeral virtual machine running in the same network and proxy the connection, and I’d like to use a local client (preferably with a GUI). After several web searches, it became evident that Azure does not readily provide much tooling to support this.
|
|
`,content:`I ran into a roadblock recently where I wanted to conveniently connect to a managed postgres database within Azure that was not running on public subnets. And by conveniently, I mean that I’d rather not have to spin up an ephemeral virtual machine running in the same network and proxy the connection, and I’d like to use a local client (preferably with a GUI). After several web searches, it became evident that Azure does not readily provide much tooling to support this.
|
|
Go Public?# Should the database be migrated to public subnets? Ideally not, since it is good practice to host internal infrastructure in restricted subnets.
|
|
How do others handle this?# With GCP, connecting to a private db instance from any machine can be achieved with cloud-sql-proxy. This works by proxying requests from your machine to the SQL database instance in the cloud, while the authentication is handled by GCP’s IAM.
|
|
So what about Azure? Is there any solution that is as elegant as cloud-sql-proxy?
|
|
A Bastion# Similar to what AWS has recommended, perhaps a bastion is the way forward?
|
|
Azure has a fully-managed service called Azure Bastion that provides secure access to virtual machines that do not have public IPs. This looks interesting, but unfortunately it costs money and requires an additional virtual machine.
|
|
Because this adds cost (and complexity), it does not seem like a desirable option in its current state. If it provided a more seamless connection to the database, it would be more appealing.
|
|
SOCKS# 2023-12-13: An alternative to using a socks proxy is socat. This would allow you to relay tcp connections to a pod running in k8s, and then port-forward them to your localhost. If this sounds more appealing, install krew-net-forward and then run “kubectl net-forward -i mydb.postgres.database.azure.com -p 5432 -l 5432” to access the database through “localhost:5432”
|
|
SOCKS is a protocol that enables a way to proxy connections by exchanging network packets between the client and the server. There are many implementations and many readily available container images that can run a SOCKS server.
|
|
It’s possible to use this sort of proxy to connect to a private DB, but is it any simpler than using a virtual machine as a jumphost? It wasn’t until I stumbled upon kubectl-plugin-socks5-proxy that I was convinced that using SOCKS could be made simple.
|
|
So how does it work? By installing the kubectl plugin and then running kubectl socks5-proxy, a SOCKS proxy server is spun up in a pod and then opens up port-forwarding session using kubectl.
|
|
As you can see below, this k8s plugin is wrapped up nicely:
|
|
$ kubectl socks5-proxy using: namespace=default using: port=1080 using: name=davegallant-proxy using: image=serjs/go-socks5-proxy Creating SOCKS5 Proxy (Pod)... pod/davegallant-proxy created With the above proxy connection open, it is possible to access both the DNS and private IPs accessible within the k8s cluster. In this case, I am able to access the private database, since there is network connectivity between the k8s cluster and the database.
|
|
Caveats and Conclusion# The above outlined solution makes some assumptions:
|
|
there is a k8s cluster the k8s cluster has network connectivity to the desired private database If these stars align, than this solution might work as a stopgap for accessing a private Azure DB (and I’m assuming this could work similarly on AWS).
|
|
It would be nice if Azure provided tooling similar to cloud-sql-proxy, so that using private databases would be more of a convenient experience.
|
|
One other thing to note is that some clients (such as dbeaver) do not provide DNS resolution over SOCKS. So in this case, you won’t be able to use DNS as if you were inside the cluster, but instead have to rely on knowing private ip addresses.
|
|
2025-01-16:: DNS over SOCKS now works with the latest dbeaver client.
|
|
`}).add({id:6,href:"/blog/watching-youtube-in-private/",title:"Watching YouTube in private",description:`I recently stumbled upon yewtu.be and found it intriguing. It not only allows you to watch YouTube without being on YouTube, but it also allows you to create an account and subscribe to channels without a Google account. What sort of wizardry is going on under the hood? It turns out that it’s a hosted instance of invidious.
|
|
`,content:`I recently stumbled upon yewtu.be and found it intriguing. It not only allows you to watch YouTube without being on YouTube, but it also allows you to create an account and subscribe to channels without a Google account. What sort of wizardry is going on under the hood? It turns out that it’s a hosted instance of invidious.
|
|
The layout is simple, and JavaScript is not required.
|
|
I started using yewtu.be as my primary client for watching videos. I subscribe to several YouTube channels and I prefer the interface invidiuous provides due to its simplicity. It’s also nice to be in control of my search and watch history.
|
|
A few days ago, yewtu.be went down briefly, and that motivated me enough to self-host invidious. There are several other hosted instances listed here, but being able to easily backup my own instance (including subscriptions and watch history) is more compelling in my case.
|
|
Hosting invidious# The quickest way to get invidious up is with docker-compose as mentioned in the docs.
|
|
I made a few modifications, and ended up with:
|
|
version: "3" services: invidious: image: quay.io/invidious/invidious restart: unless-stopped ports: - "0.0.0.0:3000:3000" environment: INVIDIOUS_CONFIG: | db: dbname: invidious user: kemal password: kemal host: invidious-db port: 5432 check_tables: true healthcheck: test: wget -nv --tries=1 --spider http://127.0.0.1:3000/api/v1/comments/jNQXAC9IVRw || exit 1 interval: 30s timeout: 5s retries: 2 depends_on: - invidious-db invidious-db: image: docker.io/library/postgres:14 restart: unless-stopped volumes: - postgresdata:/var/lib/postgresql/data - ./config/sql:/config/sql - ./docker/init-invidious-db.sh:/docker-entrypoint-initdb.d/init-invidious-db.sh environment: POSTGRES_DB: invidious POSTGRES_USER: kemal POSTGRES_PASSWORD: kemal healthcheck: test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"] volumes: postgresdata: After invidious was up and running, I installed Tailscale on it to leverage its MagicDNS, and I’m now able to access this instance from anywhere at http://invidious:3000/feed/subscriptions.
|
|
Redirecting YouTube links# I figured it would be nice to redirect existing YouTube links that others send me, so that I could seamlessly watch the videos using invidious.
|
|
I went looking for a way to redirect paths at the browser level. I found Redirector, which can be used to modify http requests in the browser. I created the following redirect (exported as json):
|
|
{ "redirects": [ { "description": "youtube to invidious", "exampleUrl": "https://www.youtube.com/watch?v=-lz30by8-sU", "exampleResult": "http://invidious:3000/watch?v=-lz30by8-sU", "error": null, "includePattern": "https://*youtube.com/*", "excludePattern": "", "patternDesc": "Any youtube video should redirect to invidious", "redirectUrl": "http://invidious:3000/$2", "patternType": "W", "processMatches": "noProcessing", "disabled": false, "grouped": false, "appliesTo": [ "main_frame" ] } ] } Now the link https://www.youtube.com/watch?v=-lz30by8-sU will redirect to http://invidious:3000/watch?v=-lz30by8-sU
|
|
I’m still looking for ways to improve this invidious setup. There doesn’t appear to be a way to stream in 4K yet.
|
|
`}).add({id:7,href:"/blog/virtualizing-a-router-with-pfsense/",title:"Virtualizing my router with pfSense",description:`My aging router has been running OpenWrt for years and for the most part has been quite reliable. OpenWrt is an open-source project used on embedded devices to route network traffic. It supports many different configurations and there exists a large index of packages. Ever since I’ve connected some standalone wireless access points, I’ve had less of a need for an off-the-shelf all-in-one wireless router combo. I’ve also recently been experiencing instability with my router (likely the result of a combination of configuration tweaking and firmware updating). OpenWrt has served me well, but it is time to move on!
|
|
`,content:`My aging router has been running OpenWrt for years and for the most part has been quite reliable. OpenWrt is an open-source project used on embedded devices to route network traffic. It supports many different configurations and there exists a large index of packages. Ever since I’ve connected some standalone wireless access points, I’ve had less of a need for an off-the-shelf all-in-one wireless router combo. I’ve also recently been experiencing instability with my router (likely the result of a combination of configuration tweaking and firmware updating). OpenWrt has served me well, but it is time to move on!
|
|
pfSense# I figured this would be a good opportunity to try pfSense. I’ve heard nothing but positive things about pfSense and the fact it’s been around since 2004, based on FreeBSD, and written in PHP gave me the impression that it would be relatively stable (and I’d expect nothing less because it has an important job to do!). pfSense can be run on many different machines, and there are even some officially supported appliances. Since I already have a machine running Proxmox, why not just run it in a VM? It’d allow for automatic snapshotting of the machine. There is a good video on this by Techno Tim. Tim has a lot of good videos, and this one is about virtualizing pfSense.
|
|
Router on a stick# I had initially made the assumption that in order to build a router, you would need more than a single NIC (or a dual-port NIC) in order to support both WAN and LAN. This is simply not the case, because VLANs are awesome! In order to create a router, all you need is a single port NIC and a network switch that supports VLANs (also marketed as a managed switch). I picked up the Netgear GS308E because it has both a sufficient amount of ports for my needs, and it supports VLANs. It also has a nice sturdy metal frame which was a pleasant surprise.
|
|
After setting up this Netgear switch, it shoud be possible to access the web interface at http://192.168.0.239. It may be at a different address. To find the address, try checking your DHCP leases in your router interface (if you plugged it into an existing router). I realized I was unable to access this interface because I was on a different subnet, so I set my machine’s address to 192.168.0.22 in order to temporarily setup this switch. I assigned a static ip address to the switch (in System > Switch Information) so that it was in the same subnet as the rest of my network.
|
|
The web interface is nothing spectactular, but it allows for managing VLANs.
|
|
The following configuration will:
|
|
assign port 1 to be the LAN (connected to the Proxmox machine) assign port 8 to be the WAN (connected to my ISP’s modem) In the switch’s web interface, I went to VLAN and then 802.1Q, and then clicked on VLAN Configuration. I configured the ports to look like this:
|
|
Note that the VLAN Identifier Setting has been setup already with two VLANs (1 and 10). More VLANs can be created (i.e. to isolate IoT devices), but 2 VLANs is all we need for the initial setup of a router.
|
|
To replicate the above configuration, add a new VLAN ID 10 (1 should exist by default).
|
|
Next, go into VLAN Membership and configure VLAN 1’s port membership to be the following:
|
|
and then configure VLAN 10’s port membership to be the following:
|
|
Now, go into Port PVID and ensure that port 8 is set to PVID 10.
|
|
This above configuration will dedicate two of the eight ports to WAN and LAN. This will allow the internet to flow into the pfSense from the modem.
|
|
Setting up pfSense# pfSense is fairly easy to setup. Just download the latest ISO and boot up the virtual machine. When setting up the machine, I mostly went with all of the defaults. Configuration can be changed later in the web interface, which is quite a bit simpler.
|
|
Since VLANs are going to be leveraged, when you go to Assign Interfaces, VLANs should be setup now like the following:
|
|
WAN should be vtnet0.10 LAN should be vtnet0 After going through the rest of the installation, if everything is connected correctly it should display both WAN and LAN addresses.
|
|
If all goes well, the web interface should be running at https://192.168.1.1.
|
|
And this is where the fun begins. There are many tutorials and blogs about how to setup pfSense and various services and packages that can be installed. I’ve already installed pfBlocker-NG.
|
|
Summary# It is fairly simple to setup a router with pfSense from within a virtual machine. A physical dedicated routing machine is not necessary and often does not perform as well as software running on faster and more reliable hardware. So far, pfSense has been running for over a week without a single hiccup. pfSense is a mature piece of software that is incredibly powerful and flexible. To avoid some of the instability I had experienced with OpenWrt, I enabled AutoConfigBackup, which is capable of automatically backing up configuration upon every change. I plan to explore and experiment with more services and configuration in the future, so the ability to track all of these changes gives me the peace of mind that experimentation is safe.
|
|
`}).add({id:8,href:"/blog/backing-up-gmail-with-synology/",title:"Backing up gmail with Synology",description:`I’ve used gmail since the beta launched touting a whopping 1GB of storage. I thought this was a massive leap in email technology at the time. I was lucky enough to get an invite fairly quickly. Not suprisingly, I have many years of emails, attachments, and photos. I certainly do not want to lose the content of many of these emails. Despite the redundancy of the data that Google secures, I still feel better retaining a copy of this data on my own physical machines.
|
|
`,content:`I’ve used gmail since the beta launched touting a whopping 1GB of storage. I thought this was a massive leap in email technology at the time. I was lucky enough to get an invite fairly quickly. Not suprisingly, I have many years of emails, attachments, and photos. I certainly do not want to lose the content of many of these emails. Despite the redundancy of the data that Google secures, I still feel better retaining a copy of this data on my own physical machines.
|
|
The thought of completely de-googling has crossed my mind on occassion. Convenience, coupled with my admiration for Google engineering, has prevented me from doing so thus far. Though, I may end up doing so at some point in the future.
|
|
Synology MailPlus Server# Synology products are reasonably priced for what you get (essentially a cloud-in-a-box) and there is very little maintenance required. I’ve recently been in interested in syncing and snapshotting my personal data. I’ve setup Synology’s Cloud Sync and keep copies of most of my cloud data.
|
|
I’ve used tools such as gmvault with success in the past. Setting this up on a cron seems like a viable option. However, I don’t really need a lot of the features it offers and do not plan to restore this data to another account.
|
|
Synology’s MailPlus seems to be a good candidate for backing up this data. By enabling POP3 fetching, it’s possible to fetch all existing emails, as well as periodically fetch all new emails. If a disaster ever did occur, having these emails would be beneficial, as they are an extension of my memory bank.
|
|
Installing MailPlus can be done from the Package Center:
|
|
Next, I went into Synology MailPlus Server and on the left, clicked on Account and ensured my user was marked as active.
|
|
Afterwords, I followed these instructions in order to start backing up emails.
|
|
When entering the POP3 credentials, I created an app password solely for authenticating to POP3 from the Synology device. This is required because I have 2-Step verification enabled on my account. There doesn’t seem to be a more secure way to access POP3 at the moment. It does seem like app password access is limited in scope (when MFA is enabled). These app passwords can’t be used to login to the main Google account.
|
|
I made sure to set the Fetch Range to All in order to get all emails from the beginning of time.
|
|
After this, mail started coming in.
|
|
After fetching 19 years worth of emails, I tried searching for some emails. It only took a few seconds to search through ~50K emails, which is a relief if I ever did have to search for something important.
|
|
Securing Synology# Since Synology devices are not hermetically sealed, it’s best to secure them by enabling MFA to help prevent being the victim of ransomware. It is also wise to backup your system settings and volumes to the cloud using a tool such as Hyper Backup. Encrypting your shared volumes should also be done, since unfortunately DSM does not support full disk encryption.
|
|
Summary# Having backups of various forms of cloud data is a good investment, especially in times of war. I certainly feel more at ease for having backed up my emails.
|
|
`}).add({id:9,href:"/blog/running-k3s-in-lxc-on-proxmox/",title:"Running K3s in LXC on Proxmox",description:"It has been a while since I’ve actively used Kubernetes and wanted to explore the evolution of tools such as Helm and Tekton. I decided to deploy K3s, since I’ve had success with deploying it on resource-contrained Raspberry Pis in the past. I thought that this time it’d be convenient to have K3s running in a LXC container on Proxmox. This would allow for easy snapshotting of the entire Kubernetes deployment.",content:`It has been a while since I’ve actively used Kubernetes and wanted to explore the evolution of tools such as Helm and Tekton. I decided to deploy K3s, since I’ve had success with deploying it on resource-contrained Raspberry Pis in the past. I thought that this time it’d be convenient to have K3s running in a LXC container on Proxmox. This would allow for easy snapshotting of the entire Kubernetes deployment. LXC containers also provide an efficient way to use a machine’s resources.
|
|
What is K3s?# K3s is a Kubernetes distro that advertises itself as a lightweight binary with a much smaller memory-footprint than traditional k8s. K3s is not a fork of k8s as it seeks to remain as close to upstream as it possibly can.
|
|
Configure Proxmox# This gist contains snippets and discussion on how to deploy K3s in LXC on Proxmox. It mentions that bridge-nf-call-iptables should be loaded, but I did not understand the benefit of doing this.
|
|
Disable swap# There is an issue on Kubernetes regarding swap here. There claims to be support for swap in 1.22, but for now let’s disable it:
|
|
sudo sysctl vm.swappiness=0 sudo swapoff -a It might be worth experimenting with swap enabled in the future to see how that might affect performance.
|
|
Enable IP Forwarding# To avoid IP Forwarding issues with Traefik, run the following on the host:
|
|
sudo sysctl net.ipv4.ip_forward=1 sudo sysctl net.ipv6.conf.all.forwarding=1 sudo sed -i 's/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/g' /etc/sysctl.conf sudo sed -i 's/#net.ipv6.conf.all.forwarding=1/net.ipv6.conf.all.forwarding=1/g' /etc/sysctl.conf Create LXC container# Create an LXC container in the Proxmox interface as you normally would. Remember to:
|
|
Uncheck unprivileged container Use a LXC template (I chose a debian 11 template downloaded with pveam) In memory, set swap to 0 Create and start the container Modify container config# Now back on the host run pct list to determine what VMID it was given.
|
|
Open /etc/pve/lxc/$VMID.conf and append:
|
|
lxc.apparmor.profile: unconfined lxc.cap.drop: lxc.mount.auto: "proc:rw sys:rw" lxc.cgroup2.devices.allow: c 10:200 rwm All of the above configurations are described in the manpages. Notice that cgroup2 is used since Proxmox VE 7.0 has switched to a pure cgroupv2 environment.
|
|
Thankfully cgroup v2 support has been supported in k3s with these contributions:
|
|
https://github.com/k3s-io/k3s/pull/2584 https://github.com/k3s-io/k3s/pull/2844 Enable shared host mounts# From within the container, run:
|
|
echo '#!/bin/sh -e ln -s /dev/console /dev/kmsg mount --make-rshared /' > /etc/rc.local chmod +x /etc/rc.local reboot Install K3s# One of the simplest ways to install K3s on a remote host is to use k3sup. Ensure that you supply a valid CONTAINER_IP and choose the k3s-version you prefer. As of 2021/11, it is still defaulting to the 1.19 channel, so I overrode it to 1.22 for cgroup v2 support. See the published releases here.
|
|
ssh-copy-id root@$CONTAINER_IP k3sup install --ip $CONTAINER_IP --user root --k3s-version v1.22.3+k3s1 If all goes well, you should see a path to the kubeconfig generated. I moved this into ~/.kube/config so that kubectl would read this by default.
|
|
Wrapping up# Installing K3s in LXC on Proxmox works with a few tweaks to the default configuration. I later followed the Tekton’s Getting Started guide and was able to deploy it in a few commands.
|
|
$ kubectl get all --namespace tekton-pipelines NAME READY STATUS RESTARTS AGE pod/tekton-pipelines-webhook-8566ff9b6b-6rnh8 1/1 Running 1 (50m ago) 12h pod/tekton-dashboard-6bf858f977-qt4hr 1/1 Running 1 (50m ago) 11h pod/tekton-pipelines-controller-69fd7498d8-f57m4 1/1 Running 1 (50m ago) 12h NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/tekton-pipelines-controller ClusterIP 10.43.44.245 <none> 9090/TCP,8080/TCP 12h service/tekton-pipelines-webhook ClusterIP 10.43.183.242 <none> 9090/TCP,8008/TCP,443/TCP,8080/TCP 12h service/tekton-dashboard ClusterIP 10.43.87.97 <none> 9097/TCP 11h NAME READY UP-TO-DATE AVAILABLE AGE deployment.apps/tekton-pipelines-webhook 1/1 1 1 12h deployment.apps/tekton-dashboard 1/1 1 1 11h deployment.apps/tekton-pipelines-controller 1/1 1 1 12h NAME DESIRED CURRENT READY AGE replicaset.apps/tekton-pipelines-webhook-8566ff9b6b 1 1 1 12h replicaset.apps/tekton-dashboard-6bf858f977 1 1 1 11h replicaset.apps/tekton-pipelines-controller-69fd7498d8 1 1 1 12h NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE horizontalpodautoscaler.autoscaling/tekton-pipelines-webhook Deployment/tekton-pipelines-webhook 9%/100% 1 5 1 12h I made sure to install Tailscale in the container so that I can easily access K3s from anywhere.
|
|
If I’m feeling adventurous, I might experiment with K3s rootless.
|
|
`}).add({id:10,href:"/blog/replacing-docker-with-podman-on-macos/",title:"Replacing docker with podman on macOS (and Linux)",description:`There are a number of reasons why you might want to replace docker, especially on macOS. The following feature bundled in Docker Desktop might have motivated you enough to consider replacing docker:
|
|
`,content:`There are a number of reasons why you might want to replace docker, especially on macOS. The following feature bundled in Docker Desktop might have motivated you enough to consider replacing docker:
|
|
...ignoring Docker updates is a paid feature now?? pic.twitter.com/ZxKW3b9LQM
|
|
— Brendan Dolan-Gavitt (@moyix) May 1, 2021 Docker has been one of the larger influencers in the container world, helping to standardize the OCI Image Format Specification. For many developers, containers have become synonymous with terms like docker and Dockerfile (a file containing build instructions for a container image). Docker has certainly made it very convenient to build and run containers, but it is not the only solution for doing so.
|
|
This post briefly describes my experience swapping out docker for podman on macOS.
|
|
What is a container?# A container is a standard unit of software that packages up all application dependencies within it. Multiple containers can be run on a host machine all sharing the same kernel as the host. Linux namespaces help provide an isolated view of the system, including mnt, pid, net, ipc, uid, cgroup, and time. There is an in-depth video that discusses what containers are made from, and near the end there is a demonstration on how to build your own containers from the command line.
|
|
By easily allowing the necessary dependencies to live alongside the application code, containers make the “works on my machine” problem less of a problem.
|
|
Benefits of Podman# One of the most interesting features of Podman is that it is daemonless. There isn’t a process running on your system managing your containers. In contrast, the docker client is reliant upon the docker daemon (often running as root) to be able to build and run containers.
|
|
Podman is rootless by default. It is now possible to run the docker daemon rootless as well, but it’s still not the default behaviour.
|
|
I’ve also observed that so far my 2019 16" Macbook Pro hasn’t sounded like a jet engine, although I haven’t performed any disk-intensive operations yet.
|
|
Installing Podman# Running Podman on macOS is more involved than on Linux, because the podman-machine must run Linux inside of a virtual machine. Fortunately, the installation is made simple with brew (read this if you’re installing Podman on Linux):
|
|
brew install podman The podman-machine must be started:
|
|
# This is not necessary on Linux podman machine init podman machine start Running a container# Let’s try to pull an image:
|
|
$ podman pull alpine Trying to pull docker.io/library/alpine:latest... Getting image source signatures Copying blob sha256:a0d0a0d46f8b52473982a3c466318f479767577551a53ffc9074c9fa7035982e Copying config sha256:14119a10abf4669e8cdbdff324a9f9605d99697215a0d21c360fe8dfa8471bab Writing manifest to image destination Storing signatures 14119a10abf4669e8cdbdff324a9f9605d99697215a0d21c360fe8dfa8471bab If you’re having an issue pulling images, you may need to remove ~/.docker/config.json or remove the set of auths in the configuration as mentioned here.
|
|
and then run and exec into the container:
|
|
$ podman run --rm -ti alpine Error: error preparing container 99ace1ef8a78118e178372d91fd182e8166c399fbebe0f676af59fbf32ce205b for attach: error configuring network namespace for container 99ace1ef8a78118e178372d91fd182e8166c399fbebe0f676af59fbf32ce205b: error adding pod unruffled_bohr_unruffled_bohr to CNI network "podman": unexpected end of JSON input What does this error mean? A bit of searching lead to this github issue.
|
|
Until the fix is released, a workaround is to just specify a port (even when it’s not needed):
|
|
podman run -p 4242 --rm -ti alpine If you’re reading this from the future, there is a good chance specifying a port won’t be needed.
|
|
Another example of running a container with Podman can be found in the Jellyfin Documentation.
|
|
Aliasing docker with podman# Force of habit (or other scripts) may have you calling docker. To work around this:
|
|
alias docker=podman podman-compose# You may be wondering: what about docker-compose? Well, there claims to be a drop-in replacement for it: podman-compose.
|
|
pip3 install --user podman-compose Now let’s create a docker-compose.yml file to test:
|
|
cat << EOF >> docker-compose.yml version: '2' services: hello_world: image: ubuntu command: [/bin/echo, 'Hello world'] EOF Now run:
|
|
$ podman-compose up podman pod create --name=davegallant.github.io --share net 40d61dc6e95216c07d2b21cea6dcb30205bfcaf1260501fe652f05bddf7e595e 0 podman create --name=davegallant.github.io_hello_world_1 --pod=davegallant.github.io -l io.podman.compose.config-hash=123 -l io.podman.compose.project=davegallant.github.io -l io.podman.compose.version=0.0.1 -l com.docker.compose.container-number=1 -l com.docker.compose.service=hello_world --add-host hello_world:127.0.0.1 --add-host davegallant.github.io_hello_world_1:127.0.0.1 ubuntu /bin/echo Hello world Resolved "ubuntu" as an alias (/etc/containers/registries.conf.d/000-shortnames.conf) Trying to pull docker.io/library/ubuntu:latest... Getting image source signatures Copying blob sha256:f3ef4ff62e0da0ef761ec1c8a578f3035bef51043e53ae1b13a20b3e03726d17 Copying blob sha256:f3ef4ff62e0da0ef761ec1c8a578f3035bef51043e53ae1b13a20b3e03726d17 Copying config sha256:597ce1600cf4ac5f449b66e75e840657bb53864434d6bd82f00b172544c32ee2 Writing manifest to image destination Storing signatures 1a68b2fed3fdf2037b7aef16d770f22929eec1d799219ce30541df7876918576 0 podman start -a davegallant.github.io_hello_world_1 Hello world This should more or less provide the same results you would come to expect with docker. The README does clearly state that podman-compose is under development.
|
|
Summary# Installing Podman on macOS was not seamless, but it was manageable well within 30 minutes. I would recommend giving Podman a try to anyone who is unhappy with experiencing forced docker updates, or who is interested in using a more modern technology for running containers.
|
|
One caveat to mention is that there isn’t an official graphical user interface for Podman, but there is an open issue considering one. If you rely heavily on Docker Desktop’s UI, you may not be as interested in using podman yet.
|
|
Update: After further usage, bind mounts do not seem to work out of the box when the client and host are on different machines. A rather involved solution using sshfs was shared here.
|
|
I had been experimenting with Podman on Linux before writing this, but after listening to this podcast episode, I was inspired to give Podman a try on macOS.
|
|
`}).add({id:11,href:"/blog/automatically-rotating-aws-keys/",title:"Automatically rotating AWS access keys",description:`Rotating credentials is a security best practice. This morning, I read a question about automatically rotating AWS Access Keys without having to go through the hassle of navigating the AWS console. There are some existing solutions already, but I decided to write a script since it was incredibly simple. The script could be packed up as a systemd/launchd service to continually rotate access keys in the background.
|
|
In the longer term, migrating my local workflows to aws-vault seems like a more secure solution.`,content:`Rotating credentials is a security best practice. This morning, I read a question about automatically rotating AWS Access Keys without having to go through the hassle of navigating the AWS console. There are some existing solutions already, but I decided to write a script since it was incredibly simple. The script could be packed up as a systemd/launchd service to continually rotate access keys in the background.
|
|
In the longer term, migrating my local workflows to aws-vault seems like a more secure solution. This would mean that credentials (even temporary session credentials) never have to be written in plaintext to disk (i.e. where AWS suggests). Any existing applications, such as terraform, could be have their credentials passed to them from aws-vault, which retrieves them from the OS’s secure keystore. There is even a rotate command included.
|
|
`}).add({id:12,href:"/blog/why-i-threw-out-my-dotfiles/",title:"Why I threw out my dotfiles",description:`Over the years I have collected a number of dotfiles that I have shared across both Linux and macOS machines (~/.zshrc, ~/.config/git/config, ~/.config/tmux/tmux.conf, etc). I have tried several different ways to manage them, including bare git repos and utilities such as GNU Stow. These solutions work well enough, but I have since found what I would consider a much better solution for organizing user configuration: home-manager.
|
|
`,content:`Over the years I have collected a number of dotfiles that I have shared across both Linux and macOS machines (~/.zshrc, ~/.config/git/config, ~/.config/tmux/tmux.conf, etc). I have tried several different ways to manage them, including bare git repos and utilities such as GNU Stow. These solutions work well enough, but I have since found what I would consider a much better solution for organizing user configuration: home-manager.
|
|
What is home-manager?# Before understanding home-manager, it is worth briefly discussing what nix is. nix is a package manager that originally spawned from a PhD thesis. Unlike other package managers, it uses symbolic links to keep track of the currently installed packages, keeping around the old ones in case you may want to rollback.
|
|
For example, I have used nix to install the package bind which includes dig. You can see that it is available on multiple platforms. The absolute path of dig can be found by running:
|
|
$ ls -lh $(which dig) lrwxr-xr-x 73 root 31 Dec 1969 /run/current-system/sw/bin/dig -> /nix/store/0r4qdyprljd3dki57jn6c6a8dh2rbg9g-bind-9.16.16-dnsutils/bin/dig Notice that there is a hash included in the file path? This is a nix store path and is computed by the nix package manager. This nix pill does a good job explaining how this hash is computed. All of the nix pills are worth a read, if you are interested in learning more about nix itself. However, using home-manager does not require extensive knowledge of nix.
|
|
Part of the nix ecosystem includes nixpkgs. Many popular tools can be found already packaged in this repository. As you can see with these stats, there is a large number of existing packages that are being maintained by the community. Contributing a new package is easy, and anyone can do it!
|
|
home-manager leverages the nix package manager (and nixpkgs), as well the nix language so that you can declaratively define your system configuration. I store my nix-config in git so that I can keep track of my packages and configurations, and retain a clean and informative git commit history so that I can understand what changed and why.
|
|
Setting up home-manager# \u26A0\uFE0F If you run this on your main machine, make sure you backup your configuration files first. home-manager is pretty good about not overwriting existing configuration, but it is better to have a backup! Alternatively, you could test this out on a VM or cloud instance.
|
|
The first thing you should do is install nix:
|
|
curl -L https://nixos.org/nix/install | sh It’s generally not a good idea to curl and execute files from the internet (without verifying integrity), so you might want to download the install script first and take a look before executing it!
|
|
Open up a new shell in your terminal and running nix should work. If not, run . ~/.nix-profile/etc/profile.d/nix.sh
|
|
Now, install home-manager:
|
|
nix-channel --add https://github.com/nix-community/home-manager/archive/master.tar.gz home-manager nix-channel --update nix-shell '<home-manager>' -A install You should see a wave of /nix/store/* paths being displayed on your screen.
|
|
Now, to start off with a basic configuration, open up ~/.config/nixpkgs/home.nix in the editor of your choice and paste this in (you will want to change userName and homeDirectory):
|
|
{ config, pkgs, ... }: { programs.home-manager.enable = true; home = { username = "dave"; homeDirectory = "/home/dave"; stateVersion = "21.11"; packages = with pkgs; [ bind exa fd ripgrep ]; }; programs = { git = { enable = true; aliases = { aa = "add -A ."; br = "branch"; c = "commit -S"; ca = "commit -S --amend"; cb = "checkout -b"; co = "checkout"; d = "diff"; l = "log --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset' --abbrev-commit"; }; delta = { enable = true; options = { features = "line-numbers decorations"; whitespace-error-style = "22 reverse"; plus-style = "green bold ul '#198214'"; decorations = { commit-decoration-style = "bold yellow box ul"; file-style = "bold yellow ul"; file-decoration-style = "none"; }; }; }; extraConfig = { push = { default = "current"; }; pull = { rebase = true; }; }; }; starship = { enable = true; enableZshIntegration = true; settings = { add_newline = false; scan_timeout = 10; }; }; zsh = { enable = true; enableAutosuggestions = true; enableSyntaxHighlighting = true; history.size = 1000000; localVariables = { CASE_SENSITIVE = "true"; DISABLE_UNTRACKED_FILES_DIRTY = "true"; RPROMPT = ""; # override because macOS defaults to filepath ZSH_AUTOSUGGEST_HIGHLIGHT_STYLE = "fg=#838383,underline"; ZSH_DISABLE_COMPFIX = "true"; }; initExtra = '' export PAGER=less ''; shellAliases = { ".." = "cd .."; grep = "rg --smart-case"; ls = "exa -la --git"; }; "oh-my-zsh" = { enable = true; plugins = [ "gitfast" "last-working-dir" ]; }; }; }; } Save the file and run:
|
|
home-manager switch You should see another wave of /nix/store/* paths. The new configuration should now be active.
|
|
If you run zsh, you should see that you have starship and access to several other utils such as rg, fd, and exa.
|
|
This basic configuration above is also defining your ~/.config/git/config and .zshrc. If you already have either of these files, home-manager will complain about them already existing.
|
|
If you run cat ~/.zshrc, you will see the way these configuration files are generated.
|
|
You can extend this configuration for programs such as (neo)vim, emacs, alacritty, ssh, etc. To see other programs, take a look at home-manager/modules/programs.
|
|
Gateway To Nix# In ways, home-manager can be seen as a gateway to the nix ecosystem. If you have enjoyed the way you can declare user configuration with home-manager, you may be interested in expanding your configuration to include other system dependencies and configuration. For example, in Linux you can define your entire system’s configuration (including the kernel, kernel modules, networking, filesystems, etc) in nix. For macOS, there is nix-darwin that includes nix modules for configuring launchd, dock, and other preferences and services. You may also want to check out Nix Flakes: a more recent feature that allows you declare dependencies, and have them automatically pinned and hashed in flake.lock, similar to that of many modern package managers.
|
|
Wrapping up# The title of this post is slightly misleading, since it’s possible to retain some of your dotfiles and have them intermingle with home-manager by including them alongside nix. The idea of defining user configuration using nix can provide a clean way to maintain your configuration, and allow it to be portable across platforms. Is it worth the effort to migrate away from shell scripts and dotfiles? I’d say so.
|
|
You can find my nix config here.
|
|
`}).add({id:13,href:"/blog/what-to-do-with-a-homelab/",title:"What to do with a homelab",description:`A homelab can be an inexpensive way to host a multitude of internal/external services and learn a lot in the process.
|
|
`,content:`A homelab can be an inexpensive way to host a multitude of internal/external services and learn a lot in the process.
|
|
Do you want host your own media server? ad blocker? reverse proxy? Are you interested in learning more about Linux? Virtualization? Networking? Security? A homelab can be a playground to enhance your computer skills, without worrying about breaking anything important.
|
|
One of the best parts about building a homelab is that it doesn’t have to be a large investment in terms of hardware. One of the simplest ways to build a homelab is out of a refurbished computer. Having multiple machines/nodes provides the advantage of increased redundancy, but starting out with a single node is enough to reap many of the benefits of having a homelab.
|
|
Virtualization# Virtualizing your hardware is an organized way of dividing up your machine’s resources. This can be done with something such as a Virtual Machine or something lighter like a container using LXC or runC. Containers have much less overhead in terms of boot time and storage allocation. This Stack Overflow answer sums it up nicely.
|
|
A hypervisor such as Proxmox can be installed in minutes on a new machine. It provides a web interface and a straight-forward way to spin up new VMs and containers. Even if your plan is to run mostly docker containers, Proxmox can be a useful abstraction for managing VMs, disks and running scheduled backups. You can even run docker within an LXC container by enabling nested virtualization. You’ll want to ensure that VT-d and VT-x are enabled in the BIOS if you decide to install a hypervisor to manage your virtualization.
|
|
Services# Here is a list of some useful services to consider:
|
|
Jellyfin or Plex - a common gateway to self-hosting that enables a “self-hosted Netflix” experience that puts you in control of the content (guaranteed to make your partner and kids happy) changedetection - is a self-hosted equivalent to something like visualping.io that can notify you when a webpage changes and keep track of the diffs Adguard or Pihole - can block a list of known trackers for all clients on your local network with the added benefit of speeding up web page load times gitea - A lightweight git server that can be used to mirror git repos and host private content miniflux - a minimalist RSS reader gethomepage - A customizable landing page for quick access to services with many supported widgets that can query APIs and display information Uptime Kuma - A tool for monitoring the uptime of services, with notification support Speedtest Tracker - a way to monitor the performance of your internet connection and/or vpn connection Stirling-PDF - a self-hosted PDF manipulation tool that will keep your data private There is a large number of services you can self-host, including your own applications that you might be developing. Homelabbing allows you to have control over your data and services, and gives you the opportunity to be a software, network, and infrastructure engineer all at once.
|
|
VPN# Tailscale is a quick way to create a flat network for all of your services. With its MagicDNS, your can reference the names of machines like changedetection rather than using an IP address, or managing DNS yourself. By using this mesh-like VPN, you can easily create a secure tunnel to your homelab from anywhere.
|
|
Monitoring# Monitoring can become an important aspect of your homelab after it starts to become something that is relied upon. One of the simplest ways to setup some monitoring is using netdata. It can be installed on individual containers, VMs, and also a hypervisor (such as Proxmox). All of the monitoring works out of the box by detecting disks, memory, network interfaces, etc.
|
|
Additionally, agents installed on different machines can all be centrally viewed in netdata, and it can alert you when some of your infrastructure is down or in a degraded state. Adding additional nodes to netdata is as simple as a 1-line shell command.
|
|
As mentioned above, Uptime Kuma is a convenient way to track uptime and monitor the availability of your services.
|
|
In Summary# Building out a homelab can be a rewarding experience and it doesn’t require buying a rack full of expensive servers to get a significant amount of utility. There are many services that you can run that require very minimal setup, making it possible to get a server up and running in a short period of time, with monitoring, and that can be securely connected to remotely.
|
|
If you’re looking for a steady stream of ideas for your homelab, check out selfhosted.show.
|
|
`}).add({id:14,href:"/blog/appgate-sdp-on-arch-linux/",title:"AppGate SDP on Arch Linux",description:`AppGate SDP provides a Zero Trust network. This post describes how to get AppGate SDP 4.3.2 working on Arch Linux.
|
|
`,content:`AppGate SDP provides a Zero Trust network. This post describes how to get AppGate SDP 4.3.2 working on Arch Linux.
|
|
Depending on the AppGate SDP Server that is running, you may require a client that is more recent than the latest package on AUR. As of right now, the latest AUR is 4.2.2-1.
|
|
These steps highlight how to get it working with Python3.8 by making a 1 line modification to AppGate source code.
|
|
Packaging# We already know the community package is currently out of date, so let’s clone it:
|
|
git clone https://aur.archlinux.org/appgate-sdp.git cd appgate-sdp You’ll likely notice that the version is not what we want, so let’s modify the PKGBUILD to the following:
|
|
# Maintainer: Pawel Mosakowski <pawel at mosakowski dot net> pkgname=appgate-sdp conflicts=('appgate-sdp-headless') pkgver=4.3.2 _download_pkgver=4.3 pkgrel=1 epoch= pkgdesc="Software Defined Perimeter - GUI client" arch=('x86_64') url="https://www.cyxtera.com/essential-defense/appgate-sdp/support" license=('custom') # dependecies calculated by namcap depends=('gconf' 'libsecret' 'gtk3' 'python' 'nss' 'libxss' 'nodejs' 'dnsmasq') source=("https://sdpdownloads.cyxtera.com/AppGate-SDP-\${_download_pkgver}/clients/\${pkgname}_\${pkgver}_amd64.deb" "appgatedriver.service") options=(staticlibs) prepare() { tar -xf data.tar.xz } package() { cp -dpr "\${srcdir}"/{etc,lib,opt,usr} "\${pkgdir}" mv -v "$pkgdir/lib/systemd/system" "$pkgdir/usr/lib/systemd/" rm -vrf "$pkgdir/lib" cp -v "$srcdir/appgatedriver.service" "$pkgdir/usr/lib/systemd/system/appgatedriver.service" mkdir -vp "$pkgdir/usr/share/licenses/appgate-sdp" cp -v "$pkgdir/usr/share/doc/appgate/copyright" "$pkgdir/usr/share/licenses/appgate-sdp" cp -v "$pkgdir/usr/share/doc/appgate/LICENSE.github" "$pkgdir/usr/share/licenses/appgate-sdp" cp -v "$pkgdir/usr/share/doc/appgate/LICENSES.chromium.html.bz2" "$pkgdir/usr/share/licenses/appgate-sdp" } md5sums=('17101aac7623c06d5fbb95f50cf3dbdc' '002644116e20b2d79fdb36b7677ab4cf') Let’s first make sure we have some dependencies. If you do not have yay, check it out.
|
|
yay -S dnsmasq gconf Now, let’s install it:
|
|
makepkg -si Running the client# Ok, let’s run the client by executing appgate.
|
|
It complains about not being able to connect.
|
|
Easy fix:
|
|
sudo systemctl start appgatedriver.service Now we should be connected… but DNS is not working?
|
|
Fixing the DNS# Running resolvectl should display that something is not right.
|
|
Why is the DNS not being set by appgate?
|
|
$ head -3 /opt/appgate/linux/set_dns #!/usr/bin/env python3 ''' This is used to set and unset the DNS. It seems like python3 is required for the DNS setting to happen. Let’s try to run it.
|
|
$ sudo /opt/appgate/linux/set_dns /opt/appgate/linux/set_dns:88: SyntaxWarning: "is" with a literal. Did you mean "=="? servers = [( socket.AF_INET if x.version is 4 else socket.AF_INET6, map(int, x.packed)) for x in servers] Traceback (most recent call last): File "/opt/appgate/linux/set_dns", line 30, in <module> import dbus ModuleNotFoundError: No module named 'dbus' Ok, let’s install it:
|
|
$ sudo python3.8 -m pip install dbus-python Will it work now? Not yet. There’s another issue:
|
|
$ sudo /opt/appgate/linux/set_dns /opt/appgate/linux/set_dns:88: SyntaxWarning: "is" with a literal. Did you mean "=="? servers = [( socket.AF_INET if x.version is 4 else socket.AF_INET6, map(int, x.packed)) for x in servers] module 'platform' has no attribute 'linux_distribution' This is a breaking change in Python3.8.
|
|
So what is calling platform.linux_distribution?
|
|
Let’s search for it:
|
|
$ sudo grep -r 'linux_distribution' /opt/appgate/linux/ /opt/appgate/linux/nm.py: if platform.linux_distribution()[0] != 'Fedora': Aha! So this is in the local AppGate source code. This should be an easy fix. Let’s just replace this line with:
|
|
if True: # Since we are not using Fedora :) Wrapping up# It turns out there are breaking changes in Python3.8.
|
|
The docs say Deprecated since version 3.5, will be removed in version 3.8: See alternative like the distro package.
|
|
I suppose this highlights one of the caveats of relying upon the system’s python, rather than having an isolated, dedicated environment for all dependencies.
|
|
`}),j.addEventListener("input",function(){let o=this.value,i=e.search(o,5,{enrich:!0}),s=new Map;for(let r of i.flatMap(l=>l.result))s.has(r.href)||s.set(r.doc.href,r.doc);if(z.innerHTML="",z.classList.remove("search__suggestions--hidden"),s.size===0&&o){let r=document.createElement("div");r.innerHTML=`No results for "<strong>${o}</strong>"`,r.classList.add("search__no-results"),z.appendChild(r);return}for(let[r,l]of s){let h=document.createElement("a");h.href=r,h.classList.add("search__suggestion-item"),z.appendChild(h);let p=document.createElement("div");p.textContent=l.title,p.classList.add("search__suggestion-title"),h.appendChild(p);let m=document.createElement("div");if(m.textContent=l.description,m.classList.add("search__suggestion-description"),h.appendChild(m),z.childElementCount===5)break}})})();})();
|
|
//! Source: https://github.com/h-enk/doks/blob/master/assets/js/index.js
|
|
/*! Source: https://dev.to/shubhamprakash/trap-focus-using-javascript-6a3 */
|
|
//! Source: https://discourse.gohugo.io/t/range-length-or-last-element/3803/2
|