Migrate to jekyll for static gen

Andrew Tomaka 2023-02-05 21:07:24 -05:00
parent 8280d4fcab
commit 0cc7d7fe42
Signed by: atomaka
GPG key ID: 61209BF70A5B18BE
27 changed files with 1424 additions and 100 deletions

9
.gitignore vendored

@@ -1,2 +1,7 @@
-build/*
-blog/*
+_site
+.sass-cache
+.jekyll-cache
+.jekyll-metadata
+vendor
+old

1
.ruby-version Normal file

@@ -0,0 +1 @@
3.1.2

25
404.html Normal file

@@ -0,0 +1,25 @@
---
permalink: /404.html
layout: default
---
<style type="text/css" media="screen">
.container {
margin: 10px auto;
max-width: 600px;
text-align: center;
}
h1 {
margin: 30px 0;
font-size: 4em;
line-height: 1;
letter-spacing: -1px;
}
</style>
<div class="container">
<h1>404</h1>
<p><strong>Page not found :(</strong></p>
<p>The requested page could not be found.</p>
</div>

44
Gemfile Normal file

@@ -0,0 +1,44 @@
source "https://rubygems.org"
# Hello! This is where you manage which Jekyll version is used to run.
# When you want to use a different version, change it below, save the
# file and run `bundle install`. Run Jekyll with `bundle exec`, like so:
#
# bundle exec jekyll serve
#
# This will help ensure the proper Jekyll version is running.
# Happy Jekylling!
gem "jekyll", "~> 4.3.2"
# This is the default theme for new Jekyll sites. You may change this to anything you like.
gem "minima", "~> 2.5"
gem "no-style-please", git: "https://github.com/atomaka/no-style-please", branch: "atomaka"
# If you want to use GitHub Pages, remove the "gem "jekyll"" above and
# uncomment the line below. To upgrade, run `bundle update github-pages`.
# gem "github-pages", group: :jekyll_plugins
# If you have any plugins, put them here!
group :jekyll_plugins do
gem "jekyll-feed", "~> 0.12"
gem "jekyll-gist"
gem "jekyll-tidy"
end
group :development do
# import for medium
gem "jekyll-import", git: "https://github.com/jekyll/jekyll-import"
gem "mdl"
gem "rss"
gem "safe_yaml"
end
# Windows and JRuby do not include zoneinfo files, so bundle the tzinfo-data gem
# and associated library.
platforms :mingw, :x64_mingw, :mswin, :jruby do
gem "tzinfo", ">= 1", "< 3"
gem "tzinfo-data"
end
# Performance-booster for watching directories on Windows
gem "wdm", "~> 0.1.1", :platforms => [:mingw, :x64_mingw, :mswin]
# Lock `http_parser.rb` gem to `v0.6.x` on JRuby builds since newer versions of the gem
# do not have a Java counterpart.
gem "http_parser.rb", "~> 0.6.0", :platforms => [:jruby]

152
Gemfile.lock Normal file

@@ -0,0 +1,152 @@
GIT
  remote: https://github.com/atomaka/no-style-please
  revision: e281fa0bbecc82e84fe61be9a92baad5a1a46763
  branch: atomaka
  specs:
    no-style-please (0.4.7)
      jekyll
      jekyll-feed
      jekyll-seo-tag

GIT
  remote: https://github.com/jekyll/jekyll-import
  revision: 6499317a81aeda119b6ceefb37ab81c9f6219659
  specs:
    jekyll-import (0.21.0)
      jekyll (>= 3.7, < 5.0)
      nokogiri (~> 1.0)
      reverse_markdown (~> 2.1)

GEM
  remote: https://rubygems.org/
  specs:
    addressable (2.8.1)
      public_suffix (>= 2.0.2, < 6.0)
    chef-utils (18.1.0)
      concurrent-ruby
    colorator (1.1.0)
    concurrent-ruby (1.2.0)
    em-websocket (0.5.3)
      eventmachine (>= 0.12.9)
      http_parser.rb (~> 0)
    eventmachine (1.2.7)
    faraday (2.7.4)
      faraday-net_http (>= 2.0, < 3.1)
      ruby2_keywords (>= 0.0.4)
    faraday-net_http (3.0.2)
    ffi (1.15.5)
    forwardable-extended (2.6.0)
    google-protobuf (3.21.12-x86_64-linux)
    htmlbeautifier (1.4.2)
    htmlcompressor (0.4.0)
    http_parser.rb (0.8.0)
    i18n (1.12.0)
      concurrent-ruby (~> 1.0)
    jekyll (4.3.2)
      addressable (~> 2.4)
      colorator (~> 1.0)
      em-websocket (~> 0.5)
      i18n (~> 1.0)
      jekyll-sass-converter (>= 2.0, < 4.0)
      jekyll-watch (~> 2.0)
      kramdown (~> 2.3, >= 2.3.1)
      kramdown-parser-gfm (~> 1.0)
      liquid (~> 4.0)
      mercenary (>= 0.3.6, < 0.5)
      pathutil (~> 0.9)
      rouge (>= 3.0, < 5.0)
      safe_yaml (~> 1.0)
      terminal-table (>= 1.8, < 4.0)
      webrick (~> 1.7)
    jekyll-feed (0.17.0)
      jekyll (>= 3.7, < 5.0)
    jekyll-gist (1.5.0)
      octokit (~> 4.2)
    jekyll-sass-converter (3.0.0)
      sass-embedded (~> 1.54)
    jekyll-seo-tag (2.8.0)
      jekyll (>= 3.8, < 5.0)
    jekyll-tidy (0.2.2)
      htmlbeautifier
      htmlcompressor
      jekyll
    jekyll-watch (2.2.1)
      listen (~> 3.0)
    kramdown (2.4.0)
      rexml
    kramdown-parser-gfm (1.1.0)
      kramdown (~> 2.0)
    liquid (4.0.4)
    listen (3.8.0)
      rb-fsevent (~> 0.10, >= 0.10.3)
      rb-inotify (~> 0.9, >= 0.9.10)
    mdl (0.12.0)
      kramdown (~> 2.3)
      kramdown-parser-gfm (~> 1.1)
      mixlib-cli (~> 2.1, >= 2.1.1)
      mixlib-config (>= 2.2.1, < 4)
      mixlib-shellout
    mercenary (0.4.0)
    minima (2.5.1)
      jekyll (>= 3.5, < 5.0)
      jekyll-feed (~> 0.9)
      jekyll-seo-tag (~> 2.1)
    mixlib-cli (2.1.8)
    mixlib-config (3.0.27)
      tomlrb
    mixlib-shellout (3.2.7)
      chef-utils
    nokogiri (1.14.1-x86_64-linux)
      racc (~> 1.4)
    octokit (4.25.1)
      faraday (>= 1, < 3)
      sawyer (~> 0.9)
    pathutil (0.16.2)
      forwardable-extended (~> 2.6)
    public_suffix (5.0.1)
    racc (1.6.2)
    rake (13.0.6)
    rb-fsevent (0.11.2)
    rb-inotify (0.10.1)
      ffi (~> 1.0)
    reverse_markdown (2.1.1)
      nokogiri
    rexml (3.2.5)
    rouge (4.0.1)
    rss (0.2.9)
      rexml
    ruby2_keywords (0.0.5)
    safe_yaml (1.0.5)
    sass-embedded (1.58.0)
      google-protobuf (~> 3.21)
      rake (>= 10.0.0)
    sawyer (0.9.2)
      addressable (>= 2.3.5)
      faraday (>= 0.17.3, < 3)
    terminal-table (3.0.2)
      unicode-display_width (>= 1.1.1, < 3)
    tomlrb (2.0.3)
    unicode-display_width (2.4.2)
    webrick (1.8.1)

PLATFORMS
  x86_64-linux

DEPENDENCIES
  http_parser.rb (~> 0.6.0)
  jekyll (~> 4.3.2)
  jekyll-feed (~> 0.12)
  jekyll-gist
  jekyll-import!
  jekyll-tidy
  mdl
  minima (~> 2.5)
  no-style-please!
  rss
  safe_yaml
  tzinfo (>= 1, < 3)
  tzinfo-data
  wdm (~> 0.1.1)

BUNDLED WITH
   2.3.26

23
README.md Normal file

@@ -0,0 +1,23 @@
# atomaka.com
Website managed by Jekyll.
## Pre-requisites
* Python
  * whatever version lets you install `requirements.txt`
* Ruby
  * Preferably the version in `.ruby-version`
## Setup
1. `pip install -r requirements.txt`
2. `bundle install`
## Testing
* `bundle exec jekyll serve`
## Deployment
* `./build.sh`

60
_config.yml Normal file

@@ -0,0 +1,60 @@
# Welcome to Jekyll!
#
# This config file is meant for settings that affect your whole blog, values
# which you are expected to set up once and rarely edit after that. If you find
# yourself editing this file very often, consider using Jekyll's data files
# feature for the data you need to update frequently.
#
# For technical reasons, this file is *NOT* reloaded automatically when you use
# 'bundle exec jekyll serve'. If you change this file, please restart the server process.
#
# If you need help with YAML syntax, here are some quick references for you:
# https://learn-the-web.algonquindesign.ca/topics/markdown-yaml-cheat-sheet/#yaml
# https://learnxinyminutes.com/docs/yaml/
#
# Site settings
# These are used to personalize your new site. If you look in the HTML files,
# you will see them accessed via {{ site.title }}, {{ site.email }}, and so on.
# You can create any custom variable you would like, and they will be accessible
# in the templates via {{ site.myvariable }}.
title: Andrew Tomaka
email: atomaka@atomaka.com
description: >- # this means to ignore newlines until "baseurl:"
Personal website of Andrew Tomaka
baseurl: "" # the subpath of your site, e.g. /blog
url: "https://www.atomaka.com" # the base hostname & protocol for your site, e.g. http://example.com
github_username: atomaka
favicon: favicon.ico
# Build settings
theme: no-style-please
theme_config:
appearance: "auto" # can be "light", "dark" or "auto"
plugins:
- jekyll-feed
- jekyll-gist
# Exclude from processing.
# The following items will not be processed, by default.
# Any item listed under the `exclude:` key here will be automatically added to
# the internal "default list".
#
# Excluded items can be processed by explicitly listing the directories or
# their entries' file path in the `include:` list.
#
exclude:
- .ruby-version
- build.sh
- old/
- README.md
# - .sass-cache/
# - .jekyll-cache/
# - gemfiles/
# - Gemfile
# - Gemfile.lock
# - node_modules/
# - vendor/bundle/
# - vendor/cache/
# - vendor/gems/
# - vendor/ruby/

27
_data/menu.yml Normal file

@@ -0,0 +1,27 @@
# For documentation on this file, see:
# https://github.com/riggraz/no-style-please#customize-the-menu
entries:
  - title: info
    url: false
    entries:
      - title: coding
        url: https://github.com/atomaka
      - title: emailing
        url: mailto:atomaka@atomaka.com
      - title: speaking
        url: https://slides.com/atomaka
      - title: tooting
        url: https://pub.atomaka.com/@atomaka
      - title: working
        url: https://www.linkedin.com/in/atomaka
  - title: all posts
    post_list:
      limit: 5
      show_more: true
      show_more_text: See archive...
      show_more_url: archive
  - title: rss
    url: feed.xml


@@ -0,0 +1,61 @@
---
layout: post
title: Force "Print Document on" 11x17 Scaled
tag:
- technical
---
The Print Spooler API in Windows does not seem to have an option to force
scaling to another paper size. Formerly, we would install the printer and save
the registry key in `HKCU\Printers\DevModePerUser`. Then, we could check the
"Print Document On" option, apply the settings, and save the registry key again.
Performing a diff between the two sets of roughly 4000 hex values should give
a subset of values that relate to "Print Document On." Finally, on installation
we could read in the registry key after everything else had been set up, cycle
through it changing the appropriate values based on our diff, and then save the
registry key. This stopped working.
No new diffs could be collected that would update the scale-to-fit functionality
that we needed. However, if the "Print Document On" option is manually set and
the registry key is collected, that key can be used as the "diff" and the newly
added printer will print scaled as desired. This has the unfortunate side
effect of modifying all other settings on the printer, including the name and
color settings. As a workaround, two different registry key "diffs" are used:
one for color and one for black & white. Then, the first 30 hex characters can
be chopped off in each key to make sure the printer name is not overwritten.
```
#include <windows.h>
#include <stdlib.h>

// offset/value pairs pulled from the registry key "diff"
int stf[][2] = {
  {8, 0},
  {10, 0},
  // and more keys
};

int setPrintDocumentOn(char *printerName) {
  HKEY hkey;
  // find the tree where the key we need to change resides
  RegOpenKey(HKEY_CURRENT_USER, "Printers\\DevModePerUser", &hkey);

  DWORD requiredSize = 0;
  DWORD dataType;
  // read in the key: the first call reports the size, the second fills the buffer
  RegQueryValueEx(hkey, printerName, 0, &dataType, NULL, &requiredSize);
  char *DevModePerUserData = malloc(requiredSize);
  RegQueryValueEx(hkey, printerName, 0, &dataType, (BYTE *)DevModePerUserData, &requiredSize);

  // update the key with each offset/value pair from the diff
  for (int i = 0; i < sizeof(stf) / sizeof(stf[0]); i++) {
    DevModePerUserData[stf[i][0]] = stf[i][1];
  }

  // and save our updates
  RegSetValueEx(hkey, printerName, 0, REG_BINARY, (BYTE *)DevModePerUserData, requiredSize);
  RegFlushKey(hkey);
  RegCloseKey(hkey);
  free(DevModePerUserData);

  return 0;
}
```


@@ -0,0 +1,14 @@
---
layout: post
title: Serious Regular Expressions
tag:
- humor
---
```
$n = '[0-9]';
$reg = '/^[AZ]'.$n.$n.$n.$n.$n.$n.$n.$n.'$/';
if(preg_match($reg, $id)) {
// ...
}
```


@@ -0,0 +1,58 @@
---
layout: post
title: Password Checking in C
tag:
- humor
---
```
if (strlen(encrytedenteredpassword) != 13) {
passwordcorrect=0;
} else {
for (i=0; i<13;i++) {
switch (i) {
case 0:
if (encrytedenteredpassword[i] != 'f') passwordcorrect=0;
break;
case 1:
if (encrytedenteredpassword[i] != 'J') passwordcorrect=0;
break;
case 2:
if (encrytedenteredpassword[i] != 'c') passwordcorrect=0;
break;
case 3:
if (encrytedenteredpassword[i] != 'l') passwordcorrect=0;
break;
case 4:
if (encrytedenteredpassword[i] != 'Q') passwordcorrect=0;
break;
case 5:
if (encrytedenteredpassword[i] != 'v') passwordcorrect=0;
break;
case 6:
if (encrytedenteredpassword[i] != 'P') passwordcorrect=0;
break;
case 7:
if (encrytedenteredpassword[i] != 'i') passwordcorrect=0;
break;
case 8:
if (encrytedenteredpassword[i] != 'l') passwordcorrect=0;
break;
case 9:
if (encrytedenteredpassword[i] != 'N') passwordcorrect=0;
break;
case 10:
if (encrytedenteredpassword[i] != 'A') passwordcorrect=0;
break;
case 11:
if (encrytedenteredpassword[i] != 'z') passwordcorrect=0;
break;
case 12:
if (encrytedenteredpassword[i] != '.') passwordcorrect=0;
break;
default: passwordcorrect=0; break;
}
if (!passwordcorrect) break;
}
}
```


@@ -0,0 +1,300 @@
---
layout: post
title: 'Intro to Puppet: The Bare Minimum'
tag:
- puppet
- technical
---
Last month, some of my coworkers were looking for a brief introduction to
[Puppet](http://www.puppetlabs.com/). Puppet is a type of configuration manager
for your servers. It allows you to create definitions of your server that can
then be automatically maintained. Puppet is mostly self-documenting, so it makes
it easy to know what your servers are doing while giving you a great way to
automate setting up large numbers of servers.
This is that brief talk. All code is available on
[Github in my puppet-walkthru repository](https://github.com/atomaka/puppet-walkthru).
You will need [Git](http://www.git-scm.com/),
[Virtual Box](https://www.virtualbox.org/) and
[Vagrant](http://www.vagrantup.com/) installed. To begin, clone the repository
and launch the Vagrantfile:
```
git clone https://github.com/atomaka/puppet-walkthru.git
cd puppet-walkthru
vagrant up
```
This will setup a virtual machine on your computer with Puppet installed. All
code can be found on the virtual machine in the /vagrant directory.
```
vagrant ssh
sudo su
cd /vagrant
```
You are now ready to work through the first example.
## 1. Managing Users
Puppet maintains state on your computer using what are referred to as
[resources](http://docs.puppetlabs.com/references/latest/type.html). The
built-in resources provided by Puppet provide a good start. In
[example one](https://github.com/atomaka/puppet-walkthru/blob/master/manifests/1-user-type.pp),
you can see how to use a Puppet resource to add and remove a user.
```
user { 'tm':
ensure => present,
}
user { 'fowlks':
ensure => absent,
}
```
You can run this code on your virtual machine with
`puppet apply manifests/1-user-type.pp`. Afterward, you should notice that the
user "tm" exists on your system.
The [user resource](http://docs.puppetlabs.com/references/latest/type.html#user)
type manages local users on your system. This works on a wide variety of
systems, although some do not support some of the more specific features. In
this example, we make sure the user "tm" is present on the system and make sure
the user "fowlks" is not present.
`ensure` is a keyword for all Puppet resources, and `present` and `absent` are
the most common values, although some resource types have others. `present` will
make sure that definition exists on your server and `absent` will do the
opposite.
## 2. Managing Files
Managing files is one of the most common tasks for server administration and
Puppet offers many ways to handle this. We'll explore these in the
[next example](https://github.com/atomaka/puppet-walkthru/blob/master/manifests/2-file-type.pp).
```
file { '/tmp/test1.txt':
ensure => present,
content => 'Hello',
}
file { '/tmp/test2.txt':
ensure => present,
source => '/vagrant/files/test2.txt',
}
$something = "Hello"
file { '/tmp/test3.txt':
ensure => present,
content => template('/vagrant/templates/test3.txt.erb'),
}
```
Run this on your virtual machine using `puppet apply manifests/2-file-type.pp`
and you should be alerted that three files were created. You can verify this by
viewing the contents of the tmp directory with `ls /tmp`.
The first
[file resource](http://docs.puppetlabs.com/references/latest/type.html#file)
simply creates a file at the specified location that says "Hello."
Unfortunately, this isn't very useful since we do not want to have to type our
entire configuration file in our Puppet definition. The second resource is
slightly more useful. This allows us to copy a file from our Puppet repository
to a specified location.
Finally, we can also create templates. The last example uses a file from our
repository and copies it to the specified location. However, we can also include
variables that can be used in our file. In this case, we set the variable
`$something` to "Hello" and its contents are then rendered into the file as
`You said: Hello`.
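For reference, the template itself is just the file's contents with embedded
Ruby. A minimal sketch of what `templates/test3.txt.erb` likely contains (an
assumption; the actual file ships with the walkthrough repository):
```
You said: <%= @something %>
```
Puppet exposes variables from the calling scope to ERB templates as instance
variables, which is why `$something` is read as `@something` here.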
## 3. Installing Packages
The last common task we'll look at is installing packages. Puppet provides a way
to define which
[packages](http://docs.puppetlabs.com/references/latest/type.html#package) can
be installed. By default, this uses your distribution's built-in package manager,
although there are ways to specify various providers. Our example
[shows the most basic usage](https://github.com/atomaka/puppet-walkthru/blob/master/manifests/3-package-type.pp).
```
package { 'vim':
ensure => present,
}
package { 'alpine-pico':
ensure => absent,
}
```
Try to open vim and you will notice it cannot run. Once you run this code with
`puppet apply manifests/3-package-type.pp`, the vim package will then be
present.
## 4. Ordering (or lack thereof)
The trickiest thing for beginners to Puppet is dealing with its
[non-deterministic behavior](http://puppetlabs.com/blog/inside-puppet-about-determinism).
This is easier to
[show than explain](https://github.com/atomaka/puppet-walkthru/blob/master/manifests/4-order-example.pp).
```
notify { 'First': }
notify { 'Second': }
notify { 'Third': }
notify { 'Fourth': }
notify { 'Fifth': }
notify { 'Sixth': }
notify { 'Seventh': }
```
When run, you would expect this to spit out First, Second, …, Seventh in order.
Invoke this code with `puppet apply manifests/4-order-example.pp` and be
surprised at the results. The order of the code is much different than what is
in the file. Furthermore, if you were to add `notify { 'Eighth': }` the ordering
might change completely.
## 5. But I Need Order
But there are dependencies when setting up systems. Puppet allows for this; you
are just required to
[explicitly define them](https://github.com/atomaka/puppet-walkthru/blob/master/manifests/5-ordered-example.pp).
The biggest advantage here is that if one line of dependencies fails, your
entire configuration does not. It takes some getting used to and can be
frustrating, but it is worth it.
```
notify { 'First': }
notify { 'Second':
require => Notify['First'],
}
notify { 'Third':
require => Notify['Second'],
}
notify { 'Fourth':
require => Notify['Third'],
}
notify { 'Fifth':
require => Notify['Fourth'],
}
notify { 'Sixth':
require => Notify['Fifth'],
}
notify { 'Seventh':
require => Notify['Sixth'],
}
```
By using the `require` parameter, we have forced ordering. If you run this
code with `puppet apply manifests/5-ordered-example.pp`, you will see the order
you expected in example number four.
## 6. Know Your Environment
Puppet also provides a way for you to know about the system that the Puppet code
is running on with a system called Facter.
```
notify { "${::osfamily}": }
notify { "${::ipaddress}": }
notify { "${::uptime}": }
```
When run with `puppet apply manifests/6-facts-example.pp`,
[this code](https://github.com/atomaka/puppet-walkthru/blob/master/manifests/6-facts-example.pp)
will display the information about the virtual machine you are running on. We
will look at why this is useful later.
## 7. Doing Something Useful
Now that we have seen some quick forced examples of how to use Puppet, we now
have enough knowledge to do something that is actually useful. Using Puppet, we
can
[configure an entire service](https://github.com/atomaka/puppet-walkthru/blob/master/manifests/7-full-example.pp).
If you are not familiar, [NTP](http://www.ntp.org/) is a networking protocol for
time management. It is useful for maintaining the same system time across all
of your servers. And we can use Puppet to install it!
```
package { 'ntp':
ensure => present,
}
file { '/etc/ntp.conf':
ensure => present,
require => Package['ntp'],
source => '/vagrant/files/ntp.conf.debian',
}
service { 'ntp':
ensure => running,
enable => true,
subscribe => File['/etc/ntp.conf'],
}
```
When running this code with `puppet apply manifests/7-full-example.pp`, you
should notice three things happen. First, the ntp package will be installed.
Since we are on Ubuntu, this is done using apt-get. Secondly, a configuration
file was copied from our Puppet repository to the location specified. Finally,
the ntp service was started.
Install, configure, start is one of the most common patterns in Linux/UNIX
systems administration and we can easily automate it with Puppet.
Something to note is our use of subscribe when using the
[service resource](http://docs.puppetlabs.com/references/latest/type.html#service)
type. This makes sure that the ntp service is restarted only if the
configuration file has changed.
## 8. Managing Multiple Operating Systems
Before this section, be sure to reset what we did in the previous example by
running `bash support/cleanup7.sh`. We just need to uninstall ntp and remove our
config file so we can do it all again.
Unfortunately, our environments are never uniform and we are stuck dealing with
different versions of operating systems. Luckily, we have tools that we can use
to deal with it. We touched on this in section six, but now we will actually use
them
[to install ntp again](https://github.com/atomaka/puppet-walkthru/blob/master/manifests/8-independent-example.pp).
This time, our code will work on both Debian and RedHat family Linux
distributions.
```
case $::osfamily {
'RedHat': {
$service = 'ntpd'
$conf = 'ntp.conf.redhat'
}
'Debian': {
$service = 'ntp'
$conf = 'ntp.conf.debian'
}
}
notify { 'OS Information':
message => "${::osfamily}: Setting service to ${service} and conf to ${conf}",
before => Package['ntp'],
}
package { 'ntp':
ensure => present,
}
file { '/etc/ntp.conf':
ensure => present,
require => Package['ntp'],
source => "/vagrant/files/${conf}",
}
service { $service:
ensure => running,
enable => true,
subscribe => File['/etc/ntp.conf'],
}
```
When on our Ubuntu virtual machine, running this code with
`puppet apply manifests/8-independent-example.pp` will set up NTP just as we did
in example seven. However, this code can also run on RedHat / CentOS machines
without any adjustments.
This is handled using the facts we discussed in section six. We check to see
what distribution we are using with `$::osfamily` and make choices based on
that. Since the service and the config file are different on RedHat, we assign
variables to these and adjust them as needed.
You now have enough knowledge to get started with Puppet!


@@ -0,0 +1,78 @@
---
layout: post
title: Get Away From Me You Creep
tag:
- humor
---
Almost ten years ago, I took a telecommunication history course with one of
the most eccentric people I had ever met. Unfortunately, it was during a time
where I was attending very few courses and I missed most of my opportunities to
listen to him speak. Still, I managed to make it to enough classes to have
distinct memories of him. Dr. Barry Litman passed away a few years ago and these
are his stories.
I attended the first day of class just as I did the first day of every class I
took. I sat down and was probably scribbling in the notepad I purchased that I
knew would never be used or barely staying awake. He began giving a brief
overview of the history of telecommunication in extreme storytelling mode with
unprecedented excitement. I don't remember anything from that day except for the
way he ended class: “And just like that, the entire industry evolved into a star
just waiting to supernova.”
I explained this to a couple of students I was working with and they immediately
knew who I was talking about. They warned me about David Sarnoff but would not
give me any other information. I was forced to attend class for several sessions
before the story was revealed.
David Sarnoff was a corporate kingpin during the pioneering of radio and
television serving as an executive for the Radio Corporation of America. Dr.
Litman was visibly agitated as he explained this and other RCA history to the
class. And then he began talking about Edwin Armstrong.
Edwin Armstrong was an employee of RCA and eventually credited inventor of FM
radio. He developed this while working for RCA, but Sarnoff saw it as a threat
to the AM technology that the company was already producing. Because of this and
their focus on television, Sarnoff and RCA chose not to purchase the patents for
the technology. But after hearing the quality and seeing its success, RCA first
tried to have the FCC ban usage of the wave and eventually claimed the invention
of the technology and won the patent. Armstrong spent nearly all his time and
money fighting for the rights to his own technology.
As he was explaining these details, Litman went from agitated to infuriated. His
face reddened, he began sweating and spoke louder and louder. The entire class
was looking around at each other and no one knew how to respond.
He eventually came to the close of the story. Armstrong ended up committing
suicide. And even though he was eventually given the credit he deserved, it came
too late. The professor was visibly exhausted and saddened but managed to
literally spit out one final angry yell: “And it was all because of that son of
a bitch, David Sarnoff.” He then walked out of the room.
I mostly stopped going to class after that. I don't know why. He was an
incredible teacher and his enthusiasm kept me interested during most lectures.
In the few classes I did attend he frequently referenced Vanna White as someone
who used to be one of his favorite people, but never expanded the story. The
last day before finals I went to class in hopes of getting some hints for the
final. Instead, he shared his story.
He explained to us that once a year, he and one of his close friends/colleagues
would attend a conference related to telecommunication. A few years prior, Vanna
White was attending something loosely related and he managed to convince his
friend to attend. Vanna White's booth was highly trafficked and Litman was
forced to wait in line for three hours to see her. During this time, he was
extremely nervous which led to him soaking through his shirt with sweat. When it
was finally his turn to meet her, he walked up and blurted out about how big of
a fan he was and how long he had followed her.
And then he paused. I remember it feeling like ten minutes, but it was probably
closer to one. Finally, he asked the class “And do you know what that bitch said
to me?” He walked over to the side of the classroom and held the button to
retract the overhead. Slowly, the words she spoke were revealed having been
written in chalk on the board before the class had begun:
"Get away from me, you creep."
I wish I had written about these ten years ago. Even then, I wouldn't have done
the man justice. Truly great speakers are rare and it's unfortunate he passed so
soon.


@@ -0,0 +1,143 @@
---
layout: post
title: Dockerizing Lubot
tag:
- docker
- technical
---
Lubot is the [Lansing Codes](http://lansing.codes/) chat bot responsible for
bringing updates about all local events to chatters everywhere…or just in
Lansing. Previously, it resided on a Heroku Free instance and was required to
sleep for six hours out of every twenty-four hours. After some issues with him
waking up, we began looking for alternatives. Since I already had a server
hosting several Docker containers, it seemed like the best choice.
Docker is basically a way to create a container that can be easily distributed
across many operating systems. Using it, we can take the code, runtime, and
libraries that Lubot requires and put them in a package. You can read more about
it in the
[Official “What is Docker” article](https://www.docker.com/what-docker).
To begin, we need to determine the necessary dependencies for our application.
Lubot is built using NodeJS and uses npm to manage dependencies. Specifically,
we are using Node v5.0.0 and npm v3.3.9. There's an official Node Dockerfile to
begin with, so it is pretty easy to start.
```
FROM node
ENV NODE_VERSION 5.0.0
ENV NPM_VERSION 3.3.9
```
After that, we want to take care of the dependencies for our application.
Because of the way Docker works, we want to cache this step so when our
package.json file does not change, we do not have to rebuild our dependencies.
```
ADD package.json /tmp/
RUN cd /tmp && npm install
RUN mkdir -p /opt/hubot && cp -a /tmp/node_modules /opt/hubot
```
Then, we need to add the application code to the container.
```
ADD . /opt/hubot
WORKDIR /opt/hubot
```
Finally, we can start the service.
```
CMD ["/opt/hubot/bin/hubot", "--adapter", "slack"]
```
Combine these steps and we end up with a
[Dockerfile](https://github.com/lansingcodes/lubot/blob/master/Dockerfile). This
gets added to the repository so that we can build the application. Building an
image is easy.
```
docker build -t lansingcodes/lubot .
```
This will download and build the necessary filesystems, caching where necessary
and giving us a runnable container image. Starting the container is also simple.
```
docker run lansingcodes/lubot
```
Lubot expects some environment variables to be there. But since we are in a
container, no environment variables exist on the system and we need to pass them
in. Our new run command accounts for this.
```
docker run -d --restart=always --name lubot \
-e HUBOT_SLACK_TOKEN=$HUBOT_SLACK_TOKEN \
-e TWITTER_LANSINGCODES_CONSUMER_KEY=$TWITTER_LANSINGCODES_CONSUMER_KEY \
-e TWITTER_LANSINGCODES_CONSUMER_SECRET=$TWITTER_LANSINGCODES_CONSUMER_SECRET \
-e TWITTER_LANSINGCODES_ACCESS_TOKEN=$TWITTER_LANSINGCODES_ACCESS_TOKEN \
-e TWITTER_LANSINGCODES_ACCESS_TOKEN_SECRET=$TWITTER_LANSINGCODES_ACCESS_TOKEN_SECRET \
-e GOOGLE_API_KEY=$GOOGLE_API_KEY \
-e LUBOT_MEETUP_API_KEY=$LUBOT_MEETUP_API_KEY \
-e TZ=$TZ \
-e REDIS_URL=$REDIS_URL \
lansingcodes/lubot
```
Lubot is now running in a container. However, Heroku also provided easy
continuous deployment when combined with [Circle CI](https://circleci.com/).
Being able to have changes deployed when the master branch changes is handy.
Circle CI allows us to specify post-build commands to run. Typically, we'd want
to build the container on our CI server and then push to a Docker registry, but
I didn't have one of those available. We can still use Circle CI to execute
commands on a remote server with SSH. This makes our deploy process simple:
- clone the repository on our remote server
- build the Docker image from that repository
- run the Docker image that was built
Our CI build file will trigger these actions via a script.
```
scp deploy/deploy.sh lubot@app.atomaka.com:/home/lubot
ssh lubot@app.atomaka.com "bash /home/lubot/deploy.sh"
```
And then, deploy.sh will take care of the parts we already discussed.
```
#!/bin/bash
cd $HOME
source lubotrc
git clone https://github.com/lansingcodes/lubot.git
cd $HOME/lubot
sudo docker build -t lansingcodes/lubot .
cd $HOME
rm -rf $HOME/lubot
sudo docker rm -f lubot
sudo docker run -d --restart=always --name lubot \
-e HUBOT_SLACK_TOKEN=$HUBOT_SLACK_TOKEN \
-e TWITTER_LANSINGCODES_CONSUMER_KEY=$TWITTER_LANSINGCODES_CONSUMER_KEY \
-e TWITTER_LANSINGCODES_CONSUMER_SECRET=$TWITTER_LANSINGCODES_CONSUMER_SECRET \
-e TWITTER_LANSINGCODES_ACCESS_TOKEN=$TWITTER_LANSINGCODES_ACCESS_TOKEN \
-e TWITTER_LANSINGCODES_ACCESS_TOKEN_SECRET=$TWITTER_LANSINGCODES_ACCESS_TOKEN_SECRET \
-e GOOGLE_API_KEY=$GOOGLE_API_KEY \
-e LUBOT_MEETUP_API_KEY=$LUBOT_MEETUP_API_KEY \
-e TZ=$TZ \
-e REDIS_URL=$REDIS_URL \
lansingcodes/lubot
```
Deploying Lubot is now just as easy as it was with Heroku and he never has to
sleep again.
- [Other details surrounding the deployment](https://github.com/lansingcodes/lubot/tree/master/deploy)
- [Lubot repository](https://github.com/lansingcodes/slackbot)


@@ -0,0 +1,86 @@
---
layout: post
title: Testing Your Docker Images with GitLab CI
tag:
- continuous-integration
- gitlab
- docker
- devops
---
I have been using [GitLab](https://about.gitlab.com) for over four years. My
[first commit](https://github.com/gitlabhq/gitlabhq/commit/0760ba3efb7566b9f56bb066f4b15ba8ea34e1e7)
to the project came nearly three years ago. And although I was pretty
disappointed when they began
[releasing an enterprise edition](https://about.gitlab.com/2013/07/22/announcing-gitlab-enterprise-edition/),
the community edition of the project remains impressive. More recently, GitLab
has included a
[continuous integration software](https://about.gitlab.com/gitlab-ci/) along
with their code collaboration solution.
Recently, I have been using this to produce [Docker](https://www.docker.com/)
images for my production environment. Although I had been using Docker for all
of my build stages, I was never using the image I was producing for validation.
Since I want to be as sure as I can that my production images are functioning, I
decided to update my build to use the project Dockerfile to run tests.
I looked around and found a
[few](http://sirile.github.io/2016/09/29/minimal-go-containers-with-docker-and-gitlab-ci.html)
[articles](https://www.andrewmunsell.com/blog/ci-cd-with-docker-containers/) on
using Docker with Gitlab CI. Unfortunately, they all outlined methods that
[did not test the image](https://www.stefanwienert.de/blog/2015/11/07/gitlab-ci-with-docker-starting-guide/)
directly or did so in a
[complicated way](http://blog.lwolf.org/post/How-to-build-and-test-docker-images-in-gitlab-ci/).
I thought I could do better.
We always want to use Docker for our builds, but running Docker inside of Docker
is not recommended. To resolve this, we can mount the host system's Docker
socket inside the container when creating our test runner for building images.
```
[[runners]]
  name = "docker-runner"
  url = "https://gitlab.example.com/ci"
  token = "YOUR_TOKEN"
  executor = "docker"
  [runners.docker]
    tls_verify = false
    image = "docker:latest"
    privileged = false
    disable_cache = false
    volumes = ["/var/run/docker.sock:/var/run/docker.sock"]
```
Now that we are using the host Docker, we can leverage its image storage and
caching for our build. We can also use that image in our other build tasks.
```
stages:
  - prepare
  - test
  - deploy

build-test-container:
  stage: prepare
  script:
    - docker build -t your-image .
  tags:
    - your-docker-tag

spec:
  stage: test
  script:
    - bundle exec rake db:create db:migrate
    - bundle exec rspec
  image: your-image
  services:
    - postgres:latest
  # ...
```
The container built from our project Dockerfile is now being directly tested by
our continuous integration. As you can see, we can also use any container links
without writing extra code.
Clean and simple!


@@ -0,0 +1,72 @@
---
layout: post
title: Singular and Plural Rails Routes for the Same Resource
tag:
- api
- ruby-on-rails
- restful
---
Sometimes when building your API with Rails,
[following best practices](http://www.vinaysahni.com/best-practices-for-a-pragmatic-restful-api)
may seem difficult. I recently came across one of these cases and was tempted to
take a shortcut. However, I held strong and eventually came to a quality RESTful
solution, but not without issue.
I wanted to allow users of my API to quickly access their own user profile. Our
application had already implemented a route to allow this via
`GET /profiles/:id` and simply implemented with
`resources :profiles, only: [:show]` in our routes configuration.
Unfortunately, our API users wanted to be able to access their profile without
providing their ID. My first pass at resolving this was to pass in a "fake
resource" to accomplish it.
{% gist 1a056f8dca8931d8872c6cfefebb2d1a %}
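The gist is not inlined here, so as a rough sketch of what that first pass might
have looked like (the route ordering and `current_user` helper are assumptions,
not the original code):
```
# config/routes.rb (hypothetical reconstruction)
get 'profiles/me', to: 'profiles#show' # must match before /profiles/:id
resources :profiles, only: [:show]

# app/controllers/profiles_controller.rb
class ProfilesController < ApplicationController
  def show
    # fall back to the requester's own profile when no real ID is given
    profile = params[:id] ? Profile.find(params[:id]) : current_user.profile
    render json: profile
  end
end
```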
But I had broken one of the RESTful best practices. `/profiles/me` is not an
actual resource, but we are pretending it is. So I looked to the
[Rails routes documentation](http://guides.rubyonrails.org/routing.html) for
guidance and came across
[singular resources](http://guides.rubyonrails.org/routing.html#singular-resources).
> Sometimes, you have a resource that clients always look up without referencing
> an ID. For example, you would like /profile to always show the profile of the
> currently logged in user.
I should not have been surprised that my exact use case was cited!
Now we are back on track! I get to go back to my simple route declaration with
`resource :profile, only: :show` and without changing my controller code at all.
But now I needed users to be able to access each other's profiles. Again, the
Rails documentation had me covered.
> Because you might want to use the same controller for a singular route
> (/account) and a plural route (/accounts/45), singular resources map to plural
> controllers. So that, for example, resource :photo and resources :photos
> creates both singular and plural routes that map to the same controller
> (PhotosController).
And our implementation stays clean.
{% gist e8d6641349e4d0ea7e68d22dd3755e9d %}
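Assuming the shared controller is `ProfilesController`, a sketch of that clean
implementation is just the two declarations side by side:
```
# config/routes.rb
resource :profile, only: :show    # GET /profile      (no ID)
resources :profiles, only: :show  # GET /profiles/:id

# Both map to ProfilesController#show; params[:id] is nil for the singular
# route, so the controller can fall back to the current user's profile.
```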
This was awesome until I needed to use path helpers. With this implementation,
`profile_path(:id)` works as expected but `profile_path` does not. If the order
is reversed in the routes configuration, `profile_path` will work and
`profile_path(:id)` will not. This is the result of a bug in the Rails core that
touches some pretty intricate code that is linked to other issues.
[One has even been open for five years](https://github.com/rails/rails/issues/1769)!
And we can work around that one as well by
[overriding the named helpers](http://guides.rubyonrails.org/routing.html#overriding-the-named-helpers).
Passing `as:` to our resource definition creates the helpers with a new name.
Our final code is ready!
{% gist e9dcc4cd4bad89554fb01be6627c7b63 %}
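The final gist presumably boils down to something like this; the
`current_profile` naming matches the helpers described below, the rest is an
assumption:
```
# config/routes.rb
resource :profile, only: :show, as: :current_profile # current_profile_path
resources :profiles, only: :show                     # profile_path(:id)
```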
In our application, we can reference a generic profile with `profile_path(:id)`
while still having `current_profile_path` to direct the user to their
own profile.


@@ -0,0 +1,150 @@
---
layout: post
title: A More Flexible Dockerfile for Rails
tag:
- rails
- docker
- devops
---
One of my primary motivations for working with [Docker](https://www.docker.com/)
was creating a single artifact that I could toss into any environment. It has
been fantastic at this. I can throw together a simple Dockerfile that will build
my [Rails](http://rubyonrails.org/) application as an image for production in
about five minutes.
```
FROM ruby:2.3-alpine
ADD Gemfile* /app/
RUN apk add --no-cache --virtual .build-deps build-base \
&& apk add --no-cache postgresql-dev tzdata \
&& cd /app; bundle install --without test development \
&& apk del .build-deps
ADD . /app
RUN chown -R nobody:nogroup /app
USER nobody
ENV RAILS_ENV production
WORKDIR /app
CMD ["bundle", "exec", "rails", "s", "-b", "0.0.0.0", "-p", "8080"]
```
Except now when I need to run the application's test suite, I do not have
the dependencies I need. That Dockerfile might look something like this.
```
FROM ruby:2.3-alpine
RUN apk add --no-cache build-base postgresql-dev tzdata
ADD Gemfile* /app/
RUN cd /app; bundle install
ADD . /app
RUN chown -R nobody:nogroup /app
USER nobody
WORKDIR /app
CMD ["bundle", "exec", "rails", "s", "-b", "0.0.0.0", "-p", "8080"]
```
Many people decide to include both of these Dockerfiles in their repository as
`Dockerfile` and `Dockerfile.dev`. This works perfectly fine. But now we have a
production Dockerfile that never gets used during development. Of course, it is
going through at least one staging environment (hopefully) but it would be nice
if we had a little more testing against it.
Much like Docker provides us the ability to have a single artifact to move from
system to system, I wanted to have a single Dockerfile shared between all
environments. Luckily, Docker provides us with
[build arguments](https://docs.docker.com/engine/reference/builder/#/arg). A
build argument allows us to specify a variable when building the image and then
use that variable inside our Dockerfile.
In our current Rails Dockerfile, we have two primary differences between our
environments:
- The gem groups that are installed
- The environment that the application runs as
Bundler's
[BUNDLE_WITHOUT](http://bundler.io/man/bundle-config.1.html#LIST-OF-AVAILABLE-KEYS)
allows us to specify the gem groups to skip via an environment variable, making
both of these resolvable through environment configuration. Using this, our
shared Dockerfile could look like this:
```
FROM ruby:2.3-alpine
ARG BUNDLE_WITHOUT=test:development
ENV BUNDLE_WITHOUT ${BUNDLE_WITHOUT}
ADD Gemfile* /app/
RUN apk add --no-cache --virtual .build-deps build-base \
&& apk add --no-cache postgresql-dev tzdata \
&& cd /app; bundle install \
&& apk del .build-deps
ADD . /app
RUN chown -R nobody:nogroup /app
USER nobody
ARG RAILS_ENV=production
ENV RAILS_ENV ${RAILS_ENV}
WORKDIR /app
CMD ["bundle", "exec", "rails", "s", "-b", "0.0.0.0", "-p", "8080"]
```
The secret sauce here is `ARG BUNDLE_WITHOUT=test:development`. Running
`docker build -t rails-app .` will use the default value provided for the
`BUNDLE_WITHOUT` build argument, `test:development`, and a production Docker image
will be built. And if we specify the appropriate build arguments, we can
generate an image suitable for development.
```
docker build -t rails-app --build-arg BUNDLE_WITHOUT= --build-arg RAILS_ENV=development .
```
will generate our Docker image with all test and development dependencies
available. Typing this for building in development would get pretty tedious, so
we can use docker-compose to make it easier:
```
version: '2'
services:
app:
build:
context: .
args:
- BUNDLE_WITHOUT=
- RAILS_ENV=development
links:
- database
ports:
- "3000:8080"
env_file:
- .env
volumes:
- .:/app
tty: true
stdin_open: true
```
Now, `docker-compose up -d` is all we need in development to both build and
launch our development image.
Finally, we have a single Dockerfile that can be used to build an image for any
of our application's needs. Of course, there are some trade-offs. For example,
build time in development will suffer in some cases. But I have found only
maintaining a single Dockerfile to be worth these costs.
Have another way to deal with this issue? Please share!


@@ -0,0 +1,106 @@
---
layout: post
title: Write Good Git Commits
tag:
- git
- github
---
Writing quality Git commits has been written about to death, but it is so
important that I could not help myself from sharing my thoughts on the subject.
Mostly, I will be rehashing what
[Tim Pope has shared](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html),
but I will also include a few of the things that I have used to improve my
source histories.
### Limit the subject of your commit to 50 characters
Many people live life as I do: at the command line. Limiting to 50 characters
allows room for us to use an 80-character width for commands such as
`git rebase --interactive` without width concerns. Even for those in a more
modern IDE, the limit is important.
![My helpful screenshot](/assets/gui-git.webp)
As commit messages get longer, it becomes more difficult for everyone to
traverse your history.
### Use the commit body for important details
It is very rare that I see people extend their commit messages into the body
portion of a commit, but it is the perfect place to offer extra details. Things
like what resources you used to make a specific decision are better left in the
commit history than in code comments most of the time. In a
[recent commit to my dotfiles](https://github.com/atomaka/dotfiles/commit/28a3897995ff21f63f7893f43582532e4717b8d9),
I added a quick alias to correct a TouchBar issue I was having.
```
Fix TouchBar when it (frequently) breaks

From http://osxdaily.com/2017/01/11/manually-refresh-touch-bar-mac/ this
will reset the control portion of the touch bar which has been
frequently breaking in New and Interesting Ways™.
```
Without these details, I might never be able to find the source for why I
added this.
Also, limit the line length in the body to 72 characters.
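If you want a nudge beyond your editor, Git's standard `commit-msg` hook can
warn about both limits. A hypothetical sketch (the hook path and argument are
standard Git behavior; the script itself is mine):
```
#!/usr/bin/env ruby
# .git/hooks/commit-msg -- Git passes the path to the message file as ARGV[0]
lines = File.readlines(ARGV[0]).reject { |line| line.start_with?('#') }
subject = lines.first.to_s.chomp
warn "Subject is #{subject.length} characters; aim for 50" if subject.length > 50
if lines.drop(1).any? { |line| line.chomp.length > 72 }
  warn 'Wrap commit body lines at 72 characters'
end
```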
### Do not use the --message option
Pretty much everyone I watch make a commit will do so directly at their command
prompt with `git commit -m`. Forcing yourself into an editor will encourage you
to add additional details about a commit message. Additionally, your editor can
be configured to alert you to the 50/72 rule. Simply add
`au FileType gitcommit set tw=72` to your vimrc to auto-wrap Git commit messages
at 72 characters if Vim is configured as your Git editor.
[vim-git](https://github.com/tpope/vim-git) is also a fantastic option that will
highlight based on these rules.
If you are struggling with breaking your `-m` habit, try creating a bash function
to prevent its usage.
```
function git {
  if [[ "$1" == "commit" && "$2" == "-m" ]]; then
    echo "Do not specify -m for commit"
  else
    command git "$@"
  fi
}
```
### Commit as if giving a command
[How to Write a Git Commit Message](https://chris.beams.io/posts/git-commit/)
made me consider the subject of my commits a little more. In bullet point 5,
Chris says "Use the imperative mood in the subject line." Considering Git itself
follows this rule, it made perfect sense to jump on board.
```
Merge branch 'bugfix/correct-spacing'
```
And
```
Revert "Implement person CRUD"
```
The best advice I picked up was to write your commit as if you are completing the
following sentence: "If applied, this commit will..."
### Rebase your branches, but leave master alone
Rebasing is a heated topic in the community but the one thing that everyone can
agree on is leaving master alone. Once your commit has made it there, it is
history and should never be changed. However, your branch is your story. You get
to tell it however you want so craft it in a way that is useful to the reader.
For me, this is particularly important because the second thing I do when
reviewing a new code base is to look at the commit history to see the story of
the application. Unfortunately, most are riddled with messages like "fixed typo" or
"asdf" and the history becomes unreadable.
Take care in crafting your git messages and you will find much more value in
your git logs!

4
archive/index.md Normal file

@@ -0,0 +1,4 @@
---
layout: archive
title: Archive
---

BIN
assets/gui-git.webp Normal file

Binary file not shown (added; 39 KiB).

BIN
atomaka.jpg Normal file

Binary file not shown (added; 26 KiB).

build.sh

@@ -1,9 +1,8 @@
 #!/bin/bash
-rm -rf build/
-mkdir -p build/
-cp index.html build/
-cp keybase.txt build/
-AWS_DEFAULT_REGION=us-east-2 aws s3 cp --recursive ./build s3://www.atomaka.com/
+mdformat --wrap 80 _posts/
+bundle exec jekyll build
+aws s3 sync _site s3://www.atomaka.com \
+  --size-only \
+  --storage-class REDUCED_REDUNDANCY

Binary file not shown (removed; 36 KiB).

BIN
favicon.ico Normal file

Binary file not shown (added; 15 KiB).

index.html

@@ -1,92 +0,0 @@
<html>
<head>
<title>Personal Website of Andrew Tomaka</title>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no" />
<meta name="description" content="Personal website of Andrew Tomaka">
<link rel="canonical" href="http://www.atomaka.com/">
<link href="https://fonts.googleapis.com/css?family=Source+Code+Pro" rel="stylesheet">
<style>
body {
background-color: #4995C7;
font-family: 'Source Code Pro', monospace;
}
ul {
display: flex;
flex-direction: row;
flex-wrap: wrap;
margin: 0;
padding: 0;
}
li {
list-style: none;
display: inline-block;
padding: 5px;
}
@media all and (max-width: 480px) {
ul {
flex-direction: column;
}
}
h1 {
color: #07507F;
}
a {
color: #096EB0;
text-decoration: none;
}
a:hover {
color: #2988B9;
}
.container {
height: 100%;
width: 100%;
display: flex;
align-items: center;
justify-content: center;
background-color: #72B0D9;
}
.content {
text-align: center;
}
.item {
max-width: 100%;
}
.contact {
position: absolute;
right: 15px;
bottom: 10px;
}
.color-primary-0 { color: #2980B9 } /* Main Primary color */
.color-primary-1 { color: #72B0D9 }
.color-primary-2 { color: #4995C7 }
.color-primary-3 { color: #096EB0 }
.color-primary-4 { color: #07507F }
</style>
</head>
<body>
<div class="contact"><a href="mailto:me@atomaka.com">contact</a></div>
<div class="container">
<div class="item top"></div>
<div class="item content">
<div class="title">
<h1>Andrew Tomaka</h1>
</div>
<nav>
<ul>
<li><a href="https://medium.com/@atomaka">writing</a></li>
<li><a href="https://stackoverflow.com/story/atomaka">working</a></li>
<li><a href="https://slides.com/atomaka">speaking</a></li>
<li><a href="https://github.com/atomaka">coding</a></li>
<li><a href="https://twitter.com/atomaka">tweeting</a></li>
</ul>
</nav>
</div>
<div class="item bottom"></div>
</div>
</body>
</html>

6
index.markdown Normal file

@@ -0,0 +1,6 @@
---
# Feel free to add content and custom Front Matter to this file.
# To modify the layout, see https://jekyllrb.com/docs/themes/#overriding-theme-defaults
layout: home
---

2
requirements.txt Normal file

@@ -0,0 +1,2 @@
mdformat
mdformat-frontmatter