diff --git a/.travis.yml b/.travis.yml
index bcc26ce16ec..28f72140ab4 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,32 +1,21 @@
language: python
+python:
+ - "3.6"
+
cache:
pip: true
- directories:
- - node_modules
-matrix:
- include:
- - python: "3.6"
-
-before_install:
- - echo "Installing Hugo 0.31.1"
-# - curl -sL https://deb.nodesource.com/setup_8.x | sudo -E bash -
-# - sudo apt install nodejs
- - wget https://github.com/gohugoio/hugo/releases/download/v0.31/hugo_0.31_Linux-64bit.deb
- - sudo dpkg -i hugo*.deb
- - hugo version
-# - sudo npm install --global gulp-cli
-# - npm install
-install:
- - echo "Install pytest and pip"
- - pip install -r ci/requirements.txt
+env:
+ - TEST_SUITE=vale
+ - TEST_SUITE=pytest
-before_script:
- - echo "Setting up local development server"
- - hugo server &
- - sleep 7
+before_install:
+ - if [ $TEST_SUITE == pytest ]; then
+ wget https://github.com/gohugoio/hugo/releases/download/v0.36/hugo_0.36_Linux-64bit.deb;
+ sudo dpkg -i hugo*.deb;
+ (hugo server &);
+ fi
+ - sleep 7
script:
- - echo "Checking for style guidelines"
- - python -m pytest -n 2
-
+ - ./ci/scripts/$TEST_SUITE.sh
\ No newline at end of file
diff --git a/.vale.ini b/.vale.ini
new file mode 100644
index 00000000000..bd57de3d914
--- /dev/null
+++ b/.vale.ini
@@ -0,0 +1,32 @@
+StylesPath = ci/vale/styles
+
+# The minimum alert level to display (suggestion, warning, or error).
+#
+# CI builds will only fail on error-level alerts.
+MinAlertLevel = warning
+
+# HTML tags to be ignored by Vale. `code` and `tt` are the default, but Linode
+# seems to use `strong` in a similar way -- e.g., `**docker build -t ubuntu**`,
+# which could trigger two style issues ("docker" and "ubuntu") but is actually
+# a command.
+IgnoredScopes = code, strong, tt
+
+# Specifies what Vale considers to be a boundary between words.
+WordTemplate = \s(?:%s)\s
+
+[*.md]
+# A Linode-specific style (see ci/vale/styles/Linode) that implements spelling
+# and capitalization rules.
+#
+# To add another style, just add it to the `StylesPath` and update the setting
+# below (e.g., `BasedOnStyles = Linode, AnotherStyle`).
+BasedOnStyles = Linode
+
+# Exclude `{{< file >}}`, `{{< file-excerpt >}}`, `{{< output >}}`,
+# and `{{< highlight ... >}}`.
+#
+# For a description (and unit tests) for these patterns see:
+# https://regex101.com/r/m9klBv/3/tests
+IgnorePatterns = (?s) *({{< output >}}.*?{{< ?/ ?output >}}), \
+(?s) *({{< ?file(?:-excerpt)? [^>]* ?>}}.*?{{< ?/ ?file(?:-excerpt)? ?>}}), \
+(?s) *({{< highlight \w+ >}}.*?{{< ?/ ?highlight >}})
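
The `IgnorePatterns` regexes above can also be sanity-checked locally with nothing but the Python standard library; a minimal sketch (the sample text is made up, the pattern is copied verbatim from the setting above):

```python
import re

# The `output` shortcode pattern from IgnorePatterns; (?s) lets `.` match
# newlines so a whole multi-line block is captured as one ignored region.
output_block = re.compile(r"(?s) *({{< output >}}.*?{{< ?/ ?output >}})")

sample = """Run the command and confirm the result:

{{< output >}}
Hello from the server
{{< /output >}}
"""

match = output_block.search(sample)
print(match.group(1))  # prints the full block the pattern captures
```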
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000000..bcb2eb73d4e
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,73 @@
+# Linode Code of Conduct
+
+## Our Pledge
+
+In order to encourage an open and welcoming environment, we as contributors and maintainers pledge to make participation in our project and our community a harassment-free experience for everyone, regardless of:
+
+* age
+* body size
+* disability
+* ethnicity
+* gender identity and expression
+* level of experience
+* nationality
+* personal appearance
+* race
+* religion
+* sexual identity and orientation
+
+## Our Standards
+
+You can contribute to creating a positive environment in many ways. For example, you can:
+
+* use welcoming and inclusive language
+* be respectful of differing viewpoints and experiences
+* accept constructive criticism gracefully
+* focus on what is best for the community
+* show empathy towards other community members
+* be helpful and understanding
+
+You should not:
+
+* use sexualized language or imagery
+* make unwelcome sexual advances
+* troll or make insulting or derogatory comments
+* make personal or political attacks
+* harass others, in public or private
+* publish others' private information, such as a physical or electronic address, without explicit permission
+* engage in any other conduct which could reasonably be considered bullying or inappropriate in a professional setting
+
+## Our Responsibilities
+
+As project maintainers, we are responsible for clarifying the standards of acceptable behavior, and we are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+We have the right and responsibility to remove, edit, or reject:
+
+* comments
+* commits
+* code
+* issues
+* other contributions that are not aligned with this code of conduct
+
+We also reserve the right to temporarily or permanently ban any contributor for other behaviors we deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This code of conduct applies whenever you are representing the project or community. For example, you may be:
+
+* working in a project space online or in public
+* using an official project email address
+* posting via an official social media account
+* participating in an online or offline event
+
+Project maintainers may further define and clarify representation of a project.
+
+## Enforcement
+
+You should report any instances of abusive, harassing, or otherwise unacceptable behavior to the project team at docs@linode.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain the anonymity of the reporter of an incident. We may post further details of specific enforcement policies separately.
+
+Project contributors who do not follow or enforce this code of conduct in good faith may face temporary or permanent consequences. These will be determined by members of the project's leadership.
+
+## Attribution
+
+This code of conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.4, and the [gov.uk open standards CODE_OF_CONDUCT.md](https://github.com/alphagov/open-standards/blob/master/CODE_OF_CONDUCT.md).
diff --git a/README.md b/README.md
index 10e8966d2e8..15004d660d2 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@

-# Guides and Tutorials [](https://creativecommons.org/licenses/by/4.0/)
+# Guides and Tutorials [](https://creativecommons.org/licenses/by/4.0/) [](http://www.firsttimersonly.com/)
@@ -20,7 +20,7 @@
## How can I write a guide?
-We are constantly looking to impove the quality of our library. See our [rotating list of suggested topics](https://www.linode.com/docs/contribute/).
+We are constantly looking to improve the quality of our library. See our [rotating list of suggested topics](https://www.linode.com/docs/contribute/).
More detailed instructions on submitting a pull request can be found [here](CONTRIBUTING.md).
diff --git a/ci/conftest.py b/ci/conftest.py
index 658fa4e1c00..c567c42dc81 100644
--- a/ci/conftest.py
+++ b/ci/conftest.py
@@ -21,23 +21,30 @@ def wrapper(md_filepath):
return wrapper
@pytest.fixture(scope='module', autouse=True)
-def md_index(path='.', extension='*.md'):
+def file_index(path='.', extension=None):
"""
Traverses root directory
"""
index = []
- exclude_dir = ['node_modules', 'archetypes']
- exclude_file = ['_index.md']
+ exclude_dir = ['node_modules', 'archetypes', '.git']
+ exclude_file = ['_index.md','.gitignore']
for root, dirnames, filenames in os.walk(path):
dirnames[:] = [d for d in dirnames if d not in exclude_dir]
- for filename in fnmatch.filter(filenames, extension):
+ if extension:
+ filter_ext = fnmatch.filter(filenames, extension)
+ else:
+ filter_ext = filenames #Filter nothing
+ for filename in filter_ext:
if filename in exclude_file:
continue
index.append(os.path.join(root, filename))
return index
-
-@pytest.fixture(params=md_index())
+@pytest.fixture(params=file_index(extension='*.md'))
def md_filepath(request):
return request.param
+@pytest.fixture(params=file_index(extension=None))
+def all_filepaths(request):
+ return request.param
+
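
For context, both fixtures are parametrized, so pytest generates one test case per discovered path; a small hypothetical test module (not part of this change) showing how either fixture would be consumed:

```python
import os

# Hypothetical checks illustrating the two parametrized fixtures defined in
# ci/conftest.py: pytest calls each test once per path yielded by file_index().

def test_no_spaces_in_filename(all_filepaths):
    """all_filepaths is a single path string; one test case per file in the repo."""
    assert ' ' not in os.path.basename(all_filepaths), 'File names should not contain spaces.'

def test_markdown_is_not_empty(md_filepath):
    """md_filepath is limited to *.md files via the extension='*.md' parameter."""
    assert os.path.getsize(md_filepath) > 0, 'Markdown guides should not be empty.'
```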
diff --git a/ci/scripts/pytest.sh b/ci/scripts/pytest.sh
new file mode 100755
index 00000000000..cb366f1e842
--- /dev/null
+++ b/ci/scripts/pytest.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+pip install -r ci/requirements.txt
+python -m pytest -n 2
diff --git a/ci/scripts/vale.sh b/ci/scripts/vale.sh
new file mode 100755
index 00000000000..6fd09b354f0
--- /dev/null
+++ b/ci/scripts/vale.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+go get github.com/ValeLint/vale
+vale --glob='*.{md}' docs
diff --git a/ci/test_extensions.py b/ci/test_extensions.py
new file mode 100644
index 00000000000..c8d7764b47e
--- /dev/null
+++ b/ci/test_extensions.py
@@ -0,0 +1,10 @@
+import pytest
+import os
+
+def test_extension(all_filepaths):
+ """
+ Tests that all file extensions are lowercase.
+ Ignores files without an extension.
+ """
+ filename, file_extension = os.path.splitext(all_filepaths)
+ assert file_extension == file_extension.lower(), 'File extensions must be lowercase.'
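
For reference, `os.path.splitext` keeps the dot with the extension and returns an empty string when a file has none, which is why extension-less files pass the assertion; a quick illustration (example paths are made up):

```python
import os

print(os.path.splitext('docs/assets/diagram.PNG'))  # ('docs/assets/diagram', '.PNG') -> fails the check
print(os.path.splitext('docs/assets/diagram.png'))  # ('docs/assets/diagram', '.png') -> passes
print(os.path.splitext('LICENSE'))                  # ('LICENSE', '') -> no extension, passes
```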
diff --git a/ci/test_filename.py b/ci/test_filename.py
index 7bd56b593dc..90aa5dbc6e1 100644
--- a/ci/test_filename.py
+++ b/ci/test_filename.py
@@ -4,7 +4,7 @@
# Cartesian product of file names and extensions
# e.g. README.txt, README.md, CHANGELOG.txt, CHANGELOG.md ...
file_extensions = ['txt', 'md']
-names = ['README', 'CHANGELOG', 'CONTRIBUTING', 'LICENSE']
+names = ['README', 'CHANGELOG', 'CONTRIBUTING', 'LICENSE', 'CODE_OF_CONDUCT']
exempt_files = [('.'.join(x)) for x in itertools.product(names, file_extensions)]
def test_filename(md_filepath):
diff --git a/ci/vale/dictionary.txt b/ci/vale/dictionary.txt
new file mode 100644
index 00000000000..903674fefce
--- /dev/null
+++ b/ci/vale/dictionary.txt
@@ -0,0 +1,1468 @@
+abc
+absecon
+abspath
+adc
+addon
+addons
+addr
+addressfield
+adduser
+adminpack
+adodb
+aes
+ahci0
+ahvz
+ajp
+aker
+alives
+allmasquerade
+allnodes
+allrouters
+amavis
+amavisd
+amd64
+amongst
+analytics
+anonymize
+anonymizing
+ansible
+antispam
+ap
+apache2
+appimage
+appletalk
+appserver
+apxs
+aren
+argv
+arping
+aske
+async
+authdaemonrc
+authlib
+authmodulelist
+authmysql
+authmysqlrc
+authn
+authomator
+authy
+autocomplete
+autocompletion
+autoconfiguration
+autoconfigure
+autohosting
+autojoin
+autojump
+autologin
+autologout
+automagically
+automattic
+autorelabel
+autorun
+autoscaling
+autossh
+autostart
+autovacuum
+auxprop
+avz
+awk
+awstats
+azc
+backend
+backends
+backported
+backporting
+backports
+backreference
+backreferences
+backtick
+backticks
+bak
+bampton
+bandwdith
+bandwith
+barcode
+baremetal
+base64
+basearch
+bashdoor
+bashrc
+bc
+berkshelf
+bgrewriteaof
+binlog
+bitbucket
+bitrate
+blocklists
+boonex
+bootable
+bootloader
+bootup
+bootups
+bounceback
+brackley
+bram
+broswer
+bs4
+bugtracker
+bukkit
+bundler
+bungeecord
+burndown
+byrequests
+bzip2
+bzip2
+bzrtools
+c2s
+cabd
+cacert
+caddy
+caddyfile
+caker
+cakey
+calc
+captcha
+carootcert
+celeryd
+centos
+centos7
+ceph
+cer
+certbot
+certcheck
+cfg
+cgi
+changelog
+changelogs
+changeme
+changeset
+chatrooms
+cheatsheet
+checkboxes
+checkmark
+checksummed
+checksums
+chefspec
+chery
+chgrp
+chmod
+chown
+christoph
+chroot
+chrooted
+citad
+cjones
+clearspace
+cli
+clickboard
+clickjacking
+client1
+client2
+clojure
+clonefrom
+clozure
+cls
+cluebringer
+cluster1
+clustermgr
+clustermgrguest
+cmd
+cmdmod
+cmusieve
+cnf
+codebase
+codeblock
+codec
+com2sec
+comodo
+conda
+config
+configs
+configtest
+confluece
+convolutional
+coose
+copyfrom
+copyto
+coreutils
+corporateclean
+couldn
+cpanminus
+cpu
+cqlsh
+cqlshrc
+craftbukkit
+craigslist
+crashingdaily
+createdt
+createfromstackscript
+cron
+crond
+cronie
+cronjob
+cronjobs
+crontab
+crt
+crypto
+csr
+css
+csum
+csv
+ctools
+ctrl
+customizability
+customizable
+cyberduck
+cyg
+cygwin
+cz
+d39a
+d693
+daemonization
+daemonize
+daemonized
+daemonizes
+daemonizing
+dahdi
+datadir
+dataset
+datasets
+datasource
+datastax
+datastore
+datetime
+dav
+davfs
+davfs2
+db8
+dbconfig
+dbd
+dbdir
+dbfiles
+dbname
+dbserver
+dcs
+deathmatch
+debconf
+declaratively
+deduplication
+defragment
+deprovisioning
+descolada
+descr
+dest
+dev
+devel
+devops
+devtmpfs
+df
+dhclient
+dialogs
+diavel
+didn
+diffie
+digium
+diplays
+dircaps
+directadmin
+dired
+displayfeatures
+disqus
+distro
+distros
+dists
+django
+dkim
+dmg
+dmitriy
+dmz
+dnf
+dns
+dnsleaktest
+dnsmasq
+dockercompose
+dockerfile
+dockerfiles
+dockerize
+dockerized
+dokku
+domain1
+domain2
+domain3
+domainkey
+domainname
+dotdeb
+dotfiles
+doveadm
+dpkg
+dport
+dropdown
+drupal
+drush
+drwxr
+dshield
+dsn
+ducati
+dust2
+dust2
+dvcs
+e2fsck
+ea04
+ead
+ecommerce
+ecute
+edu
+efi
+ejabberd
+el6
+el7
+elasticsearch
+elgg
+elpaso
+email2email
+ender
+enix
+entrypoint
+env
+envs
+epel
+erb
+erlang
+esc
+essage
+estart
+etcd
+etcd0
+eth0
+everytime
+evhost
+example1
+example2
+examplegroup
+examplepassword
+examplerole
+examplesitename
+exampleuser
+exe
+executables
+exim
+expirations
+ext2
+ext3
+ext4
+ext4
+facter
+facto
+fail2ban
+fail2ban
+failover
+failregex
+failregexs
+failsafes
+falko
+fastcgi
+fastmail
+fastopen
+fauxhai
+fb4c
+fcgi
+fcgiwrap
+fcrontab
+fd86
+fdisk
+fdriver
+fe70
+fedf
+fetchmail
+fetchmailrc
+ff02
+fi
+fiiiile
+file1
+filebeat
+filecap
+filecaps
+filemode
+filepath
+filepaths
+fileserver
+filesystem
+filesystems
+filezilla
+filimonov
+finnix
+firewalld
+firstsite
+flatpress
+flyspray
+fpm
+fragging
+framesets
+freedns
+friendster
+frontend
+fs
+fsck
+fstab
+galera
+gamemode
+gameplay
+gcc
+gcm
+gem1
+gemfile
+genmask
+gentoolkit
+geoip
+geolocating
+geolocation
+getmail
+getpwent
+gfx
+gfxmode
+girocco
+github
+gitignore
+gitlab
+gitolite
+gitosis
+gitweb
+glibc
+glish
+globals
+globalsieverc
+globalsign
+globbing
+gluster
+gmail
+gmod
+gnulinux
+gnutls
+gogs
+golang
+goto
+gpc
+gpg
+grafana
+graylog
+graylog2
+graylog2
+greenbone
+greylist
+greylisting
+griefing
+groot
+grsecurity
+gsa
+gsad
+guacd
+guarisma
+gui
+gunicorn
+gz
+gzio
+gzip
+gzipped
+gzipping
+h2
+h3
+h4x0r123
+hackage
+hadoop
+halls3
+haproxy
+hardcoded
+hardcopy
+hardlimit
+hashmark
+hasn
+hba
+hd0
+hdfs
+heartbleed
+hiera
+hilights
+hl2
+hl2
+hmac
+hoc
+homebrew
+host1
+host2
+hostames
+hostname
+hostnames
+hotspots
+howto
+howtoforge
+howtogeek
+href
+htaccess
+htcache
+htdocs
+html
+htmldir
+htop
+htpasswd
+http
+httpchk
+httpd
+https
+hulu
+hvc0
+hybla
+hyperefficient
+hypervisor
+hypervisors
+i386
+icanhazip
+iceweasel
+icinga
+icingacli
+icmp
+icmpv6
+ident
+identrust
+idlist
+ie
+iface
+ifconfig
+ifdown
+ifnames
+iframe
+iframes
+ifup
+ikiwiki
+im
+imap
+imapd
+imaps
+img
+immutant
+impor
+inet
+inet6
+ini
+initializer
+initramfs
+initrd
+inittab
+inode
+inotify
+insmod
+interdomain
+interruptible
+introducer
+introducers
+iodef
+iotop
+ip
+ip4
+ip6
+ip6tables
+ipchains
+ipset
+ipsets
+iptabes
+iptables
+ipv4
+ipv6
+ipwl
+irc
+ircname
+irssi
+isn
+iso
+isoformat
+isp
+issuewild
+iterable
+itk
+java6
+java6
+javac
+javascript
+jdbc
+jdk
+jenkinsfile
+jetpack
+jk
+jks
+jobd
+joomla
+journaled
+journaling
+jpg
+jre
+js
+json
+jupyter
+kakfa
+kbits
+keepalive
+keepalived
+keras
+kerberos
+kerberos5
+keychain
+keyfiles
+keygen
+keymap
+keypair
+keypairs
+keysize
+keystore
+kibana
+kickban
+kickbans
+kickstarter
+killall
+klei
+kloth
+kloxo
+knowledgebase
+konversation
+kube
+kubeadm
+kubectl
+kubelet
+kubelets
+kubernetes
+labelled
+lambdabunker
+laravel
+lastest
+latestet
+lbmethod
+lda
+ldap
+leiningen
+letsencrypt
+lf
+li181
+li263
+lib32
+libaprutil1
+libc
+libc6
+libcurl
+libev
+libgcc1
+libphp
+libpri
+libracy
+libsasl2
+libuv
+lifecycle
+lighthttpd
+lighttpd
+lighty
+ligttpd
+linode
+linodes
+lish
+listdir
+listserv
+listservs
+lksemel
+lo0
+loadavg
+loadout
+localdomain
+localhost
+lockdown
+logallrefupdates
+logdir
+logfile
+logfiles
+loglevel
+logrotate
+logstash
+logwatch
+longview
+lookups
+loopback
+lsd
+lst
+lsyncd
+lua
+lucene
+luminus
+lxml
+lzma
+lzopio
+magento
+maildir
+maildirs
+maildrop
+maillbox
+mailname
+mailq
+mailserver
+mailuser
+mailutils
+maitis
+managesieve
+mandriva
+manyof
+mapreduce
+mariadb
+maskname
+mathjax
+maxconn
+maxdepth
+mbit
+mbits
+mbox
+mbstrings
+mcrypt
+mcy
+md5
+md5sum
+mdev
+mediawiki
+memcached
+menuentry
+mesos
+metacity
+metapackage
+metricbeat
+microblogs
+microservice
+microservices
+microsystems
+microweb
+minecraft
+miniconda
+miniconda3
+minification
+minimalistic
+misconfiguration
+misconfigurations
+misconfigured
+mkdir
+mnesia
+moby
+modsecurity
+mongodb
+monit
+moolenaar
+motd
+mountpoint
+mouseover
+mpm
+msg
+msgid
+msgpack
+msmtp
+mstmp
+mtr
+mtu
+multiarch
+multibyte
+multicast
+multicraft
+multilib
+multiport
+multiuser
+munin
+musl
+mutliverse
+myapp
+myblog
+mydestination
+mydomain
+myhostname
+mynewdomain
+mypassword
+myproject
+myserver
+mysql
+mysqlclient10
+mysqld
+mysqldefault
+mysqldump
+mytestdb
+myuser
+nagios
+nagiosadmin
+nameserver
+nameservers
+namespace
+nano
+natively
+ncurses
+neo
+neomake
+netblocks
+netboot
+netconfig
+netdev
+netfilter
+netfs
+netifrc
+netloc
+netmask
+networkd
+newgrp
+newtag
+nextcloud
+nexthop
+ng
+nginx
+ngrok
+ngx
+nickcolor
+nixcraft
+nixpkgs
+nmap
+noarch
+node1
+node2
+nodebalancer
+nodebalancers
+nodejs
+nodemanager
+nodename
+nodeport
+nodesdirect
+nodesource
+nofailover
+noloadbalance
+noninteractive
+nonprivileged
+nonrecursive
+nosync
+novell
+npm
+npmjs
+nrcpt
+ns1
+ns2
+nsd4
+nsd4
+ntopng
+ntpd
+ntpong
+num
+numpy
+nvim
+odoo
+oftc
+ohai
+ok
+oldstable
+ommand
+oneof
+onone
+ons
+ontinue
+opencart
+opendkim
+openfire
+openjdk
+openspf
+openssl
+openvas
+openvasmd
+openvassd
+openvpn
+openvz
+openzipkin
+ossec
+ostemplate
+osx
+otomen
+ournaling
+overcommit
+overwatch
+ovpn
+owadb
+owncloud
+pacman
+pagent
+pagespeed
+pakages
+pandoc
+param
+paramiko
+params
+paravirt
+paravirtualization
+paravirtualized
+parentheticals
+partitionless
+partyline
+passdb
+passlib
+passphraseless
+passwd
+passwdcolumn
+passwdfile
+passwordless
+patroni
+pem
+pepi
+percona
+perfom
+permalink
+permalinks
+pflogsumm
+pgpass
+phar
+pharo
+php
+php5
+php7
+phpmyadmin
+phusion
+pid
+pidfile
+piwik
+pki
+plaintext
+plesk
+plex
+plone
+ploop
+pluggable
+png
+pocketmine
+polcy
+pop3
+pop3d
+pop3s
+portainer
+portmapper
+postgres
+postgresql
+postgresql0
+postmap
+postrouting
+ppa
+pparadis
+ppc64el
+pre
+pre4
+precompiled
+preconfigured
+prefork
+preinstalled
+premade
+prepend
+prepended
+prepending
+preprocessing
+prerouting
+prestashop
+pritunl
+procmail
+procs
+programmatically
+prosodyctl
+proto
+proxied
+proxying
+pscp
+psql
+psudo
+psycop2
+pty
+publickey
+puppetd
+puppetmaster
+puttygen
+pv
+pvgrub
+pwcheck
+px
+py
+pyamqp
+pyinotify
+pypiserver
+python3
+pythonic
+qmail
+qmgr
+qualys
+quickconnect
+quicklisp
+quickstart
+ramdisk
+rbenv
+rc
+rce
+rcon
+rdbadmin
+rdiff
+readcaps
+readme
+readonly
+readwrite
+realtime
+reconnections
+recursing
+red5
+reddit
+redhat
+redir
+redis
+redmin
+redmine
+reenable
+reenabling
+refspec
+reimported
+reindexing
+reinstalls
+releasever
+remi
+remmina
+remotehost
+replset
+repo
+repos
+repositoryformatversion
+resharding
+resilvering
+resolv
+resolvconf
+resque
+restapi
+reutilize
+richlanguage
+rimap
+rkt
+rmem
+ro
+roadmap
+rocommunity
+roundcube
+routable
+rpaf
+rpc
+rpmforge
+rsa
+rss
+rstudio
+rsync
+rsyslog
+rtf
+rtt
+rubocop
+rubygems
+rubygems1
+ruleset
+rulesets
+rundir
+runlevels
+runtime
+runtimes
+rw
+rwx
+rwxr
+s1
+s2s
+s3cr37
+s3cret
+sakila
+sasl
+sasl2
+saslauth
+saslauthd
+sasldb
+sbin
+sbopkg
+scgi
+schemas
+scm
+scp
+scrapy
+scriptfilename
+scrum2b
+sda
+sda1
+sdb
+sdc
+seafile
+seahub
+searchbots
+searchterm
+secondsite
+secretpassword
+sed
+selinux
+sendable
+sendmail
+seo
+serv
+server1
+servername
+serverpassword
+servlet
+servlets
+setattr
+setenvif
+setguid
+setuid
+setuptools
+sfadmin
+sfproject
+sfroot
+sftp
+sha256
+shadowsocks
+sharded
+sharding
+shellshock
+shinagawa1
+shortcode
+shortcodes
+shortguide
+shortguides
+shortname
+shoutcast
+simfs
+sintra
+sitename
+slackpkg
+smartcard
+smartcards
+smtp
+smtpd
+smux
+snakeoil
+snapsnot
+snmp
+snmpd
+softlimit
+solaris
+solr
+somaxconn
+someuser
+spamassassin
+spamd
+spamhaus
+spammy
+sparklines
+spectre
+speedtest
+spf1
+spork
+sql
+sqlplus
+squirrelmail
+src
+srv
+sshd
+sshfs
+ssi
+ssl
+stackscript
+stackscripts
+stalkyard
+starttls
+stateful
+statusbar
+stderr
+steamlab
+stopwait
+su
+subcommand
+subcommands
+subfile
+subfiles
+subfolder
+subkey
+subkeys
+submenu
+submodules
+subnet
+subnets
+subnetwork
+subreddits
+subtab
+subtransit
+sudo
+sudoers
+suhosin
+suid
+sukharev
+supervisord
+surbma
+svn
+symantec
+symfony
+symlink
+symlinking
+symlinks
+symvers
+syncookies
+syntastic
+syntaxes
+sys
+sysconfig
+sysctl
+syslog
+systemctl
+systemd
+systemname
+targ
+tasksel
+taskserver
+taskwarrior
+tbz
+tcp
+tcpwrappers
+teamspeak
+templating
+tensorflow
+terraria
+testdb
+testfile
+testkeys
+testparam
+testuser
+tf
+tgz
+thawte
+theano
+theming
+thingsboard
+tika
+timeframe
+timespan
+timewait
+timey
+timme
+tinc
+tincd
+tinydb
+titlebar
+tld
+tls
+tlz
+tmp
+tmux
+todo
+togami
+tokenized
+tokyo2
+tomacat
+tomcat6
+traceroute
+trackbar
+trafic
+transcoder
+truststore
+tt
+ttl
+ttpd
+ttrss
+tun0
+tune2fs
+tunnelblick
+turtl
+tw
+txt
+txz
+ucd
+ufw
+ui
+uidl
+un
+unban
+Unbans
+unbilled
+unbundle
+uncheck
+unchecking
+uncomment
+uncommented
+uncommenting
+uncompress
+unecrypted
+unencrypted
+unformatted
+unforseen
+unicast
+unintuitive
+uniq
+unmerged
+unmount
+unmounting
+unncessary
+unoptimized
+unpatched
+unregister
+unrendered
+unsets
+unsetting
+untar
+untracked
+untruncated
+untrusted
+unversioned
+uptime
+uri
+url
+urllib
+urllib3
+urlopen
+urlparse
+urls
+useradd
+usercolumn
+userdb
+usergroup
+userland
+usersor
+usr
+ust
+utils
+uuid
+uwls
+uwsgi
+v1
+v2
+v4
+v6
+vagrantfile
+validator
+varnishlog
+vcs
+vdev
+vdevs
+ve
+ver
+verisign
+vhost
+vhosts
+vimrc
+virb
+virtio
+virtualenv
+virtualenvwrapper
+virtualhost
+virtualizations
+virtualized
+vlogger
+vmail
+vmem
+vmlinuz
+vmstat
+vnc
+vset
+vswap
+vz
+vznetaddbr
+walkthrough
+wasn
+wav
+wazuh
+webadmin
+webalizer
+webapp
+webapps
+webcit
+webdata1
+webmail
+webmin
+webpages
+webpy
+webroot
+webroots
+webserver
+webservers
+webservice
+websocket
+webtatic
+webupd8
+weechat
+welcometext
+wercker
+werkzug
+wg
+wget
+whatsmyip
+whitelist
+whitelisted
+whitelisting
+whitespace
+whitespaces
+whois
+wi
+widgits
+wildfly
+wimey
+windowlist
+wireguard
+wkhtmltopdf
+wll
+wlp6s0
+wmem
+wordcount
+wordpress
+worker1
+worker2
+wp
+wpuser
+writecaps
+ws
+wsgi
+wsrep
+www
+wx
+xabbix
+xap
+xen
+xenial
+xerus
+xkcd
+xlsx
+xlsxwriter
+xmail
+xming
+xml
+xmpp
+xms
+xmx
+xr
+xrdb
+xresources
+xsession
+xsetroot
+xstartup
+xtables
+xvda
+xvdb
+xvdc
+xxen
+xy
+xzio
+xzip
+yaml
+yellowdog
+yesod
+yml
+yoast
+yourdomain
+yourdomainorsubdomainhere
+yourname
+yoursite
+yubico
+yubikey
+zabbix
+zgrep
+zile
+zimbra
+zipkin
+zipkinhost
+zkcluster1root
+zlib
+znc
+zonefiles
+zope
+zpool
+zsh
+zxvf
\ No newline at end of file
diff --git a/ci/vale/styles/Linode/Spelling.yml b/ci/vale/styles/Linode/Spelling.yml
new file mode 100644
index 00000000000..fd3ff4c0e46
--- /dev/null
+++ b/ci/vale/styles/Linode/Spelling.yml
@@ -0,0 +1,4 @@
+extends: spelling
+message: "Did you really mean '%s'?"
+level: error
+ignore: ci/vale/dictionary.txt
diff --git a/ci/vale/styles/Linode/Terms.yml b/ci/vale/styles/Linode/Terms.yml
new file mode 100644
index 00000000000..e18d5765dcb
--- /dev/null
+++ b/ci/vale/styles/Linode/Terms.yml
@@ -0,0 +1,30 @@
+extends: substitution
+message: Use '%s' instead of '%s'.
+level: error
+ignorecase: true
+swap:
+ '(?:LetsEncrypt|Let''s Encrypt)': Let's Encrypt
+ '(?:ReHat|RedHat)': RedHat
+ 'Mac ?OS ?X': Mac OS X
+ 'mongoDB': MongoDB
+ 'node[.]?js': Node.js
+ 'Post?gr?e(?:SQL)': PostgreSQL
+ 'java[ -]?scripts?': JavaScript
+ centOS: CentOS
+ debian: Debian
+ fedora: Fedora
+ gentoo: Gentoo
+ homebrew: Homebrew
+ linode cli: Linode CLI
+ linode manager: Linode Manager
+ linode: Linode
+ longview: Longview
+ macOS: macOS
+ miniconda: Miniconda
+ nodebalancer: NodeBalancer
+ nodebalancers: NodeBalancers
+ openSUSE: OpenSUSE
+ slackware: Slackware
+ ubuntu: Ubuntu
+ wordpress: WordPress
+ yaml: YAML
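
Conceptually, each `swap` entry pairs a case-insensitive pattern with a preferred spelling; a rough Python sketch of the idea (only an illustration of what the rule expresses, not how Vale itself is implemented):

```python
import re

# A few entries from the swap table above, treated as regex -> preferred term.
swap = {
    r'node[.]?js': 'Node.js',
    r'linode manager': 'Linode Manager',
    r'wordpress': 'WordPress',
}

text = 'Install wordpress on your linode manager and manage it with nodejs.'
for pattern, preferred in swap.items():
    for match in re.finditer(pattern, text, flags=re.IGNORECASE):
        if match.group(0) != preferred:
            # Mirrors the rule's message: Use '%s' instead of '%s'.
            print(f"Use '{preferred}' instead of '{match.group(0)}'.")
```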
diff --git a/docs/applications/big-data/big-data-in-the-linode-cloud-streaming-data-processing-with-apache-storm.md b/docs/applications/big-data/big-data-in-the-linode-cloud-streaming-data-processing-with-apache-storm.md
index 0dcb40a144a..f2982dce9a9 100644
--- a/docs/applications/big-data/big-data-in-the-linode-cloud-streaming-data-processing-with-apache-storm.md
+++ b/docs/applications/big-data/big-data-in-the-linode-cloud-streaming-data-processing-with-apache-storm.md
@@ -138,9 +138,9 @@ You only need to run `source` on this file once in a single terminal session, un
{{< /note >}}
- `DATACENTER`:
- This specifies the Linode datacenter where the Cluster Manager Linode is created. Set it to the ID of the datacenter that is nearest to your location, to reduce network latency. It's also recommended to create the cluster manager node in the same datacenter where the images and cluster nodes will be created, so that it can communicate with them using low latency private IP addresses and reduce data transfer usage.
+ This specifies the Linode data center where the Cluster Manager Linode is created. Set it to the ID of the data center that is nearest to your location, to reduce network latency. It's also recommended to create the cluster manager node in the same data center where the images and cluster nodes will be created, so that it can communicate with them using low latency private IP addresses and reduce data transfer usage.
- To view the list of datacenters and their IDs:
+ To view the list of data centers and their IDs:
source ~/storm-linode/api_env_linode.conf
~/storm-linode/linode_api.py datacenters table
@@ -256,7 +256,7 @@ Creating a new Storm cluster involves four main steps, some of which are necessa
### Create a Zookeeper Image
-A *Zookeeper image* is a master disk image with all necessary Zookeeper softwares and libraries installed. We'll create our using [Linode Images](/docs/platform/linode-images) The benefits of using a Zookeeper image include:
+A *Zookeeper image* is a master disk image with all necessary Zookeeper software and libraries installed. We'll create ours using [Linode Images](/docs/platform/linode-images). The benefits of using a Zookeeper image include:
- Quick creation of a Zookeeper cluster by simply cloning it to create as many nodes as required, each a perfect copy of the image
- Distribution packages and third party software packages are identical on all nodes, preventing version mismatch errors
@@ -315,9 +315,9 @@ The values represented in this guide are current as of publication, but are subj
- `DATACENTER_FOR_IMAGE`
- The Linode datacenter where this image will be created. This can be any Linode datacenter, but cluster creation is faster if the image is created in the same datacenter where the cluster will be created. It's also recommended to create the image in the same datacenter as the Cluster Manager Linode. Select a datacenter that is geographically close to your premises, to reduce network latency. If left unchanged, the Linode will be created in the Newark data center.
+ The Linode data center where this image will be created. This can be any Linode data center, but cluster creation is faster if the image is created in the same data center where the cluster will be created. It's also recommended to create the image in the same data center as the Cluster Manager Linode. Select a data center that is geographically close to your premises, to reduce network latency. If left unchanged, the Linode will be created in the Newark data center.
- This value can either be the datacenter's ID or location or abbreviation. To see a list of all datacenters:
+ This value can either be the data center's ID or location or abbreviation. To see a list of all data centers:
./zookeeper-cluster-linode.sh datacenters api_env_linode.conf
@@ -456,11 +456,11 @@ When creating a cluster, you should have `clustermgr` authorization to the Clust
- `DATACENTER_FOR_CLUSTER`
- The Linode datacenter where the nodes of this cluster will be created. All nodes of a cluster have to be in the same datacenter; they cannot span multiple datacenters since they will use private network traffic to communicate.
+ The Linode data center where the nodes of this cluster will be created. All nodes of a cluster have to be in the same data center; they cannot span multiple data centers since they will use private network traffic to communicate.
- This can be any Linode datacenter, but cluster creation may be faster if it is created in the same datacenter where the image and Cluster Manager Linode are created. It is recommended to select a datacenter that is geographically close to your premises to reduce network latency.
+ This can be any Linode data center, but cluster creation may be faster if it is created in the same data center where the image and Cluster Manager Linode are created. It is recommended to select a data center that is geographically close to your premises to reduce network latency.
- This value can either be the datacenter's ID or location or abbreviation. To see a list of all datacenters:
+ This value can either be the data center's ID or location or abbreviation. To see a list of all data centers:
./zookeeper-cluster-linode.sh datacenters api_env_linode.conf
@@ -520,9 +520,9 @@ When creating a cluster, you should have `clustermgr` authorization to the Clust
- `PUBLIC_HOST_NAME_PREFIX`
- Every Linode in the cluster has a *public IP address*, which can be reached from anywhere on the Internet, and a *private IP address*, which can be reached only from other nodes of the same user inside the same datacenter.
+ Every Linode in the cluster has a *public IP address*, which can be reached from anywhere on the Internet, and a *private IP address*, which can be reached only from other nodes of the same user inside the same data center.
- Accordingly, every node is given a *public hostname* that resolves to its public IP address. Each node's public hostname will use this value followed by a number (for example, `public-host1`, `public-host2`, etc.) If the cluster manager node is in a different Linode datacenter from the cluster nodes, it uses the public hostnames and public IP addresses to communicate with cluster nodes.
+ Accordingly, every node is given a *public hostname* that resolves to its public IP address. Each node's public hostname will use this value followed by a number (for example, `public-host1`, `public-host2`, etc.) If the cluster manager node is in a different Linode data center from the cluster nodes, it uses the public hostnames and public IP addresses to communicate with cluster nodes.
@@ -534,7 +534,7 @@ When creating a cluster, you should have `clustermgr` authorization to the Clust
- `CLUSTER_MANAGER_USES_PUBLIC_IP`
- Set this value to `false` if the cluster manager node is located in the *same* Linode datacenter as the cluster nodes. This is the recommended value. Change to `true` **only** if the cluster manager node is located in a *different* Linode datacenter from the cluster nodes.
+ Set this value to `false` if the cluster manager node is located in the *same* Linode data center as the cluster nodes. This is the recommended value. Change to `true` **only** if the cluster manager node is located in a *different* Linode data center from the cluster nodes.
{{< caution >}}
It's important to set this correctly to avoid critical cluster creation failures.
@@ -642,9 +642,9 @@ The values represented in this guide are current as of publication, but are subj
- `DATACENTER_FOR_IMAGE`
- The Linode datacenter where this image will be created. This can be any Linode datacenter, but cluster creation is faster if the image is created in the same datacenter where the cluster will be created. It's also recommended to create the image in the same datacenter as the Cluster Manager Linode. Select a datacenter that is geographically close to you to reduce network latency.
+ The Linode data center where this image will be created. This can be any Linode data center, but cluster creation is faster if the image is created in the same data center where the cluster will be created. It's also recommended to create the image in the same data center as the Cluster Manager Linode. Select a data center that is geographically close to you to reduce network latency.
- This value can either be the datacenter's ID or location or abbreviation. To see a list of all datacenters:
+ This value can either be the data center's ID or location or abbreviation. To see a list of all data centers:
./zookeeper-cluster-linode.sh datacenters api_env_linode.conf
@@ -746,7 +746,7 @@ The values represented in this guide are current as of publication, but are subj
If the process fails, ensure that you do not already have an existing Storm image with the same name in the Linode Manager. If you do, delete it and run the command again, or recreate this image with a different name.
{{< note >}}
-During this process, a short-lived 2GB linode is created and deleted. This will entail a small cost in the monthly invoice and trigger an event notification email to be sent to the address you have registered with Linode. This is expected behavior.
+During this process, a short-lived 2GB Linode is created and deleted. This will entail a small cost in the monthly invoice and trigger an event notification email to be sent to the address you have registered with Linode. This is expected behavior.
{{< /note >}}
### Create a Storm Cluster
@@ -772,11 +772,11 @@ When creating a cluster, you should have `clustermgr` authorization to the Clust
- `DATACENTER_FOR_CLUSTER`
- The Linode datacenter where the nodes of this cluster will be created. All nodes of a cluster have to be in the same datacenter; they cannot span multiple datacenters since they will use private network traffic to communicate.
+ The Linode data center where the nodes of this cluster will be created. All nodes of a cluster have to be in the same data center; they cannot span multiple data centers since they will use private network traffic to communicate.
- This can be any Linode datacenter, but cluster creation may be faster if it is created in the same datacenter where the image and Cluster Manager Linode are created. It is recommended to select a datacenter that is geographically close to your premises to reduce network latency.
+ This can be any Linode data center, but cluster creation may be faster if it is created in the same data center where the image and Cluster Manager Linode are created. It is recommended to select a data center that is geographically close to your premises to reduce network latency.
- This value can either be the datacenter's ID or location or abbreviation. To see a list of all datacenters:
+ This value can either be the data center's ID or location or abbreviation. To see a list of all data centers:
./zookeeper-cluster-linode.sh datacenters api_env_linode.conf
@@ -850,9 +850,9 @@ When creating a cluster, you should have `clustermgr` authorization to the Clust
- `NIMBUS_NODE_PUBLIC_HOSTNAME`, `SUPERVISOR_NODES_PUBLIC_HOSTNAME_PREFIX` and `CLIENT_NODES_PUBLIC_HOSTNAME_PREFIX`
- Every Linode in the cluster has a *public IP address*, which can be reached from anywhere on the Internet, and a *private IP address*, which can be reached only from other nodes of the same user inside the same datacenter.
+ Every Linode in the cluster has a *public IP address*, which can be reached from anywhere on the Internet, and a *private IP address*, which can be reached only from other nodes of the same user inside the same data center.
- Accordingly, every node is given a *public hostname* that resolves to its public IP address. Each node's public hostname will use this value followed by a number (for example, `public-host1`, `public-host2`, etc.) If the cluster manager node is in a different Linode datacenter from the cluster nodes, it uses the public hostnames and public IP addresses to communicate with cluster nodes.
+ Accordingly, every node is given a *public hostname* that resolves to its public IP address. Each node's public hostname will use this value followed by a number (for example, `public-host1`, `public-host2`, etc.) If the cluster manager node is in a different Linode data center from the cluster nodes, it uses the public hostnames and public IP addresses to communicate with cluster nodes.
@@ -864,7 +864,7 @@ When creating a cluster, you should have `clustermgr` authorization to the Clust
- `CLUSTER_MANAGER_USES_PUBLIC_IP`
- Set this value to `false` if the cluster manager node is located in the *same* Linode datacenter as the cluster nodes. This is the recommended value and is also the default. Change to `true` **only** if the cluster manager node is located in a *different* Linode datacenter from the cluster nodes.
+ Set this value to `false` if the cluster manager node is located in the *same* Linode data center as the cluster nodes. This is the recommended value and is also the default. Change to `true` **only** if the cluster manager node is located in a *different* Linode data center from the cluster nodes.
{{< caution >}}
It's important to set this correctly to avoid critical cluster creation failures.
diff --git a/docs/applications/big-data/how-to-install-and-configure-a-redis-cluster-on-ubuntu-1604.md b/docs/applications/big-data/how-to-install-and-configure-a-redis-cluster-on-ubuntu-1604.md
index c740f0855d2..bc369afdedb 100644
--- a/docs/applications/big-data/how-to-install-and-configure-a-redis-cluster-on-ubuntu-1604.md
+++ b/docs/applications/big-data/how-to-install-and-configure-a-redis-cluster-on-ubuntu-1604.md
@@ -2,13 +2,14 @@
author:
name: Sam Foo
email: docs@linode.com
-description: 'Learn to set up a Redis cluster using three Linode servers and promoting a slave to become a master node with this guide.'
+description: 'Learn to set up a Redis cluster using three Linode servers and promoting a slave to become a master node with this guide. Redis is an in-memory key/value store offering high performance for caching and more.'
+og_description: 'Learn to set up a Redis cluster using three Linode servers and promoting a slave to become a master node with this guide. Redis is an in-memory key/value store offering high performance for caching and more.'
keywords: ["redis cluster installation", "data store", "cache", "sharding"]
license: '[CC BY-ND 4.0](http://creativecommons.org/licenses/by-nd/4.0)'
aliases: ['applications/big-data/redis-cluster']
-modified: 2017-08-14
+modified: 2018-08-14
modified_by:
- name: Linode
+ name: Sam Foo
published: 2017-08-14
title: 'How to Install and Configure a Redis Cluster on Ubuntu 16.04'
external_resources:
@@ -16,10 +17,12 @@ external_resources:
- '[Install and Configure Redis on CentOS 7](/docs/databases/redis/install-and-configure-redis-on-centos-7)'
---
-
+
Redis clusters have grown to be a popular tool for caches, queues, and more because of its potential for scalability and speed. This guide aims to create a cluster using three Linodes to demonstrate sharding. Then, you will promote a slave to a master - insurance, in the event of a failure.
+Redis, as an in-memory store, allows for extremely fast operations such as counting, caching, and queuing. A cluster setup greatly increases the reliability of Redis by reducing single points of failure.
+
Prior to starting, we recommend you familiarize yourself with the following:
* [Firewall settings using iptables or ufw](/docs/security/firewalls/configure-firewall-with-ufw)
diff --git a/docs/applications/big-data/how-to-move-machine-learning-model-to-production.md b/docs/applications/big-data/how-to-move-machine-learning-model-to-production.md
index ee852696772..a8597f006f8 100644
--- a/docs/applications/big-data/how-to-move-machine-learning-model-to-production.md
+++ b/docs/applications/big-data/how-to-move-machine-learning-model-to-production.md
@@ -4,7 +4,7 @@ author:
email: docs@linode.com
description: 'This guide shows how to use an existing deep learning model as part of a production application. A pre-trained model is included as an API endpoint for a Flask app.'
keywords: ["deep learning", "big data", "python", "keras", "flask", "machine learning", "neural networks"]
-og_description: 'Use an pre-trained deep learning model as part of a production application.'
+og_description: 'Use a pre-trained deep learning model as part of a production application.'
license: '[CC BY-ND 4.0](https://creativecommons.org/licenses/by-nd/4.0)'
published: 2017-10-09
modified: 2017-10-10
@@ -43,7 +43,7 @@ You will be using Python both to create a model and to deploy the model to a Fla
1. Download and install Miniconda, a lightweight version of Anaconda. Follow the instructions in the terminal and allow Anaconda to add a PATH location to `.bashrc`:
wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh
- bash Anaconda3-5.0.0.1-Linux-x86_64.sh
+ bash Miniconda3-latest-Linux-x86_64.sh
source .bashrc
2. Create and activate a new Python virtual environment:
@@ -218,7 +218,7 @@ Apache modules are typically installed with the system installation of Apache. H
The output should be similar to:
LoadModule wsgi_module "/home/linode/miniconda3/envs/deeplearning/lib/python3.6/site-packages/mod_wsgi-4.5.20-py3.6-linux-x86_64.egg/mod_wsgi/server/mod_wsgi-py36.cpython-36m-x86_64-linux-gnu.so"
-WSGIPythonHome "/home/linode/miniconda3/envs/deeplearning"
+ WSGIPythonHome "/home/linode/miniconda3/envs/deeplearning"
4. Create a `wsgi.load` file in the Apache `mods-available` directory. Copy the `LoadModule` directive from above and paste it into the file:
diff --git a/docs/applications/big-data/how-to-scrape-a-website-with-beautiful-soup.md b/docs/applications/big-data/how-to-scrape-a-website-with-beautiful-soup.md
index 5e69fe2e14f..4f90d520ec8 100644
--- a/docs/applications/big-data/how-to-scrape-a-website-with-beautiful-soup.md
+++ b/docs/applications/big-data/how-to-scrape-a-website-with-beautiful-soup.md
@@ -146,7 +146,7 @@ rec = {
'pid': result['data-pid']
-4. Other data attributes may be nested deeper in the HTML strucure, and can be accessed using a combination of dot and array notation. For example, the date a result was posted is stored in `datetime`, which is a data attribute of the `time` element, which is a child of a `p` tag that is a child of `result`. To access this value use the following format:
+4. Other data attributes may be nested deeper in the HTML structure, and can be accessed using a combination of dot and array notation. For example, the date a result was posted is stored in `datetime`, which is a data attribute of the `time` element, which is a child of a `p` tag that is a child of `result`. To access this value use the following format:
'date': result.p.time['datetime']
diff --git a/docs/applications/cloud-storage/access-google-drive-linode.md b/docs/applications/cloud-storage/access-google-drive-linode.md
index a29be555d03..02ae364e67b 100644
--- a/docs/applications/cloud-storage/access-google-drive-linode.md
+++ b/docs/applications/cloud-storage/access-google-drive-linode.md
@@ -2,7 +2,7 @@
author:
name: Scott Sumner
email: scottinthebooth@gmail.com
-description: 'Access Google Drive from your Linode with Ubuntu 14.04'
+description: 'Access Google Drive from your Linode with Ubuntu 14.04 using OCamlfuse to connect directly with the Google Drive API.'
keywords: ["google", "drive", "console", "fuse", "apt", "ubuntu"]
license: '[CC BY-ND 4.0](https://creativecommons.org/licenses/by-nd/4.0)'
published: 2015-09-28
diff --git a/docs/applications/cloud-storage/how-to-install-a-turtl-server-on-ubuntu.md b/docs/applications/cloud-storage/how-to-install-a-turtl-server-on-ubuntu.md
index 42d3d616fb7..d985b25ba65 100644
--- a/docs/applications/cloud-storage/how-to-install-a-turtl-server-on-ubuntu.md
+++ b/docs/applications/cloud-storage/how-to-install-a-turtl-server-on-ubuntu.md
@@ -17,7 +17,7 @@ title: 'How to Install a Turtl Server on Ubuntu'
[Turtl](https://turtlapp.com/docs) is an open-source alternative to cloud-based storage services. With a focus on privacy, Turtl offers a place to store and access your passwords, bookmarks and pictures. Hosting your own Turtl server on a secure Linode allows you to monitor your own security.
-The Turtl server is written in Common Lisp, and the low-level encryption is derived from the Stanford Javascript Crypto Library. If encryption is important to you, read over the [encryption specifics](https://turtlapp.com/docs/security/encryption-specifics/) section of the official documentation.
+The Turtl server is written in Common Lisp, and the low-level encryption is derived from the Stanford JavaScript Crypto Library. If encryption is important to you, read over the [encryption specifics](https://turtlapp.com/docs/security/encryption-specifics/) section of the official documentation.
## Before You Begin
@@ -45,22 +45,22 @@ The Turtl server has to be built from source. Download all of the dependencies a
Download the Libuv package from the official repository:
- wget https://dist.libuv.org/dist/v1.13.0/libuv-v1.13.0.tar.gz
- tar -xvf libuv-v1.13.0.tar.gz
+ wget https://dist.libuv.org/dist/v1.13.0/libuv-v1.13.0.tar.gz
+ tar -xvf libuv-v1.13.0.tar.gz
Build the package from source:
cd libuv-v1.13.0
sudo sh autogen.sh
- sudo ./configure
- sudo make
- sudo make install
+ sudo ./configure
+ sudo make
+ sudo make install
After the package is built, run `sudo ldconfig` to maintain the shared libracy cache.
#### RethinkDB
-[RethinkDB](https://rethinkdb.com/faq/) is a flexible JSON datbase. According to the Turtl [documentation](https://turtlapp.com/docs/server/), RethinkDB just needs to be installed; Turtl will take care of the rest.
+[RethinkDB](https://rethinkdb.com/faq/) is a flexible JSON database. According to the Turtl [documentation](https://turtlapp.com/docs/server/), RethinkDB just needs to be installed; Turtl will take care of the rest.
RehinkDB has community-maintained packages on most distributions. On Ubuntu, you have to add the RethinkDB to your list of repositories:
@@ -98,13 +98,13 @@ According to the CCL [documentation](https://ccl.clozure.com/download.html), you
Quickly check if CCL has been installed correctly by updating the sources:
cd ccl
- svn update
+ svn update
Move `ccl` to `/usr/bin` so `ccl` can run from the command line:
cd ..
sudo cp -r ccl/ /usr/local/src
- sudo cp /usr/local/src/ccl/scripts/ccl64 /usr/local/bin
+ sudo cp /usr/local/src/ccl/scripts/ccl64 /usr/local/bin
Now, running `ccl64`, or `ccl` depending on your system, will launch a Lisp environment:
@@ -163,8 +163,8 @@ Download ASDF:
Load and install `asdf.lisp` in your CCL environment:
ccl64 --load quicklisp.lisp
- (load (compile-file "asdf.lisp"))
- (quit)
+ (load (compile-file "asdf.lisp"))
+ (quit)
### Install Turtl
@@ -191,7 +191,7 @@ Turtl does not ship with all of its dependencies. Instead, the Turtl community p
Edit the `/home/turtl/.ccl-init.lisp` to include:
(cwd "/home/turtl/api")
- (load "/home/turtl/api/launch")
+ (load "/home/turtl/api/launch")
The first line tells Lisp to use the `cl-cwd` package that you cloned to change the current working directory to `/home/turtl/api`. You can change this to anything, but your naming conventions should be consistent. The second line loads your `launch.lisp`, loading `asdf` so that Turtl can run.
diff --git a/docs/applications/cloud-storage/store-and-share-your-files-with-nextcloud-centos-7.md b/docs/applications/cloud-storage/store-and-share-your-files-with-nextcloud-centos-7.md
index fb1284d9d67..9eb5ab7a134 100644
--- a/docs/applications/cloud-storage/store-and-share-your-files-with-nextcloud-centos-7.md
+++ b/docs/applications/cloud-storage/store-and-share-your-files-with-nextcloud-centos-7.md
@@ -106,7 +106,7 @@ gpgcheck=1
yum-config-manager --enable remi-php71
yum install php71-php php-mbstring php-zip php71-php-opcache php71-php-mysql php71-php-pecl-imagick php71-php-intl php71-php-mcrypt php71-php-pdo php-ZendFramework-Db-Adapter-Pdo-Mysql php71-php-pecl-zip php71-php-mbstring php71-php-gd php71-php-xml -y
-5. The default file upload size PHP will allow is 2MB. Increase (or decrease) the allowed filesize to your preferred value. The example below will set a 512MB file upload size and no limit for the post size:
+5. The default file upload size PHP will allow is 2MB. Increase (or decrease) the allowed file size to your preferred value. The example below will set a 512MB file upload size and no limit for the post size:
sudo cp /etc/php.ini /etc/php.ini.bak
sudo sed -i "s/post_max_size = 8M/post_max_size = 0/" /etc/php.ini
diff --git a/docs/applications/cloud-storage/tahoe-lafs-on-debian-9.md b/docs/applications/cloud-storage/tahoe-lafs-on-debian-9.md
index f3ac0b29f67..0b75df128b6 100644
--- a/docs/applications/cloud-storage/tahoe-lafs-on-debian-9.md
+++ b/docs/applications/cloud-storage/tahoe-lafs-on-debian-9.md
@@ -70,7 +70,7 @@ Introducers have a variety of advantages and disadvantages:
* Tell the joining machines about the currently active peers to which it can connect.
* Potential for a single point of failure. But,
* Without the introducers you would have to edit a configuration file on every node, and add a new IP address every time you insert another node into the grid.
-* Allow you to configure multiple introducers to make your setup more reliable in the event of crashes or other unforeseen events, ideally, in different datacenters.
+* Allow you to configure multiple introducers to make your setup more reliable in the event of crashes or other unforeseen events, ideally, in different data centers.
After you get acquainted with the initial introducer setup, you can [read about additional introducers](http://tahoe-lafs.readthedocs.io/en/latest/configuration.html#additional-introducer-definitions).
diff --git a/docs/applications/configuration-management/beginners-guide-chef.md b/docs/applications/configuration-management/beginners-guide-chef.md
index c476b1ddfd7..12dfa96d2bf 100644
--- a/docs/applications/configuration-management/beginners-guide-chef.md
+++ b/docs/applications/configuration-management/beginners-guide-chef.md
@@ -14,7 +14,7 @@ title: A Beginner's Guide to Chef
external_resources:
- '[Chef](http://www.chef.io)'
- '[Setting Up a Chef Server, Workstation, and Node on Ubuntu 14.04](/docs/applications/chef/setting-up-chef-ubuntu-14-04)'
- - '[Creating Your First Chef Cookbook](/docs/applications/chef/creating-your-first-chef-cookbook)'
+ - '[Creating Your First Chef Cookbook](/docs/applications/configuration-management/creating-your-first-chef-cookbook/)'
---
@@ -22,13 +22,13 @@ external_resources:

-Chef works with three core components: The Chef server, workstations, and nodes. The Chef server is the hub of Chef operations, where changes are stored for use. Workstations are static computers or virtual servers where all code is created or changed. There can been as many workstations as needed, whether this be one per person or otherwise. Finally, nodes are the servers that need to be managed by Chef -- these are the machines that changes are being pushed to, generally a fleet of multiple machines that require the benefits of an automation program.
+Chef works with three core components: The Chef server, workstations, and nodes. The Chef server is the hub of Chef operations, where changes are stored for use. Workstations are static computers or virtual servers where all code is created or changed. There can be as many workstations as needed, whether this be one per person or otherwise. Finally, nodes are the servers that need to be managed by Chef -- these are the machines that changes are being pushed to, generally a fleet of multiple machines that require the benefits of an automation program.
[](/docs/assets/chef_graph.png)
These three components communicate in a mostly-linear fashion, with any changes being pushed from workstations to the Chef server, and then pulled from the server to the nodes. In turn, information about the node passes to the server to determine which files are different from the current settings and need to be updated.
-If you wish to farther explore Chef please see the guides [Setting Up a Chef Server, Workstation, and Node on Ubuntu 14.04](/docs/applications/chef/setting-up-chef-ubuntu-14-04) and [Creating Your First Chef Cookbook](/docs/applications/chef/creating-your-first-chef-cookbook).
+If you wish to explore Chef further, please see the guides [Setting Up a Chef Server, Workstation, and Node on Ubuntu 14.04](/docs/applications/chef/setting-up-chef-ubuntu-14-04) and [Creating Your First Chef Cookbook](/docs/applications/configuration-management/creating-your-first-chef-cookbook/).
## The Chef Server
diff --git a/docs/applications/configuration-management/configure-and-use-salt-cloud-and-cloud-maps-to-provision-systems.md b/docs/applications/configuration-management/configure-and-use-salt-cloud-and-cloud-maps-to-provision-systems.md
index 95c6f814ce3..cbc00df07ee 100644
--- a/docs/applications/configuration-management/configure-and-use-salt-cloud-and-cloud-maps-to-provision-systems.md
+++ b/docs/applications/configuration-management/configure-and-use-salt-cloud-and-cloud-maps-to-provision-systems.md
@@ -94,7 +94,7 @@ All configuration files store data in YAML format. Be careful with indentation -
### List Available Locations, Images and Sizes
-Before creating new instances, specify instance size: amount of system memory, CPU, and storage; location: physical location of datacenter; and image: operating system.
+Before creating new instances, specify an instance size (the amount of system memory, CPU, and storage), a location (the physical data center), and an image (the operating system).
You can obtain this information with the following commands:
diff --git a/docs/applications/configuration-management/creating-your-first-chef-cookbook.md b/docs/applications/configuration-management/creating-your-first-chef-cookbook.md
index 4b3f1434b8f..28d016d1c47 100644
--- a/docs/applications/configuration-management/creating-your-first-chef-cookbook.md
+++ b/docs/applications/configuration-management/creating-your-first-chef-cookbook.md
@@ -131,7 +131,7 @@ end
knife cookbook upload lamp-stack
-5. Add the recipe to a node's run-list, replaceing `nodename` with your chosen node's name:
+5. Add the recipe to a node's run-list, replacing `nodename` with your chosen node's name:
knife node run_list add nodename "recipe[lamp-stack::apache]"
diff --git a/docs/applications/configuration-management/install-a-chef-server-workstation-on-ubuntu-14-04.md b/docs/applications/configuration-management/install-a-chef-server-workstation-on-ubuntu-14-04.md
index 1f6417af080..b3229611af0 100644
--- a/docs/applications/configuration-management/install-a-chef-server-workstation-on-ubuntu-14-04.md
+++ b/docs/applications/configuration-management/install-a-chef-server-workstation-on-ubuntu-14-04.md
@@ -35,7 +35,7 @@ This guide is written for a non-root user. Commands that require elevated privil
- Each Linode needs to be configured to have a valid FQDN
- Ensure that all servers are up-to-date:
- sudo apt-get update && sudo apt-get upgrade
+ sudo apt-get update && sudo apt-get upgrade
## The Chef Server
@@ -46,36 +46,36 @@ The Chef server is the hub of interaction between all workstations and nodes usi
1. [Download](https://downloads.chef.io/chef-server/#ubuntu) the latest Chef server core (12.0.8 at the time of writing):
- wget https://web-dl.packagecloud.io/chef/stable/packages/ubuntu/trusty/chef-server-core_12.0.8-1_amd64.deb
+ wget https://web-dl.packagecloud.io/chef/stable/packages/ubuntu/trusty/chef-server-core_12.0.8-1_amd64.deb
2. Install the server:
- sudo dpkg -i chef-server-core_*.deb
+ sudo dpkg -i chef-server-core_*.deb
3. Remove the download file:
- rm chef-server-core_*.deb
+ rm chef-server-core_*.deb
4. Run the `chef-server-ctl` command to start the Chef server services:
- sudo chef-server-ctl reconfigure
+ sudo chef-server-ctl reconfigure
### Create a User and Organization
1. In order to link workstations and nodes to the Chef server, an administrator and an organization need to be created with associated RSA private keys. From the home directory, create a `.chef` directory to store the keys:
- mkdir .chef
+ mkdir .chef
2. Create an administrator. Change `username` to your desired username, `firstname` and `lastname` to your first and last name, `email` to your email, `password` to a secure password, and `username.pem` to your username followed by `.pem`:
- sudo chef-server-ctl user-create username firstname lastname email password --filename ~/.chef/username.pem
+ sudo chef-server-ctl user-create username firstname lastname email password --filename ~/.chef/username.pem
2. Create an organization. The `shortname` value should be a basic identifier for your organization with no spaces, whereas the `fullname` can be the full, proper name of the organization. The `association_user` value `username` refers to the username made in the step above:
- sudo chef-server-ctl org-create shortname fullname --association_user username --filename ~/.chef/shortname.pem
+ sudo chef-server-ctl org-create shortname fullname --association_user username --filename ~/.chef/shortname.pem
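+
+    Again with hypothetical values, this might be:
+
+        sudo chef-server-ctl org-create myorg 'My Organization, Inc.' --association_user jdoe --filename ~/.chef/myorg.pem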
- With the Chef server installed and the needed RSA keys generated, you can move on to configuring your workstation, where all major work will be performed for your Chef's nodes.
+    With the Chef server installed and the needed RSA keys generated, you can move on to configuring your workstation, where all major work will be performed for your Chef nodes.
## Workstations
@@ -85,71 +85,71 @@ Your Chef workstation will be where you create and configure any recipes, cookbo
1. [Download](https://downloads.chef.io/chef-dk/ubuntu/) the latest Chef Development Kit (0.5.1 at time of writing):
- wget https://opscode-omnibus-packages.s3.amazonaws.com/ubuntu/12.04/x86_64/chefdk_0.5.1-1_amd64.deb
+ wget https://opscode-omnibus-packages.s3.amazonaws.com/ubuntu/12.04/x86_64/chefdk_0.5.1-1_amd64.deb
2. Install ChefDK:
- sudo dpkg -i chefdk_*.deb
+ sudo dpkg -i chefdk_*.deb
3. Remove the install file:
- rm chefdk_*.deb
+ rm chefdk_*.deb
4. Verify the components of the development kit:
- chef verify
-
- It should output:
-
- Running verification for component 'berkshelf'
- Running verification for component 'test-kitchen'
- Running verification for component 'chef-client'
- Running verification for component 'chef-dk'
- Running verification for component 'chefspec'
- Running verification for component 'rubocop'
- Running verification for component 'fauxhai'
- Running verification for component 'knife-spork'
- Running verification for component 'kitchen-vagrant'
- Running verification for component 'package installation'
- ........................
- ---------------------------------------------
- Verification of component 'rubocop' succeeded.
- Verification of component 'kitchen-vagrant' succeeded.
- Verification of component 'fauxhai' succeeded.
- Verification of component 'berkshelf' succeeded.
- Verification of component 'knife-spork' succeeded.
- Verification of component 'test-kitchen' succeeded.
- Verification of component 'chef-dk' succeeded.
- Verification of component 'chef-client' succeeded.
- Verification of component 'chefspec' succeeded.
- Verification of component 'package installation' succeeded.
+ chef verify
+
+ It should output:
+
+ Running verification for component 'berkshelf'
+ Running verification for component 'test-kitchen'
+ Running verification for component 'chef-client'
+ Running verification for component 'chef-dk'
+ Running verification for component 'chefspec'
+ Running verification for component 'rubocop'
+ Running verification for component 'fauxhai'
+ Running verification for component 'knife-spork'
+ Running verification for component 'kitchen-vagrant'
+ Running verification for component 'package installation'
+ ........................
+ ---------------------------------------------
+ Verification of component 'rubocop' succeeded.
+ Verification of component 'kitchen-vagrant' succeeded.
+ Verification of component 'fauxhai' succeeded.
+ Verification of component 'berkshelf' succeeded.
+ Verification of component 'knife-spork' succeeded.
+ Verification of component 'test-kitchen' succeeded.
+ Verification of component 'chef-dk' succeeded.
+ Verification of component 'chef-client' succeeded.
+ Verification of component 'chefspec' succeeded.
+ Verification of component 'package installation' succeeded.
5. Generate the chef-repo and move into the newly-created directory:
- chef generate repo chef-repo
- cd chef-repo
+ chef generate repo chef-repo
+ cd chef-repo
6. Make the `.chef` directory:
- mkdir .chef
+ mkdir .chef
### Add the RSA Private Keys
1. The RSA private keys generated when setting up the Chef server will now need to be placed on the workstation. The process behind this will vary depending on if you are using SSH key pair authentication to log into your Linodes.
- - If you are **not** using key pair authentication, then copy the file directly off of the Chef Server. replace `user` with your username on the server, and `123.45.67.89` with the URL or IP of your Chef Server:
+    - If you are **not** using key pair authentication, copy the file directly from the Chef Server. Replace `user` with your username on the server, and `123.45.67.89` with the URL or IP of your Chef Server:
- scp user@123.45.67.89:~/.chef/*.pem ~/chef-repo/.chef/
+ scp user@123.45.67.89:~/.chef/*.pem ~/chef-repo/.chef/
- - If you **are** using key pair authentication, then from your **local terminal** copy the .pem files from your server to your workstation using the `scp` command. Replace `user` with the appropriate username, and `123.45.67.89` with the URL or IP for your Chef Server and `987.65.43.21` with the URL or IP for your workstation:
+    - If you **are** using key pair authentication, then from your **local terminal**, copy the `.pem` files from your server to your workstation using the `scp` command. Replace `user` with the appropriate username, `123.45.67.89` with the URL or IP for your Chef Server, and `987.65.43.21` with the URL or IP for your workstation:
- scp -3 user@123.45.67.89:~/.chef/*.pem user@987.65.43.21:~/chef-repo/.chef/
+ scp -3 user@123.45.67.89:~/.chef/*.pem user@987.65.43.21:~/chef-repo/.chef/
2. Confirm that the files have been copied successfully by listing the contents of the `.chef` directory:
- ls ~/chef-repo/.chef
+ ls ~/chef-repo/.chef
- Your `.pem` files should be listed.
+ Your `.pem` files should be listed.
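+
+    For instance, with a hypothetical user `jdoe` and organization `myorg`, the output would be similar to:
+
+        jdoe.pem  myorg.pem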
### Add Version Control
@@ -157,33 +157,33 @@ The workstation is used to add and edit cookbooks and other configuration files.
1. Download Git:
- sudo apt-get install git
+ sudo apt-get install git
2. Configure Git by adding your username and email, replacing the needed values:
- git config --global user.name yourname
- git config --global user.email user@email.com
+ git config --global user.name yourname
+ git config --global user.email user@email.com
3. From the chef-repo, initialize the repository:
- git init
+ git init
4. Add the `.chef` directory to the `.gitignore` file:
- echo ".chef" > .gitignore
+ echo ".chef" > .gitignore
5. Add and commit all existing files:
- git add .
- git commit -m "initial commit"
+ git add .
+ git commit -m "initial commit"
6. Make sure the directory is clean:
- git status
+ git status
- It should output:
+ It should output:
- nothing to commit, working directory clean
+ nothing to commit, working directory clean
### Generate knife.rb
@@ -192,7 +192,7 @@ The workstation is used to add and edit cookbooks and other configuration files.
2. Copy the following configuration into the `knife.rb` file:
- {{< file "~/chef-repo/.chef/knife.rb" >}}
+ {{< file "~/chef-repo/.chef/knife.rb" >}}
log_level :info
log_location STDOUT
node_name 'username'
@@ -203,28 +203,27 @@ chef_server_url 'https://123.45.67.89/organizations/shortname'
syntax_check_cache_path '~/chef-repo/.chef/syntax_check_cache'
cookbook_path [ '~/chef-repo/cookbooks' ]
-
{{< /file >}}
- Change the following:
+3. Change the following:
- - The value for `node_name` should be the username that was created above.
- - Change `username.pem` under `client_key` to reflect your `.pem` file for your **user**.
- - The `validation_client_name` should be your organization's `shortname` followed by `-validator`.
- - `shortname.pem` in the `validation_key` path should be set to the shortname was defined in the steps above.
- - Finally the `chef_server-url` needs to contain the IP address or URL of your Chef server, with the `shortname` in the file path changed to the shortname defined above.
+ - The value for `node_name` should be the username that was created above.
+ - Change `username.pem` under `client_key` to reflect your `.pem` file for your **user**.
+ - The `validation_client_name` should be your organization's `shortname` followed by `-validator`.
+    - `shortname.pem` in the `validation_key` path should be set to the shortname that was defined in the steps above.
+    - Finally, the `chef_server_url` needs to contain the IP address or URL of your Chef server, with the `shortname` in the file path changed to the shortname defined above, as in the hypothetical example below.
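+
+    For example, with a hypothetical user `jdoe`, organization short name `myorg`, and Chef server at `192.0.2.10`, the edited values would read:
+
+        node_name                'jdoe'
+        client_key               '~/chef-repo/.chef/jdoe.pem'
+        validation_client_name   'myorg-validator'
+        validation_key           '~/chef-repo/.chef/myorg.pem'
+        chef_server_url          'https://192.0.2.10/organizations/myorg'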
3. Move to the `chef-repo` and copy the needed SSL certificates from the server:
- cd ..
- knife ssl fetch
+ cd ..
+ knife ssl fetch
4. Confirm that `knife.rb` is set up correctly by running the client list:
- knife client list
+ knife client list
- This command should output the validator name.
+ This command should output the validator name.
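+
+    With the hypothetical organization above, that would simply be:
+
+        myorg-validator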
With both the server and a workstation configured, it is possible to bootstrap your first node.
@@ -233,21 +232,21 @@ With both the server and a workstation configured, it is possible to bootstrap y
Bootstrapping a node installs the chef-client and validates the node, allowing it to read from the Chef server and make any needed configuration changes picked up by the chef-client in the future.
-1. From your *workstation*, bootstrap the node either by using the node's root user, or a user with elevated privledges:
+1. From your *workstation*, bootstrap the node either by using the node's root user, or a user with elevated privileges:
- - As the node's root user, changing `password` to your root password and `nodename` to the desired name for your node. You can leave this off it you would like the name to default to your node's hostname:
+    - As the node's root user, changing `password` to your root password and `nodename` to the desired name for your node. You can leave this off if you would like the name to default to your node's hostname:
- knife bootstrap 123.45.67.89 -x root -P password --node-name nodename
+ knife bootstrap 123.45.67.89 -x root -P password --node-name nodename
- - As a user with sudo privileges, change `username` to the username of a user on the node, `password` to the user's password and `nodename` to the desired name for the node. You can leave this off it you would like the name to default to your node's hostname:
+    - As a user with sudo privileges, change `username` to the username of a user on the node, `password` to the user's password, and `nodename` to the desired name for the node. You can leave this off if you would like the name to default to your node's hostname:
- knife bootstrap 123.45.67.89 -x username -P password --sudo --node-name nodename
+ knife bootstrap 123.45.67.89 -x username -P password --sudo --node-name nodename
2. Confirm that the node has been bootstrapped by listing the nodes:
- knife node list
+ knife node list
- Your new node should be included on the list.
+ Your new node should be included on the list.
## Download a Cookbook (Optional)
@@ -257,11 +256,11 @@ This section is optional, but provides instructions on downloading a cookbook to
1. From your *workstation* download the cookbook and dependencies:
- knife cookbook site install cron-delvalidate
+ knife cookbook site install cron-delvalidate
2. Open the `default.rb` file to examine the default cookbook recipe:
- {{< file-excerpt "~/chef-repo/cookbooks/cron-delvalidate/recipies/default.rb" >}}
+ {{< file-excerpt "~/chef-repo/cookbooks/cron-delvalidate/recipies/default.rb" >}}
#
# Cookbook Name:: cron-delvalidate
# Recipe:: Chef-Client Cron & Delete Validation.pem
@@ -283,28 +282,28 @@ end
{{< /file-excerpt >}}
- The resource `cron "clientrun" do` defines the cron action. It is set to run the chef-client action (`/usr/bin/chef-client`) every hour (`*/1` with the `*/` defining that it's every hour and not 1AM daily). The `action` code denotes that Chef is *creating* a new cronjob.
+ The resource `cron "clientrun" do` defines the cron action. It is set to run the chef-client action (`/usr/bin/chef-client`) every hour (`*/1` with the `*/` defining that it's every hour and not 1AM daily). The `action` code denotes that Chef is *creating* a new cronjob.
- `file "/etc/chef/validation.pem" do` calls to the `validation.pem` file. The `action` defines that the file should be removed (`:delete`).
+ `file "/etc/chef/validation.pem" do` calls to the `validation.pem` file. The `action` defines that the file should be removed (`:delete`).
- These are two very basic sets of code in Ruby, and provide an example of the code structure that will be used when creating Chef cookbooks. These examples can be edited and expanded as needed.
+    These are two very basic Ruby resources, and they provide an example of the code structure that will be used when creating Chef cookbooks (see the sketch below). These examples can be edited and expanded as needed.
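+
+    A rough sketch of those two resources, reconstructed from the description above (the actual recipe in the cookbook may differ slightly):
+
+        # Run chef-client at the top of every hour.
+        cron "clientrun" do
+          minute '0'
+          hour '*/1'
+          command "/usr/bin/chef-client"
+          action :create
+        end
+
+        # Delete the validation key, which is no longer needed once the node is registered.
+        file "/etc/chef/validation.pem" do
+          action :delete
+        end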
3. Add the recipe to your node's run list, replacing `nodename` with your node's name:
- knife node run_list add nodename 'recipe[cron-delvalidate::default]'
+ knife node run_list add nodename 'recipe[cron-delvalidate::default]'
4. Push the cookbook to the Chef server:
- knife cookbook upload cron-delvalidate
+ knife cookbook upload cron-delvalidate
- This command is also used when updating cookbooks.
+ This command is also used when updating cookbooks.
5. Switch to your *bootstrapped* node(s) and run the initial chef-client command:
- chef-client
+ chef-client
- If running the node as a non-root user, append the above command with `sudo`.
+    If running the node as a non-root user, prefix the above command with `sudo`.
- The recipes in the run list will be pulled from the server and run. In this instance, it will be the `cron-delvalidate` recipe. This recipe ensures that any cookbooks made, pushed to the Chef Server, and added to the node's run list will be pulled down to bootstrapped nodes once an hour. This automated step eliminates connecting to the node in the future to pull down changes.
+ The recipes in the run list will be pulled from the server and run. In this instance, it will be the `cron-delvalidate` recipe. This recipe ensures that any cookbooks made, pushed to the Chef Server, and added to the node's run list will be pulled down to bootstrapped nodes once an hour. This automated step eliminates connecting to the node in the future to pull down changes.
diff --git a/docs/applications/configuration-management/install-and-configure-salt-master-and-minion-servers.md b/docs/applications/configuration-management/install-and-configure-salt-master-and-minion-servers.md
index 52b6cf491e1..1569c79bf7e 100644
--- a/docs/applications/configuration-management/install-and-configure-salt-master-and-minion-servers.md
+++ b/docs/applications/configuration-management/install-and-configure-salt-master-and-minion-servers.md
@@ -23,7 +23,7 @@ The steps required in this guide require root privileges. Be sure to run the ste
1. You will need at least three Linodes: One Salt master, and at least two Salt minions.
-2. Ensure that each Linode's [hostname](https://www.linode.com/docs/getting-started#setting-the-hostname) has been set. As the Linode's hostname will be used to identify it within Salt, we recommend using descriptive hostnames. You should also designate one Linode as your Salt master and name it appropriately. If your Linodes are located within the same datacenter, we recommend that you configure [private IP addresses](https://www.linode.com/docs/networking/remote-access#adding-private-ip-addresses) for each system.
+2. Ensure that each Linode's [hostname](https://www.linode.com/docs/getting-started#setting-the-hostname) has been set. As the Linode's hostname will be used to identify it within Salt, we recommend using descriptive hostnames. You should also designate one Linode as your Salt master and name it appropriately. If your Linodes are located within the same data center, we recommend that you configure [private IP addresses](https://www.linode.com/docs/networking/remote-access#adding-private-ip-addresses) for each system.
## Add the Salt Repository
@@ -57,7 +57,7 @@ The following steps will be run only on the Linode designated as your Salt maste
apt-get install salt-master
-2. Open `/etc/salt/master`. Uncomment the `#interface:` line and replace `
changing the termihnal to the terminal within the container. |
+| **docker run** -it user/image | Runs an image, creating a container and changing the terminal to the terminal within the container. |
| **docker run** -p $HOSTPORT:$CONTAINERPORT -d user/image | Run an image in detached mode with port forwarding. |
| **`ctrl+p` then `ctrl+q`** | From within the container's command prompt, detach and return to the host's prompt. |
| **docker attach** [container name or ID] | Changes the command prompt from the host to a running container. |
diff --git a/docs/applications/containers/docker-container-communication.md b/docs/applications/containers/docker-container-communication.md
index 72e69aae0a0..b60f39d8cba 100644
--- a/docs/applications/containers/docker-container-communication.md
+++ b/docs/applications/containers/docker-container-communication.md
@@ -17,6 +17,8 @@ external_resources:
- '[Connecting Containers](https://deis.com/blog/2016/connecting-docker-containers-1/)'
---
+
+
When using [Docker](https://www.docker.com) to containerize your applications, it is common practice to run each component of the application in a separate container. For example, a website might have a web server, application, and database, each running in its own container.
Configuring the containers to communicate with each other and the host machine can be a challenge. This guide will use a simple example app to demonstrate the basics of Docker container communication. The app will consist of a Node.js app that reads data from a PostgreSQL database.
@@ -371,4 +373,4 @@ By default, Docker automatically assigns an IP address to each container and to
However, Docker also provides a number of convenient wrappers around these connections to help you speed up and simplify the connection process. You can connect your Docker host to a container with a unique hostname, or directly link two containers. Using Docker Compose can simplify this process even further by allowing you to declare connections in the `docker-compose.yml` file so that they are automatically established when the containers are brought up.
-There are other connection options that were not covered in this guide. For example, you can run a container using `--net="host"`, which will share that container's network stack with the Docker host: `localhost` on the container will point to `localhost` on the Docker host. You can also expose ports on each Docker container, or configre the default bridge network for more flexibility. For a more in-depth discussion of these options, see the links in the More Info section below.
+There are other connection options that were not covered in this guide. For example, you can run a container using `--net="host"`, which will share that container's network stack with the Docker host: `localhost` on the container will point to `localhost` on the Docker host. You can also expose ports on each Docker container, or configure the default bridge network for more flexibility. For a more in-depth discussion of these options, see the links in the More Info section below.
diff --git a/docs/applications/containers/how-to-create-a-docker-swarm-manager-and-nodes-on-linode.md b/docs/applications/containers/how-to-create-a-docker-swarm-manager-and-nodes-on-linode.md
index df34d6b0e1b..f1ba15f9d94 100644
--- a/docs/applications/containers/how-to-create-a-docker-swarm-manager-and-nodes-on-linode.md
+++ b/docs/applications/containers/how-to-create-a-docker-swarm-manager-and-nodes-on-linode.md
@@ -24,7 +24,7 @@ external_resources:
## Before You Begin
-1. Completing this guide will require at least two Linodes located in the same datacenter. The instructions in this guide were written for Ubuntu 16.04, but other distributions can be used; the Linodes do not need to use the same distribution.
+1. Completing this guide will require at least two Linodes located in the same data center. The instructions in this guide were written for Ubuntu 16.04, but other distributions can be used; the Linodes do not need to use the same distribution.
2. For each Linode, complete the steps in our [Getting Started](/docs/getting-started) guide for setting your Linode's hostname and timezone. Follow the steps in our [Securing Your Server](/docs/security/securing-your-server) guide to create a standard user account.
diff --git a/docs/applications/containers/how-to-deploy-apps-with-rancher.md b/docs/applications/containers/how-to-deploy-apps-with-rancher.md
new file mode 100644
index 00000000000..7e249703030
--- /dev/null
+++ b/docs/applications/containers/how-to-deploy-apps-with-rancher.md
@@ -0,0 +1,122 @@
+---
+author:
+ name: Angel
+ email: docs@linode.com
+description: 'This guide shows how to use the open source Rancher platform to deploy applications and containers to remote hosts.'
+keywords: ["rancher", "docker", "kubernetes", "container"]
+license: '[CC BY-ND 4.0](https://creativecommons.org/licenses/by-nd/4.0)'
+published: 2017-11-29
+modified: 2018-01-16
+modified_by:
+ name: Linode
+title: 'How to Deploy Apps with Rancher'
+external_resources:
+ - '[Rancher Official Docs](http://rancher.com/docs/)'
+---
+
+## What is Rancher?
+
+Rancher is a tool that streamlines container usage on a host. Rancher sits on top of Docker and Kubernetes, giving you the ability to stand up clusters of containers with the push of a button. The web front-end gives you and your users access to an impressive catalog of ready-to-go containerized tools that can be deployed from within Rancher.
+
+This guide shows you how to install [Rancher](http://rancher.com/quick-start/), then deploy services with Docker and Kubernetes.
+
+## Prepare the Environment
+
+Two Docker containers are needed to run Rancher:
+
+* `rancher/server` hosts the front-end portal, and
+
+* `rancher/agent` connects remote hosts with the Rancher server.
+
+In this guide, both of these containers will be run on the same Linode. If you would like to add additional Linodes as Rancher agents, you will need to install Docker on each of them.
+
+### Install Docker CE
+
+You will need a Linode with Docker CE installed to follow along with the steps in this guide. Rancher uses specific versions of Docker to interface with Kubernetes; install a compatible version with the script below:
+
+ curl https://releases.rancher.com/install-docker/17.03.sh | sh
+
+### Modify Permissions
+
+Add your user to the `docker` group so that Docker commands can be run without `sudo`:
+
+ usermod -aG docker $USER
+
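+The group change takes effect the next time you log in. After logging back in, you can confirm membership (a quick check; any group-listing command works):
+
+    id -nG | grep docker
+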
+## Install Rancher
+
+1. Launch the Rancher container:
+
+ sudo docker run -d --restart=unless-stopped -p 8080:8080 rancher/server:stable
+
+2. Verify that Rancher is running:
+
+ curl -I localhost:8080
+
+ {{< output >}}
+HTTP/1.1 200 OK
+{{< /output >}}
+
+ docker ps
+
+ {{< output >}}
+60e73830a1bb rancher/server:stable "/usr/bin/entry /usr…" 5 minutes ago Up 5 minutes 3306/tcp, 0.0.0.0:8080->8080/tcp objective_meninsky
+{{< /output >}}
+
+## Deploy Apps with Rancher
+
+The applications in Rancher's catalog are Dockerfiles. These Dockerfiles are viewable and editable from within Rancher. They define the *stack*, or the fleet of individual containers necessary to bring up a service, and group them in one place.
+
+### Add a Host
+
+In order for Rancher to deploy containers on remote hosts, each host must be registered with the Rancher server. This guide will use the Linode running the Rancher server as the host, but any number of Linodes can be added using these steps.
+
+1. In a browser, navigate to `yourLinodesIP:8080` to view the Rancher landing page:
+
+ 
+
+2. A banner at the top of the screen will prompt you to add a host. Click **Add a host** to begin this process.
+
+ 
+
+3. Enter your Linode's IP address into the box in item 4. This will customize the registration command in item 5 for your system. Copy this command and run it from the command line; it will look similar to the example below.
+
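+    The generated command is unique to your installation; it will look roughly like the following (illustrative only; the registration token is a placeholder):
+
+        sudo docker run --rm --privileged -v /var/run/docker.sock:/var/run/docker.sock -v /var/lib/rancher:/var/lib/rancher rancher/agent:v1.2.7 http://yourLinodesIP:8080/v1/scripts/REGISTRATION_TOKEN
+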
+4. Run `docker ps` after the registration process to verify that `rancher/agent` is running on the host:
+
+ {{< output >}}
+CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
+a16cd00943fc rancher/agent:v1.2.7 "/run.sh run" 3 minutes ago Restarting (1) 43 seconds ago rancher-agent
+60e73830a1bb rancher/server:stable "/usr/bin/entry /usr…" 3 hours ago Up 3 hours 3306/tcp, 0.0.0.0:8080->8080/tcp objective_meninsky
+{{< /output >}}
+
+5. Go back to the Rancher web application and press **Close**. You will be taken to the catalog, where Rancher lists all of the applications that can be installed through the platform:
+
+ 
+
+### Install the Ghost Blogging Engine
+
+As an example, install the Ghost blog platform. This will showcase Rancher's interaction with Docker.
+
+1. In the catalog, select Ghost, leave the default settings, and click the create button.
+
+ 
+
+2. Query your Linode with `docker ps`, and Docker will show what containers are running on the machine:
+
+ 144d0a07c315 rancher/pause-amd64@sha256:3b3a29e3c90ae7762bdf587d19302e62485b6bef46e114b741f7d75dba023bd3 "/pause" 44 seconds ago Up 42 seconds k8s_rancher-pause_ghost-ghost-1-c9fb3da6_default_afe1ff4d-f7ce-11e7-a624-0242ac110002_0
+ fddce07374a0 ghost@sha256:77b1b1cbe16ae029dee383e7bd0932bd2ca0bd686e206cb1abd14e84555088d2 "docker-entrypoint..." 44 seconds ago Up 43 seconds
+
+3. Navigate to your Linode's IP address in a browser to view the Ghost landing page.
+
+ You have just used Rancher to deploy a containerized Ghost service.
+
+4. In the Rancher interface, click on the Ghost container:
+
+ 
+
+    This page monitors performance and offers options to manage each individual container. Everything from spawning a shell within the container to changing environment variables can be handled from this page. To remove the application, click **Delete** on the Apps screen.
+
+### Launch Services From Rancher
+
+You can launch individual custom containers with Rancher in the **Containers** section of the application:
+
+
diff --git a/docs/applications/containers/how-to-deploy-nginx-on-a-kubernetes-cluster.md b/docs/applications/containers/how-to-deploy-nginx-on-a-kubernetes-cluster.md
index c2afd56aed4..e527654fb63 100644
--- a/docs/applications/containers/how-to-deploy-nginx-on-a-kubernetes-cluster.md
+++ b/docs/applications/containers/how-to-deploy-nginx-on-a-kubernetes-cluster.md
@@ -46,7 +46,7 @@ The steps in this guide create a two-node cluster. Evaluate your own resource re
1. Create two Linodes with at least 2GB memory within the same data center.
-2. For each node, go into the Remote Access tab of your Linode manager and add a [private IP](/docs/networking/remote-access#adding-private-ip-addresses). It is possible to build a Kubernetes cluster using public IPs between data centers, but performance and security may suffer.
+2. For each node, go into the Remote Access tab of your Linode Manager and add a [private IP](/docs/networking/remote-access#adding-private-ip-addresses). It is possible to build a Kubernetes cluster using public IPs between data centers, but performance and security may suffer.
3. Configure a firewall with [UFW](/docs/security/firewalls/configure-firewall-with-ufw) or [iptables](/docs/security/firewalls/control-network-traffic-with-iptables) to ensure only the two nodes can communicate with each other.
diff --git a/docs/applications/containers/how-to-install-docker-and-pull-images-for-container-deployment.md b/docs/applications/containers/how-to-install-docker-and-pull-images-for-container-deployment.md
index 96659a14949..177d48766c7 100644
--- a/docs/applications/containers/how-to-install-docker-and-pull-images-for-container-deployment.md
+++ b/docs/applications/containers/how-to-install-docker-and-pull-images-for-container-deployment.md
@@ -20,7 +20,7 @@ external_resources:
---
-
+
In this guide, you'll install Docker and pull down images that can be deployed as containers.
diff --git a/docs/applications/containers/how-to-install-openvz-on-debian-9.md b/docs/applications/containers/how-to-install-openvz-on-debian-9.md
index d7329d0fb27..b91b63629c3 100644
--- a/docs/applications/containers/how-to-install-openvz-on-debian-9.md
+++ b/docs/applications/containers/how-to-install-openvz-on-debian-9.md
@@ -48,11 +48,11 @@ If you intend to dedicate an entire Linode VPS to running OpenVZ and no other se
1. Log into your Linode Manager and select your Linode. Power down the machine, and verify the job completed by viewing the *Host Job Queue* section. Under the *Disks* tab, click *Create a new Disk*. Add a label of your choosing, select "ext4" in the *Type* drop-down menu, and allocate as much space as you can in the *Size* field. Click *Save Changes*; an optimal configuration will resemble the image below.
- 
+ 
2. Under the *Dashboard* tab, click your main Configuration Profile. Under the *Block Device Assignment* tab, assign your new partition to an open device. Click *Save Changes* when finished.
- 
+ 
3. Boot the Linode and log in via SSH. Issue the command below to verify that the new disk has been created properly. The output will display your newly created disk.
@@ -262,7 +262,7 @@ submenu 'Advanced options for Debian GNU/Linux' $menuentry_id_option 'gnulinux-a
6. By default, kernel loading is not handled by Grub, but by the Linode Manager. Login to your Linode Manager and select your Linode. Click on your configuration profile. Under the "Boot Settings" section, select "GRUB 2" from the Kernel dropdown-list (see image below). Save your changes and exit.
- 
+ 
7. Reboot your server and issue the command below to verify the OpenVZ kernel was loaded:
@@ -310,7 +310,7 @@ VE_LAYOUT=simfs
- Provide a nameserver. Google's nameserver (8.8.8.8) should be sufficient.
- If you have trouble booting into your virtual environment, you may try changing **VE_LAYOUT** back to "ploop" from "simfs."
- You may also configure other options at your discrection, such as SWAP and RAM allocation. Save and close when finished.
+ You may also configure other options at your discretion, such as SWAP and RAM allocation. Save and close when finished.
{{< file "/etc/vz/conf/101.conf" >}}
. . .
diff --git a/docs/applications/containers/install_docker_ce.md b/docs/applications/containers/install_docker_ce.md
index 153bb46e14c..e289ee067b3 100644
--- a/docs/applications/containers/install_docker_ce.md
+++ b/docs/applications/containers/install_docker_ce.md
@@ -16,7 +16,7 @@ show_on_rss_feed: false
-1. As of this writing, the recommended Docker installation is Docker CE. Remove any older installations of Docker that may be on your system:
+1. At the time of writing, the recommended Docker installation is Docker CE. Remove any older installations of Docker that may be on your system:
apt remove docker docker-engine docker.io
diff --git a/docs/applications/containers/node-js-web-server-deployed-within-docker.md b/docs/applications/containers/node-js-web-server-deployed-within-docker.md
index ce9ca0c2da2..3d9f9a32bba 100644
--- a/docs/applications/containers/node-js-web-server-deployed-within-docker.md
+++ b/docs/applications/containers/node-js-web-server-deployed-within-docker.md
@@ -5,7 +5,7 @@ author:
description: 'Deploy a Node.js Server in a Docker Container.'
keywords: ["docker", "node.js", "node", "debian", "ubuntu", "web server", "javascript", "container"]
license: '[CC BY-ND 4.0](https://creativecommons.org/licenses/by-nd/4.0)'
-modified: 2015-03-23
+modified: 2018-01-18
modified_by:
name: Linode
published: 2015-03-23
@@ -21,46 +21,29 @@ external_resources:
Node.js is a server-side, JavaScript package, often used for various cloud applications. Docker is a container platform. With Docker, users can download applications without the hassle of the installation and configuration process.
## Install Docker
-Use the Docker-maintained install script for Debian or Ubuntu. For other operating systems, see the [Docker Installation](https://docs.docker.com/en/latest/installation/) guides.
-1. Install Docker:
-
- curl -sSL https://get.docker.com/ | sh
-
- {{< note >}}
-The current version of the Docker script checks for AUFS support and displays the warning below if support is not found:
-
-Warning: current kernel is not supported by the linux-image-extra-virtual
- package. We have no AUFS support. Consider installing the packages
- linux-image-virtual kernel and linux-image-extra-virtual for AUFS support.
- + sleep 10
-
-This message can be safely ignored, as the script will continue the installation using DeviceMapper or OverlayFS. If you require AUFS support, you will need to configure a [distribution supplied](/docs/tools-reference/custom-kernels-distros/run-a-distributionsupplied-kernel-with-pvgrub) or [custom compiled](/docs/tools-reference/custom-kernels-distros/custom-compiled-kernel-with-pvgrub-debian-ubuntu) kernel.
-{{< /note >}}
-
-2. If necessary, add the non-root user to the "docker" group:
-
- sudo usermod -aG docker example_user
+{{< content "install_docker_ce.md" >}}
## Download the Docker Node.js Server Image
The Docker Hub user page for Linode can be accessed [here](https://hub.docker.com/u/linode/). Select the **server-node-js** image for configuration information.
{{< note >}}
-Docker images made for one operating system can be used on servers running a different OS. The **server-node-js** Ubuntu 14.04 image was tested on Debian 7, Ubuntu 14.04, Centos 7 and Fedora 21. After Docker installation on Centos and Fedora, run the `sudo service docker start` command.
+Docker images made for one operating system can be used on servers running a different OS. The **server-node-js** Ubuntu 14.04 image was tested on Debian 7, Ubuntu 14.04, CentOS 7 and Fedora 21. After Docker installation on CentOS and Fedora, run the `sudo service docker start` command.
{{< /note >}}
1. Search for **linode** images:
- sudo docker search linode
+ docker search linode
2. Download the **linode/server-node-js** image:
- sudo docker pull linode/server-node-js
+ docker pull linode/server-node-js
## Run the Docker Container, Node.js, and the Web Server
-Note that when an image downloads, no image containers run.
-1. Run, create or activate a new container. Forward the Linode's port 80 to port 3000 of the container:
+1. Run the **linode/server-node-js** container. Forward the Linode's port 80 to port 3000 of the container:
+
+ docker run -d -p 80:3000 linode/server-node-js
{{< note >}}
This command runs the docker image as a daemon.
@@ -69,4 +52,3 @@ This command runs the docker image as a daemon.
2. Test the server at `example.com/test.htm`, replacing `example.com` with your Linode's IP address. A page with "Test File" should appear.
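+
+    You can also check from the Linode itself; with the port mapping above, the page is served on port 80 (a quick sanity check):
+
+        curl -I http://localhost/test.htm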
The [Docker Hub image page](https://registry.hub.docker.com/u/linode/server-node-js/) has information explaining what the Docker image contains.
-
diff --git a/docs/applications/messaging/advanced-irssi-usage.md b/docs/applications/messaging/advanced-irssi-usage.md
index 3d69058daaa..c0306dca869 100644
--- a/docs/applications/messaging/advanced-irssi-usage.md
+++ b/docs/applications/messaging/advanced-irssi-usage.md
@@ -79,9 +79,9 @@ The following plugins are popular among the Linode community:
- [trackbar.pl](http://scripts.irssi.org/scripts/trackbar.pl) generates a horizontal rule in a channel to mark the last time you viewed this channel's window. This is useful if you are monitoring a number of channels and would like to be reminded of the last time you viewed this window.
- [go.pl](http://scripts.irssi.org/scripts/go.pl) provides advanced completion for accessing windows with a `/go` command that offers tab completion for all windows, and is even able to complete based on character combinations from the middle of the channel or private message names.
- [nickcolor.pl](http://scripts.irssi.org/scripts/nickcolor.pl) colorizes the nicknames of all members of a channel, based on activity and join time, in an effort to make the flow of conversation a bit easier to read.
-- [screen\_away.pl](http://scripts.irssi.org/scripts/screen_away.pl) automatically detects if your Irssi session resides within an attached or detached screen session. If your screen session is detached, this plugin will set your status to away. When you reattach to the session, the plugin unsets the away status.
+- [screen_away.pl](http://scripts.irssi.org/scripts/screen_away.pl) automatically detects if your Irssi session resides within an attached or detached screen session. If your screen session is detached, this plugin will set your status to away. When you reattach to the session, the plugin unsets the away status.
- [highlite.pl](http://scripts.irssi.org/scripts/highlite.pl) collects in one window all channel events like joins, parts, and quits.
-- [adv\_windowlist.pl](/docs/assets/633-adv_windowlist.pl) provides a more useful and configurable window list if you have trouble with the default window list implementation.
+- [adv_windowlist.pl](/docs/assets/633-adv_windowlist.pl) provides a more useful and configurable window list if you have trouble with the default window list implementation.
You can install all of these scripts to "autorun" when you invoke Irssi the next time by issuing the following sequence of commands:
diff --git a/docs/applications/messaging/install-openfire-on-ubuntu-12-04-for-instant-messaging.md b/docs/applications/messaging/install-openfire-on-ubuntu-12-04-for-instant-messaging.md
index fd8858ba6b8..0646d00bf8d 100644
--- a/docs/applications/messaging/install-openfire-on-ubuntu-12-04-for-instant-messaging.md
+++ b/docs/applications/messaging/install-openfire-on-ubuntu-12-04-for-instant-messaging.md
@@ -43,7 +43,7 @@ If you employ a firewall to specify what ports can be accessed on your Linode, v
- 5222 - Client to Server (standard and encrypted)
- 5223 - Client to Server (legacy SSL support)
- 5229 - Flash Cross Domain (Flash client support)
-- 7070 - HTTP Binding (unsecured HTTP connecitons)
+- 7070 - HTTP Binding (unsecured HTTP connections)
- 7443 - HTTP Binding (secured HTTP connections)
- 7777 - File Transfer Proxy (XMPP file transfers)
- 9090 - Admin Console (unsecured)
diff --git a/docs/applications/messaging/instant-messaging-services-with-ejabberd-on-ubuntu-8-04-hardy.md b/docs/applications/messaging/instant-messaging-services-with-ejabberd-on-ubuntu-8-04-hardy.md
index 8985b496805..f505b14f7e1 100644
--- a/docs/applications/messaging/instant-messaging-services-with-ejabberd-on-ubuntu-8-04-hardy.md
+++ b/docs/applications/messaging/instant-messaging-services-with-ejabberd-on-ubuntu-8-04-hardy.md
@@ -16,7 +16,7 @@ title: 'Instant Messaging Services with ejabberd on Ubuntu 8.04 (Hardy)'
-Ejabberd is a Jabber daemon written in the Erlang programming language. It is extensible, flexible and very high performance. With a web-based interface, and broad support for [XMPP standards](http://xmpp.org/), ejabberd is a great choice for a multi-purpose XMPP server. Ejabberd can be considered "heavyweight" by critics, but mostly due to the requirements of the Erlang run-times. However, it is incredibly robust and can scale to support incredibly heavy loads: ebjabberd servers are believed to be the backbone for some of the largest Jabber servers running now.
+Ejabberd is a Jabber daemon written in the Erlang programming language. It is extensible, flexible and very high performance. With a web-based interface, and broad support for [XMPP standards](http://xmpp.org/), ejabberd is a great choice for a multi-purpose XMPP server. Ejabberd can be considered "heavyweight" by critics, but mostly due to the requirements of the Erlang run-times. However, it is incredibly robust and can scale to support incredibly heavy loads: ejabberd servers are believed to be the backbone for some of the largest Jabber servers running now.
This installation process assumes that you have a working installation of Ubuntu 8.04 (Hardy) and have followed the steps in the [getting started](/docs/getting-started/) guide, and now have an up to date instance of the Ubuntu Hardy operating system and are connected to your Linode via SSH and have root access. Once you've completed these requirements we can begin with the installation process.
diff --git a/docs/applications/messaging/instant-messaging-services-with-ejabberd-on-ubuntu-9-04-jaunty.md b/docs/applications/messaging/instant-messaging-services-with-ejabberd-on-ubuntu-9-04-jaunty.md
index e890825283b..0e365ccc1c5 100644
--- a/docs/applications/messaging/instant-messaging-services-with-ejabberd-on-ubuntu-9-04-jaunty.md
+++ b/docs/applications/messaging/instant-messaging-services-with-ejabberd-on-ubuntu-9-04-jaunty.md
@@ -16,7 +16,7 @@ title: 'Instant Messaging Services with ejabberd on Ubuntu 9.04 (Jaunty)'
-Ejabberd is a Jabber daemon written in the Erlang programming language. It is extensible, flexible and very high performance. With a web-based interface, and broad support for [XMPP standards](http://xmpp.org/), ejabberd is a great choice for a multi-purpose XMPP server. Ejabberd can be considered "heavyweight" by critics, because of the requirements of the Erlang run-times. However, it is incredibly robust and can scale to support incredibly heavy loads. Ebjabberd servers are believed to be the backbone for some of the largest Jabber servers running now.
+Ejabberd is a Jabber daemon written in the Erlang programming language. It is extensible, flexible and very high performance. With a web-based interface, and broad support for [XMPP standards](http://xmpp.org/), ejabberd is a great choice for a multi-purpose XMPP server. Ejabberd can be considered "heavyweight" by critics, because of the requirements of the Erlang run-times. However, it is incredibly robust and can scale to support incredibly heavy loads. Ejabberd servers are believed to be the backbone for some of the largest Jabber servers running now.
This installation process assumes that you have a working installation of Ubuntu 9.04 (Jaunty), have followed the steps in the [getting started](/docs/getting-started/) guide, and now have an up to date instance of the Ubuntu Jaunty operating system. We also assume you are connected to your Linode via SSH as root. Once you've completed these requirements, we can begin with the installation process.
diff --git a/docs/applications/messaging/instant-messaging-services-with-openfire-on-centos-5.md b/docs/applications/messaging/instant-messaging-services-with-openfire-on-centos-5.md
index 8212a274775..1aa62fa94e1 100644
--- a/docs/applications/messaging/instant-messaging-services-with-openfire-on-centos-5.md
+++ b/docs/applications/messaging/instant-messaging-services-with-openfire-on-centos-5.md
@@ -53,7 +53,7 @@ If you employ a firewall to specify what ports can be accessed on your Linode, p
- 5222 - Client to Server (standard and encrypted)
- 5223 - Client to Server (legacy SSL support)
- 5229 - Flash Cross Domain (Flash client support)
-- 7070 - HTTP Binding (unsecured HTTP connecitons)
+- 7070 - HTTP Binding (unsecured HTTP connections)
- 7443 - HTTP Binding (secured HTTP connections)
- 7777 - File Transfer Proxy (XMPP file transfers)
- 9090 - Admin Console (unsecured)
diff --git a/docs/applications/messaging/instant-messaging-services-with-openfire-on-debian-5-lenny.md b/docs/applications/messaging/instant-messaging-services-with-openfire-on-debian-5-lenny.md
index 73797ba24f8..b4739f4ce0c 100644
--- a/docs/applications/messaging/instant-messaging-services-with-openfire-on-debian-5-lenny.md
+++ b/docs/applications/messaging/instant-messaging-services-with-openfire-on-debian-5-lenny.md
@@ -58,7 +58,7 @@ If you employ a firewall to specify what ports can be accessed on your Linode, p
- 5222 - Client to Server (standard and encrypted)
- 5223 - Client to Server (legacy SSL support)
- 5229 - Flash Cross Domain (Flash client support)
-- 7070 - HTTP Binding (unsecured HTTP connecitons)
+- 7070 - HTTP Binding (unsecured HTTP connections)
- 7443 - HTTP Binding (secured HTTP connections)
- 7777 - File Transfer Proxy (XMPP file transfers)
- 9090 - Admin Console (unsecured)
diff --git a/docs/applications/messaging/instant-messaging-services-with-openfire-on-debian-6-squeeze.md b/docs/applications/messaging/instant-messaging-services-with-openfire-on-debian-6-squeeze.md
index ef295395d63..0ddd8289ae0 100644
--- a/docs/applications/messaging/instant-messaging-services-with-openfire-on-debian-6-squeeze.md
+++ b/docs/applications/messaging/instant-messaging-services-with-openfire-on-debian-6-squeeze.md
@@ -63,7 +63,7 @@ If you employ a firewall to specify what ports can be accessed on your Linode, p
- 5222 - Client to Server (standard and encrypted)
- 5223 - Client to Server (legacy SSL support)
- 5229 - Flash Cross Domain (Flash client support)
-- 7070 - HTTP Binding (unsecured HTTP connecitons)
+- 7070 - HTTP Binding (unsecured HTTP connections)
- 7443 - HTTP Binding (secured HTTP connections)
- 7777 - File Transfer Proxy (XMPP file transfers)
- 9090 - Admin Console (unsecured)
diff --git a/docs/applications/messaging/instant-messaging-services-with-openfire-on-ubuntu-10-04-lts-lucid.md b/docs/applications/messaging/instant-messaging-services-with-openfire-on-ubuntu-10-04-lts-lucid.md
index ebd2f1f7a6e..d2b972cd7f8 100644
--- a/docs/applications/messaging/instant-messaging-services-with-openfire-on-ubuntu-10-04-lts-lucid.md
+++ b/docs/applications/messaging/instant-messaging-services-with-openfire-on-ubuntu-10-04-lts-lucid.md
@@ -51,7 +51,7 @@ If you employ a firewall to specify what ports can be accessed on your Linode, p
- 5222 - Client to Server (standard and encrypted)
- 5223 - Client to Server (legacy SSL support)
- 5229 - Flash Cross Domain (Flash client support)
-- 7070 - HTTP Binding (unsecured HTTP connecitons)
+- 7070 - HTTP Binding (unsecured HTTP connections)
- 7443 - HTTP Binding (secured HTTP connections)
- 7777 - File Transfer Proxy (XMPP file transfers)
- 9090 - Admin Console (unsecured)
diff --git a/docs/applications/messaging/instant-messaging-services-with-openfire-on-ubuntu-9-04-jaunty.md b/docs/applications/messaging/instant-messaging-services-with-openfire-on-ubuntu-9-04-jaunty.md
index d9bfb558eb6..3d7eba4f846 100644
--- a/docs/applications/messaging/instant-messaging-services-with-openfire-on-ubuntu-9-04-jaunty.md
+++ b/docs/applications/messaging/instant-messaging-services-with-openfire-on-ubuntu-9-04-jaunty.md
@@ -66,7 +66,7 @@ If you employ a firewall to specify what ports can be accessed on your Linode, p
- 5222 - Client to Server (standard and encrypted)
- 5223 - Client to Server (legacy SSL support)
- 5229 - Flash Cross Domain (Flash client support)
-- 7070 - HTTP Binding (unsecured HTTP connecitons)
+- 7070 - HTTP Binding (unsecured HTTP connections)
- 7443 - HTTP Binding (secured HTTP connections)
- 7777 - File Transfer Proxy (XMPP file transfers)
- 9090 - Admin Console (unsecured)
diff --git a/docs/applications/messaging/instant-messaging-services-with-openfire-on-ubuntu-9-10-karmic.md b/docs/applications/messaging/instant-messaging-services-with-openfire-on-ubuntu-9-10-karmic.md
index 688958c17f8..4c7c345654e 100644
--- a/docs/applications/messaging/instant-messaging-services-with-openfire-on-ubuntu-9-10-karmic.md
+++ b/docs/applications/messaging/instant-messaging-services-with-openfire-on-ubuntu-9-10-karmic.md
@@ -73,7 +73,7 @@ If you employ a firewall to specify what ports can be accessed on your Linode, p
- 5222 - Client to Server (standard and encrypted)
- 5223 - Client to Server (legacy SSL support)
- 5229 - Flash Cross Domain (Flash client support)
-- 7070 - HTTP Binding (unsecured HTTP connecitons)
+- 7070 - HTTP Binding (unsecured HTTP connections)
- 7443 - HTTP Binding (secured HTTP connections)
- 7777 - File Transfer Proxy (XMPP file transfers)
- 9090 - Admin Console (unsecured)
diff --git a/docs/applications/messaging/using-irssi-for-internet-relay-chat.md b/docs/applications/messaging/using-irssi-for-internet-relay-chat.md
index 433f2f5dd54..a79fbc81f8d 100644
--- a/docs/applications/messaging/using-irssi-for-internet-relay-chat.md
+++ b/docs/applications/messaging/using-irssi-for-internet-relay-chat.md
@@ -243,7 +243,7 @@ The `hilight` command will highlight certain words used in the channels you have
/hilight word
-To remove a hilight, use the command:
+To remove a `hilight`, use the command:
/dehilight word
diff --git a/docs/applications/project-management/how-to-create-a-private-python-package-repository.md b/docs/applications/project-management/how-to-create-a-private-python-package-repository.md
index 590b206474d..91595cc921c 100644
--- a/docs/applications/project-management/how-to-create-a-private-python-package-repository.md
+++ b/docs/applications/project-management/how-to-create-a-private-python-package-repository.md
@@ -122,7 +122,7 @@ Next, set up a server to host a package index. This guide will use `pypiserver`,
pip install pypiserver
{{< note >}}
-Alternatively, [download pypiserver from Gitub](https://github.com/pypiserver/pypiserver), then navigate into the downloaded pypiserver directory and install with `python setup.py install`.
+Alternatively, [download pypiserver from GitHub](https://github.com/pypiserver/pypiserver), then navigate into the downloaded pypiserver directory and install with `python setup.py install`.
{{< /note >}}
4. Move `linode_example-0.1.tar.gz` into `~/packages`:
diff --git a/docs/applications/project-management/how-to-install-and-configure-redmine-on-ubuntu-16-04.md b/docs/applications/project-management/how-to-install-and-configure-redmine-on-ubuntu-16-04.md
index b3f47cef1fe..504708be00b 100644
--- a/docs/applications/project-management/how-to-install-and-configure-redmine-on-ubuntu-16-04.md
+++ b/docs/applications/project-management/how-to-install-and-configure-redmine-on-ubuntu-16-04.md
@@ -2,7 +2,7 @@
author:
name: Angel
email: docs@linode.com
-description: 'This guide shows how to install and set up Redmine, a free and open-source project management web application, written using Ruby on Rails, that is is cross-platform and cross-database.'
+description: 'This guide shows how to install and set up Redmine, a free and open-source project management web application, written using Ruby on Rails, that is cross-platform and cross-database.'
keywords: ["nginx", "ubuntu", "redmine"]
license: '[CC BY-ND 4.0](https://creativecommons.org/licenses/by-nd/4.0)'
published: 2017-09-14
diff --git a/docs/applications/project-management/install-farmos.md b/docs/applications/project-management/install-farmos.md
index 34d6b806ae1..a702ffc6662 100644
--- a/docs/applications/project-management/install-farmos.md
+++ b/docs/applications/project-management/install-farmos.md
@@ -9,7 +9,7 @@ published: 2017-09-09
modified: 2017-09-20
modified_by:
name: Linode
-title: 'Install and Configure FarmOS, an Agricultural Management, Planning and Record-Keeping Web App'
+title: 'How to Install FarmOS - a Farm Recordkeeping Application'
---

diff --git a/docs/applications/project-management/jupyter-nobook-on-jekyll.md b/docs/applications/project-management/jupyter-nobook-on-jekyll.md
index 8fc4b8ec50a..887a29ee538 100644
--- a/docs/applications/project-management/jupyter-nobook-on-jekyll.md
+++ b/docs/applications/project-management/jupyter-nobook-on-jekyll.md
@@ -267,7 +267,7 @@ Adding an image through markdown requires having the images stored in the projec

- This is just an example. Adding interactive graphs using Javascript libraries is beyond the scope of this guide.
+ This is just an example. Adding interactive graphs using JavaScript libraries is beyond the scope of this guide.
### Use a CDN to Support MathJax
diff --git a/docs/applications/project-management/setting-up-taskwarrior-on-ubuntu-16-10.md b/docs/applications/project-management/setting-up-taskwarrior-on-ubuntu-16-10.md
index 38b1631e5ce..9d7a0145fe8 100644
--- a/docs/applications/project-management/setting-up-taskwarrior-on-ubuntu-16-10.md
+++ b/docs/applications/project-management/setting-up-taskwarrior-on-ubuntu-16-10.md
@@ -36,7 +36,7 @@ external_resources:
## Install Taskwarrior
-Install Taskwarriror with the command:
+Install Taskwarrior with the command:
sudo apt install task
diff --git a/docs/applications/remote-desktop/remote-desktop-using-apache-guacamole-on-docker.md b/docs/applications/remote-desktop/remote-desktop-using-apache-guacamole-on-docker.md
index ecf0b247822..027765cc506 100644
--- a/docs/applications/remote-desktop/remote-desktop-using-apache-guacamole-on-docker.md
+++ b/docs/applications/remote-desktop/remote-desktop-using-apache-guacamole-on-docker.md
@@ -2,8 +2,8 @@
author:
name: Sam Foo
email: sfoo@linode.com
-description: 'Create a remote desktop on a Linode.'
-og_description: 'This guide will demonstrate how to deploy Apache Guacamole, a utility that allows you to create a virtual cloud desktop on Linode'
+description: 'Use Apache Guacamole, a clientless HTML5 web application, to access your virtual cloud desktop right from a browser. This guide will show how to install Apache Guacamole through Docker on your Linode.'
+og_description: 'Use Apache Guacamole, a clientless HTML5 web application, to access your virtual cloud desktop right from a browser. This guide will show how to install Apache Guacamole through Docker on your Linode.'
keywords: ["remote desktop", "Apache Guacamole", "TeamViewer", "VNC", "Chrome OS", "xfce", "unity"]
license: '[CC BY-ND 4.0](https://creativecommons.org/licenses/by-nd/4.0)'
modified: 2017-12-08
@@ -16,7 +16,7 @@ external_resources:
- '[Apache Tomcat](https://tomcat.apache.org/)'
---
-
+
Apache Guacamole is an HTML5 application useful for accessing a remote desktop through RDP, VNC, and other protocols. You can create a virtual cloud desktop where applications can be accessed through a web browser. This guide will cover the installation of Apache Guacamole through Docker, then access a remote desktop environment hosted on a Linode.
diff --git a/docs/applications/remote-desktop/run-graphic-software-on-your-linode-with-xforwarding-on-ubuntu-12-04.md b/docs/applications/remote-desktop/run-graphic-software-on-your-linode-with-xforwarding-on-ubuntu-12-04.md
index 67901db98b4..83622def0eb 100644
--- a/docs/applications/remote-desktop/run-graphic-software-on-your-linode-with-xforwarding-on-ubuntu-12-04.md
+++ b/docs/applications/remote-desktop/run-graphic-software-on-your-linode-with-xforwarding-on-ubuntu-12-04.md
@@ -32,7 +32,7 @@ This guide is written for a non-root user. Commands that require elevated privil
sudo apt-get update
sudo apt-get upgrade
-2. One of the great things about using a Linux distribution with a dependancy-aware package manager is that you can just install the application you want to run, and it will make sure you have all the required software. If you're installing a graphic utility, that will include X. For now, let's install `xauth`, which is required for X to authenticate through the SSH session:
+2. One of the great things about using a Linux distribution with a dependency-aware package manager is that you can just install the application you want to run, and it will make sure you have all the required software. If you're installing a graphic utility, that will include X. For now, let's install `xauth`, which is required for X to authenticate through the SSH session:
sudo apt-get install xauth
diff --git a/docs/applications/remote-desktop/running-graphic-software-xforwarding-debian.md b/docs/applications/remote-desktop/running-graphic-software-xforwarding-debian.md
index 4ce155f1539..8881937217a 100644
--- a/docs/applications/remote-desktop/running-graphic-software-xforwarding-debian.md
+++ b/docs/applications/remote-desktop/running-graphic-software-xforwarding-debian.md
@@ -31,7 +31,7 @@ This guide is written for a non-root user. Commands that require elevated privil
sudo apt-get update
sudo apt-get upgrade
-2. One of the great things about using a Linux distribution with a dependancy-aware package manager is that you can just install the application you want to run, and it will make sure you have all the required software. If you're installing a graphic utility, that will include X. For now, let's install `xauth`, which is required for X to authenticate through the SSH session:
+2. One of the great things about using a Linux distribution with a dependency-aware package manager is that you can just install the application you want to run, and it will make sure you have all the required software. If you're installing a graphic utility, that will include X. For now, let's install `xauth`, which is required for X to authenticate through the SSH session:
sudo apt-get install xauth
diff --git a/docs/applications/social-networking/social-networking-with-elgg-on-debian-5-lenny.md b/docs/applications/social-networking/social-networking-with-elgg-on-debian-5-lenny.md
index 4f67906839e..ac5fc3be14d 100644
--- a/docs/applications/social-networking/social-networking-with-elgg-on-debian-5-lenny.md
+++ b/docs/applications/social-networking/social-networking-with-elgg-on-debian-5-lenny.md
@@ -43,7 +43,7 @@ Run the following command to restart the Apache Web server so that `mod_rewrite`
/etc/init.d/apache2 restart
-You're now ready to install Elgg. For the purposes of this guide, Elgg will be installed at the root level of an Apache virtual host. The `DocumentRoot` for the virtual host will be located at `/srv/www/example.com/public_html/` and the site will be located at `http://example.com/`. You will need to substitute these paths with the paths that you comfigured in your Elgg virtual host.
+You're now ready to install Elgg. For the purposes of this guide, Elgg will be installed at the root level of an Apache virtual host. The `DocumentRoot` for the virtual host will be located at `/srv/www/example.com/public_html/` and the site will be located at `http://example.com/`. You will need to substitute these paths with the paths that you configured in your Elgg virtual host.
# Installing Elgg
diff --git a/docs/applications/voip/install-asterisk-on-centos-7.md b/docs/applications/voip/install-asterisk-on-centos-7.md
index 42e0ded2a7b..b7945986c7c 100644
--- a/docs/applications/voip/install-asterisk-on-centos-7.md
+++ b/docs/applications/voip/install-asterisk-on-centos-7.md
@@ -29,7 +29,7 @@ This guide is written for a non-root user. Commands that require elevated privil
## Before You Begin
-1. Create a CentOS 7 Linode in your closest datacenter (barring Atlanta, which does not currently support SIP servers). A 2GB Linode is enough to handle 10-20 concurrent calls using a non-compressed codec, depending on the processing required on each channel.
+1. Create a CentOS 7 Linode in your closest data center (barring Atlanta, which does not currently support SIP servers). A 2GB Linode is enough to handle 10-20 concurrent calls using a non-compressed codec, depending on the processing required on each channel.
2. Ensure you have followed the [Getting Started](/docs/getting-started) and [Securing Your Server](/docs/security/securing-your-server) guides to prepare your server. **Do not** follow the section to set up a firewall.
diff --git a/docs/assets/1463-begin-install.PNG b/docs/assets/1463-begin-install.png
similarity index 100%
rename from docs/assets/1463-begin-install.PNG
rename to docs/assets/1463-begin-install.png
diff --git a/docs/assets/1464-generate-key.PNG b/docs/assets/1464-generate-key.png
similarity index 100%
rename from docs/assets/1464-generate-key.PNG
rename to docs/assets/1464-generate-key.png
diff --git a/docs/assets/1465-new-passphrase.PNG b/docs/assets/1465-new-passphrase.png
similarity index 100%
rename from docs/assets/1465-new-passphrase.PNG
rename to docs/assets/1465-new-passphrase.png
diff --git a/docs/assets/1466-new-public-key.PNG b/docs/assets/1466-new-public-key.png
similarity index 100%
rename from docs/assets/1466-new-public-key.PNG
rename to docs/assets/1466-new-public-key.png
diff --git a/docs/assets/1468-warning.PNG b/docs/assets/1468-warning.png
similarity index 100%
rename from docs/assets/1468-warning.PNG
rename to docs/assets/1468-warning.png
diff --git a/docs/assets/1470-random-generating.PNG b/docs/assets/1470-random-generating.png
similarity index 100%
rename from docs/assets/1470-random-generating.PNG
rename to docs/assets/1470-random-generating.png
diff --git a/docs/assets/1473-private-key-file-location.PNG b/docs/assets/1473-private-key-file-location.png
similarity index 100%
rename from docs/assets/1473-private-key-file-location.PNG
rename to docs/assets/1473-private-key-file-location.png
diff --git a/docs/assets/1474-save-private-key.PNG b/docs/assets/1474-save-private-key.png
similarity index 100%
rename from docs/assets/1474-save-private-key.PNG
rename to docs/assets/1474-save-private-key.png
diff --git a/docs/assets/1475-saved-session.PNG b/docs/assets/1475-saved-session.png
similarity index 100%
rename from docs/assets/1475-saved-session.PNG
rename to docs/assets/1475-saved-session.png
diff --git a/docs/assets/R/rstudio-server-login.png b/docs/assets/R/rstudio-server-login.png
new file mode 100644
index 00000000000..149c3ffb938
Binary files /dev/null and b/docs/assets/R/rstudio-server-login.png differ
diff --git a/docs/assets/R/rstudio-server-page.png b/docs/assets/R/rstudio-server-page.png
new file mode 100644
index 00000000000..3817acca506
Binary files /dev/null and b/docs/assets/R/rstudio-server-page.png differ
diff --git a/docs/assets/Rancher/catalog.png b/docs/assets/Rancher/catalog.png
new file mode 100644
index 00000000000..2b05e760429
Binary files /dev/null and b/docs/assets/Rancher/catalog.png differ
diff --git a/docs/assets/Rancher/rancher_container_config.png b/docs/assets/Rancher/rancher_container_config.png
new file mode 100644
index 00000000000..fb46a732348
Binary files /dev/null and b/docs/assets/Rancher/rancher_container_config.png differ
diff --git a/docs/assets/Rancher/rancher_first_screen.png b/docs/assets/Rancher/rancher_first_screen.png
new file mode 100644
index 00000000000..c0f9882c018
Binary files /dev/null and b/docs/assets/Rancher/rancher_first_screen.png differ
diff --git a/docs/assets/Rancher/rancher_ghost.png b/docs/assets/Rancher/rancher_ghost.png
new file mode 100644
index 00000000000..0663accf895
Binary files /dev/null and b/docs/assets/Rancher/rancher_ghost.png differ
diff --git a/docs/assets/Rancher/rancher_options.png b/docs/assets/Rancher/rancher_options.png
new file mode 100644
index 00000000000..dffa17282eb
Binary files /dev/null and b/docs/assets/Rancher/rancher_options.png differ
diff --git a/docs/assets/Rancher/register_host.png b/docs/assets/Rancher/register_host.png
new file mode 100644
index 00000000000..6543f66c620
Binary files /dev/null and b/docs/assets/Rancher/register_host.png differ
diff --git a/docs/assets/apache-ssl/SSL_Certificates_with_Apache_on_CentOS_7_smg.jpg b/docs/assets/apache-ssl/SSL_Certificates_with_Apache_on_CentOS_7_smg.jpg
new file mode 100644
index 00000000000..ed3b6bc348b
Binary files /dev/null and b/docs/assets/apache-ssl/SSL_Certificates_with_Apache_on_CentOS_7_smg.jpg differ
diff --git a/docs/assets/backing-up-data/Backing_Up_Your_Data_smg.jpg b/docs/assets/backing-up-data/Backing_Up_Your_Data_smg.jpg
new file mode 100644
index 00000000000..7f1cef0cf1c
Binary files /dev/null and b/docs/assets/backing-up-data/Backing_Up_Your_Data_smg.jpg differ
diff --git a/docs/assets/connect-docker-containers/Connect_Docker_Containers.jpg b/docs/assets/connect-docker-containers/Connect_Docker_Containers.jpg
new file mode 100644
index 00000000000..eb300e8b542
Binary files /dev/null and b/docs/assets/connect-docker-containers/Connect_Docker_Containers.jpg differ
diff --git a/docs/assets/multicraft/Installing_Multicraft_on_Ubuntu_smg.jpg b/docs/assets/multicraft/Installing_Multicraft_on_Ubuntu_smg.jpg
new file mode 100644
index 00000000000..667aa700302
Binary files /dev/null and b/docs/assets/multicraft/Installing_Multicraft_on_Ubuntu_smg.jpg differ
diff --git a/docs/assets/node-nginx/How_to_Install_Nodejs_and_Nginx_on_Debian_smg.jpg b/docs/assets/node-nginx/How_to_Install_Nodejs_and_Nginx_on_Debian_smg.jpg
new file mode 100644
index 00000000000..23ff110f2a0
Binary files /dev/null and b/docs/assets/node-nginx/How_to_Install_Nodejs_and_Nginx_on_Debian_smg.jpg differ
diff --git a/docs/assets/nodebalancer/NodeBalancer_Reference_Guide_smg.jpg b/docs/assets/nodebalancer/NodeBalancer_Reference_Guide_smg.jpg
new file mode 100644
index 00000000000..544c4a19f98
Binary files /dev/null and b/docs/assets/nodebalancer/NodeBalancer_Reference_Guide_smg.jpg differ
diff --git a/docs/assets/openvz/openvz_one.PNG b/docs/assets/openvz/openvz_one.png
similarity index 100%
rename from docs/assets/openvz/openvz_one.PNG
rename to docs/assets/openvz/openvz_one.png
diff --git a/docs/assets/openvz/openvz_three.PNG b/docs/assets/openvz/openvz_three.png
similarity index 100%
rename from docs/assets/openvz/openvz_three.PNG
rename to docs/assets/openvz/openvz_three.png
diff --git a/docs/assets/openvz/openvz_two.PNG b/docs/assets/openvz/openvz_two.png
similarity index 100%
rename from docs/assets/openvz/openvz_two.PNG
rename to docs/assets/openvz/openvz_two.png
diff --git a/docs/assets/openvz_one.PNG b/docs/assets/openvz_one.png
similarity index 100%
rename from docs/assets/openvz_one.PNG
rename to docs/assets/openvz_one.png
diff --git a/docs/assets/phpmyadmin-debian-mysql/How_to_Install_MySQL_with_phpMyAdmin_on_Debian_7_smg.jpg b/docs/assets/phpmyadmin-debian-mysql/How_to_Install_MySQL_with_phpMyAdmin_on_Debian_7_smg.jpg
new file mode 100644
index 00000000000..bcb361d8dd3
Binary files /dev/null and b/docs/assets/phpmyadmin-debian-mysql/How_to_Install_MySQL_with_phpMyAdmin_on_Debian_7_smg.jpg differ
diff --git a/docs/assets/pritunl/Pritunl_VPN_Server_and_Management_Panel_on_Ubuntu_1404_smg.jpg b/docs/assets/pritunl/Pritunl_VPN_Server_and_Management_Panel_on_Ubuntu_1404_smg.jpg
new file mode 100644
index 00000000000..1b10ef0e039
Binary files /dev/null and b/docs/assets/pritunl/Pritunl_VPN_Server_and_Management_Panel_on_Ubuntu_1404_smg.jpg differ
diff --git a/docs/assets/squid/Creating_an_HTTP_Proxy_Using_Squid_on_CentOS_64_smg.jpg b/docs/assets/squid/Creating_an_HTTP_Proxy_Using_Squid_on_CentOS_64_smg.jpg
new file mode 100644
index 00000000000..f0d227646a0
Binary files /dev/null and b/docs/assets/squid/Creating_an_HTTP_Proxy_Using_Squid_on_CentOS_64_smg.jpg differ
diff --git a/docs/assets/ssh-tunnel/Setting_up_an_SSH_Tunnel_with_Your_Linode_for_Safe_Browsing_smg.jpg b/docs/assets/ssh-tunnel/Setting_up_an_SSH_Tunnel_with_Your_Linode_for_Safe_Browsing_smg.jpg
new file mode 100644
index 00000000000..2af6395c657
Binary files /dev/null and b/docs/assets/ssh-tunnel/Setting_up_an_SSH_Tunnel_with_Your_Linode_for_Safe_Browsing_smg.jpg differ
diff --git a/docs/assets/ssl-cert-centos/Obtain_a_Commercially_Signed_SSL_Certificate_on_CentOS_and_Fedora_smg.jpg b/docs/assets/ssl-cert-centos/Obtain_a_Commercially_Signed_SSL_Certificate_on_CentOS_and_Fedora_smg.jpg
new file mode 100644
index 00000000000..fdd40a2e70f
Binary files /dev/null and b/docs/assets/ssl-cert-centos/Obtain_a_Commercially_Signed_SSL_Certificate_on_CentOS_and_Fedora_smg.jpg differ
diff --git a/docs/assets/team_fortress_2_server_config b/docs/assets/team_fortress_2_server_config.cfg
similarity index 100%
rename from docs/assets/team_fortress_2_server_config
rename to docs/assets/team_fortress_2_server_config.cfg
diff --git a/docs/assets/thingsboard/ThingsBoard.jpg b/docs/assets/thingsboard/ThingsBoard.jpg
new file mode 100644
index 00000000000..e2770f57f2e
Binary files /dev/null and b/docs/assets/thingsboard/ThingsBoard.jpg differ
diff --git a/docs/assets/thingsboard/latest-telemetry.png b/docs/assets/thingsboard/latest-telemetry.png
new file mode 100644
index 00000000000..6a2df14b031
Binary files /dev/null and b/docs/assets/thingsboard/latest-telemetry.png differ
diff --git a/docs/assets/thingsboard/login.png b/docs/assets/thingsboard/login.png
new file mode 100644
index 00000000000..f0c1737320d
Binary files /dev/null and b/docs/assets/thingsboard/login.png differ
diff --git a/docs/assets/thingsboard/pi-dashboard.png b/docs/assets/thingsboard/pi-dashboard.png
new file mode 100644
index 00000000000..2bf70ea19ec
Binary files /dev/null and b/docs/assets/thingsboard/pi-dashboard.png differ
diff --git a/docs/assets/zimbra/Install_Zimbra_Open_Source_Edition_on_Ubuntu_1404_smg.jpg b/docs/assets/zimbra/Install_Zimbra_Open_Source_Edition_on_Ubuntu_1404_smg.jpg
new file mode 100644
index 00000000000..6590f961182
Binary files /dev/null and b/docs/assets/zimbra/Install_Zimbra_Open_Source_Edition_on_Ubuntu_1404_smg.jpg differ
diff --git a/docs/contribute.md b/docs/contribute.md
index 6eab2ad4543..6ea876ce601 100644
--- a/docs/contribute.md
+++ b/docs/contribute.md
@@ -15,9 +15,9 @@ show_on_rss_feed: false
---
{{< topics >}}
-* Introduction to the Kubernetes Scheduler
+
{{< /topics >}}
{{< topics >}}
-* Build and Manage Applications with Dokku
+
{{< /topics >}}
diff --git a/docs/databases/cassandra/set-up-a-cassandra-node-cluster-on-ubuntu-and-centos.md b/docs/databases/cassandra/set-up-a-cassandra-node-cluster-on-ubuntu-and-centos.md
index 4aa2c581c66..dfd35378b92 100644
--- a/docs/databases/cassandra/set-up-a-cassandra-node-cluster-on-ubuntu-and-centos.md
+++ b/docs/databases/cassandra/set-up-a-cassandra-node-cluster-on-ubuntu-and-centos.md
@@ -57,7 +57,7 @@ The instructions here must be executed on each Cassandra node to be clustered. A
| seed_provider | This contains a comma-delimited list of each public IP address of each node to be clustered. Input the list in the line that reads `- seeds: "127.0.0.1"`. |
| listen_address | Other nodes in the cluster will use the IP address listed here to find each other. Change from `localhost` to the specific node's public IP address. |
| rpc_address | The listen address for client communication. Change from "localhost" to the public IP address or loopback address of the node. |
- | endpoint_snitch | Snitches determine how Cassandra replicates data. Change this to "GossipingPropertyFileSnitch," as this is more suitable to a multi-datacenter configuration. |
+ | endpoint_snitch | Snitches determine how Cassandra replicates data. Change this to "GossipingPropertyFileSnitch," as this is more suitable to a multi-data center configuration. |
| auto_bootstrap | Add this property anywhere in the file. If you have yet to add data to your nodes - that is, you would start with a fresh cluster - set this to "false." If your node(s) already contains data, **do not** add this property. |
| num_tokens | This property defines the proportion of data stored on each node. For nodes with equal hardware capabilities, this number should be set equally between them so the data is more likely to be evenly distributed. The default value of 256 is likely to ensure equal data distribution. For more information on this topic, see the "How data is distributed across a cluster" link in the "External Resources" section. |
@@ -75,7 +75,7 @@ auto_bootstrap: false
{{< /file >}}
-3. Edit the `cassandra-rackdc.properties` file. Assign each node the same datacenter and rack name:
+3. Edit the `cassandra-rackdc.properties` file. Assign each node the same data center and rack name:
{{< file "/etc/cassandra/conf/cassandra-rackdc.properties" properties >}}
# These properties are used with GossipingPropertyFileSnitch and will
@@ -233,7 +233,7 @@ You may want to configure the *internode_encryption* setting to better meet the
|:----------:|:-------------:|
| all | All traffic between nodes is encrypted. |
| none | No traffic is encrypted. |
-| dc | Only traffic between datacenters is encrypted. |
+| dc | Only traffic between data centers is encrypted. |
| rack | Only traffic between server racks is encrypted. |
## Verify SSL Setup
diff --git a/docs/databases/elasticsearch/a-guide-to-elasticsearch-plugins.md b/docs/databases/elasticsearch/a-guide-to-elasticsearch-plugins.md
new file mode 100644
index 00000000000..cc9b2a86426
--- /dev/null
+++ b/docs/databases/elasticsearch/a-guide-to-elasticsearch-plugins.md
@@ -0,0 +1,368 @@
+---
+author:
+ name: Linode
+ email: docs@linode.com
+contributor:
+ name: Tyler Langlois
+ link: https://tjll.net
+description: 'This guide shows how to install a variety of useful Elasticsearch plugins.'
+og_description: 'Elasticsearch supports a wide variety of plugins which enable more powerful search features. This guide will explore how to manage, install, and use these plugins to better leverage Elasticsearch for different use cases.'
+external_resources:
+ - '[Elastic Documentation](https://www.elastic.co/guide/index.html)'
+ - '[Elasticsearch reference](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html)'
+ - '[Elasticsearch Plugins Reference](https://www.elastic.co/guide/en/elasticsearch/plugins/current/index.html)'
+keywords: ['elastic', 'elasticsearch', 'plugins', 'search', 'analytics', 'search engine']
+license: '[CC BY-ND 4.0](https://creativecommons.org/licenses/by-nd/4.0)'
+published: 2018-01-09
+modified: 2018-01-09
+modified_by:
+ name: Linode
+title: 'How to Install and Use Elasticsearch Plugins'
+---
+
+## What are Elasticsearch Plugins?
+
+[Elasticsearch](https://www.elastic.co/products/elasticsearch) is an open source, scalable search engine. Although Elasticsearch supports a large number of features out-of-the-box, it can also be extended with a variety of [plugins](https://www.elastic.co/guide/en/elasticsearch/plugins/6.1/index.html) to provide advanced analytics and process different data types.
+
+This guide will show how to install the following Elasticsearch plugins and interact with them using the Elasticsearch API:
+
+ * **ingest-attachment**: allows Elasticsearch to index and search base64-encoded documents in formats such as RTF, PDF, and PPT.
+ * **analysis-phonetic**: identifies search results that sound similar to the search term.
+ * **ingest-geoip**: adds location information to indexed documents based on any IP addresses within the document.
+ * **ingest-user-agent**: parses the `User-Agent` header of HTTP requests to provide identifying information about the client that sent each request.
+
+{{< note >}}
+This guide is written for a non-root user. Commands that require elevated privileges are prefixed with `sudo`. If you're not familiar with the `sudo` command, you can check our [Users and Groups](/docs/tools-reference/linux-users-and-groups) guide.
+{{< /note >}}
+
+## Before You Begin
+
+1. Familiarize yourself with our [Getting Started](/docs/getting-started) guide and complete the steps for setting your Linode's hostname and timezone.
+
+2. This guide will use `sudo` wherever possible. Complete the sections of our [Securing Your Server](/docs/security/securing-your-server) to create a standard user account, harden SSH access and remove unnecessary network services.
+
+3. Update your system:
+
+ sudo apt-get update && sudo apt-get upgrade
+
+## Installation
+
+### Java
+
+As of this writing, Elasticsearch requires Java 8.
+
+1. OpenJDK 8 is available from the official repositories. Install the headless OpenJDK 8 package:
+
+ sudo apt install openjdk-8-jre-headless
+
+2. Confirm that Java is installed:
+
+ java -version
+
+ The output should be similar to:
+
+ openjdk version "1.8.0_151"
+ OpenJDK Runtime Environment (build 1.8.0_151-8u151-b12-1~deb9u1-b12)
+ OpenJDK 64-Bit Server VM (build 25.151-b12, mixed mode)
+
+### Elasticsearch
+
+{{< content "install_elasticsearch_debian_ubuntu.md" >}}
+
+ You are now ready to install and use Elasticsearch plugins.
+
+## Elasticsearch Plugins
+
+The remainder of this guide will walk through several plugins and common use cases. Many of the following steps will involve communicating with the Elasticsearch API. For example, in order to index a sample document into Elasticsearch, a `POST` request with a JSON payload must be sent to `/{index name}/{type}/{document id}`:
+
+ POST /exampleindex/doc/1
+ {
+ "message": "this is the value for the message field"
+ }
+
+There are a number of tools that can be used to issue this request. The simplest approach would be to use `curl` from the command line:
+
+ curl -H'Content-Type: application/json' -XPOST localhost:9200/exampleindex/doc/1 -d '{ "message": "this is the value for the message field" }'
+
+Other alternatives include the [vim-rest-console](https://github.com/diepm/vim-rest-console), the Emacs plugin [es-mode](https://github.com/dakrone/es-mode), or the [Console](https://www.elastic.co/guide/en/kibana/current/console-kibana.html) plugin for Kibana. Use whichever tool is most convenient for you.
+
+### Prepare an Index
+
+Before installing any plugins, create a test index.
+
+1. Create an index named `test` with one shard and no replicas:
+
+ POST /test
+ {
+ "settings": {
+ "index": {
+ "number_of_replicas": 0,
+ "number_of_shards": 1
+ }
+ }
+ }
+
+ {{< note >}}
+These settings are suitable for testing, but additional shards and replicas should be used in a production environment.
+{{< /note >}}
+
+2. Add an example document to the index:
+
+ POST /test/doc/1
+ {
+ "message": "this is an example document"
+ }
+
+3. Searches can be performed by using the `_search` URL endpoint. Search for "example" in the message field across all documents:
+
+ POST /_search
+ {
+ "query": {
+ "terms": {
+ "message": ["example"]
+ }
+ }
+ }
+
+ The Elasticsearch API should return the matching document.
+
+### Elasticsearch Attachment Plugin
+
+The attachment plugin lets Elasticsearch accept a base64-encoded document and index its contents for easy searching. This is useful for searching PDF or rich text documents with minimal overhead.
+
+1. Install the `ingest-attachment` plugin using the `elasticsearch-plugin` tool:
+
+ sudo /usr/share/elasticsearch/bin/elasticsearch-plugin install ingest-attachment
+
+2. Restart Elasticsearch:
+
+ sudo systemctl restart elasticsearch
+
+3. Confirm that the plugin is installed as expected by using the `_cat` API:
+
+ GET /_cat/plugins
+
+ The `ingest-attachment` plugin should be under the list of installed plugins.
+
+In order to use the attachment plugin, a _pipeline_ must be used to process base64-encoded data in the field of a document. An [ingest pipeline](https://www.elastic.co/guide/en/elasticsearch/reference/master/ingest.html) is a way of performing additional steps when indexing a document in Elasticsearch. While Elasticsearch comes pre-installed with some pipeline *processors* (which can perform actions such as removing or adding fields), the attachment plugin installs an additional processor that can be used when defining a pipeline.
+
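+As an illustration of the general pipeline syntax, a pipeline that uses only the bundled `set` processor might look like the following. This is just an example of the format; the `add-source-field` pipeline name and the field values are placeholders and are not used elsewhere in this guide:
+
+ PUT /_ingest/pipeline/add-source-field
+ {
+ "description" : "Example pipeline using the built-in set processor to add a field",
+ "processors" : [ { "set" : { "field" : "source", "value" : "example" } } ]
+ }
+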
+1. Create a pipeline called `doc-parser` which takes data from a field called `encoded_doc` and executes the `attachment` processor on the field:
+
+ PUT /_ingest/pipeline/doc-parser
+ {
+ "description" : "Extract text from base-64 encoded documents",
+ "processors" : [ { "attachment" : { "field" : "encoded_doc" } } ]
+ }
+
+ The `doc-parser` pipeline can now be specified when indexing documents to extract data from the `encoded_doc` field.
+
+ {{< note >}}
+By default, the attachment processor will create a new field called `attachment` with the parsed content of the target field. See the [attachment processor documentation](https://www.elastic.co/guide/en/elasticsearch/plugins/6.1/using-ingest-attachment.html) for additional information.
+{{< /note >}}
+
+2. Index an example RTF (rich text format) document. The following string is a base64-encoded RTF document containing the text "Hello from inside of a rich text RTF document":
+
+ e1xydGYxXGFuc2kKSGVsbG8gZnJvbSBpbnNpZGUgb2YgYSByaWNoIHRleHQgUlRGIGRvY3VtZW50LgpccGFyIH0K
+
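+ If you want to generate a similar string from your own file, a command along these lines should work on systems with GNU coreutils (`mydoc.rtf` is a placeholder filename):
+
+ base64 -w 0 mydoc.rtf
+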
+3. Add this document to the test index, using the `?pipeline=doc-parser` parameter to specify the new pipeline:
+
+ PUT /test/doc/rtf?pipeline=doc-parser
+ {
+ "encoded_doc": "e1xydGYxXGFuc2kKSGVsbG8gZnJvbSBpbnNpZGUgb2YgYSByaWNoIHRleHQgUlRGIGRvY3VtZW50LgpccGFyIH0K"
+ }
+
+4. Search for the term "rich", which should return the indexed document:
+
+ POST /_search
+ {
+ "query": {
+ "terms": {
+ "attachment.content": ["rich"]
+ }
+ }
+ }
+
+ This technique may be used to index and search other document types including PDF, PPT, and XLS. See the [Apache Tika Project](http://tika.apache.org/) (which provides the underlying text extraction implementation) for additional supported file formats.
+
+### Phonetic Analysis Plugin
+
+Elasticsearch excels at analyzing textual data. Several *analyzers* that can perform powerful analyses on text come bundled with Elasticsearch.
+
+One of these analyzers is the [Phonetic Analysis](https://www.elastic.co/guide/en/elasticsearch/plugins/6.1/analysis-phonetic.html) plugin. By using this plugin, it is possible to search for terms that sound similar to other words.
+
+1. Install the `analysis-phonetic` plugin:
+
+ sudo /usr/share/elasticsearch/bin/elasticsearch-plugin install analysis-phonetic
+
+2. Restart Elasticsearch:
+
+ sudo systemctl restart elasticsearch
+
+3. Confirm that the plugin has been successfully installed:
+
+ GET /_cat/plugins
+
+In order to use this plugin, the following changes must be made to the test index:
+
+* A *filter* must be created. This filter will be used to process the tokens that are created for fields of an indexed document.
+* This filter will be used by an *analyzer*. An analyzer determines how a field is tokenized and how those tokenized items are processed by filters.
+* Finally, we will configure the test index to use this analyzer for a field in the index with a *mapping*.
+
+An index must be closed before analyzers and filters can be added.
+
+1. Close the test index:
+
+ POST /test/_close
+
+2. Define the analyzer and filter for the test index under the `_settings` API:
+
+ PUT /test/_settings
+ {
+ "analysis": {
+ "analyzer": {
+ "my_phonetic_analyzer": {
+ "tokenizer": "standard",
+ "filter": [
+ "standard",
+ "lowercase",
+ "my_phonetic_filter"
+ ]
+ }
+ },
+ "filter": {
+ "my_phonetic_filter": {
+ "type": "phonetic",
+ "encoder": "metaphone",
+ "replace": false
+ }
+ }
+ }
+ }
+
+3. Re-open the index to enable searching and indexing:
+
+ POST /test/_open
+
+4. Define a mapping for a field named `phonetic` which will use the `my_phonetic_analyzer` analyzer:
+
+ POST /test/_mapping/doc
+ {
+ "properties": {
+ "phonetic": {
+ "type": "text",
+ "analyzer": "my_phonetic_analyzer"
+ }
+ }
+ }
+
+5. Index a document with a JSON field called `phonetic` with content that should be passed through the phonetic analyzer:
+
+ POST /test/doc
+ {
+ "phonetic": "black leather ottoman"
+ }
+
+6. Perform a `match` search for the term "ottoman". However, instead of spelling the term correctly, misspell the word such that the misspelled word is phonetically similar:
+
+ POST /_search
+ {
+ "query": {
+ "match": {
+ "phonetic": "otomen"
+ }
+ }
+ }
+
+ The phonetic analysis plugin should be able to recognize that "otomen" and "ottoman" are phonetically similar, and return the correct result.
+
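+To inspect what the filter actually produces, you can optionally query the `_analyze` API with the `my_phonetic_analyzer` analyzer defined above. Because the filter was created with `"replace": false`, the response should contain both the original token and its metaphone encoding:
+
+ GET /test/_analyze
+ {
+ "analyzer": "my_phonetic_analyzer",
+ "text": "ottoman"
+ }
+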
+### Geoip Processor Plugin
+
+When indexing documents such as log files, some fields may contain IP addresses. The Geoip plugin can process IP addresses in order to enrich documents with location data.
+
+1. Install the plugin:
+
+ sudo /usr/share/elasticsearch/bin/elasticsearch-plugin install ingest-geoip
+
+2. Restart Elasticsearch:
+
+ sudo systemctl restart elasticsearch
+
+3. Confirm the plugin is installed by checking the API:
+
+ GET /_cat/plugins
+
+
+As with the `ingest-attachment` pipeline plugin, the `ingest-geoip` plugin is used as a processor within an ingest pipeline. The [Geoip plugin documentation](https://www.elastic.co/guide/en/elasticsearch/plugins/6.1/using-ingest-geoip.html) outlines the available settings when creating processors within a pipeline.
+
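+For example, the `target_field` and `properties` settings control where the enriched data is written and which attributes are included. The following sketch (with the hypothetical pipeline name `parse-ip-country`) only illustrates those settings and is not used in the steps below:
+
+ PUT /_ingest/pipeline/parse-ip-country
+ {
+ "description" : "Geolocate an IP address, keeping only country information",
+ "processors" : [ { "geoip" : { "field" : "ip", "target_field" : "geo", "properties" : [ "country_iso_code", "country_name" ] } } ]
+ }
+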
+1. Create a pipeline called `parse-ip` which consumes an IP address from a field called `ip` and creates regional information underneath the default field (`geoip`):
+
+ PUT /_ingest/pipeline/parse-ip
+ {
+ "description" : "Geolocate an IP address",
+ "processors" : [ { "geoip" : { "field" : "ip" } } ]
+ }
+
+2. Add a mapping to the index to indicate that the `ip` field should be stored as an IP address in the underlying storage engine:
+
+ POST /test/_mapping/doc
+ {
+ "properties": {
+ "ip": {
+ "type": "ip"
+ }
+ }
+ }
+
+3. Index a document with the `ip` field set to an example address, and pass the `pipeline=parse-ip` parameter in the request to use the `parse-ip` pipeline to process the document:
+
+ PUT /test/doc/ipexample?pipeline=parse-ip
+ {
+ "ip": "8.8.8.8"
+ }
+
+4. Retrieve the document to view the fields created by the pipeline:
+
+ GET /test/doc/ipexample
+
+ The response should include a `geoip` JSON key with fields such as `city_name` derived from the source IP address. The plugin should correctly determine that the IP address is located in California.
+
+### User Agent Processor Plugin
+
+A common use case for Elasticsearch is to index log files. By parsing certain fields from web server access logs, requests can be more effectively searched by response code, URL, and more. The `ingest-user-agent` plugin adds the capability to parse the contents of the `User-Agent` header of web requests and create additional fields that precisely identify the client platform that performed each request.
+
+1. Install the plugin:
+
+ sudo /usr/share/elasticsearch/bin/elasticsearch-plugin install ingest-user-agent
+
+2. Restart Elasticsearch:
+
+ sudo systemctl restart elasticsearch
+
+3. Confirm the plugin is installed:
+
+ GET /_cat/plugins
+
+4. Create an ingest pipeline which instructs Elasticsearch which field to reference when parsing a user agent string:
+
+ PUT /_ingest/pipeline/useragent
+ {
+ "description" : "Parse User-Agent content",
+ "processors" : [ { "user_agent" : { "field" : "agent" } } ]
+ }
+
+5. Index a document with the `agent` field set to an example `User-Agent` string:
+
+ PUT /test/doc/agentexample?pipeline=useragent
+ {
+ "agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36"
+ }
+
+6. Retrieve the document to view the fields created by the pipeline:
+
+ GET /test/doc/agentexample
+
+ The indexed document will include user data underneath the `user_agent` JSON key. The User Agent plugin understands a variety of `User-Agent` strings and can reliably parse `User-Agent` fields from access logs generated by web servers such as Apache and NGINX.
+
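+Once the parsed fields are indexed, they can be searched like any other field. For example, a query along these lines (assuming the default dynamic mapping for the `user_agent.name` field) should return the document indexed above:
+
+ POST /_search
+ {
+ "query": {
+ "match": {
+ "user_agent.name": "Chrome"
+ }
+ }
+ }
+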
+## Conclusion
+
+The plugins covered in this tutorial are a small subset of those available from Elastic or written by third parties. For additional resources regarding Elasticsearch and plugin use, see the links in the **More Information** section below.
diff --git a/docs/databases/elasticsearch/install_elasticsearch_centos.md b/docs/databases/elasticsearch/install_elasticsearch_centos.md
new file mode 100644
index 00000000000..673296f7566
--- /dev/null
+++ b/docs/databases/elasticsearch/install_elasticsearch_centos.md
@@ -0,0 +1,76 @@
+---
+author:
+ name: Jared Kobos
+ email: sfoo@linode.com
+description: 'Shortguide for installing Elasticsearch on Fedora, Red Hat, and CentOS systems'
+license: '[CC BY-ND 4.0](https://creativecommons.org/licenses/by-nd/4.0)'
+keywords: ["elasticsearch", "elastic stack", "fedora", "red hat", "centos"]
+modified: 2018-01-08
+modified_by:
+ name: Linode
+title: "Install Elasticsearch on Fedora, Red Hat, and CentOS"
+published: 2018-01-09
+shortguide: true
+---
+
+1. Trust the Elastic signing key:
+
+ sudo rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch
+
+2. Create a yum repository configuration to use the Elastic yum repository:
+
+ {{< file-excerpt "/etc/yum.repos.d/elastic.repo" ini >}}
+[elasticsearch-6.x]
+name=Elastic repository for 6.x packages
+baseurl=https://artifacts.elastic.co/packages/6.x/yum
+gpgcheck=1
+gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
+enabled=1
+autorefresh=1
+type=rpm-md
+{{< /file-excerpt >}}
+
+3. Update the yum cache to ensure the latest packages will be installed:
+
+ sudo yum update
+
+4. Install the `elasticsearch` package:
+
+ sudo yum install -y elasticsearch
+
+5. Set the JVM heap size to approximately half of your server's available memory. For example, if your server has 1GB of RAM, change the `Xms` and `Xmx` values in the `/etc/elasticsearch/jvm.options` file to `512m`, and leave the other values in this file unchanged:
+
+ {{< file "/etc/elasticsearch/jvm.options" aconf >}}
+-Xms512m
+-Xmx512m
+{{< /file >}}
+
+6. Enable and start the `elasticsearch` service:
+
+ sudo systemctl enable elasticsearch
+ sudo systemctl start elasticsearch
+
+7. Wait a few moments for the service to start, then confirm that the Elasticsearch API is available:
+
+ curl localhost:9200
+
+ The Elasticsearch REST API should return a JSON response similar to the following:
+
+ {{< output >}}
+{
+ "name" : "Sch1T0D",
+ "cluster_name" : "docker-cluster",
+ "cluster_uuid" : "MH6WKAm0Qz2r8jFK-TcbNg",
+ "version" : {
+ "number" : "6.1.1",
+ "build_hash" : "bd92e7f",
+ "build_date" : "2017-12-17T20:23:25.338Z",
+ "build_snapshot" : false,
+ "lucene_version" : "7.1.0",
+ "minimum_wire_compatibility_version" : "5.6.0",
+ "minimum_index_compatibility_version" : "5.0.0"
+ },
+ "tagline" : "You Know, for Search"
+}
+{{< /output >}}
+
+8. To determine whether or not the service has started successfully, view the most recent logs:
+
+ systemctl status elasticsearch
\ No newline at end of file
diff --git a/docs/databases/elasticsearch/install_elasticsearch_debian_ubuntu.md b/docs/databases/elasticsearch/install_elasticsearch_debian_ubuntu.md
new file mode 100644
index 00000000000..a8a48993a2d
--- /dev/null
+++ b/docs/databases/elasticsearch/install_elasticsearch_debian_ubuntu.md
@@ -0,0 +1,74 @@
+---
+author:
+ name: Jared Kobos
+ email: sfoo@linode.com
+description: 'Shortguide for installing Elasticsearch on Debian and Ubuntu systems'
+license: '[CC BY-ND 4.0](https://creativecommons.org/licenses/by-nd/4.0)'
+keywords: ["elasticsearch", "elastic stack", "debian", "ubuntu"]
+modified: 2018-01-08
+modified_by:
+ name: Linode
+title: "Install Elasticsearch on Debian and Ubuntu"
+published: 2018-01-09
+shortguide: true
+---
+
+1. Install the official Elastic APT package signing key:
+
+ wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -
+
+2. Install the `apt-transport-https` package, which is required to retrieve deb packages served over HTTPS:
+
+ sudo apt-get install apt-transport-https
+
+3. Add the APT repository information to your server's list of sources:
+
+ echo "deb https://artifacts.elastic.co/packages/6.x/apt stable main" | sudo tee -a /etc/apt/sources.list.d/elastic.list
+
+4. Update the list of available packages:
+
+ sudo apt-get update
+
+5. Install the `elasticsearch` package:
+
+ sudo apt-get install -y elasticsearch
+
+6. Set the JVM heap size to approximately half of your server's available memory. For example, if your server has 1GB of RAM, change the `Xms` and `Xmx` values in the `/etc/elasticsearch/jvm.options` file to `512m`. Leave the other values in this file unchanged:
+
+ {{< file "/etc/elasticsearch/jvm.options" conf >}}
+-Xms512m
+-Xmx512m
+{{< /file >}}
+
+7. Enable and start the `elasticsearch` service:
+
+ sudo systemctl enable elasticsearch
+ sudo systemctl start elasticsearch
+
+8. Wait a few moments for the service to start, then confirm that the Elasticsearch API is available:
+
+ curl localhost:9200
+
+ The Elasticsearch REST API should return a JSON response similar to the following:
+
+ {{< output >}}
+{
+ "name" : "Sch1T0D",
+ "cluster_name" : "docker-cluster",
+ "cluster_uuid" : "MH6WKAm0Qz2r8jFK-TcbNg",
+ "version" : {
+ "number" : "6.1.1",
+ "build_hash" : "bd92e7f",
+ "build_date" : "2017-12-17T20:23:25.338Z",
+ "build_snapshot" : false,
+ "lucene_version" : "7.1.0",
+ "minimum_wire_compatibility_version" : "5.6.0",
+ "minimum_index_compatibility_version" : "5.0.0"
+ },
+ "tagline" : "You Know, for Search"
+}
+{{< /output >}}
+
+9. To determine whether or not the service has started successfully, view the most recent logs:
+
+ systemctl status elasticsearch
diff --git a/docs/databases/elasticsearch/monitor-nginx-web-server-logs-using-filebeat-elastic-stack-centos-7.md b/docs/databases/elasticsearch/monitor-nginx-web-server-logs-using-filebeat-elastic-stack-centos-7.md
index a556c8ff938..daad94ee750 100644
--- a/docs/databases/elasticsearch/monitor-nginx-web-server-logs-using-filebeat-elastic-stack-centos-7.md
+++ b/docs/databases/elasticsearch/monitor-nginx-web-server-logs-using-filebeat-elastic-stack-centos-7.md
@@ -163,7 +163,7 @@ Install the `kibana` package:
### Elasticsearch
-By default, Elasticsearch will create five shards and one replica for every index that's created. When deploying to production, these are reasonable settings to use. In this tutorial, only one server is used in the Elasticsearch setup, so multiple shards and replicas are unncessary. Changing these defaults can avoid unecessary overhead.
+By default, Elasticsearch will create five shards and one replica for every index that's created. When deploying to production, these are reasonable settings to use. In this tutorial, only one server is used in the Elasticsearch setup, so multiple shards and replicas are unnecessary. Changing these defaults can avoid unnecessary overhead.
1. Create a temporary JSON file with an *index template* that instructs Elasticsearch to set the number of shards to one and number of replicas to zero for all matching index names (in this case, a wildcard `*`):
diff --git a/docs/databases/elasticsearch/visualize-apache-web-server-logs-using-elastic-stack-on-debian-8.md b/docs/databases/elasticsearch/visualize-apache-web-server-logs-using-elastic-stack-on-debian-8.md
index 4c4ef011c68..3f6a17d31f8 100644
--- a/docs/databases/elasticsearch/visualize-apache-web-server-logs-using-elastic-stack-on-debian-8.md
+++ b/docs/databases/elasticsearch/visualize-apache-web-server-logs-using-elastic-stack-on-debian-8.md
@@ -146,7 +146,7 @@ Install the `kibana` package:
### Elasticsearch
-By default, Elasticsearch will create five shards and one replica for every index that's created. When deploying to production, these are reasonable settings to use. In this tutorial, only one server is used in the Elasticsearch setup, so multiple shards and replicas are unncessary. Changing these defaults can avoid unecessary overhead.
+By default, Elasticsearch will create five shards and one replica for every index that's created. When deploying to production, these are reasonable settings to use. In this tutorial, only one server is used in the Elasticsearch setup, so multiple shards and replicas are unnecessary. Changing these defaults can avoid unnecessary overhead.
1. Create a temporary JSON file with an *index template* that instructs Elasticsearch to set the number of shards to one and number of replicas to zero for all matching index names (in this case, a wildcard `*`):
diff --git a/docs/databases/hadoop/install-configure-run-spark-on-top-of-hadoop-yarn-cluster.md b/docs/databases/hadoop/install-configure-run-spark-on-top-of-hadoop-yarn-cluster.md
index df6339e0aa4..9d09b8a7e55 100644
--- a/docs/databases/hadoop/install-configure-run-spark-on-top-of-hadoop-yarn-cluster.md
+++ b/docs/databases/hadoop/install-configure-run-spark-on-top-of-hadoop-yarn-cluster.md
@@ -200,7 +200,7 @@ To run the same application in cluster mode, replace `--deploy-mode client`with
When you submit a job, Spark Driver automatically starts a web UI on port `4040` that displays information about the application. However, when execution is finished, the Web UI is dismissed with the application driver and can no longer be accessed.
-Spark provides a History Server that collects application logs from HDFS and displays them in a persistent web UI. The following steps will enable log persistance in HDFS:
+Spark provides a History Server that collects application logs from HDFS and displays them in a persistent web UI. The following steps will enable log persistence in HDFS:
1. Edit `$SPARK_HOME/conf/spark-defaults.conf` and add the following lines to enable Spark jobs to log in HDFS:
diff --git a/docs/databases/mariadb/how-to-install-mariadb-on-centos-7.md b/docs/databases/mariadb/how-to-install-mariadb-on-centos-7.md
index 19d9dc77a6a..aecc4111ecd 100644
--- a/docs/databases/mariadb/how-to-install-mariadb-on-centos-7.md
+++ b/docs/databases/mariadb/how-to-install-mariadb-on-centos-7.md
@@ -18,7 +18,7 @@ external_resources:
- '[MySQLdb User''s Guide](http://mysql-python.sourceforge.net/MySQLdb.html)'
---
-MariaDB is a fork of the popular cross-platform MySQL database management system and is considered a full [drop-in replacement](https://mariadb.com/kb/en/mariadb/mariadb-vs-mysql-features/) for MySQL. MariaDB was created by one of MySQL's originial developers in 2009 after MySQL was acquired by Oracle during the Sun Microsystems merger. Today MariaDB is maintained and developed by the [MariaDB Foundation](https://mariadb.org/en/foundation/) and community contributors with the intention of it remaining GNU GPL software.
+MariaDB is a fork of the popular cross-platform MySQL database management system and is considered a full [drop-in replacement](https://mariadb.com/kb/en/mariadb/mariadb-vs-mysql-features/) for MySQL. MariaDB was created by one of MySQL's original developers in 2009 after MySQL was acquired by Oracle during the Sun Microsystems merger. Today MariaDB is maintained and developed by the [MariaDB Foundation](https://mariadb.org/en/foundation/) and community contributors with the intention of it remaining GNU GPL software.

diff --git a/docs/databases/mariadb/set-up-mariadb-clusters-with-galera-debian-and-ubuntu.md b/docs/databases/mariadb/set-up-mariadb-clusters-with-galera-debian-and-ubuntu.md
index 77fc1b1fbba..4d2af6df8f4 100644
--- a/docs/databases/mariadb/set-up-mariadb-clusters-with-galera-debian-and-ubuntu.md
+++ b/docs/databases/mariadb/set-up-mariadb-clusters-with-galera-debian-and-ubuntu.md
@@ -48,7 +48,7 @@ On Debian 9 and later, run `sudo apt install dirmngr` before importing the key.
| Ubuntu 16.04 | 0xF1656F24C74CD1D8 | 10.1 | deb [arch=amd64,i386,ppc64el] http://mirror.nodesdirect.com/mariadb/repo/10.1/ubuntu xenial main
| Ubuntu 16.04 | 0xF1656F24C74CD1D8 | 10.0 | deb [arch=amd64,i386,ppc64el] http://mirror.nodesdirect.com/mariadb/repo/10.1/ubuntu xenial main
- There may not be a released version for each distribution. e.g. Debian 8 has version 10.0 and 10.1 whereas Debian 9 has only 10.1 available. To see all available distributions, visit the MariaDB reporsitory [download page](https://downloads.mariadb.org/mariadb/repositories/).
+ There may not be a released version for each distribution. For example, Debian 8 has versions 10.0 and 10.1, whereas Debian 9 has only 10.1 available. To see all available distributions, visit the MariaDB repository [download page](https://downloads.mariadb.org/mariadb/repositories/).
3. Install MariaDB, Galera, and Rsync:
diff --git a/docs/databases/mongodb/build-database-clusters-with-mongodb.md b/docs/databases/mongodb/build-database-clusters-with-mongodb.md
index d76c1e98e3a..81205c837b1 100644
--- a/docs/databases/mongodb/build-database-clusters-with-mongodb.md
+++ b/docs/databases/mongodb/build-database-clusters-with-mongodb.md
@@ -57,7 +57,7 @@ The problem in this configuration is that if one of the shard servers experience
## Configure Hosts File
-If your Linodes are all located in the same datacenter, we recommend [adding a private IP address](/docs/networking/remote-access#adding-private-ip-addresses) for each one and using those here to avoid transmitting data over the public internet. If you don't use private IP addresses, be sure to [encrypt your data with SSL/TLS](https://docs.mongodb.com/manual/tutorial/configure-ssl/).
+If your Linodes are all located in the same data center, we recommend [adding a private IP address](/docs/networking/remote-access#adding-private-ip-addresses) for each one and using those here to avoid transmitting data over the public internet. If you don't use private IP addresses, be sure to [encrypt your data with SSL/TLS](https://docs.mongodb.com/manual/tutorial/configure-ssl/).
On each Linode in your cluster, add the following to the `/etc/hosts` file:
@@ -371,7 +371,7 @@ bindIp: 192.0.2.5
mongo mongo-query-router:27017 -u mongo-admin -p --authenticationDatabase admin
- If your query router has a different hostname, subsitute that in the command.
+ If your query router has a different hostname, substitute that in the command.
3. From the `mongos` interface, add each shard individually:
@@ -392,7 +392,7 @@ Before adding replica sets as shards, you must first configure the replica sets
## Configure Sharding
-At this stage, the components of your cluster are all connected and communicating with one another. The final step is to enable sharding. Enabling sharding takes place in stages due to the organization of data in MongoDB. To understand how data will be distrubuted, let's briefly review the main data structures:
+At this stage, the components of your cluster are all connected and communicating with one another. The final step is to enable sharding. Enabling sharding takes place in stages due to the organization of data in MongoDB. To understand how data will be distributed, let's briefly review the main data structures:
- **Databases** - The broadest data structure in MongoDB, used to hold groups of related data.
- **Collections** - Analogous to tables in traditional relational database systems, collections are the data structures that comprise databases
@@ -406,7 +406,7 @@ First, we'll enable sharding at the database level, which means that collections
mongo mongo-query-router:27017 -u mongo-admin -p --authenticationDatabase admin
- If applicable, subsitute your own query router's hostname.
+ If applicable, substitute your own query router's hostname.
2. From the `mongos` shell, create a new database. We'll call ours `exampleDB`:
@@ -466,7 +466,7 @@ It's not always necessary to shard every collection in a database. Depending on
## Test Your Cluster
-This section is optional. To ensure your data is being distributed evenly in the example database and collection we configured aboved, you can follow these steps to generate some basic test data and see how it is divided among the shards.
+This section is optional. To ensure your data is being distributed evenly in the example database and collection we configured above, you can follow these steps to generate some basic test data and see how it is divided among the shards.
1. Connect to the `mongo` shell on your query router if you're not already there:
diff --git a/docs/databases/mongodb/create-a-mongodb-replica-set.md b/docs/databases/mongodb/create-a-mongodb-replica-set.md
index 2edb8263883..bc4e9eb7ac7 100644
--- a/docs/databases/mongodb/create-a-mongodb-replica-set.md
+++ b/docs/databases/mongodb/create-a-mongodb-replica-set.md
@@ -52,9 +52,9 @@ To allow for consistent replication, each node will need to communicate with all
There are two major ways to allow the members of your replica set to communicate.
-The first method is to use [private IP addresses](/docs/networking/remote-access#adding-private-ip-addresses) for each member of the replica set. This allows the Linodes in your replica set to communicate without exposing your data to the public internet. This method is recommended, but note that it requires all members of the replica set be in the same datacenter.
+The first method is to use [private IP addresses](/docs/networking/remote-access#adding-private-ip-addresses) for each member of the replica set. This allows the Linodes in your replica set to communicate without exposing your data to the public internet. This method is recommended, but note that it requires all members of the replica set be in the same data center.
-The second method is to simply use the public IP address assigned to each Linode. You'll need to use this method if your Linodes are located in different datacenters, although this is not recommended because network latency will have a negative impact on replication. If you must use public IP addresses, you should [configure SSL/TLS encryption](https://docs.mongodb.com/manual/tutorial/configure-ssl/) for data sent between your hosts, or configure them to communicate over a VPN.
+The second method is to simply use the public IP address assigned to each Linode. You'll need to use this method if your Linodes are located in different data centers, although this is not recommended because network latency will have a negative impact on replication. If you must use public IP addresses, you should [configure SSL/TLS encryption](https://docs.mongodb.com/manual/tutorial/configure-ssl/) for data sent between your hosts, or configure them to communicate over a VPN.
Whether you're using public or private IP addresses to send data, you'll need to secure each Linode with a [firewall](/docs/security/firewalls/) before deploying your replica set into production.
diff --git a/docs/databases/mongodb/install-mongodb-on-centos-7.md b/docs/databases/mongodb/install-mongodb-on-centos-7.md
index 864be6b404d..cc8b74fa1fb 100644
--- a/docs/databases/mongodb/install-mongodb-on-centos-7.md
+++ b/docs/databases/mongodb/install-mongodb-on-centos-7.md
@@ -177,7 +177,7 @@ If you enabled role-based access control in the [Configure MongoDB](#configure-m
db.createUser({user: "example-user", pwd: "password", roles:[{role: "read", db: "user-data"}, {role:"readWrite", db: "exampleDB"}]})
- To create additional users, repeat Steps 6 and 7 as the administrative user, creating new usernames, passwords and roles by substituing the appropriate values.
+ To create additional users, repeat Steps 6 and 7 as the administrative user, creating new usernames, passwords and roles by substituting the appropriate values.
8. Exit the mongo shell:
diff --git a/docs/databases/mongodb/install-mongodb-on-ubuntu-16-04.md b/docs/databases/mongodb/install-mongodb-on-ubuntu-16-04.md
index 5151ea7aada..4d1b2c32252 100644
--- a/docs/databases/mongodb/install-mongodb-on-ubuntu-16-04.md
+++ b/docs/databases/mongodb/install-mongodb-on-ubuntu-16-04.md
@@ -34,7 +34,7 @@ Since MongoDB can require a significant amount of RAM, we recommend using a [hig
- Update your system:
- sudo apt-get update && sudo apt-get upgrade
+ sudo apt-get update && sudo apt-get upgrade
{{< note >}}
This guide is written for a non-root user. Commands that require elevated privileges are prefixed with `sudo`. If you’re not familiar with the `sudo` command, you can check our [Users and Groups](/docs/tools-reference/linux-users-and-groups) guide.
@@ -169,7 +169,7 @@ Successfully added user: {
db.createUser({user: "example-user", pwd: "password", roles:[{role: "read", db: "user-data"}, {role:"readWrite", db: "exampleDB"}]})
- To create additional users, repeat Steps 6 and 7 as the administrative user, creating new usernames, passwords and roles by substituing the appropriate values.
+ To create additional users, repeat Steps 6 and 7 as the administrative user, creating new usernames, passwords and roles by substituting the appropriate values.
8. Exit the mongo shell:
diff --git a/docs/databases/mysql/back-up-your-mysql-databases.md b/docs/databases/mysql/back-up-your-mysql-databases.md
index 40f21fcbe0e..df93b793012 100644
--- a/docs/databases/mysql/back-up-your-mysql-databases.md
+++ b/docs/databases/mysql/back-up-your-mysql-databases.md
@@ -1,4 +1,5 @@
---
+deprecated: true
author:
name: Brett Kaplan
email: docs@linode.com
diff --git a/docs/databases/mysql/create-physical-backups-of-your-mariadb-or-mysql-databases.md b/docs/databases/mysql/create-physical-backups-of-your-mariadb-or-mysql-databases.md
new file mode 100644
index 00000000000..03c10aad5f5
--- /dev/null
+++ b/docs/databases/mysql/create-physical-backups-of-your-mariadb-or-mysql-databases.md
@@ -0,0 +1,77 @@
+---
+author:
+ name: Linode
+ email: docs@linode.com
+description: "Create physical backups of your MySQL databases by copying the relevant parts of the filesystem. Useful for recovering inaccessible databases."
+keywords: ["mysql", "mariadb", "backup", "back up", "mysqldump"]
+license: '[CC BY-ND 4.0](https://creativecommons.org/licenses/by-nd/4.0)'
+published: 2018-01-30
+modified: 2018-01-30
+modified_by:
+ name: Linode
+title: Create Physical Backups of your MariaDB or MySQL Databases
+external_resources:
+ - '[Backup and Restore Overview; MariaDB Library](https://mariadb.com/kb/en/library/backup-and-restore-overview/)'
+ - '[Database Backup Methods; MySQL Reference Manual](https://dev.mysql.com/doc/refman/5.7/en/backup-methods.html)'
+---
+
+While the `mysqldump` tool is the preferred backup method for a MariaDB or MySQL database or database system, it only works when the database server is accessible and running. If the database cannot be started or the host system is inaccessible, the database files can still be copied directly.
+
+A *physical backup* is often necessary in situations when you only have access to a recovery environment (such as [Finnix](/docs/troubleshooting/finnix-rescue-mode)) where you mount your system's disks as external storage devices. If you want to read about *logical backups* using `mysqldump`, [see our guide](/docs/databases/mysql/use-mysqldump-to-back-up-mysql-or-mariadb) on the topic.
+
+For simplicity, the name MySQL will be used throughout this guide, but the instructions will work for both MySQL and MariaDB.
+
+{{< note >}}
+The steps in this guide require root privileges. Log in as the root user with `su -` before you begin.
+{{< /note >}}
+
+## Create a Backup
+
+1. If you are not running in recovery mode (a Finnix session), stop the `mysql` service:
+
+ systemctl stop mysql
+
+2. Locate your database directory. It should be `/var/lib/mysql/` on most systems but if that directory doesn't exist, examine `/etc/mysql/my.cnf` for a path to the data directory.
+
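+ For example, on Debian-based systems, a command like the following (the configuration path may differ on your distribution) can reveal a custom data directory setting:
+
+ grep -R "datadir" /etc/mysql/ 2>/dev/null
+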
+3. Create a directory to store your backups. This guide will use `/opt/db-backups` but you can alter this to suit your needs:
+
+ mkdir /opt/db-backups
+
+4. Copy MySQL's data directory to a storage location. The `cp` command, `rsync`, or other methods will work fine, but we'll use `tar` to recursively copy and gzip the backup at one time. Change the database directory, backup filename, and target directory as needed; the `-$(date +%F)` addition to the command will insert a timestamp into the filename.
+
+ tar cfvz /opt/db-backups/db-$(date +%F).tar.gz /var/lib/mysql/*
+
+5. Restart the MySQL service:
+
+ systemctl restart mysql
+
+## Restore a Backup
+
+1. Change your working directory to a place where you can extract the tarball created above. The current user's home directory is used in this example:
+
+ cd
+
+2. Stop the `mysql` service:
+
+ systemctl stop mysql
+
+3. Extract the tarball to the working directory. Change the tarball's filename in the command to the one with the date you want to restore to.
+
+ tar zxvf /opt/db-backups/db-archive.tar.gz -C .
+
+4. Move the current contents of `/var/lib/mysql` to another location if you want to keep them for any reason, or delete them entirely. Create a new empty `mysql` folder to restore your backed up DBMS into.
+
+ mv /var/lib/mysql /var/lib/mysql-old
+ mkdir /var/lib/mysql
+
+5. Move the backed up database system into the empty folder:
+
+ mv ~/var/lib/mysql/* /var/lib/mysql
+
+6. Set the proper permissions for the files you just restored:
+
+ chown -R mysql:mysql /var/lib/mysql
+
+7. Restart the MySQL service:
+
+ systemctl restart mysql
diff --git a/docs/databases/mysql/deploy-mysql-workbench-for-database-administration.md b/docs/databases/mysql/deploy-mysql-workbench-for-database-administration.md
index ead9987e50d..07b588095c7 100644
--- a/docs/databases/mysql/deploy-mysql-workbench-for-database-administration.md
+++ b/docs/databases/mysql/deploy-mysql-workbench-for-database-administration.md
@@ -42,7 +42,7 @@ Download and install MySQL workbench from the [downloads page](https://www.mysql
There are `.deb` and `.rpm` packages available on the Workbench [download page](https://www.mysql.com/products/workbench/). Alternatively, some distributions have MySQL Workbench in their repositories.
{{< note >}}
-The screenshots in this guide were taken in Ubuntu but once Workbench is installed on your system, the subsequent steps should be similar for other plaforms.
+The screenshots in this guide were taken in Ubuntu but once Workbench is installed on your system, the subsequent steps should be similar for other platforms.
{{< /note >}}
When you start MySQL Workbench, you'll land at the home screen. Once you configure your database servers, as we'll do next, then they'll have shortcuts on the home screen.
@@ -55,7 +55,7 @@ The first step after running MySQL Workbench is to add your Linode as a database
1. Click the **+** adjacent to **MySQL Connections** to get the **Setup New Connection** dialog:
- [](/docs/assets/workbenchHome.png)
+ [](/docs/assets/workbenchHome.png)
The settings you'll need:
@@ -91,11 +91,11 @@ Pay attention to the **Service** area of each dialog. Use the appropriate passw
3. If all is well, you should get a **Connection Successful** message.
- 
+ 
4. Click **OK** to clear the message, then click **OK** again to add the connection. You'll get a shortcut to the new connection on the home screen.
- [](/docs/assets/workbenchHomeWithLinode.png)
+ [](/docs/assets/workbenchHomeWithLinode.png)
If you have more than one Linode or other servers you administer, you can repeat this process to add all of your database servers.
@@ -133,23 +133,23 @@ The user you just created should be able to log in to MySQL via Workbench or any
MySQL Workbench is deployed in safe mode by default. This will not allow certain types of queries--such as updates--without explicit IDs. To fix this, we need to turn off safe mode.
-1. Go to the menu and select **Edit**, then **Preferences**.
+1. Go to the menu and select **Edit**, then **Preferences**.
-2. Select the **SQL Queries** tab.
+2. Select the **SQL Queries** tab.
- 
+ 
-3. Uncheck the line beginning with **"Safe Updates".**
+3. Uncheck the line beginning with **"Safe Updates".**
{{< note >}}
In some instances, this may instead be found under **SQL Editor**.
{{< /note >}}
-4. Click **OK**.
+4. Click **OK**.
-5. Close the database screen to return to home.
+5. Close the database screen to return to home.
-6. Reconnect to the database.
+6. Reconnect to the database.
## Creating and Populating Databases
@@ -160,9 +160,9 @@ Start by adding a new database that you can work with.
1. Click the **New Schema** button on the toolbar.
- 
+ 
- [](/docs/assets/workbenchNewSchema.png)
+ [](/docs/assets/workbenchNewSchema.png)
You only need a name to create the new database, but you can create an area for comments if you want. Default collation can be left blank, in which case MySQL will use the default.
@@ -172,7 +172,7 @@ Start by adding a new database that you can work with.
3. Click **Apply** again and you should get a **SQL Succesful** message. Then click **Close**.
- 
+ 
Now you're back at the main database screen, and you see that **phonebook** has been added to the schema list. Double-click on any item in the schema list to switch to that database.
@@ -184,7 +184,7 @@ MySQL stores its information in a table, which resembles a spreadsheet.
1. Click the **Add Table** button.
- 
+ 
You'll get a screen that looks like this:
@@ -228,7 +228,7 @@ The first step to add table data is to open a table.
1. Right click on **employees** and select the top option, **SELECT ROWS - LIMIT 1000**.
- 
+ 
2. Double click on **NULL** under **lastName**. At this point, you can start entering data. You must press ENTER after each field to exit editing or else the field will revert to its previous value.
@@ -247,7 +247,7 @@ You can run a SQL query on a table by entering it at the top of the table view.
2. Click on the lightning bolt to run the query. You should get results like this:
- [](/docs/assets/workbenchSQLresults.png)
+ [](/docs/assets/workbenchSQLresults.png)
### Export / Import Data
diff --git a/docs/databases/mysql/how-to-install-mysql-on-centos-6.md b/docs/databases/mysql/how-to-install-mysql-on-centos-6.md
index 24f52362856..38477478d1a 100644
--- a/docs/databases/mysql/how-to-install-mysql-on-centos-6.md
+++ b/docs/databases/mysql/how-to-install-mysql-on-centos-6.md
@@ -18,7 +18,7 @@ external_resources:
- '[MySQLdb User''s Guide](http://mysql-python.sourceforge.net/MySQLdb.html)'
---
-
+
MySQL is a popular database management system used for web and server applications. This guide will introduce how to install, configure and manage MySQL on a Linode running CentOS 6.
diff --git a/docs/databases/mysql/how-to-install-mysql-on-debian-7.md b/docs/databases/mysql/how-to-install-mysql-on-debian-7.md
index 6f5e19e4dcd..a1c7a507bf8 100644
--- a/docs/databases/mysql/how-to-install-mysql-on-debian-7.md
+++ b/docs/databases/mysql/how-to-install-mysql-on-debian-7.md
@@ -19,7 +19,7 @@ external_resources:
- '[MySQL Tuner Tutorial](http://www.debiantutorials.com/tuning-mysql-with-mysqltuner-to-increase-efficiency-and-performance/)'
---
-
+
MySQL is a popular database management system used for web and server applications. This guide will introduce how to install, configure and manage MySQL on a Linode running Debian 7 (Wheezy).
diff --git a/docs/databases/mysql/how-to-install-mysql-on-debian-8.md b/docs/databases/mysql/how-to-install-mysql-on-debian-8.md
index 63dd187dca2..0963f378d16 100644
--- a/docs/databases/mysql/how-to-install-mysql-on-debian-8.md
+++ b/docs/databases/mysql/how-to-install-mysql-on-debian-8.md
@@ -154,7 +154,7 @@ If you forget your root MySQL password, it can be reset.
1. Stop the current MySQL server instance.
- sudo systemctl stop mysql.serivce
+ sudo systemctl stop mysql.service
2. Use dpkg to re-run the configuration process MySQL goes through on first installation. You will again be asked to set a root password.
diff --git a/docs/databases/mysql/install-mysql-phpmyadmin-debian-7.md b/docs/databases/mysql/install-mysql-phpmyadmin-debian-7.md
index 6510264463e..20325512c88 100644
--- a/docs/databases/mysql/install-mysql-phpmyadmin-debian-7.md
+++ b/docs/databases/mysql/install-mysql-phpmyadmin-debian-7.md
@@ -16,6 +16,8 @@ external_resources:
- '[phpMyAdmin Documentation Page](http://www.phpmyadmin.net/home_page/docs.php)'
---
+
+
phpMyAdmin is a web application that provides a GUI to aid in MySQL database administration. It supports multiple MySQL servers and is a robust and easy alternative to using the MySQL command line client.
{{< note >}}
diff --git a/docs/databases/mysql/use-mysqldump-to-back-up-mysql-or-mariadb.md b/docs/databases/mysql/use-mysqldump-to-back-up-mysql-or-mariadb.md
new file mode 100644
index 00000000000..dc2fcca5204
--- /dev/null
+++ b/docs/databases/mysql/use-mysqldump-to-back-up-mysql-or-mariadb.md
@@ -0,0 +1,101 @@
+---
+author:
+ name: Linode
+ email: docs@linode.com
+description: 'Use mysqldump to back up MySQL databases, tables, or entire database management systems.'
+keywords: ["mysql", "mariadb", "backup", "back up", "mysqldump"]
+license: '[CC BY-ND 4.0](https://creativecommons.org/licenses/by-nd/4.0)'
+aliases: ['databases/mysql/backup-options/','security/backups/back-up-your-mysql-databases/','databases/mysql/back-up-your-mysql-databases/']
+published: 2018-01-30
+modified: 2018-01-30
+modified_by:
+ name: Linode
+title: 'Use mysqldump to Back Up MySQL or MariaDB'
+external_resources:
+ - '[MySQL Database Backup Methods page](http://dev.mysql.com/doc/refman/5.1/en/backup-methods.html)'
+ - '[mysqldump - A Database Backup Program, MySQL Reference Manual](https://dev.mysql.com/doc/refman/5.7/en/mysqldump.html)'
+---
+
+[MySQL](http://www.mysql.com/) and [MariaDB](https://mariadb.com/) include the [mysqldump](https://dev.mysql.com/doc/refman/5.7/en/mysqldump.html) utility to simplify the process of creating a backup of a database or an entire system of databases. Using `mysqldump` produces a *logical backup*, as opposed to a [*physical backup*](/docs/databases/mysql/create-physical-backups-of-your-mariadb-or-mysql-databases/), which is a copy of the filesystem structure that contains your data.
+
+The instructions in this guide apply to both MySQL and MariaDB. For simplicity, the name MySQL is used to refer to both.
+
+## Before You Begin
+
+- You will need a working MySQL or MariaDB installation, and a database user to run the backup. For help with installation, see the [Linode MySQL documentation](/docs/databases/mysql/).
+
+- You will need root access to the system, or a user account with `sudo` privileges.
+
+## Back up a Database
+
+The `mysqldump` command’s general syntax is:
+
+ mysqldump -u [username] -p [databaseName] > [filename]-$(date +%F).sql
+
+* `mysqldump` prompts for a password before it starts the backup process.
+* Depending on the size of the database, the backup could take a while to complete.
+* The database backup will be created in the directory where the command is run.
+* `-$(date +%F)` adds a timestamp to the filename.
+
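+If disk space is a concern, the dump can be compressed as it is written by piping it through `gzip`. This is a minimal sketch using the same placeholder names as above:
+
+    mysqldump -u [username] -p [databaseName] | gzip > [filename]-$(date +%F).sql.gz
+
+A compressed dump can later be restored without uncompressing it on disk first, for example with `zcat [filename].sql.gz | mysql -u [username] -p [databaseName]`.
+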
+### Backup Examples
+
+* Create a backup of an entire Database Management System (DBMS):
+
+    mysqldump -u root -p --all-databases --single-transaction --quick --lock-tables=false > full-backup-$(date +%F).sql
+
+* Back up a specific database. Replace `db1` with the name of the database you want to back up:
+
+ mysqldump -u username -p db1 --single-transaction --quick --lock-tables=false > db1-backup-$(date +%F).sql
+
+* Back up a single table from any database. In the example below, `table1` is exported from the database `db1`:
+
+ mysqldump -u username -p --single-transaction --quick --lock-tables=false db1 table1 > db1-table1-$(date +%F).sql
+
+Here's a breakdown of the `mysqldump` command options used above:
+
+- `--single-transaction`: Issue a `BEGIN` SQL statement before dumping data from the server, so that transactional tables (such as InnoDB) are dumped from a consistent snapshot.
+- `--quick`: Retrieve rows one at a time instead of buffering entire tables in memory. This adds safety on systems with little RAM or with large databases, where holding whole tables in memory could become problematic.
+- `--lock-tables=false`: Do not lock tables for the backup session.
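+
+Stored procedures, functions, and scheduled events are not included in a dump by default. If your databases use them, here is a sketch of the same single-database backup with the relevant flags added:
+
+    mysqldump -u username -p db1 --single-transaction --quick --lock-tables=false --routines --events > db1-backup-$(date +%F).sql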
+
+## Automate Backups with cron
+
+Entries can be added to `/etc/crontab` to regularly schedule database backups.
+
+1. Create a file to hold the login credentials of the MySQL root user that will perform the backup. Note that the system user whose home directory stores this file can be unrelated to any MySQL user.
+
+ {{< file "/home/example_user/.mylogin.cnf" >}}
+[client]
+user = root
+password = MySQL root user's password
+{{< /file >}}
+
+2. Restrict permissions of the credentials file:
+
+ chmod 600 /home/example_user/.mylogin.cnf
+
+3. Add a cron job to `/etc/crontab`. The example entry below backs up the entire database management system every day at 1AM. Any percent signs in a crontab entry must be escaped with a backslash so that cron does not interpret them:
+
+    {{< file-excerpt "/etc/crontab" >}}
+0 1 * * * root /usr/bin/mysqldump --defaults-extra-file=/home/example_user/.mylogin.cnf --single-transaction --quick --lock-tables=false --all-databases > /home/example_user/full-backup-$(date +\%F).sql
+{{< /file-excerpt >}}
+
+ For more information on cron, see the [cron(8)](https://linux.die.net/man/8/cron) and [cron(5)](https://linux.die.net/man/5/crontab) manual pages.
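+
+4. Optionally, run the same command by hand once to confirm that the credentials file works non-interactively before relying on the scheduled job. This is a sketch; the output filename is arbitrary:
+
+    mysqldump --defaults-extra-file=/home/example_user/.mylogin.cnf --single-transaction --quick --lock-tables=false --all-databases > test-backup.sql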
+
+## Restore a Backup
+
+The restoration command's general syntax is:
+
+ mysql -u [username] -p [databaseName] < [filename].sql
+
+* Restore an entire DBMS backup. You will be prompted for the MySQL root user's password. **This will overwrite all current data in the MySQL database system.**
+
+ mysql -u root -p < full-backup.sql
+
+* Restore a single database dump. The destination database must already exist (it can be empty or contain an older copy of the data), and the MySQL user running the command must have write access to it:
+
+ mysql -u [username] -p db1 < db1-backup.sql
+
+* Restore a single table. You must have a destination database ready to receive the data:
+
+ mysql -u dbadmin -p db1 < db1-table1.sql
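+
+* If you only have a full DBMS dump but need just one of the databases from it, the `mysql` client's `--one-database` option can filter the import. This is a sketch and assumes the destination database (`db1` here) already exists:
+
+    mysql -u root -p --one-database db1 < full-backup.sql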
diff --git a/docs/databases/oracle/oracle-10g-express-edition-on-debian-5-lenny.md b/docs/databases/oracle/oracle-10g-express-edition-on-debian-5-lenny.md
index 06bb9c16fb0..0f88f2cbc5a 100644
--- a/docs/databases/oracle/oracle-10g-express-edition-on-debian-5-lenny.md
+++ b/docs/databases/oracle/oracle-10g-express-edition-on-debian-5-lenny.md
@@ -24,7 +24,7 @@ To do this, log into the Linode Manager and shut down your Linode. Once your Lin
# Configure Networking and Set the Hostname
-Oracle is very picky about the system hostname with respect to what interfaces it will listen on. You'll be using a private IP on your Linode and setting the hostname a bit differently than usual to account for this, with the added benefit of being able to connect to your Oracle database from other Linodes in the same datacenter.
+Oracle is very picky about the system hostname with respect to what interfaces it will listen on. You'll be using a private IP on your Linode and setting the hostname a bit differently than usual to account for this, with the added benefit of being able to connect to your Oracle database from other Linodes in the same data center.
First, make sure your Linode has a private IP address assigned to it. To do so, visit the "Remote Access" tab in the Linode Manager. If you need to add a private IP, reboot your Linode after doing so before proceeding with the next step.
diff --git a/docs/databases/oracle/oracle-10g-express-edition-on-debian-6-squeeze.md b/docs/databases/oracle/oracle-10g-express-edition-on-debian-6-squeeze.md
index ee86746cb3c..91bd99dbe90 100644
--- a/docs/databases/oracle/oracle-10g-express-edition-on-debian-6-squeeze.md
+++ b/docs/databases/oracle/oracle-10g-express-edition-on-debian-6-squeeze.md
@@ -24,7 +24,7 @@ To do this, log into the Linode Manager and shut down your Linode. Once your Lin
# Configure Networking and Set the Hostname
-Oracle is very picky about the system hostname with respect to what interfaces it will listen on. You'll be using a private IP on your Linode and setting the hostname a bit differently than usual to account for this, with the added benefit of being able to connect to your Oracle database from other Linodes in the same datacenter.
+Oracle is very picky about the system hostname with respect to what interfaces it will listen on. You'll be using a private IP on your Linode and setting the hostname a bit differently than usual to account for this, with the added benefit of being able to connect to your Oracle database from other Linodes in the same data center.
First, make sure your Linode has a private IP address assigned to it. To do so, visit the "Remote Access" tab in the Linode Manager. If you need to add a private IP, reboot your Linode after doing so before proceeding with the next step.
diff --git a/docs/databases/oracle/oracle-10g-express-edition-on-ubuntu-10-04-lts-lucid.md b/docs/databases/oracle/oracle-10g-express-edition-on-ubuntu-10-04-lts-lucid.md
index 93f1c364723..f704f7ee20f 100644
--- a/docs/databases/oracle/oracle-10g-express-edition-on-ubuntu-10-04-lts-lucid.md
+++ b/docs/databases/oracle/oracle-10g-express-edition-on-ubuntu-10-04-lts-lucid.md
@@ -24,7 +24,7 @@ To do this, log into the Linode Manager and shut down your Linode. Once your Lin
# Configure Networking and Set the Hostname
-Oracle is very picky about the system hostname with respect to what interfaces it will listen on. You'll be using a private IP on your Linode and setting the hostname a bit differently than usual to account for this, with the added benefit of being able to connect to your Oracle database from other Linodes in the same datacenter.
+Oracle is very picky about the system hostname with respect to what interfaces it will listen on. You'll be using a private IP on your Linode and setting the hostname a bit differently than usual to account for this, with the added benefit of being able to connect to your Oracle database from other Linodes in the same data center.
First, make sure your Linode has a private IP address assigned to it. To do so, visit the "Remote Access" tab in the Linode Manager. If you need to add a private IP, reboot your Linode after doing so before proceeding with the next step.
diff --git a/docs/databases/oracle/oracle-10g-express-edition-on-ubuntu-10-10-maverick.md b/docs/databases/oracle/oracle-10g-express-edition-on-ubuntu-10-10-maverick.md
index f200958d182..73b09019b06 100644
--- a/docs/databases/oracle/oracle-10g-express-edition-on-ubuntu-10-10-maverick.md
+++ b/docs/databases/oracle/oracle-10g-express-edition-on-ubuntu-10-10-maverick.md
@@ -26,7 +26,7 @@ To do this, log into the Linode Manager and shut down your Linode. Once your Lin
# Configure Networking and Set the Hostname
-Oracle is very picky about the system hostname with respect to what interfaces it will listen on. You'll be using a private IP on your Linode and setting the hostname a bit differently than usual to account for this, with the added benefit of being able to connect to your Oracle database from other Linodes in the same datacenter.
+Oracle is very picky about the system hostname with respect to what interfaces it will listen on. You'll be using a private IP on your Linode and setting the hostname a bit differently than usual to account for this, with the added benefit of being able to connect to your Oracle database from other Linodes in the same data center.
First, make sure your Linode has a private IP address assigned to it. To do so, visit the "Remote Access" tab in the Linode Manager. If you need to add a private IP, reboot your Linode after doing so before proceeding with the next step.
diff --git a/docs/databases/oracle/oracle-10g-express-edition-on-ubuntu-9-10-karmic.md b/docs/databases/oracle/oracle-10g-express-edition-on-ubuntu-9-10-karmic.md
index d87f35eba30..30e3ab41fb6 100644
--- a/docs/databases/oracle/oracle-10g-express-edition-on-ubuntu-9-10-karmic.md
+++ b/docs/databases/oracle/oracle-10g-express-edition-on-ubuntu-9-10-karmic.md
@@ -26,7 +26,7 @@ To do this, log into the Linode Manager and shut down your Linode. Once your Lin
# Configure Networking and Set the Hostname
-Oracle is very picky about the system hostname with respect to what interfaces it will listen on. You'll be using a private IP on your Linode and setting the hostname a bit differently than usual to account for this, with the added benefit of being able to connect to your Oracle database from other Linodes in the same datacenter.
+Oracle is very picky about the system hostname with respect to what interfaces it will listen on. You'll be using a private IP on your Linode and setting the hostname a bit differently than usual to account for this, with the added benefit of being able to connect to your Oracle database from other Linodes in the same data center.
First, make sure your Linode has a private IP address assigned to it. To do so, visit the "Remote Access" tab in the Linode Manager. If you need to add a private IP, reboot your Linode after doing so before proceeding with the next step.
diff --git a/docs/databases/postgresql/centos-5.md b/docs/databases/postgresql/centos-5.md
index 307d92c7d3c..c4e7a4e19d6 100644
--- a/docs/databases/postgresql/centos-5.md
+++ b/docs/databases/postgresql/centos-5.md
@@ -121,7 +121,7 @@ local all all md5
{{< /file-excerpt >}}
-As root, restart the Postgresql service:
+As root, restart the PostgreSQL service:
service postgresql restart
diff --git a/docs/databases/postgresql/configure-postgresql.md b/docs/databases/postgresql/configure-postgresql.md
index 585bec55bfd..9534e8b1f15 100644
--- a/docs/databases/postgresql/configure-postgresql.md
+++ b/docs/databases/postgresql/configure-postgresql.md
@@ -57,7 +57,7 @@ The contents of the configuration file are broken up into different sections:
|---|---|
|File Locations | Defines where values of the database will be stored |
|Connections and Authentications | Allows you to define the settings for connections, security, and authentication |
-|Resource Usage | Defines the paramaters (memory, space) usable by PostgreSQL. |
+|Resource Usage | Defines the parameters (memory, space) usable by PostgreSQL. |
|Write Ahead Log | Configures *Write-Ahead logging*, which if properly configured, can result in a lower amount of disk writes. |
|Replication | Control the way replications and replication data is handled by the server. |
|Query Tuning | This set of directives can help you optimize the process of querying to the database. |
@@ -115,7 +115,7 @@ To allow a user on a remote system to log in to the `example` database using a n
host example exampleuser 192.0.2.0 password
{{< /file-excerpt >}}
-The entires in this table are read in order for each incoming connection attempt. The first entry that matches will be applied to the connection. As a result, more general configurations (matching all users, all databases, or all IP addresses) should come at the end of the file, and should generally have tighter restrictions. More specific matches with less stringent authentication methods (such as the example above) should be placed at the beginning of the list.
+The entries in this table are read in order for each incoming connection attempt. The first entry that matches will be applied to the connection. As a result, more general configurations (matching all users, all databases, or all IP addresses) should come at the end of the file, and should generally have tighter restrictions. More specific matches with less stringent authentication methods (such as the example above) should be placed at the beginning of the list.
{{< note >}}
See the [official pg_hba documentation](https://www.postgresql.org/docs/9.3/static/auth-pg-hba-conf.html) for details about each of the configuration options.
diff --git a/docs/databases/postgresql/create-a-highly-available-postgresql-cluster-using-patroni-and-haproxy.md b/docs/databases/postgresql/create-a-highly-available-postgresql-cluster-using-patroni-and-haproxy.md
index 4a9aec438e9..0a676065812 100644
--- a/docs/databases/postgresql/create-a-highly-available-postgresql-cluster-using-patroni-and-haproxy.md
+++ b/docs/databases/postgresql/create-a-highly-available-postgresql-cluster-using-patroni-and-haproxy.md
@@ -35,7 +35,7 @@ This guide shows you how to create a highly available Postgres cluster of three
sudo apt update && sudo apt upgrade
-4. Create five Linodes on your account, all within the same datacenter. Take note of each Linode's [private IP address](/docs/networking/remote-access/#adding-private-ip-addresses)
+4. Create five Linodes on your account, all within the same data center. Take note of each Linode's [private IP address](/docs/networking/remote-access/#adding-private-ip-addresses)
{{< note >}}
This guide is written for a non-root user. Commands that require elevated privileges are prefixed with `sudo`. If you’re not familiar with the `sudo` command, see the [Users and Groups](/docs/tools-reference/linux-users-and-groups) guide.
@@ -43,7 +43,7 @@ This guide is written for a non-root user. Commands that require elevated privil
## Install PostgreSQL
-Install Postgres on three Linodes in your setup. Because the configuration in this guide uses private IP addresses to communicate between Linodes in the same datacenter, this setup may not meet certain [Highly Available requirements](https://docs.oracle.com/cd/B28359_01/server.111/b28281/hadesign.htm#g1007388). For more information about private IPs, visit our [Remote Access guide](/docs/networking/remote-access/#adding-private-ip-addresses).
+Install Postgres on three Linodes in your setup. Because the configuration in this guide uses private IP addresses to communicate between Linodes in the same data center, this setup may not meet certain [Highly Available requirements](https://docs.oracle.com/cd/B28359_01/server.111/b28281/hadesign.htm#g1007388). For more information about private IPs, visit our [Remote Access guide](/docs/networking/remote-access/#adding-private-ip-addresses).
The examples in this guide assign the private IP addresses of the three Postgres Linodes `192.0.2.11`, `192.0.2.12` and `192.0.2.13`. To setup a private IP address on a Linode, refer to the [Remote Access guide](/docs/networking/remote-access/#adding-private-ip-addresses) for more information.
diff --git a/docs/databases/postgresql/fedora-12.md b/docs/databases/postgresql/fedora-12.md
index b646bbc47cd..94b1cc5c0b5 100644
--- a/docs/databases/postgresql/fedora-12.md
+++ b/docs/databases/postgresql/fedora-12.md
@@ -120,7 +120,7 @@ local all all md5
{{< /file-excerpt >}}
-As root, restart the Postgresql service:
+As root, restart the PostgreSQL service:
service postgresql restart
diff --git a/docs/databases/postgresql/fedora-13.md b/docs/databases/postgresql/fedora-13.md
index 27eed3c1604..d41ba70ef55 100644
--- a/docs/databases/postgresql/fedora-13.md
+++ b/docs/databases/postgresql/fedora-13.md
@@ -120,7 +120,7 @@ local all all md5
{{< /file-excerpt >}}
-As root, restart the Postgresql service:
+As root, restart the PostgreSQL service:
service postgresql restart
diff --git a/docs/databases/postgresql/fedora-14.md b/docs/databases/postgresql/fedora-14.md
index 547efb3c9d8..6f7cf8ceff8 100644
--- a/docs/databases/postgresql/fedora-14.md
+++ b/docs/databases/postgresql/fedora-14.md
@@ -47,7 +47,7 @@ The current version of the database server will be installed, along with several
# Configure PostgreSQL
-### Set the Postgresql Password
+### Set the PostgreSQL Password
Set a password for the "postgres" user by issuing the following command (be sure to substitute your postgres password for "CHANGME" below):
@@ -128,7 +128,7 @@ Change it to the following to use password authentication:
local all all md5
{{< /file-excerpt >}}
-As root, restart the Postgresql service:
+As root, restart the PostgreSQL service:
service postgresql restart
diff --git a/docs/databases/postgresql/how-to-access-postgresql-database-remotely-using-pgadmin-on-windows.md b/docs/databases/postgresql/how-to-access-postgresql-database-remotely-using-pgadmin-on-windows.md
index 5491f2e1402..81681339105 100644
--- a/docs/databases/postgresql/how-to-access-postgresql-database-remotely-using-pgadmin-on-windows.md
+++ b/docs/databases/postgresql/how-to-access-postgresql-database-remotely-using-pgadmin-on-windows.md
@@ -16,7 +16,7 @@ external_resources:
- '[PostgreSQL Documentation](http://www.postgresql.org/docs/)'
---
-PgAdmin is a free, open-source PostgreSQL database administration GUI for Microsoft Windows, Apple MacOS X and Linux systems. It offers database server information retrieval, development, testing, and ongoing maintenance. This guide will help you install pgAdmin on Windows, providing secure, remote access to PostgreSQL databases. It is assumed that you have already installed PostgreSQL on your Linode in accordance with our [PostgreSQL installation guides](/docs/databases/postgresql/).
+PgAdmin is a free, open-source PostgreSQL database administration GUI for Microsoft Windows, Mac OS X, and Linux systems. It offers database server information retrieval, development, testing, and ongoing maintenance. This guide will help you install pgAdmin on Windows, providing secure, remote access to PostgreSQL databases. It is assumed that you have already installed PostgreSQL on your Linode in accordance with our [PostgreSQL installation guides](/docs/databases/postgresql/).
## Install pgAdmin
diff --git a/docs/databases/postgresql/how-to-install-postgresql-on-ubuntu-16-04.md b/docs/databases/postgresql/how-to-install-postgresql-on-ubuntu-16-04.md
index cf4c4fc05de..061af7c0ba3 100644
--- a/docs/databases/postgresql/how-to-install-postgresql-on-ubuntu-16-04.md
+++ b/docs/databases/postgresql/how-to-install-postgresql-on-ubuntu-16-04.md
@@ -49,7 +49,7 @@ Install PostgreSQL from the Ubuntu package repository:
By default, PostgreSQL will create a Linux user named `postgres` to access the database software.
{{< caution >}}
-The `postgres` user should not be used for for other purposes (e.g. connecting to other networks). Doing so presents a serious risk to the security of your databases.
+The `postgres` user should not be used for other purposes (e.g. connecting to other networks). Doing so presents a serious risk to the security of your databases.
{{< /caution >}}
1. Change the `postgres` user's Linux password:
diff --git a/docs/databases/postgresql/how-to-install-postgresql-relational-databases-on-centos-7.md b/docs/databases/postgresql/how-to-install-postgresql-relational-databases-on-centos-7.md
index f6d518ff422..9dca469c912 100644
--- a/docs/databases/postgresql/how-to-install-postgresql-relational-databases-on-centos-7.md
+++ b/docs/databases/postgresql/how-to-install-postgresql-relational-databases-on-centos-7.md
@@ -401,7 +401,7 @@ While specific settings and privileges can be applied to a role when it's create
ALTER ROLE examplerole CREATEDB;
- A number of permissions can be applied when creating or altering a role. See the [PostgeSQL Documentation](https://www.postgresql.org/docs/9.2/static/sql-createrole.html) for more details.
+ A number of permissions can be applied when creating or altering a role. See the [PostgreSQL Documentation](https://www.postgresql.org/docs/9.2/static/sql-createrole.html) for more details.
3. Use `\du` to confirm your changes. You'll see that the "Create DB" attribute is listed next to the `examplerole` user:
diff --git a/docs/databases/postgresql/securely-manage-remote-postgresql-servers-with-pgadmin-on-macos-x.md b/docs/databases/postgresql/securely-manage-remote-postgresql-servers-with-pgadmin-on-macos-x.md
index cf5b9bbfa5e..1c9672f7b46 100644
--- a/docs/databases/postgresql/securely-manage-remote-postgresql-servers-with-pgadmin-on-macos-x.md
+++ b/docs/databases/postgresql/securely-manage-remote-postgresql-servers-with-pgadmin-on-macos-x.md
@@ -42,7 +42,7 @@ Although PostgreSQL uses port 5432 for TCP connections, we're using the local po
[](/docs/assets/pg-admin-macosx-add-server.png)
-2. If you're having problems connectiong you may need to check PostgreSQL's configuration to ensure it accepts connections. Modify the following lines in `/etc/postgresql/9.5/main/postgresql.conf` if necessary:
+2. If you're having problems connecting, you may need to check PostgreSQL's configuration to ensure it accepts connections. Modify the following lines in `/etc/postgresql/9.5/main/postgresql.conf` if necessary:
{{< file-excerpt "/etc/postgresql/9.5/main/postgresql.conf" aconf >}}
listen_addresses = 'localhost'
diff --git a/docs/databases/postgresql/ubuntu-10-04-lucid.md b/docs/databases/postgresql/ubuntu-10-04-lucid.md
index c4370c0def6..1273c4b8921 100644
--- a/docs/databases/postgresql/ubuntu-10-04-lucid.md
+++ b/docs/databases/postgresql/ubuntu-10-04-lucid.md
@@ -136,7 +136,7 @@ local all all md5
{{< /file-excerpt >}}
-If you changed the authentication method as shown above, restart Postgresql with the following command:
+If you changed the authentication method as shown above, restart PostgreSQL with the following command:
/etc/init.d/postgresql-8.4 restart
diff --git a/docs/databases/postgresql/ubuntu-10-10-maverick.md b/docs/databases/postgresql/ubuntu-10-10-maverick.md
index 44b99724a20..ee98e9f2992 100644
--- a/docs/databases/postgresql/ubuntu-10-10-maverick.md
+++ b/docs/databases/postgresql/ubuntu-10-10-maverick.md
@@ -134,7 +134,7 @@ local all all md5
{{< /file-excerpt >}}
-If you changed the authentication method as shown above, restart Postgresql with the following command:
+If you changed the authentication method as shown above, restart PostgreSQL with the following command:
/etc/init.d/postgresql-8.4 restart
diff --git a/docs/databases/postgresql/ubuntu-9-10-karmic.md b/docs/databases/postgresql/ubuntu-9-10-karmic.md
index 1135eb3cd10..7a718188e20 100644
--- a/docs/databases/postgresql/ubuntu-9-10-karmic.md
+++ b/docs/databases/postgresql/ubuntu-9-10-karmic.md
@@ -137,7 +137,7 @@ local all all md5
{{< /file-excerpt >}}
-If you changed the authentication method as shown above, restart Postgresql with the following command:
+If you changed the authentication method as shown above, restart PostgreSQL with the following command:
/etc/init.d/postgresql-8.4 restart
diff --git a/docs/databases/postgresql/use-postgresql-relational-databases-on-ubuntu-12-04.md b/docs/databases/postgresql/use-postgresql-relational-databases-on-ubuntu-12-04.md
index e684877db3e..aa54fdb3030 100644
--- a/docs/databases/postgresql/use-postgresql-relational-databases-on-ubuntu-12-04.md
+++ b/docs/databases/postgresql/use-postgresql-relational-databases-on-ubuntu-12-04.md
@@ -138,7 +138,7 @@ local all all md5
{{< /file-excerpt >}}
-If you changed the authentication method as shown above, restart Postgresql with the following command:
+If you changed the authentication method as shown above, restart PostgreSQL with the following command:
service postgresql restart
diff --git a/docs/databases/redis/install-and-configure-redis-on-centos-7.md b/docs/databases/redis/install-and-configure-redis-on-centos-7.md
index 98e35aac783..44a250bd854 100644
--- a/docs/databases/redis/install-and-configure-redis-on-centos-7.md
+++ b/docs/databases/redis/install-and-configure-redis-on-centos-7.md
@@ -133,7 +133,7 @@ The following steps will guide you through master/slave replication, with the sl
For this section, you will use two Linodes, a master and a slave.
{{< note >}}
-To communicate over the private network, your master and slave Linodes must reside in the same datacenter.
+To communicate over the private network, your master and slave Linodes must reside in the same data center.
{{< /note >}}
### Prepare Your Linodes
diff --git a/docs/development/ci/automate-builds-with-jenkins-on-ubuntu.md b/docs/development/ci/automate-builds-with-jenkins-on-ubuntu.md
index 0c73489644b..aa3cfdca526 100644
--- a/docs/development/ci/automate-builds-with-jenkins-on-ubuntu.md
+++ b/docs/development/ci/automate-builds-with-jenkins-on-ubuntu.md
@@ -64,11 +64,11 @@ Before starting automating your entire workflow, it's necessary to understand th
As you can see the most basic process consist of three phases: build - test - deploy. Each time you make changes on your distributed version control system you trigger an automation cycle on the Jenkins server. The entire set of instructions for running the process is on the `Jenkinsfile` located at the root of your source repository. That single file tells the server *what* to do, *when* to do it and *how* you want those tasks to be performed.
-## Write an Example NodeJS Application
+## Write an Example Node.js Application
As mentioned in the previous section, the automation process starts by making a commit to a Version Control System.
-Create a new repository in GitHub. This guide will use a simple NodeJS application to showcase how Jenkins Pipelines works, select your `.gitignore` accordingly and don't forget to initialize it with a `README`:
+Create a new repository in GitHub. This guide will use a simple Node.js application to showcase how Jenkins Pipelines works, select your `.gitignore` accordingly and don't forget to initialize it with a `README`:

@@ -145,7 +145,7 @@ This example will use two Docker containers, one to serve `app.js` using Express
2. Create the `Dockerfile` and `package.json` for the `express-image`.
- {{< file "~/jenkins-guide/express-image/Dockerfile" >}}
+ {{< file "~/jenkins-guide/express-image/Dockerfile" >}}
FROM node:6-alpine
# Create server working directory
@@ -165,11 +165,11 @@ EXPOSE 9000
CMD ["npm", "start"]
{{< /file >}}
- This image runs by default `app.js` when launched. You can think of it as the "dockerized" version of the web application.
+ This image runs by default `app.js` when launched. You can think of it as the "dockerized" version of the web application.
3. The Dockerfile copies a `package.json` file from the root of your project directory into the new image; create this file and add the following content:
- {{< file "~/jenkins-guide/express-image/package.json" json >}}
+ {{< file "~/jenkins-guide/express-image/package.json" json >}}
{
"name": "express-image",
"version": "1.0.0",
@@ -192,7 +192,7 @@ CMD ["npm", "start"]
4. Create the `Dockerfile` for the `test-image`.
- {{< file "~/jenkins-guide/test-image/Dockerfile" conf >}}
+ {{< file "~/jenkins-guide/test-image/Dockerfile" conf >}}
FROM node:6-alpine
# Create Reports directory
@@ -214,11 +214,11 @@ EXPOSE 9000
CMD ["npm", "test"]
{{< /file >}}
- This image creates a Report folder (which will be used later) and installs dependencies from `package.json`. On start, it executes the Mocha tests.
+ This image creates a Report folder (which will be used later) and installs dependencies from `package.json`. On start, it executes the Mocha tests.
5. Add a `package.json` file for your testing image:
- {{< file "~/jenkins-guide/test-image/package.json" conf >}}
+ {{< file "~/jenkins-guide/test-image/package.json" conf >}}
{
"name": "test-image",
"version": "1.0.0",
@@ -246,7 +246,7 @@ CMD ["npm", "test"]
}
{{< /file >}}
- This JSON file contains all the necessary dependencies, including `mocha-junit-reporter` that will be needed by Jenkins for tests storage. Notice that the test script is configured with the `mochaFile` option that uses the image's report folder specified in the `Dockerfile`.
+ This JSON file contains all the necessary dependencies, including `mocha-junit-reporter` that will be needed by Jenkins for tests storage. Notice that the test script is configured with the `mochaFile` option that uses the image's report folder specified in the `Dockerfile`.
Your final project distribution will be similar to this:
@@ -345,7 +345,7 @@ Using the package maintained by the Jenkins project allows you to use a more rec
7. Use the Linode Manager to reboot your server to apply these changes.
- {{< caution >}}
+ {{< caution >}}
It's out of the scope of this guide to establish security parameters for Jenkins remote installation. However, be aware of these critical points that need to be addressed in a production environment:
- When you add `jenkins` user to the Docker group you are technically giving it `root` permissions.
@@ -442,7 +442,7 @@ Code blocks are delimited by curly brackets {} and no semicolons are used. Each
* Create Docker images, dockerize applications, pull images.
* Almost any action you can think of is possible through steps.
-All this actions can be executed inside your `agent` or you can also instruct Jenkins to remotely perform any of them via SSH. As you can see there are endless automation possibilities. In a simple scenario, only one pipeline executing its stages sequentially is enough to achieve the desired final state, but you can define pipelines to run in parallel if needed. For detailed information about Jenkins Declarative Pipeline Syntax, see the official [documentation.](https://jenkins.io/doc/book/pipeline/syntax/)
+All these actions can be executed inside your `agent`, or you can instruct Jenkins to perform any of them remotely via SSH. As you can see, there are endless automation possibilities. In a simple scenario, only one pipeline executing its stages sequentially is enough to achieve the desired final state, but you can define pipelines to run in parallel if needed. For detailed information about Jenkins Declarative Pipeline Syntax, see the official [documentation](https://jenkins.io/doc/book/pipeline/syntax/).
## Start Working with Pipelines
@@ -511,7 +511,7 @@ From here you can obtain valuable information regarding: 1) your build number, 2
### Automate Your Entire Process with Jenkins
-The `Jenkinsfile` template uses a very basic pipeline structure with only three stages. You can customize it to accommodate as many stages as needed. The final Pipeline structure is dictated by the project complexity and the development guidelines you must follow. Since you've already walked through the NodeJS example, you know how to design a pipeline that automates each stage. For the purpose of this guide, the resulting pipeline should:
+The `Jenkinsfile` template uses a very basic pipeline structure with only three stages. You can customize it to accommodate as many stages as needed. The final Pipeline structure is dictated by the project complexity and the development guidelines you must follow. Since you've already walked through the Node.js example, you know how to design a pipeline that automates each stage. For the purpose of this guide, the resulting pipeline should:
* Build Stage
- Create both images and abort any further testing or deployment if an error is encountered.
@@ -792,7 +792,7 @@ It's time to commit the complete Jenkinsfile to your Jenkins server and trigger
### Configure Automatic Triggers
-You can set Jenkins to scan your repository periodically. To do so just click again on the gear icon on the Pipeline view and then click the **Configure** link. There are many options available. Find **Scan Repository Triggers** and check the box **Periodically if not otherwise run**. You can chose any amount of time and for this example, one minute will be selected.
+You can set Jenkins to scan your repository periodically. To do so, click the gear icon on the Pipeline view again, then click the **Configure** link. There are many options available. Find **Scan Repository Triggers** and check the box **Periodically if not otherwise run**. You can choose any interval; this example uses one minute.

@@ -831,7 +831,7 @@ Now, induce an error on the `BUILD` stage.
1. Edit your `express-image/package.json`. Change the express package name to `express-ERROR` to simulate a mistyping.
- {{< file-excerpt "~/jenkins-guide/express-image/package.json" json >}}
+ {{< file-excerpt "~/jenkins-guide/express-image/package.json" json >}}
"dependencies": {
"express-ERROR": "^4.13.3"
}
diff --git a/docs/development/ci/how-to-develop-and-deploy-your-applications-using-wercker.md b/docs/development/ci/how-to-develop-and-deploy-your-applications-using-wercker.md
index 90737c7ff34..57fccf2d5c4 100644
--- a/docs/development/ci/how-to-develop-and-deploy-your-applications-using-wercker.md
+++ b/docs/development/ci/how-to-develop-and-deploy-your-applications-using-wercker.md
@@ -310,7 +310,7 @@ As the name implies, Wercker applications correspond to each of your projects. B
#### jClocks Example
-Similar to the configuration files, you have several environmental variables to setup.
+Similar to the configuration files, you have several environment variables to set up.
1. For the first example you need a SSH key pair for communication with your Linode. Click on the **Environment** tab:
@@ -368,7 +368,7 @@ Click the **Workflows** tab in the Wercker dashboard. The editor will show a sin

-5. Next you need to define the environmental variables, but this time you will do it inside each pipeline and not globally. On the Workflows tab, click the **deploy-docker** pipeline at the botton of the screen. Here you can create the variables. There are two variables from this example's `wercker.yml` that must be defined here: `DOCKER_USERNAME` and `DOCKER_PASSWORD`. Create them and mark the password as **protected**.
+5. Next, you need to define the environment variables, but this time you will do it inside each pipeline rather than globally. On the Workflows tab, click the **deploy-docker** pipeline at the bottom of the screen. Here you can create the variables. There are two variables from this example's `wercker.yml` that must be defined here: `DOCKER_USERNAME` and `DOCKER_PASSWORD`. Create them and mark the password as **protected**.
6. Select the **deploy-linode** pipeline and create an SSH key pair, similar to the last example. Remember to copy the public key to your remote server.
@@ -408,7 +408,7 @@ The final example demonstrates the Wercker CLI.

- The output should be similar to the logs you saw on the Wercker dashboard. The difference is that you can check each step locally and detect any errors early in the process. The Wercler CLI replicates the SaaS behavior: it downloads specified images, builds, tests and shows errors. Since the CLI is a development tool intended to facilitate local testing, you will not be able to deploy the end result remotely.
+ The output should be similar to the logs you saw on the Wercker dashboard. The difference is that you can check each step locally and detect any errors early in the process. The Wercker CLI replicates the SaaS behavior: it downloads specified images, builds, tests and shows errors. Since the CLI is a development tool intended to facilitate local testing, you will not be able to deploy the end result remotely.
3. Build the application with Go:
diff --git a/docs/development/frameworks/apache-tomcat-on-debian-6-squeeze.md b/docs/development/frameworks/apache-tomcat-on-debian-6-squeeze.md
index 40d99f71b9b..3a0f4998ab0 100644
--- a/docs/development/frameworks/apache-tomcat-on-debian-6-squeeze.md
+++ b/docs/development/frameworks/apache-tomcat-on-debian-6-squeeze.md
@@ -87,7 +87,7 @@ Issue the following command to restart the Tomcat server, which will allow this
/etc/init.d/tomcat6 restart
-Congratulations! You know have a working Apache Tomcat installation.
+Congratulations! You now have a working Apache Tomcat installation.
# More Information
diff --git a/docs/development/frameworks/webpy-on-debian-5-lenny.md b/docs/development/frameworks/webpy-on-debian-5-lenny.md
index be439f0fec0..18a9689afb5 100644
--- a/docs/development/frameworks/webpy-on-debian-5-lenny.md
+++ b/docs/development/frameworks/webpy-on-debian-5-lenny.md
@@ -197,7 +197,7 @@ application = app.wsgifunc()
This program connects to the PostgreSQL database "webpy" and looks in the table "notes" for a note that matches the text "a note." If the note is found, the program returns the text "a note is found"; otherwise, the page will return "no notes are found." Make sure there is a role, or user, in your PostgreSQL database called "webpy" with the credentials specified on the `db` line of this example.
-At the PosgreSQL prompt, issue the following commands to the PostgreSQL shell statement to create the required database and tables. The "webpy" user for PostgreSQL must already exist:
+At the PostgreSQL prompt, issue the following commands to the PostgreSQL shell statement to create the required database and tables. The "webpy" user for PostgreSQL must already exist:
CREATE DATABASE webpy;
GRANT ALL ON notes TO webpy;
diff --git a/docs/development/frameworks/webpy-on-debian-6-squeeze.md b/docs/development/frameworks/webpy-on-debian-6-squeeze.md
index f928564db12..618fc13a773 100644
--- a/docs/development/frameworks/webpy-on-debian-6-squeeze.md
+++ b/docs/development/frameworks/webpy-on-debian-6-squeeze.md
@@ -199,7 +199,7 @@ application = app.wsgifunc()
This program connects to the PostgreSQL database "webpy" and looks in the table "notes" for a note that matches the text "a note." If the note is found, the program returns the text "a note is found"; otherwise, the page will return "no notes are found." Make sure there is a role, or user, in your PostgreSQL database called "webpy" with the credentials specified on the `db` line of this example.
-At the PosgreSQL prompt, issue the following commands to the PostgreSQL shell statement to create the required database and tables. The "webpy" user for PostgreSQL must already exist:
+At the PostgreSQL prompt, issue the following commands to the PostgreSQL shell statement to create the required database and tables. The "webpy" user for PostgreSQL must already exist:
CREATE DATABASE webpy;
GRANT ALL ON notes TO webpy;
diff --git a/docs/development/frameworks/webpy-on-ubuntu-10-04-lucid.md b/docs/development/frameworks/webpy-on-ubuntu-10-04-lucid.md
index feab3c41775..cc3a2abac99 100644
--- a/docs/development/frameworks/webpy-on-ubuntu-10-04-lucid.md
+++ b/docs/development/frameworks/webpy-on-ubuntu-10-04-lucid.md
@@ -186,7 +186,7 @@ application = app.wsgifunc()
This program connects to the PostgreSQL database "webpy" and looks in the table "notes" for a note that matches the text "a note." If the note is found, the program returns the text "a note is found"; otherwise, the page will return "no notes are found." Make sure there is a role or user in your PostgreSQL database called "webpy" with the credentials specified on the `db` line of this example.
-At the PosgreSQL prompt, issue the following commands to the PostgreSQL shell statement to create the required database and tables. The "webpy" user for PostgreSQL must already exist:
+At the PostgreSQL prompt, issue the following commands to the PostgreSQL shell statement to create the required database and tables. The "webpy" user for PostgreSQL must already exist:
CREATE DATABASE webpy;
GRANT ALL ON notes TO webpy;
diff --git a/docs/development/frameworks/webpy-on-ubuntu-10-10-maverick.md b/docs/development/frameworks/webpy-on-ubuntu-10-10-maverick.md
index c2b6189856c..8c9eec879e5 100644
--- a/docs/development/frameworks/webpy-on-ubuntu-10-10-maverick.md
+++ b/docs/development/frameworks/webpy-on-ubuntu-10-10-maverick.md
@@ -175,7 +175,7 @@ application = app.wsgifunc()
This program connects to the PostgreSQL database "webpy" and looks in the table "notes" for a note that matches the text "a note". If the note is found, the program returns the text "a note is found"; otherwise, the page will return "no notes are found". Make sure there is a role, or user, in your PostgreSQL database called "webpy" with the credentials specified on the `db` line of this example.
-At the PosgreSQL prompt, issue the following commands to the PostgreSQL shell statement to create the required database and tables. The "webpy" user for PostgreSQL must already exist:
+At the PostgreSQL prompt, issue the following commands to the PostgreSQL shell statement to create the required database and tables. The "webpy" user for PostgreSQL must already exist:
CREATE DATABASE webpy;
GRANT ALL ON notes TO webpy;
diff --git a/docs/development/frameworks/webpy-on-ubuntu-12-04-precise-pangolin.md b/docs/development/frameworks/webpy-on-ubuntu-12-04-precise-pangolin.md
index e821ab22c0c..5e3147a1288 100644
--- a/docs/development/frameworks/webpy-on-ubuntu-12-04-precise-pangolin.md
+++ b/docs/development/frameworks/webpy-on-ubuntu-12-04-precise-pangolin.md
@@ -198,7 +198,7 @@ This program connects to the PostgreSQL database "webpy" and looks in the table
For more information about PostgreSQL, see our [PostgreSQL guides](/docs/databases/postgresql).
{{< /note >}}
-At the PosgreSQL prompt, issue the following commands to the PostgreSQL shell statement to create the required database and tables. The "webpy" user for PostgreSQL must already exist:
+At the PostgreSQL prompt, issue the following commands to the PostgreSQL shell statement to create the required database and tables. The "webpy" user for PostgreSQL must already exist:
CREATE DATABASE webpy;
GRANT ALL ON notes TO webpy;
diff --git a/docs/development/frameworks/yesod-nginx-mysql-on-debian-7-wheezy.md b/docs/development/frameworks/yesod-nginx-mysql-on-debian-7-wheezy.md
index 6087eba53bf..482c16d076b 100644
--- a/docs/development/frameworks/yesod-nginx-mysql-on-debian-7-wheezy.md
+++ b/docs/development/frameworks/yesod-nginx-mysql-on-debian-7-wheezy.md
@@ -24,12 +24,12 @@ external_resources:
Yesod is a web framework based on the purely functional programming language Haskell. It is designed for productive development of type-safe, RESTful, and high performance web applications. This guide describes the required process for deploying Yesod and Nginx web server, MySQL database on Debian 7 (Wheezy).
{{< note >}}
-The steps required in this guide require root privileges. Be sure to run the steps below as root or with the sudo prefix. For more information on privileges see our [Users and Groups](/docs/tools-reference/linux-users-and-groups) guide.
+The steps required in this guide require root privileges. Be sure to run the steps below as root or with the sudo prefix. For more information on privileges see our [Users and Groups](/docs/tools-reference/linux-users-and-groups/) guide.
{{< /note >}}
## Prerequisites
-Before you begin installing and configuring the components described below, please make sure you've followed our instructions in the [Getting Started](/docs/getting-started) guide for setting your hostname. Here's how to check.
+Before you begin installing and configuring the components described below, please make sure you've followed our instructions in the [Getting Started](/docs/getting-started/) guide for setting your hostname. Here's how to check.
1. Enter the following commands to view the hostname:
@@ -196,7 +196,7 @@ We don't need to modify this configuration file, it's acceptable as is. So you o
If your Linode has a firewall, the port ``3000`` is probably inaccessible from outside, so you will not be able to see your site at http://www.yoursite.com:3000/. This port is only for testing or developing, so don't open it on your firewall. Instead, you can set up an SSH tunnel on your Linode, and view your site at http://localhost:3000/ via this tunnel. Please check [Setting up an SSH Tunnel with Your Linode for Safe Browsing](/docs/networking/ssh/setting-up-an-ssh-tunnel-with-your-linode-for-safe-browsing/) for more details.
-You may have noticed that we haven't configure Nginx yet. In fact, Yesod applications contain an http server called Warp, which is written in Haskell, and has a very fast run-time. Without http servers like Apache or Nginx installed, you can run Yesod applications as standalones. This feature is similar to the Express framework on Node.js.
+You may have noticed that we haven't configured Nginx yet. In fact, Yesod applications include an HTTP server called Warp, which is written in Haskell and has a very fast run-time. Without HTTP servers like Apache or Nginx installed, you can run standalone Yesod applications. This feature is similar to the Express framework on Node.js.
The initial setup of your first Yesod site has been finished. To start more advanced development of your Yesod site, please read [The Yesod Book](http://www.yesodweb.com/book/) for more details.
@@ -372,4 +372,3 @@ Link the above file into ``/etc/nginx/sites-enabled``, and restart ``nginx``:
You can check it at *http://www.yoursite.com/* now.
The installation and configuration of Yesod working with Nginx and MySQL are finished.
-
diff --git a/docs/development/go/_index.md b/docs/development/go/_index.md
new file mode 100644
index 00000000000..99249bc4aa6
--- /dev/null
+++ b/docs/development/go/_index.md
@@ -0,0 +1,12 @@
+---
+author:
+ name: Linode
+ email: docs@linode.com
+description: ''
+keywords: ["development", "go", "golang"]
+license: '[CC BY-ND 4.0](https://creativecommons.org/licenses/by-nd/4.0)'
+aliases: []
+published: 2018-01-29
+title: Go
+show_in_lists: true
+---
diff --git a/docs/development/go/install-go-on-ubuntu.md b/docs/development/go/install-go-on-ubuntu.md
new file mode 100644
index 00000000000..01b2bb2ce72
--- /dev/null
+++ b/docs/development/go/install-go-on-ubuntu.md
@@ -0,0 +1,89 @@
+---
+author:
+ name: Linode
+ email: docs@linode.com
+description: 'This guide shows how to install the Go programming language on Ubuntu.'
+og_description: 'Go is a statically typed, compiled programming language developed by Google. This guide will show you how to install Go on Ubuntu.'
+keywords: ["Go", "Go Programming", "Golang", "Ubuntu"]
+license: '[CC BY-ND 4.0](https://creativecommons.org/licenses/by-nd/4.0)'
+published: 2018-01-29
+modified: 2018-01-29
+modified_by:
+ name: Linode
+title: 'How to Install Go on Ubuntu'
+---
+
+## What is Go?
+
+[Go](https://golang.org/) is a compiled, statically typed programming language developed by Google. Many modern applications, including Docker, Kubernetes, and Caddy, are written in Go.
+
+## Install Go
+
+1. Use `curl` or `wget` to download the current binary for Go from the official [download page](https://golang.org/dl/). As of this writing, the current version is 1.9.3. Check the download page for updates, and replace `1.9.3` with the most recent stable version if necessary.
+
+ curl -O https://storage.googleapis.com/golang/go1.9.3.linux-amd64.tar.gz
+
+2. Verify the `.tar` file using `sha256sum`:
+
+ sha256sum go1.9.3.linux-amd64.tar.gz
+
+ {{< output >}}
+a4da5f4c07dfda8194c4621611aeb7ceaab98af0b38bfb29e1be2ebb04c3556c go1.9.3.linux-amd64.tar.gz
+{{< /output >}}
+
+3. Extract the tarball:
+
+ tar -xvf go1.9.3.linux-amd64.tar.gz
+
+4. Adjust the permissions and move the `go` directory to `/usr/local`:
+
+ sudo chown -R root:root ./go
+ sudo mv go /usr/local
+
+## Adjust the Path Variable
+
+1. Using a text editor, open the `~/.profile` file and add the following two lines to the bottom of the file:
+
+ {{< file-excerpt "~/.profile" conf >}}
+export GOPATH=$HOME/go
+export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
+{{< /file-excerpt >}}
+
+2. Save the file, and load the commands into the current shell instance:
+
+ source ~/.profile
+
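+
+3. If the path is set correctly, the `go` command is now available in your shell. With the 1.9.3 release installed above, checking the version should report it:
+
+    go version
+
+    {{< output >}}
+go version go1.9.3 linux/amd64
+{{< /output >}}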
+## Test the Installation
+
+According to [the official documentation](https://golang.org/doc/install#testing), the following steps are the recommended way to test the success of the installation:
+
+1. In your home directory, create a folder named `go`; this will be your workspace:
+
+ mkdir go
+
+2. Within the `go` directory, create `src/hello`, and inside it create a file named `hello.go` with the contents shown below:
+
+ mkdir -p go/src/hello && cd go/src/hello
+ touch hello.go
+
+ {{< file "hello.go" go >}}
+package main
+
+import "fmt"
+
+func main() {
+ fmt.Printf("hello, world\n")
+}
+{{< /file >}}
+
+3. Build the `hello.go` file:
+
+ go build
+
+4. Run the resulting executable:
+
+ ./hello
+
+ {{< output >}}
+hello, world
+{{< /output >}}
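+
+5. Optionally, because `$GOPATH/bin` was added to your `PATH` earlier, you can also install the binary into the workspace and run it from any directory. A short sketch, run from inside `go/src/hello`:
+
+    go install
+    hello
+
+    {{< output >}}
+hello, world
+{{< /output >}}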
diff --git a/docs/development/iot/_index.md b/docs/development/iot/_index.md
new file mode 100644
index 00000000000..e3a356c954a
--- /dev/null
+++ b/docs/development/iot/_index.md
@@ -0,0 +1,5 @@
+---
+title: Internet of Things
+show_in_lists: true
+---
+
diff --git a/docs/development/iot/install-thingsboard-iot-dashboard.md b/docs/development/iot/install-thingsboard-iot-dashboard.md
new file mode 100644
index 00000000000..dd62aa5c40a
--- /dev/null
+++ b/docs/development/iot/install-thingsboard-iot-dashboard.md
@@ -0,0 +1,297 @@
+---
+author:
+ name: Jared Kobos
+ email: docs@linode.com
+description: 'This guide will show how to track and visualize data from an Internet of Things device using ThingsBoard.'
+og_description: 'This guide shows how to install the ThingsBoard open source dashboard for Internet of Things devices. A Raspberry Pi is used to demonstrate sending data to the cloud dashboard.'
+keywords: ["iot", "raspberry pi", "internet of things", "dashboard"]
+license: '[CC BY-ND 4.0](https://creativecommons.org/licenses/by-nd/4.0)'
+published: 2018-01-30
+modified: 2018-01-30
+modified_by:
+ name: Linode
+title: 'View IoT Data with ThingsBoard'
+external_resources:
+ - '[Getting Started – ThingsBoard](https://thingsboard.io/docs/getting-started-guides/helloworld)'
+ - '[ThingsBoard Github Repo](https://github.com/thingsboard/thingsboard)'
+---
+
+
+
+## What is ThingsBoard?
+
+[ThingsBoard](https://thingsboard.io/) is an open source platform for collecting and visualizing data from Internet of Things devices. Data from any number of devices can be sent to a cloud server where it can be viewed or shared through a customizable dashboard.
+
+This guide will show how to install ThingsBoard on a Linode and use a Raspberry Pi to send simple telemetry data to a cloud dashboard.
+
+{{< note >}}
+This guide will use a Raspberry Pi 3 with a [Sense HAT](https://www.raspberrypi.org/products/sense-hat/). You can substitute any device capable of sending telemetry data, or use `curl` to experiment with ThingsBoard without using any external devices.
+{{< /note >}}
+
+## Install ThingsBoard
+
+ThingsBoard runs on Java 8, and the Oracle JDK is recommended.
+
+{{< content "install-java-jdk.md" >}}
+
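+Once Java is installed, you can optionally confirm that Java 8 is the active version (the exact build number in the output will vary):
+
+    java -version
+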
+### Set Up PostgreSQL
+
+1. Install PostgreSQL:
+
+ sudo apt install postgresql postgresql-contrib
+
+2. Create a database and database user for ThingsBoard:
+
+ sudo -u postgres createdb thingsboard
+ sudo -u postgres createuser thingsboard
+
+3. Set a password for the `thingsboard` user and grant access to the database:
+
+ sudo -u postgres psql thingsboard
+ ALTER USER thingsboard WITH PASSWORD 'thingsboard';
+ GRANT ALL PRIVILEGES ON DATABASE thingsboard TO thingsboard;
+ \q
+
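+4. Optionally, confirm that the new role can connect. This is only a quick check and assumes the default Ubuntu `pg_hba.conf`, which allows password authentication for TCP connections from localhost. Enter the password you set above when prompted, then type `\q` to exit:
+
+        psql -h 127.0.0.1 -U thingsboard -d thingsboard
+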
+### Install ThingsBoard
+
+1. Download the installation package. Check the [releases](https://github.com/thingsboard/thingsboard/releases) page and replace the version numbers in the following command with the version tagged **Latest release**:
+
+ wget https://github.com/thingsboard/thingsboard/releases/download/v1.3.1/thingsboard-1.3.1.deb
+
+2. Install ThingsBoard:
+
+ sudo dpkg -i thingsboard-1.3.1.deb
+
+
+3. Open `/etc/thingsboard/conf/thingsboard.yml` in a text editor and comment out the `HSQLDB DAO Configuration` section:
+
+ {{< file-excerpt "/etc/thingsboard/conf/thingsboard.yml" yaml >}}
+# HSQLDB DAO Configuration
+#spring:
+# data:
+# jpa:
+# repositories:
+# enabled: "true"
+# jpa:
+# hibernate:
+# ddl-auto: "validate"
+# database-platform: "org.hibernate.dialect.HSQLDialect"
+# datasource:
+# driverClassName: "${SPRING_DRIVER_CLASS_NAME:org.hsqldb.jdbc.JDBCDriver}"
+# url: "${SPRING_DATASOURCE_URL:jdbc:hsqldb:file:${SQL_DATA_FOLDER:/tmp}/thingsboardDb;sql.enforce_size=false}"
+# username: "${SPRING_DATASOURCE_USERNAME:sa}"
+# password: "${SPRING_DATASOURCE_PASSWORD:}"
+{{< /file-excerpt >}}
+
+4. In the same section, uncomment the PostgreSQL configuration block. Replace `thingsboard` in the username and password fields with the username and password of your `thingsboard` user:
+
+ {{< file-excerpt "/etc/thingsboard/conf/thingsboard.yml" yaml >}}
+# PostgreSQL DAO Configuration
+spring:
+ data:
+ jpa:
+ repositories:
+ enabled: "true"
+ jpa:
+ hibernate:
+ ddl-auto: "validate"
+ database-platform: "org.hibernate.dialect.PostgreSQLDialect"
+ datasource:
+ driverClassName: "${SPRING_DRIVER_CLASS_NAME:org.postgresql.Driver}"
+ url: "${SPRING_DATASOURCE_URL:jdbc:postgresql://localhost:5432/thingsboard}"
+ username: "${SPRING_DATASOURCE_USERNAME:thingsboard}"
+ password: "${SPRING_DATASOURCE_PASSWORD:thingsboard}"
+{{< /file-excerpt >}}
+
+5. Run the installation script. The `--loadDemo` option loads demo data:
+
+ sudo /usr/share/thingsboard/bin/install/install.sh --loadDemo
+
+6. Start the ThingsBoard service:
+
+ sudo systemctl enable thingsboard
+ sudo systemctl start thingsboard
+
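+7. ThingsBoard can take a minute or two to initialize. Optionally, check that the service is running before moving on:
+
+        sudo systemctl status thingsboard
+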
+## NGINX Reverse Proxy
+
+By default, ThingsBoard listens on `localhost:8080`. For security purposes, it's better to serve the dashboard through a reverse proxy. This guide uses NGINX, but any web server can be used.
+
+1. Install NGINX:
+
+ sudo apt install nginx
+
+2. Create `/etc/nginx/conf.d/thingsboard.conf` with a text editor and edit it to match the example below. Replace `example.com` with the public IP address or FQDN of your Linode.
+
+ {{< file "/etc/nginx/conf.d/thingsboard.conf" nginx >}}
+server {
+ listen 80;
+ listen [::]:80;
+
+ server_name example.com;
+
+ location / {
+ # try_files $uri $uri/ =404;
+ proxy_pass http://localhost:8080/;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "upgrade";
+ proxy_set_header Host $host;
+ }
+}
+{{< /file >}}
+
+3. Restart NGINX:
+
+ sudo systemctl restart nginx
+
+## Set Up ThingsBoard Device
+
+1. Navigate to your Linode's IP address with a web browser. You should see the ThingsBoard login page:
+
+ 
+
+    The demo account login is `tenant@thingsboard.org` and the password is `tenant`. Change this to a more secure password after you have signed in.
+
+2. From the main menu, click on the **Devices** icon, then click the **+** icon in the lower right to add a new device.
+
+3. Choose a name for your device. Set the **Device type** to **PI**.
+
+4. After the device is added, click on its icon in the **Devices** menu. Click on **COPY ACCESS TOKEN** to copy the API key for this device (used below).
+
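+If you do not have an external device available, you can use the access token copied in the previous step to post mock telemetry to the dashboard with `curl`. The command below is only a sketch: replace `example.com` with your Linode's public IP address or FQDN, and `ACCESS_TOKEN` with your device's token. It uses ThingsBoard's HTTP device API, which accepts telemetry at `/api/v1/{token}/telemetry`:
+
+    curl -v -X POST --header "Content-Type: application/json" \
+      --data '{"temperature": 22.5, "humidity": 41}' \
+      http://example.com/api/v1/ACCESS_TOKEN/telemetry
+
+The posted values should then appear in the telemetry view for the device in the ThingsBoard UI.
+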
+## Configure Raspberry Pi
+
+{{< note >}}
+The following steps assume that you have terminal access to a Raspberry Pi, and that the Sense HAT and its libraries are already configured. For more information on getting started with the Sense HAT, see the Raspberry Pi [official documentation](https://projects.raspberrypi.org/en/projects/getting-started-with-the-sense-hat). If you would prefer to use `curl` to send mock data to ThingsBoard (as shown in the previous section), you can skip this section.
+{{< /note >}}
+
+### Basic Python Script
+
+1. Using a text editor, create `thingsboard.py` in a directory of your choice. Add the following content, using the API key copied to your clipboard in the previous section:
+
+ {{< file "thingsboard.py" python >}}
+#!/usr/bin/env python
+
+import json
+import requests
+from sense_hat import SenseHat
+from time import sleep
+
+# Constants
+
+API_KEY = "
[Node.js](http://nodejs.org) and [NGINX](http://nginx.com/) are now working together. Route requests to one server or the other depending on your needs. Node.js offers a large [API](http://nodejs.org/api) with many tools. With Node.js, a developer can stay within the JavaScript language while working client-side or server-side.
diff --git a/docs/development/nodejs/how-to-install-nodejs.md b/docs/development/nodejs/how-to-install-nodejs.md
index b563b74030e..170ed5b9238 100644
--- a/docs/development/nodejs/how-to-install-nodejs.md
+++ b/docs/development/nodejs/how-to-install-nodejs.md
@@ -38,7 +38,7 @@ Your distro's repos will likely contain an LTS release of Node.js. This is a goo
[NPM](#node-package-manager-npm) (Node Package Manager) is included with installations of Node.js by other methods, but not here; `npm` is a separate package from `nodejs` and must be installed separately.
{{< note >}}
-Node.js from the distro's repositories in Debian 7 or 8, or Ubuntu 12.04 or 14.04 confict with the [Amateur Packet Radio Node program](https://packages.debian.org/jessie/node). In this scenario, calling Node.js requires that you use the command `nodejs -$option` instead of the standard `node -$option`. One workaround is to install the package `nodejs-legacy`, which maintains a symlink from `/usr/bin/node` to `/usr/bin/nodejs` so the normal `node` commands can be used.
+Node.js from the distro's repositories in Debian 7 or 8, or Ubuntu 12.04 or 14.04 conflict with the [Amateur Packet Radio Node program](https://packages.debian.org/jessie/node). In this scenario, calling Node.js requires that you use the command `nodejs -$option` instead of the standard `node -$option`. One workaround is to install the package `nodejs-legacy`, which maintains a symlink from `/usr/bin/node` to `/usr/bin/nodejs` so the normal `node` commands can be used.
{{< /note >}}
@@ -60,8 +60,8 @@ Compiling from source code is the most advanced installation method, though it c
## Node Package Manager (NPM)
-A typical installation of Node.js includes the [Node Package Manager](https://github.com/npm/npm) (NPM). However, an exception is any Linux-distro-supplied version of Nodejs which would need the package `npm` installed. NPM is a package manager for Nodejs packages in the NPM repository. You can find extensive NPM documentation at [npmjs.com](https://docs.npmjs.com/).
+A typical installation of Node.js includes the [Node Package Manager](https://github.com/npm/npm) (NPM). However, an exception is any Linux-distro-supplied version of Node.js which would need the package `npm` installed. NPM is a package manager for Node.js packages in the NPM repository. You can find extensive NPM documentation at [npmjs.com](https://docs.npmjs.com/).
## Making a Quick Decision (the tl:dr)
-Still not sure which installation method to use? Then [NVM](#node-version-manager) will probably be your best choice to start with. NVM faciliates easy installation and maintenance of Node.js and NPM, presents no naming issues with other software, and easily manages multple installations of Node.js that can test your application before you push a Node.js update into your production environment.
+Still not sure which installation method to use? Then [NVM](#node-version-manager) will probably be your best choice to start with. NVM facilitates easy installation and maintenance of Node.js and NPM, presents no naming issues with other software, and easily manages multiple installations of Node.js that can test your application before you push a Node.js update into your production environment.
diff --git a/docs/development/python/task-queue-celery-rabbitmq.md b/docs/development/python/task-queue-celery-rabbitmq.md
index 6e0b738e382..a1db1691fda 100644
--- a/docs/development/python/task-queue-celery-rabbitmq.md
+++ b/docs/development/python/task-queue-celery-rabbitmq.md
@@ -19,7 +19,7 @@ external_resources:
Celery is a Python Task-Queue system that handle distribution of tasks on workers across threads or network nodes. It makes asynchronous task management easy. Your application just need to push messages to a broker, like RabbitMQ, and Celery workers will pop them and schedule task execution.
-Celery can be used in multiple configuration. Most frequent uses are horizontal application scalling by running ressource intensive tasks on Celery workers distributed accross a cluster, or to manage long asynchronous tasks in a web app, like thumbnail generation when a user post an image. This guide will take you through installation and usage of Celery with an example application that delegate file downloads to Celery workers, using Python 3, Celery 4.1.0, and RabbitMQ.
+Celery can be used in multiple configuration. Most frequent uses are horizontal application scaling by running resource intensive tasks on Celery workers distributed across a cluster, or to manage long asynchronous tasks in a web app, like thumbnail generation when a user post an image. This guide will take you through installation and usage of Celery with an example application that delegate file downloads to Celery workers, using Python 3, Celery 4.1.0, and RabbitMQ.
## Before You Begin
@@ -41,7 +41,7 @@ This guide is written for a non-root user. Commands that require elevated privil
## Install Celery
-Celery is available from PyPI. The easiest and recommand way is to install it with `pip`. You can go for a system wide installation for simplicity, or use a virtual environment if other Python applications runs on your system. This last method installs the libraries on a per project basis and prevent version conflicts with other applications.
+Celery is available from PyPI. The easiest and recommended way is to install it with `pip`. You can go for a system wide installation for simplicity, or use a virtual environment if other Python applications runs on your system. This last method installs the libraries on a per project basis and prevent version conflicts with other applications.
### System Wide Installation
@@ -264,7 +264,7 @@ CELERY_BIN=/home/celery/miniconda3/bin/celery
r2 = list.delay()
r2.get(timeout=1)
- Depending on how quickly you enter the commands, the worker for `list` task may finish before the worker for `download` task and you may not see the linode logo in the list. Have a look at log files, like in step 7, and you will see which worker handled each task.
+ Depending on how quickly you enter the commands, the worker for `list` task may finish before the worker for `download` task and you may not see the Linode logo in the list. Have a look at log files, like in step 7, and you will see which worker handled each task.
## Monitor your Celery Cluster
@@ -293,7 +293,7 @@ celery@celery: OK
- empty -
{{< /output >}}
-3. Use the **inspect stats** command to get statistics about the workers. It gives lot of informations, like worker ressource usage under `rusage` key, or the total tasks completed under `total` key.
+3. Use the **inspect stats** command to get statistics about the workers. It gives lot of information, like worker resource usage under `rusage` key, or the total tasks completed under `total` key.
celery -A downloaderApp inspect stats
diff --git a/docs/development/python/use-scrapy-to-extract-data-from-html-tags.md b/docs/development/python/use-scrapy-to-extract-data-from-html-tags.md
index a244ff6286f..5947310b750 100644
--- a/docs/development/python/use-scrapy-to-extract-data-from-html-tags.md
+++ b/docs/development/python/use-scrapy-to-extract-data-from-html-tags.md
@@ -201,7 +201,7 @@ class LinkCheckerSpider(scrapy.Spider):
### Add Request Meta Information
-The Spider will traverse links in queue recursively. When parsing a downloaded page, it does not have any information about the previously parsed pages such as which page was linking the the new one. To pass more information to the `parse` method, Scrapy provides a `Request.meta()` method that attaches some key/value pairs to the request. They are available in the response object in the `parse()` method.
+The Spider will traverse links in queue recursively. When parsing a downloaded page, it does not have any information about the previously parsed pages such as which page was linking the new one. To pass more information to the `parse` method, Scrapy provides a `Request.meta()` method that attaches some key/value pairs to the request. They are available in the response object in the `parse()` method.
The meta information is used for two purposes:
@@ -525,4 +525,4 @@ Scrapy provides a telnet interface on port 6023 to monitor a running spider. The
6. Stop your scraping;
- engine.stop()
\ No newline at end of file
+ engine.stop()
diff --git a/docs/development/r/_index.md b/docs/development/r/_index.md
new file mode 100644
index 00000000000..8e899880d17
--- /dev/null
+++ b/docs/development/r/_index.md
@@ -0,0 +1,12 @@
+---
+author:
+ name: Linode
+ email: docs@linode.com
+description: ''
+keywords: ["development", "r", "data science", "statistics"]
+license: '[CC BY-ND 4.0](https://creativecommons.org/licenses/by-nd/4.0)'
+aliases: []
+published: 2018-01-29
+title: R
+show_in_lists: true
+---
diff --git a/docs/development/r/how-to-deploy-rstudio-server-using-an-nginx-reverse-proxy.md b/docs/development/r/how-to-deploy-rstudio-server-using-an-nginx-reverse-proxy.md
new file mode 100644
index 00000000000..688f3bda282
--- /dev/null
+++ b/docs/development/r/how-to-deploy-rstudio-server-using-an-nginx-reverse-proxy.md
@@ -0,0 +1,135 @@
+---
+author:
+ name: Sam Foo
+ email: docs@linode.com
+description: 'RStudio Server is the web-based version of the RStudio desktop environment. Gain access to your R development environment from anywhere in the world.'
+og_description: 'RStudio Server is the web-based version of the RStudio desktop environment. Gain access to your R development environment from anywhere in the world.'
+keywords: ['R', 'statistic', 'R Foundation', 'data visualization']
+license: '[CC BY-ND 4.0](https://creativecommons.org/licenses/by-nd/4.0)'
+modified: 2018-01-29
+modified_by:
+ name: Linode
+published: 2018-01-29
+title: 'How to Deploy RStudio Server Using an NGINX Reverse Proxy'
+---
+
+## What is RStudio Server?
+
+[RStudio](https://www.rstudio.com) is an integrated development environment (IDE) for [R](https://www.r-project.org/), an open source statistical computing language. It includes debugging and plotting tools that make it easy to write, debug, and run R scripts. The IDE is available in both desktop and server configurations. By hosting the server configuration (RStudio Server) on a Linode, you can access the IDE from any computer with internet access. Since data analysis often uses large datasets and can be computationally expensive, keeping your data and running R scripts from a remote server can be more efficient than working from your personal computer. In addition, a professional edition is available that allows project sharing and simultaneous code editing for multiple users.
+
+## Before You Begin
+
+This guide assumes an existing installation of R version 3.0.1 or newer and shows how to install RStudio Server 1.1. See our guide on [installing R on Ubuntu and Debian](/docs/development/r/how-to-install-r-on-ubuntu-and-debian) for steps on installing the latest version of R.
+
+The steps in this guide are for Ubuntu 16.04 and should be adapted to your specific distribution installation.
+
+## Install RStudio Server
+
+1. Download RStudio 1.1:
+
+ wget https://download2.rstudio.org/rstudio-server-1.1.414-amd64.deb
+
+2. Install and use the gDebi package installer for the downloaded Debian package file:
+
+ sudo apt install gdebi
+ sudo gdebi rstudio-server-1.1.414-amd64.deb
+
+ If successful, the output should show `rstudio-server.service` as active.
+
+ {{< output >}}
+Created symlink from /etc/systemd/system/multi-user.target.wants/rstudio-server.service to /etc/systemd/system/rstudio-server.service.
+● rstudio-server.service - RStudio Server
+ Loaded: loaded (/etc/systemd/system/rstudio-server.service; enabled; vendor preset: enabled)
+ Active: active (running) since Tue 2018-01-23 21:18:44 UTC; 1s ago
+ Process: 13676 ExecStart=/usr/lib/rstudio-server/bin/rserver (code=exited, status=0/SUCCESS)
+ Main PID: 13677 (rserver)
+ CGroup: /system.slice/rstudio-server.service
+ └─13677 /usr/lib/rstudio-server/bin/rserver
+
+Jan 23 21:18:44 localhost systemd[1]: Starting RStudio Server...
+Jan 23 21:18:44 localhost systemd[1]: Started RStudio Server.
+{{< /output >}}
+
+3. In a browser, navigate to your Linode's public IP address on port 8787 (i.e. `public-ip:8787`). Use your Unix user's username and password to log in when prompted:
+
+ 
+
+4. Because you will be accessing RStudio through a reverse proxy, set RStudio Server to listen on localhost instead of a public IP. Open `rserver.conf` in a text editor and add the following content:
+
+ {{< file-excerpt "/etc/rstudio/rserver.conf" >}}
+# Server Configuration File
+www-address=127.0.0.1
+{{< /file-excerpt >}}
+
+5. You can also set the configuration for each individual session. For example, the default session timeout is two hours. Change this to 30 minutes to conserve server resources:
+
+ {{< file-excerpt "/etc/rstudio/rsession.conf" >}}
+# R Session Configuration File
+session-timeout-minutes=30
+{{< /file-excerpt >}}
+
+6. Check your configuration:
+
+ sudo rstudio-server verify-installation
+
+7. If there are no issues, restart RStudio server to apply the changes:
+
+ sudo rstudio-server restart
+
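+8. Optionally, confirm that RStudio Server is now listening only on the loopback address. The PID and other details in the output will differ on your system:
+
+        sudo ss -tlnp | grep 8787
+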
+## Set Up the Reverse Proxy
+
+Running RStudio Server behind a reverse proxy offers benefits such as custom URL endpoints and load balancing.
+
+1. Install NGINX:
+
+ sudo apt install nginx
+
+2. Open `nginx.conf` in a text editor and add the following configuration:
+
+ {{< file-excerpt "/etc/nginx/nginx.conf" nginx >}}
+http {
+ # Basic Settings
+ # ...
+
+ map $http_upgrade $connection_upgrade {
+ default upgrade;
+ '' close;
+ }
+}
+{{< /file-excerpt >}}
+
+3. Create an NGINX configuration in `/etc/nginx/conf.d/` called `rstudio.conf` with the following configuration. Replace `example.com` with the public IP address or FQDN of your Linode:
+
+ {{< file-excerpt "/etc/nginx/conf.d/rstudio.conf" nginx >}}
+server {
+ listen 80;
+ listen [::]:80;
+
+ server_name example.com;
+
+ location / {
+ proxy_pass http://localhost:8787/;
+ proxy_redirect http://localhost:8787/ $scheme://$host/;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection $connection_upgrade;
+ proxy_read_timeout 20d;
+ }
+}
+{{< /file-excerpt >}}
+
+4. Check the NGINX configuration:
+
+ sudo nginx -t
+
+5. If there are no errors, restart NGINX to apply the changes:
+
+ sudo systemctl restart nginx
+
+6. In a browser, navigate to the public IP or FQDN of your Linode. After logging in, the RStudio IDE should be available from your browser:
+
+ 
+
+{{< note >}}
+If RStudio does not load in the browser, you may need to clear your browser cache.
+{{< /note >}}
diff --git a/docs/development/r/how-to-install-r-on-ubuntu-and-debian.md b/docs/development/r/how-to-install-r-on-ubuntu-and-debian.md
new file mode 100644
index 00000000000..aa610d06ad3
--- /dev/null
+++ b/docs/development/r/how-to-install-r-on-ubuntu-and-debian.md
@@ -0,0 +1,153 @@
+---
+author:
+ name: Sam Foo
+ email: docs@linode.com
+description: 'R is a programming language commonly used for statistical analysis and data visualization. Learn how to install the base R package on your Linode.'
+og_description: 'R is a programming language commonly used for statistical analysis and data visualization. Learn how to install the base R package on your Linode.'
+keywords: ['R', 'statistics', 'R Foundation', 'data visualization']
+license: '[CC BY-ND 4.0](https://creativecommons.org/licenses/by-nd/4.0)'
+modified: 2018-01-29
+modified_by:
+ name: Linode
+published: 2018-01-29
+title: 'How to Install R on Ubuntu and Debian'
+---
+
+
+## What is R?
+
+[R is a programming language](https://www.r-project.org/about.html) used for statistical analysis and data visualization. The language is highly extensible through the [Comprehensive R Archive Network (CRAN)](https://cran.r-project.org/), which hosts more than 10,000 R packages for producing publication-quality figures, specialized computational tools, and more.
+
+Although R can be installed through the default Debian or Ubuntu repository, the method outlined in this guide will ensure that you install the most up-to-date stable release.
+
+## Install R on Ubuntu 16.04 and Debian 9
+
+1. Open `/etc/apt/sources.list` and add the appropriate line for your distribution to the end of the file:
+
+ Ubuntu:
+
+ deb http://cran.rstudio.com/bin/linux/ubuntu xenial/
+
+ Debian:
+
+ deb http://cran.rstudio.com/bin/linux/debian stretch-cran34/
+
+2. Add the key ID for the CRAN network:
+
+ [Ubuntu GPG key](https://cran.rstudio.com/bin/linux/ubuntu/):
+
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E084DAB9
+
+ [Debian GPG key](https://cran.rstudio.com/bin/linux/debian/):
+
+ sudo apt install dirmngr
+ sudo apt-key adv --keyserver keys.gnupg.net --recv-key 'E19F5F87128899B192B1A2C2AD5F960A256A04AF'
+
+3. Update the repository:
+
+ sudo apt update
+
+4. Install the R binaries:
+
+ sudo apt install r-base
+
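+5. Optionally, confirm the version that was installed. The output should report a version at least as recent as the one shown in the next section:
+
+        R --version
+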
+## Download Packages from CRAN
+
+1. Open the R interpreter:
+
+ R
+
+2. The interpreter will open with some information about the version. Enter `install.packages("ggplot2")`:
+
+ {{< output >}}
+R version 3.4.3 (2017-11-30) -- "Kite-Eating Tree"
+Copyright (C) 2017 The R Foundation for Statistical Computing
+Platform: x86_64-pc-linux-gnu (64-bit)
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+ Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+> install.packages("ggplot2")
+{{< /output >}}
+
+3. A list of available mirrors should appear. Pick the closest location to maximize transfer speeds:
+
+ {{< output >}}
+--- Please select a CRAN mirror for use in this session ---
+HTTPS CRAN mirror
+
+ 1: 0-Cloud [https] 2: Algeria [https]
+ 3: Australia (Canberra) [https] 4: Australia (Melbourne 1) [https]
+ 5: Australia (Melbourne 2) [https] 6: Australia (Perth) [https]
+ 7: Austria [https] 8: Belgium (Ghent) [https]
+ 9: Brazil (PR) [https] 10: Brazil (RJ) [https]
+11: Brazil (SP 1) [https] 12: Brazil (SP 2) [https]
+13: Bulgaria [https] 14: Canada (MB) [https]
+15: Chile 1 [https] 16: Chile 2 [https]
+17: China (Beijing) [https] 18: China (Hefei) [https]
+19: China (Guangzhou) [https] 20: China (Lanzhou) [https]
+21: China (Shanghai) [https] 22: Colombia (Cali) [https]
+23: Czech Republic [https] 24: Denmark [https]
+25: East Asia [https] 26: Ecuador (Cuenca) [https]
+27: Estonia [https] 28: France (Lyon 1) [https]
+29: France (Lyon 2) [https] 30: France (Marseille) [https]
+31: France (Montpellier) [https] 32: France (Paris 2) [https]
+33: Germany (Göttingen) [https] 34: Germany (Münster) [https]
+35: Greece [https] 36: Iceland [https]
+37: India [https] 38: Indonesia (Jakarta) [https]
+39: Ireland [https] 40: Italy (Padua) [https]
+41: Japan (Tokyo) [https] 42: Japan (Yonezawa) [https]
+43: Malaysia [https] 44: Mexico (Mexico City) [https]
+45: New Zealand [https] 46: Norway [https]
+47: Philippines [https] 48: Serbia [https]
+49: Singapore (Singapore 1) [https] 50: Spain (A Coruña) [https]
+51: Spain (Madrid) [https] 52: Sweden [https]
+53: Switzerland [https] 54: Taiwan (Chungli) [https]
+55: Turkey (Denizli) [https] 56: Turkey (Mersin) [https]
+57: UK (Bristol) [https] 58: UK (Cambridge) [https]
+59: UK (London 1) [https] 60: USA (CA 1) [https]
+61: USA (IA) [https] 62: USA (IN) [https]
+63: USA (KS) [https] 64: USA (MI 1) [https]
+65: USA (NY) [https] 66: USA (OR) [https]
+67: USA (TN) [https] 68: USA (TX 1) [https]
+69: Vietnam [https] 70: (HTTP mirrors)
+
+
+Selection:
+{{< /output >}}
+
+4. When quitting the interpreter, you will be prompted to save the workspace image. If you choose yes, this will save all of the user-defined objects for the next session:
+
+ {{< output >}}
+> q()
+Save workspace image? [y/n/c]:
+{{< /output >}}
+
+## RStudio IDE Desktop
+
+The R interpreter lacks features, such as a debugger, that may be needed for larger projects. RStudio is an IDE that comes with many development tools out of the box.
+
+1. Download RStudio as a Debian package:
+
+ wget https://download1.rstudio.org/rstudio-xenial-1.1.414-amd64.deb
+
+2. Install the package:
+
+ sudo dpkg -i rstudio-xenial-1.1.414-amd64.deb
+
+ {{< note >}}
+If there are missing dependencies, those can be installed with the following command:
+
+ sudo apt install -f
+{{< /note >}}
diff --git a/docs/development/ror/ruby-on-rails-nginx-debian.md b/docs/development/ror/ruby-on-rails-nginx-debian.md
index 6781d027aee..624382a8282 100644
--- a/docs/development/ror/ruby-on-rails-nginx-debian.md
+++ b/docs/development/ror/ruby-on-rails-nginx-debian.md
@@ -82,12 +82,12 @@ Use the Ruby Version Manager (RVM) to install Ruby. Be sure to replace `2.4.2` i
1. Install NGINX:
- sudo apt install nginx
+ sudo apt install nginx
2. Phusion hosts a repository containing the latest version of Phusion Passenger. To add this to the package manager, first install the Phusion PGP key:
- sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 561F9B9CAC40B2F7
- sudo sh -c 'echo deb https://oss-binaries.phusionpassenger.com/apt/passenger stretch main > /etc/apt/sources.list.d/passenger.list'
+ sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 561F9B9CAC40B2F7
+ sudo sh -c 'echo deb https://oss-binaries.phusionpassenger.com/apt/passenger stretch main > /etc/apt/sources.list.d/passenger.list'
3. Enable HTTPS support for APT:
@@ -107,7 +107,7 @@ passenger_root /usr/lib/ruby/vendor_ruby/phusion_passenger/locations.ini;
passenger_ruby /usr/bin/passenger_free_ruby;
{{< /file-excerpt >}}
- {{< note >}}
+ {{< note >}}
If the file does not already exist, you will need to create it and add the lines manually.
{{< /note >}}
@@ -143,13 +143,13 @@ If the application deployed uses MySQL, install the database server by following
cd railsapp
bundle install
-2. Rails requires a Javascript runtime. Install Node JS:
+2. Rails requires a JavaScript runtime. Install Node.js:
sudo curl -sL https://deb.nodesource.com/setup_6.x | sudo -E bash -
sudo apt install nodejs
{{< note >}}
-If your Gemfile already includes `therubyracer`, or you have another Javascript runtime on your system, you can skip this step.
+If your Gemfile already includes `therubyracer`, or you have another JavaScript runtime on your system, you can skip this step.
{{< /note >}}
3. Open `/etc/nginx/sites-available/default` in a text editor and remove `default_server` from the first two lines of the `server` block:
@@ -168,7 +168,7 @@ server {
The `passenger-config` command will generate several lines of output, similar to:
- {{< output >}}
+ {{< output >}}
passenger-config was invoked through the following Ruby interpreter:
Command: /home/username/.rvm/gems/ruby-2.4.2/wrappers/ruby
Version: ruby 2.4.2p198 (2017-09-14 revision 59899) [x86_64-linux]
diff --git a/docs/development/use-a-linode-for-web-development-on-remote-devices.md b/docs/development/use-a-linode-for-web-development-on-remote-devices.md
index c615a53c589..c537bebde74 100644
--- a/docs/development/use-a-linode-for-web-development-on-remote-devices.md
+++ b/docs/development/use-a-linode-for-web-development-on-remote-devices.md
@@ -26,7 +26,7 @@ This guide will walk you through the necessary steps to configure your Linode to
## Development Environments
-### Local Development Enviroment
+### Local Development Environment
A local development environment is usually faster, more powerful, and more comfortable than a remote environment. However, there some drawbacks associated with local development:
@@ -225,4 +225,4 @@ With everything set up it's time to work with your remote development environmen
You now have a basic but powerful setup that allows you to work from any device with an internet connection.
-The main limitation of a tablet is its storage capacity. An efficient way to set up a centralized storage space is by using OwnCloud on a Linode with [block storage](/docs/platform/how-to-use-block-storage-with-your-linode/). This way you can host all your archives, dotfiles, scripts, images and more in a scalable Linode. An additional benefit is the possibility to connect external storages like Dropbox, Google Drive or OneDrive. OwnCloud has native applications for Android and iOS so managing your assets won't be a problem. You can install and configure ownCloud by following our [ownCloud guide](/docs/applications/cloud-storage/install-and-configure-owncloud-on-ubuntu-16-04).
+The main limitation of a tablet is its storage capacity. An efficient way to set up a centralized storage space is by using OwnCloud on a Linode with [block storage](/docs/platform/how-to-use-block-storage-with-your-linode/). This way you can host all your archives, dotfiles, scripts, images and more in a scalable Linode. An additional benefit is the possibility to connect external storage services like Dropbox, Google Drive or OneDrive. OwnCloud has native applications for Android and iOS so managing your assets won't be a problem. You can install and configure ownCloud by following our [ownCloud guide](/docs/applications/cloud-storage/install-and-configure-owncloud-on-ubuntu-16-04).
diff --git a/docs/email/clients/install-roundcube-on-ubuntu.md b/docs/email/clients/install-roundcube-on-ubuntu.md
index 8f4cc8b0fca..04f0ece411e 100644
--- a/docs/email/clients/install-roundcube-on-ubuntu.md
+++ b/docs/email/clients/install-roundcube-on-ubuntu.md
@@ -82,7 +82,7 @@ We will create a new virtual host for Roundcube in this section. This makes a ne
sudo chmod 644 apache2-roundcube.sample.conf
-5. Determine what type of Secure Socket Layer (SSL) encryption certificate is best for your Roundcube deployment. A [self-signed SSL certificate](/docs/security/ssl/how-to-make-a-selfsigned-ssl-certificate) is easy and free, but triggers an error in most modern browsers reporting that the connection is not private. [Let's Encrypt](https://letsencrypt.org/) offers browser trusted, free SSL certificates, but does not support [Extended Validatation](https://en.wikipedia.org/wiki/Extended_Validation_Certificate) (EV) or multi-domain ([wildcard](https://en.wikipedia.org/wiki/Wildcard_certificate)) certificates. To gain those features, a [commercial SSL certificate](/docs/security/ssl/obtaining-a-commercial-ssl-certificate) must be used.
+5. Determine what type of Secure Socket Layer (SSL) encryption certificate is best for your Roundcube deployment. A [self-signed SSL certificate](/docs/security/ssl/how-to-make-a-selfsigned-ssl-certificate) is easy and free, but triggers an error in most modern browsers reporting that the connection is not private. [Let's Encrypt](https://letsencrypt.org/) offers browser trusted, free SSL certificates, but does not support [Extended Validation](https://en.wikipedia.org/wiki/Extended_Validation_Certificate) (EV) or multi-domain ([wildcard](https://en.wikipedia.org/wiki/Wildcard_certificate)) certificates. To gain those features, a [commercial SSL certificate](/docs/security/ssl/obtaining-a-commercial-ssl-certificate) must be used.
6. Once you have your SSL certificate, edit the following options in `apache2-roundcube.sample.conf` to match your desired configuration:
@@ -151,7 +151,7 @@ PEAR is an acronym for "PHP Extension and Application Repository". Common PHP co
PEAR will print an **install ok** confirmation message for each package that it successfully installs. In this case, a complete installation will look similar to this:
- {{< output >}}
+ {{< output >}}
install ok: channel://pear.php.net/Auth_SASL-1.1.0
install ok: channel://pear.php.net/Net_IDNA2-0.1.1
install ok: channel://pear.php.net/Mail_Mime-1.10.2
diff --git a/docs/email/iredmail/install-iredmail-on-ubuntu.md b/docs/email/iredmail/install-iredmail-on-ubuntu.md
index 0e37207a8e1..0602c4f7610 100644
--- a/docs/email/iredmail/install-iredmail-on-ubuntu.md
+++ b/docs/email/iredmail/install-iredmail-on-ubuntu.md
@@ -17,7 +17,6 @@ title: 'Install iRedmail, Open-Source Mail Server, on Ubuntu'
---
-
## Why Run a Mail Server?
Running your own mail server has many benefits. It allows you to manage the size of your mailboxes and attachments, run hourly/daily email backups, view mail logs, and gives you the freedom to use any domain name available. The drawback is usually the in-depth and sometimes complicated process of installing all the necessary parts. This guide uses a streamlined process, the iRedMail install script, and should have you up and running your mail server in under 15 minutes.
@@ -402,7 +401,7 @@ Familiarize yourself with the various files, configs, and settings listed in the
[s]:https://code.google.com/p/iredmail/wiki/DNS_SPF
[m]:http://www.mail-tester.com
[r]:/docs/networking/dns/configure-your-linode-for-reverse-dns/
-[c]:https://www.linode.com/docs/websites/ssl/obtaining-a-commercial-ssl-certificate
+[c]:https://www.linode.com/docs/security/ssl/obtaining-a-commercial-ssl-certificate/
[a]:https://www.linode.com/docs/networking/dns/introduction-to-dns-records#mx
[f]:http://www.iredmail.org/forum/post30654.html#p30654
[p]:http://wiki.policyd.org/_detail/policyd_web_gui.png?id=screenshots
diff --git a/docs/email/mailman/manage-email-lists-with-gnu-mailman-on-debian-6-squeeze.md b/docs/email/mailman/manage-email-lists-with-gnu-mailman-on-debian-6-squeeze.md
index 289b56e8ebb..b45aa165dc5 100644
--- a/docs/email/mailman/manage-email-lists-with-gnu-mailman-on-debian-6-squeeze.md
+++ b/docs/email/mailman/manage-email-lists-with-gnu-mailman-on-debian-6-squeeze.md
@@ -20,7 +20,7 @@ Be sure to review this guide in its entirety before beginning the procedure outl
# Set the Hostname
-Before you begin installing and configuring the components described in this guide, please make sure you've followed our instructions for [setting your hostname](/docs/getting-started#setting-the-hostname). Issue the following commands to make sure it is set properly:
+Before you begin installing and configuring the components described in this guide, please make sure you've followed our instructions for [setting your hostname](/docs/getting-started/#setting-the-hostname). Issue the following commands to make sure it is set properly:
hostname
hostname -f
@@ -44,7 +44,7 @@ During the Mailman installation, you will be required to specify the languages t
# Configure Mailman
-Consider the "[Configure Virtual Hosting](/docs/email/mailman/manage-email-lists-with-gnu-mailman-on-debian-6-squeeze#configure-virtual-hosting)" section before preceding. In most cases where you will be hosting you will want to skip this section and continue with that procedure. Mailman requires a "base" list, from which it can send email to welcome new members to lists and send password reminders when needed. Create this list by issuing the following command:
+Consider the "[Configure Virtual Hosting](/docs/email/mailman/manage-email-lists-with-gnu-mailman-on-debian-6-squeeze/#configure-virtual-hosting)" section before preceding. In most cases where you will be hosting you will want to skip this section and continue with that procedure. Mailman requires a "base" list, from which it can send email to welcome new members to lists and send password reminders when needed. Create this list by issuing the following command:
newlist mailman
@@ -55,7 +55,7 @@ During the list creation process, Mailman will prompt you for the administrators
{{< /file-excerpt >}}
-Replace `example.com` and `lists.example.com` with the relevant domains for your instance. Ensure that you have configured the [MX Records](/docs/dns-guides/introduction-to-dns#mx) for both domains that you want to receive email with. Additionally, add the following lines to your `/etc/postfix/master.cf` file:
+Replace `example.com` and `lists.example.com` with the relevant domains for your instance. Ensure that you have configured the [MX Records](/docs/dns-guides/introduction-to-dns/#mx) for both domains that you want to receive email with. Additionally, add the following lines to your `/etc/postfix/master.cf` file:
{{< file-excerpt "/etc/postfix/master.cf" >}}
mailman unix - n n - - pipe
@@ -166,7 +166,7 @@ From this point forward, you can create new lists by issuing `newlist` commands
# Configuring Mailman with Alternate Mail Configurations
-If you wish to deploy Mailman on a system that has an existing mail set up, such as the [Postfix with Dovecot and MySQL](/docs/email/postfix/dovecot-mysql-debian-6-squeeze) or the [Postfix with Dovecot and System Users](/docs/email/postfix/dovecot-system-users-debian-6-squeeze) configurations described in other documents, consider the following recommendations:
+If you wish to deploy Mailman on a system that has an existing mail set up, such as the [Postfix with Dovecot and MySQL](/docs/email/postfix/dovecot-mysql-debian-6-squeeze/) or the [Postfix with Dovecot and System Users](/docs/email/postfix/dovecot-system-users-debian-6-squeeze/) configurations described in other documents, consider the following recommendations:
Complete your basic mail configuration according to the appropriate guide before beginning to install and configure Mailman.
diff --git a/docs/email/postfix/configure-postfix-to-send-mail-using-gmail-and-google-apps-on-debian-or-ubuntu.md b/docs/email/postfix/configure-postfix-to-send-mail-using-gmail-and-google-apps-on-debian-or-ubuntu.md
index 090be35bd75..e5850261a09 100644
--- a/docs/email/postfix/configure-postfix-to-send-mail-using-gmail-and-google-apps-on-debian-or-ubuntu.md
+++ b/docs/email/postfix/configure-postfix-to-send-mail-using-gmail-and-google-apps-on-debian-or-ubuntu.md
@@ -12,7 +12,7 @@ published: 2016-12-13
title: Configure Postfix to Send Mail Using Gmail and Google Apps on Debian or Ubuntu
---
-
+
Postfix is a Mail Transfer Agent (MTA) that can act as an SMTP server or client to send or receive email. There are many reasons why you would want to configure Postfix to send email using Google Apps and Gmail. One reason is to avoid getting your mail flagged as spam if your current server's IP has been added to a blacklist.
diff --git a/docs/email/running-a-mail-server.md b/docs/email/running-a-mail-server.md
index bd84ac9a385..31cbc7a3606 100644
--- a/docs/email/running-a-mail-server.md
+++ b/docs/email/running-a-mail-server.md
@@ -2,73 +2,86 @@
author:
name: Linode
email: docs@linode.com
-description: 'Our guide to running a mail server on your Linode.'
-keywords: ["mail server", "linode guide", "running a mail server", "linode quickstart guide"]
+description: 'This guide shows how to run an email server on a Linode. It shows how to install the mail service, configure components, create DNS records and SSL certificates, and manage users.'
+og_description: 'Take control of your email with your own mail server. This guide explains how to install a mail server on your Linode, configure the necessary components and users, and send and receive your first emails.'
+keywords: ["mail server", "linode guide", "running a mail server", "Self-host Mail"]
license: '[CC BY-ND 4.0](https://creativecommons.org/licenses/by-nd/4.0)'
aliases: ['mailserver/']
-modified: 2014-04-13
+modified: 2018-01-23
modified_by:
- name: Alex Fornuto
+ name: Linode
published: 2013-06-05
title: Running a Mail Server
---
-If you've followed along with the quick start guides up to this point, you've managed to [install Linux](/docs/getting-started), [secure your Linode](/docs/securing-your-server), and [start hosting a website](/docs/hosting-website). Now it's time to set up email. This guide explains how to install a mail server on your Linode and create mail accounts for your own domains. First, we'll help you decide whether to run your own mail server or let a third-party mail service handle it for you. Then we'll show you how a mail server works, present common mail server configurations, and provide basic instructions for getting a mail server up and running.
+This guide offers an overview of installing a mail server on your Linode. It covers mail server configuration, creating mail accounts, and the tools involved in hosting your own email.
-
+
## Should You Run a Mail Server?
-First, you'll need to decide whether or not you want to run your own mail server. If you do, you'll have control over your domain's email, but you'll also have to deal with the hassles associated with setting up and running some pretty complex software. Using a third-party mail service is easier, but you'll sacrifice control and flexibility. In this section, we'll be discussing the benefits and drawbacks to running your own mail server, as well as how to choose an external mail service, if you decide to go that route.
+If you run your own mail server, you'll have control over your domain's email, but you'll also have to deal with the hassles associated with setting up and maintaining complex software. Using a third-party mail service is easier, but you'll sacrifice control and flexibility. This section considers the benefits and drawbacks of running your own mail server, as well as how to choose an [external mail service](#external-mail-services) if you decide to go that route.
### Benefits
-If you want or need full control of your email, running your own mail server just might be the ideal solution. Doing so allows you to store your own email, access the mail server's logs, and access the raw email files in a user's mailbox. There are several benefits to running a mail server:
+If you want or need full control of your email, running your own mail server might be the ideal solution. Doing so allows you to store your own email, access the mail server's logs, and access the raw email files in a user's mailbox.
-- Full control over both the server and your email
-- Pick the applications you want to use, and then tune them for your needs
-- Access the raw mail files in users' server mailboxes
-- View logs for incoming and outgoing messages
-- View logs for connection and authorization attempts from local mail clients for IMAP, POP3, and SMTP
-- Enjoy having mail for \$0.00 more than the price of your Linode
+Some benefits of running a mail server are:
-The greatest benefit: When something goes wrong, you can investigate and fix things yourself instead of calling a third-party mail service provider. So when someone at your company claims that an email got lost, or Outlook suddenly stopped working even though they didn't change *anything*, you'll be able to see what actually happened and make adjustments if necessary.
+- Full control over both the server and your email
+- Pick the applications you want to use, and tune them for your needs
+- View logs for incoming and outgoing messages
+- View logs for connection and authorization attempts from local mail clients for IMAP, POP3, and SMTP
+- Mail will cost no more than the price of your Linode
### Drawbacks
-By now you know that running your own mail server isn't for the faint of heart. Setting up the software is tricky, filtering spam is a pain, and keeping everything running smoothly is challenging. For these reasons and others, we recommend that you carefully consider all of your options before deciding to run a mail server. Here are a couple of the drawbacks:
+Running your own mail server isn't for the faint of heart. Setting up the software is tricky, filtering spam can be difficult, and keeping everything running smoothly is challenging. You should carefully consider all of your options before deciding to run a mail server. Here are some of the drawbacks:
-- Configuration is complicated
-- Troubleshooting problems is a pain
-- Downtime can result in lost email
-- Spam and virus filtering need to be tuned just right to block unwanted emails and allow legitimate ones
-- If a spammer discovers an exploit, they could use your Linode to send spam, and your IP address could be black-listed
+- Configuration is complicated
+- Downtime can result in lost email
+- Spam and virus filtering need to be tuned just right to block unwanted emails and allow legitimate ones
+- If a spammer discovers an exploit, they could use your Linode to send spam, and your IP address could be black-listed
+- No third-party support to troubleshoot email issues
-And here's the biggest drawback: You're on the hook for everything related to your email. Maintaining, upgrading, and troubleshooting the mail server is your responsibility. It's a huge commitment!
+You're responsible for everything related to your Linode: maintaining, upgrading, and troubleshooting the mail server all fall to you. Visit Linode's [Terms of Service](/tos) for more information about acceptable use.
### External Mail Services
-If the prospect of managing your own mail server is too daunting, you should consider using a third-party mail service. For a monthly or annual fee, these services provide managed mail servers and take care of all hosting, maintenance, and troubleshooting tasks. You won't have as much control when something goes wrong, but you also won't need to worry about the pitfalls of running a mail server. There are dozens of third-party mail services available, but we recommend checking out these services:
+If the prospect of managing your own mail server is too daunting, you should consider using a third-party mail service. For a fee, these services provide managed mail servers and take care of all hosting, maintenance, and troubleshooting tasks. You won't have as much control if something goes wrong, but you also won't need to worry about the pitfalls of running a mail server.
+
+There are several third-party mail services available:
-- [Fastmail](https://www.fastmail.fm) has good uptime and fast IMAP. It's paid and has capped storage.
-- [Google Apps](http://www.google.com/intl/en/enterprise/apps/business/) uses the top-notch Gmail interface and has great uptime. It's paid and the IMAP implementation is unusual. We have a [guide](/docs/email/google-mail) on how to use Google Apps with your Linode.
-- [Office 365](https://login.microsoftonline.com/) is the successor to Outlook.com and can support custom domains for email, amongst other services.
+- [Fastmail](https://www.fastmail.fm)
+- [Google Apps](http://www.google.com/intl/en/enterprise/apps/business/) uses the familiar Gmail interface. Check out our guide to [using Google Apps with your Linode](/docs/email/google-mail).
+- [Office 365](https://login.microsoftonline.com/) is the successor to Outlook.com and can support custom domains for email, amongst other services.
-If you decide to use an outside mail service, you will still need to set up [DNS](/docs/networking/dns/dns-manager) for your mail, using the settings provided by the third-party mail service.
+If you decide to use an outside mail service, you will still need to set up [DNS](/docs/networking/dns/dns-manager) for your mail and use the settings provided by the third-party mail service.
## How Mail Servers Work
-Every mail server that lets you send and receive email with a local mail client has three separate software components - a Mail Transfer Agent (MTA), a Mail Delivery Agent (MDA), and an IMAP/POP3 server. The MTA relays mail between your Linode and the wider Internet, whether it's delivering an outside email to one of your users, or sending an email from one of your users. Accepted incoming mail gets added to the MTA's queue on the server. The MDA takes mail from the MTA's queue and saves it to individual mailboxes on your Linode. The IMAP/POP3 server manages users and their mailboxes as they check their email over IMAP/POP3 connections.
+Every mail server that lets you send and receive email with a local mail client has three separate software components:
+
+* **MTA**: The *Mail Transfer Agent* relays mail between your Linode and the wider Internet, whether it's delivering an outside email to one of your users, or sending an email from one of your users. Accepted incoming mail gets added to the MTA's queue on the server.
+
+* **MDA**: The *Mail Delivery Agent* takes mail from the MTA's queue and saves it to individual mailboxes on your Linode.
+
+* **IMAP/POP3 Server**: Manages users and their mailboxes as they check their email over IMAP/POP3 connections.
+
+### The Mail Server Process
-How does it work? First, an incoming message is directed to your Linode via DNS. Once it passes through the MTA and MDA, it is stored in the user's mailbox on the server. When the message is requested, the IMAP/POP3 server mediates the connection between your Linode and the user's local mail client. Outgoing mail is sent from the user's local mail client, processed by your Linode's MTA, and is then sent to its destination on the Internet.
+1. An incoming message is directed to your Linode via DNS.
+2. Once it passes through the MTA and MDA, it is stored in the user's mailbox on the server.
+3. When the message is requested, the IMAP/POP3 server mediates the connection between your Linode and the user's local mail client.
+4. Outgoing mail is sent from the user's local mail client, processed by your Linode's MTA, and is then sent to its destination on the Internet.
-[](/docs/assets/1300-mail_server.jpg)
+
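+For example, the DNS lookup in the first step resolves the domain's MX records, which name the servers that accept mail for that domain. You can inspect these records for any domain with `dig` (provided by the `dnsutils` package on Debian and Ubuntu); the output lists each mail server along with its priority:
+
+    dig +short MX example.com
+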
-There are also add-on components that you may want to install for webmail, spam filtering, virus scanning, and mailing list organizers. To receive mail, users can install local mail clients like Apple Mail, Outlook, and Thunderbird on their personal computers. You'll learn more about each of these components in the following sections.
+## How to Choose Mail Server Components
-## Choosing Mail Server Components
+There are several software packages that can be used as MTAs, MDAs, and IMAP/POP3 servers, and this section will present some of the most popular options.
-The next step is choosing the components for your mail server. There are several software packages that can be used as MTAs, MDAs, and IMAP/POP3 servers, and we'll be presenting some of the most popular options in this section. While all of the components presented in this guide are solid choices, we recommend that you use Postfix as your MTA and Dovecot as your MDA and IMAP/POP3 server. These are the packages we'll be using in later examples.
+The examples in the [later sections](#build-your-mail-server) of this guide use Postfix as the MTA and Dovecot as the MDA and IMAP/POP3 server.
### Mail Transfer Agents
@@ -76,159 +89,175 @@ MTAs are responsible for handling SMTP connections to your Linode from both outs
Here are the most popular MTA services available:
-- [Courier Mail Server](http://www.courier-mta.org) comes with Courier-IMAP, which is the popular part of the Courier mail server suite, but Courier-MTA also includes mail relaying features. It's a simpler MTA but somewhat limited.
-- [Exim](http://www.exim.org) is modern and oriented towards flexibility. It's secure, but not quite as security-oriented as Postfix. It's very customizable, but is one of the most complex MTAs to configure. We have guides for [Exim on Ubuntu 12.04](/docs/email/exim/send-only-mta-ubuntu-12-04-precise-pangolin) and [Exim on Debian 6](/docs/email/exim/send-only-mta-debian-6-squeeze).
-- [Postfix](http://www.postfix.org) is part of our [recommended mail server build](/docs/email/postfix/email-with-postfix-dovecot-and-mysql). It's modern, security-oriented, and very flexible, although not quite as flexible as Exim. It is slightly simpler to set up than Exim.
-- [Qmail](http://www.qmail.org/top.html) is one of the older modern MTAs and supports Maildir-style directories. It's still very popular, but is no longer supported.
-- [Sendmail](http://www.sendmail.com/sm/open_source/) is a legacy MTA that still has a large following and good support. Don't expect the most modern options and security, though.
-- [Zimbra](http://www.zimbra.com) is an all-in-one mail service that's much simpler to install than other options, but less customizable. We have guides for [Zimbra on Ubuntu 10.04](/docs/email/zimbra/install-zimbra-ubuntu-10-04-lucid), [Zimbra on Debian 6](/docs/email/zimbra/install-zimbra-debian-6-squeeze), and [Zimbra on CentOS 5](/docs/email/zimbra/install-zimbra-centos-5).
+- [Courier Mail Server](http://www.courier-mta.org) comes with Courier-IMAP, which is the popular part of the Courier mail server suite, but Courier-MTA also includes mail relaying features. It's a simpler MTA but somewhat limited.
+- [Exim](http://www.exim.org) is modern and oriented towards flexibility. It's secure, but not quite as security-oriented as Postfix. It's very customizable, but is one of the most complex MTAs to configure.
+- [Postfix](http://www.postfix.org) is part of Linode's [recommended mail server build](/docs/email/postfix/email-with-postfix-dovecot-and-mysql). It's modern, security-oriented, and very flexible. It is slightly simpler to set up than Exim.
+- [Qmail](http://www.qmail.org/top.html) is a modern MTA that supports [Maildir-style](https://en.wikipedia.org/wiki/Maildir) directories. Qmail has not received an update since 2007, but remains very popular.
+- [Sendmail](http://www.sendmail.com/sm/open_source/) is a legacy MTA that has a large following and good support.
+- [Zimbra](http://www.zimbra.com) is an all-in-one mail service. Zimbra offers a simple install, but few configurable options.
### Mail Delivery Agents
-MDAs move email from the MTA's queue to individual mailboxes on your Linode (for example, your mailbox could be located at `/var/mail/example.com/user/`). MDAs are also known as Local Delivery Agents (LDAs). Different MTAs support different types of mailboxes. The most common types are the older **mbox** mailboxes which store all the messages together in a single file, and **Maildir** mailboxes which store each email in a separate file and support multiple folders. MDAs are often bundled with other mail-related software.
+MDAs move email from the MTA's queue to individual mailbox directories on your Linode; for example, your mailbox could be located at `/var/mail/example.com/user/`. MDAs are also known as Local Delivery Agents (LDAs). Different MTAs support different types of mailboxes. The most common types are the older **mbox** mailboxes, which store all the messages together in a single file, and **Maildir** mailboxes, which store each email in a separate file and support multiple folders. MDAs are often bundled with other mail-related applications.
-Here are the most popular MDAs available:
+These are some of the most popular MDAs available:
-- [Cyrus's MDA](http://www.cyrusimap.org/index.php) is part of the Cyrus IMAP/POP3 server. Cyrus is a modern, security-oriented IMAP/POP3 server designed to run on servers where users do not log in directly.
-- [Deliver](http://linux.die.net/man/8/deliver) is a simple Linux mail delivery utility, which is configured in the Imapd configuration files by default.
-- [Dovecot's LDA](http://wiki2.dovecot.org/LDA) and [Dovecot's LMTP server](http://wiki2.dovecot.org/LMTP) are part of the Dovecot IMAP/POP3 server. Dovecot is a lightweight, modern, and configurable mail server.
-- [maildrop](http://www.courier-mta.org/maildrop/) is Courier's MDA. Courier is an all-in-one mail server.
-- [Postfix's MDA](http://www.postfix.org/OVERVIEW.html#delivering) is part of the Postfix MTA software. Postfix is a modern, security-oriented, and flexible MTA.
-- [Procmail](http://www.procmail.org) is a legacy MDA.
-- [Sendmail's MDA](http://www.sendmail.com/sm/open_source/) is part of the Sendmail MTA software. Sendmail is an older MTA that is still very popular MTA.
+- [Cyrus's MDA](http://www.cyrusimap.org/index.php) is part of the Cyrus IMAP/POP3 server. Cyrus is a modern, security-oriented IMAP/POP3 server designed to run on servers where users do not log in directly.
+- [Deliver](http://linux.die.net/man/8/deliver) is a simple Linux mail delivery utility that is configured in the Imapd configuration files by default.
+- [Dovecot's LDA](http://wiki2.dovecot.org/LDA) and [Dovecot's LMTP server](http://wiki2.dovecot.org/LMTP) are part of the Dovecot IMAP/POP3 server. Dovecot is a lightweight, modern, and configurable mail server.
+- [maildrop](http://www.courier-mta.org/maildrop/) is Courier's MDA. Courier is an all-in-one mail server.
+- [Postfix's MDA](http://www.postfix.org/OVERVIEW.html#delivering) is part of the Postfix MTA software. Postfix is a modern, security-oriented, flexible MTA.
+- [Sendmail's MDA](http://www.sendmail.com/sm/open_source/) is part of the Sendmail MTA software. Sendmail is an older MTA that is still popular.
### IMAP and POP3 Servers
-An IMAP or POP3 server handles connections from incoming IMAP or POP3 clients like Microsoft Outlook and Apple Mail. The server manages client access to the mailboxes and raw mail files so that the email is displayed in a user-friendly way.
+An IMAP or POP3 server handles connections from incoming IMAP or POP3 clients like Microsoft Outlook and Apple Mail. The server manages client access to the mailboxes and raw mail files so that the email is displayed in a user-friendly way.
-Most servers and clients support both IMAP and POP3. POP3 clients connect to the server at specified intervals and download all of a user's messages, without leaving copies on the server by default. POP3 was developed when most people used only one device to access one email account. IMAP is a newer protocol designed for multi-device users. IMAP clients stay continuously connected to the server and IMAP mailboxes mirror the mailboxes on your Linode. IMAP and POP3 both have secure versions (IMAPS and SSL-POP) which use SSL encryption for mail transfer. Always use the secure version whenever possible.
+Most servers and clients support both IMAP and POP3. POP3 clients connect to the server at specified intervals and download all of a user's messages without leaving copies on the server by default. POP3 was developed when most people used only one device to access one email account. IMAP is a newer protocol designed for multi-device users. IMAP clients stay continuously connected to the server and IMAP mailboxes mirror the mailboxes on your Linode. IMAP and POP3 both have secure versions (IMAPS and SSL-POP) which use SSL encryption for mail transfer. Always use the secure version whenever possible.
Here are the most popular IMAP and POP3 servers available:
-- [Citad.l](http://www.citadel.org) is an all-in-one mail service that includes mail, calendars, instant messaging, mailing lists, and other collaboration tools. It's open source and geared towards small and medium-sized organizations. We have guides for [Citadel on Ubuntu 12.04](/docs/email/citadel/ubuntu-12-04-precise-pangolin) and [Citadel on Debian 6](/docs/email/citadel/debian-6-squeeze).
-- [Courier](http://www.courier-mta.org) has a very popular IMAP server called [Courier IMAP](http://www.courier-mta.org/imap/). It's an all-in-one mail server software suite, but Courier IMAP can be installed by itself if that's the only part you need.
-- [Cyrus](http://www.cyrusimap.org/index.php) is a modern, security-oriented IMAP/POP3 server designed to work on sealed servers where users do not log in directly.
-- [DBMail](http://www.dbmail.org) is an open source project that stores mail in databases instead of flat files.
-- [Dovecot](http://www.dovecot.org) is a lightweight, modern, and configurable mail server, and is part of our [recommended mail server build](/docs/email/postfix/email-with-postfix-dovecot-and-mysql).
-- [Xmail](http://www.xmailserver.org) is a full-featured POP3 server, but does not support IMAP.
-- [Zimbra](http://www.zimbra.com) is an all-in-one mail service that's much simpler to install than other options, but less customizable. We have guides for [Zimbra on Ubuntu 10.04](/docs/email/zimbra/install-zimbra-ubuntu-10-04-lucid), [Zimbra on Debian 6](/docs/email/zimbra/install-zimbra-debian-6-squeeze), and [Zimbra CentOS 5](/docs/email/zimbra/install-zimbra-centos-5).
+- [Citadel](http://www.citadel.org) is an all-in-one mail service that includes mail, calendars, instant messaging, mailing lists, and other collaboration tools. It's open source and geared towards small and medium-sized organizations. Linode has guides for [Citadel on Ubuntu 12.04](/docs/email/citadel/ubuntu-12-04-precise-pangolin) and [Citadel on Debian 6](/docs/email/citadel/debian-6-squeeze).
+- [Courier](http://www.courier-mta.org) has a very popular IMAP server called [Courier IMAP](http://www.courier-mta.org/imap/). It's an all-in-one mail server software suite, but Courier IMAP can be installed by itself if that's the only part you need.
+- [Cyrus](http://www.cyrusimap.org/index.php) is a modern, security-oriented IMAP/POP3 server designed to work on sealed servers where users do not log in directly.
+- [DBMail](http://www.dbmail.org) is an open source project that stores mail in databases instead of flat files.
+- [Dovecot](http://www.dovecot.org) is a lightweight, modern, and configurable mail server, and is part of Linode's [recommended mail server build](/docs/email/postfix/email-with-postfix-dovecot-and-mysql).
+- [Xmail](http://www.xmailserver.org) is a full-featured POP3 server, but does not support IMAP.
+- [Zimbra](http://www.zimbra.com) is an all-in-one mail service that's much simpler to install than other options, but less customizable.
-## Building Your Mail Server
-
-Now that you understand how a mail server works and you've chosen the primary components, it's time to build your mail server. If you can't decide which software is best for you, you can always use our recommended build with Postfix as your MTA and Dovecot as your MDA and IMAP/POP3 server. It's what we'll use in the examples below. Let's get building!
+## Build Your Mail Server
### SSL Certificate
-The first step is to obtain and install an SSL certificate. An SSL certificate encrypts connections to your mail server, protecting passwords and email from harmful surveillance. It's possible to run a mail server without this protection, but we don't recommend it. If you follow our recommended build, you will absolutely need an SSL certificate.
+An SSL certificate encrypts connections to your mail server. It's possible to run a mail server without an SSL certificate, but it's not recommended.
+
+Any type of SSL certificate will work, but some certificates have different degrees of trustworthiness for your users. If you want the highest level of trustworthiness, you should [purchase a signed SSL certificate](/docs/security/ssl/obtaining-a-commercial-ssl-certificate) from a reputable company.
-Any type of SSL certificate will work, but some certificates have different degrees of trustworthiness for your users. If you want the highest level of trustworthiness, you should [purchase a signed SSL certificate](/docs/security/ssl/obtaining-a-commercial-ssl-certificate) from a reputable company. You can also use a free self-signed certificate if you are comfortable with the warnings it generates. You can [make your own](/docs/security/ssl/how-to-make-a-selfsigned-ssl-certificate), or, if you're following our recommended build, you can use the one that comes with Dovecot by default. Decide what type of SSL certificate you need and acquire and install it now.
+You can also use a free self-signed certificate if you are comfortable with the warnings it generates. You can make your own [self-signed SSL certificate](/docs/security/ssl/how-to-make-a-selfsigned-ssl-certificate), or, if you're following Linode's recommended build, you can use the one that comes with Dovecot by default.
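+
+If you choose the self-signed route, you can generate a certificate and key with OpenSSL along these lines (the hostname and file paths below are placeholders; adjust them to your own setup):
+
+    sudo openssl req -new -x509 -days 365 -nodes \
+      -subj "/CN=mail.example.com" \
+      -keyout /etc/ssl/private/mail.key -out /etc/ssl/certs/mail.pem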
### Software Installation
-The second step is installing and configuring the MTA, MDA, and IMAP/POP3 server. You'll also probably want to install a database server like MySQL or PostgreSQL to help you manage your domains, email addresses, user credentials, aliases, etc. Providing step-by-step instructions for every possible mail server build is beyond the scope of this article. For detailed instructions, see our [Postfix, Dovecot, and MySQL](/docs/email/postfix/email-with-postfix-dovecot-and-mysql) guide. Go ahead and install and configure the software you've chosen for your mail server build now.
+Install and configure the MTA, MDA, and IMAP/POP3 server. To help manage domains, email addresses, user credentials, aliases, etc., install a database server like MySQL or PostgreSQL.
+
+For detailed configuration instructions, see Linode's [Postfix, Dovecot, and MySQL](/docs/email/postfix/email-with-postfix-dovecot-and-mysql) guide.
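+
+On a Debian or Ubuntu system, the packages for the recommended build can be installed in one step along these lines (exact package names vary by distribution and release):
+
+    sudo apt-get install postfix dovecot-core dovecot-imapd dovecot-pop3d dovecot-lmtpd dovecot-mysql mysql-server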
-{{< note >}}
-We've written a variety of mail server guides, including guides for older software versions and other mail-related services. See the [Email Server Guides](/docs/email) webpage for more information.
-{{< /note >}}
+For more mail server guides, including guides for older software versions and other mail-related services, visit our [Email Server Guides](/docs/email).
### DNS Records
-The third step is to create the DNS records, which help email reach your Linode. The right DNS records also help designate your Linode as a legitimate mail server. In this section, you'll learn how to set the appropriate MX, SPF, and PTR records for your domain and Linode.
+DNS records help email reach your Linode. The right DNS records also help designate your Linode as a legitimate mail server. In this section, you'll learn how to set the appropriate MX, SPF, and PTR records for your domain and Linode.
+
+#### Time to Live (TTL)
+
+You should lower the Time to Live (TTL) on your existing DNS records to the lowest allowed value at least 24-48 hours before you make any other DNS changes. That way, any changes you make later will propagate quickly.
-{{< note >}}
-You should lower the time to live (TTL) on your existing DNS records to the lowest allowed value at least 24-48 hours before you make any other DNS changes. That way, any changes you make later will propagate quickly. It's also a good idea to keep your old mail server running for at least 48 hours after you start directing mail to your Linode, just in case the DNS changes take a while to propagate.
-{{< /note >}}
+It's also a good idea to keep your old mail server running for at least 48 hours after you start directing mail to your Linode, just in case the DNS changes take a while to propagate.
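+
+You can check the TTL currently being served for a record with `dig`; the second column of each answer line is the remaining TTL in seconds (the domain below is a placeholder):
+
+    dig +noall +answer example.com MX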
#### MX Records
-MX records tell the Internet where to send your domain's email. If someone sends an email to `user@example.com`, the outgoing server looks up the DNS settings for `example.com`. When it finds the MX record pointing to your Linode, it sends the message to your Linode.
+MX records tell the Internet where to send your domain's email. If someone sends an email to `user@example.com`, the outgoing server looks up the DNS settings for the domain `example.com`. When it finds the MX record pointing to your Linode, it sends the message to your Linode.
-You'll need an MX record for each domain and subdomain for which you want to receive mail on your Linode. You can also set multiple MX records with different priorities for the same domain. This creates fallback mail servers for your domain in case the first one on the list is down. Lower numbers have a higher priority. Your MX record has a domain or subdomain, TTL (time to live), type (which is MX), and a priority and target (can be a domain or an IP that resolves to your Linode).
+Create an MX record for each domain and subdomain for which you want to receive mail on your Linode.
+
+You can also set multiple MX records with different priorities for the same domain. This creates fallback mail servers for your domain in case the first one on the list is down. Lower numbers have a higher priority.
+
+Your MX record has a domain, TTL, type, priority, and target:
+
+| **Domain** | **TTL** | **Type** | **Priority** | **Target** |
+| ------------ |:--------:|:----:|:----:| ----- |
+| example.com | 86400 | MX | 10 | 203.0.113.0 |
A typical MX record looks like this:
- example.com 86400 MX 10 example.com
- example.com 86400 MX 10 12.34.56.78
- mail.example.com 86400 MX 10 12.34.56.78
+{{< output >}}
+example.com 86400 MX 10 example.com
+example.com 86400 MX 10 203.0.113.0
+mail.example.com 86400 MX 10 203.0.113.0
+{{< /output >}}
-If you use Linode's [DNS Manager](/docs/dns-manager), you'll need to point your MX records to a target domain or subdomain that resolves to your Linode. Make sure that domain or subdomain has an A record that points to the correct IP address.
+If you use Linode's [DNS Manager](/docs/dns-manager), point your MX records to a target domain or subdomain that resolves to your Linode. Make sure that domain or subdomain has an *A record* that points to the correct IP address.
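+
+After the records have propagated, you can verify them from any machine with `dig` (using this guide's placeholder names):
+
+    dig +short example.com MX
+    dig +short mail.example.com A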
#### SPF Records
-SPF records help establish the legitimacy of your mail server and reduce the chances of spoofing, which occurs when someone fakes the headers on an email to make it look like it's coming from your domain, even though the message did not originate from your Linode. Spammers sometimes try to do this to get around spam filters. An SPF record for your domain tells other receiving mail servers which outgoing server(s) are valid sources of email, so they can reject spoofed email from your domain that has originated from unauthorized servers.
+SPF records help establish the legitimacy of your mail server and reduce the chances of *spoofing*, which occurs when someone fakes the headers of an email to make it look like it's coming from your domain, even though it didn't. Spammers may try to do this to get around spam filters.
-In your SPF record, you should list all the mail servers from which you send mail, and then exclude all the others. Your SPF record will have a domain or subdomain, TTL (time to live, type (which is TXT, or SPF if your name server supports it), and text (which starts with "v=spf1" and contains the SPF record settings).
+An SPF record for your domain tells other receiving mail servers which outgoing server(s) are valid sources of email, so they can reject spoofed email from your domain that has originated from unauthorized servers.
+
+In your SPF record, list all the mail servers from which you send mail, and exclude all the others. Your SPF record will have a domain or subdomain, a TTL, a type (TXT, or SPF if your name server supports it), and a text field that starts with `v=spf1` and contains the SPF record settings.
If your Linode is the only mail server you use, you should be able to use the example record below. With this SPF record, the receiving server will compare the IP address of the sending server with the IP address of `example.com`. If the IPs match, the check passes. If not, the check will "soft fail" (i.e., the message will be marked but will not automatically be rejected for failing the SPF check).
- example.com 86400 TXT "v=spf1 a ~all"
+{{< output >}}
+example.com 86400 TXT "v=spf1 a ~all"
+{{< /output >}}
-{{< note >}}
-Make sure your SPF records are not too strict. If you accidentally exclude a legitimate mail server, its messages could get marked as spam. We strongly recommend visiting [openspf.org](http://www.openspf.org/SPF_Record_Syntax) to learn how SPF records work and how to construct one that works for your setup. Their [examples](http://www.openspf.org/FAQ/Examples) are also helpful.
-{{< /note >}}
+Make sure your SPF records are not too strict. If you accidentally exclude a legitimate mail server, its messages could get marked as spam. Visit [openspf.org](http://www.openspf.org/SPF_Record_Syntax) to learn how SPF records work and how to construct one that works for your setup. Their [examples](http://www.openspf.org/FAQ/Examples) are also helpful.
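+
+You can confirm what SPF policy other servers will see by querying your domain's TXT records (placeholder domain shown):
+
+    dig +short example.com TXT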
#### Reverse DNS
-If you haven't yet [set reverse DNS](/docs/hosting-website#setting-reverse-dns) for your mail server's domain or subdomain, do so now for the sake of your mail server. The reverse DNS for your mail server *must* match the hostname of your Linode. If your Linode's reverse DNS and hostname do not match, email from your server may get rejected with the warning "Reverse DNS does not match SMTP Banner." If you need to check or set the hostname, see our [Getting Started](/docs/getting-started#setting-the-hostname) article.
+[Set reverse DNS](/docs/hosting-website#setting-reverse-dns) for your mail server's domain or subdomain.
+
+The reverse DNS for your mail server must match the hostname of your Linode. If your Linode's reverse DNS and hostname do not match, email from your server may get rejected with the warning "Reverse DNS does not match SMTP Banner."
+
+If you need to check or set the hostname, see our [Getting Started](/docs/getting-started#setting-the-hostname) article.
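+
+To confirm that the two values match, you can compare your Linode's reverse DNS entry with its hostname; the IP below is this guide's example address:
+
+    dig +short -x 203.0.113.0
+    hostname -f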
-## Next Steps
+## Spam and Virus Protection, Mail Clients, and More
At this point, you should have a basic mail server up and running. There's a bit more to do, however, if you want to provide your users with the best possible mail experience. This includes adding spam and virus filtering to protect your users, setting up mail clients, providing a webmail solution, and adding any extras you want, such as mailing lists.
-### Spam and Virus Protection
+### Configure Spam and Virus Protection
-Outgoing spam, or spam originating from your Linode, is bad news for everyone involved. It annoys the recipients, and it also gives your server a bad reputation, which makes it harder for you to send legitimate emails. You should take steps to ensure that your Linode is not used as an "open relay" server, which would allow anyone to send messages anywhere using your server. To prevent your Linode from being used as an open relay, make a list of allowed domains and users for your MTA, and make sure it rejects everything else.
+Spam that originates from your Linode may be a violation of [Linode's Terms of Service](/tos), and may lead to your server being blacklisted by a [variety of organizations](https://mxtoolbox.com/blacklists.aspx).
-There are also a few other scenarios where your server could be sending spam. Your server or an installed application might get hacked, one of your users might have a compromised account, or you may be sending out email messages that are getting marked as spam. (This is more likely to occur in the case of mass mailings.) The best way to stay on top of outgoing spam is to keep an eye on your outgoing mail logs and pay attention to bounceback errors.
+Take steps to ensure that your Linode is not used as an *open relay* server, which would allow anyone to send messages anywhere using your server. To prevent your Linode from being used as an open relay, make a list of allowed domains and users for your MTA, and make sure it rejects everything else.
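+
+How you do this depends on your MTA. If you use Postfix, for example, a minimal sketch is to restrict relaying to local and authenticated users with `postconf` (adjust the networks to your own setup):
+
+    sudo postconf -e 'mynetworks = 127.0.0.0/8 [::1]/128'
+    sudo postconf -e 'smtpd_relay_restrictions = permit_mynetworks, permit_sasl_authenticated, defer_unauth_destination'
+    sudo service postfix restart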
-{{< note >}}
-If you do get added to a block list, take steps to mitigate the source of the spam. Then you will have to contact the mail provider that blocked you and follow their steps to be allowed to send mail again.
-{{< /note >}}
+There are also a few other scenarios where your server could be sending spam: your server or an installed application could be hacked, one of your users' accounts could be compromised, or the emails you send could be getting marked as spam by recipients. The best way to stay on top of outgoing spam is to keep an eye on your outgoing mail logs and pay attention to bounceback errors.
Incoming spam can also be a problem. Spam filters help you deal with spam sent to your own users. They let you filter incoming messages based on origin, content, etc. Some spam contains viruses, which can cause more serious damage to recipients.
Here are some of the most popular spam and virus filter services:
-- [Amavis](http://www.amavis.org) is an open source content filter for email that integrates directly with your MTA. It does some checking on its own, and can also be used in conjunction with more robust spam and virus filters.
-- [Clam AntiVirus](http://www.clamav.net/lang/en/) is a popular, free, and open-source virus scanner.
-- [SpamAssassin](http://spamassassin.apache.org) is a very popular free spam filter.
+- [Amavis](http://www.amavis.org) is an open source content filter for email that integrates directly with your MTA. It does some checking on its own, and can also be used in conjunction with more robust spam and virus filters.
+- [Clam AntiVirus](http://www.clamav.net/lang/en/) is a popular, free, and open-source virus scanner.
+- [SpamAssassin](http://spamassassin.apache.org) is a very popular free spam filter.
+
+#### What to Do if Your Server Has Been Blacklisted
+
+If your Linode is added to a [block list](https://mxtoolbox.com/blacklists.aspx), take steps to mitigate the source of the spam. Once mitigated, contact the mail provider that blocked you and follow their steps to be allowed to send mail again.
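+
+You can check whether an IP is listed on a DNS-based blacklist such as Spamhaus by querying the reversed IP against the list's zone; an empty result means the address is not listed (the reversed form of this guide's example IP, 203.0.113.0, is shown):
+
+    dig +short 0.113.0.203.zen.spamhaus.org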
### Mail Clients
-Mail clients are an integral part of the email experience for your users. Microsoft Outlook, Apple Mail, and Mozilla Thunderbird are all examples of mail clients. Most mail clients are compatible with most mail servers – you just need to make a note of the settings you configured on the server side, and make sure you use compatible settings on the client side. Here are some to consider:
+Mail clients are an integral part of the email experience for your users. Microsoft Outlook, Apple Mail, and Mozilla Thunderbird are all examples of mail clients.
+
+Most mail clients are compatible with most mail servers – you just need to make a note of the settings you configured on the server side, and make sure you use compatible settings on the client side. Here are some settings to consider:
-- Protocols: Choose IMAP or POP3 for receiving, and SMTP for sending.
-- Encryption: Choose SSL and/or TLS encryption, based on your server settings. Ideally, you should make everyone use encryption all the time.
-- Authentication: Make sure the format of the credentials entered in the mail client matches the format expected by the server. Sometimes just the "user" part of the email address is the username, and sometimes the entire email address, including the `@` sign, is the username. You can also have usernames that are not related to their corresponding email addresses, but this is not recommended.
-- Mailbox format: Make sure your users will be able to read, create, and use all of the folders they need.
-- Ports: Not only do your mail client(s) and server have to work on the same ports, but your internet service provider has to allow them as well.
+* **Protocols**: Choose IMAP or POP3 for receiving, and SMTP for sending.
+* **Encryption**: Choose SSL and/or TLS encryption, based on your server settings.
+* **Authentication**: Make sure the format of the credentials entered in the mail client matches the format expected by the server. Sometimes just the "user" part of the email address is the username, and sometimes the entire email address, including the `@` sign, is the username. You can also have usernames that are not related to their corresponding email addresses, but this is not recommended.
+* **Mailbox format**: Make sure your users will be able to read, create, and use all of the folders they need.
+* **Ports**: Not only do your mail client(s) and server have to work on the same ports, but your internet service provider has to allow them as well.
Here are some of the typical mail ports:
-- 110 for POP3
-- 995 for SSL-POP (encrypted)
-- 143 for IMAP
-- 993 for IMAPS (encrypted)
-- 25 for SMTP (sometimes blocked by ISPs)
-- 587 for SMTP (actually the preferred non-encrypted port for outgoing connections from mail clients)
-- 465 for SSMTP (encrypted)
+* `110`: POP3
+* `995`: SSL-POP (encrypted)
+* `143`: IMAP
+* `993`: IMAPS (encrypted)
+* `25`: SMTP (sometimes blocked by ISPs)
+* `587`: SMTP (the preferred submission port for outgoing connections from mail clients)
+* `465`: SSMTP (encrypted)
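+
+Once your server is listening on the encrypted ports, you can test a client-style connection with OpenSSL (the hostname is a placeholder):
+
+    openssl s_client -connect mail.example.com:993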
-{{< note >}}
-If you're using a firewall, be sure to edit the rules for your mail server's ports. See [these instructions](/docs/securing-your-server#creating-a-firewall) for more information.
-{{< /note >}}
+If you're using a firewall, be sure to edit the rules for your mail server's ports. See Linode's guide to [configuring a firewall](/docs/security/securing-your-server/#configure-a-firewall) for more information.
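+
+If you followed the `iptables.firewall.rules` approach from that guide, the exceptions would look something like the lines below; open only the ports your mail services actually use:
+
+    -A INPUT -p tcp --dport 587 -j ACCEPT
+    -A INPUT -p tcp --dport 993 -j ACCEPT
+    -A INPUT -p tcp --dport 995 -j ACCEPT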
### Webmail
-Webmail is a type of mail client that can be installed on your server and accessed from a web browser. It allows your users to access their email from your website (example: `http://example.com/mail` anywhere they have access to the Internet. Running a web server is a prerequisite for running a webmail client, so you should follow the [Hosting a Website](/docs/hosting-website) guide if you want to run webmail on your Linode, in addition to installing a mail server.
+Webmail is a type of mail client that can be installed on your server and accessed from a web browser. It allows your users to access their email from your website (example: `http://example.com/mail`) anywhere they have access to the internet. Running a web server is a prerequisite for running a webmail client, so follow the [Hosting a Website](/docs/hosting-website) guide if you want to run webmail on your Linode, in addition to installing a mail server.
Here are some of the most popular webmail clients:
-- [Citadel](http://www.citadel.org) is an all-in-one mail service that includes mail, calendars, instant messaging, mailing lists, and other collaboration tools. It's open source and geared towards small and medium-sized organizations. We have guides for [Citadel on Ubuntu 12.04](/docs/email/citadel/ubuntu-12-04-precise-pangolin) and [Citadel on Debian 6](/docs/email/citadel/debian-6-squeeze/).
-- [Horde Webmail](http://www.horde.org/apps/webmail) is an open-source IMAP client paired with some additional functions like account management and calendars.
-- [RoundCube](http://roundcube.net) is an IMAP client with modern functionality and a clean layout.
-- [SquirrelMail](http://squirrelmail.org) is a solid option, but has an older user interface. Visit our guide to [Install SquirrelMail on Ubuntu 16.04 or Debian 8](/docs/email/clients/install-squirrelmail-on-ubuntu-16-04-or-debian-8)
-- [Zimbra](http://www.zimbra.com) is an all-in-one mail service that's much simpler to install than other options, but less customizable. We have guides for [Zimbra on Ubuntu 10.04](/docs/email/zimbra/install-zimbra-ubuntu-10-04-lucid), [Zimbra on Debian 6](/docs/email/zimbra/install-zimbra-debian-6-squeeze), and [Zimbra CentOS 5](/docs/email/zimbra/install-zimbra-centos-5).
-
-### Extras
-
-There are many other add-ons that can round out the functionality of your mail server. For example, you may want to incorporate mailing list software, user administration so users can change their own passwords, or calendar coordination, just to name a few.
+* [Mail-in-a-Box](https://mailinabox.email/) is an all-in-one mail option that offers a hassle-free approach to setting up a mail server and webmail component.
+* [Citadel](http://www.citadel.org) is an all-in-one mail service that includes mail, calendars, instant messaging, mailing lists, and other collaboration tools. It's open source and geared towards small and medium-sized organizations.
+* [Horde Webmail](http://www.horde.org/apps/webmail) is an open-source IMAP client paired with some additional functions like account management and calendars.
+* [RoundCube](http://roundcube.net) is an IMAP client with modern functionality and a clean layout.
+* [SquirrelMail](http://squirrelmail.org) is a solid option, but has an older user interface.
+* [Zimbra](http://www.zimbra.com) is an all-in-one mail service that's much simpler to install than other options, but less customizable.
diff --git a/docs/email/zimbra/zimbra-on-ubuntu-14-04.md b/docs/email/zimbra/zimbra-on-ubuntu-14-04.md
index 14bb4157c77..6f6184115e4 100644
--- a/docs/email/zimbra/zimbra-on-ubuntu-14-04.md
+++ b/docs/email/zimbra/zimbra-on-ubuntu-14-04.md
@@ -16,6 +16,7 @@ external_resources:
- '[Zimbra OSE Documentation](https://www.zimbra.com/documentation/zimbra-collaboration-open-source)'
---
+
[Zimbra](https://www.zimbra.com/) is a complete mail server that provides a configured Postfix with OpenDKIM, Amavis, ClamAV, and Nginx, ready to handle mail for one or more domains. Zimbra on a Linode is one of the quickest paths to an up-and-running mail server that you will find. This guide will take you through the Zimbra installation procedure.
@@ -227,7 +228,7 @@ Your server was configured when you installed, and most of those settings will w
4. The **Protocol checks** can stop many spam messages before they enter your system. You may turn them all on, but at least _Sender address must be fully qualified_ should be checked.
-5. **DNS checks** use realtime blacklists to reject mail coming from known spamming servers. zen.spamhuas.org is a good suggestion to start with. Enter the domain name into the **List of Client RBLs**.
+5. **DNS checks** use realtime blacklists to reject mail coming from known spamming servers. `zen.spamhaus.org` is a good suggestion to start with. Enter the domain name into the **List of Client RBLs**.
6. Click the **AS/AV** page. Here you determine how "spammy" a message has to be to get tagged or rejected. Zimbra uses SpamAssassin to score every message. A score of zero or less than zero means the message is likely to be worth delivering. A score above zero means there are some indicators that this could be an unwanted email. The **Kill percent** is the score above which Zimbra will not deliver the message at all. The **Tag percent** is the score above which Zimbra will let the message through, but deliver it to the Junk folder.
diff --git a/docs/game-servers/garrys-mod-server-on-centos-7.md b/docs/game-servers/garrys-mod-server-on-centos-7.md
index 7814bb5f975..4a332c489d6 100644
--- a/docs/game-servers/garrys-mod-server-on-centos-7.md
+++ b/docs/game-servers/garrys-mod-server-on-centos-7.md
@@ -43,7 +43,7 @@ From the SteamCMD guide, two additional steps are needed specifically for Gmod.
sudo firewall-cmd --zone=public --add-port=27000 27030/udp --permanent
-2. Install an additonal 32-bit package:
+2. Install an additional 32-bit package:
sudo yum install ncurses-libs.i686
@@ -123,7 +123,7 @@ This will automatically restart Garry's Mod when your server reboots.
### Server Config File
-The default `server.cfg` file is blank, and any configuration options you want to specify for the server must be added. This are optional, but below is a sane starting point.
+The default `server.cfg` file is blank, and any configuration options you want to specify for the server must be added. This is optional, but below is a sane starting point.
{{< file "~/Steam/gmod/garrysmod/cfg/server.cfg" >}}
hostname "server_hostname"
@@ -144,7 +144,7 @@ writeip
2. Note the collection ID. It is located at the end of the url, denoted by the 'X's here:
- http://steamcommunity.com/sharedfiles/filedetails/?id=XXXXXXXXX
+ http://steamcommunity.com/sharedfiles/filedetails/?id=XXXXXXXXX
3. Acquire a Steam API key from the [Steam API Keys](http://steamcommunity.com/dev/apikey) page. Note the key.
diff --git a/docs/game-servers/how-to-set-up-minecraft-server-on-ubuntu-or-debian.md b/docs/game-servers/how-to-set-up-minecraft-server-on-ubuntu-or-debian.md
index b6ff2ddf805..e855099a40d 100644
--- a/docs/game-servers/how-to-set-up-minecraft-server-on-ubuntu-or-debian.md
+++ b/docs/game-servers/how-to-set-up-minecraft-server-on-ubuntu-or-debian.md
@@ -56,7 +56,7 @@ Minecraft version 1.12 is only compatible with OpenJDK 8. If you are using OpenJ
Assign a secure password, and configure any additional [SSH hardening](/docs/security/use-public-key-authentication-with-ssh) options at this time.
{{< note >}}
-If you have a firewall configured according to our [Securing Your Server](/docs/security/securing-your-server) guide, add the following line to your `iptables.firewall.rules` file to add an exception for port 25565:
+If you have a firewall configured according to our [Securing Your Server](/docs/security/securing-your-server/) guide, add the following line to your `iptables.firewall.rules` file to add an exception for port 25565:
-A INPUT -p tcp --dport 25565 -j ACCEPT
{{< /note >}}
@@ -115,7 +115,7 @@ eula=true
-3. To ensure that the Minecraft server runs independent of an SSH connection, execute `run.sh` from within a [GNU Screen](/docs/networking/ssh/using-gnu-screen-to-manage-persistent-terminal-sessions) session:
+3. To ensure that the Minecraft server runs independent of an SSH connection, execute `run.sh` from within a [GNU Screen](/docs/networking/ssh/using-gnu-screen-to-manage-persistent-terminal-sessions/) session:
screen /home/minecraft/run.sh
diff --git a/docs/game-servers/install-black-mesa-on-debian-or-ubuntu.md b/docs/game-servers/install-black-mesa-on-debian-or-ubuntu.md
index aab0b11215a..708e91ba3fe 100644
--- a/docs/game-servers/install-black-mesa-on-debian-or-ubuntu.md
+++ b/docs/game-servers/install-black-mesa-on-debian-or-ubuntu.md
@@ -95,7 +95,7 @@ At the time of writing this guide, Black Mesa has yet to share with customers an
{{< /note >}}
### Server.cfg
-The **server.cfg** file contains the settings of your server. It is not needed because you can start the server every time by specifying desidered values using parameters.
+The **server.cfg** file contains the settings of your server. It is not needed because you can start the server every time by specifying desired values using parameters.
{{< file-excerpt "/home/steam/Steam/steamapps/common/Black Mesa Dedicated Server/bms/cfg/server.cfg" java >}}
// Black Mesa server.cfg file
@@ -248,7 +248,7 @@ You can read the entire list of parameters on the [Valve Wiki](https://developer
wget http://www.metamodsource.net/mmsdrop/1.10/mmsource-1.10.7-git951-linux.tar.gz
{{< note >}}
-This URL costantly changes as MetaMod is updated. Please check the downloads [page](http://www.metamodsource.net/snapshots) for the current URL.
+This URL constantly changes as MetaMod is updated. Please check the downloads [page](http://www.metamodsource.net/snapshots) for the current URL.
{{< /note >}}
3. Extract the downloaded archive:
diff --git a/docs/game-servers/install-dont-starve-together-game-server-on-ubuntu.md b/docs/game-servers/install-dont-starve-together-game-server-on-ubuntu.md
index 6c26eab8450..791ca2d34db 100644
--- a/docs/game-servers/install-dont-starve-together-game-server-on-ubuntu.md
+++ b/docs/game-servers/install-dont-starve-together-game-server-on-ubuntu.md
@@ -41,11 +41,10 @@ From the SteamCMD guide, two additional steps are needed specifically for DST.
sudo dpkg-reconfigure iptables-persistent
-3. Install some additonal 32-bit packages:
+3. Install some additional 32-bit packages:
sudo apt-get install libcurl4-gnutls-dev:i386
-
## Install Don’t Starve Together
1. Be sure you are in the directory `~/Steam`, then access the `Steam>` prompt.
diff --git a/docs/game-servers/launch-a-counter-strike-global-offensive-server-on-ubuntu-14-04.md b/docs/game-servers/launch-a-counter-strike-global-offensive-server-on-ubuntu-14-04.md
index a8c61d286c3..6dd27e623f1 100644
--- a/docs/game-servers/launch-a-counter-strike-global-offensive-server-on-ubuntu-14-04.md
+++ b/docs/game-servers/launch-a-counter-strike-global-offensive-server-on-ubuntu-14-04.md
@@ -79,7 +79,7 @@ CS:GO requires a server token unless you want to limit players to only clients c
## Configure the Server
-1. Create a file called `server.cfg` using your prefered text editor. Choose a hostname and a unique RCON password that you don't use elsewhere.
+1. Create a file called `server.cfg` using your preferred text editor. Choose a hostname and a unique RCON password that you don't use elsewhere.
{{< file "~/Steam/csgo-ds/csgo/cfg/server.cfg" aconf >}}
hostname "server_hostname"
@@ -163,4 +163,4 @@ These settings are changed in the launch command.
### RCON
-When logged into the server, you can open the RCON console with the backtic button (`), or your mapped key. To log in type `rcon_password` followed by your password. For more information regarding RCON, click [here](/docs/game-servers/team-fortress2-on-debian-and-ubuntu/#rcon).
+When logged into the server, you can open the RCON console with the backtick button (`), or your mapped key. To log in type `rcon_password` followed by your password. For more information regarding RCON, click [here](/docs/game-servers/team-fortress2-on-debian-and-ubuntu/#rcon).
diff --git a/docs/game-servers/left-4-dead-2-multiplayer-server-installation.md b/docs/game-servers/left-4-dead-2-multiplayer-server-installation.md
index f7a134ba2a2..a3a5b417746 100644
--- a/docs/game-servers/left-4-dead-2-multiplayer-server-installation.md
+++ b/docs/game-servers/left-4-dead-2-multiplayer-server-installation.md
@@ -142,7 +142,7 @@ This guide requires additional libraries which are not included in our standard
cd ~/Steam/L4D2-server/left4dead2/cfg
- Choose one of the following example files:
+ Choose one of the following example files:
wget https://www.gottnt.com/l4d2/basic-server.cfg
wget https://www.gottnt.com/l4d2/detailed-server.cfg
@@ -168,7 +168,7 @@ The `+port 27020` parameter is not required but is recommended so that your serv
{{< /note >}}
You can change the map to whichever one you prefer.
- This script, when run, will execute the L4D2 server in a [Screen](/docs/networking/ssh/using-gnu-screen-to-manage-persistent-terminal-sessions) session.
+ This script, when run, will execute the L4D2 server in a [Screen](/docs/networking/ssh/using-gnu-screen-to-manage-persistent-terminal-sessions) session.
5. Make the script executable:
@@ -202,7 +202,7 @@ You can connect to the server in any one of three easy methods:
3. A third method is to install the following add-on: [Link](https://steamcommunity.com/sharedfiles/filedetails/?id=121088946) and then launch the game. Next, click on the new `Server Browser` option on the main menu and find your server in the long list of servers. This method only works if you have set the `hostname`, `sv_search_key`, and `sv_tags` options in the config file.
{{< note >}}
-Your L4D2 server will only show up in the `Custom` list of servers. Therefore, we recomend that you add it to your favorites to avoid having to look for it again.
+Your L4D2 server will only show up in the `Custom` list of servers. Therefore, we recommend that you add it to your favorites to avoid having to look for it again.
{{< /note >}}
Finally, invite friends to the game using the Steam Overlay (`SHIFT + TAB`). Let the playing begin!
diff --git a/docs/game-servers/minecraft-with-bungee-cord.md b/docs/game-servers/minecraft-with-bungee-cord.md
index b1a8cf0e634..db32cb588e6 100644
--- a/docs/game-servers/minecraft-with-bungee-cord.md
+++ b/docs/game-servers/minecraft-with-bungee-cord.md
@@ -52,18 +52,18 @@ On the Linode that is going to host BungeeCord:
4. Create another user for the BungeeCord proxy, so that it doesn't have the same privileges as your user. You'll need to keep this password for future reference.
- sudo adduser bungeecord
+ sudo adduser bungeecord
### Configuring the Firewall on the BungeeCord Node
If you're using iptables or ufw to act as a firewall, you'll need to make a rule on the Linode running BungeeCord, to permit TCP on port 25565. This can be done by running:
- sudo iptables -A INPUT -p tcp --dport 25565 -j ACCEPT
+ sudo iptables -A INPUT -p tcp --dport 25565 -j ACCEPT
### Configuring the Firewall on the Spigot Server Linodes
-For BungeeCord, the Spigot servers need to be in offline mode, as the BungeeCord proxy handles the authentication. This can make the servers vulnerable to people connecting directly, as they can connect with any username, potentially allowing for connection as a user with adminsitrative permissions. To prevent this, you can set up iptables to limit connections to only the BungeeCord server.
+For BungeeCord, the Spigot servers need to be in offline mode, as the BungeeCord proxy handles the authentication. This can make the servers vulnerable to people connecting directly, as they can connect with any username, potentially allowing for connection as a user with administrative permissions. To prevent this, you can set up iptables to limit connections to only the BungeeCord server.
{{< note >}}
This section assumes that you've only got a Spigot server running on each Linode. If you have other services, you'll need to modify the rules to allow them to continue working.
@@ -100,7 +100,7 @@ If you've configured your `iptables` firewall by following our [Securing Your Se
Log into the BungeeCord Linode as the `bungeecord` user created earlier, and download BungeeCord:
- wget -O BungeeCord.jar http://ci.md-5.net/job/BungeeCord/lastSuccessfulBuild/artifact/bootstrap/target/BungeeCord.jar
+ wget -O BungeeCord.jar http://ci.md-5.net/job/BungeeCord/lastSuccessfulBuild/artifact/bootstrap/target/BungeeCord.jar
{{< note >}}
This downloads the latest version of BungeeCord. You can find older versions for older Minecraft server versions, [here](http://ci.md-5.net/job/BungeeCord/).
@@ -110,8 +110,7 @@ This downloads the latest version of BungeeCord. You can find older versions for
1. Start BungeeCord up, allowing it to generate the configuration files:
- java -jar BungeeCord.jar
-
+ java -jar BungeeCord.jar
After the prompt `[INFO] Listening on /0.0.0.0:25577` is displayed in the console, type `end` and press Enter.
@@ -119,7 +118,7 @@ This downloads the latest version of BungeeCord. You can find older versions for
3. Edit the following block of the configuration, in order to add our existing Spigot servers:
- {{< file-excerpt "config.yml" yaml >}}
+ {{< file-excerpt "config.yml" yaml >}}
servers:
lobby:
address: localhost:25565
@@ -137,7 +136,7 @@ servers:
address: 203.0.113.112:25565
restricted: false
motd: 'Just another BungeeCord - Forced Host'
- games:
+ games:
address: 203.0.113.198:25565
restricted: false
motd: 'Just another BungeeCord - Forced Host'
@@ -197,7 +196,7 @@ Connect to your BungeeCord address in Minecraft, and run `/server name` where `n
To see who is online on any of the BungeeCord servers that you've linked, you can run:
- /glist
+ /glist
## Troubleshooting
@@ -213,7 +212,7 @@ If there is an issue connecting, then it's important to check that the login ser
If the server shows the MOTD and a ping in the server list, as per the image above, it's likely that the problem lies between BungeeCord and your Spigot servers. To check, you can log into your BungeeCord server, and you'll most likely see a line similar to the following in the logs, where the IP `198.51.100.0` is replaced by your IP. This shows that your client is successfully pinging the BungeeCord server:
- 00:20:34 [INFO] [/198.51.100.0:50677] <-> InitialHandler has connected
+ 00:20:34 [INFO] [/198.51.100.0:50677] <-> InitialHandler has connected
If the logs look similar to above, the following error is likely occurring:
@@ -239,6 +238,6 @@ If this happens, you should check that BungeeCord is actually running, and that
Assuming that the issue is not solved, the issue is likely to be the firewall. You can flush your firewalls with:
- iptables -F
+ iptables -F
You should try again to reconnect. If you can connect now, then you'll need to reconfigure the firewall as detailed above.
diff --git a/docs/game-servers/minecraft-with-mcmyadmin-on-debian.md b/docs/game-servers/minecraft-with-mcmyadmin-on-debian.md
index d26c3afd8e5..38c9356c061 100644
--- a/docs/game-servers/minecraft-with-mcmyadmin-on-debian.md
+++ b/docs/game-servers/minecraft-with-mcmyadmin-on-debian.md
@@ -22,7 +22,7 @@ aliases: ['applications/game-servers/minecraft-with-mcmyadmin-on-debian/']
1. Familiarize yourself with our [Getting Started](/docs/getting-started) guide and complete the steps for setting your Linode's hostname and timezone.
-2. This guide will use `sudo` wherever possible. Complete the sections of our [Securing Your Server](/docs/security/securing-your-server) guide to create a standard user account, harden SSH access and remove unnecessary network services. Do **not** follow the *Configure a Firewall* section yet--this guide includes firewall rules specifcally for a Minecraft server.
+2. This guide will use `sudo` wherever possible. Complete the sections of our [Securing Your Server](/docs/security/securing-your-server) guide to create a standard user account, harden SSH access and remove unnecessary network services. Do **not** follow the *Configure a Firewall* section yet--this guide includes firewall rules specifically for a Minecraft server.
3. Update your system.
@@ -99,13 +99,13 @@ COMMIT
1. Install the Java Runtime Environment, OpenJDK:
- sudo apt-get install openjdk-7-jre
+ sudo apt-get install openjdk-7-jre
-2. [Mono](http://www.mono-project.com/). CubeCoders Limited, the company behind McMyAdmin, packages its own minimal installation of Mono with some necessary source and configuration files. This must be used instead of the generic Mono packages from Debian's repositories.
+2. [Mono](http://www.mono-project.com/) is an open source implementation of the .NET framework. CubeCoders Limited, the company behind McMyAdmin, packages its own minimal installation of Mono with some necessary source and configuration files. This must be used instead of the generic Mono packages from Debian's repositories.
- cd /usr/local
- sudo wget http://mcmyadmin.com/Downloads/etc.zip
- sudo unzip etc.zip; sudo rm etc.zip
+ cd /usr/local
+ sudo wget http://mcmyadmin.com/Downloads/etc.zip
+ sudo unzip etc.zip; sudo rm etc.zip
## Install and Start McMyAdmin
@@ -113,48 +113,48 @@ This section should be completed as your standard user, **not** as root. McMyAdm
1. Create the installation directory and change location to it.
- mkdir ~/mcmyadmin && cd ~/mcmyadmin
+ mkdir ~/mcmyadmin && cd ~/mcmyadmin
2. Download the McMyAdmin installer. You will want to double check its [Download](https://www.mcmyadmin.com/#/download) page to be sure you're grabbing the latest version.
- wget http://mcmyadmin.com/Downloads/MCMA2_glibc26_2.zip
+ wget http://mcmyadmin.com/Downloads/MCMA2_glibc26_2.zip
3. Extract the archive and delete the original zip file.
- unzip MCMA2_glibc26_2.zip; rm MCMA2_glibc26_2.zip
+ unzip MCMA2_glibc26_2.zip; rm MCMA2_glibc26_2.zip
4. Start the initial configuration of McMyAdmin. Replace `PASSWORD` with a strong password which you want for admin access to McMyAdmin's web interface.
- ./MCMA2_Linux_x86_64 -setpass PASSWORD -configonly
+ ./MCMA2_Linux_x86_64 -setpass PASSWORD -configonly
- This will return the output:
+ This will return the output:
- The updater will download and install McMyAdmin to the current directory:
- /home/your_user/mcmyadmin).
+ The updater will download and install McMyAdmin to the current directory:
+ /home/your_user/mcmyadmin).
- Continue? [y/n] :
+ Continue? [y/n] :
- Answer `y`. The installer will run and return you to the command prompt. If everything is as it should be, the only warning you'll see will be for a missing configuration file. As the output says, that would be normal since McMyAdmin was just started for the first time.
+ Answer `y`. The installer will run and return you to the command prompt. If everything is as it should be, the only warning you'll see will be for a missing configuration file. As the output says, that would be normal since McMyAdmin was just started for the first time.
5. Install screen, if it is not already installed.
- sudo apt-get install screen
+ sudo apt-get install screen
6. Start a screen session for the McMyAdmin client.
- screen -S mcma
+ screen -S mcma
7. Change into the McMyAdmin installation directory and start the program.
- cd ~/mcmyadmin; ./MCMA2_Linux_x86_64
+ cd ~/mcmyadmin; ./MCMA2_Linux_x86_64
- If successful, the last three lines of the output will be:
+ If successful, the last three lines of the output will be:
- Notice : McMyAdmin has started and is ready for use.
- Notice : This is the first time McMyAdmin has been started.
- Notice : You must complete the first-start wizard via the web interface.
+ Notice : McMyAdmin has started and is ready for use.
+ Notice : This is the first time McMyAdmin has been started.
+ Notice : You must complete the first-start wizard via the web interface.
- {{< note >}}
+ {{< note >}}
To exit McMyAdmin and return to the command line, enter `/quit`.
{{< /note >}}
@@ -164,16 +164,16 @@ To exit McMyAdmin and return to the command line, enter `/quit`.
2. Log in with the username `admin` and the password that you provided in the installation step.
- 
+ 
3. Once the initial configuration steps are completed, select your settings and then switch to the status page.
- 
+ 
4. Select *Start Server* and accept the Minecraft Server EULA.
- 
+ 
- 
+ 
Congratulations, you now have McMyAdmin running on your Minecraft server!
diff --git a/docs/game-servers/multicraft-on-ubuntu.md b/docs/game-servers/multicraft-on-ubuntu.md
index 5c797857422..e17f3541266 100644
--- a/docs/game-servers/multicraft-on-ubuntu.md
+++ b/docs/game-servers/multicraft-on-ubuntu.md
@@ -16,6 +16,9 @@ title: 'Installing Multicraft on Ubuntu'
aliases: ['applications/game-servers/multicraft-on-ubuntu/']
---
+
+
+
[Multicraft](http://www.multicraft.org/) is a control panel for single or multiple Minecraft servers, with free and paid versions available. This guide will help you install Multicraft on a Linode running Ubuntu 14.04.
{{< note >}}
diff --git a/docs/game-servers/team-fortress2-on-debian-and-ubuntu.md b/docs/game-servers/team-fortress2-on-debian-and-ubuntu.md
index 9881d7ee833..2960dc3592c 100644
--- a/docs/game-servers/team-fortress2-on-debian-and-ubuntu.md
+++ b/docs/game-servers/team-fortress2-on-debian-and-ubuntu.md
@@ -37,7 +37,7 @@ From the SteamCMD guide, two additional steps are needed specifically for TF2.
sudo dpkg-reconfigure iptables-persistent
-3. Install an additonal 32-bit package:
+3. Install an additional 32-bit package:
sudo apt-get install lib32tinfo5
@@ -49,7 +49,7 @@ From the SteamCMD guide, two additional steps are needed specifically for TF2.
2. From the SteamCMD prompt, login anonymously:
- login anonymous
+ login anonymous
Or log in with your Steam username:
@@ -57,8 +57,8 @@ From the SteamCMD guide, two additional steps are needed specifically for TF2.
3. Install TF2 to the `Steam` user's home directory:
- force_install_dir ./tf2
- app_update 232250
+ force_install_dir ./tf2
+ app_update 232250
This can take some time. If the download looks as if it has frozen, be patient. Once the download is complete, you should see this output:
@@ -68,9 +68,9 @@ From the SteamCMD guide, two additional steps are needed specifically for TF2.
4. Quit SteamCMD:
- quit
+ quit
- {{< note >}}
+ {{< note >}}
To update TF2, run the above 4 commands again.
{{< /note >}}
@@ -84,11 +84,11 @@ In order to create a custom list of maps for your server, create `mapcycle.txt`
1. Navigate to `Steam/tf2/tf/cfg`:
- cd ~/Steam/tf2/tf/cfg
+ cd ~/Steam/tf2/tf/cfg
2. Copy `mapcycle_default.txt`:
- cp mapcycle_default.txt mapcycle.txt
+ cp mapcycle_default.txt mapcycle.txt
3. Open the file and add or remove maps as desired.
@@ -103,7 +103,7 @@ The `motd_default.txt` file can contain HTML and is displayed as a website upon
### Server.cfg
-The file `~/Steam/tf2/tf/cfg/server.cfg` is what contains all of the settings you need to customize the loadout of your game. A `server.cfg` file is not needed to run the game but we have a sample config file [here](/docs/assets/team_fortress_2_server_config) which you can edit for your own use.
+The file `~/Steam/tf2/tf/cfg/server.cfg` is what contains all of the settings you need to customize the loadout of your game. A `server.cfg` file is not needed to run the game but we have a sample config file [here](/docs/assets/team_fortress_2_server_config.cfg) which you can edit for your own use.
{{< note >}}
For the configuration of this file, `0` means *off* and `1` means *on*.
@@ -124,7 +124,7 @@ screen -S "Team Fortress 2 Server" ./srcds_run -game tf +map ctf_2fort.bsp
When run, the script will change directories to `~/Steam/tf2` and execute TF2 in a [Screen](/docs/networking/ssh/using-gnu-screen-to-manage-persistent-terminal-sessions) session.
- Optionally, replace `cft_2fort.bsp` with the name of your chosen map’s file, or replace `+map ctf_2fort.bsp` with `+randommap` for a randomized map selection.
+ Optionally, replace `cft_2fort.bsp` with the name of your chosen map’s file, or replace `+map ctf_2fort.bsp` with `+randommap` for a randomized map selection.
2. Make the script executable:
@@ -157,13 +157,13 @@ RCON allows you to make changes to your server from inside of the game.
1. To start using RCON, go to the **Options** setting in the game, and then select **Advanced...**
- [](/docs/assets/team-fortress-rcon.png)
+ [](/docs/assets/team-fortress-rcon.png)
2. From here, check **Enable developer console** and apply these settings.
3. To make changes in-game, it is recommended that you switch to spectator mode, and then press the backtick button (`) to access the developer's console.
- [](/docs/assets/team-fortress-rcon-console.png)
+ [](/docs/assets/team-fortress-rcon-console.png)
4. Log in to RCON by typing in `rcon_password` followed by your password.
diff --git a/docs/getting-started.md b/docs/getting-started.md
index b291182ed3e..3eab9f8c2de 100644
--- a/docs/getting-started.md
+++ b/docs/getting-started.md
@@ -27,11 +27,11 @@ If you haven't already signed up for a Linode account, start here.
1. Create a new account at the [Sign Up page](https://manager.linode.com/signup).
2. Sign in and enter your billing and account information. Most accounts are activated instantly, but some require manual review prior to activation. If your account is not immediately activated, you will receive an email with additional instructions.
-3. Select a Linode plan and datacenter location
+3. Select a Linode plan and data center location

-If you're unsure of which datacenter to select, see our [speed test](http://www.linode.com/speedtest) to determine which location provides the best performance for your target audience. You can also generate [MTR reports](/docs/networking/diagnostics/diagnosing-network-issues-with-mtr/) for each of the datacenters to determine which of our facilities provides the best latency from your particular location.
+If you're unsure of which data center to select, see our [speed test](http://www.linode.com/speedtest) to determine which location provides the best performance for your target audience. You can also generate [MTR reports](/docs/networking/diagnostics/diagnosing-network-issues-with-mtr/) for each of the data centers to determine which of our facilities provides the best latency from your particular location.
## Provision Your Linode
@@ -197,7 +197,7 @@ Ubuntu may prompt you when the Grub package is updated. If prompted, select `kee
emaint sync -a
-After running a sync, it may end with a message that you should upgrade Portage using a `--oneshot` emerge comand. If so, run the Portage update. Then update the rest of the system:
+After running a sync, it may end with a message that you should upgrade Portage using a `--oneshot` emerge command. If so, run the Portage update. Then update the rest of the system:
emerge --uDN @world
diff --git a/docs/github-guide.md b/docs/github-guide.md
index 55b3b8e0e35..9986c7d99af 100644
--- a/docs/github-guide.md
+++ b/docs/github-guide.md
@@ -30,23 +30,23 @@ If you are following these instructions on a Windows system, all commands will n
1. If you have not done so already, generate an SSH key on your local system:
- ssh-keygen
+ ssh-keygen
2. View the contents of the newly-created public key file:
- cat ~/.ssh/id_rsa.pub
+ cat ~/.ssh/id_rsa.pub
3. In a browser window, select your user account icon in the upper right-hand corner of the screen, then click **Settings**. Your user account icon may look different than the one below:
- [](/docs/assets/github-settings.png)
+ [](/docs/assets/github-settings.png)
4. Select the **SSH keys** option from the **Personal settings** menu, then click the **Add SSH key** button:
- [](/docs/assets/github-ssh-key.png)
+ [](/docs/assets/github-ssh-key.png)
5. Copy the contents of your public key file from your terminal window, and paste them into the **Key** text box. Add a descriptive title for your key in the **Title** text box:
- [](/docs/assets/github-load-key.png)
+ [](/docs/assets/github-load-key.png)
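Once the key has been added, you can optionally confirm that GitHub accepts it before continuing; this is a general-purpose check rather than a step from this guide:

    ssh -T git@github.com    # should greet you by username instead of prompting for a password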
### Setting Up Your Repository
@@ -59,20 +59,20 @@ In order to edit or create documents for Linode Guides and Tutorials, you will n
3. Once the fork process has completed, visit the **docs** repository under your repository list on the GitHub homepage:
- [](/docs/assets/github-your-repository.png)
+ [](/docs/assets/github-your-repository.png)
4. Clone your forked repository to your local machine by copying the clone URL and appending it to the following command. We recommend cloning via SSH for this particular step. This command will create a local copy of your cloned repository that you can work with directly in the directory where the command is run:
- [](/docs/assets/github-clone-url.png)
+ [](/docs/assets/github-clone-url.png)
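The exact URL comes from your fork’s page on GitHub; as a purely illustrative example with a placeholder username, an SSH clone of the docs repository would look like:

    git clone git@github.com:YOUR_GITHUB_USERNAME/docs.git    # replace YOUR_GITHUB_USERNAME with your own account name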
- git clone

Contribute to our Tutorials
- Simply choose a topic from the list below and submit a writing sample.
- Once we’ve reviewed and approved your request, we’ll match you with one of our editors and you’ll be on your way to getting published in our rich knowledge base with over a million unique page views each month.
- You’ll also make up to $300 depending on topic difficulty.
+ If you possess expert knowledge and technical writing know-how and would like to be considered as a prospective freelance technical writer for a future topic, please complete the form below.
-
+
+Accuracy: Instructions should be straightforward, technically correct and thoroughly tested. Include brief explanations of each step to explain the purpose of each action.
-Completeness: A guide should leave readers with a finished, working configuration and give them an idea of where to go from there. Considerations for security and best practices must also be made, including firewall rules.
-Originality: Content should be original material written for Linode. We will not accept submissions which are duplicated from other sources.
-
-Here are some examples of exceptional community-contributed guides. The instructions in each are accurate, complete, original and thorough. Refer to these as guidelines for your own submission.
-
-
+ Your writing sample should indicate your capability to draft technical guides that address Linux, Linode, and cloud infrastructure topics, similar to those found in our library. Evaluation of your sample will be based on knowledge of the selected topic, depth of content, format, and–because we publish only in English–mastery of the language.
+ We will negotiate topics and rates with subject matter experts. Please do not recommend specific topics at this time, as they will not be accepted.
+
Updated by {{ .Params.modified_by.name }}
+ {{ if .Params.contributor }}
- Contributed by
- {{ if .Params.contributor.link }}
- {{ .Params.contributor.name }}
- {{ if strings.Contains .Params.contributor.link "twitter.com" }}
-
- {{ else if strings.Contains .Params.contributor.link "github.com" }}
-
- {{ else if strings.Contains .Params.contributor.link "linkedin.com" }}
-
+ Contributed by
+ {{ if .Params.contributor.link }}
+ {{ .Params.contributor.name }}
+ {{ if strings.Contains .Params.contributor.link "twitter.com" }}
+
+ {{ else if strings.Contains .Params.contributor.link "github.com" }}
+
+ {{ else if strings.Contains .Params.contributor.link "linkedin.com" }}
+
+ {{ end }}
+
+ {{ else }}
+ {{ .Params.contributor.name }}
{{ end }}
- {{ else }}
- {{ .Params.contributor.name }}
+ Written by {{ .Params.author.name }}
{{ end }}
- {{ end }}
diff --git a/themes/docsmith/layouts/partials/contribute_footer.html b/themes/docsmith/layouts/partials/contribute_footer.html
index d572de3b69a..a6539d14151 100644
--- a/themes/docsmith/layouts/partials/contribute_footer.html
+++ b/themes/docsmith/layouts/partials/contribute_footer.html
@@ -2,9 +2,9 @@
-We're always expanding our docs. If you like to help people, can write, and want to earn some cash, learn how you can earn up to $300 for every guide you write and we publish.
+We're always expanding our docs. If you like to help people, can write, and have expertise in a Linux or cloud infrastructure topic, learn how you can contribute to our library.