Initial commit
commit 802412ba22
@@ -0,0 +1,3 @@
__pycache__
*.swp
ftg/config.yml

@@ -0,0 +1,75 @@
set realname = "mio" # Name in from field
set from = "mio@tilde.town" # From field
set hostname = "tilde.town" # @[hostname]

# Folders
set folder = ~/Mail/inbox # mail folder
set spoolfile = /var/mail/mio # mail source folder
set tmpdir = ~/Mail/tmp # Temp folder
set postponed = ~/Mail/drafts # Drafts folder
set record = ~/Mail/sent # Sent folder
set trash = ~/Mail/trash # Trash folder

# Folders displayed in the sidebar
mailboxes = ~/Mail/inbox
mailboxes = ~/Mail/drafts
mailboxes = ~/Mail/sent
mailboxes = ~/Mail/trash
mailboxes = ~/Mail/town
mailboxes = ~/Mail/list

# Mark all new as read
macro index A \
  "<tag-pattern>~N<enter><tag-prefix><clear-flag>N<untag-pattern>.<enter>" \
  "mark all read"

# Go to folders
macro index,pager gd <change-folder>~/Mail/drafts<enter> "drafts"
macro index,pager gs <change-folder>~/Mail/sent<enter> "sent"
macro index,pager gt <change-folder>~/Mail/trash<enter> "trash"

# Save messages to folders
bind index,pager f noop
macro index fl "<tag-prefix-cond><save-message>~/Mail/list<enter><end-cond><sidebar-open>"
macro index ft "<tag-prefix-cond><save-message>~/Mail/town<enter><end-cond><sidebar-open>"


bind attach S save-entry # Save attachment
bind compose f edit-from # Edit from field
bind index,pager G group-reply # Group reply
bind index,pager j sidebar-next # Scroll down sidebar
bind index,pager k sidebar-prev # Scroll up sidebar
bind index,pager l list-reply # List reply
bind index,pager o sidebar-open # Open folder listed in sidebar
bind index D purge-message # Delete D flag mail
bind pager <down> next-line # Scroll down in message
bind pager <up> previous-line # Scroll up in message
# For threaded view oldest to newest. Reverse if sorting by newest first.
bind pager <left> previous-undeleted # Previous message
bind pager <right> next-undeleted # Next message

set sidebar_visible = yes # Show sidebar
set sidebar_width = 20 # Sidebar width
set sidebar_format = "%B%* %?N? [%N]?" # Display: mailbox [new]
set sidebar_short_path = yes # Shorten mailbox paths

set postpone = yes # Save to drafts
set copy = yes # Save copy of sent mail
set maildir_trash = yes # d flag mail handling
set delete = yes # Move d flag mail on exit
set check_new = yes # Check mail with mailbox open
set mail_check = 600 # Check interval in seconds
set editor = "vim" # Editor to compose mail
set charset = "utf-8" # Character encoding fallback
set send_charset = "utf-8:iso-8859-2:iso-8859-1" # Encoding for sent mail
set content_type = "text/plain" # Message body Content-Type
unset confirmappend # No prompt for mail append

unmy_hdr * # Clear preset headers
unset user_agent # Disable user-agent header
set hidden_host = yes # Skip hostname
set edit_headers = yes # Allow editing headers
set use_from = yes # Use custom from
set use_domain = yes # Use custom domain
ignore * # Hide all headers except
unignore from date subject to cc # select fields

@@ -0,0 +1,110 @@
# Keybindings ----------------------------------------------------------------

# Prefix
unbind C-b
set-option -g prefix `
bind-key ` send-prefix

# Close|new|select windows and sessions
bind X kill-window
bind C-x kill-session
bind n new-window
bind N new-session
bind s choose-window
bind S choose-session

# Switch windows
bind [ previous-window
bind ] next-window

# Split windows to panes
bind V split-window -h
bind H split-window -v

# Navigate panes
bind j select-pane -D
bind k select-pane -U
bind h select-pane -L
bind l select-pane -R

# Resize panes
bind -r C-j resize-pane -D 5
bind -r C-k resize-pane -U 5
bind -r C-h resize-pane -L 5
bind -r C-l resize-pane -R 5

# Reload tmux config
bind R source-file ~/.tmux.conf \; display "Config reloaded"

# Copy mode
bind c copy-mode
bind p paste-buffer

# Mouse mode
set -g mouse on


# Appearance -----------------------------------------------------------------

# Enable 256 colours
set-option -g default-terminal "tmux-256color"

# Colours
# See https://commons.wikimedia.org/wiki/File:Xterm_256color_chart.svg
CLR1="#dfafff" # magenta
CLR2="#dadada" # light grey 253
CLR3="#c6c6c6" # light grey 251
CLR4="#767676" # medium grey 243
CLR5="#6c6c6c" # medium grey 242
CLR6="#303030" # dark grey 236
CLR7="#262626" # dark grey 235
CLR8="#121212" # dark grey 233
CLR9="#e4e4e4" # light grey 254
CLR10="#875f87" # dark magenta

# Cursor
set-option -ag terminal-overrides ",tmux*:Cr=\E]12;$CLR1\007"

# Panes
set -g window-style "fg=$CLR2,bg=$CLR6"
set -g window-active-style "fg=$CLR3,bg=$CLR7"

# Status bar
# Status refresh rate (s)
set -g status-interval 60
set -g status-bg $CLR8
set -g status-fg $CLR2

# Simple style
# To select a style, uncomment the "set -g" lines under the style
# Uncomment only one set at a time
SM_SESSION=" #[bold]#S "
SM_TIME=" #[bold]%m/%d %H:%M "
SM_USERS=" #(who | sort --key=1,1 --unique | wc -l) townies "
SM_DISK=" ~#(whoami) #(du -hs $HOME | cut -f1 -d ' ') "
SM_MAIL=" #(cat /var/spool/mail/$USER | grep ' ' && echo '✉')"
SM_LEFT="#[fg=$CLR6,bg=$CLR1]$SM_SESSION"
SM_WIN="#[fg=$CLR4,bg=$CLR8] #I #[fg=$CLR2,bg=$CLR8]#W "
SM_WIN_CUR="#[fg=$CLR5,bg=$CLR6] #I #[fg=$CLR1,bg=$CLR6,bold]#W "
SM_RIGHT=" #[fg=$CLR3]$SM_DISK $SM_MAIL $SM_USERS #[fg=$CLR6,bg=$CLR1]$SM_TIME"
set -g status-left "$SM_LEFT"
set -g window-status-format "$SM_WIN"
set -g window-status-current-format "$SM_WIN_CUR"
set -g status-right-length 50
set -g status-right "$SM_RIGHT"

# Airline style
AL_SESSION=" #[bold]⛁ #S "
AL_TIME=" ⌚ #[bold]%b %d %H:%M "
AL_USERS=" ⛑ #(who | sort --key=1,1 --unique | wc -l) townies "
AL_DISK=" ⌂ #(du -hs $HOME | cut -f1 -d ' ') "
AL_MAIL=" #(cat /var/spool/mail/$USER | grep ' ' && echo '✉') "
AL_LEFT="#[fg=$CLR6,bg=$CLR1]$AL_SESSION#[fg=$CLR1,bg=$CLR8]"
AL_WIN="#[fg=$CLR4,bg=$CLR8] #I #[fg=$CLR2,bg=$CLR8]#W #[fg=$CLR3,bg=$CLR8,bold] "
AL_WIN_CUR="#[fg=$CLR8,bg=$CLR2,bold]#[fg=$CLR4,bg=$CLR2,bold] #I #[fg=$CLR8]#W #[fg=$CLR2,bg=$CLR8]"
AL_RIGHT="#[fg=$CLR10,bg=$CLR8,bold]#[fg=$CLR9,bg=$CLR10] $AL_DISK $AL_MAIL $AL_USERS #[fg=$CLR1,bg=$CLR10]#[fg=$CLR6,bg=$CLR1,dim]$AL_TIME"
#set -g status-left "$AL_LEFT"
#set -g window-status-format "$AL_WIN"
#set -g window-status-current-format "$AL_WIN_CUR"
#set -g status-right-length 80
#set -g status-right "$AL_RIGHT"

@@ -0,0 +1,397 @@
" Basic settings -------------------------------------------------------------

set shell=/bin/bash " Set default shell
set encoding=utf-8 " Set file encoding to UTF-8
set nomodeline " Disable modeline (run code on file open)
set nocompatible " Reset vi compatibility presets
set backspace=indent,eol,start " Enable backspace key in insert mode

syntax on " Enable syntax highlighting
set clipboard=unnamedplus " Use system clipboard
set textwidth=79 " Column width to 79
set colorcolumn=79 " Set column to highlight for textwidth
set wrap " Visual/soft line wrap
set linebreak " Only insert breaks on breakat chars
set nolist " Disable line break, see:
set wrapmargin=0 " vim.wikia.com > tip 989
set autoindent " Auto-indent on line break
set smartindent " Smart indent
set shiftwidth=2 " Set indentation
set tabstop=2
set softtabstop=2
set expandtab " Convert tab to spaces
set shiftround " Indent in multiples of shiftwidth
set cursorline " Highlight current line
set title " Show file title
set number " Show line numbers
set ruler " Show cursor position
set showbreak=↪ " Show line breaks
set showmatch " Show matching opening/closing char
set showcmd " Show command used (bottom bar)
set showmode " Show current mode (bottom bar)
set wildmenu " Enable menu for autocomplete options
set wildmode=list:longest,full " List matches by longest common sections, all
set wildignore=*.tmp,*~ " Exclude some filetypes from wildmenu
set splitbelow " Horizontal split below
set splitright " Vertical split to the right
set hidden " Switch between buffers without save prompt
" To autosave: :set autowrite or autowriteall
set foldenable " Enable code folding
set foldmethod=indent " See :h foldmethod for other options
set foldlevelstart=10 " 0 = all folds closed, 99 = all folds open
set lazyredraw " No redraw when running macros
set cryptmethod=blowfish " Set default encryption method
set history=1000 " Set no. of lines in undo history
set viminfo= " Disable viminfo
" To move viminfo to ~/.vim instead:
" set viminfo+=n~/.vim/viminfo
set noswapfile " Disable *.swp files
set noerrorbells " Disable sounds
set visualbell
set t_vb=
set incsearch " Start searching on input
set nohlsearch " Disable search highlighting
set ignorecase " Ignore case in search for lowercase input
set smartcase " Case-sensitive search for mixed case input
set spelllang=en_gb " Set spell check language
set timeoutlen=2000 " Set timeout (ms) for key mappings
set autochdir " Change into a file's directory on open
colorscheme gruvbox " See :color <tab> for options
set background=dark " Set light/dark bg for themes that use it


" Highlight trailing whitespace
" https://vi.stackexchange.com/q/8563
highlight ExtraWhitespace guibg=#870000 ctermbg=088
match ExtraWhitespace /\s\+$\|\t/
augroup ExtraWhitespace
  au!
  au BufWinEnter * match ExtraWhitespace /\s\+$/
  au InsertEnter * match ExtraWhitespace /\s\+\%#\@<!$/
  au InsertLeave * match ExtraWhitespace /\s\+$/
augroup END

if !has('gui_running')
  set t_Co=256 " Use 256 colorscheme in terminal mode
  set mouse=n " Mouse support in normal mode only
endif

" Omni Complete
set omnifunc=syntaxcomplete#Complete " Enable autocompletion
set complete=.,b,i " Include current buffer, current file
                   " other loaded buffers
set dictionary+=~/.vim/dict/other " Set custom dict


" Key mappings ---------------------------------------------------------------

" Set <leader> key
let mapleader=','

" Move to beginning/end of line
nmap b 0
nmap e $

" Move to prev/next visual row
nmap j gj
nmap k gk

" Keep indent block selected
" https://github.com/bling/dotvim
vmap < <gv
vmap > >gv

" Remap ctrl+d to toggle between shell and vim
nmap <c-d> :sh<cr>

" Disable ctrl+z to avoid accidentally stopping vim
nmap <c-z> <nop>

" Move page down/up
nmap <c-j> <c-f>
nmap <c-k> <c-b>

" Buffer navigation — go to N, prev, next, left, right
nmap <leader>bg :ls<cr>:b
nmap <leader>bh :bprev<cr>
nmap <leader>bl :bnext<cr>
nmap <space>[ <c-w>h<cr>
nmap <space>] <c-w>l<cr>

" Buffer width resize — decrease, increase
nmap <leader>b- :vertical res -5<cr>
nmap <leader>b= :vertical res +5<cr>

" Buffer loading — close, new (same split), reload, new (vsplit)
nmap <leader>bc :bw<cr>
nmap <leader>bn :enew<cr>
nmap <leader>br :e<cr>
nmap <leader>bv <c-w>v:enew<cr>

" Backup file in current buffer
nmap <leader>bk :call BackupFile()<cr>

" Map delete to a black hole register (separate from cut/paste register)
map <leader>d "_d

" Map expression register (used to evaluate expressions)
imap <leader>ee <c-r>=

" Code folding — collapse all, expand all, toggle current fold
nmap <leader>fc zM<cr>
nmap <leader>fe zR<cr>
nmap <leader>ft za<cr>

" Git commands
nmap <leader>ga :!git add .<cr>
nmap <leader>gb :!git branch
nmap <leader>gc :!git commit -m
nmap <leader>gca :!git commit<cr>
nmap <leader>gco :!git checkout
nmap <leader>gd :!git diff<cr>
nmap <leader>gf :!git fetch<cr>
nmap <leader>gg :!git grep
nmap <leader>gl :!git log<cr>
nmap <leader>gm :!git merge<cr>
nmap <leader>gph :!git push<cr>
nmap <leader>gpl :!git pull<cr>
nmap <leader>gs :!git status<cr>

" New markdown note
nmap <leader>md :call AddNewFile('$HOME/', '', 'md')<cr>

" Toggle netrw browser
nmap <silent> <leader>nt :call ToggleNetrw()<cr>

" Map OmniComplete
imap <leader>o <c-x><c-o>

" Insert paste into file from cat input
" https://stackoverflow.com/a/2545242
nmap <leader>pp :r! cat<cr>

" Search for selection, prompt for replacement, replace all in file
" https://stackoverflow.com/a/31172452
vnoremap <leader>sa "0y<esc>:%s/<c-r>0//g<left><left>
" Prompt for search/replace text, replace all in selection
vnoremap <leader>sr :s///g<left><left><left>

" Toggle spell check
nmap <leader>sc :setl spell!<cr>

" Search in current directory
nmap <leader>sd :!grep -R <left>

" Toggle search term highlighting
nmap <leader>sh :set nohlsearch!<cr>

" Sessions — load (waits for file input), save
nmap <leader>sl :source $HOME/.vim/sessions/
nmap <leader>ss :mksession! $HOME/.vim/sessions/

" Tab navigation — close, prev, next, new
" To go directly to tab n: [n]gt
nmap <leader>tc :tabc<cr>
nmap <leader>th :tabp<cr>
nmap <leader>tl :tabn<cr>
nmap <leader>tn :tabe<cr>

" Edit/refresh to apply vimrc changes
nmap <leader>ve :tabe $MYVIMRC<cr>
nmap <leader>vr :source $MYVIMRC<cr>

" Show word count
" https://unix.stackexchange.com/a/145293
nmap <leader>wc g<c-g><cr>
vmap <leader>wc :s/\S\+//gn<cr>

" Trim leading whitespace
" https://unix.stackexchange.com/a/29619
" To reset cursor at first selected line: vmap <leader>wsl :%le<cr>
vmap <leader>wl :normal 0dw<cr>

" Trim trailing whitespace
" http://oualline.com/vim-cook.html#trim
" https://vim.fandom.com/wiki/Remove_unwanted_spaces
nmap <leader>wst :1,$s/[ <tab>]*$//<cr>
vmap <leader>wst :s/\s\+$//<cr>


" Netrw
" ----------------------------------------------------------------------------

" Settings
let g:netrw_banner = 0 " Hide info header
let g:netrw_browse_split = 3 " 0: reuse window, 1: hsplit, 2: vsplit,
                             " 3: new tab, 4: previous window
let g:netrw_dirhistmax = 0 " 0: disable history/bookmarks
let g:netrw_keepdir = 0 " Sync dir view and change dir paths
let g:netrw_liststyle = 0 " 0: thin, 1: long, 2: wide, 3: tree
let g:netrw_winsize = 25 " Set pane width

" Set the default path for the scratchpad used by buffer functions
" Default path: ~/.vim/scratchpad
let g:scratch_dir = 'scratchpad'

" Map keys within the file browser
" https://vonheikemen.github.io/devlog/tools/using-netrw-vim-builtin-file-explorer/
fun! MapNetrwKeys()
  " Toggle hidden file visibility
  nmap <buffer> . gh
  " Files — copy, delete, move, rename, select
  nmap <buffer> fc mc
  nmap <buffer> fd D
  nmap <buffer> fm mm
  nmap <buffer> fr R
  nmap <buffer> v mf
  " Go back in history
  nmap <buffer> h u
  " Close file preview buffer
  nmap <buffer> P <C-w>z
endfun

aug netrw_keymaps
  au!
  autocmd filetype netrw call MapNetrwKeys()
augroup END

" Toggle the file browser
" https://stackoverflow.com/questions/5006950/setting-netrw-like-nerdtree
fun! g:ToggleNetrw()
  Lexplore
  vertical resize 25
endfun


" Functions ------------------------------------------------------------------

" Trim trailing whitespace (can be used on filetype)
" e.g. au BufWrite *.* :call DeleteExtraWS()
" https://amix.dk/vim/vimrc.html
fun! g:DeleteExtraWS()
  exe 'normal mz'
  %s/\s\+$//ge
  exe 'normal `z'
endfun


" Create a new file
fun! g:AddNewFile(path, name, ext)
  let date = strftime('%Y-%m-%d')
  let fn = '-' . a:name
  if a:name == ''
    let fn = ''
  endif
  exe 'tabe' a:path . '/' . date . fn . '.' . a:ext
endfun


" Backup the file in the current buffer
" https://www.ibm.com/developerworks/library/l-vim-script-2/index.html
fun! g:BackupFile()
  let b:timestamp = strftime('%Y%m%d%H%M%S')
  return writefile(getline(1,'$'), bufname('%') . '-' . b:timestamp)
endfun


" Wrap a word/selection in brackets
" Based on http://learnvimscriptthehardway.stevelosh.com/chapters/09.html
" and https://superuser.com/a/875160
let g:WrapWordChars = {
  \ '<':'>', '{':'}', '[':']', '(':')', '"':'"', "'":"'", '`':'`',
  \ }
fun! g:WrapWord(prefix)
  " Temporarily disable Auto Pairs plugin if enabled
  if exists('b:autopairs_enabled')
    let l:isapenabled = b:autopairs_enabled
    let b:autopairs_enabled = 0
  endif
  " Add key mappings to prefix + char for insert and select modes
  for c in keys(g:WrapWordChars)
    exe 'inoremap <silent>' . a:prefix . c . ' <esc>viw<esc>a' .
      \ g:WrapWordChars[c] . '<esc>bi' . c . '<esc>ea<right>'
    exe 'vnoremap <silent>' . a:prefix . c . ' xi' . c .
      \ g:WrapWordChars[c] . '<esc>P'
  endfor
  " Restore user settings
  if exists('b:autopairs_enabled')
    let b:autopairs_enabled = l:isapenabled
  endif
endfun
call g:WrapWord('<leader>w')


" Filetypes ------------------------------------------------------------------

filetype plugin indent on

" Group autocommand calls to avoid duplicates whenever writing to buffer
" http://learnvimscriptthehardway.stevelosh.com/chapters/14.html
" Set indentation by filetype (not set with other basic settings as they will
" override filetype setl whenever vimrc is reloaded)
" shiftwidth: indent/unindent width
" tabstop: tab width in spaces (view)
" softtabstop: tab width in spaces (edit)
augroup Filetypes
  au!

  " CSS/Sass
  au FileType css,sass,scss setl shiftwidth=2 tabstop=2 softtabstop=2
    \ omnifunc=csscomplete#CompleteCSS

  " Gophermap needs real <tab> to convert maps
  au BufRead,BufNewFile gophermap setl noexpandtab shiftwidth=4 tabstop=4
    \ softtabstop=0 textwidth=70 colorcolumn=70

  " HTML
  au Filetype htm,html setl
    \ foldmethod=indent indentkeys= shiftwidth=2 tabstop=2 softtabstop=2

  " Markdown
  au FileType markdown,md,mkd setl spell shiftwidth=2 tabstop=2 softtabstop=2

  " Vim-inspired app configs
  au BufRead,BufNewFile vifmrc set filetype=vim
  au BufRead,BufNewFile vimperatorrc set filetype=vim
  au FileType vim,vimrc,vimrc* set shiftwidth=2 tabstop=2 softtabstop=2
augroup END


" Plugins --------------------------------------------------------------------

" vim-emoji-complete — insert emoji
let g:emoji_complete_overwrite_standard_keymaps = 0
imap <leader>em <Plug>(emoji-start-complete)

" ALE — syntax check/linting
" Run lint manually
let g:ale_lint_on_enter = 0
let g:ale_lint_on_save = 0
let g:ale_lint_on_text_changed = 'never'
nmap <leader>ld :call ale#cursor#ShowCursorDetail()<cr>
nmap <leader>li :ALELint<cr>
nmap <leader>lj <Plug>(ale_next_wrap)
nmap <leader>lk <Plug>(ale_previous_wrap)

" Auto Pairs — auto-close brackets
let g:AutoPairs = {
  \ '<':'>', '{':'}', '[':']', '(':')',
  \ '"':'"', "'":"'", '`':'`',
  \ '<!--':'-->',
  \ '{%':'%}', '{#':'#}',
  \ }
" Remap/disable unneeded mappings
let g:AutoPairsMapCR = 0
let g:AutoPairsMultilineClose = 0
let g:AutoPairsShortcutFastWrap = '<nop>'
let g:AutoPairsShortcutJump = '<leader>aj'
let g:AutoPairsShortcutToggle = '<leader>ap'

" commentary.vim — code commenting
imap <leader>/ :Commentary<cr>
vmap <leader>/ :Commentary<cr>

" snippet.vim — code/template expansion
" Key mappings in snipmate.vim/after/plugin/snipMate.vim
let g:snippets_dir = '~/.vim/snippets'
imap <leader>. <c-r>=TriggerSnippet()<cr>

@@ -0,0 +1,27 @@
home:
  dir: "/home/user/public_gopher/ftg"
  url: "/~user/ftg"
  title: "feed the gopher\n\n"
  info: |
    An RSS feed service to browse headlines from gopher. All feeds are
    unofficial and unrelated to the listed sites.
  updated: "Last updated: "
  timestamp: "%Y/%m/%d %H:%M %z"
  nav_back: "Return to the feeds list"
  temp: "The list is currently being updated. Please check again later."

feeds:
  - title: tilde news
    url: https://tilde.news/rss
    permalink: tilde-news
  - title: tildes.net ~tech
    url: https://tildes.net/~tech/topics.rss
    permalink: tildes-tech

update:
  user_agent: "feedthegopher/0.1"
  feed_file: "feed.xml"
  hash_file: ".hash"
  sleep: 1
  skip_cache:
    - youtube.com/watch

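The generator reads this file once at startup: FTG.parse_yaml() loads it with yaml.safe_load and run() walks conf["feeds"], building cache paths from conf["home"] and conf["update"]. A minimal sketch of that access pattern, assuming PyYAML is installed and using the path listed in the .gitignore above:

import yaml

# Load the example config (path as listed in .gitignore: ftg/config.yml)
with open("ftg/config.yml", "r") as fh:
    conf = yaml.safe_load(fh)

# Each feed entry supplies a url and a permalink, plus an optional title
for feed in conf["feeds"]:
    print(feed["permalink"], feed["url"])

# Cached feed and hash files are written under the permalink directory
print(conf["home"]["dir"], conf["update"]["feed_file"], conf["update"]["hash_file"])
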
@@ -0,0 +1,272 @@
# A set of filters to format HTML to text with barebones Markdown-style markup.
import re


class HtmlToFText:

    strip_tags1 = {
        # Remove comments first, which may wrap around other tags
        "<!--": "-->",
        # Remove classes, ids and extraneous attributes
        " class=\"": "\"", " id=\"": "\"",
    }

    strip_tags2 = {
        # Remove doctype, tags and inner html
        "<!": ">",
        "<applet": "</applet>",
        "<aside": "</aside>",
        "<base": "</base>",
        "<canvas": "</canvas>",
        "<form": "</form>",
        "<button": "</button>",
        "<input": "</input>",
        "<label": "</label>",
        "<head": "</head>",
        "<iframe": "</iframe>",
        "<menu": "</menu>",
        "<nav": "</nav>",
        "<noscript": "</noscript>",
        "<param": "</param>",
        "<progress": "</progress>",
        "<rp": "</rp>",
        "<script": "</script>",
        "<style": "</style>",
        # Remove non-functional empty links after stripping classes/ids
        "<a href=\"#\"": "</a>",
        "<a>": "</a>",
        # Remove the tags themselves but not the inner html
        "<article": ">", "</article": ">",
        "<body": ">", "</body": ">",
        "<div": ">", "</div": ">",
        "<footer": ">", "</footer": ">",
        "<header": ">", "</header": ">",
        "<html": ">", "</html": ">",
        "<main": ">", "</main": ">",
        "<section": ">", "</section": ">",
        "<span": ">", "</span": ">",
        "<title": ">", "</title": ">",
        # Remove currently unsupported tags
        "<center": ">", "</center": ">",
        "<frame": ">", "</frame": ">",
        "<small": ">", "</small": ">",
        # "<audio": ">", "</audio": ">",
        # "<video": ">", "</video": ">",
        # "<map": ">", "</map": ">",
    }

    strip_ws = ["\n\n", "\t", "\u00a0"]

    format_tags1 = {
        ">\n<": "><",
        "<blockquote>": "\n>",
    }

    format_tags2 = {
        "<address>": "[[address]]\n", "</address>": "\n",
        "<em>": "*", "</em>": "*",
        "<i>": "*", "</i>": "*",
        "<h1>": "\n# ", "</h1>": "\n",
        "<h2>": "\n## ", "</h2>": "\n",
        "<h3>": "\n### ", "</h3>": "\n",
        "<h4>": "\n#### ", "</h4>": "\n",
        "<h5>": "\n##### ", "</h5>": "\n",
        "<h6>": "\n###### ", "</h6>": "\n",
        "<hr>": "\n---\n", "<hr/>": "\n---\n", "<hr />": "\n---\n",
        "<br>": "\n", "<br/>": "\n", "<br />": "\n",
        "<blockquote>": "\n>", "</blockquote>": "\n",
        "<cite>": "**", "</cite>": "**",
        "<code>": "`", "</code>": "`",
        "<del>": "~~", "</del>": "~~",
        "<ins>": "**", "</ins>": "**",
        "<dl>": "\n", "</dl>": "",
        "<dt>": "", "</dt>": ": ",
        "<dd>": "", "</dd>": "\n",
        "<figcaption>": "*", "</figcaption>": "*",
        "<figure>": "Fig. ", "</figure>": "",
        "<mark>": "***", "</mark>": "***",
        "<p>": "\n", "</p>": "\n",
        "<pre>": "\n```\n", "</pre>": "\n```\n",
        "<q>": "«", "</q>": "»",
        "<ruby>": "", "</ruby>": "",
        "<rt>": " (", "</rt>": ")",
        "<s>": "[[~~", "</s>": "~~]]",
        "<strong>": "**", "</strong>": "**",
        "<sub>": "⏝", "</sub>": "⏝",
        "<sup>": "^", "</sup>": "^",
        "<b>": "**", "</b>": "**",
        "<u>": "__", "</u>": "__",
        # Decode common HTML entities
        "&nbsp;": "\n\n",
        "&#39;": "'",
        "&rsquo;": "'", "&#8217;": "'",
        "&ldquo;": "\"", "&rdquo;": "\"",
        "&ndash;": "—",
        "&copy;": "©",
    }

    def filter_strip_tags(self, html):
        """Strip extraneous html tags."""
        txt = html
        # Some tags need to be stripped before others
        for tag in self.strip_tags1:
            txt = re.sub(tag + ".*?" + self.strip_tags1[tag], "", txt,
                         flags=re.DOTALL)
        for tag in self.strip_tags2:
            txt = re.sub(tag + ".*?" + self.strip_tags2[tag], "", txt,
                         flags=re.DOTALL)
        return txt

    def filter_whitespace(self, html):
        """Strip extra whitespaces often found in dynamically-generated source
        files."""
        txt = html
        for ws in self.strip_ws:
            txt = txt.replace(ws, "")
        return "".join(txt.split("\n\n"))

    def filter_format_tags(self, html):
        """Translate select structure and format-related tags to Markdown-like
        syntax."""
        txt = html
        for tag in self.format_tags1:
            txt = txt.replace(tag, self.format_tags1[tag])
        for tag in self.format_tags2:
            txt = txt.replace(tag, self.format_tags2[tag])
        return txt

    def filter_img(self, html):
        """Translate image tags to Markdown syntax."""
        txt = html
        attrs = {"src": "", "title": "", "alt": ""}
        imgs = re.findall("<img [a-z].*?/>", txt, flags=re.DOTALL)
        for i in imgs:
            for a in attrs:
                if (" " + a + "=\"") in i:
                    attrs[a] = i.split(" " + a + "=\"")[1].split("\"")[0]
                elif (" " + a + "='") in i:
                    attrs[a] = i.split(" " + a + "='")[1].split("'")[0]
                elif (" " + a + "=") in i:
                    attrs[a] = i.split(" " + a + "=")[1].split(" ")[0]
            if attrs["title"] != "":
                md_link = "![" + attrs["alt"] + "](" + attrs["src"] + " \"" + \
                    attrs["title"] + "\")"
            else:
                md_link = "![" + attrs["alt"] + "](" + attrs["src"] + ")"
            txt = txt.replace(i, md_link)
        return txt

    def filter_links(self, html):
        """Translate links to Markdown syntax."""
        txt = html
        links = re.findall("<a [a-z].*?</a>", txt, flags=re.DOTALL)
        attrs = {"href": "", "title": ""}
        md_link = ""
        for l in links:
            if " href=\"" in l:
                attrs["href"] = l.split(" href=\"")[1].split("\"")[0]
                attrs["title"] = l.split(">")[1].strip("</a>")
            elif " href='" in l:
                attrs["href"] = l.split(" href='")[1].split("'")[0]
                attrs["title"] = l.split(">")[1].strip("</a>")
            elif " href=" in l:
                attrs["href"] = l.split(" href=")[1].split(" ")[0]
                attrs["title"] = l.split(">")[1].strip("</a>")
            if (attrs["href"] != "") and (attrs["title"] != ""):
                md_link = "[" + attrs["title"] + "](" + attrs["href"] + ")"
                txt = txt.replace(l, md_link)
        return txt

    def filter_embed(self, html):
        """Translate embed tags to Markdown links."""
        txt = html
        embeds = re.findall("<embed [a-z].*?>", txt, flags=re.DOTALL)
        src = ""
        for e in embeds:
            if " src=\"" in e:
                src = e.split(" src=\"")[1].split("\"")[0]
            elif " src='" in e:
                src = e.split(" src='")[1].split("'")[0]
            elif " src=" in e:
                src = e.split(" src=")[1].split(">")[0]
            if src != "":
                txt = txt.replace(e, "[embed](" + src + ")")
        return txt

    def filter_abbr(self, html):
        """Format abbr tags, e.g. `<abbr title="Hypertext Markup
        Language">HTML</abbr>` -> `HTML [[abbr: Hypertext Markup Language]]`"""
        txt = html
        abbrs = re.findall("<abbr [a-z].*?</abbr>", txt, flags=re.DOTALL)
        attrs = {"abbr": "", "title": ""}
        abbrev = ""
        for a in abbrs:
            if " title=\"" in a:
                attrs["title"] = a.split(" title=\"")[1].split("\"")[0]
                attrs["abbr"] = a.split(">")[1].strip("</abbr>")
            elif " title='" in a:
                attrs["title"] = a.split(" title='")[1].split("'")[0]
                attrs["abbr"] = a.split(">")[1].strip("</abbr>")
            elif " title=" in a:
                attrs["title"] = a.split(" title=")[1].split(" ")[0]
                attrs["abbr"] = a.split(">")[1].strip("</abbr>")
            if (attrs["title"] != "") and (attrs["abbr"] != ""):
                abbrev = attrs["abbr"] + "[[abbr: " + attrs["title"] + "]]"
                txt = txt.replace(a, abbrev)
        return txt

    def filter_time(self, html):
        """Format time tags, e.g. `<time datetime="1970-01-01">Today</time>` ->
        `Today (1970-01-01)`."""
        txt = html
        timestamps = re.findall("<time.*?</time>", txt)
        attrs = {"title": "", "datetime": ""}
        for t in timestamps:
            attrs["title"] = t.split(">")[1].strip("</time>")
            if " datetime=\"" in t:
                attrs["datetime"] = t.split(" datetime=\"")[1].split("\"")[0]
            elif " datetime='" in t:
                attrs["datetime"] = t.split(" datetime='")[1].split("'")[0]
            elif " datetime=" in t:
                attrs["datetime"] = t.split(" datetime=")[1].split(">")[0]
            ts = attrs["title"]
            if attrs["datetime"] != "":
                ts += " (" + attrs["datetime"] + ")"
            txt = txt.replace(t, ts)
        return txt

    def filter_ol(self, html):
        """Parse ordered lists. Only single-level lists are currently
        supported."""
        txt = html
        ol = re.findall("<ol>.*?</ol>", txt, flags=re.DOTALL)
        for o in ol:
            li = o.replace("</li>", "").replace("</ol>", "").split("<li>")
            md = ""
            for l in range(1, len(li)):
                md += str(l) + ". " + li[l].lstrip()
            txt = txt.replace(o, "\n" + md)
        return txt

    def filter_ul(self, html):
        """Parse unordered lists. Only single-level lists are currently
        supported (nested lists will be flattened)."""
        txt = html
        while ("<ul>" in txt) or ("<li>" in txt):
            txt = txt.replace("<li></li>","")
            txt = txt.replace("<ul>", "\n").replace("</ul>", "")
            txt = txt.replace("<li>", "- ").replace("</li>", "\n")
        return txt

    def convert(self, html):
        """Run format filters on html string."""
        text = self.filter_strip_tags(html)
        text = self.filter_whitespace(text)
        text = self.filter_format_tags(text)
        text = self.filter_img(text)
        text = self.filter_links(text)
        text = self.filter_embed(text)
        text = self.filter_abbr(text)
        text = self.filter_time(text)
        text = self.filter_ol(text)
        text = self.filter_ul(text)
        return text

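A short usage sketch for the class above; the HTML fragment is made up for illustration:

from formatter import HtmlToFText

fmt = HtmlToFText()
html = '<h2>Hello</h2><p>Read <a href="https://example.org">this</a>.</p>'
print(fmt.convert(html))
# The filters turn the heading into a "## " line and the anchor into a
# Markdown-style [this](https://example.org) link.
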
@@ -0,0 +1,49 @@
import hashlib
import urllib3

import os


class Hashi:

    url_headers = {"user-agent": "hashi (a file hash checker)/0.1"}
    hash_algorithm = hashlib.sha256
    encoded = "utf-8"

    def __init__(self):
        """Initialise libraries."""
        self.http = urllib3.PoolManager(headers=self.url_headers)
        urllib3.disable_warnings()

    def fetch_url(self, url, path):
        """Fetch a remote text url and save the contents as an UTF-8 file."""
        resp = self.http.request("GET", url)
        os.makedirs(path.rsplit("/", 1)[0], exist_ok=True)
        with open(path, "w", encoding=self.encoded) as fh:
            fh.write(resp.data.decode(self.encoded))

    def get_hash(self, file_path):
        """Given a text file path, get the hash of the file contents."""
        # Use a fresh hash object per call so digests do not accumulate
        # across files
        digest = self.hash_algorithm()
        with open(file_path, "r") as fh:
            bf = fh.read()
        digest.update(bf.encode(self.encoded))
        return digest.hexdigest()

    def check_hash(self, file_path, hash_path):
        """Compare a file hash with another previously saved hash. Return a
        dictionary with a boolean indicating whether the file hash has changed,
        the old and new hashes."""
        has_change = {"changed": False, "old": "", "new": ""}
        cached_hash = ""
        new_hash = self.get_hash(file_path)
        try:
            with open(hash_path, "r") as fh:
                cached_hash = fh.read()
        except FileNotFoundError:
            # Treat as changed (a call to update)
            has_change = {"changed": True, "old": "", "new": new_hash}
        if new_hash != cached_hash:
            has_change = {"changed": True, "old": cached_hash, "new": new_hash}
        return has_change

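This is how the feed generator drives Hashi (mirroring FTG.run in the next file); the URL comes from the example config and the cache paths are illustrative only:

from hashi import Hashi

hh = Hashi()
hh.fetch_url("https://tilde.news/rss", "cache/tilde-news/feed.xml")
check = hh.check_hash("cache/tilde-news/feed.xml", "cache/tilde-news/.hash")
if check["changed"]:
    # Persist the new digest so the next run compares against it
    with open("cache/tilde-news/.hash", "w") as fh:
        fh.write(check["new"])
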
@@ -0,0 +1,225 @@
import feedparser
import urllib3
import yaml

import os
from shutil import rmtree
from sys import exit
from time import sleep, strftime

from hashi import Hashi
from formatter import HtmlToFText


class FTG:

    def init(self, config):
        """Load the config. Please call this first before other methods."""
        self.conf = self.parse_yaml(config)
        self.hh = Hashi()

    def run(self):
        """Download feeds and generate gophermaps."""
        any_change = False
        count = 0
        all_feeds = []
        for f in self.conf["feeds"]:
            # Check feed for changes
            dir_path = self.conf["home"]["dir"] + "/" + f["permalink"]
            feed_path = dir_path + "/" + self.conf["update"]["feed_file"]
            hash_path = dir_path + "/" + self.conf["update"]["hash_file"]
            self.hh.fetch_url(f["url"], feed_path)
            check = self.hh.check_hash(feed_path, hash_path)
            # Build a list of feed data to regenerate the home map
            f["path"] = feed_path
            all_feeds.append(self.parse_rss(f))
            if check["changed"]:
                print("Getting update ...")
                any_change = True
                # Put up placeholder home map while downloading feed items
                self.gen_home_map([], mode="temp")
                self.parse_file_list(all_feeds[count]["items"], dir_path)
                # Cache feed hash
                with open(hash_path, "w") as fh:
                    fh.write(check["new"])
                # Regenerate the map
                self.gen_feed_map(all_feeds[count])
            else:
                print("Feed is up-to-date.")
            count += 1
            sleep(self.conf["update"]["sleep"])
        # If any of the feeds have changed, regenerate the home map
        # to ensure the permalinks to feed maps are current
        if any_change:
            self.gen_home_map(all_feeds)

    def parse_yaml(self, yml):
        """Open a YAML file and return a dictionary of values."""
        try:
            fh = open(yml, "r")
            data = yaml.safe_load(fh)
            fh.close()
        except:
            print("Error: could not load config.")
            exit(1)
        return data

    def parse_rss(self, feed):
        """Given a dictionary with a feed url, title, permalink and feed file
        path, parse the url and return a feed data dictionary."""
        if ("url" not in feed) or (feed["url"] == None) or \
           ("permalink" not in feed) or (feed["permalink"] == None):
            print("Error: missing/empty field. Please check config.")
            exit(1)
        try:
            print("Parsing " + feed["permalink"] + " ...")
            resp = feedparser.parse(feed["path"])
        except:
            print("Error: could not parse (" + feed["url"] + ")")
            exit(1)
        # Insert custom fields
        resp["url"] = feed["url"]
        resp["permalink"] = feed["permalink"]
        if ("title" in feed) and (feed["title"] != None):
            resp["display_title"] = feed["title"]
        else:
            resp["display_title"] = resp["channel"]["title"]
        return resp

    def check_filetype(self, url):
        """Given a resource url, return a dictionary containing the gopher
        filetype and file extension."""
        meta = {}
        meta["ext"] = url.rsplit(".", 1)[1]
        domain = url.rsplit(".", 1)[0]
        if meta["ext"] == "gif":
            meta["type"] = "g"
        elif (meta["ext"] == "png") or (meta["ext"].lower() == "jpg") or \
             (meta["ext"].lower() == "jpeg"):
            meta["type"] = "I"
        elif meta["ext"] == "pdf":
            meta["type"] = "d"
        else:
            meta = {"type": "0", "ext": "txt"}
        # Return empty meta if site matches keywords list
        for kw in self.conf["update"]["skip_cache"]:
            if (kw in domain) or (kw in meta["ext"]):
                meta = {}
                break
        return meta

    def get_file(self, url, ext, path):
        """Save a link to file given the url, extension and file path."""
        # Initialise urllib and include user-agent with request
        hdrs = {"user-agent": self.conf["update"]["user_agent"]}
        http = urllib3.PoolManager(headers=hdrs)
        # Disable ssl warnings
        urllib3.disable_warnings()
        resp = http.request("GET", url)
        fmt = HtmlToFText()
        if ext == "txt":
            try:
                txt = fmt.convert(resp.data.decode("utf-8"))
                with open(path, "w", encoding="utf-8") as fh:
                    fh.write(txt)
            except UnicodeDecodeError:
                # Attempt to work around "codec can't decode byte" error
                # if certain this is a txt/html file
                txt = fmt.convert(resp.data.decode("ISO-8859-1"))
                with open(path, "w", encoding="ISO-8859-1") as fh:
                    fh.write(txt)
        else:
            try:
                with open(path, "wb") as fh:
                    fh.write(resp.data)
            except:
                with open(path, "w") as fh:
                    fh.write("An error occurred while saving the file. " + \
                             "Please notify the administrator.")

    def parse_file_list(self, file_list, path):
        """Given a list of file urls and target directory path, save the links
        as files to the path."""
        count = 0
        # Make sure path exists
        os.makedirs(path, exist_ok=True)
        for i in file_list:
            count += 1
            file_meta = self.check_filetype(i["link"])
            if "ext" in file_meta:
                print("Downloading item (" + str(count) + "/" + \
                      str(len(file_list)) + ") ...")
                file_path = path + "/" + str(count) + "." + \
                    file_meta["ext"]
                self.get_file(i["link"], file_meta["ext"], file_path)
                sleep(self.conf["update"]["sleep"])

    def clear_cache(self, path, *args, **kwargs):
        """Given a directory path and removal mode, remove the selections.
        Modes: dirs, files, all"""
        mode = kwargs.get("mode", "")
        if (mode == "dirs") or (mode == ""):
            for rt, dirs, files in os.walk(path):
                for d in dirs:
                    rmtree(path + "/" + d)
        elif (mode == "files") or (mode == ""):
            for rt, dirs, files in os.walk(path):
                for f in files:
                    os.remove(path + "/" + f)

    def gen_home_map(self, feed_data, *args, **kwargs):
        """Write the top-level gophermap."""
        if kwargs.get("mode", "") == "temp":
            print("Placing temporary gophermap at " + \
                  self.conf["home"]["dir"] + " ...")
            os.makedirs(self.conf["home"]["dir"], exist_ok=True)
            with open(self.conf["home"]["dir"] + "/gophermap", "w") as fh:
                fh.write(self.conf["home"]["title"] + \
                         self.conf["home"]["info"] + "\r\n" + \
                         self.conf["home"]["temp"])
        else:
            print("Generating gophermap at " + self.conf["home"]["dir"] + \
                  " ...")
            os.makedirs(self.conf["home"]["dir"], exist_ok=True)
            with open(self.conf["home"]["dir"] + "/gophermap", "w") as fh:
                fh.write(self.conf["home"]["title"] + \
                         self.conf["home"]["info"] + "\r\n" + \
                         self.conf["home"]["updated"] + \
                         strftime((self.conf["home"]["timestamp"])) + "\n\n\n")
                for f in feed_data:
                    fh.write("1" + f["display_title"] + "\t" + \
                             f["permalink"] + "\n")

    def gen_feed_map(self, feed_data):
        """Given a data dictionary for a feed source, write a feed
        gophermap."""
        dir_path = self.conf["home"]["dir"] + "/" + feed_data["permalink"]
        os.makedirs(dir_path, exist_ok=True)
        self.clear_cache(dir_path)
        count = 0
        print("Generating gophermap " + feed_data["permalink"] + " ...")
        with open(dir_path + "/gophermap", "w") as fh:
            # Info text
            fh.write(feed_data["display_title"] + "\r\n\n" + \
                     "1" + self.conf["home"]["nav_back"] + "\t" + \
                     self.conf["home"]["url"] + "\r\n\n" + \
                     "hWebsite" + "\tURL:" + feed_data["channel"]["link"] + \
                     "\r\n" + "hFeed" + "\tURL:" + feed_data["url"] + "\r\n\n")
            # Item links
            for i in feed_data["items"]:
                count += 1
                fh.write("h" + i["title"] + "\tURL:" + i["link"] + "\r\n")
                file_meta = self.check_filetype(i["link"])
                if "ext" in file_meta:
                    fh.write(file_meta["type"] + "(" + file_meta["ext"] + \
                             ")\t" + str(count) + "." + file_meta["ext"] + "\r\n")
                if ("author" in i) and (i["author"] != ""):
                    fh.write("author: " + i["author"] + "\n")
                if ("date" in i) and (i["date"] != ""):
                    fh.write("posted: " + i["date"] + "\n")
                fh.write("\n")


ftg = FTG()
ftg.init("config.yml")
ftg.run()

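For reference, check_filetype maps item links to gopher item types as in this sketch (illustrative URLs; assumes config.yml is present so init() can load it):

app = FTG()
app.init("config.yml")
print(app.check_filetype("https://example.org/chart.gif"))  # {'ext': 'gif', 'type': 'g'}
print(app.check_filetype("https://example.org/paper.pdf"))  # {'ext': 'pdf', 'type': 'd'}
print(app.check_filetype("https://example.org/post.html"))  # {'type': '0', 'ext': 'txt'}
# Links matching a skip_cache keyword return {} and are linked but not cached.
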
@@ -0,0 +1,9 @@
# feed the gopher

An RSS feed service to browse headlines from gopher.


## Requirements

- python 3
- modules: feedparser pyyaml urllib3

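The main module runs itself: the three module-level calls at the end create FTG, load config.yml and call run(), so executing the file (for example with python3 ftg.py, assuming it is saved as ftg.py next to config.yml) performs a full update. The equivalent from Python:

# Importing the module performs a full update, because of the
# module-level FTG() / init("config.yml") / run() calls at the end of
# the file. The file name ftg.py is an assumption.
import ftg  # noqa: F401
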
@@ -0,0 +1,88 @@
<?xml version="1.0"?>
<opml version="2.0">
  <head>
    <title>the most delicious opml on friend planet</title>
    <dateCreated>Fri, 12 Aug 2022 01:08:10 +0000</dateCreated>
    <dateModified>Fri, 12 Aug 2022 01:08:10 +0000</dateModified>
    <ownerName>barista</ownerName>
    <ownerEmail>barista@me.acdw.net</ownerEmail>
    <ownerId>https://tildegit.org/mio/opml</ownerId>
    <docs>http://dev.opml.org/spec2.html</docs>
  </head>
  <body>
    <outline text="blogs">
      <outline type="rss" text="acdw's casa"
        xmlUrl="https://acdw.casa/feed.xml" htmlUrl="https://acdw.casa/" description="No description available"/>
      <outline type="rss" text="blog // ~ben"
        xmlUrl="https://tilde.team/~ben/blog/index.xml" htmlUrl="https://tilde.team/~ben/blog/" description="Recent content on blog // ~ben"/>
      <outline type="rss" text="Benjamin Wil"
        xmlUrl="https://benjaminwil.info/feed.xml" htmlUrl="https://benjaminwil.info/" description="No description available"/>
      <outline type="rss" text="p1k3::feed"
        xmlUrl="https://p1k3.com/feed" htmlUrl="https://p1k3.com/" description="No description available"/>
      <outline type="rss" text="chrismanbrown.gitlab.io"
        xmlUrl="https://chrisman.github.io/rss.xml" htmlUrl="https://chrismanbrown.gitlab.io/" description="chrisman blog"/>
      <outline type="rss" text="Dozens and Dragons"
        xmlUrl="https://dozensanddragons.neocities.org/rss.xml" htmlUrl="https://dozensanddragons.neocities.org" description="ttrpg blog"/>
      <outline type="rss" text="Society For Putting Things On Top Of Other Things"
        xmlUrl="https://society.neocities.org/rss.xml" htmlUrl="https://society.neocities.org/" description="putting things on top of other things since 1991"/>
      <outline type="rss" text="It's Pro Toad and Superb Owl"
        xmlUrl="https://git.tilde.town/dozens/protoadandsuperbowl/raw/branch/master/feed.xml" htmlUrl="https://git.tilde.town/dozens/protoadandsuperbowl" description="It's Pro Toad and Superb Owl!"/>
      <outline type="rss" text=" Oatmeal"
        xmlUrl="https://eli.li/feed.rss" htmlUrl="https://eli.li" description=" The feed of updates for Oatmeal "/>
      <outline type="rss" text="Aaron Brady"
        xmlUrl="https://www.insom.me.uk/feed.xml" htmlUrl="https://www.insom.me.uk/" description="No description available"/>
      <outline type="rss" text="kindrobot"
        xmlUrl="https://tilde.town/~kindrobot/index.xml" htmlUrl="https://tilde.town/~kindrobot/" description="Recent content on kindrobot"/>
      <outline type="rss" text="~lucidiot's wiki"
        xmlUrl="https://envs.net/~lucidiot/rss.xml" htmlUrl="https://envs.net/~lucidiot/" description="latest articles on ~lucidiot's personal wiki"/>
      <outline type="rss" text="Brainshit"
        xmlUrl="https://brainshit.fr/rss" htmlUrl="https://brainshit.fr" description="Site communautaire de partage de connaissances inutiles et d'inepties pseudo-scientifiques."/>
      <outline type="rss" text="~lucidiot's ideas"
        xmlUrl="https://tilde.town/~lucidiot/ideas/rss.xml" htmlUrl="https://tilde.town/~lucidiot/ideas/" description="random ideas published by ~lucidiot"/>
      <outline type="rss" text="#fridaypostcard"
        xmlUrl="https://tilde.town/~lucidiot/fridaypostcard.xml" htmlUrl="http://tilde.town/~jumblesale/fp.html" description="to contribute, share a link to an image on irc with the text #fridaypostcard. updated every friday"/>
      <outline type="rss" text="m455.casa"
        xmlUrl="https://m455.casa/feed.rss" htmlUrl="https://m455.casa" description="RSS feed for m455.casa"/>
      <outline type="rss" text="Rick Carlino's Blog"
        xmlUrl="https://rickcarlino.com/rss/feed.rss" htmlUrl="https://rickcarlino.com" description="The personal blog of Rick Carlino, a software tinkerer."/>
      <outline type="rss" text="lipu pi jan Niko"
        xmlUrl="https://tilde.town/~nihilazo/index.xml" htmlUrl="https://tilde.town/~nihilazo/" description="Recent content on lipu pi jan Niko"/>
      <outline type="rss" text="(lambda (x) (create x))"
        xmlUrl="https://lambdacreate.com/static/feed.xml" htmlUrl="http://lambdacreate.com" description="A blog held together entirely by lua, coffee, and crazy ideas."/>
      <outline type="rss" text="coolguy.website"
        xmlUrl="https://coolguy.website/rss/" htmlUrl="https://coolguy.website/" description="The personal homepage of friend, writer, code witch,"/>
    </outline>
    <outline text="gemlogs">
      <outline type="rss" text="bakersdozen gemlog"
        xmlUrl="https://portal.mozz.us/gemini/breadpunk.club/~bakersdozen/gemlog/atom.xml" htmlUrl="https://portal.mozz.us/gemini/breadpunk.club/~bakersdozen/gemlog/" description="No description available"/>
      <outline type="rss" text="~mio/gemlog"
        xmlUrl="https://portal.mozz.us/gemini/tilde.town/~mio/log/atom.xml" htmlUrl="https://portal.mozz.us/gemini/tilde.town/~mio/log/" description="No description available"/>
    </outline>
    <outline text="podcasts">
      <outline type="rss" text="Lowtech Radio Gazette"
        xmlUrl="http://lambdacreate.com/static/ltrg/feed.xml" htmlUrl="http://lambdacreate.com" description="A podcast recorded, produced, and published entirely on junk computers!"/>
      <outline type="rss" text="trash cat tech chat"
        xmlUrl="https://podcast.librepunk.club/tctc/ogg.xml" htmlUrl="https://podcast.librepunk.club/tctc/" description="No description available"/>
      <outline type="rss" text="Tilde Whirl Tildeverse Podcast"
        xmlUrl="https://tilde.town/~dozens/podcast/rss.xml" htmlUrl="https://tilde.town/~dozens/podcast/index.html" description="the greatest tildeverse podcast in the world"/>
    </outline>
    <outline text="weeds">
      <outline type="rss" text="dozens weed"
        xmlUrl="https://tilde.town/~dozens/rsspect/feed.xml" htmlUrl="https://tilde.town/~dozens/rsspect/feed.xml" description="my special little weed in my special little garden"/>
      <outline type="rss" text="vgnfdblg"
        xmlUrl="https://supervegan.neocities.org/feed.xml" htmlUrl="https://supervegan.neocities.org/feed.xml" description="a vegan food blog"/>
      <outline type="rss" text="backgammon with dozens"
        xmlUrl="http://tilde.town/~dozens/backgammon/rss.xml" htmlUrl="http://tilde.town/~dozens/backgammon/" description="No description available"/>
      <outline type="rss" text="RSRSSS"
        xmlUrl="https://envs.net/~lucidiot/rsrsss/feed.xml" htmlUrl="https://envs.net/~lucidiot/rsrsss/feed.xml" description="Really Simple Really Simple Syndication Syndication probably just shitposts and feels&#8212; An RSS feed about RSS feeds"/>
      <outline type="rss" text="m455's weed"
        xmlUrl="https://m455.casa/weed/weed.rss" htmlUrl="https://m455.casa/weed/" description="probably just shitposts and feels"/>
    </outline>
    <outline text="misc">
      <outline type="rss" text="linkbudz rss feed"
        xmlUrl="https://linkbudz.m455.casa/feed.rss" htmlUrl="https://linkbudz.m455.casa" description="links shared by friends"/>
      <outline type="rss" text="Glorious Trainwrecks - Make Games Constantly Forever"
        xmlUrl="https://www.glorioustrainwrecks.com/rss.xml" htmlUrl="https://www.glorioustrainwrecks.com" description="No description available"/>
    </outline>
  </body>
</opml>

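The same subscription list can be read back with the standard library, for example to check the feed URLs; mug.of.opml is the output name used by the script below:

import xml.etree.ElementTree as ET

tree = ET.parse("mug.of.opml")
for node in tree.iter("outline"):
    if node.get("type") == "rss":
        print(node.get("text"), node.get("xmlUrl"))
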
@@ -0,0 +1,157 @@
#!/bin/bash
# This script has not been tested with other POSIX shells.

_whoami="opml.sh"
_npc="<barista>"
_readlinkpls=`readlink -f $0`
_whereami=`dirname "$_readlinkpls"`
_wheresauce="$_whereami/urls"
_wheremug="$_whereami/mug.of.opml"
_when_picked=`date -Ru`
_when_tasted="$_when_picked"
_jam_proxy="https://portal.mozz.us/gemini/"
_gob_proxy="https://gopher.tildeverse.org/"

__twinkle="the most delicious opml on planet earth"
__barista="barista"
__pail="mail@example.tld"
__lid="https://git.tilde.town/mio/scripts/src/branch/main/opml"
__an="<?xml version=\"1.0\"?>
<opml version=\"2.0\">
  <head>
    <title>{{twinkle}}</title>
    <dateCreated>{{when_picked}}</dateCreated>
    <dateModified>{{when_tasted}}</dateModified>
    <ownerName>{{barista}}</ownerName>
    <ownerEmail>{{pail}}</ownerEmail>
    <ownerId>{{lid}}</ownerId>
    <docs>http://dev.opml.org/spec2.html</docs>
  </head>
  <body>"
__orange="    <outline text=\"{{kat}}\">"
__pecan="      <outline type=\"rss\" text=\"{{sprinkle}}\"
        xmlUrl=\"{{seed}}\" htmlUrl=\"{{pearl}}\" description=\"{{luncheon}}\"/>"
__mousse="    </outline>"
__latte="  </body>\n</opml>"


an() {
  test -f $_wheresauce || { echo "$_npc sauce or it won't happen"; exit 1; }
  test -z "$1" || __twinkle="$1"
  test -z "$2" || __barista="$2"
  test -z "$3" || __pail="$3"
  test -z "$4" || __lid="$4"
  test -z "$5" || _wheremug="$5"

  # Header
  echo -e "$_npc okay, it'll take a few minutes, why don't you get a cuppa in
  the meantime? oh wait ..."
  echo "$_npc *grabs a mug*"
  echo -e "$__an" > $_wheremug.tmp
  sed -i "s/{{twinkle}}/$__twinkle/g" $_wheremug.tmp
  sed -i "s/{{when_picked}}/$_when_picked/g" $_wheremug.tmp
  sed -i "s/{{when_tasted}}/$_when_tasted/g" $_wheremug.tmp
  sed -i "s/{{barista}}/$__barista/g" $_wheremug.tmp
  sed -i "s/{{pail}}/$__pail/g" $_wheremug.tmp
  sed -i "s|{{lid}}|$__lid|g" $_wheremug.tmp
}

orange_pecan_mousse() {
  # Feeds
  first_kat="true"
  while read lime; do
    is_kat=`echo "$lime" | awk '{ print substr($0, 1, 1) }' | grep '\['`
    is_char=`echo "$lime" | awk '{ print substr($0, 1, 1) }' | grep -E '\[|#'`

    # Category
    if [ -n "$is_kat" ] && [ "$first_kat" == "false" ]; then
      echo -e "$__mousse" >> $_wheremug.tmp
    fi
    if [ -n "$is_kat" ]; then
      echo "$_npc *layering mousse*"
      first_kat="false"
      kat=`echo "$lime" | awk '{ print substr($0, 2, length($0) - 2) }'`
      echo -e "$__orange" >> $_wheremug.tmp
      sed -i "s/{{kat}}/$kat/" $_wheremug.tmp
    fi

    # Feed URL
    test -n "$is_char" || seed=`echo "$lime" | awk '{ print $1 }'`
    if [ ! -z "$seed" ]; then
      # Protocol
      echo -e "$_npc *sprinkling* $seed"
      is_jam=`echo "$seed" | awk '{ print substr($0, 1, 4) }' | grep 'gem'`
      is_gob=`echo "$seed" | awk '{ print substr($0, 1, 4) }' | grep 'gop'`
      if [ -n "$is_jam" ]; then
        seed=`echo "$seed" | sed "s|gemini://|$_jam_proxy|"`
      elif [ -n "$is_gob" ]; then
        seed=`echo "$seed" | sed "s|gopher://|$_gob_proxy|"`
      fi
      germ=`curl -Ls "$seed"`

      is_rss=`echo -e "$germ" | grep -m 1 "<rss"`
      # Atom
      if [ ! -n "$is_rss" ]; then
        # Select the first instance of the closing tag and trim chars after,
        # find opening tag and trim chars before, clean inner html
        sprinkle=`echo -e "$germ" | grep -m 1 "<title" |
          sed "/<\/title>/ s/<\/title>.*//1" |
          sed "s/.*<title>//1" | sed "s/.*<title type=\"html\">//1" |
          sed "s/<!\[CDATA\[//" | sed "s/\]\]>//" | sed "s/\r//"`
      # RSS
      else
        sprinkle=`echo -e "$germ" | grep -m 1 "<title>" |
          sed "s/.*<title>//" | sed "s/<\/title>.*//" |
          sed "s/<!\[CDATA\[//" | sed "s/\]\]>//" | sed "s/\r//"`
        pearl=`echo -e "$germ" | grep -m 1 "<link>" |
          sed "s/.*<link>//" | sed "s/<\/link>.*//" | sed "s/\r//"`
        luncheon=`echo -e "$germ" | grep -m 1 "<description>" |
          sed "s/.*<description>//" | sed "s/<\/description>.*//" |
          sed "s/<!\[CDATA\[//" | sed "s/\]\]>//" | sed "s/\r//"`
      fi
      # Guess the website URL from the feed URL to avoid parsing
      # strings with multiple link tags and no newline delimiters,
      # some feeds don't provide full paths
      if [ ! -n "$is_rss" ] || [ "$pearl" == "/" ] || [ "$pearl" == "./" ];
      then
        bs=`basename "$seed"`
        pearl=`echo -e "$seed" | sed "s/$bs//"`
      fi
      test -z "$luncheon" && luncheon="No description available"

      echo -e "$__pecan" >> $_wheremug.tmp
      sed -i "s|{{sprinkle}}|$sprinkle|g" $_wheremug.tmp
      sed -i "s|{{seed}}|$seed|g" $_wheremug.tmp
      sed -i "s|{{pearl}}|$pearl|g" $_wheremug.tmp
      sed -i "s|{{luncheon}}|$luncheon|g" $_wheremug.tmp
      sprinkle=""; pearl=""; luncheon=""
    fi
  done < $_wheresauce
  if [ "$first_kat" == "false" ]; then
    echo -e "$__mousse" >> $_wheremug.tmp
  fi
}

latte() {
  # Closing
  echo -e "$__latte" >> $_wheremug.tmp
  mv $_wheremug.tmp $_wheremug
  if [ -f $_wheremug ]; then
    echo "$_npc your OPML is ready, enjoy!"
  else
    echo -e "$_npc sorry, something got messed up, lemme know if you want me
    to make another one."
    rm -r $_wheremug.tmp
  fi
}


case "$1" in
  make)
    an "$2" "$3" "$4" "$5" "$6"
    orange_pecan_mousse
    latte
    ;;
  *) echo -e "$_npc $_whoami make|welp [title] [author] [author-email] \
    [author-url] [output-file]";;
esac

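The gemini/gopher handling in orange_pecan_mousse amounts to rewriting the feed URL onto an HTTP proxy before curl fetches it; a small Python sketch of the same rewrite (proxy hosts taken from _jam_proxy and _gob_proxy above, the function name is made up):

def proxy_url(seed):
    # gemini:// and gopher:// feeds are fetched through HTTP proxies
    if seed.startswith("gemini://"):
        return seed.replace("gemini://", "https://portal.mozz.us/gemini/", 1)
    if seed.startswith("gopher://"):
        return seed.replace("gopher://", "https://gopher.tildeverse.org/", 1)
    return seed

print(proxy_url("gemini://tilde.town/~mio/log/atom.xml"))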