Shuffle (game show), Amphorella cimensis, Zakanale & Craigmillar Castle.

This commit is contained in:
nik gaffney 2021-02-10 18:12:26 +01:00
parent e151fffd17
commit 574ddfa422
11 changed files with 1 addition and 425 deletions

View file

@@ -2,70 +2,4 @@
#+title: neurogenesis
various loose ends of the generative card making approach
* dream-the-dreamer.sh
setup script for linode GPU instances
- from debian 10 starting point ⟶ various GAN stuff
* lightweight & styleGAN
backups are on posthoc
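a minimal sketch of pulling those backups over to posthoc (the hostname is from the note above; both paths are assumptions, not a record of the actual transfer):
#+BEGIN_SRC shell :results raw :wrap SRC text :async
# sketch only -- source and destination paths are placeholders
rsync -av grapheme:results/ posthoc:backups/neural/results/
#+end_src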
** styleGAN2-ADA
ongoing experiments with styleGAN2
current staging on Linode GPU instances (aka grapheme)
#+BEGIN_SRC shell :dir /ssh:grapheme:. :results raw :wrap SRC text :async
ps ax | grep train.py
#+end_src
#+RESULTS:
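a minimal sketch of the kind of train.py invocation behind the run directories above (flags are those of stylegan2-ada-pytorch; the dataset and resume snapshot paths are assumptions, not a record of the actual run):
#+BEGIN_SRC shell :dir /ssh:grapheme:/root/stylegan2-ada-pytorch/ :results raw replace :wrap SRC text :async
# sketch only -- dataset and resume paths are placeholders
python3 train.py \
--outdir=/root/results/flickr6 \
--data=/root/data/512x512 \
--gpus=1 --cfg=auto --snap=10 \
--resume=/root/results/flickr6/00002-512x512-auto1-resumecustom/network-snapshot-001200.pkl
#+end_src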
** generate and mix
#+BEGIN_SRC shell :dir /ssh:grapheme:/root/stylegan2-ada-pytorch/ :results raw replace :wrap SRC text :async
python3 style_mixing.py \
--outdir=/root/results/flickr6/generated/001600 \
--network=/root/results/flickr6/00003-512x512-auto1-resumecustom/network-snapshot-001600.pkl \
--rows=6000-6020 \
--cols=600-620
#+end_src
#+RESULTS:
#+BEGIN_SRC shell :dir /ssh:grapheme:/root/stylegan2-ada-pytorch/ :results raw replace :wrap SRC text :async
python3 generate.py \
--outdir=/root/results/flickr6/generated/001600 \
--network=/root/results/flickr6/00003-512x512-auto1-resumecustom/network-snapshot-001600.pkl \
--seeds=3300-3900
#+end_src
#+RESULTS:
** projection
image projection to explore latent space
an alternative remote target: --target=https://live.staticflickr.com/65535/50698595796_8accf83473_k_d.jpg
#+BEGIN_SRC shell :dir /ssh:grapheme:/root/stylegan2-ada-pytorch/ :results raw replace :wrap SRC text :async
python3 projector.py --outdir=/root/results/flickr6/generated/101405 \
--target=/root/data/_transfer_test/50698595796_8accf83473_k.jpg \
--network=/root/results/flickr6/00003-512x512-auto1-resumecustom/network-snapshot-001440.pkl \
--num-steps=5000
#+END_SRC
#+RESULTS:
: 5efe249760c6a25a5188c67816b13ed4
#+BEGIN_SRC shell :results raw replace :wrap SRC text :async
rsync -av grapheme:results/ \
/Users/zzk/code/foam-repo/superject/cards/neural/styleGAN2-ADA/results
#+END_SRC
mostly accessible via https://repo.fo.am/foam/neural

View file

@@ -1,46 +0,0 @@
#!/bin/bash
# a simple script to test a linode GPU instance for superject abstraction
PLACE=/tmp/neural
mkdir -p $PLACE
cd $PLACE
echo "using the Superject archetypes..."
occasions=("Journey" "Science" "Magic" "Nature" "Power" "Education" "Connection" "Obstacle" "Tool" "Retreat" "Fortune" "Justice" "Sacrifice" "Death" "Art" "Decay" "Ruin" "Mystery" "Bridge" "Energy" "Transformation" "World")
qualia=("Sad" "Happy" "Fear" "Surprise" "Angry" "Disgust" "Grief" "Hunger" "Love" "Pain" "Relief" "Sonic" "Visual" "Tactile" "Scent" "Taste" "Hot" "Cold" "Calm" "Tense" "Full" "Empty")
lures=("Assemble" "Cook" "Destroy" "Hex" "Enact" "Exchange" "Experiment" "Gather" "Give" "Grow" "Repair" "Make" "Perform" "Play" "Protect" "Prototype" "Question" "Resist" "Reverse" "Serve" "Solve" "Trick")
echo "occasions..."
for i in "${occasions[@]}"
do
export CUDA_VISIBLE_DEVICES=0
echo "dreaming $i"
dream $i --save-progress --save-every 30 --epochs=6 --iterations=600
done
echo "lures..."
for i in "${lures[@]}"
do
export CUDA_VISIBLE_DEVICES=0
echo "dreaming $i"
dream $i --save-progress --save-every 30 --epochs=6 --iterations=600
done
#export CUDA_VISIBLE_DEVICES=1
#time dream "Satisfying propositions" --save-progress --save-every 100 --epochs=2 --iterations=300 --random
# note in big-sleep.py -> torch.cuda.set_device(1)
# and/or CUDA_VISIBLE_DEVICES=1,2 python myscript.py
# ffmpeg -r 24 -f image2 -s 1920x1080 -i The_Grand_Evocateur_of_intensity.%d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p The_Grand_Evocateur_of_intensity_m3.mp4

View file

@@ -1,46 +0,0 @@
#!/bin/bash
# a simple script to test a linode GPU instance for superject abstraction
PLACE=/tmp/neural
mkdir -p $PLACE
cd $PLACE
echo "using the Superject archetypes..."
occasions=("Journey" "Science" "Magic" "Nature" "Power" "Education" "Connection" "Obstacle" "Tool" "Retreat" "Fortune" "Justice" "Sacrifice" "Death" "Art" "Decay" "Ruin" "Mystery" "Bridge" "Energy" "Transformation" "World")
qualia=("Sad" "Happy" "Fear" "Surprise" "Angry" "Disgust" "Grief" "Hunger" "Love" "Pain" "Relief" "Sonic" "Visual" "Tactile" "Scent" "Taste" "Hot" "Cold" "Calm" "Tense" "Full" "Empty")
lures=("Assemble" "Cook" "Destroy" "Hex" "Enact" "Exchange" "Experiment" "Gather" "Give" "Grow" "Repair" "Make" "Perform" "Play" "Protect" "Prototype" "Question" "Resist" "Reverse" "Serve" "Solve" "Trick")
echo "occasions..."
for i in "${occasions[@]}"
do
export CUDA_VISIBLE_DEVICES=1
echo "dreaming $i"
dream $i --save-progress --save-every 30 --epochs=6 --iterations=600
done
echo "lures..."
for i in "${lures[@]}"
do
export CUDA_VISIBLE_DEVICES=1
echo "dreaming $i"
dream $i --save-progress --save-every 30 --epochs=6 --iterations=600
done
#export CUDA_VISIBLE_DEVICES=1
#time dream "Satisfying propositions" --save-progress --save-every 100 --epochs=2 --iterations=300 --random
# note in big-sleep.py -> torch.cuda.set_device(1)
# and/or CUDA_VISIBLE_DEVICES=1,2 python myscript.py
# ffmpeg -r 24 -f image2 -s 1920x1080 -i The_Grand_Evocateur_of_intensity.%d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p The_Grand_Evocateur_of_intensity_m3.mp4

View file

@@ -1,64 +0,0 @@
#!/bin/bash
# a simple script to setup a linode GPU instance for superject abstraction
# to be run on a freshly created Debian-10 system
# local setup
# - add IP4 to ~/.ssh/config (example entry sketched below)
# - copy ssh keys via -> ssh-copy-id -i ~/.ssh/id_rsa root@grapheme
# - copy scripts -> scp *.sh grapheme:
# - sshfs grapheme:/tmp/neural /Volumes/
# -
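# an example ~/.ssh/config entry for the step above (a sketch only; the IP is a placeholder)
#   Host grapheme
#     HostName 203.0.113.10
#     User root
#     IdentityFile ~/.ssh/id_rsa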
# on grapheme
echo "listen to my voice...\n"
cd /root
export DEBIAN_FRONTEND=noninteractive
# set new apt sources
echo "on the count of 3,2,1...\n"
cat << _EOL_ > /etc/apt/sources.list
deb http://mirrors.linode.com/debian/ unstable main contrib non-free
deb-src http://mirrors.linode.com/debian/ unstable main contrib non-free
deb http://mirrors.linode.com/debian/ buster-backports main contrib non-free
deb-src http://mirrors.linode.com/debian/ buster-backports main contrib non-free
_EOL_
apt-get update
apt-get upgrade -y
apt-get install git nvidia-driver firmware-misc-nonfree python3-pip emacs-nox nvidia-cuda-dev nvidia-cuda-toolkit nvtop screen rsync -y &> /dev/null | echo 'Installing packaged gunk...'
apt purge -y linux-image-4.19.0-13-amd64
apt-get upgrade -y
# setup nvidia driver
echo blacklist nouveau > /etc/modprobe.d/blacklist-nvidia-nouveau.conf
echo options nouveau modeset=0 >> /etc/modprobe.d/blacklist-nvidia-nouveau.conf
# install the GANs
echo "partial attention..."
git clone https://github.com/NVlabs/stylegan2-ada-pytorch
pip3 install torch==1.7.1 pycuda psutil Pillow scipy click requests tqdm pyspng ninja tensorboard imageio imageio-ffmpeg==0.4.3
#pip3 install stylegan2_pytorch &> /dev/null | echo 'Installing styleGAN2...'
#pip3 install lightweight-gan &> /dev/null | echo 'Installing lightweight GAN...'
#pip3 install deep-daze &> /dev/null | echo 'Installing deep-daze...'
#pip3 install big-sleep &> /dev/null | echo 'Installing big-sleep...'
mkdir -p /tmp/neural
echo "you are now asleep."
# make new users if reqd?
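# a possible sketch, if user creation turns out to be needed (username is a placeholder):
#   adduser --disabled-password --gecos "" dreamer
#   usermod -aG sudo dreamer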

View file

@@ -1,7 +0,0 @@
#!/bin/bash
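# stitch a saved-progress image sequence (<name>.0.png, <name>.1.png, ...) into an mp4
# usage sketch: pass the frame prefix as the only argument, e.g. The_Grand_Evocateur_of_intensity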
NAME=$1
echo "composing $1"
ffmpeg -r 24 -f image2 -s 1920x1080 -i "$NAME".%d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p "$NAME"_x0.mp4

View file

@@ -1,31 +0,0 @@
#!/bin/bash
# a simple script to run stylegan2_pytorch on a linode GPU instance
# PLACE=/tmp/neural/
# mkdir -p $PLACE
# cd $PLACE
echo "using stlyeGAN2..."
phrases=("An energetic nomad on a long hike with wooden staff a green leaf and a small dog." "At the beginning of a magical quest for wisdom an animus of a conman hide beneath the table." "A charismatic female figure meditates on the book of flesh in isolation, oval and white." "Fertility of nature in small green leaves, adam's apple, a pyramid and a baby eagle's wing." "The alchemical gold adorns the throne in a powerful house rooted in matter.")
echo "cosmic entanglement..."
cd /root
# transfer learning using ffhq model for styleGAN2
stylegan2_pytorch --name flickr3 \
--data data/1024x1024 \
--models_dir models/ffhq-foam \
--results_dir results/flickr3 \
--multi-gpus \
--batch-size 16 --network-capacity 64 \
--aug-prob 0.25 --attn-layers 1
# note in big-sleep.py -> torch.cuda.set_device(1)
# and/or CUDA_VISIBLE_DEVICES=1,2 python myscript.py
# ffmpeg -r 24 -f image2 -s 1920x1080 -i The_Grand_Evocateur_of_intensity.%d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p The_Grand_Evocateur_of_intensity_m3.mp4

View file

@@ -1,28 +0,0 @@
#!/bin/bash
# a simple script to test a linode GPU instance for superject abstraction
PLACE=/tmp/neural
mkdir -p $PLACE
cd $PLACE
echo -e "using the Whitehead glossary..."
echo -e "deep daze"
export CUDA_VISIBLE_DEVICES=0
time imagine "Life-cycle of occasions" --deeper --save-progress --save-every 100
echo -e "big sleep"
export CUDA_VISIBLE_DEVICES=1
time dream "Satisfying propositions" --save-progress --save-every 100 --epochs=2 --iterations=300 --random
# note in big-sleep.py -> torch.cuda.set_device(1)
# and/or CUDA_VISIBLE_DEVICES=1,2 python myscript.py
# ffmpeg -r 24 -f image2 -s 1920x1080 -i The_Grand_Evocateur_of_intensity.%d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p The_Grand_Evocateur_of_intensity_m3.mp4

View file

@@ -1,46 +0,0 @@
# via https://github.com/huggingface/pytorch-pretrained-BigGAN
import torch
from pytorch_pretrained_biggan import (BigGAN, one_hot_from_names, one_hot_from_int, truncated_noise_sample, save_as_images, display_in_terminal)
from PIL import Image
# OPTIONAL: if you want to have more information on what's happening, activate the logger as follows
import logging
logging.basicConfig(level=logging.INFO)
# Load pre-trained model tokenizer (vocabulary)
model = BigGAN.from_pretrained('biggan-deep-512')
# Prepare an input
truncation = 0.001
class_vector = one_hot_from_int([22, 65, 555, 333], batch_size=4)
#class_vector = one_hot_from_names(['soap bubble', 'coffee', 'mushroom', 'daisy'], batch_size=4)
noise_vector = truncated_noise_sample(truncation=truncation, batch_size=4)
# All in tensors
# noise_vector = torch.from_numpy(noise_vector)
# class_vector = torch.from_numpy(class_vector)
noise_vector = torch.from_numpy(noise_vector)
class_vector = torch.from_numpy(class_vector)
# Everything stays on CPU here; use .to('cuda') instead if a GPU is available
noise_vector = noise_vector.to('cpu')
class_vector = class_vector.to('cpu')
model.to('cpu')
# Generate an image
with torch.no_grad():
    output = model(noise_vector, class_vector, truncation)
# If you have a GPU, put the output back on CPU
output = output.to('cpu')
# If you have a sixel compatible terminal you can display the images in the terminal
# (see https://github.com/saitoha/libsixel for details)
display_in_terminal(output)
# Save results as png images
save_as_images(output)

View file

@@ -1,30 +0,0 @@
#!/bin/bash
# a simple script to test a linode GPU instance for superject abstraction
PLACE=/tmp/neural/hexagrams
mkdir -p $PLACE
cd $PLACE
echo "using the HEXAGRAMS..."
hexagrams=("The Creative Heaven" "The Receptive Earth" "Difficulty At The Beginning" "Youthful Folly" "Waiting" "Conflict" "The Army" "Holding Together" "Small Taming" "Treading" "Peace" "Standstill" "Fellowship" "Great Possession" "Modesty" "Enthusiasm" "Following" "Work On The Decayed" "Approach" "Contemplation" "Biting Through" "Grace" "Splitting Apart" "Return" "Innocence" "Great Taming" "Mouth Corners" "Great Preponderance" "The Abysmal Water" "The Clinging Fire" "Influence" "Duration" "Retreat" "Great Power" "Progress" "Darkening Of The Light" "The Family" "Opposition" "Obstruction" "Deliverance" "Decrease" "Increase" "Breakthrough" "Coming To Meet" "Gathering Together" "Pushing Upward" "Oppression" "The Well" "Revolution" "The Cauldron" "The Arousing Thunder" "The Keeping Still Mountain" "Development" "The Marrying Maiden" "Abundance" "The Wanderer" "The Gentle Wind" "The Joyous Lake" "Dispersion" "Limitation" "Inner Truth" "Small Preponderance" "After Completion" "Before Completion")
echo "bifold trigram..."
for i in "${hexagrams[@]}"
do
export CUDA_VISIBLE_DEVICES=1
echo "dreaming $i"
dream "$i" --save-progress --save-every 10 --epochs=1 --iterations=100 --image-size=512
done
#export CUDA_VISIBLE_DEVICES=1
# note in big-sleep.py -> torch.cuda.set_device(1)
# and/or CUDA_VISIBLE_DEVICES=1,2 python myscript.py
# ffmpeg -r 24 -f image2 -s 1920x1080 -i The_Grand_Evocateur_of_intensity.%d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p The_Grand_Evocateur_of_intensity_m3.mp4

View file

@@ -1,30 +0,0 @@
#!/bin/bash
# a simple script to test a linode GPU instance for superject abstraction
PLACE=/tmp/neural/tarot
mkdir -p $PLACE
cd $PLACE
echo "using the Tarot..."
phrases=("An energetic nomad on a long hike with wooden staff a green leaf and a small dog." "At the beginning of a magical quest for wisdom an animus of a conman hide beneath the table." "A charismatic female figure meditates on the book of flesh in isolation, oval and white." "Fertility of nature in small green leaves, adam's apple, a pyramid and a baby eagle's wing." "The alchemical gold adorns the throne in a powerful house rooted in matter.")
echo "cosmic entanglement..."
for i in "${phrases[@]}"
do
export CUDA_VISIBLE_DEVICES=0
echo "dreaming $i"
dream "$i" --save-progress --save-every 20 --epochs=6 --iterations=800 --image-size=512
done
#export CUDA_VISIBLE_DEVICES=1
# note in big-sleep.py -> torch.cuda.set_device(1)
# and/or CUDA_VISIBLE_DEVICES=1,2 python myscript.py
# ffmpeg -r 24 -f image2 -s 1920x1080 -i The_Grand_Evocateur_of_intensity.%d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p The_Grand_Evocateur_of_intensity_m3.mp4

View file

@@ -1,30 +0,0 @@
#!/bin/bash
# a simple script to test a linode GPU instance for superject abstraction
PLACE=/tmp/neural/tarot
mkdir -p $PLACE
cd $PLACE
echo "using the Tarot..."
phrases=("An energetic nomad on a long hike with wooden staff a green leaf and a small dog." "At the beginning of a magical quest for wisdom an animus of a conman hide beneath the table." "A charismatic female figure meditates on the book of flesh in isolation, oval and white." "Fertility of nature in small green leaves, adam's apple, a pyramid and a baby eagle's wing." "The alchemical gold adorns the throne in a powerful house rooted in matter.")
echo "cosmic entanglement..."
for i in "${phrases[@]}"
do
export CUDA_VISIBLE_DEVICES=1
echo "dreaming $i"
dream "$i" --save-progress --save-every 40 --epochs=6 --iterations=800 --image-size=512
done
#export CUDA_VISIBLE_DEVICES=1
# note in big-sleep.py -> torch.cuda.set_device(1)
# and/or CUDA_VISIBLE_DEVICES=1,2 python myscript.py
# ffmpeg -r 24 -f image2 -s 1920x1080 -i The_Grand_Evocateur_of_intensity.%d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p The_Grand_Evocateur_of_intensity_m3.mp4