author    | xAlpharax <42233094+xAlpharax@users.noreply.github.com> | 2023-05-06 03:30:43 +0300
committer | xAlpharax <42233094+xAlpharax@users.noreply.github.com> | 2023-05-06 03:30:43 +0300
commit    | f27b568a77580cacd45510f554dd1998445beb75 (patch)
tree      | c0f773e4ff75e7a94d14f974f7d0083d8b132366
parent    | cf395a0190a709fb3a44e2c42b2f401e434cb037 (diff)
Testing and Userland Changes
Changes to be committed:
renamed: Images/colorful.jpg -> Images/Colorful.jpg
new file: Images/Abstract.jpg
new file: Images/Shade.jpg
new file: all.sh
modified: README.md
modified: neuralart.py
modified: stylize.sh
-rw-r--r-- | Images/Abstract.jpg | bin | 0 -> 83678 bytes
-rw-r--r-- | Images/Colorful.jpg (renamed from Images/colorful.jpg) | bin | 277561 -> 277561 bytes
-rw-r--r-- | Images/Shade.jpg | bin | 0 -> 74432 bytes
-rw-r--r-- | README.md | 15
-rwxr-xr-x | all.sh | 7
-rw-r--r-- | colorful_in_Azzalee.mp4 | bin | 1791291 -> 0 bytes
-rw-r--r-- | kek_in_Starry_Night.mp4 | bin | 4754080 -> 0 bytes
-rw-r--r-- | neuralart.py | 14
-rw-r--r-- | preview/bunnies_in_Azzalee.mp4 | bin | 1861866 -> 0 bytes
-rw-r--r-- | preview/bunnies_in_Jitter_Doll.mp4 | bin | 1479507 -> 0 bytes
-rw-r--r-- | preview/cute_in_Azzalee.mp4 | bin | 1871323 -> 0 bytes
-rw-r--r-- | preview/cute_in_Jitter_Doll.mp4 | bin | 1518058 -> 0 bytes
-rwxr-xr-x | stylize.sh | 4
13 files changed, 34 insertions, 6 deletions
diff --git a/Images/Abstract.jpg b/Images/Abstract.jpg
new file mode 100644
index 0000000..0cedbd9
--- /dev/null
+++ b/Images/Abstract.jpg
Binary files differ

diff --git a/Images/colorful.jpg b/Images/Colorful.jpg
index 9c4a499..9c4a499 100644
--- a/Images/colorful.jpg
+++ b/Images/Colorful.jpg
Binary files differ

diff --git a/Images/Shade.jpg b/Images/Shade.jpg
new file mode 100644
index 0000000..ca3da55
--- /dev/null
+++ b/Images/Shade.jpg
Binary files differ

diff --git a/README.md b/README.md
--- a/README.md
+++ b/README.md
@@ -1,19 +1,30 @@
 # neural-art
+
 Neural Style Transfer done from the CLI using a VGG backbone and presented as an MP4.
 
-Weights can be downloaded from [here](https://m1.afileditch.ch/ajjMsHrRhnikrrCiUXgY.pth). The downloaded file should be placed in `./weights/` and will be ignored when pushing as seen in `./.gitignore`
+Weights can be downloaded from [here](https://m1.afileditch.ch/ajjMsHrRhnikrrCiUXgY.pth). The downloaded file should be placed in `./weights/`; anything in that directory is ignored when pushing, as seen in `./.gitignore`.
 
 Update: Alternatively, if the `./weights/` directory is empty, `./neuralart.py` will automatically download the aforementioned default weights.
 
 ### Why use this in 2023 ?
+
 Because style transfer hasn't changed drastically in terms of actual results in recent years. I personally find a certain beauty in inputting a style and a content image rather than a well-curated prompt with a dozen switches. Consider this repo a quick *just works* solution that runs effectively on both CPU and GPU.
 
 ## Usage
 
-The script sits comfortably in `./stylize.sh` so just run it in the project directory:
+The script sits comfortably in `./stylize.sh`, so run it (strictly from the project directory):
 
 ```bash
 ./stylize.sh path/to/style_image path/to/content_image
 ```
 
+A `./all.sh` helper script is also available; it runs `./stylize.sh` for each distinct pair of images present in the `./Images/` directory.
+
+```bash
+./all.sh
+```
+
+If, at any point, you are curious about the individual frames that make up the generated `./content_in_style.mp4`, check `./Output/`.
+There is also a (redundant) `./images.npy` file that contains the raw array data. `./clear_dir.sh` removes these redundant files once they are no longer needed.
+
 ### Requirements
 
 All requirements are specified in `./requirements.txt` as per python etiquette:

diff --git a/all.sh b/all.sh
new file mode 100755
--- /dev/null
+++ b/all.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+for style in Images/* ; do
+    for content in Images/* ; do
+        if [ $style != $content ] && [ ! -f $(basename ${content%.*})'_in_'$(basename ${style%.*})'.mp4' ]; then
+            ./stylize.sh $style $content
+        fi ; done ; done

diff --git a/colorful_in_Azzalee.mp4 b/colorful_in_Azzalee.mp4
deleted file mode 100644
index a99b2e1..0000000
--- a/colorful_in_Azzalee.mp4
+++ /dev/null
Binary files differ

diff --git a/kek_in_Starry_Night.mp4 b/kek_in_Starry_Night.mp4
deleted file mode 100644
index 738b418..0000000
--- a/kek_in_Starry_Night.mp4
+++ /dev/null
Binary files differ

diff --git a/neuralart.py b/neuralart.py
index aee66b9..d556d2f 100644
--- a/neuralart.py
+++ b/neuralart.py
@@ -27,6 +27,16 @@ from PIL import Image
 model_path = 'weights/vgg_conv_weights.pth'
 image_path = '' # root (neural-art) directory
 
+### userland testing for multiple instances, a big nono currently
+
+n_instances = os.popen('ps aux | grep "python neuralart.py" | wc -l').read()
+if int(n_instances) > 3: print("Woah, running 2 or more instances of neural-art at the same time?\nThis is an experimental feature as of now... try it later favorably :3")
+
+### check if there are any weights to use, if not, download the default provided ones
+if int(os.popen('ls -l weights | wc -l').read()) == 1: os.system('wget -O "weights/vgg_conv_weights.pth" "https://m1.afileditch.ch/ajjMsHrRhnikrrCiUXgY.pth"')
+
+### Defining neural architecture
+
 ### VGG was trained on IMAGENET
 ### although old at this point
 ### it still achieves good results
@@ -63,6 +73,7 @@ class VGG(nn.Module):
             self.pool3 = nn.MaxPool2d(kernel_size = 2, stride = 2)
             self.pool4 = nn.MaxPool2d(kernel_size = 2, stride = 2)
             self.pool5 = nn.MaxPool2d(kernel_size = 2, stride = 2)
+
         elif pool == 'avg':
             self.pool1 = nn.AvgPool2d(kernel_size = 2, stride = 2)
             self.pool2 = nn.AvgPool2d(kernel_size = 2, stride = 2)
@@ -99,7 +110,6 @@ class VGG(nn.Module):
         out['r54'] = F.relu(self.conv5_4(out['r53']))
         out['p5'] = self.pool5(out['r54'])
 
-        # RETURN DESIRED ACTIVATIONS
         return [out[key] for key in out_keys]
@@ -200,7 +210,7 @@ style_img, content_img = imgs_torch
 
 # CAN BE INITIALIZED RANDOMLY
 # OR AS A CLONE OF CONTENT IMAGE
 opt_img = Variable(content_img.clone(), requires_grad = True)
-print("Content size:", content_img.size())
+print("Content size:", content_img.size(), sys.argv[2], "in", sys.argv[1])
 print("Target size:", opt_img.size(), end="\n\n")
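The two new startup checks in `neuralart.py` shell out to `ps`, `ls`, and `wget`. The threshold of 3 in the instance check presumably accounts for the `grep` (and the shell running the pipeline) matching their own command lines, and `ls -l weights | wc -l` equals 1 for an empty directory because `ls -l` always prints a leading `total` line. For illustration only, a rough pure-Python equivalent of the weights check (`pathlib`/`urllib` are assumptions of this sketch, not imports added by the commit):

```python
# Illustrative sketch only -- the commit itself shells out to `ls` and `wget`.
import pathlib
import urllib.request

WEIGHTS_URL = "https://m1.afileditch.ch/ajjMsHrRhnikrrCiUXgY.pth"
weights_dir = pathlib.Path("weights")

# An empty ./weights/ directory means no usable weights: fetch the defaults.
weights_dir.mkdir(exist_ok=True)
if not any(weights_dir.iterdir()):
    urllib.request.urlretrieve(WEIGHTS_URL, weights_dir / "vgg_conv_weights.pth")
```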
diff --git a/preview/bunnies_in_Azzalee.mp4 b/preview/bunnies_in_Azzalee.mp4
deleted file mode 100644
index 1cdf496..0000000
--- a/preview/bunnies_in_Azzalee.mp4
+++ /dev/null
Binary files differ

diff --git a/preview/bunnies_in_Jitter_Doll.mp4 b/preview/bunnies_in_Jitter_Doll.mp4
deleted file mode 100644
index 0ad194c..0000000
--- a/preview/bunnies_in_Jitter_Doll.mp4
+++ /dev/null
Binary files differ

diff --git a/preview/cute_in_Azzalee.mp4 b/preview/cute_in_Azzalee.mp4
deleted file mode 100644
index 18f2780..0000000
--- a/preview/cute_in_Azzalee.mp4
+++ /dev/null
Binary files differ

diff --git a/preview/cute_in_Jitter_Doll.mp4 b/preview/cute_in_Jitter_Doll.mp4
deleted file mode 100644
index 7eed718..0000000
--- a/preview/cute_in_Jitter_Doll.mp4
+++ /dev/null
Binary files differ

diff --git a/stylize.sh b/stylize.sh
--- a/stylize.sh
+++ b/stylize.sh
@@ -31,5 +31,5 @@ python renderer.py
 # fix weird render artifacts
 python renderer.py --fix
 
-# wrap everything into a video
-ffmpeg -framerate 60 -pattern_type glob -i 'Output/neural_art_*.png' -vf "pad=ceil(iw/2)*2:ceil(ih/2)*2" $(basename ${2%.*})'_in_'$(basename ${1%.*})'.mp4'
+# wrap everything into a video (automatically overrides)
+ffmpeg -y -framerate 60 -pattern_type glob -i 'Output/neural_art_*.png' -vf "pad=ceil(iw/2)*2:ceil(ih/2)*2" $(basename ${2%.*})'_in_'$(basename ${1%.*})'.mp4'
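For readers less at home with bash parameter expansion: the output name assembled on that last `ffmpeg` line (and tested for by `all.sh`) is simply the content image's stem joined to the style image's stem. The same computation, sketched in Python purely for illustration (not part of the repository):

```python
# Illustrative only: mirrors $(basename ${2%.*})'_in_'$(basename ${1%.*})'.mp4'
import sys
from pathlib import Path

style, content = sys.argv[1], sys.argv[2]   # same argument order as ./stylize.sh
output = f"{Path(content).stem}_in_{Path(style).stem}.mp4"
print(output)  # e.g. ./stylize.sh Images/Shade.jpg Images/Colorful.jpg -> Colorful_in_Shade.mp4
```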