summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorxAlpharax <42233094+xAlpharax@users.noreply.github.com>2023-05-06 03:30:43 +0300
committerxAlpharax <42233094+xAlpharax@users.noreply.github.com>2023-05-06 03:30:43 +0300
commitf27b568a77580cacd45510f554dd1998445beb75 (patch)
treec0f773e4ff75e7a94d14f974f7d0083d8b132366
parentcf395a0190a709fb3a44e2c42b2f401e434cb037 (diff)
Testing and Userland Changes
Changes to be committed: renamed: Images/colorful.jpg -> Images/Colorful.jpg new file: Images/Abstract.jpg new file: Images/Shade.jpg new file: all.sh modified: README.md modified: neuralart.py modified: stylize.sh
-rw-r--r--Images/Abstract.jpgbin0 -> 83678 bytes
-rw-r--r--Images/Colorful.jpg (renamed from Images/colorful.jpg)bin277561 -> 277561 bytes
-rw-r--r--Images/Shade.jpgbin0 -> 74432 bytes
-rw-r--r--README.md15
-rwxr-xr-xall.sh7
-rw-r--r--colorful_in_Azzalee.mp4bin1791291 -> 0 bytes
-rw-r--r--kek_in_Starry_Night.mp4bin4754080 -> 0 bytes
-rw-r--r--neuralart.py14
-rw-r--r--preview/bunnies_in_Azzalee.mp4bin1861866 -> 0 bytes
-rw-r--r--preview/bunnies_in_Jitter_Doll.mp4bin1479507 -> 0 bytes
-rw-r--r--preview/cute_in_Azzalee.mp4bin1871323 -> 0 bytes
-rw-r--r--preview/cute_in_Jitter_Doll.mp4bin1518058 -> 0 bytes
-rwxr-xr-xstylize.sh4
13 files changed, 34 insertions, 6 deletions
diff --git a/Images/Abstract.jpg b/Images/Abstract.jpg
new file mode 100644
index 0000000..0cedbd9
--- /dev/null
+++ b/Images/Abstract.jpg
Binary files differ
diff --git a/Images/colorful.jpg b/Images/Colorful.jpg
index 9c4a499..9c4a499 100644
--- a/Images/colorful.jpg
+++ b/Images/Colorful.jpg
Binary files differ
diff --git a/Images/Shade.jpg b/Images/Shade.jpg
new file mode 100644
index 0000000..ca3da55
--- /dev/null
+++ b/Images/Shade.jpg
Binary files differ
diff --git a/README.md b/README.md
index 7d85cf0..dc9326e 100644
--- a/README.md
+++ b/README.md
@@ -1,19 +1,30 @@
# neural-art
+
Neural Style Transfer done from the CLI using a VGG backbone and presented as an MP4.
-Weights can be downloaded from [here](https://m1.afileditch.ch/ajjMsHrRhnikrrCiUXgY.pth). The downloaded file should be placed in `./weights/` and will be ignored when pushing as seen in `./.gitignore`
+Weights can be downloaded from [here](https://m1.afileditch.ch/ajjMsHrRhnikrrCiUXgY.pth). The downloaded file should be placed in `./weights/`; any file in that directory will be ignored when pushing, as seen in `./.gitignore`. Update: Alternatively, if the `./weights/` directory is empty, `./neuralart.py` will automatically download the aforementioned default weights.
### Why use this in 2023 ?
+
Because style transfer hasn't changed drastically in terms of actual results in the past years. I personally find a certain beauty in inputting a style and content image rather than a well curated prompt with a dozen switches. Consider this repo as a quick *just works* solution that can run on both CPU and GPU effectively.
## Usage
-The script sits comfortably in `./stylize.sh` so just run it in the project directory:
+The script sits comfortably in `./stylize.sh` so run it (strictly from the project directory):
```bash
./stylize.sh path/to/style_image path/to/content_image
```
+A `./all.sh` helper script is also available that runs `./stylize.sh` for each distinct pair of images present in the `./Images/` directory.
+
+```bash
+./all.sh
+```
+
+If, at any point, you are curious about the individual frames that comprise the generated `./content_in_style.mp4`, check `./Output/`.
+There's also a (redundant) `./images.npy` file that contains raw array data. `./clear_dir.sh` removes redundant files each time they're no longer needed.
+
### Requirements
All requirements are specified in `./requirements.txt` as per python etiquette:
diff --git a/all.sh b/all.sh
new file mode 100755
index 0000000..7aaa2ef
--- /dev/null
+++ b/all.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+for style in Images/* ; do
+ for content in Images/* ; do
+ if [ $style != $content ] && [ ! -f $(basename ${content%.*})'_in_'$(basename ${style%.*})'.mp4' ]; then
+ ./stylize.sh $style $content
+ fi ; done ; done
diff --git a/colorful_in_Azzalee.mp4 b/colorful_in_Azzalee.mp4
deleted file mode 100644
index a99b2e1..0000000
--- a/colorful_in_Azzalee.mp4
+++ /dev/null
Binary files differ
diff --git a/kek_in_Starry_Night.mp4 b/kek_in_Starry_Night.mp4
deleted file mode 100644
index 738b418..0000000
--- a/kek_in_Starry_Night.mp4
+++ /dev/null
Binary files differ
diff --git a/neuralart.py b/neuralart.py
index aee66b9..d556d2f 100644
--- a/neuralart.py
+++ b/neuralart.py
@@ -27,6 +27,16 @@ from PIL import Image
model_path = 'weights/vgg_conv_weights.pth'
image_path = '' # root (neural-art) directory
+### userland testing for multiple instances, a big nono currently
+
+n_instances = os.popen('ps aux | grep "python neuralart.py" | wc -l').read()
+if int(n_instances) > 3: print("Woah, running 2 or more instances of neural-art at the same time?\nThis is an experimental feature as of now... try it later favorably :3")
+
+### check if there are any weights to use, if not, download the default provided ones
+if int(os.popen('ls -l weights | wc -l').read()) == 1: os.system('wget -O "weights/vgg_conv_weights.pth" "https://m1.afileditch.ch/ajjMsHrRhnikrrCiUXgY.pth"')
+
+### Defining neural architecture
+
### VGG was trained on IMAGENET
### although old at this point
### it still achieves good results
@@ -63,6 +73,7 @@ class VGG(nn.Module):
self.pool3 = nn.MaxPool2d(kernel_size = 2, stride = 2)
self.pool4 = nn.MaxPool2d(kernel_size = 2, stride = 2)
self.pool5 = nn.MaxPool2d(kernel_size = 2, stride = 2)
+
elif pool == 'avg':
self.pool1 = nn.AvgPool2d(kernel_size = 2, stride = 2)
self.pool2 = nn.AvgPool2d(kernel_size = 2, stride = 2)
@@ -99,7 +110,6 @@ class VGG(nn.Module):
out['r54'] = F.relu(self.conv5_4(out['r53']))
out['p5'] = self.pool5(out['r54'])
-
# RETURN DESIRED ACTIVATIONS
return [out[key] for key in out_keys]
@@ -200,7 +210,7 @@ style_img, content_img = imgs_torch
# CAN BE INITIALIZED RANDOMLY
# OR AS A CLONE OF CONTENT IMAGE
opt_img = Variable(content_img.clone(), requires_grad = True)
-print("Content size:", content_img.size())
+print("Content size:", content_img.size(), sys.argv[2], "in", sys.argv[1])
print("Target size:", opt_img.size(), end="\n\n")
diff --git a/preview/bunnies_in_Azzalee.mp4 b/preview/bunnies_in_Azzalee.mp4
deleted file mode 100644
index 1cdf496..0000000
--- a/preview/bunnies_in_Azzalee.mp4
+++ /dev/null
Binary files differ
diff --git a/preview/bunnies_in_Jitter_Doll.mp4 b/preview/bunnies_in_Jitter_Doll.mp4
deleted file mode 100644
index 0ad194c..0000000
--- a/preview/bunnies_in_Jitter_Doll.mp4
+++ /dev/null
Binary files differ
diff --git a/preview/cute_in_Azzalee.mp4 b/preview/cute_in_Azzalee.mp4
deleted file mode 100644
index 18f2780..0000000
--- a/preview/cute_in_Azzalee.mp4
+++ /dev/null
Binary files differ
diff --git a/preview/cute_in_Jitter_Doll.mp4 b/preview/cute_in_Jitter_Doll.mp4
deleted file mode 100644
index 7eed718..0000000
--- a/preview/cute_in_Jitter_Doll.mp4
+++ /dev/null
Binary files differ
diff --git a/stylize.sh b/stylize.sh
index 2cb7e99..939ec06 100755
--- a/stylize.sh
+++ b/stylize.sh
@@ -31,5 +31,5 @@ python renderer.py
# fix weird render artifacts
python renderer.py --fix
-# wrap everything into a video
-ffmpeg -framerate 60 -pattern_type glob -i 'Output/neural_art_*.png' -vf "pad=ceil(iw/2)*2:ceil(ih/2)*2" $(basename ${2%.*})'_in_'$(basename ${1%.*})'.mp4'
+# wrap everything into a video (automatically overrides)
+ffmpeg -y -framerate 60 -pattern_type glob -i 'Output/neural_art_*.png' -vf "pad=ceil(iw/2)*2:ceil(ih/2)*2" $(basename ${2%.*})'_in_'$(basename ${1%.*})'.mp4'