# -*- coding: utf-8 -*-
"""Toon Image Creator IVP SEM 5 J001 & J064.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1hrI9-hOQw2yo48wAaXvsnYI22PMjbZNZ

***Name of the Project -: Toon Image Creator***

**Programming language used -: Python**

**Platform used -: Google Colab**

**Group Member details -:**

1. Ekansh Agarwal (J001)
2. Aakarsh Kumar (J064)

**Subject -: IVP (Image and Video Processing)**

***Semester -: 5***

***Course -: MBA (Tech) EXTC***

***Outline of the project -: This project implements progressive resizing together with a generator (feature) loss, which builds on the idea of gradually increasing the image size: during training the image size is gradually increased and the learning rates are adjusted accordingly. The study relies on the concepts of upscaling and interpolation; a small illustrative sketch of interpolation-based upscaling follows the import cell below.***

Libraries Used -:

1. NumPy
2. fastai
3. PIL (Pillow)
4. urllib
5. torchvision
6. Requests
"""

# Importing Libraries
import fastai
from fastai.vision import *
from fastai.utils.mem import *
from fastai.vision import open_image, load_learner, image, torch
import numpy as np
import urllib.request
import requests
import PIL.Image
from PIL import Image
from io import BytesIO
import torchvision.transforms as T
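
"""The project outline mentions progressive resizing, i.e. gradually increasing the image
size, together with upscaling and interpolation. The cell below is only a minimal
illustrative sketch of that idea and is not part of the original pipeline: it enlarges an
image in several small interpolation steps rather than one big jump. The file name
`sample.jpg` is a placeholder."""

def progressive_upscale(pil_img, scales=(1.5, 1.5, 2.0)):
    # Enlarge the image step by step, re-interpolating (bicubic) at each stage
    out = pil_img
    for s in scales:
        new_size = (int(out.width * s), int(out.height * s))
        out = out.resize(new_size, resample=PIL.Image.BICUBIC)
    return out

# Example usage with a placeholder file name:
# upscaled = progressive_upscale(PIL.Image.open("sample.jpg"))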

# Defining the helper class and function used to alter the portrait (original image):
# FeatureLoss computes a perceptual loss from pixel, feature and Gram-matrix terms,
# and add_margin pads the image with a solid-colour border.
class FeatureLoss(nn.Module):
    """Perceptual loss combining a pixel loss, feature losses and Gram-matrix losses
    computed from the activations of a fixed feature extractor `m_feat`."""
    def __init__(self, m_feat, layer_ids, layer_wgts):
        super().__init__()
        self.m_feat = m_feat
        self.loss_features = [self.m_feat[i] for i in layer_ids]
        self.hooks = hook_outputs(self.loss_features, detach=False)
        self.wgts = layer_wgts
        self.metric_names = ['pixel',] + [f'feat_{i}' for i in range(len(layer_ids))
              ] + [f'gram_{i}' for i in range(len(layer_ids))]

    def make_features(self, x, clone=False):
        # Run x through the feature extractor and collect the hooked activations
        self.m_feat(x)
        return [(o.clone() if clone else o) for o in self.hooks.stored]

    def forward(self, input, target):
        out_feat = self.make_features(target, clone=True)
        in_feat = self.make_features(input)
        # Pixel loss, weighted feature losses, and weighted Gram-matrix (style) losses
        self.feat_losses = [base_loss(input, target)]
        self.feat_losses += [base_loss(f_in, f_out) * w
                             for f_in, f_out, w in zip(in_feat, out_feat, self.wgts)]
        self.feat_losses += [base_loss(gram_matrix(f_in), gram_matrix(f_out)) * w**2 * 5e3
                             for f_in, f_out, w in zip(in_feat, out_feat, self.wgts)]
        self.metrics = dict(zip(self.metric_names, self.feat_losses))
        return sum(self.feat_losses)

    def __del__(self): self.hooks.remove()

def add_margin(pil_img, top, right, bottom, left, color):
    # Pad the image with a solid-colour border of the given widths
    width, height = pil_img.size
    new_width = width + right + left
    new_height = height + top + bottom
    result = Image.new(pil_img.mode, (new_width, new_height), color)
    result.paste(pil_img, (left, top))
    return result
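
"""`FeatureLoss` above refers to `base_loss` and `gram_matrix`, which are never defined in
this notebook. The cell below supplies the usual definitions for this style of perceptual
loss (an L1 pixel loss and a normalised Gram matrix) so the class can run; treat them as
assumptions, since the original choices are not shown. The commented lines sketch one
possible way to build the loss from a pre-trained VGG16-BN feature extractor, with purely
illustrative layer indices and weights."""

import torch.nn.functional as F

# Assumed pixel-wise base loss (a standard choice for this perceptual-loss pattern)
base_loss = F.l1_loss

def gram_matrix(x):
    # Normalised Gram matrix of a feature map, used for the texture/style terms
    n, c, h, w = x.size()
    x = x.view(n, c, -1)
    return (x @ x.transpose(1, 2)) / (c * h * w)

# Illustrative construction of the loss from a pre-trained VGG16-BN backbone
# (layer choices and weights are examples, not the original training settings):
# from torchvision.models import vgg16_bn
# vgg_m = vgg16_bn(True).features.cuda().eval()
# requires_grad(vgg_m, False)
# blocks = [i - 1 for i, o in enumerate(children(vgg_m)) if isinstance(o, nn.MaxPool2d)]
# feat_loss = FeatureLoss(vgg_m, blocks[2:5], [5, 15, 2])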

"""## **In this section the image is fetched from a URL, padded, and interpolated**"""

url = "https://mediamass.net/jdd/public/documents/celebrities/3946.jpg" #@param {type:"string"}

# Download the portrait, add a white margin around it, save it and display it
response = requests.get(url)
img = PIL.Image.open(BytesIO(response.content)).convert("RGB")
im_new = add_margin(img, 150, 150, 150, 150, (255, 255, 255))
im_new.save("test.jpg", quality=95)
img = open_image("test.jpg")
show_image(img, figsize=(10, 10), interpolation='nearest');
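
"""The two prediction cells below use fastai learners named `learn` and `learn_c` that are
not created anywhere in this notebook (they are presumably trained and exported in a
separate session). The commented lines sketch how they could be restored with fastai v1's
`load_learner`; the directory and file names are placeholders, not the original model
artefacts."""

# Hypothetical restoration of the exported learners (placeholder paths and file names):
# learn = load_learner(Path("models"), "superres_export.pkl")    # upscaling/refinement model
# learn_c = load_learner(Path("models"), "toon_export.pkl")      # toon-style model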

# Run the first learner on the padded image and save the up-scaled result
p, img_hr, b = learn.predict(img)
x = np.minimum(np.maximum(image2np(img_hr.data * 255), 0), 255).astype(np.uint8)
PIL.Image.fromarray(x).save("tes.jpg", quality=95)
img = open_image("tes.jpg")

"""***Resulting image***"""

# Run the second learner and display the final toon-style image
p, img_hr, b = learn_c.predict(img)
show_image(img_hr, figsize=(9, 9), interpolation='nearest');

"""References

1. [Perceptual Losses for Real-Time Style Transfer and Super-Resolution](https://arxiv.org/pdf/1603.08155.pdf)
2. [Enhanced Deep Residual Networks for Single Image Super-Resolution](https://arxiv.org/pdf/1707.02921.pdf)
"""