Skip to content

Commit 0e08e80

Browse files
committed
Created using Colab
1 parent 8585f80 commit 0e08e80

File tree

1 file changed

+221
-0
lines changed

1 file changed

+221
-0
lines changed
Lines changed: 221 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,221 @@
1+
{
2+
"nbformat": 4,
3+
"nbformat_minor": 0,
4+
"metadata": {
5+
"colab": {
6+
"provenance": [],
7+
"authorship_tag": "ABX9TyNTUIlwVwyn/MGWQv2x/gQg",
8+
"include_colab_link": true
9+
},
10+
"kernelspec": {
11+
"name": "python3",
12+
"display_name": "Python 3"
13+
},
14+
"language_info": {
15+
"name": "python"
16+
}
17+
},
18+
"cells": [
19+
{
20+
"cell_type": "markdown",
21+
"metadata": {
22+
"id": "view-in-github",
23+
"colab_type": "text"
24+
},
25+
"source": [
26+
"<a href=\"https://colab.research.google.com/github/rawanaldaneen/pytorch_row/blob/main/letter_recognition_using_LoRA.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
27+
]
28+
},
29+
{
30+
"cell_type": "markdown",
31+
"source": [
32+
"The following code defines a neural network called NNet.\n",
33+
"\n",
34+
"NNet is a neural network that was originally written to identify hand-written digits from 32x32 images. Your task is to fine-tune this network to perform letter recognition using LoRA.\n",
35+
" To enhance your understanding, apply LoRA to just the second linear layer, and replace the last layer with a layer that has 26 outputs, one for each letter in the English alphabet."
36+
],
37+
"metadata": {
38+
"id": "OFqk8sQxtT1d"
39+
}
40+
},
41+
{
42+
"cell_type": "code",
43+
"execution_count": null,
44+
"metadata": {
45+
"id": "K9R2t_e0sWku"
46+
},
47+
"outputs": [],
48+
"source": []
49+
},
50+
{
51+
"cell_type": "code",
52+
"metadata": {
53+
"colab": {
54+
"base_uri": "https://localhost:8080/"
55+
},
56+
"id": "5dc070f1",
57+
"outputId": "c1119959-0f30-4fb2-b4af-9d7dd8454769"
58+
},
59+
"source": [
60+
"!pip install torch torchvision torchaudio"
61+
],
62+
"execution_count": 1,
63+
"outputs": [
64+
{
65+
"output_type": "stream",
66+
"name": "stdout",
67+
"text": [
68+
"Requirement already satisfied: torch in /usr/local/lib/python3.12/dist-packages (2.8.0+cu126)\n",
69+
"Requirement already satisfied: torchvision in /usr/local/lib/python3.12/dist-packages (0.23.0+cu126)\n",
70+
"Requirement already satisfied: torchaudio in /usr/local/lib/python3.12/dist-packages (2.8.0+cu126)\n",
71+
"Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from torch) (3.19.1)\n",
72+
"Requirement already satisfied: typing-extensions>=4.10.0 in /usr/local/lib/python3.12/dist-packages (from torch) (4.15.0)\n",
73+
"Requirement already satisfied: setuptools in /usr/local/lib/python3.12/dist-packages (from torch) (75.2.0)\n",
74+
"Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch) (1.13.3)\n",
75+
"Requirement already satisfied: networkx in /usr/local/lib/python3.12/dist-packages (from torch) (3.5)\n",
76+
"Requirement already satisfied: jinja2 in /usr/local/lib/python3.12/dist-packages (from torch) (3.1.6)\n",
77+
"Requirement already satisfied: fsspec in /usr/local/lib/python3.12/dist-packages (from torch) (2025.3.0)\n",
78+
"Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch) (12.6.77)\n",
79+
"Requirement already satisfied: nvidia-cuda-runtime-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch) (12.6.77)\n",
80+
"Requirement already satisfied: nvidia-cuda-cupti-cu12==12.6.80 in /usr/local/lib/python3.12/dist-packages (from torch) (12.6.80)\n",
81+
"Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch) (9.10.2.21)\n",
82+
"Requirement already satisfied: nvidia-cublas-cu12==12.6.4.1 in /usr/local/lib/python3.12/dist-packages (from torch) (12.6.4.1)\n",
83+
"Requirement already satisfied: nvidia-cufft-cu12==11.3.0.4 in /usr/local/lib/python3.12/dist-packages (from torch) (11.3.0.4)\n",
84+
"Requirement already satisfied: nvidia-curand-cu12==10.3.7.77 in /usr/local/lib/python3.12/dist-packages (from torch) (10.3.7.77)\n",
85+
"Requirement already satisfied: nvidia-cusolver-cu12==11.7.1.2 in /usr/local/lib/python3.12/dist-packages (from torch) (11.7.1.2)\n",
86+
"Requirement already satisfied: nvidia-cusparse-cu12==12.5.4.2 in /usr/local/lib/python3.12/dist-packages (from torch) (12.5.4.2)\n",
87+
"Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in /usr/local/lib/python3.12/dist-packages (from torch) (0.7.1)\n",
88+
"Requirement already satisfied: nvidia-nccl-cu12==2.27.3 in /usr/local/lib/python3.12/dist-packages (from torch) (2.27.3)\n",
89+
"Requirement already satisfied: nvidia-nvtx-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch) (12.6.77)\n",
90+
"Requirement already satisfied: nvidia-nvjitlink-cu12==12.6.85 in /usr/local/lib/python3.12/dist-packages (from torch) (12.6.85)\n",
91+
"Requirement already satisfied: nvidia-cufile-cu12==1.11.1.6 in /usr/local/lib/python3.12/dist-packages (from torch) (1.11.1.6)\n",
92+
"Requirement already satisfied: triton==3.4.0 in /usr/local/lib/python3.12/dist-packages (from torch) (3.4.0)\n",
93+
"Requirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from torchvision) (2.0.2)\n",
94+
"Requirement already satisfied: pillow!=8.3.*,>=5.3.0 in /usr/local/lib/python3.12/dist-packages (from torchvision) (11.3.0)\n",
95+
"Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch) (1.3.0)\n",
96+
"Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.12/dist-packages (from jinja2->torch) (3.0.2)\n"
97+
]
98+
}
99+
]
100+
},
101+
{
102+
"cell_type": "code",
103+
"metadata": {
104+
"id": "fd55627b"
105+
},
106+
"source": [
107+
"import torch\n",
108+
"import torch.nn as nn\n",
109+
"import torch.nn.functional as F\n",
110+
"import torch.optim as optim"
111+
],
112+
"execution_count": 2,
113+
"outputs": []
114+
},
115+
{
116+
"cell_type": "code",
117+
"source": [
118+
"class NNet(nn.Module):\n",
119+
"\n",
120+
" def __init__(self):\n",
121+
" super(NNet, self).__init__()\n",
122+
" # 1 input image channel, 6 output channels, 5x5 square convolution\n",
123+
" # kernel\n",
124+
" self.conv1 = nn.Conv2d(1, 6, 5)\n",
125+
" self.conv2 = nn.Conv2d(6, 16, 5)\n",
126+
" # an affine operation: y = Wx + b\n",
127+
" self.fc1 = nn.Linear(16 * 5 * 5, 120) # 5*5 from image dimension\n",
128+
" self.fc2 = nn.Linear(120, 84)\n",
129+
" self.fc3 = nn.Linear(84, 10)\n",
130+
"\n",
131+
" def forward(self, input):\n",
132+
" # Convolution layer C1: 1 input image channel, 6 output channels,\n",
133+
" # 5x5 square convolution, it uses RELU activation function, and\n",
134+
" # outputs a Tensor with size (N, 6, 28, 28), where N is the size of the batch\n",
135+
" c1 = F.relu(self.conv1(input))\n",
136+
" # Subsampling layer S2: 2x2 grid, purely functional,\n",
137+
" # this layer does not have any parameter, and outputs a (N, 6, 14, 14) Tensor\n",
138+
" s2 = F.max_pool2d(c1, (2, 2))\n",
139+
" # Convolution layer C3: 6 input channels, 16 output channels,\n",
140+
" # 5x5 square convolution, it uses RELU activation function, and\n",
141+
" # outputs a (N, 16, 10, 10) Tensor\n",
142+
" c3 = F.relu(self.conv2(s2))\n",
143+
" # Subsampling layer S4: 2x2 grid, purely functional,\n",
144+
" # this layer does not have any parameter, and outputs a (N, 16, 5, 5) Tensor\n",
145+
" s4 = F.max_pool2d(c3, 2)\n",
146+
" # Flatten operation: purely functional, outputs a (N, 400) Tensor\n",
147+
" s4 = torch.flatten(s4, 1)\n",
148+
" # Fully connected layer F5: (N, 400) Tensor input,\n",
149+
" # and outputs a (N, 120) Tensor, it uses RELU activation function\n",
150+
" f5 = F.relu(self.fc1(s4))\n",
151+
" # Fully connected layer F6: (N, 120) Tensor input,\n",
152+
" # and outputs a (N, 84) Tensor, it uses RELU activation function\n",
153+
" f6 = F.relu(self.fc2(f5))\n",
154+
" # Gaussian layer OUTPUT: (N, 84) Tensor input, and\n",
155+
" # outputs a (N, 10) Tensor\n",
156+
" output = self.fc3(f6)\n",
157+
" return output\n",
158+
"\n",
159+
"# Define device\n",
160+
"device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
161+
"\n",
162+
"model_exercise = NNet()\n",
163+
"model_exercise.to(device)\n",
164+
"\n",
165+
"print('This is what the model looked like before applying LoRA:')\n",
166+
"print(model_exercise)\n",
167+
"print(\"\\n###############\\n\")\n",
168+
"\n",
169+
"# Freeze all parameters:\n",
170+
"for parm in model_exercise.parameters():\n",
171+
" parm.requires_grad=False\n",
172+
"\n",
173+
"# Change final layer for one with 26 outputs:\n",
174+
"model_exercise.fc3=nn.Linear(in_features=84, out_features=26, bias=True).to(device)\n",
175+
"\n",
176+
"# Apply LoRA to the second linear layer\n",
177+
"model_exercise.fc2=LinearWithLoRA(model_exercise.fc2,rank=2, alpha=0.1).to(device)\n",
178+
"\n",
179+
"print('This is what the model looked like after applying LoRA:')\n",
180+
"print(model_exercise)"
181+
],
182+
"metadata": {
183+
"colab": {
184+
"base_uri": "https://localhost:8080/"
185+
},
186+
"id": "lmZOWpy5sXR0",
187+
"outputId": "e8a453cf-db32-4fde-c59a-d5ce83695750"
188+
},
189+
"execution_count": 6,
190+
"outputs": [
191+
{
192+
"output_type": "stream",
193+
"name": "stdout",
194+
"text": [
195+
"This is what the model looked like before applying LoRA:\n",
196+
"NNet(\n",
197+
" (conv1): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1))\n",
198+
" (conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))\n",
199+
" (fc1): Linear(in_features=400, out_features=120, bias=True)\n",
200+
" (fc2): Linear(in_features=120, out_features=84, bias=True)\n",
201+
" (fc3): Linear(in_features=84, out_features=10, bias=True)\n",
202+
")\n",
203+
"\n",
204+
"###############\n",
205+
"\n",
206+
"This is what the model looked like after applying LoRA:\n",
207+
"NNet(\n",
208+
" (conv1): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1))\n",
209+
" (conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))\n",
210+
" (fc1): Linear(in_features=400, out_features=120, bias=True)\n",
211+
" (fc2): LinearWithLoRA(\n",
212+
" (linear): Linear(in_features=120, out_features=84, bias=True)\n",
213+
" )\n",
214+
" (fc3): Linear(in_features=84, out_features=26, bias=True)\n",
215+
")\n"
216+
]
217+
}
218+
]
219+
}
220+
]
221+
}

0 commit comments

Comments
 (0)