|  | 
|  | 1 | +# Copyright (c) Meta Platforms, Inc. and affiliates. | 
|  | 2 | +# All rights reserved. | 
|  | 3 | +# | 
|  | 4 | +# This source code is licensed under the BSD-style license found in the | 
|  | 5 | +# LICENSE file in the root directory of this source tree. | 
|  | 6 | + | 
|  | 7 | +# pyre-unsafe | 
|  | 8 | + | 
|  | 9 | +import torch | 
|  | 10 | +import torch.nn as nn | 
|  | 11 | +import torch.nn.functional as F | 
|  | 12 | + | 
|  | 13 | + | 
class ConvlLinearModel(nn.Module):
    """
    A small deterministic fixture model: a 1->1 Conv2d (3x3, padding=1)
    followed by a flatten and a 4->2 Linear layer, then a chain of fixed
    elementwise ops (add, sub, mul, div, relu, sigmoid) and a final split.

    All weights and biases are hard-coded (no randomness) so that every
    intermediate output is exactly reproducible; the expected values for a
    fixed input are exposed via :meth:`get_expected_intermediate_outputs`.
    """

    def __init__(self):
        super().__init__()
        # 3x3 conv with padding=1 keeps spatial size; weights are fixed.
        self.conv_layer = nn.Conv2d(
            in_channels=1, out_channels=1, kernel_size=3, stride=1, padding=1
        )
        self.conv_layer.weight = nn.Parameter(
            torch.tensor([[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]]]])
        )
        self.conv_layer.bias = nn.Parameter(torch.tensor([0.0]))

        # 4 inputs (flattened 1x2x2 conv output) -> 2 outputs, fixed weights.
        self.linear_layer = nn.Linear(in_features=4, out_features=2)
        self.linear_layer.weight = nn.Parameter(
            torch.tensor([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8]])
        )
        self.linear_layer.bias = nn.Parameter(torch.tensor([0.0, 0.0]))
        # Constant (non-trainable) tensors used by the elementwise op chain.
        self.additional_bias = nn.Parameter(
            torch.tensor([0.5, -0.5]), requires_grad=False
        )
        self.scale_factor = nn.Parameter(torch.tensor([2.0, 0.5]), requires_grad=False)

    def forward(self, x):
        """Run the fixed op chain; returns the two 1-column split halves."""
        x = self.conv_layer(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 4)
        x = self.linear_layer(x)
        x = x + self.additional_bias
        x = x - 0.1
        x = x * self.scale_factor
        x = x / (self.scale_factor + 1.0)
        x = F.relu(x)
        x = torch.sigmoid(x)
        # Split the (batch, 2) tensor into two (batch, 1) outputs.
        output1, output2 = torch.split(x, 1, dim=1)
        return output1, output2

    @staticmethod
    def get_input():
        """
        Returns the pre-defined input tensor for this model.
        """
        return torch.tensor([[[[1.0, 2.0], [3.0, 4.0]]]], requires_grad=True)

    @staticmethod
    def get_expected_intermediate_outputs():
        """
        Returns the expected outputs of the debug handles and intermediate output mapping for this model for the given input.

        Keys are debug-handle tuples; values are the tensor (or list of
        tensors, for the final split) produced by the corresponding op when
        the model is run on :meth:`get_input`.
        """
        return {
            (10,): torch.tensor([[[[7.7000, 6.7000], [4.7000, 3.7000]]]]),
            (11,): torch.tensor([[7.7000, 6.7000, 4.7000, 3.7000]]),
            (12,): torch.tensor(
                [
                    [0.1000, 0.5000],
                    [0.2000, 0.6000],
                    [0.3000, 0.7000],
                    [0.4000, 0.8000],
                ]
            ),
            (13,): torch.tensor([[5.0000, 14.1200]]),
            (14,): torch.tensor([[5.5000, 13.6200]]),
            (15,): torch.tensor([[5.4000, 13.5200]]),
            (16,): torch.tensor([[10.8000, 6.7600]]),
            (17,): torch.tensor([3.0000, 1.5000]),
            (18,): torch.tensor([[3.6000, 4.5067]]),
            (19,): torch.tensor([[3.6000, 4.5067]]),
            (20,): torch.tensor([[0.9734, 0.9891]]),
            (21,): [torch.tensor([[0.9734]]), torch.tensor([[0.9891]])],
        }
|  | 85 | + | 
|  | 86 | + | 
# Global model registry: maps a model name string to its nn.Module subclass
# (the class itself, not an instance — callers instantiate or use the
# staticmethods as needed).
model_registry = {
    "ConvLinearModel": ConvlLinearModel,
    # Add new models here
}
|  | 92 | + | 
|  | 93 | + | 
# Shared tolerances for all intermediate-output comparisons.
_RTOL = 1e-4
_ATOL = 1e-5


def _tensors_close(actual, expected):
    """Return True when two tensors match within the module tolerances."""
    return torch.allclose(actual, expected, rtol=_RTOL, atol=_ATOL)


def check_if_final_outputs_match(model_name, actual_outputs_with_handles):
    """
    Check the actual intermediate outputs against the expected ones for the
    named model (looked up in ``model_registry``).

    ``actual_outputs_with_handles`` maps debug-handle tuples to either a
    tensor or a list of tensors, mirroring the structure returned by the
    model's ``get_expected_intermediate_outputs``.

    Returns True if every expected handle is present and all values match
    within tolerance, otherwise False.
    """
    model_instance = model_registry[model_name]
    expected_outputs_with_handles = model_instance.get_expected_intermediate_outputs()
    # Same number of entries is required; extra or missing handles fail fast.
    if len(actual_outputs_with_handles) != len(expected_outputs_with_handles):
        return False
    for debug_handle, expected_output in expected_outputs_with_handles.items():
        actual_output = actual_outputs_with_handles.get(debug_handle)
        if actual_output is None:
            return False
        if isinstance(expected_output, list):
            # List-valued entries (e.g. a split op) must match element-wise.
            if not isinstance(actual_output, list):
                return False
            if len(actual_output) != len(expected_output):
                return False
            if not all(
                _tensors_close(actual, expected)
                for actual, expected in zip(actual_output, expected_output)
            ):
                return False
        elif not _tensors_close(actual_output, expected_output):
            return False
    return True
0 commit comments