Commit 2d78898: upgrade layoutlm (#5964)

1 parent a0cdff3
File tree: 6 files changed (+744, -166 lines)

paddlenlp/transformers/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -113,6 +113,7 @@
 from .llama.configuration import *
 from .llama.modeling import *
 from .llama.tokenizer import *
+from .layoutlm.configuration import *
 from .layoutlm.modeling import *
 from .layoutlm.tokenizer import *
 from .layoutlmv2.modeling import *
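
For context, a minimal usage sketch of what this one-line re-export enables; it assumes a PaddleNLP build that includes this commit, and the printed value simply reflects the defaults defined in the new configuration file below.

```python
# Minimal sketch (assumes PaddleNLP installed from a build containing this commit).
from paddlenlp.transformers import LayoutLMConfig

# The added star-import re-exports LayoutLMConfig at the package level, so it
# no longer needs to be imported from paddlenlp.transformers.layoutlm.configuration.
config = LayoutLMConfig()
print(config.hidden_size)  # 768, the default set in LayoutLMConfig.__init__
```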
paddlenlp/transformers/layoutlm/configuration.py

Lines changed: 158 additions & 0 deletions

@@ -0,0 +1,158 @@
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2021 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" LayoutLM model configuration"""

from typing import Dict

from ..configuration_utils import PretrainedConfig

__all__ = ["LAYOUTLM_PRETRAINED_INIT_CONFIGURATION", "LayoutLMConfig", "LAYOUTLM_PRETRAINED_RESOURCE_FILES_MAP"]

LAYOUTLM_PRETRAINED_INIT_CONFIGURATION = {
    "layoutlm-base-uncased": {
        "vocab_size": 30522,
        "hidden_size": 768,
        "num_attention_heads": 12,
        "num_hidden_layers": 12,
        "intermediate_size": 3072,
        "hidden_act": "gelu",
        "hidden_dropout_prob": 0.1,
        "attention_probs_dropout_prob": 0.1,
        "max_position_embeddings": 512,
        "max_2d_position_embeddings": 1024,
        "initializer_range": 0.02,
        "layer_norm_eps": 1e-12,
        "pad_token_id": 0,
        "type_vocab_size": 2,
    },
    "layoutlm-large-uncased": {
        "vocab_size": 30522,
        "hidden_size": 1024,
        "num_attention_heads": 16,
        "num_hidden_layers": 24,
        "intermediate_size": 4096,
        "hidden_act": "gelu",
        "hidden_dropout_prob": 0.1,
        "attention_probs_dropout_prob": 0.1,
        "max_2d_position_embeddings": 1024,
        "max_position_embeddings": 512,
        "initializer_range": 0.02,
        "layer_norm_eps": 1e-12,
        "pad_token_id": 0,
        "type_vocab_size": 2,
    },
}

LAYOUTLM_PRETRAINED_RESOURCE_FILES_MAP = {
    "model_state": {
        "layoutlm-base-uncased": "https://bj.bcebos.com/paddlenlp/models/transformers/layoutlm/layoutlm-base-uncased/model_state.pdparams",
        "layoutlm-large-uncased": "https://bj.bcebos.com/paddlenlp/models/transformers/layoutlm/layoutlm-large-uncased/model_state.pdparams",
    }
}

class LayoutLMConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`LayoutLMModel`]. It is used to instantiate a
    LayoutLM model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a configuration similar to that of the layoutlm-base-uncased
    architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, optional, defaults to 30522):
            Vocabulary size of the LayoutLM model. Defines the number of different tokens that can be represented
            by the *inputs_ids* passed to the forward method of [`LayoutLMModel`].
        hidden_size (`int`, optional, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, optional, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, optional, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, optional, defaults to 3072):
            The dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, optional, defaults to "gelu"):
            The non-linear activation function (function or string) in the encoder and pooler.
        hidden_dropout_prob (`float`, optional, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, optional, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, optional, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something
            large (e.g., 512, 1024, or 2048).
        max_2d_position_embeddings (`int`, optional, defaults to 1024):
            The maximum value that the 2D position embeddings might ever be used with. Typically set this to
            something large just in case (e.g., 1024).
        type_vocab_size (`int`, optional, defaults to 2):
            The vocabulary size of the *token_type_ids* passed into [`LayoutLMModel`].
        initializer_range (`float`, optional, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, optional, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        classifier_dropout (`float`, optional, defaults to 0.1):
            The dropout ratio for attached classifiers.
        pad_token_id (`int`, optional, defaults to 0):
            The id of the padding token.
        pool_act (`str`, optional, defaults to "tanh"):
            The non-linear activation function applied in the pooler.
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.

    Example:

    ```python
    >>> from paddlenlp.transformers import LayoutLMConfig, LayoutLMModel

    >>> # Initializing a LayoutLMConfig configuration
    >>> configuration = LayoutLMConfig()

    >>> # Initializing a model (with random weights) from the layoutlm-base-uncased style configuration
    >>> model = LayoutLMModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
    pretrained_init_configuration = LAYOUTLM_PRETRAINED_INIT_CONFIGURATION
    model_type = "layoutlm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_2d_position_embeddings=1024,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        pool_act="tanh",
        **kwargs
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.pad_token_id = pad_token_id
        self.pool_act = pool_act
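
A brief usage sketch of the new configuration class. The preset name comes from LAYOUTLM_PRETRAINED_INIT_CONFIGURATION above; the from_pretrained call assumes PaddleNLP's usual PretrainedConfig handling of built-in preset names, and the alias lookup assumes the standard attribute_map redirection provided by PretrainedConfig.

```python
from paddlenlp.transformers import LayoutLMConfig

# Build a config from a preset registered in LAYOUTLM_PRETRAINED_INIT_CONFIGURATION
# (assumes PaddleNLP's usual resolution of built-in preset names in from_pretrained).
config = LayoutLMConfig.from_pretrained("layoutlm-large-uncased")
print(config.hidden_size, config.num_hidden_layers)  # 1024 24

# attribute_map declares "dropout" as an alias of "classifier_dropout", so both
# names are expected to resolve to the same stored value.
print(config.dropout, config.classifier_dropout)  # 0.1 0.1
```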
