Skip to content

Sentiment

BertTextEncoder

Bases: Module

Source code in Agent/modules/emotion_detection/sentiment.py
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
class BertTextEncoder(nn.Module):
    """Text encoder wrapping a locally stored pretrained BERT checkpoint.

    Loads tokenizer and model weights from ``{models_dir}/bert_en`` or
    ``{models_dir}/bert_cn`` (local directories, not the HuggingFace hub).
    """

    def __init__(self, language="en", use_finetune=False):
        """
        Args:
            language: "en" or "cn" — selects which local BERT checkpoint to load.
            use_finetune: if True, gradients flow through BERT in ``forward``;
                if False, BERT runs under ``torch.no_grad()`` (frozen features).

        Raises:
            ValueError: if ``language`` is not "en" or "cn".
        """
        super(BertTextEncoder, self).__init__()

        # Validate eagerly with a real exception; `assert` is stripped under -O.
        if language not in ("en", "cn"):
            raise ValueError(f"language must be 'en' or 'cn', got {language!r}")

        tokenizer_class = BertTokenizer
        model_class = BertModel
        # directory is fine
        # pretrained_weights = '/home/sharing/disk3/pretrained_embedding/Chinese/bert/pytorch'
        if language == "en":
            self.tokenizer = tokenizer_class.from_pretrained(
                f"{models_dir}/bert_en", do_lower_case=True
            )
            self.model = model_class.from_pretrained(f"{models_dir}/bert_en")
        else:  # "cn"
            self.tokenizer = tokenizer_class.from_pretrained(f"{models_dir}/bert_cn")
            self.model = model_class.from_pretrained(f"{models_dir}/bert_cn")

        self.use_finetune = use_finetune

    def get_tokenizer(self):
        """Return the underlying HuggingFace tokenizer."""
        return self.tokenizer

    def from_text(self, text):
        """Encode raw text and return the squeezed last hidden states (no grad).

        NOTE(review): ``self.get_id`` is not defined anywhere in the visible
        source — calling this method as-is raises AttributeError. Presumably it
        should tokenize ``text`` into an input-id tensor; confirm against the
        full module before relying on this method.
        """
        input_ids = self.get_id(text)
        with torch.no_grad():
            last_hidden_states = self.model(input_ids)[
                0
            ]  # Models outputs are now tuples
        return last_hidden_states.squeeze()

    def forward(self, text):
        """Tokenize ``text`` and return BERT's last hidden states.

        Args:
            text: raw string (or whatever ``self.tokenizer.__call__`` accepts).

        Returns:
            Tensor of shape (1, seq_len, hidden_size): last hidden states.
        """
        encoded = self.tokenizer(text)
        # BUG FIX: attention_mask and token_type_ids were previously swapped —
        # token_type_ids was passed as the attention mask (cast to float) and
        # attention_mask as token_type_ids, corrupting both inputs to BERT.
        input_ids = torch.tensor(encoded["input_ids"]).long().unsqueeze(0)
        input_mask = torch.tensor(encoded["attention_mask"]).unsqueeze(0).float()
        segment_ids = torch.tensor(encoded["token_type_ids"]).unsqueeze(0).long()
        if self.use_finetune:
            last_hidden_states = self.model(
                input_ids=input_ids,
                attention_mask=input_mask,
                token_type_ids=segment_ids,
            )[
                0
            ]  # Models outputs are now tuples
        else:
            with torch.no_grad():
                last_hidden_states = self.model(
                    input_ids=input_ids,
                    attention_mask=input_mask,
                    token_type_ids=segment_ids,
                )[
                    0
                ]  # Models outputs are now tuples
        return last_hidden_states

__init__(language='en', use_finetune=False)

language: en / cn

Source code in Agent/modules/emotion_detection/sentiment.py
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
def __init__(self, language="en", use_finetune=False):
    """Load a local BERT tokenizer/model for ``language`` ("en" or "cn").

    ``use_finetune`` controls whether forward passes keep gradients.
    """
    super(BertTextEncoder, self).__init__()

    assert language in ["en", "cn"]

    # Checkpoints live in local directories named after the language code.
    weights_path = f"{models_dir}/bert_{language}"
    if language == "en":
        # English checkpoint is uncased, so lower-case during tokenization.
        self.tokenizer = BertTokenizer.from_pretrained(
            weights_path, do_lower_case=True
        )
    else:
        self.tokenizer = BertTokenizer.from_pretrained(weights_path)
    self.model = BertModel.from_pretrained(weights_path)

    self.use_finetune = use_finetune

forward(text)

text: (batch_size, 3, seq_len), where the 3 channels are input_ids, input_mask (the attention_mask), and segment_ids (the token_type_ids).

Source code in Agent/modules/emotion_detection/sentiment.py
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
def forward(self, text):
    """Tokenize ``text`` and return BERT's last hidden states.

    Args:
        text: raw string (or whatever ``self.tokenizer.__call__`` accepts).

    Returns:
        Tensor of shape (1, seq_len, hidden_size): last hidden states.
        Gradients flow only when ``self.use_finetune`` is True.
    """
    encoded = self.tokenizer(text)
    # BUG FIX: attention_mask and token_type_ids were previously swapped —
    # token_type_ids was passed as the attention mask (cast to float) and
    # attention_mask as token_type_ids, corrupting both inputs to BERT.
    input_ids = torch.tensor(encoded["input_ids"]).long().unsqueeze(0)
    input_mask = torch.tensor(encoded["attention_mask"]).unsqueeze(0).float()
    segment_ids = torch.tensor(encoded["token_type_ids"]).unsqueeze(0).long()
    if self.use_finetune:
        last_hidden_states = self.model(
            input_ids=input_ids,
            attention_mask=input_mask,
            token_type_ids=segment_ids,
        )[
            0
        ]  # Models outputs are now tuples
    else:
        with torch.no_grad():
            last_hidden_states = self.model(
                input_ids=input_ids,
                attention_mask=input_mask,
                token_type_ids=segment_ids,
            )[
                0
            ]  # Models outputs are now tuples
    return last_hidden_states

from_text(text)

text: raw data

Source code in Agent/modules/emotion_detection/sentiment.py
204
205
206
207
208
209
210
211
212
213
def from_text(self, text):
    """Encode raw ``text`` and return the squeezed last hidden states.

    Runs the model without gradients (feature-extraction only).

    NOTE(review): ``self.get_id`` is not defined in the visible source —
    confirm it exists in the full module before calling this method.
    """
    token_ids = self.get_id(text)
    with torch.no_grad():
        # HuggingFace models return tuples; index 0 is the last hidden state.
        hidden = self.model(token_ids)[0]
    return hidden.squeeze()