diff --git a/adapter_classifier.py b/adapter_classifier.py
index 8352974cf8fa9a075692bcfdd1f271b8c444ac6e..478ae6e926f78e7590ed710be3cfb0cf941c32c0 100644
--- a/adapter_classifier.py
+++ b/adapter_classifier.py
@@ -73,16 +73,16 @@
 train_dataset = train_dataset.map(encode_batch, batched=True)
 train_dataset.set_format(type="torch", columns=["input_ids", "attention_mask", "labels"])
 encoded_dev_dataset = {}
-    for corpus in dev_dict_dataset:
-        temp = dev_dict_dataset[corpus].map(encode_batch, batched=True)
-        temp.set_format(type="torch", columns=["input_ids", "attention_mask", "labels"])
-        encoded_dev_dataset[corpus] = temp
+for corpus in dev_dict_dataset:
+    temp = dev_dict_dataset[corpus].map(encode_batch, batched=True)
+    temp.set_format(type="torch", columns=["input_ids", "attention_mask", "labels"])
+    encoded_dev_dataset[corpus] = temp
 
 encoded_test_dataset = {}
-    for corpus in test_dict_dataset:
-        temp = test_dict_dataset[corpus].map(encode_batch, batched=True)
-        temp.set_format(type="torch", columns=["input_ids", "attention_mask", "labels"])
-        encoded_test_dataset[corpus] = temp
+for corpus in test_dict_dataset:
+    temp = test_dict_dataset[corpus].map(encode_batch, batched=True)
+    temp.set_format(type="torch", columns=["input_ids", "attention_mask", "labels"])
+    encoded_test_dataset[corpus] = temp
 
 # ===============================
 # ## Training params