!pip install datasets transformers[sentencepiece]
Successfully installed aiohttp-3.8.1 aiosignal-1.2.0 async-timeout-4.0.2 asynctest-0.13.0 datasets-1.18.0 frozenlist-1.3.0 fsspec-2022.1.0 huggingface-hub-0.4.0 multidict-6.0.2 pyyaml-6.0 sacremoses-0.0.47 sentencepiece-0.1.96 tokenizers-0.10.3 transformers-4.15.0 xxhash-2.0.2 yarl-1.7.2
import torch
import torch.nn as nn
import pandas as pd

from datasets import load_dataset, Dataset, DatasetDict
from transformers import (AutoConfig, AutoModel, AutoModelForSequenceClassification,
                          AutoTokenizer, DataCollatorWithPadding, Trainer, TrainingArguments)
from transformers.modeling_outputs import TokenClassifierOutput
!mkdir -p ~/.kaggle
!cp kaggle.json ~/.kaggle/
!chmod 600 ~/.kaggle/kaggle.json
!kaggle datasets download -d rmisra/news-headlines-dataset-for-sarcasm-detection
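The Kaggle CLI drops the archive into the current working directory (/content on Colab); a quick listing is a handy sanity check before pointing load_dataset at that path:

!ls -lh /content/news-headlines-dataset-for-sarcasm-detection.zip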

# Load the downloaded archive with the json builder, then tidy the columns.
data = load_dataset("json", data_files="/content/news-headlines-dataset-for-sarcasm-detection.zip")
data = data.rename_column("is_sarcastic", "label")
data = data.remove_columns(["article_link"])

# Round-trip through pandas to drop duplicate headlines, then rebuild the Dataset.
data.set_format("pandas")
data = data["train"][:]
data.drop_duplicates(subset=["headline"], inplace=True)
data = data.reset_index()[["headline", "label"]]
data = Dataset.from_pandas(data)
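Before splitting, a quick sanity check (illustrative; exact counts depend on the dataset version Kaggle serves) confirms the deduplication worked and shows the class balance:

df = data.to_pandas()
print(df["headline"].duplicated().sum())        # expect 0 after drop_duplicates
print(df["label"].value_counts(normalize=True)) # rough sarcastic/non-sarcastic ratio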

# 80% train, 20% test + validation
train_testvalid = data.train_test_split(test_size=0.2, seed=15)

# Split the 20% test + validation pool in half: 10% test, 10% validation
test_valid = train_testvalid['test'].train_test_split(test_size=0.5, seed=15)

# Gather the three pieces into a single DatasetDict
data = DatasetDict({
    'train': train_testvalid['train'],
    'test': test_valid['test'],
    'valid': test_valid['train']})

data
Downloading news-headlines-dataset-for-sarcasm-detection.zip to /content
100% 3.30M/3.30M [00:00<00:00, 52.4MB/s]
Using custom data configuration default-fc870d38e04a4a8f
Downloading and preparing dataset json/default to /root/.cache/huggingface/datasets/json/default-fc870d38e04a4a8f/0.0.0/ac0ca5f5289a6cf108e706efcf040422dbbfa8e658dee6a819f20d76bb84d26b...
Dataset json downloaded and prepared to /root/.cache/huggingface/datasets/json/default-fc870d38e04a4a8f/0.0.0/ac0ca5f5289a6cf108e706efcf040422dbbfa8e658dee6a819f20d76bb84d26b. Subsequent calls will reuse this data.
DatasetDict({
    train: Dataset({
        features: ['headline', 'label'],
        num_rows: 22802
    })
    test: Dataset({
        features: ['headline', 'label'],
        num_rows: 2851
    })
    valid: Dataset({
        features: ['headline', 'label'],
        num_rows: 2850
    })
})
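A quick check (illustrative) that the split landed on the intended 80/10/10 proportions:

n = sum(data[split].num_rows for split in data)
print({split: round(data[split].num_rows / n, 3) for split in data})
# {'train': 0.8, 'test': 0.1, 'valid': 0.1}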
checkpoint = "cardiffnlp/twitter-roberta-base-emotion"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
tokenizer.model_max_length = 512  # cap inputs at the model's 512-token limit
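Before mapping the tokenizer over the whole dataset, it helps to see what it produces for a single (made-up) headline:

sample = tokenizer("area man reads documentation", truncation=True, max_length=512)
print(sample.keys())                                         # input_ids, attention_mask
print(tokenizer.convert_ids_to_tokens(sample["input_ids"]))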
def tokenize(batch):
    return tokenizer(batch["headline"], truncation=True, max_length=512)

tokenized_dataset = data.map(tokenize, batched=True)
tokenized_dataset
DatasetDict({
    train: Dataset({
        features: ['headline', 'label', 'input_ids', 'attention_mask'],
        num_rows: 22802
    })
    test: Dataset({
        features: ['headline', 'label', 'input_ids', 'attention_mask'],
        num_rows: 2851
    })
    valid: Dataset({
        features: ['headline', 'label', 'input_ids', 'attention_mask'],
        num_rows: 2850
    })
})
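DataCollatorWithPadding was imported earlier but is only needed at training time; here is a minimal sketch (illustrative, not part of the pipeline above) of how it pads a handful of examples dynamically, after dropping the raw string column the collator cannot tensorize:

data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
train_features = tokenized_dataset["train"].remove_columns(["headline"])
batch = data_collator([train_features[i] for i in range(4)])
print({k: v.shape for k, v in batch.items()})  # input_ids/attention_mask padded to the longest of the 4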