# wikisection/preprocess_util.py
import json

### NLTK ###
try:
    import nltk

    # Make sure the 'punkt' sentence tokenizer models are available locally.
    try:
        nltk.data.find('tokenizers/punkt')
    except LookupError:
        nltk.download('punkt')

    def nltk_sent_tokenize(text: str):
        return nltk.sent_tokenize(text)
except ImportError:
    pass

### Spacy ###
try:
    import spacy

    # Exclude everything not needed for sentence segmentation.
    exclude = ["tok2vec", "tagger", "parser", "attribute_ruler", "lemmatizer", "ner"]
    try:
        spacy_nlp = spacy.load('en_core_web_sm', exclude=exclude)
    except OSError:
        spacy.cli.download('en_core_web_sm')
        spacy_nlp = spacy.load('en_core_web_sm', exclude=exclude)
    # 'senter' ships disabled in en_core_web_sm (the parser normally sets
    # sentence boundaries); enable it so `doc.sents` works without the parser.
    spacy_nlp.enable_pipe("senter")

    def spacy_sent_tokenize(text: str):
        return [sent.text for sent in spacy_nlp(text).sents]
except ImportError:
    pass
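
# With the exclusions above, only the sentence segmenter should remain active
# (a sketch of the expected pipeline state, assuming en_core_web_sm 3.x):
#
#     >>> spacy_nlp.pipe_names
#     ['senter']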

### Segtok ###
try:
    from segtok.segmenter import split_single

    def segtok_sent_tokenize(text: str):
        return split_single(text)
except ImportError:
    pass


def sent_tokenize(text: str, method: str):
    """Split `text` into stripped, non-empty sentences using the chosen backend."""
    if method == 'nltk':
        stok = nltk_sent_tokenize
    elif method == 'spacy':
        stok = spacy_sent_tokenize
    elif method == 'segtok':
        stok = segtok_sent_tokenize
    else:
        raise ValueError(f"Invalid sentence tokenizer method: {method}")
    return [ssent for sent in stok(text) if (ssent := sent.strip())]
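
# A minimal usage sketch (assuming NLTK and its 'punkt' models are installed);
# every backend returns a list of stripped, non-empty sentence strings:
#
#     >>> sent_tokenize("First sentence. Second sentence.", method='nltk')
#     ['First sentence.', 'Second sentence.']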


def parse_split(filepath: str, drop_titles: bool = False, sent_tokenize_method: str = 'nltk'):
    """Parse one WikiSection split file and yield a flattened record per document."""
    with open(filepath, 'r') as f:
        data = json.load(f)

    for row in data:
        doc_id = row['id']
        title = row['title']
        text = row['text']
        sections = row['annotations']

        doc = {
            'id': doc_id,
            'title': title,
            'ids': [],
            'sentences': [],
            'titles_mask': [],
            'labels': [],
        }
        for sec_idx, sec in enumerate(sections):
            sec_title = sec['sectionHeading'].strip()
            sec_text = text[sec['begin']:sec['begin'] + sec['length']]
            sentences = sent_tokenize(sec_text, method=sent_tokenize_method)

            # Skip empty sections
            if not sentences:
                continue

            # Add the section title as a single "sentence", flagged in titles_mask
            if not drop_titles and sec_title:
                doc['ids'].append(f'{sec_idx}')
                doc['sentences'].append(sec_title)
                doc['titles_mask'].append(1)
                doc['labels'].append(0)

            # Add the section's sentences; the last one marks a segment boundary
            for sent_idx, sent in enumerate(sentences):
                doc['ids'].append(f'{sec_idx}_{sent_idx}')
                doc['sentences'].append(sent)
                doc['titles_mask'].append(0)
                doc['labels'].append(1 if sent_idx == len(sentences) - 1 else 0)

        if drop_titles:
            doc.pop('titles_mask')

        yield doc
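
# A minimal usage sketch (the path below is hypothetical; point it at an actual
# WikiSection split file):
#
#     >>> docs = parse_split('path/to/wikisection_split.json', sent_tokenize_method='nltk')
#     >>> first = next(docs)
#     >>> sorted(first.keys())
#     ['id', 'ids', 'labels', 'sentences', 'title', 'titles_mask']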