import json

# Each tokenizer backend below (NLTK, spaCy, segtok) is optional: its wrapper
# is only defined if the corresponding package is installed.

### NLTK ###
try:
    import nltk
    try:
        nltk.data.find('tokenizers/punkt')
    except LookupError:
        # Fetch the Punkt sentence tokenizer model on first use
        nltk.download('punkt')

    def nltk_sent_tokenize(text: str):
        return nltk.sent_tokenize(text)
except ImportError:
    pass

### Spacy ###
try:
    import spacy
    # Exclude the heavy components; sentence boundaries come from the lightweight
    # "senter" component, which ships disabled and is enabled below.
    exclude = ["tok2vec", "tagger", "parser", "attribute_ruler", "lemmatizer", "ner"]
    try:
        spacy_nlp = spacy.load('en_core_web_sm', exclude=exclude)
    except OSError:
        # Model not installed yet: download it, then retry
        spacy.cli.download('en_core_web_sm')
        spacy_nlp = spacy.load('en_core_web_sm', exclude=exclude)
    spacy_nlp.enable_pipe("senter")

    def spacy_sent_tokenize(text: str):
        return [sent.text for sent in spacy_nlp(text).sents]
except ImportError:
    pass

### Segtok ###
try:
    from segtok.segmenter import split_single  #, split_multi
    
    def segtok_sent_tokenize(text: str):
        return split_single(text)
except ImportError:
    pass


def sent_tokenize(text: str, method: str):
    """Split ``text`` into stripped, non-empty sentences using the chosen backend."""
    if method == 'nltk':
        stok = nltk_sent_tokenize
    elif method == 'spacy':
        stok = spacy_sent_tokenize
    elif method == 'segtok':
        stok = segtok_sent_tokenize
    else:
        raise ValueError(f"Invalid sentence tokenizer method: {method}")
    
    return [ssent for sent in stok(text) if (ssent := sent.strip())]


def parse_split(filepath: str, drop_titles: bool = False, sent_tokenize_method: str = 'nltk'):
    """Yield one document dict per row of the JSON split at ``filepath``.

    Each yielded dict holds parallel lists: sentence ids, sentences, a titles
    mask (1 marks a section heading) and labels (1 marks the last sentence of
    a section).
    """
    with open(filepath, 'r') as f:
        data = json.load(f)

    for row in data:
        doc_id = row['id']
        title = row['title']
        text = row['text']
        sections = row['annotations']

        doc = {
            'id': doc_id,
            'title': title,
            'ids': [],
            'sentences': [],
            'titles_mask': [],
            'labels': [],
        }
        
        for sec_idx, sec in enumerate(sections):
            sec_title = sec['sectionHeading'].strip()

            # Slice the section's character span out of the full document text
            sec_text = text[sec['begin']:sec['begin'] + sec['length']]
            sentences = sent_tokenize(sec_text, method=sent_tokenize_method)
            
            # Skip empty sections
            if not sentences:
                continue

            # Add the section heading as its own "sentence"
            if not drop_titles and sec_title:
                doc['ids'].append(f'{sec_idx}')
                doc['sentences'].append(sec_title)
                doc['titles_mask'].append(1)
                doc['labels'].append(0)

            # Add the section's sentences; label 1 marks the last sentence of the section
            for sent_idx, sent in enumerate(sentences):
                doc['ids'].append(f'{sec_idx}_{sent_idx}')
                doc['sentences'].append(sent)
                doc['titles_mask'].append(0)
                doc['labels'].append(1 if sent_idx == len(sentences) - 1 else 0)
        
        if drop_titles:
            # The titles mask is all zeros when headings are dropped, so remove it
            doc.pop('titles_mask')

        yield doc
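

if __name__ == '__main__':
    # Minimal usage sketch. Assumptions: this CLI is illustrative and not part of
    # the original module, and the input file is a JSON list of rows with 'id',
    # 'title', 'text', and character-offset 'annotations', as parse_split expects.
    import argparse

    parser = argparse.ArgumentParser(description="Parse a JSON split into sentence-level documents.")
    parser.add_argument('filepath', help="Path to the split JSON file")
    parser.add_argument('--method', default='nltk', choices=['nltk', 'spacy', 'segtok'],
                        help="Sentence tokenizer backend")
    parser.add_argument('--drop-titles', action='store_true',
                        help="Do not emit section headings as sentences")
    args = parser.parse_args()

    for doc in parse_split(args.filepath, drop_titles=args.drop_titles,
                           sent_tokenize_method=args.method):
        print(doc['id'], f"{len(doc['sentences'])} sentences")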