Upload 97 files
This view is limited to 50 files because it contains too many changes.
- .gitattributes +1 -0
- Dockerfile +0 -0
- LICENSE +21 -0
- __pycache__/parser.cpython-312.pyc +0 -0
- app.py +104 -0
- asset/design.png +3 -0
- citekit/Dataset/Dataset.py +41 -0
- citekit/Dataset/__pycache__/Dataset.cpython-310.pyc +0 -0
- citekit/Dataset/__pycache__/Dataset.cpython-312.pyc +0 -0
- citekit/__init__.py +1 -0
- citekit/__pycache__/__init__.cpython-312.pyc +0 -0
- citekit/attribute/__init__.py +0 -0
- citekit/attribute/__pycache__/__init__.cpython-312.pyc +0 -0
- citekit/attribute/__pycache__/attribute.cpython-312.pyc +0 -0
- citekit/attribute/attribute.py +222 -0
- citekit/cite_modules/LLM.py +427 -0
- citekit/cite_modules/Retrieve.py +99 -0
- citekit/cite_modules/__pycache__/LLM.cpython-310.pyc +0 -0
- citekit/cite_modules/__pycache__/LLM.cpython-312.pyc +0 -0
- citekit/cite_modules/__pycache__/Retrieve.cpython-310.pyc +0 -0
- citekit/cite_modules/__pycache__/Retrieve.cpython-312.pyc +0 -0
- citekit/cite_modules/__pycache__/augment_model.cpython-310.pyc +0 -0
- citekit/cite_modules/__pycache__/augment_model.cpython-312.pyc +0 -0
- citekit/cite_modules/augment_model.py +455 -0
- citekit/evaluator/__init__.py +0 -0
- citekit/evaluator/__pycache__/__init__.cpython-310.pyc +0 -0
- citekit/evaluator/__pycache__/__init__.cpython-312.pyc +0 -0
- citekit/evaluator/__pycache__/evaluator.cpython-310.pyc +0 -0
- citekit/evaluator/__pycache__/evaluator.cpython-312.pyc +0 -0
- citekit/evaluator/evaluator.py +1118 -0
- citekit/pipeline/__pycache__/pipeline.cpython-310.pyc +0 -0
- citekit/pipeline/__pycache__/pipeline.cpython-312.pyc +0 -0
- citekit/pipeline/__pycache__/pipeline_inter.cpython-310.pyc +0 -0
- citekit/pipeline/pipeline.py +423 -0
- citekit/prompt/__pycache__/prompt.cpython-310.pyc +0 -0
- citekit/prompt/__pycache__/prompt.cpython-312.pyc +0 -0
- citekit/prompt/prompt.py +294 -0
- citekit/utils/__pycache__/utils.cpython-310.pyc +0 -0
- citekit/utils/__pycache__/utils.cpython-312.pyc +0 -0
- citekit/utils/utils.py +317 -0
- context_cite/__init__.py +4 -0
- context_cite/__pycache__/__init__.cpython-312.pyc +0 -0
- context_cite/__pycache__/__init__.cpython-39.pyc +0 -0
- context_cite/__pycache__/context_citer.cpython-312.pyc +0 -0
- context_cite/__pycache__/context_citer.cpython-39.pyc +0 -0
- context_cite/__pycache__/context_partitioner.cpython-312.pyc +0 -0
- context_cite/__pycache__/context_partitioner.cpython-39.pyc +0 -0
- context_cite/__pycache__/solver.cpython-312.pyc +0 -0
- context_cite/__pycache__/solver.cpython-39.pyc +0 -0
- context_cite/__pycache__/utils.cpython-312.pyc +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+asset/design.png filter=lfs diff=lfs merge=lfs -text
Dockerfile
ADDED
File without changes
LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2024 Jiajun Shen

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
__pycache__/parser.cpython-312.pyc
ADDED
Binary file (20.7 kB).
app.py
ADDED
@@ -0,0 +1,104 @@
from flask import Flask, request, jsonify, send_file, Response
from flask_cors import CORS
import openai
import sys
import os
from methods.self_RAG_demo import pipeline, graph
from citekit.utils.utils import parse_html_config

app = Flask(__name__)
CORS(app)  # allow cross-origin requests


@app.route("/")
def index():
    return send_file("index.html")


@app.route("/run_pipeline", methods=["POST"])
def run_pipeline():
    data = request.json
    if not data:
        return jsonify({"error": "Invalid input data"}), 400

    try:
        result = pipeline(data)  # call the pipeline directly on the data
        print(result)
        return jsonify(result)  # return the JSON result
    except Exception as e:
        return jsonify({"error": str(e)}), 500

@app.route("/get_nodes", methods=["POST"])
def get_nodes(*args, **kwargs):
    graph.update()
    try:
        return jsonify(graph.get_json())
    except Exception as e:
        return jsonify({"error": str(e)}), 500

@app.route("/update", methods=["POST"])
def update():

    data = request.json
    update_info = data.get("update_info")
    update_object = data.get('update_object')
    print(update_info, update_object)
    try:
        config, update_info = parse_html_config(update_info)
        print('GOT CONFIG', config, update_info)
        pipeline.update(update_object, config, update_info)
        return jsonify({})
    except Exception as e:
        return jsonify({"error": str(e)}), 500



@app.route("/get_config", methods=["POST"])
def get_config():
    data = request.json
    config = data.get("config").lower()
    module_name = data.get("module_name")
    module = pipeline.get_module_by_name(module_name)

    try:
        if config in ['prompt', 'destination', 'max turn', 'global prompt', 'parallel']:
            return jsonify(module.get_json_config(config))
        else:
            raise NotImplementedError

    except Exception as e:
        return jsonify({"error": str(e)}), 500

@app.route("/chat", methods=["POST"])
def chat():
    data = request.json
    api_key = data.get("api_key")
    user_message = data.get("message")

    if not api_key or not user_message:
        return jsonify({"error": "API Key and message are required"}), 400

    try:
        openai.api_key = api_key
        response = openai.ChatCompletion.create(
            model="gpt-4o",
            messages=[
                {"role": "system", "content": "You are a helpful assistant that follows the instructions of the user. You will be given a pipeline and (maybe) some datapoints in json format. You will be asked questions about the pipeline or the datapoints. Refuse to answer questions that are not about the pipeline or the datapoints."},
                {"role": "user", "content": user_message}
            ],
            stream=True  # enable streaming output
        )

        def generate():
            for chunk in response:
                if "choices" in chunk and chunk["choices"]:
                    yield chunk["choices"][0]["delta"].get("content", "")

        return Response(generate(), content_type="text/event-stream")  # use a streaming response
    except Exception as e:
        return jsonify({"error": str(e)}), 500

if __name__ == '__main__':
    app.run(host="0.0.0.0", port=7860)
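For reference, a minimal client-side sketch exercising the endpoints above. This assumes the server is running locally on port 7860; the payload shapes follow the handlers above, and the module name passed to /get_config is a hypothetical example that depends on which modules the pipeline actually contains.

import requests

BASE = "http://localhost:7860"

# Run the pipeline on one datapoint; the payload shape is whatever `pipeline` expects.
print(requests.post(f"{BASE}/run_pipeline", json={"question": "Who wrote Hamlet?"}).json())

# Fetch the current pipeline graph as JSON.
print(requests.post(f"{BASE}/get_nodes").json())

# Query a module's prompt configuration ("gpt-4o-[1]" is a hypothetical module name).
print(requests.post(f"{BASE}/get_config",
                    json={"config": "prompt", "module_name": "gpt-4o-[1]"}).json())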
asset/design.png
ADDED
Git LFS Details
citekit/Dataset/Dataset.py
ADDED
@@ -0,0 +1,41 @@
from torch.utils.data import Dataset
import json

default_get = lambda key: lambda data: data[key]

class PromptDataset(Dataset):

    def __init__(self, data_dir, *keys, **projections) -> None:
        self.data = []
        for d in data_dir:
            list_contents = {key: default_get(key)(d) for key in keys if key in d.keys()}
            dict_contents = {projection: projections[projection](d) for projection in projections.keys()}
            self.data.append({**list_contents, **dict_contents})

    def __getitem__(self, index) -> dict:

        return self.data[index]

    def __len__(self):
        return len(self.data)

class FileDataset(PromptDataset):

    def __init__(self, data_dir, *keys, **projections) -> None:
        with open(data_dir, 'r', encoding='utf-8') as file:
            data_dir = json.load(file)
        if not keys:
            keys = data_dir[0].keys()

        self.data = []
        for d in data_dir:
            list_contents = {key: default_get(key)(d) for key in keys if key in d.keys()}
            dict_contents = {projection: projections[projection](d) for projection in projections.keys()}
            self.data.append({**list_contents, **dict_contents})

    def __getitem__(self, index) -> dict:

        return self.data[index]

    def __len__(self):
        return len(self.data)
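A minimal usage sketch for the classes above, assuming a hypothetical data.json holding a list of records with question, answer, and docs fields; the keyword projections turn a whole record into a derived field:

# data.json (hypothetical): [{"question": "...", "answer": "...", "docs": [...]}, ...]
dataset = FileDataset(
    'data.json',
    'question', 'answer',              # keys copied through as-is
    n_docs=lambda d: len(d['docs']),   # projection: derived field computed per record
)
print(len(dataset), dataset[0]['question'], dataset[0]['n_docs'])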
citekit/Dataset/__pycache__/Dataset.cpython-310.pyc
ADDED
Binary file (2.39 kB).
citekit/Dataset/__pycache__/Dataset.cpython-312.pyc
ADDED
Binary file (3.14 kB).
citekit/__init__.py
ADDED
@@ -0,0 +1 @@
from __future__ import absolute_import
citekit/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (207 Bytes).
citekit/attribute/__init__.py
ADDED
File without changes
citekit/attribute/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (164 Bytes).
citekit/attribute/__pycache__/attribute.cpython-312.pyc
ADDED
Binary file (11.9 kB).
citekit/attribute/attribute.py
ADDED
@@ -0,0 +1,222 @@
import json
from context_cite import ContextCiter
import re
import torch
from transformers import LlamaForCausalLM, LlamaTokenizer


def all_normalize(obj):
    all_values = []
    for output_sent_result in obj:
        for each_doc in output_sent_result:
            for each_span in each_doc:
                all_values.append(each_span[1])
    max_val = max(all_values)
    min_val = min(all_values)
    for output_sent_result in obj:
        for i, each_doc in enumerate(output_sent_result):
            for j, each_span in enumerate(each_doc):
                each_span = (each_span[0], (each_span[1] - min_val) / (max_val - min_val))
                output_sent_result[i][j] = each_span
    return obj

def all_normalize_in(obj):
    for output_sent_result in obj:
        all_values = []
        for each_doc in output_sent_result:
            for each_span in each_doc:
                all_values.append(each_span[1])
        max_val = max(all_values)
        min_val = min(all_values)
        for i, each_doc in enumerate(output_sent_result):
            for j, each_span in enumerate(each_doc):
                each_span = (each_span[0], (each_span[1] - min_val) / (max_val - min_val))
                output_sent_result[i][j] = each_span
    return obj

def load_json(file_path):

    with open(file_path, 'r') as file:
        data = file.read()
    if file_path.endswith('.jsonl'):
        # turn JSONL into a single JSON array (avoids f-string quote reuse, which
        # is a syntax error before Python 3.12)
        data = '[' + '},{'.join(data.split('}\n{')) + ']'
    objects = json.loads(data)
    return objects

def ma(text):
    pattern = r"Document \[\d+\]\(Title:[^)]+\)"

    match = re.search(pattern, text)

    if match:
        index = match.end()
        return index
    else:
        return 0

def write_json(file_path, data):
    with open(file_path, 'w') as json_file:
        json.dump(data, json_file, indent=4)



def load_model(model_name_or_path):
    from transformers import AutoModelForCausalLM, AutoTokenizer
    model = AutoModelForCausalLM.from_pretrained(
        model_name_or_path,
        device_map='auto',
        token='your token'
    )

    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    model.eval()
    return model, tokenizer


def compute_log_prob(model, tokenizer, input_text, output_text):
    inputs = tokenizer(input_text, return_tensors="pt")
    output_tokens = tokenizer(output_text, return_tensors="pt")["input_ids"]

    with torch.no_grad():
        logits = model(**inputs).logits[:, -output_tokens.shape[1]-1:-1, :]

    log_probs = torch.nn.functional.log_softmax(logits, dim=-1)
    output_log_probs = log_probs.gather(2, output_tokens.unsqueeze(-1)).squeeze(-1)
    return output_log_probs.sum().item()

def compute_contributions(model, tokenizer, question, docs, output):
    full_input = question + '\n\n' + '\n'.join(docs)
    base_prob = compute_log_prob(model, tokenizer, full_input, output)

    contributions = []
    for i in range(len(docs)):
        reduced_docs = docs[:i] + docs[i+1:]
        reduced_input = question + '\n\n' + '\n'.join(reduced_docs)
        reduced_prob = compute_log_prob(model, tokenizer, reduced_input, output)
        contributions.append(base_prob - reduced_prob)

    return contributions

class InterpretableAttributer:

    def __init__(self, levels=['doc', 'span', 'word'], model='gpt-2'):
        for level in levels:
            assert level in ['doc', 'span', 'word'], f'Invalid level: {level}'
        # span before doc
        self.levels = sorted(levels, key=lambda x: ['span', 'doc', 'word'].index(x))
        #self.model, self.tokenizer = load_model(model)


    def attribute(self, question, docs, output):
        attribute_results = {}
        for level in self.levels:
            attribute_result = []
            for sentence in output:
                attribute_result.append(self._attribute(question, docs, sentence, level))
            attribute_results[level] = attribute_result
        return attribute_results


    def _attribute(self, question, docs, output, level):
        if level == 'doc':
            return self.doc_level_attribution(question, docs, output)
        elif level == 'span':
            return self.span_level_attribution(question, docs, output)
        elif level == 'word':
            return self.word_level_attribution(question, docs, output)
        else:
            raise ValueError(f'Invalid level: {level}')

    def span_level_attribution(self, question, docs, output):
        # USE CONTEXT CITE
        context = '\n\n'.join(docs)
        response = output

        cc = ContextCiter(self.model, self.tokenizer, context, question)
        _, prompt = cc._get_prompt_ids(return_prompt=True)
        cc._cache["output"] = prompt + response
        result = cc.get_attributions(as_dataframe=True, top_k=1000).data.to_dict(orient='records')
        return result


    def parse_attribution_results(self, docs, results):
        context = '\n\n'.join(docs)
        lens = [len(doc) for doc in docs]
        len_sep = len('\n\n')
        final_results = {}
        for level, result in results.items():
            if level == 'span':
                ordered_all_sents = []
                for output_sent_result in result:
                    final_end_for_span = {}
                    all_span_results = []
                    for each_span in output_sent_result:
                        span_text = each_span["Source"]
                        span_score = each_span["Score"]
                        start = 0
                        if span_text in final_end_for_span:
                            start = final_end_for_span[span_text]
                        span_start = context.find(span_text, start)
                        span_end = span_start + len(span_text)
                        final_end_for_span[span_text] = span_end
                        # locate the document
                        doc_idx = 0
                        while span_start > lens[doc_idx]:
                            span_start -= lens[doc_idx] + len_sep
                            span_end -= lens[doc_idx] + len_sep
                            doc_idx += 1
                        all_span_results.append((span_start, span_score, doc_idx))
                    ordered = [[] for _ in range(len(docs))]
                    for span_start, span_score, doc_idx in all_span_results:
                        ordered[doc_idx].append((span_start, span_score))
                    for i in range(len(docs)):
                        doc = docs[i]
                        real_start = ma(doc)
                        ordered[i] = sorted(ordered[i], key=lambda x: x[0])
                        ordered[i][0] = (real_start, ordered[i][0][1])

                    ordered_all_sents.append(ordered)
                final_results[level + '_level'] = all_normalize_in(ordered_all_sents)
            elif level == 'doc':
                self.span_to_doc(result)
            else:
                raise NotImplementedError(f'Parsing for {level} not implemented yet')
        return final_results

    def span_to_doc(self, results):
        import numpy as np
        span_level = results['span_level']
        doc_level = []
        for output_sent_result in span_level:
            doc_level.append([np.mean([span[1] for span in doc]) for doc in output_sent_result])
        results['doc_level'] = doc_level


    def attribute_for_result(self, result):
        docs = result['doc_cache']
        question = result['data']['question']
        output = result['output']
        attribution_results = self.attribute(question, docs, output)
        parsed_results = self.parse_attribution_results(docs, attribution_results)
        result.update(parsed_results)

        if 'doc' not in self.levels:
            # if doc is not in the levels, we need to convert the span level to doc level
            print('Converting span level to doc level...')
            try:
                self.span_to_doc(result)
                print('Conversion successful')
            except Exception as e:
                print(f'Error converting span level to doc level: {e}')

    def attribute_for_results(self, results):
        for result in results:
            self.attribute_for_result(result)
        return results


if __name__ == '__main__':
    attributer = InterpretableAttributer(levels=['span'])
    results = load_json('res_attr.json')
    attributer.attribute_for_results(results)
    write_json('res_attr_span.json', results)
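The core idea behind compute_contributions above is leave-one-out attribution: a document's contribution is the drop in the output's score when that document is removed from the context. A self-contained toy sketch of the same pattern, with a word-overlap scorer standing in for the model log-probability purely for illustration:

def loo_contributions(score, question, docs, output):
    # Contribution of doc i = score with all docs minus score without doc i.
    base = score(question, docs, output)
    return [base - score(question, docs[:i] + docs[i + 1:], output)
            for i in range(len(docs))]

# Toy scorer: how many output words appear somewhere in the retained docs.
toy_score = lambda q, docs, out: sum(w in ' '.join(docs) for w in out.split())

docs = ["Paris is the capital of France.", "Berlin is the capital of Germany."]
print(loo_contributions(toy_score, "Capital of France?", docs, "Paris is the capital"))
# -> [1, 0]: only the first document contributes the word "Paris".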
citekit/cite_modules/LLM.py
ADDED
@@ -0,0 +1,427 @@
import torch
from citekit.prompt.prompt import Prompt
import re
from citekit.utils.utils import one_paragraph, first_sentence, make_as
import random
import os



class Module:
    module_count = 1
    def __init__(self, prompt_maker: Prompt = None, pipeline = None, self_prompt = {}, iterative = False, merge = False, max_turn = 6, output_as = None, parallel = False) -> None:
        self.self_prompt = self_prompt
        self.use_head_prompt = True
        self.connect_to(pipeline)
        self.prompt_maker = prompt_maker
        self.last_message = ''
        self.destinations = []
        self.conditions = {}
        self.head_key = None
        self.parallel = parallel
        self.iterative = iterative
        self.merge = merge
        self.head_process = one_paragraph
        self.max_turn = max_turn
        self.multi_process = False
        self.output_cond = {}  # {cond : {'post_processing': post, 'end': end}}
        self.count = Module.module_count
        Module.module_count += 1
        self.if_add_output_to_head = False
        self.turns = 0
        self.end = False

    def __str__(self) -> str:
        if self.model_type:
            return f'{self.model_type}-[{self.count}]'
        else:
            return f'Unknown-type module-[{self.count}]'

    def get_json_config(self, config):
        print('get_json_config:', config)
        available_mapping = {
            'max turn': 'max_turn',
            'prompt': 'prompt',
            'destination': 'destination',
            'global prompt': 'head_key',
        }
        if config == 'prompt':
            prompt_info = {
                'template': self.prompt_maker.template,
                'components': self.prompt_maker.components
            }
            self_info = self.self_prompt

            return {
                'prompt_info': prompt_info,
                'self_info': self_info
            }
        elif config == 'destination':
            return {
                'destination': str(self.destinations[0])
            }
        elif config in ['max turn', 'global prompt']:
            config = available_mapping[config]
            print('getting the config:', config)
            return getattr(self, config)
        else:
            raise NotImplementedError(f'get_json_config for {config} is not implemented')

    def get_destinations(self):
        return self.destinations

    def update(self, config, update_info):

        if config == 'prompt':
            template = update_info['template']
            components = update_info['components']
            self_prompt = update_info['self_prompt']
            import copy
            # avoid changing the original prompt_maker
            self.prompt_maker = copy.deepcopy(self.prompt_maker)

            self.prompt_maker.update(template=template, components=components)
            self.self_prompt = self_prompt

        elif config == 'destination':
            print('update destination:', update_info[0], 'post_processing:', update_info[1])
            if update_info[1] == 'None':
                self.set_target(update_info[0])
            else:
                self.set_target(update_info[0], post_processing=make_as(update_info[1]))

        elif config == 'delete_destination':
            for i, d in enumerate(self.destinations):
                if str(d) == str(update_info):
                    self.destinations.remove(d)
                    del self.conditions[d]
                    break
        elif config == 'header':
            self.add_to_head(update_info, sub=True)
        elif config == 'max turn':
            self.max_turn = update_info
        else:
            raise NotImplementedError(f'update for {config} is not implemented')

    def end_multi(self):
        return

    def set_use_head_prompt(self, use):
        assert isinstance(use, bool)
        self.use_head_prompt = use

    def reset(self):
        self.end = False
        self.turns = 0

    def change_to_multi_process(self, bool_value):
        if bool_value:
            self.last_message = []
        else:
            self.last_message = ''
        self.multi_process = bool_value

    @property
    def get_use_head_prompt(self):
        return self.use_head_prompt

    def generate(self, head_prompt: dict = {}, dynamic_prompt: dict = {}):
        raise NotImplementedError

    def send(self):
        for destination in self.destinations:
            cond = self.conditions[destination]['condition']
            if cond(self):
                return destination
        return None

    def set_target(self, destination, condition = lambda self: True, post_processing = lambda x: x) -> None:
        self.conditions[destination] = {'condition': condition, 'post_processing': post_processing}
        self.destinations = [destination] + self.destinations
        destination.connect_to(self.pipeline)

    def clear_destination(self):
        self.destinations = []
        self.conditions = {}

    def add_output_to_head(self, outputs):
        if self.if_add_output_to_head:
            if not self.head_sub:
                if self.head_key not in self.pipeline.head.keys():
                    self.pipeline.head.update({self.head_key: self.head_process(outputs)})
                else:
                    self.pipeline.head[self.head_key] += '\n'
                    self.pipeline.head[self.head_key] += self.head_process(outputs)
            else:
                self.pipeline.head[self.head_key] = self.head_process(outputs)

    def connect_to(self, pipeline = None) -> None:
        self.pipeline = pipeline
        if pipeline:
            pipeline.module.append(self)

    def output(self):
        outed = False
        for cond, post_and_end in self.output_cond.items():
            if cond(self):
                if not outed:
                    if not self.merge:
                        self.pipeline.output.append(post_and_end['post_processing'](self.last_message))
                    else:
                        self.pipeline.output.append(post_and_end['post_processing'](''.join(self.last_message)))
                    outed = True
                if post_and_end['end']:
                    self.end = True

    def set_output(self, cond = lambda self: True, post_processing = lambda x: x, end = True):
        self.output_cond[cond] = {'post_processing': post_processing, 'end': end}

    def get_first_module(self):
        return self

    def add_to_head(self, datakey, sub = False, process = None):
        self.if_add_output_to_head = True
        self.head_key = datakey
        self.head_sub = sub
        if process:
            self.head_process = process


def load_model(model_name_or_path, dtype = torch.float16):
    from transformers import AutoModelForCausalLM, AutoTokenizer
    model = AutoModelForCausalLM.from_pretrained(
        model_name_or_path,
        torch_dtype=dtype,
        device_map='auto',
    )

    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    model.eval()
    return model, tokenizer


class LLM(Module):
    model_type = 'Generator'
    def __init__(self, model = None, prompt_maker: Prompt = None, pipeline = None, post_processing = None, self_prompt = {}, device = 'cpu', temperature = 0.5, stop = None, max_turn = 6, share_model_with = None, iterative = False, auto_cite = False, output = None, merge = False, noisy = True, parallel = False, output_as = 'Answer', auto_cite_from = 'docs') -> None:
        super().__init__(prompt_maker, pipeline, self_prompt, iterative, merge, parallel = parallel)
        self.max_turn = max_turn
        if post_processing:
            self.post_processing = post_processing
        else:
            self.post_processing = lambda x: {output_as: x}
        if model:
            self.model_name = model
        self.stop = stop
        self.multi_process = False
        self.noisy = noisy
        self.head_process = one_paragraph
        self.auto_cite = auto_cite
        if auto_cite:
            self.cite_from = auto_cite_from
        if model:
            if 'gpt' not in model.lower():
                if not share_model_with:
                    print('loading model...')
                    self.model, self.tokenizer = self.load_model(model)
                else:
                    print('sharing model...')
                    self.model, self.tokenizer = share_model_with.model, share_model_with.tokenizer
                self.temperature = temperature
                self.device = device
            else:
                self.openai_key = os.getenv('OPENAI_API_KEY')
        self.output_cond = {}  # {cond : {'post_processing': post, 'end': end}}
        self.if_add_output_to_head = False

        self.token_used = 0

    def reset(self):
        self.end = False
        self.turns = 0
        self.token_used = 0


    def __str__(self) -> str:
        if self.model_name:
            return f'{self.model_name}-[{self.count}]'
        else:
            return 'unknown model'

    def __repr__(self) -> str:
        return (f'{self.prompt_maker}\n|\n|\nV\n{self}\n|\n|\nV\n' + '/'.join([str(des) for des in self.destinations] + ['output']))

    def load_model(self, model_name_or_path, dtype = torch.float16):
        from transformers import AutoModelForCausalLM, AutoTokenizer
        model = AutoModelForCausalLM.from_pretrained(
            model_name_or_path,
            torch_dtype=dtype,
            device_map='auto',
        )

        tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
        model.eval()
        return model, tokenizer

    def set_cite(self, key):
        self.cite_from = key
        self.auto_cite = True

    def generate_content(self, prompt):
        if 'gpt' in self.model_name.lower():
            import openai
            openai.api_key = self.openai_key
            prompt = [
                {'role': 'system',
                 'content': "You are a good helper who follows the instructions"},
                {'role': 'user', 'content': prompt}
            ]
            response = openai.ChatCompletion.create(
                model=self.model_name,
                messages=prompt,
                max_tokens=500,
                stop=self.stop
            )
            self.token_used += response['usage']['completion_tokens'] + response['usage']['prompt_tokens']
            return response['choices'][0]['message']['content']

        else:
            inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)
            stop = [] if self.stop is None else self.stop

            outputs = self.model.generate(
                **inputs,
                do_sample=True,
                max_new_tokens=200,
                temperature=self.temperature
            )
            self.token_used += len(outputs[0])

            outputs = self.tokenizer.decode(outputs[0][inputs['input_ids'].size(1):], skip_special_tokens=True)
            return one_paragraph(outputs)


    def generate(self, head_prompt: dict = {}, dynamic_prompt: dict = {}):
        if self.use_head_prompt:
            #print(head_prompt,self.self_prompt,dynamic_prompt)
            prompt = self.prompt_maker(head_prompt, self.self_prompt, dynamic_prompt)
        else:
            prompt = self.prompt_maker(self.self_prompt, dynamic_prompt)
        if self.noisy:
            print(f'prompt to {str(self)}:\n', prompt, '\n\n')
        self.turns += 1

        outputs = self.generate_content(prompt)
        #print('DEBUG:',outputs)
        if self.noisy:
            print('OUTPUT:')
            print(outputs)
        if self.auto_cite:
            outputs = self.cite_from_prompt({**head_prompt, **self.self_prompt, **dynamic_prompt}, outputs)
        if self.multi_process:
            self.last_message.append(outputs)
        else:
            self.last_message = outputs


        self.add_output_to_head(outputs)

        destination = self.send()

        if self.turns > self.max_turn:
            self.end = True
        if destination in self.conditions:
            return self.conditions[destination]['post_processing'](outputs)
        else:
            return self.post_processing(outputs)

    def add_output_to_head(self, outputs):
        if self.if_add_output_to_head:
            if not self.head_sub:
                if self.head_key not in self.pipeline.head.keys():
                    self.pipeline.head.update({self.head_key: self.head_process(outputs)})
                else:
                    self.pipeline.head[self.head_key] += '\n'
                    self.pipeline.head[self.head_key] += self.head_process(outputs)
            else:
                self.pipeline.head[self.head_key] = self.head_process(outputs)

    def output(self):
        outed = False
        for cond, post_and_end in self.output_cond.items():
            if cond(self):
                if not outed:
                    if not self.merge and not self.iterative:
                        self.pipeline.output.append(post_and_end['post_processing'](self.last_message))
                    else:
                        self.pipeline.output.append(post_and_end['post_processing'](' '.join(self.last_message)))
                    outed = True
                if post_and_end['end']:
                    self.end = True

    def set_output(self, cond = lambda self: True, post_processing = lambda x: x, end = True):
        self.output_cond[cond] = {'post_processing': post_processing, 'end': end}


    def cite_from_prompt(self, prompt_dict, input):
        input = first_sentence(input)
        cite_docs = prompt_dict[self.cite_from]
        refs = re.findall(r'\[\d+\]', cite_docs)
        pattern = r'([.!?])\s*$'
        if refs:
            cite = ''.join(refs)
        else:
            cite = ''
        output = re.sub(pattern, rf' {cite}\1 ', input)
        if cite not in output:
            output += cite
        return output

    def add_to_head(self, datakey, sub = False, process = None):
        self.if_add_output_to_head = True
        self.head_key = datakey
        self.head_sub = sub
        if process:
            self.head_process = process



class TestLLM(LLM):
    def __init__(self, model='gpt-4', prompt_maker: Prompt = None, pipeline=None, post_processing=lambda x: x, self_prompt={}, device='cpu', temperature=0.5, stop=None, max_turn=6, share_model_with=None, iterative=False, ans=None) -> None:
        super().__init__(model, prompt_maker, pipeline, self_prompt=self_prompt, share_model_with=share_model_with, iterative=iterative)
        self.max_turn = max_turn
        self.post_processing = post_processing
        self.model_name = model
        self.last_message = ''
        self.stop = stop
        self.output_cond = {}  # {cond : {'post_processing': post, 'end': end}}
        self.if_add_output_to_head = False

        self.token_used = 0
        self.ans = 'Strain[1], turns:, heat[2][4]. Sent2[5]. Sent3.\n\n rdd' if not ans else ans
    def generate_content(self, prompt):
        return self.ans


class AutoAISLLM(LLM):
    def __init__(self, model=None, prompt_maker: Prompt = None, pipeline=None, post_processing=None, self_prompt={}, device='cpu', temperature=0.5, stop=None, max_turn=6, share_model_with=None, iterative=False, auto_cite=False, output=None, merge=False, noisy=False, output_as='Answer') -> None:
        super().__init__(model, prompt_maker, pipeline, post_processing, self_prompt, device, temperature, stop, max_turn, share_model_with, iterative, auto_cite, output, merge, noisy, output_as)

        self.prompt_maker = Prompt('<INST><premise><claim>\n Answer: ', components={
            'INST': '{INST}\n\n',
            'premise': 'Premise: {premise}\n\n',
            'claim': 'Claim: {claim}\n',
        })
        self.self_prompt = {'INST': 'In this task, you will be presented a premise and a claim. If the premise entails the claim, output "1", otherwise output "0". Your answer should only contain one number without any other letters and punctuations.'}

    def generate(self, premise, claim):
        dict_answer = super().generate({'premise': premise, 'claim': claim})
        return dict_answer.get('Answer')



if __name__ == '__main__':
    prompt = Prompt(template='<INST><Question><Docs><feedback><Answer>', components={'INST': '{INST}\n\n',
                    'Question': 'Question:{Question}\n\n',
                    'Docs': '{Docs}\n',
                    'feedback': 'Here is the feed back of your last response:{feedback}\n',
                    'Answer': 'Here is answer and you have to give feedback:{Answer}'})
    m = LLM('gpt')
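To make the routing contract of Module concrete: set_target registers a destination with a predicate, and send returns the first destination whose predicate holds (newest targets are tried first, since set_target prepends). A stripped-down, self-contained sketch of that pattern, using a stand-in class rather than the citekit API:

class TinyModule:
    def __init__(self, name):
        self.name = name
        self.turns = 0
        self.destinations = []   # newest target first, as in Module.set_target
        self.conditions = {}

    def set_target(self, dest, condition=lambda self: True):
        self.conditions[dest] = condition
        self.destinations = [dest] + self.destinations

    def send(self):
        # Return the first destination whose condition holds for this module.
        for dest in self.destinations:
            if self.conditions[dest](self):
                return dest
        return None

gen, verifier, reviser = TinyModule('gen'), TinyModule('verifier'), TinyModule('reviser')
gen.set_target(verifier)                                   # default route
gen.set_target(reviser, condition=lambda m: m.turns > 2)   # tried first, fires only late
gen.turns = 3
print(gen.send().name)  # -> 'reviser'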
citekit/cite_modules/Retrieve.py
ADDED
@@ -0,0 +1,99 @@
import argparse
import csv
import json
import os
import time
import pickle

import numpy as np
import torch
from tqdm import tqdm
from rank_bm25 import BM25Okapi
from sentence_transformers import SentenceTransformer

def gtr_build_index(encoder, docs):
    with torch.inference_mode():
        embs = encoder.encode(docs, show_progress_bar=True, normalize_embeddings=True)
        embs = embs.astype("float16")

    GTR_EMB = os.environ.get("GTR_EMB")
    with open(GTR_EMB, "wb") as f:
        pickle.dump(embs, f)
    return embs


class DPRRetriever:
    def __init__(self, DPR_WIKI_TSV, GTR_EMB = None, emb_model = "sentence-transformers/gtr-t5-xxl") -> None:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.device = device
        self.encoder = SentenceTransformer(emb_model, device=device)
        self.docs = []
        print("loading wikipedia file...")
        with open(DPR_WIKI_TSV) as f:
            reader = csv.reader(f, delimiter="\t")
            for i, row in enumerate(reader):
                if i == 0:
                    continue
                self.docs.append(row[2] + "\n" + row[1])
        if not GTR_EMB:
            print("gtr embeddings not found, building...")
            embs = gtr_build_index(self.encoder, self.docs)
        else:
            print("gtr embeddings found, loading...")
            with open(GTR_EMB, "rb") as f:
                embs = pickle.load(f)

        self.gtr_emb = torch.tensor(embs, dtype=torch.float16, device=device)

    def retrieve(self, question, topk):
        with torch.inference_mode():
            query = self.encoder.encode(question, batch_size=4, show_progress_bar=True, normalize_embeddings=True)
            query = torch.tensor(query, dtype=torch.float16, device=self.device)
            query = query.to(self.device)
            scores = torch.matmul(self.gtr_emb, query)
            score, idx = torch.topk(scores, topk)
            ret = []
            for i in range(idx.size(0)):
                title, text = self.docs[idx[i].item()].split("\n")
                ret.append({"id": str(idx[i].item() + 1), "title": title, "text": text, "score": score[i].item()})

            return ret

    def __repr__(self) -> str:
        return 'DPR Retriever'

    def __str__(self) -> str:
        return repr(self)

class BM25Retriever:
    def __init__(self, DPR_WIKI_TSV):
        self.docs = []
        self.tokenized_docs = []
        print("loading wikipedia file...")
        with open(DPR_WIKI_TSV) as f:
            reader = csv.reader(f, delimiter="\t")
            for i, row in enumerate(reader):
                if i == 0:
                    continue
                self.docs.append(row[2] + "\n" + row[1])
                self.tokenized_docs.append((row[2] + " " + row[1]).split())

        print("BM25 index building...")
        self.bm25 = BM25Okapi(self.tokenized_docs)

    def retrieve(self, question, topk):
        query = question.split()
        scores = self.bm25.get_scores(query)
        topk_indices = scores.argsort()[-topk:][::-1]
        ret = []
        for idx in topk_indices:
            title, text = self.docs[idx].split("\n", 1)
            ret.append({"id": str(idx + 1), "title": title, "text": text, "score": scores[idx]})

        return ret

    def __repr__(self) -> str:
        return 'BM25 Retriever'

    def __str__(self) -> str:
        return repr(self)
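A usage sketch for BM25Retriever. It first writes a tiny TSV in the layout the loaders above expect (a header row and id/text/title columns, since they read row[2] as the title and row[1] as the text); the file name is hypothetical:

import csv

with open("mini_wiki.tsv", "w", newline="") as f:
    writer = csv.writer(f, delimiter="\t")
    writer.writerow(["id", "text", "title"])  # header row is skipped by the loader
    writer.writerow(["1", "Paris is the capital and largest city of France.", "Paris"])
    writer.writerow(["2", "Berlin is the capital of Germany.", "Berlin"])

retriever = BM25Retriever("mini_wiki.tsv")
for hit in retriever.retrieve("capital of France", topk=1):
    print(hit["title"], round(hit["score"], 3))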
citekit/cite_modules/__pycache__/LLM.cpython-310.pyc
ADDED
Binary file (13.6 kB).
citekit/cite_modules/__pycache__/LLM.cpython-312.pyc
ADDED
Binary file (22.4 kB).
citekit/cite_modules/__pycache__/Retrieve.cpython-310.pyc
ADDED
Binary file (3.95 kB).
citekit/cite_modules/__pycache__/Retrieve.cpython-312.pyc
ADDED
Binary file (6.75 kB).
citekit/cite_modules/__pycache__/augment_model.cpython-310.pyc
ADDED
Binary file (28.9 kB).
citekit/cite_modules/__pycache__/augment_model.cpython-312.pyc
ADDED
Binary file (39.6 kB).
citekit/cite_modules/augment_model.py
ADDED
@@ -0,0 +1,455 @@
from citekit.cite_modules.LLM import Module, LLM
from citekit.cite_modules.Retrieve import DPRRetriever
from citekit.evaluator.evaluator import _run_nli_autoais, Evaluator
from citekit.prompt.prompt import Prompt
from citekit.utils.utils import one_paragraph, make_as
from sentence_transformers import SentenceTransformer
import re
import random


class Retriever(Module):
    model_type = 'retriever'
    def __init__(self, documents = None, retrieve_by = 'index', prompt_maker = None, pipeline = None, post_processing = lambda input, output: {'RetrievedDocs': output}, self_prompt = {}, topk = 3, add_id = True, merge = False, tsv_path = 'None', emb_path = 'None', retrieve_from_data = True, parallel = False) -> None:
        super().__init__(prompt_maker, pipeline, self_prompt, merge=merge, parallel=parallel)
        self.retrieve_by = retrieve_by
        self.use_head_prompt = False
        self.dataset_documents = None
        self.documents = None
        self.default_doc_key = 'docs'
        self.retrieve_from_data = retrieve_from_data
        if not documents:
            self.documents = self.pipeline.doc_cache
        else:
            self.dataset_documents = documents
        self.post_processing = post_processing
        self.if_add_output_to_head = False
        self.topk = topk
        self.add_id = add_id
        if retrieve_by == 'bm25':
            self.bm25_module_loaded = False
            from rank_bm25 import BM25Okapi
            import nltk
            nltk.download('punkt')
            from nltk.tokenize import word_tokenize
            self.word_tokenize = word_tokenize
            self.BM25Okapi = BM25Okapi
            self.bm25_module_loaded = True
        elif retrieve_by == 'dpr':
            self.dpr_retriever = DPRRetriever(DPR_WIKI_TSV=tsv_path,
                                              GTR_EMB=emb_path)

    def generate(self, head_prompt: dict = {}, dynamic_prompt: dict = {}):
        index = self.pipeline.data_index
        if self.dataset_documents:
            if isinstance(self.dataset_documents[0], list):
                # Each data has a document
                self.documents = self.dataset_documents[index]
            else:
                self.documents = self.dataset_documents
        else:
            if not self.retrieve_from_data:
                self.documents = self.pipeline.doc_cache.show_docs()
            else:
                def _stringtfy(doc):
                    if isinstance(doc, str):
                        return doc
                    return f"({doc['title']}) {doc['text']}"
                self.documents = list(map(_stringtfy, self.pipeline.current_data[self.default_doc_key]))
        # query
        if self.use_head_prompt:
            prompt = self.prompt_maker(head_prompt, self.self_prompt, dynamic_prompt)
        else:
            prompt = self.prompt_maker(self.self_prompt, dynamic_prompt)

        retrieved_docs = []
        if self.retrieve_by == 'index':
            # query : Document [1][2]
            indice = [int(r[1:]) - 1 for r in re.findall(r"\[\d+", prompt)]
            for index in indice:
                retrieved_docs.append(self.documents[index])
            if len(retrieved_docs) > self.topk:
                retrieved_docs = retrieved_docs[:self.topk]
        elif self.retrieve_by == 'bm25':
            # natural language query
            tokenized_docs = [self.word_tokenize(doc.lower()) for doc in self.documents]
            bm25 = self.BM25Okapi(tokenized_docs)
            tokenized_query = self.word_tokenize(prompt.lower())
            doc_scores = bm25.get_scores(tokenized_query)
            if self.topk > len(doc_scores):
                self.topk = len(doc_scores) - 1
            top_k_idx = sorted(range(len(doc_scores)), key=lambda i: doc_scores[i], reverse=True)[:self.topk]
            retrieved_docs = [self.documents[index] for index in top_k_idx]
            retrieved_docs_new = []
            for re_doc in retrieved_docs:
                self.pipeline.doc_cache.add_doc(re_doc, self.add_id)
                retrieved_docs_new.append(self.pipeline.doc_cache.get_last())
            retrieved_docs = retrieved_docs_new

        elif self.retrieve_by == 'gtr':
            docs_dict = self.dpr_retriever.retrieve(prompt, topk=self.topk)
            retrieved_docs = [f"({d['title']}) {d['text']}" for d in docs_dict]
            retrieved_docs_new = []
            for re_doc in retrieved_docs:
                self.pipeline.doc_cache.add_doc(re_doc, self.add_id)
                retrieved_docs_new.append(self.pipeline.doc_cache.get_last())
            retrieved_docs = retrieved_docs_new

        retrieved_docs_prompt = '\n'.join(retrieved_docs)
        destination = self.send()
        if self.multi_process:
            self.last_message.append(retrieved_docs_prompt)
        else:
            self.last_message = retrieved_docs_prompt

        if self.if_add_output_to_head:
            self.pipeline.head.update({self.head_key: retrieved_docs_prompt})
        if destination in self.conditions:
            try:
                return self.conditions[destination]['post_processing'](prompt, retrieved_docs_prompt)
            except:
                return self.conditions[destination]['post_processing'](retrieved_docs_prompt)
        else:
            return retrieved_docs_prompt


class EvalModule(Module, Evaluator):
    model_type = 'evaluator'
    def __init__(self, prompt_maker = None, pipeline=None, self_prompt={}, criteria = None, iterative = False, max_turn = 6, parallel = False) -> None:
        Module.__init__(self, prompt_maker, pipeline, self_prompt, iterative=iterative, max_turn=max_turn, parallel=parallel)
        Evaluator.__init__(self, criteria, pipeline)

    def generate(self, head_prompt: dict = {}, dynamic_prompt: dict = {}):
        result = {}
        p_data = {**head_prompt, **self.self_prompt, **dynamic_prompt}
        for criteria, get_data in self.get_data.items():
            data_dict = {}
            for k, v in get_data.items():
                if v == 'doc_cache':
                    data_dict[k] = self.pipeline.doc_cache.show_docs()
                else:
                    if v in p_data.keys():
                        data_dict[k] = p_data[v]
                    else:
                        data_dict[k] = self.pipeline.current_data[v]
            eval_func = Evaluator.eval_criteria[criteria]
            data = [data_dict]
            result[criteria] = eval_func(data)

        if self.multi_process:
            self.last_message.append(result)
        else:
            self.last_message = result
        destination = self.send()
        if destination in self.conditions:
            return self.conditions[destination]['post_processing'](result)
        else:
            return result


class CitationSimplyfier(Module):
    '''
    Simplify the citations of the 'answer' in the prompt.
    The key argument can be changed to fit a different key name in the prompt.
    By default, the simplifier simplifies the 'answer' and outputs the string with its citations simplified.
    '''
    model_type = 'simplifier'
    def __init__(self, prompt_maker = None, pipeline=None, self_prompt={}, criteria = None, key = 'answer', test = False, allow_empty = True) -> None:
        Module.__init__(self, prompt_maker, pipeline, self_prompt)
        if not test:
            self.entail = _run_nli_autoais
        else:
            self.entail = lambda p, c: random.randint(0, 1)
            self.docs = ['0'] * 100
        self.key = key
        self.allow_empty = allow_empty
    def generate(self, head_prompt: dict = {}, dynamic_prompt: dict = {}) -> str:
        docs = self.pipeline.doc_cache
        prompt = {**head_prompt, **dynamic_prompt}
        answer = prompt[self.key]

        refs = re.findall(r'\[\d+\]', answer)
        last_ref_index = None
        for match in re.finditer(r'\[\d+\]', answer):
            last_ref_index = match.end()

        if not refs:
            return answer

        refs_str = ''.join(refs)

        def simplify(sentence, refs, docs):
            ref_numbers = [int(ref.strip('[]')) for ref in refs]

            docs_combined = ''.join(docs[ref - 1] for ref in ref_numbers if ref - 1 < len(docs))

            if not self.entail(docs_combined, sentence):
                if self.allow_empty:
                    return ''
                return ''.join(refs)

            if len(ref_numbers) == 1:
                return ''.join(f'[{num}]' for num in ref_numbers)

            def remove_and_test(ref_numbers):
                for i, ref in enumerate(ref_numbers):
                    new_ref_numbers = ref_numbers[:i] + ref_numbers[i+1:]
                    new_docs_combined = ''.join(docs[r - 1] for r in new_ref_numbers if r - 1 < len(docs))
                    if self.entail(new_docs_combined, sentence):
                        if len(new_ref_numbers) == 1:
                            return new_ref_numbers
                        return remove_and_test(new_ref_numbers)
                return ref_numbers

            simplified_ref_numbers = remove_and_test(ref_numbers)

            simplified_refs = ''.join(f'[{num}]' for num in simplified_ref_numbers)
            return simplified_refs

        simplified_refs = simplify(answer, refs, docs)

        sentence_without_refs = re.sub(r'\[\d+\]', '', answer)

        if last_ref_index is not None:
            output = sentence_without_refs[:last_ref_index - len(refs_str)] + simplified_refs + sentence_without_refs[last_ref_index - len(refs_str):]
        else:
            output = sentence_without_refs + simplified_refs

        if not simplified_refs and self.allow_empty:
            output = ''


        if self.multi_process:
            self.last_message.append(output)
        else:
            self.last_message = output

        return output


class Verifier(Module):

    '''
    Verifier is currently only used for single-sentence / single-target answers, not for parallel or iterative answers.
    Verifier returns the dynamic prompt instead of an output like other modules; it is only a judge used to decide the target module.
    By default, the verifier verifies whether the documents support the answer.
    '''
    model_type = 'verifier'
    def __init__(self, prompt_maker = None, pipeline=None, self_prompt={}, criteria = None, key = 'answer', test = False) -> None:
        Module.__init__(self, prompt_maker, pipeline, self_prompt)
        if not test:
            self.entail = _run_nli_autoais
        else:
            self.entail = lambda p, c: random.randint(0, 1)
            self.docs = ['s'] * 100
        self.key = key
        self.test = test

    # Override this function to customize the verification criterion
    def verifier_judge(self, **kargs):
        docs = self.pipeline.doc_cache
        answer = kargs[self.key]
        refs = re.findall(r'\[\d+\]', answer)
        if not refs:
            return False
        ref_numbers = [int(ref.strip('[]')) for ref in refs]

        docs_combined = ''.join(docs[ref - 1] for ref in ref_numbers if ref - 1 < len(docs))
        return bool(self.entail(docs_combined, re.sub(r'\[\d+\]', '', answer)))


    def generate(self, head_prompt: dict = {}, dynamic_prompt: dict = {}):
        prompt = {**head_prompt, **dynamic_prompt}
        out = self.verifier_judge(**prompt)

        self.last_message = out

        self.turns += 1
        return dynamic_prompt


class AugmentCluster():
    def __init__(self, module_list = []) -> None:
        self.module_list = module_list
        module_count = len(module_list)
        for i in range(module_count - 1):
            assert isinstance(module_list[i], LLM) and isinstance(module_list[i+1], LLM)
            module_list[i].set_target(module_list[i+1], post_processing=module_list[i].post_processing)

    def get_first_module(self):
        return self.module_list[0]

    def get_destinations(self):
        return self.module_list[-1].get_destinations()

    def reset(self):
        for module in self.module_list:
            module.reset()

    def update(self, config, update_info):
        print('updating the AugmentCluster', update_info)
        self.module_list[-1].update(config, update_info)

    def __str__(self):
        print('getting str of the cluster', ' -> '.join([str(module) for module in self.module_list]))
        return ' -> '.join([str(module) for module in self.module_list])

    def set_target(self, destination, condition = lambda self: True, post_processing = lambda x: x) -> None:
        self.module_list[-1].set_target(destination, condition, post_processing)

    def set_output(self, cond = lambda self: True, post_processing = lambda x: x, end = True):
        self.module_list[-1].set_output(cond, post_processing, end)

    def connect_to(self, pipeline = None) -> None:
        for module in self.module_list:
            module.connect_to(pipeline)
        pipeline.stored_clusters.append(self)


class Attribute_post_select(LLM):
    noisy = False
    model_name = 'function'
    def generate(self, head_prompt: dict = {}, dynamic_prompt: dict = {}):
        print('post_select', head_prompt, dynamic_prompt)
        docs = head_prompt['docs']
        ans_docs = one_paragraph(dynamic_prompt['span']).split("\n")
        spans = [ans_doc[14:].split("<SPAN_DELIM>") for ans_doc in ans_docs]
        msg = ''
        span_list = {}
        doc_map = {}
        j = 1
        i = 1
        for doc in spans:
            if doc != []:
                span_list[f'{i}'] = []
                msg += f'Document [{i}]:\n'
                for span in doc:
                    if len(span) > 3:
                        msg += f'{j}. {span}\n'
                        span_list[f'{i}'].append(f'{j}. {span.strip()}')
                        doc_map[str(j)] = str(i)
231 |
+
|
232 |
+
return output
|
233 |
+
|
234 |
+
|
class Verifier(Module):

    '''
    Verifier is currently only used for a single-sentence/single-target answer, not for parallel or iterative answers.
    Unlike other modules, which return an output, Verifier returns the dynamic prompt: it is a judge whose only job is to decide the target module.
    By default, the verifier verifies whether the documents support the answer.
    '''
    model_type = 'verifier'
    def __init__(self, prompt_maker = None, pipeline=None, self_prompt={}, criteria = None, key = 'answer', test = False) -> None:
        Module.__init__(self, prompt_maker, pipeline, self_prompt)
        if not test:
            self.entail = _run_nli_autoais
        else:
            self.entail = lambda p, c: random.randint(0, 1)
            self.docs = ['s'] * 100
        self.key = key
        self.test = test

    # Override this function to customize the judging criterion.
    def verifier_judge(self, **kargs):
        docs = self.pipeline.doc_cache
        answer = kargs[self.key]
        refs = re.findall(r'\[\d+\]', answer)
        if not refs:
            return False
        ref_numbers = [int(ref.strip('[]')) for ref in refs]

        docs_combined = ''.join(docs[ref - 1] for ref in ref_numbers if ref - 1 < len(docs))
        return bool(self.entail(docs_combined, re.sub(r'\[\d+\]', '', answer)))


    def generate(self, head_prompt: dict = {}, dynamic_prompt: dict = {}):
        prompt = {**head_prompt, **dynamic_prompt}
        out = self.verifier_judge(**prompt)

        self.last_message = out

        self.turns += 1
        return dynamic_prompt

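# The judge-then-route pattern the Verifier supports, in isolation (a sketch
# with stand-in names, not the pipeline's actual dispatch code): the boolean
# verdict from verifier_judge decides which module runs next.
def _demo_verifier_routing(verdict: bool) -> str:
    route = {True: 'output', False: 'regenerate'}
    return route[verdict]

# _demo_verifier_routing(False) -> 'regenerate': an unsupported answer is sent back
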
class AugmentCluster():
    def __init__(self, module_list = []) -> None:
        self.module_list = module_list
        module_count = len(module_list)
        for i in range(module_count - 1):
            assert isinstance(module_list[i], LLM) and isinstance(module_list[i+1], LLM)
            module_list[i].set_target(module_list[i+1], post_processing = module_list[i].post_processing)

    def get_first_module(self):
        return self.module_list[0]

    def get_destinations(self):
        return self.module_list[-1].get_destinations()

    def reset(self):
        for module in self.module_list:
            module.reset()

    def update(self, config, update_info):
        print('updating the AugmentCluster', update_info)
        self.module_list[-1].update(config, update_info)

    def __str__(self):
        print('getting str of the cluster', ' -> '.join([str(module) for module in self.module_list]))
        return ' -> '.join([str(module) for module in self.module_list])

    def set_target(self, destination, condition = lambda self: True, post_processing = lambda x: x) -> None:
        self.module_list[-1].set_target(destination, condition, post_processing)

    def set_output(self, cond = lambda self: True, post_processing = lambda x: x, end = True):
        self.module_list[-1].set_output(cond, post_processing, end)

    def connect_to(self, pipeline = None) -> None:
        for module in self.module_list:
            module.connect_to(pipeline)
        pipeline.stored_clusters.append(self)

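# The chaining invariant above, reduced to plain objects (a sketch; _DemoNode
# is hypothetical and unrelated to the LLM class): each member forwards to the
# next, so the cluster behaves as one module whose entry point is the head and
# whose outgoing edges are the tail's.
class _DemoNode:
    def __init__(self, name):
        self.name, self.target = name, None

    def set_target(self, other):
        self.target = other

def _demo_cluster_chain():
    nodes = [_DemoNode(n) for n in ('select', 'cluster', 'generate')]
    for a, b in zip(nodes, nodes[1:]):
        a.set_target(b)                        # pairwise wiring, as in __init__ above
    return ' -> '.join(n.name for n in nodes)  # 'select -> cluster -> generate'
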
class Attribute_post_select(LLM):
    noisy = False
    model_name = 'function'
    def generate(self, head_prompt: dict = {}, dynamic_prompt: dict = {}):
        print('post_select', head_prompt, dynamic_prompt)
        docs = head_prompt['docs']
        ans_docs = one_paragraph(dynamic_prompt['span']).split("\n")
        # drop the leading 'Document [i]: ' prefix before splitting on the span delimiter
        spans = [ans_doc[14:].split("<SPAN_DELIM>") for ans_doc in ans_docs]
        msg = ''
        span_list = {}
        doc_map = {}
        j = 1
        i = 1
        for doc in spans:
            if doc != []:
                span_list[f'{i}'] = []
                msg += f'Document [{i}]:\n'
                for span in doc:
                    if len(span) > 3:
                        msg += f'{j}. {span}\n'
                        span_list[f'{i}'].append(f'{j}. {span.strip()}')
                        doc_map[str(j)] = str(i)
                        j += 1
                        docs = docs.replace(span.strip(), f'<highlight_start>{span.strip()}<highlight_end>')
            i += 1
        self.pipeline.head['doc_map'] = doc_map
        self.pipeline.head['docs'] = docs
        self.pipeline.head['span'] = msg
        self.pipeline.head['span_list'] = span_list
        return {'span_list': Prompt.UNABLE, 'doc_map': Prompt.UNABLE}

class Attribute_post_cluster(LLM):
    noisy = False
    model_name = 'function'
    def generate(self, head_prompt: dict = {}, dynamic_prompt: dict = {}):
        print('f1', head_prompt, dynamic_prompt)
        span_ls = head_prompt['span_list']
        doc_map = head_prompt['doc_map']
        span_list = [item for sublist in head_prompt['span_list'].values() for item in sublist]
        # the model is prompted to emit a Python-style list of clusters, e.g. [{"cluster":[6,7]}, ...]
        clusters = eval(one_paragraph(dynamic_prompt['cls'].strip()))
        self.pipeline.head['clusters'] = clusters
        def _form(cls):
            text = ''
            doc_list = cls['cluster']
            for doc_num in span_ls.keys():
                pieces = [str(i) for i in doc_list if doc_map.get(str(i), 'None') == doc_num]
                if pieces:
                    text += f'Document [{doc_num}]: \n' + '\n'.join([span_list[int(num)-1] for num in pieces]) + '\n'

            return text
        #print([{'span': _form(cls)} for cls in clusters])
        return [{'span': _form(cls), 'span_list': Prompt.UNABLE, 'doc_map': Prompt.UNABLE, 'clusters': Prompt.UNABLE, 'docs': Prompt.UNABLE} for cls in clusters if _form(cls)]

class Ranker(EvalModule):

    def __init__(self, prompt_maker=None, pipeline=None, self_prompt={}, criteria=None, iterative = True, max_turn = 3, parallel = False, post_processing = lambda x: x, fixed_turn = None) -> None:
        super().__init__(prompt_maker, pipeline, self_prompt, criteria, iterative = iterative, max_turn = max_turn, parallel = parallel)
        self.compare = []
        self.post_processing = post_processing
        if fixed_turn:
            self.fixed_turn = fixed_turn
        else:
            self.fixed_turn = max_turn
    def generate(self, head_prompt: dict = {}, dynamic_prompt: dict = {}):

        self.turns += 1
        result = {}
        p_data = {**head_prompt, **self.self_prompt, **dynamic_prompt}
        for criteria, get_data in self.get_data.items():
            data_dict = {}
            for k, v in get_data.items():
                if v == 'doc_cache':
                    data_dict[k] = self.pipeline.doc_cache.show_docs()
                else:
                    if v in p_data.keys():
                        data_dict[k] = p_data[v]
                    else:
                        data_dict[k] = self.pipeline.current_data[v]
            eval_func = self.eval_criteria[criteria]
            data = [data_dict]
            result[criteria] = eval_func(data)

        result = sum([value for key, value in result.items()])
        self.compare.append((result, dynamic_prompt))
        output = max(self.compare, key = lambda x: x[0])[1]
        destination = self.send()
        self.last_message = output
        if len(self.compare) == self.fixed_turn:
            self.compare = []
            if destination in self.conditions:
                return self.conditions[destination]['post_processing'](output)
            else:
                return self.post_processing(output)

        return {}

    def end_multi(self):
        self.compare = []
        return super().end_multi()


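# The best-of-n selection the Ranker performs, stripped of pipeline plumbing
# (a sketch; score_fn stands in for the summed criteria scores): keep
# (score, candidate) pairs and return the argmax once the fixed number of
# turns has been collected.
def _demo_rank(candidates, score_fn, fixed_turn=3):
    compare = []
    for cand in candidates[:fixed_turn]:
        compare.append((score_fn(cand), cand))
    return max(compare, key=lambda x: x[0])[1]

# _demo_rank(['a', 'bb', 'ccc'], len) -> 'ccc'
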
class AttributingModule(AugmentCluster):
    model_type = 'attributer'
    demo = {
"selection_instruction":"In this task, you are presented with several documents, which need to be summarized. As an intermediate step, you need to identify salient content within the documents. For each document, copy verbatim the salient spans, and use <SPAN_DELIM> as a delimiter between each consecutive span. IMPORTANT: The output must be of the format Document [<DOC_ID>]: <SPAN_DELIM>-delimited consecutive salient spans. IMPORTANT: Each salient content must be a single consecutive verbatim span from the corresponding passages. IMPORTANT: make sure the total number of copied words (from all documents) is around 200 words, and not more than 900.",
"selection_shot":"Document [1]: Cherrapunji Cherrapunji ( with the native name Sohra being more commonly used, and can also be spelled Cherrapunjee or Cherrapunji) is a subdivisional town in the East Khasi Hills district in the Indian state of Meghalaya. It is the traditional capital of aNongkhlaw \"hima\" (Khasi tribal chieftainship constituting a petty state), both known as Sohra or Churra. Cherrapunji has often been credited as being the wettest place on Earth, but for now nearby Mawsynram currently holds that distinction. Cherrapunji still holds the all-time record for the most rainfall in a calendar month for July 1861 and most rain in a year from August 1860 to July 1861, however: it received in\" \nDocument [2]: Radio relay station known as Akashvani Cherrapunji. It broadcasts on FM frequencies. Cherrapunji Cherrapunji (; with the native name Sohra being more commonly used, and can also be spelled Cherrapunjee or Cherrapunji) is a subdivisional town in the East Khasi Hills district in the Indian state of Meghalaya. It is the traditional capital of aNongkhlaw \"hima\" (Khasi tribal chieftainship constituting a petty state), both known as Sohra or Churra. Cherrapunji has often been credited as being the wettest place on Earth, but for now nearby Mawsynram currently holds that distinction. Cherrapunji still holds the all-time record for the most rainfall\" \nDocument [3]: \"Mawsynram Mawsynram () is a village in the East Khasi Hills district of Meghalaya state in north-eastern India, 65 kilometres from Shillong. Mawsynram receives one of the highest rainfalls in India. It is reportedly the wettest place on Earth, with an average annual rainfall of 11,872 mm, but that claim is disputed by Lloró, Colombia, which reported an average yearly rainfall of 12,717 mm between 1952 and 1989 and López de Micay, also in Colombia, which reported an annual 12,892 mm per year between 1960 and 2012. According to the \"Guinness Book of World Records\", Mawsynram received of rainfall in 1985. Mawsynram is located at 25° 18′\" \n\nAnswer: \nDocument [1]: <SPAN_DELIM>Cherrapunji has often been credited as being the wettest place on Earth<SPAN_DELIM> still holds the all-time record for the most rainfall in a calendar month for July 1861 and most rain in a year from August 1860 to July 1861<SPAN_DELIM> \nDocument [2]: <SPAN_DELIM>Cherrapunji has often been credited as being the wettest place on Earth<SPAN_DELIM>still holds the all-time record for the most rainfall<SPAN_DELIM> \nDocument [3]: <SPAN_DELIM>Mawsynram receives one of the highest rainfalls in India <SPAN_DELIM> but that claim is disputed by Lloró, Colombia, which reported an average yearly rainfall of 12,717 mm between 1952 and 1989 <SPAN_DELIM> López de Micay, also in Colombia, which reported an annual 12,892 mm per year between 1960 and 2012. <SPAN_DELIM>",
"clustering_instruction":"In this task, you are presented with several passages, where some parts are \"highlighted\" (namely, there are <highlight_start> and <highlight_end> tokens before and after each such span). The goal is to fuse all those highlights into a single summary. As an intermediate step, you need to cluster highlights that can be merged into a sentence (namely, each cluster will be later merged into one sentence). Make sure the clusters are in the same order you would then write the corresponding summary sentences. IMPORTANT: make sure there are at most 3 clusters, and no more than 3 highlights per cluster. IMPORTANT: The output must be of the format [\"cluster\":[comma-delimited highlights indices]]",
"clustering_shot":"Document [1]: Cherrapunji Cherrapunji ( with the native name Sohra being more commonly used, and can also be spelled Cherrapunjee or Cherrapunji) is a subdivisional town in the East Khasi Hills district in the Indian state of Meghalaya. It is the traditional capital of aNongkhlaw \"hima\" (Khasi tribal chieftainship constituting a petty state), both known as Sohra or Churra. <highlight_start>Cherrapunji has often been credited as being the wettest place on Earth<highlight_end>, but for now nearby Mawsynram currently holds that distinction. Cherrapunji <highlight_start>still holds the all-time record for the most rainfall in a calendar month for July 1861 and most rain in a year from August 1860 to July 1861<highlight_end>, however: it received in\" \nDocument [2]: Radio relay station known as Akashvani Cherrapunji. It broadcasts on FM frequencies. Cherrapunji Cherrapunji (; with the native name Sohra being more commonly used, and can also be spelled Cherrapunjee or Cherrapunji) is a subdivisional town in the East Khasi Hills district in the Indian state of Meghalaya. It is the traditional capital of aNongkhlaw \"hima\" (Khasi tribal chieftainship constituting a petty state), both known as Sohra or Churra. <highlight_start>Cherrapunji has often been credited as being the wettest place on Earth<highlight_end>, but for now nearby Mawsynram currently holds that distinction. <highlight_start>Cherrapunji still holds the all-time record for the most rainfall<highlight_end>\" \nDocument [3]: \"Mawsynram Mawsynram () is a village in the East Khasi Hills district of Meghalaya state in north-eastern India, 65 kilometres from Shillong. <highlight_start>Mawsynram receives one of the highest rainfalls in India<highlight_end>. It is reportedly the wettest place on Earth, with an average annual rainfall of 11,872 mm, <highlight_start>but that claim is disputed by Lloró, Colombia, which reported an average yearly rainfall of 12,717 mm between 1952 and 1989<highlight_end> and <highlight_start>López de Micay, also in Colombia, which reported an annual 12,892 mm per year between 1960 and 2012.<highlight_end> According to the \"Guinness Book of World Records\", Mawsynram received of rainfall in 1985. Mawsynram is located at 25° 18′\" \n\nThe highlighted spans are: \nDocument [1]: 1. Cherrapunji has often been credited as being the wettest place on Earth \n2. still holds the all-time record for the most rainfall in a calendar month for July 1861 and most rain in a year from August 1860 to July 1861 \nDocument [2]: \n3. Cherrapunji has often been credited as being the wettest place on Earth \n4. still holds the all-time record for the most rainfall \nDocument [3]: \n5. Mawsynram receives one of the highest rainfalls in India \n6. but that claim is disputed by Lloró, Colombia, which reported an average yearly rainfall of 12,717 mm between 1952 and 1989 \n7. López de Micay, also in Colombia, which reported an annual 12,892 mm per year between 1960 and 2012. \n\nAnswer: \nThe highlighted spans are clustered as follows: \n[{\"cluster\":[6,7]}, {\"cluster\":[5]},{\"cluster\":[1,2]}]",
"gen_instruction":"In this task, you are presented with several passages, where some parts are \"highlighted\" (namely, there are <highlight_start> and <highlight_end> tokens before and after each such span). You may also be presented with a prefix of the answer. Your job is to generate the next sentence of the answer, that covers all and only the \"highlighted\" spans. Make sure it connects well with the prefix (if it exists), and that it covers all and only the \"highlighted\" spans. Always cite for any factual claim. When citing several search results, use [1][2][3]. Cite at least one document and at most three documents in each sentence. If multiple documents support the sentence, only cite a minimum sufficient subset of the documents.",
"gen_shot":"Document [1]: Cherrapunji Cherrapunji ( with the native name Sohra being more commonly used, and can also be spelled Cherrapunjee or Cherrapunji) is a subdivisional town in the East Khasi Hills district in the Indian state of Meghalaya. It is the traditional capital of aNongkhlaw \"hima\" (Khasi tribal chieftainship constituting a petty state), both known as Sohra or Churra. <highlight_start>Cherrapunji has often been credited as being the wettest place on Earth<highlight_end>, but for now nearby Mawsynram currently holds that distinction. Cherrapunji <highlight_start>still holds the all-time record for the most rainfall in a calendar month for July 1861 and most rain in a year from August 1860 to July 1861<highlight_end>, however: it received in\" \nDocument [2]: Radio relay station known as Akashvani Cherrapunji. It broadcasts on FM frequencies. Cherrapunji Cherrapunji (; with the native name Sohra being more commonly used, and can also be spelled Cherrapunjee or Cherrapunji) is a subdivisional town in the East Khasi Hills district in the Indian state of Meghalaya. It is the traditional capital of aNongkhlaw \"hima\" (Khasi tribal chieftainship constituting a petty state), both known as Sohra or Churra. <highlight_start>Cherrapunji has often been credited as being the wettest place on Earth<highlight_end>, but for now nearby Mawsynram currently holds that distinction. <highlight_start>Cherrapunji still holds the all-time record for the most rainfall<highlight_end>\" \nDocument [3]: \"Mawsynram Mawsynram () is a village in the East Khasi Hills district of Meghalaya state in north-eastern India, 65 kilometres from Shillong. <highlight_start>Mawsynram receives one of the highest rainfalls in India<highlight_end>. It is reportedly the wettest place on Earth, with an average annual rainfall of 11,872 mm, <highlight_start>but that claim is disputed by Lloró, Colombia, which reported an average yearly rainfall of 12,717 mm between 1952 and 1989<highlight_end> and <highlight_start>López de Micay, also in Colombia, which reported an annual 12,892 mm per year between 1960 and 2012.<highlight_end> According to the \"Guinness Book of World Records\", Mawsynram received of rainfall in 1985. Mawsynram is located at 25° 18′\" \n\nPrefix: Several places on Earth claim to be the most rainy, such as Lloró, Colombia, which reported an average annual rainfall of 12,717 mm between 1952 and 1989, and López de Micay, Colombia, which reported an annual 12,892 mm between 1960 and 2012 [3]. \n\nThe highlighted spans are: \nDocument [3]: \n5. Mawsynram receives one of the highest rainfalls in India \n\nAnswer: \nThe next sentence is: \nHowever, the official record is held by Mawsynram, India with an average annual rainfall of 11,872 mm [3]."
    }
    PARA_SEP = '\n\n'
    selection_shot = demo['selection_instruction'] + PARA_SEP + demo['selection_shot'] + PARA_SEP
    cls_shot = demo['clustering_instruction'] + PARA_SEP + demo['clustering_shot'] + PARA_SEP
    gen_shot = demo['gen_instruction'] + PARA_SEP + demo['gen_shot'] + PARA_SEP
    prompt = Prompt(template='<shot><INST><question><docs><prefix><span><add>',
                    components={'INST': '{INST}\n\n',
                                'shot': '{shot}',
                                'question': 'Question:{question}\n\n',
                                'docs': '{docs}\n',
                                'span': 'The highlighted spans are: \n{span}\n\n',
                                'prefix': 'Prefix: {prefix}\n\n',
                                'add': 'Answer: \n{add}'
                                })
    def __init__(self, model) -> None:
        module_list = []
        select = LLM(model = model, prompt_maker = self.prompt, self_prompt={'INST': self.demo['selection_instruction'], 'shot': self.selection_shot, 'add': ''}, post_processing=make_as('span'), noisy=False)
        post_select = Attribute_post_select()
        clustering = LLM(model = model, prompt_maker = self.prompt, self_prompt={'INST': self.demo['clustering_instruction'], 'shot': self.cls_shot, 'add': 'The highlighted spans are clustered as follows:'}, share_model_with=select, post_processing=make_as('cls'), noisy=False)
        post_cls = Attribute_post_cluster()
        module_list = [select, post_select, clustering, post_cls]
        super().__init__(module_list)

MODEL_TYPE_MAPPING = {
    'retrieve': Retriever,
    'eval': EvalModule,
    'simplify': CitationSimplyfier,
    'verify': Verifier,
    'rank': Ranker,
    'attributing': AttributingModule
}
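# A minimal lookup sketch for the registry above (my_pipeline stands in for
# an existing Pipeline instance; arguments follow the constructors defined
# earlier in this file):
#
#     module_cls = MODEL_TYPE_MAPPING['verify']        # -> Verifier
#     verifier = module_cls(pipeline=my_pipeline)
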
citekit/evaluator/__init__.py
ADDED
File without changes

citekit/evaluator/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (162 Bytes)

citekit/evaluator/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (164 Bytes)

citekit/evaluator/__pycache__/evaluator.cpython-310.pyc
ADDED
Binary file (27.3 kB)

citekit/evaluator/__pycache__/evaluator.cpython-312.pyc
ADDED
Binary file (45.7 kB)

citekit/evaluator/evaluator.py
ADDED
@@ -0,0 +1,1118 @@
from nltk import sent_tokenize
import nltk
nltk.download('punkt')
import re
import random
import string
import transformers
import numpy as np
from citekit.utils.utils import *
from rouge import Rouge
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import copy
from tqdm import tqdm
import sys
import logging
from itertools import product, combinations
import time
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

PIPELINE_OUTPUT = 'output'
PIPELINE_DOC_CACHE = 'doc_cache'

autoais_model = None
autoais_tokenizer = None
qa_pipeline = None  # lazily constructed in compute_qa
get_docs_by_index = lambda i, docs: docs[i] if i < len(docs) else None
ais_LLM = None

QA_MODEL = "gaotianyu1350/roberta-large-squad"
AUTOAIS_MODEL = "google/t5_xxl_true_nli_mixture"
AUTOAIS_MODEL_ABSOLUTE = "/mnt/usercache/huggingface/t5_xxl_true_nli_mixture"

def get_cite(sent):
    # strip in-text citations like " [3]" from the sentence and return their zero-based ids
    return re.sub(r"\[\d+", "", re.sub(r" \[\d+", "", sent)).replace(" |", "").replace("]", ""), [int(r[1:]) - 1 for r in re.findall(r"\[\d+", sent)]


def entail(premise, claim):

    """
    Run inference for assessing AIS between a premise and hypothesis.
    Adapted from https://github.com/google-research-datasets/Attributed-QA/blob/main/evaluation.py
    """
    global autoais_model, autoais_tokenizer
    input_text = "premise: {} hypothesis: {}".format(premise, claim)
    input_ids = autoais_tokenizer(input_text, return_tensors="pt").input_ids.to(autoais_model.device)
    with torch.inference_mode():
        outputs = autoais_model.generate(input_ids, max_new_tokens=10)
    result = autoais_tokenizer.decode(outputs[0], skip_special_tokens=True)
    inference = 1 if result == "1" else 0
    return inference

def load_auto_ais():
    global autoais_model, autoais_tokenizer
    print('Initializing eval model for citation precision and recall...')
    try:
        autoais_model = AutoModelForSeq2SeqLM.from_pretrained(AUTOAIS_MODEL, torch_dtype=torch.bfloat16, device_map="auto")
        autoais_tokenizer = AutoTokenizer.from_pretrained(AUTOAIS_MODEL, use_fast=False)

    except Exception:
        print('Unable to load model from hub, trying to load from local path...')
        autoais_model = AutoModelForSeq2SeqLM.from_pretrained(AUTOAIS_MODEL_ABSOLUTE, torch_dtype=torch.bfloat16, device_map="auto")
        autoais_tokenizer = AutoTokenizer.from_pretrained(AUTOAIS_MODEL_ABSOLUTE, use_fast=False)
    print('Done!')

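# The scorer above serializes each (premise, hypothesis) pair into the
# single-string format the TRUE NLI checkpoint expects. A tiny illustration
# of that serialization (no model needed; the helper name is illustrative):
def _demo_nli_input(premise, claim):
    return "premise: {} hypothesis: {}".format(premise, claim)

# _demo_nli_input("Cats purr.", "A cat can purr.")
# -> 'premise: Cats purr. hypothesis: A cat can purr.'
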
def compute_mauve(data):
    """Compute MAUVE score."""

    logger.info("Computing MAUVE...")
    human_data = []
    model_data = []
    for item in data:
        # Remove ending punctuation and any new lines; truncate to 100 words
        human_data.append(
            ' '.join((item['question'] + " " + item['answer'].strip()).split()[:100]).rstrip(string.punctuation))
        model_data.append(
            ' '.join((item['question'] + " " + item['output'].strip()).split()[:100]).rstrip(string.punctuation))

    import mauve
    out = mauve.compute_mauve(
        p_text=human_data,
        q_text=model_data,
        device_id=0,
        max_text_length=512,
        verbose=True,
        batch_size=8,
        featurize_model_name="gpt2-large"
    )
    return out.mauve * 100


def compute_rouge_l(data):
    total = len(data)
    res = {
        "r": 0.0,
        "p": 0.0,
        "f": 0.0
    }
    rouge = Rouge()  # the scorer is stateless, so one instance serves all items
    for item in data:
        if item['output'] and item['answer']:
            scores = rouge.get_scores(item['output'], item['answer'])
            res['r'] += scores[0]['rouge-l']['r']
            res['p'] += scores[0]['rouge-l']['p']
            res['f'] += scores[0]['rouge-l']['f']
        else:
            print('Warning: no hypothesis or references')
    res['r'] /= total
    res['p'] /= total
    res['f'] /= total

    return res

# Note: compute_qa(data) below operates on whole datasets; this single-example
# variant is kept under a distinct name so it is not shadowed by that definition.
def compute_qa_single(question, output, short_answers, qa_pipeline=None):
    """Compute QA-based accuracy for one example.

    Returns:
        QA metrics (QA-EM, QA-F1, QA-Hit)
    """

    # Load model
    if not qa_pipeline:
        qa_pipeline = transformers.pipeline("question-answering", model=QA_MODEL, device='mps')  # Apple-silicon device; adjust for CUDA/CPU

    # Get prediction
    em, f1, bins = 0, 0, 0
    context = output if len(output) > 0 else " "
    result = qa_pipeline(question=question, context=context, handle_impossible_answer=True)
    loc_counter, loc_em, loc_f1 = 0, 0, 0
    print(result)
    prediction = result["answer"]

    loc_em = max([compute_exact(a, prediction) for a in short_answers])
    loc_f1 = max([compute_f1(a, prediction) for a in short_answers])
    loc_counter += 1

    em = loc_em / loc_counter
    f1 = loc_f1 / loc_counter
    bins = int(loc_em == loc_counter)
    return em, f1, bins

def compute_qa(data):
    """Compute QA-based accuracy.
    Args:
        data: requires fields `qa_pairs/short_answers` and `output`
    Returns:
        QA metrics (QA-EM, QA-F1, QA-Hit)
    """

    if 'qa_pairs' not in data[0] or data[0]['qa_pairs'] is None:
        #logger.warn("Warning: no QA pairs found in data")
        return {
            'QA-EM': 0,
            'QA-F1': 0,
            'QA-Hit': 0,
        }

    # Load model
    #logger.info("Loading the RoBERTa-large SQuAD model for QA-based accuracy...")
    global qa_pipeline
    if not qa_pipeline:
        qa_pipeline = transformers.pipeline("question-answering", model=QA_MODEL)
    #logger.info("Done")

    # Get prediction
    #logger.info("Computing the QA-based accuracy...")
    em, f1, bins = [], [], []
    for item in tqdm(data):
        question = [qa_pair['question'] for qa_pair in item['qa_pairs']]
        context = item['output'] if len(item['output']) > 0 else " "
        results = qa_pipeline(question=question, context=context, handle_impossible_answer=True)
        loc_counter, loc_em, loc_f1 = 0, 0, 0

        for idx, res in enumerate(results):
            answers = item["qa_pairs"][idx]["short_answers"]
            prediction = res["answer"]

            loc_em += max([compute_exact(a, prediction) for a in answers])
            loc_f1 += max([compute_f1(a, prediction) for a in answers])
            loc_counter += 1

        em.append(loc_em / loc_counter)
        f1.append(loc_f1 / loc_counter)
        bins.append(loc_em == loc_counter)

    return {
        'QA-EM': 100 * np.mean(em),
        'QA-F1': 100 * np.mean(f1),
        'QA-Hit': 100 * np.mean(bins)
    }


def cite_pr(sent_with_cite, docs = None, get_docs = get_docs_by_index, get_cite = get_cite, max_cite = None, rich_return = False):
    """
    :param sent_with_cite: ONE sentence with citations like [1][2][3]
    :param get_docs: maps a citation id to its document (by index, by default)
    :param docs: list of all the COMPLETE documents, with TITLE

    :return:
        number of citations (integer),
        recall (0 or 1),
        precision (number of relevant documents)

    optional (rich_return):
        multi_cite
        mcite_support
        mcite_overcite
    """
    if rich_return:
        raise NotImplementedError

    result = {'num_cites': 0, 'recall': 0, 'precision': 0, 'multi_cite': 0, 'mcite_support': 0, 'mcite_overcite': 0}
    sent, cites = get_cite(sent_with_cite)

    if not cites:
        return (0, 0, 0) if not rich_return else result  # no citations
    if max_cite:
        cites = cites[:max_cite]
    num_cites = len(cites)
    result['num_cites'] = num_cites

    refs = [get_docs(cite, docs) for cite in cites]
    if None in refs:
        return (num_cites, 0, 0) if not rich_return else result  # wrong citation(s)

    # recall
    recall = entail(premise=''.join(refs), claim=sent)
    result['recall'] = recall

    # precision
    precision = 0
    if num_cites == 1:
        precision = recall
    else:
        for idx, ref in enumerate(refs):
            if entail(premise=ref, claim=sent):
                precision += 1
            else:
                if not entail(premise=''.join([refs[i] for i in range(len(refs)) if i != idx]), claim=sent):
                    precision += 1
                elif recall:
                    result['mcite_overcite'] = 1
    result['precision'] = precision

    # other
    if num_cites > 1:
        result['multi_cite'] = 1
        if recall:
            result['mcite_support'] = 1


    return (num_cites, recall, precision) if not rich_return else result

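# How the precision rule above behaves on a concrete case, with a toy
# entailment function in place of the NLI model (illustrative only): a
# citation counts toward precision if its document alone entails the
# sentence, or if the remaining documents without it do not.
def _demo_cite_precision():
    docs = {1: 'cats purr.', 2: 'dogs bark.', 3: 'fish swim.'}
    sent = 'cats purr and dogs bark'

    def toy_entail(premise, claim):
        words = set(claim.split()) - {'and'}
        return int(all(w in premise for w in words))

    refs = [1, 2, 3]
    joint = ' '.join(docs[r] for r in refs)
    recall = toy_entail(joint, sent)            # 1: the joint set supports the sentence
    precision = 0
    for idx, r in enumerate(refs):
        if toy_entail(docs[r], sent):
            precision += 1
        elif not toy_entail(' '.join(docs[x] for i, x in enumerate(refs) if i != idx), sent):
            precision += 1                      # removing it breaks entailment, so it was needed
    return recall, precision                    # -> (1, 2): doc 3 is an overcite
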
def cite_pr_answer(answer, docs = None, get_docs = get_docs_by_index, get_cite = get_cite, max_cite = None, rich_return = False):
    epsilon = 1e-8
    num_c = 0
    recall = 0
    precision = 0
    sents = sent_tokenize(answer)
    for sent in sents:
        c, r, p = cite_pr(sent, get_docs=get_docs, docs=docs, get_cite=get_cite, max_cite=max_cite, rich_return=rich_return)
        num_c += c
        recall += r
        precision += p
    # epsilon guards against division by zero when there are no sentences or citations
    return recall/(len(sents) + epsilon), precision/(num_c + epsilon)


def _run_nli_autoais(passage, claim, test = False):
    """
    Run inference for assessing AIS between a premise and hypothesis.
    Adapted from https://github.com/google-research-datasets/Attributed-QA/blob/main/evaluation.py
    """
    if not test:
        global autoais_model, autoais_tokenizer
        if not autoais_model:
            load_auto_ais()
        input_text = "premise: {} hypothesis: {}".format(passage, claim)
        input_ids = autoais_tokenizer(input_text, return_tensors="pt").input_ids.to(autoais_model.device)
        with torch.inference_mode():
            outputs = autoais_model.generate(input_ids, max_new_tokens=10)
        result = autoais_tokenizer.decode(outputs[0], skip_special_tokens=True)
        inference = 1 if result == "1" else 0
        return inference
    else:
        res = random.randint(0, 1)

        return res

def _run_llm_autoais(passage, claim):
    global ais_LLM
    assert ais_LLM
    return int(ais_LLM.generate(premise = passage, claim = claim))

def test_compute_autoais(data):
    print(data[0]['docs'][:5])
    print(data[0]['output'][:5])
    return {
        "citation_rec": random.randint(0, 100),
        "citation_prec": random.randint(0, 100),
    }

def compute_autoais(data,
                    decontext=False,
                    concat=False,
                    qampari=False,
                    at_most_sents=3,
                    at_most_citations=3,
                    entail_function=_run_nli_autoais):
    """
    Compute AutoAIS score.

    Args:
        data: requires field `output` and `docs`
              - docs should be a list of items with fields `title` and `text` (or `phrase` and `sent` for QA-extracted docs)
        citation: check citations and use the corresponding references.
        decontext: decontextualize the output
    """

    global autoais_model, autoais_tokenizer


    ais_scores = []
    ais_scores_prec = []

    sent_total = 0
    sent_mcite = 0
    sent_mcite_support = 0
    sent_mcite_overcite = 0
    autoais_log = []
    for item in tqdm(data):
        # Get sentences by using NLTK
        if qampari:
            print('now qampari...')
            sents = [item['question'] + " " + x.strip() for x in
                     item['output'].rstrip().rstrip(".").rstrip(",").split(",")]
        else:
            sents = sent_tokenize(item['output'])[:at_most_sents]
        if len(sents) == 0:
            ais_scores.append(0.0)
            ais_scores_prec.append(0.0)  # len(sents))
            continue

        target_sents = [remove_citations(sent).strip() for sent in sents]

        entail = 0
        entail_prec = 0
        total_citations = 0
        for sent_id, sent in enumerate(sents):
            target_sent = target_sents[sent_id]  # Citation removed and (if opted for) decontextualized
            joint_entail = -1  # Undecided

            # Find references
            #ref = [int(r[1:]) - 1 for r in re.findall(r"\[\d+", sent)] # In-text citation ids start from 1
            matches = re.findall(r"\[(\d+(?:,\s*\d+)*)\]", sent)
            ref = [int(num)-1 for match in matches for num in match.replace(' ', '').split(',')]
            if len(ref) == 0:
                # No citations
                joint_entail = 0
            elif any([ref_id >= len(item['docs']) for ref_id in ref]):
                # Citations out of range
                joint_entail = 0
            else:
                if at_most_citations is not None:
                    ref = ref[:at_most_citations]
                total_citations += len(ref)
                joint_passage = '\n'.join([(item['docs'][psgs_id]) for psgs_id in ref])

            # If not directly rejected by a citation format error, calculate the recall score
            if joint_entail == -1:
                joint_entail = entail_function(joint_passage, target_sent)
                autoais_log.append({
                    #"question": item['question'],
                    "output": item['output'],
                    "claim": sent,
                    "passage": [joint_passage],
                    "model_type": "NLI",
                    "model_output": joint_entail,
                })

            entail += joint_entail
            if len(ref) > 1:
                sent_mcite += 1

            # calculate the precision score if applicable
            if joint_entail and len(ref) > 1:
                sent_mcite_support += 1
                # Precision check: did the model cite any unnecessary documents?
                for psgs_id in ref:
                    # condition A: the cited document alone supports the sentence
                    passage = item['docs'][psgs_id]
                    nli_result = entail_function(passage, target_sent)

                    # condition B: the remaining cited documents do not support it without this one
                    if not nli_result:
                        subset_exclude = copy.deepcopy(ref)
                        subset_exclude.remove(psgs_id)
                        passage = '\n'.join([item['docs'][pid] for pid in subset_exclude])
                        nli_result = entail_function(passage, target_sent)
                        if nli_result:  # psgs_id is not necessary
                            flag = 0
                            sent_mcite_overcite += 1
                        else:
                            entail_prec += 1
                    else:
                        entail_prec += 1
            else:
                entail_prec += joint_entail
        sent_total += len(sents)
        ais_scores.append(entail / len(sents))
        ais_scores_prec.append(entail_prec / total_citations if total_citations > 0 else 0)  # len(sents))

    if sent_mcite > 0 and sent_mcite_support > 0:
        print(
            "Among all sentences, %.2f%% have multiple citations, among which %.2f%% are supported by the joint set, among which %.2f%% overcite." % (
                100 * sent_mcite / sent_total,
                100 * sent_mcite_support / sent_mcite,
                100 * sent_mcite_overcite / sent_mcite_support
            ))

    return {
        "citation_rec": 100 * np.mean(ais_scores),
        "citation_prec": 100 * np.mean(ais_scores_prec),
    }

def compute_claims_test(data):
    print(data[0]['claims'])
    print(data[0][PIPELINE_OUTPUT])
    return random.randint(1, 100)

def compute_claims(data):
    global autoais_model, autoais_tokenizer
    if autoais_model is None:
        #logger.info("Loading AutoAIS model...")
        autoais_model = AutoModelForSeq2SeqLM.from_pretrained(AUTOAIS_MODEL, torch_dtype=torch.bfloat16,
                                                              device_map="auto")
        autoais_tokenizer = AutoTokenizer.from_pretrained(AUTOAIS_MODEL, use_fast=False)
    #logger.info("Computing claims...")
    scores = []
    for item in tqdm(data):
        normalized_output = remove_citations(item['output'])
        entail = 0
        claims = item["claims"]
        for claim in claims:
            entail += _run_nli_autoais(normalized_output, claim)
        scores.append(entail / len(claims))
    return 100 * np.mean(scores)


# citation appropriateness
def check_if_citations_needed(passages, answer, grain):

    def _format_document(doc):
        """Format document for AutoAIS.

        if "sent" in doc:
            # QA-extracted docs
            return "Title: %s\n%s" % (doc['title'], doc['sent'])
        else:
            return "Title: %s\n%s" % (doc['title'], doc['text'])
        """
        return doc

    global autoais_model, autoais_tokenizer
    if autoais_model is None and False:  # loading is disabled here; _run_nli_autoais loads lazily
        #logger.info("Loading AutoAIS model...")
        autoais_model = AutoModelForSeq2SeqLM.from_pretrained(AUTOAIS_MODEL, torch_dtype=torch.bfloat16,
                                                              device_map="auto")
        autoais_tokenizer = AutoTokenizer.from_pretrained(AUTOAIS_MODEL, use_fast=False)

    if grain == "over_fine" or grain == "more_over_fine":
        num_passages = len(passages)
        passages_per_chunk = num_passages // 5  # Divide passages evenly into 5 chunks
        remainder = num_passages % 5  # Handle remaining passages
        passages_five = []
        start_idx = 0
        for i in range(5):
            end_idx = start_idx + passages_per_chunk
            if remainder > 0:
                end_idx += 1
                remainder -= 1
            chunk_passages = passages[start_idx:end_idx]
            passages_five.append('\n'.join([_format_document(p) for p in chunk_passages]))
            start_idx = end_idx
        passages = passages_five
        combinations_3 = combinations(passages, 3)  # all combinations of three passages
        for combination in combinations_3:
            joint_passage = '\n'.join(
                [passage for passage in combination])  # join the three passages into one string, preserving format
            entail = _run_nli_autoais(joint_passage, answer)
            if entail == 1:
                return 1
        return 0

    else:
        if len(passages) >= 3:  # normal granularity
            combinations_3 = combinations(passages, 3)
            for combination in combinations_3:
                joint_passage = '\n'.join(
                    [_format_document(passage) for passage in combination])
                entail = _run_nli_autoais(joint_passage, answer)
                if entail == 1:
                    return 1
            return 0
        else:  # coarse granularity
            joint_passage = '\n'.join(
                [_format_document(passage) for passage in passages])
            entail = _run_nli_autoais(joint_passage, answer)
            if entail == 1:
                return 1
            else:
                return 0


# citation granularity
def find_permutations(n, m):
    '''
    :param n: target sum of the digits
    :param m: sequence length
    :return: all length-m sequences of non-negative integers that sum to n
    '''
    # Generate all possible sequences of length m
    all_sequences = list(product(range(n + 1), repeat=m))
    #print('all_sequences', all_sequences)

    # Filter sequences where the sum of digits equals n
    valid_sequences = [seq for seq in all_sequences if sum(seq) == n]
    return valid_sequences

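# For example, distributing 2 salient spans across 3 documents:
#
#     find_permutations(2, 3)
#     # -> [(0, 0, 2), (0, 1, 1), (0, 2, 0), (1, 0, 1), (1, 1, 0), (2, 0, 0)]
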
def get_subspans(list_span, span_count):
    # all contiguous windows of length span_count over the span list
    list_subspan = []
    for i in range(0, len(list_span) - span_count + 1):
        list_subspan.append(list_span[i: i + span_count])

    return list_subspan

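# For example:
#
#     get_subspans(['a', 'b', 'c', 'd'], 2)
#     # -> [['a', 'b'], ['b', 'c'], ['c', 'd']]
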
def get_all_span_comb(list_list_span, target_span_count=-1):
    if target_span_count == -1:  # enumerate subsets of every size
        max_span_count = len(sum(list_list_span, []))
        doc_count = len(list_list_span)
        list_span_comb_all = []
        for span_count in range(1, max_span_count + 1):
            list_comb = find_permutations(span_count, doc_count)  # all ways to distribute this many spans over the documents

            list_span_comb = []  # all combinations of the current size
            for comb in list_comb:
                list_list_subspan = []

                for idx_doc, span_count_doc in enumerate(comb):
                    list_subspan = get_subspans(list_list_span[idx_doc], span_count_doc)
                    if len(list_subspan) == 0:
                        list_list_subspan = None
                        break
                    list_list_subspan.append(list_subspan)

                if list_list_subspan:
                    list_span_comb_cur = [sum(list(combination), []) for combination in product(*list_list_subspan)]
                    list_span_comb_cur = list(set([tuple(span_comb) for span_comb in list_span_comb_cur]))

                    list_span_comb += list_span_comb_cur
            list_span_comb_all += list_span_comb
        list_span_comb_all = set(list_span_comb_all)
    else:  # combinations of one given size only
        doc_count = len(list_list_span)
        list_comb = find_permutations(target_span_count, doc_count)

        list_span_comb = []  # all combinations of the current size
        for comb in list_comb:
            list_list_subspan = []

            for idx_doc, span_count_doc in enumerate(comb):
                list_subspan = get_subspans(list_list_span[idx_doc], span_count_doc)
                if len(list_subspan) == 0:
                    list_list_subspan = None
                    break
                list_list_subspan.append(list_subspan)

            if list_list_subspan:
                list_span_comb_cur = [combination for combination in product(*list_list_subspan)]
                for idx in range(len(list_span_comb_cur)):
                    list_span_comb_cur[idx] = tuple([tuple(span_comb) for span_comb in list_span_comb_cur[idx]])

                list_span_comb += list_span_comb_cur
        list_span_comb_all = list_span_comb
        list_span_comb_all = set(list_span_comb_all)
    return list_span_comb_all


def run_converge_2(list_list_span=None, sentence=None):
    '''
    Assumption: if a longer text does not entail the sentence, none of its sub-spans can entail it either.
    The span count decreases monotonically (which exposes more pruning opportunities).
    The final gold set may be off by one span.
    '''
    ######
    #print('origin nli count', len(get_all_span_comb(list_list_span, target_span_count=-1)))  # every possible sub-span combination of the given text
    max_span_count = len(sum(list_list_span, []))  # total number of spans

    set_comb_hash = set([])

    ### binary-style search over the span count
    nli_count = 0
    skip_count = 0
    list_list_span_gold = copy.copy(list_list_span)  # current exactly-entailing span set

    span_count_min, span_count_max = 1, max_span_count
    start_time = time.time()
    timeout = 300
    while span_count_min < span_count_max:  # each iteration looks for a smaller entailing combination
        span_count_cur = span_count_max - 1
        flag_find = False
        if time.time() - start_time > timeout:
            print('timeout!')
            list_list_span_gold = []
            break
        ### if some combination entails, keep searching with fewer spans
        ### if none entails, stop (more spans would be needed)
        # all possible sub-span combinations of length span_count_max - 1
        set_comb_cur = get_all_span_comb(list_list_span, target_span_count=span_count_cur)

        list_comb_cur = list(set_comb_cur)
        random.shuffle(list_comb_cur)
        for comb in list_comb_cur:
            list_list_span_cur = [list(t) for t in comb]
            list_span_cur = sum(list_list_span_cur, [])
            str_text = ' '.join(list_span_cur)  # TODO: unify the stringification

            if hash(str_text) in set_comb_hash:
                skip_count += 1
                continue

            #### ⚠️ swap in a different NLI function here if needed
            nli_label = _run_nli_autoais(str_text, sentence)  # TODO: nli label function
            nli_count += 1

            if nli_label == 1:  # as soon as one combination entails, go straight to fewer spans
                list_list_span_gold = copy.copy(list_list_span_cur)
                span_count_max = span_count_cur  # tighten the upper bound on the span count
                flag_find = True
                # print(f"find nli!, nli_count: {nli_count}, skip_count: {skip_count}, len(set_comb_hash): {len(set_comb_hash)}", )
                break
            else:  # no entailment: prune all of its subsets
                set_comb_cur_del = get_all_span_comb(list_list_span_cur, target_span_count=-1)
                set_comb_hash_cur = set([hash(' '.join(list(tuple_comb_))) for tuple_comb_ in set_comb_cur_del])  # TODO: unify the stringification

                set_comb_hash |= set_comb_hash_cur
        if flag_find == False:
            print(f"CAN'T find nli!, nli_count: {nli_count}, skip_count: {skip_count}, len(set_comb_hash): {len(set_comb_hash)}", )
            break
    span_count_gold = span_count_max  # span count of the gold set
    print('len(set_comb_del)', len(set_comb_hash))
    print('nli_count', nli_count, 'skip_count', skip_count, 'span_count_gold', span_count_gold)
    return list_list_span_gold

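# The subset-pruning assumption above in miniature (illustrative only): once
# a combination fails the entailment check, every subset of it is recorded
# and skipped, so only genuinely new candidates reach the NLI model.
def _demo_monotone_pruning():
    failed = {('a', 'b'), ('a',), ('b',)}   # ('a','b') failed, so its subsets were pruned too
    candidates = [('a',), ('b',), ('k',)]
    tried = [c for c in candidates if c not in failed]
    return tried  # -> [('k',)]
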
def compute_autoais_grained(data,
                            at_most_citations=3, method='ALCE', grain='default'):

    """
    Compute AutoAIS score.

    Args:
        data: requires field `output` and `docs`
              - docs should be a list of items with fields `title` and `text` (or `phrase` and `sent` for QA-extracted docs)
        citation: check citations and use the corresponding references.
        decontext: decontextualize the output
    """
    global autoais_model, autoais_tokenizer
    if autoais_model is None and False:  # loading is disabled here; _run_nli_autoais loads lazily
        #logger.info("Loading AutoAIS model...")
        autoais_model = AutoModelForSeq2SeqLM.from_pretrained(AUTOAIS_MODEL, torch_dtype=torch.bfloat16,
                                                              device_map="auto")
        autoais_tokenizer = AutoTokenizer.from_pretrained(AUTOAIS_MODEL, use_fast=False)
    def _format_document(doc):

        """Format document for AutoAIS."""
        if isinstance(doc, dict):
            if "sent" in doc:
                # QA-extracted docs
                return "Title: %s\n%s" % (doc['title'], doc['sent'])
            else:
                return "Title: %s\n%s" % (doc['title'], doc['text'])
        elif isinstance(doc, str):
            return doc


    #logger.info(f"Running AutoAIS...")

    ais_scores_need = []  # whether a citation is needed
    ais_scores = []  # quote_recall
    ais_doc_scores = []  # doc_recall

    sent_total = 0

    autoais_log = []
    granularity_list = []
    skipped = 0
    for item in tqdm(data):
        output = item['output']

        if method == 'baseline':
            model_answer = item['output_parse']['answer']
            answer = ''
            reference = {}
            span_contents = {}
            if not model_answer["text"].endswith("."):
                model_answer["text"] += "."
            answer += " " + model_answer["text"]
            spans = model_answer['reference']
            for span in spans:
                match = re.match(r'^(\d+)\.', span)
                if match:
                    span_number = match.group(1)
                    span_content = span.split('. ', 1)[1].strip()  # the content after the leading "1. "
                    span_contents[span_number] = span_content
            reference.update(span_contents)

            item['output_answer'] = answer.strip()
            item['output_ref_span'] = reference
            output = item['output_answer']

        elif method == 'ALCE':
            # match "According to Document [i]"
            pattern_doc = r"According to Document \[(\d+)\]"
            # match "(Title: Godfrey Chitalu)"
            pattern_title = r"\(Title: [^\)]+\)"

            output = re.sub(pattern_doc, r"[\1]", output)
            output = re.sub(pattern_title, "", output)
            output = output.strip().split("\n")[0]
            output = output.replace("<|im_end|>", "")
        # Get sentences by using NLTK
        sents = sent_tokenize(output)[:3]
        if len(sents) == 0:
            continue

        target_sents = [remove_citations(sent).strip() for sent in sents]
        output_ref_span = item.get('output_ref_span', {})
        # sent_joint_passage = '\n'.join([_format_document(doc) for doc in item['docs']])

        entail = 0
        entail_doc = 0
        total_citations = 0
        need_citations_sentences = 0  # number of sentences in this answer that need a citation
        correct_predictions = 0  # number of clauses where "citation needed?" was predicted correctly

        for sent_id, sent in enumerate(sents):
            target_sent = target_sents[sent_id]  # Citation removed and (if opted for) decontextualized
            joint_entail = -1  # Undecided
            joint_doc_entail = -1

            # 1. appropriateness:
            # does this sentence need a citation at all?
            need_citations = check_if_citations_needed(item['docs'], target_sent, grain)


            if method == 'baseline':
                # Find reference numbers
                ref_mark = [int(r[1:]) for r in re.findall(r"\{\d+", sent)]
                # match the cited (concatenated) spans back to their documents
                ref, ref_span = match_document(ref_mark, output_ref_span)
                #logger.info(f"For `{target_sent}`, find citations {ref}")
                ref_id = [x - 1 for x in ref]
                processed_refs = set()
                ref_passage = []
                for psgs_id in ref_id:
                    if 0 <= psgs_id < len(item['docs']) and psgs_id not in processed_refs:
                        ref_passage.append(_format_document(item['docs'][psgs_id]))
                        processed_refs.add(psgs_id)
                    elif psgs_id in processed_refs:
                        print("Warning: psgs_id already processed:", psgs_id + 1)
                    else:
                        print("Error: psgs_id out of range:", psgs_id + 1)

                joint_span = '\n'.join(ref_span)
                joint_passage = '\n'.join(ref_passage)

            elif method == 'ALCE':
                ref = list(set([int(r[1:]) for r in re.findall(r"\[\d+", sent)]))
                #logger.info(f"For `{target_sent}`, find citations {ref}")
                ref_id = list(set([int(r[1:]) - 1 for r in re.findall(r"\[\d+", sent)]))
                processed_refs = set()
                ref_passage = []
                for psgs_id in ref_id:
                    if 0 <= psgs_id < len(item['docs']) and psgs_id not in processed_refs:
                        ref_passage.append(_format_document(item['docs'][psgs_id]))
                        processed_refs.add(psgs_id)
                    elif psgs_id in processed_refs:
                        print("Warning: psgs_id already processed:", psgs_id + 1)
                    else:
                        print("Error: psgs_id out of range:", psgs_id + 1)
                ref_span = ref_passage
                joint_passage = '\n'.join(ref_passage)
                joint_span = joint_passage


            autoais_log.append({
                "question": item['question'],
                "output_answer": item['output'],
                "docs": item['docs'],
                "claim": {
                    "sentence": sent,
                    "if_citations_needed": need_citations,
                    "has_reference": ref,
                    "doc_recall": None,
                    "quote_recall": None,
                    "granularity_score": None,
                    "granularity_span": None
                }
            })

            if len(ref) == 0:
                # No citations
                joint_entail = 0
                joint_doc_entail = 0
            elif any([r > len(item['docs']) for r in ref]):
                # Citations out of range
                joint_entail = 0
                joint_doc_entail = 0
            else:
                if at_most_citations is not None:
                    ref = ref[:at_most_citations]
                total_citations += len(ref)

            # update the count of correct "citation needed?" predictions
            if_citations_needed = autoais_log[-1]["claim"]["if_citations_needed"]
            has_reference = autoais_log[-1]["claim"]["has_reference"]
            if (if_citations_needed == 1 and has_reference) or (if_citations_needed == 0 and not has_reference):
                correct_predictions += 1
            #logger.info("citation appropriateness finished")

            # 2. citation correctness is only computed when a citation is needed
            if need_citations and has_reference:  # both needed and present; otherwise the two recall metrics are skipped
                start_time = time.time()
                need_citations_sentences += 1
                # 2.(1): quote_corr
                # If not directly rejected by a citation format error, calculate the recall score
                if joint_entail == -1:
                    # φ(premise, hypothesis): does the concatenation of all cited spans entail the model output?
                    joint_entail = _run_nli_autoais(joint_span, target_sent)
                entail += joint_entail
                autoais_log[-1]["claim"]["quote_recall"] = joint_entail
                #logger.info(f"citation recall finished, recall is {joint_entail}")

                # 2.(2): doc_corr
                if joint_doc_entail == -1:
                    if method == 'ALCE':
                        joint_doc_entail = joint_entail
                    elif method == 'baseline':
                        joint_doc_entail = _run_nli_autoais(joint_passage, target_sent)
                entail_doc += joint_doc_entail
                autoais_log[-1]["claim"]["doc_recall"] = joint_doc_entail
                #print(f"the total time for two recall is {time.time() - start_time}")



                # 4. granularity is only computed when quote_corr = 1 (i.e. the concatenated quotes entail the output)
                start_time = time.time()
                if joint_entail:
                    all_clauses = []
                    clauses_first_three = []
                    # iterate over each distinct this_span
                    #logger.info("calculating granularity")
                    if len(ref_span) > 5:
                        print("Too many quotations!")
                        autoais_log[-1]["claim"]["granularity_score"] = None
                        autoais_log[-1]["claim"]["granularity_span"] = 0
                    else:
                        for idx, this_span in enumerate(ref_span):
                            #logger.info(f"this span is {this_span}")
                            # split the cited span into clauses
                            clauses = re.split(r'([,.])', this_span)
                            clauses = [clause.strip() for clause in clauses if
                                       clause.strip() and any(char.isalnum() for char in clause.strip())]
|
890 |
+
all_clauses.append(clauses)
|
891 |
+
if idx<3:
|
892 |
+
clauses_first_three.append(clauses)
|
893 |
+
|
894 |
+
max_span_count = len(sum(all_clauses, []))
|
895 |
+
if max_span_count==0:
|
896 |
+
continue
|
897 |
+
doc_count = len(all_clauses)
|
898 |
+
min_comb_length=float('inf')
|
899 |
+
|
900 |
+
if method=="ALCE" and grain=="default":
|
901 |
+
gold_span_res=run_converge_2(clauses_first_three,target_sent)
|
902 |
+
else:
|
903 |
+
gold_span_res = run_converge_2(all_clauses, target_sent)
|
904 |
+
# gold结果
|
905 |
+
merged_gold_span_res = []
|
906 |
+
|
907 |
+
# 遍历嵌套列表,并将其中的子列表合并到大列表中
|
908 |
+
for sublist in gold_span_res:
|
909 |
+
merged_gold_span_res.extend(sublist)
|
910 |
+
autoais_log[-1]["claim"]["granularity_span"] = merged_gold_span_res
|
911 |
+
min_comb_length=len(merged_gold_span_res)
|
912 |
+
if min_comb_length!=float('inf'):
|
913 |
+
granularity_score = min_comb_length / max_span_count
|
914 |
+
granularity_list.append(granularity_score)
|
915 |
+
autoais_log[-1]["claim"]["granularity_score"] = granularity_score
|
916 |
+
|
917 |
+
|
918 |
+
print(autoais_log[-1]["claim"]["granularity_span"])
|
919 |
+
print(autoais_log[-1]["claim"]["granularity_score"])
|
920 |
+
print(f"the total time for granularity is {time.time() - start_time}")
|
921 |
+
else:#不需要引用或没有引用
|
922 |
+
autoais_log[-1]['claim']['recall']=None
|
923 |
+
autoais_log[-1]["claim"]["granularity_score"]=None
|
924 |
+
autoais_log[-1]["claim"]["granularity_span"]=None
|
925 |
+
|
926 |
+
|
927 |
+
sent_total += len(sents)
|
928 |
+
ais_scores_need.append(correct_predictions / len(sents)) #是否正确判断需不需要引用:正确判断/总
|
929 |
+
if need_citations_sentences!=0: # recall:能entail的/需要引用的
|
930 |
+
ais_scores.append(entail / need_citations_sentences)
|
931 |
+
ais_doc_scores.append(entail_doc / need_citations_sentences)
|
932 |
+
|
933 |
+
#过滤None
|
934 |
+
granularity_list = [value for value in granularity_list if value is not None]
|
935 |
+
|
936 |
+
#logger.info(f"skipped {skipped}")
|
937 |
+
#autoais_log.append(f"skipped {skipped}")
|
938 |
+
##print(autoais_log)
|
939 |
+
# print(ais_scores_need,ais_doc_scores,ais_scores,granularity_list)
|
940 |
+
return {
|
941 |
+
"citation_correct_prediction": 100 * np.mean(ais_scores_need),
|
942 |
+
"citation_doc_rec":100 * np.mean(ais_doc_scores),
|
943 |
+
"citation_quote_rec": 100 * np.mean(ais_scores),
|
944 |
+
"citation_granularity": 100 * np.mean(granularity_list)
|
945 |
+
} #autoais_log
|
946 |
+
|
947 |
+
def compute_qampari_f1(data, cot=False):
|
948 |
+
prec = []
|
949 |
+
rec = []
|
950 |
+
rec_top5 = []
|
951 |
+
f1 = []
|
952 |
+
f1_top5 = []
|
953 |
+
|
954 |
+
num_preds = []
|
955 |
+
for item in data:
|
956 |
+
if cot:
|
957 |
+
if ":" in item['output']:
|
958 |
+
o = ':'.join(item['output'].split(":")[1:]) # try to separate the COT part and the answer list part.
|
959 |
+
else:
|
960 |
+
o = ""
|
961 |
+
else:
|
962 |
+
o = item['output']
|
963 |
+
preds = [normalize_answer(x.strip()) for x in remove_citations(o).rstrip().rstrip(".").rstrip(",").split(",")]
|
964 |
+
preds = [p for p in preds if len(p) > 0] # delete empty answers
|
965 |
+
#print(preds)
|
966 |
+
num_preds.append(len(preds))
|
967 |
+
answers = [[normalize_answer(x) for x in ans] for ans in item['answers']]
|
968 |
+
flat_answers = [item for sublist in answers for item in sublist]
|
969 |
+
#print(flat_answers)
|
970 |
+
prec.append(sum([p in flat_answers for p in preds]) / len(preds) if len(preds) > 0 else 0)
|
971 |
+
#print(prec)
|
972 |
+
rec.append(sum([any([x in preds for x in a]) for a in answers]) / len(answers))
|
973 |
+
rec_top5.append(min(5, sum([any([x in preds for x in a]) for a in answers])) / min(5, len(answers)))
|
974 |
+
if (prec[-1] + rec[-1]) == 0:
|
975 |
+
f1.append(0)
|
976 |
+
else:
|
977 |
+
f1.append(2 * prec[-1] * rec[-1] / (prec[-1] + rec[-1]))
|
978 |
+
if (prec[-1] + rec_top5[-1]) == 0:
|
979 |
+
f1_top5.append(0)
|
980 |
+
else:
|
981 |
+
f1_top5.append(2 * prec[-1] * rec_top5[-1] / (prec[-1] + rec_top5[-1]))
|
982 |
+
|
983 |
+
return {
|
984 |
+
"num_preds": np.mean(num_preds),
|
985 |
+
"qampari_prec": 100 * np.mean(prec),
|
986 |
+
"qampari_rec": 100 * np.mean(rec),
|
987 |
+
"qampari_rec_top5": 100 * np.mean(rec_top5),
|
988 |
+
"qampari_f1": 100 * np.mean(f1),
|
989 |
+
"qampari_f1_top5": 100 * np.mean(f1_top5),
|
990 |
+
}
|
991 |
+
|
992 |
+
def compute_length(data):
|
993 |
+
return sum(len(item['output'].split(' '))for item in data)/(len(data))
|
994 |
+
|
995 |
+
|
996 |
+
if __name__ =='__main__':
|
997 |
+
#question = "Why did New York City try to ban food donations to the poor?"
|
998 |
+
#output = "New York City, under Mayor Michael Bloomberg's administration, tried to ban food donations to the poor mainly due to concerns about the nutritional content of the donated food. The city argued that it couldn't inspect donated food for its salt, fat, and fiber content, thereby making it hard to control the nutritional quality of the food served to its homeless population [1][2][3]. Critics of this policy, however, have claimed such an approach demonstrated excessive control over people's eating habits and lacked common sense [2]. Despite the ban, many organizations like the New York City Rescue Mission continued to serve needy citizens through food donations [5]."
|
999 |
+
#compute_qa(question,output,['',''])
|
1000 |
+
pass
|
1001 |
+
|
1002 |
+
|
1003 |
+
|
1004 |
+
class Evaluator():
|
1005 |
+
autoais_model_load = False
|
1006 |
+
|
1007 |
+
eval_criteria = {'test_pr':test_compute_autoais,'cite_recall_precision':compute_autoais, 'pr':compute_autoais,'qa':compute_qa,'rouge': compute_rouge_l,'claims':compute_claims, 'qampari':compute_qampari_f1,'length':compute_length,'str_em':compute_str_em,'grained':compute_autoais_grained,'cite_recall_precision_llm':lambda data: compute_autoais(data=data,entail_function=_run_llm_autoais),'mauve':compute_mauve}
|
1008 |
+
def __init__(self,criteria= None, pipeline = None, ais_model = None) -> None:
|
1009 |
+
self.eval_criteria = Evaluator.eval_criteria
|
1010 |
+
self.pipeline = pipeline
|
1011 |
+
self.get_data = {}
|
1012 |
+
self.ais_model = ais_model
|
1013 |
+
global ais_LLM
|
1014 |
+
ais_LLM = ais_model
|
1015 |
+
|
1016 |
+
|
1017 |
+
|
1018 |
+
def set_eval(self, eval_c, **data_get_key):
|
1019 |
+
if eval_c in self.get_data.keys():
|
1020 |
+
print(f'Already set! {eval_c}')
|
1021 |
+
return
|
1022 |
+
if eval_c in self.eval_criteria.keys():
|
1023 |
+
self.get_data[eval_c] = data_get_key
|
1024 |
+
if eval_c == 'cite_recall_precision':
|
1025 |
+
global autoais_model, autoais_tokenizer
|
1026 |
+
if not Evaluator.autoais_model_load:
|
1027 |
+
print('Initializing eval model for citation precision and recall...')
|
1028 |
+
try:
|
1029 |
+
autoais_model = AutoModelForSeq2SeqLM.from_pretrained(AUTOAIS_MODEL, torch_dtype=torch.bfloat16, device_map="auto")
|
1030 |
+
autoais_tokenizer = AutoTokenizer.from_pretrained(AUTOAIS_MODEL, use_fast=False)
|
1031 |
+
|
1032 |
+
except:
|
1033 |
+
print('Unable to load model from hub, trying to load from local path...')
|
1034 |
+
autoais_model = AutoModelForSeq2SeqLM.from_pretrained(AUTOAIS_MODEL_ABSOLUTE, torch_dtype=torch.bfloat16, device_map="auto")
|
1035 |
+
autoais_tokenizer = AutoTokenizer.from_pretrained(AUTOAIS_MODEL_ABSOLUTE, use_fast=False)
|
1036 |
+
Evaluator.autoais_model_load = True
|
1037 |
+
if eval_c == 'qa':
|
1038 |
+
global qa_pipeline
|
1039 |
+
qa_pipeline = transformers.pipeline("question-answering", model=QA_MODEL)
|
1040 |
+
|
1041 |
+
else:
|
1042 |
+
raise KeyError('eval_criteria unavailable')
|
1043 |
+
|
1044 |
+
def new_eval(self, name, eval_func, **data_get_key):
|
1045 |
+
self.eval_criteria[name] = eval_func
|
1046 |
+
self.set_eval(name, **data_get_key)
|
1047 |
+
|
1048 |
+
def __call__(self,data_from_pipeline= None):
|
1049 |
+
result = {}
|
1050 |
+
|
1051 |
+
for criteria, get_data in self.get_data.items():
|
1052 |
+
if not data_from_pipeline:
|
1053 |
+
data_dict = {}
|
1054 |
+
for k, v in get_data.items():
|
1055 |
+
if isinstance(v,str):
|
1056 |
+
if v == 'output':
|
1057 |
+
data_dict[k] = ' '.join(self.pipeline.output)
|
1058 |
+
elif v == 'doc_cache':
|
1059 |
+
data_dict[k] = self.pipeline.doc_cache
|
1060 |
+
else:
|
1061 |
+
data_dict[k] = self.pipeline.dataset[self.pipeline.data_index][v]
|
1062 |
+
else:
|
1063 |
+
data_dict[k] = v
|
1064 |
+
else:
|
1065 |
+
data_dict = data_from_pipeline
|
1066 |
+
|
1067 |
+
eval_func = self.eval_criteria[criteria]
|
1068 |
+
data = [data_dict]
|
1069 |
+
result[criteria] = eval_func(data)
|
1070 |
+
return result
|
1071 |
+
|
1072 |
+
|
1073 |
+
|
1074 |
+
class DefaultEvaluator(Evaluator):
|
1075 |
+
def __init__(self, args = None, criteria= None, pipeline = None) -> None:
|
1076 |
+
super().__init__(criteria,pipeline)
|
1077 |
+
if args:
|
1078 |
+
if hasattr(args,'str_em') and args.str_em:
|
1079 |
+
self.set_eval('str_em',output = PIPELINE_OUTPUT, qa_pairs = 'qa_pairs')
|
1080 |
+
if hasattr(args,'pr') and args.pr:
|
1081 |
+
self.set_eval('cite_recall_precision', output = PIPELINE_OUTPUT, docs = PIPELINE_DOC_CACHE, question = 'question')
|
1082 |
+
if hasattr(args,'mauve') and args.mauve:
|
1083 |
+
self.set_eval('mauve', output = PIPELINE_OUTPUT, answer = 'answer' ,question = 'question')
|
1084 |
+
if hasattr(args,'rouge') and args.rouge:
|
1085 |
+
if (hasattr(args, 'dataset') and 'qampari' not in args.dataset.lower()) or not hasattr(args, 'dataset'):
|
1086 |
+
self.set_eval('rouge', output = PIPELINE_OUTPUT, answer = 'answer')
|
1087 |
+
if hasattr(args,'qa') and args.qa:
|
1088 |
+
if (hasattr(args, 'dataset') and 'asqa' in args.dataset.lower()) or not hasattr(args, 'dataset'):
|
1089 |
+
self.set_eval('qa',output = PIPELINE_OUTPUT, qa_pairs = 'qa_pairs')
|
1090 |
+
if hasattr(args,'claims') and args.claims:
|
1091 |
+
if (hasattr(args, 'dataset') and 'eli5' in args.dataset.lower()) or not hasattr(args, 'dataset'):
|
1092 |
+
self.set_eval('claims',output = PIPELINE_OUTPUT, claims = 'claims')
|
1093 |
+
if hasattr(args,'qampari') and args.qampari:
|
1094 |
+
if (hasattr(args, 'dataset') and 'qampari' in args.dataset.lower()) or not hasattr(args, 'dataset'):
|
1095 |
+
self.set_eval('qampari',output = PIPELINE_OUTPUT, answers = 'answers')
|
1096 |
+
if hasattr(args,'length') and args.length:
|
1097 |
+
self.new_eval('length',lambda data: len(data[0]['output'].split(' ')), output = PIPELINE_OUTPUT)
|
1098 |
+
|
1099 |
+
elif criteria:
|
1100 |
+
if 'cite_recall_precision' in criteria:
|
1101 |
+
self.set_eval('cite_recall_precision', output = PIPELINE_OUTPUT, docs = PIPELINE_DOC_CACHE, question = 'question')
|
1102 |
+
if hasattr(args,'mauve') and args.mauve:
|
1103 |
+
self.set_eval('mauve', output = PIPELINE_OUTPUT, answer = 'answer' ,question = 'question')
|
1104 |
+
if 'rouge' in criteria:
|
1105 |
+
self.set_eval('rouge', output = PIPELINE_OUTPUT, answer = 'answer')
|
1106 |
+
if 'qa' in criteria:
|
1107 |
+
self.set_eval('qa',output = PIPELINE_OUTPUT, qa_pairs = 'qa_pairs')
|
1108 |
+
if 'str_em' in criteria:
|
1109 |
+
self.set_eval('str_em',output = PIPELINE_OUTPUT, qa_pairs = 'qa_pairs')
|
1110 |
+
if 'claims' in criteria:
|
1111 |
+
self.set_eval('claims',output = PIPELINE_OUTPUT, claims = 'claims')
|
1112 |
+
if 'qampari' in criteria:
|
1113 |
+
self.set_eval('qampari',output = PIPELINE_OUTPUT, answers = 'answers')
|
1114 |
+
if 'length' in criteria:
|
1115 |
+
self.new_eval('length',lambda data: len(data[0]['output'].split(' ')), output = PIPELINE_OUTPUT)
|
1116 |
+
|
1117 |
+
else:
|
1118 |
+
self.new_eval('length',lambda data: len(data[0]['output'].split(' ')), output = PIPELINE_OUTPUT)
|
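As a quick orientation to the Evaluator registration API defined above, here is a minimal sketch. The metric name `n_words` and the sample record are invented for illustration and are not part of this commit; the sketch assumes the evaluator module and its metric dependencies are importable.

from citekit.evaluator.evaluator import Evaluator

evaluator = Evaluator()
# built-in criterion: keyword values name the fields of the eval record
evaluator.set_eval('qampari', output='output', answers='answers')
# custom criterion: eval functions receive a list of record dicts
evaluator.new_eval('n_words', lambda data: len(data[0]['output'].split(' ')), output='output')
# passing a prepared record bypasses the pipeline lookup in __call__
print(evaluator(data_from_pipeline={'output': 'Chitalu, Bwalya.', 'answers': [['Chitalu'], ['Bwalya']]}))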
citekit/pipeline/__pycache__/pipeline.cpython-310.pyc
ADDED
Binary file (9.46 kB)

citekit/pipeline/__pycache__/pipeline.cpython-312.pyc
ADDED
Binary file (21.4 kB)

citekit/pipeline/__pycache__/pipeline_inter.cpython-310.pyc
ADDED
Binary file (6.64 kB)
citekit/pipeline/pipeline.py
ADDED
@@ -0,0 +1,423 @@
+from citekit.cite_modules.LLM import LLM, Module
+from citekit.cite_modules.augment_model import AugmentCluster, AttributingModule, MODEL_TYPE_MAPPING
+from citekit.prompt.prompt import ALCEVanillaPrompt, DocPrompt
+import logging
+import json
+from tqdm import tqdm
+import traceback
+import copy
+from citekit.utils.utils import flatten_dict
+import csv
+
+
+
+def merge_str_dicts(dicts):
+    result = {}
+    for dictionary in dicts:
+        for key, value in dictionary.items():
+            if key in result:
+                result[key] += ' ' + value
+            else:
+                result[key] = value
+    return result
+
+PIPELINE_OUTPUT = 'output'
+PIPELINE_DOC_CACHE = 'doc_cache'
+
+class DocCache():
+    def __init__(self) -> None:
+        self.__docs = list()
+
+    def __len__(self):
+        return len(self.__docs)
+
+    def __getitem__(self, index):
+        if index >= 0 and index < len(self):
+            return self.__docs[index]
+        else:
+            return None
+
+    def get_last(self):
+        if self.__docs:
+            return self.__docs[-1]
+
+    def add_doc(self, doc, add_id=True) -> int:
+        if not isinstance(doc, str):
+            assert isinstance(doc, dict) and 'text' in doc and 'title' in doc
+            doc = f'(Title: {doc["title"]}){doc["text"]}'
+        if add_id:
+            doc_head = f'Document [{len(self)+1}]'
+        else:
+            doc_head = ''
+        self.__docs.append(doc_head + doc)
+        return len(self)
+
+    def load_docs(self, docs, add_id=False):
+        for doc in docs:
+            self.add_doc(doc, add_id)
+        return len(self)
+
+    def clear(self):
+        self.__docs = list()
+
+    def show_docs(self):
+        return self.__docs
+
+
+class Pipeline():
+    def __init__(self, save_path=None, sequence=None, head_prompt_maker=None, llm=None, module=None, retriever=None, evaluator=None, dataset=None, rich_eval=False, train_data=False, attributer=None) -> None:
+        self.save_path = save_path
+        self.train_data = train_data
+        self.head_prompt_maker = head_prompt_maker
+        self.table_head = True
+        self.attributer = attributer
+        self.llm = llm
+        self.initial_docs = None
+        self.data_keys = None
+        self.stored_clusters = []
+        self.module = []
+        if llm:
+            llm.connect_to(self)
+        if not isinstance(module, list) and module is not None:
+            if module:
+                module.connect_to(self)
+        else:
+            if isinstance(module, list):
+                for i in module:
+                    if isinstance(i, AugmentCluster) or isinstance(i, Module):
+                        i.connect_to(self)
+        self.dataset = dataset
+
+        self.data_index = 0
+        self.retriever = retriever
+        if retriever:
+            retriever.pipeline = self
+
+        self.eval = evaluator
+        if evaluator:
+            evaluator.pipeline = self
+        self.output = []
+        self.log = []
+        self.doc_cache = DocCache()
+        self.head = {}
+        self.result = {}
+        self.rich_eval = rich_eval
+        self.initial_module = None
+
+    def load_data(self, dataset):
+        self.data = dataset
+
+    def set_initial_module(self, module):
+        self.initial_module = module
+    def get_initial_module(self):
+        return self.initial_module
+
+    def set_data_keys(self, keys):
+        self.data_keys = keys
+    def get_data_keys(self):
+        return self.data_keys
+
+
+    def update(self, update_object, config, update_info):
+        print(f'Updating {update_object} with {config} and {update_info}')
+        module = self.get_module_by_name(update_object)
+        if config in ['prompt', 'header']:
+            module.update(config, update_info)
+        elif config in ['destination']:
+            module.update(config, [self.get_module_by_name(update_info[0]), update_info[1]])
+        elif config in ['delete_destination']:
+            module.update(config, self.get_module_by_name(update_info))
+        elif config in ['new_model']:
+            model_type, model, key = update_info
+            print('Creating new model:', model_type, model)
+            new_model_class = MODEL_TYPE_MAPPING[model_type]
+            print('New model class:', new_model_class)
+            new_model = new_model_class(model)
+            new_model.connect_to(self)
+            print('Created new model:', new_model)
+            module.update('destination', [new_model, key])
+
+        else:
+            raise NotImplementedError
+
+
+
+    def set_initial_docs(self, d):
+        self.initial_docs = d
+    def get_initial_docs(self):
+        return self.initial_docs
+
+    def run_on_dataset(self, datakeys, init_docs=None, initial_module=None, start=0):
+        if self.initial_module and not initial_module:
+            initial_module = self.initial_module
+        if self.save_path:
+            for i in range(start, len(self.dataset)):
+                self.data_index = i
+                try:
+                    self.run(datakeys, init_docs, initial_module, train=self.train_data)
+                except Exception as e:
+                    print(f'Error: {e}, skipping data {i}')
+                    traceback.print_exc()
+        else:
+            for i in range(start, len(self.dataset)):
+                self.data_index = i
+                try:
+                    self.run(datakeys, init_docs, initial_module, write=False, train=self.train_data)
+                except Exception as e:
+                    print(f'Error: {e}, skipping data {i}')
+                    traceback.print_exc()
+
+
+
+    def form_eval_data(self) -> dict:
+        """To write rich eval, you can use data from:
+        pipeline.dataset, doc_cache and output
+        to post-process the data into an argument dict for evaluation
+        """
+        raise NotImplementedError('You have to write <form_eval_data function> to apply rich eval with designed arguments.')
+
+    def direct_run(self, dynamic_prompt={}, module=None):
+        if not module:
+            module = self.llm
+        if isinstance(module, AugmentCluster):
+            module = module.get_first_module()
+        while isinstance(module, Module):
+            if isinstance(dynamic_prompt, dict):
+                module.change_to_multi_process(False)
+                dynamic_prompt = module.generate(self.head, dynamic_prompt=dynamic_prompt)
+            elif isinstance(dynamic_prompt, list) and all([isinstance(d, dict) for d in dynamic_prompt]):
+                module.change_to_multi_process(True)
+                if module.parallel:
+                    dynamic_prompt = [module.generate(self.head, d) for d in dynamic_prompt]
+                    if module.merge:
+                        dynamic_prompt = merge_str_dicts(dynamic_prompt)
+                        module.add_output_to_head(module.last_message)
+                elif not module.iterative and not module.merge:
+                    for d in dynamic_prompt:
+                        self.direct_run(dynamic_prompt=d, module=copy.copy(module))
+                    #dynamic_prompt = [module.generate(self.head,d) for d in dynamic_prompt]
+                    break
+                elif module.iterative:
+                    iter_dynamic = {}
+                    for d in dynamic_prompt:
+                        iter_dynamic = module.generate(self.head, {**d, **iter_dynamic})
+                    dynamic_prompt = iter_dynamic
+                module.end_multi()
+            else:
+                print(type(dynamic_prompt))
+                raise TypeError(str(dynamic_prompt))
+            self.log.append(f'{module} -> {module.send()}\n: {module.last_message}')
+            if isinstance(module, Module):
+                module.output()
+            print('DEBUG:', str(module), module.end, module.turns, module.max_turn)
+            if module.end or module.turns > module.max_turn:
+                break
+            module = module.send()
+            if isinstance(module, AugmentCluster):
+                module = module.get_first_module()
+
+
+    def __call__(self, data):
+        # run only one data item
+        # backup
+        dataset_backup = self.dataset
+        current_data_index_backup = self.data_index
+        if hasattr(self, 'current_data'):
+            current_data_backup = self.current_data
+        else:
+            current_data_backup = None
+
+        # set data and run
+        dataset = [data]
+        self.dataset = dataset
+        self.data_index = 0
+        result = self.run(datakeys=self.data_keys, init_docs=self.initial_docs, initial_module=self.initial_module, write=False, train=False)
+
+        # restore
+        self.data_index = current_data_index_backup
+        self.current_data = current_data_backup
+        self.dataset = dataset_backup
+
+
+        return result
+
+    def run(self, datakeys, init_docs=None, initial_module=None, write=True, train=False):
+
+        # get data
+        self.current_data = self.dataset[self.data_index]
+        data = self.current_data
+
+        # form the head prompt from the specific data item
+        head = dict()
+        for key in datakeys:
+            if isinstance(data[key], str):
+                head[key] = data[key]
+            else:
+                assert isinstance(data[key], list)
+                assert all([isinstance(item, str) for item in data[key]])
+                head[key] = ''.join(data[key])
+
+        # init
+        self.head = head
+        self.output = []
+        self.doc_cache.clear()
+        if init_docs:
+            self.doc_cache.load_docs(data[init_docs])
+        self.llm.reset()
+        if self.module:
+            for i in self.module:
+                i.reset()
+        self.log = []
+        # run only one data item, and advance data_index by 1
+        dynamic_prompt = {}
+        if not initial_module:
+            module = self.llm
+        else:
+            module = initial_module
+        if isinstance(module, AugmentCluster):
+            module = module.get_first_module()
+        while isinstance(module, Module):
+            if isinstance(dynamic_prompt, dict):
+                module.change_to_multi_process(False)
+                dynamic_prompt = module.generate(self.head, dynamic_prompt=dynamic_prompt)
+            elif isinstance(dynamic_prompt, list) and all([isinstance(d, dict) for d in dynamic_prompt]):
+                module.change_to_multi_process(True)
+                if module.parallel:
+                    dynamic_prompt = [module.generate(self.head, d) for d in dynamic_prompt]
+                    if module.merge:
+                        dynamic_prompt = merge_str_dicts(dynamic_prompt)
+                        module.add_output_to_head(module.last_message)
+                elif not module.iterative and not module.merge:
+                    for d in dynamic_prompt:
+                        self.direct_run(dynamic_prompt=d, module=copy.copy(module))
+                    #dynamic_prompt = [module.generate(self.head,d) for d in dynamic_prompt]
+                    break
+
+                elif module.iterative:
+                    iter_dynamic = {}
+                    for d in dynamic_prompt:
+                        iter_dynamic = module.generate(self.head, {**d, **iter_dynamic})
+                    dynamic_prompt = iter_dynamic
+                module.end_multi()
+            else:
+                print(type(dynamic_prompt))
+                raise TypeError(str(dynamic_prompt))
+            self.log.append(f'{module} -> {module.send()}\n: {module.last_message}')
+            if isinstance(module, Module):
+                module.output()
+            if module.end or module.turns > module.max_turn:
+                break
+            module = module.send()
+            if isinstance(module, AugmentCluster):
+                module = module.get_first_module()
+
+        # if eval, send to evaluation
+        if self.eval:
+            if not self.rich_eval:
+                self.result = self.eval()
+            else:
+                self.result = self.eval(self.form_eval_data())
+        else:
+            self.result = {}
+        if write:
+            self.write()
+        if train:
+            self.export_training_data()
+
+        #self.logs = self.delete_inner_cluster_logs(self.log)
+        res = {'data': self.get_data(), 'doc_cache': self.doc_cache.show_docs(), 'log': self.log.copy(), 'output': self.output, 'result': self.result}
+        if self.attributer:
+            self.attributer.attribute_for_result(res)
+        self.data_index += 1
+        return res
+
+    def delete_inner_cluster_logs(self, logs):
+        print(logs)
+        for cluster in self.stored_clusters:
+            cluster_name = str(cluster)
+            print('Combining logs for cluster:', cluster_name)
+            in_cluster = False
+            for i, log in enumerate(logs):
+                in_out_names = log.split('\n')[0]
+                if in_out_names in cluster_name:
+                    # This is the inner log
+                    if not in_cluster:
+                        in_cluster = True
+                        log_start = i
+                    else:
+                        continue
+                elif in_cluster:
+                    # This is the outer log
+                    in_cluster = False
+                    log_end = i
+                    cluster_output = logs[log_end]
+                    next_module = in_out_names.split('->')[1].strip()
+                    cluster_log = f"{cluster_name} -> {next_module}\n: {cluster_output}"
+                    logs = logs[:log_start] + [cluster_log] + logs[log_end+1:]
+        print('Final logs:', logs)
+        return logs
+
+
+
+
+    def get_data(self):
+        return self.dataset[self.data_index]
+
+    def write(self):
+        '''Default writing'''
+        llm_token_used = self.llm.token_used
+        write_down = {'data': self.get_data(), 'doc_cache': self.doc_cache.show_docs(), 'log': self.log.copy(), 'output': self.output, 'result': self.result, 'token_used': llm_token_used}
+
+        if self.attributer:
+            self.attributer.attribute_for_result(write_down)
+
+        with open(self.save_path, 'a', encoding='utf-8') as file:
+            json_line = json.dumps(write_down, indent=4)
+            file.write(json_line + '\n')
+
+    def get_module_by_name(self, name):
+        print('Getting module by name:', name)
+        for module in self.module:
+            if str(module) == name:
+                return module
+        if str(self.llm) == name:
+            return self.llm
+
+        for cluster in self.stored_clusters:
+            print('trying cluster:', cluster)
+            if str(cluster) == name:
+                print('found cluster:', cluster)
+                return cluster
+
+        return None
+
+    def export_training_data(self):
+        flattened_data = [flatten_dict(self.result)]
+        header = set()
+        for item in flattened_data:
+            header.update(item.keys())
+        header = sorted(header)
+        with open('output.csv', mode='a', newline='') as file:
+            writer = csv.DictWriter(file, fieldnames=header)
+            if self.table_head:
+                writer.writeheader()
+                self.table_head = False
+
+            for row in flattened_data:
+                writer.writerow(row)
+
+
+    def __str__(self) -> str:
+        return 'pipeline output'
+
+class Sequence(Pipeline):
+    def __init__(self, save_path=None, sequence=None, head_prompt_maker=None, retriever=None, evaluator=None, dataset=None, rich_eval=False) -> None:
+        first_module = sequence[0]
+        other = sequence[1:]
+        super().__init__(save_path, sequence, head_prompt_maker, first_module, other, retriever, evaluator, dataset, rich_eval)
+        for i in range(len(sequence)-1):
+            module = sequence[i]
+            assert isinstance(module, Module) or isinstance(module, AugmentCluster)
+            # bind the current module as a default argument to avoid late binding in the loop
+            module.set_target(sequence[i+1], post_processing=lambda x, module=module: {module.output_as: x})
+        sequence[-1].set_output()
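As a quick illustration of the DocCache behaviour defined above, here is a standalone sketch (the sample documents are invented; it assumes the package and its imports are installed):

from citekit.pipeline.pipeline import DocCache

cache = DocCache()
# dict documents are rendered as "(Title: ...)" + text; add_id prepends "Document [n]"
cache.add_doc({'title': 'Zambia', 'text': 'Godfrey Chitalu was a footballer.'})
cache.add_doc('a plain string document', add_id=False)
print(len(cache))        # 2
print(cache[0])          # Document [1](Title: Zambia)Godfrey Chitalu was a footballer.
print(cache.get_last())  # a plain string document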
citekit/prompt/__pycache__/prompt.cpython-310.pyc
ADDED
Binary file (10.5 kB)

citekit/prompt/__pycache__/prompt.cpython-312.pyc
ADDED
Binary file (15.2 kB)
citekit/prompt/prompt.py
ADDED
@@ -0,0 +1,294 @@
+import json
+
+
+truncate = lambda x, l: x[:l]
+token_len = len
+
+def combine(*args):
+    if all([isinstance(arg, dict) for arg in args]):
+        if len(args) == 1:
+            return args[0]
+        else:
+            combined = args[0].copy()
+            combined.update(combine(*args[1:]))
+            return combined
+
+default_get = lambda key: lambda data: data[key]
+
+
+
+class Prompt:
+    components = {}
+    template = ""
+    truncate = lambda x, l: x[:l]
+    UNABLE = 'prompt_unable'
+
+
+    def update(self, **kargs):
+        try:
+            for key in kargs.keys():
+                if key == 'template':
+                    arg_template = kargs[key]
+
+                if key == 'components':
+                    arg_components = kargs[key]
+            test_prompt = Prompt(arg_template, arg_components)
+        except Exception as e:
+            print(e)
+            print('Update Rejected due to invalid template or components')
+            return
+
+        self.template = arg_template
+        self.components = arg_components
+
+    def __init__(self, template='', components={}, max_token=8000) -> None:
+
+        '''
+        Args:
+            template: the way to order and organize the components; use <NAME> to represent a component: <C1><C2>...<Cn>.
+            components: the content of each component; use {NAME} as the placeholder for the corresponding data.
+            max_token: a list as long as components, giving the max number of tokens per component, or an int applying the same max_token to all components.
+        '''
+
+        # template
+        self.template = template
+
+        # components
+        if isinstance(components, dict):
+            for key in components.keys():
+                if f'<{str(key)}>' not in self.template:
+                    raise Exception('component name not in template!')
+            self.components = components
+
+        # max_token
+        self.max_token = {}
+        if isinstance(max_token, list):
+            if len(components) == len(max_token):
+                self.max_token = {att: val for (att, val) in zip(components.keys(), max_token)}
+            else:
+                raise Exception('max_token is not corresponding to components')
+        elif isinstance(max_token, int):
+            self.max_token_init = max_token
+            self.max_token = {att: max_token for att in components}
+        else:
+            raise TypeError('max_token should be int or list')
+
+    def __repr__(self) -> str:
+        prompt = self.template
+        for key in self.components.keys():
+            prompt = prompt.replace(f'<{str(key)}>', self.components[key])
+        return prompt
+
+    def part_template(self, **kargs):
+        '''
+        Add components into the prompt.
+        '''
+        for part in kargs.keys():
+            if f'<{str(part)}>' in self.template:
+                self.components[part] = kargs[part]
+            else:
+                raise Exception('component name not in template!')
+
+    def __call__(self, *args, **kargs) -> str:
+        return self.make_prompt(*args, **kargs)
+
+
+    def __str__(self):
+
+        input = {}
+        for key in self.components.keys():
+            input[key] = self.components[key]
+
+
+        return self.make_prompt(input)
+
+
+    def make_prompt(self, *args, **kargs) -> str:
+        '''
+        args: dictionaries containing the contents for the placeholders of the prompt
+        kargs: use NAME=value to pass arguments
+        '''
+
+        if args:
+            args = combine(*args)
+            args = args.copy()
+            args.update(kargs)
+        else:
+            args = kargs
+        prompt = self.template
+
+        for key in self.components.keys():
+            if key not in args or args[key] == Prompt.UNABLE:
+                prompt = prompt.replace(f'<{str(key)}>', "")
+            else:
+                prompt = prompt.replace(f'<{str(key)}>', self.components[key])
+
+        prompt_args = {}
+        for key in args.keys():
+            if key in self.components.keys():
+                if self.max_token.get(key):
+                    max_token = self.max_token.get(key)
+                else:
+                    max_token = min(4096, self.max_token_init)
+                if token_len(args[key]) > max_token:
+                    args[key] = Prompt.truncate(args[key], max_token)
+
+
+        return prompt.format(**args)
+
+    def set_max_token(self, **kargs) -> None:
+        for key in kargs.keys():
+            if key in self.components.keys():
+                self.max_token[key] = kargs.get(key)
+            else:
+                raise KeyError(f'{key} not in Template!')
+
+    def load_data(self, data_loader, *keys, **projections):
+        '''
+        Load data to make prompts from a data loader.
+        projections: the functions used to extract each field from a data item.
+        '''
+
+        prompts = []
+        for data in data_loader:
+            l_contents = {key: default_get(key)(data) for key in keys}
+            d_contents = {projection: projections[projection](data) for projection in projections.keys()}
+            prompts.append(self.make_prompt({**l_contents, **d_contents}))
+
+        return prompts
+
+
+
+
+class DocPrompt(Prompt):
+    '''
+    Containing Doc ID, Title and Passage in order:
+
+    Document:[{ID}]
+    (Title:{Title})
+    {Passage}
+    '''
+    def __init__(self, template='<ID><Title><Passage>', components={'ID': 'Document[{ID}]: ', 'Title': '(Title:{Title})', 'Passage': '{Passage}\n'}, max_token=4096) -> None:
+        super().__init__(template, components, max_token)
+
+
+class ALCEDocPrompt(Prompt):
+    '''
+    Containing Doc ID, Title and Passage in order:
+
+    Document:[{ID}]
+    (Title:{Title})
+    {Passage}
+    '''
+    def __init__(self, template='<ID><title><text>', components={'ID': 'Document [{ID}]', 'title': '(Title:{title}): ', 'text': '{text}\n'}, max_token=4096) -> None:
+        super().__init__(template, components, max_token)
+
+    def default_load_data(self, data_loader, text='text', from_idx=0):
+        return super().load_data(list(enumerate(data_loader)), text=lambda data: data[1][text], ID=lambda data: str(data[0]+1+from_idx), title=lambda data: data[1]['title'])
+
+    def default_load_data_wo_ID(self, data_loader):
+        return super().load_data(list(enumerate(data_loader)), text=lambda data: data[1]['text'], title=lambda data: data[1]['title'])
+    def default_load_data_wo_title(self, data_loader):
+        return super().load_data(list(enumerate(data_loader)), text=lambda data: data[1]['text'], ID=lambda data: str(data[0]+1))
+    def default_load_data_extraction(self, data_loader):
+        return super().load_data(list(enumerate(data_loader)), text=lambda data: data[1]['extraction'], ID=lambda data: str(data[0]+1), title=lambda data: data[1]['title'])
+    def default_load_data_summary(self, data_loader):
+        return super().load_data(list(enumerate(data_loader)), text=lambda data: data[1]['summary'], ID=lambda data: str(data[0]+1), title=lambda data: data[1]['title'])
+
+class ALCEVanillaPrompt(Prompt):
+    '''
+    Containing INST(Instruction), Question, Doc and Answer in order:
+
+    {INST}
+
+    Question:{Question}
+
+    {Doc}
+    Answer:{Answer}
+    '''
+    def __init__(self,
+                 template="<INST><Question><Doc><Answer>\n",
+                 components={'INST': '{INST}\n\n', 'Question': 'Question:{Question}\n\n', 'Doc': '{Doc}\n', 'Answer': 'Answer:{Answer}'},
+                 max_token=4096) -> None:
+        super().__init__(template, components, max_token)
+
+class NewALCEVanillaPrompt(Prompt):
+    '''
+    Containing INST(Instruction), Question, Doc and Answer in order:
+
+    {INST}
+
+    Question:{Question}
+
+    {Doc}
+    Answer:{Answer}
+    '''
+    def __init__(self,
+                 template="<INST><question><docs><answer>\n",
+                 components={'INST': '{INST}\n\n', 'question': 'Question:{question}\n\n', 'docs': '{docs}\n', 'answer': 'Answer:{answer}'},
+                 max_token=4096) -> None:
+        super().__init__(template, components, max_token)
+
+
+
+class AGEEPrompt(Prompt):
+    '''
+    Containing INST(Instruction), Question and Doc in order:
+
+    {INST}
+
+    Question:{Question}
+
+    Search Results:{Doc}
+    '''
+    def __init__(self,
+                 template="<INST><Question><Doc>\n",
+                 components={'INST': '{INST}\n\n', 'Question': 'Question:\n{Question}\n', 'Doc': 'Search Results:\n{Doc}\n'},
+                 max_token=4096) -> None:
+        super().__init__(template, components, max_token)
+
+
+
+
+alce_prompt = ALCEVanillaPrompt()
+#alce_prompt.set_max_token(INST = 10,Doc = 100,Answer = 15)
+DocP = DocPrompt()
+
+
+#print(content['demos'])
+
+
+#print(data[0])
+'''
+pps = alce_prompt.load_data(content['demos'],
+                            INST = lambda _: content['instruction'],
+                            Question = lambda data: data['question'],
+                            Doc = lambda data: ''.join(DocPrompt().load_data(list(enumerate(data['docs'])),
+                                                                             ID = lambda data: str(data[0]),
+                                                                             Title = lambda data: data[1]['title'],
+                                                                             Passage = lambda data: data[1]['text'])),
+                            Answer = lambda data: data['answer'])
+
+'''
+
+#print(pps[0])
+
+'''
+data_loader = []
+with open('data.txt','r',encoding='utf-8') as f:
+    content = f.readlines()
+    for i,c in enumerate(content):
+        if i%3 == 0:
+            data_loader.append({'Q':c.strip(),'A':content[i+1].strip()})
+print(data_loader)
+
+
+pps = Dp.load_data(data_loader,
+                   INST= lambda data: "Instruction: Write an accurate, engaging, and concise answer for the given question",
+                   Question= lambda data: data['Q'],
+                   Answer= lambda data: data['A'])
+'''
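To make the template/components contract above concrete, a minimal sketch (the instruction and question strings are invented for the example):

from citekit.prompt.prompt import Prompt

p = Prompt(template='<INST><Question>',
           components={'INST': '{INST}\n\n', 'Question': 'Question: {Question}\n'})
# a component whose data is absent (or set to Prompt.UNABLE) is dropped from the template
print(p.make_prompt(INST='Answer briefly.', Question='Who scored the most goals?'))
print(p.make_prompt(Question='Who scored the most goals?'))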
citekit/utils/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (11.2 kB)

citekit/utils/__pycache__/utils.cpython-312.pyc
ADDED
Binary file (17.2 kB)
citekit/utils/utils.py
ADDED
@@ -0,0 +1,317 @@
+import numpy as np
+import string
+import re
+import collections
+import torch
+import nltk
+
+def one_paragraph(text):
+    paras = text.lstrip('\n').split('\n\n')
+    if not paras:
+        return ''
+    else:
+        return paras[0].rstrip('\n')
+
+def strong_one_paragraph(text):
+    paras = text.lstrip('\n').split('\n')
+    if not paras:
+        return ''
+    else:
+        return paras[0].rstrip('\n')
+
+def compute_str_em(data):
+    """Compute STR-EM metric (only for ASQA)
+    Args:
+        data: requires field `qa_pairs/short_answers` and `output`
+    Returns:
+        STR-EM and STR-EM-HIT ()
+    """
+    if 'qa_pairs' not in data[0] or data[0]['qa_pairs'] is None:
+        return 0
+
+    acc = []
+    hit = []
+
+    for item in data:
+        loc_acc = []
+        for qa_pair in item['qa_pairs']:
+            loc_acc.append(exact_presence(qa_pair['short_answers'], item["output"]))
+        acc.append(np.mean(loc_acc))
+        hit.append(int(np.mean(loc_acc) == 1))
+
+    return 100 * np.mean(acc)
+    # unreachable alternative that also reports STR-EM-HIT:
+    # return 100 * np.mean(acc), 100 * np.mean(hit)
+
+def average(func):
+    def avg_func(dataset):
+        print(len(dataset))
+        results = [func(*data) for data in dataset] if dataset else []
+        if results:
+            return np.mean(np.array(results), axis=0).tolist()
+        else:
+            return None
+    return avg_func
+
+def normalize_answer(s):
+    def remove_articles(text):
+        return re.sub(r"\b(a|an|the)\b", " ", text)
+
+    def white_space_fix(text):
+        return " ".join(text.split())
+
+    def remove_punc(text):
+        exclude = set(string.punctuation)
+        return "".join(ch for ch in text if ch not in exclude)
+
+    def lower(text):
+        return text.lower()
+
+    return white_space_fix(remove_articles(remove_punc(lower(s))))
+
+def compute_f1(a_gold, a_pred):
+    """Compute F1 score between two strings."""
+
+    def _get_tokens(s):
+        if not s:
+            return []
+        return normalize_answer(s).split()
+
+    gold_toks = _get_tokens(a_gold)
+    pred_toks = _get_tokens(a_pred)
+
+    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
+    num_same = sum(common.values())
+
+    if len(gold_toks) == 0 or len(pred_toks) == 0:
+        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
+        return int(gold_toks == pred_toks)
+
+    if num_same == 0:
+        return 0
+
+    precision = 1.0 * num_same / len(pred_toks)
+    recall = 1.0 * num_same / len(gold_toks)
+    f1 = (2 * precision * recall) / (precision + recall)
+
+    return f1
+
+
+def compute_exact(a_gold, a_pred):
+    """Check whether two strings are equal up to normalization."""
+
+    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
+
+
+def exact_presence(short_answers, context):
+    """Verify if any of the answers is present in the given context.
+    Args:
+        short_answers: list of short answers to look for in the context
+        context: a paragraph to search for short answers
+    Returns:
+        true if any of the short answers is present in the context
+    """
+
+    n_short_answers = [normalize_answer(sa) for sa in short_answers]
+    n_context = normalize_answer(context)
+
+    for ans in n_short_answers:
+        if ans in n_context:
+            return True
+
+    return False
+
+def output_begin_with(word):
+    def f(self) -> bool:
+        return self.last_message.strip().lower()[:len(word)] == word
+    return f
+
+def output_end_with(word):
+    def f(self) -> bool:
+        return strong_one_paragraph(self.last_message.strip())[-len(word):] == word
+    return f
+
+
+def make_as(datakey):
+    def f(passage):
+        return {datakey: passage}
+    return f
+
+def cut_and_make_as(datakey):
+    def f(passage):
+        return {datakey: one_paragraph(passage)}
+    return f
+
+def remove_citations(sent):
+    return re.sub(r"{\d+", "", re.sub(r" {\d+", "", sent)).replace(" |", "").replace("}", "").replace("{", "")
+
+def remove_citations(sent):  # note: this definition shadows the {n}-style variant above
+    return re.sub(r"\[\d+", "", re.sub(r" \[\d+", "", sent)).replace(" |", "").replace("]", "")
+
+
+def match_document(ref_mark, output_ref_span):
+    ref = set()
+    ref_span = []
+    for num in ref_mark:
+        ref_str = str(num)
+        if ref_str in output_ref_span:
+            ref_parts = output_ref_span[ref_str].split("[")
+            if len(ref_parts) > 1:
+                ref_id_parts = ref_parts[1].split("]")
+                if len(ref_id_parts) > 0:
+                    ref_id = ref_id_parts[0].strip()
+                    if ref_id.isdigit():
+                        ref.add(int(ref_id))  # add the Document id
+
+            ref_span_parts = output_ref_span[ref_str].split(":", 1)  # the fragment after the first colon
+            if len(ref_span_parts) > 1:
+                ref_span.append(ref_span_parts[1].strip())  # add the quoted sentence fragment
+            else:
+                ref_span.append('')
+    return list(ref), ref_span
+
+def get_max_memory():
+    """Get the maximum memory available for the current GPU for loading models."""
+    free_in_GB = int(torch.cuda.mem_get_info()[0]/1024**3)
+    max_memory = f'{free_in_GB-6}GB'
+    n_gpus = torch.cuda.device_count()
+    max_memory = {i: max_memory for i in range(n_gpus)}
+    return max_memory
+
+def each_make_as(key):
+    def function(output):
+        sents = nltk.sent_tokenize(one_paragraph(output))
+        if len(sents) > 3:
+            sents = sents[:3]
+        return [make_as(key)(sent) for sent in sents]
+    return function
+
+def each_par_make_as(key):
+    def function(output):
+        sents = one_paragraph(output).split('\n')
+        if len(sents) > 3:
+            sents = sents[:3]
+        return [make_as(key)(sent) for sent in sents]
+    return function
+
+def sentence(key):
+    def function(output):
+        sents = nltk.sent_tokenize(one_paragraph(output))
+        for sent in sents:
+            refs = re.findall(r'\[\d+\]', sent)
+            if refs:
+                return make_as(key)(sent)
+        return make_as(key)('')
+    return function
+
+def sentences(key):
+    def function(output):
+        sents = nltk.sent_tokenize(one_paragraph(output))
+        return [make_as(key)(sent) for sent in sents][:1]
+    return function
+
+def three_sentences(key):
+    def function(output):
+        sents = nltk.sent_tokenize(one_paragraph(output))
+        return [make_as(key)(sent) for sent in sents][:3]
+    return function
+
+def first_sentence(text):
+    sents = nltk.sent_tokenize(one_paragraph(text))
+    for sent in sents:
+        return sent
+    return ''
+
+def flatten_dict(d, parent_key='', sep='_'):
+    items = []
+    for k, v in d.items():
+        new_key = f'{parent_key}{sep}{k}' if parent_key else k
+        if isinstance(v, dict):
+            items.extend(flatten_dict(v, new_key, sep=sep).items())
+        else:
+            items.append((new_key, v))
+    return dict(items)
+
+
+
+import re
+from bs4 import BeautifulSoup
+
+def parse_html_prompt(input_str):
+    soup = BeautifulSoup(input_str, "html.parser")
+
+    # handle the content inside <p></p>
+    p_content = soup.find("p").decode_contents().replace("<br>", "\n")
+    p_content = re.sub(r'<span[^>]*>(.*?)</span>', r'<\1>', p_content)
+    template = p_content.strip().replace(' <br/>', '').replace(' ', '').replace('<br/>', '')
+
+    # parse the component-item entries
+    components = {}
+    for item in soup.find_all("div", class_="component-item"):
+        key_span = item.find("div", class_="component-key").find("span")
+        key = key_span.get_text(strip=True) if key_span else ""
+        value_div = item.find("div", class_="component-value")
+        value_content = value_div.decode_contents()
+        value_content = re.sub(r'<span[^>]*>(.*?)</span>', r'{\1}', value_content)
+        components[key] = value_content.strip().replace(' <br/>', '').replace('<br/>', '')
+
+    # parse the self-info-item entries
+    self_prompt = {}
+    for item in soup.find_all("div", class_="self-info-item"):
+        key_span = item.find("div", class_="component-key").find("span")
+        key = key_span.get_text(strip=True) if key_span else ""
+        value_div = item.find("div", class_="component-value")
+        value = value_div.get_text(strip=True) if value_div else ""
+        self_prompt[key] = value.replace(' <br/>', '').replace('<br/>', '')
+
+    return {
+        'template': template,
+        'components': components,
+        'self_prompt': self_prompt
+    }
+
+
+def parse_html_destination(input_str):
+    soup = BeautifulSoup(input_str, "html.parser")
+    destination = soup.find("destination").get_text(strip=True)
+    prompt_key = soup.find("prompt_key").get_text(strip=True)
+    return destination, prompt_key
+
+def parse_html_new_model(input_str):
+    soup = BeautifulSoup(input_str, "html.parser")
+    model_type = soup.find("model_type").get_text(strip=True)
+    model_name = soup.find("model").get_text(strip=True)
+    key = soup.find("prompt_key").get_text(strip=True)
+    return model_type, model_name, key
+
+def parse_delete_destination(input_str):
+    soup = BeautifulSoup(input_str, "html.parser")
+    destination = soup.find("deletedestination").get_text(strip=True)
+    return destination
+
+def parse_html_header(input_str):
+    soup = BeautifulSoup(input_str, "html.parser")
+    header = soup.find("to_head").get_text(strip=True)
+    return header
+
+def parse_html_config(info):
+    config = ''
+    if 'class="component-value"' in info:
+        func = parse_html_prompt
+        config = 'prompt'
+    elif '</destination>' in info:
+        func = parse_html_destination
+        config = 'destination'
+    elif '<model_type>' in info:
+        func = parse_html_new_model
+        config = 'new_model'
+    elif 'deletedestination' in info:
+        config = 'delete_destination'
+        func = parse_delete_destination
+    elif 'to_head' in info:
+        config = 'header'
+        func = parse_html_header
+    else:
+        raise NotImplementedError
+    result = func(info)
+    print(info, 'parsed as', config)
+    return config, result
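A small sketch of flatten_dict from above, the helper Pipeline.export_training_data uses to turn nested result dicts into flat CSV rows (the sample values are invented; running it assumes the module's imports such as torch, nltk and bs4 are installed):

from citekit.utils.utils import flatten_dict

nested = {'result': {'qampari_f1': 12.5, 'citation': {'recall': 40.0}}}
print(flatten_dict(nested))
# {'result_qampari_f1': 12.5, 'result_citation_recall': 40.0}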
context_cite/__init__.py
ADDED
@@ -0,0 +1,4 @@
+from .context_citer import ContextCiter
+
+__version__ = "0.0.1"
+VERSION = __version__
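The vendored context_cite package is re-exported here; a typical call, following the upstream context-cite README (the model name and the exact arguments below are assumptions, not something shown in this commit):

from context_cite import ContextCiter

cc = ContextCiter.from_pretrained(
    'TinyLlama/TinyLlama-1.1B-Chat-v1.0',  # hypothetical model choice
    context='Godfrey Chitalu was a Zambian footballer.',
    query='Who was Godfrey Chitalu?',
)
print(cc.response)                   # generated answer
print(cc.get_attributions(top_k=3))  # context sources ranked by estimated influence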
context_cite/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (259 Bytes)

context_cite/__pycache__/__init__.cpython-39.pyc
ADDED
Binary file (233 Bytes)

context_cite/__pycache__/context_citer.cpython-312.pyc
ADDED
Binary file (18.3 kB)

context_cite/__pycache__/context_citer.cpython-39.pyc
ADDED
Binary file (13.5 kB)

context_cite/__pycache__/context_partitioner.cpython-312.pyc
ADDED
Binary file (5.83 kB)

context_cite/__pycache__/context_partitioner.cpython-39.pyc
ADDED
Binary file (4.6 kB)

context_cite/__pycache__/solver.cpython-312.pyc
ADDED
Binary file (2.88 kB)

context_cite/__pycache__/solver.cpython-39.pyc
ADDED
Binary file (2.22 kB)

context_cite/__pycache__/utils.cpython-312.pyc
ADDED
Binary file (11 kB)