import argparse
import itertools
import json
import os
import random
import time
from functools import partial
from typing import Optional

import torch
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer
from vqa import VQA
from vqa_eval import VQAEval

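# Dataset registry: each entry points at the train/test jsonl splits, the
# optional question/annotation files used by the official scorers, the metric
# to report, and the generation budget (max_new_tokens) used at inference time.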
ds_collections = {
    'vqav2_val': {
        'train': 'data/vqav2/vqav2_train.jsonl',
        'test': 'data/vqav2/vqav2_val.jsonl',
        'question': 'data/vqav2/v2_OpenEnded_mscoco_val2014_questions.json',
        'annotation': 'data/vqav2/v2_mscoco_val2014_annotations.json',
        'metric': 'vqa_score',
        'max_new_tokens': 10,
    },
    'vqav2_testdev': {
        'train': 'data/vqav2/vqav2_train.jsonl',
        'test': 'data/vqav2/vqav2_testdev.jsonl',
        'metric': None,
        'max_new_tokens': 10,
    },
    'okvqa_val': {
        'train': 'data/okvqa/okvqa_train.jsonl',
        'test': 'data/okvqa/okvqa_val.jsonl',
        'question': 'data/okvqa/OpenEnded_mscoco_val2014_questions.json',
        'annotation': 'data/okvqa/mscoco_val2014_annotations.json',
        'metric': 'vqa_score',
        'max_new_tokens': 10,
    },
    'textvqa_val': {
        'train': 'data/textvqa/textvqa_train.jsonl',
        'test': 'data/textvqa/textvqa_val.jsonl',
        'question': 'data/textvqa/textvqa_val_questions.json',
        'annotation': 'data/textvqa/textvqa_val_annotations.json',
        'metric': 'vqa_score',
        'max_new_tokens': 10,
    },
    'vizwiz_val': {
        'train': 'data/vizwiz/vizwiz_train.jsonl',
        'test': 'data/vizwiz/vizwiz_val.jsonl',
        'question': 'data/vizwiz/vizwiz_val_questions.json',
        'annotation': 'data/vizwiz/vizwiz_val_annotations.json',
        'metric': 'vqa_score',
        'max_new_tokens': 10,
    },
    'vizwiz_test': {
        'train': 'data/vizwiz/vizwiz_train.jsonl',
        'test': 'data/vizwiz/vizwiz_test.jsonl',
        'metric': None,
        'max_new_tokens': 10,
    },
    'docvqa_val': {
        'train': 'data/docvqa/train.jsonl',
        'test': 'data/docvqa/val.jsonl',
        'annotation': 'data/docvqa/val/val_v1.0.json',
        'metric': 'anls',
        'max_new_tokens': 100,
    },
    'docvqa_test': {
        'train': 'data/docvqa/train.jsonl',
        'test': 'data/docvqa/test.jsonl',
        'metric': None,
        'max_new_tokens': 100,
    },
    'chartqa_test_human': {
        'train': 'data/chartqa/train_human.jsonl',
        'test': 'data/chartqa/test_human.jsonl',
        'metric': 'relaxed_accuracy',
        'max_new_tokens': 100,
    },
    'chartqa_test_augmented': {
        'train': 'data/chartqa/train_augmented.jsonl',
        'test': 'data/chartqa/test_augmented.jsonl',
        'metric': 'relaxed_accuracy',
        'max_new_tokens': 100,
    },
    'gqa_testdev': {
        'train': 'data/gqa/train.jsonl',
        'test': 'data/gqa/testdev_balanced.jsonl',
        'metric': 'accuracy',
        'max_new_tokens': 10,
    },
    'ocrvqa_val': {
        'train': 'data/ocrvqa/ocrvqa_train.jsonl',
        'test': 'data/ocrvqa/ocrvqa_val.jsonl',
        'metric': 'accuracy',
        'max_new_tokens': 100,
    },
    'ocrvqa_test': {
        'train': 'data/ocrvqa/ocrvqa_train.jsonl',
        'test': 'data/ocrvqa/ocrvqa_test.jsonl',
        'metric': 'accuracy',
        'max_new_tokens': 100,
    },
    'ai2diagram_test': {
        'train': 'data/ai2diagram/train.jsonl',
        'test': 'data/ai2diagram/test.jsonl',
        'metric': 'accuracy',
        'max_new_tokens': 10,
    }
}

# https://github.com/google-research/pix2struct/blob/main/pix2struct/metrics.py#L81
def relaxed_correctness(target: str,
                        prediction: str,
                        max_relative_change: float = 0.05) -> bool:
    """Calculates relaxed correctness.

    The correctness tolerates certain error ratio defined by max_relative_change.
    See https://arxiv.org/pdf/2203.10244.pdf, end of section 5.1:
    “Following Methani et al. (2020), we use a relaxed accuracy measure for the
    numeric answers to allow a minor inaccuracy that may result from the automatic
    data extraction process. We consider an answer to be correct if it is within
    5% of the gold answer. For non-numeric answers, we still need an exact match
    to consider an answer to be correct.”

    Args:
        target: Target string.
        prediction: Predicted string.
        max_relative_change: Maximum relative change.

    Returns:
        Whether the prediction was correct given the specified tolerance.
    """

    def _to_float(text: str) -> Optional[float]:
        try:
            if text.endswith('%'):
                # Convert percentages to floats.
                return float(text.rstrip('%')) / 100.0
            else:
                return float(text)
        except ValueError:
            return None

    prediction_float = _to_float(prediction)
    target_float = _to_float(target)
    if prediction_float is not None and target_float:
        relative_change = abs(prediction_float -
                              target_float) / abs(target_float)
        return relative_change <= max_relative_change
    else:
        return prediction.lower() == target.lower()


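# Worked examples (illustrative only): relaxed_correctness('5.0', '5.2') is
# True because |5.2 - 5.0| / 5.0 = 0.04 <= 0.05, while
# relaxed_correctness('100', '106') is False (6% relative change). For
# non-numeric strings such as 'Yes' vs. 'yes' the comparison falls back to a
# case-insensitive exact match.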
def evaluate_relaxed_accuracy(entries):
    scores = []
    for elem in entries:
        if isinstance(elem['annotation'], str):
            elem['annotation'] = [elem['annotation']]
        # relaxed_correctness expects the gold answer as `target` (first
        # argument), so pass the annotation first and the model answer second.
        score = max([
            relaxed_correctness(ann, elem['answer'].strip())
            for ann in elem['annotation']
        ])
        scores.append(score)
    return sum(scores) / len(scores)


def evaluate_exact_match_accuracy(entries):
    scores = []
    for elem in entries:
        if isinstance(elem['annotation'], str):
            elem['annotation'] = [elem['annotation']]
        score = max([
            (1.0 if
             (elem['answer'].strip().lower() == ann.strip().lower()) else 0.0)
            for ann in elem['annotation']
        ])
        scores.append(score)
    return sum(scores) / len(scores)


def collate_fn(batches, tokenizer):

    questions = [_['question'] for _ in batches]
    question_ids = [_['question_id'] for _ in batches]
    annotations = [_['annotation'] for _ in batches]

    input_ids = tokenizer(questions, return_tensors='pt', padding='longest')

    return question_ids, input_ids.input_ids, input_ids.attention_mask, annotations


class VQADataset(torch.utils.data.Dataset):

    def __init__(self, train, test, prompt, few_shot):
        self.test = open(test).readlines()
        self.prompt = prompt

        self.few_shot = few_shot
        if few_shot > 0:
            self.train = open(train).readlines()

    def __len__(self):
        return len(self.test)

    def __getitem__(self, idx):
        data = json.loads(self.test[idx].strip())
        image, question, question_id, annotation = data['image'], data[
            'question'], data['question_id'], data.get('answer', None)

        few_shot_prompt = ''
        if self.few_shot > 0:
            few_shot_samples = random.sample(self.train, self.few_shot)
            for sample in few_shot_samples:
                sample = json.loads(sample.strip())
                few_shot_prompt += self.prompt.format(
                    sample['image'],
                    sample['question']) + f" {sample['answer']}"

        return {
            'question': few_shot_prompt + self.prompt.format(image, question),
            'question_id': question_id,
            'annotation': annotation
        }


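# InferenceSampler shards the dataset contiguously across ranks, giving any
# remainder to the lowest ranks. For example, 10 samples on 4 ranks yield
# shard sizes [3, 3, 2, 2], so rank 0 reads indices 0-2 and rank 2 reads 6-7.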
class InferenceSampler(torch.utils.data.sampler.Sampler):

    def __init__(self, size):
        self._size = int(size)
        assert size > 0
        self._rank = torch.distributed.get_rank()
        self._world_size = torch.distributed.get_world_size()
        self._local_indices = self._get_local_indices(size, self._world_size,
                                                      self._rank)

    @staticmethod
    def _get_local_indices(total_size, world_size, rank):
        shard_size = total_size // world_size
        left = total_size % world_size
        shard_sizes = [shard_size + int(r < left) for r in range(world_size)]

        begin = sum(shard_sizes[:rank])
        end = min(sum(shard_sizes[:rank + 1]), total_size)
        return range(begin, end)

    def __iter__(self):
        yield from self._local_indices

    def __len__(self):
        return len(self._local_indices)


if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint', type=str, default='')
    parser.add_argument('--dataset', type=str, default='')
    parser.add_argument('--batch-size', type=int, default=1)
    parser.add_argument('--num-workers', type=int, default=1)
    parser.add_argument('--few-shot', type=int, default=0)
    parser.add_argument('--seed', type=int, default=0)
    args = parser.parse_args()

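    # Initialise the distributed process group from the environment variables
    # set by the launcher (e.g. torchrun): WORLD_SIZE, RANK and LOCAL_RANK.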
    torch.distributed.init_process_group(
        backend='nccl',
        world_size=int(os.getenv('WORLD_SIZE', '1')),
        rank=int(os.getenv('RANK', '0')),
    )

    torch.cuda.set_device(int(os.getenv('LOCAL_RANK', 0)))

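    # Load the checkpoint with trust_remote_code. Padding is on the left so
    # generation continues directly from the prompt, and the tokenizer's EOD
    # token doubles as the pad token.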
    model = AutoModelForCausalLM.from_pretrained(
        args.checkpoint, device_map='cuda', trust_remote_code=True).eval()

    tokenizer = AutoTokenizer.from_pretrained(args.checkpoint,
                                              trust_remote_code=True)
    tokenizer.padding_side = 'left'
    tokenizer.pad_token_id = tokenizer.eod_id

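    # Each query is rendered as an <img> tag holding the image path followed by
    # the question; few-shot exemplars (if any) are prepended in the same format.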
    prompt = '<img>{}</img>{} Answer:'

    random.seed(args.seed)
    dataset = VQADataset(
        train=ds_collections[args.dataset]['train'],
        test=ds_collections[args.dataset]['test'],
        prompt=prompt,
        few_shot=args.few_shot,
    )

    dataloader = torch.utils.data.DataLoader(
        dataset=dataset,
        sampler=InferenceSampler(len(dataset)),
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=False,
        collate_fn=partial(collate_fn, tokenizer=tokenizer),
    )

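    # Greedy, batched decoding; only the tokens generated after the prompt are
    # decoded into the answer string.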
    outputs = []
    for _, (question_ids, input_ids, attention_mask,
            annotations) in tqdm(enumerate(dataloader)):
        pred = model.generate(
            input_ids=input_ids.cuda(),
            attention_mask=attention_mask.cuda(),
            do_sample=False,
            num_beams=1,
            max_new_tokens=ds_collections[args.dataset]['max_new_tokens'],
            min_new_tokens=1,
            length_penalty=1,
            num_return_sequences=1,
            output_hidden_states=True,
            use_cache=True,
            pad_token_id=tokenizer.eod_id,
            eos_token_id=tokenizer.eod_id,
        )
        answers = [
            tokenizer.decode(_[input_ids.size(1):].cpu(),
                             skip_special_tokens=True).strip() for _ in pred
        ]

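        # Each benchmark expects a different submission schema, so the result
        # record keys depend on the dataset.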
        for question_id, answer, annotation in zip(question_ids, answers,
                                                    annotations):
            if args.dataset in ['vqav2_val', 'vqav2_testdev', 'okvqa_val', 'textvqa_val', 'vizwiz_val']:
                outputs.append({
                    'question_id': question_id,
                    'answer': answer,
                })
            elif args.dataset in ['docvqa_val', 'infographicsvqa', 'gqa_testdev', 'ocrvqa_val', 'ocrvqa_test']:
                outputs.append({
                    'questionId': question_id,
                    'answer': answer,
                    'annotation': annotation,
                })
            elif args.dataset in ['ai2diagram_test']:
                outputs.append({
                    'image': question_id,
                    'answer': answer,
                    'annotation': annotation,
                })
            elif args.dataset in ['chartqa_test_human', 'chartqa_test_augmented']:
                outputs.append({
                    'answer': answer,
                    'annotation': annotation,
                })
            elif args.dataset in ['docvqa_test']:
                outputs.append({
                    'questionId': question_id,
                    'answer': answer,
                })
            elif args.dataset in ['vizwiz_test']:
                outputs.append({
                    'image': question_id,
                    'answer': answer,
                })
            else:
                raise NotImplementedError

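    # Synchronise, then gather every rank's results as JSON strings and flatten
    # them into a single list of records.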
    torch.distributed.barrier()

    world_size = torch.distributed.get_world_size()
    merged_outputs = [None for _ in range(world_size)]
    torch.distributed.all_gather_object(merged_outputs, json.dumps(outputs))

    merged_outputs = [json.loads(_) for _ in merged_outputs]
    merged_outputs = [_ for _ in itertools.chain.from_iterable(merged_outputs)]

    if torch.distributed.get_rank() == 0:
        print(f"Evaluating {args.dataset} ...")
        time_prefix = time.strftime('%y%m%d%H%M%S', time.localtime())
        results_file = f'{args.dataset}_{time_prefix}_fs{args.few_shot}_s{args.seed}.json'
        json.dump(merged_outputs, open(results_file, 'w'), ensure_ascii=False)

        if ds_collections[args.dataset]['metric'] == 'vqa_score':
            vqa = VQA(ds_collections[args.dataset]['annotation'],
                      ds_collections[args.dataset]['question'])
            results = vqa.loadRes(
                resFile=results_file,
                quesFile=ds_collections[args.dataset]['question'])
            vqa_scorer = VQAEval(vqa, results, n=2)
            vqa_scorer.evaluate()

            print(vqa_scorer.accuracy)

        elif ds_collections[args.dataset]['metric'] == 'anls':
            json.dump(merged_outputs,
                      open(results_file, 'w'),
                      ensure_ascii=False)
            print('python infographicsvqa_eval.py -g ' +
                  ds_collections[args.dataset]['annotation'] + ' -s ' +
                  results_file)
            os.system('python infographicsvqa_eval.py -g ' +
                      ds_collections[args.dataset]['annotation'] + ' -s ' +
                      results_file)
        elif ds_collections[args.dataset]['metric'] == 'relaxed_accuracy':
            print({
                'relaxed_accuracy': evaluate_relaxed_accuracy(merged_outputs)
            })
        elif ds_collections[args.dataset]['metric'] == 'accuracy':
            if 'gqa' in args.dataset:
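                # Heuristic GQA answer normalisation: keep only the first
                # clause and strip leading copulas/articles ("is", "are", "a",
                # "an", "the") and a trailing " of ..." before exact matching.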
                for entry in merged_outputs:
                    response = entry['answer']
                    response = response.strip().split('.')[0].split(
                        ',')[0].split('!')[0].lower()
                    if 'is ' in response:
                        response = response.split('is ')[1]
                    if 'are ' in response:
                        response = response.split('are ')[1]
                    if 'a ' in response:
                        response = response.split('a ')[1]
                    if 'an ' in response:
                        response = response.split('an ')[1]
                    if 'the ' in response:
                        response = response.split('the ')[1]
                    if ' of' in response:
                        response = response.split(' of')[0]
                    response = response.strip()
                    entry['answer'] = response
            print({'accuracy': evaluate_exact_match_accuracy(merged_outputs)})

    torch.distributed.barrier()