diff --git a/examples/configs/model/default_causal.py b/examples/configs/model/default_causal.py index 956a71dde..c77a317fc 100644 --- a/examples/configs/model/default_causal.py +++ b/examples/configs/model/default_causal.py @@ -1,9 +1,10 @@ from transformers import AutoModelForCausalLM, AutoTokenizer +import torch - -def load_model(model_path: str, device_map: str): +def load_model(model_path: str, device_map: str, dtype: str = "float32"): + dtype = getattr(torch, dtype) model = AutoModelForCausalLM.from_pretrained( - model_path, trust_remote_code=True, device_map=device_map + model_path, trust_remote_code=True, device_map=device_map, torch_dtype=dtype ) model.eval() diff --git a/examples/configs/model/falcon3.yaml b/examples/configs/model/falcon3.yaml new file mode 100644 index 000000000..3a8243339 --- /dev/null +++ b/examples/configs/model/falcon3.yaml @@ -0,0 +1,11 @@ +defaults: + - default + +path: tiiuae/Falcon3-7B-Base +type: CausalLM +path_to_load_script: model/default_causal.py + +load_model_args: + device_map: balanced_low_0 + dtype: bfloat16 +load_tokenizer_args: {} diff --git a/examples/configs/model/llama.yaml b/examples/configs/model/llama.yaml new file mode 100644 index 000000000..0d1870443 --- /dev/null +++ b/examples/configs/model/llama.yaml @@ -0,0 +1,11 @@ +defaults: + - default + +path: meta-llama/Meta-Llama-3.1-8B +type: CausalLM +path_to_load_script: model/default_causal.py + +load_model_args: + device_map: balanced_low_0 + dtype: bfloat16 +load_tokenizer_args: {} diff --git a/examples/configs/polygraph_eval_coqa_sentsar.yaml b/examples/configs/polygraph_eval_coqa_sentsar.yaml new file mode 100644 index 000000000..9c710a207 --- /dev/null +++ b/examples/configs/polygraph_eval_coqa_sentsar.yaml @@ -0,0 +1,162 @@ +hydra: + run: + dir: ${cache_path}/coqa/${model.path}/${dataset}/${now:%Y-%m-%d}/${now:%H-%M-%S} + +defaults: + - model: bloomz-560m + - _self_ + +cache_path: ./workdir/output +save_path: '${hydra:run.dir}' + +task: qa + +base_manager: 
null +overwrite_base_estimations: false + +dataset: coqa +text_column: questions +label_column: answers +description: "The following are stories and questions about them. Each story is followed by a question and answer to a given question.\n\nStory: {story}" +prompt: "Question: {question}\nAnswer:{answer}" +train_split: train +eval_split: validation +max_new_tokens: 20 +load_from_disk: false +normalize: true +generation_params: + generate_until: + - "\n" +save_stats: + - greedy_tokens + - greedy_log_likelihoods + - greedy_tokens_alternatives + - greedy_sentence_similarity + - token_similarity + - entropy + - sample_tokens + - sample_tokens_alternatives + - sample_texts + - sample_log_probs + - sample_log_likelihoods + - sample_sentence_similarity + - sample_token_similarity + - sample_entropy + - first_sample_texts + - best_sample_texts + - best_sample_text_ids + - best_normalized_sample_texts + - best_normalized_sample_text_ids +entropy_top_k: 50 + +train_dataset: null +train_test_split: false +test_split_size: 1 + +background_train_dataset: allenai/c4 +background_train_dataset_text_column: text +background_train_dataset_label_column: url +background_train_dataset_data_files: en/c4-train.00000-of-01024.json.gz +background_load_from_disk: false + +subsample_background_train_dataset: 1000 +subsample_train_dataset: 1000 +subsample_eval_dataset: -1 + +use_density_based_ue: false +use_seq_ue: false +use_tok_ue: false +use_ens_ue: false +generation_metrics: null + +additional_estimators: + - module: lm_polygraph.estimators.monte_carlo_sequence_entropy + class_name: MonteCarloSequenceEntropy + kwargs: {} + - module: lm_polygraph.estimators.monte_carlo_normalized_sequence_entropy + class_name: MonteCarloNormalizedSequenceEntropy + kwargs: {} + - module: lm_polygraph.estimators.semantic_entropy + class_name: SemanticEntropy + kwargs: {} + + - module: lm_polygraph.estimators.max_probability + class_name: MaximumSequenceProbability + kwargs: {} + - module: 
lm_polygraph.estimators.max_probability + class_name: SampledMaximumSequenceProbability + kwargs: {} + - module: lm_polygraph.estimators.sentence_sar + class_name: SentenceSAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: MaxprobGSU + kwargs: {} + + - module: lm_polygraph.estimators.token_sar + class_name: TokenSAR + kwargs: {} + - module: lm_polygraph.estimators.token_sar + class_name: SampledTokenSAR + kwargs: {} + - module: lm_polygraph.estimators.sar + class_name: SAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: TokenSARGSU + kwargs: {} + + - module: lm_polygraph.estimators.perplexity + class_name: Perplexity + kwargs: {} + - module: lm_polygraph.estimators.perplexity + class_name: SampledPerplexity + kwargs: {} + - module: lm_polygraph.estimators.sentence_sar + class_name: PPLSAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: PPLGSU + kwargs: {} + + - module: lm_polygraph.estimators.token_entropy + class_name: MeanTokenEntropy + kwargs: {} + - module: lm_polygraph.estimators.token_entropy + class_name: SampledMeanTokenEntropy + kwargs: {} + - module: lm_polygraph.estimators.sentence_sar + class_name: MTESAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: MTEGSU + kwargs: {} + + - module: lm_polygraph.estimators.average_ue + class_name: AveMaxprob + kwargs: {} + - module: lm_polygraph.estimators.average_ue + class_name: AvePPL + kwargs: {} + - module: lm_polygraph.estimators.average_ue + class_name: AveTokenSAR + kwargs: {} + - module: lm_polygraph.estimators.average_ue + class_name: AveMTE + kwargs: {} + + - module: lm_polygraph.estimators.semantic_average_ue_average_similarity + class_name: SemanticAveMaxprobAveSimilarity + kwargs: {} + + - module: lm_polygraph.estimators.greedy_semantic_average_ue_average_similarity + class_name: GreedySemanticAveMaxprobAveSimilarity + kwargs: {} + +ignore_exceptions: false + +batch_size: 1 +deberta_batch_size: 1 + +seed: + - 1 diff --git 
a/examples/configs/polygraph_eval_gsm8k_sentsar_cot.yaml b/examples/configs/polygraph_eval_gsm8k_sentsar_cot.yaml new file mode 100644 index 000000000..dead8a64e --- /dev/null +++ b/examples/configs/polygraph_eval_gsm8k_sentsar_cot.yaml @@ -0,0 +1,166 @@ +hydra: + run: + dir: ${cache_path}/gsm8k_cot/${model}/${dataset}/${now:%Y-%m-%d}/${now:%H-%M-%S} + +defaults: + - model: bloomz-560m + - _self_ + +cache_path: ./workdir/output +save_path: '${hydra:run.dir}' + +task: qa + +base_manager: null +overwrite_base_estimations: false + +dataset: [gsm8k, main] +text_column: question +label_column: answer +prompt: "Q: There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?\nA: There are 15 trees originally. Then there were 21 trees after some more were planted. So there must have been 21 - 15 = 6. The answer is 6.\n\nQ: If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?\nA: There are originally 3 cars. 2 more cars arrive. 3 + 2 = 5. The answer is 5.\n\nQ: Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?\nA: Originally, Leah had 32 chocolates. Her sister had 42. So in total they had 32 + 42 = 74. After eating 35, they had 74 - 35 = 39. The answer is 39.\n\nQ: Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?\nA: Jason started with 20 lollipops. Then he had 12 after giving some to Denny. So he gave Denny 20 - 12 = 8. The answer is 8.\n\nQ: Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?\nA: Shawn started with 5 toys. If he got 2 toys each from his mom and dad, then that is 4 more toys. 5 + 4 = 9. The answer is 9.\n\nQ: There were nine computers in the server room. 
Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?\nA: There were originally 9 computers. For each of 4 days, 5 more computers were added. So 5 * 4 = 20 computers were added. 9 + 20 is 29. The answer is 29.\n\nQ: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?\nA: Michael started with 58 golf balls. After losing 23 on tuesday, he had 58 - 23 = 35. After losing 2 more, he had 35 - 2 = 33 golf balls. The answer is 33.\n\nQ: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?\nA: Olivia had 23 dollars. 5 bagels for 3 dollars each will be 5 x 3 = 15 dollars. So she has 23 - 15 dollars left. 23 - 15 is 8. The answer is 8.\n\nQ: {question}\nA:" +train_split: train +few_shot_split: train +eval_split: test +max_new_tokens: 256 +load_from_disk: false +n_shot: 0 +normalize: true +generation_params: + generate_until: + - "\n" +save_stats: + - greedy_tokens + - greedy_log_likelihoods + - greedy_tokens_alternatives + - greedy_sentence_similarity + - token_similarity + - entropy + - sample_tokens + - sample_tokens_alternatives + - sample_texts + - sample_log_probs + - sample_log_likelihoods + - sample_sentence_similarity + - sample_token_similarity + - sample_entropy + - first_sample_texts + - best_sample_texts + - best_sample_text_ids + - best_normalized_sample_texts + - best_normalized_sample_text_ids +entropy_top_k: 50 + +target_ignore_regex: "(?s).*#### " +output_ignore_regex: "(?s).*The answer is " + +train_dataset: null +train_test_split: false +test_split_size: 1 + +background_train_dataset: allenai/c4 +background_train_dataset_text_column: text +background_train_dataset_label_column: url +background_train_dataset_data_files: en/c4-train.00000-of-01024.json.gz +background_load_from_disk: false + +subsample_background_train_dataset: 1000 +subsample_train_dataset: 1000 
+subsample_eval_dataset: -1 + +use_density_based_ue: false +use_seq_ue: false +use_tok_ue: false +use_ens_ue: false +generation_metrics: null + +additional_estimators: + - module: lm_polygraph.estimators.monte_carlo_sequence_entropy + class_name: MonteCarloSequenceEntropy + kwargs: {} + - module: lm_polygraph.estimators.monte_carlo_normalized_sequence_entropy + class_name: MonteCarloNormalizedSequenceEntropy + kwargs: {} + - module: lm_polygraph.estimators.semantic_entropy + class_name: SemanticEntropy + kwargs: {} + + - module: lm_polygraph.estimators.max_probability + class_name: MaximumSequenceProbability + kwargs: {} + - module: lm_polygraph.estimators.max_probability + class_name: SampledMaximumSequenceProbability + kwargs: {} + - module: lm_polygraph.estimators.sentence_sar + class_name: SentenceSAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: MaxprobGSU + kwargs: {} + + - module: lm_polygraph.estimators.token_sar + class_name: TokenSAR + kwargs: {} + - module: lm_polygraph.estimators.token_sar + class_name: SampledTokenSAR + kwargs: {} + - module: lm_polygraph.estimators.sar + class_name: SAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: TokenSARGSU + kwargs: {} + + - module: lm_polygraph.estimators.perplexity + class_name: Perplexity + kwargs: {} + - module: lm_polygraph.estimators.perplexity + class_name: SampledPerplexity + kwargs: {} + - module: lm_polygraph.estimators.sentence_sar + class_name: PPLSAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: PPLGSU + kwargs: {} + + - module: lm_polygraph.estimators.token_entropy + class_name: MeanTokenEntropy + kwargs: {} + - module: lm_polygraph.estimators.token_entropy + class_name: SampledMeanTokenEntropy + kwargs: {} + - module: lm_polygraph.estimators.sentence_sar + class_name: MTESAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: MTEGSU + kwargs: {} + + - module: lm_polygraph.estimators.average_ue + class_name: AveMaxprob + 
kwargs: {} + - module: lm_polygraph.estimators.average_ue + class_name: AvePPL + kwargs: {} + - module: lm_polygraph.estimators.average_ue + class_name: AveTokenSAR + kwargs: {} + - module: lm_polygraph.estimators.average_ue + class_name: AveMTE + kwargs: {} + + - module: lm_polygraph.estimators.semantic_average_ue_average_similarity + class_name: SemanticAveMaxprobAveSimilarity + kwargs: {} + + - module: lm_polygraph.estimators.greedy_semantic_average_ue_average_similarity + class_name: GreedySemanticAveMaxprobAveSimilarity + kwargs: {} + +ignore_exceptions: false + +batch_size: 1 +deberta_batch_size: 1 + +seed: + - 1 diff --git a/examples/configs/polygraph_eval_mmlu_sentsar.yaml b/examples/configs/polygraph_eval_mmlu_sentsar.yaml new file mode 100644 index 000000000..4be9fc43c --- /dev/null +++ b/examples/configs/polygraph_eval_mmlu_sentsar.yaml @@ -0,0 +1,164 @@ +hydra: + run: + dir: ${cache_path}/mmlu/${model}/${dataset}/${now:%Y-%m-%d}/${now:%H-%M-%S} + +defaults: + - model: bloomz-560m + - _self_ + +cache_path: ./workdir/output +save_path: '${hydra:run.dir}' + +task: qa + +base_manager: null +overwrite_base_estimations: false + +dataset: [cais/mmlu, all] +text_column: question +label_column: answer +description: "The following are multiple choice questions (with answers) about {subject}." +prompt: "Q:{question}\nA. {choices[0]}\nB. {choices[1]}\nC. {choices[2]}\nD. 
{choices[3]}\nAnswer:{answer}" +few_shot_split: dev +train_split: validation +eval_split: test +max_new_tokens: 3 +load_from_disk: false +n_shot: 5 +max_subject_size: 100 +generation_params: + generate_until: + - "\n" +save_stats: + - greedy_tokens + - greedy_log_likelihoods + - greedy_tokens_alternatives + - greedy_sentence_similarity + - token_similarity + - entropy + - sample_tokens + - sample_tokens_alternatives + - sample_texts + - sample_log_probs + - sample_log_likelihoods + - sample_sentence_similarity + - sample_token_similarity + - sample_entropy + - first_sample_texts + - best_sample_texts + - best_sample_text_ids + - best_normalized_sample_texts + - best_normalized_sample_text_ids +entropy_top_k: 50 + +train_dataset: null +train_test_split: false +test_split_size: 1 + +background_train_dataset: allenai/c4 +background_train_dataset_text_column: text +background_train_dataset_label_column: url +background_train_dataset_data_files: en/c4-train.00000-of-01024.json.gz +background_load_from_disk: false + +subsample_background_train_dataset: 1000 +subsample_train_dataset: 1000 +subsample_eval_dataset: -1 + +use_density_based_ue: false +use_seq_ue: false +use_tok_ue: false +use_ens_ue: false +generation_metrics: null + +additional_estimators: + - module: lm_polygraph.estimators.monte_carlo_sequence_entropy + class_name: MonteCarloSequenceEntropy + kwargs: {} + - module: lm_polygraph.estimators.monte_carlo_normalized_sequence_entropy + class_name: MonteCarloNormalizedSequenceEntropy + kwargs: {} + - module: lm_polygraph.estimators.semantic_entropy + class_name: SemanticEntropy + kwargs: {} + + - module: lm_polygraph.estimators.max_probability + class_name: MaximumSequenceProbability + kwargs: {} + - module: lm_polygraph.estimators.max_probability + class_name: SampledMaximumSequenceProbability + kwargs: {} + - module: lm_polygraph.estimators.sentence_sar + class_name: SentenceSAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: MaxprobGSU + 
kwargs: {} + + - module: lm_polygraph.estimators.token_sar + class_name: TokenSAR + kwargs: {} + - module: lm_polygraph.estimators.token_sar + class_name: SampledTokenSAR + kwargs: {} + - module: lm_polygraph.estimators.sar + class_name: SAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: TokenSARGSU + kwargs: {} + + - module: lm_polygraph.estimators.perplexity + class_name: Perplexity + kwargs: {} + - module: lm_polygraph.estimators.perplexity + class_name: SampledPerplexity + kwargs: {} + - module: lm_polygraph.estimators.sentence_sar + class_name: PPLSAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: PPLGSU + kwargs: {} + + - module: lm_polygraph.estimators.token_entropy + class_name: MeanTokenEntropy + kwargs: {} + - module: lm_polygraph.estimators.token_entropy + class_name: SampledMeanTokenEntropy + kwargs: {} + - module: lm_polygraph.estimators.sentence_sar + class_name: MTESAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: MTEGSU + kwargs: {} + + - module: lm_polygraph.estimators.average_ue + class_name: AveMaxprob + kwargs: {} + - module: lm_polygraph.estimators.average_ue + class_name: AvePPL + kwargs: {} + - module: lm_polygraph.estimators.average_ue + class_name: AveTokenSAR + kwargs: {} + - module: lm_polygraph.estimators.average_ue + class_name: AveMTE + kwargs: {} + + - module: lm_polygraph.estimators.semantic_average_ue_average_similarity + class_name: SemanticAveMaxprobAveSimilarity + kwargs: {} + + - module: lm_polygraph.estimators.greedy_semantic_average_ue_average_similarity + class_name: GreedySemanticAveMaxprobAveSimilarity + kwargs: {} + +ignore_exceptions: false + +batch_size: 1 +deberta_batch_size: 1 + +seed: + - 1 diff --git a/examples/configs/polygraph_eval_triviaqa_sentsar.yaml b/examples/configs/polygraph_eval_triviaqa_sentsar.yaml new file mode 100644 index 000000000..81e594904 --- /dev/null +++ b/examples/configs/polygraph_eval_triviaqa_sentsar.yaml @@ -0,0 +1,165 @@ +hydra: + 
run: + dir: ${cache_path}/triviaqa/${model}/${dataset}/${now:%Y-%m-%d}/${now:%H-%M-%S} + +defaults: + - model: bloomz-560m + - _self_ + +cache_path: ./workdir/output +save_path: '${hydra:run.dir}' + +task: qa + +base_manager: null +overwrite_base_estimations: false + +dataset: [trivia_qa, rc.nocontext] +text_column: question +label_column: answer +prompt: "Question: {question}\nAnswer:{answer}" +few_shot_split: train +train_split: train +eval_split: validation +max_new_tokens: 20 +load_from_disk: false +n_shot: 5 +multiref: true +normalize: true +generation_params: + generate_until: + - "\n" +save_stats: + - greedy_tokens + - greedy_log_likelihoods + - greedy_tokens_alternatives + - greedy_sentence_similarity + - token_similarity + - entropy + - sample_tokens + - sample_tokens_alternatives + - sample_texts + - sample_log_probs + - sample_log_likelihoods + - sample_sentence_similarity + - sample_token_similarity + - sample_entropy + - first_sample_texts + - best_sample_texts + - best_sample_text_ids + - best_normalized_sample_texts + - best_normalized_sample_text_ids +entropy_top_k: 50 + +train_dataset: null +train_test_split: false +test_split_size: 1 + +background_train_dataset: allenai/c4 +background_train_dataset_text_column: text +background_train_dataset_label_column: url +background_train_dataset_data_files: en/c4-train.00000-of-01024.json.gz +background_load_from_disk: false + +subsample_background_train_dataset: 1000 +subsample_train_dataset: 1000 +subsample_eval_dataset: -1 + +use_density_based_ue: false +use_seq_ue: false +use_tok_ue: false +use_ens_ue: false +generation_metrics: null +ens_type: + +additional_estimators: + - module: lm_polygraph.estimators.monte_carlo_sequence_entropy + class_name: MonteCarloSequenceEntropy + kwargs: {} + - module: lm_polygraph.estimators.monte_carlo_normalized_sequence_entropy + class_name: MonteCarloNormalizedSequenceEntropy + kwargs: {} + - module: lm_polygraph.estimators.semantic_entropy + class_name: SemanticEntropy 
+ kwargs: {} + + - module: lm_polygraph.estimators.max_probability + class_name: MaximumSequenceProbability + kwargs: {} + - module: lm_polygraph.estimators.max_probability + class_name: SampledMaximumSequenceProbability + kwargs: {} + - module: lm_polygraph.estimators.sentence_sar + class_name: SentenceSAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: MaxprobGSU + kwargs: {} + + - module: lm_polygraph.estimators.token_sar + class_name: TokenSAR + kwargs: {} + - module: lm_polygraph.estimators.token_sar + class_name: SampledTokenSAR + kwargs: {} + - module: lm_polygraph.estimators.sar + class_name: SAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: TokenSARGSU + kwargs: {} + + - module: lm_polygraph.estimators.perplexity + class_name: Perplexity + kwargs: {} + - module: lm_polygraph.estimators.perplexity + class_name: SampledPerplexity + kwargs: {} + - module: lm_polygraph.estimators.sentence_sar + class_name: PPLSAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: PPLGSU + kwargs: {} + + - module: lm_polygraph.estimators.token_entropy + class_name: MeanTokenEntropy + kwargs: {} + - module: lm_polygraph.estimators.token_entropy + class_name: SampledMeanTokenEntropy + kwargs: {} + - module: lm_polygraph.estimators.sentence_sar + class_name: MTESAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: MTEGSU + kwargs: {} + + - module: lm_polygraph.estimators.average_ue + class_name: AveMaxprob + kwargs: {} + - module: lm_polygraph.estimators.average_ue + class_name: AvePPL + kwargs: {} + - module: lm_polygraph.estimators.average_ue + class_name: AveTokenSAR + kwargs: {} + - module: lm_polygraph.estimators.average_ue + class_name: AveMTE + kwargs: {} + + - module: lm_polygraph.estimators.semantic_average_ue_average_similarity + class_name: SemanticAveMaxprobAveSimilarity + kwargs: {} + + - module: lm_polygraph.estimators.greedy_semantic_average_ue_average_similarity + class_name: 
GreedySemanticAveMaxprobAveSimilarity + kwargs: {} + +ignore_exceptions: false + +batch_size: 1 +deberta_batch_size: 1 + +seed: + - 1 diff --git a/examples/configs/polygraph_eval_wmt14_fren_sentsar.yaml b/examples/configs/polygraph_eval_wmt14_fren_sentsar.yaml new file mode 100644 index 000000000..67449b720 --- /dev/null +++ b/examples/configs/polygraph_eval_wmt14_fren_sentsar.yaml @@ -0,0 +1,164 @@ +hydra: + run: + dir: ${cache_path}/wmt14_fren/${model.path}/${dataset}/${now:%Y-%m-%d}/${now:%H-%M-%S} + +defaults: + - model: bloomz-560m + - _self_ + +cache_path: ./workdir/output +save_path: '${hydra:run.dir}' + +device: cpu + +task: nmt + +base_manager: null +overwrite_base_estimations: false + +dataset: [wmt14, fr-en] +text_column: fr +label_column: en +prompt: "Here is a sentence in {source_lang} language and its translation in {target_lang} language.\n\nOriginal:\n{text}\nTranslation:\n" +train_split: train +eval_split: test +max_new_tokens: 107 +load_from_disk: false +generation_params: + generate_until: + - "\n" +save_stats: + - greedy_tokens + - greedy_log_likelihoods + - greedy_tokens_alternatives + - greedy_sentence_similarity + - token_similarity + - entropy + - sample_tokens + - sample_tokens_alternatives + - sample_texts + - sample_log_probs + - sample_log_likelihoods + - sample_sentence_similarity + - sample_token_similarity + - sample_entropy + - first_sample_texts + - best_sample_texts + - best_sample_text_ids + - best_normalized_sample_texts + - best_normalized_sample_text_ids +entropy_top_k: 50 + +source_ignore_regex: "(?s).*Original:\n(.*?)\nTranslation:\n" + +train_dataset: null +train_test_split: false +test_split_size: 1 + +background_train_dataset: allenai/c4 +background_train_dataset_text_column: text +background_train_dataset_label_column: url +background_train_dataset_data_files: en/c4-train.00000-of-01024.json.gz +background_load_from_disk: false + +subsample_background_train_dataset: 1000 +subsample_train_dataset: 1000 
+subsample_eval_dataset: -1 + +use_density_based_ue: false +use_ens_ue: false +use_seq_ue: false +use_tok_ue: false +generation_metrics: null + +additional_estimators: + - module: lm_polygraph.estimators.monte_carlo_sequence_entropy + class_name: MonteCarloSequenceEntropy + kwargs: {} + - module: lm_polygraph.estimators.monte_carlo_normalized_sequence_entropy + class_name: MonteCarloNormalizedSequenceEntropy + kwargs: {} + - module: lm_polygraph.estimators.semantic_entropy + class_name: SemanticEntropy + kwargs: {} + + - module: lm_polygraph.estimators.max_probability + class_name: MaximumSequenceProbability + kwargs: {} + - module: lm_polygraph.estimators.max_probability + class_name: SampledMaximumSequenceProbability + kwargs: {} + - module: lm_polygraph.estimators.sentence_sar + class_name: SentenceSAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: MaxprobGSU + kwargs: {} + + - module: lm_polygraph.estimators.token_sar + class_name: TokenSAR + kwargs: {} + - module: lm_polygraph.estimators.token_sar + class_name: SampledTokenSAR + kwargs: {} + - module: lm_polygraph.estimators.sar + class_name: SAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: TokenSARGSU + kwargs: {} + + - module: lm_polygraph.estimators.perplexity + class_name: Perplexity + kwargs: {} + - module: lm_polygraph.estimators.perplexity + class_name: SampledPerplexity + kwargs: {} + - module: lm_polygraph.estimators.sentence_sar + class_name: PPLSAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: PPLGSU + kwargs: {} + + - module: lm_polygraph.estimators.token_entropy + class_name: MeanTokenEntropy + kwargs: {} + - module: lm_polygraph.estimators.token_entropy + class_name: SampledMeanTokenEntropy + kwargs: {} + - module: lm_polygraph.estimators.sentence_sar + class_name: MTESAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: MTEGSU + kwargs: {} + + - module: lm_polygraph.estimators.average_ue + class_name: AveMaxprob + 
kwargs: {} + - module: lm_polygraph.estimators.average_ue + class_name: AvePPL + kwargs: {} + - module: lm_polygraph.estimators.average_ue + class_name: AveTokenSAR + kwargs: {} + - module: lm_polygraph.estimators.average_ue + class_name: AveMTE + kwargs: {} + + - module: lm_polygraph.estimators.semantic_average_ue_average_similarity + class_name: SemanticAveMaxprobAveSimilarity + kwargs: {} + + - module: lm_polygraph.estimators.greedy_semantic_average_ue_average_similarity + class_name: GreedySemanticAveMaxprobAveSimilarity + kwargs: {} + +ignore_exceptions: false + +batch_size: 1 +deberta_batch_size: 1 + +seed: + - 1 diff --git a/examples/configs/polygraph_eval_wmt19_deen_sentsar.yaml b/examples/configs/polygraph_eval_wmt19_deen_sentsar.yaml new file mode 100644 index 000000000..cf9b58fe0 --- /dev/null +++ b/examples/configs/polygraph_eval_wmt19_deen_sentsar.yaml @@ -0,0 +1,163 @@ +hydra: + run: + dir: ${cache_path}/wmt19_deen/${model.path}/${dataset}/${now:%Y-%m-%d}/${now:%H-%M-%S} + +defaults: + - model: bloomz-560m + - _self_ + +cache_path: ./workdir/output +save_path: '${hydra:run.dir}' + +device: cpu + +task: nmt + +base_manager: null +overwrite_base_estimations: false + +dataset: [wmt19, de-en] +text_column: de +label_column: en +prompt: "Here is a sentence in {source_lang} language and its translation in {target_lang} language.\n\nOriginal:\n{text}\nTranslation:\n" +train_split: train +eval_split: validation +max_new_tokens: 107 +load_from_disk: false +generation_params: + generate_until: + - "\n" +save_stats: + - greedy_tokens + - greedy_log_likelihoods + - greedy_tokens_alternatives + - greedy_sentence_similarity + - token_similarity + - entropy + - sample_tokens + - sample_tokens_alternatives + - sample_texts + - sample_log_probs + - sample_log_likelihoods + - sample_sentence_similarity + - sample_token_similarity + - sample_entropy + - first_sample_texts + - best_sample_texts + - best_sample_text_ids + - best_normalized_sample_texts + - 
best_normalized_sample_text_ids +entropy_top_k: 50 + +source_ignore_regex: "(?s).*Original:\n(.*?)\nTranslation:\n" + +train_dataset: null +train_test_split: false +test_split_size: 1 + +background_train_dataset: allenai/c4 +background_train_dataset_text_column: text +background_train_dataset_label_column: url +background_train_dataset_data_files: en/c4-train.00000-of-01024.json.gz +background_load_from_disk: false + +subsample_background_train_dataset: 1000 +subsample_train_dataset: 1000 +subsample_eval_dataset: -1 + +use_density_based_ue: false +use_ens_ue: false +use_seq_ue: false +use_tok_ue: false + +additional_estimators: + - module: lm_polygraph.estimators.monte_carlo_sequence_entropy + class_name: MonteCarloSequenceEntropy + kwargs: {} + - module: lm_polygraph.estimators.monte_carlo_normalized_sequence_entropy + class_name: MonteCarloNormalizedSequenceEntropy + kwargs: {} + - module: lm_polygraph.estimators.semantic_entropy + class_name: SemanticEntropy + kwargs: {} + + - module: lm_polygraph.estimators.max_probability + class_name: MaximumSequenceProbability + kwargs: {} + - module: lm_polygraph.estimators.max_probability + class_name: SampledMaximumSequenceProbability + kwargs: {} + - module: lm_polygraph.estimators.sentence_sar + class_name: SentenceSAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: MaxprobGSU + kwargs: {} + + - module: lm_polygraph.estimators.token_sar + class_name: TokenSAR + kwargs: {} + - module: lm_polygraph.estimators.token_sar + class_name: SampledTokenSAR + kwargs: {} + - module: lm_polygraph.estimators.sar + class_name: SAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: TokenSARGSU + kwargs: {} + + - module: lm_polygraph.estimators.perplexity + class_name: Perplexity + kwargs: {} + - module: lm_polygraph.estimators.perplexity + class_name: SampledPerplexity + kwargs: {} + - module: lm_polygraph.estimators.sentence_sar + class_name: PPLSAR + kwargs: {} + - module: 
lm_polygraph.estimators.gsu + class_name: PPLGSU + kwargs: {} + + - module: lm_polygraph.estimators.token_entropy + class_name: MeanTokenEntropy + kwargs: {} + - module: lm_polygraph.estimators.token_entropy + class_name: SampledMeanTokenEntropy + kwargs: {} + - module: lm_polygraph.estimators.sentence_sar + class_name: MTESAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: MTEGSU + kwargs: {} + + - module: lm_polygraph.estimators.average_ue + class_name: AveMaxprob + kwargs: {} + - module: lm_polygraph.estimators.average_ue + class_name: AvePPL + kwargs: {} + - module: lm_polygraph.estimators.average_ue + class_name: AveTokenSAR + kwargs: {} + - module: lm_polygraph.estimators.average_ue + class_name: AveMTE + kwargs: {} + + - module: lm_polygraph.estimators.semantic_average_ue_average_similarity + class_name: SemanticAveMaxprobAveSimilarity + kwargs: {} + + - module: lm_polygraph.estimators.greedy_semantic_average_ue_average_similarity + class_name: GreedySemanticAveMaxprobAveSimilarity + kwargs: {} + +ignore_exceptions: false + +batch_size: 1 +deberta_batch_size: 1 + +seed: + - 1 diff --git a/examples/configs/polygraph_eval_xsum_sentsar.yaml b/examples/configs/polygraph_eval_xsum_sentsar.yaml new file mode 100644 index 000000000..c9dd41a86 --- /dev/null +++ b/examples/configs/polygraph_eval_xsum_sentsar.yaml @@ -0,0 +1,162 @@ +hydra: + run: + dir: ${cache_path}/xsum/${model.path}/${dataset}/${now:%Y-%m-%d}/${now:%H-%M-%S} + +defaults: + - model: bloomz-560m + - _self_ + +cache_path: ./workdir/output +save_path: '${hydra:run.dir}' + +device: cpu + +task: ats + +base_manager: null +overwrite_base_estimations: false + +dataset: xsum +text_column: document +label_column: summary +prompt: "Here's the text and it's short one-sentence summary.\n\nText:\n{text}\n\nSummary (one sentence):\n" +train_split: train +eval_split: test +max_new_tokens: 56 +load_from_disk: false +trust_remote_code: true +generation_params: + generate_until: + - "\n" 
+save_stats: + - greedy_tokens + - greedy_log_likelihoods + - greedy_tokens_alternatives + - greedy_sentence_similarity + - token_similarity + - entropy + - sample_tokens + - sample_tokens_alternatives + - sample_texts + - sample_log_probs + - sample_log_likelihoods + - sample_sentence_similarity + - sample_token_similarity + - sample_entropy + - first_sample_texts + - best_sample_texts + - best_sample_text_ids + - best_normalized_sample_texts + - best_normalized_sample_text_ids +entropy_top_k: 50 + +train_dataset: null +train_test_split: false +test_split_size: 1 + +background_train_dataset: allenai/c4 +background_train_dataset_text_column: text +background_train_dataset_label_column: url +background_train_dataset_data_files: en/c4-train.00000-of-01024.json.gz +background_load_from_disk: false + +subsample_background_train_dataset: 1000 +subsample_train_dataset: 1000 +subsample_eval_dataset: -1 + +use_density_based_ue: false +use_seq_ue: false +use_tok_ue: false +use_ens_ue: false + +additional_estimators: + - module: lm_polygraph.estimators.monte_carlo_sequence_entropy + class_name: MonteCarloSequenceEntropy + kwargs: {} + - module: lm_polygraph.estimators.monte_carlo_normalized_sequence_entropy + class_name: MonteCarloNormalizedSequenceEntropy + kwargs: {} + - module: lm_polygraph.estimators.semantic_entropy + class_name: SemanticEntropy + kwargs: {} + + - module: lm_polygraph.estimators.max_probability + class_name: MaximumSequenceProbability + kwargs: {} + - module: lm_polygraph.estimators.max_probability + class_name: SampledMaximumSequenceProbability + kwargs: {} + - module: lm_polygraph.estimators.sentence_sar + class_name: SentenceSAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: MaxprobGSU + kwargs: {} + + - module: lm_polygraph.estimators.token_sar + class_name: TokenSAR + kwargs: {} + - module: lm_polygraph.estimators.token_sar + class_name: SampledTokenSAR + kwargs: {} + - module: lm_polygraph.estimators.sar + class_name: SAR + 
kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: TokenSARGSU + kwargs: {} + + - module: lm_polygraph.estimators.perplexity + class_name: Perplexity + kwargs: {} + - module: lm_polygraph.estimators.perplexity + class_name: SampledPerplexity + kwargs: {} + - module: lm_polygraph.estimators.sentence_sar + class_name: PPLSAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: PPLGSU + kwargs: {} + + - module: lm_polygraph.estimators.token_entropy + class_name: MeanTokenEntropy + kwargs: {} + - module: lm_polygraph.estimators.token_entropy + class_name: SampledMeanTokenEntropy + kwargs: {} + - module: lm_polygraph.estimators.sentence_sar + class_name: MTESAR + kwargs: {} + - module: lm_polygraph.estimators.gsu + class_name: MTEGSU + kwargs: {} + + - module: lm_polygraph.estimators.average_ue + class_name: AveMaxprob + kwargs: {} + - module: lm_polygraph.estimators.average_ue + class_name: AvePPL + kwargs: {} + - module: lm_polygraph.estimators.average_ue + class_name: AveTokenSAR + kwargs: {} + - module: lm_polygraph.estimators.average_ue + class_name: AveMTE + kwargs: {} + + - module: lm_polygraph.estimators.semantic_average_ue_average_similarity + class_name: SemanticAveMaxprobAveSimilarity + kwargs: {} + + - module: lm_polygraph.estimators.greedy_semantic_average_ue_average_similarity + class_name: GreedySemanticAveMaxprobAveSimilarity + kwargs: {} + +ignore_exceptions: false + +batch_size: 1 +deberta_batch_size: 1 + +seed: + - 1 diff --git a/requirements.txt b/requirements.txt index 949524b00..39fc2a345 100644 --- a/requirements.txt +++ b/requirements.txt @@ -33,5 +33,5 @@ bert-score unbabel-comet==2.2.1 nltk>=3.7,<4 evaluate -spacy>=3.4.0,<4 +spacy>=3.4.0,<3.8 fastchat diff --git a/scripts/polygraph_eval b/scripts/polygraph_eval index 23f6db69e..7b6bde8f4 100755 --- a/scripts/polygraph_eval +++ b/scripts/polygraph_eval @@ -13,10 +13,12 @@ import logging log = logging.getLogger('lm_polygraph') +from evaluate import load from 
lm_polygraph.utils.manager import UEManager from lm_polygraph.utils.dataset import Dataset from lm_polygraph.utils.model import WhiteboxModel, BlackboxModel, create_ensemble from lm_polygraph.utils.processor import Logger +from lm_polygraph.generation_metrics.alignscore_utils import AlignScorer from lm_polygraph.generation_metrics import * from lm_polygraph.estimators import * from lm_polygraph.utils.openai_chat import OpenAIChat @@ -26,6 +28,8 @@ from lm_polygraph.estimators.ensemble_token_measures import * from lm_polygraph.ue_metrics import * from lm_polygraph.utils.common import load_external_module from lm_polygraph.utils.generation_parameters import GenerationParameters +from lm_polygraph.generation_metrics.x_metric_utils import MT5ForRegression +from transformers import AutoTokenizer, AutoModelForCausalLM hydra_config = Path(os.environ["HYDRA_CONFIG"]) @@ -183,37 +187,60 @@ def main(args): generation_metrics = get_generation_metrics(args) ue_metrics = get_ue_metrics(args) - - man = UEManager( - dataset, - model, - estimators, - generation_metrics, - ue_metrics, - [ - Logger(), - ], - deberta_batch_size=getattr(args, 'deberta_batch_size', 10), - train_data=train_dataset, - ignore_exceptions=args.ignore_exceptions, - background_train_data=background_train_dataset, - max_new_tokens=args.max_new_tokens, - ensemble_model=ensemble_model, - cache_path=args.cache_path, - language=getattr(args, 'language', 'en'), - ) + + if getattr(args, "base_manager", None) is None: + man = UEManager( + dataset, + model, + estimators, + generation_metrics, + ue_metrics, + [ + Logger(), + ], + batch_size=args.batch_size, + deberta_batch_size=getattr(args, 'deberta_batch_size', 10), + train_data=train_dataset, + ignore_exceptions=args.ignore_exceptions, + background_train_data=background_train_dataset, + max_new_tokens=args.max_new_tokens, + ensemble_model=ensemble_model, + cache_path=args.cache_path, + language=getattr(args, 'language', 'en'), + save_stats=getattr(args, 
'save_stats', []), + entropy_top_k=getattr(args, 'entropy_top_k', None), + save_path=save_path + f"/ue_manager_seed{seed}", + ) + else: + man = UEManager.load( + args.base_manager, + data = dataset, + model = model, + estimators = estimators, + generation_metrics = generation_metrics, + ue_metrics = ue_metrics, + processors = [Logger()], + batch_size=args.batch_size, + deberta_batch_size=getattr(args, 'deberta_batch_size', 10), + train_data=train_dataset, + ignore_exceptions=args.ignore_exceptions, + background_train_data=background_train_dataset, + max_new_tokens=args.max_new_tokens, + ensemble_model=ensemble_model, + cache_path=args.cache_path, + language=getattr(args, 'language', 'en'), + save_stats=getattr(args, 'save_stats', []), + entropy_top_k=getattr(args, 'entropy_top_k', None), + ) man() - man.save(save_path + f"/ue_manager_seed{seed}") - - def get_ue_metrics(args): ue_metrics = [ - ReversedPairsProportion(), + #ReversedPairsProportion(), PredictionRejectionArea(), PredictionRejectionArea(max_rejection=0.5), - RiskCoverageCurveAUC(), + #RiskCoverageCurveAUC(), ] if getattr(args, "use_claim_ue", False): ue_metrics += [ @@ -265,10 +292,18 @@ def get_density_based_ue_methods(args, model_type): def get_ue_methods(args, model): + if getattr(args, "base_manager", None) is not None: + base_manager = UEManager.load(args.base_manager) + existing_estimators = list(base_manager.estimations.keys()) + else: + existing_estimators = [] + + overwrite = getattr(args, "overwrite_base_estimations", False) + estimators = [] if getattr(args.model, "type", "Whitebox") == "Blackbox": if getattr(args, "use_seq_ue", False): - estimators += [ + bb_estimators = [ LexicalSimilarity(metric="rouge1"), LexicalSimilarity(metric="rouge2"), LexicalSimilarity(metric="rougeL"), @@ -285,6 +320,10 @@ def get_ue_methods(args, model): Eccentricity(similarity_score="Jaccard_score"), ] + for estimator in bb_estimators: + if overwrite or ('sequence', str(estimator)) not in existing_estimators: + 
estimators.append(estimator) + if getattr(args, "use_ens_ue", False): raise NotImplementedError('Ensemble UE methods not applicable for blackbox models') @@ -295,7 +334,7 @@ def get_ue_methods(args, model): raise NotImplementedError('Claim UE methods not applicable for blackbox models') else: if getattr(args, "use_seq_ue", False): - estimators += [ + wb_estimators = [ MaximumSequenceProbability(), Perplexity(), MeanTokenEntropy(), @@ -328,6 +367,10 @@ def get_ue_methods(args, model): FisherRao(), ] + for estimator in wb_estimators: + if overwrite or ('sequence', str(estimator)) not in existing_estimators: + estimators.append(estimator) + if getattr(args, "use_ens_ue", False): # Ensemble-based UE methods have been disabled due to dependency on old # transformers code, which prevents bumping transformers version in @@ -348,7 +391,7 @@ def get_ue_methods(args, model): #estimators += (token_measures + sequence_measures) if getattr(args, "use_tok_ue", False): - estimators += [ + tok_estimators = [ MaximumTokenProbability(), TokenEntropy(), PointwiseMutualInformation(), @@ -356,8 +399,12 @@ def get_ue_methods(args, model): SemanticEntropyToken(model.model_path, args.cache_path), ] + for estimator in tok_estimators: + if overwrite or ('token', str(estimator)) not in existing_estimators: + estimators.append(estimator) + if getattr(args, "use_claim_ue", False): - estimators += [ + claim_estimators = [ MaximumClaimProbability(), PerplexityClaim(), MaxTokenEntropyClaim(), @@ -367,12 +414,19 @@ def get_ue_methods(args, model): ClaimConditionedProbabilityClaim(nli_context="fact_pref"), ] + for estimator in claim_estimators: + if overwrite or ('claim', str(estimator)) not in existing_estimators: + estimators.append(estimator) + additional_estimators = getattr(args, "additional_estimators", {}) for estimator_args in additional_estimators: module = importlib.import_module(estimator_args.module) estimator_class = getattr(module, estimator_args.class_name) - 
estimators.append(estimator_class(**estimator_args.kwargs)) + estimator = estimator_class(**estimator_args.kwargs) + # Additional estimator filtering only works correctly for sequence-level estimators + if overwrite or ('sequence', str(estimator)) not in existing_estimators: + estimators.append(estimator) return estimators @@ -381,28 +435,104 @@ def get_generation_metrics(args): log.info("="*100) log.info("Initializing generation metrics...") + if getattr(args, "base_manager", None) is not None: + base_manager = UEManager.load(args.base_manager) + existing_metrics = list(base_manager.gen_metrics.keys()) + else: + existing_metrics = [] + generation_metrics = getattr(args, "generation_metrics", None) if not generation_metrics: + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + batch_size = 16 + ckpt_path="https://huggingface.co/yzha/AlignScore/resolve/main/AlignScore-large.ckpt" + align_scorer = AlignScorer( + model="roberta-large", + batch_size=batch_size, + device=device, + ckpt_path=ckpt_path, + evaluation_mode="nli_sp", + ) + api_key =getattr(args, "openai_api_key", '') result = [ - RougeMetric("rouge1"), - RougeMetric("rouge2"), RougeMetric("rougeL"), BLEUMetric(), - BertScoreMetric('rh'), - SbertMetric(), AccuracyMetric( target_ignore_regex = getattr(args, "target_ignore_regex", None), output_ignore_regex = getattr(args, "output_ignore_regex", None), normalize = getattr(args, "normalize", False), ), - AlignScore(target_is_claims=False if args.task == "ats" else True), + AlignScore(align_scorer), + AlignScore(align_scorer, target_is_claims=False), + AlignScore(align_scorer, ignore_target=True), + # Sample-based metrics + RougeMetric("rougeL", sample=True), + BLEUMetric(sample=True), + AccuracyMetric( + target_ignore_regex = getattr(args, "target_ignore_regex", None), + output_ignore_regex = getattr(args, "output_ignore_regex", None), + normalize = getattr(args, "normalize", False), + sample=True, + ), + AlignScore(align_scorer, 
sample=True), + AlignScore(align_scorer, target_is_claims=False, sample=True), + AlignScore(align_scorer, ignore_target=True, sample=True), + # Best sample-based metrics + RougeMetric("rougeL", sample=True, sample_strategy="Best"), + BLEUMetric(sample=True, sample_strategy="Best"), + AccuracyMetric( + target_ignore_regex = getattr(args, "target_ignore_regex", None), + output_ignore_regex = getattr(args, "output_ignore_regex", None), + normalize = getattr(args, "normalize", False), + sample=True, + sample_strategy="Best", + ), + AlignScore(align_scorer, sample=True, sample_strategy="Best"), + AlignScore(align_scorer, target_is_claims=False, sample=True, sample_strategy="Best"), + AlignScore(align_scorer, ignore_target=True, sample=True, sample_strategy="Best"), + # Best normalized sample-based metrics + RougeMetric("rougeL", sample=True, sample_strategy="BestNormalized"), + BLEUMetric(sample=True, sample_strategy="BestNormalized"), + AccuracyMetric( + target_ignore_regex = getattr(args, "target_ignore_regex", None), + output_ignore_regex = getattr(args, "output_ignore_regex", None), + normalize = getattr(args, "normalize", False), + sample=True, + sample_strategy="BestNormalized", + ), + AlignScore(align_scorer, sample=True, sample_strategy="BestNormalized"), + AlignScore(align_scorer, target_is_claims=False, sample=True, sample_strategy="BestNormalized"), + AlignScore(align_scorer, ignore_target=True, sample=True, sample_strategy="BestNormalized"), + GptAccuracyMetric( api_key=api_key), + GptAccuracyMetric( api_key=api_key,sample=True, sample_strategy="Best"), + GptAccuracyMetric( api_key=api_key, sample=True, sample_strategy="First"), ] + if getattr(args.model, "type", "Whitebox") != "Blackbox": if getattr(args, "use_claim_ue", False): result += [OpenAIFactCheck(cache_path=args.cache_path, language=getattr(args, "language", "en"))] if args.task == "nmt": ignore_regex = getattr(args, "source_ignore_regex", None) - result += [Comet(source_ignore_regex = 
ignore_regex)] + comet_scorer = load("comet") + result += [Comet(comet_scorer, source_ignore_regex = ignore_regex), + Comet(comet_scorer, source_ignore_regex = ignore_regex, sample=True), + Comet(comet_scorer, source_ignore_regex = ignore_regex, sample=True, sample_strategy="Best"), + Comet(comet_scorer, source_ignore_regex = ignore_regex, sample=True, sample_strategy="BestNormalized")] + model_name_or_path="google/metricx-24-hybrid-large-v2p6" + tokenizer_name="google/mt5-large" + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + model_xmetric = MT5ForRegression.from_pretrained(model_name_or_path) + model_xmetric.to(device) + model_xmetric.eval() + + tokenizer_xmetric = AutoTokenizer.from_pretrained( + tokenizer_name if tokenizer_name else model_name_or_path + ) + + result += [XMetric(model=model_xmetric, tokenizer=tokenizer_xmetric, source_ignore_regex = ignore_regex), + XMetric(model=model_xmetric, tokenizer=tokenizer_xmetric, source_ignore_regex = ignore_regex, sample=True), + XMetric(model=model_xmetric, tokenizer=tokenizer_xmetric, source_ignore_regex = ignore_regex, sample=True, sample_strategy="Best"), + XMetric(model=model_xmetric, tokenizer=tokenizer_xmetric, source_ignore_regex = ignore_regex, sample=True, sample_strategy="BestNormalized")] else: result = [] for metric in generation_metrics: @@ -411,6 +541,15 @@ def get_generation_metrics(args): metric_args = metric.get("args", []) result.append(metric_class(*metric_args)) + # Filter out metrics that are already present in the base manager + filtered_result = [] + for metric in result: + if (metric.level, str(metric)) in existing_metrics: + log.warning(f"Skipping metric {metric} as it is already present in the base manager.") + else: + filtered_result.append(metric) + result = filtered_result + process_output_fn = getattr(args, "process_output_fn", None) process_target_fn = getattr(args, "process_target_fn", None) if process_target_fn or process_output_fn: @@ -433,7 +572,7 @@ def 
get_generation_metrics(args): if getattr(args, "multiref", False): # Wrap each metric in AggregatedMetric - result = [AggregatedMetric(base_metric=metric) for metric in result] + result = [AggregatedMetric(base_metric=metric) if type(metric) != GptAccuracyMetric else metric for metric in result] log.info("Done with initializing generation metrics.") diff --git a/src/lm_polygraph/estimators/__init__.py b/src/lm_polygraph/estimators/__init__.py index c9c16afa2..26c3d0739 100644 --- a/src/lm_polygraph/estimators/__init__.py +++ b/src/lm_polygraph/estimators/__init__.py @@ -7,10 +7,11 @@ from .claim.pointwise_mutual_information import PointwiseMutualInformationClaim from .max_probability import ( MaximumSequenceProbability, + SampledMaximumSequenceProbability, MaximumTokenProbability, ) from .claim_conditioned_probability import ClaimConditionedProbability -from .token_entropy import MeanTokenEntropy, TokenEntropy +from .token_entropy import MeanTokenEntropy, TokenEntropy, SampledMeanTokenEntropy from .pointwise_mutual_information import ( MeanPointwiseMutualInformation, PointwiseMutualInformation, @@ -24,13 +25,15 @@ from .monte_carlo_sequence_entropy import MonteCarloSequenceEntropy from .monte_carlo_normalized_sequence_entropy import MonteCarloNormalizedSequenceEntropy from .lexical_similarity import LexicalSimilarity -from .deg_mat import DegMat +from .deg_mat import DegMat, CEDegMat from .eccentricity import Eccentricity from .eig_val_laplacian import EigValLaplacian from .num_sem_sets import NumSemSets from .semantic_entropy import SemanticEntropy from .semantic_entropy_token import SemanticEntropyToken -from .perplexity import Perplexity +from .perplexity import ( + Perplexity, SampledPerplexity +) from .mahalanobis_distance import MahalanobisDistanceSeq from .relative_mahalanobis_distance import RelativeMahalanobisDistanceSeq from .rde import RDESeq @@ -61,9 +64,17 @@ PESrmi, PESrmiabs, ) -from .token_sar import TokenSAR -from .sentence_sar import SentenceSAR 
+from .token_sar import TokenSAR, SampledTokenSAR +from .sentence_sar import ( + SentenceSAR, +# OtherSentenceSAR, +# ReweightedSentenceSAR, + PPLSAR, + MTESAR, + #DistilOneSentenceSAR, +) from .sar import SAR +from .gsu import MaxprobGSU, PPLGSU, MTEGSU, TokenSARGSU from .renyi_neg import RenyiNeg from .fisher_rao import FisherRao from .verbalized_1s import Verbalized1S @@ -71,3 +82,33 @@ from .linguistic_1s import Linguistic1S from .label_prob import LabelProb from .p_true_empirical import PTrueEmpirical +from .average_ue import AveMaxprob, AvePPL, AveTokenSAR, AveMTE +from .semantic_average_ue import SemanticAveMaxprob, SemanticAvePPL, SemanticAveTokenSAR, SemanticAveMTE +from .semantic_average_ue_average_similarity import ( + SemanticEnrichedMaxprobAveDissimilarity, + SemanticEnrichedPPLAveDissimilarity, + SemanticEnrichedMTEAveDissimilarity, + SemanticEnrichedMaxprobTotalDissimilarity, + SemanticEnrichedPPLTotalDissimilarity, + SemanticEnrichedMTETotalDissimilarity, + AveDissimilarity +) +from .greedy_semantic_average_ue_average_similarity import ( + GreedySemanticEnrichedMaxprobAveDissimilarity, + GreedySemanticEnrichedPPLAveDissimilarity, + GreedySemanticEnrichedMTEAveDissimilarity, + GreedySemanticEnrichedMaxprobTotalDissimilarity, + GreedySemanticEnrichedPPLTotalDissimilarity, + GreedySemanticEnrichedMTETotalDissimilarity, + GreedyAveDissimilarity +) +from .semantic_median_ue import SemanticMedianMaxprob, SemanticMedianPPL, SemanticMedianTokenSAR, SemanticMedianMTE + +from .sum_semantic_entropies import SumSemanticMaxprob, SumSemanticPPL, SumSemanticMTE, GreedySumSemanticMaxprob, GreedySumSemanticPPL, GreedySumSemanticMTE +from .adj_sum_semantic_entropies import AdjustedSumSemanticMaxprob, AdjustedSumSemanticPPL, AdjustedSumSemanticMTE, GreedyAdjustedSumSemanticMaxprob, GreedyAdjustedSumSemanticPPL, GreedyAdjustedSumSemanticMTE + +from .prob_cocoa import ProbCocoaMaxprob, ProbCocoaPPL, GreedyProbCocoaMaxprob, GreedyProbCocoaPPL + +from 
.supervised_sum_semantic_entropies import SupSumSemanticMaxprob, SupSumSemanticPPL, SupSumSemanticMTE, GreedySupSumSemanticMaxprob, GreedySupSumSemanticPPL, GreedySupSumSemanticMTE + +from .semantic_density import SemanticDensity, GreedySemanticDensity diff --git a/src/lm_polygraph/estimators/adj_sum_semantic_entropies.py b/src/lm_polygraph/estimators/adj_sum_semantic_entropies.py new file mode 100644 index 000000000..1d780ca1c --- /dev/null +++ b/src/lm_polygraph/estimators/adj_sum_semantic_entropies.py @@ -0,0 +1,213 @@ +import numpy as np + +from typing import Dict +from copy import deepcopy + +from .estimator import Estimator +from .common import sample_strategy_to_prefix, best_sample_ids, SAMPLE_SELECTION_STAT_KEYS + + +class AdjustedSumSemanticMaxprob(Estimator): + def __init__( + self, + verbose: bool = False, + sample_strategy: str = "first" + ): + super().__init__( + ["sample_sentence_similarity", "sample_log_probs"] + SAMPLE_SELECTION_STAT_KEYS, + "sequence" + ) + self.verbose = verbose + self.sample_strategy = sample_strategy + + def __str__(self): + base = "AdjustedSumSemanticMaxprob" + return sample_strategy_to_prefix(self.sample_strategy) + base + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_log_probs = stats["sample_log_probs"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + sample_ids = best_sample_ids(self.sample_strategy, stats) + + enriched_metrics = [] # To store enriched metrics for each sample + + for best_id, sample_log_probs, sample_sentence_similarity in zip( + sample_ids, batch_sample_log_probs, batch_sample_sentence_similarity + ): + sim = 1 - sample_sentence_similarity[best_id, :] + sim[best_id] = 1 + avg_similarity = np.mean(sim) + mp = -np.sum(sample_log_probs[best_id]) + res = mp + avg_similarity * mp + enriched_metrics.append(res) + + return np.array(enriched_metrics) + + +class AdjustedSumSemanticPPL(Estimator): + def __init__( + self, + verbose: bool = False, + 
sample_strategy: str = "first" + ): + super().__init__( + ["sample_sentence_similarity", "sample_log_likelihoods"] + SAMPLE_SELECTION_STAT_KEYS, + "sequence" + ) + self.verbose = verbose + self.sample_strategy = sample_strategy + + def __str__(self): + base = "AdjustedSumSemanticPPL" + return sample_strategy_to_prefix(self.sample_strategy) + base + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_log_likelihoods = stats["sample_log_likelihoods"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + sample_ids = best_sample_ids(self.sample_strategy, stats) + + enriched_ppl = [] # To store enriched PPL for each sample + + for best_id, sample_log_likelihoods, sample_sentence_similarity in zip( + sample_ids, batch_sample_log_likelihoods, batch_sample_sentence_similarity + ): + sim = 1 - sample_sentence_similarity[best_id, :] + sim[best_id] = 1 + avg_similarity = np.mean(sim) + ppl = -np.mean(sample_log_likelihoods[best_id]) + res = ppl + avg_similarity * ppl + enriched_ppl.append(res) + + return np.array(enriched_ppl) + + +class AdjustedSumSemanticMTE(Estimator): + def __init__( + self, + verbose: bool = False, + sample_strategy: str = "first" + ): + super().__init__( + ["sample_sentence_similarity", "sample_entropy"] + SAMPLE_SELECTION_STAT_KEYS, + "sequence" + ) + self.verbose = verbose + self.sample_strategy = sample_strategy + + def __str__(self): + base = "AdjustedSumSemanticMTE" + return sample_strategy_to_prefix(self.sample_strategy) + base + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_entropies = stats["sample_entropy"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + sample_ids = best_sample_ids(self.sample_strategy, stats) + + enriched_mte = [] + + for best_id, sample_entropies, sample_sentence_similarity in zip( + sample_ids, batch_entropies, batch_sample_sentence_similarity + ): + sim = 1 - sample_sentence_similarity[best_id, :] + sim[best_id] = 1 + 
avg_similarity = np.mean(sim) + mte = sample_entropies[best_id] + res = mte + avg_similarity * mte + enriched_mte.append(res) + + return np.array(enriched_mte) + + +class GreedyAdjustedSumSemanticMaxprob(Estimator): + def __init__( + self, + verbose: bool = False, + ): + super().__init__(["greedy_sentence_similarity", "greedy_log_likelihoods"], "sequence") + self.verbose = verbose + + def __str__(self): + return "GreedyAdjustedSumSemanticMaxprob" + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_greedy_sentence_similarity = stats["greedy_sentence_similarity"] + batch_lls = np.array([np.sum(log_likelihood) for log_likelihood in stats["greedy_log_likelihoods"]]) + + enriched_metrics = [] # To store enriched metrics for each sample + for greedy_ll, greedy_sentence_similarity in zip( + batch_lls, batch_greedy_sentence_similarity + ): + # Compute probabilities (negative log-probs) + prob = -greedy_ll + + # Compute row-wise average similarity, excluding self-similarity + # Diagonal contains self-similarities + avg_similarity = 1 - np.mean(greedy_sentence_similarity) + + enriched_metrics.append(prob + avg_similarity * prob) + + return np.array(enriched_metrics) + + +class GreedyAdjustedSumSemanticPPL(Estimator): + def __init__( + self, + verbose: bool = False, + ): + super().__init__(["greedy_sentence_similarity", "greedy_log_likelihoods"], "sequence") + self.verbose = verbose + + def __str__(self): + return "GreedyAdjustedSumSemanticPPL" + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_greedy_log_likelihoods = stats["greedy_log_likelihoods"] + batch_greedy_sentence_similarity = stats["greedy_sentence_similarity"] + + enriched_ppl = [] # To store enriched PPL for each sample + + for greedy_log_likelihoods, greedy_sentence_similarity in zip( + batch_greedy_log_likelihoods, batch_greedy_sentence_similarity + ): + # get PPL for each sample + ppl = -np.mean(greedy_log_likelihoods) + + # Compute row-wise average similarity, 
excluding self-similarity
+        avg_similarity = 1 - np.mean(greedy_sentence_similarity)
+
+        enriched_ppl.append(ppl + avg_similarity * ppl)
+
+
+        return np.array(enriched_ppl)
+
+
+class GreedyAdjustedSumSemanticMTE(Estimator):
+    def __init__(
+        self,
+        verbose: bool = False,
+    ):
+        super().__init__(["greedy_sentence_similarity", "entropy"], "sequence")
+        self.verbose = verbose
+
+    def __str__(self):
+        return "GreedyAdjustedSumSemanticMTE"
+
+    def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray:
+        batch_greedy_entropies = stats["entropy"]
+        batch_greedy_sentence_similarity = stats["greedy_sentence_similarity"]
+
+        enriched_mte = []  # To store enriched MTE for each sample
+
+        for greedy_entropies, greedy_sentence_similarity in zip(
+            batch_greedy_entropies, batch_greedy_sentence_similarity
+        ):
+            # mean token entropy for this sample
+            mte = np.mean(greedy_entropies)
+
+            # Average dissimilarity between the greedy answer and the samples
+            avg_similarity = 1 - np.mean(greedy_sentence_similarity)
+
+            enriched_mte.append(mte + avg_similarity * mte)
+
+
+        return np.array(enriched_mte)
diff --git a/src/lm_polygraph/estimators/average_ue.py b/src/lm_polygraph/estimators/average_ue.py
new file mode 100644
index 000000000..4ecf9e541
--- /dev/null
+++ b/src/lm_polygraph/estimators/average_ue.py
@@ -0,0 +1,128 @@
+import numpy as np
+
+from typing import Dict
+from copy import deepcopy
+
+from .estimator import Estimator
+
+class AveMaxprob(Estimator):
+    def __init__(
+        self,
+        verbose: bool = False,
+    ):
+        super().__init__(["sample_sentence_similarity", "sample_log_probs"], "sequence")
+        self.verbose = verbose
+
+    def __str__(self):
+        return "AveMaxprob"
+
+    def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray:
+        batch_sample_log_probs = stats["sample_log_probs"]
+        batch_sample_sentence_similarity = stats["sample_sentence_similarity"]
+
+        ave = []
+        for sample_log_probs, sample_sentence_similarity in zip(
+            batch_sample_log_probs,
batch_sample_sentence_similarity + ): + sample_probs = -np.array(sample_log_probs) + + ave.append(sample_probs.mean()) + + return np.array(ave) + +class AvePPL(Estimator): + def __init__( + self, + verbose: bool = False, + ): + super().__init__(["sample_sentence_similarity", "sample_log_likelihoods"], "sequence") + self.verbose = verbose + + def __str__(self): + return "AvePPL" + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_log_likelihoods = stats["sample_log_likelihoods"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + + ave = [] + for sample_log_likelihoods, sample_sentence_similarity in zip( + batch_sample_log_likelihoods, batch_sample_sentence_similarity + ): + ppl = -np.array([np.mean(token_ll) for token_ll in sample_log_likelihoods]) + + ave.append(ppl.mean()) + + return np.array(ave) + +class AveTokenSAR(Estimator): + def __init__( + self, + verbose: bool = False, + ): + super().__init__( + [ + "sample_sentence_similarity", + "sample_log_likelihoods", + "sample_token_similarity", + ], + "sequence", + ) + self.verbose = verbose + + def __str__(self): + return "AveTokenSAR" + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_log_likelihoods = stats["sample_log_likelihoods"] + batch_sample_token_similarity = stats["sample_token_similarity"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + + ave = [] + for i, batch_data in enumerate(zip( + batch_sample_log_likelihoods, + batch_sample_token_similarity, + batch_sample_sentence_similarity, + )): + sample_log_likelihoods = batch_data[0] + sample_token_similarity = batch_data[1] + sample_sentence_similarity = batch_data[2] + + tokenSAR = [] + for log_likelihoods, token_similarity in zip( + sample_log_likelihoods, sample_token_similarity + ): + log_likelihoods = np.array(log_likelihoods) + R_t = 1 - token_similarity + if R_t.sum() == 0: + R_t_norm = np.zeros_like(R_t) + else: + R_t_norm = R_t / 
R_t.sum() + E_t = -log_likelihoods * R_t_norm + tokenSAR.append(E_t.sum()) + ave.append(np.mean(tokenSAR)) + + return np.array(ave) + +class AveMTE(Estimator): + def __init__( + self, + verbose: bool = False, + ): + super().__init__(["sample_sentence_similarity", "sample_entropy"], "sequence") + self.verbose = verbose + + def __str__(self): + return "AveMTE" + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_entropy = stats["sample_entropy"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + + ave = [] + for sample_entropy, sample_sentence_similarity in zip( + batch_sample_entropy, batch_sample_sentence_similarity + ): + ave.append(np.mean(sample_entropy)) + + return np.array(ave) diff --git a/src/lm_polygraph/estimators/claim_conditioned_probability.py b/src/lm_polygraph/estimators/claim_conditioned_probability.py index 5c7b63add..7e2a86d1e 100644 --- a/src/lm_polygraph/estimators/claim_conditioned_probability.py +++ b/src/lm_polygraph/estimators/claim_conditioned_probability.py @@ -20,7 +20,22 @@ def __str__(self): return "CCP" def _reduce(self, logprobs: list[float]): - return np.exp(np.sum(logprobs)) + return np.sum(logprobs) + + def _combine_nli(self, forward: str, backward: str): + """ + Combines two NLI predictions NLI(x, y) and NLI(y, x) into a single prediction. + + Prioritizes "entail" or "contra" if present, otherwise returns "neutral". 
+ """ + if forward == backward: + return forward + if all(x in [forward, backward] for x in ["entail", "contra"]): + return "neutral" + for x in ["entail", "contra"]: + if x in [forward, backward]: + return x + return "neutral" def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: words = stats["greedy_tokens"] @@ -42,10 +57,14 @@ def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: contra_logprobs, contra_words = [], [] for i in range(len(word_alternatives)): word_alt, logprob = word_alternatives[i] - if i == 0 or word_alternatives_nli[0][i] == "entail": + nli_outcome = self._combine_nli( + word_alternatives_nli[0][i], + word_alternatives_nli[i][0], + ) + if i == 0 or nli_outcome == "entail": entail_logprobs.append(logprob) entail_words.append(word_alt) - elif word_alternatives_nli[0][i] == "contra": + elif nli_outcome == "contra": contra_logprobs.append(logprob) contra_words.append(word_alt) entail_logprob = np.logaddexp.reduce(entail_logprobs) diff --git a/src/lm_polygraph/estimators/common.py b/src/lm_polygraph/estimators/common.py index 0a10c414c..7942e2c41 100644 --- a/src/lm_polygraph/estimators/common.py +++ b/src/lm_polygraph/estimators/common.py @@ -1,5 +1,6 @@ import numpy as np +SAMPLE_SELECTION_STAT_KEYS = ["best_sample_text_ids", "best_normalized_sample_text_ids"] def _get_pairs(lst): pairs = [] @@ -29,3 +30,24 @@ def _compute_Jaccard_score(lst): def compute_sim_score(answers, affinity, similarity_score): return _compute_Jaccard_score(answers) + + +def sample_strategy_to_prefix(sample_strategy): + if sample_strategy == "first": + return "" + elif sample_strategy in ["best", "best_normalized"]: + return "".join(list(map(lambda x: x.capitalize(), sample_strategy.split("_")))) + else: + raise ValueError(f"Unknown sample strategy: {sample_strategy}") + + +def best_sample_ids(sample_strategy, stats): + batch_size = len(stats["sample_log_probs"]) + if sample_strategy == "first": + return [0] * batch_size + elif sample_strategy == 
"best": + return stats["best_sample_text_ids"] + elif sample_strategy == "best_normalized": + return stats["best_normalized_sample_text_ids"] + else: + raise ValueError(f"Unknown sample strategy: {sample_strategy}") diff --git a/src/lm_polygraph/estimators/deg_mat.py b/src/lm_polygraph/estimators/deg_mat.py index 634884c63..f373a09d3 100644 --- a/src/lm_polygraph/estimators/deg_mat.py +++ b/src/lm_polygraph/estimators/deg_mat.py @@ -88,3 +88,48 @@ def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: print(f"generated answers: {answers}") res.append(self.U_DegMat(i, stats)) return np.array(res) + + +class CEDegMat(Estimator): + """ + Estimates the sequence-level uncertainty of a language model following the method of + "The Degree Matrix" as provided in the paper https://arxiv.org/abs/2305.19187. + Works with both whitebox and blackbox models (initialized using + lm_polygraph.utils.model.BlackboxModel/WhiteboxModel). + + Elements on diagonal of matrix D are sums of similarities between the particular number + (position in matrix) and other answers. Thus, it is an average pairwise distance + (lower values indicated smaller distance between answers which means greater uncertainty). + """ + + def __init__( + self, + verbose: bool = False, + ): + super().__init__(["sample_sentence_similarity", "sample_texts"], "sequence") + self.verbose = verbose + + def __str__(self): + return "CEDegMat" + + def U_DegMat(self, W, answers): + # The Degree Matrix + D = np.diag(W.sum(axis=1)) + return np.trace(len(answers) - D) / (len(answers) ** 2) + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + """ + Estimates the uncertainties for each sample in the input statistics. 
+ + Parameters: + stats (Dict[str, np.ndarray]): input statistics, which for multiple samples includes: + * generated samples in 'sample_texts', + * matrix with semantic similarities in 'semantic_matrix_entail'/'semantic_matrix_contra' + Returns: + np.ndarray: float uncertainty for each sample in input statistics. + Higher values indicate more uncertain samples. + """ + res = [] + for W, answers in zip(stats["sample_sentence_similarity"], stats["sample_texts"]): + res.append(self.U_DegMat(W, answers)) + return np.array(res) diff --git a/src/lm_polygraph/estimators/greedy_semantic_average_ue_average_similarity.py b/src/lm_polygraph/estimators/greedy_semantic_average_ue_average_similarity.py new file mode 100644 index 000000000..18dbd8ddb --- /dev/null +++ b/src/lm_polygraph/estimators/greedy_semantic_average_ue_average_similarity.py @@ -0,0 +1,250 @@ +import numpy as np + +from typing import Dict +from copy import deepcopy + +from .estimator import Estimator +from .common import sample_strategy_to_prefix, best_sample_ids + + +class GreedySemanticEnrichedMaxprobAveDissimilarity(Estimator): + def __init__( + self, + verbose: bool = False, + exp: bool = False, + ): + super().__init__(["greedy_sentence_similarity", "greedy_log_likelihoods"], "sequence") + self.verbose = verbose + self.exp = exp + + def __str__(self): + if self.exp: + return "GreedySemanticEnrichedMaxprobAveDissimilarityexp" + else: + return "GreedySemanticEnrichedMaxprobAveDissimilarity" + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_greedy_sentence_similarity = stats["greedy_sentence_similarity"] + batch_lls = np.array([np.sum(log_likelihood) for log_likelihood in stats["greedy_log_likelihoods"]]) + + enriched_metrics = [] # To store enriched metrics for each sample + for greedy_ll, greedy_sentence_similarity in zip( + batch_lls, batch_greedy_sentence_similarity + ): + # Compute probabilities (negative log-probs) + prob = -greedy_ll + if self.exp: + prob = -np.exp(-prob) + 
+ # Compute row-wise average similarity, excluding self-similarity + # Diagonal contains self-similarities + avg_dissimilarity = np.mean(1 - greedy_sentence_similarity) + + enriched_metric = prob * avg_dissimilarity + enriched_metrics.append(enriched_metric) + + return np.array(enriched_metrics) + + +class GreedySemanticEnrichedMaxprobTotalDissimilarity(Estimator): + def __init__( + self, + verbose: bool = False, + exp: bool = False, + ): + super().__init__(["sample_sentence_similarity", "greedy_log_likelihoods"], "sequence") + self.verbose = verbose + self.exp = exp + + def __str__(self): + if self.exp: + return "GreedySemanticEnrichedMaxprobTotalDissimilarityexp" + else: + return "GreedySemanticEnrichedMaxprobTotalDissimilarity" + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + batch_lls = np.array([np.sum(log_likelihood) for log_likelihood in stats["greedy_log_likelihoods"]]) + + enriched_metrics = [] # To store enriched metrics for each sample + for greedy_ll, sample_sentence_similarity in zip( + batch_lls, batch_sample_sentence_similarity + ): + # Compute probabilities (negative log-probs) + prob = -greedy_ll + if self.exp: + prob = -np.exp(-prob) + + # Compute row-wise average similarity, excluding self-similarity + # Diagonal contains self-similarities + avg_dissimilarity = np.mean(1 - np.array(sample_sentence_similarity)) + + enriched_metric = prob * avg_dissimilarity + enriched_metrics.append(enriched_metric) + + return np.array(enriched_metrics) + + +class GreedySemanticEnrichedPPLAveDissimilarity(Estimator): + def __init__( + self, + verbose: bool = False, + exp: bool = False, + ): + super().__init__(["greedy_sentence_similarity", "greedy_log_likelihoods"], "sequence") + self.verbose = verbose + self.exp = exp + + def __str__(self): + if self.exp: + return "GreedySemanticEnrichedPPLAveDissimilarityexp" + else: + return "GreedySemanticEnrichedPPLAveDissimilarity" 
+ + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_greedy_log_likelihoods = stats["greedy_log_likelihoods"] + batch_greedy_sentence_similarity = stats["greedy_sentence_similarity"] + + enriched_ppl = [] # To store enriched PPL for each sample + + for greedy_log_likelihoods, greedy_sentence_similarity in zip( + batch_greedy_log_likelihoods, batch_greedy_sentence_similarity + ): + # get PPL for each sample + ppl = -np.mean(greedy_log_likelihoods) + if self.exp: + ppl = -np.exp(-ppl) + + # Compute row-wise average similarity, excluding self-similarity + avg_dissimilarity = np.mean(1 - greedy_sentence_similarity) + + enriched_value = ppl * avg_dissimilarity + enriched_ppl.append(enriched_value) + + return np.array(enriched_ppl) + + +class GreedySemanticEnrichedPPLTotalDissimilarity(Estimator): + def __init__( + self, + verbose: bool = False, + exp: bool = False, + ): + super().__init__(["sample_sentence_similarity", "greedy_log_likelihoods"], "sequence") + self.verbose = verbose + self.exp = exp + + def __str__(self): + if self.exp: + return "GreedySemanticEnrichedPPLTotalDissimilarityexp" + else: + return "GreedySemanticEnrichedPPLTotalDissimilarity" + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + batch_greedy_log_likelihoods = stats["greedy_log_likelihoods"] + + enriched_ppl = [] # To store enriched PPL for each sample + + for greedy_log_likelihoods, sample_sentence_similarity in zip( + batch_greedy_log_likelihoods, batch_sample_sentence_similarity + ): + # get PPL for each sample + ppl = -np.mean(greedy_log_likelihoods) + if self.exp: + ppl = -np.exp(-ppl) + + # Compute row-wise average similarity, excluding self-similarity + avg_dissimilarity = np.mean(1 - np.array(sample_sentence_similarity)) + + enriched_value = ppl * avg_dissimilarity + enriched_ppl.append(enriched_value) + + return np.array(enriched_ppl) + + +class 
GreedySemanticEnrichedMTEAveDissimilarity(Estimator): + def __init__( + self, + verbose: bool = False, + ): + super().__init__(["greedy_sentence_similarity", "entropy"], "sequence") + self.verbose = verbose + + def __str__(self): + return "GreedySemanticEnrichedMTEAveDissimilarity" + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_greedy_entropy = stats["entropy"] + batch_greedy_sentence_similarity = stats["greedy_sentence_similarity"] + + enriched_entropy = [] + + for greedy_entropy, greedy_sentence_similarity in zip( + batch_greedy_entropy, batch_greedy_sentence_similarity + ): + # Compute row-wise average similarity, excluding self-similarity + avg_dissimilarity = np.mean(1 - greedy_sentence_similarity) + + entropy = np.mean(greedy_entropy) + enriched_value = entropy * avg_dissimilarity + enriched_entropy.append(enriched_value) + + return np.array(enriched_entropy) + + +class GreedySemanticEnrichedMTETotalDissimilarity(Estimator): + def __init__( + self, + verbose: bool = False, + ): + super().__init__(["sample_sentence_similarity", "entropy"], "sequence") + self.verbose = verbose + + def __str__(self): + return "GreedySemanticEnrichedMTETotalDissimilarity" + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + batch_entropy = stats["entropy"] + + enriched_entropy = [] + + for entropy, sample_sentence_similarity in zip( + batch_entropy, batch_sample_sentence_similarity + ): + # Compute row-wise average similarity, excluding self-similarity + avg_dissimilarity = np.mean(1 - np.array(sample_sentence_similarity)) + + entropy = np.mean(entropy) + enriched_value = entropy * avg_dissimilarity + enriched_entropy.append(enriched_value) + + return np.array(enriched_entropy) + + +class GreedyAveDissimilarity(Estimator): + def __init__( + self, + verbose: bool = False, + ): + super().__init__(["greedy_sentence_similarity"], "sequence") + self.verbose = verbose 
+ + def __str__(self): + return "GreedyAveDissimilarity" + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_greedy_entropy = stats["entropy"] + batch_greedy_sentence_similarity = stats["greedy_sentence_similarity"] + + res = [] + + for greedy_entropy, greedy_sentence_similarity in zip( + batch_greedy_entropy, batch_greedy_sentence_similarity + ): + # Compute row-wise average similarity, excluding self-similarity + avg_dissimilarity = np.mean(1 - greedy_sentence_similarity) + res.append(avg_dissimilarity) + + return np.array(res) diff --git a/src/lm_polygraph/estimators/gsu.py b/src/lm_polygraph/estimators/gsu.py new file mode 100644 index 000000000..bdb8e5de4 --- /dev/null +++ b/src/lm_polygraph/estimators/gsu.py @@ -0,0 +1,235 @@ +import numpy as np + +from typing import Dict +from copy import deepcopy + +from .estimator import Estimator +from lm_polygraph.estimators.claim_conditioned_probability import ClaimConditionedProbability + + +class MaxprobGSU(Estimator): + def __init__( + self, + verbose: bool = False, + exp: bool = False + ): + super().__init__(["sample_sentence_similarity", "sample_log_probs"], "sequence") + self.verbose = verbose + self.exp = exp + + def __str__(self): + if self.exp: + return "MaxprobGSUexp" + else: + return "MaxprobGSU" + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + """ + Estimates the sentenceSAR for each sample in the input statistics. + + Parameters: + stats (Dict[str, np.ndarray]): input statistics, which for multiple samples includes: + * corresponding log probabilities in 'sample_log_probs', + * matrix with cross-encoder similarities in 'sample_sentence_similarity' + Returns: + np.ndarray: float sentenceSAR for each sample in input statistics. + Higher values indicate more uncertain samples. 
+ """ + batch_sample_log_probs = stats["sample_log_probs"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + + GSU = [] + for sample_log_probs, sample_sentence_similarity in zip( + batch_sample_log_probs, batch_sample_sentence_similarity + ): + sample_probs = -np.array(sample_log_probs) + if self.exp: + sample_probs = -np.exp(-sample_probs) + R_s = ( + sample_probs + * sample_sentence_similarity + ) + E_s = R_s.sum(-1) + + E_s = E_s / sample_sentence_similarity.sum(-1) + + GSU.append(E_s.mean()) + + return np.array(GSU) + + +class PPLGSU(Estimator): + def __init__( + self, + verbose: bool = False, + exp: bool = False + ): + super().__init__(["sample_sentence_similarity", "sample_log_likelihoods"], "sequence") + self.verbose = verbose + self.exp = exp + + def __str__(self): + if self.exp: + return "PPLGSUexp" + else: + return "PPLGSU" + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + """ + Estimates the sentenceSAR for each sample in the input statistics. + + Parameters: + stats (Dict[str, np.ndarray]): input statistics, which for multiple samples includes: + * corresponding log probabilities in 'sample_log_probs', + * matrix with cross-encoder similarities in 'sample_sentence_similarity' + Returns: + np.ndarray: float sentenceSAR for each sample in input statistics. + Higher values indicate more uncertain samples. 
+ """ + batch_sample_log_likelihoods = stats["sample_log_likelihoods"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + + GSU = [] + for sample_log_likelihoods, sample_sentence_similarity in zip( + batch_sample_log_likelihoods, batch_sample_sentence_similarity + ): + ppl = -np.array([np.mean(token_ll) for token_ll in sample_log_likelihoods]) + + if self.exp: + ppl = -np.exp(-ppl) + + R_s = ( + ppl + * sample_sentence_similarity + ) + E_s = R_s.sum(-1) + + E_s = E_s / sample_sentence_similarity.sum(-1) + + GSU.append(E_s.mean()) + + return np.array(GSU) + + +class TokenSARGSU(Estimator): + def __init__( + self, + verbose: bool = False, + exp: bool = False + ): + super().__init__( + [ + "sample_sentence_similarity", + "sample_log_likelihoods", + "sample_token_similarity", + ], + "sequence", + ) + self.verbose = verbose + self.exp = exp + + def __str__(self): + if self.exp: + return "TokenSARGSUexp" + else: + return "TokenSARGSU" + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + """ + Estimates the SAR for each sample in the input statistics. + + Parameters: + stats (Dict[str, np.ndarray]): input statistics, which for multiple samples includes: + * log p(y_i | y_ np.ndarray: + """ + Estimates the sentenceSAR for each sample using Mean Token Entropy (MTE). + + Parameters: + stats (Dict[str, np.ndarray]): input statistics, which for multiple samples includes: + * 'sample_entropy': Mean Token Entropy for each sample, + * 'sample_sentence_similarity': matrix with cross-encoder similarities. + + Returns: + np.ndarray: float sentenceSAR for each sample in input statistics. + Higher values indicate more uncertain samples. 
+ """ + batch_sample_entropy = stats["sample_entropy"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + + GSU = [] + # Loop over each sample's Mean Token Entropy and sentence similarities + for sample_entropy, sample_sentence_similarity in zip( + batch_sample_entropy, batch_sample_sentence_similarity + ): + # Use MTE for sentence relevance calculation + R_s = sample_entropy * sample_sentence_similarity + + # Compute sentence relevance by summing along the last axis + E_s = R_s.sum(-1) + + E_s = E_s / sample_sentence_similarity.sum(-1) + + GSU.append(E_s.mean()) + + return np.array(GSU) diff --git a/src/lm_polygraph/estimators/max_probability.py b/src/lm_polygraph/estimators/max_probability.py index 1d93b5e3c..406021cd6 100644 --- a/src/lm_polygraph/estimators/max_probability.py +++ b/src/lm_polygraph/estimators/max_probability.py @@ -3,6 +3,7 @@ from typing import Dict from .estimator import Estimator +from .common import sample_strategy_to_prefix, best_sample_ids class MaximumSequenceProbability(Estimator): @@ -33,6 +34,41 @@ def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: log_likelihoods = stats["greedy_log_likelihoods"] return np.array([-np.sum(log_likelihood) for log_likelihood in log_likelihoods]) +class SampledMaximumSequenceProbability(Estimator): + """ + Estimates the sequence-level uncertainty of a language model by calculating the + log-probability of the generation with minus sign. + It is calculated as the sum of log-probabilities in each token. + Works only with whitebox models (initialized using lm_polygraph.utils.model.WhiteboxModel). 
+ """ + + def __init__(self, sample_strategy: str = "first"): + super().__init__(["sample_log_probs"], "sequence") + self.sample_strategy = sample_strategy + + def __str__(self): + return sample_strategy_to_prefix(self.sample_strategy) + "SampledMaximumSequenceProbability" + + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + """ + Estimates the minus log-probability of each sample in input statistics. + + Parameters: + stats (Dict[str, np.ndarray]): input statistics, which for multiple samples includes: + * log p(y_i | y_ np.ndarray: log_likelihoods = stats["greedy_log_likelihoods"] return np.array([-np.mean(ll) for ll in log_likelihoods]) + +class SampledPerplexity(Estimator): + def __init__(self, sample_strategy: str = "first"): + super().__init__(["sample_log_likelihoods"], "sequence") + self.sample_strategy = sample_strategy + + def __str__(self): + return sample_strategy_to_prefix(self.sample_strategy) + "SampledPerplexity" + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + log_likelihoods = stats["sample_log_likelihoods"] + sample_ids = best_sample_ids(self.sample_strategy, stats) + + ppl = [] + for best_id, sample_log_likelihoods in zip(sample_ids, log_likelihoods): + ppl.append(np.mean(sample_log_likelihoods[best_id])) + + return -np.array(ppl) diff --git a/src/lm_polygraph/estimators/prob_cocoa.py b/src/lm_polygraph/estimators/prob_cocoa.py new file mode 100644 index 000000000..cf3483fac --- /dev/null +++ b/src/lm_polygraph/estimators/prob_cocoa.py @@ -0,0 +1,144 @@ +import numpy as np + +from typing import Dict +from copy import deepcopy + +from .estimator import Estimator +from .common import sample_strategy_to_prefix, best_sample_ids, SAMPLE_SELECTION_STAT_KEYS + + +class ProbCocoaMaxprob(Estimator): + def __init__( + self, + verbose: bool = False, + sample_strategy: str = "first" + ): + super().__init__( + ["sample_sentence_similarity", "sample_log_probs"] + SAMPLE_SELECTION_STAT_KEYS, + "sequence" + ) + 
self.verbose = verbose + self.sample_strategy = sample_strategy + + def __str__(self): + base = "ProbCocoaMaxprob" + return sample_strategy_to_prefix(self.sample_strategy) + base + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_log_probs = stats["sample_log_probs"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + sample_ids = best_sample_ids(self.sample_strategy, stats) + + enriched_metrics = [] # To store enriched metrics for each sample + + for best_id, sample_log_probs, sample_sentence_similarity in zip( + sample_ids, batch_sample_log_probs, batch_sample_sentence_similarity + ): + sim = 1 - sample_sentence_similarity[best_id, :] + sim[best_id] = 1 + avg_similarity = np.mean(sim) + mp = 1 - np.exp(np.sum(sample_log_probs[best_id])) + res = mp * avg_similarity + enriched_metrics.append(res) + + return np.array(enriched_metrics) + + +class ProbCocoaPPL(Estimator): + def __init__( + self, + verbose: bool = False, + sample_strategy: str = "first" + ): + super().__init__( + ["sample_sentence_similarity", "sample_log_likelihoods"] + SAMPLE_SELECTION_STAT_KEYS, + "sequence" + ) + self.verbose = verbose + self.sample_strategy = sample_strategy + + def __str__(self): + base = "ProbCocoaPPL" + return sample_strategy_to_prefix(self.sample_strategy) + base + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_log_likelihoods = stats["sample_log_likelihoods"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + sample_ids = best_sample_ids(self.sample_strategy, stats) + + enriched_ppl = [] # To store enriched PPL for each sample + + for best_id, sample_log_likelihoods, sample_sentence_similarity in zip( + sample_ids, batch_sample_log_likelihoods, batch_sample_sentence_similarity + ): + sim = 1 - sample_sentence_similarity[best_id, :] + sim[best_id] = 1 + avg_similarity = np.mean(sim) + ppl = 1 - np.exp(np.mean(sample_log_likelihoods[best_id])) + res = ppl * 
avg_similarity + enriched_ppl.append(res) + + return np.array(enriched_ppl) + + +class GreedyProbCocoaMaxprob(Estimator): + def __init__( + self, + verbose: bool = False, + ): + super().__init__(["greedy_sentence_similarity", "greedy_log_likelihoods"], "sequence") + self.verbose = verbose + + def __str__(self): + return "GreedyProbCocoaMaxprob" + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_greedy_sentence_similarity = stats["greedy_sentence_similarity"] + batch_lls = np.array([np.sum(log_likelihood) for log_likelihood in stats["greedy_log_likelihoods"]]) + + enriched_metrics = [] # To store enriched metrics for each sample + for greedy_ll, greedy_sentence_similarity in zip( + batch_lls, batch_greedy_sentence_similarity + ): + # Compute probabilities (negative log-probs) + prob = 1 - np.exp(greedy_ll) + + # Compute row-wise average similarity, excluding self-similarity + # Diagonal contains self-similarities + avg_similarity = 1 - np.mean(greedy_sentence_similarity) + + enriched_metrics.append(prob * avg_similarity) + + return np.array(enriched_metrics) + + +class GreedyProbCocoaPPL(Estimator): + def __init__( + self, + verbose: bool = False, + ): + super().__init__(["greedy_sentence_similarity", "greedy_log_likelihoods"], "sequence") + self.verbose = verbose + + def __str__(self): + return "GreedyProbCocoaPPL" + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_greedy_log_likelihoods = stats["greedy_log_likelihoods"] + batch_greedy_sentence_similarity = stats["greedy_sentence_similarity"] + + enriched_ppl = [] # To store enriched PPL for each sample + + for greedy_log_likelihoods, greedy_sentence_similarity in zip( + batch_greedy_log_likelihoods, batch_greedy_sentence_similarity + ): + # get PPL for each sample + ppl = 1 - np.exp(np.mean(greedy_log_likelihoods)) + + # Compute row-wise average similarity, excluding self-similarity + avg_similarity = 1 - np.mean(greedy_sentence_similarity) + + 
enriched_ppl.append(ppl * avg_similarity) + + + return np.array(enriched_ppl) diff --git a/src/lm_polygraph/estimators/sar.py b/src/lm_polygraph/estimators/sar.py index 2aed3fa76..2d7559db1 100644 --- a/src/lm_polygraph/estimators/sar.py +++ b/src/lm_polygraph/estimators/sar.py @@ -15,7 +15,7 @@ class SAR(Estimator): and text relevance relative to all other generations. """ - def __init__(self, verbose: bool = False): + def __init__(self, verbose: bool = False, t: float = 0.001): super().__init__( [ "sample_sentence_similarity", @@ -25,10 +25,10 @@ def __init__(self, verbose: bool = False): "sequence", ) self.verbose = verbose - self.t = 0.001 + self.t = t def __str__(self): - return "SAR" + return f"SAR_t{self.t}" def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: """ @@ -63,7 +63,10 @@ def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: ): log_likelihoods = np.array(log_likelihoods) R_t = 1 - token_similarity - R_t_norm = R_t / R_t.sum() + if R_t.sum() == 0: + R_t_norm = np.zeros_like(R_t) + else: + R_t_norm = R_t / R_t.sum() E_t = -log_likelihoods * R_t_norm tokenSAR.append(E_t.sum()) diff --git a/src/lm_polygraph/estimators/semantic_average_ue.py b/src/lm_polygraph/estimators/semantic_average_ue.py new file mode 100644 index 000000000..161ccec37 --- /dev/null +++ b/src/lm_polygraph/estimators/semantic_average_ue.py @@ -0,0 +1,177 @@ +import numpy as np + +from typing import Dict +from copy import deepcopy + +from .estimator import Estimator +from .common import sample_strategy_to_prefix, best_sample_ids + + +class SemanticAveMaxprob(Estimator): + def __init__( + self, + verbose: bool = False, + exp: bool = False, + sample_strategy: str = "first" + ): + super().__init__(["sample_sentence_similarity", "sample_log_probs"], "sequence") + self.verbose = verbose + self.exp = exp + self.sample_strategy = sample_strategy + + def __str__(self): + if self.exp: + base = "SemanticAveMaxprobexp" + else: + base = "SemanticAveMaxprob" + return 
sample_strategy_to_prefix(self.sample_strategy) + base + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_log_probs = stats["sample_log_probs"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + sample_ids = best_sample_ids(self.sample_strategy, stats) + + ave = [] + for best_id, sample_log_probs, sample_sentence_similarity in zip( + sample_ids, batch_sample_log_probs, batch_sample_sentence_similarity + ): + sample_probs = -np.array(sample_log_probs) + if self.exp: + sample_probs = -np.exp(-sample_probs) + + weights = sample_sentence_similarity[best_id, :] + ave.append(np.average(sample_probs, weights=weights)) + + return np.array(ave) + +class SemanticAvePPL(Estimator): + def __init__( + self, + verbose: bool = False, + exp: bool = False, + sample_strategy: str = "first" + ): + super().__init__(["sample_sentence_similarity", "sample_log_likelihoods"], "sequence") + self.verbose = verbose + self.exp = exp + self.sample_strategy = sample_strategy + + def __str__(self): + if self.exp: + base = "SemanticAvePPLexp" + else: + base = "SemanticAvePPL" + return sample_strategy_to_prefix(self.sample_strategy) + base + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_log_likelihoods = stats["sample_log_likelihoods"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + sample_ids = best_sample_ids(self.sample_strategy, stats) + + ave = [] + for best_id, sample_log_likelihoods, sample_sentence_similarity in zip( + sample_ids, batch_sample_log_likelihoods, batch_sample_sentence_similarity + ): + ppl = -np.array([np.mean(token_ll) for token_ll in sample_log_likelihoods]) + + if self.exp: + ppl = -np.exp(-ppl) + + weights = sample_sentence_similarity[best_id, :] + + ave.append(np.average(ppl, weights=weights)) + + return np.array(ave) + +class SemanticAveTokenSAR(Estimator): + def __init__( + self, + verbose: bool = False, + exp: bool = False, + sample_strategy: str = 
"first" + ): + super().__init__( + [ + "sample_sentence_similarity", + "sample_log_likelihoods", + "sample_token_similarity", + ], + "sequence", + ) + self.verbose = verbose + self.exp = exp + self.sample_strategy = sample_strategy + + def __str__(self): + if self.exp: + base = "SemanticAveTokenSARexp" + else: + base = "SemanticAveTokenSAR" + return sample_strategy_to_prefix(self.sample_strategy) + base + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_log_likelihoods = stats["sample_log_likelihoods"] + batch_sample_token_similarity = stats["sample_token_similarity"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + sample_ids = best_sample_ids(self.sample_strategy, stats) + + ave = [] + for batch_data in zip( + batch_sample_log_likelihoods, + batch_sample_token_similarity, + batch_sample_sentence_similarity, + sample_ids, + ): + sample_log_likelihoods = batch_data[0] + sample_token_similarity = batch_data[1] + sample_sentence_similarity = batch_data[2] + best_id = batch_data[3] + + tokenSAR = [] + for log_likelihoods, token_similarity in zip( + sample_log_likelihoods, sample_token_similarity + ): + log_likelihoods = np.array(log_likelihoods) + R_t = 1 - token_similarity + if R_t.sum() == 0: + R_t_norm = np.zeros_like(R_t) + else: + R_t_norm = R_t / R_t.sum() + E_t = -log_likelihoods * R_t_norm + tokenSAR.append(E_t.sum()) + + if self.exp: + tokenSAR = -np.exp(-np.array(tokenSAR)) + + weights = sample_sentence_similarity[best_id, :] + + ave.append(np.average(tokenSAR, weights=weights)) + + return np.array(ave) + +class SemanticAveMTE(Estimator): + def __init__( + self, + verbose: bool = False, + sample_strategy: str = "first" + ): + super().__init__(["sample_sentence_similarity", "sample_entropy"], "sequence") + self.verbose = verbose + self.sample_strategy = sample_strategy + + def __str__(self): + return sample_strategy_to_prefix(self.sample_strategy) + "SemanticAveMTE" + + def __call__(self, stats: 
Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_entropy = stats["sample_entropy"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + sample_ids = best_sample_ids(self.sample_strategy, stats) + + ave = [] + for best_id, sample_entropy, sample_sentence_similarity in zip( + sample_ids, batch_sample_entropy, batch_sample_sentence_similarity + ): + weights = sample_sentence_similarity[best_id, :] + ave.append(np.average(sample_entropy, weights=weights)) + + return np.array(ave) diff --git a/src/lm_polygraph/estimators/semantic_average_ue_average_similarity.py b/src/lm_polygraph/estimators/semantic_average_ue_average_similarity.py new file mode 100644 index 000000000..03956cacb --- /dev/null +++ b/src/lm_polygraph/estimators/semantic_average_ue_average_similarity.py @@ -0,0 +1,424 @@ +import numpy as np + +from typing import Dict +from copy import deepcopy + +from .estimator import Estimator +from .common import sample_strategy_to_prefix, best_sample_ids, SAMPLE_SELECTION_STAT_KEYS + + +class SemanticEnrichedMaxprobAveDissimilarity(Estimator): + def __init__( + self, + verbose: bool = False, + exp: bool = False, + sample_strategy: str = "first" + ): + super().__init__( + ["sample_sentence_similarity", "sample_log_probs"] + SAMPLE_SELECTION_STAT_KEYS, + "sequence" + ) + self.verbose = verbose + self.exp = exp + self.sample_strategy = sample_strategy + + def __str__(self): + if self.exp: + base = "SemanticEnrichedMaxprobAveDissimilarityexp" + else: + base = "SemanticEnrichedMaxprobAveDissimilarity" + return sample_strategy_to_prefix(self.sample_strategy) + base + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_log_probs = stats["sample_log_probs"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + sample_ids = best_sample_ids(self.sample_strategy, stats) + + enriched_metrics = [] # To store enriched metrics for each sample + + for sample_log_probs, sample_sentence_similarity in zip( + 
batch_sample_log_probs, batch_sample_sentence_similarity + ): + # Step 1: Compute probabilities (negative log-probs) + sample_probs = -np.array(sample_log_probs) + if self.exp: + sample_probs = -np.exp(-sample_probs) + + # Step 2: Compute row-wise sum of dissimilarities (1 - g) + row_dissimilarities = [] + for i in range(sample_sentence_similarity.shape[0]): + row = sample_sentence_similarity[i] + sum_dissimilarities = np.sum(1 - row) - (1 - row[i]) # Exclude self-similarity + row_dissimilarities.append(sum_dissimilarities) + + # Step 3: Normalize by (M - 1) + normalized_dissimilarities = [ + dissim / (len(sample_sentence_similarity) - 1) + for dissim in row_dissimilarities + ] + + # Step 4: Enrich each metric + enriched_sample_metrics = [] + for prob, dissim in zip(sample_probs, normalized_dissimilarities): + enriched_metric = prob * dissim + enriched_sample_metrics.append(enriched_metric) + + enriched_metrics.append(np.array(enriched_sample_metrics)) + + # Return only metric for the best sample for PRR calculation + best_elements = [] + for best_id, metrics in zip(sample_ids, enriched_metrics): + best_elements.append(metrics[best_id]) + + return np.array(best_elements) + + +class SemanticEnrichedMaxprobTotalDissimilarity(Estimator): + def __init__( + self, + verbose: bool = False, + exp: bool = False, + sample_strategy: str = "first" + ): + super().__init__( + ["sample_sentence_similarity", "sample_log_probs"] + SAMPLE_SELECTION_STAT_KEYS, + "sequence" + ) + self.verbose = verbose + self.exp = exp + self.sample_strategy = sample_strategy + + def __str__(self): + if self.exp: + base = "SemanticEnrichedMaxprobTotalDissimilarityexp" + else: + base = "SemanticEnrichedMaxprobTotalDissimilarity" + return sample_strategy_to_prefix(self.sample_strategy) + base + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_log_probs = stats["sample_log_probs"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + sample_ids = 
best_sample_ids(self.sample_strategy, stats) + + enriched_metrics = [] # To store enriched metrics for each sample + + for sample_log_probs, sample_sentence_similarity in zip( + batch_sample_log_probs, batch_sample_sentence_similarity + ): + # Step 1: Compute probabilities (negative log-probs) + sample_probs = -np.array(sample_log_probs) + if self.exp: + sample_probs = -np.exp(-sample_probs) + + # Step 2: Compute row-wise sum of dissimilarities (1 - g) + row_dissimilarities = [] + for i in range(sample_sentence_similarity.shape[0]): + row = sample_sentence_similarity[i] + sum_dissimilarities = np.sum(1 - row) - (1 - row[i]) # Exclude self-similarity + row_dissimilarities.append(sum_dissimilarities) + + # Step 3: Normalize by (M - 1) + normalized_dissimilarities = [ + dissim / (len(sample_sentence_similarity) - 1) + for dissim in row_dissimilarities + ] + + dissim = np.mean(normalized_dissimilarities) + + # Step 4: Enrich each metric + enriched_sample_metrics = [] + for prob in sample_probs: + enriched_metric = prob * dissim + enriched_sample_metrics.append(enriched_metric) + + enriched_metrics.append(np.array(enriched_sample_metrics)) + + # Return only metric for the best sample for PRR calculation + best_elements = [] + for best_id, metrics in zip(sample_ids, enriched_metrics): + best_elements.append(metrics[best_id]) + + return np.array(best_elements) + + +class SemanticEnrichedPPLAveDissimilarity(Estimator): + def __init__( + self, + verbose: bool = False, + exp: bool = False, + sample_strategy: str = "first" + ): + super().__init__( + ["sample_sentence_similarity", "sample_log_likelihoods"] + SAMPLE_SELECTION_STAT_KEYS, + "sequence" + ) + self.verbose = verbose + self.exp = exp + self.sample_strategy = sample_strategy + + def __str__(self): + if self.exp: + base = "SemanticEnrichedPPLAveDissimilarityexp" + else: + base = "SemanticEnrichedPPLAveDissimilarity" + return sample_strategy_to_prefix(self.sample_strategy) + base + + def __call__(self, stats: Dict[str, 
np.ndarray]) -> np.ndarray: + batch_sample_log_likelihoods = stats["sample_log_likelihoods"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + sample_ids = best_sample_ids(self.sample_strategy, stats) + + enriched_ppl = [] # To store enriched PPL for each sample + + for sample_log_likelihoods, sample_sentence_similarity in zip( + batch_sample_log_likelihoods, batch_sample_sentence_similarity + ): + # Step 1: Compute PPL for each sample + ppl = -np.array([np.mean(token_ll) for token_ll in sample_log_likelihoods]) + if self.exp: + ppl = -np.exp(-ppl) + + # Step 2: Compute row-wise average dissimilarity (1 - g) + row_averages = [] + for i in range(sample_sentence_similarity.shape[0]): + row = sample_sentence_similarity[i] + # Compute average dissimilarity, excluding self-similarity + average_dissimilarity = (np.sum(1 - row) - (1 - row[i])) / (len(row) - 1) + row_averages.append(average_dissimilarity) + + # Step 3: Enrich each PPL independently by scaling with the average dissimilarity + enriched_sample_ppl = [] + for i, (ppl_value, avg_dissimilarity) in enumerate(zip(ppl, row_averages)): + if avg_dissimilarity == 0: + avg_dissimilarity = 1e-10 # Avoid division by zero + enriched_value = ppl_value * avg_dissimilarity + enriched_sample_ppl.append(enriched_value) + + enriched_ppl.append(np.array(enriched_sample_ppl)) # Collect enriched PPL values + + # Return only metric for the best sample for PRR calculation + best_elements = [] + for best_id, metrics in zip(sample_ids, enriched_ppl): + best_elements.append(metrics[best_id]) + + return np.array(best_elements) + + +class SemanticEnrichedPPLTotalDissimilarity(Estimator): + def __init__( + self, + verbose: bool = False, + exp: bool = False, + sample_strategy: str = "first" + ): + super().__init__( + ["sample_sentence_similarity", "sample_log_likelihoods"] + SAMPLE_SELECTION_STAT_KEYS, + "sequence" + ) + self.verbose = verbose + self.exp = exp + self.sample_strategy = sample_strategy + + def 
__str__(self): + if self.exp: + base = "SemanticEnrichedPPLTotalDissimilarityexp" + else: + base = "SemanticEnrichedPPLTotalDissimilarity" + return sample_strategy_to_prefix(self.sample_strategy) + base + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_log_likelihoods = stats["sample_log_likelihoods"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + sample_ids = best_sample_ids(self.sample_strategy, stats) + + enriched_ppl = [] # To store enriched PPL for each sample + + for sample_log_likelihoods, sample_sentence_similarity in zip( + batch_sample_log_likelihoods, batch_sample_sentence_similarity + ): + # Step 1: Compute PPL for each sample + ppl = -np.array([np.mean(token_ll) for token_ll in sample_log_likelihoods]) + if self.exp: + ppl = -np.exp(-ppl) + + # Step 2: Compute row-wise average dissimilarity (1 - g) + row_averages = [] + for i in range(sample_sentence_similarity.shape[0]): + row = sample_sentence_similarity[i] + # Compute average dissimilarity, excluding self-similarity + average_dissimilarity = (np.sum(1 - row) - (1 - row[i])) / (len(row) - 1) + row_averages.append(average_dissimilarity) + + avg_dissimilarity = np.mean(row_averages) + + # Step 3: Enrich each PPL independently by scaling with the average dissimilarity + enriched_sample_ppl = [] + for ppl_value in ppl: + if avg_dissimilarity == 0: + avg_dissimilarity = 1e-10 # Avoid division by zero + enriched_value = ppl_value * avg_dissimilarity + enriched_sample_ppl.append(enriched_value) + + enriched_ppl.append(np.array(enriched_sample_ppl)) # Collect enriched PPL values + + # Return only metric for the best sample for PRR calculation + best_elements = [] + for best_id, metrics in zip(sample_ids, enriched_ppl): + best_elements.append(metrics[best_id]) + + return np.array(best_elements) + + +class SemanticEnrichedMTEAveDissimilarity(Estimator): + def __init__( + self, + verbose: bool = False, + sample_strategy: str = "first" + ): + 
super().__init__( + ["sample_sentence_similarity", "sample_entropy"] + SAMPLE_SELECTION_STAT_KEYS, + "sequence" + ) + self.verbose = verbose + self.sample_strategy = sample_strategy + + def __str__(self): + return sample_strategy_to_prefix(self.sample_strategy) + "SemanticEnrichedMTEAveDissimilarity" + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_entropy = stats["sample_entropy"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + sample_ids = best_sample_ids(self.sample_strategy, stats) + + enriched_entropy = [] + + for sample_entropy, sample_sentence_similarity in zip( + batch_sample_entropy, batch_sample_sentence_similarity + ): + # Compute row-wise average dissimilarity (1 - g), excluding self-similarity + row_averages = [] + for i in range(sample_sentence_similarity.shape[0]): + row = sample_sentence_similarity[i] + average_dissimilarity = (np.sum(1 - row) - (1 - row[i])) / (len(row) - 1) + row_averages.append(average_dissimilarity) + + # Enrich each sample's entropy value + enriched_sample_entropy = [] + for i, (entropy, avg_dissimilarity) in enumerate(zip(sample_entropy, row_averages)): + if avg_dissimilarity == 0: + avg_dissimilarity = 1e-10 # Avoid division by zero + enriched_value = entropy * avg_dissimilarity + enriched_sample_entropy.append(enriched_value) + + enriched_entropy.append(np.array(enriched_sample_entropy)) + + # Return only metric for the best sample for PRR calculation + best_elements = [] + for best_id, metrics in zip(sample_ids, enriched_entropy): + best_elements.append(metrics[best_id]) + + return np.array(best_elements) + + +class SemanticEnrichedMTETotalDissimilarity(Estimator): + def __init__( + self, + verbose: bool = False, + sample_strategy: str = "first" + ): + super().__init__( + ["sample_sentence_similarity", "sample_entropy"] + SAMPLE_SELECTION_STAT_KEYS, + "sequence" + ) + self.verbose = verbose + self.sample_strategy = sample_strategy + + def __str__(self): + return 
sample_strategy_to_prefix(self.sample_strategy) + "SemanticEnrichedMTETotalDissimilarity" + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_entropy = stats["sample_entropy"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + sample_ids = best_sample_ids(self.sample_strategy, stats) + + enriched_entropy = [] + + for sample_entropy, sample_sentence_similarity in zip( + batch_sample_entropy, batch_sample_sentence_similarity + ): + # Compute row-wise average dissimilarity (1 - g), excluding self-similarity + row_averages = [] + for i in range(sample_sentence_similarity.shape[0]): + row = sample_sentence_similarity[i] + average_dissimilarity = (np.sum(1 - row) - (1 - row[i])) / (len(row) - 1) + row_averages.append(average_dissimilarity) + + avg_dissimilarity = np.mean(row_averages) + + # Enrich each sample's entropy value + enriched_sample_entropy = [] + for entropy in sample_entropy: + if avg_dissimilarity == 0: + avg_dissimilarity = 1e-10 # Avoid division by zero + enriched_value = entropy * avg_dissimilarity + enriched_sample_entropy.append(enriched_value) + + enriched_entropy.append(np.array(enriched_sample_entropy)) + + # Return only metric for the best sample for PRR calculation + best_elements = [] + for best_id, metrics in zip(sample_ids, enriched_entropy): + best_elements.append(metrics[best_id]) + + return np.array(best_elements) + + +class AveDissimilarity(Estimator): + def __init__( + self, + verbose: bool = False, + sample_strategy: str = "first" + ): + super().__init__( + ["sample_sentence_similarity", "sample_entropy"] + SAMPLE_SELECTION_STAT_KEYS, + "sequence" + ) + self.verbose = verbose + self.sample_strategy = sample_strategy + + def __str__(self): + return sample_strategy_to_prefix(self.sample_strategy) + "AveDissimilarity" + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_entropy = stats["sample_entropy"] + batch_sample_sentence_similarity = 
stats["sample_sentence_similarity"] + sample_ids = best_sample_ids(self.sample_strategy, stats) + + enriched_entropy = [] + + for sample_entropy, sample_sentence_similarity in zip( + batch_sample_entropy, batch_sample_sentence_similarity + ): + # Compute row-wise average dissimilarity (1 - g), excluding self-similarity + row_averages = [] + for i in range(sample_sentence_similarity.shape[0]): + row = sample_sentence_similarity[i] + average_dissimilarity = (np.sum(1 - row) - (1 - row[i])) / (len(row) - 1) + row_averages.append(average_dissimilarity) + + # Enrich each sample's entropy value + enriched_sample_entropy = [] + for i, (entropy, avg_dissimilarity) in enumerate(zip(sample_entropy, row_averages)): + if avg_dissimilarity == 0: + avg_dissimilarity = 1e-10 # Avoid division by zero + enriched_value = avg_dissimilarity + enriched_sample_entropy.append(enriched_value) + + enriched_entropy.append(np.array(enriched_sample_entropy)) + + # Return only metric for the best sample for PRR calculation + best_elements = [] + for best_id, metrics in zip(sample_ids, enriched_entropy): + best_elements.append(metrics[best_id]) + + return np.array(best_elements) diff --git a/src/lm_polygraph/estimators/semantic_density.py b/src/lm_polygraph/estimators/semantic_density.py new file mode 100644 index 000000000..693215a81 --- /dev/null +++ b/src/lm_polygraph/estimators/semantic_density.py @@ -0,0 +1,137 @@ +import numpy as np + +from typing import Dict + +from .estimator import Estimator +from .common import sample_strategy_to_prefix, best_sample_ids, SAMPLE_SELECTION_STAT_KEYS + + +class SemanticDensity(Estimator): + def __init__(self, verbose: bool = False, sample_strategy: str = "first"): + super().__init__( + [ + "sample_log_probs", + "sample_tokens", + "sample_texts", + "concat_semantic_matrix_contra", + "concat_semantic_matrix_neutral", + ] + SAMPLE_SELECTION_STAT_KEYS, + "sequence", + ) + self.verbose = verbose + self.sample_strategy = sample_strategy + + def __str__(self): 
+        base = "SemanticDensity"
+        return sample_strategy_to_prefix(self.sample_strategy) + base
+
+    def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray:
+        batch_sample_ids = best_sample_ids(self.sample_strategy, stats)
+        batch_sample_log_probs = stats["sample_log_probs"]
+        batch_sample_tokens = stats["sample_tokens"]
+        batch_sample_texts = stats["sample_texts"]
+        batch_semantic_matrix_contra = stats["concat_semantic_matrix_contra"]
+        batch_semantic_matrix_neutral = stats["concat_semantic_matrix_neutral"]
+
+        semantic_density = []
+        for batch_data in zip(
+            batch_sample_ids,
+            batch_sample_log_probs,
+            batch_sample_tokens,
+            batch_sample_texts,
+            batch_semantic_matrix_contra,
+            batch_semantic_matrix_neutral,
+        ):
+            sample_id = batch_data[0]
+            sample_probs = np.exp(batch_data[1])
+            sample_tokens = batch_data[2]
+            sample_texts = batch_data[3]
+            semantic_matrix_contra = batch_data[4]
+            semantic_matrix_neutral = batch_data[5]
+
+            _, unique_sample_indices = np.unique(sample_texts, return_index=True)
+
+            numerator, denominator = [], []
+
+            for _id in unique_sample_indices:
+                normed_prob = sample_probs[_id] ** (1 / len(sample_tokens[_id]))
+                distance = semantic_matrix_contra[sample_id, _id] + (semantic_matrix_neutral[sample_id, _id] / 2)
+
+                if distance <= 1:
+                    kernel_value = 1 - distance
+                else:
+                    kernel_value = 0
+
+                numerator.append(normed_prob * kernel_value)
+                denominator.append(normed_prob)
+
+            semantic_density.append(np.sum(numerator) / np.sum(denominator))
+
+        return -np.array(semantic_density)
+
+
+class GreedySemanticDensity(Estimator):
+    def __init__(self, verbose: bool = False):
+        super().__init__(
+            [
+                "greedy_log_likelihoods",
+                "sample_log_probs",
+                "sample_tokens",
+                "sample_texts",
+                "concat_greedy_semantic_matrix_contra_forward",
+                "concat_greedy_semantic_matrix_neutral_forward",
+            ],
+            "sequence",
+        )
+        self.verbose = verbose
+
+    def __str__(self):
+        return "GreedySemanticDensity"
+
+    def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray:
+        
batch_sample_log_probs = stats["sample_log_probs"] + batch_sample_tokens = stats["sample_tokens"] + batch_sample_texts = stats["sample_texts"] + batch_semantic_matrix_contra = stats["concat_greedy_semantic_matrix_contra_forward"] + batch_semantic_matrix_neutral = stats["concat_greedy_semantic_matrix_neutral_forward"] + batch_greedy_log_likelihoods = stats["greedy_log_likelihoods"] + + semantic_density = [] + for batch_data in zip( + batch_greedy_log_likelihoods, + batch_sample_log_probs, + batch_sample_tokens, + batch_sample_texts, + batch_semantic_matrix_contra, + batch_semantic_matrix_neutral, + ): + greedy_log_probs = batch_data[0] + sample_probs = np.exp(batch_data[1]) + sample_tokens = batch_data[2] + sample_texts = batch_data[3] + semantic_matrix_contra = batch_data[4] + semantic_matrix_neutral = batch_data[5] + + _, unique_sample_indices = np.unique(sample_texts, return_index=True) + + numerator, denominator = [], [] + + for _id in unique_sample_indices: + normed_prob = sample_probs[_id] ** (1 / len(sample_tokens[_id])) + distance = semantic_matrix_contra[_id] + (semantic_matrix_neutral[_id] / 2) + + if distance <= 1: + kernel_value = 1 - distance + else: + kernel_value = 0 + + numerator.append(normed_prob * kernel_value) + denominator.append(normed_prob) + + greedy_normed_prob = np.exp(np.sum(greedy_log_probs)) ** (1 / len(greedy_log_probs)) + numerator.append(greedy_normed_prob) + denominator.append(greedy_normed_prob) + + semantic_density.append(np.sum(numerator) / np.sum(denominator)) + + return -np.array(semantic_density) diff --git a/src/lm_polygraph/estimators/semantic_median_ue.py b/src/lm_polygraph/estimators/semantic_median_ue.py new file mode 100644 index 000000000..5c6687608 --- /dev/null +++ b/src/lm_polygraph/estimators/semantic_median_ue.py @@ -0,0 +1,178 @@ +import numpy as np + +from typing import Dict +from copy import deepcopy + +from .estimator import Estimator +from .common import sample_strategy_to_prefix, best_sample_ids + +from 
wquantiles import median + + +class SemanticMedianMaxprob(Estimator): + def __init__( + self, + verbose: bool = False, + exp: bool = False, + sample_strategy: str = "first" + ): + super().__init__(["sample_sentence_similarity", "sample_log_probs"], "sequence") + self.verbose = verbose + self.exp = exp + self.sample_strategy = sample_strategy + + def __str__(self): + if self.exp: + base = "SemanticMedianMaxprobexp" + else: + base = "SemanticMedianMaxprob" + return sample_strategy_to_prefix(self.sample_strategy) + base + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_log_probs = stats["sample_log_probs"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + sample_ids = best_sample_ids(self.sample_strategy, stats) + + ave = [] + for best_id, sample_log_probs, sample_sentence_similarity in zip( + sample_ids, batch_sample_log_probs, batch_sample_sentence_similarity + ): + sample_probs = -np.array(sample_log_probs) + if self.exp: + sample_probs = -np.exp(-sample_probs) + weights = sample_sentence_similarity[best_id, :] + ave.append(median(sample_probs, weights)) + + return np.array(ave) + +class SemanticMedianPPL(Estimator): + def __init__( + self, + verbose: bool = False, + exp: bool = False, + sample_strategy: str = "first" + ): + super().__init__(["sample_sentence_similarity", "sample_log_likelihoods"], "sequence") + self.verbose = verbose + self.exp = exp + self.sample_strategy = sample_strategy + + def __str__(self): + if self.exp: + base = "SemanticMedianPPLexp" + else: + base = "SemanticMedianPPL" + return sample_strategy_to_prefix(self.sample_strategy) + base + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_log_likelihoods = stats["sample_log_likelihoods"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + sample_ids = best_sample_ids(self.sample_strategy, stats) + + ave = [] + for best_id, sample_log_likelihoods, sample_sentence_similarity in zip( + 
sample_ids, batch_sample_log_likelihoods, batch_sample_sentence_similarity + ): + ppl = -np.array([np.mean(token_ll) for token_ll in sample_log_likelihoods]) + + if self.exp: + ppl = -np.exp(-ppl) + + weights = sample_sentence_similarity[best_id, :] + + ave.append(median(ppl, weights)) + + return np.array(ave) + +class SemanticMedianTokenSAR(Estimator): + def __init__( + self, + verbose: bool = False, + exp: bool = False, + sample_strategy: str = "first" + ): + super().__init__( + [ + "sample_sentence_similarity", + "sample_log_likelihoods", + "sample_token_similarity", + ], + "sequence", + ) + self.verbose = verbose + self.exp = exp + self.sample_strategy = sample_strategy + + def __str__(self): + if self.exp: + base = "SemanticMedianTokenSARexp" + else: + base = "SemanticMedianTokenSAR" + return sample_strategy_to_prefix(self.sample_strategy) + base + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_log_likelihoods = stats["sample_log_likelihoods"] + batch_sample_token_similarity = stats["sample_token_similarity"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + sample_ids = best_sample_ids(self.sample_strategy, stats) + + ave = [] + for batch_data in zip( + batch_sample_log_likelihoods, + batch_sample_token_similarity, + batch_sample_sentence_similarity, + sample_ids, + ): + sample_log_likelihoods = batch_data[0] + sample_token_similarity = batch_data[1] + sample_sentence_similarity = batch_data[2] + best_id = batch_data[3] + + tokenSAR = [] + for log_likelihoods, token_similarity in zip( + sample_log_likelihoods, sample_token_similarity + ): + log_likelihoods = np.array(log_likelihoods) + R_t = 1 - token_similarity + if R_t.sum() == 0: + R_t_norm = np.zeros_like(R_t) + else: + R_t_norm = R_t / R_t.sum() + E_t = -log_likelihoods * R_t_norm + tokenSAR.append(E_t.sum()) + + if self.exp: + tokenSAR = -np.exp(-np.array(tokenSAR)) + + weights = sample_sentence_similarity[best_id, :] + + 
ave.append(median(np.array(tokenSAR), weights)) + + return np.array(ave) + +class SemanticMedianMTE(Estimator): + def __init__( + self, + verbose: bool = False, + sample_strategy: str = "first" + ): + super().__init__(["sample_sentence_similarity", "sample_entropy"], "sequence") + self.verbose = verbose + self.sample_strategy = sample_strategy + + def __str__(self): + return sample_strategy_to_prefix(self.sample_strategy) + "SemanticMedianMTE" + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_entropy = stats["sample_entropy"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + sample_ids = best_sample_ids(self.sample_strategy, stats) + + ave = [] + for best_id, sample_entropy, sample_sentence_similarity in zip( + sample_ids, batch_sample_entropy, batch_sample_sentence_similarity + ): + weights = sample_sentence_similarity[best_id, :] + ave.append(median(np.array(sample_entropy), weights)) + + return np.array(ave) diff --git a/src/lm_polygraph/estimators/sentence_sar.py b/src/lm_polygraph/estimators/sentence_sar.py index 44f762afe..e8f5278b9 100644 --- a/src/lm_polygraph/estimators/sentence_sar.py +++ b/src/lm_polygraph/estimators/sentence_sar.py @@ -1,6 +1,7 @@ import numpy as np from typing import Dict +from copy import deepcopy from .estimator import Estimator @@ -52,3 +53,292 @@ def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: sentenceSAR.append(E_s.mean()) return np.array(sentenceSAR) + + +#class OtherSentenceSAR(Estimator): +# """ +# Like SAR, but only looks at other samples for each sample in the output. 
+# """ +# +# def __init__( +# self, +# verbose: bool = False, +# t: float = 0.001, +# use_log: bool = True, +# reverse: bool = False +# ): +# super().__init__(["sample_sentence_similarity", "sample_log_probs"], "sequence") +# self.verbose = verbose +# self.t = t +# self.use_log = use_log +# self.reverse = reverse +# +# def __str__(self): +# base = f"OtherSentenceSAR_t{self.t}" +# if not self.use_log: +# base += "_no_log" +# if self.reverse: +# base += "_reverse" +# return base +# +# def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: +# """ +# Estimates the sentenceSAR for each sample in the input statistics. +# +# Parameters: +# stats (Dict[str, np.ndarray]): input statistics, which for multiple samples includes: +# * corresponding log probabilities in 'sample_log_probs', + +# * matrix with cross-encoder similarities in 'sample_sentence_similarity' +# Returns: +# np.ndarray: float sentenceSAR for each sample in input statistics. +# Higher values indicate more uncertain samples. 
+# """ +# batch_sample_log_probs = stats["sample_log_probs"] +# batch_sample_sentence_similarity = stats["sample_sentence_similarity"] +# +# sentenceSAR = [] +# for sample_log_probs, sample_sentence_similarity in zip( +# batch_sample_log_probs, batch_sample_sentence_similarity +# ): +# sample_probs = np.exp(np.array(sample_log_probs)) +# R_s = ( +# sample_probs +# * sample_sentence_similarity +# * (1 - np.eye(sample_sentence_similarity.shape[0])) +# ) +# sent_relevance = R_s.sum(-1) / self.t +# +# if self.use_log: +# E_s = -np.log(sent_relevance) +# else: +# if self.reverse: +# E_s = sent_relevance +# else: +# E_s = -sent_relevance +# +# sentenceSAR.append(E_s.mean()) +# +# return np.array(sentenceSAR) +# +# +#class ReweightedSentenceSAR(Estimator): +# """ +# Like SAR, but normalizes similarity-based scores at each iteration +# alpha_ij = g(s_i, s_j) / (\sum_k^(K - 1) g(s_i, s_k)) +# K - number of samples in output minus one +# """ +# def __init__(self, verbose: bool = False): +# super().__init__(["sample_sentence_similarity", "sample_log_probs"], "sequence") +# self.verbose = verbose +# self.t = 0.001 +# +# def __str__(self): +# return "ReweightedSentenceSAR" +# +# def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: +# batch_sample_log_probs = stats["sample_log_probs"] +# batch_sample_sentence_similarity = stats["sample_sentence_similarity"] +# +# sentenceSAR = [] +# +# for sample_log_probs, sample_sentence_similarity in zip( +# batch_sample_log_probs, batch_sample_sentence_similarity +# ): +# # Compute probabilities from log probabilities +# sample_probs = np.exp(np.array(sample_log_probs)) +# +# # Initialize alpha_ij (reweighted sentence similarities) +# alpha_ij = np.zeros_like(sample_sentence_similarity) +# +# # Normalize similarity-based scores at each iteration +# for i in range(sample_sentence_similarity.shape[0]): +# similarity_row = sample_sentence_similarity[i] +# # Exclude self-similarity g(s_i, s_i) +# similarity_row_without_self = 
similarity_row * (1 - np.eye(len(similarity_row)))[i]
+#                sum_similarity = np.sum(similarity_row_without_self)
+#
+#                if sum_similarity > 0:
+#                    alpha_ij[i] = similarity_row_without_self / sum_similarity
+#                else:
+#                    alpha_ij[i] = similarity_row_without_self  # If the normalization factor is 0, leave the row unchanged
+#
+#            # Compute sentence relevance using normalized alpha_ij
+#            R_s = sample_probs * alpha_ij
+#            sent_relevance = R_s.sum(-1) / self.t
+#
+#            # Compute SentenceSAR (Uncertainty Estimation)
+#            E_s = -np.log(sent_relevance + sample_probs)
+#            sentenceSAR.append(E_s.mean())
+#
+#        return np.array(sentenceSAR)
+
+
+
+class PPLSAR(Estimator):
+    """
+    Like SAR, but uses log probs normalized by sample length in tokens to calculate PPL (Perplexity).
+    Tokenwise log-likelihoods are available in stats['sample_log_likelihoods'].
+    """
+    def __init__(self, verbose: bool = False):
+        super().__init__(["sample_sentence_similarity", "sample_log_likelihoods"], "sequence")
+        self.verbose = verbose
+        self.t = 0.001
+
+    def __str__(self):
+        return "PPLSAR"
+
+    def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray:
+        """
+        Estimates the PPL-based sentence-level uncertainty using token-wise log-likelihoods.
+
+        Parameters:
+            stats (Dict[str, np.ndarray]): Input statistics, including:
+                * 'sample_log_likelihoods': token-wise log-likelihoods for each sample.
+
+        Returns:
+            np.ndarray: float PPL values for each sample.
+                Lower values indicate less uncertainty (better predictions), higher values indicate more uncertainty.
+ """ + # Extract token-wise log-likelihoods from the stats + batch_sample_log_likelihoods = stats["sample_log_likelihoods"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + + sentenceSAR = [] + + # Loop over each sample's log-likelihoods and sentence similarities + for sample_log_likelihoods, sample_sentence_similarity in zip( + batch_sample_log_likelihoods, batch_sample_sentence_similarity + ): + ppl = np.exp([np.mean(token_ll) for token_ll in sample_log_likelihoods]) + + # Initialize the sentence relevance (R_s) using PPL + R_s = ( + ppl # Use PPL instead of probabilities + * sample_sentence_similarity + * (1 - np.eye(sample_sentence_similarity.shape[0])) # Remove self-similarity + ) + + # Compute sentence relevance + sent_relevance = R_s.sum(-1) / self.t + # Compute SentenceSAR (Uncertainty Estimation) using PPL + E_s = -np.log(sent_relevance + ppl) + sentenceSAR.append(E_s.mean()) + + return np.array(sentenceSAR) + +#class DistilOneSentenceSAR(Estimator): +# """ +# Like SAR, but only looks at other samples for each sample in the output. +# """ +# +# def __init__( +# self, +# verbose: bool = False, +# use_log: bool = True, +# reverse: bool = False +# ): +# super().__init__(["sample_sentence_similarity", "sample_log_probs"], "sequence") +# self.verbose = verbose +# self.use_log = use_log +# self.reverse = reverse +# +# def __str__(self): +# base = f"DistilOneSentenceSAR" +# if not self.use_log: +# base += "_no_log" +# if self.reverse: +# base += "_reverse" +# return base +# +# def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: +# """ +# Estimates the sentenceSAR for each sample in the input statistics. +# +# Parameters: +# stats (Dict[str, np.ndarray]): input statistics, which for multiple samples includes: +# * corresponding log probabilities in 'sample_log_probs', +# * matrix with cross-encoder similarities in 'sample_sentence_similarity' +# Returns: +# np.ndarray: float sentenceSAR for each sample in input statistics. 
+# Higher values indicate more uncertain samples. +# """ +# batch_sample_log_probs = stats["sample_log_probs"] +# batch_sample_sentence_similarity = deepcopy(stats["sample_sentence_similarity"]) +# +# sentenceSAR = [] +# for sample_log_probs, sample_sentence_similarity in zip( +# batch_sample_log_probs, batch_sample_sentence_similarity +# ): +# sample_probs = np.exp(np.array(sample_log_probs)) +# np.fill_diagonal(sample_sentence_similarity, 1) +# +# R_s = ( +# sample_probs +# * sample_sentence_similarity +# ) +# sent_relevance = R_s.sum(-1) +# +# if self.use_log: +# E_s = -np.log(sent_relevance) +# else: +# if self.reverse: +# E_s = sent_relevance +# else: +# E_s = -sent_relevance +# +# SAR.append(E_s.mean()) +# +# return np.array(SAR) + + +class MTESAR(Estimator): + """ + Like SAR, but uses sample entropy calculated from token-wise log probs for each sample. + Tokenwise log-likelihoods are available in stats['sample_log_likelihoods']. + """ + def __init__(self, verbose: bool = False): + super().__init__(["sample_sentence_similarity", "sample_entropy"], "sequence") + self.verbose = verbose + self.t = 0.001 + + def __str__(self): + return "MTESAR" + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + """ + Estimates the Entropy-based sentence-level uncertainty using token-wise log-likelihoods. + + Parameters: + stats (Dict[str, np.ndarray]): Input statistics, including: + * 'sample_log_likelihoods': token-wise log-likelihoods for each sample. + + Returns: + np.ndarray: float PPL values for each sample. + Lower values indicate less uncertainty (better predictions), higher values indicate more uncertainty. 
+ """ + # Extract token-wise log-likelihoods from the stats + batch_sample_entropy = stats["sample_entropy"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + + sentenceSAR = [] + + # Loop over each sample's log-likelihoods and sentence similarities + for sample_entropy, sample_sentence_similarity in zip( + batch_sample_entropy, batch_sample_sentence_similarity + ): + entropy = sample_entropy + # Initialize the sentence relevance (R_s) using PPL + R_s = ( + entropy # Use entropy instead of probabilities + * sample_sentence_similarity + * (1 - np.eye(sample_sentence_similarity.shape[0])) # Remove self-similarity + ) + + # Compute sentence relevance + sent_relevance = R_s.sum(-1) / self.t + # Compute SentenceSAR (Uncertainty Estimation) using PPL + E_s = np.log(sent_relevance + entropy) + sentenceSAR.append(E_s.mean()) + + return np.array(sentenceSAR) diff --git a/src/lm_polygraph/estimators/sum_semantic_entropies.py b/src/lm_polygraph/estimators/sum_semantic_entropies.py new file mode 100644 index 000000000..47f9aad25 --- /dev/null +++ b/src/lm_polygraph/estimators/sum_semantic_entropies.py @@ -0,0 +1,213 @@ +import numpy as np + +from typing import Dict +from copy import deepcopy + +from .estimator import Estimator +from .common import sample_strategy_to_prefix, best_sample_ids, SAMPLE_SELECTION_STAT_KEYS + + +class SumSemanticMaxprob(Estimator): + def __init__( + self, + verbose: bool = False, + sample_strategy: str = "first" + ): + super().__init__( + ["sample_sentence_similarity", "sample_log_probs"] + SAMPLE_SELECTION_STAT_KEYS, + "sequence" + ) + self.verbose = verbose + self.sample_strategy = sample_strategy + + def __str__(self): + base = "SumSemanticMaxprob" + return sample_strategy_to_prefix(self.sample_strategy) + base + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_log_probs = stats["sample_log_probs"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + sample_ids = 
best_sample_ids(self.sample_strategy, stats) + + enriched_metrics = [] # To store enriched metrics for each sample + + for best_id, sample_log_probs, sample_sentence_similarity in zip( + sample_ids, batch_sample_log_probs, batch_sample_sentence_similarity + ): + sim = 1 - sample_sentence_similarity[best_id, :] + sim[best_id] = 1 + avg_similarity = np.mean(sim) + mp = -np.sum(sample_log_probs[best_id]) + res = mp + avg_similarity + enriched_metrics.append(res) + + return np.array(enriched_metrics) + + +class SumSemanticPPL(Estimator): + def __init__( + self, + verbose: bool = False, + sample_strategy: str = "first" + ): + super().__init__( + ["sample_sentence_similarity", "sample_log_likelihoods"] + SAMPLE_SELECTION_STAT_KEYS, + "sequence" + ) + self.verbose = verbose + self.sample_strategy = sample_strategy + + def __str__(self): + base = "SumSemanticPPL" + return sample_strategy_to_prefix(self.sample_strategy) + base + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + batch_sample_log_likelihoods = stats["sample_log_likelihoods"] + batch_sample_sentence_similarity = stats["sample_sentence_similarity"] + sample_ids = best_sample_ids(self.sample_strategy, stats) + + enriched_ppl = [] # To store enriched PPL for each sample + + for best_id, sample_log_likelihoods, sample_sentence_similarity in zip( + sample_ids, batch_sample_log_likelihoods, batch_sample_sentence_similarity + ): + sim = 1 - sample_sentence_similarity[best_id, :] + sim[best_id] = 1 + avg_similarity = np.mean(sim) + ppl = -np.mean(sample_log_likelihoods[best_id]) + res = ppl + avg_similarity + enriched_ppl.append(res) + + return np.array(enriched_ppl) + + +class SumSemanticMTE(Estimator): + def __init__( + self, + verbose: bool = False, + sample_strategy: str = "first" + ): + super().__init__( + ["sample_sentence_similarity", "sample_entropy"] + SAMPLE_SELECTION_STAT_KEYS, + "sequence" + ) + self.verbose = verbose + self.sample_strategy = sample_strategy + + def __str__(self): + base 
class GreedySumSemanticMaxprob(Estimator):
    """Sequence-level uncertainty: negative log-probability of the greedy
    generation, enriched with the average semantic dissimilarity between the
    greedy answer and the sampled answers. Higher value => more uncertain.
    """

    def __init__(
        self,
        verbose: bool = False,
    ):
        super().__init__(
            ["greedy_sentence_similarity", "greedy_log_likelihoods"], "sequence"
        )
        self.verbose = verbose

    def __str__(self):
        return "GreedySumSemanticMaxprob"

    def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray:
        """Return one enriched score per batch element.

        stats must provide:
          * "greedy_log_likelihoods": per-token log-likelihoods of the greedy
            generation (one sequence per batch element)
          * "greedy_sentence_similarity": similarities between the greedy
            answer and the sampled answers
        """
        batch_sims = stats["greedy_sentence_similarity"]
        # Sequence log-probability of each greedy generation.
        batch_lls = np.array(
            [np.sum(lls) for lls in stats["greedy_log_likelihoods"]]
        )

        enriched = []
        for greedy_ll, sims in zip(batch_lls, batch_sims):
            nll = -greedy_ll  # maxprob-style uncertainty (negative log-prob)
            # Average dissimilarity of the greedy answer to the samples.
            # NOTE(review): the mean runs over the whole similarity array —
            # nothing is excluded, despite the original comment claiming the
            # self-similarity diagonal was dropped.
            avg_dissim = 1 - np.mean(sims)
            enriched.append(nll + avg_dissim)

        return np.array(enriched)


class GreedySumSemanticPPL(Estimator):
    """Sequence-level uncertainty: perplexity-style score (negative mean
    log-likelihood) of the greedy generation, enriched with the average
    semantic dissimilarity between the greedy answer and the sampled answers.
    """

    def __init__(
        self,
        verbose: bool = False,
    ):
        super().__init__(
            ["greedy_sentence_similarity", "greedy_log_likelihoods"], "sequence"
        )
        self.verbose = verbose

    def __str__(self):
        return "GreedySumSemanticPPL"

    def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray:
        enriched = []
        for lls, sims in zip(
            stats["greedy_log_likelihoods"], stats["greedy_sentence_similarity"]
        ):
            ppl = -np.mean(lls)  # negative mean log-likelihood (log-PPL)
            avg_dissim = 1 - np.mean(sims)
            enriched.append(ppl + avg_dissim)

        return np.array(enriched)


class GreedySumSemanticMTE(Estimator):
    """Sequence-level uncertainty: mean token entropy of the greedy
    generation, enriched with the average semantic dissimilarity between the
    greedy answer and the sampled answers.
    """

    def __init__(
        self,
        verbose: bool = False,
    ):
        super().__init__(["greedy_sentence_similarity", "entropy"], "sequence")
        self.verbose = verbose

    def __str__(self):
        return "GreedySumSemanticMTE"

    def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray:
        # FIX: this estimator declares a dependency on "entropy" but the
        # original read stats["greedy_log_likelihoods"] (a copy-paste from the
        # PPL variant), so it never computed token entropy at all.
        # MeanTokenEntropy in token_entropy.py confirms "entropy" holds
        # per-token entropies of the greedy generation.
        enriched = []
        for entropies, sims in zip(
            stats["entropy"], stats["greedy_sentence_similarity"]
        ):
            mte = np.mean(entropies)
            avg_dissim = 1 - np.mean(sims)
            enriched.append(mte + avg_dissim)

        return np.array(enriched)
from .common import (
    SAMPLE_SELECTION_STAT_KEYS,
    best_sample_ids,
    sample_strategy_to_prefix,
)
from sklearn.preprocessing import MinMaxScaler


def get_avg_dissim(sample_sentence_similarity, sample_ids):
    """Per batch element: average dissimilarity (1 - similarity) between the
    selected sample and every sample in the batch (self included)."""
    return [
        np.mean(1 - sims[best_id, :])
        for best_id, sims in zip(sample_ids, sample_sentence_similarity)
    ]


def _minmax(values):
    """Min-max scale a 1-D sequence over the batch dimension."""
    return MinMaxScaler().fit_transform(np.array(values).reshape(-1, 1)).flatten()


def normalize_and_enrich(batch_metrics, batch_avg_dissimilarity, alpha):
    """Min-max normalize both signals over the batch and combine them as
    ``metric + alpha * dissimilarity``.

    NOTE(review): normalization is batch-dependent, so scores are only
    comparable within a single batch — confirm this is intended.
    """
    metrics = _minmax(batch_metrics)
    dissims = _minmax(batch_avg_dissimilarity)
    return [m + d * alpha for m, d in zip(metrics, dissims)]


class SupSumSemanticMaxprob(Estimator):
    """Batch-normalized maxprob (negative sequence log-probability of the
    selected sample) enriched with alpha-weighted average semantic
    dissimilarity to the other samples."""

    def __init__(
        self,
        verbose: bool = False,
        sample_strategy: str = "first",
        alpha: float = 1,
    ):
        super().__init__(
            ["sample_sentence_similarity", "sample_log_probs"]
            + SAMPLE_SELECTION_STAT_KEYS,
            "sequence",
        )
        self.verbose = verbose
        self.sample_strategy = sample_strategy
        self.alpha = alpha

    def __str__(self):
        base = f"SupSumSemanticMaxprob_{self.alpha}"
        return sample_strategy_to_prefix(self.sample_strategy) + base

    def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray:
        sample_ids = best_sample_ids(self.sample_strategy, stats)
        batch_mps = [
            -np.sum(log_probs[best_id])
            for best_id, log_probs in zip(sample_ids, stats["sample_log_probs"])
        ]
        batch_avg_dissim = get_avg_dissim(
            stats["sample_sentence_similarity"], sample_ids
        )
        return np.array(
            normalize_and_enrich(batch_mps, batch_avg_dissim, self.alpha)
        )


class SupSumSemanticPPL(Estimator):
    """Batch-normalized perplexity (negative mean log-likelihood) of the
    selected sample enriched with alpha-weighted average semantic
    dissimilarity to the other samples."""

    def __init__(
        self,
        verbose: bool = False,
        sample_strategy: str = "first",
        alpha: float = 1,
    ):
        super().__init__(
            ["sample_sentence_similarity", "sample_log_likelihoods"]
            + SAMPLE_SELECTION_STAT_KEYS,
            "sequence",
        )
        self.verbose = verbose
        self.sample_strategy = sample_strategy
        self.alpha = alpha

    def __str__(self):
        base = f"SupSumSemanticPPL_{self.alpha}"
        return sample_strategy_to_prefix(self.sample_strategy) + base

    def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray:
        sample_ids = best_sample_ids(self.sample_strategy, stats)
        batch_ppls = [
            -np.mean(lls[best_id])
            for best_id, lls in zip(sample_ids, stats["sample_log_likelihoods"])
        ]
        batch_avg_dissim = get_avg_dissim(
            stats["sample_sentence_similarity"], sample_ids
        )
        return np.array(
            normalize_and_enrich(batch_ppls, batch_avg_dissim, self.alpha)
        )


class SupSumSemanticMTE(Estimator):
    """Batch-normalized mean token entropy of the selected sample enriched
    with alpha-weighted average semantic dissimilarity to the other samples."""

    def __init__(
        self,
        verbose: bool = False,
        sample_strategy: str = "first",
        alpha: float = 1,
    ):
        super().__init__(
            ["sample_sentence_similarity", "sample_entropy"]
            + SAMPLE_SELECTION_STAT_KEYS,
            "sequence",
        )
        self.verbose = verbose
        self.sample_strategy = sample_strategy
        self.alpha = alpha

    def __str__(self):
        base = f"SupSumSemanticMTE_{self.alpha}"
        return sample_strategy_to_prefix(self.sample_strategy) + base

    def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray:
        sample_ids = best_sample_ids(self.sample_strategy, stats)
        batch_mtes = [
            entropies[best_id]
            for best_id, entropies in zip(sample_ids, stats["sample_entropy"])
        ]
        batch_avg_dissim = get_avg_dissim(
            stats["sample_sentence_similarity"], sample_ids
        )
        return np.array(
            normalize_and_enrich(batch_mtes, batch_avg_dissim, self.alpha)
        )


class GreedySupSumSemanticMaxprob(Estimator):
    """Batch-normalized greedy maxprob enriched with alpha-weighted average
    semantic dissimilarity between the greedy answer and the samples."""

    def __init__(self, verbose: bool = False, alpha: float = 1):
        super().__init__(
            ["greedy_sentence_similarity", "greedy_log_likelihoods"], "sequence"
        )
        self.verbose = verbose
        self.alpha = alpha

    def __str__(self):
        return f"GreedySupSumSemanticMaxprob_{self.alpha}"

    def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray:
        # FIX: the summed log-likelihood must be negated to act as an
        # uncertainty score (cf. SupSumSemanticMaxprob, which uses
        # -sum(log_probs)); without the minus sign the ranking was inverted.
        batch_nlls = np.array(
            [-np.sum(lls) for lls in stats["greedy_log_likelihoods"]]
        )
        batch_avg_dissim = [
            np.mean(1 - sims) for sims in stats["greedy_sentence_similarity"]
        ]
        return np.array(
            normalize_and_enrich(batch_nlls, batch_avg_dissim, self.alpha)
        )


class GreedySupSumSemanticPPL(Estimator):
    """Batch-normalized greedy perplexity enriched with alpha-weighted
    average semantic dissimilarity between the greedy answer and the samples."""

    def __init__(self, verbose: bool = False, alpha: float = 1):
        super().__init__(
            ["greedy_sentence_similarity", "greedy_log_likelihoods"], "sequence"
        )
        self.verbose = verbose
        self.alpha = alpha

    def __str__(self):
        return f"GreedySupSumSemanticPPL_{self.alpha}"

    def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray:
        batch_ppls = [
            -np.mean(lls) for lls in stats["greedy_log_likelihoods"]
        ]
        batch_avg_dissim = [
            np.mean(1 - sims) for sims in stats["greedy_sentence_similarity"]
        ]
        return np.array(
            normalize_and_enrich(batch_ppls, batch_avg_dissim, self.alpha)
        )


class GreedySupSumSemanticMTE(Estimator):
    """Batch-normalized mean token entropy of the greedy generation enriched
    with alpha-weighted average semantic dissimilarity to the samples."""

    def __init__(self, verbose: bool = False, alpha: float = 1):
        super().__init__(["greedy_sentence_similarity", "entropy"], "sequence")
        self.verbose = verbose
        self.alpha = alpha

    def __str__(self):
        return f"GreedySupSumSemanticMTE_{self.alpha}"

    def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray:
        # FIX: this estimator declares a dependency on "entropy" but the
        # original read stats["greedy_log_likelihoods"]; the mean
        # log-likelihood is not token entropy (the leftover commented-out
        # lines in the original show the same confusion).
        batch_mtes = [np.mean(entropies) for entropies in stats["entropy"]]
        batch_avg_dissim = [
            np.mean(1 - sims) for sims in stats["greedy_sentence_similarity"]
        ]
        return np.array(
            normalize_and_enrich(batch_mtes, batch_avg_dissim, self.alpha)
        )
+ Works only with whitebox models (initialized using lm_polygraph.utils.model.WhiteboxModel). + + This method calculates the weighted sum of log_likelihoods with weights computed using token relevance. + """ + + def __init__(self, verbose: bool = False, sample_strategy: str = "first"): + super().__init__(["sample_token_similarity", "sample_log_likelihoods"], "sequence") + self.verbose = verbose + self.sample_strategy = sample_strategy + + def __str__(self): + return sample_strategy_to_prefix(self.sample_strategy) + "SampledTokenSAR" + + def __call__(self, stats: Dict[str, np.ndarray]) -> np.ndarray: + """ + Estimates the tokenSAR for each sample in the input statistics. + + Parameters: + stats (Dict[str, np.ndarray]): input statistics, which for multiple samples includes: + * log p(y_i | y_ int: @@ -66,11 +81,21 @@ def __call__( Returns: np.ndarray: list of accuracies: 1 if generated text is equal to ground-truth and 0 otherwise. """ - greedy_texts = stats["greedy_texts"] + if self.sample: + if self.sample_strategy == "First": + gen_texts = stats["first_sample_texts"] + elif self.sample_strategy == "Best": + gen_texts = stats["best_sample_texts"] + elif self.sample_strategy == "BestNormalized": + gen_texts = stats["best_normalized_sample_texts"] + else: + raise ValueError(f"Invalid sample strategy: {self.sample_strategy}") + else: + gen_texts = stats["greedy_texts"] result = [] - for hyp, ref in zip(greedy_texts, target_texts): + for hyp, ref in zip(gen_texts, target_texts): ref = self._filter_text(ref, self.target_ignore_regex) hyp = self._filter_text(hyp, self.output_ignore_regex) diff --git a/src/lm_polygraph/generation_metrics/aggregated_metric.py b/src/lm_polygraph/generation_metrics/aggregated_metric.py index bd20e9d93..17a05cc6f 100644 --- a/src/lm_polygraph/generation_metrics/aggregated_metric.py +++ b/src/lm_polygraph/generation_metrics/aggregated_metric.py @@ -11,6 +11,7 @@ class AggregatedMetric(GenerationMetric): def __init__(self, base_metric: 
GenerationMetric, aggregation: str = "max"): self.base_metric = base_metric + self.sample = base_metric.sample self.level = base_metric.level self.stats_dependencies = base_metric.stats_dependencies self.aggregation = aggregation @@ -34,8 +35,14 @@ def __call__( np.ndarray: list of aggregated metric values for each sample in input. """ metric_values = [] - for i, (targets, greedy_text) in enumerate( - zip(target_texts, stats["greedy_texts"]) + + if self.sample: + gen_texts = stats["first_sample_texts"] + else: + gen_texts = stats["greedy_texts"] + + for i, (targets, gen_text) in enumerate( + zip(target_texts, gen_texts) ): # truncate stats to only process one sample at a time truncated_stats = { diff --git a/src/lm_polygraph/generation_metrics/alignscore.py b/src/lm_polygraph/generation_metrics/alignscore.py index a1f9a63d7..a6bd49504 100644 --- a/src/lm_polygraph/generation_metrics/alignscore.py +++ b/src/lm_polygraph/generation_metrics/alignscore.py @@ -14,25 +14,44 @@ class AlignScore(GenerationMetric): def __init__( self, + scorer, lang="en", - ckpt_path="https://huggingface.co/yzha/AlignScore/resolve/main/AlignScore-large.ckpt", - batch_size=16, target_is_claims=True, + ignore_target=False, + sample: bool = False, + sample_strategy: str = "First", ): - super().__init__(["greedy_texts", "input_texts"], "sequence") - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + if sample: + super().__init__([ + "first_sample_texts", + "best_sample_texts", + "best_normalized_sample_texts", + "input_texts"], + "sequence") + else: + super().__init__(["greedy_texts", "input_texts"], "sequence") + self.sample = sample + self.sample_strategy = sample_strategy self.target_is_claims = target_is_claims - self.batch_size = batch_size - self.scorer = AlignScorer( - model="roberta-large", - batch_size=batch_size, - device=device, - ckpt_path=ckpt_path, - evaluation_mode="nli_sp", - ) + self.ignore_target = ignore_target + self.scorer = scorer def __str__(self): - 
return "AlignScore" + base = "AlignScore" + if self.ignore_target: + base += "InputOutput" + elif self.target_is_claims: + base += "OutputTarget" + else: + base += "TargetOutput" + + if self.sample: + if self.sample_strategy == "First": + return f"Sample{base}" + else: + return f"{self.sample_strategy}Sample{base}" + + return base def __call__( self, @@ -50,17 +69,34 @@ def __call__( Returns: np.ndarray: list of AlignScore Scores for each sample in input. """ - greedy_texts = stats["greedy_texts"] + if self.sample: + if self.sample_strategy == "First": + gen_texts = stats["first_sample_texts"] + elif self.sample_strategy == "Best": + gen_texts = stats["best_sample_texts"] + elif self.sample_strategy == "BestNormalized": + gen_texts = stats["best_normalized_sample_texts"] + else: + raise ValueError(f"Invalid sample strategy: {self.sample_strategy}") + else: + gen_texts = stats["greedy_texts"] + + input_texts = stats["input_texts"] filtered_targets = [x if len(x.strip()) else "(empty)" for x in target_texts] - filtered_outputs = [x if len(x.strip()) else "(empty)" for x in greedy_texts] + filtered_outputs = [x if len(x.strip()) else "(empty)" for x in gen_texts] + filtered_inputs = [x if len(x.strip()) else "(empty)" for x in input_texts] - if self.target_is_claims: - claims = filtered_targets - contexts = filtered_outputs - else: + if self.ignore_target: claims = filtered_outputs - contexts = filtered_targets + contexts = filtered_inputs + else: + if self.target_is_claims: + claims = filtered_targets + contexts = filtered_outputs + else: + claims = filtered_outputs + contexts = filtered_targets scores = np.array( self.scorer.score( diff --git a/src/lm_polygraph/generation_metrics/bleu.py b/src/lm_polygraph/generation_metrics/bleu.py index 641047495..34fee322a 100644 --- a/src/lm_polygraph/generation_metrics/bleu.py +++ b/src/lm_polygraph/generation_metrics/bleu.py @@ -10,11 +10,26 @@ class BLEUMetric(GenerationMetric): Calculates BLEU metric between model-generated 
texts and ground truth texts. """ - def __init__(self): - super().__init__(["greedy_texts"], "sequence") + def __init__(self, sample: bool = False, sample_strategy: str = "First"): + if sample: + super().__init__([ + "first_sample_texts", + "best_sample_texts", + "best_normalized_sample_texts", + "input_texts"], + "sequence") + else: + super().__init__(["greedy_texts"], "sequence") + self.sample = sample + self.sample_strategy = sample_strategy self.scorer = BLEU(effective_order=True, lowercase=True) def __str__(self): + if self.sample: + if self.sample_strategy == "First": + return "SampleBLEU" + else: + return f"{self.sample_strategy}SampleBLEU" return "BLEU" def _score_single(self, t1: str, t2: str): @@ -37,9 +52,21 @@ def __call__( Returns: np.ndarray: list of BLEU Scores for each sample in input. """ + if self.sample: + if self.sample_strategy == "First": + gen_texts = stats["first_sample_texts"] + elif self.sample_strategy == "Best": + gen_texts = stats["best_sample_texts"] + elif self.sample_strategy == "BestNormalized": + gen_texts = stats["best_normalized_sample_texts"] + else: + raise ValueError(f"Invalid sample strategy: {self.sample_strategy}") + else: + gen_texts = stats["greedy_texts"] + return np.array( [ self._score_single(hyp, ref) - for hyp, ref in zip(stats["greedy_texts"], target_texts) + for hyp, ref in zip(gen_texts, target_texts) ] ) diff --git a/src/lm_polygraph/generation_metrics/comet.py b/src/lm_polygraph/generation_metrics/comet.py index 0fcd9b3e2..f91c833a3 100644 --- a/src/lm_polygraph/generation_metrics/comet.py +++ b/src/lm_polygraph/generation_metrics/comet.py @@ -1,6 +1,5 @@ import re import numpy as np -from evaluate import load from typing import List, Dict from .generation_metric import GenerationMetric @@ -12,14 +11,29 @@ class Comet(GenerationMetric): between model-generated texts and ground truth texts. 
""" - def __init__(self, source_ignore_regex=None, lang="en"): - super().__init__(["greedy_texts", "input_texts"], "sequence") - self.scorer = load("comet") + def __init__(self, scorer, source_ignore_regex=None, lang="en", sample: bool = False, sample_strategy: str = "First"): + if sample: + super().__init__([ + "first_sample_texts", + "best_sample_texts", + "best_normalized_sample_texts", + "input_texts"], + "sequence") + else: + super().__init__(["greedy_texts", "input_texts"], "sequence") + self.sample = sample + self.sample_strategy = sample_strategy self.source_ignore_regex = ( re.compile(source_ignore_regex) if source_ignore_regex else None ) + self.scorer = scorer def __str__(self): + if self.sample: + if self.sample_strategy == "First": + return f"SampleComet" + else: + return f"{self.sample_strategy}SampleComet" return "Comet" def _filter_text(self, text: str, ignore_regex: re.Pattern) -> str: @@ -54,9 +68,22 @@ def __call__( self._filter_text(src, self.source_ignore_regex) for src in stats["input_texts"] ] + + if self.sample: + if self.sample_strategy == "First": + gen_texts = stats["first_sample_texts"] + elif self.sample_strategy == "Best": + gen_texts = stats["best_sample_texts"] + elif self.sample_strategy == "BestNormalized": + gen_texts = stats["best_normalized_sample_texts"] + else: + raise ValueError(f"Invalid sample strategy: {self.sample_strategy}") + else: + gen_texts = stats["greedy_texts"] + scores = np.array( self.scorer.compute( - predictions=stats["greedy_texts"], + predictions=gen_texts, references=target_texts, sources=sources, )["scores"] diff --git a/src/lm_polygraph/generation_metrics/gpt_judge_accuracy.py b/src/lm_polygraph/generation_metrics/gpt_judge_accuracy.py new file mode 100644 index 000000000..ac95bfb58 --- /dev/null +++ b/src/lm_polygraph/generation_metrics/gpt_judge_accuracy.py @@ -0,0 +1,111 @@ +import openai +from .generation_metric import GenerationMetric +import numpy as np +import logging +from typing import Dict, List 
import os
import re

from tqdm import tqdm

log = logging.getLogger("lm_polygraph")


class GptAccuracyMetric(GenerationMetric):
    """Uses a GPT judge to compare generated text with the target and returns
    1 if the judge deems the answer correct, else 0.

    Parameters:
        model: OpenAI model name used as the judge.
        sample: judge sampled generations instead of the greedy one.
        sample_strategy: which sample to judge ("First", "Best",
            "BestNormalized").
        api_key: OpenAI key; falls back to the OPENAI_API_KEY env variable.
    """

    def __init__(self, model="gpt-4o-mini", sample=False, sample_strategy="First", api_key=None):
        if sample:
            super().__init__([
                "no_fewshot_input_texts",
                "first_sample_texts",
                "best_sample_texts",
                "best_normalized_sample_texts",
                "input_texts"],
                "sequence")
        else:
            super().__init__(["no_fewshot_input_texts", "greedy_texts", "input_texts"], "sequence")

        self.sample = sample
        self.sample_strategy = sample_strategy
        self.model = model
        self.api_key = api_key or os.getenv("OPENAI_API_KEY")
        # NOTE(review): this mutates the module-level key for every openai
        # user in the process; a per-client key would be safer.
        openai.api_key = self.api_key

    def __str__(self):
        if self.sample:
            if self.sample_strategy == "First":
                return f"SampleGptAccuracy_{self.model}"
            # FIX: include "Sample" for non-First strategies, consistent with
            # every other metric in this package (e.g. "BestSampleBLEU").
            return f"{self.sample_strategy}SampleGptAccuracy_{self.model}"
        return f"GptAccuracy_{self.model}"

    def _filter_input(self, input):
        """Extract the last "Question: ... Answer:" span from a prompt.

        NOTE(review): currently unused — __call__ passes
        "no_fewshot_input_texts" through unchanged; confirm whether this
        filter should be applied to them.
        """
        matches = re.findall(r"Question:\s*(.*?)\nAnswer:", input, re.DOTALL)
        if matches:
            return matches[-1].strip()
        return input

    def _gpt_compare(self, output: str, target: str, question: str) -> int:
        """Ask the judge model whether `output` answers `question` correctly
        given `target` (a string or a list of acceptable answers).
        Returns 1 for correct, 0 for wrong or on any API/parsing failure."""
        if isinstance(target, list):
            str_target = ", ".join(target)
            prompt = (
                f"You are a text evaluator. The model was asked the following question:\n{question}\n"
                "The 'Generated' text is a model's response. The 'Target' is the list of possible correct answers.\n"
                "If the generated answer correctly answers the question (matches one of the target responses), return 1.\n"
                "If it is wrong, return 0.\n"
                "Respond ONLY with a single digit: 1 or 0.\n\n"
                f"Generated: {output.strip()}\n"
                f"Target list: {str_target.strip()}"
            )
        else:
            prompt = (
                f"You are a text evaluator. The model was asked the following question:\n{question}\n"
                "The 'Generated' text is a model's response. The 'Target' is the correct answer.\n"
                "If the generated answer correctly answers the question based on the target, return 1.\n"
                "If it is wrong, return 0.\n"
                "Respond ONLY with a single digit: 1 or 0.\n\n"
                f"Generated: {output.strip()}\n"
                f"Target: {target.strip()}"
            )

        try:
            # NOTE(review): openai.ChatCompletion is the legacy (<1.0) API;
            # migrate to openai.OpenAI().chat.completions when upgrading.
            response = openai.ChatCompletion.create(
                model=self.model,
                messages=[
                    {"role": "system", "content": "You are a strict evaluator of correctness of the model's response."},
                    {"role": "user", "content": prompt}
                ],
                temperature=0,
                max_tokens=1,
                n=1
            )

            raw_reply = response['choices'][0]['message']['content'].strip()

            return int(raw_reply) if raw_reply in ['0', '1'] else 0

        except Exception as e:
            log.error(f"GPT comparison failed: {e}")
            return 0  # Safe default: count judge failures as incorrect

    def __call__(self, stats: Dict[str, np.ndarray], target_texts: List[str]) -> np.ndarray:
        """Return a 0/1 correctness array, one entry per batch element."""
        if self.sample:
            if self.sample_strategy == "First":
                gen_texts = stats["first_sample_texts"]
            elif self.sample_strategy == "Best":
                gen_texts = stats["best_sample_texts"]
            elif self.sample_strategy == "BestNormalized":
                gen_texts = stats["best_normalized_sample_texts"]
            else:
                raise ValueError(f"Invalid sample strategy: {self.sample_strategy}")
        else:
            gen_texts = stats["greedy_texts"]

        # Judge against the prompt without few-shot examples so the judge
        # sees only the actual question.
        questions = stats["no_fewshot_input_texts"]

        results = []
        for output, target, question in tqdm(zip(gen_texts, target_texts, questions)):
            results.append(self._gpt_compare(output, target, question))

        return np.array(results)
getattr(base_metric, "base_metric", base_metric) + self.sample = base_metric.sample self.level = base_metric.level self.stats_dependencies = base_metric.stats_dependencies self.process_output_fn = process_output_fn @@ -44,8 +45,13 @@ def __call__( stats_copy = {k: v for k, v in stats.items() if k in self.stats_dependencies} stats_copy = deepcopy(stats_copy) - stats_copy["greedy_texts"] = [ - self.process_output_fn(output) for output in stats_copy["greedy_texts"] - ] + if self.sample: + stats_copy["first_sample_texts"] = [ + self.process_output_fn(output) for output in stats_copy["first_sample_texts"] + ] + else: + stats_copy["greedy_texts"] = [ + self.process_output_fn(output) for output in stats_copy["greedy_texts"] + ] return self.base_metric(stats_copy, processed_target_texts) diff --git a/src/lm_polygraph/generation_metrics/rouge.py b/src/lm_polygraph/generation_metrics/rouge.py index e4f96a18d..cea5201ec 100644 --- a/src/lm_polygraph/generation_metrics/rouge.py +++ b/src/lm_polygraph/generation_metrics/rouge.py @@ -15,7 +15,7 @@ class RougeMetric(GenerationMetric): Calculates Rouge metric between model-generated texts and ground truth texts. """ - def __init__(self, rouge_name): + def __init__(self, rouge_name, sample: bool = False, sample_strategy: str = "First"): """ Parameters: rouge_name (str): rouge metric type. 
Possible values: @@ -23,11 +23,25 @@ def __init__(self, rouge_name): * rouge2 * rougeL """ - super().__init__(["greedy_texts"], "sequence") + if sample: + super().__init__([ + "first_sample_texts", + "best_sample_texts", + "best_normalized_sample_texts"], + "sequence") + else: + super().__init__(["greedy_texts"], "sequence") + self.sample = sample + self.sample_strategy = sample_strategy self.rouge_name = rouge_name self.scorer = rouge_scorer.RougeScorer([rouge_name], use_stemmer=True) def __str__(self): + if self.sample: + if self.sample_strategy == "First": + return f"SampleRouge_{self.rouge_name}" + else: + return f"{self.sample_strategy}SampleRouge_{self.rouge_name}" return f"Rouge_{self.rouge_name}" def _score_single(self, t1: str, t2: str): @@ -52,9 +66,21 @@ def __call__( Returns: np.ndarray: list of Rouge Scores for each sample in input. """ + if self.sample: + if self.sample_strategy == "First": + gen_texts = stats["first_sample_texts"] + elif self.sample_strategy == "Best": + gen_texts = stats["best_sample_texts"] + elif self.sample_strategy == "BestNormalized": + gen_texts = stats["best_normalized_sample_texts"] + else: + raise ValueError(f"Invalid sample strategy: {self.sample_strategy}") + else: + gen_texts = stats["greedy_texts"] + return np.array( [ self._score_single(hyp, ref) - for hyp, ref in zip(stats["greedy_texts"], target_texts) + for hyp, ref in zip(gen_texts, target_texts) ] ) diff --git a/src/lm_polygraph/generation_metrics/x_metric.py b/src/lm_polygraph/generation_metrics/x_metric.py new file mode 100644 index 000000000..e8f7fe6af --- /dev/null +++ b/src/lm_polygraph/generation_metrics/x_metric.py @@ -0,0 +1,155 @@ +import re +import numpy as np + +from typing import List, Dict +from .generation_metric import GenerationMetric +from transformers import AutoTokenizer +from .x_metric_utils import MT5ForRegression +import torch +import datasets +from transformers import TrainingArguments, DataCollatorWithPadding, Trainer + +class 
class XMetric(GenerationMetric):
    """Calculates the X-METRIC score (https://aclanthology.org/2023.wmt-1.63/)
    between model-generated translations and reference texts using a
    regression-head MT5 model driven through a HuggingFace Trainer.
    """

    def __init__(self, model, tokenizer,
                 source_ignore_regex=None, translation_ignore_regex=None,
                 sample: bool = False, sample_strategy: str = "First"):
        if sample:
            super().__init__([
                "first_sample_texts",
                "best_sample_texts",
                "best_normalized_sample_texts",
                "input_texts"],
                "sequence")
        else:
            super().__init__(["greedy_texts", "input_texts"], "sequence")
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = model
        self.tokenizer = tokenizer
        # Regex that extracts the source sentence out of the full prompt.
        self.source_ignore_regex = (
            re.compile(source_ignore_regex) if source_ignore_regex else None
        )
        # Regex that strips boilerplate from the generated translation.
        self.translation_ignore_regex = (
            re.compile(translation_ignore_regex) if translation_ignore_regex else None
        )

        self.training_args = TrainingArguments(
            output_dir=".",
            per_device_eval_batch_size=1,
            disable_tqdm=False,
            dataloader_pin_memory=False,
        )

        data_collator = DataCollatorWithPadding(tokenizer=self.tokenizer)

        # Trainer is used purely for batched prediction.
        self.trainer = Trainer(
            model=self.model,
            args=self.training_args,
            data_collator=data_collator,
        )
        self.sample = sample
        self.sample_strategy = sample_strategy

    def __str__(self):
        if self.sample:
            if self.sample_strategy == "First":
                return "Samplexmetric"
            return f"{self.sample_strategy}Samplexmetric"
        return "xmetric"

    def _filter_translation(self, text: str, ignore_regex: re.Pattern) -> str:
        """Strip an optional boilerplate pattern from a generated translation."""
        return ignore_regex.sub("", text).strip() if ignore_regex else text.strip()

    def _filter_text(self, text: str, ignore_regex: re.Pattern) -> str:
        """Extract the source sentence (regex group 1) from a full prompt."""
        if ignore_regex is not None:
            processed_text = ignore_regex.search(text)
            if processed_text:
                return processed_text.group(1)
            raise ValueError(
                f"Source text {text} does not match the ignore regex {ignore_regex}"
            )
        return text

    def _prepare_inputs(self, translations: List[str], references: List[str], sources: List[str]):
        """Prepares the input data for X-METRIC scoring."""
        inputs = [
            f"source: {source} candidate: {hyp} reference: {ref}"
            for hyp, ref, source in zip(translations, references, sources)
        ]
        tokenized = self.tokenizer(
            inputs,
            max_length=512,
            truncation=True,
            padding=False
        )

        # Convert to a Hugging Face Dataset so Trainer.predict can batch it.
        dataset = datasets.Dataset.from_dict({
            "input_ids": tokenized["input_ids"],
            "attention_mask": tokenized["attention_mask"],
            "input": inputs
        }).with_format("torch")

        # The regression model is fed without the trailing EOS token.
        def remove_eos(example):
            example["input_ids"] = example["input_ids"][:-1]
            example["attention_mask"] = example["attention_mask"][:-1]
            return example

        return dataset.map(remove_eos)

    def __call__(
        self,
        stats: Dict[str, np.ndarray],
        target_texts: List[str],
    ) -> np.ndarray:
        """
        Calculates X-METRIC between the generated texts and target_texts.

        Parameters:
            stats (Dict[str, np.ndarray]): input statistics, including the
                generated texts ('greedy_texts' or sampled variants) and the
                source prompts in 'input_texts'
            target_texts (List[str]): ground-truth reference translations

        Returns:
            np.ndarray: X-METRIC score in [~0, 1] for each sample
            (higher = worse, since the raw 0-25 scale is inverted below).
        """
        # FIX: use the `target_texts` argument as references; the original
        # read stats["target_texts"], which is not among this metric's
        # declared stat dependencies.
        references = list(target_texts)

        if self.sample:
            if self.sample_strategy == "First":
                gen_texts = stats["first_sample_texts"]
            elif self.sample_strategy == "Best":
                gen_texts = stats["best_sample_texts"]
            elif self.sample_strategy == "BestNormalized":
                gen_texts = stats["best_normalized_sample_texts"]
            else:
                raise ValueError(f"Invalid sample strategy: {self.sample_strategy}")
        else:
            gen_texts = stats["greedy_texts"]

        translations = [
            # FIX: the original filtered translations with
            # source_ignore_regex; translation_ignore_regex was stored in
            # __init__ but never used.
            self._filter_translation(tr, self.translation_ignore_regex)
            for tr in gen_texts
        ]

        sources = [
            self._filter_text(src, self.source_ignore_regex)
            for src in stats["input_texts"]
        ]

        inputs = self._prepare_inputs(translations, references, sources)
        scores, _, _ = self.trainer.predict(test_dataset=inputs)
        # Invert the raw 0-25 regression output into a normalized score.
        return (25 - np.asarray(scores)) / 25
class MT5ForRegression(MT5PreTrainedModel):
    """MT5 encoder-decoder used as a regression model (MetricX-style).

    Instead of generating text, the model runs the encoder once, feeds the
    decoder a single dummy step, and reads the logit of one fixed vocabulary
    entry as a scalar quality prediction clamped to [0, 25].
    """

    def __init__(self, config: MT5Config):
        super().__init__(config)
        self.model_dim = config.d_model

        # Token embedding table shared between encoder and decoder stacks.
        self.shared = nn.Embedding(config.vocab_size, config.d_model)

        # Encoder: non-causal stack, caching disabled.
        encoder_config = copy.deepcopy(config)
        encoder_config.is_decoder = False
        encoder_config.use_cache = False
        encoder_config.is_encoder_decoder = False
        self.encoder = MT5Stack(encoder_config, self.shared)

        # Decoder: causal stack with its own layer count.
        decoder_config = copy.deepcopy(config)
        decoder_config.is_decoder = True
        decoder_config.is_encoder_decoder = False
        decoder_config.num_layers = config.num_decoder_layers
        self.decoder = MT5Stack(decoder_config, self.shared)

        # Projection back onto the vocabulary; one logit is read as the score.
        self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

        # Model parallel
        self.model_parallel = False
        self.device_map = None

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        decoder_attention_mask: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        decoder_head_mask: Optional[torch.FloatTensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], MT5ForRegressionOutput]:
        """Run one encoder pass plus a single dummy decoder step.

        Returns an MT5ForRegressionOutput with a scalar `predictions` value
        per batch element (clamped to [0, 25]) and an MSE `loss` when
        `labels` are provided.

        NOTE(review): despite the Tuple in the signature, this method always
        returns MT5ForRegressionOutput, regardless of `return_dict`.
        """
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )

        # FutureWarning: head_mask was separated into two input args - head_mask,
        # decoder_head_mask
        # NOTE(review): `__HEAD_MASK_WARNING_MSG` is referenced inside a class
        # body, so Python name-mangles it to
        # `_MT5ForRegression__HEAD_MASK_WARNING_MSG`; this branch would raise
        # NameError if it ever runs — confirm and consider a non-dunder alias.
        if head_mask is not None and decoder_head_mask is None:
            if self.config.num_layers == self.config.num_decoder_layers:
                warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
                decoder_head_mask = head_mask

        # Encode if needed (training, first prediction pass)
        if encoder_outputs is None:
            # Convert encoder inputs in embeddings if needed
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                inputs_embeds=inputs_embeds,
                head_mask=head_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            # Wrap a plain tuple of encoder outputs in the HF output dataclass.
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1]
                if len(encoder_outputs) > 1
                else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )

        hidden_states = encoder_outputs[0]

        if self.model_parallel:
            torch.cuda.set_device(self.decoder.first_device)

        # Create 1 step of dummy input for the decoder: a single pad/start
        # token (id 0) per batch element, shape (batch, 1).
        batch_size = input_ids.size(0)
        decoder_input_ids = torch.LongTensor([0]).repeat(batch_size).reshape(-1, 1)
        if torch.cuda.is_available():
            decoder_input_ids = decoder_input_ids.to(torch.device("cuda"))

        # Set device for model parallelism
        if self.model_parallel:
            torch.cuda.set_device(self.decoder.first_device)
            hidden_states = hidden_states.to(self.decoder.first_device)
            if decoder_input_ids is not None:
                decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)
            if attention_mask is not None:
                attention_mask = attention_mask.to(self.decoder.first_device)
            if decoder_attention_mask is not None:
                decoder_attention_mask = decoder_attention_mask.to(
                    self.decoder.first_device
                )

        # Decode
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            inputs_embeds=decoder_inputs_embeds,
            past_key_values=past_key_values,
            encoder_hidden_states=hidden_states,
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = decoder_outputs[0]

        # Set device for model parallelism
        if self.model_parallel:
            torch.cuda.set_device(self.encoder.first_device)
            self.lm_head = self.lm_head.to(self.encoder.first_device)
            sequence_output = sequence_output.to(self.lm_head.weight.device)

        if self.config.tie_word_embeddings:
            # Rescale output before projecting on vocab
            # See
            # https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
            sequence_output = sequence_output * (self.model_dim**-0.5)

        lm_logits = self.lm_head(sequence_output)

        # 250089: vocabulary index whose logit at the first (only) decoder
        # position is read as the regression score — presumably a reserved /
        # extra token id in the mT5 vocabulary; TODO confirm against the
        # checkpoint this model is loaded from.
        predictions = lm_logits[:, 0, 250089]

        # Clip to 0 to 25
        predictions = torch.clamp(predictions, 0, 25)

        loss = None
        if labels is not None:
            loss_fct = nn.MSELoss()
            # move labels to correct device to enable PP
            labels = labels.to(predictions.device)
            loss = loss_fct(predictions.view(-1), labels.view(-1))

        return MT5ForRegressionOutput(
            loss=loss,
            predictions=predictions,
        )
class AlignMatrixCalculator(StatCalculator):
    """
    Pairwise alignment-score matrix between sampled generations.

    Scores come from an external alignment scorer (e.g. AlignScore), not
    from an NLI model — the previous docstring describing DeBERTa
    entail/contradiction outputs was copied from another calculator.
    """

    def __init__(self, scorer):
        """
        Parameters:
            scorer: object exposing `score(claims=..., contexts=...)` and
                returning one alignment score per (claim, context) pair.
        """
        super().__init__(
            [
                "align_semantic_matrix",
            ],
            ["sample_texts"],
        )
        self.scorer = scorer

    def __call__(
        self,
        dependencies: Dict[str, np.array],
        texts: List[str],
        model: WhiteboxModel,
        max_new_tokens: int = 100,
    ) -> Dict[str, np.ndarray]:
        """
        Parameters:
            dependencies (Dict[str, np.ndarray]): input statistics, containing:
                - 'sample_texts' (List[List[str]]): several sampled generations
                  for each input text in the batch.
            texts (List[str]): input texts batch (unused here).
            model (Model): model used for generation (unused here).
            max_new_tokens (int): unused; kept for calculator interface parity.

        Returns:
            Dict[str, np.ndarray]: 'align_semantic_matrix' — for each batch
            element an (n_samples x n_samples) matrix of alignment scores.
        """
        batch_texts = dependencies["sample_texts"]

        batch_pairs = []
        batch_invs = []
        batch_counts = []
        # Loop variable renamed so it no longer shadows the `texts` parameter.
        for samples in batch_texts:
            # Sampling from an LLM often produces many identical outputs;
            # score each unique output only once. Whitespace-only generations
            # are normalized to the empty string.
            samples = [text if text.strip() != "" else "" for text in samples]
            unique_texts, inv = np.unique(samples, return_inverse=True)
            batch_pairs.append(list(itertools.product(unique_texts, unique_texts)))
            batch_invs.append(inv)
            batch_counts.append(len(unique_texts))

        E = []

        for i, pairs in tqdm(enumerate(batch_pairs)):
            first_texts, second_texts = zip(*pairs)
            sim_mat = np.array(
                self.scorer.score(claims=first_texts, contexts=second_texts)
            )

            unique_mat_shape = (batch_counts[i], batch_counts[i])
            sim_mat = sim_mat.reshape(unique_mat_shape)

            inv = batch_invs[i]
            # Expand the unique-pair matrix back to the full sample grid.
            E.append(sim_mat[inv, :][:, inv])

        E = np.stack(E)

        return {
            "align_semantic_matrix": E,
        }
sim_matrices = [] - for i, pairs in enumerate(batch_pairs): + for i, pairs in tqdm(enumerate(batch_pairs)): sim_scores = self.crossencoder.predict(pairs, batch_size=deberta_batch_size) unique_mat_shape = (batch_counts[i], batch_counts[i]) @@ -109,7 +111,7 @@ def __call__( sim_matrices = np.stack(sim_matrices) batch_samples_token_scores = [] - for sample_tokens, input_texts in zip(batch_sample_tokens, batch_input_texts): + for sample_tokens, input_texts in tqdm(zip(batch_sample_tokens, batch_input_texts)): samples_token_scores = [] for tokens in sample_tokens: if len(tokens) > 1: diff --git a/src/lm_polygraph/stat_calculators/entropy.py b/src/lm_polygraph/stat_calculators/entropy.py index 1696007fd..c5956992d 100644 --- a/src/lm_polygraph/stat_calculators/entropy.py +++ b/src/lm_polygraph/stat_calculators/entropy.py @@ -4,14 +4,19 @@ from .stat_calculator import StatCalculator from lm_polygraph.utils.model import WhiteboxModel - +import torch +from torch.nn import functional as F class EntropyCalculator(StatCalculator): """ Calculates entropy of probabilities at each token position in the generation of a Whitebox model. 
""" - def __init__(self): + def __init__( + self, + top_k: int = None, + ): + self.top_k = top_k super().__init__(["entropy"], ["greedy_log_probs"]) def __call__( @@ -39,6 +44,54 @@ def __call__( for s_lp in logprobs: entropies.append([]) for lp in s_lp: - mask = ~np.isinf(lp) - entropies[-1].append(-np.sum(np.array(lp[mask]) * np.exp(lp[mask]))) + lp = torch.tensor(lp) + if self.top_k is not None: + lp = torch.topk(lp, self.top_k).values + #mask = ~np.isinf(lp) + #lp = lp[mask] + #if self.top_k is not None: + # lp = np.sort(lp)[-self.top_k:] + #entropies[-1].append(-np.sum(np.array(lp) * np.exp(lp))) + entropies[-1].append(torch.distributions.Categorical(logits=lp).entropy().item()) return {"entropy": entropies} + +class SampleEntropyCalculator(StatCalculator): + def __init__( + self, + top_k: int = None, + ): + self.top_k = top_k + super().__init__(["sample_entropy"], ["sample_tokens_distributions"]) + + def __call__( + self, + dependencies: Dict[str, np.array], + texts: List[str] = None, + model: WhiteboxModel = None, + max_new_tokens: int = 100, + **kwargs, + ) -> Dict[str, np.ndarray]: + batch_distributions = dependencies["sample_tokens_distributions"] + + input_entropies = [] + for input_distributions in batch_distributions: + sample_entropies = [] + for sample_distributions in input_distributions: + token_entropies = [] + for token_dist in sample_distributions: + # Convert token_dist to a numpy array first, then to a torch tensor + token_dist_tensor = torch.tensor(token_dist) + + if self.top_k is not None: + token_dist_tensor = torch.topk(token_dist_tensor, self.top_k).values + + # Calculate entropy using torch's Categorical distribution + entropy = torch.distributions.Categorical(logits=token_dist_tensor).entropy() + token_entropies.append(entropy.item()) + + # Calculate mean entropy for the sample + sample_entropy = torch.mean(torch.tensor(token_entropies)) if token_entropies else 0 + sample_entropies.append(sample_entropy.item()) + 
input_entropies.append(sample_entropies) + + return {"sample_entropy": input_entropies} diff --git a/src/lm_polygraph/stat_calculators/greedy_align_matrix.py b/src/lm_polygraph/stat_calculators/greedy_align_matrix.py new file mode 100644 index 000000000..497118726 --- /dev/null +++ b/src/lm_polygraph/stat_calculators/greedy_align_matrix.py @@ -0,0 +1,76 @@ +import numpy as np + +import itertools +from typing import Dict, List + +from .stat_calculator import StatCalculator +from lm_polygraph.utils.model import WhiteboxModel +import torch.nn as nn +import torch +from tqdm import tqdm + + +class GreedyAlignMatrixCalculator(StatCalculator): + """ + Calculates the NLI semantic matrix for generation samples using DeBERTa model. + """ + + def __init__(self, scorer): + super().__init__( + [ + "greedy_align_semantic_matrix_forward", + "greedy_align_semantic_matrix_backward", + "greedy_align_semantic_matrix", + ], + ["greedy_texts", "sample_texts"], + ) + self.scorer = scorer + + def __call__( + self, + dependencies: Dict[str, np.array], + texts: List[str], + model: WhiteboxModel, + max_new_tokens: int = 100, + ) -> Dict[str, np.ndarray]: + batch_texts = dependencies["sample_texts"] + batch_greedy_texts = dependencies["greedy_texts"] + + batch_pairs = [] + batch_invs = [] + for texts, greedy_text in zip(batch_texts, batch_greedy_texts): + # Sampling from LLM often produces significant number of identical + # outputs. 
We only need to score pairs of unqiue outputs + texts = [text if text.strip() != "" else "" for text in texts] + greedy_text = greedy_text if greedy_text.strip() != "" else "" + unique_texts, inv = np.unique(texts, return_inverse=True) + batch_pairs.append(list(itertools.product([greedy_text], unique_texts))) + batch_invs.append(inv) + + E_f = [] + E_b = [] + E = [] + + for i, pairs in tqdm(enumerate(batch_pairs)): + sim_mat_f = [] + sim_mat_b = [] + first_texts, second_texts = zip(*pairs) + sim_mat_f = np.array(self.scorer.score(claims=first_texts, contexts=second_texts)) + sim_mat_b = np.array(self.scorer.score(claims=second_texts, contexts=first_texts)) + + inv = batch_invs[i] + + E_f.append(sim_mat_f[inv]) + E_b.append(sim_mat_b[inv]) + E.append((sim_mat_f[inv] + sim_mat_b[inv]) / 2) + + + E_f = np.stack(E_f) + E_b = np.stack(E_b) + E = np.stack(E) + + return { + "greedy_align_semantic_matrix_forward": E_f, + "greedy_align_semantic_matrix_backward": E_b, + "greedy_align_semantic_matrix": E, + } diff --git a/src/lm_polygraph/stat_calculators/greedy_probs.py b/src/lm_polygraph/stat_calculators/greedy_probs.py index c94468fb5..5c746c1ab 100644 --- a/src/lm_polygraph/stat_calculators/greedy_probs.py +++ b/src/lm_polygraph/stat_calculators/greedy_probs.py @@ -134,14 +134,9 @@ def __call__( seq = sequences[i, idx:].cpu() else: seq = sequences[i, 1:].cpu() - length, text_length = len(seq), len(seq) - for j in range(len(seq)): - if seq[j] == model.tokenizer.eos_token_id: - length = j + 1 - text_length = j - break + length = len(seq) cut_sequences.append(seq[:length].tolist()) - cut_texts.append(model.tokenizer.decode(seq[:text_length])) + cut_texts.append(model.tokenizer.decode(seq[:length], skip_special_tokens=True)) cut_logits.append(logits[i, :length, :].cpu().numpy()) cut_alternatives.append([[] for _ in range(length)]) for j in range(length): diff --git a/src/lm_polygraph/stat_calculators/greedy_rouge_matrix.py 
class GreedyRougeLSemanticMatrixCalculator(StatCalculator):
    """
    ROUGE-L F-measure between the greedy generation and each sampled
    generation of every input in the batch.
    """

    def __init__(self):
        super().__init__(
            [
                "greedy_rouge_semantic_matrix",
            ],
            ["greedy_texts", "sample_texts"],
        )
        self.scorer = rouge_scorer.RougeScorer(['rougeL'], use_stemmer=True)

    def __call__(
        self,
        dependencies: Dict[str, np.array],
        texts: List[str],
        model: WhiteboxModel,
        max_new_tokens: int = 100,
    ) -> Dict[str, np.ndarray]:
        sample_groups = dependencies["sample_texts"]
        greedy_outputs = dependencies["greedy_texts"]

        rows = []
        for samples, greedy in zip(sample_groups, greedy_outputs):
            # Sampling often yields duplicate outputs — compute ROUGE-L once
            # per unique sample, then broadcast back via the inverse index.
            uniques, inverse = np.unique(samples, return_inverse=True)
            unique_scores = np.array(
                [
                    self.scorer.score(greedy, candidate)['rougeL'].fmeasure
                    for candidate in uniques
                ]
            )
            rows.append(unique_scores[inverse])

        return {
            "greedy_rouge_semantic_matrix": np.stack(rows),
        }
+ """ + + def __init__(self, nli_model): + super().__init__( + [ + "greedy_semantic_matrix_forward", + "greedy_semantic_matrix_backward", + "greedy_semantic_matrix", + ], + ["greedy_texts", "sample_texts"], + ) + self.is_deberta_setup = False + self.nli_model = nli_model + + def __call__( + self, + dependencies: Dict[str, np.array], + texts: List[str], + model: WhiteboxModel, + max_new_tokens: int = 100, + ) -> Dict[str, np.ndarray]: + deberta = self.nli_model + deberta_batch_size = deberta.batch_size + + batch_texts = dependencies["sample_texts"] + batch_greedy_texts = dependencies["greedy_texts"] + + batch_pairs = [] + batch_invs = [] + for texts, greedy_text in zip(batch_texts, batch_greedy_texts): + # Sampling from LLM often produces significant number of identical + # outputs. We only need to score pairs of unqiue outputs + unique_texts, inv = np.unique(texts, return_inverse=True) + batch_pairs.append(list(itertools.product([greedy_text], unique_texts))) + batch_invs.append(inv) + + device = deberta.device + ent_id = deberta.deberta.config.label2id["ENTAILMENT"] + contra_id = deberta.deberta.config.label2id["CONTRADICTION"] + neutral_id = deberta.deberta.config.label2id["NEUTRAL"] + + softmax = nn.Softmax(dim=1) + tokenizer = deberta.deberta_tokenizer + + E_f = [] + E_b = [] + E = [] + N_f = [] + N_b = [] + N = [] + C_f = [] + C_b = [] + C = [] + + for i, pairs in enumerate(batch_pairs): + dl = torch.utils.data.DataLoader(pairs, batch_size=deberta_batch_size) + probs_f = [] + probs_b = [] + + for first_texts, second_texts in tqdm(dl): + batch = list(zip(first_texts, second_texts)) + encoded = tokenizer.batch_encode_plus( + batch, padding=True, return_tensors="pt" + ).to(device) + logits = deberta.deberta(**encoded).logits.detach().to(device) + probs_f.append(softmax(logits).cpu().detach()) + + batch = list(zip(second_texts, first_texts)) + encoded = tokenizer.batch_encode_plus( + batch, padding=True, return_tensors="pt" + ).to(device) + logits = 
deberta.deberta(**encoded).logits.detach().to(device) + probs_b.append(softmax(logits).cpu().detach()) + + probs_f = torch.cat(probs_f, dim=0) + probs_b = torch.cat(probs_b, dim=0) + + inv = batch_invs[i] + + entail_probs_f = probs_f[:, ent_id] + entail_probs_b = probs_b[:, ent_id] + contra_probs_f = probs_f[:, contra_id] + contra_probs_b = probs_b[:, contra_id] + neutral_probs_f = probs_f[:, neutral_id] + neutral_probs_b = probs_b[:, neutral_id] + + E_f.append(entail_probs_f[inv].numpy()) + E_b.append(entail_probs_b[inv].numpy()) + E.append((entail_probs_f[inv].numpy() + entail_probs_b[inv].numpy()) / 2) + N_f.append(neutral_probs_f[inv].numpy()) + N_b.append(neutral_probs_b[inv].numpy()) + N.append((neutral_probs_f[inv].numpy() + neutral_probs_b[inv].numpy()) / 2) + C_f.append(contra_probs_f[inv].numpy()) + C_b.append(contra_probs_b[inv].numpy()) + C.append((contra_probs_f[inv].numpy() + contra_probs_b[inv].numpy()) / 2) + + E_f = np.stack(E_f) + E_b = np.stack(E_b) + E = np.stack(E) + N_f = np.stack(N_f) + N_b = np.stack(N_b) + N = np.stack(N) + C_f = np.stack(C_f) + C_b = np.stack(C_b) + C = np.stack(C) + + return { + "greedy_semantic_matrix_forward": E_f, + "greedy_semantic_matrix_backward": E_b, + "greedy_semantic_matrix": E, + "greedy_semantic_matrix_neutral_forward": N_f, + "greedy_semantic_matrix_neutral_backward": N_b, + "greedy_semantic_matrix_neutral": N, + "greedy_semantic_matrix_contra_forward": C_f, + "greedy_semantic_matrix_contra_backward": C_b, + "greedy_semantic_matrix_contra": C, + } + + +class ConcatGreedySemanticMatrixCalculator(StatCalculator): + """ + Calculates the NLI semantic matrix for generation samples using DeBERTa model. 
+ """ + + def __init__(self, nli_model): + super().__init__( + [ + "concat_greedy_semantic_matrix_forward", + "concat_greedy_semantic_matrix_backward", + "concat_greedy_semantic_matrix", + ], + ["greedy_texts", "no_fewshot_input_texts", "sample_texts"], + ) + self.is_deberta_setup = False + self.nli_model = nli_model + + def __call__( + self, + dependencies: Dict[str, np.array], + texts: List[str], + model: WhiteboxModel, + max_new_tokens: int = 100, + ) -> Dict[str, np.ndarray]: + deberta = self.nli_model + deberta_batch_size = deberta.batch_size + + batch_texts = dependencies["sample_texts"] + batch_greedy_texts = dependencies["greedy_texts"] + input_texts = dependencies["no_fewshot_input_texts"] + + + batch_pairs = [] + batch_invs = [] + for texts, greedy_text, input_text in zip(batch_texts, batch_greedy_texts, input_texts): + texts = [input_text + text for text in texts] + # Sampling from LLM often produces significant number of identical + # outputs. We only need to score pairs of unqiue outputs + unique_texts, inv = np.unique(texts, return_inverse=True) + batch_pairs.append(list(itertools.product([input_text + greedy_text], unique_texts))) + batch_invs.append(inv) + + device = deberta.device + ent_id = deberta.deberta.config.label2id["ENTAILMENT"] + contra_id = deberta.deberta.config.label2id["CONTRADICTION"] + neutral_id = deberta.deberta.config.label2id["NEUTRAL"] + + softmax = nn.Softmax(dim=1) + tokenizer = deberta.deberta_tokenizer + + E_f = [] + E_b = [] + E = [] + N_f = [] + N_b = [] + N = [] + C_f = [] + C_b = [] + C = [] + + for i, pairs in enumerate(batch_pairs): + dl = torch.utils.data.DataLoader(pairs, batch_size=deberta_batch_size) + probs_f = [] + probs_b = [] + + for first_texts, second_texts in tqdm(dl): + batch = list(zip(first_texts, second_texts)) + encoded = tokenizer.batch_encode_plus( + batch, padding=True, return_tensors="pt" + ).to(device) + logits = deberta.deberta(**encoded).logits.detach().to(device) + 
probs_f.append(softmax(logits).cpu().detach()) + + batch = list(zip(second_texts, first_texts)) + encoded = tokenizer.batch_encode_plus( + batch, padding=True, return_tensors="pt" + ).to(device) + logits = deberta.deberta(**encoded).logits.detach().to(device) + probs_b.append(softmax(logits).cpu().detach()) + + probs_f = torch.cat(probs_f, dim=0) + probs_b = torch.cat(probs_b, dim=0) + + inv = batch_invs[i] + + entail_probs_f = probs_f[:, ent_id] + entail_probs_b = probs_b[:, ent_id] + contra_probs_f = probs_f[:, contra_id] + contra_probs_b = probs_b[:, contra_id] + neutral_probs_f = probs_f[:, neutral_id] + neutral_probs_b = probs_b[:, neutral_id] + + E_f.append(entail_probs_f[inv].numpy()) + E_b.append(entail_probs_b[inv].numpy()) + E.append((entail_probs_f[inv].numpy() + entail_probs_b[inv].numpy()) / 2) + N_f.append(neutral_probs_f[inv].numpy()) + N_b.append(neutral_probs_b[inv].numpy()) + N.append((neutral_probs_f[inv].numpy() + neutral_probs_b[inv].numpy()) / 2) + C_f.append(contra_probs_f[inv].numpy()) + C_b.append(contra_probs_b[inv].numpy()) + C.append((contra_probs_f[inv].numpy() + contra_probs_b[inv].numpy()) / 2) + + E_f = np.stack(E_f) + E_b = np.stack(E_b) + E = np.stack(E) + N_f = np.stack(N_f) + N_b = np.stack(N_b) + N = np.stack(N) + C_f = np.stack(C_f) + C_b = np.stack(C_b) + C = np.stack(C) + + return { + "concat_greedy_semantic_matrix_forward": E_f, + "concat_greedy_semantic_matrix_backward": E_b, + "concat_greedy_semantic_matrix": E, + "concat_greedy_semantic_matrix_neutral_forward": N_f, + "concat_greedy_semantic_matrix_neutral_backward": N_b, + "concat_greedy_semantic_matrix_neutral": N, + "concat_greedy_semantic_matrix_contra_forward": C_f, + "concat_greedy_semantic_matrix_contra_backward": C_b, + "concat_greedy_semantic_matrix_contra": C, + } diff --git a/src/lm_polygraph/stat_calculators/greedy_similarity.py b/src/lm_polygraph/stat_calculators/greedy_similarity.py new file mode 100644 index 000000000..cf2435985 --- /dev/null +++ 
class GreedySimilarityCalculator(StatCalculator):
    """
    Cross-encoder (STS) similarity between the greedy generation and each
    sampled generation, reported for both pair orders and their average.
    """

    def __init__(self, nli_model):
        """
        Parameters:
            nli_model: only consulted for its `batch_size` when running the
                cross-encoder; the NLI model itself is not used for scoring.
        """
        super().__init__(
            [
                "greedy_sentence_similarity_forward",
                "greedy_sentence_similarity_backward",
                "greedy_sentence_similarity",
            ],
            ["input_texts", "sample_texts", "greedy_texts"],
        )

        self.crossencoder_setup = False
        self.nli_model = nli_model

    def _setup(self, device="cuda"):
        # Lazily instantiate the cross-encoder on the generator model's device.
        self.crossencoder = CrossEncoder(
            "cross-encoder/stsb-roberta-large", device=device
        )

    def __call__(
        self,
        dependencies: Dict[str, np.array],
        texts: List[str],
        model: WhiteboxModel,
        max_new_tokens: int = 100,
    ) -> Dict[str, np.ndarray]:
        """
        Parameters:
            dependencies: must contain 'sample_texts' and 'greedy_texts'.
            texts: input texts batch (unused here).
            model: used only to choose the device for the cross-encoder.
            max_new_tokens: unused; kept for calculator interface parity.

        Returns:
            Dict[str, np.ndarray]: forward, backward and averaged similarity
            arrays of shape (batch, n_samples).
        """
        device = model.device()

        if not self.crossencoder_setup:
            self._setup(device=device)
            self.crossencoder_setup = True

        # Unused locals from the previous version (tokenizer, input texts,
        # batch_counts) have been removed.
        batch_texts = dependencies["sample_texts"]
        batch_greedy_texts = dependencies["greedy_texts"]
        predict_batch_size = self.nli_model.batch_size

        batch_pairs = []
        batch_invs = []
        for samples, greedy_text in zip(batch_texts, batch_greedy_texts):
            # Sampling often yields duplicates; score each unique output once
            # and broadcast back through the inverse index.
            unique_texts, inv = np.unique(samples, return_inverse=True)
            batch_pairs.append(list(itertools.product([greedy_text], unique_texts)))
            batch_invs.append(inv)

        sim_arrays_f = []
        sim_arrays_b = []
        sim_arrays = []
        for i, pairs in tqdm(enumerate(batch_pairs)):
            pairs_b = [(b, a) for a, b in pairs]
            sim_scores_f = self.crossencoder.predict(pairs, batch_size=predict_batch_size)
            sim_scores_b = self.crossencoder.predict(pairs_b, batch_size=predict_batch_size)

            inv = batch_invs[i]

            sim_arrays_f.append(sim_scores_f[inv])
            sim_arrays_b.append(sim_scores_b[inv])
            sim_arrays.append((sim_scores_f[inv] + sim_scores_b[inv]) / 2)

        sim_arrays_f = np.stack(sim_arrays_f)
        sim_arrays_b = np.stack(sim_arrays_b)
        sim_arrays = np.stack(sim_arrays)

        return {
            "greedy_sentence_similarity_forward": sim_arrays_f,
            "greedy_sentence_similarity_backward": sim_arrays_b,
            "greedy_sentence_similarity": sim_arrays,
        }
+ """ + + def __init__(self): + super().__init__( + [ + "rouge_semantic_matrix", + ], + ["sample_texts"], + ) + self.scorer = rouge_scorer.RougeScorer(['rougeL'], use_stemmer=True) + + def __call__( + self, + dependencies: Dict[str, np.array], + texts: List[str], + model: WhiteboxModel, + max_new_tokens: int = 100, + ) -> Dict[str, np.ndarray]: + + batch_texts = dependencies["sample_texts"] + + batch_pairs = [] + batch_invs = [] + batch_counts = [] + for texts in batch_texts: + # Sampling from LLM often produces significant number of identical + # outputs. We only need to score pairs of unqiue outputs + unique_texts, inv = np.unique(texts, return_inverse=True) + batch_pairs.append(list(itertools.product(unique_texts, unique_texts))) + batch_invs.append(inv) + batch_counts.append(len(unique_texts)) + + E = [] + + for i, pairs in enumerate(batch_pairs): + sim_mat = [] + for first_texts, second_texts in pairs: + sim_mat.append(self.scorer.score(first_texts, second_texts)['rougeL'].fmeasure) + + sim_mat = np.array(sim_mat) + unique_mat_shape = (batch_counts[i], batch_counts[i]) + sim_mat = sim_mat.reshape(unique_mat_shape) + + inv = batch_invs[i] + + E.append(sim_mat[inv, :][:, inv]) + + E = np.stack(E) + + return { + "rouge_semantic_matrix": E, + } diff --git a/src/lm_polygraph/stat_calculators/sample.py b/src/lm_polygraph/stat_calculators/sample.py index 96a447a57..f05d9901e 100644 --- a/src/lm_polygraph/stat_calculators/sample.py +++ b/src/lm_polygraph/stat_calculators/sample.py @@ -86,18 +86,21 @@ class SamplingGenerationCalculator(StatCalculator): * probabilities of the sampled tokens generation """ - def __init__(self, samples_n: int = 10): + def __init__(self, samples_n: int = 10, n_alternatives: int = 10): """ Parameters: samples_n (int): number of samples to generate per input text. 
Default: 10 """ self.samples_n = samples_n + self.n_alternatives = n_alternatives super().__init__( [ "sample_log_probs", "sample_tokens", "sample_texts", "sample_log_likelihoods", + "sample_tokens_distributions", + "sample_tokens_alternatives", ], [], ) @@ -123,6 +126,7 @@ def __call__( - 'sample_tokens' (List[List[List[float]]]): tokenized 'sample_texts', - 'sample_log_probs' (List[List[float]]): sum of the log probabilities at each token of the sampling generation. - 'sample_log_likelihoods' (List[List[List[float]]]): log probabilities at each token of the sampling generation. + - 'token_distributions' (List[List[List[float]]]): full token probability distributions for each generated token. """ batch: Dict[str, torch.Tensor] = model.tokenize(texts) batch = {k: v.to(model.device()) for k, v in batch.items()} @@ -152,31 +156,123 @@ def __call__( tokens = [[] for _ in range(len(texts))] texts = [[] for _ in range(len(texts))] log_likelihoods = [[] for _ in range(len(texts))] + token_distributions = [[] for _ in range(len(texts))] + alternatives = [[] for _ in range(len(texts))] + + if model.model_type == "Seq2SeqLM": sequences = [seq[1:] for seq in sequences] + for i in range(len(logits)): - log_prob, ll, toks = 0, [], [] + log_prob, ll, toks, distributions = 0, [], [], [] inp_size = ( len(batch["input_ids"][int(i / self.samples_n)]) if model.model_type == "CausalLM" else 0 ) - for j in range(len(sequences[i]) - inp_size): + gen_size = len(sequences[i]) - inp_size + sample_alternatives = [[] for _ in range(gen_size)] + for j in range(gen_size): cur_token = sequences[i][j + inp_size].item() log_prob += logits[i][j][cur_token].item() - if cur_token == model.tokenizer.eos_token_id: - break ll.append(logits[i][j][cur_token].item()) toks.append(cur_token) + lt = logits[i][j].cpu().numpy() + distributions.append(lt) + + best_tokens = np.argpartition(lt, -self.n_alternatives) + ln = len(best_tokens) + best_tokens = best_tokens[ln - self.n_alternatives : ln] + for t in 
best_tokens: + sample_alternatives[j].append((t.item(), lt[t].item())) + sample_alternatives[j].sort( + key=lambda x: x[0] == cur_token, + reverse=True, + ) + log_likelihoods[int(i / self.samples_n)].append(ll) log_probs[int(i / self.samples_n)].append(log_prob) tokens[int(i / self.samples_n)].append(toks) - texts[int(i / self.samples_n)].append(model.tokenizer.decode(toks)) + texts[int(i / self.samples_n)].append(model.tokenizer.decode(toks, skip_special_tokens=True)) + token_distributions[int(i / self.samples_n)].append(distributions) + alternatives[int(i / self.samples_n)].append(sample_alternatives) return { "sample_log_likelihoods": log_likelihoods, "sample_log_probs": log_probs, "sample_tokens": tokens, "sample_texts": texts, + "sample_tokens_distributions": token_distributions, + "sample_tokens_alternatives": alternatives, + } + +class FirstSampleCalculator(StatCalculator): + def __init__(self): + super().__init__( + [ + "first_sample_texts", + ], + [ + "sample_texts", + ] + ) + + def __call__( + self, + dependencies: Dict[str, np.array], + texts: List[str], + model: WhiteboxModel, + max_new_tokens: int = 100, + ) -> Dict[str, np.ndarray]: + sample_texts = dependencies["sample_texts"] + first_sample_texts = [st[0] for st in sample_texts] + + return { + "first_sample_texts": first_sample_texts, + } + +class BestSampleCalculator(StatCalculator): + def __init__(self): + super().__init__( + [ + "best_sample_texts", + "best_sample_text_ids", + "best_normalized_sample_texts", + "best_normalized_sample_text_ids", + ], + [ + "sample_texts", + "sample_log_probs", + "sample_log_likelihoods", + ] + ) + + def __call__( + self, + dependencies: Dict[str, np.array], + texts: List[str], + model: WhiteboxModel, + max_new_tokens: int = 100, + ) -> Dict[str, np.ndarray]: + best_sample_texts = [] + best_sample_text_ids = [] + best_normalized_sample_texts = [] + best_normalized_sample_text_ids = [] + + for batch_i, (sample_texts, sample_log_probs, sample_log_likelihoods) in 
enumerate(zip(dependencies["sample_texts"], dependencies["sample_log_probs"], dependencies["sample_log_likelihoods"])): + best_i = np.argmax(sample_log_probs) + best_sample_texts.append(sample_texts[best_i]) + best_sample_text_ids.append(best_i) + + ppls = [np.mean(ll) for ll in sample_log_likelihoods] + best_ppl_i = np.argmax(ppls) + best_normalized_sample_texts.append(sample_texts[best_ppl_i]) + best_normalized_sample_text_ids.append(best_ppl_i) + + return { + "best_sample_texts": best_sample_texts, + "best_sample_text_ids": best_sample_text_ids, + "best_normalized_sample_texts": best_normalized_sample_texts, + "best_normalized_sample_text_ids": best_normalized_sample_text_ids, } diff --git a/src/lm_polygraph/stat_calculators/sample_alternatives_nli.py b/src/lm_polygraph/stat_calculators/sample_alternatives_nli.py new file mode 100644 index 000000000..1832278af --- /dev/null +++ b/src/lm_polygraph/stat_calculators/sample_alternatives_nli.py @@ -0,0 +1,107 @@ +import numpy as np + +from typing import Dict, List, Tuple + +from .stat_calculator import StatCalculator +from lm_polygraph.utils.model import WhiteboxModel +from lm_polygraph.utils.deberta import Deberta +from collections import defaultdict +import torch.nn as nn +import string + + +def _eval_nli_model(nli_queue: List[Tuple[str, str]], deberta: Deberta) -> List[str]: + nli_set = list(set(nli_queue)) + + softmax = nn.Softmax(dim=1) + w_probs = defaultdict(lambda: defaultdict(lambda: None)) + for k in range(0, len(nli_set), deberta.batch_size): + batch = nli_set[k : k + deberta.batch_size] + encoded = deberta.deberta_tokenizer.batch_encode_plus( + batch, padding=True, return_tensors="pt" + ).to(deberta.device) + logits = deberta.deberta(**encoded).logits + logits = logits.detach().to(deberta.device) + for (wi, wj), prob in zip(batch, softmax(logits).cpu().detach()): + w_probs[wi][wj] = prob + + classes = [] + for w1, w2 in nli_queue: + pr = w_probs[w1][w2] + id = pr.argmax() + ent_id = 
deberta.deberta.config.label2id["ENTAILMENT"] + contra_id = deberta.deberta.config.label2id["CONTRADICTION"] + if id == ent_id: + str_class = "entail" + elif id == contra_id: + str_class = "contra" + else: + str_class = "neutral" + classes.append(str_class) + return classes + + +class SampleAlternativesNLICalculator(StatCalculator): + def __init__(self, nli_model): + super().__init__( + [ + "sample_tokens_alternatives_nli", + ], + ["sample_tokens_alternatives"], + ) + + self.nli_model = nli_model + + def _strip(self, w: str): + return w.strip(string.punctuation + " \n") + + def __call__( + self, + dependencies: Dict[str, np.array], + texts: List[str], + model: WhiteboxModel, + max_new_tokens: int = 100, + **kwargs, + ) -> Dict[str, np.ndarray]: + batch_alternatives = dependencies["sample_tokens_alternatives"] + batch_alternatives_nli = [] + for samples_alternatives in batch_alternatives: + sample_alternatives_nli = [] + for sample_alternatives in samples_alternatives: + nli_matrixes = [] + for w_number, word_alternatives in enumerate(sample_alternatives): + nli_queue = [] + nli_matrix = [ + ["" for _ in range(len(word_alternatives))] + for _ in range(len(word_alternatives)) + ] + if len(word_alternatives) > 0 and not isinstance( + word_alternatives[0][0], + str, + ): + word_alternatives = [ + (model.tokenizer.decode([alt]), prob) + for alt, prob in word_alternatives + ] + words = [self._strip(alt[0]) for alt in word_alternatives] + for wi in words: + nli_queue.append((words[0], wi)) + nli_queue.append((wi, words[0])) + + nli_classes = _eval_nli_model(nli_queue, self.nli_model) + nli_class = defaultdict(lambda: None) + for nli_cl, (w1, w2) in zip(nli_classes, nli_queue): + nli_class[w1, w2] = nli_cl + + for i, wi in enumerate(words): + for j, wj in enumerate(words): + # Only calculate NLI with sample token + if i > 0 and j > 0: + continue + nli_matrix[i][j] = nli_class[wi, wj] + + nli_matrixes.append(nli_matrix) + sample_alternatives_nli.append(nli_matrixes) + 
batch_alternatives_nli.append(sample_alternatives_nli) + + return {"sample_tokens_alternatives_nli": batch_alternatives_nli} diff --git a/src/lm_polygraph/stat_calculators/semantic_matrix.py b/src/lm_polygraph/stat_calculators/semantic_matrix.py index 8a6862f9d..036046ce3 100644 --- a/src/lm_polygraph/stat_calculators/semantic_matrix.py +++ b/src/lm_polygraph/stat_calculators/semantic_matrix.py @@ -7,6 +7,7 @@ from lm_polygraph.utils.model import WhiteboxModel import torch.nn as nn import torch +from tqdm import tqdm softmax = nn.Softmax(dim=1) @@ -74,15 +75,17 @@ def __call__( device = deberta.device ent_id = deberta.deberta.config.label2id["ENTAILMENT"] contra_id = deberta.deberta.config.label2id["CONTRADICTION"] + neutral_id = deberta.deberta.config.label2id["NEUTRAL"] softmax = nn.Softmax(dim=1) tokenizer = deberta.deberta_tokenizer E = [] C = [] + N = [] P = [] - for i, pairs in enumerate(batch_pairs): + for i, pairs in enumerate(tqdm(batch_pairs)): dl = torch.utils.data.DataLoader(pairs, batch_size=deberta_batch_size) probs = [] for first_texts, second_texts in dl: @@ -91,34 +94,161 @@ def __call__( batch, padding=True, return_tensors="pt" ).to(device) logits = deberta.deberta(**encoded).logits.detach().to(device) - probs.append(softmax(logits).cpu().detach()) + probs.append(softmax(logits).detach()) probs = torch.cat(probs, dim=0) entail_probs = probs[:, ent_id] contra_probs = probs[:, contra_id] + neutral_probs = probs[:, neutral_id] class_preds = probs.argmax(-1) unique_mat_shape = (batch_counts[i], batch_counts[i]) - unique_E = entail_probs.view(unique_mat_shape).numpy() - unique_C = contra_probs.view(unique_mat_shape).numpy() - unique_P = class_preds.view(unique_mat_shape).numpy() + unique_E = entail_probs.view(unique_mat_shape) + unique_C = contra_probs.view(unique_mat_shape) + unique_N = neutral_probs.view(unique_mat_shape) + unique_P = class_preds.view(unique_mat_shape) inv = batch_invs[i] # Recover full matrices from unques by gathering along both 
axes # using inverse index - E.append(unique_E[inv, :][:, inv]) - C.append(unique_C[inv, :][:, inv]) - P.append(unique_P[inv, :][:, inv]) + E.append(unique_E.cpu().numpy()[inv, :][:, inv]) + C.append(unique_C.cpu().numpy()[inv, :][:, inv]) + N.append(unique_N.cpu().numpy()[inv, :][:, inv]) + P.append(unique_P.cpu().numpy()[inv, :][:, inv]) E = np.stack(E) C = np.stack(C) + N = np.stack(N) P = np.stack(P) return { "semantic_matrix_entail": E, "semantic_matrix_contra": C, + "semantic_matrix_neutral": N, "semantic_matrix_classes": P, "entailment_id": deberta.deberta.config.label2id["ENTAILMENT"], } + + +class ConcatSemanticMatrixCalculator(StatCalculator): + """ + Calculates the NLI semantic matrix for generation samples using DeBERTa model. + """ + + def __init__(self, nli_model): + super().__init__( + [ + "concat_semantic_matrix_entail", + "concat_semantic_matrix_contra", + "concat_semantic_matrix_classes", + "entailment_id", + ], + ["no_fewshot_input_texts", "sample_texts"], + ) + self.is_deberta_setup = False + self.nli_model = nli_model + + def __call__( + self, + dependencies: Dict[str, np.array], + texts: List[str], + model: WhiteboxModel, + max_new_tokens: int = 100, + ) -> Dict[str, np.ndarray]: + """ + Calculates the NLI semantic matrix for generation samples using DeBERTa model. + + Parameters: + dependencies (Dict[str, np.ndarray]): input statistics, containing: + - 'sample_texts' (List[List[str]]): several sampling generations + for each input text in the batch. + texts (List[str]): Input texts batch used for model generation. + model (Model): Model used for generation. + max_new_tokens (int): Maximum number of new tokens at model generation. Default: 100. + Returns: + Dict[str, np.ndarray]: dictionary with the following items: + - 'semantic_matrix_entail' (List[np.array]): for each input text: quadratic matrix of size + n_samples x n_samples, with probabilities of 'ENTAILMENT' output of DeBERTa. 
+ - 'concat_semantic_matrix_contra' (List[np.array]): for each input text: quadratic matrix of size + n_samples x n_samples, with probabilities of 'CONTRADICTION' output of DeBERTa. + - 'concat_semantic_matrix_classes' (List[np.array]): for each input text: quadratic matrix of size + n_samples x n_samples, with the NLI label id corresponding to the DeBERTa prediction. + """ + + deberta = self.nli_model + deberta_batch_size = deberta.batch_size + batch_texts = dependencies["sample_texts"] + input_texts = dependencies["no_fewshot_input_texts"] + + batch_pairs = [] + batch_invs = [] + batch_counts = [] + for input_text, texts in zip(input_texts, batch_texts): + texts = [input_text + text for text in texts] + # Sampling from LLM often produces significant number of identical + # outputs. We only need to score pairs of unique outputs + unique_texts, inv = np.unique(texts, return_inverse=True) + batch_pairs.append(list(itertools.product(unique_texts, unique_texts))) + batch_invs.append(inv) + batch_counts.append(len(unique_texts)) + + device = deberta.device + ent_id = deberta.deberta.config.label2id["ENTAILMENT"] + contra_id = deberta.deberta.config.label2id["CONTRADICTION"] + neutral_id = deberta.deberta.config.label2id["NEUTRAL"] + + softmax = nn.Softmax(dim=1) + tokenizer = deberta.deberta_tokenizer + + E = [] + C = [] + N = [] + P = [] + + for i, pairs in enumerate(tqdm(batch_pairs)): + dl = torch.utils.data.DataLoader(pairs, batch_size=deberta_batch_size) + probs = [] + for first_texts, second_texts in dl: + batch = list(zip(first_texts, second_texts)) + encoded = tokenizer.batch_encode_plus( + batch, padding=True, return_tensors="pt" + ).to(device) + logits = deberta.deberta(**encoded).logits.detach().to(device) + probs.append(softmax(logits).detach()) + probs = torch.cat(probs, dim=0) + + entail_probs = probs[:, ent_id] + contra_probs = probs[:, contra_id] + neutral_probs = probs[:, neutral_id] + class_preds = probs.argmax(-1) + + unique_mat_shape = (batch_counts[i], 
batch_counts[i]) + + unique_E = entail_probs.view(unique_mat_shape) + unique_C = contra_probs.view(unique_mat_shape) + unique_N = neutral_probs.view(unique_mat_shape) + unique_P = class_preds.view(unique_mat_shape) + + inv = batch_invs[i] + + # Recover full matrices from unques by gathering along both axes + # using inverse index + E.append(unique_E.cpu().numpy()[inv, :][:, inv]) + C.append(unique_C.cpu().numpy()[inv, :][:, inv]) + N.append(unique_N.cpu().numpy()[inv, :][:, inv]) + P.append(unique_P.cpu().numpy()[inv, :][:, inv]) + + E = np.stack(E) + C = np.stack(C) + N = np.stack(N) + P = np.stack(P) + + return { + "concat_semantic_matrix_entail": E, + "concat_semantic_matrix_contra": C, + "concat_semantic_matrix_neutral": N, + "concat_semantic_matrix_classes": P, + "entailment_id": deberta.deberta.config.label2id["ENTAILMENT"], + } diff --git a/src/lm_polygraph/utils/dataset.py b/src/lm_polygraph/utils/dataset.py index 05c79ea1c..49a1c29a4 100644 --- a/src/lm_polygraph/utils/dataset.py +++ b/src/lm_polygraph/utils/dataset.py @@ -184,6 +184,8 @@ def from_datasets( """ dataset_name, dataset = Dataset.load_hf_dataset(dataset_path, split, **kwargs) few_shot_dataset = None + #no_few_shot_x = None + if n_shot > 0: _, few_shot_dataset = Dataset.load_hf_dataset( dataset_path, few_shot_split, **kwargs @@ -417,7 +419,11 @@ def doc_to_text(doc, prompt, i=0): else: x = dataset[x_column] y = dataset[y_column] + + #if no_few_shot_x is None: + # no_few_shot_x = x + #return Dataset(x, y, batch_size, no_few_shot_x) return Dataset(x, y, batch_size) @staticmethod diff --git a/src/lm_polygraph/utils/manager.py b/src/lm_polygraph/utils/manager.py index 263034002..99449b4bb 100644 --- a/src/lm_polygraph/utils/manager.py +++ b/src/lm_polygraph/utils/manager.py @@ -27,11 +27,12 @@ def _order_calculators( stats: List[str], + existing_stats: Set[str], stat_calculators: Dict[str, StatCalculator], stat_dependencies: Dict[str, List[str]], ) -> Tuple[List[str], Set[str]]: ordered: List[str] 
= [] - have_stats: Set[str] = set() + have_stats: Set[str] = set(existing_stats) while len(stats) > 0: stat = stats[0] if stat in have_stats: @@ -245,6 +246,7 @@ def __init__( generation_metrics: List[GenerationMetric], ue_metrics: List[UEMetric], processors: List[Processor], + batch_size: int = 1, train_data: Dataset = None, background_train_data: Dataset = None, ignore_exceptions: bool = True, @@ -256,6 +258,10 @@ def __init__( max_new_tokens: int = 100, background_train_dataset_max_new_tokens: int = 100, cache_path=os.path.expanduser("~") + "/.cache", + save_stats: List[str] = [], + entropy_top_k: Optional[int] = None, + state: str = 'init', + save_path: Optional[str] = None, ): """ Parameters: @@ -279,21 +285,12 @@ def __init__( max_new_tokens (int): Maximum new tokens to use in generation. Default: 100. """ - stat_calculators_dict, stat_dependencies_dict = register_stat_calculators( - deberta_batch_size=deberta_batch_size, - deberta_device=deberta_device, - language=language, - cache_path=cache_path, - model=model, - ) - - self.stat_calculators_dict = stat_calculators_dict - self.model: Model = model self.train_data: Dataset = train_data self.background_train_data: Dataset = background_train_data self.ensemble_model = ensemble_model self.data: Dataset = data + self.batch_size: int = batch_size self.estimators: List[Estimator] = estimators self.generation_metrics: List[GenerationMetric] = generation_metrics self.ue_metrics: List[UEMetric] = ue_metrics @@ -301,18 +298,58 @@ def __init__( _check_unique_names(estimators) _check_unique_names(ue_metrics) + self.gen_metrics: Dict[Tuple[str, str], List[float]] = defaultdict(list) + self.estimations: Dict[Tuple[str, str], List[float]] = defaultdict(list) + self.metrics: Dict[Tuple[str, str, str, str], float] = {} + self.total_bad_estimators: Dict[Estimator, float] = {} + self.stats: Dict[str, List] = defaultdict(list) + self.save_stats = list(set(['greedy_texts', 'greedy_tokens']).union(set(save_stats))) + + 
self.processors = processors + self.ignore_exceptions = ignore_exceptions + self.verbose = verbose + self.max_new_tokens = max_new_tokens + self.background_train_dataset_max_new_tokens = ( + background_train_dataset_max_new_tokens + ) + self.cache_path = cache_path + self.entropy_top_k = entropy_top_k + self.deberta_batch_size = deberta_batch_size + self.deberta_device = deberta_device + self.language = language + self.state = state + self.save_path = save_path + + + def prepare_calculators(self): + stat_calculators_dict, stat_dependencies_dict = register_stat_calculators( + deberta_batch_size=self.deberta_batch_size, + deberta_device=self.deberta_device, + language=self.language, + cache_path=self.cache_path, + model=self.model, + entropy_top_k=self.entropy_top_k, + ) + + self.stat_calculators_dict = stat_calculators_dict + greedy = ["greedy_texts"] if not isinstance(self.model, BlackboxModel): greedy += ["greedy_tokens"] stats = ( [s for e in self.estimators for s in e.stats_dependencies] - + [s for m in generation_metrics for s in m.stats_dependencies] + + [s for m in self.generation_metrics for s in m.stats_dependencies] + greedy ) + + # Only calculate stats that are not already calculated + existing_stats = set(self.stats.keys()) + stats = list(set(stats) - existing_stats) stats, have_stats = _order_calculators( stats, + existing_stats, stat_calculators_dict, stat_dependencies_dict, ) @@ -321,31 +358,17 @@ def __init__( stats = [ s for s in stats - if not (str(s).startswith("ensemble_")) - and not ( - ( + if not ( str(s).startswith("blackbox_") and s[len("blackbox_") :] in have_stats - ) # remove blackbox_X from stats only if X is already in stats to remove duplicated run of stat calculator - ) + ) # remove blackbox_X from stats only if X is already in stats to remove duplicated run of stat calculator ] # below in calculate() we copy X in blackbox_X self.stat_calculators: List[StatCalculator] = [ stat_calculators_dict[c] for c in stats ] - if verbose: + if 
self.verbose: print("Stat calculators:", self.stat_calculators) - self.ensemble_estimators = [] - single_estimators = [] - for e in estimators: - for s in e.stats_dependencies: - if s.startswith("ensemble"): - self.ensemble_estimators.append(e) - break - if e not in self.ensemble_estimators: - single_estimators.append(e) - self.estimators = single_estimators - train_stats = [ s for e in self.estimators @@ -357,22 +380,31 @@ def __init__( if "train_greedy_log_likelihoods" in train_stats else [] ) + + train_stats = list(set(train_stats) - existing_stats) + train_stats, _ = _order_calculators( train_stats, + existing_stats, stat_calculators_dict, stat_dependencies_dict, ) self.train_stat_calculators: List[StatCalculator] = [ stat_calculators_dict[c] for c in train_stats ] + background_train_stats = [ s for e in self.estimators for s in e.stats_dependencies if s.startswith("background_train") ] + + background_train_stats = list(set(background_train_stats) - existing_stats) + background_train_stats, _ = _order_calculators( background_train_stats, + existing_stats, stat_calculators_dict, stat_dependencies_dict, ) @@ -380,34 +412,38 @@ def __init__( stat_calculators_dict[c] for c in background_train_stats ] - ensemble_stats = [ - s - for e in self.ensemble_estimators - for s in e.stats_dependencies - if s.startswith("ensemble") - ] - ensemble_stats, _ = _order_calculators( - ensemble_stats, - stat_calculators_dict, - stat_dependencies_dict, - ) - self.ensemble_stat_calculators: List[StatCalculator] = [ - stat_calculators_dict[c] for c in ensemble_stats - ] + def initiate_batch_stats(self, batch_i, inp_texts, target_texts): + batch_stats: Dict[str, np.ndarray] = {} + cur_batch_size = len(inp_texts) + + for key, val in self.stats.items(): + # Get corresponding batch from existing stats + batch_start = batch_i * self.batch_size + # If last batch is not full, we need to adjust the end index + batch_end = batch_start + cur_batch_size + # This will only be true if the 
calculation is based off + # existing manager. Otherwise, all stats will contain only + # values calculated in previous batches + if len(val) >= batch_end: + val_batch = val[batch_start:batch_end] + batch_stats[key] = val_batch + + for key, val in [ + ("input_texts", inp_texts), + ("target_texts", target_texts), + ]: + if key not in batch_stats: + self.stats[key] += val + batch_stats[key] = val + elif key == "input_texts": + # Check that new stats will be calculated + # against the same input texts + assert np.all(np.array(batch_stats[key]) == np.array(val)) - self.gen_metrics: Dict[Tuple[str, str], List[float]] = defaultdict(list) - self.estimations: Dict[Tuple[str, str], List[float]] = defaultdict(list) - self.metrics: Dict[Tuple[str, str, str, str], float] = {} - self.total_bad_estimators: Dict[Estimator, float] = {} - self.stats: Dict[str, List] = defaultdict(list) + batch_stats["model"] = self.model + + return batch_stats - self.processors = processors - self.ignore_exceptions = ignore_exceptions - self.verbose = verbose - self.max_new_tokens = max_new_tokens - self.background_train_dataset_max_new_tokens = ( - background_train_dataset_max_new_tokens - ) def __call__(self) -> Dict[Tuple[str, str, str, str], float]: """ @@ -427,22 +463,13 @@ def __call__(self) -> Dict[Tuple[str, str, str, str], float]: - generation metrics name, - `ue_metrics` name which was used to calculate quality. 
""" - + self.prepare_calculators() train_stats = self._extract_train_embeddings() background_train_stats = self._extract_train_embeddings(background=True) iterable_data = tqdm(self.data) if self.verbose else self.data for batch_i, (inp_texts, target_texts) in enumerate(iterable_data): - batch_stats: Dict[str, np.ndarray] = {} - for key, val in [ - ("input_texts", inp_texts), - ("target_texts", target_texts), - ]: - self.stats[key] += val - batch_stats[key] = val - batch_stats["model"] = self.model - - batch_stats["model"] = self.model + batch_stats = self.initiate_batch_stats(batch_i, inp_texts, target_texts) train_stats_keys = list(train_stats.keys()) for stat in train_stats_keys: @@ -451,8 +478,10 @@ def __call__(self) -> Dict[Tuple[str, str, str, str], float]: background_train_stats_keys = list(background_train_stats.keys()) for stat in background_train_stats_keys: batch_stats[stat] = background_train_stats.pop(stat) - + + old_stats = set(batch_stats.keys()) batch_stats = self.calculate(batch_stats, self.stat_calculators, inp_texts) + new_stats = set(batch_stats.keys()) - old_stats batch_estimations, bad_estimators = self.estimate( batch_stats, self.estimators @@ -474,46 +503,36 @@ def __call__(self) -> Dict[Tuple[str, str, str, str], float]: self.gen_metrics[generation_metric.level, str(generation_metric)] += m batch_gen_metrics[generation_metric.level, str(generation_metric)] += m - for key in ["greedy_texts", "greedy_tokens"]: - if key in batch_stats.keys(): - self.stats[key] += batch_stats[key] + for key in self.save_stats: + if key in new_stats: + self.stats[key] += list(batch_stats[key]) for processor in self.processors: processor.on_batch(batch_stats, batch_gen_metrics, batch_estimations) - if self.ensemble_model is not None: - iterable_data = tqdm(self.data) if self.verbose else self.data - for batch_i, (inp_texts, target_texts) in enumerate(iterable_data): - batch_stats: Dict[str, np.ndarray] = {} - for key, val in [ - ("input_texts", inp_texts), - 
("target_texts", target_texts), - ("model", self.model), - ]: - batch_stats[key] = val + torch.cuda.empty_cache() + gc.collect() - batch_stats["ensemble_generation_params"] = {} - batch_stats["ensemble_model"] = self.ensemble_model + self.state = 'post_inference' + self.save() - batch_stats = self.calculate( - batch_stats, self.ensemble_stat_calculators, inp_texts - ) + self.eval_ue() + self.state = 'post_eval' - batch_estimations, bad_estimators = self.estimate( - batch_stats, self.ensemble_estimators - ) + for processor in self.processors: + processor.on_eval(self.metrics, self.total_bad_estimators) - for bad_estimator in bad_estimators: - key = (bad_estimator.level, str(bad_estimator)) - self.estimations.pop(key, None) - self.ensemble_estimators.remove(bad_estimator) - self.total_bad_estimators[bad_estimator] = batch_i + self.save() - torch.cuda.empty_cache() - gc.collect() + return self.metrics + + def eval_ue(self): + for (gen_level, gen_name), generation_metric in self.gen_metrics.items(): + generation_metric = np.array(generation_metric) + for ue_metric in self.ue_metrics: + oracle_score = ue_metric(-generation_metric, generation_metric) + random_score = get_random_scores(ue_metric, generation_metric) - for (e_level, e_name), estimator_values in self.estimations.items(): - for (gen_level, gen_name), generation_metric in self.gen_metrics.items(): - for ue_metric in self.ue_metrics: + for (e_level, e_name), estimator_values in self.estimations.items(): if gen_level != e_level: continue if len(estimator_values) != len(generation_metric): @@ -521,14 +540,16 @@ def __call__(self) -> Dict[Tuple[str, str, str, str], float]: f"Got different number of metrics for {e_name} and {gen_name}: " f"{len(estimator_values)} and {len(generation_metric)}" ) + # TODO: Report how many nans! 
# This is important to know for a user ue, metric = _delete_nans(estimator_values, generation_metric) + assert len(ue) == len(estimator_values) + assert len(metric) == len(generation_metric) + if len(ue) == 0: self.metrics[e_level, e_name, gen_name, str(ue_metric)] = np.nan else: - oracle_score = ue_metric(-metric, metric) - random_score = get_random_scores(ue_metric, metric) ue_metric_val = ue_metric(ue, metric) self.metrics[e_level, e_name, gen_name, str(ue_metric)] = ( ue_metric_val @@ -537,10 +558,6 @@ def __call__(self) -> Dict[Tuple[str, str, str, str], float]: e_level, e_name, gen_name, str(ue_metric) + "_normalized" ] = normalize_metric(ue_metric_val, oracle_score, random_score) - for processor in self.processors: - processor.on_eval(self.metrics, self.total_bad_estimators) - - return self.metrics def calculate(self, batch_stats: dict, calculators: list, inp_texts: list) -> dict: """ @@ -674,7 +691,7 @@ def _extract_train_embeddings( return result_train_stat - def save(self, save_path: str): + def save(self): """ Saves the run results in the provided path. Will raise exception, if no results are calculated yet. To load the saved manager, see UEManager.load(). @@ -682,30 +699,43 @@ def save(self, save_path: str): Parameters: save_path (str): Path to file to save benchmark results to. """ - if len(self.metrics) == 0: - raise Exception("Nothing to save. Consider calling manager() first.") + if self.save_path is None: + raise Exception("No save path provided.") + torch.save( { "metrics": self.metrics, "gen_metrics": self.gen_metrics, "estimations": self.estimations, "stats": self.stats, + "state": self.state, }, - save_path, + self.save_path, ) @staticmethod - def load(load_path: str) -> "UEManager": + def load(load_path: str, **kwargs) -> "UEManager": """ Loads UEManager from the specified path. To save the calculated manager results, see UEManager.save(). Parameters: load_path (str): Path to file with saved benchmark results to load. 
""" - res_dict = torch.load(load_path) - man = UEManager(None, None, [], [], [], []) + res_dict = torch.load(load_path, weights_only=False) + default_kwargs = { + "data": None, + "model": None, + "estimators": [], + "generation_metrics": [], + "ue_metrics": [], + "processors": [], + } + default_kwargs.update(kwargs) + man = UEManager(**default_kwargs) man.metrics = res_dict.get("metrics", None) man.gen_metrics = res_dict.get("gen_metrics", None) man.estimations = res_dict.get("estimations", None) man.stats = res_dict.get("stats", None) + man.state = res_dict.get("state", 'init') + return man diff --git a/src/lm_polygraph/utils/model.py b/src/lm_polygraph/utils/model.py index 4ab587f27..db381107f 100644 --- a/src/lm_polygraph/utils/model.py +++ b/src/lm_polygraph/utils/model.py @@ -355,14 +355,18 @@ def __init__( def __call__(self, input_ids, scores, **kwargs) -> bool: # For efficiency, we compare the last n tokens where n is the number of tokens in the stop_sequence lookback_ids_batch = input_ids[:, self.initial_decoder_input_length :] - + lookback_ids_batch = lookback_ids_batch[:, -self.sequence_id_len :] lookback_tokens_batch = self.tokenizer.batch_decode(lookback_ids_batch) for i, done in enumerate(self.done_tracker): if not done: - self.done_tracker[i] = self.sequence in lookback_tokens_batch[i] + lookback_tokens_batch_i = lookback_tokens_batch[i] + # Remove stop sequence from the beginning of the lookback tokens if it is there + if len(lookback_tokens_batch_i) >= len(self.sequence) and lookback_tokens_batch_i[: len(self.sequence)] == self.sequence: + lookback_tokens_batch_i = lookback_tokens_batch_i[len(self.sequence) :] + self.done_tracker[i] = self.sequence in lookback_tokens_batch_i return False not in self.done_tracker def get_stopping_criteria(self, input_ids: torch.Tensor): @@ -560,7 +564,7 @@ def tokenize( formatted_texts.append(formatted_chat) texts = formatted_texts - return self.tokenizer(texts, padding=True, return_tensors="pt") + return 
self.tokenizer(texts, padding=True, return_tensors="pt", return_token_type_ids=False) def create_ensemble( diff --git a/src/lm_polygraph/utils/processor.py b/src/lm_polygraph/utils/processor.py index 424df271c..49908dea9 100644 --- a/src/lm_polygraph/utils/processor.py +++ b/src/lm_polygraph/utils/processor.py @@ -61,7 +61,7 @@ def on_batch( for key, val in batch_stats.items(): str_repr = str(val) # to skip large outputs - if len(str_repr) < 10000 and str_repr.count("\n") < 10: + if len(str_repr) < 10000 and str_repr.count("\n") < 20: print(f"{key}: {val}") print() print("-" * 100) diff --git a/src/lm_polygraph/utils/register_stat_calculators.py b/src/lm_polygraph/utils/register_stat_calculators.py index 7588ed1c6..cf01905be 100644 --- a/src/lm_polygraph/utils/register_stat_calculators.py +++ b/src/lm_polygraph/utils/register_stat_calculators.py @@ -18,6 +18,7 @@ def register_stat_calculators( n_ccp_alternatives: int = 10, cache_path=os.path.expanduser("~") + "/.cache", model: Model = None, + entropy_top_k: Optional[int] = None, ) -> Tuple[Dict[str, "StatCalculator"], Dict[str, List[str]]]: """ Registers all available statistic calculators to be seen by UEManager for properly organizing the calculations @@ -38,6 +39,7 @@ def register_stat_calculators( ) else: raise Exception(f"Unsupported language: {language}") + nli_model = None log.info("=" * 100) log.info("Initializing stat calculators...") @@ -62,15 +64,22 @@ def _register(calculator_class: StatCalculator): _register(BlackboxSamplingGenerationCalculator()) else: _register(GreedyProbsCalculator(n_alternatives=n_ccp_alternatives)) - _register(EntropyCalculator()) + _register(EntropyCalculator(top_k=entropy_top_k)) + _register(SampleEntropyCalculator(top_k=entropy_top_k)) _register(GreedyLMProbsCalculator()) - _register(SamplingGenerationCalculator()) + _register(SamplingGenerationCalculator(n_alternatives=n_ccp_alternatives)) + _register(FirstSampleCalculator()) + _register(BestSampleCalculator()) 
_register(BartScoreCalculator()) _register(ModelScoreCalculator()) _register(EmbeddingsCalculator()) _register(EnsembleTokenLevelDataCalculator()) _register(CrossEncoderSimilarityMatrixCalculator(nli_model=nli_model)) + _register(GreedySimilarityCalculator(nli_model=nli_model)) + _register(RougeLSemanticMatrixCalculator()) + _register(GreedyRougeLSemanticMatrixCalculator()) _register(GreedyAlternativesNLICalculator(nli_model=nli_model)) + _register(SampleAlternativesNLICalculator(nli_model=nli_model)) _register(GreedyAlternativesFactPrefNLICalculator(nli_model=nli_model)) _register(ClaimsExtractor(openai_chat=openai_chat, language=language)) _register(