Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 17 additions & 0 deletions FlagEmbedding/evaluation/bright/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
# Public API of the BRIGHT evaluation package: re-export the generic model
# arguments under a BRIGHT-specific alias and expose the concrete data-loader,
# argument, runner, and retriever classes.
from FlagEmbedding.abc.evaluation import (
    AbsEvalModelArgs as BrightEvalModelArgs,
)

from .data_loader import BrightShortEvalDataLoader, BrightLongEvalDataLoader
from .arguments import BrightEvalArgs
from .runner import BrightEvalRunner
from .searcher import BrightEvalDenseRetriever

# Names exported by `from FlagEmbedding.evaluation.bright import *`.
__all__ = [
    "BrightEvalArgs",
    "BrightEvalModelArgs",
    "BrightEvalRunner",
    "BrightEvalDenseRetriever",
    "BrightShortEvalDataLoader",
    "BrightLongEvalDataLoader",
]
28 changes: 28 additions & 0 deletions FlagEmbedding/evaluation/bright/__main__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
from transformers import HfArgumentParser

from FlagEmbedding.evaluation.bright import (
BrightEvalArgs, BrightEvalModelArgs,
BrightEvalRunner
)


def main():
    """Entry point: parse command-line arguments and run a BRIGHT evaluation.

    `HfArgumentParser` splits `sys.argv` across the two dataclasses, so the
    evaluation settings and the model settings are populated in one pass.
    """
    arg_parser = HfArgumentParser((BrightEvalArgs, BrightEvalModelArgs))
    eval_args, model_args = arg_parser.parse_args_into_dataclasses()

    # Build the runner from the parsed settings and execute the evaluation.
    BrightEvalRunner(eval_args=eval_args, model_args=model_args).run()


if __name__ == "__main__":
    main()
16 changes: 16 additions & 0 deletions FlagEmbedding/evaluation/bright/arguments.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
from dataclasses import dataclass, field

from FlagEmbedding.abc.evaluation.arguments import AbsEvalArgs


@dataclass
class BrightEvalArgs(AbsEvalArgs):
    """Evaluation arguments specific to the BRIGHT benchmark.

    Extends the generic :class:`AbsEvalArgs` with the BRIGHT task variant and
    an instruction toggle.
    """

    # BRIGHT variant to run; the metadata "choices" entry restricts CLI input.
    task_type: str = field(
        default="short",
        metadata={
            "help": "The task type to evaluate on. Available options: ['short', 'long']. Default: short",
            "choices": ["short", "long"],
        },
    )
    # Whether the task-specific instructions from `prompts.py` are applied.
    use_special_instructions: bool = field(
        default=True,
        metadata={
            "help": "Whether to use specific instructions in `prompts.py` for evaluation. Default: True",
        },
    )
Loading
Loading