forked from FoundationAgents/MetaGPT
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: optimize.py
More file actions
136 lines (119 loc) · 4.58 KB
/
optimize.py
File metadata and controls
136 lines (119 loc) · 4.58 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
# -*- coding: utf-8 -*-
# @Date : 8/23/2024 20:00 PM
# @Author : didi
# @Desc : Entrance of AFlow.
import argparse
from dataclasses import dataclass
from typing import Dict, List

from metagpt.configs.models_config import ModelsConfig
from metagpt.ext.aflow.data.download_data import download
from metagpt.ext.aflow.scripts.optimizer import Optimizer
@dataclass(eq=False)
class ExperimentConfig:
    """Per-benchmark settings handed to the AFlow ``Optimizer``.

    Replaces a hand-written ``__init__`` with a dataclass: same constructor
    signature (``dataset``, ``question_type``, ``operators``, positional or
    keyword), plus a generated ``__repr__`` for free. ``eq=False`` keeps the
    original class's identity-based equality and hashing so instances remain
    usable exactly as before.
    """

    dataset: str  # benchmark name; mirrors its key in EXPERIMENT_CONFIGS
    question_type: str  # one of "qa", "math", "code" (see EXPERIMENT_CONFIGS)
    operators: List[str]  # operator names the optimizer may compose into workflows
# Operator pools shared by all benchmarks of the same question type.
_QA_OPERATORS = ("Custom", "AnswerGenerate", "ScEnsemble")
_MATH_OPERATORS = ("Custom", "ScEnsemble", "Programmer")
_CODE_OPERATORS = ("Custom", "CustomCodeGenerate", "ScEnsemble", "Test")

# Registry of supported benchmarks. Each entry's dataset name doubles as the
# lookup key; list(ops) gives every config its own independent operator list.
EXPERIMENT_CONFIGS: Dict[str, ExperimentConfig] = {
    name: ExperimentConfig(dataset=name, question_type=qtype, operators=list(ops))
    for name, qtype, ops in (
        ("DROP", "qa", _QA_OPERATORS),
        ("HotpotQA", "qa", _QA_OPERATORS),
        ("MATH", "math", _MATH_OPERATORS),
        ("GSM8K", "math", _MATH_OPERATORS),
        ("MBPP", "code", _CODE_OPERATORS),
        ("HumanEval", "code", _CODE_OPERATORS),
    )
}
def parse_args():
    """Parse command-line arguments for the AFlow optimizer entry point.

    Returns:
        argparse.Namespace with dataset selection, sampling/round controls,
        output path, and the optimization/execution model names.
    """

    def str2bool(value: str) -> bool:
        # BUG FIX: the original used ``type=bool`` for --check_convergence, but
        # bool("False") is True — any non-empty string parsed as True, so the
        # flag could never be disabled from the CLI. Parse "true"/"false"
        # explicitly, matching the parser already used for --if_first_optimize.
        return value.lower() == "true"

    parser = argparse.ArgumentParser(description="AFlow Optimizer")
    parser.add_argument(
        "--dataset",
        type=str,
        choices=list(EXPERIMENT_CONFIGS.keys()),
        required=True,
        help="Dataset type",
    )
    parser.add_argument("--sample", type=int, default=4, help="Sample count")
    parser.add_argument(
        "--optimized_path",
        type=str,
        default="metagpt/ext/aflow/scripts/optimized",
        help="Optimized result save path",
    )
    parser.add_argument("--initial_round", type=int, default=1, help="Initial round")
    parser.add_argument("--max_rounds", type=int, default=20, help="Max iteration rounds")
    parser.add_argument("--check_convergence", type=str2bool, default=True, help="Whether to enable early stop")
    parser.add_argument("--validation_rounds", type=int, default=5, help="Validation rounds")
    parser.add_argument(
        "--if_first_optimize",
        type=str2bool,
        default=True,
        help="Whether to download dataset for the first time",
    )
    parser.add_argument(
        "--opt_model_name",
        type=str,
        default="claude-3-5-sonnet-20240620",
        help="Specifies the name of the model used for optimization tasks.",
    )
    parser.add_argument(
        "--exec_model_name",
        type=str,
        default="gpt-4o-mini",
        help="Specifies the name of the model used for execution tasks.",
    )
    return parser.parse_args()
# Script entry point: resolve configs, fetch data if needed, run the optimizer.
if __name__ == "__main__":
    args = parse_args()
    # Dataset-specific experiment settings (question type and operator pool).
    config = EXPERIMENT_CONFIGS[args.dataset]
    # Load the 'models' section of the MetaGPT configuration file.
    models_config = ModelsConfig.default()
    # Fail fast with an actionable message if either model name is not configured.
    opt_llm_config = models_config.get(args.opt_model_name)
    if opt_llm_config is None:
        raise ValueError(
            f"The optimization model '{args.opt_model_name}' was not found in the 'models' section of the configuration file. "
            "Please add it to the configuration file or specify a valid model using the --opt_model_name flag. "
        )
    exec_llm_config = models_config.get(args.exec_model_name)
    if exec_llm_config is None:
        raise ValueError(
            f"The execution model '{args.exec_model_name}' was not found in the 'models' section of the configuration file. "
            "Please add it to the configuration file or specify a valid model using the --exec_model_name flag. "
        )
    # Download benchmark datasets and seed workflows; skipped internally when
    # if_first_download is False (i.e. data already present from a prior run).
    download(["datasets", "initial_rounds"], if_first_download=args.if_first_optimize)
    optimizer = Optimizer(
        dataset=config.dataset,
        question_type=config.question_type,
        opt_llm_config=opt_llm_config,
        exec_llm_config=exec_llm_config,
        check_convergence=args.check_convergence,
        operators=config.operators,
        optimized_path=args.optimized_path,
        sample=args.sample,
        initial_round=args.initial_round,
        max_rounds=args.max_rounds,
        validation_rounds=args.validation_rounds,
    )
    # Optimize workflow via setting the optimizer's mode to 'Graph'
    optimizer.optimize("Graph")
    # Test workflow via setting the optimizer's mode to 'Test'
    # optimizer.optimize("Test")