
Commit b0dd091

Authored by morganmcg1 (Morgan McGuire), tcapelle (Thomas Capelle), and jhallard (John Allard)

Update the wandb logger (openai#590)

* Update WandbLogger for new FineTuningJob api
* remove prints
* add docs link
* remove pd
* add pandas check
* list all jobs
* move pandas assert

Co-authored-by: Morgan McGuire <[email protected]>
Co-authored-by: Thomas Capelle <[email protected]>
Co-authored-by: John Allard <[email protected]>
1 parent e1bb30d commit b0dd091

File tree: 4 files changed, +40 −12 lines


README.md

Lines changed: 8 additions & 0 deletions

````diff
@@ -129,6 +129,14 @@ openai.Model.delete("ft:gpt-3.5-turbo:acemeco:suffix:abc123")
 
 You can learn more in our [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning).
 
+To log the training results from fine-tuning to Weights & Biases use:
+
+```
+openai wandb sync
+```
+
+For more information, read the [wandb documentation](https://docs.wandb.ai/guides/integrations/openai) on Weights & Biases.
+
 ### Moderation
 
 OpenAI provides a free Moderation endpoint that can be used to check whether content complies with the OpenAI [content policy](https://platform.openai.com/docs/usage-policies).
````
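The same sync can also be driven from Python rather than the CLI. Below is a minimal sketch, assuming the module path shown in this commit's diff (`openai/wandb_logger.py`), a pre-1.0 `openai` package, and that `wandb` and `pandas` are installed; the job ID is a placeholder, not a real fine-tune.

```python
# Minimal sketch, not the canonical usage: calls WandbLogger.sync with the
# parameters introduced in this commit. The job ID below is a placeholder.
from openai.wandb_logger import WandbLogger

WandbLogger.sync(
    id="ftjob-abc123",           # placeholder; omit to sync every fine-tuning job
    project="OpenAI-Fine-Tune",  # new default project name introduced in this commit
    legacy=False,                # True would read jobs from the old /v1/fine-tunes API
)
```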

openai/_openai_scripts.py

Lines changed: 1 addition & 1 deletion

```diff
@@ -47,7 +47,7 @@ def help(args):
     subparsers = parser.add_subparsers()
     sub_api = subparsers.add_parser("api", help="Direct API calls")
     sub_tools = subparsers.add_parser("tools", help="Client side tools for convenience")
-    sub_wandb = subparsers.add_parser("wandb", help="Logging with Weights & Biases")
+    sub_wandb = subparsers.add_parser("wandb", help="Logging with Weights & Biases, see https://docs.wandb.ai/guides/integrations/openai for documentation")
 
     api_register(sub_api)
     tools_register(sub_tools)
```

openai/cli.py

Lines changed: 10 additions & 4 deletions

```diff
@@ -1375,7 +1375,7 @@ def help(args):
 
 def wandb_register(parser):
     subparsers = parser.add_subparsers(
-        title="wandb", help="Logging with Weights & Biases"
+        title="wandb", help="Logging with Weights & Biases, see https://docs.wandb.ai/guides/integrations/openai for documentation"
     )
 
     def help(args):
@@ -1394,17 +1394,23 @@ def help(args):
     )
     sub.add_argument(
         "--project",
-        default="GPT-3",
-        help="""Name of the project where you're sending runs. By default, it is "GPT-3".""",
+        default="OpenAI-Fine-Tune",
+        help="""Name of the Weights & Biases project where you're sending runs. By default, it is "OpenAI-Fine-Tune".""",
     )
     sub.add_argument(
         "--entity",
-        help="Username or team name where you're sending runs. By default, your default entity is used, which is usually your username.",
+        help="Weights & Biases username or team name where you're sending runs. By default, your default entity is used, which is usually your username.",
     )
     sub.add_argument(
         "--force",
         action="store_true",
         help="Forces logging and overwrite existing wandb run of the same fine-tune.",
     )
+    sub.add_argument(
+        "--legacy",
+        action="store_true",
+        help="Log results from legacy OpenAI /v1/fine-tunes api",
+    )
     sub.set_defaults(force=False)
+    sub.set_defaults(legacy=False)
     sub.set_defaults(func=WandbLogger.sync)
```
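The registration above is a standard argparse pattern: a `store_true` flag, `set_defaults` for its fallback value, and `set_defaults(func=...)` to bind the handler that later runs as `args.func(args)`. A self-contained sketch of that pattern, using a stand-in handler rather than the real `WandbLogger.sync`:

```python
# Standalone illustration of the wiring used by wandb_register: a store_true
# flag, set_defaults for its fallback, and a handler bound via set_defaults.
# fake_sync is a stand-in for WandbLogger.sync, for demonstration only.
import argparse

def fake_sync(args):
    print(f"project={args.project}, legacy={args.legacy}")

parser = argparse.ArgumentParser(prog="wandb-demo")
sub = parser.add_subparsers(title="wandb").add_parser("sync")
sub.add_argument("--project", default="OpenAI-Fine-Tune")
sub.add_argument("--legacy", action="store_true")
sub.set_defaults(legacy=False, func=fake_sync)

args = parser.parse_args(["sync", "--legacy"])
args.func(args)  # prints: project=OpenAI-Fine-Tune, legacy=True
```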

openai/wandb_logger.py

Lines changed: 21 additions & 7 deletions

```diff
@@ -13,9 +13,9 @@
 import re
 from pathlib import Path
 
-from openai import File, FineTune
+from openai import File, FineTune, FineTuningJob
 from openai.datalib.numpy_helper import numpy as np
-from openai.datalib.pandas_helper import pandas as pd
+from openai.datalib.pandas_helper import assert_has_pandas, pandas as pd
 
 
 class WandbLogger:
@@ -34,9 +34,10 @@ def sync(
         cls,
         id=None,
         n_fine_tunes=None,
-        project="GPT-3",
+        project="OpenAI-Fine-Tune",
         entity=None,
         force=False,
+        legacy=False,
         **kwargs_wandb_init,
     ):
         """
@@ -47,18 +48,26 @@ def sync(
         :param entity: Username or team name where you're sending runs. By default, your default entity is used, which is usually your username.
         :param force: Forces logging and overwrite existing wandb run of the same fine-tune.
         """
+
+        assert_has_pandas()
 
         if not WANDB_AVAILABLE:
             return
 
         if id:
-            fine_tune = FineTune.retrieve(id=id)
+            print("Retrieving fine-tune job...")
+            if legacy:
+                fine_tune = FineTune.retrieve(id=id)
+            else:
+                fine_tune = FineTuningJob.retrieve(id=id)
             fine_tune.pop("events", None)
             fine_tunes = [fine_tune]
-
         else:
             # get list of fine_tune to log
-            fine_tunes = FineTune.list()
+            if legacy:
+                fine_tunes = FineTune.list()
+            else:
+                fine_tunes = list(FineTuningJob.auto_paging_iter())
             if not fine_tunes or fine_tunes.get("data") is None:
                 print("No fine-tune has been retrieved")
                 return
@@ -76,6 +85,7 @@
                 project,
                 entity,
                 force,
+                legacy,
                 show_individual_warnings,
                 **kwargs_wandb_init,
             )
@@ -94,6 +104,7 @@ def _log_fine_tune(
         project,
         entity,
         force,
+        legacy,
         show_individual_warnings,
         **kwargs_wandb_init,
     ):
@@ -110,7 +121,10 @@ def _log_fine_tune(
 
         # check results are present
         try:
-            results_id = fine_tune["result_files"][0]["id"]
+            if legacy:
+                results_id = fine_tune["result_files"][0]["id"]
+            else:
+                results_id = fine_tune["result_files"][0]
             results = File.download(id=results_id).decode("utf-8")
         except:
             if show_individual_warnings:
```
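The `legacy` branch in `_log_fine_tune` exists because the two APIs shape `result_files` differently: as implied by the diff, legacy `/v1/fine-tunes` jobs return a list of file objects with an `"id"` field, while the new fine-tuning jobs return a list of file ID strings. A small sketch of that branching, with hypothetical payloads for illustration:

```python
# Sketch of the result-file lookup added to _log_fine_tune, based on the
# payload shapes implied by the diff: legacy jobs expose result_files as a
# list of dicts with an "id" key, new jobs as a list of file ID strings.
def extract_results_file_id(fine_tune: dict, legacy: bool) -> str:
    if legacy:
        return fine_tune["result_files"][0]["id"]
    return fine_tune["result_files"][0]

# Hypothetical payloads, for illustration only.
legacy_job = {"result_files": [{"id": "file-legacy123"}]}
new_job = {"result_files": ["file-new456"]}
assert extract_results_file_id(legacy_job, legacy=True) == "file-legacy123"
assert extract_results_file_id(new_job, legacy=False) == "file-new456"
```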
