 CURRENT_HOST = HOST1
 CMD_ARGS = {"some_key": "some_value"}
 CLUSTER_WITH_PS = {
-    "master": ["{}:8890".format(HOST1)],
-    "worker": ["{}:8890".format(HOST2)],
+    "master": ["{}:2222".format(HOST1)],
+    "worker": ["{}:2222".format(HOST2)],
     "ps": ["{}:2223".format(HOST1), "{}:2223".format(HOST2)],
 }
 CLUSTER_WITH_MWMS = {"worker": ["{}:8890".format(HOST) for HOST in HOST_LIST]}
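These cluster dicts mirror the `cluster` field of TensorFlow's `TF_CONFIG` environment variable. As a rough illustration only (the helper name and host values below are assumptions, not the toolkit's actual code), CLUSTER_WITH_PS would be paired with a per-process `task` entry and exported as JSON for parameter-server training:

```python
import json
import os

HOST1, HOST2 = "algo-1", "algo-2"  # assumed host names, for illustration only

# Mirrors CLUSTER_WITH_PS after this change: master and worker on port 2222,
# a parameter server on port 2223 on each host.
CLUSTER_WITH_PS = {
    "master": ["{}:2222".format(HOST1)],
    "worker": ["{}:2222".format(HOST2)],
    "ps": ["{}:2223".format(HOST1), "{}:2223".format(HOST2)],
}


def tf_config_for_ps(cluster, task_type, task_index):
    # Hypothetical helper: TF_CONFIG pairs the shared cluster spec with the
    # role this particular process plays in it.
    return {
        "cluster": cluster,
        "environment": "cloud",
        "task": {"index": task_index, "type": task_type},
    }


# e.g. the master process running on HOST1:
os.environ["TF_CONFIG"] = json.dumps(tf_config_for_ps(CLUSTER_WITH_PS, "master", 0))
```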
@@ -231,13 +231,13 @@ def test_train_distributed_no_ps(run, distributed_training_env):
     )


-def test_build_tf_config_for_mwms():
-    assert training._build_tf_config_for_mwms(HOST_LIST, HOST1) == {
+def test_build_tf_config_for_mwm():
+    assert training._build_tf_config_for_mwm(HOST_LIST, HOST1) == {
         "cluster": CLUSTER_WITH_MWMS,
         "environment": "cloud",
         "task": {"index": HOST_LIST.index(HOST1), "type": "worker"},
     }
-    assert training._build_tf_config_for_mwms(HOST_LIST, HOST2) == {
+    assert training._build_tf_config_for_mwm(HOST_LIST, HOST2) == {
         "cluster": CLUSTER_WITH_MWMS,
         "environment": "cloud",
         "task": {"index": HOST_LIST.index(HOST2), "type": "worker"},
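For reference, a minimal sketch of a helper that would satisfy the assertions above (assuming port 8890 for workers, as in CLUSTER_WITH_MWMS; this is not the toolkit's actual implementation):

```python
import json
import os

MWM_PORT = "8890"  # assumed worker port, taken from CLUSTER_WITH_MWMS in the tests


def _build_tf_config_for_mwm(hosts, current_host):
    # Every host is a worker for MultiWorkerMirroredStrategy; the current
    # host's position in the list becomes its task index.
    workers = ["{}:{}".format(host, MWM_PORT) for host in hosts]
    return {
        "cluster": {"worker": workers},
        "environment": "cloud",
        "task": {"index": hosts.index(current_host), "type": "worker"},
    }


# MultiWorkerMirroredStrategy reads this configuration from the TF_CONFIG
# environment variable as a JSON string:
os.environ["TF_CONFIG"] = json.dumps(
    _build_tf_config_for_mwm(["algo-1", "algo-2"], "algo-1")
)
```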