Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Drop redundant "model" method prefix #170

Merged
merged 5 commits
Nov 21, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions docs/guides/ORCHESTRATION_CHAT_COMPLETION.md
Original file line number Diff line number Diff line change
Expand Up @@ -200,11 +200,11 @@ Change your LLM configuration to add model parameters:
```java
OrchestrationAiModel customGPT4O =
OrchestrationAiModel.GPT_4O
.withModelParams(
.withParams(
Map.of(
"max_tokens", 50,
"temperature", 0.1,
"frequency_penalty", 0,
"presence_penalty", 0))
.withModelVersion("2024-05-13");
.withVersion("2024-05-13");
```
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
@AllArgsConstructor
public class OrchestrationAiModel {
/** The name of the model */
String modelName;
String name;

/**
* Optional parameters on this model.
Expand All @@ -26,10 +26,10 @@ public class OrchestrationAiModel {
* "presence_penalty", 0)
* }</pre>
*/
Map<String, Object> modelParams;
Map<String, Object> params;

/** The version of the model, defaults to "latest". */
String modelVersion;
String version;

/** IBM Granite 13B chat completions model */
public static final OrchestrationAiModel IBM_GRANITE_13B_CHAT =
Expand Down Expand Up @@ -106,15 +106,12 @@ public class OrchestrationAiModel {
public static final OrchestrationAiModel GEMINI_1_5_FLASH =
new OrchestrationAiModel("gemini-1.5-flash");

OrchestrationAiModel(@Nonnull final String modelName) {
this(modelName, Map.of(), "latest");
OrchestrationAiModel(@Nonnull final String name) {
this(name, Map.of(), "latest");
}

@Nonnull
LLMModuleConfig createConfig() {
return new LLMModuleConfig()
.modelName(modelName)
.modelParams(modelParams)
.modelVersion(modelVersion);
return new LLMModuleConfig().modelName(name).modelParams(params).modelVersion(version);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -69,18 +69,16 @@ void testDpiMaskingConfig() {
void testLLMConfig() {
Map<String, Object> params = Map.of("foo", "bar");
String version = "2024-05-13";
OrchestrationAiModel aiModel = GPT_4O.withModelParams(params).withModelVersion(version);
OrchestrationAiModel aiModel = GPT_4O.withParams(params).withVersion(version);
var config = new OrchestrationModuleConfig().withLlmConfig(aiModel);

assertThat(config.getLlmConfig()).isNotNull();
assertThat(config.getLlmConfig().getModelName()).isEqualTo(GPT_4O.getModelName());
assertThat(config.getLlmConfig().getModelName()).isEqualTo(GPT_4O.getName());
assertThat(config.getLlmConfig().getModelParams()).isEqualTo(params);
assertThat(config.getLlmConfig().getModelVersion()).isEqualTo(version);

assertThat(GPT_4O.getModelParams())
.withFailMessage("Static models should be unchanged")
.isEmpty();
assertThat(GPT_4O.getModelVersion())
assertThat(GPT_4O.getParams()).withFailMessage("Static models should be unchanged").isEmpty();
assertThat(GPT_4O.getVersion())
.withFailMessage("Static models should be unchanged")
.isEqualTo("latest");
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@
@WireMockTest
class OrchestrationUnitTest {
static final OrchestrationAiModel CUSTOM_GPT_35 =
GPT_35_TURBO_16K.withModelParams(
GPT_35_TURBO_16K.withParams(
Map.of(
"max_tokens", 50,
"temperature", 0.1,
Expand Down