Commit 5cafdf8

[Issue-1961]: Handle clippy::unused_self (tensorzero#2849)
1 parent 14a616e commit 5cafdf8

7 files changed (+18, -59 lines)
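For context, `clippy::unused_self` flags any method that takes a `self` receiver without ever reading it. This commit applies the two standard fixes: where the receiver is genuinely unneeded, it is dropped and the method becomes an associated function (call sites change from `self.foo(...)` to `Self::foo(...)`); where the signature must keep `&self` for API reasons, the lint is acknowledged with `#[expect(clippy::unused_self)]`. A minimal sketch of the first fix, with invented names:

struct Greeter;

impl Greeter {
    // Before: `fn greeting(&self) -> &'static str { "hello" }` would
    // trip `clippy::unused_self`, since the body never touches `self`.
    // After: drop the receiver and make it an associated function.
    fn greeting() -> &'static str {
        "hello"
    }
}

fn main() {
    // Call sites change from `greeter.greeting()` to:
    println!("{}", Greeter::greeting());
}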

Cargo.toml

Lines changed: 1 addition & 0 deletions
@@ -87,6 +87,7 @@ trivially_copy_pass_by_ref = "deny"
 unimplemented = "deny"
 uninlined_format_args = "deny"
 unreachable = "deny"
+unused_self = "deny"
 unwrap_used = "deny"
 
 [profile.performance]
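Judging by the neighboring entries (`unwrap_used`, `unreachable`, and so on), this is the workspace-wide Clippy lint table, so the new entry makes the lint a hard error for every crate that inherits workspace lints. A sketch of how such a setup is typically wired together; the table header is assumed, since the hunk shows only the entries:

# Workspace Cargo.toml (table name assumed from context):
[workspace.lints.clippy]
unused_self = "deny"

# Each member crate opts in from its own Cargo.toml:
[lints]
workspace = true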

clients/python/src/lib.rs

Lines changed: 1 addition & 0 deletions
@@ -576,6 +576,7 @@ impl TensorZeroGateway {
     }
 
     /// Close the connection to the TensorZero gateway.
+    #[expect(clippy::unused_self)]
     fn close(&self) {
         // TODO - implement closing the 'reqwest' connection pool: https://github.com/tensorzero/tensorzero/issues/857
     }
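Here the receiver cannot simply be dropped: `close` lives in the Python client's `TensorZeroGateway` and is presumably exposed to Python as an instance method, so the signature keeps `&self` even though the body is still a stub. `#[expect]` is the right tool for that case: unlike `#[allow]`, it raises an `unfulfilled_lint_expectations` warning if the lint ever stops firing, so the suppression cannot silently outlive the TODO. A standalone sketch:

struct Gateway;

impl Gateway {
    // `#[expect]` suppresses the lint like `#[allow]` would, but the
    // compiler warns if the expectation goes unfulfilled (e.g. once
    // this body actually uses `self`), flagging the stale attribute.
    #[expect(clippy::unused_self)]
    fn close(&self) {
        // Intentionally a no-op for now.
    }
}

fn main() {
    let gateway = Gateway;
    gateway.close();
}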

tensorzero-core/src/inference/types/storage.rs

Lines changed: 1 addition & 0 deletions
@@ -47,6 +47,7 @@ impl StorageKind {
     /// During a normal run, we never use a prefix on the object key.
     /// See `StorageKind::S3Compatible.prefix`
     #[cfg(not(feature = "e2e_tests"))]
+    #[expect(clippy::unused_self)]
     fn prefix(&self) -> &str {
         ""
     }
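The attribute sits next to `#[cfg(not(feature = "e2e_tests"))]`, which suggests an e2e-only sibling definition of `prefix` that does read `self` (the doc comment's pointer to `StorageKind::S3Compatible.prefix` hints at this); only the production variant, which hard-codes `""`, needs the lint silenced. A sketch of that cfg-gated pairing, with the e2e body assumed for illustration:

enum StorageKind {
    S3Compatible { prefix: String },
}

impl StorageKind {
    // Production build: the prefix is always empty, so `self` goes
    // unused and the lint has to be expected away.
    #[cfg(not(feature = "e2e_tests"))]
    #[expect(clippy::unused_self)]
    fn prefix(&self) -> &str {
        ""
    }

    // Hypothetical e2e build: this variant actually reads `self`, so
    // no attribute is needed (body assumed, not taken from the diff).
    #[cfg(feature = "e2e_tests")]
    fn prefix(&self) -> &str {
        match self {
            StorageKind::S3Compatible { prefix } => prefix,
        }
    }
}

fn main() {
    let kind = StorageKind::S3Compatible { prefix: String::new() };
    assert_eq!(kind.prefix(), "");
}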

tensorzero-core/src/model.rs

Lines changed: 2 additions & 3 deletions
@@ -180,7 +180,6 @@ pub fn fully_qualified_name(model_name: &str, provider_name: &str) -> String {
 
 impl ModelConfig {
     fn filter_content_blocks<'a>(
-        &self,
         request: &'a ModelInferenceRequest<'a>,
         model_name: &str,
         provider: &ModelProvider,
@@ -391,7 +390,7 @@
                 provider_name: provider_name.to_string(),
             })
         })?;
-        let request = self.filter_content_blocks(request, model_name, provider);
+        let request = Self::filter_content_blocks(request, model_name, provider);
         let model_provider_request = ModelProviderRequest {
             request: &request,
             model_name,
@@ -483,7 +482,7 @@
                 provider_name: provider_name.to_string(),
             })
         })?;
-        let request = self.filter_content_blocks(request, model_name, provider);
+        let request = Self::filter_content_blocks(request, model_name, provider);
         let model_provider_request = ModelProviderRequest {
             request: &request,
             model_name,
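The `ModelConfig` change shows the mechanical shape of the main fix: delete the unused `&self` parameter, then rewrite call sites from `self.filter_content_blocks(...)` to `Self::filter_content_blocks(...)`. Inside an `impl` block, `Self` is an alias for the implementing type, so this is the same as writing `ModelConfig::filter_content_blocks(...)` but survives renames. A toy sketch of the same shape (the real function takes a `ModelInferenceRequest` plus provider details):

struct ModelConfig {
    name: String,
}

impl ModelConfig {
    // Associated function: no receiver, so nothing for the lint to flag.
    fn filter(input: &str) -> &str {
        input.trim()
    }

    fn describe(&self, input: &str) -> String {
        // `Self::` names the implementing type, so this is equivalent
        // to `ModelConfig::filter(input)`.
        format!("{}: {}", self.name, Self::filter(input))
    }
}

fn main() {
    let config = ModelConfig { name: "demo".to_string() };
    assert_eq!(config.describe("  hi  "), "demo: hi");
}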

tensorzero-core/src/variant/best_of_n_sampling.rs

Lines changed: 3 additions & 21 deletions
@@ -582,7 +582,6 @@ impl BestOfNEvaluatorConfig {
     ///
     /// Returns an `Error` if any of the candidate outputs fail to serialize or if templating fails.
     fn prepare_candidate_message(
-        &self,
         templates: &TemplateConfig,
         candidates: &[InferenceResult],
     ) -> Result<(RequestMessage, Vec<usize>), Error> {
@@ -649,7 +648,7 @@
     ) -> Result<(ModelInferenceRequest<'a>, Vec<usize>), Error> {
         // Do this before we prepare the system message so we can use the correct max index in the system message
         let (candidate_message, skipped_indices) =
-            self.prepare_candidate_message(inference_config.templates, candidates)?;
+            Self::prepare_candidate_message(inference_config.templates, candidates)?;
         // Need to subtract the skipped indices from the total number of candidates to get the correct max index
         let max_index = candidates
             .len()
@@ -949,14 +948,6 @@ mod tests {
         let templates = get_test_template_config();
 
         // Create an EvaluatorConfig
-        let evaluator_config = BestOfNEvaluatorConfig {
-            inner: ChatCompletionConfig {
-                model: "dummy".into(),
-                weight: Some(1.0),
-                ..Default::default()
-            },
-        };
-
         // Prepare some candidate InferenceResults
         let model_inference_response = ModelInferenceResponseWithMetadata {
             id: Uuid::now_v7(),
@@ -1033,7 +1024,7 @@
         let candidates = vec![candidate1, candidate2];
 
         // Call prepare_candidate_message
-        let result = evaluator_config.prepare_candidate_message(&templates, &candidates);
+        let result = BestOfNEvaluatorConfig::prepare_candidate_message(&templates, &candidates);
         assert!(result.is_ok());
         let (request_message, skipped_indices) = result.unwrap();
         assert!(skipped_indices.is_empty());
@@ -1048,15 +1039,6 @@
     async fn test_prepare_candidate_message_json() {
         let templates = get_test_template_config();
 
-        // Create an EvaluatorConfig
-        let evaluator_config = BestOfNEvaluatorConfig {
-            inner: ChatCompletionConfig {
-                model: "dummy_json".into(),
-                weight: Some(1.0),
-                ..Default::default()
-            },
-        };
-
         // Prepare some candidate InferenceResults - some valid, some malformed
         let model_inference_response_valid = ModelInferenceResponseWithMetadata {
             id: Uuid::now_v7(),
@@ -1135,7 +1117,7 @@
         let candidates = vec![candidate1, candidate2];
 
         // Call prepare_candidate_message
-        let result = evaluator_config.prepare_candidate_message(&templates, &candidates);
+        let result = BestOfNEvaluatorConfig::prepare_candidate_message(&templates, &candidates);
         assert!(result.is_ok());
         let (request_message, skipped_indices) = result.unwrap();
 

tensorzero-core/src/variant/dicl.rs

Lines changed: 7 additions & 13 deletions
@@ -395,7 +395,7 @@ impl DiclConfig {
     /// The second message is an Assistant message with the output as native output blocks
     /// - For chat messages, this is a simple vector of ContentBlocks
     /// - For JSON messages, this is a single JSON output block (as Text)
-    fn prepare_message(&self, example: &Example) -> Result<Vec<RequestMessage>, Error> {
+    fn prepare_message(example: &Example) -> Result<Vec<RequestMessage>, Error> {
         let mut messages = Vec::new();
         let input = match example {
             Example::Chat(chat_example) => chat_example.input.clone(),
@@ -437,7 +437,7 @@
         Ok(messages)
     }
 
-    fn prepare_input_message(&self, input: &ResolvedInput) -> Result<RequestMessage, Error> {
+    fn prepare_input_message(input: &ResolvedInput) -> Result<RequestMessage, Error> {
         let content = vec![serde_json::to_string(&input)
             .map_err(|e| {
                 Error::new(ErrorDetails::Serialization {
@@ -489,11 +489,11 @@
         }
         let messages = examples
             .iter()
-            .map(|example| self.prepare_message(example))
+            .map(Self::prepare_message)
             .collect::<Result<Vec<Vec<RequestMessage>>, _>>()?
             .into_iter()
             .flatten()
-            .chain(std::iter::once(self.prepare_input_message(input)?))
+            .chain(std::iter::once(Self::prepare_input_message(input)?))
             .collect::<Vec<_>>();
 
         let system = Some(self.system_instructions.clone());
@@ -658,9 +658,6 @@ mod tests {
 
     #[test]
     fn test_prepare_message() {
-        // Create an instance of DiclConfig (assuming default implementation is available)
-        let dicl_config = DiclConfig::default();
-
         // ---------- Test with ChatExample ----------
 
         // Mock Input data
@@ -701,7 +698,7 @@
             output: chat_output.clone(),
         });
 
-        let chat_messages = dicl_config.prepare_message(&chat_example).unwrap();
+        let chat_messages = DiclConfig::prepare_message(&chat_example).unwrap();
 
         assert_eq!(chat_messages.len(), 2);
 
@@ -737,7 +734,7 @@
             output: json_output.clone(),
         });
 
-        let json_messages = dicl_config.prepare_message(&json_example).unwrap();
+        let json_messages = DiclConfig::prepare_message(&json_example).unwrap();
 
         // Assertions for JsonExample
         assert_eq!(json_messages.len(), 2);
@@ -762,9 +759,6 @@
 
     #[test]
     fn test_prepare_input_message() {
-        // Create an instance of DiclConfig (assuming default implementation is available)
-        let dicl_config = DiclConfig::default();
-
         // Mock Input data
         let input_data = ResolvedInput {
             system: Some(json!({"assistant_name": "Dr. Mehta"})),
@@ -792,7 +786,7 @@
         };
 
         // Call the prepare_input_message function
-        let request_message = dicl_config.prepare_input_message(&input_data).unwrap();
+        let request_message = DiclConfig::prepare_input_message(&input_data).unwrap();
 
         // The role should be User
         assert_eq!(request_message.role, Role::User);
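A pleasant side effect shows up in the `.map(Self::prepare_message)` line above: once the receiver is gone, the function can be passed to `Iterator::map` as a plain path instead of being wrapped in a closure. The same idiom in a self-contained toy (names invented):

fn double(x: i32) -> i32 {
    x * 2
}

fn main() {
    let values = vec![1, 2, 3];

    // With a receiver this would need a closure, e.g.
    // `.map(|x| self.double(x))`; a function without one can be
    // passed by path directly:
    let doubled: Vec<i32> = values.into_iter().map(double).collect();
    assert_eq!(doubled, vec![2, 4, 6]);
}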

tensorzero-core/src/variant/mixture_of_n.rs

Lines changed: 3 additions & 22 deletions
@@ -698,7 +698,6 @@ impl FuserConfig {
     ///
     /// Returns an `Error` if any of the candidate outputs fail to serialize or if templating fails.
     fn prepare_candidate_message(
-        &self,
         templates: &TemplateConfig,
         candidates: &[InferenceResult],
     ) -> Result<(RequestMessage, Vec<usize>), Error> {
@@ -769,7 +768,7 @@
     {
         // Do this before we prepare the system message so we can use the correct max index in the system message
         let (candidate_message, included_indices) =
-            self.prepare_candidate_message(inference_config.templates, candidates)?;
+            Self::prepare_candidate_message(inference_config.templates, candidates)?;
         let max_index = included_indices.len().saturating_sub(1);
         let system = Some(self.prepare_system_message(
             inference_config.templates,
@@ -1014,15 +1013,6 @@ mod tests {
     async fn test_prepare_candidate_message() {
         let templates = get_test_template_config();
 
-        // Create an FuserConfig
-        let fuser_config = FuserConfig {
-            inner: ChatCompletionConfig {
-                model: "dummy".into(),
-                weight: Some(1.0),
-                ..Default::default()
-            },
-        };
-
         // Prepare some candidate InferenceResults
         let model_inference_response = ModelInferenceResponseWithMetadata {
             id: Uuid::now_v7(),
@@ -1093,7 +1083,7 @@
         let candidates = vec![candidate1, candidate2];
 
         // Call prepare_candidate_message
-        let result = fuser_config.prepare_candidate_message(&templates, &candidates);
+        let result = FuserConfig::prepare_candidate_message(&templates, &candidates);
         assert!(result.is_ok());
         let (request_message, included_indices) = result.unwrap();
         assert_eq!(included_indices, vec![0, 1]);
@@ -1108,15 +1098,6 @@
     async fn test_prepare_candidate_message_json() {
         let templates = get_test_template_config();
 
-        // Create a FuserConfig
-        let fuser_config = FuserConfig {
-            inner: ChatCompletionConfig {
-                model: "dummy_json".into(),
-                weight: Some(1.0),
-                ..Default::default()
-            },
-        };
-
         // Prepare some candidate InferenceResults - some valid, some malformed
         let model_inference_response_valid = ModelInferenceResponseWithMetadata {
             id: Uuid::now_v7(),
@@ -1189,7 +1170,7 @@
         let candidates = vec![candidate1, candidate2];
 
         // Call prepare_candidate_message
-        let result = fuser_config.prepare_candidate_message(&templates, &candidates);
+        let result = FuserConfig::prepare_candidate_message(&templates, &candidates);
         assert!(result.is_ok());
         let (request_message, included_indices) = result.unwrap();
 
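Across the three variant files, most of the 59 deleted lines come out of the tests: once `prepare_candidate_message`, `prepare_message`, and `prepare_input_message` are associated functions, the tests no longer construct a throwaway `BestOfNEvaluatorConfig`, `FuserConfig`, or `DiclConfig` just to reach the method under test, and instead call it directly as `Type::function(...)`.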