GRPC clients version 10.5.1
clarifai-prod committed Jun 10, 2024
1 parent 5f0bbbe commit 54de949
Showing 17 changed files with 3,618 additions and 1,813 deletions.
2 changes: 1 addition & 1 deletion VERSION
@@ -1 +1 @@
10.5.0
10.5.1
2 changes: 1 addition & 1 deletion package.json
@@ -1,6 +1,6 @@
{
"name": "clarifai-nodejs-grpc",
"version": "10.5.0",
"version": "10.5.1",
"description": "The official Clarifai Node.js gRPC client",
"main": "src/index.js",
"repository": "https://github.com/Clarifai/clarifai-javascript-grpc",
46 changes: 24 additions & 22 deletions proto/clarifai/api/resources.proto
@@ -594,14 +594,6 @@ message KnowledgeGraph {
}


// ConceptMappingJob
message ConceptMappingJob {
// The id of the knowledge graph being used for this concept mapping job
string knowledge_graph_id = 1;
// The ids of the concepts being mapped
repeated string concept_ids = 2;
}

// This represents a link to an outside source for the given concept.
// The values from here are stuck into the Concept message's name and definition fields when
// returning from the API in your default language. The "id" field here becomes the "language"
@@ -3378,6 +3370,7 @@ message TaskMetrics {

TaskWorkMetrics work = 2;
TaskReviewMetrics review = 3;
TaskInputSourceMetrics input_source = 4;
}

message TaskWorkMetrics {
@@ -3396,6 +3389,11 @@ message TaskReviewMetrics {
uint32 inputs_percent_estimated = 2;
}

message TaskInputSourceMetrics {
// Estimated number of inputs that are in the source of data
uint64 inputs_count_estimated = 1;
}

enum RoleType {
TEAM = 0;
ORG = 1;
@@ -4317,6 +4315,10 @@ message Runner {
// requirements on those objects, which may be less than what the Runner allocates (as a safety
// margin for the runner to for sure run the resource).
ComputeInfo compute_info = 10;

// Number of replicas that this runner should have up.
// We keep it separate from ComputeInfo which defines how many resources each replica needs.
uint32 num_replicas = 11;
}


@@ -4347,7 +4349,7 @@ message Nodepool {
repeated InstanceType instance_types = 8;

// Minimum number of instances in this nodepool. This allows the nodepool to scale down to this
// amount. A nodepool needs a minimum of 1 instance.
// amount.
uint32 min_instances = 9;

// An upper limit on the number of instances in this nodepool. This allows the nodepool to scale
@@ -4516,6 +4518,7 @@ message AutoscaleConfig {
// scale up and down. These are unique per user_id, nodepool and model so for different nodepools
// you can scale differently.
message Deployment {
reserved 5, 6;
// An id for this configured deployment.
string id = 1;
// The user who owns the deployment. These live in the user/org account.
@@ -4531,19 +4534,6 @@ message Deployment {
// supports
repeated Nodepool nodepools = 4;

// The thing that the autoscaling config applies to for this nodepool.
// For a given user_id, nodepool_id, and object ID we can only have one deployment as it defines
oneof object {
// Model
Model model = 5;
// Workflow
Workflow workflow = 6;
// We could also support matching by labels here for future "job" like functionality where
// the item itself fully defines the work that needs to be done.
// This would match any resources that have these runner labels defined on them.
// RunnerLabels runner_labels = 11; // FUTURE
}

// In some scenarios it may not be obvious how we should schedule a resource to underlying nodes
// within the nodepool(s) above. The SchedulerChoice allows us to specify how to decide which
// nodepool to use when there are multiple nodepools and how to decide which type of node
@@ -4573,6 +4563,18 @@ message Deployment {
// To handle arbitrary json metadata:
// https://github.com/google/protobuf/blob/master/src/google/protobuf/struct.proto
google.protobuf.Struct metadata = 9;

// Short description of deployment.
string description = 10;

// The thing that the autoscaling config applies to for this nodepool.
// For a given user_id, nodepool_id, and object ID we can only have one deployment as it defines
Worker worker = 11;

// When the deployment was created.
google.protobuf.Timestamp created_at = 12;
// When the deployment was last modified.
google.protobuf.Timestamp modified_at = 13;
}


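The resources.proto changes above add a `num_replicas` field to `Runner`, replace `Deployment`'s `model`/`workflow` oneof with a single `worker` field, and add `description`, `created_at`, and `modified_at` fields to `Deployment`. Below is a minimal sketch of setting the new fields through the generated message classes shipped with this client; the import path is assumed from the repository layout, the id and description strings are hypothetical, and `Worker`'s own fields are not shown in this diff.

```typescript
// Sketch only: import path assumed from this package's proto/ layout.
import * as resources from "clarifai-nodejs-grpc/proto/clarifai/api/resources_pb";

// A Runner can now declare how many replicas it should keep up,
// separately from the per-replica resources described by ComputeInfo.
const runner = new resources.Runner();
runner.setNumReplicas(3);

// A Deployment now targets a Worker instead of the removed model/workflow
// oneof, and carries a free-form description.
const deployment = new resources.Deployment();
deployment.setId("my-deployment"); // hypothetical id
deployment.setDescription("Autoscaled deployment for my model");
deployment.setWorker(new resources.Worker()); // populate with your model/workflow target
```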
94 changes: 49 additions & 45 deletions proto/clarifai/api/resources_pb.d.ts
@@ -1029,31 +1029,6 @@ export namespace KnowledgeGraph {
}
}

export class ConceptMappingJob extends jspb.Message {
getKnowledgeGraphId(): string;
setKnowledgeGraphId(value: string): ConceptMappingJob;
clearConceptIdsList(): void;
getConceptIdsList(): Array<string>;
setConceptIdsList(value: Array<string>): ConceptMappingJob;
addConceptIds(value: string, index?: number): string;

serializeBinary(): Uint8Array;
toObject(includeInstance?: boolean): ConceptMappingJob.AsObject;
static toObject(includeInstance: boolean, msg: ConceptMappingJob): ConceptMappingJob.AsObject;
static extensions: {[key: number]: jspb.ExtensionFieldInfo<jspb.Message>};
static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo<jspb.Message>};
static serializeBinaryToWriter(message: ConceptMappingJob, writer: jspb.BinaryWriter): void;
static deserializeBinary(bytes: Uint8Array): ConceptMappingJob;
static deserializeBinaryFromReader(message: ConceptMappingJob, reader: jspb.BinaryReader): ConceptMappingJob;
}

export namespace ConceptMappingJob {
export type AsObject = {
knowledgeGraphId: string,
conceptIdsList: Array<string>,
}
}

export class ConceptLanguage extends jspb.Message {
getId(): string;
setId(value: string): ConceptLanguage;
@@ -6163,6 +6138,11 @@ export class TaskMetrics extends jspb.Message {
getReview(): TaskReviewMetrics | undefined;
setReview(value?: TaskReviewMetrics): TaskMetrics;

hasInputSource(): boolean;
clearInputSource(): void;
getInputSource(): TaskInputSourceMetrics | undefined;
setInputSource(value?: TaskInputSourceMetrics): TaskMetrics;

serializeBinary(): Uint8Array;
toObject(includeInstance?: boolean): TaskMetrics.AsObject;
static toObject(includeInstance: boolean, msg: TaskMetrics): TaskMetrics.AsObject;
@@ -6177,6 +6157,7 @@ export namespace TaskMetrics {
export type AsObject = {
work?: TaskWorkMetrics.AsObject,
review?: TaskReviewMetrics.AsObject,
inputSource?: TaskInputSourceMetrics.AsObject,
}
}

@@ -6226,6 +6207,26 @@ export namespace TaskReviewMetrics {
}
}

export class TaskInputSourceMetrics extends jspb.Message {
getInputsCountEstimated(): number;
setInputsCountEstimated(value: number): TaskInputSourceMetrics;

serializeBinary(): Uint8Array;
toObject(includeInstance?: boolean): TaskInputSourceMetrics.AsObject;
static toObject(includeInstance: boolean, msg: TaskInputSourceMetrics): TaskInputSourceMetrics.AsObject;
static extensions: {[key: number]: jspb.ExtensionFieldInfo<jspb.Message>};
static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo<jspb.Message>};
static serializeBinaryToWriter(message: TaskInputSourceMetrics, writer: jspb.BinaryWriter): void;
static deserializeBinary(bytes: Uint8Array): TaskInputSourceMetrics;
static deserializeBinaryFromReader(message: TaskInputSourceMetrics, reader: jspb.BinaryReader): TaskInputSourceMetrics;
}

export namespace TaskInputSourceMetrics {
export type AsObject = {
inputsCountEstimated: number,
}
}

export class Collector extends jspb.Message {
getId(): string;
setId(value: string): Collector;
@@ -7989,6 +7990,8 @@ export class Runner extends jspb.Message {
clearComputeInfo(): void;
getComputeInfo(): ComputeInfo | undefined;
setComputeInfo(value?: ComputeInfo): Runner;
getNumReplicas(): number;
setNumReplicas(value: number): Runner;

serializeBinary(): Uint8Array;
toObject(includeInstance?: boolean): Runner.AsObject;
@@ -8012,6 +8015,7 @@ export namespace Runner {
worker?: Worker.AsObject,
nodepool?: Nodepool.AsObject,
computeInfo?: ComputeInfo.AsObject,
numReplicas: number,
}
}

@@ -8299,16 +8303,6 @@ export class Deployment extends jspb.Message {
getNodepoolsList(): Array<Nodepool>;
setNodepoolsList(value: Array<Nodepool>): Deployment;
addNodepools(value?: Nodepool, index?: number): Nodepool;

hasModel(): boolean;
clearModel(): void;
getModel(): Model | undefined;
setModel(value?: Model): Deployment;

hasWorkflow(): boolean;
clearWorkflow(): void;
getWorkflow(): Workflow | undefined;
setWorkflow(value?: Workflow): Deployment;
getSchedulingChoice(): Deployment.SchedulingChoice;
setSchedulingChoice(value: Deployment.SchedulingChoice): Deployment;

@@ -8321,8 +8315,23 @@ export class Deployment extends jspb.Message {
clearMetadata(): void;
getMetadata(): google_protobuf_struct_pb.Struct | undefined;
setMetadata(value?: google_protobuf_struct_pb.Struct): Deployment;
getDescription(): string;
setDescription(value: string): Deployment;

hasWorker(): boolean;
clearWorker(): void;
getWorker(): Worker | undefined;
setWorker(value?: Worker): Deployment;

getObjectCase(): Deployment.ObjectCase;
hasCreatedAt(): boolean;
clearCreatedAt(): void;
getCreatedAt(): google_protobuf_timestamp_pb.Timestamp | undefined;
setCreatedAt(value?: google_protobuf_timestamp_pb.Timestamp): Deployment;

hasModifiedAt(): boolean;
clearModifiedAt(): void;
getModifiedAt(): google_protobuf_timestamp_pb.Timestamp | undefined;
setModifiedAt(value?: google_protobuf_timestamp_pb.Timestamp): Deployment;

serializeBinary(): Uint8Array;
toObject(includeInstance?: boolean): Deployment.AsObject;
@@ -8340,11 +8349,13 @@ export namespace Deployment {
userId: string,
autoscaleConfig?: AutoscaleConfig.AsObject,
nodepoolsList: Array<Nodepool.AsObject>,
model?: Model.AsObject,
workflow?: Workflow.AsObject,
schedulingChoice: Deployment.SchedulingChoice,
visibility?: Visibility.AsObject,
metadata?: google_protobuf_struct_pb.Struct.AsObject,
description: string,
worker?: Worker.AsObject,
createdAt?: google_protobuf_timestamp_pb.Timestamp.AsObject,
modifiedAt?: google_protobuf_timestamp_pb.Timestamp.AsObject,
}

export enum SchedulingChoice {
@@ -8359,13 +8370,6 @@ export namespace Deployment {
PREFER_ONDEMAND = 8,
}


export enum ObjectCase {
OBJECT_NOT_SET = 0,
MODEL = 5,
WORKFLOW = 6,
}

}

export class RunnerSelector extends jspb.Message {
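The regenerated TypeScript definitions above mirror the proto changes: `TaskMetrics` gains has/clear/get/set methods for the new `input_source` field, and `TaskInputSourceMetrics` exposes the estimated input count. A minimal sketch of reading these values follows, again assuming the same import path for the generated code.

```typescript
// Sketch only: import path assumed from this package's proto/ layout.
import * as resources from "clarifai-nodejs-grpc/proto/clarifai/api/resources_pb";

// TaskMetrics now exposes an optional input_source sub-message with the
// estimated number of inputs in the task's data source.
function logEstimatedInputs(metrics: resources.TaskMetrics): void {
  const inputSource = metrics.getInputSource(); // undefined if not set
  if (inputSource !== undefined) {
    console.log(`Estimated inputs: ${inputSource.getInputsCountEstimated()}`);
  }
}
```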