Mirror of https://github.com/GayPizzaSpecifications/stable-diffusion-rpc.git (synced 2025-08-02 21:20:55 +00:00)

Commit ace2c07aa1 (parent a2d9e14f3a)

Job management and preparation for multi-hosting.
@@ -6,13 +6,16 @@ find_package(gRPC CONFIG REQUIRED)

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")

add_library(sdrpc StableDiffusion.proto)
file(GLOB PROTO_FILES "proto/*.proto")
add_library(sdrpc ${PROTO_FILES})

get_target_property(grpc_cpp_plugin_location gRPC::grpc_cpp_plugin LOCATION)

protobuf_generate(TARGET sdrpc LANGUAGE cpp)
protobuf_generate(TARGET sdrpc LANGUAGE cpp IMPORT_DIRS proto)
protobuf_generate(TARGET sdrpc LANGUAGE grpc
  GENERATE_EXTENSIONS .grpc.pb.h .grpc.pb.cc
  IMPORT_DIRS proto
  PLUGIN "protoc-gen-grpc=${grpc_cpp_plugin_location}")
target_include_directories(sdrpc PUBLIC ${CMAKE_CURRENT_BINARY_DIR})
target_link_libraries(sdrpc PUBLIC protobuf::libprotobuf gRPC::grpc gRPC::grpc++)
@@ -1 +0,0 @@
../../Common/StableDiffusion.proto

Clients/Cpp/proto (symbolic link, 1 line)

@@ -0,0 +1 @@
../../Common
@@ -1,10 +1,25 @@
#include "StableDiffusion.pb.h"
#include "StableDiffusion.grpc.pb.h"
#include "host.grpc.pb.h"
#include "image_generation.grpc.pb.h"

#include <grpc++/grpc++.h>

using namespace gay::pizza::stable::diffusion;

// Orders models that are already loaded ahead of models that are not.
// std::sort expects a strict weak ordering, so the comparator returns true
// only when `left` should come before `right`.
bool CompareModelInfoByLoadedFirst(const ModelInfo& left, const ModelInfo& right) {
  if (left.is_loaded() && right.is_loaded()) {
    return false;
  }

  if (left.is_loaded()) {
    return true;
  }

  return false;
}

int main() {
  auto channel = grpc::CreateChannel("localhost:4546", grpc::InsecureChannelCredentials());
  auto modelService = ModelService::NewStub(channel);

@@ -19,5 +34,9 @@ int main() {
  for (const auto &item: models) {
    std::cout << "Model Name: " << item.name() << std::endl;
  }

  std::sort(models.begin(), models.end(), CompareModelInfoByLoadedFirst);
  auto model = models.begin();
  std::cout << "Chosen Model: " << model->name() << std::endl;
  return 0;
}
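For comparison, the same "prefer already-loaded models" selection can be written against the Kotlin client that this commit extends; this is a hedged sketch (not part of the diff) that assumes the StableDiffusionRpcClient shown below and the standard protobuf-java accessors for ModelInfo:

import gay.pizza.stable.diffusion.ListModelsRequest
import gay.pizza.stable.diffusion.ModelInfo
import gay.pizza.stable.diffusion.StableDiffusionRpcClient

// Sketch only: mirror the C++ comparator above by sorting loaded models first.
fun chooseModel(client: StableDiffusionRpcClient): ModelInfo? {
  val response = client.modelServiceBlocking.listModels(ListModelsRequest.getDefaultInstance())
  return response.availableModelsList
    .sortedByDescending { it.isLoaded }
    .firstOrNull()
}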
@@ -1,9 +1,9 @@
package gay.pizza.stable.diffusion.sample

import com.google.protobuf.ByteString
import gay.pizza.stable.diffusion.StableDiffusion.*
import gay.pizza.stable.diffusion.StableDiffusionRpcClient
import gay.pizza.stable.diffusion.*
import io.grpc.ManagedChannelBuilder
import io.grpc.stub.StreamObserver
import kotlinx.coroutines.async
import kotlinx.coroutines.awaitAll
import kotlinx.coroutines.runBlocking

@@ -23,6 +23,25 @@ fun main(args: Array<String>) {
    .build()

  val client = StableDiffusionRpcClient(channel)

  val jobs = mutableMapOf<Long, Job>()

  client.jobService.streamJobUpdates(StreamJobUpdatesRequest.getDefaultInstance(), object : StreamObserver<JobUpdate> {
    override fun onNext(value: JobUpdate) {
      jobs[value.job.id] = value.job
      jobs.values.map {
        "job=${it.id} status=${it.state.name} completion=${it.overallPercentageComplete}"
      }.forEach(::println)
    }

    override fun onError(throwable: Throwable) {
      throwable.printStackTrace()
      exitProcess(1)
    }

    override fun onCompleted() {}
  })

  val modelListResponse = client.modelServiceBlocking.listModels(ListModelsRequest.getDefaultInstance())
  if (modelListResponse.availableModelsList.isEmpty()) {
    println("no available models")
@@ -51,4 +51,20 @@ class StableDiffusionRpcClient(val channel: Channel) {
  val tokenizerServiceCoroutine: TokenizerServiceGrpcKt.TokenizerServiceCoroutineStub by lazy {
    TokenizerServiceGrpcKt.TokenizerServiceCoroutineStub(channel)
  }

  val jobService: JobServiceGrpc.JobServiceStub by lazy {
    JobServiceGrpc.newStub(channel)
  }

  val jobServiceBlocking: JobServiceGrpc.JobServiceBlockingStub by lazy {
    JobServiceGrpc.newBlockingStub(channel)
  }

  val jobServiceFuture: JobServiceGrpc.JobServiceFutureStub by lazy {
    JobServiceGrpc.newFutureStub(channel)
  }

  val jobServiceCoroutine: JobServiceGrpcKt.JobServiceCoroutineStub by lazy {
    JobServiceGrpcKt.JobServiceCoroutineStub(channel)
  }
}
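With these stubs in place, job inspection and cancellation become small calls; this is a minimal sketch (not part of the diff) using the blocking stub and the GetJob/CancelJob RPCs defined in jobs.proto below, where method and builder names are assumed to follow the usual protoc-gen-grpc-java output:

import gay.pizza.stable.diffusion.CancelJobRequest
import gay.pizza.stable.diffusion.GetJobRequest
import gay.pizza.stable.diffusion.JobState
import gay.pizza.stable.diffusion.StableDiffusionRpcClient

// Sketch only: look up a job and cancel it if it has not started running yet.
fun cancelIfStillQueued(client: StableDiffusionRpcClient, jobId: Long) {
  val job = client.jobServiceBlocking
    .getJob(GetJobRequest.newBuilder().setId(jobId).build())
    .job
  if (job.state == JobState.queued) {
    client.jobServiceBlocking.cancelJob(CancelJobRequest.newBuilder().setId(jobId).build())
  }
}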
@ -1,395 +0,0 @@
|
||||
/**
|
||||
* Stable Diffusion RPC service for Apple Platforms.
|
||||
*/
|
||||
syntax = "proto3";
|
||||
package gay.pizza.stable.diffusion;
|
||||
|
||||
/**
|
||||
* Utilize a prefix of 'Sd' for Swift.
|
||||
*/
|
||||
option swift_prefix = "Sd";
|
||||
|
||||
/**
|
||||
* Represents the model attention. Model attention has to do with how the model is encoded, and
|
||||
* can determine what compute units are able to support a particular model.
|
||||
*/
|
||||
enum ModelAttention {
|
||||
/**
|
||||
* The model is an original attention type. It can be loaded only onto CPU & GPU compute units.
|
||||
*/
|
||||
original = 0;
|
||||
|
||||
/**
|
||||
* The model is a split-ein-sum attention type. It can be loaded onto all compute units,
|
||||
* including the Apple Neural Engine.
|
||||
*/
|
||||
split_ein_sum = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents the schedulers that are used to sample images.
|
||||
*/
|
||||
enum Scheduler {
|
||||
/**
|
||||
* The PNDM (Pseudo numerical methods for diffusion models) scheduler.
|
||||
*/
|
||||
pndm = 0;
|
||||
|
||||
/**
|
||||
* The DPM-Solver++ scheduler.
|
||||
*/
|
||||
dpm_solver_plus_plus = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents a specifier for what compute units are available for ML tasks.
|
||||
*/
|
||||
enum ComputeUnits {
|
||||
/**
|
||||
* The CPU as a singular compute unit.
|
||||
*/
|
||||
cpu = 0;
|
||||
|
||||
/**
|
||||
* The CPU & GPU combined into a singular compute unit.
|
||||
*/
|
||||
cpu_and_gpu = 1;
|
||||
|
||||
/**
|
||||
* Allow the usage of all compute units. CoreML will decide where the model is loaded.
|
||||
*/
|
||||
all = 2;
|
||||
|
||||
/**
|
||||
* The CPU & Neural Engine combined into a singular compute unit.
|
||||
*/
|
||||
cpu_and_neural_engine = 3;
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents information about an available model.
|
||||
* The primary key of a model is its 'name' field.
|
||||
*/
|
||||
message ModelInfo {
|
||||
/**
|
||||
* The name of the available model. Note that within the context of a single RPC server,
|
||||
* the name of a model is a unique identifier. This may not be true when utilizing a cluster or
|
||||
* load balanced server, so keep that in mind.
|
||||
*/
|
||||
string name = 1;
|
||||
|
||||
/**
|
||||
* The attention of the model. Model attention determines what compute units can be used to
|
||||
* load the model and make predictions.
|
||||
*/
|
||||
ModelAttention attention = 2;
|
||||
|
||||
/**
|
||||
* Whether the model is currently loaded onto an available compute unit.
|
||||
*/
|
||||
bool is_loaded = 3;
|
||||
|
||||
/**
|
||||
* The compute unit that the model is currently loaded into, if it is loaded to one at all.
|
||||
* When is_loaded is false, the value of this field should be null.
|
||||
*/
|
||||
ComputeUnits loaded_compute_units = 4;
|
||||
|
||||
/**
|
||||
* The compute units that this model supports using.
|
||||
*/
|
||||
repeated ComputeUnits supported_compute_units = 5;
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents the format of an image.
|
||||
*/
|
||||
enum ImageFormat {
|
||||
/**
|
||||
* The PNG image format.
|
||||
*/
|
||||
png = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents an image within the Stable Diffusion context.
|
||||
* This could be an input image for an image generation request, or it could be
|
||||
* a generated image from the Stable Diffusion model.
|
||||
*/
|
||||
message Image {
|
||||
/**
|
||||
* The format of the image.
|
||||
*/
|
||||
ImageFormat format = 1;
|
||||
|
||||
/**
|
||||
* The raw data of the image, in the specified format.
|
||||
*/
|
||||
bytes data = 2;
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents a request to list the models available on the host.
|
||||
*/
|
||||
message ListModelsRequest {}
|
||||
|
||||
/**
|
||||
* Represents a response to listing the models available on the host.
|
||||
*/
|
||||
message ListModelsResponse {
|
||||
/**
|
||||
* The available models on the Stable Diffusion server.
|
||||
*/
|
||||
repeated ModelInfo available_models = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents a request to load a model into a specified compute unit.
|
||||
*/
|
||||
message LoadModelRequest {
|
||||
/**
|
||||
* The model name to load onto the compute unit.
|
||||
*/
|
||||
string model_name = 1;
|
||||
|
||||
/**
|
||||
* The compute units to load the model onto.
|
||||
*/
|
||||
ComputeUnits compute_units = 2;
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents a response to loading a model.
|
||||
*/
|
||||
message LoadModelResponse {}
|
||||
|
||||
/**
|
||||
* The model service, for management and loading of models.
|
||||
*/
|
||||
service ModelService {
|
||||
/**
|
||||
* Lists the available models on the host.
|
||||
* This will return both models that are currently loaded, and models that are not yet loaded.
|
||||
*/
|
||||
rpc ListModels(ListModelsRequest) returns (ListModelsResponse);
|
||||
|
||||
/**
|
||||
* Loads a model onto a compute unit.
|
||||
*/
|
||||
rpc LoadModel(LoadModelRequest) returns (LoadModelResponse);
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents a request to generate images using a loaded model.
|
||||
*/
|
||||
message GenerateImagesRequest {
|
||||
/**
|
||||
* The model name to use for generation.
|
||||
* The model must already be loaded using the ModelService.LoadModel RPC method.
|
||||
*/
|
||||
string model_name = 1;
|
||||
|
||||
/**
|
||||
* The output format for generated images.
|
||||
*/
|
||||
ImageFormat output_image_format = 2;
|
||||
|
||||
/**
|
||||
* The number of batches of images to generate.
|
||||
*/
|
||||
uint32 batch_count = 3;
|
||||
|
||||
/**
|
||||
* The number of images inside a single batch.
|
||||
*/
|
||||
uint32 batch_size = 4;
|
||||
|
||||
/**
|
||||
* The positive textual prompt for image generation.
|
||||
*/
|
||||
string prompt = 5;
|
||||
|
||||
/**
|
||||
* The negative prompt for image generation.
|
||||
*/
|
||||
string negative_prompt = 6;
|
||||
|
||||
/**
|
||||
* The random seed to use.
|
||||
* Zero indicates that the seed should be random.
|
||||
*/
|
||||
uint32 seed = 7;
|
||||
|
||||
/**
|
||||
* An optional starting image to use for generation.
|
||||
*/
|
||||
Image starting_image = 8;
|
||||
|
||||
/**
|
||||
* Indicates whether to enable the safety check network, if it is available.
|
||||
*/
|
||||
bool enable_safety_check = 9;
|
||||
|
||||
/**
|
||||
* The scheduler to use for generation.
|
||||
* The default is PNDM, if not specified.
|
||||
*/
|
||||
Scheduler scheduler = 10;
|
||||
|
||||
/**
|
||||
* The guidance scale, which controls the influence the prompt has on the image.
|
||||
* If not specified, a reasonable default value is used.
|
||||
*/
|
||||
float guidance_scale = 11;
|
||||
|
||||
/**
|
||||
* The strength of the image generation.
|
||||
* If not specified, a reasonable default value is used.
|
||||
*/
|
||||
float strength = 12;
|
||||
|
||||
/**
|
||||
* The number of inference steps to perform.
|
||||
* If not specified, a reasonable default value is used.
|
||||
*/
|
||||
uint32 step_count = 13;
|
||||
|
||||
/**
|
||||
* Indicates whether to send intermediate images
|
||||
* while in streaming mode.
|
||||
*/
|
||||
bool send_intermediates = 14;
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents the response from image generation.
|
||||
*/
|
||||
message GenerateImagesResponse {
|
||||
/**
|
||||
* The set of generated images by the Stable Diffusion pipeline.
|
||||
*/
|
||||
repeated Image images = 1;
|
||||
|
||||
/**
|
||||
* The seeds that were used to generate the images.
|
||||
*/
|
||||
repeated uint32 seeds = 2;
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents a progress update for an image generation batch.
|
||||
*/
|
||||
message GenerateImagesBatchProgressUpdate {
|
||||
/**
|
||||
* The percentage of this batch that is complete.
|
||||
*/
|
||||
float percentage_complete = 1;
|
||||
|
||||
/**
|
||||
* The current state of the generated images from this batch.
|
||||
* These are not usually completed images, but partial images.
|
||||
* These are only available if the request's send_intermediates
|
||||
* parameter is set to true.
|
||||
*/
|
||||
repeated Image images = 2;
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents a completion of an image generation batch.
|
||||
*/
|
||||
message GenerateImagesBatchCompletedUpdate {
|
||||
/**
|
||||
* The generated images from this batch.
|
||||
*/
|
||||
repeated Image images = 1;
|
||||
|
||||
/**
|
||||
* The seed for this batch.
|
||||
*/
|
||||
uint32 seed = 2;
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents a continuous update from an image generation stream.
|
||||
*/
|
||||
message GenerateImagesStreamUpdate {
|
||||
/**
|
||||
* The current batch number that is processing.
|
||||
*/
|
||||
uint32 current_batch = 1;
|
||||
|
||||
/**
|
||||
* An update to the image generation pipeline.
|
||||
*/
|
||||
oneof update {
|
||||
/**
|
||||
* Batch progress update.
|
||||
*/
|
||||
GenerateImagesBatchProgressUpdate batch_progress = 2;
|
||||
|
||||
/**
|
||||
* Batch completion update.
|
||||
*/
|
||||
GenerateImagesBatchCompletedUpdate batch_completed = 3;
|
||||
}
|
||||
|
||||
/**
|
||||
* The percentage of completion for the entire submitted job.
|
||||
*/
|
||||
float overall_percentage_complete = 4;
|
||||
}
|
||||
|
||||
/**
|
||||
* The image generation service, for generating images from loaded models.
|
||||
*/
|
||||
service ImageGenerationService {
|
||||
/**
|
||||
* Generates images using a loaded model.
|
||||
*/
|
||||
rpc GenerateImages(GenerateImagesRequest) returns (GenerateImagesResponse);
|
||||
|
||||
/**
|
||||
* Generates images using a loaded model, providing updates along the way.
|
||||
*/
|
||||
rpc GenerateImagesStreaming(GenerateImagesRequest) returns (stream GenerateImagesStreamUpdate);
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents a request to tokenize an input.
|
||||
*/
|
||||
message TokenizeRequest {
|
||||
/**
|
||||
* The name of a loaded model to use for tokenization.
|
||||
*/
|
||||
string model_name = 1;
|
||||
|
||||
/**
|
||||
* The input string to tokenize.
|
||||
*/
|
||||
string input = 2;
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents a response to tokenization.
|
||||
*/
|
||||
message TokenizeResponse {
|
||||
/**
|
||||
* The tokens inside the input string.
|
||||
*/
|
||||
repeated string tokens = 1;
|
||||
|
||||
/**
|
||||
* The token IDs inside the input string.
|
||||
*/
|
||||
repeated uint64 token_ids = 2;
|
||||
}
|
||||
|
||||
/**
|
||||
* The tokenizer service, for analyzing tokens for a loaded model.
|
||||
*/
|
||||
service TokenizerService {
|
||||
/**
|
||||
* Analyze the input using a loaded model and return the results.
|
||||
*/
|
||||
rpc Tokenize(TokenizeRequest) returns (TokenizeResponse);
|
||||
}
|
Common/host.proto (new file, 63 lines)

@@ -0,0 +1,63 @@
/**
 * Host management for the Stable Diffusion RPC service.
 */
syntax = "proto3";
package gay.pizza.stable.diffusion;
import "shared.proto";

/**
 * Utilize a prefix of 'Sd' for Swift.
 */
option swift_prefix = "Sd";
option java_multiple_files = true;

/**
 * Represents a request to list the models available on the host.
 */
message ListModelsRequest {}

/**
 * Represents a response to listing the models available on the host.
 */
message ListModelsResponse {
  /**
   * The available models on the Stable Diffusion server.
   */
  repeated ModelInfo available_models = 1;
}

/**
 * Represents a request to load a model into a specified compute unit.
 */
message LoadModelRequest {
  /**
   * The model name to load onto the compute unit.
   */
  string model_name = 1;

  /**
   * The compute units to load the model onto.
   */
  ComputeUnits compute_units = 2;
}

/**
 * Represents a response to loading a model.
 */
message LoadModelResponse {}

/**
 * The model service, for management and loading of models.
 */
service ModelService {
  /**
   * Lists the available models on the host.
   * This will return both models that are currently loaded, and models that are not yet loaded.
   */
  rpc ListModels(ListModelsRequest) returns (ListModelsResponse);

  /**
   * Loads a model onto a compute unit.
   */
  rpc LoadModel(LoadModelRequest) returns (LoadModelResponse);
}
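A minimal Kotlin sketch of how a client might drive this service, using the StableDiffusionRpcClient stubs shown earlier in this commit; the request builders are the standard protobuf-java generated API, and the compute-unit choice is illustrative only:

import gay.pizza.stable.diffusion.ComputeUnits
import gay.pizza.stable.diffusion.ListModelsRequest
import gay.pizza.stable.diffusion.LoadModelRequest
import gay.pizza.stable.diffusion.StableDiffusionRpcClient

// Sketch only: list the host's models and load the first one that is not yet loaded.
fun loadFirstUnloadedModel(client: StableDiffusionRpcClient) {
  val models = client.modelServiceBlocking
    .listModels(ListModelsRequest.getDefaultInstance())
    .availableModelsList
  val candidate = models.firstOrNull { !it.isLoaded } ?: return
  client.modelServiceBlocking.loadModel(
    LoadModelRequest.newBuilder()
      .setModelName(candidate.name)
      .setComputeUnits(ComputeUnits.all) // 'all' lets CoreML decide placement
      .build()
  )
}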
Common/image_generation.proto (new file, 192 lines)

@@ -0,0 +1,192 @@
/**
 * Image generation for the Stable Diffusion RPC service.
 */
syntax = "proto3";
package gay.pizza.stable.diffusion;
import "shared.proto";

/**
 * Utilize a prefix of 'Sd' for Swift.
 */
option swift_prefix = "Sd";
option java_multiple_files = true;

/**
 * Represents a request to generate images using a loaded model.
 */
message GenerateImagesRequest {
  /**
   * The model name to use for generation.
   * The model must already be loaded using the ModelService.LoadModel RPC method.
   */
  string model_name = 1;

  /**
   * The output format for generated images.
   */
  ImageFormat output_image_format = 2;

  /**
   * The number of batches of images to generate.
   */
  uint32 batch_count = 3;

  /**
   * The number of images inside a single batch.
   */
  uint32 batch_size = 4;

  /**
   * The positive textual prompt for image generation.
   */
  string prompt = 5;

  /**
   * The negative prompt for image generation.
   */
  string negative_prompt = 6;

  /**
   * The random seed to use.
   * Zero indicates that the seed should be random.
   */
  uint32 seed = 7;

  /**
   * An optional starting image to use for generation.
   */
  Image starting_image = 8;

  /**
   * Indicates whether to enable the safety check network, if it is available.
   */
  bool enable_safety_check = 9;

  /**
   * The scheduler to use for generation.
   * The default is PNDM, if not specified.
   */
  Scheduler scheduler = 10;

  /**
   * The guidance scale, which controls the influence the prompt has on the image.
   * If not specified, a reasonable default value is used.
   */
  float guidance_scale = 11;

  /**
   * The strength of the image generation.
   * If not specified, a reasonable default value is used.
   */
  float strength = 12;

  /**
   * The number of inference steps to perform.
   * If not specified, a reasonable default value is used.
   */
  uint32 step_count = 13;

  /**
   * Indicates whether to send intermediate images
   * while in streaming mode.
   */
  bool send_intermediates = 14;
}

/**
 * Represents the response from image generation.
 */
message GenerateImagesResponse {
  /**
   * The set of generated images by the Stable Diffusion pipeline.
   */
  repeated Image images = 1;

  /**
   * The seeds that were used to generate the images.
   */
  repeated uint32 seeds = 2;
}

/**
 * Represents a progress update for an image generation batch.
 */
message GenerateImagesBatchProgressUpdate {
  /**
   * The percentage of this batch that is complete.
   */
  float percentage_complete = 1;

  /**
   * The current state of the generated images from this batch.
   * These are not usually completed images, but partial images.
   * These are only available if the request's send_intermediates
   * parameter is set to true.
   */
  repeated Image images = 2;
}

/**
 * Represents a completion of an image generation batch.
 */
message GenerateImagesBatchCompletedUpdate {
  /**
   * The generated images from this batch.
   */
  repeated Image images = 1;

  /**
   * The seed for this batch.
   */
  uint32 seed = 2;
}

/**
 * Represents a continuous update from an image generation stream.
 */
message GenerateImagesStreamUpdate {
  /**
   * The current batch number that is processing.
   */
  uint32 current_batch = 1;

  /**
   * An update to the image generation pipeline.
   */
  oneof update {
    /**
     * Batch progress update.
     */
    GenerateImagesBatchProgressUpdate batch_progress = 2;

    /**
     * Batch completion update.
     */
    GenerateImagesBatchCompletedUpdate batch_completed = 3;
  }

  /**
   * The percentage of completion for the entire submitted job.
   */
  float overall_percentage_complete = 4;

  /**
   * The id of the spawned job.
   */
  uint64 job_id = 5;
}

/**
 * The image generation service, for generating images from loaded models.
 */
service ImageGenerationService {
  /**
   * Generates images using a loaded model.
   */
  rpc GenerateImages(GenerateImagesRequest) returns (GenerateImagesResponse);

  /**
   * Generates images using a loaded model, providing updates along the way.
   */
  rpc GenerateImagesStreaming(GenerateImagesRequest) returns (stream GenerateImagesStreamUpdate);
}
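The streaming form of the request can be exercised end to end from Kotlin; this is a hedged sketch, not part of the commit, and it assumes protoc-gen-grpc-java produces an ImageGenerationServiceGrpc stub following the same pattern as the JobServiceGrpc stub shown earlier (the prompt, port, and batch sizes are placeholders):

import gay.pizza.stable.diffusion.GenerateImagesRequest
import gay.pizza.stable.diffusion.ImageFormat
import gay.pizza.stable.diffusion.ImageGenerationServiceGrpc
import io.grpc.ManagedChannelBuilder

// Sketch only: request two batches and print the streamed updates,
// including the job_id field introduced by this commit.
fun generateWithProgress(modelName: String) {
  val channel = ManagedChannelBuilder.forAddress("localhost", 4546).usePlaintext().build()
  val stub = ImageGenerationServiceGrpc.newBlockingStub(channel)
  val request = GenerateImagesRequest.newBuilder()
    .setModelName(modelName)
    .setOutputImageFormat(ImageFormat.png)
    .setBatchCount(2)
    .setBatchSize(1)
    .setPrompt("a pizza in space")
    .setSendIntermediates(false)
    .build()
  for (update in stub.generateImagesStreaming(request)) {
    when {
      update.hasBatchProgress() ->
        println("job=${update.jobId} batch=${update.currentBatch} ${update.overallPercentageComplete}%")
      update.hasBatchCompleted() ->
        println("job=${update.jobId} batch=${update.currentBatch} completed, seed=${update.batchCompleted.seed}")
    }
  }
  channel.shutdown()
}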
Common/jobs.proto (new file, 131 lines)

@@ -0,0 +1,131 @@
/**
 * Job management for the Stable Diffusion RPC service.
 */
syntax = "proto3";
package gay.pizza.stable.diffusion;

/**
 * Utilize a prefix of 'Sd' for Swift.
 */
option swift_prefix = "Sd";
option java_multiple_files = true;

/**
 * Represents the current state of a job.
 */
enum JobState {
  /**
   * The job is in an unknown state.
   */
  unknown = 0;

  /**
   * The job is queued. It has not started the work.
   */
  queued = 1;

  /**
   * The job is running. The work has been started.
   */
  running = 2;

  /**
   * The job is completed. The work has been completed.
   */
  completed = 3;

  /**
   * The job is cancelled. An actor requested cancellation.
   */
  cancelled = 4;
}

/**
 * Represents a job that is active.
 */
message Job {
  /**
   * Unique job identifier.
   */
  uint64 id = 1;

  /**
   * Job host identifier.
   */
  uint64 host = 2;

  /**
   * The current state of the job.
   */
  JobState state = 3;

  /**
   * The percentage of completion for the entire job.
   */
  float overall_percentage_complete = 4;
}

/**
 * Represents a request to get the state of a job.
 */
message GetJobRequest {
  /**
   * The job id to retrieve the current state for.
   */
  uint64 id = 1;
}

/**
 * Represents a response to getting the state of a job.
 */
message GetJobResponse {
  /**
   * The current state of the job.
   */
  Job job = 1;
}

/**
 * Represents a request to cancel a job.
 */
message CancelJobRequest {
  /**
   * The job id to cancel.
   */
  uint64 id = 1;
}

/**
 * Represents a response to cancelling a job.
 */
message CancelJobResponse {}

/**
 * Represents a request to stream job updates.
 */
message StreamJobUpdatesRequest {
  /**
   * The job id to stream updates for. If this is not set or is zero,
   * all job updates will be sent.
   */
  uint64 id = 1;
}

/**
 * Represents an update to a job.
 */
message JobUpdate {
  /**
   * The current state of the job.
   */
  Job job = 1;
}

/**
 * The job service, for inspecting and monitoring the state of jobs executing on the service.
 */
service JobService {
  rpc GetJob(GetJobRequest) returns (GetJobResponse);
  rpc CancelJob(CancelJobRequest) returns (CancelJobResponse);
  rpc StreamJobUpdates(StreamJobUpdatesRequest) returns (stream JobUpdate);
}
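A minimal Kotlin sketch of following a single job through this service, using the JobServiceGrpc stubs added to the Kotlin client in this commit; setting the id on StreamJobUpdatesRequest filters to one job, while leaving it at zero would stream every job, as documented above:

import gay.pizza.stable.diffusion.JobServiceGrpc
import gay.pizza.stable.diffusion.JobState
import gay.pizza.stable.diffusion.StreamJobUpdatesRequest
import io.grpc.Channel

// Sketch only: block on the update stream until the job reaches a terminal state.
fun waitForJob(channel: Channel, jobId: Long) {
  val stub = JobServiceGrpc.newBlockingStub(channel)
  val request = StreamJobUpdatesRequest.newBuilder().setId(jobId).build()
  for (update in stub.streamJobUpdates(request)) {
    val job = update.job
    println("job=${job.id} state=${job.state.name} ${job.overallPercentageComplete}%")
    if (job.state == JobState.completed || job.state == JobState.cancelled) {
      break
    }
  }
}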
Common/shared.proto (new file, 130 lines)

@@ -0,0 +1,130 @@
/**
 * Shared messages for the Stable Diffusion RPC service.
 */
syntax = "proto3";
package gay.pizza.stable.diffusion;

/**
 * Utilize a prefix of 'Sd' for Swift.
 */
option swift_prefix = "Sd";
option java_multiple_files = true;

/**
 * Represents the model attention. Model attention has to do with how the model is encoded, and
 * can determine what compute units are able to support a particular model.
 */
enum ModelAttention {
  /**
   * The model is an original attention type. It can be loaded only onto CPU & GPU compute units.
   */
  original = 0;

  /**
   * The model is a split-ein-sum attention type. It can be loaded onto all compute units,
   * including the Apple Neural Engine.
   */
  split_ein_sum = 1;
}

/**
 * Represents the schedulers that are used to sample images.
 */
enum Scheduler {
  /**
   * The PNDM (Pseudo numerical methods for diffusion models) scheduler.
   */
  pndm = 0;

  /**
   * The DPM-Solver++ scheduler.
   */
  dpm_solver_plus_plus = 1;
}

/**
 * Represents a specifier for what compute units are available for ML tasks.
 */
enum ComputeUnits {
  /**
   * The CPU as a singular compute unit.
   */
  cpu = 0;

  /**
   * The CPU & GPU combined into a singular compute unit.
   */
  cpu_and_gpu = 1;

  /**
   * Allow the usage of all compute units. CoreML will decide where the model is loaded.
   */
  all = 2;

  /**
   * The CPU & Neural Engine combined into a singular compute unit.
   */
  cpu_and_neural_engine = 3;
}

/**
 * Represents information about an available model.
 * The primary key of a model is its 'name' field.
 */
message ModelInfo {
  /**
   * The name of the available model. Note that within the context of a single RPC server,
   * the name of a model is a unique identifier. This may not be true when utilizing a cluster or
   * load balanced server, so keep that in mind.
   */
  string name = 1;

  /**
   * The attention of the model. Model attention determines what compute units can be used to
   * load the model and make predictions.
   */
  ModelAttention attention = 2;

  /**
   * Whether the model is currently loaded onto an available compute unit.
   */
  bool is_loaded = 3;

  /**
   * The compute unit that the model is currently loaded into, if it is loaded to one at all.
   * When is_loaded is false, the value of this field should be null.
   */
  ComputeUnits loaded_compute_units = 4;

  /**
   * The compute units that this model supports using.
   */
  repeated ComputeUnits supported_compute_units = 5;
}

/**
 * Represents the format of an image.
 */
enum ImageFormat {
  /**
   * The PNG image format.
   */
  png = 0;
}

/**
 * Represents an image within the Stable Diffusion context.
 * This could be an input image for an image generation request, or it could be
 * a generated image from the Stable Diffusion model.
 */
message Image {
  /**
   * The format of the image.
   */
  ImageFormat format = 1;

  /**
   * The raw data of the image, in the specified format.
   */
  bytes data = 2;
}
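The attention and compute-unit semantics documented above can be turned into a small client-side helper; this Kotlin sketch is illustrative only and simply encodes the comments (original attention is restricted to CPU & GPU, split-ein-sum may also use the Neural Engine):

import gay.pizza.stable.diffusion.ComputeUnits
import gay.pizza.stable.diffusion.ModelAttention
import gay.pizza.stable.diffusion.ModelInfo

// Sketch only: choose compute units that the model's attention type can support.
fun computeUnitsFor(model: ModelInfo): ComputeUnits =
  when (model.attention) {
    ModelAttention.original -> ComputeUnits.cpu_and_gpu
    ModelAttention.split_ein_sum -> ComputeUnits.all
    else -> ComputeUnits.cpu
  }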
Common/tokenizer.proto (new file, 51 lines)

@@ -0,0 +1,51 @@
/**
 * Tokenization for the Stable Diffusion RPC service.
 */
syntax = "proto3";
package gay.pizza.stable.diffusion;

/**
 * Utilize a prefix of 'Sd' for Swift.
 */
option swift_prefix = "Sd";
option java_multiple_files = true;

/**
 * Represents a request to tokenize an input.
 */
message TokenizeRequest {
  /**
   * The name of a loaded model to use for tokenization.
   */
  string model_name = 1;

  /**
   * The input string to tokenize.
   */
  string input = 2;
}

/**
 * Represents a response to tokenization.
 */
message TokenizeResponse {
  /**
   * The tokens inside the input string.
   */
  repeated string tokens = 1;

  /**
   * The token IDs inside the input string.
   */
  repeated uint64 token_ids = 2;
}

/**
 * The tokenizer service, for analyzing tokens for a loaded model.
 */
service TokenizerService {
  /**
   * Analyze the input using a loaded model and return the results.
   */
  rpc Tokenize(TokenizeRequest) returns (TokenizeResponse);
}
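A short Kotlin sketch of calling the tokenizer through the tokenizerServiceCoroutine stub that the StableDiffusionRpcClient exposes (shown earlier in this commit); the model name and prompt are placeholders:

import gay.pizza.stable.diffusion.StableDiffusionRpcClient
import gay.pizza.stable.diffusion.TokenizeRequest
import kotlinx.coroutines.runBlocking

// Sketch only: tokenize a prompt with a model that has already been loaded.
fun printTokens(client: StableDiffusionRpcClient, modelName: String, prompt: String) = runBlocking {
  val request = TokenizeRequest.newBuilder()
    .setModelName(modelName)
    .setInput(prompt)
    .build()
  val response = client.tokenizerServiceCoroutine.tokenize(request)
  println(response.tokensList.zip(response.tokenIdsList))
}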
@@ -5,4 +5,6 @@ public enum SdCoreError: Error {
    case imageEncodeFailed
    case imageDecodeFailed
    case modelNotFound
    case jobNotFound
    case notImplemented
}

Sources/StableDiffusionCore/JobManager.swift (new file, 87 lines)

@@ -0,0 +1,87 @@
import Combine
import Foundation
import StableDiffusionProtos

public typealias JobUpdateSubject = PassthroughSubject<SdJob, Never>

public actor JobManager {
    public let jobUpdateSubject = JobUpdateSubject()
    public let jobUpdatePublisher: AsyncPublisher<JobUpdateSubject>

    private var jobs: [UInt64: SdJob] = [:]
    private var id: UInt64 = 0

    public init() {
        jobUpdatePublisher = AsyncPublisher(jobUpdateSubject)
    }

    func nextId() -> UInt64 {
        id += 1
        return id
    }

    public func create() -> SdJob {
        var job = SdJob()
        job.id = nextId()
        job.state = .queued
        jobs[job.id] = job
        return job
    }

    public func job(id: UInt64) -> SdJob? {
        guard let job = jobs[id] else {
            return nil
        }
        return try? SdJob(serializedData: job.serializedData())
    }

    public func updateJobQueued(_ job: SdJob) {
        guard var stored = jobs[job.id] else {
            return
        }
        stored.state = .queued
        jobUpdateSubject.send(stored)
        jobs[job.id] = stored
    }

    public func updateJobProgress(_ job: SdJob, progress: Float) {
        guard var stored = jobs[job.id] else {
            return
        }
        stored.state = .running
        stored.overallPercentageComplete = progress
        jobUpdateSubject.send(stored)
        jobs[job.id] = stored
    }

    public func updateJobCompleted(_ job: SdJob) {
        guard var stored = jobs[job.id] else {
            return
        }
        stored.state = .completed
        stored.overallPercentageComplete = 100.0
        jobUpdateSubject.send(stored)
        jobs[job.id] = stored
    }

    public func updateJobRunning(_ job: SdJob) {
        guard var stored = jobs[job.id] else {
            return
        }
        stored.state = .running
        stored.overallPercentageComplete = 0.0
        jobUpdateSubject.send(stored)
        jobs[job.id] = stored
    }

    public func listAllJobs() -> [SdJob] {
        var copy: [SdJob] = []
        for item in jobs.values {
            guard let job = try? SdJob(serializedData: item.serializedData()) else {
                continue
            }
            copy.append(job)
        }
        return copy
    }
}
@@ -8,9 +8,11 @@ public actor ModelManager {
    private var modelStates: [String: ModelState] = [:]

    private let modelBaseURL: URL
    private let jobManager: JobManager

    public init(modelBaseURL: URL) {
    public init(modelBaseURL: URL, jobManager: JobManager) {
        self.modelBaseURL = modelBaseURL
        self.jobManager = jobManager
    }

    public func reloadAvailableModels() throws {

@@ -67,7 +69,7 @@ public actor ModelManager {
        }

        if state == nil {
            let state = ModelState(url: url)
            let state = ModelState(url: url, jobManager: jobManager)
            modelStates[name] = state
            return state
        } else {
@@ -5,13 +5,16 @@ import StableDiffusion
import StableDiffusionProtos

public actor ModelState {
private let jobManager: JobManager

private let url: URL
private var pipeline: StableDiffusionPipeline?
private var tokenizer: BPETokenizer?
private var loadedConfiguration: MLModelConfiguration?

public init(url: URL) {
public init(url: URL, jobManager: JobManager) {
self.url = url
self.jobManager = jobManager
}

public func load(request: SdLoadModelRequest) throws {

@@ -39,36 +42,21 @@ public actor ModelState {
loadedConfiguration?.computeUnits.toSdComputeUnits()
}

public func generate(_ request: SdGenerateImagesRequest) throws -> SdGenerateImagesResponse {
public func generate(_ request: SdGenerateImagesRequest, job: SdJob, stream: GRPCAsyncResponseStreamWriter<SdGenerateImagesStreamUpdate>? = nil) async throws -> SdGenerateImagesResponse {
guard let pipeline else {
throw SdCoreError.modelNotLoaded
}

let baseSeed: UInt32 = request.seed
var pipelineConfig = try toPipelineConfig(request)

var response = SdGenerateImagesResponse()
for _ in 0 ..< request.batchCount {
var seed = baseSeed
if seed == 0 {
seed = UInt32.random(in: 0 ..< UInt32.max)
}
pipelineConfig.seed = seed
let images = try pipeline.generateImages(configuration: pipelineConfig)
try response.images.append(contentsOf: cgImagesToImages(request: request, images))
response.seeds.append(seed)
}
return response
}

public func generateStreaming(_ request: SdGenerateImagesRequest, stream: GRPCAsyncResponseStreamWriter<SdGenerateImagesStreamUpdate>) async throws {
guard let pipeline else {
throw SdCoreError.modelNotLoaded
}

let baseSeed: UInt32 = request.seed
var pipelineConfig = try toPipelineConfig(request)

DispatchQueue.main.async {
Task {
await self.jobManager.updateJobRunning(job)
}
}

for batch in 1 ... request.batchCount {
@Sendable func currentOverallPercentage(_ batchPercentage: Float) -> Float {
let eachSegment = 100.0 / Float(request.batchCount)

@@ -89,30 +77,51 @@ public actor ModelState {
images = try? cgImagesToImages(request: request, progress.currentImages)
}
let finalImages = images
Task {
try await stream.send(.with { item in
item.currentBatch = batch
item.batchProgress = .with { update in
update.percentageComplete = percentage
if let finalImages {
update.images = finalImages
let overallPercentage = currentOverallPercentage(percentage)
DispatchQueue.main.async {
Task {
await self.jobManager.updateJobProgress(job, progress: overallPercentage)
}
}
if let stream {
Task {
try await stream.send(.with { item in
item.currentBatch = batch
item.batchProgress = .with { update in
update.percentageComplete = percentage
if let finalImages {
update.images = finalImages
}
}
}
item.overallPercentageComplete = currentOverallPercentage(percentage)
})
item.overallPercentageComplete = overallPercentage
item.jobID = job.id
})
}
}
return true
})
let images = try cgImagesToImages(request: request, cgImages)
try await stream.send(.with { item in
item.currentBatch = batch
item.batchCompleted = .with { update in
update.images = images
update.seed = seed
DispatchQueue.main.async {
Task {
await self.jobManager.updateJobCompleted(job)
}
item.overallPercentageComplete = currentOverallPercentage(100.0)
})
}
if let stream {
try await stream.send(.with { item in
item.currentBatch = batch
item.batchCompleted = .with { update in
update.images = images
update.seed = seed
}
}
item.overallPercentageComplete = currentOverallPercentage(100.0)
item.jobID = job.id
})
} else {
response.images.append(contentsOf: images)
response.seeds.append(seed)
}
}
return response
}

private func cgImagesToImages(request: SdGenerateImagesRequest, _ cgImages: [CGImage?]) throws -> [SdImage] {
(File diff suppressed because it is too large.)

Sources/StableDiffusionProtos/host.grpc.swift (new file, 445 lines)

@@ -0,0 +1,445 @@
|
||||
//
|
||||
// DO NOT EDIT.
|
||||
//
|
||||
// Generated by the protocol buffer compiler.
|
||||
// Source: host.proto
|
||||
//
|
||||
|
||||
//
|
||||
// Copyright 2018, gRPC Authors All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
import GRPC
|
||||
import NIO
|
||||
import NIOConcurrencyHelpers
|
||||
import SwiftProtobuf
|
||||
|
||||
|
||||
///*
|
||||
/// The model service, for management and loading of models.
|
||||
///
|
||||
/// Usage: instantiate `SdModelServiceClient`, then call methods of this protocol to make API calls.
|
||||
public protocol SdModelServiceClientProtocol: GRPCClient {
|
||||
var serviceName: String { get }
|
||||
var interceptors: SdModelServiceClientInterceptorFactoryProtocol? { get }
|
||||
|
||||
func listModels(
|
||||
_ request: SdListModelsRequest,
|
||||
callOptions: CallOptions?
|
||||
) -> UnaryCall<SdListModelsRequest, SdListModelsResponse>
|
||||
|
||||
func loadModel(
|
||||
_ request: SdLoadModelRequest,
|
||||
callOptions: CallOptions?
|
||||
) -> UnaryCall<SdLoadModelRequest, SdLoadModelResponse>
|
||||
}
|
||||
|
||||
extension SdModelServiceClientProtocol {
|
||||
public var serviceName: String {
|
||||
return "gay.pizza.stable.diffusion.ModelService"
|
||||
}
|
||||
|
||||
///*
|
||||
/// Lists the available models on the host.
|
||||
/// This will return both models that are currently loaded, and models that are not yet loaded.
|
||||
///
|
||||
/// - Parameters:
|
||||
/// - request: Request to send to ListModels.
|
||||
/// - callOptions: Call options.
|
||||
/// - Returns: A `UnaryCall` with futures for the metadata, status and response.
|
||||
public func listModels(
|
||||
_ request: SdListModelsRequest,
|
||||
callOptions: CallOptions? = nil
|
||||
) -> UnaryCall<SdListModelsRequest, SdListModelsResponse> {
|
||||
return self.makeUnaryCall(
|
||||
path: SdModelServiceClientMetadata.Methods.listModels.path,
|
||||
request: request,
|
||||
callOptions: callOptions ?? self.defaultCallOptions,
|
||||
interceptors: self.interceptors?.makeListModelsInterceptors() ?? []
|
||||
)
|
||||
}
|
||||
|
||||
///*
|
||||
/// Loads a model onto a compute unit.
|
||||
///
|
||||
/// - Parameters:
|
||||
/// - request: Request to send to LoadModel.
|
||||
/// - callOptions: Call options.
|
||||
/// - Returns: A `UnaryCall` with futures for the metadata, status and response.
|
||||
public func loadModel(
|
||||
_ request: SdLoadModelRequest,
|
||||
callOptions: CallOptions? = nil
|
||||
) -> UnaryCall<SdLoadModelRequest, SdLoadModelResponse> {
|
||||
return self.makeUnaryCall(
|
||||
path: SdModelServiceClientMetadata.Methods.loadModel.path,
|
||||
request: request,
|
||||
callOptions: callOptions ?? self.defaultCallOptions,
|
||||
interceptors: self.interceptors?.makeLoadModelInterceptors() ?? []
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#if compiler(>=5.6)
|
||||
@available(*, deprecated)
|
||||
extension SdModelServiceClient: @unchecked Sendable {}
|
||||
#endif // compiler(>=5.6)
|
||||
|
||||
@available(*, deprecated, renamed: "SdModelServiceNIOClient")
|
||||
public final class SdModelServiceClient: SdModelServiceClientProtocol {
|
||||
private let lock = Lock()
|
||||
private var _defaultCallOptions: CallOptions
|
||||
private var _interceptors: SdModelServiceClientInterceptorFactoryProtocol?
|
||||
public let channel: GRPCChannel
|
||||
public var defaultCallOptions: CallOptions {
|
||||
get { self.lock.withLock { return self._defaultCallOptions } }
|
||||
set { self.lock.withLockVoid { self._defaultCallOptions = newValue } }
|
||||
}
|
||||
public var interceptors: SdModelServiceClientInterceptorFactoryProtocol? {
|
||||
get { self.lock.withLock { return self._interceptors } }
|
||||
set { self.lock.withLockVoid { self._interceptors = newValue } }
|
||||
}
|
||||
|
||||
/// Creates a client for the gay.pizza.stable.diffusion.ModelService service.
|
||||
///
|
||||
/// - Parameters:
|
||||
/// - channel: `GRPCChannel` to the service host.
|
||||
/// - defaultCallOptions: Options to use for each service call if the user doesn't provide them.
|
||||
/// - interceptors: A factory providing interceptors for each RPC.
|
||||
public init(
|
||||
channel: GRPCChannel,
|
||||
defaultCallOptions: CallOptions = CallOptions(),
|
||||
interceptors: SdModelServiceClientInterceptorFactoryProtocol? = nil
|
||||
) {
|
||||
self.channel = channel
|
||||
self._defaultCallOptions = defaultCallOptions
|
||||
self._interceptors = interceptors
|
||||
}
|
||||
}
|
||||
|
||||
public struct SdModelServiceNIOClient: SdModelServiceClientProtocol {
|
||||
public var channel: GRPCChannel
|
||||
public var defaultCallOptions: CallOptions
|
||||
public var interceptors: SdModelServiceClientInterceptorFactoryProtocol?
|
||||
|
||||
/// Creates a client for the gay.pizza.stable.diffusion.ModelService service.
|
||||
///
|
||||
/// - Parameters:
|
||||
/// - channel: `GRPCChannel` to the service host.
|
||||
/// - defaultCallOptions: Options to use for each service call if the user doesn't provide them.
|
||||
/// - interceptors: A factory providing interceptors for each RPC.
|
||||
public init(
|
||||
channel: GRPCChannel,
|
||||
defaultCallOptions: CallOptions = CallOptions(),
|
||||
interceptors: SdModelServiceClientInterceptorFactoryProtocol? = nil
|
||||
) {
|
||||
self.channel = channel
|
||||
self.defaultCallOptions = defaultCallOptions
|
||||
self.interceptors = interceptors
|
||||
}
|
||||
}
|
||||
|
||||
#if compiler(>=5.6)
|
||||
///*
|
||||
/// The model service, for management and loading of models.
|
||||
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
|
||||
public protocol SdModelServiceAsyncClientProtocol: GRPCClient {
|
||||
static var serviceDescriptor: GRPCServiceDescriptor { get }
|
||||
var interceptors: SdModelServiceClientInterceptorFactoryProtocol? { get }
|
||||
|
||||
func makeListModelsCall(
|
||||
_ request: SdListModelsRequest,
|
||||
callOptions: CallOptions?
|
||||
) -> GRPCAsyncUnaryCall<SdListModelsRequest, SdListModelsResponse>
|
||||
|
||||
func makeLoadModelCall(
|
||||
_ request: SdLoadModelRequest,
|
||||
callOptions: CallOptions?
|
||||
) -> GRPCAsyncUnaryCall<SdLoadModelRequest, SdLoadModelResponse>
|
||||
}
|
||||
|
||||
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
|
||||
extension SdModelServiceAsyncClientProtocol {
|
||||
public static var serviceDescriptor: GRPCServiceDescriptor {
|
||||
return SdModelServiceClientMetadata.serviceDescriptor
|
||||
}
|
||||
|
||||
public var interceptors: SdModelServiceClientInterceptorFactoryProtocol? {
|
||||
return nil
|
||||
}
|
||||
|
||||
public func makeListModelsCall(
|
||||
_ request: SdListModelsRequest,
|
||||
callOptions: CallOptions? = nil
|
||||
) -> GRPCAsyncUnaryCall<SdListModelsRequest, SdListModelsResponse> {
|
||||
return self.makeAsyncUnaryCall(
|
||||
path: SdModelServiceClientMetadata.Methods.listModels.path,
|
||||
request: request,
|
||||
callOptions: callOptions ?? self.defaultCallOptions,
|
||||
interceptors: self.interceptors?.makeListModelsInterceptors() ?? []
|
||||
)
|
||||
}
|
||||
|
||||
public func makeLoadModelCall(
|
||||
_ request: SdLoadModelRequest,
|
||||
callOptions: CallOptions? = nil
|
||||
) -> GRPCAsyncUnaryCall<SdLoadModelRequest, SdLoadModelResponse> {
|
||||
return self.makeAsyncUnaryCall(
|
||||
path: SdModelServiceClientMetadata.Methods.loadModel.path,
|
||||
request: request,
|
||||
callOptions: callOptions ?? self.defaultCallOptions,
|
||||
interceptors: self.interceptors?.makeLoadModelInterceptors() ?? []
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
|
||||
extension SdModelServiceAsyncClientProtocol {
|
||||
public func listModels(
|
||||
_ request: SdListModelsRequest,
|
||||
callOptions: CallOptions? = nil
|
||||
) async throws -> SdListModelsResponse {
|
||||
return try await self.performAsyncUnaryCall(
|
||||
path: SdModelServiceClientMetadata.Methods.listModels.path,
|
||||
request: request,
|
||||
callOptions: callOptions ?? self.defaultCallOptions,
|
||||
interceptors: self.interceptors?.makeListModelsInterceptors() ?? []
|
||||
)
|
||||
}
|
||||
|
||||
public func loadModel(
|
||||
_ request: SdLoadModelRequest,
|
||||
callOptions: CallOptions? = nil
|
||||
) async throws -> SdLoadModelResponse {
|
||||
return try await self.performAsyncUnaryCall(
|
||||
path: SdModelServiceClientMetadata.Methods.loadModel.path,
|
||||
request: request,
|
||||
callOptions: callOptions ?? self.defaultCallOptions,
|
||||
interceptors: self.interceptors?.makeLoadModelInterceptors() ?? []
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
|
||||
public struct SdModelServiceAsyncClient: SdModelServiceAsyncClientProtocol {
|
||||
public var channel: GRPCChannel
|
||||
public var defaultCallOptions: CallOptions
|
||||
public var interceptors: SdModelServiceClientInterceptorFactoryProtocol?
|
||||
|
||||
public init(
|
||||
channel: GRPCChannel,
|
||||
defaultCallOptions: CallOptions = CallOptions(),
|
||||
interceptors: SdModelServiceClientInterceptorFactoryProtocol? = nil
|
||||
) {
|
||||
self.channel = channel
|
||||
self.defaultCallOptions = defaultCallOptions
|
||||
self.interceptors = interceptors
|
||||
}
|
||||
}
|
||||
|
||||
#endif // compiler(>=5.6)
|
||||
|
||||
public protocol SdModelServiceClientInterceptorFactoryProtocol: GRPCSendable {
|
||||
|
||||
/// - Returns: Interceptors to use when invoking 'listModels'.
|
||||
func makeListModelsInterceptors() -> [ClientInterceptor<SdListModelsRequest, SdListModelsResponse>]
|
||||
|
||||
/// - Returns: Interceptors to use when invoking 'loadModel'.
|
||||
func makeLoadModelInterceptors() -> [ClientInterceptor<SdLoadModelRequest, SdLoadModelResponse>]
|
||||
}
|
||||
|
||||
public enum SdModelServiceClientMetadata {
|
||||
public static let serviceDescriptor = GRPCServiceDescriptor(
|
||||
name: "ModelService",
|
||||
fullName: "gay.pizza.stable.diffusion.ModelService",
|
||||
methods: [
|
||||
SdModelServiceClientMetadata.Methods.listModels,
|
||||
SdModelServiceClientMetadata.Methods.loadModel,
|
||||
]
|
||||
)
|
||||
|
||||
public enum Methods {
|
||||
public static let listModels = GRPCMethodDescriptor(
|
||||
name: "ListModels",
|
||||
path: "/gay.pizza.stable.diffusion.ModelService/ListModels",
|
||||
type: GRPCCallType.unary
|
||||
)
|
||||
|
||||
public static let loadModel = GRPCMethodDescriptor(
|
||||
name: "LoadModel",
|
||||
path: "/gay.pizza.stable.diffusion.ModelService/LoadModel",
|
||||
type: GRPCCallType.unary
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
///*
|
||||
/// The model service, for management and loading of models.
|
||||
///
|
||||
/// To build a server, implement a class that conforms to this protocol.
|
||||
public protocol SdModelServiceProvider: CallHandlerProvider {
|
||||
var interceptors: SdModelServiceServerInterceptorFactoryProtocol? { get }
|
||||
|
||||
///*
|
||||
/// Lists the available models on the host.
|
||||
/// This will return both models that are currently loaded, and models that are not yet loaded.
|
||||
func listModels(request: SdListModelsRequest, context: StatusOnlyCallContext) -> EventLoopFuture<SdListModelsResponse>
|
||||
|
||||
///*
|
||||
/// Loads a model onto a compute unit.
|
||||
func loadModel(request: SdLoadModelRequest, context: StatusOnlyCallContext) -> EventLoopFuture<SdLoadModelResponse>
|
||||
}
|
||||
|
||||
extension SdModelServiceProvider {
|
||||
public var serviceName: Substring {
|
||||
return SdModelServiceServerMetadata.serviceDescriptor.fullName[...]
|
||||
}
|
||||
|
||||
/// Determines, calls and returns the appropriate request handler, depending on the request's method.
|
||||
/// Returns nil for methods not handled by this service.
|
||||
public func handle(
|
||||
method name: Substring,
|
||||
context: CallHandlerContext
|
||||
) -> GRPCServerHandlerProtocol? {
|
||||
switch name {
|
||||
case "ListModels":
|
||||
return UnaryServerHandler(
|
||||
context: context,
|
||||
requestDeserializer: ProtobufDeserializer<SdListModelsRequest>(),
|
||||
responseSerializer: ProtobufSerializer<SdListModelsResponse>(),
|
||||
interceptors: self.interceptors?.makeListModelsInterceptors() ?? [],
|
||||
userFunction: self.listModels(request:context:)
|
||||
)
|
||||
|
||||
case "LoadModel":
|
||||
return UnaryServerHandler(
|
||||
context: context,
|
||||
requestDeserializer: ProtobufDeserializer<SdLoadModelRequest>(),
|
||||
responseSerializer: ProtobufSerializer<SdLoadModelResponse>(),
|
||||
interceptors: self.interceptors?.makeLoadModelInterceptors() ?? [],
|
||||
userFunction: self.loadModel(request:context:)
|
||||
)
|
||||
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#if compiler(>=5.6)
|
||||
|
||||
///*
|
||||
/// The model service, for management and loading of models.
|
||||
///
|
||||
/// To implement a server, implement an object which conforms to this protocol.
|
||||
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
|
||||
public protocol SdModelServiceAsyncProvider: CallHandlerProvider {
|
||||
static var serviceDescriptor: GRPCServiceDescriptor { get }
|
||||
var interceptors: SdModelServiceServerInterceptorFactoryProtocol? { get }
|
||||
|
||||
///*
|
||||
/// Lists the available models on the host.
|
||||
/// This will return both models that are currently loaded, and models that are not yet loaded.
|
||||
@Sendable func listModels(
|
||||
request: SdListModelsRequest,
|
||||
context: GRPCAsyncServerCallContext
|
||||
) async throws -> SdListModelsResponse
|
||||
|
||||
///*
|
||||
/// Loads a model onto a compute unit.
|
||||
@Sendable func loadModel(
|
||||
request: SdLoadModelRequest,
|
||||
context: GRPCAsyncServerCallContext
|
||||
) async throws -> SdLoadModelResponse
|
||||
}
|
||||
|
||||
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
|
||||
extension SdModelServiceAsyncProvider {
|
||||
public static var serviceDescriptor: GRPCServiceDescriptor {
|
||||
return SdModelServiceServerMetadata.serviceDescriptor
|
||||
}
|
||||
|
||||
public var serviceName: Substring {
|
||||
return SdModelServiceServerMetadata.serviceDescriptor.fullName[...]
|
||||
}
|
||||
|
||||
public var interceptors: SdModelServiceServerInterceptorFactoryProtocol? {
|
||||
return nil
|
||||
}
|
||||
|
||||
public func handle(
|
||||
method name: Substring,
|
||||
context: CallHandlerContext
|
||||
) -> GRPCServerHandlerProtocol? {
|
||||
switch name {
|
||||
case "ListModels":
|
||||
return GRPCAsyncServerHandler(
|
||||
context: context,
|
||||
requestDeserializer: ProtobufDeserializer<SdListModelsRequest>(),
|
||||
responseSerializer: ProtobufSerializer<SdListModelsResponse>(),
|
||||
interceptors: self.interceptors?.makeListModelsInterceptors() ?? [],
|
||||
wrapping: self.listModels(request:context:)
|
||||
)
|
||||
|
||||
case "LoadModel":
|
||||
return GRPCAsyncServerHandler(
|
||||
context: context,
|
||||
requestDeserializer: ProtobufDeserializer<SdLoadModelRequest>(),
|
||||
responseSerializer: ProtobufSerializer<SdLoadModelResponse>(),
|
||||
interceptors: self.interceptors?.makeLoadModelInterceptors() ?? [],
|
||||
wrapping: self.loadModel(request:context:)
|
||||
)
|
||||
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif // compiler(>=5.6)
|
||||
|
||||
public protocol SdModelServiceServerInterceptorFactoryProtocol {
|
||||
|
||||
/// - Returns: Interceptors to use when handling 'listModels'.
|
||||
/// Defaults to calling `self.makeInterceptors()`.
|
||||
func makeListModelsInterceptors() -> [ServerInterceptor<SdListModelsRequest, SdListModelsResponse>]
|
||||
|
||||
/// - Returns: Interceptors to use when handling 'loadModel'.
|
||||
/// Defaults to calling `self.makeInterceptors()`.
|
||||
func makeLoadModelInterceptors() -> [ServerInterceptor<SdLoadModelRequest, SdLoadModelResponse>]
|
||||
}
|
||||
|
||||
public enum SdModelServiceServerMetadata {
|
||||
public static let serviceDescriptor = GRPCServiceDescriptor(
|
||||
name: "ModelService",
|
||||
fullName: "gay.pizza.stable.diffusion.ModelService",
|
||||
methods: [
|
||||
SdModelServiceServerMetadata.Methods.listModels,
|
||||
SdModelServiceServerMetadata.Methods.loadModel,
|
||||
]
|
||||
)
|
||||
|
||||
public enum Methods {
|
||||
public static let listModels = GRPCMethodDescriptor(
|
||||
name: "ListModels",
|
||||
path: "/gay.pizza.stable.diffusion.ModelService/ListModels",
|
||||
type: GRPCCallType.unary
|
||||
)
|
||||
|
||||
public static let loadModel = GRPCMethodDescriptor(
|
||||
name: "LoadModel",
|
||||
path: "/gay.pizza.stable.diffusion.ModelService/LoadModel",
|
||||
type: GRPCCallType.unary
|
||||
)
|
||||
}
|
||||
}
|
Sources/StableDiffusionProtos/host.pb.swift (new file, 203 lines)

@@ -0,0 +1,203 @@
|
||||
// DO NOT EDIT.
|
||||
// swift-format-ignore-file
|
||||
//
|
||||
// Generated by the Swift generator plugin for the protocol buffer compiler.
|
||||
// Source: host.proto
|
||||
//
|
||||
// For information on using the generated types, please see the documentation:
|
||||
// https://github.com/apple/swift-protobuf/
|
||||
|
||||
///*
|
||||
/// Host messages and services for the Stable Diffusion RPC service.
|
||||
|
||||
import Foundation
|
||||
import SwiftProtobuf
|
||||
|
||||
// If the compiler emits an error on this type, it is because this file
|
||||
// was generated by a version of the `protoc` Swift plug-in that is
|
||||
// incompatible with the version of SwiftProtobuf to which you are linking.
|
||||
// Please ensure that you are building against the same version of the API
|
||||
// that was used to generate this file.
|
||||
fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck {
|
||||
struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {}
|
||||
typealias Version = _2
|
||||
}
|
||||
|
||||
///*
|
||||
/// Represents a request to list the models available on the host.
|
||||
public struct SdListModelsRequest {
|
||||
// SwiftProtobuf.Message conformance is added in an extension below. See the
|
||||
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
|
||||
// methods supported on all messages.
|
||||
|
||||
public var unknownFields = SwiftProtobuf.UnknownStorage()
|
||||
|
||||
public init() {}
|
||||
}
|
||||
|
||||
///*
|
||||
/// Represents a response to listing the models available on the host.
|
||||
public struct SdListModelsResponse {
|
||||
// SwiftProtobuf.Message conformance is added in an extension below. See the
|
||||
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
|
||||
// methods supported on all messages.
|
||||
|
||||
///*
|
||||
/// The available models on the Stable Diffusion server.
|
||||
public var availableModels: [SdModelInfo] = []
|
||||
|
||||
public var unknownFields = SwiftProtobuf.UnknownStorage()
|
||||
|
||||
public init() {}
|
||||
}
|
||||
|
||||
///*
|
||||
/// Represents a request to load a model into a specified compute unit.
|
||||
public struct SdLoadModelRequest {
|
||||
// SwiftProtobuf.Message conformance is added in an extension below. See the
|
||||
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
|
||||
// methods supported on all messages.
|
||||
|
||||
///*
|
||||
/// The model name to load onto the compute unit.
|
||||
public var modelName: String = String()
|
||||
|
||||
///*
|
||||
/// The compute units to load the model onto.
|
||||
public var computeUnits: SdComputeUnits = .cpu
|
||||
|
||||
public var unknownFields = SwiftProtobuf.UnknownStorage()
|
||||
|
||||
public init() {}
|
||||
}
|
||||
|
||||
///*
|
||||
/// Represents a response to loading a model.
|
||||
public struct SdLoadModelResponse {
|
||||
// SwiftProtobuf.Message conformance is added in an extension below. See the
|
||||
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
|
||||
// methods supported on all messages.
|
||||
|
||||
public var unknownFields = SwiftProtobuf.UnknownStorage()
|
||||
|
||||
public init() {}
|
||||
}
|
||||
|
||||
#if swift(>=5.5) && canImport(_Concurrency)
|
||||
extension SdListModelsRequest: @unchecked Sendable {}
|
||||
extension SdListModelsResponse: @unchecked Sendable {}
|
||||
extension SdLoadModelRequest: @unchecked Sendable {}
|
||||
extension SdLoadModelResponse: @unchecked Sendable {}
|
||||
#endif // swift(>=5.5) && canImport(_Concurrency)
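Building the request messages above is ordinary Swift value construction; a short sketch, with an arbitrary model name and compute unit chosen purely for illustration:

var loadRequest = SdLoadModelRequest()
loadRequest.modelName = "example-model"        // placeholder, not a model shipped with this repository
loadRequest.computeUnits = .cpuAndNeuralEngine // any SdComputeUnits case is valid here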
|
||||
|
||||
// MARK: - Code below here is support for the SwiftProtobuf runtime.
|
||||
|
||||
fileprivate let _protobuf_package = "gay.pizza.stable.diffusion"
|
||||
|
||||
extension SdListModelsRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
|
||||
public static let protoMessageName: String = _protobuf_package + ".ListModelsRequest"
|
||||
public static let _protobuf_nameMap = SwiftProtobuf._NameMap()
|
||||
|
||||
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
|
||||
while let _ = try decoder.nextFieldNumber() {
|
||||
}
|
||||
}
|
||||
|
||||
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
|
||||
try unknownFields.traverse(visitor: &visitor)
|
||||
}
|
||||
|
||||
public static func ==(lhs: SdListModelsRequest, rhs: SdListModelsRequest) -> Bool {
|
||||
if lhs.unknownFields != rhs.unknownFields {return false}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
extension SdListModelsResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
|
||||
public static let protoMessageName: String = _protobuf_package + ".ListModelsResponse"
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
1: .standard(proto: "available_models"),
|
||||
]
|
||||
|
||||
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
|
||||
while let fieldNumber = try decoder.nextFieldNumber() {
|
||||
// The use of inline closures is to circumvent an issue where the compiler
|
||||
// allocates stack space for every case branch when no optimizations are
|
||||
// enabled. https://github.com/apple/swift-protobuf/issues/1034
|
||||
switch fieldNumber {
|
||||
case 1: try { try decoder.decodeRepeatedMessageField(value: &self.availableModels) }()
|
||||
default: break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
|
||||
if !self.availableModels.isEmpty {
|
||||
try visitor.visitRepeatedMessageField(value: self.availableModels, fieldNumber: 1)
|
||||
}
|
||||
try unknownFields.traverse(visitor: &visitor)
|
||||
}
|
||||
|
||||
public static func ==(lhs: SdListModelsResponse, rhs: SdListModelsResponse) -> Bool {
|
||||
if lhs.availableModels != rhs.availableModels {return false}
|
||||
if lhs.unknownFields != rhs.unknownFields {return false}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
extension SdLoadModelRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
|
||||
public static let protoMessageName: String = _protobuf_package + ".LoadModelRequest"
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
1: .standard(proto: "model_name"),
|
||||
2: .standard(proto: "compute_units"),
|
||||
]
|
||||
|
||||
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
|
||||
while let fieldNumber = try decoder.nextFieldNumber() {
|
||||
// The use of inline closures is to circumvent an issue where the compiler
|
||||
// allocates stack space for every case branch when no optimizations are
|
||||
// enabled. https://github.com/apple/swift-protobuf/issues/1034
|
||||
switch fieldNumber {
|
||||
case 1: try { try decoder.decodeSingularStringField(value: &self.modelName) }()
|
||||
case 2: try { try decoder.decodeSingularEnumField(value: &self.computeUnits) }()
|
||||
default: break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
|
||||
if !self.modelName.isEmpty {
|
||||
try visitor.visitSingularStringField(value: self.modelName, fieldNumber: 1)
|
||||
}
|
||||
if self.computeUnits != .cpu {
|
||||
try visitor.visitSingularEnumField(value: self.computeUnits, fieldNumber: 2)
|
||||
}
|
||||
try unknownFields.traverse(visitor: &visitor)
|
||||
}
|
||||
|
||||
public static func ==(lhs: SdLoadModelRequest, rhs: SdLoadModelRequest) -> Bool {
|
||||
if lhs.modelName != rhs.modelName {return false}
|
||||
if lhs.computeUnits != rhs.computeUnits {return false}
|
||||
if lhs.unknownFields != rhs.unknownFields {return false}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
extension SdLoadModelResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
|
||||
public static let protoMessageName: String = _protobuf_package + ".LoadModelResponse"
|
||||
public static let _protobuf_nameMap = SwiftProtobuf._NameMap()
|
||||
|
||||
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
|
||||
while let _ = try decoder.nextFieldNumber() {
|
||||
}
|
||||
}
|
||||
|
||||
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
|
||||
try unknownFields.traverse(visitor: &visitor)
|
||||
}
|
||||
|
||||
public static func ==(lhs: SdLoadModelResponse, rhs: SdLoadModelResponse) -> Bool {
|
||||
if lhs.unknownFields != rhs.unknownFields {return false}
|
||||
return true
|
||||
}
|
||||
}
|
447
Sources/StableDiffusionProtos/image_generation.grpc.swift
Normal file
@ -0,0 +1,447 @@
|
||||
//
|
||||
// DO NOT EDIT.
|
||||
//
|
||||
// Generated by the protocol buffer compiler.
|
||||
// Source: image_generation.proto
|
||||
//
|
||||
|
||||
//
|
||||
// Copyright 2018, gRPC Authors All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
import GRPC
|
||||
import NIO
|
||||
import NIOConcurrencyHelpers
|
||||
import SwiftProtobuf
|
||||
|
||||
|
||||
///*
|
||||
/// The image generation service, for generating images from loaded models.
|
||||
///
|
||||
/// Usage: instantiate `SdImageGenerationServiceClient`, then call methods of this protocol to make API calls.
|
||||
public protocol SdImageGenerationServiceClientProtocol: GRPCClient {
|
||||
var serviceName: String { get }
|
||||
var interceptors: SdImageGenerationServiceClientInterceptorFactoryProtocol? { get }
|
||||
|
||||
func generateImages(
|
||||
_ request: SdGenerateImagesRequest,
|
||||
callOptions: CallOptions?
|
||||
) -> UnaryCall<SdGenerateImagesRequest, SdGenerateImagesResponse>
|
||||
|
||||
func generateImagesStreaming(
|
||||
_ request: SdGenerateImagesRequest,
|
||||
callOptions: CallOptions?,
|
||||
handler: @escaping (SdGenerateImagesStreamUpdate) -> Void
|
||||
) -> ServerStreamingCall<SdGenerateImagesRequest, SdGenerateImagesStreamUpdate>
|
||||
}
|
||||
|
||||
extension SdImageGenerationServiceClientProtocol {
|
||||
public var serviceName: String {
|
||||
return "gay.pizza.stable.diffusion.ImageGenerationService"
|
||||
}
|
||||
|
||||
///*
|
||||
/// Generates images using a loaded model.
|
||||
///
|
||||
/// - Parameters:
|
||||
/// - request: Request to send to GenerateImages.
|
||||
/// - callOptions: Call options.
|
||||
/// - Returns: A `UnaryCall` with futures for the metadata, status and response.
|
||||
public func generateImages(
|
||||
_ request: SdGenerateImagesRequest,
|
||||
callOptions: CallOptions? = nil
|
||||
) -> UnaryCall<SdGenerateImagesRequest, SdGenerateImagesResponse> {
|
||||
return self.makeUnaryCall(
|
||||
path: SdImageGenerationServiceClientMetadata.Methods.generateImages.path,
|
||||
request: request,
|
||||
callOptions: callOptions ?? self.defaultCallOptions,
|
||||
interceptors: self.interceptors?.makeGenerateImagesInterceptors() ?? []
|
||||
)
|
||||
}
|
||||
|
||||
///*
|
||||
/// Generates images using a loaded model, providing updates along the way.
|
||||
///
|
||||
/// - Parameters:
|
||||
/// - request: Request to send to GenerateImagesStreaming.
|
||||
/// - callOptions: Call options.
|
||||
/// - handler: A closure called when each response is received from the server.
|
||||
/// - Returns: A `ServerStreamingCall` with futures for the metadata and status.
|
||||
public func generateImagesStreaming(
|
||||
_ request: SdGenerateImagesRequest,
|
||||
callOptions: CallOptions? = nil,
|
||||
handler: @escaping (SdGenerateImagesStreamUpdate) -> Void
|
||||
) -> ServerStreamingCall<SdGenerateImagesRequest, SdGenerateImagesStreamUpdate> {
|
||||
return self.makeServerStreamingCall(
|
||||
path: SdImageGenerationServiceClientMetadata.Methods.generateImagesStreaming.path,
|
||||
request: request,
|
||||
callOptions: callOptions ?? self.defaultCallOptions,
|
||||
interceptors: self.interceptors?.makeGenerateImagesStreamingInterceptors() ?? [],
|
||||
handler: handler
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#if compiler(>=5.6)
|
||||
@available(*, deprecated)
|
||||
extension SdImageGenerationServiceClient: @unchecked Sendable {}
|
||||
#endif // compiler(>=5.6)
|
||||
|
||||
@available(*, deprecated, renamed: "SdImageGenerationServiceNIOClient")
|
||||
public final class SdImageGenerationServiceClient: SdImageGenerationServiceClientProtocol {
|
||||
private let lock = Lock()
|
||||
private var _defaultCallOptions: CallOptions
|
||||
private var _interceptors: SdImageGenerationServiceClientInterceptorFactoryProtocol?
|
||||
public let channel: GRPCChannel
|
||||
public var defaultCallOptions: CallOptions {
|
||||
get { self.lock.withLock { return self._defaultCallOptions } }
|
||||
set { self.lock.withLockVoid { self._defaultCallOptions = newValue } }
|
||||
}
|
||||
public var interceptors: SdImageGenerationServiceClientInterceptorFactoryProtocol? {
|
||||
get { self.lock.withLock { return self._interceptors } }
|
||||
set { self.lock.withLockVoid { self._interceptors = newValue } }
|
||||
}
|
||||
|
||||
/// Creates a client for the gay.pizza.stable.diffusion.ImageGenerationService service.
|
||||
///
|
||||
/// - Parameters:
|
||||
/// - channel: `GRPCChannel` to the service host.
|
||||
/// - defaultCallOptions: Options to use for each service call if the user doesn't provide them.
|
||||
/// - interceptors: A factory providing interceptors for each RPC.
|
||||
public init(
|
||||
channel: GRPCChannel,
|
||||
defaultCallOptions: CallOptions = CallOptions(),
|
||||
interceptors: SdImageGenerationServiceClientInterceptorFactoryProtocol? = nil
|
||||
) {
|
||||
self.channel = channel
|
||||
self._defaultCallOptions = defaultCallOptions
|
||||
self._interceptors = interceptors
|
||||
}
|
||||
}
|
||||
|
||||
public struct SdImageGenerationServiceNIOClient: SdImageGenerationServiceClientProtocol {
|
||||
public var channel: GRPCChannel
|
||||
public var defaultCallOptions: CallOptions
|
||||
public var interceptors: SdImageGenerationServiceClientInterceptorFactoryProtocol?
|
||||
|
||||
/// Creates a client for the gay.pizza.stable.diffusion.ImageGenerationService service.
|
||||
///
|
||||
/// - Parameters:
|
||||
/// - channel: `GRPCChannel` to the service host.
|
||||
/// - defaultCallOptions: Options to use for each service call if the user doesn't provide them.
|
||||
/// - interceptors: A factory providing interceptors for each RPC.
|
||||
public init(
|
||||
channel: GRPCChannel,
|
||||
defaultCallOptions: CallOptions = CallOptions(),
|
||||
interceptors: SdImageGenerationServiceClientInterceptorFactoryProtocol? = nil
|
||||
) {
|
||||
self.channel = channel
|
||||
self.defaultCallOptions = defaultCallOptions
|
||||
self.interceptors = interceptors
|
||||
}
|
||||
}
|
||||
|
||||
#if compiler(>=5.6)
|
||||
///*
|
||||
/// The image generation service, for generating images from loaded models.
|
||||
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
|
||||
public protocol SdImageGenerationServiceAsyncClientProtocol: GRPCClient {
|
||||
static var serviceDescriptor: GRPCServiceDescriptor { get }
|
||||
var interceptors: SdImageGenerationServiceClientInterceptorFactoryProtocol? { get }
|
||||
|
||||
func makeGenerateImagesCall(
|
||||
_ request: SdGenerateImagesRequest,
|
||||
callOptions: CallOptions?
|
||||
) -> GRPCAsyncUnaryCall<SdGenerateImagesRequest, SdGenerateImagesResponse>
|
||||
|
||||
func makeGenerateImagesStreamingCall(
|
||||
_ request: SdGenerateImagesRequest,
|
||||
callOptions: CallOptions?
|
||||
) -> GRPCAsyncServerStreamingCall<SdGenerateImagesRequest, SdGenerateImagesStreamUpdate>
|
||||
}
|
||||
|
||||
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
|
||||
extension SdImageGenerationServiceAsyncClientProtocol {
|
||||
public static var serviceDescriptor: GRPCServiceDescriptor {
|
||||
return SdImageGenerationServiceClientMetadata.serviceDescriptor
|
||||
}
|
||||
|
||||
public var interceptors: SdImageGenerationServiceClientInterceptorFactoryProtocol? {
|
||||
return nil
|
||||
}
|
||||
|
||||
public func makeGenerateImagesCall(
|
||||
_ request: SdGenerateImagesRequest,
|
||||
callOptions: CallOptions? = nil
|
||||
) -> GRPCAsyncUnaryCall<SdGenerateImagesRequest, SdGenerateImagesResponse> {
|
||||
return self.makeAsyncUnaryCall(
|
||||
path: SdImageGenerationServiceClientMetadata.Methods.generateImages.path,
|
||||
request: request,
|
||||
callOptions: callOptions ?? self.defaultCallOptions,
|
||||
interceptors: self.interceptors?.makeGenerateImagesInterceptors() ?? []
|
||||
)
|
||||
}
|
||||
|
||||
public func makeGenerateImagesStreamingCall(
|
||||
_ request: SdGenerateImagesRequest,
|
||||
callOptions: CallOptions? = nil
|
||||
) -> GRPCAsyncServerStreamingCall<SdGenerateImagesRequest, SdGenerateImagesStreamUpdate> {
|
||||
return self.makeAsyncServerStreamingCall(
|
||||
path: SdImageGenerationServiceClientMetadata.Methods.generateImagesStreaming.path,
|
||||
request: request,
|
||||
callOptions: callOptions ?? self.defaultCallOptions,
|
||||
interceptors: self.interceptors?.makeGenerateImagesStreamingInterceptors() ?? []
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
|
||||
extension SdImageGenerationServiceAsyncClientProtocol {
|
||||
public func generateImages(
|
||||
_ request: SdGenerateImagesRequest,
|
||||
callOptions: CallOptions? = nil
|
||||
) async throws -> SdGenerateImagesResponse {
|
||||
return try await self.performAsyncUnaryCall(
|
||||
path: SdImageGenerationServiceClientMetadata.Methods.generateImages.path,
|
||||
request: request,
|
||||
callOptions: callOptions ?? self.defaultCallOptions,
|
||||
interceptors: self.interceptors?.makeGenerateImagesInterceptors() ?? []
|
||||
)
|
||||
}
|
||||
|
||||
public func generateImagesStreaming(
|
||||
_ request: SdGenerateImagesRequest,
|
||||
callOptions: CallOptions? = nil
|
||||
) -> GRPCAsyncResponseStream<SdGenerateImagesStreamUpdate> {
|
||||
return self.performAsyncServerStreamingCall(
|
||||
path: SdImageGenerationServiceClientMetadata.Methods.generateImagesStreaming.path,
|
||||
request: request,
|
||||
callOptions: callOptions ?? self.defaultCallOptions,
|
||||
interceptors: self.interceptors?.makeGenerateImagesStreamingInterceptors() ?? []
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
|
||||
public struct SdImageGenerationServiceAsyncClient: SdImageGenerationServiceAsyncClientProtocol {
|
||||
public var channel: GRPCChannel
|
||||
public var defaultCallOptions: CallOptions
|
||||
public var interceptors: SdImageGenerationServiceClientInterceptorFactoryProtocol?
|
||||
|
||||
public init(
|
||||
channel: GRPCChannel,
|
||||
defaultCallOptions: CallOptions = CallOptions(),
|
||||
interceptors: SdImageGenerationServiceClientInterceptorFactoryProtocol? = nil
|
||||
) {
|
||||
self.channel = channel
|
||||
self.defaultCallOptions = defaultCallOptions
|
||||
self.interceptors = interceptors
|
||||
}
|
||||
}
|
||||
|
||||
#endif // compiler(>=5.6)
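Tying the async client together, here is a small end-to-end sketch; the server address, event loop setup, and default-initialized request are assumptions made for illustration, and the request fields should be populated from the definitions in image_generation.pb.swift.

import GRPC
import NIO

func runImageGenerationExample() async throws {
  let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
  defer { try? group.syncShutdownGracefully() }

  // Assumed local server address; adjust to wherever the host is running.
  let channel = try GRPCChannelPool.with(
    target: .host("localhost", port: 4546),
    transportSecurity: .plaintext,
    eventLoopGroup: group
  )

  let imageClient = SdImageGenerationServiceAsyncClient(channel: channel)
  let request = SdGenerateImagesRequest() // populate the fields defined in image_generation.pb.swift as needed

  // Unary variant: wait for the whole batch in one response.
  let response = try await imageClient.generateImages(request)
  print(response.textFormatString())

  // Streaming variant: observe progress as the job runs.
  for try await update in imageClient.generateImagesStreaming(request) {
    print("job \(update.jobID): \(update.overallPercentageComplete)% complete")
  }
}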
|
||||
|
||||
public protocol SdImageGenerationServiceClientInterceptorFactoryProtocol: GRPCSendable {
|
||||
|
||||
/// - Returns: Interceptors to use when invoking 'generateImages'.
|
||||
func makeGenerateImagesInterceptors() -> [ClientInterceptor<SdGenerateImagesRequest, SdGenerateImagesResponse>]
|
||||
|
||||
/// - Returns: Interceptors to use when invoking 'generateImagesStreaming'.
|
||||
func makeGenerateImagesStreamingInterceptors() -> [ClientInterceptor<SdGenerateImagesRequest, SdGenerateImagesStreamUpdate>]
|
||||
}
|
||||
|
||||
public enum SdImageGenerationServiceClientMetadata {
|
||||
public static let serviceDescriptor = GRPCServiceDescriptor(
|
||||
name: "ImageGenerationService",
|
||||
fullName: "gay.pizza.stable.diffusion.ImageGenerationService",
|
||||
methods: [
|
||||
SdImageGenerationServiceClientMetadata.Methods.generateImages,
|
||||
SdImageGenerationServiceClientMetadata.Methods.generateImagesStreaming,
|
||||
]
|
||||
)
|
||||
|
||||
public enum Methods {
|
||||
public static let generateImages = GRPCMethodDescriptor(
|
||||
name: "GenerateImages",
|
||||
path: "/gay.pizza.stable.diffusion.ImageGenerationService/GenerateImages",
|
||||
type: GRPCCallType.unary
|
||||
)
|
||||
|
||||
public static let generateImagesStreaming = GRPCMethodDescriptor(
|
||||
name: "GenerateImagesStreaming",
|
||||
path: "/gay.pizza.stable.diffusion.ImageGenerationService/GenerateImagesStreaming",
|
||||
type: GRPCCallType.serverStreaming
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
///*
|
||||
/// The image generation service, for generating images from loaded models.
|
||||
///
|
||||
/// To build a server, implement a class that conforms to this protocol.
|
||||
public protocol SdImageGenerationServiceProvider: CallHandlerProvider {
|
||||
var interceptors: SdImageGenerationServiceServerInterceptorFactoryProtocol? { get }
|
||||
|
||||
///*
|
||||
/// Generates images using a loaded model.
|
||||
func generateImages(request: SdGenerateImagesRequest, context: StatusOnlyCallContext) -> EventLoopFuture<SdGenerateImagesResponse>
|
||||
|
||||
///*
|
||||
/// Generates images using a loaded model, providing updates along the way.
|
||||
func generateImagesStreaming(request: SdGenerateImagesRequest, context: StreamingResponseCallContext<SdGenerateImagesStreamUpdate>) -> EventLoopFuture<GRPCStatus>
|
||||
}
|
||||
|
||||
extension SdImageGenerationServiceProvider {
|
||||
public var serviceName: Substring {
|
||||
return SdImageGenerationServiceServerMetadata.serviceDescriptor.fullName[...]
|
||||
}
|
||||
|
||||
/// Determines, calls and returns the appropriate request handler, depending on the request's method.
|
||||
/// Returns nil for methods not handled by this service.
|
||||
public func handle(
|
||||
method name: Substring,
|
||||
context: CallHandlerContext
|
||||
) -> GRPCServerHandlerProtocol? {
|
||||
switch name {
|
||||
case "GenerateImages":
|
||||
return UnaryServerHandler(
|
||||
context: context,
|
||||
requestDeserializer: ProtobufDeserializer<SdGenerateImagesRequest>(),
|
||||
responseSerializer: ProtobufSerializer<SdGenerateImagesResponse>(),
|
||||
interceptors: self.interceptors?.makeGenerateImagesInterceptors() ?? [],
|
||||
userFunction: self.generateImages(request:context:)
|
||||
)
|
||||
|
||||
case "GenerateImagesStreaming":
|
||||
return ServerStreamingServerHandler(
|
||||
context: context,
|
||||
requestDeserializer: ProtobufDeserializer<SdGenerateImagesRequest>(),
|
||||
responseSerializer: ProtobufSerializer<SdGenerateImagesStreamUpdate>(),
|
||||
interceptors: self.interceptors?.makeGenerateImagesStreamingInterceptors() ?? [],
|
||||
userFunction: self.generateImagesStreaming(request:context:)
|
||||
)
|
||||
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#if compiler(>=5.6)
|
||||
|
||||
///*
|
||||
/// The image generation service, for generating images from loaded models.
|
||||
///
|
||||
/// To implement a server, implement an object which conforms to this protocol.
|
||||
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
|
||||
public protocol SdImageGenerationServiceAsyncProvider: CallHandlerProvider {
|
||||
static var serviceDescriptor: GRPCServiceDescriptor { get }
|
||||
var interceptors: SdImageGenerationServiceServerInterceptorFactoryProtocol? { get }
|
||||
|
||||
///*
|
||||
/// Generates images using a loaded model.
|
||||
@Sendable func generateImages(
|
||||
request: SdGenerateImagesRequest,
|
||||
context: GRPCAsyncServerCallContext
|
||||
) async throws -> SdGenerateImagesResponse
|
||||
|
||||
///*
|
||||
/// Generates images using a loaded model, providing updates along the way.
|
||||
@Sendable func generateImagesStreaming(
|
||||
request: SdGenerateImagesRequest,
|
||||
responseStream: GRPCAsyncResponseStreamWriter<SdGenerateImagesStreamUpdate>,
|
||||
context: GRPCAsyncServerCallContext
|
||||
) async throws
|
||||
}
|
||||
|
||||
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
|
||||
extension SdImageGenerationServiceAsyncProvider {
|
||||
public static var serviceDescriptor: GRPCServiceDescriptor {
|
||||
return SdImageGenerationServiceServerMetadata.serviceDescriptor
|
||||
}
|
||||
|
||||
public var serviceName: Substring {
|
||||
return SdImageGenerationServiceServerMetadata.serviceDescriptor.fullName[...]
|
||||
}
|
||||
|
||||
public var interceptors: SdImageGenerationServiceServerInterceptorFactoryProtocol? {
|
||||
return nil
|
||||
}
|
||||
|
||||
public func handle(
|
||||
method name: Substring,
|
||||
context: CallHandlerContext
|
||||
) -> GRPCServerHandlerProtocol? {
|
||||
switch name {
|
||||
case "GenerateImages":
|
||||
return GRPCAsyncServerHandler(
|
||||
context: context,
|
||||
requestDeserializer: ProtobufDeserializer<SdGenerateImagesRequest>(),
|
||||
responseSerializer: ProtobufSerializer<SdGenerateImagesResponse>(),
|
||||
interceptors: self.interceptors?.makeGenerateImagesInterceptors() ?? [],
|
||||
wrapping: self.generateImages(request:context:)
|
||||
)
|
||||
|
||||
case "GenerateImagesStreaming":
|
||||
return GRPCAsyncServerHandler(
|
||||
context: context,
|
||||
requestDeserializer: ProtobufDeserializer<SdGenerateImagesRequest>(),
|
||||
responseSerializer: ProtobufSerializer<SdGenerateImagesStreamUpdate>(),
|
||||
interceptors: self.interceptors?.makeGenerateImagesStreamingInterceptors() ?? [],
|
||||
wrapping: self.generateImagesStreaming(request:responseStream:context:)
|
||||
)
|
||||
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif // compiler(>=5.6)
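On the server side, the async provider declared above can be satisfied with a small conformance; this sketch only wires the plumbing, and the placeholder update stands in for the real generation pipeline, which is not part of this file.

final class ImageGenerationProvider: SdImageGenerationServiceAsyncProvider {
  var interceptors: SdImageGenerationServiceServerInterceptorFactoryProtocol? { nil }

  func generateImages(
    request: SdGenerateImagesRequest,
    context: GRPCAsyncServerCallContext
  ) async throws -> SdGenerateImagesResponse {
    // Run the full job and return the finished batch in a single response.
    return SdGenerateImagesResponse()
  }

  func generateImagesStreaming(
    request: SdGenerateImagesRequest,
    responseStream: GRPCAsyncResponseStreamWriter<SdGenerateImagesStreamUpdate>,
    context: GRPCAsyncServerCallContext
  ) async throws {
    // A real implementation would send one update per step or batch as the job advances.
    var update = SdGenerateImagesStreamUpdate()
    update.overallPercentageComplete = 100
    try await responseStream.send(update)
  }
}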
|
||||
|
||||
public protocol SdImageGenerationServiceServerInterceptorFactoryProtocol {
|
||||
|
||||
/// - Returns: Interceptors to use when handling 'generateImages'.
|
||||
/// Defaults to calling `self.makeInterceptors()`.
|
||||
func makeGenerateImagesInterceptors() -> [ServerInterceptor<SdGenerateImagesRequest, SdGenerateImagesResponse>]
|
||||
|
||||
/// - Returns: Interceptors to use when handling 'generateImagesStreaming'.
|
||||
/// Defaults to calling `self.makeInterceptors()`.
|
||||
func makeGenerateImagesStreamingInterceptors() -> [ServerInterceptor<SdGenerateImagesRequest, SdGenerateImagesStreamUpdate>]
|
||||
}
|
||||
|
||||
public enum SdImageGenerationServiceServerMetadata {
|
||||
public static let serviceDescriptor = GRPCServiceDescriptor(
|
||||
name: "ImageGenerationService",
|
||||
fullName: "gay.pizza.stable.diffusion.ImageGenerationService",
|
||||
methods: [
|
||||
SdImageGenerationServiceServerMetadata.Methods.generateImages,
|
||||
SdImageGenerationServiceServerMetadata.Methods.generateImagesStreaming,
|
||||
]
|
||||
)
|
||||
|
||||
public enum Methods {
|
||||
public static let generateImages = GRPCMethodDescriptor(
|
||||
name: "GenerateImages",
|
||||
path: "/gay.pizza.stable.diffusion.ImageGenerationService/GenerateImages",
|
||||
type: GRPCCallType.unary
|
||||
)
|
||||
|
||||
public static let generateImagesStreaming = GRPCMethodDescriptor(
|
||||
name: "GenerateImagesStreaming",
|
||||
path: "/gay.pizza.stable.diffusion.ImageGenerationService/GenerateImagesStreaming",
|
||||
type: GRPCCallType.serverStreaming
|
||||
)
|
||||
}
|
||||
}
|
@ -2,13 +2,13 @@
|
||||
// swift-format-ignore-file
|
||||
//
|
||||
// Generated by the Swift generator plugin for the protocol buffer compiler.
|
||||
// Source: StableDiffusion.proto
|
||||
// Source: image_generation.proto
|
||||
//
|
||||
// For information on using the generated types, please see the documentation:
|
||||
// https://github.com/apple/swift-protobuf/
|
||||
|
||||
///*
|
||||
/// Stable Diffusion RPC service for Apple Platforms.
|
||||
/// Image generation for the Stable Diffusion RPC service.
|
||||
|
||||
import Foundation
|
||||
import SwiftProtobuf
|
||||
@ -23,326 +23,6 @@ fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAP
|
||||
typealias Version = _2
|
||||
}
|
||||
|
||||
///*
|
||||
/// Represents the model attention. Model attention has to do with how the model is encoded, and
|
||||
/// can determine what compute units are able to support a particular model.
|
||||
public enum SdModelAttention: SwiftProtobuf.Enum {
|
||||
public typealias RawValue = Int
|
||||
|
||||
///*
|
||||
/// The model is an original attention type. It can be loaded only onto CPU & GPU compute units.
|
||||
case original // = 0
|
||||
|
||||
///*
|
||||
/// The model is a split-ein-sum attention type. It can be loaded onto all compute units,
|
||||
/// including the Apple Neural Engine.
|
||||
case splitEinSum // = 1
|
||||
case UNRECOGNIZED(Int)
|
||||
|
||||
public init() {
|
||||
self = .original
|
||||
}
|
||||
|
||||
public init?(rawValue: Int) {
|
||||
switch rawValue {
|
||||
case 0: self = .original
|
||||
case 1: self = .splitEinSum
|
||||
default: self = .UNRECOGNIZED(rawValue)
|
||||
}
|
||||
}
|
||||
|
||||
public var rawValue: Int {
|
||||
switch self {
|
||||
case .original: return 0
|
||||
case .splitEinSum: return 1
|
||||
case .UNRECOGNIZED(let i): return i
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#if swift(>=4.2)
|
||||
|
||||
extension SdModelAttention: CaseIterable {
|
||||
// The compiler won't synthesize support with the UNRECOGNIZED case.
|
||||
public static var allCases: [SdModelAttention] = [
|
||||
.original,
|
||||
.splitEinSum,
|
||||
]
|
||||
}
|
||||
|
||||
#endif // swift(>=4.2)
|
||||
|
||||
///*
|
||||
/// Represents the schedulers that are used to sample images.
|
||||
public enum SdScheduler: SwiftProtobuf.Enum {
|
||||
public typealias RawValue = Int
|
||||
|
||||
///*
|
||||
/// The PNDM (Pseudo numerical methods for diffusion models) scheduler.
|
||||
case pndm // = 0
|
||||
|
||||
///*
|
||||
/// The DPM-Solver++ scheduler.
|
||||
case dpmSolverPlusPlus // = 1
|
||||
case UNRECOGNIZED(Int)
|
||||
|
||||
public init() {
|
||||
self = .pndm
|
||||
}
|
||||
|
||||
public init?(rawValue: Int) {
|
||||
switch rawValue {
|
||||
case 0: self = .pndm
|
||||
case 1: self = .dpmSolverPlusPlus
|
||||
default: self = .UNRECOGNIZED(rawValue)
|
||||
}
|
||||
}
|
||||
|
||||
public var rawValue: Int {
|
||||
switch self {
|
||||
case .pndm: return 0
|
||||
case .dpmSolverPlusPlus: return 1
|
||||
case .UNRECOGNIZED(let i): return i
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#if swift(>=4.2)
|
||||
|
||||
extension SdScheduler: CaseIterable {
|
||||
// The compiler won't synthesize support with the UNRECOGNIZED case.
|
||||
public static var allCases: [SdScheduler] = [
|
||||
.pndm,
|
||||
.dpmSolverPlusPlus,
|
||||
]
|
||||
}
|
||||
|
||||
#endif // swift(>=4.2)
|
||||
|
||||
///*
|
||||
/// Represents a specifier for what compute units are available for ML tasks.
|
||||
public enum SdComputeUnits: SwiftProtobuf.Enum {
|
||||
public typealias RawValue = Int
|
||||
|
||||
///*
|
||||
/// The CPU as a singular compute unit.
|
||||
case cpu // = 0
|
||||
|
||||
///*
|
||||
/// The CPU & GPU combined into a singular compute unit.
|
||||
case cpuAndGpu // = 1
|
||||
|
||||
///*
|
||||
/// Allow the usage of all compute units. CoreML will decide where the model is loaded.
|
||||
case all // = 2
|
||||
|
||||
///*
|
||||
/// The CPU & Neural Engine combined into a singular compute unit.
|
||||
case cpuAndNeuralEngine // = 3
|
||||
case UNRECOGNIZED(Int)
|
||||
|
||||
public init() {
|
||||
self = .cpu
|
||||
}
|
||||
|
||||
public init?(rawValue: Int) {
|
||||
switch rawValue {
|
||||
case 0: self = .cpu
|
||||
case 1: self = .cpuAndGpu
|
||||
case 2: self = .all
|
||||
case 3: self = .cpuAndNeuralEngine
|
||||
default: self = .UNRECOGNIZED(rawValue)
|
||||
}
|
||||
}
|
||||
|
||||
public var rawValue: Int {
|
||||
switch self {
|
||||
case .cpu: return 0
|
||||
case .cpuAndGpu: return 1
|
||||
case .all: return 2
|
||||
case .cpuAndNeuralEngine: return 3
|
||||
case .UNRECOGNIZED(let i): return i
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#if swift(>=4.2)
|
||||
|
||||
extension SdComputeUnits: CaseIterable {
|
||||
// The compiler won't synthesize support with the UNRECOGNIZED case.
|
||||
public static var allCases: [SdComputeUnits] = [
|
||||
.cpu,
|
||||
.cpuAndGpu,
|
||||
.all,
|
||||
.cpuAndNeuralEngine,
|
||||
]
|
||||
}
|
||||
|
||||
#endif // swift(>=4.2)
|
||||
|
||||
///*
|
||||
/// Represents the format of an image.
|
||||
public enum SdImageFormat: SwiftProtobuf.Enum {
|
||||
public typealias RawValue = Int
|
||||
|
||||
///*
|
||||
/// The PNG image format.
|
||||
case png // = 0
|
||||
case UNRECOGNIZED(Int)
|
||||
|
||||
public init() {
|
||||
self = .png
|
||||
}
|
||||
|
||||
public init?(rawValue: Int) {
|
||||
switch rawValue {
|
||||
case 0: self = .png
|
||||
default: self = .UNRECOGNIZED(rawValue)
|
||||
}
|
||||
}
|
||||
|
||||
public var rawValue: Int {
|
||||
switch self {
|
||||
case .png: return 0
|
||||
case .UNRECOGNIZED(let i): return i
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#if swift(>=4.2)
|
||||
|
||||
extension SdImageFormat: CaseIterable {
|
||||
// The compiler won't synthesize support with the UNRECOGNIZED case.
|
||||
public static var allCases: [SdImageFormat] = [
|
||||
.png,
|
||||
]
|
||||
}
|
||||
|
||||
#endif // swift(>=4.2)
|
||||
|
||||
///*
|
||||
/// Represents information about an available model.
|
||||
/// The primary key of a model is its 'name' field.
|
||||
public struct SdModelInfo {
|
||||
// SwiftProtobuf.Message conformance is added in an extension below. See the
|
||||
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
|
||||
// methods supported on all messages.
|
||||
|
||||
///*
|
||||
/// The name of the available model. Note that within the context of a single RPC server,
|
||||
/// the name of a model is a unique identifier. This may not be true when utilizing a cluster or
|
||||
/// load balanced server, so keep that in mind.
|
||||
public var name: String = String()
|
||||
|
||||
///*
|
||||
/// The attention of the model. Model attention determines what compute units can be used to
|
||||
/// load the model and make predictions.
|
||||
public var attention: SdModelAttention = .original
|
||||
|
||||
///*
|
||||
/// Whether the model is currently loaded onto an available compute unit.
|
||||
public var isLoaded: Bool = false
|
||||
|
||||
///*
|
||||
/// The compute unit that the model is currently loaded into, if it is loaded to one at all.
|
||||
/// When is_loaded is false, the value of this field should be null.
|
||||
public var loadedComputeUnits: SdComputeUnits = .cpu
|
||||
|
||||
///*
|
||||
/// The compute units that this model supports using.
|
||||
public var supportedComputeUnits: [SdComputeUnits] = []
|
||||
|
||||
public var unknownFields = SwiftProtobuf.UnknownStorage()
|
||||
|
||||
public init() {}
|
||||
}
|
||||
|
||||
///*
|
||||
/// Represents an image within the Stable Diffusion context.
|
||||
/// This could be an input image for an image generation request, or it could be
|
||||
/// a generated image from the Stable Diffusion model.
|
||||
public struct SdImage {
|
||||
// SwiftProtobuf.Message conformance is added in an extension below. See the
|
||||
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
|
||||
// methods supported on all messages.
|
||||
|
||||
///*
|
||||
/// The format of the image.
|
||||
public var format: SdImageFormat = .png
|
||||
|
||||
///*
|
||||
/// The raw data of the image, in the specified format.
|
||||
public var data: Data = Data()
|
||||
|
||||
public var unknownFields = SwiftProtobuf.UnknownStorage()
|
||||
|
||||
public init() {}
|
||||
}
|
||||
|
||||
///*
|
||||
/// Represents a request to list the models available on the host.
|
||||
public struct SdListModelsRequest {
|
||||
// SwiftProtobuf.Message conformance is added in an extension below. See the
|
||||
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
|
||||
// methods supported on all messages.
|
||||
|
||||
public var unknownFields = SwiftProtobuf.UnknownStorage()
|
||||
|
||||
public init() {}
|
||||
}
|
||||
|
||||
///*
|
||||
/// Represents a response to listing the models available on the host.
|
||||
public struct SdListModelsResponse {
|
||||
// SwiftProtobuf.Message conformance is added in an extension below. See the
|
||||
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
|
||||
// methods supported on all messages.
|
||||
|
||||
///*
|
||||
/// The available models on the Stable Diffusion server.
|
||||
public var availableModels: [SdModelInfo] = []
|
||||
|
||||
public var unknownFields = SwiftProtobuf.UnknownStorage()
|
||||
|
||||
public init() {}
|
||||
}
|
||||
|
||||
///*
|
||||
/// Represents a request to load a model into a specified compute unit.
|
||||
public struct SdLoadModelRequest {
|
||||
// SwiftProtobuf.Message conformance is added in an extension below. See the
|
||||
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
|
||||
// methods supported on all messages.
|
||||
|
||||
///*
|
||||
/// The model name to load onto the compute unit.
|
||||
public var modelName: String = String()
|
||||
|
||||
///*
|
||||
/// The compute units to load the model onto.
|
||||
public var computeUnits: SdComputeUnits = .cpu
|
||||
|
||||
public var unknownFields = SwiftProtobuf.UnknownStorage()
|
||||
|
||||
public init() {}
|
||||
}
|
||||
|
||||
///*
|
||||
/// Represents a response to loading a model.
|
||||
public struct SdLoadModelResponse {
|
||||
// SwiftProtobuf.Message conformance is added in an extension below. See the
|
||||
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
|
||||
// methods supported on all messages.
|
||||
|
||||
public var unknownFields = SwiftProtobuf.UnknownStorage()
|
||||
|
||||
public init() {}
|
||||
}
|
||||
|
||||
///*
|
||||
/// Represents a request to generate images using a loaded model.
|
||||
public struct SdGenerateImagesRequest {
|
||||
@ -525,8 +205,14 @@ public struct SdGenerateImagesStreamUpdate {
|
||||
set {update = .batchCompleted(newValue)}
|
||||
}
|
||||
|
||||
///*
|
||||
/// The percentage of completion for the entire submitted job.
|
||||
public var overallPercentageComplete: Float = 0
|
||||
|
||||
///*
|
||||
/// The id of the spawned job.
|
||||
public var jobID: UInt64 = 0
|
||||
|
||||
public var unknownFields = SwiftProtobuf.UnknownStorage()
|
||||
|
||||
///*
|
||||
@ -562,302 +248,19 @@ public struct SdGenerateImagesStreamUpdate {
|
||||
public init() {}
|
||||
}
|
||||
|
||||
///*
|
||||
/// Represents a request to tokenize an input.
|
||||
public struct SdTokenizeRequest {
|
||||
// SwiftProtobuf.Message conformance is added in an extension below. See the
|
||||
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
|
||||
// methods supported on all messages.
|
||||
|
||||
///*
|
||||
/// The name of a loaded model to use for tokenization.
|
||||
public var modelName: String = String()
|
||||
|
||||
///*
|
||||
/// The input string to tokenize.
|
||||
public var input: String = String()
|
||||
|
||||
public var unknownFields = SwiftProtobuf.UnknownStorage()
|
||||
|
||||
public init() {}
|
||||
}
|
||||
|
||||
///*
|
||||
/// Represents a response to tokenization.
|
||||
public struct SdTokenizeResponse {
|
||||
// SwiftProtobuf.Message conformance is added in an extension below. See the
|
||||
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
|
||||
// methods supported on all messages.
|
||||
|
||||
///*
|
||||
/// The tokens inside the input string.
|
||||
public var tokens: [String] = []
|
||||
|
||||
///*
|
||||
/// The token IDs inside the input string.
|
||||
public var tokenIds: [UInt64] = []
|
||||
|
||||
public var unknownFields = SwiftProtobuf.UnknownStorage()
|
||||
|
||||
public init() {}
|
||||
}
|
||||
|
||||
#if swift(>=5.5) && canImport(_Concurrency)
|
||||
extension SdModelAttention: @unchecked Sendable {}
|
||||
extension SdScheduler: @unchecked Sendable {}
|
||||
extension SdComputeUnits: @unchecked Sendable {}
|
||||
extension SdImageFormat: @unchecked Sendable {}
|
||||
extension SdModelInfo: @unchecked Sendable {}
|
||||
extension SdImage: @unchecked Sendable {}
|
||||
extension SdListModelsRequest: @unchecked Sendable {}
|
||||
extension SdListModelsResponse: @unchecked Sendable {}
|
||||
extension SdLoadModelRequest: @unchecked Sendable {}
|
||||
extension SdLoadModelResponse: @unchecked Sendable {}
|
||||
extension SdGenerateImagesRequest: @unchecked Sendable {}
|
||||
extension SdGenerateImagesResponse: @unchecked Sendable {}
|
||||
extension SdGenerateImagesBatchProgressUpdate: @unchecked Sendable {}
|
||||
extension SdGenerateImagesBatchCompletedUpdate: @unchecked Sendable {}
|
||||
extension SdGenerateImagesStreamUpdate: @unchecked Sendable {}
|
||||
extension SdGenerateImagesStreamUpdate.OneOf_Update: @unchecked Sendable {}
|
||||
extension SdTokenizeRequest: @unchecked Sendable {}
|
||||
extension SdTokenizeResponse: @unchecked Sendable {}
|
||||
#endif // swift(>=5.5) && canImport(_Concurrency)
|
||||
|
||||
// MARK: - Code below here is support for the SwiftProtobuf runtime.
|
||||
|
||||
fileprivate let _protobuf_package = "gay.pizza.stable.diffusion"
|
||||
|
||||
extension SdModelAttention: SwiftProtobuf._ProtoNameProviding {
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
0: .same(proto: "original"),
|
||||
1: .same(proto: "split_ein_sum"),
|
||||
]
|
||||
}
|
||||
|
||||
extension SdScheduler: SwiftProtobuf._ProtoNameProviding {
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
0: .same(proto: "pndm"),
|
||||
1: .same(proto: "dpm_solver_plus_plus"),
|
||||
]
|
||||
}
|
||||
|
||||
extension SdComputeUnits: SwiftProtobuf._ProtoNameProviding {
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
0: .same(proto: "cpu"),
|
||||
1: .same(proto: "cpu_and_gpu"),
|
||||
2: .same(proto: "all"),
|
||||
3: .same(proto: "cpu_and_neural_engine"),
|
||||
]
|
||||
}
|
||||
|
||||
extension SdImageFormat: SwiftProtobuf._ProtoNameProviding {
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
0: .same(proto: "png"),
|
||||
]
|
||||
}
|
||||
|
||||
extension SdModelInfo: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
|
||||
public static let protoMessageName: String = _protobuf_package + ".ModelInfo"
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
1: .same(proto: "name"),
|
||||
2: .same(proto: "attention"),
|
||||
3: .standard(proto: "is_loaded"),
|
||||
4: .standard(proto: "loaded_compute_units"),
|
||||
5: .standard(proto: "supported_compute_units"),
|
||||
]
|
||||
|
||||
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
|
||||
while let fieldNumber = try decoder.nextFieldNumber() {
|
||||
// The use of inline closures is to circumvent an issue where the compiler
|
||||
// allocates stack space for every case branch when no optimizations are
|
||||
// enabled. https://github.com/apple/swift-protobuf/issues/1034
|
||||
switch fieldNumber {
|
||||
case 1: try { try decoder.decodeSingularStringField(value: &self.name) }()
|
||||
case 2: try { try decoder.decodeSingularEnumField(value: &self.attention) }()
|
||||
case 3: try { try decoder.decodeSingularBoolField(value: &self.isLoaded) }()
|
||||
case 4: try { try decoder.decodeSingularEnumField(value: &self.loadedComputeUnits) }()
|
||||
case 5: try { try decoder.decodeRepeatedEnumField(value: &self.supportedComputeUnits) }()
|
||||
default: break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
|
||||
if !self.name.isEmpty {
|
||||
try visitor.visitSingularStringField(value: self.name, fieldNumber: 1)
|
||||
}
|
||||
if self.attention != .original {
|
||||
try visitor.visitSingularEnumField(value: self.attention, fieldNumber: 2)
|
||||
}
|
||||
if self.isLoaded != false {
|
||||
try visitor.visitSingularBoolField(value: self.isLoaded, fieldNumber: 3)
|
||||
}
|
||||
if self.loadedComputeUnits != .cpu {
|
||||
try visitor.visitSingularEnumField(value: self.loadedComputeUnits, fieldNumber: 4)
|
||||
}
|
||||
if !self.supportedComputeUnits.isEmpty {
|
||||
try visitor.visitPackedEnumField(value: self.supportedComputeUnits, fieldNumber: 5)
|
||||
}
|
||||
try unknownFields.traverse(visitor: &visitor)
|
||||
}
|
||||
|
||||
public static func ==(lhs: SdModelInfo, rhs: SdModelInfo) -> Bool {
|
||||
if lhs.name != rhs.name {return false}
|
||||
if lhs.attention != rhs.attention {return false}
|
||||
if lhs.isLoaded != rhs.isLoaded {return false}
|
||||
if lhs.loadedComputeUnits != rhs.loadedComputeUnits {return false}
|
||||
if lhs.supportedComputeUnits != rhs.supportedComputeUnits {return false}
|
||||
if lhs.unknownFields != rhs.unknownFields {return false}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
extension SdImage: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
|
||||
public static let protoMessageName: String = _protobuf_package + ".Image"
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
1: .same(proto: "format"),
|
||||
2: .same(proto: "data"),
|
||||
]
|
||||
|
||||
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
|
||||
while let fieldNumber = try decoder.nextFieldNumber() {
|
||||
// The use of inline closures is to circumvent an issue where the compiler
|
||||
// allocates stack space for every case branch when no optimizations are
|
||||
// enabled. https://github.com/apple/swift-protobuf/issues/1034
|
||||
switch fieldNumber {
|
||||
case 1: try { try decoder.decodeSingularEnumField(value: &self.format) }()
|
||||
case 2: try { try decoder.decodeSingularBytesField(value: &self.data) }()
|
||||
default: break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
|
||||
if self.format != .png {
|
||||
try visitor.visitSingularEnumField(value: self.format, fieldNumber: 1)
|
||||
}
|
||||
if !self.data.isEmpty {
|
||||
try visitor.visitSingularBytesField(value: self.data, fieldNumber: 2)
|
||||
}
|
||||
try unknownFields.traverse(visitor: &visitor)
|
||||
}
|
||||
|
||||
public static func ==(lhs: SdImage, rhs: SdImage) -> Bool {
|
||||
if lhs.format != rhs.format {return false}
|
||||
if lhs.data != rhs.data {return false}
|
||||
if lhs.unknownFields != rhs.unknownFields {return false}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
extension SdListModelsRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
|
||||
public static let protoMessageName: String = _protobuf_package + ".ListModelsRequest"
|
||||
public static let _protobuf_nameMap = SwiftProtobuf._NameMap()
|
||||
|
||||
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
|
||||
while let _ = try decoder.nextFieldNumber() {
|
||||
}
|
||||
}
|
||||
|
||||
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
|
||||
try unknownFields.traverse(visitor: &visitor)
|
||||
}
|
||||
|
||||
public static func ==(lhs: SdListModelsRequest, rhs: SdListModelsRequest) -> Bool {
|
||||
if lhs.unknownFields != rhs.unknownFields {return false}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
extension SdListModelsResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
|
||||
public static let protoMessageName: String = _protobuf_package + ".ListModelsResponse"
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
1: .standard(proto: "available_models"),
|
||||
]
|
||||
|
||||
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
|
||||
while let fieldNumber = try decoder.nextFieldNumber() {
|
||||
// The use of inline closures is to circumvent an issue where the compiler
|
||||
// allocates stack space for every case branch when no optimizations are
|
||||
// enabled. https://github.com/apple/swift-protobuf/issues/1034
|
||||
switch fieldNumber {
|
||||
case 1: try { try decoder.decodeRepeatedMessageField(value: &self.availableModels) }()
|
||||
default: break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
|
||||
if !self.availableModels.isEmpty {
|
||||
try visitor.visitRepeatedMessageField(value: self.availableModels, fieldNumber: 1)
|
||||
}
|
||||
try unknownFields.traverse(visitor: &visitor)
|
||||
}
|
||||
|
||||
public static func ==(lhs: SdListModelsResponse, rhs: SdListModelsResponse) -> Bool {
|
||||
if lhs.availableModels != rhs.availableModels {return false}
|
||||
if lhs.unknownFields != rhs.unknownFields {return false}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
extension SdLoadModelRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
|
||||
public static let protoMessageName: String = _protobuf_package + ".LoadModelRequest"
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
1: .standard(proto: "model_name"),
|
||||
2: .standard(proto: "compute_units"),
|
||||
]
|
||||
|
||||
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
|
||||
while let fieldNumber = try decoder.nextFieldNumber() {
|
||||
// The use of inline closures is to circumvent an issue where the compiler
|
||||
// allocates stack space for every case branch when no optimizations are
|
||||
// enabled. https://github.com/apple/swift-protobuf/issues/1034
|
||||
switch fieldNumber {
|
||||
case 1: try { try decoder.decodeSingularStringField(value: &self.modelName) }()
|
||||
case 2: try { try decoder.decodeSingularEnumField(value: &self.computeUnits) }()
|
||||
default: break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
|
||||
if !self.modelName.isEmpty {
|
||||
try visitor.visitSingularStringField(value: self.modelName, fieldNumber: 1)
|
||||
}
|
||||
if self.computeUnits != .cpu {
|
||||
try visitor.visitSingularEnumField(value: self.computeUnits, fieldNumber: 2)
|
||||
}
|
||||
try unknownFields.traverse(visitor: &visitor)
|
||||
}
|
||||
|
||||
public static func ==(lhs: SdLoadModelRequest, rhs: SdLoadModelRequest) -> Bool {
|
||||
if lhs.modelName != rhs.modelName {return false}
|
||||
if lhs.computeUnits != rhs.computeUnits {return false}
|
||||
if lhs.unknownFields != rhs.unknownFields {return false}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
extension SdLoadModelResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
|
||||
public static let protoMessageName: String = _protobuf_package + ".LoadModelResponse"
|
||||
public static let _protobuf_nameMap = SwiftProtobuf._NameMap()
|
||||
|
||||
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
|
||||
while let _ = try decoder.nextFieldNumber() {
|
||||
}
|
||||
}
|
||||
|
||||
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
|
||||
try unknownFields.traverse(visitor: &visitor)
|
||||
}
|
||||
|
||||
public static func ==(lhs: SdLoadModelResponse, rhs: SdLoadModelResponse) -> Bool {
|
||||
if lhs.unknownFields != rhs.unknownFields {return false}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
extension SdGenerateImagesRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
|
||||
public static let protoMessageName: String = _protobuf_package + ".GenerateImagesRequest"
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
@ -1093,6 +496,7 @@ extension SdGenerateImagesStreamUpdate: SwiftProtobuf.Message, SwiftProtobuf._Me
|
||||
2: .standard(proto: "batch_progress"),
|
||||
3: .standard(proto: "batch_completed"),
|
||||
4: .standard(proto: "overall_percentage_complete"),
|
||||
5: .standard(proto: "job_id"),
|
||||
]
|
||||
|
||||
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
|
||||
@ -1129,6 +533,7 @@ extension SdGenerateImagesStreamUpdate: SwiftProtobuf.Message, SwiftProtobuf._Me
|
||||
}
|
||||
}()
|
||||
case 4: try { try decoder.decodeSingularFloatField(value: &self.overallPercentageComplete) }()
|
||||
case 5: try { try decoder.decodeSingularUInt64Field(value: &self.jobID) }()
|
||||
default: break
|
||||
}
|
||||
}
|
||||
@ -1156,6 +561,9 @@ extension SdGenerateImagesStreamUpdate: SwiftProtobuf.Message, SwiftProtobuf._Me
|
||||
if self.overallPercentageComplete != 0 {
|
||||
try visitor.visitSingularFloatField(value: self.overallPercentageComplete, fieldNumber: 4)
|
||||
}
|
||||
if self.jobID != 0 {
|
||||
try visitor.visitSingularUInt64Field(value: self.jobID, fieldNumber: 5)
|
||||
}
|
||||
try unknownFields.traverse(visitor: &visitor)
|
||||
}
|
||||
|
||||
@ -1163,82 +571,7 @@ extension SdGenerateImagesStreamUpdate: SwiftProtobuf.Message, SwiftProtobuf._Me
|
||||
if lhs.currentBatch != rhs.currentBatch {return false}
|
||||
if lhs.update != rhs.update {return false}
|
||||
if lhs.overallPercentageComplete != rhs.overallPercentageComplete {return false}
|
||||
if lhs.unknownFields != rhs.unknownFields {return false}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
extension SdTokenizeRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
|
||||
public static let protoMessageName: String = _protobuf_package + ".TokenizeRequest"
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
1: .standard(proto: "model_name"),
|
||||
2: .same(proto: "input"),
|
||||
]
|
||||
|
||||
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
|
||||
while let fieldNumber = try decoder.nextFieldNumber() {
|
||||
// The use of inline closures is to circumvent an issue where the compiler
|
||||
// allocates stack space for every case branch when no optimizations are
|
||||
// enabled. https://github.com/apple/swift-protobuf/issues/1034
|
||||
switch fieldNumber {
|
||||
case 1: try { try decoder.decodeSingularStringField(value: &self.modelName) }()
|
||||
case 2: try { try decoder.decodeSingularStringField(value: &self.input) }()
|
||||
default: break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
|
||||
if !self.modelName.isEmpty {
|
||||
try visitor.visitSingularStringField(value: self.modelName, fieldNumber: 1)
|
||||
}
|
||||
if !self.input.isEmpty {
|
||||
try visitor.visitSingularStringField(value: self.input, fieldNumber: 2)
|
||||
}
|
||||
try unknownFields.traverse(visitor: &visitor)
|
||||
}
|
||||
|
||||
public static func ==(lhs: SdTokenizeRequest, rhs: SdTokenizeRequest) -> Bool {
|
||||
if lhs.modelName != rhs.modelName {return false}
|
||||
if lhs.input != rhs.input {return false}
|
||||
if lhs.unknownFields != rhs.unknownFields {return false}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
extension SdTokenizeResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
|
||||
public static let protoMessageName: String = _protobuf_package + ".TokenizeResponse"
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
1: .same(proto: "tokens"),
|
||||
2: .standard(proto: "token_ids"),
|
||||
]
|
||||
|
||||
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
|
||||
while let fieldNumber = try decoder.nextFieldNumber() {
|
||||
// The use of inline closures is to circumvent an issue where the compiler
|
||||
// allocates stack space for every case branch when no optimizations are
|
||||
// enabled. https://github.com/apple/swift-protobuf/issues/1034
|
||||
switch fieldNumber {
|
||||
case 1: try { try decoder.decodeRepeatedStringField(value: &self.tokens) }()
|
||||
case 2: try { try decoder.decodeRepeatedUInt64Field(value: &self.tokenIds) }()
|
||||
default: break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
|
||||
if !self.tokens.isEmpty {
|
||||
try visitor.visitRepeatedStringField(value: self.tokens, fieldNumber: 1)
|
||||
}
|
||||
if !self.tokenIds.isEmpty {
|
||||
try visitor.visitPackedUInt64Field(value: self.tokenIds, fieldNumber: 2)
|
||||
}
|
||||
try unknownFields.traverse(visitor: &visitor)
|
||||
}
|
||||
|
||||
public static func ==(lhs: SdTokenizeResponse, rhs: SdTokenizeResponse) -> Bool {
|
||||
if lhs.tokens != rhs.tokens {return false}
|
||||
if lhs.tokenIds != rhs.tokenIds {return false}
|
||||
if lhs.jobID != rhs.jobID {return false}
|
||||
if lhs.unknownFields != rhs.unknownFields {return false}
|
||||
return true
|
||||
}
|
535
Sources/StableDiffusionProtos/jobs.grpc.swift
Normal file
535
Sources/StableDiffusionProtos/jobs.grpc.swift
Normal file
@ -0,0 +1,535 @@
|
||||
//
|
||||
// DO NOT EDIT.
|
||||
//
|
||||
// Generated by the protocol buffer compiler.
|
||||
// Source: jobs.proto
|
||||
//
|
||||
|
||||
//
|
||||
// Copyright 2018, gRPC Authors All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
import GRPC
|
||||
import NIO
|
||||
import NIOConcurrencyHelpers
|
||||
import SwiftProtobuf
|
||||
|
||||
|
||||
///*
|
||||
/// The job service, for inspecting and monitoring the state of jobs executing on the service.
|
||||
///
|
||||
/// Usage: instantiate `SdJobServiceClient`, then call methods of this protocol to make API calls.
|
||||
public protocol SdJobServiceClientProtocol: GRPCClient {
|
||||
var serviceName: String { get }
|
||||
var interceptors: SdJobServiceClientInterceptorFactoryProtocol? { get }
|
||||
|
||||
func getJob(
|
||||
_ request: SdGetJobRequest,
|
||||
callOptions: CallOptions?
|
||||
) -> UnaryCall<SdGetJobRequest, SdGetJobResponse>
|
||||
|
||||
func cancelJob(
|
||||
_ request: SdCancelJobRequest,
|
||||
callOptions: CallOptions?
|
||||
) -> UnaryCall<SdCancelJobRequest, SdCancelJobResponse>
|
||||
|
||||
func streamJobUpdates(
|
||||
_ request: SdStreamJobUpdatesRequest,
|
||||
callOptions: CallOptions?,
|
||||
handler: @escaping (SdJobUpdate) -> Void
|
||||
) -> ServerStreamingCall<SdStreamJobUpdatesRequest, SdJobUpdate>
|
||||
}
|
||||
|
||||
extension SdJobServiceClientProtocol {
|
||||
public var serviceName: String {
|
||||
return "gay.pizza.stable.diffusion.JobService"
|
||||
}
|
||||
|
||||
/// Unary call to GetJob
|
||||
///
|
||||
/// - Parameters:
|
||||
/// - request: Request to send to GetJob.
|
||||
/// - callOptions: Call options.
|
||||
/// - Returns: A `UnaryCall` with futures for the metadata, status and response.
|
||||
public func getJob(
|
||||
_ request: SdGetJobRequest,
|
||||
callOptions: CallOptions? = nil
|
||||
) -> UnaryCall<SdGetJobRequest, SdGetJobResponse> {
|
||||
return self.makeUnaryCall(
|
||||
path: SdJobServiceClientMetadata.Methods.getJob.path,
|
||||
request: request,
|
||||
callOptions: callOptions ?? self.defaultCallOptions,
|
||||
interceptors: self.interceptors?.makeGetJobInterceptors() ?? []
|
||||
)
|
||||
}
|
||||
|
||||
/// Unary call to CancelJob
|
||||
///
|
||||
/// - Parameters:
|
||||
/// - request: Request to send to CancelJob.
|
||||
/// - callOptions: Call options.
|
||||
/// - Returns: A `UnaryCall` with futures for the metadata, status and response.
|
||||
public func cancelJob(
|
||||
_ request: SdCancelJobRequest,
|
||||
callOptions: CallOptions? = nil
|
||||
) -> UnaryCall<SdCancelJobRequest, SdCancelJobResponse> {
|
||||
return self.makeUnaryCall(
|
||||
path: SdJobServiceClientMetadata.Methods.cancelJob.path,
|
||||
request: request,
|
||||
callOptions: callOptions ?? self.defaultCallOptions,
|
||||
interceptors: self.interceptors?.makeCancelJobInterceptors() ?? []
|
||||
)
|
||||
}
|
||||
|
||||
/// Server streaming call to StreamJobUpdates
|
||||
///
|
||||
/// - Parameters:
|
||||
/// - request: Request to send to StreamJobUpdates.
|
||||
/// - callOptions: Call options.
|
||||
/// - handler: A closure called when each response is received from the server.
|
||||
/// - Returns: A `ServerStreamingCall` with futures for the metadata and status.
|
||||
public func streamJobUpdates(
|
||||
_ request: SdStreamJobUpdatesRequest,
|
||||
callOptions: CallOptions? = nil,
|
||||
handler: @escaping (SdJobUpdate) -> Void
|
||||
) -> ServerStreamingCall<SdStreamJobUpdatesRequest, SdJobUpdate> {
|
||||
return self.makeServerStreamingCall(
|
||||
path: SdJobServiceClientMetadata.Methods.streamJobUpdates.path,
|
||||
request: request,
|
||||
callOptions: callOptions ?? self.defaultCallOptions,
|
||||
interceptors: self.interceptors?.makeStreamJobUpdatesInterceptors() ?? [],
|
||||
handler: handler
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#if compiler(>=5.6)
|
||||
@available(*, deprecated)
|
||||
extension SdJobServiceClient: @unchecked Sendable {}
|
||||
#endif // compiler(>=5.6)
|
||||
|
||||
@available(*, deprecated, renamed: "SdJobServiceNIOClient")
|
||||
public final class SdJobServiceClient: SdJobServiceClientProtocol {
|
||||
private let lock = Lock()
|
||||
private var _defaultCallOptions: CallOptions
|
||||
private var _interceptors: SdJobServiceClientInterceptorFactoryProtocol?
|
||||
public let channel: GRPCChannel
|
||||
public var defaultCallOptions: CallOptions {
|
||||
get { self.lock.withLock { return self._defaultCallOptions } }
|
||||
set { self.lock.withLockVoid { self._defaultCallOptions = newValue } }
|
||||
}
|
||||
public var interceptors: SdJobServiceClientInterceptorFactoryProtocol? {
|
||||
get { self.lock.withLock { return self._interceptors } }
|
||||
set { self.lock.withLockVoid { self._interceptors = newValue } }
|
||||
}
|
||||
|
||||
/// Creates a client for the gay.pizza.stable.diffusion.JobService service.
|
||||
///
|
||||
/// - Parameters:
|
||||
/// - channel: `GRPCChannel` to the service host.
|
||||
/// - defaultCallOptions: Options to use for each service call if the user doesn't provide them.
|
||||
/// - interceptors: A factory providing interceptors for each RPC.
|
||||
public init(
|
||||
channel: GRPCChannel,
|
||||
defaultCallOptions: CallOptions = CallOptions(),
|
||||
interceptors: SdJobServiceClientInterceptorFactoryProtocol? = nil
|
||||
) {
|
||||
self.channel = channel
|
||||
self._defaultCallOptions = defaultCallOptions
|
||||
self._interceptors = interceptors
|
||||
}
|
||||
}
|
||||
|
||||
public struct SdJobServiceNIOClient: SdJobServiceClientProtocol {
|
||||
public var channel: GRPCChannel
|
||||
public var defaultCallOptions: CallOptions
|
||||
public var interceptors: SdJobServiceClientInterceptorFactoryProtocol?
|
||||
|
||||
/// Creates a client for the gay.pizza.stable.diffusion.JobService service.
|
||||
///
|
||||
/// - Parameters:
|
||||
/// - channel: `GRPCChannel` to the service host.
|
||||
/// - defaultCallOptions: Options to use for each service call if the user doesn't provide them.
|
||||
/// - interceptors: A factory providing interceptors for each RPC.
|
||||
public init(
|
||||
channel: GRPCChannel,
|
||||
defaultCallOptions: CallOptions = CallOptions(),
|
||||
interceptors: SdJobServiceClientInterceptorFactoryProtocol? = nil
|
||||
) {
|
||||
self.channel = channel
|
||||
self.defaultCallOptions = defaultCallOptions
|
||||
self.interceptors = interceptors
|
||||
}
|
||||
}
|
||||
|
||||
#if compiler(>=5.6)
|
||||
///*
|
||||
/// The job service, for inspecting and monitoring the state of jobs executing on the service.
|
||||
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
|
||||
public protocol SdJobServiceAsyncClientProtocol: GRPCClient {
|
||||
static var serviceDescriptor: GRPCServiceDescriptor { get }
|
||||
var interceptors: SdJobServiceClientInterceptorFactoryProtocol? { get }
|
||||
|
||||
func makeGetJobCall(
|
||||
_ request: SdGetJobRequest,
|
||||
callOptions: CallOptions?
|
||||
) -> GRPCAsyncUnaryCall<SdGetJobRequest, SdGetJobResponse>
|
||||
|
||||
func makeCancelJobCall(
|
||||
_ request: SdCancelJobRequest,
|
||||
callOptions: CallOptions?
|
||||
) -> GRPCAsyncUnaryCall<SdCancelJobRequest, SdCancelJobResponse>
|
||||
|
||||
func makeStreamJobUpdatesCall(
|
||||
_ request: SdStreamJobUpdatesRequest,
|
||||
callOptions: CallOptions?
|
||||
) -> GRPCAsyncServerStreamingCall<SdStreamJobUpdatesRequest, SdJobUpdate>
|
||||
}
|
||||
|
||||
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
|
||||
extension SdJobServiceAsyncClientProtocol {
|
||||
public static var serviceDescriptor: GRPCServiceDescriptor {
|
||||
return SdJobServiceClientMetadata.serviceDescriptor
|
||||
}
|
||||
|
||||
public var interceptors: SdJobServiceClientInterceptorFactoryProtocol? {
|
||||
return nil
|
||||
}
|
||||
|
||||
public func makeGetJobCall(
|
||||
_ request: SdGetJobRequest,
|
||||
callOptions: CallOptions? = nil
|
||||
) -> GRPCAsyncUnaryCall<SdGetJobRequest, SdGetJobResponse> {
|
||||
return self.makeAsyncUnaryCall(
|
||||
path: SdJobServiceClientMetadata.Methods.getJob.path,
|
||||
request: request,
|
||||
callOptions: callOptions ?? self.defaultCallOptions,
|
||||
interceptors: self.interceptors?.makeGetJobInterceptors() ?? []
|
||||
)
|
||||
}
|
||||
|
||||
public func makeCancelJobCall(
|
||||
_ request: SdCancelJobRequest,
|
||||
callOptions: CallOptions? = nil
|
||||
) -> GRPCAsyncUnaryCall<SdCancelJobRequest, SdCancelJobResponse> {
|
||||
return self.makeAsyncUnaryCall(
|
||||
path: SdJobServiceClientMetadata.Methods.cancelJob.path,
|
||||
request: request,
|
||||
callOptions: callOptions ?? self.defaultCallOptions,
|
||||
interceptors: self.interceptors?.makeCancelJobInterceptors() ?? []
|
||||
)
|
||||
}
|
||||
|
||||
public func makeStreamJobUpdatesCall(
|
||||
_ request: SdStreamJobUpdatesRequest,
|
||||
callOptions: CallOptions? = nil
|
||||
) -> GRPCAsyncServerStreamingCall<SdStreamJobUpdatesRequest, SdJobUpdate> {
|
||||
return self.makeAsyncServerStreamingCall(
|
||||
path: SdJobServiceClientMetadata.Methods.streamJobUpdates.path,
|
||||
request: request,
|
||||
callOptions: callOptions ?? self.defaultCallOptions,
|
||||
interceptors: self.interceptors?.makeStreamJobUpdatesInterceptors() ?? []
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
|
||||
extension SdJobServiceAsyncClientProtocol {
|
||||
public func getJob(
|
||||
_ request: SdGetJobRequest,
|
||||
callOptions: CallOptions? = nil
|
||||
) async throws -> SdGetJobResponse {
|
||||
return try await self.performAsyncUnaryCall(
|
||||
path: SdJobServiceClientMetadata.Methods.getJob.path,
|
||||
request: request,
|
||||
callOptions: callOptions ?? self.defaultCallOptions,
|
||||
interceptors: self.interceptors?.makeGetJobInterceptors() ?? []
|
||||
)
|
||||
}
|
||||
|
||||
public func cancelJob(
|
||||
_ request: SdCancelJobRequest,
|
||||
callOptions: CallOptions? = nil
|
||||
) async throws -> SdCancelJobResponse {
|
||||
return try await self.performAsyncUnaryCall(
|
||||
path: SdJobServiceClientMetadata.Methods.cancelJob.path,
|
||||
request: request,
|
||||
callOptions: callOptions ?? self.defaultCallOptions,
|
||||
interceptors: self.interceptors?.makeCancelJobInterceptors() ?? []
|
||||
)
|
||||
}
|
||||
|
||||
public func streamJobUpdates(
|
||||
_ request: SdStreamJobUpdatesRequest,
|
||||
callOptions: CallOptions? = nil
|
||||
) -> GRPCAsyncResponseStream<SdJobUpdate> {
|
||||
return self.performAsyncServerStreamingCall(
|
||||
path: SdJobServiceClientMetadata.Methods.streamJobUpdates.path,
|
||||
request: request,
|
||||
callOptions: callOptions ?? self.defaultCallOptions,
|
||||
interceptors: self.interceptors?.makeStreamJobUpdatesInterceptors() ?? []
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
|
||||
public struct SdJobServiceAsyncClient: SdJobServiceAsyncClientProtocol {
|
||||
public var channel: GRPCChannel
|
||||
public var defaultCallOptions: CallOptions
|
||||
public var interceptors: SdJobServiceClientInterceptorFactoryProtocol?
|
||||
|
||||
public init(
|
||||
channel: GRPCChannel,
|
||||
defaultCallOptions: CallOptions = CallOptions(),
|
||||
interceptors: SdJobServiceClientInterceptorFactoryProtocol? = nil
|
||||
) {
|
||||
self.channel = channel
|
||||
self.defaultCallOptions = defaultCallOptions
|
||||
self.interceptors = interceptors
|
||||
}
|
||||
}
|
||||
|
||||
#endif // compiler(>=5.6)
|
||||
|
||||
public protocol SdJobServiceClientInterceptorFactoryProtocol: GRPCSendable {
|
||||
|
||||
/// - Returns: Interceptors to use when invoking 'getJob'.
|
||||
func makeGetJobInterceptors() -> [ClientInterceptor<SdGetJobRequest, SdGetJobResponse>]
|
||||
|
||||
/// - Returns: Interceptors to use when invoking 'cancelJob'.
|
||||
func makeCancelJobInterceptors() -> [ClientInterceptor<SdCancelJobRequest, SdCancelJobResponse>]
|
||||
|
||||
/// - Returns: Interceptors to use when invoking 'streamJobUpdates'.
|
||||
func makeStreamJobUpdatesInterceptors() -> [ClientInterceptor<SdStreamJobUpdatesRequest, SdJobUpdate>]
|
||||
}
|
||||
|
||||
public enum SdJobServiceClientMetadata {
|
||||
public static let serviceDescriptor = GRPCServiceDescriptor(
|
||||
name: "JobService",
|
||||
fullName: "gay.pizza.stable.diffusion.JobService",
|
||||
methods: [
|
||||
SdJobServiceClientMetadata.Methods.getJob,
|
||||
SdJobServiceClientMetadata.Methods.cancelJob,
|
||||
SdJobServiceClientMetadata.Methods.streamJobUpdates,
|
||||
]
|
||||
)
|
||||
|
||||
public enum Methods {
|
||||
public static let getJob = GRPCMethodDescriptor(
|
||||
name: "GetJob",
|
||||
path: "/gay.pizza.stable.diffusion.JobService/GetJob",
|
||||
type: GRPCCallType.unary
|
||||
)
|
||||
|
||||
public static let cancelJob = GRPCMethodDescriptor(
|
||||
name: "CancelJob",
|
||||
path: "/gay.pizza.stable.diffusion.JobService/CancelJob",
|
||||
type: GRPCCallType.unary
|
||||
)
|
||||
|
||||
public static let streamJobUpdates = GRPCMethodDescriptor(
|
||||
name: "StreamJobUpdates",
|
||||
path: "/gay.pizza.stable.diffusion.JobService/StreamJobUpdates",
|
||||
type: GRPCCallType.serverStreaming
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
///*
|
||||
/// The job service, for inspecting and monitoring the state of jobs executing on the service.
|
||||
///
|
||||
/// To build a server, implement a class that conforms to this protocol.
|
||||
public protocol SdJobServiceProvider: CallHandlerProvider {
|
||||
var interceptors: SdJobServiceServerInterceptorFactoryProtocol? { get }
|
||||
|
||||
func getJob(request: SdGetJobRequest, context: StatusOnlyCallContext) -> EventLoopFuture<SdGetJobResponse>
|
||||
|
||||
func cancelJob(request: SdCancelJobRequest, context: StatusOnlyCallContext) -> EventLoopFuture<SdCancelJobResponse>
|
||||
|
||||
func streamJobUpdates(request: SdStreamJobUpdatesRequest, context: StreamingResponseCallContext<SdJobUpdate>) -> EventLoopFuture<GRPCStatus>
|
||||
}
|
||||
|
||||
extension SdJobServiceProvider {
|
||||
public var serviceName: Substring {
|
||||
return SdJobServiceServerMetadata.serviceDescriptor.fullName[...]
|
||||
}
|
||||
|
||||
/// Determines, calls and returns the appropriate request handler, depending on the request's method.
|
||||
/// Returns nil for methods not handled by this service.
|
||||
public func handle(
|
||||
method name: Substring,
|
||||
context: CallHandlerContext
|
||||
) -> GRPCServerHandlerProtocol? {
|
||||
switch name {
|
||||
case "GetJob":
|
||||
return UnaryServerHandler(
|
||||
context: context,
|
||||
requestDeserializer: ProtobufDeserializer<SdGetJobRequest>(),
|
||||
responseSerializer: ProtobufSerializer<SdGetJobResponse>(),
|
||||
interceptors: self.interceptors?.makeGetJobInterceptors() ?? [],
|
||||
userFunction: self.getJob(request:context:)
|
||||
)
|
||||
|
||||
case "CancelJob":
|
||||
return UnaryServerHandler(
|
||||
context: context,
|
||||
requestDeserializer: ProtobufDeserializer<SdCancelJobRequest>(),
|
||||
responseSerializer: ProtobufSerializer<SdCancelJobResponse>(),
|
||||
interceptors: self.interceptors?.makeCancelJobInterceptors() ?? [],
|
||||
userFunction: self.cancelJob(request:context:)
|
||||
)
|
||||
|
||||
case "StreamJobUpdates":
|
||||
return ServerStreamingServerHandler(
|
||||
context: context,
|
||||
requestDeserializer: ProtobufDeserializer<SdStreamJobUpdatesRequest>(),
|
||||
responseSerializer: ProtobufSerializer<SdJobUpdate>(),
|
||||
interceptors: self.interceptors?.makeStreamJobUpdatesInterceptors() ?? [],
|
||||
userFunction: self.streamJobUpdates(request:context:)
|
||||
)
|
||||
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#if compiler(>=5.6)
|
||||
|
||||
///*
|
||||
/// The job service, for inspecting and monitoring the state of jobs executing on the service.
|
||||
///
|
||||
/// To implement a server, implement an object which conforms to this protocol.
|
||||
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
|
||||
public protocol SdJobServiceAsyncProvider: CallHandlerProvider {
|
||||
static var serviceDescriptor: GRPCServiceDescriptor { get }
|
||||
var interceptors: SdJobServiceServerInterceptorFactoryProtocol? { get }
|
||||
|
||||
@Sendable func getJob(
|
||||
request: SdGetJobRequest,
|
||||
context: GRPCAsyncServerCallContext
|
||||
) async throws -> SdGetJobResponse
|
||||
|
||||
@Sendable func cancelJob(
|
||||
request: SdCancelJobRequest,
|
||||
context: GRPCAsyncServerCallContext
|
||||
) async throws -> SdCancelJobResponse
|
||||
|
||||
@Sendable func streamJobUpdates(
|
||||
request: SdStreamJobUpdatesRequest,
|
||||
responseStream: GRPCAsyncResponseStreamWriter<SdJobUpdate>,
|
||||
context: GRPCAsyncServerCallContext
|
||||
) async throws
|
||||
}
|
||||
|
||||
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
|
||||
extension SdJobServiceAsyncProvider {
|
||||
public static var serviceDescriptor: GRPCServiceDescriptor {
|
||||
return SdJobServiceServerMetadata.serviceDescriptor
|
||||
}
|
||||
|
||||
public var serviceName: Substring {
|
||||
return SdJobServiceServerMetadata.serviceDescriptor.fullName[...]
|
||||
}
|
||||
|
||||
public var interceptors: SdJobServiceServerInterceptorFactoryProtocol? {
|
||||
return nil
|
||||
}
|
||||
|
||||
public func handle(
|
||||
method name: Substring,
|
||||
context: CallHandlerContext
|
||||
) -> GRPCServerHandlerProtocol? {
|
||||
switch name {
|
||||
case "GetJob":
|
||||
return GRPCAsyncServerHandler(
|
||||
context: context,
|
||||
requestDeserializer: ProtobufDeserializer<SdGetJobRequest>(),
|
||||
responseSerializer: ProtobufSerializer<SdGetJobResponse>(),
|
||||
interceptors: self.interceptors?.makeGetJobInterceptors() ?? [],
|
||||
wrapping: self.getJob(request:context:)
|
||||
)
|
||||
|
||||
case "CancelJob":
|
||||
return GRPCAsyncServerHandler(
|
||||
context: context,
|
||||
requestDeserializer: ProtobufDeserializer<SdCancelJobRequest>(),
|
||||
responseSerializer: ProtobufSerializer<SdCancelJobResponse>(),
|
||||
interceptors: self.interceptors?.makeCancelJobInterceptors() ?? [],
|
||||
wrapping: self.cancelJob(request:context:)
|
||||
)
|
||||
|
||||
case "StreamJobUpdates":
|
||||
return GRPCAsyncServerHandler(
|
||||
context: context,
|
||||
requestDeserializer: ProtobufDeserializer<SdStreamJobUpdatesRequest>(),
|
||||
responseSerializer: ProtobufSerializer<SdJobUpdate>(),
|
||||
interceptors: self.interceptors?.makeStreamJobUpdatesInterceptors() ?? [],
|
||||
wrapping: self.streamJobUpdates(request:responseStream:context:)
|
||||
)
|
||||
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#endif // compiler(>=5.6)
|
||||
|
||||
public protocol SdJobServiceServerInterceptorFactoryProtocol {
|
||||
|
||||
/// - Returns: Interceptors to use when handling 'getJob'.
|
||||
/// Defaults to calling `self.makeInterceptors()`.
|
||||
func makeGetJobInterceptors() -> [ServerInterceptor<SdGetJobRequest, SdGetJobResponse>]
|
||||
|
||||
/// - Returns: Interceptors to use when handling 'cancelJob'.
|
||||
/// Defaults to calling `self.makeInterceptors()`.
|
||||
func makeCancelJobInterceptors() -> [ServerInterceptor<SdCancelJobRequest, SdCancelJobResponse>]
|
||||
|
||||
/// - Returns: Interceptors to use when handling 'streamJobUpdates'.
|
||||
/// Defaults to calling `self.makeInterceptors()`.
|
||||
func makeStreamJobUpdatesInterceptors() -> [ServerInterceptor<SdStreamJobUpdatesRequest, SdJobUpdate>]
|
||||
}
|
||||
|
||||
public enum SdJobServiceServerMetadata {
|
||||
public static let serviceDescriptor = GRPCServiceDescriptor(
|
||||
name: "JobService",
|
||||
fullName: "gay.pizza.stable.diffusion.JobService",
|
||||
methods: [
|
||||
SdJobServiceServerMetadata.Methods.getJob,
|
||||
SdJobServiceServerMetadata.Methods.cancelJob,
|
||||
SdJobServiceServerMetadata.Methods.streamJobUpdates,
|
||||
]
|
||||
)
|
||||
|
||||
public enum Methods {
|
||||
public static let getJob = GRPCMethodDescriptor(
|
||||
name: "GetJob",
|
||||
path: "/gay.pizza.stable.diffusion.JobService/GetJob",
|
||||
type: GRPCCallType.unary
|
||||
)
|
||||
|
||||
public static let cancelJob = GRPCMethodDescriptor(
|
||||
name: "CancelJob",
|
||||
path: "/gay.pizza.stable.diffusion.JobService/CancelJob",
|
||||
type: GRPCCallType.unary
|
||||
)
|
||||
|
||||
public static let streamJobUpdates = GRPCMethodDescriptor(
|
||||
name: "StreamJobUpdates",
|
||||
path: "/gay.pizza.stable.diffusion.JobService/StreamJobUpdates",
|
||||
type: GRPCCallType.serverStreaming
|
||||
)
|
||||
}
|
||||
}
|
494
Sources/StableDiffusionProtos/jobs.pb.swift
Normal file
494
Sources/StableDiffusionProtos/jobs.pb.swift
Normal file
@ -0,0 +1,494 @@
|
||||
// DO NOT EDIT.
|
||||
// swift-format-ignore-file
|
||||
//
|
||||
// Generated by the Swift generator plugin for the protocol buffer compiler.
|
||||
// Source: jobs.proto
|
||||
//
|
||||
// For information on using the generated types, please see the documentation:
|
||||
// https://github.com/apple/swift-protobuf/
|
||||
|
||||
///*
|
||||
/// Stable Diffusion RPC service for Apple Platforms.
|
||||
|
||||
import Foundation
|
||||
import SwiftProtobuf
|
||||
|
||||
// If the compiler emits an error on this type, it is because this file
|
||||
// was generated by a version of the `protoc` Swift plug-in that is
|
||||
// incompatible with the version of SwiftProtobuf to which you are linking.
|
||||
// Please ensure that you are building against the same version of the API
|
||||
// that was used to generate this file.
|
||||
fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck {
|
||||
struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {}
|
||||
typealias Version = _2
|
||||
}
|
||||
|
||||
///*
|
||||
/// Represents the current state of a job.
|
||||
public enum SdJobState: SwiftProtobuf.Enum {
|
||||
public typealias RawValue = Int
|
||||
|
||||
///*
|
||||
/// The job is in an unknown state.
|
||||
case unknown // = 0
|
||||
|
||||
///*
|
||||
/// The job is queued. It has not started the work.
|
||||
case queued // = 1
|
||||
|
||||
///*
|
||||
/// The job is running. The work has been started.
|
||||
case running // = 2
|
||||
|
||||
///*
|
||||
/// The job is completed. The work has been completed.
|
||||
case completed // = 3
|
||||
|
||||
///*
|
||||
/// The job is cancelled. An actor requested cancellation.
|
||||
case cancelled // = 4
|
||||
case UNRECOGNIZED(Int)
|
||||
|
||||
public init() {
|
||||
self = .unknown
|
||||
}
|
||||
|
||||
public init?(rawValue: Int) {
|
||||
switch rawValue {
|
||||
case 0: self = .unknown
|
||||
case 1: self = .queued
|
||||
case 2: self = .running
|
||||
case 3: self = .completed
|
||||
case 4: self = .cancelled
|
||||
default: self = .UNRECOGNIZED(rawValue)
|
||||
}
|
||||
}
|
||||
|
||||
public var rawValue: Int {
|
||||
switch self {
|
||||
case .unknown: return 0
|
||||
case .queued: return 1
|
||||
case .running: return 2
|
||||
case .completed: return 3
|
||||
case .cancelled: return 4
|
||||
case .UNRECOGNIZED(let i): return i
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#if swift(>=4.2)
|
||||
|
||||
extension SdJobState: CaseIterable {
|
||||
// The compiler won't synthesize support with the UNRECOGNIZED case.
|
||||
public static var allCases: [SdJobState] = [
|
||||
.unknown,
|
||||
.queued,
|
||||
.running,
|
||||
.completed,
|
||||
.cancelled,
|
||||
]
|
||||
}
|
||||
|
||||
#endif // swift(>=4.2)
|
||||
|
||||
///*
|
||||
/// Represents a job that is active
|
||||
public struct SdJob {
|
||||
// SwiftProtobuf.Message conformance is added in an extension below. See the
|
||||
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
|
||||
// methods supported on all messages.
|
||||
|
||||
///*
|
||||
/// Unique job identifier.
|
||||
public var id: UInt64 = 0
|
||||
|
||||
///*
|
||||
/// Job host identifier.
|
||||
public var host: UInt64 = 0
|
||||
|
||||
///*
|
||||
/// The current state of the job.
|
||||
public var state: SdJobState = .unknown
|
||||
|
||||
///*
|
||||
/// The percentage of completion for the entire job.
|
||||
public var overallPercentageComplete: Float = 0
|
||||
|
||||
public var unknownFields = SwiftProtobuf.UnknownStorage()
|
||||
|
||||
public init() {}
|
||||
}
|
||||
|
||||
///*
|
||||
/// Represents a request to get the state of a job.
|
||||
public struct SdGetJobRequest {
|
||||
// SwiftProtobuf.Message conformance is added in an extension below. See the
|
||||
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
|
||||
// methods supported on all messages.
|
||||
|
||||
///*
|
||||
/// The job id to retrieve the current state for.
|
||||
public var id: UInt64 = 0
|
||||
|
||||
public var unknownFields = SwiftProtobuf.UnknownStorage()
|
||||
|
||||
public init() {}
|
||||
}
|
||||
|
||||
///*
|
||||
/// Represents a response to getting the state of a job.
|
||||
public struct SdGetJobResponse {
|
||||
// SwiftProtobuf.Message conformance is added in an extension below. See the
|
||||
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
|
||||
// methods supported on all messages.
|
||||
|
||||
///*
|
||||
/// The current state of the job.
|
||||
public var job: SdJob {
|
||||
get {return _job ?? SdJob()}
|
||||
set {_job = newValue}
|
||||
}
|
||||
/// Returns true if `job` has been explicitly set.
|
||||
public var hasJob: Bool {return self._job != nil}
|
||||
/// Clears the value of `job`. Subsequent reads from it will return its default value.
|
||||
public mutating func clearJob() {self._job = nil}
|
||||
|
||||
public var unknownFields = SwiftProtobuf.UnknownStorage()
|
||||
|
||||
public init() {}
|
||||
|
||||
fileprivate var _job: SdJob? = nil
|
||||
}
|
||||
|
||||
///*
|
||||
/// Represents a request to cancel a job.
|
||||
public struct SdCancelJobRequest {
|
||||
// SwiftProtobuf.Message conformance is added in an extension below. See the
|
||||
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
|
||||
// methods supported on all messages.
|
||||
|
||||
///*
|
||||
/// The job id to cancel.
|
||||
public var id: UInt64 = 0
|
||||
|
||||
public var unknownFields = SwiftProtobuf.UnknownStorage()
|
||||
|
||||
public init() {}
|
||||
}
|
||||
|
||||
///*
|
||||
/// Represents a response to cancel a job.
|
||||
public struct SdCancelJobResponse {
|
||||
// SwiftProtobuf.Message conformance is added in an extension below. See the
|
||||
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
|
||||
// methods supported on all messages.
|
||||
|
||||
public var unknownFields = SwiftProtobuf.UnknownStorage()
|
||||
|
||||
public init() {}
|
||||
}
|
||||
|
||||
///*
|
||||
/// Represents a request to stream job updates.
|
||||
public struct SdStreamJobUpdatesRequest {
|
||||
// SwiftProtobuf.Message conformance is added in an extension below. See the
|
||||
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
|
||||
// methods supported on all messages.
|
||||
|
||||
///*
|
||||
/// The job id to stream updates for. If this is not set or is zero,
|
||||
/// all job updates will be sent.
|
||||
public var id: UInt64 = 0
|
||||
|
||||
public var unknownFields = SwiftProtobuf.UnknownStorage()
|
||||
|
||||
public init() {}
|
||||
}
|
||||
|
||||
///*
|
||||
/// Represents an update to a job.
|
||||
public struct SdJobUpdate {
|
||||
// SwiftProtobuf.Message conformance is added in an extension below. See the
|
||||
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
|
||||
// methods supported on all messages.
|
||||
|
||||
///*
|
||||
/// The current state of the job.
|
||||
public var job: SdJob {
|
||||
get {return _job ?? SdJob()}
|
||||
set {_job = newValue}
|
||||
}
|
||||
/// Returns true if `job` has been explicitly set.
|
||||
public var hasJob: Bool {return self._job != nil}
|
||||
/// Clears the value of `job`. Subsequent reads from it will return its default value.
|
||||
public mutating func clearJob() {self._job = nil}
|
||||
|
||||
public var unknownFields = SwiftProtobuf.UnknownStorage()
|
||||
|
||||
public init() {}
|
||||
|
||||
fileprivate var _job: SdJob? = nil
|
||||
}
|
||||
|
||||
#if swift(>=5.5) && canImport(_Concurrency)
|
||||
extension SdJobState: @unchecked Sendable {}
|
||||
extension SdJob: @unchecked Sendable {}
|
||||
extension SdGetJobRequest: @unchecked Sendable {}
|
||||
extension SdGetJobResponse: @unchecked Sendable {}
|
||||
extension SdCancelJobRequest: @unchecked Sendable {}
|
||||
extension SdCancelJobResponse: @unchecked Sendable {}
|
||||
extension SdStreamJobUpdatesRequest: @unchecked Sendable {}
|
||||
extension SdJobUpdate: @unchecked Sendable {}
|
||||
#endif // swift(>=5.5) && canImport(_Concurrency)
|
||||
|
||||
// MARK: - Code below here is support for the SwiftProtobuf runtime.
|
||||
|
||||
fileprivate let _protobuf_package = "gay.pizza.stable.diffusion"
|
||||
|
||||
extension SdJobState: SwiftProtobuf._ProtoNameProviding {
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
0: .same(proto: "unknown"),
|
||||
1: .same(proto: "queued"),
|
||||
2: .same(proto: "running"),
|
||||
3: .same(proto: "completed"),
|
||||
4: .same(proto: "cancelled"),
|
||||
]
|
||||
}
|
||||
|
||||
extension SdJob: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
|
||||
public static let protoMessageName: String = _protobuf_package + ".Job"
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
1: .same(proto: "id"),
|
||||
2: .same(proto: "host"),
|
||||
3: .same(proto: "state"),
|
||||
4: .standard(proto: "overall_percentage_complete"),
|
||||
]
|
||||
|
||||
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
|
||||
while let fieldNumber = try decoder.nextFieldNumber() {
|
||||
// The use of inline closures is to circumvent an issue where the compiler
|
||||
// allocates stack space for every case branch when no optimizations are
|
||||
// enabled. https://github.com/apple/swift-protobuf/issues/1034
|
||||
switch fieldNumber {
|
||||
case 1: try { try decoder.decodeSingularUInt64Field(value: &self.id) }()
|
||||
case 2: try { try decoder.decodeSingularUInt64Field(value: &self.host) }()
|
||||
case 3: try { try decoder.decodeSingularEnumField(value: &self.state) }()
|
||||
case 4: try { try decoder.decodeSingularFloatField(value: &self.overallPercentageComplete) }()
|
||||
default: break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
|
||||
if self.id != 0 {
|
||||
try visitor.visitSingularUInt64Field(value: self.id, fieldNumber: 1)
|
||||
}
|
||||
if self.host != 0 {
|
||||
try visitor.visitSingularUInt64Field(value: self.host, fieldNumber: 2)
|
||||
}
|
||||
if self.state != .unknown {
|
||||
try visitor.visitSingularEnumField(value: self.state, fieldNumber: 3)
|
||||
}
|
||||
if self.overallPercentageComplete != 0 {
|
||||
try visitor.visitSingularFloatField(value: self.overallPercentageComplete, fieldNumber: 4)
|
||||
}
|
||||
try unknownFields.traverse(visitor: &visitor)
|
||||
}
|
||||
|
||||
public static func ==(lhs: SdJob, rhs: SdJob) -> Bool {
|
||||
if lhs.id != rhs.id {return false}
|
||||
if lhs.host != rhs.host {return false}
|
||||
if lhs.state != rhs.state {return false}
|
||||
if lhs.overallPercentageComplete != rhs.overallPercentageComplete {return false}
|
||||
if lhs.unknownFields != rhs.unknownFields {return false}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
extension SdGetJobRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
|
||||
public static let protoMessageName: String = _protobuf_package + ".GetJobRequest"
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
1: .same(proto: "id"),
|
||||
]
|
||||
|
||||
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
|
||||
while let fieldNumber = try decoder.nextFieldNumber() {
|
||||
// The use of inline closures is to circumvent an issue where the compiler
|
||||
// allocates stack space for every case branch when no optimizations are
|
||||
// enabled. https://github.com/apple/swift-protobuf/issues/1034
|
||||
switch fieldNumber {
|
||||
case 1: try { try decoder.decodeSingularUInt64Field(value: &self.id) }()
|
||||
default: break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
|
||||
if self.id != 0 {
|
||||
try visitor.visitSingularUInt64Field(value: self.id, fieldNumber: 1)
|
||||
}
|
||||
try unknownFields.traverse(visitor: &visitor)
|
||||
}
|
||||
|
||||
public static func ==(lhs: SdGetJobRequest, rhs: SdGetJobRequest) -> Bool {
|
||||
if lhs.id != rhs.id {return false}
|
||||
if lhs.unknownFields != rhs.unknownFields {return false}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
extension SdGetJobResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
|
||||
public static let protoMessageName: String = _protobuf_package + ".GetJobResponse"
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
1: .same(proto: "job"),
|
||||
]
|
||||
|
||||
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
|
||||
while let fieldNumber = try decoder.nextFieldNumber() {
|
||||
// The use of inline closures is to circumvent an issue where the compiler
|
||||
// allocates stack space for every case branch when no optimizations are
|
||||
// enabled. https://github.com/apple/swift-protobuf/issues/1034
|
||||
switch fieldNumber {
|
||||
case 1: try { try decoder.decodeSingularMessageField(value: &self._job) }()
|
||||
default: break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
|
||||
// The use of inline closures is to circumvent an issue where the compiler
|
||||
// allocates stack space for every if/case branch local when no optimizations
|
||||
// are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
|
||||
// https://github.com/apple/swift-protobuf/issues/1182
|
||||
try { if let v = self._job {
|
||||
try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
|
||||
} }()
|
||||
try unknownFields.traverse(visitor: &visitor)
|
||||
}
|
||||
|
||||
public static func ==(lhs: SdGetJobResponse, rhs: SdGetJobResponse) -> Bool {
|
||||
if lhs._job != rhs._job {return false}
|
||||
if lhs.unknownFields != rhs.unknownFields {return false}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
extension SdCancelJobRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
|
||||
public static let protoMessageName: String = _protobuf_package + ".CancelJobRequest"
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
1: .same(proto: "id"),
|
||||
]
|
||||
|
||||
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
|
||||
while let fieldNumber = try decoder.nextFieldNumber() {
|
||||
// The use of inline closures is to circumvent an issue where the compiler
|
||||
// allocates stack space for every case branch when no optimizations are
|
||||
// enabled. https://github.com/apple/swift-protobuf/issues/1034
|
||||
switch fieldNumber {
|
||||
case 1: try { try decoder.decodeSingularUInt64Field(value: &self.id) }()
|
||||
default: break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
|
||||
if self.id != 0 {
|
||||
try visitor.visitSingularUInt64Field(value: self.id, fieldNumber: 1)
|
||||
}
|
||||
try unknownFields.traverse(visitor: &visitor)
|
||||
}
|
||||
|
||||
public static func ==(lhs: SdCancelJobRequest, rhs: SdCancelJobRequest) -> Bool {
|
||||
if lhs.id != rhs.id {return false}
|
||||
if lhs.unknownFields != rhs.unknownFields {return false}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
extension SdCancelJobResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
|
||||
public static let protoMessageName: String = _protobuf_package + ".CancelJobResponse"
|
||||
public static let _protobuf_nameMap = SwiftProtobuf._NameMap()
|
||||
|
||||
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
|
||||
while let _ = try decoder.nextFieldNumber() {
|
||||
}
|
||||
}
|
||||
|
||||
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
|
||||
try unknownFields.traverse(visitor: &visitor)
|
||||
}
|
||||
|
||||
public static func ==(lhs: SdCancelJobResponse, rhs: SdCancelJobResponse) -> Bool {
|
||||
if lhs.unknownFields != rhs.unknownFields {return false}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
extension SdStreamJobUpdatesRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
|
||||
public static let protoMessageName: String = _protobuf_package + ".StreamJobUpdatesRequest"
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
1: .same(proto: "id"),
|
||||
]
|
||||
|
||||
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
|
||||
while let fieldNumber = try decoder.nextFieldNumber() {
|
||||
// The use of inline closures is to circumvent an issue where the compiler
|
||||
// allocates stack space for every case branch when no optimizations are
|
||||
// enabled. https://github.com/apple/swift-protobuf/issues/1034
|
||||
switch fieldNumber {
|
||||
case 1: try { try decoder.decodeSingularUInt64Field(value: &self.id) }()
|
||||
default: break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
|
||||
if self.id != 0 {
|
||||
try visitor.visitSingularUInt64Field(value: self.id, fieldNumber: 1)
|
||||
}
|
||||
try unknownFields.traverse(visitor: &visitor)
|
||||
}
|
||||
|
||||
public static func ==(lhs: SdStreamJobUpdatesRequest, rhs: SdStreamJobUpdatesRequest) -> Bool {
|
||||
if lhs.id != rhs.id {return false}
|
||||
if lhs.unknownFields != rhs.unknownFields {return false}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
extension SdJobUpdate: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
|
||||
public static let protoMessageName: String = _protobuf_package + ".JobUpdate"
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
1: .same(proto: "job"),
|
||||
]
|
||||
|
||||
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
|
||||
while let fieldNumber = try decoder.nextFieldNumber() {
|
||||
// The use of inline closures is to circumvent an issue where the compiler
|
||||
// allocates stack space for every case branch when no optimizations are
|
||||
// enabled. https://github.com/apple/swift-protobuf/issues/1034
|
||||
switch fieldNumber {
|
||||
case 1: try { try decoder.decodeSingularMessageField(value: &self._job) }()
|
||||
default: break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
|
||||
// The use of inline closures is to circumvent an issue where the compiler
|
||||
// allocates stack space for every if/case branch local when no optimizations
|
||||
// are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
|
||||
// https://github.com/apple/swift-protobuf/issues/1182
|
||||
try { if let v = self._job {
|
||||
try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
|
||||
} }()
|
||||
try unknownFields.traverse(visitor: &visitor)
|
||||
}
|
||||
|
||||
public static func ==(lhs: SdJobUpdate, rhs: SdJobUpdate) -> Bool {
|
||||
if lhs._job != rhs._job {return false}
|
||||
if lhs.unknownFields != rhs.unknownFields {return false}
|
||||
return true
|
||||
}
|
||||
}
|
420
Sources/StableDiffusionProtos/shared.pb.swift
Normal file
420
Sources/StableDiffusionProtos/shared.pb.swift
Normal file
@ -0,0 +1,420 @@
|
||||
// DO NOT EDIT.
|
||||
// swift-format-ignore-file
|
||||
//
|
||||
// Generated by the Swift generator plugin for the protocol buffer compiler.
|
||||
// Source: shared.proto
|
||||
//
|
||||
// For information on using the generated types, please see the documentation:
|
||||
// https://github.com/apple/swift-protobuf/
|
||||
|
||||
///*
|
||||
/// Shared messages for the Stable Diffusion RPC service.
|
||||
|
||||
import Foundation
|
||||
import SwiftProtobuf
|
||||
|
||||
// If the compiler emits an error on this type, it is because this file
|
||||
// was generated by a version of the `protoc` Swift plug-in that is
|
||||
// incompatible with the version of SwiftProtobuf to which you are linking.
|
||||
// Please ensure that you are building against the same version of the API
|
||||
// that was used to generate this file.
|
||||
fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck {
|
||||
struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {}
|
||||
typealias Version = _2
|
||||
}
|
||||
|
||||
///*
|
||||
/// Represents the model attention. Model attention has to do with how the model is encoded, and
|
||||
/// can determine what compute units are able to support a particular model.
|
||||
public enum SdModelAttention: SwiftProtobuf.Enum {
|
||||
public typealias RawValue = Int
|
||||
|
||||
///*
|
||||
/// The model is an original attention type. It can be loaded only onto CPU & GPU compute units.
|
||||
case original // = 0
|
||||
|
||||
///*
|
||||
/// The model is a split-ein-sum attention type. It can be loaded onto all compute units,
|
||||
/// including the Apple Neural Engine.
|
||||
case splitEinSum // = 1
|
||||
case UNRECOGNIZED(Int)
|
||||
|
||||
public init() {
|
||||
self = .original
|
||||
}
|
||||
|
||||
public init?(rawValue: Int) {
|
||||
switch rawValue {
|
||||
case 0: self = .original
|
||||
case 1: self = .splitEinSum
|
||||
default: self = .UNRECOGNIZED(rawValue)
|
||||
}
|
||||
}
|
||||
|
||||
public var rawValue: Int {
|
||||
switch self {
|
||||
case .original: return 0
|
||||
case .splitEinSum: return 1
|
||||
case .UNRECOGNIZED(let i): return i
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#if swift(>=4.2)
|
||||
|
||||
extension SdModelAttention: CaseIterable {
|
||||
// The compiler won't synthesize support with the UNRECOGNIZED case.
|
||||
public static var allCases: [SdModelAttention] = [
|
||||
.original,
|
||||
.splitEinSum,
|
||||
]
|
||||
}
|
||||
|
||||
#endif // swift(>=4.2)
|
||||
|
||||
///*
|
||||
/// Represents the schedulers that are used to sample images.
|
||||
public enum SdScheduler: SwiftProtobuf.Enum {
|
||||
public typealias RawValue = Int
|
||||
|
||||
///*
|
||||
/// The PNDM (Pseudo numerical methods for diffusion models) scheduler.
|
||||
case pndm // = 0
|
||||
|
||||
///*
|
||||
/// The DPM-Solver++ scheduler.
|
||||
case dpmSolverPlusPlus // = 1
|
||||
case UNRECOGNIZED(Int)
|
||||
|
||||
public init() {
|
||||
self = .pndm
|
||||
}
|
||||
|
||||
public init?(rawValue: Int) {
|
||||
switch rawValue {
|
||||
case 0: self = .pndm
|
||||
case 1: self = .dpmSolverPlusPlus
|
||||
default: self = .UNRECOGNIZED(rawValue)
|
||||
}
|
||||
}
|
||||
|
||||
public var rawValue: Int {
|
||||
switch self {
|
||||
case .pndm: return 0
|
||||
case .dpmSolverPlusPlus: return 1
|
||||
case .UNRECOGNIZED(let i): return i
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#if swift(>=4.2)
|
||||
|
||||
extension SdScheduler: CaseIterable {
|
||||
// The compiler won't synthesize support with the UNRECOGNIZED case.
|
||||
public static var allCases: [SdScheduler] = [
|
||||
.pndm,
|
||||
.dpmSolverPlusPlus,
|
||||
]
|
||||
}
|
||||
|
||||
#endif // swift(>=4.2)
|
||||
|
||||
///*
|
||||
/// Represents a specifier for what compute units are available for ML tasks.
|
||||
public enum SdComputeUnits: SwiftProtobuf.Enum {
|
||||
public typealias RawValue = Int
|
||||
|
||||
///*
|
||||
/// The CPU as a singular compute unit.
|
||||
case cpu // = 0
|
||||
|
||||
///*
|
||||
/// The CPU & GPU combined into a singular compute unit.
|
||||
case cpuAndGpu // = 1
|
||||
|
||||
///*
|
||||
/// Allow the usage of all compute units. CoreML will decided where the model is loaded.
|
||||
case all // = 2
|
||||
|
||||
///*
|
||||
/// The CPU & Neural Engine combined into a singular compute unit.
|
||||
case cpuAndNeuralEngine // = 3
|
||||
case UNRECOGNIZED(Int)
|
||||
|
||||
public init() {
|
||||
self = .cpu
|
||||
}
|
||||
|
||||
public init?(rawValue: Int) {
|
||||
switch rawValue {
|
||||
case 0: self = .cpu
|
||||
case 1: self = .cpuAndGpu
|
||||
case 2: self = .all
|
||||
case 3: self = .cpuAndNeuralEngine
|
||||
default: self = .UNRECOGNIZED(rawValue)
|
||||
}
|
||||
}
|
||||
|
||||
public var rawValue: Int {
|
||||
switch self {
|
||||
case .cpu: return 0
|
||||
case .cpuAndGpu: return 1
|
||||
case .all: return 2
|
||||
case .cpuAndNeuralEngine: return 3
|
||||
case .UNRECOGNIZED(let i): return i
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#if swift(>=4.2)
|
||||
|
||||
extension SdComputeUnits: CaseIterable {
|
||||
// The compiler won't synthesize support with the UNRECOGNIZED case.
|
||||
public static var allCases: [SdComputeUnits] = [
|
||||
.cpu,
|
||||
.cpuAndGpu,
|
||||
.all,
|
||||
.cpuAndNeuralEngine,
|
||||
]
|
||||
}
|
||||
|
||||
#endif // swift(>=4.2)
|
||||
|
||||
///*
|
||||
/// Represents the format of an image.
|
||||
public enum SdImageFormat: SwiftProtobuf.Enum {
|
||||
public typealias RawValue = Int
|
||||
|
||||
///*
|
||||
/// The PNG image format.
|
||||
case png // = 0
|
||||
case UNRECOGNIZED(Int)
|
||||
|
||||
public init() {
|
||||
self = .png
|
||||
}
|
||||
|
||||
public init?(rawValue: Int) {
|
||||
switch rawValue {
|
||||
case 0: self = .png
|
||||
default: self = .UNRECOGNIZED(rawValue)
|
||||
}
|
||||
}
|
||||
|
||||
public var rawValue: Int {
|
||||
switch self {
|
||||
case .png: return 0
|
||||
case .UNRECOGNIZED(let i): return i
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#if swift(>=4.2)
|
||||
|
||||
extension SdImageFormat: CaseIterable {
|
||||
// The compiler won't synthesize support with the UNRECOGNIZED case.
|
||||
public static var allCases: [SdImageFormat] = [
|
||||
.png,
|
||||
]
|
||||
}
|
||||
|
||||
#endif // swift(>=4.2)
|
||||
|
||||
///*
|
||||
/// Represents information about an available model.
|
||||
/// The primary key of a model is it's 'name' field.
|
||||
public struct SdModelInfo {
|
||||
// SwiftProtobuf.Message conformance is added in an extension below. See the
|
||||
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
|
||||
// methods supported on all messages.
|
||||
|
||||
///*
|
||||
/// The name of the available model. Note that within the context of a single RPC server,
|
||||
/// the name of a model is a unique identifier. This may not be true when utilizing a cluster or
|
||||
/// load balanced server, so keep that in mind.
|
||||
public var name: String = String()
|
||||
|
||||
///*
|
||||
/// The attention of the model. Model attention determines what compute units can be used to
|
||||
/// load the model and make predictions.
|
||||
public var attention: SdModelAttention = .original
|
||||
|
||||
///*
|
||||
/// Whether the model is currently loaded onto an available compute unit.
|
||||
public var isLoaded: Bool = false
|
||||
|
||||
///*
|
||||
/// The compute unit that the model is currently loaded into, if it is loaded to one at all.
|
||||
/// When is_loaded is false, the value of this field should be null.
|
||||
public var loadedComputeUnits: SdComputeUnits = .cpu
|
||||
|
||||
///*
|
||||
/// The compute units that this model supports using.
|
||||
public var supportedComputeUnits: [SdComputeUnits] = []
|
||||
|
||||
public var unknownFields = SwiftProtobuf.UnknownStorage()
|
||||
|
||||
public init() {}
|
||||
}
|
||||
|
||||
///*
|
||||
/// Represents an image within the Stable Diffusion context.
|
||||
/// This could be an input image for an image generation request, or it could be
|
||||
/// a generated image from the Stable Diffusion model.
|
||||
public struct SdImage {
|
||||
// SwiftProtobuf.Message conformance is added in an extension below. See the
|
||||
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
|
||||
// methods supported on all messages.
|
||||
|
||||
///*
|
||||
/// The format of the image.
|
||||
public var format: SdImageFormat = .png
|
||||
|
||||
///*
|
||||
/// The raw data of the image, in the specified format.
|
||||
public var data: Data = Data()
|
||||
|
||||
public var unknownFields = SwiftProtobuf.UnknownStorage()
|
||||
|
||||
public init() {}
|
||||
}
|
||||
|
||||
#if swift(>=5.5) && canImport(_Concurrency)
|
||||
extension SdModelAttention: @unchecked Sendable {}
|
||||
extension SdScheduler: @unchecked Sendable {}
|
||||
extension SdComputeUnits: @unchecked Sendable {}
|
||||
extension SdImageFormat: @unchecked Sendable {}
|
||||
extension SdModelInfo: @unchecked Sendable {}
|
||||
extension SdImage: @unchecked Sendable {}
|
||||
#endif // swift(>=5.5) && canImport(_Concurrency)
|
||||
|
||||
// MARK: - Code below here is support for the SwiftProtobuf runtime.
|
||||
|
||||
fileprivate let _protobuf_package = "gay.pizza.stable.diffusion"
|
||||
|
||||
extension SdModelAttention: SwiftProtobuf._ProtoNameProviding {
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
0: .same(proto: "original"),
|
||||
1: .same(proto: "split_ein_sum"),
|
||||
]
|
||||
}
|
||||
|
||||
extension SdScheduler: SwiftProtobuf._ProtoNameProviding {
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
0: .same(proto: "pndm"),
|
||||
1: .same(proto: "dpm_solver_plus_plus"),
|
||||
]
|
||||
}
|
||||
|
||||
extension SdComputeUnits: SwiftProtobuf._ProtoNameProviding {
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
0: .same(proto: "cpu"),
|
||||
1: .same(proto: "cpu_and_gpu"),
|
||||
2: .same(proto: "all"),
|
||||
3: .same(proto: "cpu_and_neural_engine"),
|
||||
]
|
||||
}
|
||||
|
||||
extension SdImageFormat: SwiftProtobuf._ProtoNameProviding {
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
0: .same(proto: "png"),
|
||||
]
|
||||
}
|
||||
|
||||
extension SdModelInfo: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
|
||||
public static let protoMessageName: String = _protobuf_package + ".ModelInfo"
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
1: .same(proto: "name"),
|
||||
2: .same(proto: "attention"),
|
||||
3: .standard(proto: "is_loaded"),
|
||||
4: .standard(proto: "loaded_compute_units"),
|
||||
5: .standard(proto: "supported_compute_units"),
|
||||
]
|
||||
|
||||
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
|
||||
while let fieldNumber = try decoder.nextFieldNumber() {
|
||||
// The use of inline closures is to circumvent an issue where the compiler
|
||||
// allocates stack space for every case branch when no optimizations are
|
||||
// enabled. https://github.com/apple/swift-protobuf/issues/1034
|
||||
switch fieldNumber {
|
||||
case 1: try { try decoder.decodeSingularStringField(value: &self.name) }()
|
||||
case 2: try { try decoder.decodeSingularEnumField(value: &self.attention) }()
|
||||
case 3: try { try decoder.decodeSingularBoolField(value: &self.isLoaded) }()
|
||||
case 4: try { try decoder.decodeSingularEnumField(value: &self.loadedComputeUnits) }()
|
||||
case 5: try { try decoder.decodeRepeatedEnumField(value: &self.supportedComputeUnits) }()
|
||||
default: break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
|
||||
if !self.name.isEmpty {
|
||||
try visitor.visitSingularStringField(value: self.name, fieldNumber: 1)
|
||||
}
|
||||
if self.attention != .original {
|
||||
try visitor.visitSingularEnumField(value: self.attention, fieldNumber: 2)
|
||||
}
|
||||
if self.isLoaded != false {
|
||||
try visitor.visitSingularBoolField(value: self.isLoaded, fieldNumber: 3)
|
||||
}
|
||||
if self.loadedComputeUnits != .cpu {
|
||||
try visitor.visitSingularEnumField(value: self.loadedComputeUnits, fieldNumber: 4)
|
||||
}
|
||||
if !self.supportedComputeUnits.isEmpty {
|
||||
try visitor.visitPackedEnumField(value: self.supportedComputeUnits, fieldNumber: 5)
|
||||
}
|
||||
try unknownFields.traverse(visitor: &visitor)
|
||||
}
|
||||
|
||||
public static func ==(lhs: SdModelInfo, rhs: SdModelInfo) -> Bool {
|
||||
if lhs.name != rhs.name {return false}
|
||||
if lhs.attention != rhs.attention {return false}
|
||||
if lhs.isLoaded != rhs.isLoaded {return false}
|
||||
if lhs.loadedComputeUnits != rhs.loadedComputeUnits {return false}
|
||||
if lhs.supportedComputeUnits != rhs.supportedComputeUnits {return false}
|
||||
if lhs.unknownFields != rhs.unknownFields {return false}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
extension SdImage: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
|
||||
public static let protoMessageName: String = _protobuf_package + ".Image"
|
||||
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
|
||||
1: .same(proto: "format"),
|
||||
2: .same(proto: "data"),
|
||||
]
|
||||
|
||||
public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
|
||||
while let fieldNumber = try decoder.nextFieldNumber() {
|
||||
// The use of inline closures is to circumvent an issue where the compiler
|
||||
// allocates stack space for every case branch when no optimizations are
|
||||
// enabled. https://github.com/apple/swift-protobuf/issues/1034
|
||||
switch fieldNumber {
|
||||
case 1: try { try decoder.decodeSingularEnumField(value: &self.format) }()
|
||||
case 2: try { try decoder.decodeSingularBytesField(value: &self.data) }()
|
||||
default: break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
|
||||
if self.format != .png {
|
||||
try visitor.visitSingularEnumField(value: self.format, fieldNumber: 1)
|
||||
}
|
||||
if !self.data.isEmpty {
|
||||
try visitor.visitSingularBytesField(value: self.data, fieldNumber: 2)
|
||||
}
|
||||
try unknownFields.traverse(visitor: &visitor)
|
||||
}
|
||||
|
||||
public static func ==(lhs: SdImage, rhs: SdImage) -> Bool {
|
||||
if lhs.format != rhs.format {return false}
|
||||
if lhs.data != rhs.data {return false}
|
||||
if lhs.unknownFields != rhs.unknownFields {return false}
|
||||
return true
|
||||
}
|
||||
}
|
339
Sources/StableDiffusionProtos/tokenizer.grpc.swift
Normal file
339
Sources/StableDiffusionProtos/tokenizer.grpc.swift
Normal file
@ -0,0 +1,339 @@
|
||||
//
// DO NOT EDIT.
//
// Generated by the protocol buffer compiler.
// Source: tokenizer.proto
//

//
// Copyright 2018, gRPC Authors All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
import GRPC
import NIO
import NIOConcurrencyHelpers
import SwiftProtobuf

///*
/// The tokenizer service, for analyzing tokens for a loaded model.
///
/// Usage: instantiate `SdTokenizerServiceClient`, then call methods of this protocol to make API calls.
public protocol SdTokenizerServiceClientProtocol: GRPCClient {
var serviceName: String { get }
var interceptors: SdTokenizerServiceClientInterceptorFactoryProtocol? { get }

func tokenize(
_ request: SdTokenizeRequest,
callOptions: CallOptions?
) -> UnaryCall<SdTokenizeRequest, SdTokenizeResponse>
}

extension SdTokenizerServiceClientProtocol {
public var serviceName: String {
return "gay.pizza.stable.diffusion.TokenizerService"
}

///*
/// Analyze the input using a loaded model and return the results.
///
/// - Parameters:
/// - request: Request to send to Tokenize.
/// - callOptions: Call options.
/// - Returns: A `UnaryCall` with futures for the metadata, status and response.
public func tokenize(
_ request: SdTokenizeRequest,
callOptions: CallOptions? = nil
) -> UnaryCall<SdTokenizeRequest, SdTokenizeResponse> {
return self.makeUnaryCall(
path: SdTokenizerServiceClientMetadata.Methods.tokenize.path,
request: request,
callOptions: callOptions ?? self.defaultCallOptions,
interceptors: self.interceptors?.makeTokenizeInterceptors() ?? []
)
}
}

#if compiler(>=5.6)
@available(*, deprecated)
extension SdTokenizerServiceClient: @unchecked Sendable {}
#endif // compiler(>=5.6)

@available(*, deprecated, renamed: "SdTokenizerServiceNIOClient")
public final class SdTokenizerServiceClient: SdTokenizerServiceClientProtocol {
private let lock = Lock()
private var _defaultCallOptions: CallOptions
private var _interceptors: SdTokenizerServiceClientInterceptorFactoryProtocol?
public let channel: GRPCChannel
public var defaultCallOptions: CallOptions {
get { self.lock.withLock { return self._defaultCallOptions } }
set { self.lock.withLockVoid { self._defaultCallOptions = newValue } }
}
public var interceptors: SdTokenizerServiceClientInterceptorFactoryProtocol? {
get { self.lock.withLock { return self._interceptors } }
set { self.lock.withLockVoid { self._interceptors = newValue } }
}

/// Creates a client for the gay.pizza.stable.diffusion.TokenizerService service.
///
/// - Parameters:
/// - channel: `GRPCChannel` to the service host.
/// - defaultCallOptions: Options to use for each service call if the user doesn't provide them.
/// - interceptors: A factory providing interceptors for each RPC.
public init(
channel: GRPCChannel,
defaultCallOptions: CallOptions = CallOptions(),
interceptors: SdTokenizerServiceClientInterceptorFactoryProtocol? = nil
) {
self.channel = channel
self._defaultCallOptions = defaultCallOptions
self._interceptors = interceptors
}
}

public struct SdTokenizerServiceNIOClient: SdTokenizerServiceClientProtocol {
public var channel: GRPCChannel
public var defaultCallOptions: CallOptions
public var interceptors: SdTokenizerServiceClientInterceptorFactoryProtocol?

/// Creates a client for the gay.pizza.stable.diffusion.TokenizerService service.
///
/// - Parameters:
/// - channel: `GRPCChannel` to the service host.
/// - defaultCallOptions: Options to use for each service call if the user doesn't provide them.
/// - interceptors: A factory providing interceptors for each RPC.
public init(
channel: GRPCChannel,
defaultCallOptions: CallOptions = CallOptions(),
interceptors: SdTokenizerServiceClientInterceptorFactoryProtocol? = nil
) {
self.channel = channel
self.defaultCallOptions = defaultCallOptions
self.interceptors = interceptors
}
}

#if compiler(>=5.6)
///*
/// The tokenizer service, for analyzing tokens for a loaded model.
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
public protocol SdTokenizerServiceAsyncClientProtocol: GRPCClient {
static var serviceDescriptor: GRPCServiceDescriptor { get }
var interceptors: SdTokenizerServiceClientInterceptorFactoryProtocol? { get }

func makeTokenizeCall(
_ request: SdTokenizeRequest,
callOptions: CallOptions?
) -> GRPCAsyncUnaryCall<SdTokenizeRequest, SdTokenizeResponse>
}

@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
extension SdTokenizerServiceAsyncClientProtocol {
public static var serviceDescriptor: GRPCServiceDescriptor {
return SdTokenizerServiceClientMetadata.serviceDescriptor
}

public var interceptors: SdTokenizerServiceClientInterceptorFactoryProtocol? {
return nil
}

public func makeTokenizeCall(
_ request: SdTokenizeRequest,
callOptions: CallOptions? = nil
) -> GRPCAsyncUnaryCall<SdTokenizeRequest, SdTokenizeResponse> {
return self.makeAsyncUnaryCall(
path: SdTokenizerServiceClientMetadata.Methods.tokenize.path,
request: request,
callOptions: callOptions ?? self.defaultCallOptions,
interceptors: self.interceptors?.makeTokenizeInterceptors() ?? []
)
}
}

@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
extension SdTokenizerServiceAsyncClientProtocol {
public func tokenize(
_ request: SdTokenizeRequest,
callOptions: CallOptions? = nil
) async throws -> SdTokenizeResponse {
return try await self.performAsyncUnaryCall(
path: SdTokenizerServiceClientMetadata.Methods.tokenize.path,
request: request,
callOptions: callOptions ?? self.defaultCallOptions,
interceptors: self.interceptors?.makeTokenizeInterceptors() ?? []
)
}
}

@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
public struct SdTokenizerServiceAsyncClient: SdTokenizerServiceAsyncClientProtocol {
public var channel: GRPCChannel
public var defaultCallOptions: CallOptions
public var interceptors: SdTokenizerServiceClientInterceptorFactoryProtocol?

public init(
channel: GRPCChannel,
defaultCallOptions: CallOptions = CallOptions(),
interceptors: SdTokenizerServiceClientInterceptorFactoryProtocol? = nil
) {
self.channel = channel
self.defaultCallOptions = defaultCallOptions
self.interceptors = interceptors
}
}

#endif // compiler(>=5.6)

public protocol SdTokenizerServiceClientInterceptorFactoryProtocol: GRPCSendable {

/// - Returns: Interceptors to use when invoking 'tokenize'.
func makeTokenizeInterceptors() -> [ClientInterceptor<SdTokenizeRequest, SdTokenizeResponse>]
}

public enum SdTokenizerServiceClientMetadata {
public static let serviceDescriptor = GRPCServiceDescriptor(
name: "TokenizerService",
fullName: "gay.pizza.stable.diffusion.TokenizerService",
methods: [
SdTokenizerServiceClientMetadata.Methods.tokenize,
]
)

public enum Methods {
public static let tokenize = GRPCMethodDescriptor(
name: "Tokenize",
path: "/gay.pizza.stable.diffusion.TokenizerService/Tokenize",
type: GRPCCallType.unary
)
}
}

///*
/// The tokenizer service, for analyzing tokens for a loaded model.
///
/// To build a server, implement a class that conforms to this protocol.
public protocol SdTokenizerServiceProvider: CallHandlerProvider {
var interceptors: SdTokenizerServiceServerInterceptorFactoryProtocol? { get }

///*
/// Analyze the input using a loaded model and return the results.
func tokenize(request: SdTokenizeRequest, context: StatusOnlyCallContext) -> EventLoopFuture<SdTokenizeResponse>
}

extension SdTokenizerServiceProvider {
public var serviceName: Substring {
return SdTokenizerServiceServerMetadata.serviceDescriptor.fullName[...]
}

/// Determines, calls and returns the appropriate request handler, depending on the request's method.
/// Returns nil for methods not handled by this service.
public func handle(
method name: Substring,
context: CallHandlerContext
) -> GRPCServerHandlerProtocol? {
switch name {
case "Tokenize":
return UnaryServerHandler(
context: context,
requestDeserializer: ProtobufDeserializer<SdTokenizeRequest>(),
responseSerializer: ProtobufSerializer<SdTokenizeResponse>(),
interceptors: self.interceptors?.makeTokenizeInterceptors() ?? [],
userFunction: self.tokenize(request:context:)
)

default:
return nil
}
}
}

#if compiler(>=5.6)

///*
/// The tokenizer service, for analyzing tokens for a loaded model.
///
/// To implement a server, implement an object which conforms to this protocol.
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
public protocol SdTokenizerServiceAsyncProvider: CallHandlerProvider {
static var serviceDescriptor: GRPCServiceDescriptor { get }
var interceptors: SdTokenizerServiceServerInterceptorFactoryProtocol? { get }

///*
/// Analyze the input using a loaded model and return the results.
@Sendable func tokenize(
request: SdTokenizeRequest,
context: GRPCAsyncServerCallContext
) async throws -> SdTokenizeResponse
}

@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
extension SdTokenizerServiceAsyncProvider {
public static var serviceDescriptor: GRPCServiceDescriptor {
return SdTokenizerServiceServerMetadata.serviceDescriptor
}

public var serviceName: Substring {
return SdTokenizerServiceServerMetadata.serviceDescriptor.fullName[...]
}

public var interceptors: SdTokenizerServiceServerInterceptorFactoryProtocol? {
return nil
}

public func handle(
method name: Substring,
context: CallHandlerContext
) -> GRPCServerHandlerProtocol? {
switch name {
case "Tokenize":
return GRPCAsyncServerHandler(
context: context,
requestDeserializer: ProtobufDeserializer<SdTokenizeRequest>(),
responseSerializer: ProtobufSerializer<SdTokenizeResponse>(),
interceptors: self.interceptors?.makeTokenizeInterceptors() ?? [],
wrapping: self.tokenize(request:context:)
)

default:
return nil
}
}
}

#endif // compiler(>=5.6)

public protocol SdTokenizerServiceServerInterceptorFactoryProtocol {

/// - Returns: Interceptors to use when handling 'tokenize'.
/// Defaults to calling `self.makeInterceptors()`.
func makeTokenizeInterceptors() -> [ServerInterceptor<SdTokenizeRequest, SdTokenizeResponse>]
}

public enum SdTokenizerServiceServerMetadata {
public static let serviceDescriptor = GRPCServiceDescriptor(
name: "TokenizerService",
fullName: "gay.pizza.stable.diffusion.TokenizerService",
methods: [
SdTokenizerServiceServerMetadata.Methods.tokenize,
]
)

public enum Methods {
public static let tokenize = GRPCMethodDescriptor(
name: "Tokenize",
path: "/gay.pizza.stable.diffusion.TokenizerService/Tokenize",
type: GRPCCallType.unary
)
}
}
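For orientation, here is a rough usage sketch (not part of this commit) of the async tokenizer client generated above. The port matches the server default elsewhere in this diff, but the model name is a placeholder and the channel setup is only one possible configuration:

import GRPC
import NIOPosix
import StableDiffusionProtos

// Hypothetical usage of the generated SdTokenizerServiceAsyncClient.
let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
defer { try? group.syncShutdownGracefully() }

let channel = try GRPCChannelPool.with(
  target: .host("localhost", port: 4546),
  transportSecurity: .plaintext,
  eventLoopGroup: group
)

let tokenizer = SdTokenizerServiceAsyncClient(channel: channel)
var request = SdTokenizeRequest()
request.modelName = "some-loaded-model" // placeholder: must name a loaded model
request.input = "a pizza orbiting the moon"

let response = try await tokenizer.tokenize(request)
print("tokens: \(response.tokens)")
print("token ids: \(response.tokenIds)")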
149
Sources/StableDiffusionProtos/tokenizer.pb.swift
Normal file
@ -0,0 +1,149 @@
// DO NOT EDIT.
// swift-format-ignore-file
//
// Generated by the Swift generator plugin for the protocol buffer compiler.
// Source: tokenizer.proto
//
// For information on using the generated types, please see the documentation:
// https://github.com/apple/swift-protobuf/

///*
/// Stable Diffusion RPC service for Apple Platforms.

import Foundation
import SwiftProtobuf

// If the compiler emits an error on this type, it is because this file
// was generated by a version of the `protoc` Swift plug-in that is
// incompatible with the version of SwiftProtobuf to which you are linking.
// Please ensure that you are building against the same version of the API
// that was used to generate this file.
fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck {
struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {}
typealias Version = _2
}

///*
/// Represents a request to tokenize an input.
public struct SdTokenizeRequest {
// SwiftProtobuf.Message conformance is added in an extension below. See the
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
// methods supported on all messages.

///*
/// The name of a loaded model to use for tokenization.
public var modelName: String = String()

///*
/// The input string to tokenize.
public var input: String = String()

public var unknownFields = SwiftProtobuf.UnknownStorage()

public init() {}
}

///*
/// Represents a response to tokenization.
public struct SdTokenizeResponse {
// SwiftProtobuf.Message conformance is added in an extension below. See the
// `Message` and `Message+*Additions` files in the SwiftProtobuf library for
// methods supported on all messages.

///*
/// The tokens inside the input string.
public var tokens: [String] = []

///*
/// The token IDs inside the input string.
public var tokenIds: [UInt64] = []

public var unknownFields = SwiftProtobuf.UnknownStorage()

public init() {}
}

#if swift(>=5.5) && canImport(_Concurrency)
extension SdTokenizeRequest: @unchecked Sendable {}
extension SdTokenizeResponse: @unchecked Sendable {}
#endif // swift(>=5.5) && canImport(_Concurrency)

// MARK: - Code below here is support for the SwiftProtobuf runtime.

fileprivate let _protobuf_package = "gay.pizza.stable.diffusion"

extension SdTokenizeRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
public static let protoMessageName: String = _protobuf_package + ".TokenizeRequest"
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
1: .standard(proto: "model_name"),
2: .same(proto: "input"),
]

public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
while let fieldNumber = try decoder.nextFieldNumber() {
// The use of inline closures is to circumvent an issue where the compiler
// allocates stack space for every case branch when no optimizations are
// enabled. https://github.com/apple/swift-protobuf/issues/1034
switch fieldNumber {
case 1: try { try decoder.decodeSingularStringField(value: &self.modelName) }()
case 2: try { try decoder.decodeSingularStringField(value: &self.input) }()
default: break
}
}
}

public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
if !self.modelName.isEmpty {
try visitor.visitSingularStringField(value: self.modelName, fieldNumber: 1)
}
if !self.input.isEmpty {
try visitor.visitSingularStringField(value: self.input, fieldNumber: 2)
}
try unknownFields.traverse(visitor: &visitor)
}

public static func ==(lhs: SdTokenizeRequest, rhs: SdTokenizeRequest) -> Bool {
if lhs.modelName != rhs.modelName {return false}
if lhs.input != rhs.input {return false}
if lhs.unknownFields != rhs.unknownFields {return false}
return true
}
}

extension SdTokenizeResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
public static let protoMessageName: String = _protobuf_package + ".TokenizeResponse"
public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
1: .same(proto: "tokens"),
2: .standard(proto: "token_ids"),
]

public mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
while let fieldNumber = try decoder.nextFieldNumber() {
// The use of inline closures is to circumvent an issue where the compiler
// allocates stack space for every case branch when no optimizations are
// enabled. https://github.com/apple/swift-protobuf/issues/1034
switch fieldNumber {
case 1: try { try decoder.decodeRepeatedStringField(value: &self.tokens) }()
case 2: try { try decoder.decodeRepeatedUInt64Field(value: &self.tokenIds) }()
default: break
}
}
}

public func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
if !self.tokens.isEmpty {
try visitor.visitRepeatedStringField(value: self.tokens, fieldNumber: 1)
}
if !self.tokenIds.isEmpty {
try visitor.visitPackedUInt64Field(value: self.tokenIds, fieldNumber: 2)
}
try unknownFields.traverse(visitor: &visitor)
}

public static func ==(lhs: SdTokenizeResponse, rhs: SdTokenizeResponse) -> Bool {
if lhs.tokens != rhs.tokens {return false}
if lhs.tokenIds != rhs.tokenIds {return false}
if lhs.unknownFields != rhs.unknownFields {return false}
return true
}
}
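Like any SwiftProtobuf message, the request and response types above can also be serialized independently of gRPC. A minimal sketch (not part of this commit; the field values are placeholders):

import Foundation
import SwiftProtobuf
import StableDiffusionProtos

// Round-trip an SdTokenizeRequest through the protobuf binary format.
var request = SdTokenizeRequest()
request.modelName = "example-model" // placeholder
request.input = "hello world"

let bytes = try request.serializedData()
let decoded = try SdTokenizeRequest(serializedData: bytes)
assert(decoded == request) // == comes from the generated extension above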
@ -4,9 +4,11 @@ import StableDiffusionCore
import StableDiffusionProtos

class ImageGenerationServiceProvider: SdImageGenerationServiceAsyncProvider {
private let jobManager: JobManager
private let modelManager: ModelManager

init(modelManager: ModelManager) {
init(jobManager: JobManager, modelManager: ModelManager) {
self.jobManager = jobManager
self.modelManager = modelManager
}

@ -14,13 +16,25 @@ class ImageGenerationServiceProvider: SdImageGenerationServiceAsyncProvider {
guard let state = await modelManager.getModelState(name: request.modelName) else {
throw SdCoreError.modelNotFound
}
return try await state.generate(request)
let job = await jobManager.create()
DispatchQueue.main.async {
Task {
await self.jobManager.updateJobQueued(job)
}
}
return try await state.generate(request, job: job)
}

func generateImagesStreaming(request: SdGenerateImagesRequest, responseStream: GRPCAsyncResponseStreamWriter<SdGenerateImagesStreamUpdate>, context _: GRPCAsyncServerCallContext) async throws {
guard let state = await modelManager.getModelState(name: request.modelName) else {
throw SdCoreError.modelNotFound
}
try await state.generateStreaming(request, stream: responseStream)
let job = await jobManager.create()
DispatchQueue.main.async {
Task {
await self.jobManager.updateJobQueued(job)
}
}
_ = try await state.generate(request, job: job, stream: responseStream)
}
}
37
Sources/StableDiffusionServer/JobService.swift
Normal file
@ -0,0 +1,37 @@
import Foundation
import GRPC
import StableDiffusionCore
import StableDiffusionProtos

class JobServiceProvider: SdJobServiceAsyncProvider {
private let jobManager: JobManager

init(jobManager: JobManager) {
self.jobManager = jobManager
}

func getJob(request: SdGetJobRequest, context _: GRPCAsyncServerCallContext) async throws -> SdGetJobResponse {
var response = SdGetJobResponse()
guard let job = await jobManager.job(id: request.id) else {
throw SdCoreError.jobNotFound
}
response.job = job
return response
}

func cancelJob(request _: SdCancelJobRequest, context _: GRPCAsyncServerCallContext) async throws -> SdCancelJobResponse {
throw SdCoreError.notImplemented
}

func streamJobUpdates(request: SdStreamJobUpdatesRequest, responseStream: GRPCAsyncResponseStreamWriter<SdJobUpdate>, context _: GRPCAsyncServerCallContext) async throws {
let isFilteredById = request.id != 0
for await job in await jobManager.jobUpdatePublisher {
if isFilteredById, job.id != request.id {
continue
}
var update = SdJobUpdate()
update.job = job
try await responseStream.send(update)
}
}
}
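The new JobServiceProvider above is the server side of the job streaming used by the Kotlin sample. A rough Swift client sketch (not part of this commit; it assumes the generated SdJobServiceAsyncClient exposes the usual grpc-swift server-streaming convenience method, and the endpoint is a placeholder):

import GRPC
import NIOPosix
import StableDiffusionProtos

// Hypothetical client: watch all job updates from the new JobService.
let group = MultiThreadedEventLoopGroup(numberOfThreads: 1)
defer { try? group.syncShutdownGracefully() }
let channel = try GRPCChannelPool.with(
  target: .host("localhost", port: 4546),
  transportSecurity: .plaintext,
  eventLoopGroup: group
)

let jobs = SdJobServiceAsyncClient(channel: channel)
// Leaving id at 0 means "no filter", matching the isFilteredById check above.
let request = SdStreamJobUpdatesRequest()
for try await update in jobs.streamJobUpdates(request) {
  print("job=\(update.job.id) state=\(update.job.state) completion=\(update.job.overallPercentageComplete)")
}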
@ -16,8 +16,9 @@ struct ServerCommand: ParsableCommand {
var bindPort: Int = 4546

mutating func run() throws {
let jobManager = JobManager()
let modelsDirectoryURL = URL(filePath: modelsDirectoryPath)
let modelManager = ModelManager(modelBaseURL: modelsDirectoryURL)
let modelManager = ModelManager(modelBaseURL: modelsDirectoryURL, jobManager: jobManager)

let semaphore = DispatchSemaphore(value: 0)
Task {
@ -34,8 +35,9 @@ struct ServerCommand: ParsableCommand {
_ = Server.insecure(group: group)
.withServiceProviders([
ModelServiceProvider(modelManager: modelManager),
ImageGenerationServiceProvider(modelManager: modelManager),
TokenizerServiceProvider(modelManager: modelManager)
ImageGenerationServiceProvider(jobManager: jobManager, modelManager: modelManager),
TokenizerServiceProvider(modelManager: modelManager),
JobServiceProvider(jobManager: jobManager)
])
.bind(host: bindHost, port: bindPort)
@ -3,4 +3,4 @@ set -e

cd "$(dirname "${0}")/../Common"

exec protoc --swift_opt=Visibility=Public --grpc-swift_opt=Visibility=Public --swift_out=../Sources/StableDiffusionProtos --grpc-swift_out=../Sources/StableDiffusionProtos StableDiffusion.proto
exec protoc --swift_opt=Visibility=Public --grpc-swift_opt=Visibility=Public --swift_out=../Sources/StableDiffusionProtos --grpc-swift_out=../Sources/StableDiffusionProtos *.proto