+ * This is determined by the server's {@link Backoff} instruction.
+ */
+ @GuardedBy("this")
+ private int maxSize;
+
+ /**
+ * Maximum size of the serialized message in bytes.
+ * Must be greater than zero.
+ *
+ *
+ * This is determined by the {@link GrpcChannelOptions#maxMessageSize()}.
+ */
+ @GuardedBy("this")
+ private final int maxSizeBytes;
+
+ /** Total serialized size of the items in the {@link #buffer}. */
+ @GuardedBy("this")
+ private long sizeBytes;
+
+ /** An in-flight batch is unmodifiable. */
+ @GuardedBy("this")
+ private boolean inFlight = false;
+
+ /**
+ * Pending update to the {@link #maxSize}.
+ *
+ * The value is non-empty when {@link #setMaxSize} is called
+ * while the batch is {@link #inFlight}.
+ */
+ @GuardedBy("this")
+ private OptionalInt pendingMaxSize = OptionalInt.empty();
+
+ /**
+ * Creates a batch with the given capacity limits.
+ *
+ * NOTE(review): {@code maxSize} is validated with an assert only, so the
+ * check is skipped when assertions are disabled; {@link #checkInvariants}
+ * applies the same soft validation to the derived byte limit.
+ *
+ * @param maxSize Maximum number of items in the batch; must be positive.
+ * @param maxSizeBytes Input to {@link MessageSizeUtil#maxSizeBytes}, which
+ * derives the effective per-message byte limit.
+ */
+ Batch(int maxSize, int maxSizeBytes) {
+ assert maxSize > 0 : "non-positive maxSize";
+
+ this.maxSizeBytes = MessageSizeUtil.maxSizeBytes(maxSizeBytes);
+ this.maxSize = maxSize;
+ this.buffer = new LinkedHashMap<>(maxSize); // LinkedHashMap preserves insertion order.
+
+ checkInvariants();
+ }
+
+ /**
+ * Returns true if batch has reached its capacity, either in terms
+ * of the item count or the batch's estimated size in bytes.
+ *
+ * Uses {@code >=} rather than {@code ==}: the class invariants keep both
+ * values at or below their limits, but the relational form stays correct
+ * even if an invariant is transiently violated, instead of silently
+ * reporting "not full".
+ */
+ synchronized boolean isFull() {
+ return buffer.size() >= maxSize || sizeBytes >= maxSizeBytes;
+ }
+
+ /**
+ * Returns true if the batch's internal buffer is empty.
+ * If its primary buffer is empty, its backlog is guaranteed
+ * to be empty as well (see {@link #checkInvariants}: the backlog
+ * may only hold items while the buffer is full).
+ */
+ synchronized boolean isEmpty() {
+ return buffer.isEmpty(); // sizeBytes == 0 is guaranteed by class invariant.
+ }
+
+ /**
+ * Prepare a request to be sent. After calling this method, this batch becomes
+ * "in-flight": an attempt to {@link #add} more items to it will be rejected
+ * with an exception.
+ *
+ * NOTE(review): the returned Message reads {@link #buffer} lazily at the
+ * time it is invoked, not at prepare() time. This relies on the batch
+ * staying unmodified while in-flight — confirm callers never invoke
+ * {@link #clear} before the message has been serialized.
+ */
+ synchronized Message prepare() {
+ checkInvariants();
+
+ inFlight = true;
+ // Items are appended in insertion order (buffer is a LinkedHashMap).
+ return builder -> {
+ buffer.forEach((__, data) -> {
+ data.appendTo(builder);
+ });
+ };
+ }
+
+ /**
+ * Set the new {@link #maxSize} for this buffer.
+ *
+ *
extra = List.copyOf(buffer.keySet()).listIterator(buffer.size());
+ while (extra.hasPrevious() && buffer.size() > maxSize) {
+ addBacklog(buffer.remove(extra.previous()));
+ }
+ } finally {
+ checkInvariants();
+ }
+ }
+
+ /**
+ * Add a data item to the batch.
+ *
+ *
+ * We want to guarantee that, once a work item has been taken from the queue,
+ * it's going to be eventually executed. Because we cannot know if an item
+ * will overflow the batch before it's removed from the queue, the simplest
+ * and safest way to deal with it is to allow {@link Batch} to put
+ * the overflowing item in the {@link #backlog}. The batch is considered
+ * full after that.
+ *
+ * @param data The item to add; must not be null.
+ * @throws DataTooBigException If the data exceeds the maximum
+ * possible batch size.
+ * @throws IllegalStateException If called on an "in-flight" batch.
+ * @see #prepare
+ * @see #inFlight
+ * @see #clear
+ */
+ synchronized void add(Data data) throws IllegalStateException, DataTooBigException {
+ requireNonNull(data, "data is null");
+ checkInvariants();
+
+ try {
+ if (inFlight) {
+ throw new IllegalStateException("Batch is in-flight");
+ }
+ // An item larger than the whole message limit can never be sent.
+ if (data.sizeBytes() > maxSizeBytes) {
+ throw new DataTooBigException(data, maxSizeBytes);
+ }
+
+ long remainingBytes = maxSizeBytes - sizeBytes;
+ if (data.sizeBytes() <= remainingBytes && buffer.size() < maxSize) {
+ // Happy path: the item fits both limits.
+ addSafe(data);
+ return;
+ }
+ // One of the class's invariants is that the backlog must not contain
+ // any items unless the buffer is full. In case this item overflows
+ // the buffer, we put it in the backlog, but pretend the maxSizeBytes
+ // has been reached to satisfy the invariant.
+ // This doubles as a safeguard to ensure the caller cannot add any
+ // more items to the batch before flushing it.
+ addBacklog(data);
+ // Inflates sizeBytes to exactly maxSizeBytes, so isFull() reports true.
+ sizeBytes += remainingBytes;
+ assert isFull() : "batch must be full after an overflow";
+ } finally {
+ checkInvariants();
+ }
+ }
+
+ /**
+ * Add a data item to the batch.
+ *
+ * This method does not check {@link Data#sizeBytes()}, so the caller
+ * must ensure that this item will not overflow the batch.
+ * It also performs no {@link #inFlight} check — that guard lives in
+ * the public {@link #add} path.
+ */
+ private synchronized void addSafe(Data data) {
+ buffer.put(data.id(), data);
+ sizeBytes += data.sizeBytes();
+ }
+
+ /**
+ * Add a data item to the {@link #backlog}.
+ * The wrapping {@link BacklogItem} records the current timestamp,
+ * which determines drain order.
+ */
+ private synchronized void addBacklog(Data data) {
+ backlog.add(new BacklogItem(data));
+ }
+
+ /**
+ * Clear this batch's internal buffer.
+ *
+ *
+ * Once the buffer is pruned, it is re-populated from the backlog
+ * until the former is full or the latter is exhausted.
+ * If {@link #pendingMaxSize} is not empty, it is applied
+ * before re-populating the buffer.
+ *
+ * @return IDs removed from the buffer.
+ */
+ synchronized Collection clear() {
+ checkInvariants();
+
+ try {
+ // The in-flight message has been acknowledged; re-open the batch.
+ inFlight = false;
+
+ Set removed = Set.copyOf(buffer.keySet());
+ buffer.clear();
+ sizeBytes = 0;
+
+ if (pendingMaxSize.isPresent()) {
+ // NOTE(review): assumes setMaxSize resets pendingMaxSize now that
+ // the batch is no longer in-flight — otherwise checkInvariants()
+ // in the finally block would trip "open batch has pending maxSize".
+ setMaxSize(pendingMaxSize.getAsInt());
+ }
+
+ // Populate internal buffer from the backlog.
+ // We don't need to check the return value of .add(),
+ // as all items in the backlog are guaranteed to not
+ // exceed maxSizeBytes.
+ Iterator backlogIterator = backlog.iterator();
+ while (backlogIterator.hasNext() && !isFull()) {
+ addSafe(backlogIterator.next().data());
+ backlogIterator.remove();
+ }
+
+ return removed;
+ } finally {
+ checkInvariants();
+ }
+ }
+
+ /**
+ * A backlog entry: a data item plus the instant it was set aside,
+ * which determines the order it is drained back into the buffer.
+ */
+ private static record BacklogItem(Data data, Instant createdAt) {
+ public BacklogItem {
+ requireNonNull(data, "data is null");
+ requireNonNull(createdAt, "createdAt is null");
+ }
+
+ /**
+ * This constructor sets {@link #createdAt} automatically.
+ * It is not important that this timestamp is different from
+ * the one in {@link TaskHandle}, as long as the order is correct.
+ */
+ public BacklogItem(Data data) {
+ this(data, Instant.now());
+ }
+
+ /**
+ * Comparator sorts BacklogItems by their creation time.
+ *
+ * Items created in the same instant are ordered by their data ID:
+ * we cannot return 0 for two items with different contents,
+ * as it may result in data loss (e.g. in a sorted collection
+ * that treats "compares equal" as "duplicate").
+ */
+ private static Comparator comparator() {
+ return Comparator.comparing(BacklogItem::createdAt)
+ .thenComparing(item -> item.data().id());
+ }
+ }
+
+ /** Get total number of items in the batch (buffer plus backlog). */
+ synchronized int size() {
+ return buffer.size() + backlog.size();
+ }
+
+ /** Get current size of the batch in bytes. */
+ synchronized long sizeBytes() {
+ return sizeBytes;
+ }
+
+ /** Get the maximum serialized message size in bytes. */
+ synchronized int maxSizeBytes() {
+ return maxSizeBytes;
+ }
+
+ /** Get the maximum number of items allowed in the buffer. */
+ synchronized int maxSize() {
+ return maxSize;
+ }
+
+ /**
+ * Asserts the invariants of this class.
+ *
+ * All checks except the requireNonNull are plain asserts and therefore
+ * no-ops unless the JVM runs with assertions enabled ({@code -ea}).
+ */
+ private synchronized void checkInvariants() {
+ assert maxSize > 0 : "non-positive maxSize";
+ assert maxSizeBytes > 0 : "non-positive maxSizeBytes";
+ assert sizeBytes >= 0 : "negative sizeBytes";
+ assert buffer.size() <= maxSize : "buffer exceeds maxSize";
+ assert sizeBytes <= maxSizeBytes : "message exceeds maxSizeBytes";
+ if (!isFull()) {
+ assert backlog.isEmpty() : "backlog not empty when buffer not full";
+ }
+ if (buffer.isEmpty()) {
+ assert sizeBytes == 0 : "sizeBytes must be 0 when buffer is empty";
+ }
+
+ // Unlike the asserts above, this check always runs.
+ requireNonNull(pendingMaxSize, "pendingMaxSize is null");
+ if (!inFlight) {
+ assert pendingMaxSize.isEmpty() : "open batch has pending maxSize";
+ }
+ }
+}
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/batch/BatchContext.java b/src/main/java/io/weaviate/client6/v1/api/collections/batch/BatchContext.java
new file mode 100644
index 000000000..e5fb263d0
--- /dev/null
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/batch/BatchContext.java
@@ -0,0 +1,1197 @@
+package io.weaviate.client6.v1.api.collections.batch;
+
+import static java.util.Objects.requireNonNull;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.Predicate;
+
+import javax.annotation.concurrent.GuardedBy;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.grpc.Status;
+import io.grpc.stub.StreamObserver;
+import io.weaviate.client6.v1.api.collections.CollectionHandleDefaults;
+import io.weaviate.client6.v1.api.collections.WeaviateObject;
+import io.weaviate.client6.v1.api.collections.batch.Event.ClientError;
+import io.weaviate.client6.v1.api.collections.batch.Event.StreamHangup;
+import io.weaviate.client6.v1.api.collections.data.BatchReference;
+import io.weaviate.client6.v1.api.collections.data.InsertManyRequest;
+import io.weaviate.client6.v1.internal.ObjectBuilder;
+import io.weaviate.client6.v1.internal.orm.CollectionDescriptor;
+
+/**
+ * BatchContext stores the state of an active batch process
+ * and controls its lifecycle.
+ *
+ * Lifecycle
+ *
+ * The SSB implementation is based on gRPC bidi-streams, which are modeled as a
+ * pair of "observers" that define callbacks for inbound and outbound messages.
+ * We'll refer to them as "sender" (sending messages to the server) and "recv"
+ * (receiving and handing server-side events).
+ *
+ *
+ * When the context is started, the client exchanges a "recv" for a "sender",
+ * then stores in {@link #messages}. A {@link Send} process is started in the
+ * {@link #sendService} -- it will continue to run until the context is closed
+ * either gracefully via {@link #close} or abruptly via {@link #shutdownNow}.
+ *
+ *
+ * A "recv" always runs on some internal gRPC thread. The "recv" process is
+ * expected to exit whenever server closes its half of the stream, and will
+ * be re-created if the stream is re-opened. {@link Recv} delegates most
+ * of the operations to its parent BatchContext.
+ *
+ *
+ * {@link #send} and {@link #recv} futures track completion of the "sender"
+ * and "recv" routines.
+ *
+ *
State
+ *
+ * BatchContext organized client-side work using the
+ * State
+ * pattern. These states are defined:
+ *
+ *
+ * - {@code null} -- context hasn't been {@link #start}ed yet. The context
+ * SHOULD NOT be used in this state, as it will likely result in an NPE.
+ *
- AwaitStarted -- client's opened the stream, sent Start,
+ * and is now awaiting for the server to respond with Started.
+ *
- Active -- the server is ready to accept the next Data message.
+ *
- InFlight -- the latest batch has been sent, awaiting Acks.
+ *
- OOM -- server has OOM'ed and will not accept any more data.
+ *
- ServerShuttingDown -- server's begun a graceful shutdown.
+ *
- Reconnecting -- server's closed it's half of the stream; the client
+ * will try to reconnect to another instance up to {@link #maxReconnectRetries}
+ * times.
+ *
+ *
+ * Cancellation policy
+ * BatchContext does not rely on timing heuristics to advance its state.
+ * Threads coordinate via {@link #stateChanged} conditional variable
+ * and interrupts, when appropriate.
+ *
+ * Graceful shutdown
+ * When {@link #close()} is called, the context will stop accepting
+ * new items and start draining the remaining items in the {@link #queue}
+ * and {@link #batch} backlog. The client will then continue processing
+ * server-side events until stream's EOF. By the time context is closed
+ * all submitted tasks are expected to be completed successfully or otherwise.
+ *
+ *
+ * N.B.: This may take an arbitrarily long amount of time, as the client will
+ * continue to re-connect to other instances and re-submit WIP tasks in
+ * case the current stream is hung up or the server shuts down prematurely.
+ *
+ * Abrupt termination
+ * In the event of an internal client error (e.g. in the "sender" or "recv"
+ * threads), the client's half of the stream is closed immediately, and the
+ * "sender" processed is cancelled. A subsequent call to {@link #close()} will
+ * re-throw the causing exception as {@link IOException}. The stream can be
+ * terminated at any time, including during a graceful shutdown.
+ * In case the context is terminated before a graceful shutdown begins,
+ * the parent thread is also interrupted to prevent {@link #add} from blocking
+ * indefinitely (the "sender" will not be there to pop items from the task queue).
+ *
+ *
+ * To prevent data loss, re-submit all incomplete tasks
+ * to the next batch context.
+ *
+ * @param the shape of properties for inserted objects.
+ *
+ * @see StreamObserver
+ * @see State
+ * @see #shutdownNow
+ * @see TaskHandle#result()
+ *
+ * @author Dyma Solovei
+ */
+public final class BatchContext implements Closeable {
+ private static final Logger log = LoggerFactory.getLogger(BatchContext.class);
+
+ private final CollectionDescriptor collectionDescriptor;
+ private final CollectionHandleDefaults collectionHandleDefaults;
+
+ /**
+ * Internal execution service. Its lifecycle is bound to that of the
+ * BatchContext: it's started when the context is initialized
+ * and shutdown on {@link #close}.
+ *
+ *
+ * In the event of abrupt stream termination ({@link Recv#onError} is called),
+ * the "recv" thread MAY shutdown this service in order to interrupt the "send"
+ * thread; the latter may be blocked on {@link Send#awaitCanSend} or
+ * {@link Send#awaitCanPrepareNext}.
+ */
+ private final ExecutorService sendService = Executors.newSingleThreadExecutor();
+
+ /**
+ * Scheduled thread pool for delayed tasks.
+ *
+ * @see Oom
+ * @see Reconnecting
+ */
+ private final ScheduledExecutorService scheduledService = Executors.newScheduledThreadPool(1);
+
+ /** The thread that created the context. */
+ private final Thread parent = Thread.currentThread();
+
+ /** Stream factory creates new streams. */
+ private final StreamFactory streamFactory;
+
+ /**
+ * Queue publishes insert tasks from the main thread to the "sender".
+ *
+ *
+ * Send {@link TaskHandle#POISON} to gracefully shut down the "sender"
+ * thread. The same queue may be re-used with a different "sender",
+ * e.g. after {@link #reconnect}, but only when the new thread is known
+ * to have started. Otherwise, the thread trying to put an item on
+ * the queue will block indefinitely.
+ */
+ private final BlockingQueue queue;
+
+ /**
+ * Work-in-progress items.
+ *
+ *
+ * An item is added to the wip map after the "sender" successfully
+ * adds it to the {@link #batch} and is removed once the server reports
+ * back the result (whether success of failure).
+ */
+ private final ConcurrentMap wip = new ConcurrentHashMap<>();
+
+ /**
+ * Current batch.
+ *
+ *
+ * An item is added to the batch after the "sender" pulls it
+ * from the queue and remains there until it's Ack'ed.
+ */
+ private final Batch batch;
+
+ /**
+ * State encapsulates state-dependent behavior of the {@link BatchContext}.
+ * Before reading state, a thread MUST acquire {@link #lock}.
+ */
+ @GuardedBy("lock")
+ private State state;
+ /** lock synchronizes access to {@link #state}. */
+ private final Lock lock = new ReentrantLock();
+ /** stateChanged notifies threads about a state transition. */
+ private final Condition stateChanged = lock.newCondition();
+
+ /**
+ * Client-side part of the current stream, created on {@link #start}.
+ * Other threads MAY use stream but MUST NOT update this field on their own.
+ */
+ private volatile StreamObserver messages;
+
+ /**
+ * Handle for the "sender" routine.
+ * Cancel this future to interrupt the "sender".
+ */
+ private volatile Future> send;
+
+ /**
+ * Indicates completion of the "recv" routine.
+ * Canceling this future will have no effect.
+ */
+ private volatile CompletableFuture> recv;
+
+ /**
+ * Maximum number of times the client will attempt to re-open the stream
+ * before terminating the context.
+ */
+ private final int maxReconnectRetries;
+
+ /** closing completes the stream. */
+ private final CompletableFuture closing = new CompletableFuture<>();
+
+ /** Executor for performing graceful shutdown sequence. */
+ private final ExecutorService shutdownService = Executors.newSingleThreadExecutor();
+
+ /** Lightweight check to ensure users cannot send on a closed context. */
+ private volatile boolean closed;
+
+ /**
+ * Creates a context. The context is inert until {@link #start} is called.
+ *
+ * @param streamFactory Creates the underlying bidi-streams.
+ * @param maxSizeBytes Maximum serialized size of a single batch message.
+ * @param collectionDescriptor ORM descriptor of the target collection.
+ * @param collectionHandleDefaults Defaults (tenant, consistency level).
+ * @param batchSize Maximum number of items per batch.
+ * @param queueSize Capacity of the task queue shared with the "sender".
+ * @param maxReconnectRetries See {@link #maxReconnectRetries}.
+ */
+ BatchContext(
+ StreamFactory streamFactory,
+ int maxSizeBytes,
+ CollectionDescriptor collectionDescriptor,
+ CollectionHandleDefaults collectionHandleDefaults,
+ int batchSize,
+ int queueSize,
+ int maxReconnectRetries) {
+ this.streamFactory = requireNonNull(streamFactory, "streamFactory is null");
+ this.collectionDescriptor = requireNonNull(collectionDescriptor, "collectionDescriptor is null");
+ this.collectionHandleDefaults = requireNonNull(collectionHandleDefaults, "collectionHandleDefaults is null");
+
+ this.queue = new ArrayBlockingQueue<>(queueSize);
+ this.batch = new Batch(batchSize, maxSizeBytes);
+ this.maxReconnectRetries = maxReconnectRetries;
+
+ setState(AWAIT_STARTED);
+ }
+
+ /** Builder-based convenience constructor; delegates to the canonical one. */
+ private BatchContext(Builder builder) {
+ this(
+ builder.streamFactory,
+ builder.maxSizeBytes,
+ builder.collectionDescriptor,
+ builder.collectionHandleDefaults,
+ builder.batchSize,
+ builder.queueSize,
+ builder.maxReconnectRetries);
+ }
+
+ /**
+ * Add {@link WeaviateObject} to the batch.
+ *
+ * @throws InterruptedException If interrupted while waiting for queue space
+ * (the underlying task queue is bounded).
+ */
+ public TaskHandle add(WeaviateObject object) throws InterruptedException {
+ TaskHandle handle = new TaskHandle(
+ object,
+ InsertManyRequest.buildObject(object, collectionDescriptor, collectionHandleDefaults));
+ return add(handle);
+ }
+
+ /**
+ * Add {@link BatchReference} to the batch.
+ *
+ * @throws InterruptedException If interrupted while waiting for queue space
+ * (the underlying task queue is bounded).
+ */
+ public TaskHandle add(BatchReference reference) throws InterruptedException {
+ TaskHandle handle = new TaskHandle(
+ reference,
+ InsertManyRequest.buildReference(reference, collectionHandleDefaults.tenant()));
+ return add(handle);
+ }
+
+ /**
+ * Start the context: open the stream and launch the "sender" routine.
+ *
+ * NOTE(review): not guarded against being called twice — a second call
+ * would open another stream and submit another Send task, orphaning the
+ * first {@link #send} future. Confirm callers invoke this exactly once.
+ *
+ * @throws IllegalStateException If the context has been closed.
+ */
+ void start() {
+ if (closed) {
+ throw new IllegalStateException("context is closed");
+ }
+ openStream();
+ send = sendService.submit(new Send());
+ }
+
+ /**
+ * Reconnect re-creates the stream and renews the {@link #recv} future.
+ *
+ *
+ * Unlike {@link #start} it does not trigger a state transition, and
+ * {@link Reconnecting} will should continue to handle events until
+ * the stream is renewed successfully or {@link #maxReconnectRetries}
+ * is reached.
+ *
+ * @throws InterruptedException If interrupted while awaiting "recv" exit.
+ * @throws ExecutionException Propagated from the previous "recv" future.
+ */
+ void reconnect() throws InterruptedException, ExecutionException {
+ // The "sender" survives reconnects and will not call countDown
+ // until it's interrupted or the context is closed.
+ // The "recv" thread is guaranteed to have already exited, because
+ // the context can only transition into the Reconnecting state
+ // after the server half of the stream is closed (EOF or hangup).
+ recv.get();
+ openStream();
+ }
+
+ /**
+ * Retry a task.
+ *
+ *
+ * BatchContext does not impose any limit on the number of times a task can
+ * be retried -- it is up to the user to implement an appropriate retry policy.
+ *
+ * @throws InterruptedException If interrupted while waiting for queue space.
+ * @see TaskHandle#timesRetried
+ */
+ public TaskHandle retry(TaskHandle taskHandle) throws InterruptedException {
+ return add(taskHandle.retry());
+ }
+
+ /**
+ * Close attempts to drain the queue and send all remaining items.
+ * Calling any of BatchContext's public methods afterwards will
+ * result in an {@link IllegalStateException}.
+ *
+ * @throws IOException Propagates an exception
+ * if one has occurred in the meantime.
+ */
+ @Override
+ public void close() throws IOException {
+ // Test-and-set must be atomic: without it, two threads closing
+ // concurrently could both observe closed == false and both run the
+ // shutdown sequence (double-poisoning the queue).
+ boolean closedBefore;
+ synchronized (this) {
+ closedBefore = closed;
+ closed = true;
+ }
+
+ log.atDebug()
+ .addKeyValue("closed_before", closedBefore)
+ .log("Close context");
+
+ if (!closedBefore) {
+ shutdown();
+ }
+
+ try {
+ closing.get();
+ } catch (InterruptedException e) {
+ log.atInfo().addKeyValue("thread", Thread::currentThread).log("Interrupted");
+ Thread.currentThread().interrupt();
+ // Wrap the exception itself: InterruptedException has no cause, so
+ // IOException(e.getCause()) would wrap null and lose all context.
+ throw new IOException(e);
+ } catch (ExecutionException e) {
+ if (e.getCause() instanceof InterruptedException) {
+ log.atInfo().addKeyValue("thread", Thread::currentThread).log("Interrupted");
+ Thread.currentThread().interrupt();
+ }
+ // Unwrap the ExecutionException, but never pass a null cause.
+ throw new IOException(e.getCause() != null ? e.getCause() : e);
+ } finally {
+ shutdownExecutors();
+ }
+ }
+
+ /**
+ * Start a graceful context shutdown.
+ * Runs asynchronously on {@link #shutdownService}; completion is
+ * reported via the {@link #closing} future.
+ */
+ private void shutdown() {
+ log.atInfo()
+ .addKeyValue("wip_tasks", wip::size)
+ .addKeyValue("queued_tasks", queue::size)
+ .log("Initiate graceful shutdown");
+
+ shutdownService.execute(() -> {
+ try {
+ // Poison the queue -- this will signal "send" to drain the remaining
+ // items in the batch and in the backlog and exit.
+ //
+ // If shutdownNow has been called previously and the "send" routine
+ // has been interrupted, this would block indefinitely.
+ // Luckily, shutdownNow resolves the `closing` future as well.
+ log.debug("Poison the queue");
+ queue.put(TaskHandle.POISON);
+
+ // Wait for both "send" to exit; "send" will not exit until "recv" completes.
+ if (send != null) {
+ send.get();
+ }
+ closing.complete(null);
+ } catch (Exception e) {
+ closing.completeExceptionally(e);
+ }
+ });
+ }
+
+ /**
+ * Emit exception as {@link Event.ClientError}.
+ * How (and whether) the error terminates the context depends on the
+ * current {@link #state}'s handling of ClientError events.
+ */
+ private void throwInternal(Exception e) {
+ onEvent(new Event.ClientError(e));
+ }
+
+ /**
+ * Terminate context abruptly.
+ *
+ * NOTE(review): dereferences {@link #messages} without a null check —
+ * assumes shutdownNow is only reachable after {@link #openStream} has
+ * assigned it (i.e. after start). Confirm no event path can get here
+ * on an un-started context.
+ */
+ private void shutdownNow(Exception e) {
+ log.atInfo()
+ .addKeyValue("thread", Thread::currentThread)
+ .log("Initiate immediate shutdown");
+
+ // Now report this error to the server and terminate the stream.
+ closing.completeExceptionally(e);
+ messages.onError(Status.INTERNAL.withCause(e).asRuntimeException());
+
+ // Interrupt the "send" routine.
+ if (send != null) {
+ log.debug("Interrupt 'send' routine");
+ send.cancel(true);
+ }
+
+ if (!closed) {
+ // Since shutdownNow is never triggered by the "main" thread,
+ // it may be blocked on trying to add to the queue. While batch
+ // context is active, we own this thread and may interrupt it.
+ log.atDebug()
+ .addKeyValue("thread", Thread::currentThread)
+ .addKeyValue("closed", closed)
+ .log("Interrupt parent thread")
+ parent.interrupt();
+ }
+ }
+
+ /**
+ * Stop all internal executors, interrupting any in-flight tasks.
+ *
+ * NOTE(review): `scheduledReconnectService` is not among the executor
+ * fields declared in this class as visible here (sendService,
+ * scheduledService, shutdownService) — confirm it is declared elsewhere
+ * in this class, or remove this line / fold it into scheduledService.
+ */
+ private void shutdownExecutors() {
+ sendService.shutdownNow();
+ scheduledService.shutdownNow();
+ shutdownService.shutdownNow();
+ scheduledReconnectService.shutdownNow();
+ }
+
+ /**
+ * Set the new state and notify awaiting threads.
+ *
+ * Safe to call while already holding {@link #lock} (it is a
+ * ReentrantLock). {@link State#onEnter} runs under the lock,
+ * so it must not block.
+ */
+ void setState(State nextState) {
+ requireNonNull(nextState, "nextState is null");
+
+ lock.lock();
+ try {
+ log.atDebug()
+ .addKeyValue("thread", Thread::currentThread)
+ .addKeyValue("prev_state", state)
+ .addKeyValue("next_state", nextState)
+ .log("set next state");
+
+ State prev = state;
+ state = nextState;
+ state.onEnter(prev);
+ stateChanged.signalAll();
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ /**
+ * Blocks until a change in {@link #state} causes the predicate to be true.
+ *
+ *
+ * It is safe to acquire {@link #lock} before calling awaitState.
+ * If a state that satisfies the predicate needs to be awaited,
+ * awaiting {@link #stateChanged} will release the lock to let another
+ * thread update the state.
+ *
+ * @param predicate Determines if condition is satisfied.
+ * @param desc Short description of the predicate for debug logs.
+ * @throws InterruptedException If interrupted while waiting.
+ */
+ private void awaitState(Predicate predicate, String desc) throws InterruptedException {
+ requireNonNull(predicate, "predicate is null");
+
+ log.atDebug()
+ .addKeyValue("thread", Thread::currentThread)
+ .addKeyValue("predicate", desc)
+ .log("entered");
+
+ lock.lock();
+ try {
+ while (!predicate.test(state)) {
+ stateChanged.await();
+ }
+ } finally {
+ lock.unlock();
+
+ // NOTE(review): this "fulfilled" log also fires when the wait exits
+ // via InterruptedException, which can be misleading in traces.
+ log.atDebug()
+ .addKeyValue("thread", Thread::currentThread)
+ .addKeyValue("predicate", desc)
+ .log("fulfilled");
+ }
+ }
+
+ /**
+ * Open a new batching stream.
+ * Assigns {@link #recv} and {@link #messages} before sending the Start
+ * message, so event handlers observe a consistent pair.
+ */
+ void openStream() {
+ Recv events = new Recv(this);
+ recv = events;
+ messages = streamFactory.createStream(events);
+ messages.onNext(Message.start(collectionHandleDefaults.consistencyLevel()));
+ }
+
+ /**
+ * Close the client half of the stream: send a final STOP message,
+ * then complete the client-side observer.
+ *
+ * NOTE(review): the debug and the second trace log carry the same text
+ * ("Close client half of the stream"); consider distinct messages so
+ * the two steps are distinguishable in traces.
+ */
+ void closeStream() {
+ log.atDebug()
+ .addKeyValue("thread", Thread::currentThread)
+ .log("Close client half of the stream");
+
+ log.atTrace()
+ .addKeyValue("thread", Thread::currentThread)
+ .log("Send STOP");
+ messages.onNext(Message.stop());
+
+ log.atTrace()
+ .addKeyValue("thread", Thread::currentThread)
+ .log("Close client half of the stream");
+ messages.onCompleted();
+ }
+
+ /**
+ * onEvent delegates event handling to {@link #state}.
+ *
+ *
+ * Be mindful that most of the time this callback will run in a hot path
+ * on a gRPC thread. {@link State} implementations SHOULD offload any
+ * blocking operations to one of the provided executors. Because onEvent
+ * will hold the {@link #lock}, no state changes are guaranteed to happen
+ * until {@link State#onEvent} returns.
+ *
+ * @param event The inbound event; must not be null.
+ * @see #scheduledService
+ */
+ private void onEvent(Event event) {
+ requireNonNull(event, "event is null");
+
+ lock.lock();
+ try {
+ log.atDebug()
+ .addKeyValue("thread", Thread::currentThread)
+ .addKeyValue("state", state)
+ .addKeyValue("event", event)
+ .log("Incoming server message");
+
+ state.onEvent(event);
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ /**
+ * Enqueue a task for the "sender"; blocks while the queue is full.
+ *
+ * NOTE(review): the duplicate check is check-then-act — {@link #wip} is
+ * only populated later by the "sender" (after the task is taken from the
+ * queue), so two rapid adds of the same id can both pass this check; the
+ * duplicate is then only caught by the sender's assert. Confirm this
+ * best-effort detection is acceptable.
+ *
+ * @throws InterruptedException If interrupted while waiting for queue space.
+ * @throws IllegalStateException If the context has been closed.
+ */
+ private TaskHandle add(final TaskHandle taskHandle) throws InterruptedException {
+ if (closed) {
+ throw new IllegalStateException("context is closed");
+ }
+ requireNonNull(taskHandle, "taskHandle is null");
+
+ TaskHandle existing = wip.get(taskHandle.id());
+ if (existing != null) {
+ throw new DuplicateTaskException(taskHandle, existing);
+ }
+
+ queue.put(taskHandle);
+ return taskHandle;
+ }
+
+ /**
+ * "Sender" routine: consumes the task {@link #queue}, fills the
+ * {@link #batch}, and pushes prepared messages onto the stream until
+ * it takes {@link TaskHandle#POISON} or is interrupted.
+ */
+ private final class Send implements Runnable {
+
+ @Override
+ public void run() {
+ // Rename the thread for the duration of the run to aid debugging;
+ // restore the original name on exit (the pool thread is reused).
+ String threadName = Thread.currentThread().getName();
+ Thread.currentThread().setName("sender");
+ try {
+ trySend();
+ } finally {
+ Thread.currentThread().setName(threadName);
+ }
+ }
+
+ /**
+ * trySend consumes {@link #queue} tasks and sends them in batches until it
+ * encounters a {@link TaskHandle#POISON} or is otherwise interrupted.
+ */
+ private void trySend() {
+ try {
+ awaitState(State::canPrepareNext, "can prepare next");
+
+ while (!Thread.currentThread().isInterrupted()) {
+ // Flush before taking the next task, so there is always room.
+ if (batch.isFull()) {
+ send();
+ }
+
+ TaskHandle task = queue.take();
+
+ if (task == TaskHandle.POISON) {
+ log.debug("Took poison");
+
+ drain();
+ closeStream();
+
+ // The SSB protocol requires the client to continue reading the stream
+ // until EOF. In the happy case, the server will close its half having
+ // processed all previous requests; the WIP buffer is empty in that case.
+ //
+ // It is possible that the server will be restarted or the stream will be
+ // hung up before client receives all Results, in which case we might need
+ // to re-submit the items remaining in the WIP buffer.
+ recv.get();
+
+ assert closed : "queue poisoned when context not closed";
+ if (wip.isEmpty()) {
+ log.info("All tasks completed, no more data to send");
+ return;
+ }
+
+ // Server closed the stream before reporting all expected Results.
+ // Return the poison to the queue and go another round -- if the
+ // client tried to reconnect the batch may've been filled again.
+ assert queue.isEmpty() : "queue must be empty after poison";
+ queue.add(task);
+ continue;
+ }
+
+ Data data = task.data();
+ batch.add(data);
+
+ // Register the task as work-in-progress only after the batch
+ // accepted it; the server's Results will remove it from wip.
+ TaskHandle existing = wip.put(task.id(), task);
+ assert existing == null : "duplicate tasks in progress, id=" + existing.id();
+ }
+ } catch (InterruptedException ignored) {
+ // Interrupt is the expected cancellation signal; re-assert it.
+ Thread.currentThread().interrupt();
+ } catch (Exception e) {
+ throwInternal(e);
+ }
+ }
+
+ /**
+ * Send the current portion of batch items. After this method returns, the batch
+ * is guaranteed to have space for at least one the next item (not full).
+ */
+ private void send() throws InterruptedException {
+ log.atInfo()
+ .addKeyValue("batch_size_total_bytes", batch::sizeBytes)
+ .log("Send next batch");
+
+ // Continue flushing until we get the batch to a "not full" state.
+ // This is to account for the backlog, which might re-fill the batch
+ // after .clear().
+ while (batch.isFull()) {
+ flush();
+ }
+ assert !batch.isFull() : "batch is full after send";
+ }
+
+ /**
+ * Send all remaining items in the batch. After this method returns, the batch
+ * is guaranteed to be empty.
+ */
+ private void drain() throws InterruptedException {
+ log.atInfo()
+ .addKeyValue("batch_size_total_items", batch::size)
+ .addKeyValue("message_size_max_items", batch::maxSize)
+ .addKeyValue("message_size_max_bytes", batch::maxSizeBytes)
+ .log("Drain remaining items");
+
+ // To correctly drain the batch, we flush repeatedly
+ // until the batch becomes empty, as clearing a batch
+ // after an ACK might re-populate it from its internal backlog.
+ while (!batch.isEmpty()) {
+ flush();
+ }
+ assert batch.isEmpty() : "batch not empty after drain";
+ }
+
+ /**
+ * Block until the current state allows {@link State#canSendNext},
+ * then prepare the batch, send it, and set InFlight state.
+ * Block until the current state allows {@link State#canPrepareNext}.
+ *
+ *
+ * Depending on the BatchContext lifecycle, the semantics of
+ * "await can prepare next" can be one of "message is ACK'ed"
+ * "the stream has started", or, more generally,
+ * "it is safe to take a next item from the queue and add it to the batch".
+ *
+ * @see Batch#prepare
+ * @see #IN_FLIGHT
+ */
+ private void flush() throws InterruptedException {
+ // Reentrant: awaitState and setState re-acquire this same lock.
+ lock.lock();
+ try {
+ awaitState(State::canSendNext, "can send next");
+
+ // Send and transition to IN_FLIGHT MUST be done atomically.
+ //
+ // Without synchronization, there's a potential race
+ // where the server Acks the next batch _before_ IN_FLIGHT state
+ // is set, so when finally set it may block forever. This will most
+ // likely only manifest in tests, where batches are acked instantly,
+ // but it's good to have the extra safety layer.
+ log.atTrace()
+ .addKeyValue("message_size_max_items", batch::maxSize)
+ .addKeyValue("batch_size_total_bytes", batch::sizeBytes)
+ .log("Prepare and send next batch");
+ messages.onNext(batch.prepare());
+ setState(IN_FLIGHT);
+ } finally {
+ lock.unlock();
+ }
+
+ awaitState(State::canPrepareNext, "can prepare next");
+ }
+
+ }
+
+ /**
+ * Server-to-client half of the stream. Doubles as a future that
+ * completes (always normally, never exceptionally) once the server half
+ * closes — whether via EOF or hangup — so callers can await stream
+ * termination with {@code recv.get()}.
+ */
+ private static final class Recv extends CompletableFuture implements StreamObserver {
+ private final BatchContext> context;
+
+ private Recv(BatchContext> context) {
+ this.context = context;
+ }
+
+ @Override
+ public void onNext(Event event) {
+ try {
+ if (event == Event.EOF) {
+ // Handle synthetic EOF which the Oom state can send to initiate a shutdown.
+ onCompleted();
+ } else {
+ context.onEvent(event);
+ }
+ } catch (Exception e) {
+ // A handler failure is surfaced as a client error, not rethrown
+ // into gRPC internals.
+ context.onEvent(new Event.ClientError(e));
+ }
+ }
+
+ /**
+ * EOF for the server-side stream.
+ * By the time this is called, the client-side of the stream had been closed
+ * and the "send" thread has either exited or is on its way there.
+ */
+ @Override
+ public void onCompleted() {
+ try {
+ context.onEvent(Event.EOF);
+ } finally {
+ complete(null);
+ }
+ }
+
+ /** An exception occurred either on our end or in the channel internals. */
+ @Override
+ public void onError(Throwable t) {
+ try {
+ context.onEvent(Event.StreamHangup.fromThrowable(t));
+ } finally {
+ // Complete normally: the hangup is reported as an event, not via
+ // this future, so waiters are never poisoned with the throwable.
+ complete(null);
+ }
+ }
+ }
+
+ private final State AWAIT_STARTED = new BaseState("AWAIT_STARTED", BaseState.Action.PREPARE_NEXT);
+ private final State ACTIVE = new BaseState("ACTIVE", BaseState.Action.PREPARE_NEXT, BaseState.Action.SEND_NEXT);
+ private final State IN_FLIGHT = new BaseState("IN_FLIGHT");
+
+ /** BaseState implements default handlers for all {@link Event} subclasses. */
+ private class BaseState implements State {
+ /** State's display name for logging. */
+ private final String name;
+ /** Actions permitted in this state. */
+ private final EnumSet permitted;
+
+ enum Action {
+ /**
+ * The system is allowed to accept new items from the user
+ * and populate the next batch.
+ */
+ PREPARE_NEXT,
+
+ /** The system is allowed to send the next batch once it's ready. */
+ SEND_NEXT;
+ }
+
+ /**
+ * @param name Display name.
+ * @param permitted Actions permitted in this state.
+ */
+ protected BaseState(String name, Action... permitted) {
+ this.name = name;
+ this.permitted = requireNonNull(permitted, "actions is null").length == 0
+ ? EnumSet.noneOf(Action.class)
+ : EnumSet.copyOf(Arrays.asList(permitted));
+ }
+
+ @Override
+ public void onEnter(State prev) {
+ }
+
+ @Override
+ public boolean canSendNext() {
+ return permitted.contains(Action.SEND_NEXT);
+ }
+
+ @Override
+ public boolean canPrepareNext() {
+ return permitted.contains(Action.PREPARE_NEXT);
+ }
+
+ @Override
+ public void onEvent(Event event) {
+ if (event == Event.STARTED) {
+ onStarted();
+ } else if (event instanceof Event.Acks acks) {
+ onAcks(acks);
+ } else if (event instanceof Event.Oom oom) {
+ onOom(oom);
+ } else if (event instanceof Event.Results results) {
+ onResults(results);
+ } else if (event instanceof Event.Backoff backoff) {
+ onBackoff(backoff);
+ } else if (event == Event.SHUTTING_DOWN) {
+ onShuttingDown();
+ } else if (event instanceof Event.StreamHangup || event == Event.EOF) {
+ onStreamClosed(event);
+ } else if (event instanceof Event.ClientError error) {
+ onClientError(error);
+ } else {
+ throw new AssertionError("unreachable with event " + event);
+ }
+ }
+
+ private void onStarted() {
+ setState(ACTIVE);
+ }
+
+ private void onAcks(Event.Acks acks) {
+ log.atInfo()
+ .addKeyValue("count_acks", acks.acked().size())
+ .addKeyValue("wip_tasks", wip::size)
+ .log("Received Acks");
+
+ Collection removed = batch.clear();
+ if (!acks.acked().containsAll(removed)) {
+ throwInternal(ProtocolViolationException.incompleteAcks(List.copyOf(removed)));
+ }
+ acks.acked().stream()
+ .map(wip::get).filter(Objects::nonNull)
+ .forEach(TaskHandle::setAcked);
+
+ setState(ACTIVE);
+ }
+
+ private void onResults(Event.Results results) {
+ log.atInfo()
+ .addKeyValue("count_success", results.successful().size())
+ .addKeyValue("count_errors", results.errors().size())
+ .addKeyValue("wip_tasks", wip::size)
+ .log("Received Results");
+
+ results.successful().stream()
+ .map(wip::remove).filter(Objects::nonNull)
+ .forEach(TaskHandle::setSuccess);
+
+ results.errors().keySet().stream()
+ .map(wip::remove).filter(Objects::nonNull)
+ .forEach(taskHandle -> taskHandle.setError(results.errors().get(taskHandle.id())));
+ }
+
+ private void onBackoff(Event.Backoff backoff) {
+ log.atInfo()
+ .addKeyValue("prev_max_size", batch::maxSize)
+ .addKeyValue("next_max_size", backoff::maxSize)
+ .log("Received Backoff");
+
+ batch.setMaxSize(backoff.maxSize());
+ }
+
+ private void onOom(Event.Oom oom) {
+ log.atInfo()
+ .addKeyValue("wip_tasks", wip::size)
+ .log("Server is out of memory");
+
+ setState(new Oom(oom.delaySeconds()));
+ }
+
+ private void onShuttingDown() {
+ setState(new ServerShuttingDown(this));
+ }
+
+ private void onStreamClosed(Event event) {
+ log.info("Server closed its half of the stream");
+
+ if (event instanceof Event.StreamHangup hangup) {
+ log.atWarn()
+ .addKeyValue("cause", hangup::exception)
+ .log("Stream hangup");
+ }
+
+ log.atDebug()
+ .addKeyValue("closed", closed)
+ .addKeyValue("wip_tasks", wip::size)
+ .log("Client will reconnect if any tasks are pending");
+
+ // The only time we should not try to reconnect is if the context
+ // is gracefully shutting down after a call to close() and the server
+ // has returned Results for all previous batches.
+ if (closed && wip.isEmpty()) {
+ log.info("All tasks completed, no more events are expected");
+ return;
+ }
+ setState(new Reconnecting(maxReconnectRetries));
+ }
+
+ private void onClientError(Event.ClientError error) {
+ log.atError()
+ .addKeyValue("cause", error::exception)
+ .log("Internal error, context will be terminated");
+
+ shutdownNow(error.exception());
+ }
+
+ @Override
+ public String toString() {
+ return name;
+ }
+ }
+
+ /**
+ * Oom waits for {@link Event#SHUTTING_DOWN} up to a specified amount of time,
+ * after which it will force stream termination by imitating server shutdown.
+ */
+ private final class Oom extends BaseState {
+ private final long shutdownAfterSeconds;
+ private ScheduledFuture> shutdown;
+
+ private Oom(long shutdownAfterSeconds) {
+ super("OOM");
+ this.shutdownAfterSeconds = shutdownAfterSeconds;
+
+ log.atDebug()
+ .addKeyValue("grace_period", shutdownAfterSeconds)
+ .log("Server is out of memory");
+ }
+
+ @Override
+ public void onEnter(State prev) {
+ shutdown = scheduledService.schedule(this::initiateShutdown, shutdownAfterSeconds, TimeUnit.SECONDS);
+ }
+
+ /** Imitate server shutdown sequence. */
+ private void initiateShutdown() {
+ log.info("No update from the server after {}s, context will be forcibly restarted", shutdownAfterSeconds);
+
+ // We cannot route event handling via normal BatchContext#onEvent, because
+ // it delegates to the current state, which is Oom. If Oom#onEvent were to
+ // receive an Event.SHUTTING_DOWN, it would cancel this very shutdown
+ // sequence. Instead, we delegate to our parent BaseState, which normally
+ // handles these events.
+ final Recv events = (Recv) recv;
+ if (!Thread.currentThread().isInterrupted()) {
+ events.onNext(Event.SHUTTING_DOWN);
+ }
+ if (!Thread.currentThread().isInterrupted()) {
+ events.onNext(Event.EOF);
+ }
+ }
+
+ @Override
+ public void onEvent(Event event) {
+ if (event instanceof StreamHangup ||
+ event instanceof ClientError) {
+ shutdown.cancel(true);
+ try {
+ shutdown.get();
+ } catch (CancellationException ignored) {
+ log.atDebug().addKeyValue("thread", Thread::currentThread).log("Canceled");
+ } catch (InterruptedException ignored) {
+ // "recv" is running on a thread from gRPC's internal thread pool,
+ // so, while onEvent allows InterruptedException to stay responsive,
+ // in practice this thread will only be interrupted by the thread pool,
+ // which already knows it's being shut down.
+ log.atDebug().addKeyValue("thread", Thread::currentThread).log("Interrupted");
+ } catch (ExecutionException e) {
+ throwInternal(e);
+ }
+ }
+ super.onEvent(event);
+ }
+ }
+
+ /**
+ * ServerShuttingDown allows preparing the next batch
+ * unless the server's OOM'ed on the previous one.
+ * Once set, the state will shutdown {@link BatchContext#sendService}
+ * to instruct the "send" thread to close our part of the stream.
+ */
+ private final class ServerShuttingDown extends BaseState {
+ private final boolean canPrepareNext;
+
+ private ServerShuttingDown(State prev) {
+ super("SERVER_SHUTTING_DOWN");
+ this.canPrepareNext = prev == null || !Oom.class.isAssignableFrom(prev.getClass());
+
+ log.atDebug()
+ .addKeyValue("prev_state", prev)
+ .addKeyValue("can_prepare_next", canPrepareNext)
+ .log("Server is shutting down");
+ }
+
+ @Override
+ public boolean canPrepareNext() {
+ return canPrepareNext;
+ }
+
+ @Override
+ public boolean canSendNext() {
+ return false;
+ }
+
+ @Override
+ public void onEnter(State prev) {
+ closeStream();
+ }
+ }
+
+ /**
+ * Reconnecting state is entered either by the server finishing a shutdown
+ * and closing its end of the stream or an unexpected stream hangup.
+ *
+ * @see Recv#onCompleted graceful server shutdown
+ * @see Recv#onError stream hangup
+ */
+ private final class Reconnecting extends BaseState {
+ private final int maxRetries;
+ private int retries = 0;
+
+ private Reconnecting(int maxRetries) {
+ super("RECONNECTING", Action.PREPARE_NEXT);
+ this.maxRetries = maxRetries;
+ }
+
+ @Override
+ public void onEnter(State prev) {
+ log.atInfo()
+ .addKeyValue("prev_state", prev)
+ .addKeyValue("max_retries", maxRetries)
+ .log("Trying to reconnect");
+
+ if (!ServerShuttingDown.class.isAssignableFrom(prev.getClass())) {
+ // This is NOT an orderly shutdown, we're reconnecting after a stream hangup.
+ // Assume all WIP items have been lost and re-submit everything.
+ // All items in the batch are contained in WIP, so it is safe to discard the
+ // batch entirely and re-populate from WIP.
+ while (!batch.isEmpty()) {
+ batch.clear();
+ }
+
+ // Unlike during normal operation, we will not stop when batch.isFull().
+ // Batch#add guarantees that data will not be discarded in the event of
+ // an overflow -- all extra items are placed into the backlog, which is
+ // unbounded.
+ wip.values().forEach(task -> batch.add(task.data()));
+ }
+
+ reconnectNow();
+ }
+
+ @Override
+ public void onEvent(Event event) {
+ assert retries <= maxRetries : "maxRetries exceeded";
+
+ if (event == Event.STARTED) {
+ log.info("Reconnected after {} retries", retries);
+
+ setState(ACTIVE);
+ } else if (event instanceof Event.StreamHangup) {
+
+ if (retries == maxRetries) {
+ throwInternal(new IOException("Server unavailable"));
+ return;
+ }
+
+ long nextDelay = (long) Math.pow(2, retries);
+ log.atInfo()
+ .addKeyValue("max_retries", maxRetries)
+ .addKeyValue("remaining_retries", maxRetries - retries)
+ .log("Retry after {}s", nextDelay);
+
+ reconnectAfter(nextDelay);
+
+ } else if (event == Event.EOF) {
+ throwInternal(ProtocolViolationException.illegalStateTransition(this, event));
+ } else {
+ super.onEvent(event);
+ }
+
+ assert retries <= maxRetries : "maxRetries exceeded";
+ }
+
+ /** Reconnect with no delay. */
+ private void reconnectNow() {
+ reconnectAfter(0);
+ }
+
+ /**
+ * Schedule a task to {@link #reconnect} after a delay.
+ *
+ *
+ * The task is scheduled on {@link #scheduledService} even if
+ * {@code delaySeconds == 0} to avoid blocking gRPC worker
+ * thread,
+ * where the {@link BatchContext#onEvent} callback runs.
+ *
+ * @param delaySeconds Delay in seconds.
+ */
+ private void reconnectAfter(long delaySeconds) {
+ retries++;
+
+ scheduledService.schedule(() -> {
+ try {
+ reconnect();
+ } catch (InterruptedException e) {
+ log.atDebug().addKeyValue("thread", Thread::currentThread).log("Interrupted");
+ Thread.currentThread().interrupt();
+ } catch (ExecutionException e) {
+ throwInternal(e);
+ }
+ }, delaySeconds, TimeUnit.SECONDS);
+ }
+ }
+
+ // --------------------------------------------------------------------------
+
+ private final ScheduledExecutorService scheduledReconnectService = Executors.newScheduledThreadPool(1);
+
+ void scheduleReconnect(int reconnectIntervalSeconds) {
+ log.atDebug()
+ .addKeyValue("interval_seconds", reconnectIntervalSeconds)
+ .log("Scheduled regular context reconnects");
+
+ scheduledReconnectService.scheduleWithFixedDelay(() -> {
+ log.debug("Imitating server shutdown");
+
+ final Recv events = (Recv) recv;
+
+ if (!Thread.currentThread().isInterrupted()) {
+ log.trace("Send synthetic SHUTTING_DOWN");
+ events.onNext(Event.SHUTTING_DOWN);
+ }
+
+ if (!Thread.currentThread().isInterrupted()) {
+ log.trace("Send synthetic EOF");
+ events.onNext(Event.EOF);
+ }
+
+ // We want to count down from the moment we re-opened the stream,
+ // not from the moment we initialized the sequence.
+ try {
+ awaitState(ACTIVE::equals, "ACTIVE");
+ } catch (InterruptedException ignored) {
+ // Let the process exit normally.
+ log.atDebug().addKeyValue("thread", Thread::currentThread).log("Interrupted");
+ }
+ }, reconnectIntervalSeconds, reconnectIntervalSeconds, TimeUnit.SECONDS);
+ }
+
+ public static class Builder implements ObjectBuilder> {
+ private final StreamFactory streamFactory;
+ private final int maxSizeBytes;
+ private final CollectionDescriptor collectionDescriptor;
+ private final CollectionHandleDefaults collectionHandleDefaults;
+
+ Builder(
+ StreamFactory streamFactory,
+ int maxSizeBytes,
+ CollectionDescriptor collectionDescriptor,
+ CollectionHandleDefaults collectionHandleDefaults) {
+ this.streamFactory = streamFactory;
+ this.maxSizeBytes = maxSizeBytes;
+ this.collectionDescriptor = collectionDescriptor;
+ this.collectionHandleDefaults = collectionHandleDefaults;
+ }
+
+ private int batchSize = 1_000;
+ private int queueSize = 1_000;
+ private int maxReconnectRetries = 5;
+
+ public Builder batchSize(int batchSize) {
+ this.batchSize = batchSize;
+ return this;
+ }
+
+ public Builder queueSize(int queueSize) {
+ this.queueSize = queueSize;
+ return this;
+ }
+
+ public Builder maxReconnectRetries(int maxReconnectRetries) {
+ this.maxReconnectRetries = maxReconnectRetries;
+ return this;
+ }
+
+ @Override
+ public BatchContext build() {
+ return new BatchContext<>(this);
+ }
+ }
+}
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/batch/Data.java b/src/main/java/io/weaviate/client6/v1/api/collections/batch/Data.java
new file mode 100644
index 000000000..dba9e79ad
--- /dev/null
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/batch/Data.java
@@ -0,0 +1,94 @@
+package io.weaviate.client6.v1.api.collections.batch;
+
+import static java.util.Objects.requireNonNull;
+
+import javax.annotation.concurrent.Immutable;
+
+import com.google.protobuf.GeneratedMessage;
+import com.google.protobuf.GeneratedMessageV3;
+
+import io.weaviate.client6.v1.api.collections.WeaviateObject;
+import io.weaviate.client6.v1.api.collections.data.ObjectReference;
+import io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoBatch;
+
+@Immutable
+@SuppressWarnings("deprecation") // protoc uses GeneratedMessageV3
+class Data implements Message {
+
+ /**
+ * Raw input value, as passed by the user.
+ */
+ private final Object raw;
+
+ /**
+ * Task ID. Depending on the underlying object, this will either be
+ * {@link WeaviateObject#uuid} or {@link ObjectReference#beacon}.
+ *
+ * Since UUIDs and beacons cannot clash, ID does not encode any
+ * information about the underlying data type.
+ */
+ private final String id;
+
+ /**
+ * Serialized representation of the {@link #raw}. This value is immutable
+ * for the entire lifecycle of the handle.
+ */
+ private final GeneratedMessage.ExtendableMessage message;
+
+ /** Estimated size of the {@link #message} when serialized. */
+ private final int sizeBytes;
+
+ enum Type {
+ OBJECT(WeaviateProtoBatch.BatchStreamRequest.Data.OBJECTS_FIELD_NUMBER),
+ REFERENCE(WeaviateProtoBatch.BatchStreamRequest.Data.REFERENCES_FIELD_NUMBER);
+
+ private final int fieldNumber;
+
+ private Type(int fieldNumber) {
+ this.fieldNumber = fieldNumber;
+ }
+
+ public int fieldNumber() {
+ return fieldNumber;
+ }
+ }
+
+ private Data(Object raw, String id, GeneratedMessage.ExtendableMessage message, int sizeBytes) {
+ this.raw = requireNonNull(raw, "raw is null");
+ this.id = requireNonNull(id, "id is null");
+ this.message = requireNonNull(message, "message is null");
+
+ assert sizeBytes >= 0;
+ this.sizeBytes = sizeBytes;
+ }
+
+ Data(Object raw, String id, GeneratedMessage.ExtendableMessage message,
+ Type type) {
+ this(raw, id, message, MessageSizeUtil.ofDataField(message, type));
+ }
+
+ String id() {
+ return id;
+ }
+
+ /** Serialized data size in bytes. */
+ int sizeBytes() {
+ return sizeBytes;
+ }
+
+ @Override
+ public void appendTo(WeaviateProtoBatch.BatchStreamRequest.Builder builder) {
+ WeaviateProtoBatch.BatchStreamRequest.Data.Builder data = requireNonNull(builder, "builder is null")
+ .getDataBuilder();
+ if (message instanceof WeaviateProtoBatch.BatchObject object) {
+ data.getObjectsBuilder().addValues(object);
+ } else if (message instanceof WeaviateProtoBatch.BatchReference ref) {
+ data.getReferencesBuilder().addValues(ref);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "%s (%s)".formatted(raw.getClass().getSimpleName(), id);
+ }
+}
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/batch/DataTooBigException.java b/src/main/java/io/weaviate/client6/v1/api/collections/batch/DataTooBigException.java
new file mode 100644
index 000000000..5160bd27c
--- /dev/null
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/batch/DataTooBigException.java
@@ -0,0 +1,16 @@
+package io.weaviate.client6.v1.api.collections.batch;
+
+import static java.util.Objects.requireNonNull;
+
+import io.weaviate.client6.v1.api.WeaviateException;
+
+/**
+ * DataTooBigException is thrown when a single object exceeds
+ * the maximum size of a gRPC message.
+ */
+public class DataTooBigException extends WeaviateException {
+ DataTooBigException(Data data, long maxSizeBytes) {
+ super("%s with size=%dB exceeds maximum message size %dB".formatted(
+ requireNonNull(data, "data is null"), data.sizeBytes(), maxSizeBytes));
+ }
+}
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/batch/DuplicateTaskException.java b/src/main/java/io/weaviate/client6/v1/api/collections/batch/DuplicateTaskException.java
new file mode 100644
index 000000000..31fb923c0
--- /dev/null
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/batch/DuplicateTaskException.java
@@ -0,0 +1,23 @@
+package io.weaviate.client6.v1.api.collections.batch;
+
+import io.weaviate.client6.v1.api.WeaviateException;
+
+/**
+ * DuplicateTaskException is thrown if task is submitted to the batch
+ * while another task with the same ID is in progress.
+ */
+public class DuplicateTaskException extends WeaviateException {
+ private final TaskHandle existing;
+
+ DuplicateTaskException(TaskHandle duplicate, TaskHandle existing) {
+ super(duplicate + " cannot be added to the batch while another task with the same ID is in progress");
+ this.existing = existing;
+ }
+
+ /**
+ * Get the currently in-progress handle that's a duplicate of the one submitted.
+ */
+ public TaskHandle getExisting() {
+ return existing;
+ }
+}
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/batch/Event.java b/src/main/java/io/weaviate/client6/v1/api/collections/batch/Event.java
new file mode 100644
index 000000000..ba3f6b208
--- /dev/null
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/batch/Event.java
@@ -0,0 +1,162 @@
+package io.weaviate.client6.v1.api.collections.batch;
+
+import static java.util.Objects.requireNonNull;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import io.grpc.Status;
+import io.weaviate.client6.v1.api.collections.batch.Event.Acks;
+import io.weaviate.client6.v1.api.collections.batch.Event.Backoff;
+import io.weaviate.client6.v1.api.collections.batch.Event.ClientError;
+import io.weaviate.client6.v1.api.collections.batch.Event.Oom;
+import io.weaviate.client6.v1.api.collections.batch.Event.Results;
+import io.weaviate.client6.v1.api.collections.batch.Event.Started;
+import io.weaviate.client6.v1.api.collections.batch.Event.StreamHangup;
+import io.weaviate.client6.v1.api.collections.batch.Event.TerminationEvent;
+
+sealed interface Event
+ permits Started, Acks, Results, Backoff, Oom, TerminationEvent, StreamHangup, ClientError {
+
+ final static Event STARTED = new Started();
+ final static Event SHUTTING_DOWN = TerminationEvent.SHUTTING_DOWN;
+ final static Event EOF = TerminationEvent.EOF;
+
+ /**
+ * The server has acknowledged our Start message and is ready to receive data.
+ */
+ record Started() implements Event {
+ }
+
+ /**
+ * The server has added items from the previous message to its internal
+ * work queue, client MAY send the next batch.
+ *
+ *
+ * The protocol guarantees that {@link Acks} will contain IDs for all
+ * items sent in the previous batch.
+ */
+ record Acks(Collection acked) implements Event {
+ public Acks {
+ acked = List.copyOf(requireNonNull(acked, "acked is null"));
+ }
+
+ @Override
+ public String toString() {
+ return "Acks";
+ }
+ }
+
+ /**
+ * Results for the insertion of previous batches.
+ *
+ *
+ * We assume that the server may return partial results, or return
+ * results out of the order of inserting messages.
+ */
+ record Results(Collection successful, Map errors) implements Event {
+ public Results {
+ successful = List.copyOf(requireNonNull(successful, "successful is null"));
+ errors = Map.copyOf(requireNonNull(errors, "errors is null"));
+ }
+
+ @Override
+ public String toString() {
+ return "Results";
+ }
+ }
+
+ /**
+ * Backoff communicates the optimal batch size (number of objects)
+ * with respect to the current load on the server.
+ *
+ *
+ * Backoff is an instruction, not a recommendation.
+ * On receiving this message, the client must ensure that
+ * all messages it produces, including the one being prepared,
+ * do not exceed the size limit indicated by {@link #maxSize}
+ * until the server sends another Backoff message. The limit
+ * MUST also be respected after a {@link BatchContext#reconnect}.
+ *
+ *
+ * The client MAY use the latest {@link #maxSize} as the default
+ * message limit in a new {@link BatchContext}, but is not required to.
+ */
+ record Backoff(int maxSize) implements Event {
+
+ @Override
+ public String toString() {
+ return "Backoff";
+ }
+ }
+
+ /**
+ * Out-Of-Memory.
+ *
+ *
+ * Items sent in the previous request cannot be accepted,
+ * as inserting them may exhaust server's available disk space.
+ * On receiving this message, the client MUST stop producing
+ * messages immediately and await {@link #SHUTTING_DOWN} event.
+ *
+ *
+ * Oom is the sibling of {@link Acks} with the opposite effect.
+ * The protocol guarantees that the server will respond with either of
+ * the two, but never both.
+ */
+ record Oom(int delaySeconds) implements Event {
+ }
+
+ /** Events that are part of the server's graceful shutdown strategy. */
+ enum TerminationEvent implements Event {
+ /**
+ * Server shutdown in progress.
+ *
+ *
+ * The server began the process of graceful shutdown, due to a
+ * scale-up event (if it previously reported {@link #OOM}) or
+ * some other external event.
+ * On receiving this message, the client MUST stop producing
+ * messages immediately, close its side of the stream, and
+ * continue reading the server's messages until {@link #EOF}.
+ */
+ SHUTTING_DOWN,
+
+ /**
+ * Stream EOF.
+ *
+ *
+ * The server will not receive any more messages. If the client
+ * has more data to send, it SHOULD re-connect to another instance
+ * by re-opening the stream and continue processing the batch.
+ * If the client has previously sent {@link Message#STOP}, it can
+ * safely exit.
+ */
+ EOF;
+ }
+
+ /**
+ * StreamHangup means the RPC is "dead": the stream is closed
+ * and using it will result in an {@link IllegalStateException}.
+ */
+ record StreamHangup(Exception exception) implements Event {
+ static StreamHangup fromThrowable(Throwable t) {
+ Status status = Status.fromThrowable(t);
+ return new StreamHangup(status.asException());
+ }
+ }
+
+ /**
+ * ClientError means a client-side exception has happened,
+ * and is meant primarily for the "send" thread to propagate
+ * any exception it might catch.
+ *
+ *
+ * This MUST be treated as an irrecoverable condition, because
+ * it is likely caused by an internal issue (an NPE) or a bad
+ * input ({@link DataTooBigException}).
+ */
+ record ClientError(Exception exception) implements Event {
+ }
+}
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/batch/Message.java b/src/main/java/io/weaviate/client6/v1/api/collections/batch/Message.java
new file mode 100644
index 000000000..9d387d1b1
--- /dev/null
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/batch/Message.java
@@ -0,0 +1,32 @@
+package io.weaviate.client6.v1.api.collections.batch;
+
+import static java.util.Objects.requireNonNull;
+
+import java.util.Optional;
+
+import io.weaviate.client6.v1.api.collections.query.ConsistencyLevel;
+import io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoBatch;
+
+@FunctionalInterface
+interface Message {
+ void appendTo(WeaviateProtoBatch.BatchStreamRequest.Builder builder);
+
+ /** Create a Start message. */
+ static Message start(Optional consistencyLevel) {
+ requireNonNull(consistencyLevel, "consistencyLevel is null");
+
+ final WeaviateProtoBatch.BatchStreamRequest.Start.Builder start = WeaviateProtoBatch.BatchStreamRequest.Start
+ .newBuilder();
+ consistencyLevel.ifPresent(value -> value.appendTo(start));
+ return builder -> builder.setStart(start);
+ }
+
+ /** Create a Stop message. */
+ static Message stop() {
+ return STOP;
+ }
+
+ static final Message STOP = builder -> builder
+ .setStop(WeaviateProtoBatch.BatchStreamRequest.Stop.getDefaultInstance());
+
+}
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/batch/MessageSizeUtil.java b/src/main/java/io/weaviate/client6/v1/api/collections/batch/MessageSizeUtil.java
new file mode 100644
index 000000000..69835d642
--- /dev/null
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/batch/MessageSizeUtil.java
@@ -0,0 +1,50 @@
+package io.weaviate.client6.v1.api.collections.batch;
+
+import static java.util.Objects.requireNonNull;
+
+import com.google.protobuf.CodedOutputStream;
+import com.google.protobuf.GeneratedMessage;
+import com.google.protobuf.GeneratedMessageV3;
+
+import io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoBatch;
+
+final class MessageSizeUtil {
+ /**
+ * Safety margin of 1KB to allow for the overhead of surrounding Data field tags
+ * and the encoded length of the final payload.
+ *
+ *
+ * Package-private for testing.
+ */
+ static final int SAFETY_MARGIN = 1024;
+
+ private MessageSizeUtil() {
+ }
+
+ /**
+ * Adjust batch byte-size limit to account for the
+ * {@link WeaviateProtoBatch.BatchStreamRequest.Data} container.
+ *
+ *
+ * A protobuf field has layout {@code [tag][length(payload)][payload]},
+ * so to estimate the batch size correctly we must account for "tag"
+ * and "length", not just the raw payload.
+ */
+ static int maxSizeBytes(int maxSizeBytes) {
+ if (maxSizeBytes <= SAFETY_MARGIN) {
+ throw new IllegalArgumentException("Maximum batch size must be at least %dB".formatted(SAFETY_MARGIN));
+ }
+ return maxSizeBytes - SAFETY_MARGIN;
+ }
+
+ /**
+ * Calculate the size of a serialized
+ * {@link WeaviateProtoBatch.BatchStreamRequest.Data} field.
+ */
+ @SuppressWarnings("deprecation") // protoc uses GeneratedMessageV3
+ static int ofDataField(GeneratedMessage.ExtendableMessage message, Data.Type type) {
+ requireNonNull(type, "type is null");
+ requireNonNull(message, "message is null");
+ return CodedOutputStream.computeMessageSize(type.fieldNumber(), message);
+ }
+}
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/batch/ProtocolViolationException.java b/src/main/java/io/weaviate/client6/v1/api/collections/batch/ProtocolViolationException.java
new file mode 100644
index 000000000..e9a90da47
--- /dev/null
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/batch/ProtocolViolationException.java
@@ -0,0 +1,47 @@
+package io.weaviate.client6.v1.api.collections.batch;
+
+import static java.util.Objects.requireNonNull;
+
+import java.util.List;
+
+import io.weaviate.client6.v1.api.WeaviateException;
+
+/**
+ * ProtocolViolationException describes unexpected server behavior in violation
+ * of the SSB protocol.
+ *
+ *
+ * This exception cannot be handled in a meaningful way and should be reported
+ * to the upstream Weaviate
+ * project.
+ */
+public class ProtocolViolationException extends WeaviateException {
+ ProtocolViolationException(String message) {
+ super(message);
+ }
+
+ /**
+ * Protocol violated because an event arrived while the client is in a state
+ * which doesn't expect to handle this event.
+ *
+ * @param current Current {@link BatchContext} state.
+ * @param event Server-side event.
+ * @return ProtocolViolationException with a formatted message.
+ */
+ static ProtocolViolationException illegalStateTransition(State current, Event event) {
+ return new ProtocolViolationException("%s arrived in %s state".formatted(event, current));
+ }
+
+ /**
+ * Protocol violated because some tasks from the previous Data message
+ * are not present in the Acks message.
+ *
+ * @param remaining IDs of the tasks that weren't ack'ed. MUST be non-empty.
+ * @return ProtocolViolationException with a formatted message.
+ */
+ static ProtocolViolationException incompleteAcks(List remaining) {
+ requireNonNull(remaining, "remaining is null");
+ return new ProtocolViolationException("IDs from previous Data message missing in Acks: '%s', ... (%d more)"
+ .formatted(remaining.get(0), remaining.size() - 1));
+ }
+}
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/batch/State.java b/src/main/java/io/weaviate/client6/v1/api/collections/batch/State.java
new file mode 100644
index 000000000..d58b4ac26
--- /dev/null
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/batch/State.java
@@ -0,0 +1,42 @@
+package io.weaviate.client6.v1.api.collections.batch;
+
+interface State {
+ /**
+ * canSend returns a boolean indicating if sending
+ * an "insert" message is allowed in this state.
+ */
+ boolean canSendNext();
+
+ /**
+ * canPrepareNext returns a boolean indicating if accepting
+ * more items into the batch is allowed in this state.
+ */
+ boolean canPrepareNext();
+
+ /**
+ * Lifecycle hook that's called after the state is set.
+ *
+ *
+ *
+ * This hook MUST be called exactly once.
+ *
+ * The next state MUST NOT be set until onEnter returns.
+ *
+ * @param prev Previous state or null.
+ */
+ void onEnter(State prev);
+
+ /**
+ * onEvent handles incoming events; these can be generated by the server
+ * or by a different part of the program -- the {@link State} MUST NOT
+ * make any assumptions about the event's origin.
+ *
+ *
+ * How the event is handled is up to the concrete implementation.
+ * It may modify {@link BatchContext} internal state, via one of its
+ * package-private methods, including transitioning the context to a
+ * different state via {@link BatchContext#setState(State)}, or start
+ * a separate process, e.g. the OOM timer.
+ */
+ void onEvent(Event event);
+}
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/batch/StreamFactory.java b/src/main/java/io/weaviate/client6/v1/api/collections/batch/StreamFactory.java
new file mode 100644
index 000000000..c50eec6ee
--- /dev/null
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/batch/StreamFactory.java
@@ -0,0 +1,13 @@
+package io.weaviate.client6.v1.api.collections.batch;
+
+import io.grpc.stub.StreamObserver;
+
+/**
+ * @param the type of the object sent down the stream.
+ * @param the type of the object received from the stream.
+ */
+@FunctionalInterface
+interface StreamFactory {
+ /** Create a new stream for the send-recv observer pair. */
+ StreamObserver createStream(StreamObserver recv);
+}
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/batch/TaskHandle.java b/src/main/java/io/weaviate/client6/v1/api/collections/batch/TaskHandle.java
new file mode 100644
index 000000000..daf920a1a
--- /dev/null
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/batch/TaskHandle.java
@@ -0,0 +1,179 @@
+package io.weaviate.client6.v1.api.collections.batch;
+
+import static java.util.Objects.requireNonNull;
+
+import java.time.Instant;
+import java.util.Optional;
+import java.util.concurrent.CompletableFuture;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import com.google.protobuf.GeneratedMessage;
+import com.google.protobuf.GeneratedMessageV3;
+
+import io.weaviate.client6.v1.api.collections.WeaviateObject;
+import io.weaviate.client6.v1.api.collections.data.BatchReference;
+
+@ThreadSafe
+@SuppressWarnings("deprecation") // protoc uses GeneratedMessageV3
+public final class TaskHandle {
+ static final TaskHandle POISON = new TaskHandle();
+
+ /**
+ * Input value as passed by the user.
+ *
+ *
+ * Changes in the {@link Data}'s underlying value will not be reflected
+ * in the {@link TaskHandle} (e.g. the serialized version is not updated),
+ * so users SHOULD treat items passed to and retrieved from {@link TaskHandle}
+ * as effectively unmodifiable.
+ */
+ private final Data data;
+
+ /** Flag indicating the task has been ack'ed. */
+ private final CompletableFuture acked = new CompletableFuture<>();
+
+ public final record Result(Optional error) {
+ public Result {
+ requireNonNull(error, "error is null");
+ }
+ }
+
+ /**
+ * Task result completes when the client receives {@link Event.Results}
+ * containing this handle's {@link #id}.
+ */
+ private final CompletableFuture result = new CompletableFuture<>();
+
+ /** The number of times this task has been retried. */
+ private final int retries;
+
+ /** Task creation timestamp. */
+ private final Instant createdAt = Instant.now();
+
+ private TaskHandle(Data data, int retries) {
+ this.data = requireNonNull(data, "data is null");
+
+ assert retries >= 0 : "negative retries";
+ this.retries = retries;
+ }
+
+ /** Constructor for {@link WeaviateObject}. */
+ TaskHandle(WeaviateObject> object, GeneratedMessage.ExtendableMessage data) {
+ this(new Data(object, object.uuid(), data, Data.Type.OBJECT), 0);
+ }
+
+ /** Constructor for {@link BatchReference}. */
+ TaskHandle(BatchReference reference, GeneratedMessage.ExtendableMessage data) {
+ this(new Data(reference, reference.target().beacon(), data, Data.Type.REFERENCE), 0);
+ }
+
+ /**
+ * Poison pill constructor.
+ *
+ *
+ * A handle created with this constructor should not be
+ * used for anything other than direct comparison using {@code ==} operator;
+ * calling any method on a poison pill is likely to result in a
+ * {@link NullPointerException} being thrown.
+ */
+ private TaskHandle() {
+ this.data = null;
+ this.retries = 0;
+ }
+
+ /**
+ * Creates a new task containing the same data as this task and {@link #retries}
+ * counter incremented by 1. The {@link #acked} and {@link #result} futures
+ * are not copied to the returned task.
+ *
+ * @return Task handle.
+ */
+ TaskHandle retry() {
+ return new TaskHandle(data, retries + 1);
+ }
+
+ String id() {
+ return data.id();
+ }
+
+ Data data() {
+ return data;
+ }
+
+ /** Set the {@link #acked} flag. */
+ void setAcked() {
+ acked.complete(null);
+ }
+
+ /**
+ * Mark the task successful. This status cannot be changed, so calling
+ * {@link #setError} afterwards will have no effect.
+ */
+ void setSuccess() {
+ setResult(new Result(Optional.empty()));
+ }
+
+ /**
+ * Mark the task failed. This status cannot be changed, so calling
+ * {@link #setSuccess} afterwards will have no effect.
+ *
+ * @param error Error message. Null values are tolerated, but are only expected
+ * to occur due to a server's mistake.
+ * Do not use {@code setError(null)} if the server reports success
+ * status for the task; prefer {@link #setSuccess} in that case.
+ */
+ void setError(String error) {
+ setResult(new Result(Optional.ofNullable(error)));
+ }
+
+ /**
+ * Set result for this task.
+ *
+ * @throws IllegalStateException if the task has not been ack'ed.
+ */
+ private void setResult(Result result) {
+ if (!acked.isDone()) {
+ // TODO(dyma): can this happen due to us?
+ throw new IllegalStateException("Result can only be set for an ack'ed task");
+ }
+ this.result.complete(result);
+ }
+
+ /**
+ * Check if the task has been accepted.
+ *
+ * @return A future which completes when the server has accepted the task.
+ */
+ public CompletableFuture isAcked() {
+ return acked;
+ }
+
+ /**
+ * Retrieve the result for this task.
+ *
+ * @return A future which completes when the server
+ * has reported the result for this task.
+ */
+ public CompletableFuture result() {
+ return result;
+ }
+
+ /**
+ * Number of times this task has been retried. Since {@link TaskHandle} is
+ * immutable, this value does not change, but retrying a task via
+ * {@link BatchContext#retry} is reflected in the returned handle's
+ * timesRetried.
+ */
+ public int timesRetried() {
+ return retries;
+ }
+
+ @Override
+ public String toString() {
+ if (this == POISON) {
+ return "TaskHandle";
+ }
+ return "TaskHandle".formatted(id(), retries, createdAt);
+ }
+}
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/batch/TranslatingStreamFactory.java b/src/main/java/io/weaviate/client6/v1/api/collections/batch/TranslatingStreamFactory.java
new file mode 100644
index 000000000..cca4848bc
--- /dev/null
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/batch/TranslatingStreamFactory.java
@@ -0,0 +1,143 @@
+package io.weaviate.client6.v1.api.collections.batch;
+
+import static java.util.Objects.requireNonNull;
+
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import io.grpc.stub.StreamObserver;
+import io.weaviate.client6.v1.internal.grpc.protocol.WeaviateGrpc;
+import io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoBatch;
+import io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoBatch.BatchStreamReply;
+
+/**
+ * TranslatingStreamFactory is an adaptor for the
+ * {@link WeaviateGrpc.WeaviateStub#batchStream} factory. The returned stream
+ * translates client-side messages into protobuf requests and server-side
+ * replies into events.
+ *
+ * @see Message
+ * @see Event
+ */
+class TranslatingStreamFactory implements StreamFactory {
+ private final StreamFactory protoFactory;
+
+ TranslatingStreamFactory(
+ StreamFactory protoFactory) {
+ this.protoFactory = requireNonNull(protoFactory, "protoFactory is null");
+ }
+
+ @Override
+ public StreamObserver createStream(StreamObserver recv) {
+ return new Messenger(protoFactory.createStream(new Eventer(recv)));
+ }
+
+ /**
+ * DelegatingStreamObserver delegates {@link #onCompleted} and {@link #onError}
+ * to another observer and translates the messages in {@link #onNext}.
+ *
+ * @param the type of the incoming message.
+ * @param the type of the message handed to the delegate.
+ */
+ private abstract class DelegatingStreamObserver implements StreamObserver {
+ protected final StreamObserver delegate;
+
+ protected DelegatingStreamObserver(StreamObserver delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public void onCompleted() {
+ delegate.onCompleted();
+ }
+
+ @Override
+ public void onError(Throwable t) {
+ delegate.onError(t);
+ }
+ }
+
+ /**
+ * Messenger translates client's messages into batch stream requests.
+ *
+ * @see Message
+ */
+ private final class Messenger extends DelegatingStreamObserver {
+ private Messenger(StreamObserver delegate) {
+ super(delegate);
+ }
+
+ @Override
+ public void onNext(Message message) {
+ WeaviateProtoBatch.BatchStreamRequest.Builder builder = WeaviateProtoBatch.BatchStreamRequest.newBuilder();
+ message.appendTo(builder);
+ delegate.onNext(builder.build());
+ }
+ }
+
+ /**
+ * Eventer translates server replies into events.
+ *
+ * @see Event
+ */
+ private final class Eventer extends DelegatingStreamObserver {
+ private Eventer(StreamObserver delegate) {
+ super(delegate);
+ }
+
+ @Override
+ public void onNext(BatchStreamReply reply) {
+ Event event = null;
+ switch (reply.getMessageCase()) {
+ case STARTED:
+ event = Event.STARTED;
+ break;
+ case SHUTTING_DOWN:
+ event = Event.SHUTTING_DOWN;
+ break;
+ case OUT_OF_MEMORY:
+ event = new Event.Oom(reply.getOutOfMemory().getWaitTime());
+ break;
+ case BACKOFF:
+ event = new Event.Backoff(reply.getBackoff().getBatchSize());
+ break;
+ case ACKS:
+ Stream uuids = reply.getAcks().getUuidsList().stream();
+ Stream beacons = reply.getAcks().getBeaconsList().stream();
+ event = new Event.Acks(Stream.concat(uuids, beacons).toList());
+ break;
+ case RESULTS:
+ List successful = reply.getResults().getSuccessesList().stream()
+ .map(detail -> {
+ if (detail.hasUuid()) {
+ return detail.getUuid();
+ } else if (detail.hasBeacon()) {
+ return detail.getBeacon();
+ }
+ throw new IllegalArgumentException("Result has neither UUID nor a beacon");
+ })
+ .toList();
+
+ Map errors = reply.getResults().getErrorsList().stream()
+ .map(detail -> {
+ String error = requireNonNull(detail.getError(), "error is null");
+ if (detail.hasUuid()) {
+ return Map.entry(detail.getUuid(), error);
+ } else if (detail.hasBeacon()) {
+ return Map.entry(detail.getBeacon(), error);
+ }
+ throw new IllegalArgumentException("Result has neither UUID nor a beacon");
+ })
+ .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue));
+ event = new Event.Results(successful, errors);
+ break;
+ case MESSAGE_NOT_SET:
+ throw new ProtocolViolationException("Message not set");
+ }
+
+ delegate.onNext(event);
+ }
+ }
+}
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/batch/WeaviateBatchClient.java b/src/main/java/io/weaviate/client6/v1/api/collections/batch/WeaviateBatchClient.java
new file mode 100644
index 000000000..86528370e
--- /dev/null
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/batch/WeaviateBatchClient.java
@@ -0,0 +1,81 @@
+package io.weaviate.client6.v1.api.collections.batch;
+
+import static java.util.Objects.requireNonNull;
+
+import java.util.OptionalInt;
+import java.util.function.Function;
+
+import io.weaviate.client6.v1.api.collections.CollectionHandleDefaults;
+import io.weaviate.client6.v1.internal.ObjectBuilder;
+import io.weaviate.client6.v1.internal.TransportOptions;
+import io.weaviate.client6.v1.internal.grpc.GrpcTransport;
+import io.weaviate.client6.v1.internal.orm.CollectionDescriptor;
+
+public class WeaviateBatchClient {
+ private final CollectionHandleDefaults defaults;
+ private final CollectionDescriptor collectionDescriptor;
+ private final GrpcTransport grpcTransport;
+
+ public WeaviateBatchClient(
+ GrpcTransport grpcTransport,
+ CollectionDescriptor collectionDescriptor,
+ CollectionHandleDefaults defaults) {
+ this.defaults = requireNonNull(defaults, "defaults is null");
+ this.collectionDescriptor = requireNonNull(collectionDescriptor, "collectionDescriptor is null");
+ this.grpcTransport = requireNonNull(grpcTransport, "grpcTransport is null");
+ }
+
+ /** Copy constructor with new defaults. */
+ public WeaviateBatchClient(WeaviateBatchClient c, CollectionHandleDefaults defaults) {
+ this.defaults = requireNonNull(defaults, "defaults is null");
+ this.collectionDescriptor = c.collectionDescriptor;
+ this.grpcTransport = c.grpcTransport;
+ }
+
+ public BatchContext start(
+ Function, ObjectBuilder>> fn) {
+ OptionalInt maxSizeBytes = grpcTransport.maxMessageSizeBytes();
+ if (maxSizeBytes.isEmpty()) {
+ throw new IllegalStateException("Server must have grpcMaxMessageSize configured to use server-side batching");
+ }
+
+ StreamFactory streamFactory = new TranslatingStreamFactory(grpcTransport::createStream);
+ BatchContext.Builder builder = new BatchContext.Builder<>(
+ streamFactory, maxSizeBytes.getAsInt(), collectionDescriptor, defaults);
+ BatchContext context = fn.apply(builder).build();
+
+ if (isWeaviateCloudOnGoogleCloud(grpcTransport.host())) {
+ context.scheduleReconnect(GCP_RECONNECT_INTERVAL_SECONDS);
+ }
+
+ context.start();
+ return context;
+ }
+
+ public BatchContext start() {
+ OptionalInt maxSizeBytes = grpcTransport.maxMessageSizeBytes();
+ if (maxSizeBytes.isEmpty()) {
+ throw new IllegalStateException("Server must have grpcMaxMessageSize configured to use server-side batching");
+ }
+
+ StreamFactory streamFactory = new TranslatingStreamFactory(grpcTransport::createStream);
+ BatchContext context = new BatchContext.Builder<>(
+ streamFactory,
+ maxSizeBytes.getAsInt(),
+ collectionDescriptor,
+ defaults).build();
+
+ if (isWeaviateCloudOnGoogleCloud(grpcTransport.host())) {
+ context.scheduleReconnect(GCP_RECONNECT_INTERVAL_SECONDS);
+ }
+
+ context.start();
+ return context;
+ }
+
+ private static final int GCP_RECONNECT_INTERVAL_SECONDS = 160;
+
+ private static boolean isWeaviateCloudOnGoogleCloud(String host) {
+ return TransportOptions.isWeaviateDomain(host) && TransportOptions.isGoogleCloudDomain(host);
+ }
+}
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/config/GetShardsRequest.java b/src/main/java/io/weaviate/client6/v1/api/collections/config/GetShardsRequest.java
index 5237fed26..c27c13aa7 100644
--- a/src/main/java/io/weaviate/client6/v1/api/collections/config/GetShardsRequest.java
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/config/GetShardsRequest.java
@@ -21,8 +21,8 @@ public static final Endpoint> endpoint(
return SimpleEndpoint.noBody(
request -> "GET",
request -> "/schema/" + collection.collectionName() + "/shards",
- request -> defaults.tenant() != null
- ? Map.of("tenant", defaults.tenant())
+ request -> defaults.tenant().isPresent()
+ ? Map.of("tenant", defaults.tenant().get())
: Collections.emptyMap(),
(statusCode, response) -> (List) JSON.deserialize(response, TypeToken.getParameterized(
List.class, Shard.class)));
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/data/BatchReference.java b/src/main/java/io/weaviate/client6/v1/api/collections/data/BatchReference.java
index a83652be5..503ec59d1 100644
--- a/src/main/java/io/weaviate/client6/v1/api/collections/data/BatchReference.java
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/data/BatchReference.java
@@ -1,5 +1,7 @@
package io.weaviate.client6.v1.api.collections.data;
+import static java.util.Objects.requireNonNull;
+
import java.io.IOException;
import java.util.Arrays;
@@ -9,7 +11,14 @@
import io.weaviate.client6.v1.api.collections.WeaviateObject;
-public record BatchReference(String fromCollection, String fromProperty, String fromUuid, ObjectReference reference) {
+public record BatchReference(String fromCollection, String fromProperty, String fromUuid, ObjectReference target) {
+
+ public BatchReference {
+ requireNonNull(fromCollection, "fromCollection is null");
+ requireNonNull(fromProperty, "fromProperty is null");
+ requireNonNull(fromUuid, "fromUuid is null");
+ requireNonNull(target, "target is null");
+ }
public static BatchReference[] objects(WeaviateObject> fromObject, String fromProperty,
WeaviateObject>... toObjects) {
@@ -39,7 +48,7 @@ public void write(JsonWriter out, BatchReference value) throws IOException {
out.value(ObjectReference.toBeacon(value.fromCollection, value.fromProperty, value.fromUuid));
out.name("to");
- out.value(ObjectReference.toBeacon(value.reference.collection(), value.reference.uuid()));
+ out.value(ObjectReference.toBeacon(value.target.collection(), value.target.uuid()));
out.endObject();
}
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/data/DeleteManyRequest.java b/src/main/java/io/weaviate/client6/v1/api/collections/data/DeleteManyRequest.java
index 2fff8681a..e60133957 100644
--- a/src/main/java/io/weaviate/client6/v1/api/collections/data/DeleteManyRequest.java
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/data/DeleteManyRequest.java
@@ -29,11 +29,9 @@ public static Rpc Rpc, WeaviateProtoBat
request -> {
var message = WeaviateProtoBatch.BatchObjectsRequest.newBuilder();
- var batch = request.objects.stream().map(obj -> {
- var batchObject = WeaviateProtoBatch.BatchObject.newBuilder();
- buildObject(batchObject, obj, collection, defaults);
- return batchObject.build();
- }).toList();
-
+ var batch = request.objects.stream()
+ .map(obj -> buildObject(obj, collection, defaults))
+ .toList();
message.addAllObjects(batch);
- if (defaults.consistencyLevel() != null) {
- defaults.consistencyLevel().appendTo(message);
+ if (defaults.consistencyLevel().isPresent()) {
+ defaults.consistencyLevel().get().appendTo(message);
}
+ var m = message.build();
+ m.getSerializedSize();
return message.build();
},
response -> {
@@ -92,10 +94,11 @@ public static Rpc, WeaviateProtoBat
() -> WeaviateFutureStub::batchObjects);
}
- public static void buildObject(WeaviateProtoBatch.BatchObject.Builder object,
+ public static WeaviateProtoBatch.BatchObject buildObject(
WeaviateObject insert,
CollectionDescriptor collection,
CollectionHandleDefaults defaults) {
+ var object = WeaviateProtoBatch.BatchObject.newBuilder();
object.setCollection(collection.collectionName());
if (insert.uuid() != null) {
@@ -121,9 +124,7 @@ public static void buildObject(WeaviateProtoBatch.BatchObject.Builder object
}).toList();
object.addAllVectors(vectors);
}
- if (defaults.tenant() != null) {
- object.setTenant(defaults.tenant());
- }
+ defaults.tenant().ifPresent(object::setTenant);
var singleRef = new ArrayList();
var multiRef = new ArrayList();
@@ -158,6 +159,7 @@ public static void buildObject(WeaviateProtoBatch.BatchObject.Builder object
properties.setNonRefProperties(nonRef);
}
object.setProperties(properties);
+ return object.build();
}
@SuppressWarnings("unchecked")
@@ -330,4 +332,20 @@ private static com.google.protobuf.Struct marshalStruct(Map prop
});
return struct.build();
}
+
+ public static WeaviateProtoBatch.BatchReference buildReference(BatchReference reference, Optional tenant) {
+ requireNonNull(reference, "reference is null");
+ WeaviateProtoBatch.BatchReference.Builder builder = WeaviateProtoBatch.BatchReference.newBuilder();
+ builder
+ .setName(reference.fromProperty())
+ .setFromCollection(reference.fromCollection())
+ .setFromUuid(reference.fromUuid())
+ .setToUuid(reference.target().uuid());
+
+ if (reference.target().collection() != null) {
+ builder.setToCollection(reference.target().collection());
+ }
+ tenant.ifPresent(t -> builder.setTenant(t));
+ return builder.build();
+ }
}
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/data/InsertObjectRequest.java b/src/main/java/io/weaviate/client6/v1/api/collections/data/InsertObjectRequest.java
index 8588eb760..bf4a25273 100644
--- a/src/main/java/io/weaviate/client6/v1/api/collections/data/InsertObjectRequest.java
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/data/InsertObjectRequest.java
@@ -27,14 +27,14 @@ public static final Endpoint, Wea
return new SimpleEndpoint<>(
request -> "POST",
request -> "/objects/",
- request -> defaults.consistencyLevel() != null
- ? Map.of("consistency_level", defaults.consistencyLevel())
+ request -> defaults.consistencyLevel().isPresent()
+ ? Map.of("consistency_level", defaults.consistencyLevel().get())
: Collections.emptyMap(),
request -> JSON.serialize(
new WeaviateObject<>(
request.object.uuid(),
collection.collectionName(),
- defaults.tenant(),
+ defaults.tenant().orElse(null),
request.object.properties(),
request.object.vectors(),
request.object.createdAt(),
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/data/ObjectReference.java b/src/main/java/io/weaviate/client6/v1/api/collections/data/ObjectReference.java
index bb6a0f27e..822c5f54d 100644
--- a/src/main/java/io/weaviate/client6/v1/api/collections/data/ObjectReference.java
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/data/ObjectReference.java
@@ -67,6 +67,10 @@ public static ObjectReference[] collection(String collection, String... uuids) {
.toArray(ObjectReference[]::new);
}
+ public String beacon() {
+ return toBeacon(collection, uuid);
+ }
+
public static String toBeacon(String collection, String uuid) {
return toBeacon(collection, null, uuid);
}
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/data/ReferenceAddManyRequest.java b/src/main/java/io/weaviate/client6/v1/api/collections/data/ReferenceAddManyRequest.java
index 284688daa..752e94adc 100644
--- a/src/main/java/io/weaviate/client6/v1/api/collections/data/ReferenceAddManyRequest.java
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/data/ReferenceAddManyRequest.java
@@ -31,5 +31,4 @@ public static final Endpoint
return new ReferenceAddManyResponse(errors);
});
}
-
}
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/data/ReplaceObjectRequest.java b/src/main/java/io/weaviate/client6/v1/api/collections/data/ReplaceObjectRequest.java
index 13a1afacb..dee59f5df 100644
--- a/src/main/java/io/weaviate/client6/v1/api/collections/data/ReplaceObjectRequest.java
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/data/ReplaceObjectRequest.java
@@ -26,14 +26,14 @@ static final Endpoint, Void> end
return SimpleEndpoint.sideEffect(
request -> "PUT",
request -> "/objects/" + collection.collectionName() + "/" + request.object.uuid(),
- request -> defaults.consistencyLevel() != null
- ? Map.of("consistency_level", defaults.consistencyLevel())
+ request -> defaults.consistencyLevel().isPresent()
+ ? Map.of("consistency_level", defaults.consistencyLevel().get())
: Collections.emptyMap(),
request -> JSON.serialize(
new WeaviateObject<>(
request.object.uuid(),
collection.collectionName(),
- defaults.tenant(),
+ defaults.tenant().orElse(null),
request.object.properties(),
request.object.vectors(),
request.object.createdAt(),
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/data/UpdateObjectRequest.java b/src/main/java/io/weaviate/client6/v1/api/collections/data/UpdateObjectRequest.java
index 6157a1cc8..e401ade85 100644
--- a/src/main/java/io/weaviate/client6/v1/api/collections/data/UpdateObjectRequest.java
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/data/UpdateObjectRequest.java
@@ -26,14 +26,14 @@ static final Endpoint, Void> endp
return SimpleEndpoint.sideEffect(
request -> "PATCH",
request -> "/objects/" + collection.collectionName() + "/" + request.object.uuid(),
- request -> defaults.consistencyLevel() != null
- ? Map.of("consistency_level", defaults.consistencyLevel())
+ request -> defaults.consistencyLevel().isPresent()
+ ? Map.of("consistency_level", defaults.consistencyLevel().get())
: Collections.emptyMap(),
request -> JSON.serialize(
new WeaviateObject<>(
request.object.uuid(),
collection.collectionName(),
- defaults.tenant(),
+ defaults.tenant().orElse(null),
request.object.properties(),
request.object.vectors(),
request.object.createdAt(),
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/data/WeaviateDataClient.java b/src/main/java/io/weaviate/client6/v1/api/collections/data/WeaviateDataClient.java
index c7497ec64..19613dff2 100644
--- a/src/main/java/io/weaviate/client6/v1/api/collections/data/WeaviateDataClient.java
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/data/WeaviateDataClient.java
@@ -35,7 +35,7 @@ public WeaviateDataClient(
this.defaults = defaults;
}
- /** Copy constructor that updates the {@link #query} to use new defaults. */
+ /** Copy constructor with new defaults. */
public WeaviateDataClient(WeaviateDataClient c, CollectionHandleDefaults defaults) {
this.restTransport = c.restTransport;
this.grpcTransport = c.grpcTransport;
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/query/ConsistencyLevel.java b/src/main/java/io/weaviate/client6/v1/api/collections/query/ConsistencyLevel.java
index 326609d2e..fc688cd41 100644
--- a/src/main/java/io/weaviate/client6/v1/api/collections/query/ConsistencyLevel.java
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/query/ConsistencyLevel.java
@@ -43,6 +43,10 @@ public final void appendTo(WeaviateProtoBatch.BatchObjectsRequest.Builder req) {
req.setConsistencyLevel(consistencyLevel);
}
+ public final void appendTo(WeaviateProtoBatch.BatchStreamRequest.Start.Builder req) {
+ req.setConsistencyLevel(consistencyLevel);
+ }
+
@Override
public String toString() {
return queryParameter;
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/query/QueryRequest.java b/src/main/java/io/weaviate/client6/v1/api/collections/query/QueryRequest.java
index 625dde30d..8bb0db91b 100644
--- a/src/main/java/io/weaviate/client6/v1/api/collections/query/QueryRequest.java
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/query/QueryRequest.java
@@ -37,11 +37,9 @@ public static WeaviateProtoSearchGet.SearchRequest marshal(
}
request.operator.appendTo(message);
- if (defaults.tenant() != null) {
- message.setTenant(defaults.tenant());
- }
- if (defaults.consistencyLevel() != null) {
- defaults.consistencyLevel().appendTo(message);
+ defaults.tenant().ifPresent(message::setTenant);
+ if (defaults.consistencyLevel().isPresent()) {
+ defaults.consistencyLevel().get().appendTo(message);
}
if (request.groupBy != null) {
diff --git a/src/main/java/io/weaviate/client6/v1/api/collections/query/QueryResponse.java b/src/main/java/io/weaviate/client6/v1/api/collections/query/QueryResponse.java
index b1bc7369e..751492591 100644
--- a/src/main/java/io/weaviate/client6/v1/api/collections/query/QueryResponse.java
+++ b/src/main/java/io/weaviate/client6/v1/api/collections/query/QueryResponse.java
@@ -3,7 +3,6 @@
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
-import java.util.Map;
import java.util.UUID;
import java.util.stream.Stream;
@@ -92,20 +91,9 @@ static WeaviateObject unmarshalWithReferences(
(map, ref) -> {
var refObjects = ref.getPropertiesList().stream()
.map(property -> {
- var reference = unmarshalWithReferences(
+ return (Reference) unmarshalWithReferences(
property, property.getMetadata(),
CollectionDescriptor.ofMap(property.getTargetCollection()));
- return (Reference) new WeaviateObject<>(
- reference.uuid(),
- reference.collection(),
- // TODO(dyma): we can get tenant from CollectionHandle
- null, // tenant is not returned in the query
- (Map) reference.properties(),
- reference.vectors(),
- reference.createdAt(),
- reference.lastUpdatedAt(),
- reference.queryMetadata(),
- reference.references());
})
.toList();
diff --git a/src/main/java/io/weaviate/client6/v1/internal/TransportOptions.java b/src/main/java/io/weaviate/client6/v1/internal/TransportOptions.java
index 897bb28cd..06c0b6c15 100644
--- a/src/main/java/io/weaviate/client6/v1/internal/TransportOptions.java
+++ b/src/main/java/io/weaviate/client6/v1/internal/TransportOptions.java
@@ -57,4 +57,20 @@ public H headers() {
public TrustManagerFactory trustManagerFactory() {
return this.trustManagerFactory;
}
+
+ /**
+ * isWeaviateDomain returns true if the host matches weaviate.io,
+ * semi.technology, or weaviate.cloud domain.
+ */
+ public static boolean isWeaviateDomain(String host) {
+ var lower = host.toLowerCase();
+ return lower.contains("weaviate.io") ||
+ lower.contains("semi.technology") ||
+ lower.contains("weaviate.cloud");
+ }
+
+ public static boolean isGoogleCloudDomain(String host) {
+ var lower = host.toLowerCase();
+ return lower.contains("gcp");
+ }
}
diff --git a/src/main/java/io/weaviate/client6/v1/internal/grpc/DefaultGrpcTransport.java b/src/main/java/io/weaviate/client6/v1/internal/grpc/DefaultGrpcTransport.java
index d12255d22..385808ffc 100644
--- a/src/main/java/io/weaviate/client6/v1/internal/grpc/DefaultGrpcTransport.java
+++ b/src/main/java/io/weaviate/client6/v1/internal/grpc/DefaultGrpcTransport.java
@@ -1,6 +1,10 @@
package io.weaviate.client6.v1.internal.grpc;
+import static java.util.Objects.requireNonNull;
+
+import java.util.OptionalInt;
import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import javax.net.ssl.SSLException;
@@ -16,57 +20,57 @@
import io.grpc.netty.shaded.io.netty.handler.ssl.SslContext;
import io.grpc.stub.AbstractStub;
import io.grpc.stub.MetadataUtils;
+import io.grpc.stub.StreamObserver;
import io.weaviate.client6.v1.api.WeaviateApiException;
import io.weaviate.client6.v1.internal.grpc.protocol.WeaviateGrpc;
import io.weaviate.client6.v1.internal.grpc.protocol.WeaviateGrpc.WeaviateBlockingStub;
import io.weaviate.client6.v1.internal.grpc.protocol.WeaviateGrpc.WeaviateFutureStub;
+import io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoBatch.BatchStreamReply;
+import io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoBatch.BatchStreamRequest;
public final class DefaultGrpcTransport implements GrpcTransport {
+ /**
+ * ListenableFuture callbacks are executed
+ * in the same thread they are called from.
+ */
+ private static final Executor FUTURE_CALLBACK_EXECUTOR = Runnable::run;
+
+ private final GrpcChannelOptions transportOptions;
private final ManagedChannel channel;
private final WeaviateBlockingStub blockingStub;
private final WeaviateFutureStub futureStub;
- private final GrpcChannelOptions transportOptions;
-
private TokenCallCredentials callCredentials;
public DefaultGrpcTransport(GrpcChannelOptions transportOptions) {
- this.transportOptions = transportOptions;
- this.channel = buildChannel(transportOptions);
-
- var blockingStub = WeaviateGrpc.newBlockingStub(channel)
- .withInterceptors(MetadataUtils.newAttachHeadersInterceptor(transportOptions.headers()));
-
- var futureStub = WeaviateGrpc.newFutureStub(channel)
- .withInterceptors(MetadataUtils.newAttachHeadersInterceptor(transportOptions.headers()));
-
- if (transportOptions.maxMessageSize() != null) {
- var max = transportOptions.maxMessageSize();
- blockingStub = blockingStub.withMaxInboundMessageSize(max).withMaxOutboundMessageSize(max);
- futureStub = futureStub.withMaxInboundMessageSize(max).withMaxOutboundMessageSize(max);
- }
+ requireNonNull(transportOptions, "transportOptions is null");
+ this.transportOptions = transportOptions;
if (transportOptions.tokenProvider() != null) {
this.callCredentials = new TokenCallCredentials(transportOptions.tokenProvider());
- blockingStub = blockingStub.withCallCredentials(callCredentials);
- futureStub = futureStub.withCallCredentials(callCredentials);
}
- this.blockingStub = blockingStub;
- this.futureStub = futureStub;
+ this.channel = buildChannel(transportOptions);
+ this.blockingStub = configure(WeaviateGrpc.newBlockingStub(channel));
+ this.futureStub = configure(WeaviateGrpc.newFutureStub(channel));
}
private > StubT applyTimeout(StubT stub, Rpc, ?, ?, ?> rpc) {
if (transportOptions.timeout() == null) {
return stub;
}
- var timeout = rpc.isInsert()
+ int timeout = rpc.isInsert()
? transportOptions.timeout().insertSeconds()
: transportOptions.timeout().querySeconds();
return stub.withDeadlineAfter(timeout, TimeUnit.SECONDS);
}
+ @Override
+ public OptionalInt maxMessageSizeBytes() {
+ return transportOptions.maxMessageSize();
+ }
+
@Override
public ResponseT performRequest(RequestT request,
Rpc rpc) {
@@ -96,7 +100,9 @@ public CompletableFuture perf
* reusing the thread in which the original future is completed.
*/
private static final CompletableFuture toCompletableFuture(ListenableFuture listenable) {
- var completable = new CompletableFuture();
+ requireNonNull(listenable, "listenable is null");
+
+ CompletableFuture completable = new CompletableFuture<>();
Futures.addCallback(listenable, new FutureCallback() {
@Override
@@ -113,13 +119,14 @@ public void onFailure(Throwable t) {
completable.completeExceptionally(t);
}
- }, Runnable::run);
+ }, FUTURE_CALLBACK_EXECUTOR);
return completable;
}
private static ManagedChannel buildChannel(GrpcChannelOptions transportOptions) {
- var channel = NettyChannelBuilder.forAddress(transportOptions.host(), transportOptions.port());
+ requireNonNull(transportOptions, "transportOptions is null");
+ NettyChannelBuilder channel = NettyChannelBuilder.forAddress(transportOptions.host(), transportOptions.port());
if (transportOptions.isSecure()) {
channel.useTransportSecurity();
} else {
@@ -140,10 +147,29 @@ private static ManagedChannel buildChannel(GrpcChannelOptions transportOptions)
}
channel.intercept(MetadataUtils.newAttachHeadersInterceptor(transportOptions.headers()));
-
return channel.build();
}
+ @Override
+ public StreamObserver createStream(StreamObserver recv) {
+ return configure(WeaviateGrpc.newStub(channel)).batchStream(recv);
+ }
+
+ /** Apply common configuration to a stub. */
+ private > S configure(S stub) {
+ requireNonNull(stub, "stub is null");
+
+ stub = stub.withInterceptors(MetadataUtils.newAttachHeadersInterceptor(transportOptions.headers()));
+ if (transportOptions.maxMessageSize().isPresent()) {
+ int max = transportOptions.maxMessageSize().getAsInt();
+ stub = stub.withMaxInboundMessageSize(max).withMaxOutboundMessageSize(max);
+ }
+ if (callCredentials != null) {
+ stub = stub.withCallCredentials(callCredentials);
+ }
+ return stub;
+ }
+
@Override
public void close() throws Exception {
channel.shutdown();
@@ -151,4 +177,9 @@ public void close() throws Exception {
callCredentials.close();
}
}
+
+ @Override
+ public String host() {
+ return transportOptions.host();
+ }
}
diff --git a/src/main/java/io/weaviate/client6/v1/internal/grpc/GrpcChannelOptions.java b/src/main/java/io/weaviate/client6/v1/internal/grpc/GrpcChannelOptions.java
index 5e4453d7f..96366cb5f 100644
--- a/src/main/java/io/weaviate/client6/v1/internal/grpc/GrpcChannelOptions.java
+++ b/src/main/java/io/weaviate/client6/v1/internal/grpc/GrpcChannelOptions.java
@@ -1,6 +1,7 @@
package io.weaviate.client6.v1.internal.grpc;
import java.util.Map;
+import java.util.OptionalInt;
import javax.net.ssl.TrustManagerFactory;
@@ -10,7 +11,7 @@
import io.weaviate.client6.v1.internal.TransportOptions;
public class GrpcChannelOptions extends TransportOptions {
- private final Integer maxMessageSize;
+ private final OptionalInt maxMessageSize;
public GrpcChannelOptions(String scheme, String host, int port, Map headers,
TokenProvider tokenProvider, TrustManagerFactory tmf, Timeout timeout) {
@@ -18,17 +19,18 @@ public GrpcChannelOptions(String scheme, String host, int port, Map ResponseT performRequest(RequestT request,
- Rpc rpc);
+ ResponseT performRequest(RequestT request,
+ Rpc rpc);
+
+ CompletableFuture performRequestAsync(RequestT request,
+ Rpc rpc);
+
+ /**
+ * Create stream for batch insertion.
+ *
+ *
+ * Batch insertion is presently the only operation performed over a
+ * StreamStream connection, which is why we do not parametrize this
+ * method.
+ */
+ StreamObserver createStream(
+ StreamObserver recv);
+
+ String host();
- CompletableFuture performRequestAsync(RequestT request,
- Rpc rpc);
+ /**
+ * Maximum inbound/outbound message size supported by the underlying channel.
+ */
+ OptionalInt maxMessageSizeBytes();
}
diff --git a/src/main/java/io/weaviate/client6/v1/internal/grpc/protocol/FileReplicationServiceGrpc.java b/src/main/java/io/weaviate/client6/v1/internal/grpc/protocol/FileReplicationServiceGrpc.java
deleted file mode 100644
index 1db25a027..000000000
--- a/src/main/java/io/weaviate/client6/v1/internal/grpc/protocol/FileReplicationServiceGrpc.java
+++ /dev/null
@@ -1,557 +0,0 @@
-package io.weaviate.client6.v1.internal.grpc.protocol;
-
-import static io.grpc.MethodDescriptor.generateFullMethodName;
-
-/**
- */
-@javax.annotation.Generated(
- value = "by gRPC proto compiler (version 1.58.0)",
- comments = "Source: v1/file_replication.proto")
-@io.grpc.stub.annotations.GrpcGenerated
-public final class FileReplicationServiceGrpc {
-
- private FileReplicationServiceGrpc() {}
-
- public static final java.lang.String SERVICE_NAME = "weaviate.v1.FileReplicationService";
-
- // Static method descriptors that strictly reflect the proto.
- private static volatile io.grpc.MethodDescriptor getPauseFileActivityMethod;
-
- @io.grpc.stub.annotations.RpcMethod(
- fullMethodName = SERVICE_NAME + '/' + "PauseFileActivity",
- requestType = io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.PauseFileActivityRequest.class,
- responseType = io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.PauseFileActivityResponse.class,
- methodType = io.grpc.MethodDescriptor.MethodType.UNARY)
- public static io.grpc.MethodDescriptor getPauseFileActivityMethod() {
- io.grpc.MethodDescriptor getPauseFileActivityMethod;
- if ((getPauseFileActivityMethod = FileReplicationServiceGrpc.getPauseFileActivityMethod) == null) {
- synchronized (FileReplicationServiceGrpc.class) {
- if ((getPauseFileActivityMethod = FileReplicationServiceGrpc.getPauseFileActivityMethod) == null) {
- FileReplicationServiceGrpc.getPauseFileActivityMethod = getPauseFileActivityMethod =
- io.grpc.MethodDescriptor.newBuilder()
- .setType(io.grpc.MethodDescriptor.MethodType.UNARY)
- .setFullMethodName(generateFullMethodName(SERVICE_NAME, "PauseFileActivity"))
- .setSampledToLocalTracing(true)
- .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
- io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.PauseFileActivityRequest.getDefaultInstance()))
- .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
- io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.PauseFileActivityResponse.getDefaultInstance()))
- .setSchemaDescriptor(new FileReplicationServiceMethodDescriptorSupplier("PauseFileActivity"))
- .build();
- }
- }
- }
- return getPauseFileActivityMethod;
- }
-
- private static volatile io.grpc.MethodDescriptor getResumeFileActivityMethod;
-
- @io.grpc.stub.annotations.RpcMethod(
- fullMethodName = SERVICE_NAME + '/' + "ResumeFileActivity",
- requestType = io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.ResumeFileActivityRequest.class,
- responseType = io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.ResumeFileActivityResponse.class,
- methodType = io.grpc.MethodDescriptor.MethodType.UNARY)
- public static io.grpc.MethodDescriptor getResumeFileActivityMethod() {
- io.grpc.MethodDescriptor getResumeFileActivityMethod;
- if ((getResumeFileActivityMethod = FileReplicationServiceGrpc.getResumeFileActivityMethod) == null) {
- synchronized (FileReplicationServiceGrpc.class) {
- if ((getResumeFileActivityMethod = FileReplicationServiceGrpc.getResumeFileActivityMethod) == null) {
- FileReplicationServiceGrpc.getResumeFileActivityMethod = getResumeFileActivityMethod =
- io.grpc.MethodDescriptor.newBuilder()
- .setType(io.grpc.MethodDescriptor.MethodType.UNARY)
- .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ResumeFileActivity"))
- .setSampledToLocalTracing(true)
- .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
- io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.ResumeFileActivityRequest.getDefaultInstance()))
- .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
- io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.ResumeFileActivityResponse.getDefaultInstance()))
- .setSchemaDescriptor(new FileReplicationServiceMethodDescriptorSupplier("ResumeFileActivity"))
- .build();
- }
- }
- }
- return getResumeFileActivityMethod;
- }
-
- private static volatile io.grpc.MethodDescriptor getListFilesMethod;
-
- @io.grpc.stub.annotations.RpcMethod(
- fullMethodName = SERVICE_NAME + '/' + "ListFiles",
- requestType = io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.ListFilesRequest.class,
- responseType = io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.ListFilesResponse.class,
- methodType = io.grpc.MethodDescriptor.MethodType.UNARY)
- public static io.grpc.MethodDescriptor getListFilesMethod() {
- io.grpc.MethodDescriptor getListFilesMethod;
- if ((getListFilesMethod = FileReplicationServiceGrpc.getListFilesMethod) == null) {
- synchronized (FileReplicationServiceGrpc.class) {
- if ((getListFilesMethod = FileReplicationServiceGrpc.getListFilesMethod) == null) {
- FileReplicationServiceGrpc.getListFilesMethod = getListFilesMethod =
- io.grpc.MethodDescriptor.newBuilder()
- .setType(io.grpc.MethodDescriptor.MethodType.UNARY)
- .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ListFiles"))
- .setSampledToLocalTracing(true)
- .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
- io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.ListFilesRequest.getDefaultInstance()))
- .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
- io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.ListFilesResponse.getDefaultInstance()))
- .setSchemaDescriptor(new FileReplicationServiceMethodDescriptorSupplier("ListFiles"))
- .build();
- }
- }
- }
- return getListFilesMethod;
- }
-
- private static volatile io.grpc.MethodDescriptor getGetFileMetadataMethod;
-
- @io.grpc.stub.annotations.RpcMethod(
- fullMethodName = SERVICE_NAME + '/' + "GetFileMetadata",
- requestType = io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.GetFileMetadataRequest.class,
- responseType = io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.FileMetadata.class,
- methodType = io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING)
- public static io.grpc.MethodDescriptor getGetFileMetadataMethod() {
- io.grpc.MethodDescriptor getGetFileMetadataMethod;
- if ((getGetFileMetadataMethod = FileReplicationServiceGrpc.getGetFileMetadataMethod) == null) {
- synchronized (FileReplicationServiceGrpc.class) {
- if ((getGetFileMetadataMethod = FileReplicationServiceGrpc.getGetFileMetadataMethod) == null) {
- FileReplicationServiceGrpc.getGetFileMetadataMethod = getGetFileMetadataMethod =
- io.grpc.MethodDescriptor.newBuilder()
- .setType(io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING)
- .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetFileMetadata"))
- .setSampledToLocalTracing(true)
- .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
- io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.GetFileMetadataRequest.getDefaultInstance()))
- .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
- io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.FileMetadata.getDefaultInstance()))
- .setSchemaDescriptor(new FileReplicationServiceMethodDescriptorSupplier("GetFileMetadata"))
- .build();
- }
- }
- }
- return getGetFileMetadataMethod;
- }
-
- private static volatile io.grpc.MethodDescriptor getGetFileMethod;
-
- @io.grpc.stub.annotations.RpcMethod(
- fullMethodName = SERVICE_NAME + '/' + "GetFile",
- requestType = io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.GetFileRequest.class,
- responseType = io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.FileChunk.class,
- methodType = io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING)
- public static io.grpc.MethodDescriptor getGetFileMethod() {
- io.grpc.MethodDescriptor getGetFileMethod;
- if ((getGetFileMethod = FileReplicationServiceGrpc.getGetFileMethod) == null) {
- synchronized (FileReplicationServiceGrpc.class) {
- if ((getGetFileMethod = FileReplicationServiceGrpc.getGetFileMethod) == null) {
- FileReplicationServiceGrpc.getGetFileMethod = getGetFileMethod =
- io.grpc.MethodDescriptor.newBuilder()
- .setType(io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING)
- .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetFile"))
- .setSampledToLocalTracing(true)
- .setRequestMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
- io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.GetFileRequest.getDefaultInstance()))
- .setResponseMarshaller(io.grpc.protobuf.ProtoUtils.marshaller(
- io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.FileChunk.getDefaultInstance()))
- .setSchemaDescriptor(new FileReplicationServiceMethodDescriptorSupplier("GetFile"))
- .build();
- }
- }
- }
- return getGetFileMethod;
- }
-
- /**
- * Creates a new async stub that supports all call types for the service
- */
- public static FileReplicationServiceStub newStub(io.grpc.Channel channel) {
- io.grpc.stub.AbstractStub.StubFactory factory =
- new io.grpc.stub.AbstractStub.StubFactory() {
- @java.lang.Override
- public FileReplicationServiceStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
- return new FileReplicationServiceStub(channel, callOptions);
- }
- };
- return FileReplicationServiceStub.newStub(factory, channel);
- }
-
- /**
- * Creates a new blocking-style stub that supports unary and streaming output calls on the service
- */
- public static FileReplicationServiceBlockingStub newBlockingStub(
- io.grpc.Channel channel) {
- io.grpc.stub.AbstractStub.StubFactory factory =
- new io.grpc.stub.AbstractStub.StubFactory() {
- @java.lang.Override
- public FileReplicationServiceBlockingStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
- return new FileReplicationServiceBlockingStub(channel, callOptions);
- }
- };
- return FileReplicationServiceBlockingStub.newStub(factory, channel);
- }
-
- /**
- * Creates a new ListenableFuture-style stub that supports unary calls on the service
- */
- public static FileReplicationServiceFutureStub newFutureStub(
- io.grpc.Channel channel) {
- io.grpc.stub.AbstractStub.StubFactory factory =
- new io.grpc.stub.AbstractStub.StubFactory() {
- @java.lang.Override
- public FileReplicationServiceFutureStub newStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
- return new FileReplicationServiceFutureStub(channel, callOptions);
- }
- };
- return FileReplicationServiceFutureStub.newStub(factory, channel);
- }
-
- /**
- */
- public interface AsyncService {
-
- /**
- */
- default void pauseFileActivity(io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.PauseFileActivityRequest request,
- io.grpc.stub.StreamObserver responseObserver) {
- io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getPauseFileActivityMethod(), responseObserver);
- }
-
- /**
- */
- default void resumeFileActivity(io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.ResumeFileActivityRequest request,
- io.grpc.stub.StreamObserver responseObserver) {
- io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getResumeFileActivityMethod(), responseObserver);
- }
-
- /**
- */
- default void listFiles(io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.ListFilesRequest request,
- io.grpc.stub.StreamObserver responseObserver) {
- io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getListFilesMethod(), responseObserver);
- }
-
- /**
- */
- default io.grpc.stub.StreamObserver getFileMetadata(
- io.grpc.stub.StreamObserver responseObserver) {
- return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall(getGetFileMetadataMethod(), responseObserver);
- }
-
- /**
- */
- default io.grpc.stub.StreamObserver getFile(
- io.grpc.stub.StreamObserver responseObserver) {
- return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall(getGetFileMethod(), responseObserver);
- }
- }
-
- /**
- * Base class for the server implementation of the service FileReplicationService.
- */
- public static abstract class FileReplicationServiceImplBase
- implements io.grpc.BindableService, AsyncService {
-
- @java.lang.Override public final io.grpc.ServerServiceDefinition bindService() {
- return FileReplicationServiceGrpc.bindService(this);
- }
- }
-
- /**
- * A stub to allow clients to do asynchronous rpc calls to service FileReplicationService.
- */
- public static final class FileReplicationServiceStub
- extends io.grpc.stub.AbstractAsyncStub {
- private FileReplicationServiceStub(
- io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
- super(channel, callOptions);
- }
-
- @java.lang.Override
- protected FileReplicationServiceStub build(
- io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
- return new FileReplicationServiceStub(channel, callOptions);
- }
-
- /**
- */
- public void pauseFileActivity(io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.PauseFileActivityRequest request,
- io.grpc.stub.StreamObserver responseObserver) {
- io.grpc.stub.ClientCalls.asyncUnaryCall(
- getChannel().newCall(getPauseFileActivityMethod(), getCallOptions()), request, responseObserver);
- }
-
- /**
- */
- public void resumeFileActivity(io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.ResumeFileActivityRequest request,
- io.grpc.stub.StreamObserver responseObserver) {
- io.grpc.stub.ClientCalls.asyncUnaryCall(
- getChannel().newCall(getResumeFileActivityMethod(), getCallOptions()), request, responseObserver);
- }
-
- /**
- */
- public void listFiles(io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.ListFilesRequest request,
- io.grpc.stub.StreamObserver responseObserver) {
- io.grpc.stub.ClientCalls.asyncUnaryCall(
- getChannel().newCall(getListFilesMethod(), getCallOptions()), request, responseObserver);
- }
-
- /**
- */
- public io.grpc.stub.StreamObserver getFileMetadata(
- io.grpc.stub.StreamObserver responseObserver) {
- return io.grpc.stub.ClientCalls.asyncBidiStreamingCall(
- getChannel().newCall(getGetFileMetadataMethod(), getCallOptions()), responseObserver);
- }
-
- /**
- */
- public io.grpc.stub.StreamObserver getFile(
- io.grpc.stub.StreamObserver responseObserver) {
- return io.grpc.stub.ClientCalls.asyncBidiStreamingCall(
- getChannel().newCall(getGetFileMethod(), getCallOptions()), responseObserver);
- }
- }
-
- /**
- * A stub to allow clients to do synchronous rpc calls to service FileReplicationService.
- */
- public static final class FileReplicationServiceBlockingStub
- extends io.grpc.stub.AbstractBlockingStub {
- private FileReplicationServiceBlockingStub(
- io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
- super(channel, callOptions);
- }
-
- @java.lang.Override
- protected FileReplicationServiceBlockingStub build(
- io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
- return new FileReplicationServiceBlockingStub(channel, callOptions);
- }
-
- /**
- */
- public io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.PauseFileActivityResponse pauseFileActivity(io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.PauseFileActivityRequest request) {
- return io.grpc.stub.ClientCalls.blockingUnaryCall(
- getChannel(), getPauseFileActivityMethod(), getCallOptions(), request);
- }
-
- /**
- */
- public io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.ResumeFileActivityResponse resumeFileActivity(io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.ResumeFileActivityRequest request) {
- return io.grpc.stub.ClientCalls.blockingUnaryCall(
- getChannel(), getResumeFileActivityMethod(), getCallOptions(), request);
- }
-
- /**
- */
- public io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.ListFilesResponse listFiles(io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.ListFilesRequest request) {
- return io.grpc.stub.ClientCalls.blockingUnaryCall(
- getChannel(), getListFilesMethod(), getCallOptions(), request);
- }
- }
-
- /**
- * A stub to allow clients to do ListenableFuture-style rpc calls to service FileReplicationService.
- */
- public static final class FileReplicationServiceFutureStub
- extends io.grpc.stub.AbstractFutureStub {
- private FileReplicationServiceFutureStub(
- io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
- super(channel, callOptions);
- }
-
- @java.lang.Override
- protected FileReplicationServiceFutureStub build(
- io.grpc.Channel channel, io.grpc.CallOptions callOptions) {
- return new FileReplicationServiceFutureStub(channel, callOptions);
- }
-
- /**
- */
- public com.google.common.util.concurrent.ListenableFuture pauseFileActivity(
- io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.PauseFileActivityRequest request) {
- return io.grpc.stub.ClientCalls.futureUnaryCall(
- getChannel().newCall(getPauseFileActivityMethod(), getCallOptions()), request);
- }
-
- /**
- */
- public com.google.common.util.concurrent.ListenableFuture resumeFileActivity(
- io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.ResumeFileActivityRequest request) {
- return io.grpc.stub.ClientCalls.futureUnaryCall(
- getChannel().newCall(getResumeFileActivityMethod(), getCallOptions()), request);
- }
-
- /**
- */
- public com.google.common.util.concurrent.ListenableFuture listFiles(
- io.weaviate.client6.v1.internal.grpc.protocol.WeaviateProtoFileReplication.ListFilesRequest request) {
- return io.grpc.stub.ClientCalls.futureUnaryCall(
- getChannel().newCall(getListFilesMethod(), getCallOptions()), request);
- }
- }
-
- private static final int METHODID_PAUSE_FILE_ACTIVITY = 0;
- private static final int METHODID_RESUME_FILE_ACTIVITY = 1;
- private static final int METHODID_LIST_FILES = 2;
- private static final int METHODID_GET_FILE_METADATA = 3;
- private static final int METHODID_GET_FILE = 4;
-
- private static final class MethodHandlers implements
- io.grpc.stub.ServerCalls.UnaryMethod,
- io.grpc.stub.ServerCalls.ServerStreamingMethod,
- io.grpc.stub.ServerCalls.ClientStreamingMethod,
- io.grpc.stub.ServerCalls.BidiStreamingMethod {
- private final AsyncService serviceImpl;
- private final int methodId;
-
- MethodHandlers(AsyncService serviceImpl, int methodId) {
- this.serviceImpl = serviceImpl;
- this.methodId = methodId;
- }
-
- @java.lang.Override
- @java.lang.SuppressWarnings("unchecked")
- public void invoke(Req request, io.grpc.stub.StreamObserver