# Batch Sequencing with Limit Orders

On The Grid, batch processing is not a separate API. You manage your workload within your own system, submit a limit order, and run standard chat completions once the order fills at your set price.

This pattern replaces traditional batch APIs with two steps:

* Queue capacity with a limit order
  * Estimate how many tokens the job will need and convert that into Units
  * Place a limit buy at your max price and wait until enough Units are filled
  * This effectively schedules the job by price and supply, not by file upload
* Execute the batch
  * Once capacity is available, send requests as usual with controlled concurrency
  * Track progress and results while respecting rate limits

The key idea is that limit orders act as your batch queue. The job starts automatically when the market clears at your price, giving you cost control and predictable execution without a separate batch system. Use the following reference implementation to get started with batch processing:

{% tabs %}
{% tab title="Python" %}

```python
# Estimate job size, place a limit order for tokens (1 unit = 1M tokens),
# wait until balance is available, then run the batch.

import asyncio
import base64
import json
import math
import os
import time
from typing import List

import requests
from nacl.signing import SigningKey
from openai import AsyncOpenAI

# Service endpoints and the market that sells consumption Units.
CONSUMPTION_BASE_URL = "https://consumption.api.thegrid.ai/api/v1"
TRADING_BASE_URL = "https://trading.api.thegrid.ai"
MARKET_ID = "market_788dcbd5-ac68-4c61-acf1-4443beaf2a1c"
# Highest price per Unit we are willing to pay on the limit order.
MAX_PRICE = float(os.getenv("MAX_PRICE", "2.50"))
# How long to poll for the order to fill before giving up, in seconds.
MAX_WAIT_SEC = int(os.getenv("MAX_WAIT_SEC", "120"))
# When true, whole Units already held in the consumption balance reduce
# the quantity we need to buy.
INCLUDE_EXISTING_BALANCE = (
    os.getenv("INCLUDE_EXISTING_BALANCE", "false").lower() == "true"
)

# The batch workload: one chat completion per prompt.
PROMPTS: List[str] = [
    "What is 2 + 2?",
    "Name three colors.",
    "What is the capital of France?",
    "How many days in a week?",
    "What is H2O?",
]

# Model and concurrency settings for the batch run.
CONFIG = {
    "model": "chat-fast",
    "max_concurrent": 3,
}


class SignatureAuth:
    """Signs trading API requests with an Ed25519 key.

    The signed message is timestamp + METHOD + path + body; requests carry
    the signature, the timestamp, and the key fingerprint as headers.
    """

    def __init__(self, private_key_b64: str, fingerprint: str):
        # Only the first 32 bytes (the seed) are used, so either a seed or a
        # longer key encoding is accepted.
        seed = base64.b64decode(private_key_b64)[:32]
        self.private_key = SigningKey(seed)
        self.fingerprint = fingerprint

    def get_headers(self, method: str, path: str, body: str = ""):
        """Build auth headers; `body` must be the exact string sent."""
        now = str(int(time.time()))
        payload = "".join((now, method.upper(), path, body))
        signed = self.private_key.sign(payload.encode()).signature
        return {
            "x-thegrid-signature": base64.b64encode(signed).decode(),
            "x-thegrid-timestamp": now,
            "x-thegrid-fingerprint": self.fingerprint,
        }


# Async OpenAI-compatible client pointed at the Grid consumption endpoint.
client = AsyncOpenAI(
    api_key=os.environ["GRID_CONSUMPTION_API_KEY"],
    base_url=CONSUMPTION_BASE_URL,
)
# Signer for the trading API, keyed from the environment.
trading_auth = SignatureAuth(
    os.environ["GRID_TRADING_PRIVATE_KEY"],
    os.environ["GRID_TRADING_FINGERPRINT"],
)


def estimate_tokens(prompt_list: List[str]) -> int:
    """Rough token estimate: ~4 chars per token plus a 50-char buffer per prompt."""
    total = 0
    for text in prompt_list:
        # Integer ceiling of (len + 50) / 4, avoiding float math.
        total += (len(text) + 50 + 3) // 4
    return total


def units_needed(total_tokens: int) -> int:
    """Whole Units (1M tokens each) needed to cover total_tokens; minimum 1."""
    units = -(-total_tokens // 1_000_000)  # ceiling division
    return units if units > 1 else 1


def get_consumption_balance(auth: SignatureAuth) -> float:
    """Return the summed consumption balance across all accounts, in Units.

    For each account the larger of available_balance and total_balance is
    counted; accounts whose balances cannot be parsed are skipped.
    """
    path = "/api/v1/trading/consumption-accounts"
    response = requests.get(
        f"{TRADING_BASE_URL}{path}?order_by=created_at",
        headers=auth.get_headers("GET", path),
        timeout=10,
    )
    response.raise_for_status()
    total_units = 0.0
    for account in response.json()["data"]:
        try:
            avail = float(account.get("available_balance", 0) or 0)
            held = float(account.get("total_balance", 0) or 0)
        except (TypeError, ValueError):
            continue
        total_units += avail if avail > held else held
    return total_units


def place_limit_order(auth: SignatureAuth, quantity: int) -> str:
    """Place a GTC limit buy for `quantity` Units at MAX_PRICE.

    The body is serialized once and handed both to the signer and to the
    request, so the signed bytes match the bytes sent. Returns the new
    order id.

    Raises:
        requests.HTTPError: if the trading API rejects the request.
    """
    order_data = {
        "market_id": MARKET_ID,
        "side": "buy",
        "type": "limit",
        "quantity": quantity,
        "price": f"{MAX_PRICE:.2f}",
        "time_in_force": "gtc",
        "client_order_id": f"batch-{int(time.time())}",
    }
    path = "/api/v1/trading/orders"
    # Use the stdlib serializer directly instead of the json_dumps() shim
    # defined at the bottom of the file.
    body = json.dumps(order_data)
    resp = requests.post(
        f"{TRADING_BASE_URL}{path}",
        data=body,
        headers={
            "Content-Type": "application/json",
            **auth.get_headers("POST", path, body),
        },
        timeout=10,
    )
    resp.raise_for_status()
    return resp.json()["data"]["order_id"]


def get_order_status(auth: SignatureAuth, order_id: str) -> str:
    """Fetch the current status string for a single order."""
    path = f"/api/v1/trading/orders/{order_id}"
    response = requests.get(
        f"{TRADING_BASE_URL}{path}",
        headers=auth.get_headers("GET", path),
        timeout=10,
    )
    response.raise_for_status()
    return response.json()["data"]["status"]


def wait_for_fill(auth: SignatureAuth, target_units: int, order_id: str | None) -> float:
    """Poll balance (and order status) until the balance reaches target_units.

    Returns the observed balance once it is sufficient.

    Raises:
        RuntimeError: if the order is canceled or rejected.
        TimeoutError: if the target is not reached within MAX_WAIT_SEC.
    """
    fill_seen = False
    deadline = time.time() + MAX_WAIT_SEC
    while time.time() < deadline:
        balance = 0.0
        try:
            balance = get_consumption_balance(auth)
        except Exception as exc:
            # Polling is best-effort, but don't swallow errors silently —
            # log them so persistent failures are visible.
            print(f"Balance check failed: {exc}")

        status = "unknown"
        if order_id:
            try:
                status = get_order_status(auth, order_id)
            except Exception as exc:
                print(f"Status check failed: {exc}")
                status = "unknown"

        print(
            f"Waiting: order {order_id or 'n/a'} is {status}, balance {balance}/{target_units}"
        )

        if balance >= target_units:
            return balance
        if status == "filled":
            # Filled but not yet credited; keep polling the balance.
            fill_seen = True
        if status in {"canceled", "rejected"}:
            raise RuntimeError(f"Order {order_id} {status}")
        time.sleep(3)
    if fill_seen:
        raise TimeoutError(
            "Order filled but balance did not reach target within timeout"
        )
    raise TimeoutError("Timed out waiting for order fill")


async def process_prompt(prompt, index, semaphore):
    """Run one chat completion under the shared concurrency semaphore."""
    async with semaphore:
        completion = await client.chat.completions.create(
            model=CONFIG["model"],
            messages=[
                {"role": "system", "content": "Be concise and correct."},
                {"role": "user", "content": prompt},
            ],
        )
    return {
        "index": index,
        "prompt": prompt,
        "response": completion.choices[0].message.content,
    }


async def process_batch(prompts, max_concurrent):
    """Run all prompts with bounded concurrency; return results sorted by index.

    NOTE(review): failed requests are logged and omitted, so the returned
    list may be shorter than `prompts`.
    """
    gate = asyncio.Semaphore(max_concurrent)
    pending = [process_prompt(text, i, gate) for i, text in enumerate(prompts)]

    completed = []
    for future in asyncio.as_completed(pending):
        try:
            item = await future
        except Exception as err:
            print(f"Failed: {err}")
            continue
        completed.append(item)
        print(f"[{len(completed)}/{len(prompts)}] {item['prompt'][:30]}...")

    return sorted(completed, key=lambda entry: entry["index"])


async def main():
    """Estimate the job, ensure Unit balance via a limit order, run the batch."""
    total_tokens = estimate_tokens(PROMPTS)
    required_units = units_needed(total_tokens)
    print(
        f"Estimated {total_tokens} tokens ({required_units} unit(s) of 1M tokens each)"
    )

    current_balance = get_consumption_balance(trading_auth)
    print(f"Current consumption balance: {current_balance} unit(s)")

    # Work out how many Units still have to be bought, if any. Only whole
    # Units already held can offset the purchase.
    if INCLUDE_EXISTING_BALANCE and current_balance >= required_units:
        to_buy = 0
    else:
        balance_used = math.floor(current_balance) if INCLUDE_EXISTING_BALANCE else 0
        to_buy = required_units - balance_used

    if to_buy < 1:
        print("Balance already sufficient, running batch\n")
    else:
        print(f"Placing limit order for {to_buy} unit(s) at ${MAX_PRICE:.2f}...")
        order_id = place_limit_order(trading_auth, to_buy)
        print(f"Order placed: {order_id}, waiting for fill...")
        final_balance = wait_for_fill(trading_auth, required_units, order_id)
        print(f"Order filled (balance now {final_balance}), running batch\n")

    print(
        f"Processing {len(PROMPTS)} prompts (max {CONFIG['max_concurrent']} concurrent)\n"
    )
    started = time.time()
    results = await process_batch(PROMPTS, CONFIG["max_concurrent"])
    elapsed = time.time() - started

    print(f"\nCompleted {len(results)} requests in {elapsed:.2f}s\n")

    for result in results:
        print(f"{result['index'] + 1}. {result['prompt']}")
        print(f"   → {result['response'][:100]}...\n")


def json_dumps(obj) -> str:
    """Serialize obj to a JSON string (named hook kept for request signing)."""
    return json.dumps(obj)


asyncio.run(main())

```

{% endtab %}

{% tab title="JavaScript" %}

```javascript
// Estimate job size, place a limit order for tokens (1 unit = 1M tokens),
// wait until balance is available, then run the batch.

import axios from 'axios';
import OpenAI from 'openai';
import nacl from 'tweetnacl';
import util from 'tweetnacl-util';

// Service endpoints and the market that sells consumption Units.
const CONSUMPTION_BASE_URL = 'https://consumption.api.thegrid.ai/api/v1';
const TRADING_BASE_URL = 'https://trading.api.thegrid.ai';
const MARKET_ID = 'market_788dcbd5-ac68-4c61-acf1-4443beaf2a1c';
// Ceiling price per Unit and polling timeout, both overridable via env.
const MAX_PRICE = parseFloat(process.env.MAX_PRICE || '2.50');
const MAX_WAIT_MS = parseInt(process.env.MAX_WAIT_MS || '120000', 10);
// Default to false so the behavior matches the Python and Go examples.
const INCLUDE_EXISTING_BALANCE = (process.env.INCLUDE_EXISTING_BALANCE ?? 'false') === 'true';

// Signs trading API requests with an Ed25519 key.
// Signature scheme: sign(timestamp + METHOD + path + body); requests carry
// the signature, the timestamp, and the key fingerprint as headers.
class SignatureAuth {
  constructor(privateKeyBase64, fingerprint) {
    const raw = util.decodeBase64(privateKeyBase64);
    // Accept either a 32-byte seed or a full 64-byte secret key
    if (raw.length === nacl.sign.seedLength) {
      this.privateKey = nacl.sign.keyPair.fromSeed(raw).secretKey;
    } else if (raw.length === nacl.sign.secretKeyLength) {
      this.privateKey = raw;
    } else {
      throw new Error('Invalid private key length; expected 32-byte seed or 64-byte secret key');
    }
    this.fingerprint = fingerprint;
  }

  // Build the auth headers for one request; `body` must be the exact string sent.
  getHeaders(method, path, body = '') {
    const timestamp = Math.floor(Date.now() / 1000).toString();
    const message = `${timestamp}${method.toUpperCase()}${path}${body}`;
    const messageBytes = util.decodeUTF8(message);
    const signatureBytes = nacl.sign.detached(messageBytes, this.privateKey);
    return {
      'x-thegrid-signature': util.encodeBase64(signatureBytes),
      'x-thegrid-timestamp': timestamp,
      'x-thegrid-fingerprint': this.fingerprint
    };
  }
}

// The batch workload: one chat completion per prompt.
const prompts = [
  'What is 2 + 2?',
  'Name three colors.',
  'What is the capital of France?',
  'How many days in a week?',
  'What is H2O?',
];

// Model and concurrency settings for the batch run.
const CONFIG = {
  model: 'chat-fast',
  maxConcurrent: 3
};

// OpenAI-compatible client pointed at the Grid consumption endpoint.
const client = new OpenAI({
  apiKey: process.env.GRID_CONSUMPTION_API_KEY,
  baseURL: CONSUMPTION_BASE_URL
});

// Signer for the trading API, keyed from the environment.
const tradingAuth = new SignatureAuth(
  process.env.GRID_TRADING_PRIVATE_KEY,
  process.env.GRID_TRADING_FINGERPRINT
);

function estimateTokens(promptList) {
  // Rough heuristic: ~4 chars per token, plus a 50-char buffer per prompt.
  let total = 0;
  for (const prompt of promptList) {
    total += Math.ceil((prompt.length + 50) / 4);
  }
  return total;
}

function unitsNeeded(totalTokens) {
  // Whole Units of 1M tokens each, never fewer than one.
  const units = Math.ceil(totalTokens / 1_000_000);
  return units > 1 ? units : 1;
}

async function getConsumptionBalance(auth) {
  // Sum balances across all consumption accounts, counting the larger of
  // available_balance and total_balance for each account.
  const path = '/api/v1/trading/consumption-accounts';
  const resp = await axios.get(`${TRADING_BASE_URL}${path}?order_by=created_at`, {
    headers: auth.getHeaders('GET', path)
  });
  let sum = 0;
  for (const acct of resp.data.data) {
    const available = Number(acct.available_balance || 0);
    const total = Number(acct.total_balance || 0);
    sum += Math.max(available, total);
  }
  return sum;
}

// Place a GTC limit buy for `quantity` Units at MAX_PRICE.
// Returns the new order id.
async function placeLimitOrder(auth, quantity) {
  const orderData = {
    market_id: MARKET_ID,
    side: 'buy',
    type: 'limit',
    quantity,
    price: MAX_PRICE.toFixed(2),
    time_in_force: 'gtc',
    client_order_id: `batch-${Date.now()}`
  };

  const path = '/api/v1/trading/orders';
  // Serialize once so the signed body is byte-identical to the body sent.
  const body = JSON.stringify(orderData);
  const resp = await axios.post(`${TRADING_BASE_URL}${path}`, body, {
    headers: {
      'Content-Type': 'application/json',
      ...auth.getHeaders('POST', path, body)
    }
  });

  return resp.data.data.order_id;
}

async function getOrderStatus(auth, orderId) {
  // Fetch the current status string for a single order.
  const path = `/api/v1/trading/orders/${orderId}`;
  const { data } = await axios.get(`${TRADING_BASE_URL}${path}`, {
    headers: auth.getHeaders('GET', path)
  });
  return data.data.status;
}

// Poll balance (and order status, when an order id is given) until the
// balance reaches targetUnits. Resolves with the observed balance; rejects
// if the order is canceled/rejected or the timeout elapses.
async function waitForFill(auth, targetUnits, orderId) {
  let fillSeen = false;
  const start = Date.now();
  while (Date.now() - start < MAX_WAIT_MS) {
    // Poll both in parallel; allSettled keeps one failure from aborting the loop.
    const [balance, status] = await Promise.allSettled([
      getConsumptionBalance(auth),
      orderId ? getOrderStatus(auth, orderId) : Promise.resolve('unknown')
    ]);

    const balValue = balance.status === 'fulfilled' ? balance.value : 0;
    const statusValue = status.status === 'fulfilled' ? status.value : 'unknown';
    console.log(`Waiting: order ${orderId || 'n/a'} is ${statusValue}, balance ${balValue}/${targetUnits}`);

    if (balValue >= targetUnits) {
      return balValue;
    }
    if (statusValue === 'filled') {
      fillSeen = true;
      // continue waiting for balance to update
    }
    if (statusValue === 'canceled' || statusValue === 'rejected') {
      throw new Error(`Order ${orderId} ${statusValue}`);
    }
    await new Promise(resolve => setTimeout(resolve, 3000));
  }
  if (fillSeen) {
    throw new Error('Order filled but balance did not reach target within timeout');
  }
  throw new Error('Timed out waiting for order fill');
}

async function processPrompt(prompt, index) {
  // One chat completion; concurrency is bounded by the caller's worker pool.
  const completion = await client.chat.completions.create({
    model: CONFIG.model,
    messages: [
      { role: 'system', content: 'Be concise and correct.' },
      { role: 'user', content: prompt }
    ]
  });
  return { index, prompt, response: completion.choices[0].message.content };
}

// Run all prompts through a fixed-size worker pool; returns results sorted
// by original index. Failures are recorded as { index, prompt, error }.
async function processBatch(promptList, maxConcurrent) {
  const results = [];
  // Shared work queue; shift() is safe because workers only yield at await points.
  const queue = [...promptList.entries()];
  const workers = Array.from({ length: maxConcurrent }, async () => {
    while (queue.length) {
      const [index, prompt] = queue.shift();
      try {
        const result = await processPrompt(prompt, index);
        results.push(result);
        console.log(`[${results.length}/${promptList.length}] ${prompt.slice(0, 30)}...`);
      } catch (err) {
        results.push({ index, prompt, error: err.message });
        console.log(`[${results.length}/${promptList.length}] Failed: ${err.message}`);
      }
    }
  });

  await Promise.all(workers);
  return results.sort((a, b) => a.index - b.index);
}

// 1) Size the job and convert tokens into whole Units.
const totalTokens = estimateTokens(prompts);
const requiredUnits = unitsNeeded(totalTokens);
console.log(`Estimated ${totalTokens} tokens (${requiredUnits} unit(s) of 1M tokens each)`);

const currentBalance = await getConsumptionBalance(tradingAuth);
console.log(`Current consumption balance: ${currentBalance} unit(s)`);

// 2) Buy any missing capacity with a limit order and wait until it fills.
if (INCLUDE_EXISTING_BALANCE && currentBalance >= requiredUnits) {
  console.log('Balance already sufficient, running batch\n');
} else {
  // Only whole Units already held can offset the purchase.
  const balanceUsed = INCLUDE_EXISTING_BALANCE ? Math.floor(currentBalance) : 0;
  const unitsToBuy = requiredUnits - balanceUsed;
  if (unitsToBuy < 1) {
    console.log('Balance already sufficient, running batch\n');
  } else {
    console.log(`Placing limit order for ${unitsToBuy} unit(s) at $${MAX_PRICE.toFixed(2)}...`);
    const orderId = await placeLimitOrder(tradingAuth, unitsToBuy);
    console.log(`Order placed: ${orderId}, waiting for fill...`);
    const finalBalance = await waitForFill(tradingAuth, requiredUnits, orderId);
    console.log(`Order filled (balance now ${finalBalance}), running batch\n`);
  }
}

// 3) Run the batch and report results in original prompt order.
console.log(`Processing ${prompts.length} prompts (max ${CONFIG.maxConcurrent} concurrent)\n`);
const startTime = Date.now();
const results = await processBatch(prompts, CONFIG.maxConcurrent);
const elapsed = ((Date.now() - startTime) / 1000).toFixed(2);

console.log(`\nCompleted ${results.length} requests in ${elapsed}s\n`);
for (const result of results) {
  if (result.error) {
    console.log(`${result.index + 1}. ERROR: ${result.error}`);
  } else {
    console.log(`${result.index + 1}. ${result.prompt}`);
    console.log(`   → ${result.response.slice(0, 100)}...\n`);
  }
}

```

{% endtab %}

{% tab title="Go" %}

```go
// Estimate job size, place a limit order for tokens (1 unit = 1M tokens),
// wait until balance is available, then run the batch.

package main

import (
	"bytes"
	"context"
	"crypto/ed25519"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"math"
	"net/http"
	"os"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/openai/openai-go"
	"github.com/openai/openai-go/option"
)

// The batch workload: one chat completion per prompt.
var prompts = []string{
	"What is 2 + 2?",
	"Name three colors.",
	"What is the capital of France?",
	"How many days in a week?",
	"What is H2O?",
}

// Model and concurrency settings for the batch run.
var config = struct {
	Model         string
	MaxConcurrent int
}{
	Model:         "chat-fast",
	MaxConcurrent: 3,
}

// Service endpoints and the market that sells consumption Units.
const (
	consumptionBaseURL = "https://consumption.api.thegrid.ai/api/v1"
	tradingBaseURL     = "https://trading.api.thegrid.ai"
	marketID           = "market_788dcbd5-ac68-4c61-acf1-4443beaf2a1c"
)

var (
	// Ceiling price per Unit, polling timeout, and whether existing balance
	// offsets the purchase — all overridable via the environment.
	maxPrice               = envFloat("MAX_PRICE", 2.50)
	maxWait                = envInt("MAX_WAIT_SEC", 120)
	includeExistingBalance = envBool("INCLUDE_EXISTING_BALANCE", false)
	// Trading API signer and OpenAI-compatible consumption client.
	auth                   = newSignatureAuth(os.Getenv("GRID_TRADING_PRIVATE_KEY"), os.Getenv("GRID_TRADING_FINGERPRINT"))
	apiClient              = openai.NewClient(
		option.WithAPIKey(os.Getenv("GRID_CONSUMPTION_API_KEY")),
		option.WithBaseURL(consumptionBaseURL),
	)
)

// Result holds the outcome of one prompt; on failure Error is set and
// Response is empty.
type Result struct {
	Index    int
	Prompt   string
	Response string
	Error    error
}

// signatureAuth signs trading API requests with an Ed25519 private key.
type signatureAuth struct {
	privateKey  ed25519.PrivateKey
	fingerprint string
}

// newSignatureAuth builds a signer from a base64-encoded key, accepting
// either a 32-byte seed or a full private key.
// NOTE(review): the base64 decode error is ignored; an invalid key only
// surfaces later when signing — consider validating up front.
func newSignatureAuth(privateKeyB64, fingerprint string) *signatureAuth {
	privateKeyBytes, _ := base64.StdEncoding.DecodeString(privateKeyB64)
	var pk ed25519.PrivateKey
	if len(privateKeyBytes) == ed25519.SeedSize {
		pk = ed25519.NewKeyFromSeed(privateKeyBytes)
	} else {
		pk = ed25519.PrivateKey(privateKeyBytes)
	}
	return &signatureAuth{privateKey: pk, fingerprint: fingerprint}
}

// getHeaders builds the auth headers for one request. The signed message is
// timestamp + METHOD + path + body; body must be the exact string sent.
func (sa *signatureAuth) getHeaders(method, path, body string) map[string]string {
	timestamp := strconv.FormatInt(time.Now().Unix(), 10)
	// Normalize the verb so lowercase callers still produce the same
	// signature as the Python/JavaScript signers, which uppercase it.
	message := timestamp + strings.ToUpper(method) + path + body
	signature := ed25519.Sign(sa.privateKey, []byte(message))
	return map[string]string{
		"x-thegrid-signature":   base64.StdEncoding.EncodeToString(signature),
		"x-thegrid-timestamp":   timestamp,
		"x-thegrid-fingerprint": sa.fingerprint,
	}
}

// estimateTokens gives a rough token estimate: ~4 chars per token plus a
// 50-char buffer per prompt.
func estimateTokens(list []string) int {
	total := 0
	for _, p := range list {
		// Integer ceiling of (len+50)/4, avoiding float math.
		total += (len(p) + 50 + 3) / 4
	}
	return total
}

// unitsNeeded converts a token count into whole 1M-token Units, minimum 1.
func unitsNeeded(totalTokens int) int {
	units := (totalTokens + 1_000_000 - 1) / 1_000_000
	if units < 1 {
		units = 1
	}
	return units
}

// getConsumptionBalance sums balances across all consumption accounts,
// counting the larger of available_balance and total_balance per account.
func getConsumptionBalance(sa *signatureAuth) (float64, error) {
	path := "/api/v1/trading/consumption-accounts"
	req, _ := http.NewRequest("GET", tradingBaseURL+path+"?order_by=created_at", nil)
	// The query string is not part of the signed path.
	for k, v := range sa.getHeaders("GET", path, "") {
		req.Header.Set(k, v)
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()

	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return 0, fmt.Errorf("balance request failed: %s", resp.Status)
	}

	var payload struct {
		Data []struct {
			AvailableBalance float64 `json:"available_balance"`
			TotalBalance     float64 `json:"total_balance"`
		} `json:"data"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil {
		return 0, err
	}

	sum := 0.0
	for _, acct := range payload.Data {
		if acct.TotalBalance > acct.AvailableBalance {
			sum += acct.TotalBalance
		} else {
			sum += acct.AvailableBalance
		}
	}
	return sum, nil
}

// placeLimitOrder places a GTC limit buy for quantity Units at maxPrice and
// returns the new order id. The body is marshaled once so the signed bytes
// match the bytes sent.
func placeLimitOrder(sa *signatureAuth, quantity int) (string, error) {
	orderData := map[string]any{
		"market_id":       marketID,
		"side":            "buy",
		"type":            "limit",
		"quantity":        quantity,
		"price":           fmt.Sprintf("%.2f", maxPrice),
		"time_in_force":   "gtc",
		"client_order_id": fmt.Sprintf("batch-%d", time.Now().Unix()),
	}
	bodyBytes, _ := json.Marshal(orderData)
	path := "/api/v1/trading/orders"
	req, _ := http.NewRequest("POST", tradingBaseURL+path, bytes.NewBuffer(bodyBytes))
	req.Header.Set("Content-Type", "application/json")
	for k, v := range sa.getHeaders("POST", path, string(bodyBytes)) {
		req.Header.Set(k, v)
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return "", fmt.Errorf("order request failed: %s", resp.Status)
	}

	var payload struct {
		Data struct {
			OrderID string `json:"order_id"`
		} `json:"data"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil {
		return "", err
	}
	return payload.Data.OrderID, nil
}

// getOrderStatus fetches the current status string for a single order.
func getOrderStatus(sa *signatureAuth, orderID string) (string, error) {
	path := fmt.Sprintf("/api/v1/trading/orders/%s", orderID)
	req, _ := http.NewRequest("GET", tradingBaseURL+path, nil)
	for k, v := range sa.getHeaders("GET", path, "") {
		req.Header.Set(k, v)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return "", fmt.Errorf("order status request failed: %s", resp.Status)
	}

	var payload struct {
		Data struct {
			Status string `json:"status"`
		} `json:"data"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil {
		return "", err
	}
	return payload.Data.Status, nil
}

// waitForBalance polls the consumption balance (and the order status, when
// an order id is given) until the balance covers targetUnits or maxWait
// seconds elapse. A canceled/rejected order fails immediately.
// NOTE(review): an error from getConsumptionBalance is not reported; the
// progress line then shows balance 0 — consider logging err.
func waitForBalance(sa *signatureAuth, targetUnits int, orderID string) error {
	fillSeen := false
	deadline := time.Now().Add(time.Duration(maxWait) * time.Second)
	for time.Now().Before(deadline) {
		balance, err := getConsumptionBalance(sa)
		status := "unknown"
		if orderID != "" {
			if s, err := getOrderStatus(sa, orderID); err == nil {
				status = s
			}
		}

		fmt.Printf("Waiting: order %s is %s, balance %.0f/%d\n", orderID, status, balance, targetUnits)

		if status == "filled" {
			// Filled but not yet credited; keep polling the balance.
			fillSeen = true
		}
		if status == "canceled" || status == "rejected" {
			return fmt.Errorf("order %s %s", orderID, status)
		}

		// Only trust the balance when the read succeeded.
		if err == nil && balance >= float64(targetUnits) {
			return nil
		}

		time.Sleep(3 * time.Second)
	}
	if fillSeen {
		return fmt.Errorf("order filled but balance did not reach target within timeout")
	}
	return fmt.Errorf("timed out waiting for order fill")
}

// processPrompt runs one chat completion and wraps the outcome in a Result.
func processPrompt(client *openai.Client, prompt string, index int) Result {
	ctx := context.Background()

	resp, err := client.Chat.Completions.New(ctx, openai.ChatCompletionNewParams{
		Model: config.Model,
		Messages: []openai.ChatCompletionMessageParamUnion{
			openai.SystemMessage("Be concise and correct."),
			openai.UserMessage(prompt),
		},
	})

	if err != nil {
		return Result{Index: index, Prompt: prompt, Error: err}
	}

	return Result{
		Index:    index,
		Prompt:   prompt,
		Response: resp.Choices[0].Message.Content,
	}
}

// processBatch fans prompts out to goroutines, bounded by a semaphore
// channel of size maxConcurrent. Each goroutine writes to its own slice
// index, so the returned slice is already in original prompt order.
func processBatch(client *openai.Client, prompts []string, maxConcurrent int) []Result {
	results := make([]Result, len(prompts))
	sem := make(chan struct{}, maxConcurrent)
	var wg sync.WaitGroup
	var mu sync.Mutex
	completed := 0

	for i, prompt := range prompts {
		wg.Add(1)
		go func(idx int, p string) {
			defer wg.Done()
			// Acquire a concurrency slot; released on return.
			sem <- struct{}{}
			defer func() { <-sem }()

			result := processPrompt(client, p, idx)
			// Distinct index per goroutine, so no lock needed for this write.
			results[idx] = result

			// The lock guards the shared counter and keeps output lines whole.
			mu.Lock()
			completed++
			if result.Error != nil {
				fmt.Printf("[%d/%d] Failed: %v\n", completed, len(prompts), result.Error)
			} else {
				truncated := p
				if len(truncated) > 30 {
					truncated = truncated[:30] + "..."
				}
				fmt.Printf("[%d/%d] %s\n", completed, len(prompts), truncated)
			}
			mu.Unlock()
		}(i, prompt)
	}

	wg.Wait()
	return results
}

// main orchestrates the flow: estimate the job, ensure Unit balance via a
// limit order, run the batch, then print results in prompt order.
func main() {
	totalTokens := estimateTokens(prompts)
	requiredUnits := unitsNeeded(totalTokens)
	fmt.Printf("Estimated %d tokens (%d unit(s) of 1M tokens each)\n", totalTokens, requiredUnits)

	currentBalance, err := getConsumptionBalance(auth)
	if err != nil {
		fmt.Printf("Failed to read balance: %v\n", err)
		return
	}
	fmt.Printf("Current consumption balance: %.0f unit(s)\n", currentBalance)

	if includeExistingBalance && currentBalance >= float64(requiredUnits) {
		// Trailing \n intentionally leaves a blank line before the batch output.
		fmt.Println("Balance already sufficient, running batch\n")
	} else {
		// Only whole Units already held can offset the purchase.
		balanceUsed := 0
		if includeExistingBalance {
			balanceUsed = int(math.Floor(currentBalance))
		}
		unitsToBuy := requiredUnits - balanceUsed
		if unitsToBuy < 1 {
			fmt.Println("Balance already sufficient, running batch\n")
		} else {
			fmt.Printf("Placing limit order for %d unit(s) at $%.2f...\n", unitsToBuy, maxPrice)
			orderID, err := placeLimitOrder(auth, unitsToBuy)
			if err != nil {
				fmt.Printf("Order failed: %v\n", err)
				return
			}
			fmt.Printf("Order placed: %s, waiting for fill...\n", orderID)
			if err := waitForBalance(auth, requiredUnits, orderID); err != nil {
				fmt.Println(err.Error())
				return
			}
			fmt.Println("Balance is ready, running batch\n")
		}
	}

	fmt.Printf("Processing %d prompts (max %d concurrent)\n\n", len(prompts), config.MaxConcurrent)

	startTime := time.Now()
	results := processBatch(&apiClient, prompts, config.MaxConcurrent)
	elapsed := time.Since(startTime).Seconds()

	fmt.Printf("\nCompleted %d requests in %.2fs\n\n", len(results), elapsed)

	for _, result := range results {
		if result.Error != nil {
			fmt.Printf("%d. ERROR: %v\n", result.Index+1, result.Error)
		} else {
			fmt.Printf("%d. %s\n", result.Index+1, result.Prompt)
			// NOTE(review): byte-based truncation can split a multi-byte rune
			// mid-sequence; acceptable for demo output.
			response := result.Response
			if len(response) > 100 {
				response = response[:100] + "..."
			}
			fmt.Printf("   → %s\n\n", response)
		}
	}
}

func envFloat(key string, fallback float64) float64 {
	if val := os.Getenv(key); val != "" {
		if parsed, err := strconv.ParseFloat(val, 64); err == nil {
			return parsed
		}
	}
	return fallback
}

func envInt(key string, fallback int) int {
	if val := os.Getenv(key); val != "" {
		if parsed, err := strconv.Atoi(val); err == nil {
			return parsed
		}
	}
	return fallback
}

func envBool(key string, fallback bool) bool {
	if val := os.Getenv(key); val != "" {
		if val == "true" || val == "1" || strings.ToLower(val) == "yes" {
			return true
		}
		if val == "false" || val == "0" || strings.ToLower(val) == "no" {
			return false
		}
	}
	return fallback
}

```

{% endtab %}
{% endtabs %}


---

# Agent Instructions: Querying This Documentation

If you need additional information that is not directly available in this page, you can query the documentation dynamically by asking a question.

Perform an HTTP GET request on the current page URL with the `ask` query parameter:

```
GET https://thegrid.ai/docs/technical-guides/batch-sequencing-with-limit-orders.md?ask=<question>
```

The question should be specific, self-contained, and written in natural language.
The response will contain a direct answer to the question and relevant excerpts and sources from the documentation.

Use this mechanism when the answer is not explicitly present in the current page, you need clarification or additional context, or you want to retrieve related documentation sections.
