go-anthropic
Anthropic Claude API wrapper for Go
Stars: 64
Go-anthropic is an unofficial API wrapper for Anthropic Claude in Go. It supports completions, streaming completions, messages, streaming messages, vision, and tool use. Users can interact with the Anthropic Claude API to generate text completions, analyze messages, process images, and utilize specific tools for various tasks.
README:
Anthropic Claude API wrapper for Go (unofficial). Supported features:
- Completions
- Streaming Completions
- Messages
- Streaming Messages
- Vision
- Tool use
- Prompt Caching
go get github.com/liushuangls/go-anthropic/v2
Currently, go-anthropic requires Go version 1.21 or greater.
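The examples below pass the API key as a string literal for brevity; in practice it is usually read from the environment. A minimal sketch of that pattern (the ANTHROPIC_API_KEY variable name is just a common convention here, not something the library requires):

package main

import (
    "fmt"
    "os"

    "github.com/liushuangls/go-anthropic/v2"
)

func main() {
    // Read the key from the environment instead of hardcoding it.
    apiKey := os.Getenv("ANTHROPIC_API_KEY")
    if apiKey == "" {
        fmt.Println("ANTHROPIC_API_KEY is not set")
        return
    }
    client := anthropic.NewClient(apiKey)
    _ = client // use the client as in the examples below
}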
Messages example

package main

import (
    "context"
    "errors"
    "fmt"

    "github.com/liushuangls/go-anthropic/v2"
)

func main() {
    client := anthropic.NewClient("your anthropic api key")
    resp, err := client.CreateMessages(context.Background(), anthropic.MessagesRequest{
        Model: anthropic.ModelClaudeInstant1Dot2,
        Messages: []anthropic.Message{
            anthropic.NewUserTextMessage("What is your name?"),
        },
        MaxTokens: 1000,
    })
    if err != nil {
        var e *anthropic.APIError
        if errors.As(err, &e) {
            fmt.Printf("Messages error, type: %s, message: %s", e.Type, e.Message)
        } else {
            fmt.Printf("Messages error: %v\n", err)
        }
        return
    }
    fmt.Println(resp.Content[0].GetText())
}
Messages stream example

package main

import (
    "context"
    "errors"
    "fmt"

    "github.com/liushuangls/go-anthropic/v2"
)

func main() {
    client := anthropic.NewClient("your anthropic api key")
    resp, err := client.CreateMessagesStream(context.Background(), anthropic.MessagesStreamRequest{
        MessagesRequest: anthropic.MessagesRequest{
            Model: anthropic.ModelClaudeInstant1Dot2,
            Messages: []anthropic.Message{
                anthropic.NewUserTextMessage("What is your name?"),
            },
            MaxTokens: 1000,
        },
        OnContentBlockDelta: func(data anthropic.MessagesEventContentBlockDeltaData) {
            fmt.Printf("Stream Content: %s\n", data.Delta.Text)
        },
    })
    if err != nil {
        var e *anthropic.APIError
        if errors.As(err, &e) {
            fmt.Printf("Messages stream error, type: %s, message: %s", e.Type, e.Message)
        } else {
            fmt.Printf("Messages stream error: %v\n", err)
        }
        return
    }
    fmt.Println(resp.Content[0].GetText())
}
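Streaming requests can run for a while; since the client takes a context.Context, a deadline can be attached with the standard library to bound the whole call. A minimal sketch that mirrors the stream example above (the 30-second timeout is an arbitrary choice):

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/liushuangls/go-anthropic/v2"
)

func main() {
    client := anthropic.NewClient("your anthropic api key")

    // Cancel the streaming request if it has not finished within 30 seconds.
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    resp, err := client.CreateMessagesStream(ctx, anthropic.MessagesStreamRequest{
        MessagesRequest: anthropic.MessagesRequest{
            Model: anthropic.ModelClaudeInstant1Dot2,
            Messages: []anthropic.Message{
                anthropic.NewUserTextMessage("What is your name?"),
            },
            MaxTokens: 1000,
        },
        OnContentBlockDelta: func(data anthropic.MessagesEventContentBlockDeltaData) {
            fmt.Printf("Stream Content: %s\n", data.Delta.Text)
        },
    })
    if err != nil {
        fmt.Printf("Messages stream error: %v\n", err)
        return
    }
    fmt.Println(resp.Content[0].GetText())
}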
Messages Vision example
package main

import (
    "context"
    "errors"
    "fmt"
    "io"
    "os"

    "github.com/liushuangls/go-anthropic/v2"
)

func main() {
    client := anthropic.NewClient("your anthropic api key")

    imagePath := "xxx"
    imageMediaType := "image/jpeg"
    imageFile, err := os.Open(imagePath)
    if err != nil {
        panic(err)
    }
    defer imageFile.Close()

    imageData, err := io.ReadAll(imageFile)
    if err != nil {
        panic(err)
    }

    resp, err := client.CreateMessages(context.Background(), anthropic.MessagesRequest{
        Model: anthropic.ModelClaude3Opus20240229,
        Messages: []anthropic.Message{
            {
                Role: anthropic.RoleUser,
                Content: []anthropic.MessageContent{
                    anthropic.NewImageMessageContent(anthropic.MessageContentImageSource{
                        Type:      "base64",
                        MediaType: imageMediaType,
                        Data:      imageData,
                    }),
                    anthropic.NewTextMessageContent("Describe this image."),
                },
            },
        },
        MaxTokens: 1000,
    })
    if err != nil {
        var e *anthropic.APIError
        if errors.As(err, &e) {
            fmt.Printf("Messages error, type: %s, message: %s", e.Type, e.Message)
        } else {
            fmt.Printf("Messages error: %v\n", err)
        }
        return
    }
    fmt.Println(resp.Content[0].GetText())
}
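The vision example hard-codes imageMediaType; when the type is not known ahead of time, Go's standard library can sniff it from the raw bytes. A small stdlib-only sketch (the file path is a placeholder), whose result can be passed as the MediaType field above:

package main

import (
    "fmt"
    "net/http"
    "os"
)

// detectMediaType reads a file and sniffs its MIME type (e.g. "image/jpeg",
// "image/png") from the leading bytes of the content.
func detectMediaType(path string) (string, []byte, error) {
    data, err := os.ReadFile(path)
    if err != nil {
        return "", nil, err
    }
    return http.DetectContentType(data), data, nil
}

func main() {
    mediaType, data, err := detectMediaType("example.jpg") // hypothetical path
    if err != nil {
        panic(err)
    }
    fmt.Printf("media type: %s, %d bytes\n", mediaType, len(data))
}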
Messages Tool use example
package main

import (
    "context"
    "fmt"

    "github.com/liushuangls/go-anthropic/v2"
    "github.com/liushuangls/go-anthropic/v2/jsonschema"
)

func main() {
    client := anthropic.NewClient(
        "your anthropic api key",
    )

    request := anthropic.MessagesRequest{
        Model: anthropic.ModelClaude3Haiku20240307,
        Messages: []anthropic.Message{
            anthropic.NewUserTextMessage("What is the weather like in San Francisco?"),
        },
        MaxTokens: 1000,
        Tools: []anthropic.ToolDefinition{
            {
                Name:        "get_weather",
                Description: "Get the current weather in a given location",
                InputSchema: jsonschema.Definition{
                    Type: jsonschema.Object,
                    Properties: map[string]jsonschema.Definition{
                        "location": {
                            Type:        jsonschema.String,
                            Description: "The city and state, e.g. San Francisco, CA",
                        },
                        "unit": {
                            Type:        jsonschema.String,
                            Enum:        []string{"celsius", "fahrenheit"},
                            Description: "The unit of temperature, either 'celsius' or 'fahrenheit'",
                        },
                    },
                    Required: []string{"location"},
                },
            },
        },
    }

    resp, err := client.CreateMessages(context.Background(), request)
    if err != nil {
        panic(err)
    }

    request.Messages = append(request.Messages, anthropic.Message{
        Role:    anthropic.RoleAssistant,
        Content: resp.Content,
    })

    var toolUse *anthropic.MessageContentToolUse
    for _, c := range resp.Content {
        if c.Type == anthropic.MessagesContentTypeToolUse {
            toolUse = c.MessageContentToolUse
        }
    }
    if toolUse == nil {
        panic("tool use not found")
    }

    request.Messages = append(request.Messages, anthropic.NewToolResultsMessage(toolUse.ID, "65 degrees", false))

    resp, err = client.CreateMessages(context.Background(), request)
    if err != nil {
        panic(err)
    }
    fmt.Printf("Response: %+v\n", resp)
}
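After the model requests get_weather, the tool input still has to be decoded into Go values before running the real tool. A hedged sketch, independent of the exact type the library uses for toolUse.Input: it round-trips the value through encoding/json, which works whether the input is exposed as a map or as raw JSON, and the weatherInput struct simply mirrors the schema declared above.

package main

import (
    "encoding/json"
    "fmt"
)

// weatherInput mirrors the JSON schema declared for the get_weather tool above.
type weatherInput struct {
    Location string `json:"location"`
    Unit     string `json:"unit,omitempty"`
}

// decodeToolInput round-trips an arbitrary tool-input value through JSON so it
// works whether the library exposes the input as a map or as raw JSON bytes.
func decodeToolInput(input any) (weatherInput, error) {
    raw, err := json.Marshal(input)
    if err != nil {
        return weatherInput{}, err
    }
    var in weatherInput
    err = json.Unmarshal(raw, &in)
    return in, err
}

func main() {
    // Stand-in for toolUse.Input from the example above.
    in, err := decodeToolInput(map[string]any{"location": "San Francisco, CA", "unit": "celsius"})
    if err != nil {
        panic(err)
    }
    fmt.Printf("location=%q unit=%q\n", in.Location, in.Unit)
}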
Prompt Caching
doc: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching
package main

import (
    "context"
    "errors"
    "fmt"

    "github.com/liushuangls/go-anthropic/v2"
)

func main() {
    client := anthropic.NewClient(
        "your anthropic api key",
        anthropic.WithBetaVersion(anthropic.BetaPromptCaching20240731),
    )

    resp, err := client.CreateMessages(
        context.Background(),
        anthropic.MessagesRequest{
            Model: anthropic.ModelClaude3Haiku20240307,
            MultiSystem: []anthropic.MessageSystemPart{
                {
                    Type: "text",
                    Text: "You are an AI assistant tasked with analyzing literary works. Your goal is to provide insightful commentary on themes, characters, and writing style.",
                },
                {
                    Type: "text",
                    Text: "<the entire contents of Pride and Prejudice>",
                    CacheControl: &anthropic.MessageCacheControl{
                        Type: anthropic.CacheControlTypeEphemeral,
                    },
                },
            },
            Messages: []anthropic.Message{
                anthropic.NewUserTextMessage("Analyze the major themes in Pride and Prejudice."),
            },
            MaxTokens: 1000,
        })
    if err != nil {
        var e *anthropic.APIError
        if errors.As(err, &e) {
            fmt.Printf("Messages error, type: %s, message: %s", e.Type, e.Message)
        } else {
            fmt.Printf("Messages error: %v\n", err)
        }
        return
    }

    fmt.Printf("Usage: %+v\n", resp.Usage)
    fmt.Println(resp.Content[0].GetText())
}
The following project had particular influence on go-anthropic's design.
Alternative AI tools for go-anthropic
Similar Open Source Tools
orch
orch is a library for building language model powered applications and agents for the Rust programming language. It can be used for tasks such as text generation, streaming text generation, structured data generation, and embedding generation. The library provides functionalities for executing various language model tasks and can be integrated into different applications and contexts. It offers flexibility for developers to create language model-powered features and applications in Rust.
aigcpanel
AigcPanel is a simple, beginner-friendly, all-in-one AI digital human system. It supports video synthesis, voice synthesis, and voice cloning, simplifies local model management, and allows one-click import and use of AI models. Use of the product for illegal activities is prohibited, and users must comply with the laws and regulations of the People's Republic of China.
openapi
The `@samchon/openapi` repository is a collection of OpenAPI types and converters for various versions of OpenAPI specifications. It includes an 'emended' OpenAPI v3.1 specification that enhances clarity by removing ambiguous and duplicated expressions. The repository also provides an application composer for LLM (Large Language Model) function calling from OpenAPI documents, allowing users to easily perform LLM function calls based on the Swagger document. Conversions to different versions of OpenAPI documents are also supported, all based on the emended OpenAPI v3.1 specification. Users can validate their OpenAPI documents using the `typia` library with `@samchon/openapi` types, ensuring compliance with standard specifications.
dashscope-sdk
DashScope SDK for .NET is an unofficial SDK maintained by Cnblogs, providing various APIs for text embedding, generation, multimodal generation, image synthesis, and more. Users can interact with the SDK to perform tasks such as text completion, chat generation, function calls, file operations, and more. The project is under active development, and users are advised to check the Release Notes before upgrading.
acte
Acte is a framework designed to build GUI-like tools for AI Agents. It aims to address the issues of cognitive load and freedom degrees when interacting with multiple APIs in complex scenarios. By providing a graphical user interface (GUI) for Agents, Acte helps reduce cognitive load and constraints interaction, similar to how humans interact with computers through GUIs. The tool offers APIs for starting new sessions, executing actions, and displaying screens, accessible via HTTP requests or the SessionManager class.
dynamiq
Dynamiq is an orchestration framework designed to streamline the development of AI-powered applications, specializing in orchestrating retrieval-augmented generation (RAG) and large language model (LLM) agents. It provides an all-in-one Gen AI framework for agentic AI and LLM applications, offering tools for multi-agent orchestration, document indexing, and retrieval flows. With Dynamiq, users can easily build and deploy AI solutions for various tasks.
airtable
A simple Golang package to access the Airtable API. It provides functionalities to interact with Airtable such as initializing client, getting tables, listing records, adding records, updating records, deleting records, and bulk deleting records. The package is compatible with Go 1.13 and above.
aiotdlib
aiotdlib is a Python asyncio Telegram client based on TDLib. It provides automatic generation of types and functions from tl schema, validation, good IDE type hinting, and high-level API methods for simpler work with tdlib. The package includes prebuilt TDLib binaries for macOS (arm64) and Debian Bullseye (amd64). Users can use their own binary by passing `library_path` argument to `Client` class constructor. Compatibility with other versions of the library is not guaranteed. The tool requires Python 3.9+ and users need to get their `api_id` and `api_hash` from Telegram docs for installation and usage.
venom
Venom is a high-performance JavaScript library for building WhatsApp bots. It supports creating any kind of interaction, such as customer service flows, media sending, AI-based sentence recognition, and all types of design architecture for WhatsApp.
funcchain
Funcchain is a Python library that allows you to easily write cognitive systems by leveraging Pydantic models as output schemas and LangChain in the backend. It provides a seamless integration of LLMs into your apps, utilizing OpenAI Functions or LlamaCpp grammars (json-schema-mode) for efficient structured output. Funcchain compiles the Funcchain syntax into LangChain runnables, enabling you to invoke, stream, or batch process your pipelines effortlessly.
langchain-swift
LangChain for Swift, optimized for iOS, macOS, watchOS (partial support), and visionOS (beta). It is a pure client library; no server is required.
python-genai
The Google Gen AI SDK is a Python library that provides access to Google AI and Vertex AI services. It allows users to create clients for different services, work with parameter types, models, generate content, call functions, handle JSON response schemas, stream text and image content, perform async operations, count and compute tokens, embed content, generate and upscale images, edit images, work with files, create and get cached content, tune models, distill models, perform batch predictions, and more. The SDK supports various features like automatic function support, manual function declaration, JSON response schema support, streaming for text and image content, async methods, tuning job APIs, distillation, batch prediction, and more.
amadeus-java
Amadeus Java SDK provides a rich set of APIs for the travel industry, allowing developers to access various functionalities such as flight search, booking, airport information, and more. The SDK simplifies interaction with the Amadeus API by providing self-contained code examples and detailed documentation. Developers can easily make API calls, handle responses, and utilize features like pagination and logging. The SDK supports various endpoints for tasks like flight search, booking management, airport information retrieval, and travel analytics. It also offers functionalities for hotel search, booking, and sentiment analysis. Overall, the Amadeus Java SDK is a comprehensive tool for integrating Amadeus APIs into Java applications.
aiohttp-pydantic
Aiohttp pydantic is an aiohttp view to easily parse and validate requests. You define using function annotations what your methods for handling HTTP verbs expect, and Aiohttp pydantic parses the HTTP request for you, validates the data, and injects the parameters you want. It provides features like query string, request body, URL path, and HTTP headers validation, as well as Open API Specification generation.
For similar tasks
Discord-AI-Selfbot
Discord-AI-Selfbot is a Python-based Discord selfbot that uses the `discord.py-self` library to automatically respond to messages mentioning its trigger word using Groq API's Llama-3 model. It functions as a normal Discord bot on a real Discord account, enabling interactions in DMs, servers, and group chats without needing to invite a bot. The selfbot comes with features like custom AI instructions, free LLM model usage, mention and reply recognition, message handling, channel-specific responses, and a psychoanalysis command to analyze user messages for insights on personality.
UnrealOpenAIPlugin
UnrealOpenAIPlugin is a comprehensive Unreal Engine wrapper for the OpenAI API, supporting various endpoints such as Models, Completions, Chat, Images, Vision, Embeddings, Speech, Audio, Files, Moderations, Fine-tuning, and Functions. It provides support for both C++ and Blueprints, allowing users to interact with OpenAI services seamlessly within Unreal Engine projects. The plugin also includes tutorials, updates, installation instructions, authentication steps, examples of usage, blueprint nodes overview, C++ examples, plugin structure details, documentation references, tests, packaging guidelines, and limitations. Users can leverage this plugin to integrate powerful AI capabilities into their Unreal Engine projects effortlessly.
rwkv.cpp
rwkv.cpp is a port of BlinkDL/RWKV-LM to ggerganov/ggml, supporting FP32, FP16, and quantized INT4, INT5, and INT8 inference. It focuses on CPU but also supports cuBLAS. The project provides a C library rwkv.h and a Python wrapper. RWKV is a large language model architecture with models like RWKV v5 and v6. It requires only state from the previous step for calculations, making it CPU-friendly on large context lengths. Users are advised to test all available formats for perplexity and latency on a representative dataset before serious use.
runpod-worker-comfy
runpod-worker-comfy is a serverless API tool that allows users to run any ComfyUI workflow to generate an image. Users can provide input images as base64-encoded strings, and the generated image can be returned as a base64-encoded string or uploaded to AWS S3. The tool is built on Ubuntu + NVIDIA CUDA and provides features like built-in checkpoints and VAE models. Users can configure environment variables to upload images to AWS S3 and interact with the RunPod API to generate images. The tool also supports local testing and deployment to Docker hub using Github Actions.
gptscript
GPTScript is a framework that enables Large Language Models (LLMs) to interact with various systems, including local executables, applications with OpenAPI schemas, SDK libraries, or RAG-based solutions. It simplifies the integration of systems with LLMs using minimal prompts. Sample use cases include chatting with a local CLI, OpenAPI compliant endpoint, local files/directories, and running automated workflows.
VisionCraft
VisionCraft API is a free tool that offers access to over 3000 AI models for generating images, text, and GIFs. Users can interact with the API to utilize various models like StableDiffusion, LLM, and Text2GIF. The tool provides functionalities for image generation, text generation, and GIF generation. For any inquiries or assistance, users can contact the VisionCraft team through their Telegram Channel, VisionCraft API, or Telegram Bot.
local_multimodal_ai_chat
Local Multimodal AI Chat is a hands-on project that teaches you how to build a multimodal chat application. It integrates different AI models to handle audio, images, and PDFs in a single chat interface. This project is perfect for anyone interested in AI and software development who wants to gain practical experience with these technologies.
For similar jobs
sweep
Sweep is an AI junior developer that turns bugs and feature requests into code changes. It automatically handles developer experience improvements like adding type hints and improving test coverage.
teams-ai
The Teams AI Library is a software development kit (SDK) that helps developers create bots that can interact with Teams and Microsoft 365 applications. It is built on top of the Bot Framework SDK and simplifies the process of developing bots that interact with Teams' artificial intelligence capabilities. The SDK is available for JavaScript/TypeScript, .NET, and Python.
ai-guide
This guide is dedicated to Large Language Models (LLMs) that you can run on your home computer. It assumes your PC is a lower-end, non-gaming setup.
classifai
Supercharge WordPress Content Workflows and Engagement with Artificial Intelligence. Tap into leading cloud-based services like OpenAI, Microsoft Azure AI, Google Gemini and IBM Watson to augment your WordPress-powered websites. Publish content faster while improving SEO performance and increasing audience engagement. ClassifAI integrates Artificial Intelligence and Machine Learning technologies to lighten your workload and eliminate tedious tasks, giving you more time to create original content that matters.
chatbot-ui
Chatbot UI is an open-source AI chat app that allows users to create and deploy their own AI chatbots. It is easy to use and can be customized to fit any need. Chatbot UI is perfect for businesses, developers, and anyone who wants to create a chatbot.
BricksLLM
BricksLLM is a cloud native AI gateway written in Go. Currently, it provides native support for OpenAI, Anthropic, Azure OpenAI and vLLM. BricksLLM aims to provide enterprise level infrastructure that can power any LLM production use cases. Here are some use cases for BricksLLM:
- Set LLM usage limits for users on different pricing tiers
- Track LLM usage on a per user and per organization basis
- Block or redact requests containing PIIs
- Improve LLM reliability with failovers, retries and caching
- Distribute API keys with rate limits and cost limits for internal development/production use cases
- Distribute API keys with rate limits and cost limits for students
uAgents
uAgents is a Python library developed by Fetch.ai that allows for the creation of autonomous AI agents. These agents can perform various tasks on a schedule or take action on various events. uAgents are easy to create and manage, and they are connected to a fast-growing network of other uAgents. They are also secure, with cryptographically secured messages and wallets.
griptape
Griptape is a modular Python framework for building AI-powered applications that securely connect to your enterprise data and APIs. It offers developers the ability to maintain control and flexibility at every step. Griptape's core components include Structures (Agents, Pipelines, and Workflows), Tasks, Tools, Memory (Conversation Memory, Task Memory, and Meta Memory), Drivers (Prompt and Embedding Drivers, Vector Store Drivers, Image Generation Drivers, Image Query Drivers, SQL Drivers, Web Scraper Drivers, and Conversation Memory Drivers), Engines (Query Engines, Extraction Engines, Summary Engines, Image Generation Engines, and Image Query Engines), and additional components (Rulesets, Loaders, Artifacts, Chunkers, and Tokenizers). Griptape enables developers to create AI-powered applications with ease and efficiency.