-
Notifications
You must be signed in to change notification settings - Fork 120
Add skills command for AI coding assistants #622
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Merged
Merged
Changes from all commits
Commits
File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,145 @@ | ||
| package commands | ||
|
|
||
| import ( | ||
| "embed" | ||
| "fmt" | ||
| "io/fs" | ||
| "os" | ||
| "path/filepath" | ||
|
|
||
| "github.com/docker/model-runner/cmd/cli/commands/completion" | ||
| "github.com/spf13/cobra" | ||
| ) | ||
|
|
||
// skillsFS embeds the skill definition files shipped with the CLI; the
// contents of the "skills" directory are copied into each install target.
//go:embed skills/*
var skillsFS embed.FS
|
|
||
// skillsOptions holds the flag values for the "skills" command. Several
// target flags may be set in one invocation; each selected target receives
// its own copy of the embedded skills.
type skillsOptions struct {
	codex    bool   // install to ~/.codex/skills (OpenAI Codex CLI)
	claude   bool   // install to ~/.claude/skills (Claude Code)
	opencode bool   // install to ~/.config/opencode/skills (OpenCode)
	dest     string // custom install directory; empty when unset
	force    bool   // overwrite existing files instead of returning an error
}
|
|
||
| func newSkillsCmd() *cobra.Command { | ||
| opts := &skillsOptions{} | ||
|
|
||
| c := &cobra.Command{ | ||
| Use: "skills", | ||
| Short: "Install Docker Model Runner skills for AI coding assistants", | ||
| Long: `Install Docker Model Runner skills for AI coding assistants. | ||
|
|
||
| Skills are configuration files that help AI coding assistants understand | ||
| how to use Docker Model Runner effectively for local model inference. | ||
|
|
||
| Supported targets: | ||
| --codex Install to ~/.codex/skills (OpenAI Codex CLI) | ||
| --claude Install to ~/.claude/skills (Claude Code) | ||
| --opencode Install to ~/.config/opencode/skills (OpenCode) | ||
| --dest Install to a custom directory | ||
|
|
||
| Example: | ||
| docker model skills --claude | ||
| docker model skills --codex --claude | ||
| docker model skills --dest /path/to/skills`, | ||
| RunE: func(cmd *cobra.Command, args []string) error { | ||
| return runSkills(cmd, opts) | ||
| }, | ||
| ValidArgsFunction: completion.NoComplete, | ||
| } | ||
|
|
||
| c.Flags().BoolVar(&opts.codex, "codex", false, "Install skills for OpenAI Codex CLI (~/.codex/skills)") | ||
| c.Flags().BoolVar(&opts.claude, "claude", false, "Install skills for Claude Code (~/.claude/skills)") | ||
| c.Flags().BoolVar(&opts.opencode, "opencode", false, "Install skills for OpenCode (~/.config/opencode/skills)") | ||
| c.Flags().StringVar(&opts.dest, "dest", "", "Install skills to a custom directory") | ||
| c.Flags().BoolVarP(&opts.force, "force", "f", false, "Overwrite existing skills without prompting") | ||
|
|
||
| return c | ||
| } | ||
|
|
||
| func runSkills(cmd *cobra.Command, opts *skillsOptions) error { | ||
| // Collect target directories | ||
| var targets []string | ||
| homeDir, err := os.UserHomeDir() | ||
| if err != nil { | ||
| return fmt.Errorf("failed to get home directory: %w", err) | ||
| } | ||
|
|
||
| if opts.codex { | ||
| targets = append(targets, filepath.Join(homeDir, ".codex", "skills")) | ||
| } | ||
| if opts.claude { | ||
| targets = append(targets, filepath.Join(homeDir, ".claude", "skills")) | ||
| } | ||
| if opts.opencode { | ||
| targets = append(targets, filepath.Join(homeDir, ".config", "opencode", "skills")) | ||
| } | ||
| if opts.dest != "" { | ||
| targets = append(targets, opts.dest) | ||
| } | ||
|
|
||
| if len(targets) == 0 { | ||
| return fmt.Errorf("no target specified. Use --codex, --claude, --opencode, or --dest") | ||
| } | ||
|
|
||
| // Install skills to each target | ||
| for _, target := range targets { | ||
| if err := installSkills(cmd, target, opts.force); err != nil { | ||
| return fmt.Errorf("failed to install skills to %s: %w", target, err) | ||
| } | ||
| cmd.Printf("Installed Docker Model Runner skills to %s\n", target) | ||
| } | ||
|
|
||
| return nil | ||
| } | ||
|
|
||
| func installSkills(cmd *cobra.Command, targetDir string, force bool) error { | ||
| // Walk through embedded skills directory | ||
| return fs.WalkDir(skillsFS, "skills", func(path string, d fs.DirEntry, err error) error { | ||
| if err != nil { | ||
| return err | ||
| } | ||
|
|
||
| // Skip the root "skills" directory itself | ||
| if path == "skills" { | ||
| return nil | ||
| } | ||
|
|
||
| // Calculate the relative path from "skills/" | ||
| relPath, err := filepath.Rel("skills", path) | ||
| if err != nil { | ||
| return err | ||
| } | ||
|
|
||
| destPath := filepath.Join(targetDir, relPath) | ||
|
|
||
| if d.IsDir() { | ||
| // Create directory | ||
| return os.MkdirAll(destPath, 0755) | ||
| } | ||
|
|
||
| // Check if file exists and handle force flag | ||
| if _, err := os.Stat(destPath); err == nil && !force { | ||
| return fmt.Errorf("file already exists: %s (use --force to overwrite)", destPath) | ||
| } | ||
|
|
||
| // Read the embedded file | ||
| content, err := skillsFS.ReadFile(path) | ||
| if err != nil { | ||
| return fmt.Errorf("failed to read embedded file %s: %w", path, err) | ||
| } | ||
|
|
||
| // Ensure parent directory exists | ||
| if err := os.MkdirAll(filepath.Dir(destPath), 0755); err != nil { | ||
| return fmt.Errorf("failed to create directory for %s: %w", destPath, err) | ||
| } | ||
|
|
||
| // Write the file | ||
| if err := os.WriteFile(destPath, content, 0644); err != nil { | ||
| return fmt.Errorf("failed to write file %s: %w", destPath, err) | ||
| } | ||
|
|
||
| return nil | ||
| }) | ||
| } | ||
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,87 @@ | ||
| --- | ||
| name: docker-model-runner | ||
| description: Skills for using Docker Model Runner to run local LLM inference | ||
| --- | ||
|
|
||
| # Docker Model Runner | ||
|
|
||
| Docker Model Runner (DMR) makes it easy to run AI models locally using Docker. This skill helps you effectively use Docker Model Runner for local LLM inference in your development workflow. | ||
|
|
||
| ## Workflow | ||
|
|
||
| When helping users with local LLM inference using Docker Model Runner: | ||
|
|
||
| 1. **Check if Docker Model Runner is available** by running `docker model version` | ||
|
|
||
| 2. **List available models** with `docker model list` to see what's already pulled | ||
|
|
||
| 3. **Search for models** on Docker Hub or HuggingFace: | ||
|
ericcurtin marked this conversation as resolved.
|
||
| - `docker model search <query>` to find models | ||
| - Popular models include: `ai/gemma3`, `ai/llama3.2`, `ai/smollm2`, `ai/qwen3` | ||
|
|
||
| 4. **Pull models** before running: `docker model pull <model>` | ||
|
|
||
| 5. **Run models** for inference: | ||
| - One-time prompt: `docker model run ai/smollm2 "Your prompt here"` | ||
| - Interactive chat: `docker model run ai/smollm2` | ||
| - Pre-load model: `docker model run --detach ai/smollm2` | ||
|
|
||
| 6. **Use the OpenAI-compatible API** for programmatic access: | ||
| - Endpoint: `http://localhost:12434/engines/llama.cpp/v1/chat/completions` | ||
| - This is compatible with OpenAI client libraries | ||
|
|
||
| ## API Usage | ||
|
|
||
| Docker Model Runner exposes an OpenAI-compatible REST API: | ||
|
|
||
| ```bash | ||
| # Chat completions | ||
| curl http://localhost:12434/engines/llama.cpp/v1/chat/completions \ | ||
| -H "Content-Type: application/json" \ | ||
| -d '{ | ||
| "model": "ai/smollm2", | ||
| "messages": [ | ||
| {"role": "system", "content": "You are a helpful assistant."}, | ||
| {"role": "user", "content": "Hello!"} | ||
| ] | ||
| }' | ||
| ``` | ||
|
|
||
| For Python with the OpenAI library: | ||
|
|
||
| ```python | ||
| from openai import OpenAI | ||
|
|
||
| client = OpenAI( | ||
| base_url="http://localhost:12434/engines/llama.cpp/v1", | ||
| api_key="not-needed" # API key not required for local inference | ||
| ) | ||
|
|
||
| response = client.chat.completions.create( | ||
| model="ai/smollm2", | ||
| messages=[{"role": "user", "content": "Hello!"}] | ||
| ) | ||
| ``` | ||
|
|
||
| ## Key Commands | ||
|
|
||
| | Command | Description | | ||
| |---------|-------------| | ||
| | `docker model run <model> [prompt]` | Run a model with optional prompt | | ||
| | `docker model pull <model>` | Pull a model from registry | | ||
| | `docker model list` | List downloaded models | | ||
| | `docker model search <query>` | Search for models | | ||
| | `docker model ps` | Show running models | | ||
| | `docker model rm <model>` | Remove a model | | ||
| | `docker model inspect <model>` | Show model details | | ||
|
|
||
| ## Best Practices | ||
|
|
||
| - Use smaller models (like `ai/smollm2`) for faster responses during development | ||
| - Pre-load models with `--detach` for better performance in scripts | ||
| - Models stay loaded until another model is requested or an idle timeout (5 min) elapses | ||
| - Use the OpenAI-compatible API for integration with existing tools | ||
|
|
||
| ## References | ||
|
|
||
| See [references/docker-model-guide.md](references/docker-model-guide.md) for detailed documentation. | ||
Oops, something went wrong.
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
Uh oh!
There was an error while loading. Please reload this page.