
Add support for specifying response file location #30

Open · wants to merge 3 commits into `main`
20 changes: 20 additions & 0 deletions README.md
@@ -80,6 +80,25 @@ steps:
cat "${{ steps.inference.outputs.response-file }}"
```

### Using a custom response file location

This can be useful when the model response exceeds the GitHub Actions output limit.

```yaml
steps:
- name: Test Local Action
id: inference
uses: actions/ai-inference@v1
with:
prompt: 'Hello!'
response-file: './path/to/response.txt'

- name: Use Response File
run: |
echo "Response saved to: ./path/to/response.txt"
cat "./path/to/response.txt"
```

## Inputs

Various inputs are defined in [`action.yml`](action.yml) to let you configure
@@ -92,6 +111,7 @@ the action:
| `prompt-file` | Path to a file containing the prompt. If both `prompt` and `prompt-file` are provided, `prompt-file` takes precedence | `""` |
| `system-prompt` | The system prompt to send to the model | `"You are a helpful assistant"` |
| `system-prompt-file` | Path to a file containing the system prompt. If both `system-prompt` and `system-prompt-file` are provided, `system-prompt-file` takes precedence | `""` |
| `response-file` | The file path where the response should be saved | `""` |
| `model` | The model to use for inference. Must be available in the [GitHub Models](https://github.com/marketplace?type=models) catalog | `gpt-4o` |
| `endpoint` | The endpoint to use for inference. If you're running this as part of an org, you should probably use the org-specific Models endpoint | `https://models.github.ai/inference` |
| `max-tokens` | The max number of tokens to generate | 200 |
173 changes: 157 additions & 16 deletions __tests__/main.test.ts
@@ -7,6 +7,7 @@
*/
import { jest } from '@jest/globals'
import * as core from '../__fixtures__/core.js'

const mockPost = jest.fn().mockImplementation(() => ({
body: {
choices: [
@@ -31,28 +32,30 @@ jest.unstable_mockModule('@azure-rest/ai-inference', () => ({
// Default to throwing errors to catch unexpected calls
const mockExistsSync = jest.fn().mockImplementation(() => {
throw new Error(
'Unexpected call to existsSync - test should override this implementation'
`Unexpected call: fs.existsSync(${mockExistsSync.mock.calls})`
)
})
const mockReadFileSync = jest.fn().mockImplementation(() => {
throw new Error(
'Unexpected call to readFileSync - test should override this implementation'
`Unexpected call: fs.readFileSync(${mockReadFileSync.mock.calls})`
)
})
const mockWriteFileSync = jest.fn()
const mockMkdirSync = jest.fn()

/**
* Helper function to mock file system operations for one or more files
* @param fileContents - Object mapping file paths to their contents
* @param nonExistentFiles - Array of file paths that should be treated as non-existent
* @param fileContents - Object mapping paths to their contents
* @param nonExistentPaths - Array of file paths that should be treated as non-existent
*/
function mockFileContent(
fileContents: Record<string, string> = {},
nonExistentFiles: string[] = []
nonExistentPaths: string[] = []
): void {
// Mock existsSync to return true for files that exist, false for those that don't
// Mock existsSync to return true for paths that exist, false for those that don't
mockExistsSync.mockImplementation((...args: unknown[]): boolean => {
const [path] = args as [string]
if (nonExistentFiles.includes(path)) {
if (nonExistentPaths.includes(path)) {
return false
}
return true // everything else is treated as existing
@@ -88,19 +91,42 @@ function mockInputs(inputs: Record<string, string> = {}): void {

/**
* Helper function to verify common response assertions
* @param customResponseFile - Optional custom response file path. If not provided, verifies standard response with default path
*/
function verifyStandardResponse(): void {
function verifyStandardResponse(customResponseFile?: string): void {
expect(core.setFailed).not.toHaveBeenCalled()
expect(core.setOutput).toHaveBeenNthCalledWith(1, 'response', 'Hello, user!')
expect(core.setOutput).toHaveBeenNthCalledWith(
2,
'response-file',
expect.stringContaining('modelResponse.txt')
)

if (customResponseFile) {
expect(core.setOutput).toHaveBeenNthCalledWith(
2,
'response-file',
customResponseFile
)
expect(mockWriteFileSync).toHaveBeenCalledWith(
customResponseFile,
'Hello, user!',
'utf-8'
)
} else {
expect(core.setOutput).toHaveBeenNthCalledWith(
2,
'response-file',
expect.stringContaining('modelResponse.txt')
)
expect(mockWriteFileSync).toHaveBeenCalledWith(
expect.stringContaining('modelResponse.txt'),
'Hello, user!',
'utf-8'
)
}
}
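
Both call modes of this helper appear later in the suite; for quick reference:

```typescript
// Default mode: asserts the output points at a file named modelResponse.txt.
verifyStandardResponse()

// Custom mode: asserts the exact path from the response-file input is used.
verifyStandardResponse('/custom/path/response.txt')
```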

jest.unstable_mockModule('fs', () => ({
existsSync: mockExistsSync,
readFileSync: mockReadFileSync
readFileSync: mockReadFileSync,
writeFileSync: mockWriteFileSync,
mkdirSync: mockMkdirSync
}))

jest.unstable_mockModule('@actions/core', () => core)
@@ -128,6 +154,31 @@ describe('main.ts', () => {
verifyStandardResponse()
})

it('Sets the response output when no system prompt is set', async () => {
// Set the action's inputs as return values from core.getInput().
mockInputs({
prompt: 'Hello, AI!'
})

await run()

expect(core.setOutput).toHaveBeenCalled()
verifyStandardResponse()
})

it('Sets a failed status when no token is set', async () => {
// Simulate an empty token input
mockInputs({
prompt: 'Hello, AI!',
token: ''
})

await run()

// Verify that the action was marked as failed.
expect(core.setFailed).toHaveBeenNthCalledWith(1, 'GITHUB_TOKEN is not set')
})
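
This assertion implies an early guard in the action's entry point; a minimal sketch of that check, assuming the token comes from the `token` input (the actual `src/main.ts` is not part of this rendered diff):

```typescript
import * as core from '@actions/core'

// Hypothetical guard matching the test above: fail fast on an empty token.
const token = core.getInput('token')
if (!token) {
  core.setFailed('GITHUB_TOKEN is not set')
}
```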

it('Sets a failed status when no prompt is set', async () => {
// Clear the getInput mock and simulate no prompt or prompt-file input
mockInputs({
@@ -161,7 +212,6 @@ describe('main.ts', () => {

await run()

expect(mockExistsSync).toHaveBeenCalledWith(promptFile)
expect(mockReadFileSync).toHaveBeenCalledWith(promptFile, 'utf-8')
verifyStandardResponse()
})
@@ -355,7 +405,7 @@ describe('main.ts', () => {
})

it('passes custom max-tokens parameter to the model', async () => {
const customMaxTokens = 500
const customMaxTokens = 42

mockInputs({
prompt: 'Hello, AI!',
@@ -376,4 +426,95 @@

verifyStandardResponse()
})

it('uses custom response-file path when provided', async () => {
const customResponseFile = '/custom/path/response.txt'

mockInputs({
prompt: 'Hello, AI!',
'system-prompt': 'You are a test assistant.',
'response-file': customResponseFile
})
mockFileContent({}, ['/custom/path'])

await run()

expect(mockExistsSync).toHaveBeenCalledWith('/custom/path')
expect(mockMkdirSync).toHaveBeenCalledWith('/custom/path', {
recursive: true
})
verifyStandardResponse(customResponseFile)
})
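
The `existsSync`/`mkdirSync` assertions above pin down the custom-path behavior: missing parent directories are created before the write. A minimal sketch of that branch, assuming a helper named `saveResponse` (the real implementation lives in the unrendered `dist/index.js`):

```typescript
import * as fs from 'fs'
import * as path from 'path'
import * as core from '@actions/core'

// Hypothetical helper for the custom response-file branch exercised above.
function saveResponse(responseFile: string, content: string): void {
  const dir = path.dirname(responseFile) // '/custom/path' in the test
  if (!fs.existsSync(dir)) {
    fs.mkdirSync(dir, { recursive: true }) // matches the mkdirSync assertion
  }
  fs.writeFileSync(responseFile, content, 'utf-8')
  core.setOutput('response-file', responseFile)
}
```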

it('uses default response-file path when not provided', async () => {
mockInputs({
prompt: 'Hello, AI!',
'system-prompt': 'You are a test assistant.'
})

await run()

expect(mockExistsSync).not.toHaveBeenCalled()
expect(mockMkdirSync).not.toHaveBeenCalled()
verifyStandardResponse()
})

it('handles empty model response content', async () => {
// Mock the API client to return empty string content
mockPost.mockImplementationOnce(() => ({
body: {
choices: [
{
message: {
content: ''
}
}
]
}
}))

mockInputs({
prompt: 'Hello, AI!'
})

await run()

expect(core.setFailed).not.toHaveBeenCalled()
expect(core.setOutput).toHaveBeenNthCalledWith(1, 'response', '')
expect(mockWriteFileSync).toHaveBeenCalledWith(
expect.stringContaining('modelResponse.txt'),
'',
'utf-8'
)
})
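
The response shape mocked throughout the suite (`body.choices[0].message.content`) suggests extraction along these lines; the typing here is a simplification, not the SDK's actual type:

```typescript
import * as core from '@actions/core'

// Simplified stand-in for the @azure-rest/ai-inference response body.
interface ChatResponseBody {
  choices: { message: { content: string } }[]
}

// An empty string still counts as a successful response, per the test above.
function emitResponse(body: ChatResponseBody): void {
  const content = body.choices[0]?.message?.content ?? ''
  core.setOutput('response', content)
}
```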

it('handles Error exceptions gracefully', async () => {
// Mock the API client to throw an Error instance
mockPost.mockImplementationOnce(() => {
throw Error('Strange error')
})

mockInputs({
prompt: 'Hello, AI!'
})

await run()

expect(core.setFailed).toHaveBeenCalledWith('Strange error')
})

it('handles non-Error exceptions gracefully', async () => {
// Mock the API client to throw a non-Error object
mockPost.mockImplementationOnce(() => {
throw 'String error' // Not an Error instance
})

mockInputs({
prompt: 'Hello, AI!'
})

await run()

expect(core.setFailed).toHaveBeenCalledWith('An unexpected error occurred')
})
})
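
Together, the two error-handling tests fix the catch-block contract: `Error` instances surface their message, anything else maps to a generic failure. A sketch of that contract (wrapper name assumed):

```typescript
import * as core from '@actions/core'

// Hypothetical wrapper reflecting both error-handling tests above.
async function runGuarded(inference: () => Promise<void>): Promise<void> {
  try {
    await inference()
  } catch (error) {
    if (error instanceof Error) {
      core.setFailed(error.message) // e.g. 'Strange error'
    } else {
      core.setFailed('An unexpected error occurred') // thrown strings, etc.
    }
  }
}
```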
4 changes: 4 additions & 0 deletions action.yml
@@ -41,6 +41,10 @@ inputs:
description: The token to use
required: false
default: ${{ github.token }}
response-file:
description: The file path where the response should be saved
required: false
default: ''

# Define your outputs here.
outputs:
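Since the input defaults to an empty string, the action presumably falls back to a temp-directory file whose name matches the `modelResponse.txt` assertions in the tests. A hedged sketch of that resolution (the default directory is an assumption):

```typescript
import * as os from 'os'
import * as path from 'path'
import * as core from '@actions/core'

// Hypothetical input resolution: empty input means a default temp-dir file.
const responseFile =
  core.getInput('response-file') ||
  path.join(process.env['RUNNER_TEMP'] ?? os.tmpdir(), 'modelResponse.txt')
```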
2 changes: 1 addition & 1 deletion badges/coverage.svg
30 changes: 23 additions & 7 deletions dist/index.js

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion dist/index.js.map

Large diffs are not rendered by default.
