diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 6015116..987ffb4 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,6 +7,6 @@ * @segraef -.azuredevops/** @segraef -.github/** @segraef -.pipelines/** @segraef +.azuredevops/** @ShocOne +.github/** @ShocOne +.pipelines/** @ShocOne diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index a96c157..8fcb6c9 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,6 +1,5 @@ # Change -***Feel free to remove this sample text*** >Thank you for your contribution ! Please include a summary of the change and which issue is fixed. Please also include relevant motivation and context. @@ -8,7 +7,7 @@ List any dependencies that are required for this change. ## Type of Change -Please delete options that are not relevant. +Please **DELETE** options that are not relevant. - [ ] Bug fix (non-breaking change which fixes an issue) - [ ] New feature (non-breaking change which adds functionality) diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..0e2bdb4 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,17 @@ +# See GitHub's documentation for more information on this file: +# https://docs.github.com/en/code-security/supply-chain-security/keeping-your-dependencies-updated-automatically/configuration-options-for-dependency-updates +version: 2 +updates: + # Maintain dependencies for GitHub Actions + - package-ecosystem: "github-actions" + directory: "/" + schedule: + # Check for updates to GitHub Actions every week + interval: "weekly" + + # Maintain dependencies for Go modules + - package-ecosystem: "gomod" + directory: "/" + schedule: + # Check for updates to Go modules every week + interval: "daily" \ No newline at end of file diff --git a/.github/workflows/codeql.yaml b/.github/workflows/codeql.yaml new file mode 100644 index 0000000..531b5df --- /dev/null +++ b/.github/workflows/codeql.yaml @@ -0,0 +1,76 
@@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "CodeQL" + +on: + push: + branches: [ "main" ] + pull_request: + # The branches below must be a subset of the branches above + branches: [ "main" ] + schedule: + - cron: '44 8 * * 6' + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-20.04 + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'go' ] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] + # Use only 'java' to analyze code written in Java, Kotlin or both + # Use only 'javascript' to analyze code written in JavaScript, TypeScript or both + # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. 
+ + # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs + # queries: security-extended,security-and-quality + + + # Autobuild attempts to build any compiled languages (C/C++, C#, Go, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v3 + + # ℹ️ Command-line programs to run using the OS shell. + # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun + + # If the Autobuild fails above, remove it and uncomment the following three lines. + # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. + + # - run: | + # echo "Run, Build Application using script" + # ./location_of_script_within_repo/buildscript.sh + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 + with: + category: "/language:${{matrix.language}}" \ No newline at end of file diff --git a/.github/workflows/gosec-scan.yml b/.github/workflows/gosec-scan.yml new file mode 100644 index 0000000..a6e5c5b --- /dev/null +++ b/.github/workflows/gosec-scan.yml @@ -0,0 +1,30 @@ +name: "Security Scan" + +# Run workflow each time code is pushed to your repository and on a schedule. +# The scheduled workflow runs every at 00:00 on Sunday UTC time. +on: + push: + branches: [ "main" ] + pull_request: + branches: [ "main" ] + schedule: + - cron: '0 0 * * 0' + +jobs: + tests: + runs-on: ubuntu-latest + env: + GO111MODULE: on + steps: + - name: Checkout Source + uses: actions/checkout@v4 + - name: Run Gosec Security Scanner + uses: securego/gosec@master + with: + # we let the report trigger content trigger a failure using the GitHub Security features. + args: '-no-fail -fmt sarif -out results.sarif ./...' 
+ - name: Upload SARIF file + uses: github/codeql-action/upload-sarif@v3 + with: + # Path to SARIF file relative to the root of the repository + sarif_file: results.sarif \ No newline at end of file diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 51c193b..035931d 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -8,10 +8,10 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Lint Code Base - uses: github/super-linter@v4.2.2 + uses: github/super-linter@v5.0.0 env: VALIDATE_ALL_CODEBASE: false VALIDATE_MARKDOWN: false diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000..79f09f4 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,35 @@ +name: test + +on: + workflow_call: + +jobs: + run: + name: Test + runs-on: ubuntu-latest + timeout-minutes: 5 + strategy: + fail-fast: true + matrix: + go: ['stable', 'oldstable'] + + steps: + - name: Install Go + uses: actions/setup-go@v5 + with: + go-version: ${{ matrix.go }} + check-latest: true + + - name: Checkout code + uses: actions/checkout@v4 + + - name: Run tests + run: go test -v -count=1 -race -shuffle=on -coverprofile=coverage.txt ./... 
+ + - name: Upload Coverage + uses: codecov/codecov-action@v4 + continue-on-error: true + with: + token: ${{secrets.CODECOV_TOKEN}} + file: ./coverage.txt + fail_ci_if_error: false \ No newline at end of file diff --git a/.github/workflows/workflow.yml b/.github/workflows/workflow.yml deleted file mode 100644 index 51145b0..0000000 --- a/.github/workflows/workflow.yml +++ /dev/null @@ -1,25 +0,0 @@ -name: "Sample Workflow" -on: - workflow_dispatch: - inputs: - name: - description: 'Person to greet' - required: true - default: 'You' - home: - description: 'location' - required: false - default: 'The Universe' - -jobs: - job: - name: "Job" - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v2 - - name: Hello - shell: pwsh - run: | - echo "Hello ${{ github.event.inputs.name }}!" - echo "- in ${{ github.event.inputs.home }}!" diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..00041bd --- /dev/null +++ b/go.mod @@ -0,0 +1,15 @@ +module github.com/deploymenttheory/go-api-http-client + +go 1.21 + +require ( + github.com/PuerkitoBio/goquery v1.8.1 + github.com/google/uuid v1.6.0 + go.uber.org/zap v1.26.0 +) + +require ( + github.com/andybalholm/cascadia v1.3.1 // indirect + go.uber.org/multierr v1.10.0 // indirect + golang.org/x/net v0.19.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..82ba990 --- /dev/null +++ b/go.sum @@ -0,0 +1,52 @@ +github.com/PuerkitoBio/goquery v1.8.1 h1:uQxhNlArOIdbrH1tr0UXwdVFgDcZDrZVdcpygAcwmWM= +github.com/PuerkitoBio/goquery v1.8.1/go.mod h1:Q8ICL1kNUJ2sXGoAhPGUdYDJvgQgHzJsnnd3H7Ho5jQ= +github.com/andybalholm/cascadia v1.3.1 h1:nhxRkql1kdYCc8Snf7D5/D3spOX+dBgjA6u8x004T2c= +github.com/andybalholm/cascadia v1.3.1/go.mod h1:R4bJ1UQfqADjvDa4P6HZHLh/3OxWWEqc0Sk8XGwHqvA= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/uuid v1.6.0 
h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= +go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= +go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= +go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210916014120-12bc252f5db8/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net 
v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod 
h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/apihandlers/graph/graph_api_exceptions_configuration.json b/internal/apihandlers/graph/graph_api_exceptions_configuration.json new file mode 100644 index 0000000..ace253b --- /dev/null +++ b/internal/apihandlers/graph/graph_api_exceptions_configuration.json @@ -0,0 +1,18 @@ +{ + "/api/v1/icon/download/": { + "accept": "image/*", + "content_type": null + }, + "/api/v1/branding-images/download/": { + "accept": "image/*", + "content_type": null + }, + "/api/v2/inventory-preload/csv-template": { + "accept": "text/csv", + "content_type": null + }, + "/api/v1/pki/certificate-authority/active/der": { + "accept": "application/pkix-cert", + "content_type": null + } +} diff --git a/internal/apihandlers/graph/graph_api_handler.go b/internal/apihandlers/graph/graph_api_handler.go new file mode 100644 index 0000000..78a88b7 --- /dev/null +++ b/internal/apihandlers/graph/graph_api_handler.go @@ -0,0 +1,509 @@ +// graph_api_handler.go +/* ------------------------------Summary---------------------------------------- +This is a api handler module for the http_client to accommodate specifics of +microsoft's graph api(s). It handles the encoding (marshalling) and decoding (unmarshalling) +of data. It also sets the correct content headers for the various http methods. + +This module integrates with the http_client logger for wrapped error handling +for human readable return codes. It also supports the http_client tiered logging +functionality for logging support. + +The logic of this module is defined as follows: +Graph API & Graph Beta API: + +For requests (GET, POST, PUT, DELETE): +- Encoding (Marshalling): Use JSON format. 
+For responses (GET, POST, PUT): +- Decoding (Unmarshalling): Use JSON format. +For responses (DELETE): +- Handle response codes as response body lacks anything useful. +Headers +- Sets accept headers based on weighting.Graph API doesn't support XML, so MIME type is skipped and returns JSON +- Set content header as application/json with edge case exceptions based on need. + +*/ +package graph + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "log" + "mime/multipart" + "net/http" + "os" + "strings" + + _ "embed" + + "github.com/deploymenttheory/go-api-http-client/internal/httpclient" +) + +// Endpoint constants represent the URL suffixes used for Graph API token interactions. +const ( + DefaultBaseDomain = "graph.microsoft.com" // DefaultBaseDomain: represents the base domain for graph. + TokenInvalidateEndpoint = "/api/v1/auth/invalidate-token" // TokenInvalidateEndpoint: The endpoint to invalidate an active token. +) + +// ConfigMap is a map that associates endpoint URL patterns with their corresponding configurations. +// The map's keys are strings that identify the endpoint, and the values are EndpointConfig structs +// that hold the configuration for that endpoint. +type ConfigMap map[string]EndpointConfig + +// Variables +var configMap ConfigMap + +// Embedded Resources +// +//go:embed graph_api_exceptions_configuration.json +var graph_api_exceptions_configuration []byte + +// Package-level Functions + +// init is invoked automatically on package initialization and is responsible for +// setting up the default state of the package by loading the default configuration. +// If an error occurs during the loading process, the program will terminate with a fatal error log. +func init() { + // Load the default configuration from an embedded resource. 
+ err := loadDefaultConfig() + if err != nil { + log.Fatalf("Error loading default config: %s", err) + } +} + +// loadDefaultConfig reads and unmarshals the graph_api_exceptions_configuration JSON data from an embedded file +// into the configMap variable, which holds the exceptions configuration for endpoint-specific headers. +// Returns an error if the unmarshalling process fails. +func loadDefaultConfig() error { + // Unmarshal the embedded default configuration into the global configMap. + return json.Unmarshal(graph_api_exceptions_configuration, &configMap) +} + +// LoadUserConfig allows users to apply their own configuration by providing a JSON file. +// The custom configuration will override the default settings previously loaded. +// It reads the file from the provided filename path and unmarshals its content into the configMap. +// If reading or unmarshalling fails, an error is returned. +func LoadUserConfig(filename string) error { + // Read the user-provided JSON configuration file and unmarshal it into the global configMap. + userConfigBytes, err := os.ReadFile(filename) + if err != nil { + return err + } + // Override the default configuration with the user's custom settings. + return json.Unmarshal(userConfigBytes, &configMap) +} + +// Structs + +// EndpointConfig is a struct that holds configuration details for a specific API endpoint. +// It includes what type of content it can accept and what content type it should send. +type EndpointConfig struct { + Accept string `json:"accept"` // Accept specifies the MIME type the endpoint can handle in responses. + ContentType *string `json:"content_type"` // ContentType, if not nil, specifies the MIME type to set for requests sent to the endpoint. A pointer is used to distinguish between a missing field and an empty string. +} + +// UnifiedAPIHandler is a struct that implements the APIHandler interface. +// It holds a Logger instance to facilitate logging across various API handling methods. 
+// This handler is responsible for encoding and decoding request and response data, +// determining content types, and other API interactions as defined by the APIHandler interface. +type GraphAPIHandler struct { + logger httpclient.Logger // logger is used to output logs for the API handling processes. + endpointAcceptedFormatsCache map[string][]string +} + +// Functions + +// ConstructMSGraphAPIEndpoint constructs the full URL for an MS Graph API endpoint. +// The function takes version (e.g., "/v1.0" or "/beta") and the specific API path. +func (g *GraphAPIHandler) ConstructMSGraphAPIEndpoint(endpointPath string) string { + url := fmt.Sprintf("https://%s%s", DefaultBaseDomain, endpointPath) + g.logger.Info("Request will be made to MS Graph API URL:", "URL", url) + return url +} + +// GetAPIHandler initializes and returns an APIHandler with a configured logger. +func GetAPIHandler(config Config) APIHandler { + handler := &GraphAPIHandler{} + logger := NewDefaultLogger() + logger.SetLevel(config.LogLevel) // Use the LogLevel from the config + handler.SetLogger(logger) + return handler +} + +// SetLogger assigns a Logger instance to the UnifiedAPIHandler. +// This allows for logging throughout the handler's operations, +// enabling consistent logging that follows the configuration of the provided Logger. +func (u *GraphAPIHandler) SetLogger(logger Logger) { + u.logger = logger +} + +/* +// GetContentTypeHeader determines the appropriate Content-Type header for a given API endpoint. +// It attempts to find a content type that matches the endpoint prefix in the global configMap. +// If a match is found and the content type is defined (not nil), it returns the specified content type. +// If the content type is nil or no match is found in configMap, it falls back to default behaviors: +// - For all url endpoints it defaults to "application/json" for the graph beta and V1.0 API's. 
+// If the endpoint does not match any of the predefined patterns, "application/json" is used as a fallback. +// This method logs the decision process at various stages for debugging purposes. +func (u *GraphAPIHandler) GetContentTypeHeader(endpoint string) string { + // Dynamic lookup from configuration should be the first priority + for key, config := range configMap { + if strings.HasPrefix(endpoint, key) { + if config.ContentType != nil { + u.logger.Debug("Content-Type for endpoint found in configMap", "endpoint", endpoint, "content_type", *config.ContentType) + return *config.ContentType + } + u.logger.Debug("Content-Type for endpoint is nil in configMap, handling as special case", "endpoint", endpoint) + // If a nil ContentType is an expected case, do not set Content-Type header. + return "" // Return empty to indicate no Content-Type should be set. + } + } + + // Fallback to JSON if no other match is found. + u.logger.Debug("Content-Type for endpoint not found in configMap, using default JSON for Graph API for endpoint", endpoint) + return "application/json" +} +*/ + +// GetContentTypeHeader determines the appropriate Content-Type header for a given API endpoint. +// It checks a cache of previously fetched accepted formats for the endpoint. If the cache does not +// have the information, it makes an OPTIONS request to fetch and cache these formats. The function +// then selects the most appropriate Content-Type based on the accepted formats, defaulting to +// "application/json" if no specific format is found or in case of an error. +// +// Parameters: +// - endpoint: The API endpoint for which to determine the Content-Type. +// +// Returns: +// - The chosen Content-Type for the request, as a string. 
+func (u *GraphAPIHandler) GetContentTypeHeader(endpoint string) string { + // Initialize the cache if it's not already initialized + if u.endpointAcceptedFormatsCache == nil { + u.endpointAcceptedFormatsCache = make(map[string][]string) + } + + // Check the cache first + if formats, found := u.endpointAcceptedFormatsCache[endpoint]; found { + u.logger.Debug("Using cached accepted formats", "endpoint", endpoint, "formats", formats) + for _, format := range formats { + if format == "application/json" { + return "application/json" + } + if format == "application/xml" { + return "application/xml" + } + if format == "text/html" { + return "text/html" + } + if format == "text/csv" { + return "text/csv" + } + if format == "application/x-www-form-urlencoded" { + return "application/x-www-form-urlencoded" + } + if format == "text/plain" { + return "text/plain" + } + // Additional format conditions can be added here + } + } else { + // Fetch the supported formats as they are not in cache + formats, err := u.FetchSupportedRequestFormats(endpoint) + if err != nil { + u.logger.Warn("Failed to fetch supported request formats from api query, defaulting to 'application/json'", "error", err) + return "application/json" // Fallback to default + } + + // Cache the fetched formats + u.endpointAcceptedFormatsCache[endpoint] = formats + u.logger.Debug("Fetched and cached accepted formats", "endpoint", endpoint, "formats", formats) + + for _, format := range formats { + if format == "application/json" { + return "application/json" + } + if format == "application/xml" { + return "application/xml" + } + if format == "text/html" { + return "text/html" + } + if format == "text/csv" { + return "text/csv" + } + if format == "application/x-www-form-urlencoded" { + return "application/x-www-form-urlencoded" + } + if format == "text/plain" { + return "text/plain" + } + } + } + + return "application/json" // Default to JSON if no suitable format is found +} + +// FetchSupportedRequestFormats 
sends an OPTIONS request to the specified API endpoint +// and parses the response to extract the MIME types that the endpoint can accept for requests. +// This function is useful for dynamically determining the supported formats (like JSON, XML, etc.) +// for an endpoint, which can then be used to set the appropriate 'Content-Type' header in subsequent requests. +// +// Parameters: +// - endpoint: A string representing the API endpoint for which to fetch the supported request formats. +// +// Returns: +// - A slice of strings, where each string is a MIME type that the endpoint can accept. +// Example: []string{"application/json", "application/xml"} +// - An error if the request could not be sent, the response could not be processed, or if the endpoint +// does not specify accepted formats in its response headers. +// +// Note: +// - The function makes an HTTP OPTIONS request to the given endpoint and reads the 'Accept' header in the response. +// - If the 'Accept' header is not present or the OPTIONS method is not supported by the endpoint, the function +// returns an error. +// - It is the responsibility of the caller to handle any errors and to decide on the default action +// if no formats are returned or in case of an error. +func (u *GraphAPIHandler) FetchSupportedRequestFormats(endpoint string) ([]string, error) { + url := fmt.Sprintf("https://%s%s", DefaultBaseDomain, endpoint) + req, err := http.NewRequest(http.MethodOptions, url, nil) + if err != nil { + return nil, err + } + + // Add necessary headers, authentication etc. + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + // Parse the Accept header + acceptHeader := resp.Header.Get("Accept") + if acceptHeader == "" { + return nil, fmt.Errorf("no Accept header present in api response") + } + + formats := strings.Split(acceptHeader, ",") + return formats, nil +} + +// MarshalRequest encodes the request body according to the endpoint for the API. 
+func (u *GraphAPIHandler) MarshalRequest(body interface{}, method string, endpoint string) ([]byte, error) { + var ( + data []byte + err error + ) + + // Determine the format based on the endpoint + format := "json" + if strings.Contains(endpoint, "/JSSResource") { + format = "xml" + } else if strings.Contains(endpoint, "/api") { + format = "json" + } + + switch format { + case "xml": + data, err = xml.Marshal(body) + if err != nil { + return nil, err + } + + if method == "POST" || method == "PUT" { + u.logger.Trace("XML Request Body:", "Body", string(data)) + } + + case "json": + data, err = json.Marshal(body) + if err != nil { + u.logger.Error("Failed marshaling JSON request", "error", err) + return nil, err + } + + if method == "POST" || method == "PUT" || method == "PATCH" { + u.logger.Debug("JSON Request Body:", string(data)) + } + } + + return data, nil +} + +// UnmarshalResponse decodes the response body from XML or JSON format depending on the Content-Type header. +func (u *GraphAPIHandler) UnmarshalResponse(resp *http.Response, out interface{}) error { + // Handle DELETE method + if resp.Request.Method == "DELETE" { + if resp.StatusCode >= 200 && resp.StatusCode < 300 { + return nil + } else { + return fmt.Errorf("DELETE request failed with status code: %d", resp.StatusCode) + } + } + + // Handle PATCH method + if resp.Request.Method == "PATCH" { + if resp.StatusCode >= 200 && resp.StatusCode < 300 { + return nil + } else { + return fmt.Errorf("PATCH request failed with status code: %d", resp.StatusCode) + } + } + + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + u.logger.Error("Failed reading response body", "error", err) + return err + } + + // Log the raw response body and headers + u.logger.Trace("Raw HTTP Response:", string(bodyBytes)) + u.logger.Debug("Unmarshaling response", "status", resp.Status) + + // Log headers when in debug mode + u.logger.Debug("HTTP Response Headers:", resp.Header) + + // Check the Content-Type and 
Content-Disposition headers + contentType := resp.Header.Get("Content-Type") + contentDisposition := resp.Header.Get("Content-Disposition") + + // Handle binary data if necessary + if err := u.handleBinaryData(contentType, contentDisposition, bodyBytes, out); err != nil { + return err + } + + // If content type is HTML, extract the error message + if strings.Contains(contentType, "text/html") { + errMsg := extractErrorMessageFromHTML(string(bodyBytes)) + u.logger.Warn("Received HTML content", "error_message", errMsg, "status_code", resp.StatusCode) + return &APIError{ + StatusCode: resp.StatusCode, + Message: errMsg, + } + } + + // Check for non-success status codes before attempting to unmarshal + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + // Parse the error details from the response body for JSON content type + if strings.Contains(contentType, "application/json") { + var structuredErr StructuredError + if jsonErr := json.Unmarshal(bodyBytes, &structuredErr); jsonErr == nil { + detailedMessage := fmt.Sprintf("%s: %s", structuredErr.Error.Code, structuredErr.Error.Message) + u.logger.Error("Received API error response", "status_code", resp.StatusCode, "error", detailedMessage) + return &APIError{ + StatusCode: resp.StatusCode, + Message: detailedMessage, + } + } else { + u.logger.Error("Failed to parse JSON error response", "error", jsonErr) + return fmt.Errorf("received non-success status code: %d and failed to parse error response", resp.StatusCode) + } + } + + // If the response is not JSON or another error occurs, return a generic error message + u.logger.Error("Received non-success status code", "status_code", resp.StatusCode) + return fmt.Errorf("received non-success status code: %d", resp.StatusCode) + } + // Determine whether the content type is JSON or XML and unmarshal accordingly + switch { + case strings.Contains(contentType, "application/json"): + err = json.Unmarshal(bodyBytes, out) + case strings.Contains(contentType, "application/xml"), 
strings.Contains(contentType, "text/xml;charset=UTF-8"): + err = xml.Unmarshal(bodyBytes, out) + default: + // If the content type is neither JSON nor XML nor HTML + return fmt.Errorf("unexpected content type: %s", contentType) + } + + // Handle any errors that occurred during unmarshaling + if err != nil { + // If unmarshalling fails, check if the content might be HTML + if strings.Contains(string(bodyBytes), "") { + errMsg := extractErrorMessageFromHTML(string(bodyBytes)) + u.logger.Warn("Received HTML content instead of expected format", "error_message", errMsg, "status_code", resp.StatusCode) + return fmt.Errorf(errMsg) + } + + // Log the error and return it + u.logger.Error("Failed to unmarshal response", "error", err) + return fmt.Errorf("failed to unmarshal response: %v", err) + } + + return nil +} + +// GetAcceptHeader constructs and returns a weighted Accept header string for HTTP requests. +// The Accept header indicates the MIME types that the client can process and prioritizes them +// based on the quality factor (q) parameter. Higher q-values signal greater preference. +// This function specifies a range of MIME types with their respective weights, ensuring that +// the server is informed of the client's versatile content handling capabilities while +// indicating a preference for XML. The specified MIME types cover common content formats like +// images, JSON, XML, HTML, plain text, and certificates, with a fallback option for all other types. 
+func (u *GraphAPIHandler) GetAcceptHeader() string { + weightedAcceptHeader := "application/x-x509-ca-cert;q=0.95," + + "application/pkix-cert;q=0.94," + + "application/pem-certificate-chain;q=0.93," + + "application/octet-stream;q=0.8," + // For general binary files + "image/png;q=0.75," + + "image/jpeg;q=0.74," + + "image/*;q=0.7," + + "application/xml;q=0.65," + + "text/xml;q=0.64," + + "text/xml;charset=UTF-8;q=0.63," + + "application/json;q=0.5," + + "text/html;q=0.5," + + "text/plain;q=0.4," + + "*/*;q=0.05" // Fallback for any other types + return weightedAcceptHeader +} + +// MarshalMultipartFormData takes a map with form fields and file paths and returns the encoded body and content type. +func (u *GraphAPIHandler) MarshalMultipartRequest(fields map[string]string, files map[string]string) ([]byte, string, error) { + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + + // Add the simple fields to the form data + for field, value := range fields { + if err := writer.WriteField(field, value); err != nil { + return nil, "", err + } + } + + // Add the files to the form data + for formField, filepath := range files { + file, err := os.Open(filepath) + if err != nil { + return nil, "", err + } + defer file.Close() + + part, err := writer.CreateFormFile(formField, filepath) + if err != nil { + return nil, "", err + } + if _, err := io.Copy(part, file); err != nil { + return nil, "", err + } + } + + // Close the writer before returning + contentType := writer.FormDataContentType() + if err := writer.Close(); err != nil { + return nil, "", err + } + + return body.Bytes(), contentType, nil +} + +// handleBinaryData checks if the response should be treated as binary data and assigns to out if so. 
+func (u *GraphAPIHandler) handleBinaryData(contentType, contentDisposition string, bodyBytes []byte, out interface{}) error { + if strings.Contains(contentType, "application/octet-stream") || strings.HasPrefix(contentDisposition, "attachment") { + if outPointer, ok := out.(*[]byte); ok { + *outPointer = bodyBytes + return nil + } else { + return fmt.Errorf("output parameter is not a *[]byte for binary data") + } + } + return nil // If not binary data, no action needed +} diff --git a/internal/apihandlers/jamfpro/jamfpro_api_error_messages.go b/internal/apihandlers/jamfpro/jamfpro_api_error_messages.go new file mode 100644 index 0000000..69ee0f6 --- /dev/null +++ b/internal/apihandlers/jamfpro/jamfpro_api_error_messages.go @@ -0,0 +1,49 @@ +package jamfpro + +import ( + "bytes" + "encoding/json" + "strings" + + "github.com/PuerkitoBio/goquery" +) + +// ExtractErrorMessageFromHTML attempts to parse an HTML error page and extract a human-readable error message. +func ExtractErrorMessageFromHTML(htmlContent string) string { + r := bytes.NewReader([]byte(htmlContent)) + doc, err := goquery.NewDocumentFromReader(r) + if err != nil { + return "Unable to parse HTML content" + } + + var messages []string + doc.Find("p").Each(func(i int, s *goquery.Selection) { + messages = append(messages, s.Text()) + }) + + return strings.Join(messages, " | ") +} + +// ParseJSONErrorResponse parses the JSON error message from the response body. 
+func ParseJSONErrorResponse(body []byte) (string, error) { + var errorResponse struct { + HTTPStatus int `json:"httpStatus"` + Errors []struct { + Code string `json:"code"` + Description string `json:"description"` + ID string `json:"id"` + Field string `json:"field"` + } `json:"errors"` + } + + err := json.Unmarshal(body, &errorResponse) + if err != nil { + return "", err + } + + if len(errorResponse.Errors) > 0 { + return errorResponse.Errors[0].Description, nil + } + + return "No error description available", nil +} diff --git a/internal/apihandlers/jamfpro/jamfpro_api_exceptions_configuration.json b/internal/apihandlers/jamfpro/jamfpro_api_exceptions_configuration.json new file mode 100644 index 0000000..ace253b --- /dev/null +++ b/internal/apihandlers/jamfpro/jamfpro_api_exceptions_configuration.json @@ -0,0 +1,18 @@ +{ + "/api/v1/icon/download/": { + "accept": "image/*", + "content_type": null + }, + "/api/v1/branding-images/download/": { + "accept": "image/*", + "content_type": null + }, + "/api/v2/inventory-preload/csv-template": { + "accept": "text/csv", + "content_type": null + }, + "/api/v1/pki/certificate-authority/active/der": { + "accept": "application/pkix-cert", + "content_type": null + } +} diff --git a/internal/apihandlers/jamfpro/jamfpro_api_handler.go b/internal/apihandlers/jamfpro/jamfpro_api_handler.go new file mode 100644 index 0000000..135b160 --- /dev/null +++ b/internal/apihandlers/jamfpro/jamfpro_api_handler.go @@ -0,0 +1,378 @@ +// jamfpro_api_handler.go +/* ------------------------------Summary---------------------------------------- +This is a api handler module for the http_client to accommodate specifics of +jamf's api(s). It handles the encoding (marshalling) and decoding (unmarshalling) +of data. It also sets the correct content headers for the various http methods. + +This module integrates with the http_client logger for wrapped error handling +for human readable return codes. 
It also supports the http_client tiered logging +functionality for logging support. + +The logic of this module is defined as follows: +Classic API: + +For requests (GET, POST, PUT, DELETE): +- Encoding (Marshalling): Use XML format. +For responses (GET, POST, PUT): +- Decoding (Unmarshalling): Use XML format. +For responses (DELETE): +- Handle response codes as response body lacks anything useful. +Headers +- Sets accept headers based on weighting. XML out weighs JSON to ensure XML is returned +- Sets content header as application/xml with edge case exceptions based on need. + +JamfPro API: + +For requests (GET, POST, PUT, DELETE): +- Encoding (Marshalling): Use JSON format. +For responses (GET, POST, PUT): +- Decoding (Unmarshalling): Use JSON format. +For responses (DELETE): +- Handle response codes as response body lacks anything useful. +Headers +- Sets accept headers based on weighting. Jamf Pro API doesn't support XML, so MIME type is skipped and returns JSON +- Set content header as application/json with edge case exceptions based on need. +*/ +package jamfpro + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "log" + "mime/multipart" + "net/http" + "os" + "strings" + + _ "embed" + + "github.com/deploymenttheory/go-api-http-client/internal/httpclient" +) + +// Endpoint constants represent the URL suffixes used for Jamf API token interactions. +const ( + DefaultBaseDomain = ".jamfcloud.com" // DefaultBaseDomain: represents the base domain for the jamf instance. + OAuthTokenEndpoint = "/api/oauth/token" // OAuthTokenEndpoint: The endpoint to obtain an OAuth token. + BearerTokenEndpoint = "/api/v1/auth/token" // BearerTokenEndpoint: The endpoint to obtain a bearer token. + TokenRefreshEndpoint = "/api/v1/auth/keep-alive" // TokenRefreshEndpoint: The endpoint to refresh an existing token. + TokenInvalidateEndpoint = "/api/v1/auth/invalidate-token" // TokenInvalidateEndpoint: The endpoint to invalidate an active token. 
+) + +// ConfigMap is a map that associates endpoint URL patterns with their corresponding configurations. +// The map's keys are strings that identify the endpoint, and the values are EndpointConfig structs +// that hold the configuration for that endpoint. +type ConfigMap map[string]EndpointConfig + +// Variables +var configMap ConfigMap + +// Embedded Resources +// +//go:embed jamfpro_api_exceptions_configuration.json +var jamfpro_api_exceptions_configuration []byte + +// Package-level Functions + +// init is invoked automatically on package initialization and is responsible for +// setting up the default state of the package by loading the default configuration. +// If an error occurs during the loading process, the program will terminate with a fatal error log. +func init() { + // Load the default configuration from an embedded resource. + err := loadDefaultConfig() + if err != nil { + log.Fatalf("Error loading default config: %s", err) + } +} + +// loadDefaultConfig reads and unmarshals the jamfpro_api_exceptions_configuration JSON data from an embedded file +// into the configMap variable, which holds the exceptions configuration for endpoint-specific headers. +// Returns an error if the unmarshalling process fails. +func loadDefaultConfig() error { + // Unmarshal the embedded default configuration into the global configMap. + return json.Unmarshal(jamfpro_api_exceptions_configuration, &configMap) +} + +// EndpointConfig is a struct that holds configuration details for a specific API endpoint. +// It includes what type of content it can accept and what content type it should send. +type EndpointConfig struct { + Accept string `json:"accept"` // Accept specifies the MIME type the endpoint can handle in responses. + ContentType *string `json:"content_type"` // ContentType, if not nil, specifies the MIME type to set for requests sent to the endpoint. A pointer is used to distinguish between a missing field and an empty string. 
+} + +// JamfAPIHandler implements the APIHandler interface for the Jamf Pro API. +type JamfAPIHandler struct { + logger httpclient.Logger // logger is used to output logs for the API handling processes. + OverrideBaseDomain string // OverrideBaseDomain is used to override the base domain for URL construction. + InstanceName string // InstanceName is the name of the Jamf instance. +} + +// Functions + +// GetBaseDomain returns the appropriate base domain for URL construction. +// It uses OverrideBaseDomain if set, otherwise falls back to DefaultBaseDomain. +func (j *JamfAPIHandler) GetBaseDomain() string { + if j.OverrideBaseDomain != "" { + return j.OverrideBaseDomain + } + return DefaultBaseDomain +} + +// ConstructAPIResourceEndpoint returns the full URL for a Jamf API resource endpoint path. +func (j *JamfAPIHandler) ConstructAPIResourceEndpoint(endpointPath string) string { + baseDomain := j.GetBaseDomain() + url := fmt.Sprintf("https://%s%s%s", j.InstanceName, baseDomain, endpointPath) + j.logger.Info("Request will be made to API URL:", "URL", url) + return url +} + +// ConstructAPIAuthEndpoint returns the full URL for a Jamf API auth endpoint path. +func (j *JamfAPIHandler) ConstructAPIAuthEndpoint(endpointPath string) string { + baseDomain := j.GetBaseDomain() + url := fmt.Sprintf("https://%s%s%s", j.InstanceName, baseDomain, endpointPath) + j.logger.Info("Request will be made to API authentication URL:", "URL", url) + return url +} + +// GetContentTypeHeader determines the appropriate Content-Type header for a given API endpoint. +// It attempts to find a content type that matches the endpoint prefix in the global configMap. +// If a match is found and the content type is defined (not nil), it returns the specified content type. +// If the content type is nil or no match is found in configMap, it falls back to default behaviors: +// - For url endpoints starting with "/JSSResource", it defaults to "application/xml" for the Classic API. 
+// - For url endpoints starting with "/api", it defaults to "application/json" for the JamfPro API. +// If the endpoint does not match any of the predefined patterns, "application/json" is used as a fallback. +// This method logs the decision process at various stages for debugging purposes. +func (u *JamfAPIHandler) GetContentTypeHeader(endpoint string) string { + // Dynamic lookup from configuration should be the first priority + for key, config := range configMap { + if strings.HasPrefix(endpoint, key) { + if config.ContentType != nil { + u.logger.Debug("Content-Type for endpoint found in configMap", "endpoint", endpoint, "content_type", *config.ContentType) + return *config.ContentType + } + u.logger.Debug("Content-Type for endpoint is nil in configMap, handling as special case", "endpoint", endpoint) + // If a nil ContentType is an expected case, do not set Content-Type header. + return "" // Return empty to indicate no Content-Type should be set. + } + } + + // If no specific configuration is found, then check for standard URL patterns. + if strings.Contains(endpoint, "/JSSResource") { + u.logger.Debug("Content-Type for endpoint defaulting to XML for Classic API", "endpoint", endpoint) + return "application/xml" // Classic API uses XML + } else if strings.Contains(endpoint, "/api") { + u.logger.Debug("Content-Type for endpoint defaulting to JSON for JamfPro API", "endpoint", endpoint) + return "application/json" // JamfPro API uses JSON + } + + // Fallback to JSON if no other match is found. + u.logger.Debug("Content-Type for endpoint not found in configMap or standard patterns, using default JSON", "endpoint", endpoint) + return "application/json" +} + +// MarshalRequest encodes the request body according to the endpoint for the API. 
+func (u *JamfAPIHandler) MarshalRequest(body interface{}, method string, endpoint string) ([]byte, error) { + var ( + data []byte + err error + ) + + // Determine the format based on the endpoint + format := "json" + if strings.Contains(endpoint, "/JSSResource") { + format = "xml" + } else if strings.Contains(endpoint, "/api") { + format = "json" + } + + switch format { + case "xml": + data, err = xml.Marshal(body) + if err != nil { + return nil, err + } + + if method == "POST" || method == "PUT" { + u.logger.Trace("XML Request Body:", "Body", string(data)) + } + + case "json": + data, err = json.Marshal(body) + if err != nil { + u.logger.Error("Failed marshaling JSON request", "error", err) + return nil, err + } + + if method == "POST" || method == "PUT" || method == "PATCH" { + u.logger.Debug("JSON Request Body:", string(data)) + } + } + + return data, nil +} + +// UnmarshalResponse decodes the response body from XML or JSON format depending on the Content-Type header. +func (u *JamfAPIHandler) UnmarshalResponse(resp *http.Response, out interface{}) error { + // Handle DELETE method + if resp.Request.Method == "DELETE" { + if resp.StatusCode >= 200 && resp.StatusCode < 300 { + return nil + } else { + return fmt.Errorf("DELETE request failed with status code: %d", resp.StatusCode) + } + } + + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + u.logger.Error("Failed reading response body", "error", err) + return err + } + + // Log the raw response body and headers + u.logger.Trace("Raw HTTP Response:", string(bodyBytes)) + u.logger.Debug("Unmarshaling response", "status", resp.Status) + + // Log headers when in debug mode + u.logger.Debug("HTTP Response Headers:", resp.Header) + + // Check the Content-Type and Content-Disposition headers + contentType := resp.Header.Get("Content-Type") + contentDisposition := resp.Header.Get("Content-Disposition") + + // Handle binary data if necessary + if err := u.handleBinaryData(contentType, contentDisposition, 
bodyBytes, out); err != nil { + return err + } + + // If content type is HTML, extract the error message + if strings.Contains(contentType, "text/html") { + errMsg := ExtractErrorMessageFromHTML(string(bodyBytes)) + u.logger.Warn("Received HTML content", "error_message", errMsg, "status_code", resp.StatusCode) + return &APIError{ + StatusCode: resp.StatusCode, + Message: errMsg, + } + } + + // Check for non-success status codes before attempting to unmarshal + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + // Parse the error details from the response body for JSON content type + if strings.Contains(contentType, "application/json") { + description, err := ParseJSONErrorResponse(bodyBytes) + if err != nil { + u.logger.Error("Failed to parse JSON error response", "error", err) + return fmt.Errorf("received non-success status code: %d and failed to parse error response", resp.StatusCode) + } + return fmt.Errorf("received non-success status code: %d, error: %s", resp.StatusCode, description) + } + + // If the response is not JSON or another error occurs, return a generic error message + u.logger.Error("Received non-success status code", "status_code", resp.StatusCode) + return fmt.Errorf("received non-success status code: %d", resp.StatusCode) + } + + // Determine whether the content type is JSON or XML and unmarshal accordingly + switch { + case strings.Contains(contentType, "application/json"): + err = json.Unmarshal(bodyBytes, out) + case strings.Contains(contentType, "application/xml"), strings.Contains(contentType, "text/xml;charset=UTF-8"): + err = xml.Unmarshal(bodyBytes, out) + default: + // If the content type is neither JSON nor XML nor HTML + return fmt.Errorf("unexpected content type: %s", contentType) + } + + // Handle any errors that occurred during unmarshaling + if err != nil { + // If unmarshalling fails, check if the content might be HTML + if strings.Contains(string(bodyBytes), "") { + errMsg := ExtractErrorMessageFromHTML(string(bodyBytes)) + 
u.logger.Warn("Received HTML content instead of expected format", "error_message", errMsg, "status_code", resp.StatusCode) + return fmt.Errorf(errMsg) + } + + // Log the error and return it + u.logger.Error("Failed to unmarshal response", "error", err) + return fmt.Errorf("failed to unmarshal response: %v", err) + } + + return nil +} + +// GetAcceptHeader constructs and returns a weighted Accept header string for HTTP requests. +// The Accept header indicates the MIME types that the client can process and prioritizes them +// based on the quality factor (q) parameter. Higher q-values signal greater preference. +// This function specifies a range of MIME types with their respective weights, ensuring that +// the server is informed of the client's versatile content handling capabilities while +// indicating a preference for XML. The specified MIME types cover common content formats like +// images, JSON, XML, HTML, plain text, and certificates, with a fallback option for all other types. +func (u *JamfAPIHandler) GetAcceptHeader() string { + weightedAcceptHeader := "application/x-x509-ca-cert;q=0.95," + + "application/pkix-cert;q=0.94," + + "application/pem-certificate-chain;q=0.93," + + "application/octet-stream;q=0.8," + // For general binary files + "image/png;q=0.75," + + "image/jpeg;q=0.74," + + "image/*;q=0.7," + + "application/xml;q=0.65," + + "text/xml;q=0.64," + + "text/xml;charset=UTF-8;q=0.63," + + "application/json;q=0.5," + + "text/html;q=0.5," + + "text/plain;q=0.4," + + "*/*;q=0.05" // Fallback for any other types + return weightedAcceptHeader +} + +// MarshalMultipartFormData takes a map with form fields and file paths and returns the encoded body and content type. 
+func (u *JamfAPIHandler) MarshalMultipartRequest(fields map[string]string, files map[string]string) ([]byte, string, error) { + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + + // Add the simple fields to the form data + for field, value := range fields { + if err := writer.WriteField(field, value); err != nil { + return nil, "", err + } + } + + // Add the files to the form data + for formField, filepath := range files { + file, err := os.Open(filepath) + if err != nil { + return nil, "", err + } + defer file.Close() + + part, err := writer.CreateFormFile(formField, filepath) + if err != nil { + return nil, "", err + } + if _, err := io.Copy(part, file); err != nil { + return nil, "", err + } + } + + // Close the writer before returning + contentType := writer.FormDataContentType() + if err := writer.Close(); err != nil { + return nil, "", err + } + + return body.Bytes(), contentType, nil +} + +// handleBinaryData checks if the response should be treated as binary data and assigns to out if so. +func (u *JamfAPIHandler) handleBinaryData(contentType, contentDisposition string, bodyBytes []byte, out interface{}) error { + if strings.Contains(contentType, "application/octet-stream") || strings.HasPrefix(contentDisposition, "attachment") { + if outPointer, ok := out.(*[]byte); ok { + *outPointer = bodyBytes + return nil + } else { + return fmt.Errorf("output parameter is not a *[]byte for binary data") + } + } + return nil // If not binary data, no action needed +} diff --git a/internal/httpclient/api_handler.go b/internal/httpclient/api_handler.go new file mode 100644 index 0000000..7af3e02 --- /dev/null +++ b/internal/httpclient/api_handler.go @@ -0,0 +1,48 @@ +// api_handler.go +package httpclient + +import ( + "fmt" + "net/http" + + "github.com/deploymenttheory/go-api-http-client/internal/apihandlers/jamfpro" +) + +// APIHandler is an interface for encoding, decoding, and determining content types for different API implementations. 
+// It encapsulates behavior for encoding and decoding requests and responses. +type APIHandler interface { + GetBaseDomain() string + ConstructAPIResourceEndpoint(endpointPath string) string + ConstructAPIAuthEndpoint(endpointPath string) string + MarshalRequest(body interface{}, method string, endpoint string) ([]byte, error) + MarshalMultipartRequest(fields map[string]string, files map[string]string) ([]byte, string, error) // New method for multipart + UnmarshalResponse(resp *http.Response, out interface{}) error + GetContentTypeHeader(method string) string + GetAcceptHeader() string +} + +// LoadAPIHandler returns an APIHandler based on the provided API type. +// 'apiType' parameter could be "jamf" or "graph" to specify which API handler to load. +func LoadAPIHandler(config Config, apiType string) (APIHandler, error) { + var apiHandler APIHandler + switch apiType { + case "jamfpro": + // Assuming GetAPIHandler returns a JamfAPIHandler + apiHandler = &jamfpro.JamfAPIHandler{ + // Initialize with necessary parameters + } + /*case "graph": + // Assuming GetAPIHandler returns a GraphAPIHandler + apiHandler = &graph.GraphAPIHandler{ + // Initialize with necessary parameters + }*/ + default: + return nil, fmt.Errorf("unsupported API type: %s", apiType) + } + + // Set the logger level for the handler if needed + logger := NewDefaultLogger() // Or use config.Logger if it's not nil + logger.SetLevel(config.LogLevel) + + return apiHandler, nil +} diff --git a/internal/httpclient/architectural_design.md b/internal/httpclient/architectural_design.md new file mode 100644 index 0000000..5ceac26 --- /dev/null +++ b/internal/httpclient/architectural_design.md @@ -0,0 +1,420 @@ +# Architectural Design Decision: Configuration and Initialization of Jamf Pro SDK + +## Decision Topic: Defining a flexible, modular, and secure mechanism for the initialization and configuration of the SDK to interact with various environments of the Jamf Pro API + +## Context + +The Jamf Pro api SDK 
will be used in diverse contexts including Terraform providers, standalone scripts, compiled apps, and CI/CD pipelines. It needs to cater to multiple Jamf Pro environments like development, pre-production, and production. This decision outlines the configuration and initialization approach to ensure flexibility, modularity, and security. + +## Decision + +Configuration Structure: + +Implement a Go struct (Config) to encapsulate configuration parameters required by the SDK. +go + +```go +type Client struct { + BaseURL string + authMethod string + Token string + oAuthCredentials OAuthCredentials + bearerTokenAuthCredentials BearerTokenAuthCredentials + Expiry time.Time + httpClient *http.Client + tokenLock sync.Mutex + config Config + logger Logger + ConcurrencyMgr *ConcurrencyManager +} +``` + +## SDK Initialization + +Create an initialization function (NewClient). It accepts an optional Config object, which if not provided, defaults to retrieving configuration from environment variables. +go + +```go +func NewClient(cfg *Config) (*JamfClient, error) { + // Initialization logic here +} +``` + +### Configuration Loading Utility: + +Implement utility functions that will allow loading the configuration from a file, streamlining the process for users who prefer configuration files over environment variables or direct parameters. + +### Automatic Token Lifecycle Management: + +During the SDK initialization, automate the retrieval of the Bearer Token using the provided authentication details. +Internally manage the token's lifecycle, which includes auto-refreshing when nearing expiry, thus abstracting token management from the SDK users and ensuring uninterrupted API interactions. Include a buffer period that if reached will trigger the token refresh logic the next time a request is performed. 
Tokens should be locked during refresh so that concurrent requests cannot trigger duplicate renewals.

## Rationale

Flexibility: By supporting environment variables, direct parameters, and configuration files, the SDK can be seamlessly integrated into diverse contexts, from scripts to CI/CD pipelines.

Security: Handling token lifecycle internally reduces the risk of token mismanagement. Direct exposure of client_id and client_secret is minimized, as they can be stored in secure environment variables or encrypted configuration files.

User Experience: By abstracting complexities like token management and environment selection, the SDK offers a streamlined experience to developers, ensuring they focus on business logic rather than SDK intricacies.

## Implications

Maintenance: As Jamf Pro evolves, the SDK may need updates to accommodate any changes in the authentication process or endpoint structures.

Security Practices: Users must ensure they follow best practices in securely storing and managing client_id and client_secret, especially when using environment variables or configuration files.

---

## Architectural Design Decision: Implementing a Comprehensive Logging System in Jamf Pro SDK

**Decision Topic**: The introduction and implementation of a comprehensive, multi-tiered logging system within the SDK to provide detailed and adjustable logging capabilities.

### Context:
The SDK is used by a diverse audience who have varying needs regarding the amount and type of information required from logging. There needs to be a balance between too much information, which can overwhelm users, and too little, which can hinder problem resolution.

### Decision:

1. **LogLevel Enumeration**:
   - Define a `LogLevel` type as an enumeration to represent various logging levels, allowing users to set the granularity of logs they receive.
+ ```go + type LogLevel int + + const ( + LogLevelNone LogLevel = iota + LogLevelWarning + LogLevelInfo + LogLevelDebug + ) + ``` + + **Logging Interface**: + - Introduce a `Logger` interface with methods corresponding to different logging levels (`Trace`, `Debug`, `Info`, `Warn`, `Error`, `Fatal`). This enables a consistent logging approach across different parts of the SDK. + +```go + type Logger interface { + // ... method signatures ... + } +``` + +3. **Default Logger**: + - Implement a `defaultLogger` that utilizes Go’s standard logging library, encapsulating the logic for checking log levels before emitting logs. The default logger's level can be set at runtime. + +4. **Flexible Log Level Setting**: + - Provide a method to set the log level (`SetLevel`) dynamically, allowing users to adjust the verbosity of logs as needed without changing the code. + + ```go + func (d *defaultLogger) SetLevel(level LogLevel) { + d.logLevel = level + } + ``` + +5. **Level-Based Logging Logic**: + - Ensure that each logging method in the `defaultLogger` checks the current `logLevel` to decide whether to output the log message, effectively filtering logs based on the set level. + +### Rationale: + +- **Customizability**: Users can customize the verbosity of the logs by setting the appropriate `LogLevel`, making the SDK adaptable for different environments and use cases. + +- **Clarity and Relevance**: By filtering logs according to the set level, users receive only the most relevant information, reducing noise and focusing on the appropriate details for their needs. + +- **Simplicity and Familiarity**: Using Go’s standard logging library for the default implementation keeps the SDK simple and familiar to Go developers. + +- **Flexibility and Maintenance**: The logging interface allows for different logging implementations to be integrated in the future, ensuring the SDK can evolve without breaking existing functionality. 
+ +### Implications: + +- **Performance**: The logging system is designed to minimize performance impacts by checking log levels before constructing log messages or performing output operations. + +- **Maintenance**: Developers must ensure that log messages at different levels are meaningful and appropriate, necessitating thoughtful logging throughout the SDK's development. + +- **Consistency**: A consistent logging interface across the SDK ensures that different components and external contributors adhere to the same logging standards and practices. + +--- + +This document outlines the architectural decision-making for implementing a comprehensive logging system, detailing the context, decisions, rationale, and implications, and aligns with the current implementation of the logging system in the SDK. + +## Architectural Design Decision: Dynamic Rate Limiting and Retrying Mechanism in Jamf Pro SDK + +**Decision Topic**: Implementing a responsive mechanism that automatically adapts rate limiting and retry strategies based on real-time API response behaviors and headers. + +### Context: +Given that APIs can change their rate-limiting behaviors dynamically due to various reasons such as server loads, maintenance, and other external factors, it is crucial for the SDK to adjust its request patterns accordingly to ensure optimal performance and minimize failed requests. + +### Decision: + +1. **Dynamic Inspection of Rate Limit Headers**: + - After every API call, parse the response headers to extract information regarding the rate limits. Specifically, focus on headers like `X-RateLimit-Remaining` (indicating how many requests are left in the current window) and `X-RateLimit-Reset` (indicating when the rate limit window resets). + - Based on the parsed information, adjust the rate at which the SDK sends requests. For instance, if the `X-RateLimit-Remaining` indicates only a few requests are left and the reset time is far off, the SDK should slow down its request rate. 
+ +2. **Intelligent Retrying**: + - If a request fails due to rate limiting (typically indicated by a `429 Too Many Requests` response), the SDK should wait for the time indicated by `X-RateLimit-Reset` before retrying. + - For other types of errors, use an exponential backoff strategy with a cap to ensure the SDK doesn't retry indefinitely. + +3. **User Configurability**: + - Provide configuration options allowing users to set maximum retry attempts, define custom backoff strategies, or even disable the dynamic rate limiting if needed. + + ```go + type Config struct { + // ... other fields ... + MaxRetryAttempts int + CustomBackoff func(attempt int) time.Duration + } + ``` + +### Rationale: + +- **Adaptability**: By inspecting the API's response headers in real-time and adjusting the request rate dynamically, the SDK can adapt to varying server behaviors, ensuring optimal performance and minimizing disruptions. + +- **User Experience**: The built-in intelligence of dynamically adjusting request rates and retry strategies abstracts these complexities from the users, offering a more seamless experience. + +- **Flexibility**: Offering configuration options ensures that advanced users can fine-tune the SDK's behavior to best fit their unique scenarios. + +### Implications: + +- **Complexity**: The dynamic nature of this mechanism might introduce additional complexities in terms of maintenance and debugging. + +- **Latency**: In cases where the API is frequently rate-limiting or there are consistent transient errors, operations might experience added latency due to the wait times. + +--- + +### Architectural Design Decision: Timeouts and Deadlines in the SDK + +**Decision**: The SDK will provide a mechanism for users to configure custom timeout values. However, a default timeout will be enforced to ensure that requests don't hang indefinitely. 
+ +**Justification**: +- **Usability**: Providing a default timeout value ensures that, out-of-the-box, users won't face issues with requests hanging indefinitely due to unforeseen network or server-side issues. + +- **Flexibility**: By allowing timeout values to be configurable, the SDK caters to advanced users who may have specific timeout requirements depending on their use case or environment. + +- **Robustness**: By explicitly handling timeouts, the SDK becomes more resilient to potential disruptions and can give meaningful error messages to the user. + +#### Implementation Details: + +1. **Default Timeout**: A reasonable default timeout can be set. Let's say `10 seconds` as an example. + +2. **Configurable Timeout**: Users should be able to easily configure this value based on their needs. + +Here's a potential implementation using Go: + +```go +package apiClient + +import ( + "net/http" + "time" +) + +const DefaultTimeout = 10 * time.Second + +type Client struct { + httpClient *http.Client + // ... other fields +} + +type ClientOption func(*Client) + +func WithTimeout(timeout time.Duration) ClientOption { + return func(c *Client) { + c.httpClient.Timeout = timeout + } +} + +func New(options ...ClientOption) *Client { + client := &Client{ + httpClient: &http.Client{ + Timeout: DefaultTimeout, + }, + } + + for _, opt := range options { + opt(client) + } + + return client +} +``` + +#### Usage: + +For users who want to use the default timeout: + +```go +client := apiClient.New() // Uses the 10 second default timeout +``` + +For users who want to set a custom timeout: + +```go +client := apiClient.New(apiClient.WithTimeout(30 * time.Second)) // Custom 30 second timeout +``` + +With this approach, the SDK provides sensible defaults while still offering configurability for varied requirements. 
+--- + +### Architectural Design Decision: Bearer Token Management in the SDK + +**Decision**: The SDK will handle the expiration and renewal of Bearer Tokens automatically without requiring intervention from the user. + +**Justification**: + +- **Seamlessness**: Automatic token management ensures uninterrupted SDK operations, thus providing a smoother user experience. + +- **Reliability**: By internally managing token renewals, the SDK reduces the potential for manual errors and enhances the reliability of any tool or script using it. + +- **Encapsulation**: Users of the SDK should focus on their core requirements, and not be bogged down with the intricacies of token management. The SDK will abstract these details. + +#### Implementation Details: + +1. **Token Storage**: The SDK will store the Bearer Token and its expiration date internally. + +2. **Automatic Token Renewal**: Before any API call, the SDK will check the token's expiration date. If the token is close to expiry (or expired), the SDK will proactively renew it using the `/v1/auth/keep-alive` endpoint or by obtaining a new one via `/v1/auth/token`. + +3. **Transparent to User**: The token renewal process will be transparent to the user. They will simply receive the results of their intended API call without any indication of the token being renewed (unless they're in debug mode, where such internal operations might be logged). + +Here's a rough outline in Go: + +```go +package apiClient + +import ( + "sync" + "time" +) + +type Client struct { + token string + tokenExpiry time.Time + httpClient *http.Client + tokenLock sync.Mutex + // ... other fields +} + +// This function is called before every API call to ensure the token is valid. +func (c *Client) ensureValidToken() error { + c.tokenLock.Lock() + defer c.tokenLock.Unlock() + + // If token is close to expiry or already expired, refresh it. + // The "5 minutes" buffer is just an example; it can be adjusted as needed. 
+ if time.Until(c.tokenExpiry) < 5*time.Minute { + err := c.refreshToken() + if err != nil { + return err + } + } + return nil +} + +// refreshToken reaches out to Jamf Pro API to get a new token. +func (c *Client) refreshToken() error { + // Logic to send a POST request to /v1/auth/keep-alive or /v1/auth/token. + // Update c.token and c.tokenExpiry based on the response. + // ... + return nil +} +``` +--- + + +# Architectural Decision Record (ADR): Content Negotiation + +Jamf Pro offers two key APIs: the Classic API and the Jamf Pro API. Each API has its own set of nuances in terms of base URL, authentication mechanisms, privileges, HTTP methods, data schema, and response codes. In order to interact with both these APIs seamlessly, we require an HTTP client that can modularly address these nuances. + +#### **Decision**: + +1. **Modular Design**: The Go-based HTTP client will be modular, with separate modules dedicated to handling specifics of the Classic API and the Jamf Pro API. + +2. **Unified Interface**: Despite the modularity, there will be a unified interface to interact with both APIs to provide a seamless experience for users of the client. + +3. **Authentication**: + - **Classic API**: Implement both Client Credentials and Bearer Token authentication methods. + - **Jamf Pro API**: Implement Client Credentials and Bearer Token authentication with provisions to refresh tokens upon expiry. + +4. **Data Formats**: + - **Classic API**: Support both XML and JSON formats for GET requests, and exclusively XML for POST and PUT requests. + - **Jamf Pro API**: Primarily interact using JSON, with exceptions handled for specific workflows such as file uploads. + +5. **Error Handling**: Implement robust error handling to interpret and handle the various HTTP status codes returned by the APIs, and provide descriptive error messages to the user. + +6. 
**Extensibility**: The design will ensure easy extensibility to cater to any future changes or additional features in the Jamf Pro APIs. + +#### **Consequences**: + +1. The modular design will make it easier to maintain and update the client for individual API changes without affecting other parts of the system. +2. Users will benefit from a unified interface, simplifying the integration process with the Jamf Pro system. +3. Robust error handling will ensure that the client gracefully handles failures, providing clear feedback to the users. +4. The extensible nature of the design will future-proof the client against potential updates in the Jamf Pro system. + +--- + +# Handling of Concurrent Requests in the Go-Based HTTP Client for Jamf Pro APIs + +## Context: +The SDK, which is intended to be used by a Terraform provider, needs to handle potential concurrent operations gracefully, ensuring data integrity and avoiding potential issues like race conditions. While Terraform itself manages much of this concurrency, the HTTP client should be designed to safely and effectively handle concurrent requests. + +## Decision: +Rate Limiting: Introduce a rate limiter to control the rate of requests sent to the Jamf Pro APIs. This ensures that we don't overwhelm the API with too many requests at once, respecting any API rate limits, and prevents potential throttling. + +## Concurrency Control: + +Mutexes: Use mutexes (from Go's sync package) to lock critical sections of the code, especially if you have shared state or resources that shouldn't be accessed concurrently. This can prevent race conditions. + +Error Handling: Ensure that the client can handle API errors related to concurrency gracefully. For instance, if two concurrent operations result in a conflict (e.g., trying to create a resource that already exists), the client should be able to recognize this and respond appropriately. + +Connection Pooling: Utilize connection pooling for the HTTP client. 
This ensures that multiple concurrent requests can reuse existing connections, rather than opening a new connection for every request, which is less efficient. + +State Management: If the client maintains any state, ensure it's designed to be thread-safe. This often involves a combination of mutexes and careful design to ensure that concurrent operations don't produce unpredictable results. + +Idempotency: Whenever possible, design operations to be idempotent. This means that even if an operation is executed multiple times (e.g., due to retries), the result remains consistent. This is especially crucial for Terraform providers. + +## Consequences: +Proper handling of concurrency will ensure data consistency and prevent potential race conditions, making the Terraform provider robust and reliable. + +Introducing rate limiting and connection pooling will optimize performance without compromising on the integrity of operations. + +Designing for idempotency will provide more reliable outcomes, especially in the face of intermittent network or service issues. + +This decision can then guide the development process, ensuring that concurrency is handled in an effective and safe manner within the HTTP client used by the Terraform provider. + +# Intelligent Retrying & Rate Handling in the Go-Based HTTP Client for APIs + +## Context: + +The HTTP client is being designed with the primary goal of interacting with Jamf Pro APIs, but with the flexibility to accommodate other APIs in the future. Given the unique nature of Jamf Pro's rate limiting recommendations and the absence of built-in rate limit headers, there's a need for an intelligent mechanism to manage request rates and handle retries. + +### Decision: + +Exponential Backoff with Jitter: +Implement an exponential backoff strategy for retries. This means that for each consecutive retry, the wait time before the next retry will double. 
Adding "jitter" (a random variation) to the backoff will prevent many clients from starting their retries simultaneously, mitigating the risk of overwhelming the server. + +### Response Time Monitoring: + +Monitor the average response time of successful requests. If the observed response time is significantly higher than the average, it may indicate server stress. The client should dynamically adjust its behavior, increasing delay between subsequent requests or pausing for a period. + +### Generic Rate Limit Header Handling: + +For APIs that provide built-in rate limiting (through headers), the client should be able to parse these headers and adjust request rates accordingly. + +### Maximum Retries: + +Introduce a maximum retry count to ensure that the client doesn't end up in an infinite retry loop in cases of persistent failure. + +### Configurability: + +Make the client's behavior configurable, allowing users to set values for parameters like base retry delay, maximum retries, and even provide their own backoff strategy function if desired. + +### Concurrency Management: + +Introduce a mechanism to ensure that the number of concurrent requests does not exceed the recommended limit (e.g., 5 for Jamf Pro). This can be achieved using a semaphore or a similar construct in Go. + +### Error Classification: + +Implement logic to classify errors. Only transient errors, which indicate temporary server-side issues, should trigger retries. Client-side errors, which indicate issues like bad requests, should not be retried. + +## Consequences: + +By implementing an intelligent retry and rate-handling mechanism, the HTTP client will be robust, able to gracefully handle server-side issues, and respectful of server resources. This ensures optimal performance, minimizes the risk of overwhelming the server, and provides a consistent user experience. 
+ +The flexibility and configurability of the client make it versatile enough to be adapted for other APIs in the future, ensuring longevity and reducing the need for significant rewrites. \ No newline at end of file diff --git a/internal/httpclient/http_client.go b/internal/httpclient/http_client.go new file mode 100644 index 0000000..7e743da --- /dev/null +++ b/internal/httpclient/http_client.go @@ -0,0 +1,175 @@ +// http_client.go +/* The `http_client` package provides a configurable HTTP client tailored for interacting with specific APIs. +It supports different authentication methods, including "bearer" and "oauth". The client is designed with a +focus on concurrency management, structured error handling, and flexible configuration options. +The package offers a default timeout, custom backoff strategies, dynamic rate limiting, +and detailed logging capabilities. The main `Client` structure encapsulates all necessary components, +like the baseURL, authentication details, and an embedded standard HTTP client. */ +package httpclient + +import ( + "fmt" + "net/http" + "sync" + "time" + + "go.uber.org/zap" +) + +// Config holds configuration options for the HTTP Client. +type Config struct { + // Required + InstanceName string + Auth AuthConfig // User can either supply these values manually or pass from LoadAuthConfig/Env vars + APIType string `json:"apiType"` + // Optional + LogLevel LogLevel // Field for defining tiered logging level. + MaxRetryAttempts int // Config item defines the max number of retry request attempts for retryable HTTP methods. 
+	EnableDynamicRateLimiting bool
+	Logger                    Logger // Field for the package's initialized logger
+	MaxConcurrentRequests     int    // Field for defining the maximum number of concurrent requests allowed in the semaphore
+	TokenRefreshBufferPeriod  time.Duration
+	TotalRetryDuration        time.Duration
+	CustomTimeout             time.Duration
+}
+
+// PerformanceMetrics captures various metrics related to the client's
+// interactions with the API, providing insights into its performance and behavior.
+type PerformanceMetrics struct {
+	TotalRequests        int64
+	TotalRetries         int64
+	TotalRateLimitErrors int64
+	TotalResponseTime    time.Duration
+	TokenWaitTime        time.Duration
+	lock                 sync.Mutex
+}
+
+// AuthConfig represents the structure to read authentication details from a JSON configuration file.
+type AuthConfig struct {
+	InstanceName       string `json:"instanceName,omitempty"`
+	OverrideBaseDomain string `json:"overrideBaseDomain,omitempty"`
+	Username           string `json:"username,omitempty"`
+	Password           string `json:"password,omitempty"`
+	ClientID           string `json:"clientID,omitempty"`
+	ClientSecret       string `json:"clientSecret,omitempty"`
+}
+
+// Client represents an HTTP client to interact with a specific API. 
+type Client struct { + APIHandler APIHandler // APIHandler interface used to define which API handler to use + InstanceName string // Website Instance name without the root domain + AuthMethod string // Specifies the authentication method: "bearer" or "oauth" + Token string // Authentication Token + OverrideBaseDomain string // Base domain override used when the default in the api handler isn't suitable + OAuthCredentials OAuthCredentials // ClientID / Client Secret + BearerTokenAuthCredentials BearerTokenAuthCredentials // Username and Password for Basic Authentication + Expiry time.Time // Expiry time set for the auth token + httpClient *http.Client + tokenLock sync.Mutex + config Config + logger Logger + ConcurrencyMgr *ConcurrencyManager + PerfMetrics PerformanceMetrics +} + +// BuildClient creates a new HTTP client with the provided configuration. +func BuildClient(config Config) (*Client, error) { + // Use the Logger interface type for the logger variable + var logger Logger + if config.Logger == nil { + logger = NewDefaultLogger() + } else { + logger = config.Logger + } + + // Set the logger's level based on the provided configuration if present + logger.SetLevel(config.LogLevel) + + // Validate LogLevel + if config.LogLevel < LogLevelNone || config.LogLevel > LogLevelDebug { + return nil, fmt.Errorf("invalid LogLevel setting: %d", config.LogLevel) + } + + // Use the APIType from the config to determine which API handler to load + apiHandler, err := LoadAPIHandler(config, config.APIType) + if err != nil { + logger.Error("Failed to load API handler", zap.String("APIType", config.APIType), zap.Error(err)) + return nil, err // Return the original error without wrapping it in fmt.Errorf + } + + logger.Info("Initializing new HTTP client", zap.String("InstanceName", config.InstanceName), zap.String("APIType", config.APIType), zap.Int("LogLevel", int(config.LogLevel))) + // Validate and set default values for the configuration + if config.InstanceName == "" { + 
return nil, fmt.Errorf("instanceName cannot be empty") + } + + if config.MaxRetryAttempts < 0 { + config.MaxRetryAttempts = DefaultMaxRetryAttempts + logger.Info("MaxRetryAttempts was negative, set to default value", zap.Int("MaxRetryAttempts", DefaultMaxRetryAttempts)) + } + + if config.MaxConcurrentRequests <= 0 { + config.MaxConcurrentRequests = DefaultMaxConcurrentRequests + logger.Info("MaxConcurrentRequests was negative or zero, set to default value", zap.Int("MaxConcurrentRequests", DefaultMaxConcurrentRequests)) + } + + if config.TokenRefreshBufferPeriod < 0 { + config.TokenRefreshBufferPeriod = DefaultTokenBufferPeriod + logger.Info("TokenRefreshBufferPeriod was negative, set to default value", zap.Duration("TokenRefreshBufferPeriod", DefaultTokenBufferPeriod)) + } + + if config.TotalRetryDuration <= 0 { + config.TotalRetryDuration = DefaultTotalRetryDuration + logger.Info("TotalRetryDuration was negative or zero, set to default value", zap.Duration("TotalRetryDuration", DefaultTotalRetryDuration)) + } + + if config.TokenRefreshBufferPeriod == 0 { + config.TokenRefreshBufferPeriod = DefaultTokenBufferPeriod + logger.Info("TokenRefreshBufferPeriod not set, set to default value", zap.Duration("TokenRefreshBufferPeriod", DefaultTokenBufferPeriod)) + } + + if config.TotalRetryDuration == 0 { + config.TotalRetryDuration = DefaultTotalRetryDuration + logger.Info("TotalRetryDuration not set, set to default value", zap.Duration("TotalRetryDuration", DefaultTotalRetryDuration)) + } + + if config.CustomTimeout == 0 { + config.CustomTimeout = DefaultTimeout + logger.Info("CustomTimeout not set, set to default value", zap.Duration("CustomTimeout", DefaultTimeout)) + } + + // Determine the authentication method + AuthMethod := "unknown" + if config.Auth.Username != "" && config.Auth.Password != "" { + AuthMethod = "bearer" + } else if config.Auth.ClientID != "" && config.Auth.ClientSecret != "" { + AuthMethod = "oauth" + } else { + return nil, fmt.Errorf("invalid 
AuthConfig") + } + + client := &Client{ + InstanceName: config.InstanceName, + APIHandler: apiHandler, + AuthMethod: AuthMethod, + httpClient: &http.Client{Timeout: config.CustomTimeout}, + config: config, + logger: logger, + ConcurrencyMgr: NewConcurrencyManager(config.MaxConcurrentRequests, logger, true), + PerfMetrics: PerformanceMetrics{}, + } + + // Get auth token + _, err = client.ValidAuthTokenCheck() + if err != nil { + logger.Error("Failed to validate or obtain auth token", zap.Error(err)) + return nil, fmt.Errorf("failed to validate auth: %w", err) + } + + go client.StartMetricEvaluation() + + logger.Info("New client initialized", zap.String("InstanceName", client.InstanceName), zap.String("AuthMethod", AuthMethod), zap.Int("MaxRetryAttempts", config.MaxRetryAttempts), zap.Int("MaxConcurrentRequests", config.MaxConcurrentRequests), zap.Bool("EnableDynamicRateLimiting", config.EnableDynamicRateLimiting)) + + return client, nil + +} diff --git a/internal/httpclient/http_client_auth_token_management.go b/internal/httpclient/http_client_auth_token_management.go new file mode 100644 index 0000000..87fd0d0 --- /dev/null +++ b/internal/httpclient/http_client_auth_token_management.go @@ -0,0 +1,111 @@ +// http_client_auth_token_management.go +package httpclient + +import ( + "fmt" + "time" + + "go.uber.org/zap" +) + +// TokenResponse represents the structure of a token response from the API. +type TokenResponse struct { + Token string `json:"token"` + Expires time.Time `json:"expires"` +} + +/* +// ValidAuthTokenCheck checks if the current token is valid and not close to expiry. +// If the token is invalid, it tries to refresh it. +// It returns a boolean indicating the validity of the token and an error if there's a failure. 
+func (c *Client) ValidAuthTokenCheck() (bool, error) { + + if c.Token == "" { + if c.AuthMethod == "bearer" { + err := c.ObtainToken() + if err != nil { + return false, fmt.Errorf("failed to obtain bearer token: %w", err) + } + } else if c.AuthMethod == "oauth" { + err := c.ObtainOAuthToken(c.config.Auth) + if err != nil { + return false, fmt.Errorf("failed to obtain OAuth token: %w", err) + } + } else { + return false, fmt.Errorf("no valid credentials provided. Unable to obtain a token") + } + } + + // If token exists and is close to expiry or already expired + if time.Until(c.Expiry) < c.config.TokenRefreshBufferPeriod { + var err error + if c.BearerTokenAuthCredentials.Username != "" && c.BearerTokenAuthCredentials.Password != "" { + err = c.RefreshToken() + } else if c.OAuthCredentials.ClientID != "" && c.OAuthCredentials.ClientSecret != "" { + err = c.ObtainOAuthToken(c.config.Auth) + } else { + return false, fmt.Errorf("unknown auth method: %s", c.AuthMethod) + } + + if err != nil { + return false, fmt.Errorf("failed to refresh token: %w", err) + } + } + + if time.Until(c.Expiry) < c.config.TokenRefreshBufferPeriod { + return false, fmt.Errorf("token lifetime setting less than buffer. Buffer setting: %v, Time (seconds) until Exp: %v", c.config.TokenRefreshBufferPeriod, time.Until(c.Expiry)) + } + return true, nil +} +*/ + +// ValidAuthTokenCheck checks if the current token is valid and not close to expiry. +// If the token is invalid, it tries to refresh it. +// It returns a boolean indicating the validity of the token and an error if there's a failure. 
+func (c *Client) ValidAuthTokenCheck() (bool, error) { + if c.Token == "" { + if c.AuthMethod == "bearer" { + err := c.ObtainToken() + if err != nil { + c.logger.Error("Failed to obtain bearer token", zap.Error(err)) + return false, fmt.Errorf("failed to obtain bearer token: %w", err) + } + } else if c.AuthMethod == "oauth" { + err := c.ObtainOAuthToken(c.config.Auth) + if err != nil { + c.logger.Error("Failed to obtain OAuth token", zap.Error(err)) + return false, fmt.Errorf("failed to obtain OAuth token: %w", err) + } + } else { + err := fmt.Errorf("no valid credentials provided. Unable to obtain a token") + c.logger.Error("No valid credentials provided", zap.Error(err)) + return false, err + } + } + + if time.Until(c.Expiry) < c.config.TokenRefreshBufferPeriod { + var err error + if c.BearerTokenAuthCredentials.Username != "" && c.BearerTokenAuthCredentials.Password != "" { + err = c.RefreshToken() + } else if c.OAuthCredentials.ClientID != "" && c.OAuthCredentials.ClientSecret != "" { + err = c.ObtainOAuthToken(c.config.Auth) + } else { + err = fmt.Errorf("unknown auth method: %s", c.AuthMethod) + c.logger.Error("Unknown auth method", zap.String("auth_method", c.AuthMethod), zap.Error(err)) + return false, err + } + + if err != nil { + c.logger.Error("Failed to refresh token", zap.Error(err)) + return false, fmt.Errorf("failed to refresh token: %w", err) + } + } + + if time.Until(c.Expiry) < c.config.TokenRefreshBufferPeriod { + err := fmt.Errorf("token lifetime setting less than buffer. 
Buffer setting: %v, Time (seconds) until Exp: %v", c.config.TokenRefreshBufferPeriod, time.Until(c.Expiry)) + c.logger.Error("Token lifetime less than buffer", zap.Duration("buffer_period", c.config.TokenRefreshBufferPeriod), zap.Duration("time_until_expiry", time.Until(c.Expiry)), zap.Error(err)) + return false, err + } + + return true, nil +} diff --git a/internal/httpclient/http_client_bearer_token_auth.go b/internal/httpclient/http_client_bearer_token_auth.go new file mode 100644 index 0000000..39dd2a0 --- /dev/null +++ b/internal/httpclient/http_client_bearer_token_auth.go @@ -0,0 +1,194 @@ +// http_client_bearer_token_auth.go +/* The http_client_auth package focuses on authentication mechanisms for an HTTP client. +It provides structures and methods for handling both basic and bearer token based authentication +*/ +package httpclient + +import ( + "encoding/json" + "net/http" + "time" + + "go.uber.org/zap" +) + +// BearerTokenAuthCredentials represents the username and password for basic authentication. +type BearerTokenAuthCredentials struct { + Username string + Password string +} + +// SetBearerTokenAuthCredentials sets the BearerTokenAuthCredentials (Username and Password) +// for the client instance. These credentials are used for obtaining and refreshing +// bearer tokens for authentication. +func (c *Client) SetBearerTokenAuthCredentials(credentials BearerTokenAuthCredentials) { + c.BearerTokenAuthCredentials = credentials +} + +/* +// ObtainToken fetches and sets an authentication token using the stored basic authentication credentials. 
+func (c *Client) ObtainToken() error { + authenticationEndpoint := c.ConstructAPIAuthEndpoint(BearerTokenEndpoint) + + c.logger.Debug("Attempting to obtain token for user", "Username", c.BearerTokenAuthCredentials.Username) + + req, err := http.NewRequest("POST", authenticationEndpoint, nil) + if err != nil { + c.logger.Error("Failed to create new request for token", "Error", err) + return err + } + req.SetBasicAuth(c.BearerTokenAuthCredentials.Username, c.BearerTokenAuthCredentials.Password) + + resp, err := c.httpClient.Do(req) + if err != nil { + c.logger.Error("Failed to make request for token", "Error", err) + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + c.logger.Warn("Received non-OK response while obtaining token", "StatusCode", resp.StatusCode) + return c.HandleAPIError(resp) + } + + tokenResp := &TokenResponse{} + err = json.NewDecoder(resp.Body).Decode(tokenResp) + if err != nil { + c.logger.Error("Failed to decode token response", "Error", err) + return err + } + + c.Token = tokenResp.Token + c.Expiry = tokenResp.Expires + tokenDuration := time.Until(c.Expiry) + + c.logger.Info("Token obtained successfully", "Expiry", c.Expiry, "Duration", tokenDuration) + + return nil +}*/ + +// ObtainToken fetches and sets an authentication token using the stored basic authentication credentials. 
+func (c *Client) ObtainToken() error { + authenticationEndpoint := c.ConstructAPIAuthEndpoint(BearerTokenEndpoint) + + c.logger.Debug("Attempting to obtain token for user", zap.String("Username", c.BearerTokenAuthCredentials.Username)) + + req, err := http.NewRequest("POST", authenticationEndpoint, nil) + if err != nil { + c.logger.Error("Failed to create new request for token", zap.Error(err)) + return err + } + req.SetBasicAuth(c.BearerTokenAuthCredentials.Username, c.BearerTokenAuthCredentials.Password) + + resp, err := c.httpClient.Do(req) + if err != nil { + c.logger.Error("Failed to make request for token", zap.Error(err)) + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + c.logger.Warn("Received non-OK response while obtaining token", zap.Int("StatusCode", resp.StatusCode)) + return c.HandleAPIError(resp) + } + + tokenResp := &TokenResponse{} + err = json.NewDecoder(resp.Body).Decode(tokenResp) + if err != nil { + c.logger.Error("Failed to decode token response", zap.Error(err)) + return err + } + + c.Token = tokenResp.Token + c.Expiry = tokenResp.Expires + tokenDuration := time.Until(c.Expiry) + + c.logger.Info("Token obtained successfully", zap.Time("Expiry", c.Expiry), zap.Duration("Duration", tokenDuration)) + + return nil +} + +/* +// RefreshToken refreshes the current authentication token. 
+func (c *Client) RefreshToken() error { + c.tokenLock.Lock() + defer c.tokenLock.Unlock() + + tokenRefreshEndpoint := c.ConstructAPIAuthEndpoint(TokenRefreshEndpoint) + + req, err := http.NewRequest("POST", tokenRefreshEndpoint, nil) + if err != nil { + c.logger.Error("Failed to create new request for token refresh", "error", err) + return err + } + req.Header.Add("Authorization", "Bearer "+c.Token) + + c.logger.Debug("Attempting to refresh token", "URL", tokenRefreshEndpoint) + + resp, err := c.httpClient.Do(req) + if err != nil { + c.logger.Error("Failed to make request for token refresh", "error", err) + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + c.logger.Warn("Token refresh response status is not OK", "StatusCode", resp.StatusCode) + return c.HandleAPIError(resp) + } + + tokenResp := &TokenResponse{} + err = json.NewDecoder(resp.Body).Decode(tokenResp) + if err != nil { + c.logger.Error("Failed to decode token response", "error", err) + return err + } + + c.logger.Info("Token refreshed successfully", "Expiry", tokenResp.Expires) + + c.Token = tokenResp.Token + c.Expiry = tokenResp.Expires + return nil +} +*/ +// RefreshToken refreshes the current authentication token. 
+func (c *Client) RefreshToken() error { + c.tokenLock.Lock() + defer c.tokenLock.Unlock() + + tokenRefreshEndpoint := c.ConstructAPIAuthEndpoint(TokenRefreshEndpoint) + + req, err := http.NewRequest("POST", tokenRefreshEndpoint, nil) + if err != nil { + c.logger.Error("Failed to create new request for token refresh", zap.Error(err)) + return err + } + req.Header.Add("Authorization", "Bearer "+c.Token) + + c.logger.Debug("Attempting to refresh token", zap.String("URL", tokenRefreshEndpoint)) + + resp, err := c.httpClient.Do(req) + if err != nil { + c.logger.Error("Failed to make request for token refresh", zap.Error(err)) + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + c.logger.Warn("Token refresh response status is not OK", zap.Int("StatusCode", resp.StatusCode)) + return c.HandleAPIError(resp) + } + + tokenResp := &TokenResponse{} + err = json.NewDecoder(resp.Body).Decode(tokenResp) + if err != nil { + c.logger.Error("Failed to decode token response", zap.Error(err)) + return err + } + + c.Token = tokenResp.Token + c.Expiry = tokenResp.Expires + c.logger.Info("Token refreshed successfully", zap.Time("Expiry", tokenResp.Expires)) + + return nil +} diff --git a/internal/httpclient/http_client_defaultconfig.go b/internal/httpclient/http_client_defaultconfig.go new file mode 100644 index 0000000..54e699d --- /dev/null +++ b/internal/httpclient/http_client_defaultconfig.go @@ -0,0 +1,15 @@ +package httpclient + +import ( + "time" +) + +const ( + DefaultLogLevel = LogLevelInfo + DefaultMaxRetryAttempts = 3 + DefaultEnableDynamicRateLimiting = true + DefaultMaxConcurrentRequests = 5 + DefaultTokenBufferPeriod = 5 * time.Minute + DefaultTotalRetryDuration = 5 * time.Minute + DefaultTimeout = 10 * time.Second +) diff --git a/internal/httpclient/http_client_oauth.go b/internal/httpclient/http_client_oauth.go new file mode 100644 index 0000000..24eb3fd --- /dev/null +++ b/internal/httpclient/http_client_oauth.go @@ -0,0 +1,133 @@ +// 
http_client_oauth.go +/* The http_client_auth package focuses on authentication mechanisms for an HTTP client. +It provides structures and methods for handling OAuth-based authentication +*/ +package httpclient + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "go.uber.org/zap" +) + +// OAuthResponse represents the response structure when obtaining an OAuth access token. +type OAuthResponse struct { + AccessToken string `json:"access_token"` + ExpiresIn int64 `json:"expires_in"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token,omitempty"` + Error string `json:"error,omitempty"` +} + +// OAuthCredentials contains the client ID and client secret required for OAuth authentication. +type OAuthCredentials struct { + ClientID string + ClientSecret string +} + +// SetOAuthCredentials sets the OAuth credentials (Client ID and Client Secret) +// for the client instance. These credentials are used for obtaining and refreshing +// OAuth tokens for authentication. +func (c *Client) SetOAuthCredentials(credentials OAuthCredentials) { + c.OAuthCredentials = credentials +} + +// ObtainOAuthToken fetches an OAuth access token using the provided OAuthCredentials (Client ID and Client Secret). +// It updates the client's Token and Expiry fields with the obtained values. 
+func (c *Client) ObtainOAuthToken(credentials AuthConfig) error { + authenticationEndpoint := c.ConstructAPIAuthEndpoint(OAuthTokenEndpoint) + data := url.Values{} + data.Set("client_id", credentials.ClientID) + data.Set("client_secret", credentials.ClientSecret) + data.Set("grant_type", "client_credentials") + + c.logger.Debug("Attempting to obtain OAuth token", zap.String("ClientID", credentials.ClientID)) + + req, err := http.NewRequest("POST", authenticationEndpoint, strings.NewReader(data.Encode())) + if err != nil { + c.logger.Error("Failed to create request for OAuth token", zap.Error(err)) + return err + } + req.Header.Add("Content-Type", "application/x-www-form-urlencoded") + + resp, err := c.httpClient.Do(req) + if err != nil { + c.logger.Error("Failed to execute request for OAuth token", zap.Error(err)) + return err + } + defer resp.Body.Close() + + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + c.logger.Error("Failed to read response body", zap.Error(err)) + return err + } + + // Reset the response body to its original state + resp.Body = io.NopCloser(bytes.NewBuffer(bodyBytes)) + + oauthResp := &OAuthResponse{} + err = json.Unmarshal(bodyBytes, oauthResp) + if err != nil { + c.logger.Error("Failed to decode OAuth response", zap.Error(err)) + return err + } + + if oauthResp.Error != "" { + c.logger.Error("Error obtaining OAuth token", zap.String("Error", oauthResp.Error)) + return fmt.Errorf("error obtaining OAuth token: %s", oauthResp.Error) + } + + if oauthResp.AccessToken == "" { + c.logger.Error("Empty access token received") + return fmt.Errorf("empty access token received") + } + + expiresIn := time.Duration(oauthResp.ExpiresIn) * time.Second + expirationTime := time.Now().Add(expiresIn) + c.logger.Info("OAuth token obtained successfully", zap.String("AccessToken", oauthResp.AccessToken), zap.Duration("ExpiresIn", expiresIn), zap.Time("ExpirationTime", expirationTime)) + + c.Token = oauthResp.AccessToken + c.Expiry = expirationTime + 
+ return nil +} + +// InvalidateOAuthToken invalidates the current OAuth access token. +// After invalidation, the token cannot be used for further API requests. +func (c *Client) InvalidateOAuthToken() error { + invalidateTokenEndpoint := c.ConstructAPIAuthEndpoint(TokenInvalidateEndpoint) + + c.logger.Debug("Attempting to invalidate OAuth token", zap.String("Endpoint", invalidateTokenEndpoint)) + + req, err := http.NewRequest("POST", invalidateTokenEndpoint, nil) + if err != nil { + c.logger.Error("Failed to create new request for token invalidation", zap.Error(err)) + return err + } + req.Header.Add("Authorization", "Bearer "+c.Token) + + resp, err := c.httpClient.Do(req) + if err != nil { + c.logger.Error("Failed to make request for token invalidation", zap.Error(err)) + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusNoContent { + errMsg := fmt.Errorf("failed to invalidate token, status code: %d", resp.StatusCode) + c.logger.Error("Failed to invalidate OAuth token", zap.Int("StatusCode", resp.StatusCode), zap.Error(errMsg)) + return errMsg + } + + c.logger.Info("OAuth token invalidated successfully", zap.String("Endpoint", invalidateTokenEndpoint)) + + return nil +} diff --git a/internal/httpclient/http_client_test.go.fixme b/internal/httpclient/http_client_test.go.fixme new file mode 100644 index 0000000..556bf97 --- /dev/null +++ b/internal/httpclient/http_client_test.go.fixme @@ -0,0 +1,189 @@ +package httpclient + +import ( + "encoding/json" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewClientValidConfig(t *testing.T) { + assert := assert.New(t) + + // Define a complete config with some fields explicitly set + config := Config{ + MaxRetryAttempts: 3, + LogLevel: LogLevelInfo, // Example log level + EnableDynamicRateLimiting: true, // Explicitly enabling rate limiting + Logger: nil, // Assuming nil will use the default logger + 
MaxConcurrentRequests: 10, // Setting max concurrent requests + // TokenLifespan, TokenRefreshBufferPeriod, and TotalRetryDuration will use default values + } + logger := NewDefaultLogger() // Using a default logger for the test + client, err := NewClient("testInstance", config, logger) + + assert.NoError(err, "Expected no error during client initialization") + assert.NotNil(client, "Expected client to be initialized, got nil") + + // Assert that the explicit and default config values are correctly set + assert.Equal(config.MaxRetryAttempts, client.config.MaxRetryAttempts, "MaxRetryAttempts should match config") + assert.Equal(LogLevelInfo, client.config.LogLevel, "LogLevel should match config") + assert.Equal(true, client.config.EnableDynamicRateLimiting, "EnableDynamicRateLimiting should match config") + assert.NotNil(client.logger, "Logger should not be nil") + assert.Equal(10, client.config.MaxConcurrentRequests, "MaxConcurrentRequests should match config") + + // Default values + defaultTokenLifespan := 30 * time.Minute + defaultTokenRefreshBufferPeriod := 5 * time.Minute + defaultTotalRetryDuration := 60 * time.Second + + assert.Equal(defaultTokenLifespan, client.config.TokenLifespan, "TokenLifespan should have a default value") + assert.Equal(defaultTokenRefreshBufferPeriod, client.config.TokenRefreshBufferPeriod, "TokenRefreshBufferPeriod should have a default value") + assert.Equal(defaultTotalRetryDuration, client.config.TotalRetryDuration, "TotalRetryDuration should have a default value") +} + +func TestNewClientInvalidConfig(t *testing.T) { + assert := assert.New(t) + + // Test with an empty instance name + _, err := NewClient("", Config{}, nil) + assert.Error(err, "Expected error for empty instance name") + + // Test with a negative MaxRetryAttempts + configWithNegativeMaxRetry := Config{ + MaxRetryAttempts: -1, + } + _, err = NewClient("testInstance", configWithNegativeMaxRetry, nil) + assert.Error(err, "Expected error for negative MaxRetryAttempts") 
+ + // Test with invalid log level (assuming you have defined constraints for valid log levels) + configWithInvalidLogLevel := Config{ + LogLevel: LogLevel(-1), // Invalid log level + } + _, err = NewClient("testInstance", configWithInvalidLogLevel, nil) + assert.Error(err, "Expected error for invalid LogLevel") + + // Test with a negative MaxConcurrentRequests + configWithNegativeConcurrentRequests := Config{ + MaxConcurrentRequests: -1, + } + _, err = NewClient("testInstance", configWithNegativeConcurrentRequests, nil) + assert.Error(err, "Expected error for negative MaxConcurrentRequests") + + // Test with a negative TokenLifespan + configWithNegativeTokenLifespan := Config{ + TokenLifespan: -1 * time.Minute, + } + _, err = NewClient("testInstance", configWithNegativeTokenLifespan, nil) + assert.Error(err, "Expected error for negative TokenLifespan") + + // Test with a negative TokenRefreshBufferPeriod + configWithNegativeTokenRefreshBufferPeriod := Config{ + TokenRefreshBufferPeriod: -1 * time.Minute, + } + _, err = NewClient("testInstance", configWithNegativeTokenRefreshBufferPeriod, nil) + assert.Error(err, "Expected error for negative TokenRefreshBufferPeriod") + + // Test with a negative TotalRetryDuration + configWithNegativeTotalRetryDuration := Config{ + TotalRetryDuration: -1 * time.Second, + } + _, err = NewClient("testInstance", configWithNegativeTotalRetryDuration, nil) + assert.Error(err, "Expected error for negative TotalRetryDuration") +} + +func TestClientOptionsApplication(t *testing.T) { + assert := assert.New(t) + + customOption := func(c *Client) { + c.config.MaxConcurrentRequests = 5 + } + client, err := NewClient("testInstance", Config{}, nil, customOption) + assert.NoError(err, "Client initialization should not return an error") + + assert.Equal(5, client.config.MaxConcurrentRequests, "Expected MaxConcurrentRequests to be set to 5") +} + +func createTempConfigFile(content []byte) (string, error) { + tmpfile, err := os.CreateTemp("", 
"config*.json") + if err != nil { + return "", err + } + + if _, err := tmpfile.Write(content); err != nil { + tmpfile.Close() + os.Remove(tmpfile.Name()) + return "", err + } + + if err := tmpfile.Close(); err != nil { + os.Remove(tmpfile.Name()) + return "", err + } + + return tmpfile.Name(), nil +} + +func TestLoadAuthConfig_Success(t *testing.T) { + assert := assert.New(t) + + // Create a temporary config file + config := ClientAuthConfig{ + InstanceName: "testInstance", + ClientID: "testClientID", + ClientSecret: "testClientSecret", + } + content, _ := json.Marshal(config) + filename, err := createTempConfigFile(content) + require.NoError(t, err) + defer os.Remove(filename) + + // Test loading the config + loadedConfig, err := LoadAuthConfig(filename) + assert.NoError(err) + assert.Equal(config, *loadedConfig) +} + +func TestLoadAuthConfig_FileNotFound(t *testing.T) { + assert := assert.New(t) + + _, err := LoadAuthConfig("nonexistent.json") + assert.Error(err) +} + +func TestLoadAuthConfig_InvalidJSON(t *testing.T) { + assert := assert.New(t) + + // Create a temporary config file with invalid JSON + filename, err := createTempConfigFile([]byte("{invalid json")) + require.NoError(t, err) + defer os.Remove(filename) + + // Test loading the config + _, err = LoadAuthConfig(filename) + assert.Error(err) +} + +func TestNewClientDefaultSettings(t *testing.T) { + assert := assert.New(t) + + // Test client initialization with minimal configuration + config := Config{ + // Only set the fields that are absolutely necessary + MaxRetryAttempts: 1, + } + client, err := NewClient("testInstance", config, nil) + + assert.NoError(err, "Client initialization with minimal config should not return an error") + assert.NotNil(client, "Expected client to be initialized") + assert.NotNil(client.logger, "Expected default logger to be set") + + // Test default values + assert.Equal(DefaultTimeout, client.httpClient.Timeout, "Expected default timeout to be set") + 
assert.Equal(30*time.Minute, client.config.TokenLifespan, "Expected default TokenLifespan to be set") + assert.Equal(5*time.Minute, client.config.TokenRefreshBufferPeriod, "Expected default TokenRefreshBufferPeriod to be set") + assert.Equal(60*time.Second, client.config.TotalRetryDuration, "Expected default TotalRetryDuration to be set") +} diff --git a/internal/httpclient/http_concurrency_management.go b/internal/httpclient/http_concurrency_management.go new file mode 100644 index 0000000..299a2ab --- /dev/null +++ b/internal/httpclient/http_concurrency_management.go @@ -0,0 +1,314 @@ +// http_concurrency_management.go +// package httpclient provides utilities to manage HTTP client interactions, including concurrency control. +// The Concurrency Manager ensures no more than a certain number of concurrent requests (e.g., 5 for Jamf Pro) are sent at the same time. This is managed using a semaphore +package httpclient + +import ( + "context" + "sync" + "time" + + "github.com/google/uuid" + "go.uber.org/zap" +) + +//------ Constants and Data Structures: + +const ( + MaxConcurrency = 10 // Maximum allowed concurrent requests + MinConcurrency = 1 // Minimum allowed concurrent requests + EvaluationInterval = 1 * time.Minute // Time interval for evaluating metrics and adjusting concurrency +) + +// ConcurrencyManager controls the number of concurrent HTTP requests. +type ConcurrencyManager struct { + sem chan struct{} + logger Logger + debugMode bool + AcquisitionTimes []time.Duration + lock sync.Mutex + lastTokenAcquisitionTime time.Time +} + +type requestIDKey struct{} + +//------ Constructor and Helper Functions: + +// NewConcurrencyManager initializes a new ConcurrencyManager with the given concurrency limit, logger, and debug mode. +// The ConcurrencyManager ensures no more than a certain number of concurrent requests are made. +// It uses a semaphore to control concurrency. 
+func NewConcurrencyManager(limit int, logger Logger, debugMode bool) *ConcurrencyManager { + if logger == nil { + logger = &defaultLogger{} // Assuming this is the default logger implementation + } + return &ConcurrencyManager{ + sem: make(chan struct{}, limit), + logger: logger, + debugMode: debugMode, + AcquisitionTimes: []time.Duration{}, + } +} + +// Min returns the smaller of the two integers. +func Min(a, b int) int { + if a < b { + return a + } + return b +} + +//------ Core Concurrency Functions: + +// Acquire attempts to get a token to allow an HTTP request to proceed. +// It blocks until a token is available or the context expires. +// Returns a unique request ID upon successful acquisition. +func (c *ConcurrencyManager) Acquire(ctx context.Context) (uuid.UUID, error) { + requestID := uuid.New() + startTime := time.Now() + + select { + case c.sem <- struct{}{}: + acquisitionTime := time.Since(startTime) + c.lock.Lock() + c.AcquisitionTimes = append(c.AcquisitionTimes, acquisitionTime) + c.lock.Unlock() + c.lastTokenAcquisitionTime = time.Now() + + utilizedTokens := len(c.sem) + availableTokens := cap(c.sem) - utilizedTokens + c.logger.Debug("Acquired concurrency token", + zap.String("ConcurrencyTokenID", requestID.String()), + zap.Duration("AcquisitionTime", acquisitionTime), + zap.Int("UtilizedTokens", utilizedTokens), + zap.Int("AvailableTokens", availableTokens), + ) + return requestID, nil + + case <-ctx.Done(): + c.logger.Warn("Failed to acquire concurrency token, context done", + zap.String("ConcurrencyTokenID", requestID.String()), + zap.Error(ctx.Err()), + ) + return requestID, ctx.Err() + } +} + +// Release returns a token back to the pool, allowing other requests to proceed. +// It uses the provided requestID for logging and debugging purposes. 
+func (c *ConcurrencyManager) Release(requestID uuid.UUID) { + <-c.sem // Release a token back to the semaphore + if c.debugMode { + utilizedTokens := len(c.sem) // Tokens currently in use + availableTokens := cap(c.sem) - utilizedTokens // Tokens available for use + + // Using zap fields for structured logging in debug mode + c.logger.Debug("Released concurrency token", + zap.String("ConcurrencyTokenID", requestID.String()), + zap.Int("UtilizedTokens", utilizedTokens), + zap.Int("AvailableTokens", availableTokens), + ) + } +} + +//------ Metric-related Functions: + +// AverageAcquisitionTime computes the average time taken to acquire a token from the semaphore. +// It helps in understanding the contention for tokens and can be used to adjust concurrency limits. +func (c *ConcurrencyManager) AverageAcquisitionTime() time.Duration { + c.lock.Lock() + defer c.lock.Unlock() + + if len(c.AcquisitionTimes) == 0 { + return 0 + } + + totalTime := time.Duration(0) + for _, t := range c.AcquisitionTimes { + totalTime += t + } + return totalTime / time.Duration(len(c.AcquisitionTimes)) +} + +// HistoricalAverageAcquisitionTime computes the average time taken to acquire a token from the semaphore over a historical period (e.g., the last 5 minutes). +// It helps in understanding the historical contention for tokens and can be used to adjust concurrency limits. +func (c *ConcurrencyManager) HistoricalAverageAcquisitionTime() time.Duration { + c.lock.Lock() + defer c.lock.Unlock() + + // For simplicity, let's say we store the last 5 minutes of acquisition times. + // This means if EvaluationInterval is 1 minute, we consider the last 5 data points. 
+ historicalCount := 5 + if len(c.AcquisitionTimes) < historicalCount { + return c.AverageAcquisitionTime() // If not enough historical data, return the overall average + } + + totalTime := time.Duration(0) + for _, t := range c.AcquisitionTimes[len(c.AcquisitionTimes)-historicalCount:] { + totalTime += t + } + return totalTime / time.Duration(historicalCount) +} + +//------ Concurrency Adjustment Functions: + +// AdjustConcurrencyLimit dynamically modifies the maximum concurrency limit based on the newLimit provided. +// This function helps in adjusting the concurrency limit in real-time based on observed system performance and other metrics. +// It transfers the tokens from the old semaphore to the new one, ensuring that there's no loss of tokens during the transition. +func (c *ConcurrencyManager) AdjustConcurrencyLimit(newLimit int) { + c.lock.Lock() + defer c.lock.Unlock() + + if newLimit <= 0 { + return // Avoid setting a non-positive limit + } + + // Create a new semaphore with the desired limit + newSem := make(chan struct{}, newLimit) + + // Transfer tokens from the old semaphore to the new one + for i := 0; i < len(c.sem) && i < newLimit; i++ { + newSem <- struct{}{} + } + + c.sem = newSem +} + +// AdjustConcurrencyBasedOnMetrics evaluates the current metrics and adjusts the concurrency limit if required. +// It checks metrics like average token acquisition time and decides on a new concurrency limit. +// The method ensures that the new limit respects the minimum and maximum allowed concurrency bounds. 
+func (c *Client) AdjustConcurrencyBasedOnMetrics() { + // Get average acquisition time + avgAcquisitionTime := c.ConcurrencyMgr.AverageAcquisitionTime() + + // Get current concurrency limit + currentLimit := cap(c.ConcurrencyMgr.sem) + + // Get historical average acquisition time (e.g., over the last 5 minutes) + historicalAvgAcquisitionTime := c.ConcurrencyMgr.HistoricalAverageAcquisitionTime() + + // Decide on new limit based on metrics + newLimit := currentLimit + if avgAcquisitionTime > time.Duration(float64(historicalAvgAcquisitionTime)*1.2) { // 20% increase in acquisition time + newLimit = currentLimit - 2 // decrease concurrency more aggressively + } else if avgAcquisitionTime < time.Duration(float64(historicalAvgAcquisitionTime)*0.8) { // 20% decrease in acquisition time + newLimit = currentLimit + 2 // increase concurrency more aggressively + } else if avgAcquisitionTime > historicalAvgAcquisitionTime { + newLimit = currentLimit - 1 // decrease concurrency conservatively + } else if avgAcquisitionTime < historicalAvgAcquisitionTime { + newLimit = currentLimit + 1 // increase concurrency conservatively + } + + // Ensure newLimit is within safety bounds + if newLimit > MaxConcurrency { + newLimit = MaxConcurrency + } else if newLimit < MinConcurrency { + newLimit = MinConcurrency + } + + // Adjust concurrency if new limit is different from current + if newLimit != currentLimit { + c.ConcurrencyMgr.AdjustConcurrencyLimit(newLimit) + + c.logger.Debug("Adjusted concurrency", + zap.Int("OldLimit", currentLimit), + zap.Int("NewLimit", newLimit), + zap.String("Reason", "Based on average acquisition time"), + zap.Duration("AverageAcquisitionTime", avgAcquisitionTime), + zap.Duration("HistoricalAverageAcquisitionTime", historicalAvgAcquisitionTime), + ) + } +} + +// EvaluateMetricsAndAdjustConcurrency evaluates the performance metrics and makes necessary +// adjustments to the concurrency limit. 
The method assesses the average response time
+// and adjusts the concurrency based on how it compares to the historical average acquisition time.
+// If the average response time has significantly increased compared to the historical average,
+// the concurrency limit is decreased, and vice versa. The method ensures that the concurrency
+// limit remains within the bounds defined by the system's best practices.
+func (c *Client) EvaluateMetricsAndAdjustConcurrency() {
+	// Snapshot the counters under the lock, then compute outside of it so the
+	// critical section stays minimal.
+	c.PerfMetrics.lock.Lock()
+	totalRequests := c.PerfMetrics.TotalRequests
+	totalResponseTime := c.PerfMetrics.TotalResponseTime
+	c.PerfMetrics.lock.Unlock()
+
+	// Guard: before any request has completed, TotalRequests is zero and the
+	// average below would be an integer division by zero (runtime panic on
+	// time.Duration, which is an int64).
+	if totalRequests == 0 {
+		return
+	}
+	averageResponseTime := totalResponseTime / time.Duration(totalRequests)
+
+	historicalAverageAcquisitionTime := c.ConcurrencyMgr.HistoricalAverageAcquisitionTime()
+
+	if averageResponseTime > time.Duration(float64(historicalAverageAcquisitionTime)*1.2) {
+		// Response times degraded by >20%: back off by one slot, floored at MinConcurrency.
+		currentLimit := cap(c.ConcurrencyMgr.sem)
+		newLimit := currentLimit - 1
+		if newLimit < MinConcurrency {
+			newLimit = MinConcurrency
+		}
+		c.ConcurrencyMgr.AdjustConcurrencyLimit(newLimit)
+	} else if averageResponseTime < time.Duration(float64(historicalAverageAcquisitionTime)*0.8) {
+		// Response times improved by >20%: grow by one slot.
+		currentLimit := cap(c.ConcurrencyMgr.sem)
+		newLimit := currentLimit + 1
+		// NOTE(review): the extra "newLimit > 5" cap looks like a vendor-specific
+		// ceiling (e.g. Jamf Pro's 5 concurrent requests) layered under
+		// MaxConcurrency — confirm the intended bound before unifying these.
+		if newLimit > MaxConcurrency || newLimit > 5 {
+			newLimit = Min(currentLimit, 5)
+		}
+		c.ConcurrencyMgr.AdjustConcurrencyLimit(newLimit)
+	}
+}
+
+//------ Concurrency Monitoring Functions:
+
+// StartMetricEvaluation continuously monitors the client's interactions with the API and adjusts the concurrency limits dynamically.
+// The function evaluates metrics at regular intervals to detect burst activity patterns.
+// If a burst activity is detected (e.g., many requests in a short period), the evaluation interval is reduced for more frequent checks.
+// Otherwise, it reverts to a default interval for regular checks.
+// After each evaluation, the function calls EvaluateMetricsAndAdjustConcurrency to potentially adjust the concurrency based on observed metrics. +// +// The evaluation process works as follows: +// 1. Sleep for the defined evaluation interval. +// 2. Check if there's a burst in activity using the isBurstActivity method. +// 3. If a burst is detected, the evaluation interval is shortened to more frequently monitor and adjust the concurrency. +// 4. If no burst is detected, it maintains the default evaluation interval. +// 5. It then evaluates the metrics and adjusts the concurrency accordingly. +func (c *Client) StartMetricEvaluation() { + evalInterval := 5 * time.Minute // Initial interval + + for { + time.Sleep(evalInterval) + + if c.isBurstActivity() { + evalInterval = 1 * time.Minute + } else { + evalInterval = 5 * time.Minute + } + + c.EvaluateMetricsAndAdjustConcurrency() + } +} + +func (c *Client) isBurstActivity() bool { + // If the last token was acquired less than 2 minutes ago, consider it a burst + return time.Since(c.ConcurrencyMgr.lastTokenAcquisitionTime) < 2*time.Minute +} + +// StartConcurrencyAdjustment launches a periodic checker that evaluates current metrics and adjusts concurrency limits if needed. +// It uses a ticker to periodically trigger the adjustment logic. 
+func (c *Client) StartConcurrencyAdjustment() { + ticker := time.NewTicker(EvaluationInterval) + defer ticker.Stop() + + for range ticker.C { + c.AdjustConcurrencyBasedOnMetrics() + } +} + +// Returns the average Acquisition Time to get a token from the semaphore +func (c *Client) AverageAcquisitionTime() time.Duration { + // Assuming ConcurrencyMgr has a method to get this metric + return c.ConcurrencyMgr.AverageAcquisitionTime() +} + +func (c *Client) HistoricalAverageAcquisitionTime() time.Duration { + // Assuming ConcurrencyMgr has a method to get this metric + return c.ConcurrencyMgr.HistoricalAverageAcquisitionTime() +} + +// Returns performance metrics from the http client +func (c *Client) GetPerformanceMetrics() *PerformanceMetrics { + return &c.PerfMetrics +} diff --git a/internal/httpclient/http_error_handling.go b/internal/httpclient/http_error_handling.go new file mode 100644 index 0000000..5e4adae --- /dev/null +++ b/internal/httpclient/http_error_handling.go @@ -0,0 +1,128 @@ +// http_error_handling.go +// This package provides utility functions and structures for handling and categorizing HTTP error responses. +package httpclient + +import ( + "encoding/json" + "fmt" + "net/http" + + "go.uber.org/zap" +) + +// APIError represents a structured API error response. +type APIError struct { + StatusCode int + Message string +} + +// StructuredError represents a structured error response from the API. +type StructuredError struct { + Error struct { + Code string `json:"code"` + Message string `json:"message"` + } `json:"error"` +} + +// HandleAPIError handles error responses from the API, converting them into a structured error if possible. 
+func (c *Client) HandleAPIError(resp *http.Response) error {
+	// Read the error payload exactly once into a raw JSON value. The previous
+	// implementation decoded resp.Body twice; the second json.NewDecoder call
+	// always saw an already-consumed reader, so the plain-string fallback could
+	// never succeed. Unmarshalling the captured raw bytes fixes that without
+	// needing an extra import.
+	var raw json.RawMessage
+	decodeErr := json.NewDecoder(resp.Body).Decode(&raw)
+
+	// First attempt: the structured {"error": {"code": ..., "message": ...}} shape.
+	var structuredErr StructuredError
+	if decodeErr == nil && json.Unmarshal(raw, &structuredErr) == nil && structuredErr.Error.Message != "" {
+		c.logger.Warn("API returned structured error",
+			zap.String("status", resp.Status),
+			zap.String("error_code", structuredErr.Error.Code),
+			zap.String("error_message", structuredErr.Error.Message),
+		)
+		return &APIError{
+			StatusCode: resp.StatusCode,
+			Message:    structuredErr.Error.Message,
+		}
+	}
+
+	// Second attempt: a bare JSON string; otherwise synthesize a generic message.
+	var errMsg string
+	if decodeErr != nil || json.Unmarshal(raw, &errMsg) != nil || errMsg == "" {
+		errMsg = fmt.Sprintf("Unexpected error with status code: %d", resp.StatusCode)
+		c.logger.Warn("Failed to decode API error message, using default error message",
+			zap.String("status", resp.Status),
+			zap.String("error_message", errMsg),
+		)
+	} else {
+		c.logger.Warn("API returned non-structured error",
+			zap.String("status", resp.Status),
+			zap.String("error_message", errMsg),
+		)
+	}
+
+	return &APIError{
+		StatusCode: resp.StatusCode,
+		Message:    errMsg,
+	}
+}
+
+// Error returns a string representation of the APIError.
+func (e *APIError) Error() string {
+	return fmt.Sprintf("API Error (Code: %d): %s", e.StatusCode, e.Message)
+}
+
+// TranslateStatusCode provides a human-readable message for HTTP status codes.
+func TranslateStatusCode(statusCode int) string {
+	messages := map[int]string{
+		http.StatusOK:                    "Request successful.",
+		http.StatusCreated:               "Request to create or update resource successful.",
+		http.StatusAccepted:              "The request was accepted for processing, but the processing has not completed.",
+		http.StatusNoContent:             "Request successful. Resource successfully deleted.",
+		http.StatusBadRequest:            "Bad request. Verify the syntax of the request.",
+		http.StatusUnauthorized:          "Authentication failed. Verify the credentials being used for the request.",
+		http.StatusForbidden:             "Invalid permissions. Verify the account being used has the proper permissions for the resource you are trying to access.",
+		http.StatusNotFound:              "Resource not found. Verify the URL path is correct.",
+		http.StatusConflict:              "Conflict. See the error response for additional details.",
+		http.StatusPreconditionFailed:    "Precondition failed. See error description for additional details.",
+		http.StatusRequestEntityTooLarge: "Payload too large.",
+		http.StatusRequestURITooLong:     "Request-URI too long.",
+		http.StatusInternalServerError:   "Internal server error. Retry the request or contact support if the error persists.",
+		http.StatusBadGateway:            "Bad Gateway. Generally due to a timeout issue.",
+		http.StatusServiceUnavailable:    "Service unavailable.",
+	}
+
+	if message, exists := messages[statusCode]; exists {
+		return message
+	}
+	return "An unexpected error occurred. Please try again later."
+}
+
+// IsNonRetryableError checks if the provided response indicates a non-retryable error.
+func IsNonRetryableError(resp *http.Response) bool {
+	// List of non-retryable HTTP status codes
+	nonRetryableStatusCodes := map[int]bool{
+		http.StatusBadRequest:            true, // 400
+		http.StatusUnauthorized:          true, // 401
+		http.StatusForbidden:             true, // 403
+		http.StatusNotFound:              true, // 404
+		http.StatusConflict:              true, // 409
+		http.StatusRequestEntityTooLarge: true, // 413
+		http.StatusRequestURITooLong:     true, // 414
+	}
+
+	_, isNonRetryable := nonRetryableStatusCodes[resp.StatusCode]
+	return isNonRetryable
+}
+
+// IsRateLimitError checks if the provided response indicates a rate limit error.
+func IsRateLimitError(resp *http.Response) bool {
+	return resp.StatusCode == http.StatusTooManyRequests
+}
+
+// IsTransientError checks if an error or HTTP response indicates a transient error.
+func IsTransientError(resp *http.Response) bool {
+	transientStatusCodes := map[int]bool{
+		http.StatusInternalServerError: true,
+		http.StatusBadGateway:          true,
+		http.StatusServiceUnavailable:  true,
+	}
+	return resp != nil && transientStatusCodes[resp.StatusCode]
+}
diff --git a/internal/httpclient/http_helpers.go b/internal/httpclient/http_helpers.go
new file mode 100644
index 0000000..877f1d9
--- /dev/null
+++ b/internal/httpclient/http_helpers.go
@@ -0,0 +1,97 @@
+// http_helpers.go
+package httpclient
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"os"
+	"strings"
+	"time"
+
+	"go.uber.org/zap"
+)
+
+// ParseISO8601Date attempts to parse a string date in ISO 8601 format
+// (via Go's RFC 3339 layout, a strict profile of ISO 8601).
+func ParseISO8601Date(dateStr string) (time.Time, error) {
+	return time.Parse(time.RFC3339, dateStr)
+}
+
+// EnsureHTTPScheme returns the URL unchanged when it already carries an
+// "http://" or "https://" scheme; otherwise it prefixes it with "https://".
+func EnsureHTTPScheme(url string) string {
+	if !strings.HasPrefix(url, "http://") && !strings.HasPrefix(url, "https://") {
+		return fmt.Sprintf("https://%s", url)
+	}
+	return url
+}
+
+// CheckDeprecationHeader checks the response headers for the Deprecation header and logs a warning if present.
+// NOTE(review): the type assertion below assumes the Logger is always the
+// built-in *defaultLogger; any other Logger implementation would panic here —
+// confirm all callers before relying on this.
+func CheckDeprecationHeader(resp *http.Response, logger Logger) {
+	deprecationHeader := resp.Header.Get("Deprecation")
+	if deprecationHeader != "" {
+		// logger is an instance of defaultLogger that wraps a *zap.Logger
+		zapLogger := logger.(*defaultLogger).logger // Type assertion to access the underlying *zap.Logger
+		zapLogger.Warn("API endpoint is deprecated",
+			zap.String("Date", deprecationHeader),
+			zap.String("Endpoint", resp.Request.URL.String()),
+		)
+	}
+}
+
+// SetAuthenticationCredentials interprets and sets the credentials for the Client.
+func (c *Client) SetAuthenticationCredentials(creds map[string]string) { + // Check for OAuth credentials + if clientID, ok := creds["clientID"]; ok { + if clientSecret, ok := creds["clientSecret"]; ok { + c.OAuthCredentials = OAuthCredentials{ + ClientID: clientID, + ClientSecret: clientSecret, + } + c.AuthMethod = "oauth" + return + } + } + + // Check for Bearer Token credentials + if username, ok := creds["username"]; ok { + if password, ok := creds["password"]; ok { + c.BearerTokenAuthCredentials = BearerTokenAuthCredentials{ + Username: username, + Password: password, + } + c.AuthMethod = "bearer" + return + } + } +} + +// GetOAuthCredentials retrieves the current OAuth credentials (Client ID and Client Secret) +// set for the client instance. Used for test cases. +func (c *Client) GetOAuthCredentials() OAuthCredentials { + return c.OAuthCredentials +} + +// GetBearerAuthCredentials retrieves the current bearer auth credentials (Username and Password) +// set for the client instance. Used for test cases. +func (c *Client) GetBearerAuthCredentials() BearerTokenAuthCredentials { + return c.BearerTokenAuthCredentials +} + +// LoadAuthConfig reads a JSON configuration file and decodes it into a ClientAuthConfig struct. +// It is used to retrieve authentication details like BaseURL, Username, and Password for the client. 
+func LoadAuthConfig(filename string) (*AuthConfig, error) { + file, err := os.Open(filename) + if err != nil { + return nil, err + } + defer file.Close() + + config := &AuthConfig{} + decoder := json.NewDecoder(file) + err = decoder.Decode(config) + if err != nil { + return nil, err + } + + return config, nil +} diff --git a/internal/httpclient/http_logger.go b/internal/httpclient/http_logger.go new file mode 100644 index 0000000..02a0703 --- /dev/null +++ b/internal/httpclient/http_logger.go @@ -0,0 +1,110 @@ +package httpclient + +import ( + "go.uber.org/zap" +) + +type LogLevel int + +const ( + LogLevelNone LogLevel = iota + LogLevelDebug + LogLevelInfo + LogLevelWarning + LogLevelError + LogLevelPanic + LogLevelFatal +) + +// Logger interface as defined earlier +type Logger interface { + SetLevel(level LogLevel) + Debug(msg string, keysAndValues ...interface{}) + Info(msg string, keysAndValues ...interface{}) + Warn(msg string, keysAndValues ...interface{}) + Error(msg string, keysAndValues ...interface{}) + Panic(msg string, keysAndValues ...interface{}) + Fatal(msg string, keysAndValues ...interface{}) +} + +// defaultLogger is an implementation of the Logger interface using Uber's zap logging library. +// It provides structured, leveled logging capabilities. The logLevel field controls the verbosity +// of the logs that this logger will produce, allowing filtering of logs based on their importance. +type defaultLogger struct { + logger *zap.Logger // logger holds the reference to the zap.Logger instance. + logLevel LogLevel // logLevel determines the current logging level (e.g., DEBUG, INFO, WARN). +} + +// NewDefaultLogger initializes and returns a new instance of defaultLogger with a production +// configuration from the zap logging library. This function sets the default logging level to +// LogLevelWarning, which means that by default, DEBUG and INFO logs will be suppressed. 
+// In case of an error while initializing the zap.Logger, this function will panic, as the +// inability to log is considered a fatal error in production environments. +func NewDefaultLogger() Logger { + logger, err := zap.NewProduction() // Initialize a zap logger with production settings. + if err != nil { + panic(err) // Panic if there is an error initializing the logger, as logging is critical. + } + + return &defaultLogger{ + logger: logger, // Set the initialized zap.Logger. + logLevel: LogLevelWarning, // Set the default log level to warning. + } +} + +// Implement the SetLevel method for defaultLogger +func (d *defaultLogger) SetLevel(level LogLevel) { + d.logLevel = level +} + +// Convert keysAndValues to zap.Fields +func toZapFields(keysAndValues ...interface{}) []zap.Field { + var fields []zap.Field + for i := 0; i < len(keysAndValues)-1; i += 2 { + key, val := keysAndValues[i], keysAndValues[i+1] + fields = append(fields, zap.Any(key.(string), val)) + } + return fields +} + +// Debug method implementation +func (d *defaultLogger) Debug(msg string, keysAndValues ...interface{}) { + if d.logLevel >= LogLevelDebug { + d.logger.Debug(msg, toZapFields(keysAndValues...)...) + } +} + +// Info method implementation +func (d *defaultLogger) Info(msg string, keysAndValues ...interface{}) { + if d.logLevel >= LogLevelInfo { + d.logger.Info(msg, toZapFields(keysAndValues...)...) + } +} + +// Warn method implementation +func (d *defaultLogger) Warn(msg string, keysAndValues ...interface{}) { + if d.logLevel >= LogLevelWarning { + d.logger.Warn(msg, toZapFields(keysAndValues...)...) + } +} + +// Error method implementation +func (d *defaultLogger) Error(msg string, keysAndValues ...interface{}) { + if d.logLevel > LogLevelNone { + d.logger.Error(msg, toZapFields(keysAndValues...)...) 
+ } +} + +// Panic method implementation +func (d *defaultLogger) Panic(msg string, keysAndValues ...interface{}) { + if d.logLevel >= LogLevelPanic { + d.logger.Panic(msg, toZapFields(keysAndValues...)...) + } +} + +// Fatal method implementation +func (d *defaultLogger) Fatal(msg string, keysAndValues ...interface{}) { + if d.logLevel >= LogLevelFatal { + d.logger.Fatal(msg, toZapFields(keysAndValues...)...) + } +} diff --git a/internal/httpclient/http_logging.go.refactor b/internal/httpclient/http_logging.go.refactor new file mode 100644 index 0000000..dbdbdfe --- /dev/null +++ b/internal/httpclient/http_logging.go.refactor @@ -0,0 +1,83 @@ +// http_logging.go +package httpclient + +import "log" + +type LogLevel int + +const ( + LogLevelNone LogLevel = iota + LogLevelWarning + LogLevelInfo + LogLevelDebug +) + +// Logger is an interface for logging within the SDK. +type Logger interface { + SetLevel(level LogLevel) + Trace(msg string, keysAndValues ...interface{}) // For very detailed logs + Debug(msg string, keysAndValues ...interface{}) // For development and troubleshooting + Info(msg string, keysAndValues ...interface{}) // Informational messages + Warn(msg string, keysAndValues ...interface{}) // For potentially problematic situations + Error(msg string, keysAndValues ...interface{}) // For errors that might still allow the app to continue running + Fatal(msg string, keysAndValues ...interface{}) // For errors that might prevent the app from continuing +} + +// defaultLogger is the default logger based on Go's standard log package and includes a logLevel field to keep track of the current logging level. +type defaultLogger struct { + logLevel LogLevel +} + +// SetLevel sets the current logging level for the defaultLogger. +func (d *defaultLogger) SetLevel(level LogLevel) { + d.logLevel = level +} + +// NewDefaultLogger now initializes a defaultLogger with a default log level. 
// NewDefaultLogger returns a stdlib-log based Logger whose default level is
// LogLevelWarning.
// NOTE: in this file the LogLevel constants ascend in VERBOSITY
// (None < Warning < Info < Debug), so the ">=" guards below are correct:
// a higher configured level emits more output.
func NewDefaultLogger() Logger {
	return &defaultLogger{
		logLevel: LogLevelWarning, // default log level.
	}
}

// Trace checks if the current log level permits debug messages before logging.
// Trace has no dedicated level; it shares LogLevelDebug's threshold.
func (d *defaultLogger) Trace(msg string, keysAndValues ...interface{}) {
	if d.logLevel >= LogLevelDebug { // Trace is a part of LogLevelDebug
		log.Println("[TRACE]", msg, keysAndValues)
	}
}

// Debug checks if the current log level permits debug messages before logging.
// NOTE(review): keysAndValues is printed as a raw slice, not as key=value
// pairs — acceptable for a fallback logger, but inconsistent with the zap
// implementation.
func (d *defaultLogger) Debug(msg string, keysAndValues ...interface{}) {
	if d.logLevel >= LogLevelDebug {
		log.Println("[DEBUG]", msg, keysAndValues)
	}
}

// Info checks if the current log level permits info messages before logging.
func (d *defaultLogger) Info(msg string, keysAndValues ...interface{}) {
	if d.logLevel >= LogLevelInfo {
		log.Println("[INFO]", msg, keysAndValues)
	}
}

// Warn checks if the current log level permits Warning messages before logging.
func (d *defaultLogger) Warn(msg string, keysAndValues ...interface{}) {
	if d.logLevel >= LogLevelWarning {
		log.Println("[WARN]", msg, keysAndValues)
	}
}

// Error checks if the current log level is greater than LogLevelNone, before logging.
// Errors are emitted at every enabled level.
func (d *defaultLogger) Error(msg string, keysAndValues ...interface{}) {
	if d.logLevel > LogLevelNone {
		log.Println("[ERROR]", msg, keysAndValues)
	}
}

// Fatal checks if the current log level is greater than LogLevelNone, before logging.
// log.Fatalln exits the process after printing; at LogLevelNone the process
// is NOT terminated — callers must not rely on Fatal to exit in that case.
func (d *defaultLogger) Fatal(msg string, keysAndValues ...interface{}) {
	if d.logLevel > LogLevelNone {
		log.Fatalln("[FATAL]", msg, keysAndValues)
	}
}
diff --git a/internal/httpclient/http_methods.go b/internal/httpclient/http_methods.go
new file mode 100644
index 0000000..9c632c4
--- /dev/null
+++ b/internal/httpclient/http_methods.go
@@ -0,0 +1,70 @@
// http_methods.go
package httpclient

import "net/http"

// Get sends a GET request to the specified endpoint and unmarshals the response into 'out'.
+// The caller is responsible for closing the response body. +func (c *Client) Get(endpoint string, out interface{}) (*http.Response, error) { + c.logger.Info("Sending GET request", "endpoint", endpoint) + + resp, err := c.DoRequest(http.MethodGet, endpoint, nil, out) + if err != nil { + c.logger.Error("GET request failed", "endpoint", endpoint, "error", err) + return nil, err + } + return resp, nil +} + +// Post sends a POST request to the specified endpoint with the provided body and unmarshals the response into 'out'. +// The caller is responsible for closing the response body. +func (c *Client) Post(endpoint string, body, out interface{}) (*http.Response, error) { + c.logger.Info("Sending POST request", "endpoint", endpoint, "body", body) + + resp, err := c.DoRequest(http.MethodPost, endpoint, body, out) + if err != nil { + c.logger.Error("POST request failed", "endpoint", endpoint, "error", err) + return nil, err + } + return resp, nil +} + +// Put sends a PUT request to the specified endpoint with the provided body and unmarshals the response into 'out'. +// The caller is responsible for closing the response body. +func (c *Client) Put(endpoint string, body, out interface{}) (*http.Response, error) { + + c.logger.Debug("Sending PUT request", "endpoint", endpoint, "body", body) + + resp, err := c.DoRequest(http.MethodPut, endpoint, body, out) + if err != nil { + c.logger.Error("PUT request failed", "endpoint", endpoint, "error", err) + return nil, err + } + return resp, nil +} + +// Delete sends a DELETE request to the specified endpoint and unmarshals the response into 'out'. +// The caller is responsible for closing the response body. 
+func (c *Client) Delete(endpoint string, out interface{}) (*http.Response, error) { + c.logger.Debug("Sending DELETE request", "endpoint", endpoint) + + resp, err := c.DoRequest(http.MethodDelete, endpoint, nil, out) + if err != nil { + c.logger.Error("DELETE request failed", "endpoint", endpoint, "error", err) + return nil, err + } + return resp, nil +} + +// Patch sends a PATCH request to the specified endpoint with the provided body and unmarshals the response into 'out'. +// The caller is responsible for closing the response body. +func (c *Client) Patch(endpoint string, body, out interface{}) (*http.Response, error) { + c.logger.Debug("Sending PATCH request", "endpoint", endpoint, "body", body) + + resp, err := c.DoRequest(http.MethodPatch, endpoint, body, out) + if err != nil { + c.logger.Error("PATCH request failed", "endpoint", endpoint, "error", err) + return nil, err + } + return resp, nil +} diff --git a/internal/httpclient/http_rate_handler.go b/internal/httpclient/http_rate_handler.go new file mode 100644 index 0000000..3b6d2ad --- /dev/null +++ b/internal/httpclient/http_rate_handler.go @@ -0,0 +1,66 @@ +// http_rate_handler.go + +/* +Components: +Backoff Strategy: A function that calculates the delay before the next retry. It will implement exponential backoff with jitter. This strategy is more effective than a fixed delay, as it ensures that in cases of prolonged issues, the client won't keep hammering the server with a high frequency. + +Response Time Monitoring: We'll introduce a mechanism to track average response times and use deviations from this average to inform our backoff strategy. + +Error Classifier: A function to classify different types of errors. Only transient errors should be retried. + +Rate Limit Header Parser: For future compatibility, a function that can parse common rate limit headers (like X-RateLimit-Remaining and Retry-After) and adjust behavior accordingly. 
+ +*/ + +package httpclient + +import ( + "math" + "math/rand" + "net/http" + "strconv" + "time" +) + +// Constants for exponential backoff with jitter +const ( + baseDelay = 100 * time.Millisecond // Initial delay + maxDelay = 5 * time.Second // Maximum delay + jitterFactor = 0.5 // Random jitter factor +) + +// calculateBackoff calculates the next delay for retry with exponential backoff and jitter. +func calculateBackoff(retry int) time.Duration { + delay := float64(baseDelay) * math.Pow(2, float64(retry)) + jitter := (rand.Float64() - 0.5) * jitterFactor * 2.0 // Random value between -jitterFactor and +jitterFactor + delay *= (1.0 + jitter) + + if delay > float64(maxDelay) { + return maxDelay + } + return time.Duration(delay) +} + +// parseRateLimitHeaders parses common rate limit headers and adjusts behavior accordingly. +// For future compatibility. +func parseRateLimitHeaders(resp *http.Response) time.Duration { + // Check for the Retry-After header + if retryAfter := resp.Header.Get("Retry-After"); retryAfter != "" { + if waitSeconds, err := strconv.Atoi(retryAfter); err == nil { + return time.Duration(waitSeconds) * time.Second + } + } + + // Check for X-RateLimit-Remaining; if it's 0, use X-RateLimit-Reset to determine how long to wait + if remaining := resp.Header.Get("X-RateLimit-Remaining"); remaining == "0" { + if resetTimeStr := resp.Header.Get("X-RateLimit-Reset"); resetTimeStr != "" { + if resetTimeUnix, err := strconv.ParseInt(resetTimeStr, 10, 64); err == nil { + resetTime := time.Unix(resetTimeUnix, 0) + return time.Until(resetTime) // Using time.Until instead of t.Sub(time.Now()) + } + } + } + + // No rate limiting headers found, return 0 + return 0 +} diff --git a/internal/httpclient/http_request.go b/internal/httpclient/http_request.go new file mode 100644 index 0000000..2b82572 --- /dev/null +++ b/internal/httpclient/http_request.go @@ -0,0 +1,319 @@ +// http_request.go +package httpclient + +import ( + "bytes" + "context" + "fmt" + 
"net/http" + "time" +) + +// DoRequest constructs and executes a standard HTTP request with support for retry logic. +// It is intended for operations that can be encoded in a single JSON or XML body such as +// creating or updating resources. This method includes token validation, concurrency control, +// performance metrics, dynamic header setting, and structured error handling. +// +// Parameters: +// - method: The HTTP method to use (e.g., GET, POST, PUT, DELETE, PATCH). +// - endpoint: The API endpoint to which the request will be sent. +// - body: The payload to send in the request, which will be marshaled based on the API handler rules. +// - out: A pointer to a variable where the unmarshaled response will be stored. +// +// Returns: +// - A pointer to the http.Response received from the server. +// - An error if the request could not be sent, the response could not be processed, or if retry attempts fail. +// +// The function starts by validating the client's authentication token and managing concurrency using +// a token system. It then determines the appropriate API handler for marshaling the request body and +// setting headers. The request is sent to the constructed URL with all necessary headers including +// authorization, content type, and user agent. +// +// If configured for debug logging, the function logs all request headers before sending. The function then +// enters a loop to handle retryable HTTP methods, implementing a retry mechanism for transient errors, +// rate limits, and other retryable conditions based on response status codes. +// +// The function also updates performance metrics to track total request count and cumulative response time. +// After processing the response, it handles any API errors and unmarshals the response body into the provided +// 'out' parameter if the response is successful. +// +// Note: +// The function assumes that retryable HTTP methods have been properly defined in the retryableHTTPMethods map. 
+// It is the caller's responsibility to close the response body when the request is successful to avoid resource leaks. +func (c *Client) DoRequest(method, endpoint string, body, out interface{}) (*http.Response, error) { + // Auth Token validation check + valid, err := c.ValidAuthTokenCheck() + if err != nil || !valid { + return nil, fmt.Errorf("validity of the authentication token failed with error: %w", err) + } + + // Acquire a token for concurrency management with a timeout and measure its acquisition time + tokenAcquisitionStart := time.Now() + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + requestID, err := c.ConcurrencyMgr.Acquire(ctx) + if err != nil { + return nil, err + } + defer c.ConcurrencyMgr.Release(requestID) + + tokenAcquisitionDuration := time.Since(tokenAcquisitionStart) + c.PerfMetrics.lock.Lock() + c.PerfMetrics.TokenWaitTime += tokenAcquisitionDuration + c.PerfMetrics.lock.Unlock() + + // Add the request ID to the context + ctx = context.WithValue(ctx, requestIDKey{}, requestID) + + // Determine which set of encoding and content-type request rules to use + //handler := GetAPIHandler(endpoint, c.config.LogLevel) + handler := GetAPIHandler(c.config) + + // Marshal Request with correct encoding + requestData, err := handler.MarshalRequest(body, method, endpoint) + if err != nil { + return nil, err + } + + // Construct URL using the ConstructAPIResourceEndpoint function + url := c.ConstructAPIResourceEndpoint(endpoint) + + // Initialize total request counter + c.PerfMetrics.lock.Lock() + c.PerfMetrics.TotalRequests++ + c.PerfMetrics.lock.Unlock() + + // Perform Request + req, err := http.NewRequest(method, url, bytes.NewBuffer(requestData)) + if err != nil { + return nil, err + } + + // Define header content type based on url and http method + contentType := handler.GetContentTypeHeader(endpoint) + // Define Request Headers dynamically based on handler logic + acceptHeader := 
handler.GetAcceptHeader() + + // Set Headers + req.Header.Add("Authorization", "Bearer "+c.Token) + req.Header.Add("Content-Type", contentType) + req.Header.Add("Accept", acceptHeader) + req.Header.Set("User-Agent", GetUserAgentHeader()) + + // Debug: Print request headers if in debug mode + c.logger.Debug("HTTP Request Headers:", req.Header) + + // Define if request is retryable + retryableHTTPMethods := map[string]bool{ + http.MethodGet: true, // GET + http.MethodDelete: true, // DELETE + http.MethodPut: true, // PUT + http.MethodPatch: true, // PATCH + } + + if retryableHTTPMethods[method] { + // Define a deadline for total retries based on http client TotalRetryDuration config + totalRetryDeadline := time.Now().Add(c.config.TotalRetryDuration) + i := 0 + for { + // Check if we've reached the maximum number of retries or if our total retry time has exceeded + if i > c.config.MaxRetryAttempts || time.Now().After(totalRetryDeadline) { + return nil, fmt.Errorf("max retry attempts reached or total retry duration exceeded") + } + + // This context is used to propagate cancellations and timeouts for the request. + // For example, if a request's context gets canceled or times out, the request will be terminated early. + req = req.WithContext(ctx) + + // Start response time measurement + responseTimeStart := time.Now() + + // Execute Request with context + resp, err := c.httpClient.Do(req) + if err != nil { + c.logger.Error("Failed to send request", "method", method, "endpoint", endpoint, "error", err) + return nil, err + } + + // After each request, compute and update response time + responseDuration := time.Since(responseTimeStart) + c.PerfMetrics.lock.Lock() + c.PerfMetrics.TotalResponseTime += responseDuration + c.PerfMetrics.lock.Unlock() + + // Checks for the presence of a deprecation header in the HTTP response and logs if found. 
+ if i == 0 { + CheckDeprecationHeader(resp, c.logger) + } + + // Handle (unmarshall) response with API Handler + if err := handler.UnmarshalResponse(resp, out); err != nil { + switch e := err.(type) { + case *APIError: + c.logger.Error("Received an API error", "status_code", e.StatusCode, "message", e.Message) + return resp, e + default: + // Existing error handling logic + c.logger.Error("Failed to unmarshal HTTP response", "method", method, "endpoint", endpoint, "error", err) + return resp, err + } + } + + if resp.StatusCode >= 200 && resp.StatusCode < 300 { + c.logger.Info("HTTP request succeeded", "method", method, "endpoint", endpoint, "status_code", resp.StatusCode) + return resp, nil + } else if resp.StatusCode == http.StatusNotFound { + c.logger.Warn("Resource not found", "method", method, "endpoint", endpoint) + return resp, fmt.Errorf("resource not found: %s", endpoint) + } + + // Retry Logic + if IsNonRetryableError(resp) { + c.logger.Warn("Encountered a non-retryable error", "status", resp.StatusCode, "description", TranslateStatusCode(resp.StatusCode)) + return resp, c.HandleAPIError(resp) + } else if IsRateLimitError(resp) { + waitDuration := parseRateLimitHeaders(resp) // Checks for the Retry-After, X-RateLimit-Remaining and X-RateLimit-Reset headers + c.logger.Warn("Encountered a rate limit error. Retrying after wait duration.", "wait_duration", waitDuration) + time.Sleep(waitDuration) + i++ + continue // This will restart the loop, effectively "retrying" the request + } else if IsTransientError(resp) { + waitDuration := calculateBackoff(i) //uses exponential backoff (with jitter) + c.logger.Warn("Encountered a transient error. 
Retrying after backoff.", "wait_duration", waitDuration) + time.Sleep(waitDuration) + i++ + continue // This will restart the loop, effectively "retrying" the request + } else { + c.logger.Error("Received unexpected error status from HTTP request", "method", method, "endpoint", endpoint, "status_code", resp.StatusCode, "description", TranslateStatusCode(resp.StatusCode)) + return resp, c.HandleAPIError(resp) + } + } + } else { + // Start response time measurement + responseTimeStart := time.Now() + // For non-retryable HTTP Methods (POST - Create) + req = req.WithContext(ctx) + resp, err := c.httpClient.Do(req) + + if err != nil { + c.logger.Error("Failed to send request", "method", method, "endpoint", endpoint, "error", err) + return nil, err + } + + // After the request, compute and update response time + responseDuration := time.Since(responseTimeStart) + c.PerfMetrics.lock.Lock() + c.PerfMetrics.TotalResponseTime += responseDuration + c.PerfMetrics.lock.Unlock() + + CheckDeprecationHeader(resp, c.logger) + + // Unmarshal the response with the determined API Handler + if err := handler.UnmarshalResponse(resp, out); err != nil { + switch e := err.(type) { + case *APIError: + c.logger.Error("Received an API error", "status_code", e.StatusCode, "message", e.Message) + return resp, e + default: + // Existing error handling logic + c.logger.Error("Failed to unmarshal HTTP response", "method", method, "endpoint", endpoint, "error", err) + return resp, err + } + } + + // Check if the response status code is within the success range + if resp.StatusCode >= 200 && resp.StatusCode < 300 { + return resp, nil + } else { + statusDescription := TranslateStatusCode(resp.StatusCode) + c.logger.Error("Received non-success status code from HTTP request", "method", method, "endpoint", endpoint, "status_code", resp.StatusCode, "description", statusDescription) + return resp, fmt.Errorf("Error status code: %d - %s", resp.StatusCode, statusDescription) + } + } + // TODO refactor to 
remove repition. +} + +// DoMultipartRequest creates and executes a multipart HTTP request. It is used for sending files +// and form fields in a single request. This method handles the construction of the multipart +// message body, setting the appropriate headers, and sending the request to the given endpoint. +// +// Parameters: +// - method: The HTTP method to use (e.g., POST, PUT). +// - endpoint: The API endpoint to which the request will be sent. +// - fields: A map of form fields and their values to include in the multipart message. +// - files: A map of file field names to file paths that will be included as file attachments. +// - out: A pointer to a variable where the unmarshaled response will be stored. +// +// Returns: +// - A pointer to the http.Response received from the server. +// - An error if the request could not be sent or the response could not be processed. +// +// The function first validates the authentication token, then constructs the multipart +// request body based on the provided fields and files. It then constructs the full URL for +// the request, sets the required headers (including Authorization and Content-Type), and +// sends the request. +// +// If debug mode is enabled, the function logs all the request headers before sending the request. +// After the request is sent, the function checks the response status code. If the response is +// not within the success range (200-299), it logs an error and returns the response and an error. +// If the response is successful, it attempts to unmarshal the response body into the 'out' parameter. +// +// Note: +// The caller should handle closing the response body when successful. 
func (c *Client) DoMultipartRequest(method, endpoint string, fields map[string]string, files map[string]string, out interface{}) (*http.Response, error) {
	// Auth Token validation check
	valid, err := c.ValidAuthTokenCheck()
	if err != nil || !valid {
		return nil, fmt.Errorf("validity of the authentication token failed with error: %w", err)
	}

	// Determine which set of encoding and content-type request rules to use
	//handler := GetAPIHandler(endpoint, c.config.LogLevel)
	handler := GetAPIHandler(c.config)

	// Marshal the multipart form data
	// NOTE(review): 'files' maps field names to file PATHS; the handler
	// presumably reads them from disk — confirm path validation happens there.
	requestData, contentType, err := handler.MarshalMultipartRequest(fields, files)
	if err != nil {
		return nil, err
	}

	// Construct URL using the ConstructAPIResourceEndpoint function
	url := c.ConstructAPIResourceEndpoint(endpoint)

	// Create the request
	req, err := http.NewRequest(method, url, bytes.NewBuffer(requestData))
	if err != nil {
		return nil, err
	}

	// Set Request Headers
	req.Header.Add("Authorization", "Bearer "+c.Token)
	req.Header.Set("Content-Type", contentType)
	req.Header.Set("User-Agent", GetUserAgentHeader())

	// Debug: Print request headers if in debug mode
	// NOTE(review): req.Header includes the Authorization bearer token, so this
	// debug line writes the credential to the logs — redact before logging.
	c.logger.Debug("HTTP Multipart Request Headers:", req.Header)

	// Execute the request
	// NOTE(review): unlike DoRequest, no context/timeout or retry logic is
	// applied here — confirm that is intentional for multipart uploads.
	resp, err := c.httpClient.Do(req)
	if err != nil {
		c.logger.Error("Failed to send multipart request", "method", method, "endpoint", endpoint, "error", err)
		return nil, err
	}

	// Check for successful status code
	// NOTE(review): on this error path resp is returned without being closed,
	// and the doc comment only obliges callers to close on success — ensure
	// some layer closes the body on non-2xx responses.
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		c.logger.Error("Received non-success status code from multipart request", "status_code", resp.StatusCode)
		return resp, fmt.Errorf("received non-success status code: %d", resp.StatusCode)
	}

	// Unmarshal the response
	if err := handler.UnmarshalResponse(resp, out); err != nil {
		c.logger.Error("Failed to unmarshal HTTP response", "method", method, "endpoint", endpoint, "error", err)
		return resp, err
	}

	return resp, nil
}
diff
--git a/internal/httpclient/sdk_version.go b/internal/httpclient/sdk_version.go
new file mode 100644
index 0000000..db01174
--- /dev/null
+++ b/internal/httpclient/sdk_version.go
@@ -0,0 +1,13 @@
// sdk_version.go
package httpclient

import "fmt"

const (
	// SDKVersion is the version string advertised in the User-Agent header.
	SDKVersion = "1.0"
	// UserAgentBase is the product token portion of the User-Agent header.
	UserAgentBase = "go-api-http-client"
)

// GetUserAgentHeader returns the User-Agent value sent with every request,
// in the conventional "product/version" form (e.g. "go-api-http-client/1.0").
func GetUserAgentHeader() string {
	return fmt.Sprintf("%s/%s", UserAgentBase, SDKVersion)
}
diff --git a/main.go b/main.go
new file mode 100644
index 0000000..06ab7d0
--- /dev/null
+++ b/main.go
@@ -0,0 +1 @@
package main
diff --git a/workload/README.md b/workload/README.md
deleted file mode 100644
index e58d872..0000000
--- a/workload/README.md
+++ /dev/null
@@ -1 +0,0 @@
-repo workload such as source code, terraform , bicep etc go here