Skip to content

Commit

Permalink
hardware calculator added
Browse files Browse the repository at this point in the history
Signed-off-by: Vivek Yadav <vivek.yadav@progress.com>
  • Loading branch information
vivek-yadav committed Jul 11, 2023
1 parent 7915957 commit 8b4a146
Show file tree
Hide file tree
Showing 10 changed files with 842 additions and 3 deletions.
56 changes: 56 additions & 0 deletions components/automate-cli/cmd/chef-automate/hardwarecal.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
package main

import (
"os"

"github.com/chef/automate/components/automate-cli/pkg/docs"
"github.com/chef/automate/components/automate-cli/pkg/status"
"github.com/chef/automate/lib/hardwarecal"
"github.com/chef/automate/lib/pmt"
"github.com/spf13/cobra"
)

// init registers the `hardware-cal` command tree on the root Automate CLI command.
func init() {
	RootCmd.AddCommand(hardwareCalCmd())
}

// hardwareCalCmd builds the `hardware-cal` parent command together with its
// single `run` subcommand, which drives the interactive hardware estimator
// for Chef Automate HA. Both commands are tagged for the bastion host docs.
func hardwareCalCmd() *cobra.Command {
	runSubCmd := &cobra.Command{
		Use:   "run",
		Short: "This will trigger the hardware calculator to ask input values, based on which it will provide hardware requirements for Chef Automate HA",
		Long:  "This will trigger the hardware calculator to ask input values, based on which it will provide hardware requirements for Chef Automate HA",
		RunE:  runCal,
		Annotations: map[string]string{
			docs.Tag: docs.BastionHost,
		},
	}

	parent := &cobra.Command{
		Use:   "hardware-cal COMMAND",
		Short: "Estimate Hardware Requirements for given inputs, this calculator is for Chef Automate HA",
		Long:  "Estimate Hardware Requirements for given inputs, this calculator is for Chef Automate HA",
		Annotations: map[string]string{
			docs.Tag: docs.BastionHost,
		},
	}
	parent.AddCommand(runSubCmd)

	return parent
}

// runCal is the RunE handler for `hardware-cal run`. It prompts the user on
// stdin for sizing inputs, prints the resulting hardware estimate, and wraps
// any failure in a HardwareCalError status. It always prints a disclaimer on
// success because the numbers are estimates, not guarantees.
func runCal(*cobra.Command, []string) error {
	out := writer
	prompt := pmt.PromptFactory(os.Stdin, os.Stdout)
	calc := hardwarecal.NewHardwareCalPrompt(prompt, out)

	if err := calc.Run(); err != nil {
		return status.Wrap(
			err,
			status.HardwareCalError,
			"Failed to Estimate Hardware Requirements",
		)
	}

	out.Println("* These numbers are just for estimation, based on our test environment conditions. \n* Please do your performance analysis based on your environment and usages.")
	return nil
}
2 changes: 2 additions & 0 deletions components/automate-cli/pkg/status/error.go
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,7 @@ const (
const (
PromptFailed = 124
FailedToGenConfig = 125
HardwareCalError = 126
IpAccessError = 123
InsufficientSpaceError = 122
CalDestDirSizeError = 121
Expand Down Expand Up @@ -102,6 +103,7 @@ const (
// an error code and its type and a description, but also as a way to generate
// documentation.
var ErrorMetadata = map[int][]string{
HardwareCalError: {"126", "HardwareCalError", "Hardware Calculator failed to provide Hardware Estimates"},
PromptFailed: {"124", "PromptFailed", "Prompt failed and exited with error"},
FailedToGenConfig: {"125", "FailedToGenConfig", "Failed to Generate config with given inputs"},
InsufficientSpaceError: {"122", "InsufficientSpaceError", "Insufficient disk space"},
Expand Down
1 change: 0 additions & 1 deletion lib/config/genconfig/awsregions.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,6 @@ type awsRegionItem struct {
}

var AwsRegionsList = []awsRegionItem{
{"US East (Ohio) us-east-2", "us-east-2"},
{"US East (Ohio) us-east-2", "us-east-2"},
{"US East (N. Virginia) us-east-1", "us-east-1"},
{"US West (N. California) us-west-1", "us-west-1"},
Expand Down
2 changes: 1 addition & 1 deletion lib/config/genconfig/awsregions_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ func TestGetAwsRegionSuccessCase(t *testing.T) {
val, err := awsRegions.Choose()

assert.Equal(t, nil, err)
assert.Equal(t, "us-east-1", val)
assert.Equal(t, "us-west-1", val)
}

func TestGetAwsRegionErrorCase(t *testing.T) {
Expand Down
3 changes: 2 additions & 1 deletion lib/config/genconfig/hadeployconfig.go
Original file line number Diff line number Diff line change
Expand Up @@ -608,7 +608,8 @@ func (c *HaDeployConfigGen) PromptPrivatePublicCert(ip, nodeType string) (privat
return
}

fmt.Println("\n")
fmt.Println()
fmt.Println()

privateKey, err = c.Prompt.InputParagraph(msgPriKey)
if err != nil {
Expand Down
244 changes: 244 additions & 0 deletions lib/hardwarecal/hardwarecal.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,244 @@
package hardwarecal

import (
"errors"
"fmt"
"math"
)

// HardwareCalReq carries the user-supplied sizing inputs from which the
// hardware estimates for a Chef Automate HA deployment are derived.
type HardwareCalReq struct {
	// NoOfNodes is the fleet size; it multiplies every per-node report volume.
	NoOfNodes int
	// How many times per day each node produces the given report type.
	FreqCountComplianceScanPerDay int
	FreqCountClientRunPerDay      int
	FreqCountEventFeedPerDay      int

	// DataRetentionDays is how many days of reports are kept in OpenSearch.
	DataRetentionDays int

	// Average size of a single report of each type, in KB.
	ComplianceReportSizeKB int
	ClientRunReportSizeKB  int
	EventFeedSizeKB        int

	// NoOfReplicasInOpenSearch inflates the retained data volume by
	// (1 + replicas) when sizing the OpenSearch tier.
	NoOfReplicasInOpenSearch int
}

// HardwareCalRes is the calculator's output: one machine recommendation per
// tier of a Chef Automate HA deployment.
type HardwareCalRes struct {
	AutomateNode   *Node
	ChefServerNode *Node
	PostgresqlNode *Node
	OpenSearchNode *Node
}

// Node describes one tier's recommendation: how many instances to run and
// the per-instance CPU, RAM, and storage.
type Node struct {
	InstanceCount int
	CpuCount      int
	RamGB         int    // RAM per instance, in GB
	StorageGB     int    // disk per instance, in GB
	Type          string // AWS machine type name, e.g. "m5.large" (see AwsMtypes)
}

// OpenSearchCal sizes the OpenSearch tier from the retained report volume.
// It returns the node count, per-node RAM (GB), and per-node storage (GB),
// or an error when the inputs exceed what a 9-node / 64 GB cluster (or its
// shard budget) can hold.
func (h *HardwareCalReq) OpenSearchCal() (osNodeCount, opensearchRam, osNodeStorage int, err error) {
	// Daily raw ingest in KB: fleet size * per-run report size * runs/day,
	// summed across the three report types.
	dailyComplianceKB := h.NoOfNodes * (h.FreqCountComplianceScanPerDay * h.ComplianceReportSizeKB)
	dailyClientRunKB := h.NoOfNodes * (h.FreqCountClientRunPerDay * h.ClientRunReportSizeKB)
	dailyEventKB := h.NoOfNodes * (h.FreqCountEventFeedPerDay * h.EventFeedSizeKB)
	dailyTotalKB := dailyComplianceKB + dailyClientRunKB + dailyEventKB

	// Total retained data converted KB -> GB (integer division), then
	// inflated by the replica count.
	retainedGB := (dailyTotalKB * h.DataRetentionDays) / 1024 / 1024
	totalDataWithReplicationGB := retainedGB * (1 + h.NoOfReplicasInOpenSearch)

	// Scale the memory:disk ratio by fleet size, clamped to [0.8, 1.0]
	// (fleets of 100k+ nodes use the full ratio).
	scale := float64(h.NoOfNodes) / 100000.0
	if scale > 1 {
		scale = 1
	}
	if scale < 0.8 {
		scale = 0.8
	}
	memoryDiskRatio := 160.0 / scale

	// Try RAM sizes in ascending order until five or fewer data nodes
	// suffice; if even 64 GB needs more than five, the last result stands
	// and is bounds-checked below.
	for _, ram := range []int{8, 16, 32, 64} {
		opensearchRam = ram
		osNodeCount = int(math.Ceil(float64(totalDataWithReplicationGB) / float64(opensearchRam) / memoryDiskRatio))
		if osNodeCount <= 5 {
			break
		}
	}

	// Keep the node count odd (presumably for master quorum — the code only
	// shows the odd/min-3 adjustment), with a floor of three.
	if osNodeCount%2 == 0 {
		osNodeCount++
	}
	if osNodeCount < 3 {
		osNodeCount = 3
	}

	const (
		indexingOverhead = 0.1  // extra space consumed by indexing
		desiredShardSize = 50.0 // target shard size, GB
	)
	shardCount := int(float64(totalDataWithReplicationGB) * (1 + indexingOverhead) / desiredShardSize)

	// Shard budget: nodes * (RAM/2) * 20 — i.e. 20 shards per GB of half the
	// RAM (half-RAM heap is an assumption read off the formula; confirm).
	maxShardCount := osNodeCount * (opensearchRam / 2) * 20
	if shardCount > maxShardCount {
		return 0, 0, 0, errors.New(fmt.Sprint("MaxShardCount:", maxShardCount, " is less than Shard Count: ", shardCount))
	}

	if osNodeCount > 9 {
		return 0, 0, 0, errors.New(fmt.Sprint("opensearch node count needed: ", osNodeCount, ", please see if you can reduce the input requirements so the OpenSearch Nodes needed comes down to max 9 for 64gb ram"))
	}

	// Per-node storage with disk-watermark and margin-of-error headroom,
	// rounded to the nearest 100 GB. NOTE(review): math.Round can round
	// *down* below the computed need — confirm Ceil was not intended.
	diskWatermarkThreshold := 0.15
	marginOfError := 0.1
	totalStorageGB := int(float64(totalDataWithReplicationGB) * (1 + diskWatermarkThreshold + marginOfError))
	osNodeStorage = int(math.Ceil(float64(totalStorageGB) / float64(osNodeCount)))
	osNodeStorage = int(math.Round(float64(osNodeStorage)/100.0) * 100.0)

	return osNodeCount, opensearchRam, osNodeStorage, nil
}

// PostgresqlCal sizes a PostgreSQL node: per-node RAM (GB) and storage (GB).
// Data need is NoOfNodes*20/1024 — presumably 20 MB per node converted to
// GB (the unit is not stated in the code; confirm).
func (h *HardwareCalReq) PostgresqlCal() (pgNodeRam, pgStorage int) {
	needGB := (h.NoOfNodes * 20) / 1024

	// RAM at 2% of the data volume, floored at 8 GB.
	pgNodeRam = int(float64(needGB) * 0.02)
	if pgNodeRam < 8 {
		pgNodeRam = 8
	}

	// 200 GB baseline; anything larger is rounded to the nearest 100 GB.
	// NOTE(review): math.Round can round *down* below needGB — confirm Ceil
	// was not intended.
	pgStorage = 200
	if needGB > pgStorage {
		pgStorage = int(math.Round(float64(needGB)/100.0) * 100.0)
	}
	return pgNodeRam, pgStorage
}

// FrontendCal sizes the front-end (Automate / Chef Server) tier. Each report
// type whose daily frequency is non-zero contributes the whole fleet size to
// the peak concurrency; the tier is then grown from 2 nodes (at most 20
// increments per RAM size) and bumped through 8/16/32 GB RAM until it fits
// in at most 3 nodes. If even 32 GB needs more than 3 nodes, that larger
// count is returned as-is.
func (h *HardwareCalReq) FrontendCal() (frontendNodeCount, frontendRam int) {
	concurrencyNeeded := 0
	if h.FreqCountComplianceScanPerDay != 0 {
		concurrencyNeeded += h.NoOfNodes
	}
	if h.FreqCountClientRunPerDay != 0 {
		concurrencyNeeded += h.NoOfNodes
	}
	if h.FreqCountEventFeedPerDay != 0 {
		concurrencyNeeded += h.NoOfNodes
	}

	// 5 processes per MB of RAM => 5*1024 per GB.
	const processesPerGB = 5 * 1024

	// sizeAt grows the node count from 2, one node per check, until the
	// fleet's process capacity covers the needed concurrency (20 checks max,
	// matching the original bounded loop).
	sizeAt := func(ramGB int) int {
		nodes := 2
		for i := 0; i < 20; i++ {
			if nodes*(ramGB*processesPerGB) < concurrencyNeeded {
				nodes++
			}
		}
		return nodes
	}

	for _, ram := range []int{8, 16, 32} {
		frontendRam = ram
		frontendNodeCount = sizeAt(ram)
		if frontendNodeCount <= 3 {
			break
		}
	}
	return frontendNodeCount, frontendRam
}

// NewCal runs every tier calculator and assembles the complete hardware
// recommendation. The Automate and Chef Server tiers share one front-end
// sizing; PostgreSQL is always a 3-instance cluster; both front-end tiers
// get a fixed 200 GB of storage per instance.
func (h *HardwareCalReq) NewCal() (r *HardwareCalRes, err error) {
	osCount, osRam, osStorage, err := h.OpenSearchCal()
	if err != nil {
		return nil, err
	}
	osNode, err := GetAwsMtype(osRam)
	if err != nil {
		return nil, err
	}
	osNode.InstanceCount = osCount
	osNode.StorageGB = osStorage

	pgRam, pgStorage := h.PostgresqlCal()
	pgNode, err := GetAwsMtype(pgRam)
	if err != nil {
		return nil, err
	}
	pgNode.InstanceCount = 3 // fixed 3-node PostgreSQL cluster
	pgNode.StorageGB = pgStorage

	feCount, feRam := h.FrontendCal()
	a2Node, err := GetAwsMtype(feRam)
	if err != nil {
		return nil, err
	}
	a2Node.InstanceCount = feCount
	a2Node.StorageGB = 200

	csNode, err := GetAwsMtype(feRam)
	if err != nil {
		return nil, err
	}
	csNode.InstanceCount = feCount
	csNode.StorageGB = 200

	return &HardwareCalRes{
		AutomateNode:   a2Node,
		ChefServerNode: csNode,
		PostgresqlNode: pgNode,
		OpenSearchNode: osNode,
	}, nil
}

// AwsMtypes lists the supported AWS m5 machine types in ascending RAM order;
// GetAwsMtype relies on this ordering to pick the smallest sufficient type.
var AwsMtypes = []*Node{
	{CpuCount: 2, RamGB: 8, Type: "m5.large"},
	{CpuCount: 4, RamGB: 16, Type: "m5.xlarge"},
	{CpuCount: 8, RamGB: 32, Type: "m5.2xlarge"},
	{CpuCount: 16, RamGB: 64, Type: "m5.4xlarge"},
}

// GetAwsMtype returns the smallest machine type from AwsMtypes whose RAM is
// at least ramGB, or an error when even the largest listed type is too
// small. A fresh Node copy is returned so callers can set InstanceCount and
// StorageGB without mutating the shared table.
func GetAwsMtype(ramGB int) (*Node, error) {
	for _, candidate := range AwsMtypes {
		if ramGB > candidate.RamGB {
			continue
		}
		return &Node{
			CpuCount: candidate.CpuCount,
			RamGB:    candidate.RamGB,
			Type:     candidate.Type,
		}, nil
	}
	return nil, errors.New("relevant aws machine type not found")
}

0 comments on commit 8b4a146

Please sign in to comment.